Line data Source code
1 0 : // Distributed under the MIT License.
2 : // See LICENSE.txt for details.
3 :
4 : #pragma once
5 :
#include <algorithm>
#include <array>
#include <cstddef>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <tuple>
#include <type_traits>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <variant>
#include <vector>
12 :
13 : #include "DataStructures/DataBox/DataBox.hpp"
14 : #include "DataStructures/DataBox/PrefixHelpers.hpp"
15 : #include "DataStructures/DataBox/Tag.hpp"
16 : #include "DataStructures/DataVector.hpp"
17 : #include "DataStructures/Tensor/Tensor.hpp"
18 : #include "Domain/BlockLogicalCoordinates.hpp"
19 : #include "Domain/Domain.hpp"
20 : #include "Domain/ElementLogicalCoordinates.hpp"
21 : #include "Domain/Structure/ElementId.hpp"
22 : #include "IO/H5/AccessType.hpp"
23 : #include "IO/H5/File.hpp"
24 : #include "IO/H5/VolumeData.hpp"
25 : #include "IO/Importers/ObservationSelector.hpp"
26 : #include "IO/Importers/Tags.hpp"
27 : #include "NumericalAlgorithms/Interpolation/IrregularInterpolant.hpp"
28 : #include "NumericalAlgorithms/Spectral/LogicalCoordinates.hpp"
29 : #include "Parallel/AlgorithmExecution.hpp"
30 : #include "Parallel/ArrayComponentId.hpp"
31 : #include "Parallel/ArrayIndex.hpp"
32 : #include "Parallel/GlobalCache.hpp"
33 : #include "Parallel/Invoke.hpp"
34 : #include "Utilities/EqualWithinRoundoff.hpp"
35 : #include "Utilities/ErrorHandling/Assert.hpp"
36 : #include "Utilities/ErrorHandling/Error.hpp"
37 : #include "Utilities/FileSystem.hpp"
38 : #include "Utilities/Gsl.hpp"
39 : #include "Utilities/Literals.hpp"
40 : #include "Utilities/Overloader.hpp"
41 : #include "Utilities/Requires.hpp"
42 : #include "Utilities/Serialization/Serialize.hpp"
43 : #include "Utilities/TMPL.hpp"
44 : #include "Utilities/TaggedTuple.hpp"
45 :
46 1 : namespace importers {
47 :
48 : /// \cond
49 : template <typename Metavariables>
50 : struct ElementDataReader;
51 : namespace Actions {
52 : template <size_t Dim, typename FieldTagsList, typename ReceiveComponent>
53 : struct ReadAllVolumeDataAndDistribute;
54 : } // namespace Actions
55 : /// \endcond
56 :
57 1 : namespace Tags {
58 : /*!
59 : * \brief Indicates an available tensor field is selected for importing, along
60 : * with the name of the dataset in the volume data file.
61 : *
62 : * Set the value to a dataset name to import the `FieldTag` from that dataset,
63 : * or to `std::nullopt` to skip importing the `FieldTag`. The dataset name
64 : * excludes tensor component suffixes like "_x" or "_xy". These suffixes will be
65 : * added automatically. A sensible value for the dataset name is often
66 : * `db::tag_name<FieldTag>()`, but the user should generally be given the
67 : * opportunity to set the dataset name in the input file.
68 : */
template <typename FieldTag>
struct Selected : db::SimpleTag {
  // `std::nullopt` skips importing the field; otherwise holds the dataset
  // name (without component suffixes like "_x") to read the field from.
  using type = std::optional<std::string>;
};
73 : } // namespace Tags
74 :
75 : namespace detail {
76 :
77 : // Read the single `tensor_name` from the `volume_file`, taking care of suffixes
78 : // like "_x" etc for its components.
79 : template <typename TensorType>
80 : void read_tensor_data(const gsl::not_null<TensorType*> tensor_data,
81 : const std::string& tensor_name,
82 : const h5::VolumeData& volume_file,
83 : const size_t observation_id) {
84 : for (size_t i = 0; i < tensor_data->size(); ++i) {
85 : const auto& tensor_component = volume_file.get_tensor_component(
86 : observation_id, tensor_name + tensor_data->component_suffix(
87 : tensor_data->get_tensor_index(i)));
88 : if (not std::holds_alternative<DataVector>(tensor_component.data)) {
89 : ERROR("The tensor component '"
90 : << tensor_component.name
91 : << "' is not a double-precision DataVector. Reading in "
92 : "single-precision volume data is not supported.");
93 : }
94 : (*tensor_data)[i] = std::get<DataVector>(tensor_component.data);
95 : }
96 : }
97 :
98 : // Read the `selected_fields` from the `volume_file`. Reads the data
99 : // for all elements in the `volume_file` at once. Invoked lazily when data
100 : // for an element in the volume file is needed.
101 : template <typename FieldTagsList>
102 : tuples::tagged_tuple_from_typelist<FieldTagsList> read_tensor_data(
103 : const h5::VolumeData& volume_file, const size_t observation_id,
104 : const tuples::tagged_tuple_from_typelist<
105 : db::wrap_tags_in<Tags::Selected, FieldTagsList>>& selected_fields) {
106 : tuples::tagged_tuple_from_typelist<FieldTagsList> all_tensor_data{};
107 : tmpl::for_each<FieldTagsList>([&all_tensor_data, &volume_file,
108 : &observation_id,
109 : &selected_fields](auto field_tag_v) {
110 : using field_tag = tmpl::type_from<decltype(field_tag_v)>;
111 : const auto& selection = get<Tags::Selected<field_tag>>(selected_fields);
112 : if (not selection.has_value()) {
113 : return;
114 : }
115 : read_tensor_data(make_not_null(&get<field_tag>(all_tensor_data)),
116 : selection.value(), volume_file, observation_id);
117 : });
118 : return all_tensor_data;
119 : }
120 :
121 : // Extract this element's data from the read-in dataset
122 : template <typename FieldTagsList>
123 : tuples::tagged_tuple_from_typelist<FieldTagsList> extract_element_data(
124 : const std::pair<size_t, size_t>& element_data_offset_and_length,
125 : const tuples::tagged_tuple_from_typelist<FieldTagsList>& all_tensor_data,
126 : const tuples::tagged_tuple_from_typelist<
127 : db::wrap_tags_in<Tags::Selected, FieldTagsList>>& selected_fields) {
128 : tuples::tagged_tuple_from_typelist<FieldTagsList> element_data{};
129 : tmpl::for_each<FieldTagsList>(
130 : [&element_data, &offset = element_data_offset_and_length.first,
131 : &num_points = element_data_offset_and_length.second, &all_tensor_data,
132 : &selected_fields](auto field_tag_v) {
133 : using field_tag = tmpl::type_from<decltype(field_tag_v)>;
134 : const auto& selection = get<Tags::Selected<field_tag>>(selected_fields);
135 : if (not selection.has_value()) {
136 : return;
137 : }
138 : auto& element_tensor_data = get<field_tag>(element_data);
139 : // Iterate independent components of the tensor
140 : for (size_t i = 0; i < element_tensor_data.size(); ++i) {
141 : const DataVector& data_tensor_component =
142 : get<field_tag>(all_tensor_data)[i];
143 : DataVector element_tensor_component{num_points};
144 : // Retrieve data from slice of the contigious dataset
145 : for (size_t j = 0; j < element_tensor_component.size(); ++j) {
146 : element_tensor_component[j] = data_tensor_component[offset + j];
147 : }
148 : element_tensor_data[i] = element_tensor_component;
149 : }
150 : });
151 : return element_data;
152 : }
153 :
// Check that the source and target points are the same in case interpolation is
// disabled. This is important to avoid hard-to-find bugs where data is loaded
// to the wrong coordinates. For example, if the evolution domain deforms the
// excision surfaces a bit but the initial data doesn't, then it would be wrong
// to load the initial data to the evolution grid without an interpolation.
//
// `source_element_data_offset_and_length` is the (offset, num_points) slice of
// this element's data within the file's contiguous dataset. Raises an
// ERROR_NO_TRACE on the first size mismatch or coordinate that differs by
// more than roundoff.
template <size_t Dim>
void verify_inertial_coordinates(
    const std::pair<size_t, size_t>& source_element_data_offset_and_length,
    const tnsr::I<DataVector, Dim, Frame::Inertial>& source_inertial_coords,
    const tnsr::I<DataVector, Dim, Frame::Inertial>& target_inertial_coords,
    const std::string& grid_name) {
  for (size_t d = 0; d < Dim; ++d) {
    const DataVector& source_coord = source_inertial_coords[d];
    const DataVector& target_coord = target_inertial_coords[d];
    // First check that the number of grid points matches
    if (target_coord.size() != source_element_data_offset_and_length.second) {
      ERROR_NO_TRACE(
          "The source and target coordinates don't match on grid "
          << grid_name << ". The source coordinates stored in the file have "
          << source_element_data_offset_and_length.second
          << " points, but the target grid has " << target_coord.size()
          << " points. Set 'Interpolate: True' to enable interpolation between "
          "the grids.");
    }
    // Then compare the coordinates point by point, up to roundoff
    for (size_t j = 0; j < source_element_data_offset_and_length.second; ++j) {
      if (not equal_within_roundoff(
              target_coord[j],
              source_coord[source_element_data_offset_and_length.first + j])) {
        ERROR_NO_TRACE(
            "The source and target coordinates don't match on grid "
            << grid_name << " in dimension " << d << " at point " << j
            << " (plus offset " << source_element_data_offset_and_length.first
            << " in the data file). Source coordinate: "
            << source_coord[source_element_data_offset_and_length.first + j]
            << ", target coordinate: " << target_coord[j]
            << ". Set 'Interpolate: True' to enable interpolation between the "
            "grids.");
      }
    }
  }
}
194 :
// Interpolate only the `selected_fields` in `source_element_data` to the
// `target_logical_coords`.
//
// The interpolated values are scattered into `*target_element_data` at the
// positions given by `offsets`, so repeated calls with different source
// elements gradually fill up the target element's data. The target tensors
// are assumed to be sized already (see caller).
template <typename FieldTagsList, size_t Dim>
void interpolate_selected_fields(
    const gsl::not_null<tuples::tagged_tuple_from_typelist<FieldTagsList>*>
        target_element_data,
    const tuples::tagged_tuple_from_typelist<FieldTagsList>&
        source_element_data,
    const Mesh<Dim>& source_mesh,
    const tnsr::I<DataVector, Dim, Frame::ElementLogical>&
        target_logical_coords,
    const std::vector<size_t>& offsets,
    const tuples::tagged_tuple_from_typelist<
        db::wrap_tags_in<Tags::Selected, FieldTagsList>>& selected_fields) {
  const intrp::Irregular<Dim> interpolator{source_mesh, target_logical_coords};
  const size_t target_num_points = target_logical_coords.begin()->size();
  ASSERT(target_num_points == offsets.size(),
         "The number of target points ("
             << target_num_points << ") must match the number of offsets ("
             << offsets.size() << ").");
  // Reused for every tensor component to avoid repeated allocations
  DataVector target_tensor_component_buffer{target_num_points};
  tmpl::for_each<FieldTagsList>([&source_element_data, &target_element_data,
                                 &interpolator, &target_tensor_component_buffer,
                                 &selected_fields, &offsets](auto field_tag_v) {
    using field_tag = tmpl::type_from<decltype(field_tag_v)>;
    const auto& selection = get<Tags::Selected<field_tag>>(selected_fields);
    if (not selection.has_value()) {
      // Field was not selected for import, so skip it
      return;
    }
    const auto& source_tensor_data = get<field_tag>(source_element_data);
    auto& target_tensor_data = get<field_tag>(*target_element_data);
    // Iterate independent components of the tensor
    for (size_t i = 0; i < source_tensor_data.size(); ++i) {
      const DataVector& source_tensor_component = source_tensor_data[i];
      DataVector& target_tensor_component = target_tensor_data[i];
      // Interpolate
      interpolator.interpolate(make_not_null(&target_tensor_component_buffer),
                               source_tensor_component);
      // Fill target element data at corresponding offsets
      for (size_t j = 0; j < target_tensor_component_buffer.size(); ++j) {
        target_tensor_component[offsets[j]] = target_tensor_component_buffer[j];
      }
    }
  });
}
240 :
241 : } // namespace detail
242 :
243 0 : namespace Actions {
244 :
245 : /*!
246 : * \brief Read a volume data file and distribute the data to all registered
247 : * elements, interpolating to the target points if needed.
248 : *
249 : * \note Use this action if you want to quickly load and distribute volume data.
 * If you need to go beyond that (such as more control over input-file options),
251 : * write a new action and dispatch to
252 : * `importers::Actions::ReadAllVolumeDataAndDistribute`.
253 : *
254 : * \details Invoke this action on the elements of an array parallel component to
255 : * dispatch reading the volume data file specified by options placed in the
256 : * `ImporterOptionsGroup`. The tensors in `FieldTagsList` will be loaded from
257 : * the file and distributed to all elements that have previously registered. Use
258 : * `importers::Actions::RegisterWithElementDataReader` to register the elements
259 : * of the array parallel component in a previous phase.
260 : *
261 : * Note that the volume data file will only be read once per node, triggered by
262 : * the first element that invokes this action. All subsequent invocations of
263 : * this action on the node will do nothing. See
264 : * `importers::Actions::ReadAllVolumeDataAndDistribute` for details.
265 : *
266 : * The data is distributed to the elements using `Parallel::receive_data`. The
267 : * elements can monitor `importers::Tags::VolumeData` in their inbox to wait for
268 : * the data and process it once it's available. We provide the action
269 : * `importers::Actions::ReceiveVolumeData` that waits for the data and moves it
270 : * directly into the DataBox. You can also implement a specialized action that
271 : * might verify and post-process the data before populating the DataBox.
272 : *
273 : * \see Dev guide on \ref dev_guide_importing
274 : */
template <typename ImporterOptionsGroup, typename FieldTagsList>
struct ReadVolumeData {
  // Request the importer options (file glob, subgroup, observation value,
  // interpolation flag) from the input file via the global cache
  using const_global_cache_tags =
      tmpl::list<Tags::ImporterOptions<ImporterOptionsGroup>>;

  template <typename DbTagsList, typename... InboxTags, typename Metavariables,
            size_t Dim, typename ActionList, typename ParallelComponent>
  static Parallel::iterable_action_return_t apply(
      db::DataBox<DbTagsList>& /*box*/,
      const tuples::TaggedTuple<InboxTags...>& /*inboxes*/,
      Parallel::GlobalCache<Metavariables>& cache,
      const ElementId<Dim>& /*array_index*/, const ActionList /*meta*/,
      const ParallelComponent* const /*meta*/) {
    // Not using `ckLocalBranch` here to make sure the simple action invocation
    // is asynchronous.
    auto& reader_component = Parallel::get_parallel_component<
        importers::ElementDataReader<Metavariables>>(cache);
    // Dispatch to the nodegroup reader component. Only the first invocation
    // per node actually reads the file; the rest are no-ops. The `0_st` is
    // the `volume_data_id` that identifies this import operation.
    Parallel::simple_action<importers::Actions::ReadAllVolumeDataAndDistribute<
        Dim, FieldTagsList, ParallelComponent>>(
        reader_component,
        get<Tags::ImporterOptions<ImporterOptionsGroup>>(cache), 0_st);
    return {Parallel::AlgorithmExecution::Continue, std::nullopt};
  }
};
299 :
300 : /*!
301 : * \brief Read a volume data file and distribute the data to all registered
302 : * elements, interpolating to the target points if needed.
303 : *
304 : * This action can be invoked on the `importers::ElementDataReader` component
305 : * once all elements have been registered with it. It opens the data file, reads
306 : * the data for each registered element and uses `Parallel::receive_data` to
307 : * distribute the data to the elements. The elements can monitor
308 : * `importers::Tags::VolumeData` in their inbox to wait for the data and process
309 : * it once it's available. You can use `importers::Actions::ReceiveVolumeData`
310 : * to wait for the data and move it directly into the DataBox, or implement a
311 : * specialized action that might verify and post-process the data.
312 : *
313 : * Note that instead of invoking this action directly on the
314 : * `importers::ElementDataReader` component you can invoke the iterable action
315 : * `importers::Actions::ReadVolumeData` on the elements of an array parallel
316 : * component for simple use cases.
317 : *
318 : * - Pass along the following arguments to the simple action invocation:
319 : * - `options`: `importers::ImporterOptions` that specify the H5 files
320 : * with volume data to load.
321 : * - `volume_data_id`: A number (or hash) that identifies this import
322 : * operation. Will also be used to identify the loaded volume data in the
323 : * inbox of the receiving elements.
324 : * - `selected_fields` (optional): See below.
325 : * - The `FieldTagsList` parameter specifies a typelist of tensor tags that
326 : * can be read from the file and provided to each element. The subset of tensors
327 : * that will actually be read and distributed can be selected at runtime with
328 : * the `selected_fields` argument that is passed to this simple action. See
329 : * importers::Tags::Selected for details. By default, all tensors in the
330 : * `FieldTagsList` are selected, and read from datasets named
331 : * `db::tag_name<Tag>() + suffix`, where the `suffix` is empty for scalars, or
332 : * `"_"` followed by the `Tensor::component_name` for each independent tensor
333 : * component.
334 : * - `Parallel::receive_data` is invoked on each registered element of the
335 : * `ReceiveComponent` to populate `importers::Tags::VolumeData` in the element's
336 : * inbox with a `tuples::tagged_tuple_from_typelist<FieldTagsList>` containing
 * the tensor data for that element. The `ReceiveComponent` must be the same
338 : * that was encoded into the `Parallel::ArrayComponentId` used to register the
339 : * elements. The `volume_data_id` passed to this action is used as key.
340 : *
341 : * \par Memory consumption
342 : * This action runs once on every node. It reads all volume data files on the
343 : * node, but doesn't keep them all in memory at once. The following items
344 : * contribute primarily to memory consumption and can be reconsidered if we run
345 : * into memory issues:
346 : *
347 : * - `all_tensor_data`: All requested tensor components in the volume data file
348 : * at the specified observation ID. Only data from one volume data file is
349 : * held in memory at any time. Only data from files that overlap with target
350 : * elements on this node are read in.
351 : * - `target_element_data_buffer`: Holds incomplete interpolated data for each
352 : * (target) element that resides on this node. In the worst case, when all
353 : * target elements need data from the last source element in the last volume
354 : * data file, the memory consumption of this buffer can grow to hold all
355 : * requested tensor components on all elements that reside on this node.
356 : * However, elements are erased from this buffer once their interpolated data
357 : * is complete (and sent to the target element), so the memory consumption
358 : * should remain much lower in practice.
359 : *
360 : * \see Dev guide on \ref dev_guide_importing
361 : */
template <size_t Dim, typename FieldTagsList, typename ReceiveComponent>
struct ReadAllVolumeDataAndDistribute {
  template <typename ParallelComponent, typename DataBox,
            typename Metavariables, typename ArrayIndex>
  static void apply(DataBox& box, Parallel::GlobalCache<Metavariables>& cache,
                    const ArrayIndex& /*array_index*/,
                    const ImporterOptions& options, const size_t volume_data_id,
                    tuples::tagged_tuple_from_typelist<
                        db::wrap_tags_in<Tags::Selected, FieldTagsList>>
                        selected_fields = select_all_fields(FieldTagsList{})) {
    const bool enable_interpolation =
        get<OptionTags::EnableInterpolation>(options);

    // Only read and distribute the volume data once
    // This action will be invoked by `importers::Actions::ReadVolumeData` from
    // every element on the node, but only the first invocation reads the file
    // and distributes the data to all elements. Subsequent invocations do
    // nothing. The `volume_data_id` identifies whether or not we have already
    // read the requested data. Doing this at runtime avoids having to collect
    // all data files that will be read in at compile-time to initialize a flag
    // in the DataBox for each of them.
    const auto& has_read_volume_data =
        db::get<Tags::ElementDataAlreadyRead>(box);
    if (has_read_volume_data.find(volume_data_id) !=
        has_read_volume_data.end()) {
      return;
    }
    // Mark this import operation as done before starting it, so re-entrant
    // invocations on this node are no-ops
    db::mutate<Tags::ElementDataAlreadyRead>(
        [&volume_data_id](const auto local_has_read_volume_data) {
          local_has_read_volume_data->insert(volume_data_id);
        },
        make_not_null(&box));

    // This is the subset of elements that reside on this node. They have
    // registered themselves before. Our job is to fill them with volume data.
    std::unordered_set<ElementId<Dim>> target_element_ids{};
    for (const auto& target_element : get<Tags::RegisteredElements<Dim>>(box)) {
      const auto& element_array_component_id = target_element.first;
      const CkArrayIndex& raw_element_index =
          element_array_component_id.array_index();
      // Check if the parallel component of the registered element matches the
      // callback, because it's possible that elements from other components
      // with the same index are also registered.
      // Since the way the component is encoded in `ArrayComponentId` is
      // private to that class, we construct one and compare.
      // Can't use Parallel::make_array_component_id here because we need the
      // original array_index type, not a CkArrayIndex.
      if (element_array_component_id !=
          Parallel::ArrayComponentId(
              std::add_pointer_t<ReceiveComponent>{nullptr},
              raw_element_index)) {
        continue;
      }
      const auto target_element_id =
          Parallel::ArrayIndex<typename ReceiveComponent::array_index>(
              raw_element_index)
              .get_index();
      target_element_ids.insert(target_element_id);
    }
    // Nothing to do if no elements of the receiving component live on this
    // node
    if (UNLIKELY(target_element_ids.empty())) {
      return;
    }

    // Temporary buffer for data on target elements. These variables get filled
    // with interpolated data while we're reading in volume files. Once data on
    // an element is complete, the data is sent to that element and removed from
    // this list.
    std::unordered_map<ElementId<Dim>,
                       tuples::tagged_tuple_from_typelist<FieldTagsList>>
        target_element_data_buffer{};
    std::unordered_map<ElementId<Dim>, std::vector<size_t>>
        all_indices_of_filled_interp_points{};

    // Resolve the file glob
    const std::string& file_glob = get<OptionTags::FileGlob>(options);
    const std::vector<std::string> file_paths = file_system::glob(file_glob);
    if (file_paths.empty()) {
      ERROR_NO_TRACE("The file glob '" << file_glob << "' matches no files.");
    }

    // Open every file in turn
    std::optional<size_t> prev_observation_id{};
    double observation_value = std::numeric_limits<double>::signaling_NaN();
    std::optional<Domain<Dim>> source_domain{};
    std::unordered_map<std::string,
                       std::unique_ptr<domain::FunctionsOfTime::FunctionOfTime>>
        source_domain_functions_of_time{};
    for (const std::string& file_name : file_paths) {
      // Open the volume data file
      h5::H5File<h5::AccessType::ReadOnly> h5file(file_name);
      constexpr size_t version_number = 0;
      const auto& volume_file = h5file.get<h5::VolumeData>(
          "/" + get<OptionTags::Subgroup>(options), version_number);

      // Select observation ID: either resolve a requested observation value
      // to an ID, or take the first/last observation in the file
      const size_t observation_id = std::visit(
          Overloader{
              [&volume_file](const double local_obs_value) {
                return volume_file.find_observation_id(local_obs_value);
              },
              [&volume_file](const ObservationSelector local_obs_selector) {
                const std::vector<size_t> all_observation_ids =
                    volume_file.list_observation_ids();
                switch (local_obs_selector) {
                  case ObservationSelector::First:
                    return all_observation_ids.front();
                  case ObservationSelector::Last:
                    return all_observation_ids.back();
                  default:
                    ERROR("Unknown importers::ObservationSelector: "
                          << local_obs_selector);
                }
              }},
          get<OptionTags::ObservationValue>(options));
      if (prev_observation_id.has_value() and
          prev_observation_id.value() != observation_id) {
        ERROR("Inconsistent selection of observation ID in file "
              << file_name
              << ". Make sure all files select the same observation ID.");
      }
      prev_observation_id = observation_id;
      observation_value = volume_file.get_observation_value(observation_id);

      // Memory buffer for the tensor data stored in this file. The data is
      // loaded lazily when it is needed. We may find that we can skip loading
      // some files because none of their data is needed to fill the elements on
      // this node.
      std::optional<tuples::tagged_tuple_from_typelist<FieldTagsList>>
          all_tensor_data{};
      std::optional<tnsr::I<DataVector, Dim, Frame::Inertial>>
          source_inertial_coords{};

      // Retrieve the information needed to reconstruct which element the data
      // belongs to
      const auto source_grid_names = volume_file.get_grid_names(observation_id);
      const auto source_extents = volume_file.get_extents(observation_id);
      const auto source_bases = volume_file.get_bases(observation_id);
      const auto source_quadratures =
          volume_file.get_quadratures(observation_id);
      std::vector<ElementId<Dim>> source_element_ids{};
      if (enable_interpolation) {
        // Need to parse all source grid names to element IDs only if
        // interpolation is enabled
        source_element_ids.reserve(source_grid_names.size());
        for (const auto& grid_name : source_grid_names) {
          source_element_ids.push_back(ElementId<Dim>(grid_name));
        }
        // Reconstruct domain from volume data file
        const std::optional<std::vector<char>> serialized_domain =
            volume_file.get_domain(observation_id);
        if (not serialized_domain.has_value()) {
          ERROR_NO_TRACE("No serialized domain found in file '"
                         << file_name << volume_file.subfile_path()
                         << "'. The domain is needed for interpolation.");
        }
        if (source_domain.has_value()) {
#ifdef SPECTRE_DEBUG
          // Check that the domain is the same in all files (only in debug mode)
          const auto deserialized_domain =
              deserialize<Domain<Dim>>(serialized_domain->data());
          if (*source_domain != deserialized_domain) {
            ERROR_NO_TRACE(
                "The domain in all volume files must be the same. Domain in "
                "file '"
                << file_name << volume_file.subfile_path()
                << "' differs from a previously read file.");
          }
#endif
        } else {
          source_domain = deserialize<Domain<Dim>>(serialized_domain->data());
        }
        // Reconstruct functions of time from volume data file. Only needed
        // once, and only if any block in the source domain is time-dependent.
        if (source_domain_functions_of_time.empty() and
            alg::any_of(source_domain->blocks(), [](const auto& block) {
              return block.is_time_dependent();
            })) {
          const std::optional<std::vector<char>> serialized_functions_of_time =
              volume_file.get_functions_of_time(observation_id);
          if (not serialized_functions_of_time.has_value()) {
            ERROR_NO_TRACE("No domain functions of time found in file '"
                           << file_name << volume_file.subfile_path()
                           << "'. The functions of time are needed for "
                           "interpolating with time-dependent maps.");
          }
          source_domain_functions_of_time = deserialize<std::unordered_map<
              std::string,
              std::unique_ptr<domain::FunctionsOfTime::FunctionOfTime>>>(
              serialized_functions_of_time->data());
        }
      }

      // Distribute the tensor data to the registered (target) elements. We
      // erase target elements when they are complete. This allows us to search
      // only for incomplete elements in subsequent volume files, and to stop
      // early when all registered elements are complete.
      std::unordered_set<ElementId<Dim>> completed_target_elements{};
      for (const auto& target_element_id : target_element_ids) {
        const auto& target_points = get<Tags::RegisteredElements<Dim>>(box).at(
            Parallel::make_array_component_id<ReceiveComponent>(
                target_element_id));
        const auto target_grid_name = get_output(target_element_id);

        // Proceed with the registered element only if it overlaps with the
        // volume file. It's possible that the volume file only contains data
        // for a subset of elements, e.g., when each node of a simulation wrote
        // volume data for its elements to a separate file.
        std::vector<ElementId<Dim>> overlapping_source_element_ids{};
        std::unordered_map<ElementId<Dim>, ElementLogicalCoordHolder<Dim>>
            source_element_logical_coords{};
        if (enable_interpolation) {
          // Transform the target points to block logical coords in the source
          // domain
          const auto source_block_logical_coords = block_logical_coordinates(
              *source_domain, target_points, observation_value,
              source_domain_functions_of_time);
          // Find the target points in the subset of source elements contained
          // in this volume file
          source_element_logical_coords = element_logical_coordinates(
              source_element_ids, source_block_logical_coords);
          overlapping_source_element_ids.reserve(
              source_element_logical_coords.size());
          for (const auto& source_element_id_and_coords :
               source_element_logical_coords) {
            overlapping_source_element_ids.push_back(
                source_element_id_and_coords.first);
          }
        } else {
          // When interpolation is disabled we process only volume files that
          // contain the exact element
          if (std::find(source_grid_names.begin(), source_grid_names.end(),
                        target_grid_name) == source_grid_names.end()) {
            continue;
          }
          overlapping_source_element_ids.push_back(target_element_id);
        }

        // Lazily load the tensor data from the file if needed
        if (not overlapping_source_element_ids.empty() and
            not all_tensor_data.has_value()) {
          all_tensor_data = detail::read_tensor_data<FieldTagsList>(
              volume_file, observation_id, selected_fields);
        }

        // Iterate over the source elements in this volume file that overlap
        // with the target element
        for (const auto& source_element_id : overlapping_source_element_ids) {
          const auto source_grid_name = get_output(source_element_id);
          // Find the data offset that corresponds to this element
          const auto element_data_offset_and_length =
              h5::offset_and_length_for_grid(source_grid_name,
                                             source_grid_names, source_extents);
          // Extract this element's data from the read-in dataset
          auto source_element_data =
              detail::extract_element_data<FieldTagsList>(
                  element_data_offset_and_length, *all_tensor_data,
                  selected_fields);

          if (enable_interpolation) {
            const auto source_mesh = h5::mesh_for_grid<Dim>(
                source_grid_name, source_grid_names, source_extents,
                source_bases, source_quadratures);
            const size_t target_num_points = target_points.begin()->size();

            // Get and resize target buffer
            auto& target_element_data =
                target_element_data_buffer[target_element_id];
            tmpl::for_each<FieldTagsList>([&target_element_data,
                                           &target_num_points,
                                           &selected_fields](auto field_tag_v) {
              using field_tag = tmpl::type_from<decltype(field_tag_v)>;
              if (get<Tags::Selected<field_tag>>(selected_fields).has_value()) {
                for (auto& component : get<field_tag>(target_element_data)) {
                  component.destructive_resize(target_num_points);
                }
              }
            });
            auto& indices_of_filled_interp_points =
                all_indices_of_filled_interp_points[target_element_id];

            // Interpolate!
            const auto& source_logical_coords_of_target_points =
                source_element_logical_coords.at(source_element_id);
            detail::interpolate_selected_fields<FieldTagsList>(
                make_not_null(&target_element_data), source_element_data,
                source_mesh,
                source_logical_coords_of_target_points.element_logical_coords,
                source_logical_coords_of_target_points.offsets,
                selected_fields);
            // Record which target points this source element has filled
            indices_of_filled_interp_points.insert(
                indices_of_filled_interp_points.end(),
                source_logical_coords_of_target_points.offsets.begin(),
                source_logical_coords_of_target_points.offsets.end());

            if (indices_of_filled_interp_points.size() == target_num_points) {
              // Pass the (interpolated) data to the element. Now it can proceed
              // in parallel with transforming the data, taking derivatives on
              // the grid, etc.
              Parallel::receive_data<Tags::VolumeData<FieldTagsList>>(
                  Parallel::get_parallel_component<ReceiveComponent>(
                      cache)[target_element_id],
                  volume_data_id, std::move(target_element_data));
              completed_target_elements.insert(target_element_id);
              // Free the buffers for this element; it is complete
              target_element_data_buffer.erase(target_element_id);
              all_indices_of_filled_interp_points.erase(target_element_id);
            }
          } else {
            // Verify that the inertial coordinates of the source and target
            // elements match. To do so we retrieve the inertial coordinates
            // that are written alongside the tensor data in the file. This is
            // an important check. It avoids nasty bugs where tensor data is
            // read in to points that don't exactly match the input. Therefore
            // we DON'T restrict this check to Debug mode.
            if (not source_inertial_coords.has_value()) {
              tnsr::I<DataVector, Dim, Frame::Inertial> inertial_coords{};
              detail::read_tensor_data(make_not_null(&inertial_coords),
                                       "InertialCoordinates", volume_file,
                                       observation_id);
              source_inertial_coords = std::move(inertial_coords);
            }
            detail::verify_inertial_coordinates(
                element_data_offset_and_length, *source_inertial_coords,
                target_points, source_grid_name);
            // Pass data directly to the element when interpolation is disabled
            Parallel::receive_data<Tags::VolumeData<FieldTagsList>>(
                Parallel::get_parallel_component<ReceiveComponent>(
                    cache)[target_element_id],
                volume_data_id, std::move(source_element_data));
            completed_target_elements.insert(target_element_id);
          }
        } // loop over overlapping source elements
      } // loop over registered elements
      for (const auto& completed_element_id : completed_target_elements) {
        target_element_ids.erase(completed_element_id);
      }
      // Stop early when all target elements are complete
      if (target_element_ids.empty()) {
        break;
      }
    } // loop over volume files

    // Have we completed all target elements? If we haven't, the target domain
    // probably extends outside the source domain. In that case we report the
    // coordinates that couldn't be filled.
    if (not target_element_ids.empty()) {
      std::unordered_map<ElementId<Dim>, std::vector<std::array<double, Dim>>>
          missing_coords{};
      size_t num_missing_points = 0;
      for (const auto& target_element_id : target_element_ids) {
        const auto& target_inertial_coords =
            get<Tags::RegisteredElements<Dim>>(box).at(
                Parallel::make_array_component_id<ReceiveComponent>(
                    target_element_id));
        const size_t target_num_points = target_inertial_coords.begin()->size();
        const auto& indices_of_filled_interp_points =
            all_indices_of_filled_interp_points[target_element_id];
        for (size_t i = 0; i < target_num_points; ++i) {
          // Collect every target point index that no source element filled
          if (alg::find(indices_of_filled_interp_points, i) ==
              indices_of_filled_interp_points.end()) {
            missing_coords[target_element_id].push_back(
                [&target_inertial_coords, &i]() {
                  std::array<double, Dim> x{};
                  for (size_t d = 0; d < Dim; ++d) {
                    x[d] = target_inertial_coords[d][i];
                  }
                  return x;
                }());
            ++num_missing_points;
          }
        }
      }
      ERROR_NO_TRACE("The following " << num_missing_points << " point(s) in "
                                      << missing_coords.size()
                                      << " element(s) could not be filled:\n"
                                      << missing_coords);
    }
  }

 private:
  // Default for `selected_fields`: select every field in `FieldTagsList` and
  // read each from the dataset named `db::tag_name<FieldTag>()`.
  template <typename... LocalFieldTags>
  static tuples::TaggedTuple<Tags::Selected<LocalFieldTags>...>
  select_all_fields(tmpl::list<LocalFieldTags...> /*meta*/) {
    return {db::tag_name<LocalFieldTags>()...};
  }
};
746 :
747 : } // namespace Actions
748 : } // namespace importers
|