Line data Source code
1 0 : // Distributed under the MIT License.
2 : // See LICENSE.txt for details.
3 :
4 : #pragma once
5 :
#include <cstddef>
#include <limits>
#include <optional>
#include <string>
#include <tuple>
#include <unordered_map>
#include <unordered_set>
#include <utility>
#include <variant>
#include <vector>

#include "DataStructures/DataBox/DataBox.hpp"
#include "DataStructures/DataBox/PrefixHelpers.hpp"
#include "DataStructures/DataBox/Tag.hpp"
#include "DataStructures/DataVector.hpp"
#include "DataStructures/Tensor/EagerMath/Magnitude.hpp"
#include "DataStructures/Tensor/Tensor.hpp"
#include "Domain/BlockLogicalCoordinates.hpp"
#include "Domain/Domain.hpp"
#include "Domain/ElementLogicalCoordinates.hpp"
#include "Domain/ElementMap.hpp"
#include "Domain/Structure/ElementId.hpp"
#include "IO/H5/AccessType.hpp"
#include "IO/H5/File.hpp"
#include "IO/H5/TensorData.hpp"
#include "IO/H5/VolumeData.hpp"
#include "IO/Importers/ObservationSelector.hpp"
#include "IO/Importers/Tags.hpp"
#include "NumericalAlgorithms/Interpolation/IrregularInterpolant.hpp"
#include "NumericalAlgorithms/Interpolation/RegularGridInterpolant.hpp"
#include "NumericalAlgorithms/Spectral/LogicalCoordinates.hpp"
#include "Parallel/AlgorithmExecution.hpp"
#include "Parallel/ArrayCollection/IsDgElementCollection.hpp"
#include "Parallel/ArrayComponentId.hpp"
#include "Parallel/ArrayIndex.hpp"
#include "Parallel/GlobalCache.hpp"
#include "Parallel/Invoke.hpp"
#include "Utilities/EqualWithinRoundoff.hpp"
#include "Utilities/ErrorHandling/Assert.hpp"
#include "Utilities/ErrorHandling/CaptureForError.hpp"
#include "Utilities/ErrorHandling/Error.hpp"
#include "Utilities/FileSystem.hpp"
#include "Utilities/Gsl.hpp"
#include "Utilities/Literals.hpp"
#include "Utilities/Overloader.hpp"
#include "Utilities/Requires.hpp"
#include "Utilities/Serialization/Serialize.hpp"
#include "Utilities/TMPL.hpp"
#include "Utilities/TaggedTuple.hpp"
51 :
52 1 : namespace importers {
53 :
54 : /// \cond
55 : template <typename Metavariables>
56 : struct ElementDataReader;
57 : namespace Actions {
58 : template <size_t Dim, typename FieldTagsList, typename ReceiveComponent>
59 : struct ReadAllVolumeDataAndDistribute;
60 : } // namespace Actions
61 : /// \endcond
62 :
63 1 : namespace Tags {
64 : /*!
65 : * \brief Indicates an available tensor field is selected for importing, along
66 : * with the name of the dataset in the volume data file.
67 : *
68 : * Set the value to a dataset name to import the `FieldTag` from that dataset,
69 : * or to `std::nullopt` to skip importing the `FieldTag`. The dataset name
70 : * excludes tensor component suffixes like "_x" or "_xy". These suffixes will be
71 : * added automatically. A sensible value for the dataset name is often
72 : * `db::tag_name<FieldTag>()`, but the user should generally be given the
73 : * opportunity to set the dataset name in the input file.
74 : */
template <typename FieldTag>
struct Selected : db::SimpleTag {
  // `std::nullopt` skips importing the `FieldTag`; otherwise holds the dataset
  // name (without component suffixes like "_x") to read the field from.
  using type = std::optional<std::string>;
};
79 : } // namespace Tags
80 :
81 : namespace detail {
82 :
83 : // Read the single `tensor_name` from the `volume_file`, taking care of suffixes
84 : // like "_x" etc for its components.
85 : template <typename TensorType>
86 : void read_tensor_data(const gsl::not_null<TensorType*> tensor_data,
87 : const std::string& tensor_name,
88 : const h5::VolumeData& volume_file,
89 : const size_t observation_id) {
90 : for (size_t i = 0; i < tensor_data->size(); ++i) {
91 : const auto& tensor_component = volume_file.get_tensor_component(
92 : observation_id, tensor_name + tensor_data->component_suffix(
93 : tensor_data->get_tensor_index(i)));
94 : if (not std::holds_alternative<DataVector>(tensor_component.data)) {
95 : ERROR("The tensor component '"
96 : << tensor_component.name
97 : << "' is not a double-precision DataVector. Reading in "
98 : "single-precision volume data is not supported.");
99 : }
100 : (*tensor_data)[i] = std::get<DataVector>(tensor_component.data);
101 : }
102 : }
103 :
104 : // Read the `selected_fields` from the `volume_file`. Reads the data
105 : // for all elements in the `volume_file` at once. Invoked lazily when data
106 : // for an element in the volume file is needed.
107 : template <typename FieldTagsList>
108 : tuples::tagged_tuple_from_typelist<FieldTagsList> read_tensor_data(
109 : const h5::VolumeData& volume_file, const size_t observation_id,
110 : const tuples::tagged_tuple_from_typelist<
111 : db::wrap_tags_in<Tags::Selected, FieldTagsList>>& selected_fields) {
112 : tuples::tagged_tuple_from_typelist<FieldTagsList> all_tensor_data{};
113 : tmpl::for_each<FieldTagsList>([&all_tensor_data, &volume_file,
114 : &observation_id,
115 : &selected_fields](auto field_tag_v) {
116 : using field_tag = tmpl::type_from<decltype(field_tag_v)>;
117 : const auto& selection = get<Tags::Selected<field_tag>>(selected_fields);
118 : if (not selection.has_value()) {
119 : return;
120 : }
121 : read_tensor_data(make_not_null(&get<field_tag>(all_tensor_data)),
122 : selection.value(), volume_file, observation_id);
123 : });
124 : return all_tensor_data;
125 : }
126 :
127 : // Extract this element's data from the read-in dataset
128 : template <typename FieldTagsList>
129 : tuples::tagged_tuple_from_typelist<FieldTagsList> extract_element_data(
130 : const std::pair<size_t, size_t>& element_data_offset_and_length,
131 : const tuples::tagged_tuple_from_typelist<FieldTagsList>& all_tensor_data,
132 : const tuples::tagged_tuple_from_typelist<
133 : db::wrap_tags_in<Tags::Selected, FieldTagsList>>& selected_fields) {
134 : tuples::tagged_tuple_from_typelist<FieldTagsList> element_data{};
135 : tmpl::for_each<FieldTagsList>(
136 : [&element_data, &offset = element_data_offset_and_length.first,
137 : &num_points = element_data_offset_and_length.second, &all_tensor_data,
138 : &selected_fields](auto field_tag_v) {
139 : using field_tag = tmpl::type_from<decltype(field_tag_v)>;
140 : const auto& selection = get<Tags::Selected<field_tag>>(selected_fields);
141 : if (not selection.has_value()) {
142 : return;
143 : }
144 : auto& element_tensor_data = get<field_tag>(element_data);
145 : // Iterate independent components of the tensor
146 : for (size_t i = 0; i < element_tensor_data.size(); ++i) {
147 : const DataVector& data_tensor_component =
148 : get<field_tag>(all_tensor_data)[i];
149 : DataVector element_tensor_component{num_points};
150 : // Retrieve data from slice of the contigious dataset
151 : for (size_t j = 0; j < element_tensor_component.size(); ++j) {
152 : element_tensor_component[j] = data_tensor_component[offset + j];
153 : }
154 : element_tensor_data[i] = element_tensor_component;
155 : }
156 : });
157 : return element_data;
158 : }
159 :
160 : // Check that the inertial coordinates computed with the given domain are the
161 : // same as the ones passed to this function.
162 : // This is important to avoid hard-to-find bugs where data is loaded
163 : // to the wrong coordinates. For example, if the evolution domain deforms the
164 : // excision surfaces a bit but the initial data doesn't, then it would be wrong
165 : // to load the initial data to the evolution grid without an interpolation.
template <size_t Dim>
void verify_inertial_coordinates(
    const Domain<Dim>& domain, const double time,
    const domain::FunctionsOfTimeMap& functions_of_time,
    const ElementId<Dim>& element_id, const Mesh<Dim>& mesh,
    const tnsr::I<DataVector, Dim, Frame::Inertial>& inertial_coords) {
  // Map this element's logical grid points to inertial coordinates using the
  // given domain, then compare against the coordinates we were handed.
  const auto logical_coords = logical_coordinates(mesh);
  ElementMap<Dim, Frame::Inertial> element_map{
      element_id, domain.blocks()[element_id.block_id()]};
  const auto mapped_inertial_coords =
      element_map(logical_coords, time, functions_of_time);
  // Scale the roundoff tolerance by the largest coordinate magnitude so the
  // comparison remains meaningful for elements far from the origin.
  const double scale = blaze::max(get(magnitude(mapped_inertial_coords)));
  if (not equal_within_roundoff(mapped_inertial_coords, inertial_coords,
                                std::numeric_limits<double>::epsilon() * 100.0,
                                scale)) {
    // Compute the pointwise Euclidean distance between the two coordinate
    // sets so the error report shows how far apart the grids are.
    DataVector diff =
        square(get<0>(inertial_coords) - get<0>(mapped_inertial_coords));
    for (size_t d = 1; d < Dim; ++d) {
      diff += square(inertial_coords.get(d) - mapped_inertial_coords.get(d));
    }
    diff = sqrt(diff);
    const double max_coord_distance = blaze::max(diff);
    CAPTURE_FOR_ERROR(element_id);
    CAPTURE_FOR_ERROR(max_coord_distance);
    CAPTURE_FOR_ERROR(scale);
    ERROR_NO_TRACE(
        "The source and target domain don't match. Set 'ElementsAreIdentical: "
        "False' to enable interpolation between the grids.");
  }
}
196 :
197 : // Interpolate only the `selected_fields` in `source_element_data` to the
198 : // arbitrary `target_logical_coords` (used when elements are not the same)
199 : template <typename FieldTagsList, size_t Dim>
200 : void interpolate_selected_fields(
201 : const gsl::not_null<tuples::tagged_tuple_from_typelist<FieldTagsList>*>
202 : target_element_data,
203 : const tuples::tagged_tuple_from_typelist<FieldTagsList>&
204 : source_element_data,
205 : const Mesh<Dim>& source_mesh,
206 : const tnsr::I<DataVector, Dim, Frame::ElementLogical>&
207 : target_logical_coords,
208 : const std::vector<size_t>& offsets,
209 : const tuples::tagged_tuple_from_typelist<
210 : db::wrap_tags_in<Tags::Selected, FieldTagsList>>& selected_fields) {
211 : const intrp::Irregular<Dim> interpolator{source_mesh, target_logical_coords};
212 : const size_t target_num_points = target_logical_coords.begin()->size();
213 : ASSERT(target_num_points == offsets.size(),
214 : "The number of target points ("
215 : << target_num_points << ") must match the number of offsets ("
216 : << offsets.size() << ").");
217 : DataVector target_tensor_component_buffer{target_num_points};
218 : tmpl::for_each<FieldTagsList>([&source_element_data, &target_element_data,
219 : &interpolator, &target_tensor_component_buffer,
220 : &selected_fields, &offsets](auto field_tag_v) {
221 : using field_tag = tmpl::type_from<decltype(field_tag_v)>;
222 : const auto& selection = get<Tags::Selected<field_tag>>(selected_fields);
223 : if (not selection.has_value()) {
224 : return;
225 : }
226 : const auto& source_tensor_data = get<field_tag>(source_element_data);
227 : auto& target_tensor_data = get<field_tag>(*target_element_data);
228 : // Iterate independent components of the tensor
229 : for (size_t i = 0; i < source_tensor_data.size(); ++i) {
230 : const DataVector& source_tensor_component = source_tensor_data[i];
231 : DataVector& target_tensor_component = target_tensor_data[i];
232 : // Interpolate
233 : interpolator.interpolate(make_not_null(&target_tensor_component_buffer),
234 : source_tensor_component);
235 : // Fill target element data at corresponding offsets
236 : for (size_t j = 0; j < target_tensor_component_buffer.size(); ++j) {
237 : target_tensor_component[offsets[j]] = target_tensor_component_buffer[j];
238 : }
239 : }
240 : });
241 : }
242 :
243 : // Interpolate only the `selected_fields` in `source_element_data` to the
244 : // `target_mesh` (used when elements differ only by p-refinement)
245 : template <typename FieldTagsList, size_t Dim>
246 : void interpolate_selected_fields(
247 : const gsl::not_null<tuples::tagged_tuple_from_typelist<FieldTagsList>*>
248 : target_element_data,
249 : const tuples::tagged_tuple_from_typelist<FieldTagsList>&
250 : source_element_data,
251 : const Mesh<Dim>& source_mesh, const Mesh<Dim>& target_mesh,
252 : const tuples::tagged_tuple_from_typelist<
253 : db::wrap_tags_in<Tags::Selected, FieldTagsList>>& selected_fields) {
254 : const intrp::RegularGrid<Dim> interpolator{source_mesh, target_mesh};
255 : tmpl::for_each<FieldTagsList>([&source_element_data, &target_element_data,
256 : &interpolator,
257 : &selected_fields](auto field_tag_v) {
258 : using field_tag = tmpl::type_from<decltype(field_tag_v)>;
259 : const auto& selection = get<Tags::Selected<field_tag>>(selected_fields);
260 : if (not selection.has_value()) {
261 : return;
262 : }
263 : const auto& source_tensor_data = get<field_tag>(source_element_data);
264 : auto& target_tensor_data = get<field_tag>(*target_element_data);
265 : // Iterate independent components of the tensor
266 : for (size_t i = 0; i < source_tensor_data.size(); ++i) {
267 : const DataVector& source_tensor_component = source_tensor_data[i];
268 : DataVector& target_tensor_component = target_tensor_data[i];
269 : // Interpolate
270 : interpolator.interpolate(make_not_null(&target_tensor_component),
271 : source_tensor_component);
272 : }
273 : });
274 : }
275 :
276 : } // namespace detail
277 :
278 0 : namespace Actions {
279 :
280 : /*!
281 : * \brief Read a volume data file and distribute the data to all registered
282 : * elements, interpolating to the target points if needed.
283 : *
284 : * \note Use this action if you want to quickly load and distribute volume data.
285 : * If you need to beyond that (such as more control over input-file options),
286 : * write a new action and dispatch to
287 : * `importers::Actions::ReadAllVolumeDataAndDistribute`.
288 : *
289 : * \details Invoke this action on the elements of an array parallel component to
290 : * dispatch reading the volume data file specified by options placed in the
291 : * `ImporterOptionsGroup`. The tensors in `FieldTagsList` will be loaded from
292 : * the file and distributed to all elements that have previously registered. Use
293 : * `importers::Actions::RegisterWithElementDataReader` to register the elements
294 : * of the array parallel component in a previous phase.
295 : *
296 : * Note that the volume data file will only be read once per node, triggered by
297 : * the first element that invokes this action. All subsequent invocations of
298 : * this action on the node will do nothing. See
299 : * `importers::Actions::ReadAllVolumeDataAndDistribute` for details.
300 : *
301 : * The data is distributed to the elements using `Parallel::receive_data`. The
302 : * elements can monitor `importers::Tags::VolumeData` in their inbox to wait for
303 : * the data and process it once it's available. We provide the action
304 : * `importers::Actions::ReceiveVolumeData` that waits for the data and moves it
305 : * directly into the DataBox. You can also implement a specialized action that
306 : * might verify and post-process the data before populating the DataBox.
307 : *
308 : * \see Dev guide on \ref dev_guide_importing
309 : */
template <typename ImporterOptionsGroup, typename FieldTagsList>
struct ReadVolumeData {
  // Place the importer options in the global cache so they can be retrieved
  // below and forwarded to the reader component.
  using const_global_cache_tags =
      tmpl::list<Tags::ImporterOptions<ImporterOptionsGroup>>;

  template <typename DbTagsList, typename... InboxTags, typename Metavariables,
            size_t Dim, typename ActionList, typename ParallelComponent>
  static Parallel::iterable_action_return_t apply(
      db::DataBox<DbTagsList>& /*box*/,
      const tuples::TaggedTuple<InboxTags...>& /*inboxes*/,
      Parallel::GlobalCache<Metavariables>& cache,
      const ElementId<Dim>& /*array_index*/, const ActionList /*meta*/,
      const ParallelComponent* const /*meta*/) {
    // Not using `ckLocalBranch` here to make sure the simple action invocation
    // is asynchronous.
    auto& reader_component = Parallel::get_parallel_component<
        importers::ElementDataReader<Metavariables>>(cache);
    // Dispatch the read to the reader component. The trailing `0_st` is the
    // `volume_data_id` identifying this import operation (and the inbox key
    // on the receiving elements).
    Parallel::simple_action<importers::Actions::ReadAllVolumeDataAndDistribute<
        Dim, FieldTagsList, ParallelComponent>>(
        reader_component,
        get<Tags::ImporterOptions<ImporterOptionsGroup>>(cache), 0_st);
    // Never blocks: the element continues its algorithm immediately.
    return {Parallel::AlgorithmExecution::Continue, std::nullopt};
  }
};
334 :
335 : /*!
336 : * \brief Read a volume data file and distribute the data to all registered
337 : * elements, interpolating to the target points if needed.
338 : *
339 : * This action can be invoked on the `importers::ElementDataReader` component
340 : * once all elements have been registered with it. It opens the data file, reads
341 : * the data for each registered element and uses `Parallel::receive_data` to
342 : * distribute the data to the elements. The elements can monitor
343 : * `importers::Tags::VolumeData` in their inbox to wait for the data and process
344 : * it once it's available. You can use `importers::Actions::ReceiveVolumeData`
345 : * to wait for the data and move it directly into the DataBox, or implement a
346 : * specialized action that might verify and post-process the data.
347 : *
348 : * Note that instead of invoking this action directly on the
349 : * `importers::ElementDataReader` component you can invoke the iterable action
350 : * `importers::Actions::ReadVolumeData` on the elements of an array parallel
351 : * component for simple use cases.
352 : *
353 : * - Pass along the following arguments to the simple action invocation:
354 : * - `options`: `importers::ImporterOptions` that specify the H5 files
355 : * with volume data to load.
356 : * - `volume_data_id`: A number (or hash) that identifies this import
357 : * operation. Will also be used to identify the loaded volume data in the
358 : * inbox of the receiving elements.
359 : * - `selected_fields` (optional): See below.
360 : * - The `FieldTagsList` parameter specifies a typelist of tensor tags that
361 : * can be read from the file and provided to each element. The subset of tensors
362 : * that will actually be read and distributed can be selected at runtime with
363 : * the `selected_fields` argument that is passed to this simple action. See
364 : * importers::Tags::Selected for details. By default, all tensors in the
365 : * `FieldTagsList` are selected, and read from datasets named
366 : * `db::tag_name<Tag>() + suffix`, where the `suffix` is empty for scalars, or
367 : * `"_"` followed by the `Tensor::component_name` for each independent tensor
368 : * component.
369 : * - `Parallel::receive_data` is invoked on each registered element of the
370 : * `ReceiveComponent` to populate `importers::Tags::VolumeData` in the element's
371 : * inbox with a `tuples::tagged_tuple_from_typelist<FieldTagsList>` containing
372 : * the tensor data for that element. The `ReceiveComponent` must the the same
373 : * that was encoded into the `Parallel::ArrayComponentId` used to register the
374 : * elements. The `volume_data_id` passed to this action is used as key.
375 : *
376 : * \par Memory consumption
377 : * This action runs once on every node. It reads all volume data files on the
378 : * node, but doesn't keep them all in memory at once. The following items
379 : * contribute primarily to memory consumption and can be reconsidered if we run
380 : * into memory issues:
381 : *
382 : * - `all_tensor_data`: All requested tensor components in the volume data file
383 : * at the specified observation ID. Only data from one volume data file is
384 : * held in memory at any time. Only data from files that overlap with target
385 : * elements on this node are read in.
386 : * - `target_element_data_buffer`: Holds incomplete interpolated data for each
387 : * (target) element that resides on this node. In the worst case, when all
388 : * target elements need data from the last source element in the last volume
389 : * data file, the memory consumption of this buffer can grow to hold all
390 : * requested tensor components on all elements that reside on this node.
391 : * However, elements are erased from this buffer once their interpolated data
392 : * is complete (and sent to the target element), so the memory consumption
393 : * should remain much lower in practice.
394 : *
395 : * \see Dev guide on \ref dev_guide_importing
396 : */
397 : template <size_t Dim, typename FieldTagsList, typename ReceiveComponent>
398 1 : struct ReadAllVolumeDataAndDistribute {
399 : template <typename ParallelComponent, typename DataBox,
400 : typename Metavariables, typename ArrayIndex>
401 0 : static void apply(DataBox& box, Parallel::GlobalCache<Metavariables>& cache,
402 : const ArrayIndex& /*array_index*/,
403 : const ImporterOptions& options, const size_t volume_data_id,
404 : tuples::tagged_tuple_from_typelist<
405 : db::wrap_tags_in<Tags::Selected, FieldTagsList>>
406 : selected_fields = select_all_fields(FieldTagsList{})) {
407 : const bool elements_are_identical =
408 : get<OptionTags::ElementsAreIdentical>(options);
409 :
410 : // Only read and distribute the volume data once
411 : // This action will be invoked by `importers::Actions::ReadVolumeData` from
412 : // every element on the node, but only the first invocation reads the file
413 : // and distributes the data to all elements. Subsequent invocations do
414 : // nothing. The `volume_data_id` identifies whether or not we have already
415 : // read the requested data. Doing this at runtime avoids having to collect
416 : // all data files that will be read in at compile-time to initialize a flag
417 : // in the DataBox for each of them.
418 : const auto& has_read_volume_data =
419 : db::get<Tags::ElementDataAlreadyRead>(box);
420 : if (has_read_volume_data.find(volume_data_id) !=
421 : has_read_volume_data.end()) {
422 : return;
423 : }
424 : db::mutate<Tags::ElementDataAlreadyRead>(
425 : [&volume_data_id](const auto local_has_read_volume_data) {
426 : local_has_read_volume_data->insert(volume_data_id);
427 : },
428 : make_not_null(&box));
429 :
430 : // This is the subset of elements that reside on this node. They have
431 : // registered themselves before. Our job is to fill them with volume data.
432 : std::unordered_set<ElementId<Dim>> target_element_ids{};
433 : for (const auto& target_element : get<Tags::RegisteredElements<Dim>>(box)) {
434 : const auto& element_array_component_id = target_element.first;
435 : const CkArrayIndex& raw_element_index =
436 : element_array_component_id.array_index();
437 : // Check if the parallel component of the registered element matches the
438 : // callback, because it's possible that elements from other components
439 : // with the same index are also registered.
440 : // Since the way the component is encoded in `ArrayComponentId` is
441 : // private to that class, we construct one and compare.
442 : // Can't use Parallel::make_array_component_id here because we need the
443 : // original array_index type, not a CkArrayIndex.
444 : if (element_array_component_id !=
445 : Parallel::ArrayComponentId(
446 : std::add_pointer_t<ReceiveComponent>{nullptr},
447 : raw_element_index)) {
448 : continue;
449 : }
450 : const auto target_element_id =
451 : Parallel::ArrayIndex<ElementId<Dim>>(raw_element_index).get_index();
452 : target_element_ids.insert(target_element_id);
453 : }
454 : if (UNLIKELY(target_element_ids.empty())) {
455 : return;
456 : }
457 :
458 : // Temporary buffer for data on target elements. These variables get filled
459 : // with interpolated data while we're reading in volume files. Once data on
460 : // an element is complete, the data is sent to that element and removed from
461 : // this list.
462 : std::unordered_map<ElementId<Dim>,
463 : tuples::tagged_tuple_from_typelist<FieldTagsList>>
464 : target_element_data_buffer{};
465 : std::unordered_map<ElementId<Dim>, std::vector<size_t>>
466 : all_indices_of_filled_interp_points{};
467 :
468 : // Resolve the file glob
469 : const std::string& file_glob = get<OptionTags::FileGlob>(options);
470 : const std::vector<std::string> file_paths = file_system::glob(file_glob);
471 : if (file_paths.empty()) {
472 : ERROR_NO_TRACE("The file glob '" << file_glob << "' matches no files.");
473 : }
474 :
475 : // Open every file in turn
476 : std::optional<size_t> prev_observation_id{};
477 : double observation_value = std::numeric_limits<double>::signaling_NaN();
478 : std::optional<Domain<Dim>> source_domain{};
479 : domain::FunctionsOfTimeMap source_domain_functions_of_time{};
480 : for (const std::string& file_name : file_paths) {
481 : // Open the volume data file
482 : h5::H5File<h5::AccessType::ReadOnly> h5file(file_name);
483 : constexpr size_t version_number = 0;
484 : const auto& volume_file = h5file.get<h5::VolumeData>(
485 : "/" + get<OptionTags::Subgroup>(options), version_number);
486 :
487 : // Select observation ID
488 : const size_t observation_id = std::visit(
489 : Overloader{
490 : [&volume_file, &options](const double local_obs_value) {
491 : const auto& observation_value_epsilon =
492 : get<OptionTags::ObservationValueEpsilon>(options);
493 : return volume_file.find_observation_id(
494 : local_obs_value, observation_value_epsilon);
495 : },
496 : [&volume_file](const ObservationSelector local_obs_selector) {
497 : const std::vector<size_t> all_observation_ids =
498 : volume_file.list_observation_ids();
499 : switch (local_obs_selector) {
500 : case ObservationSelector::First:
501 : return all_observation_ids.front();
502 : case ObservationSelector::Last:
503 : return all_observation_ids.back();
504 : default:
505 : ERROR("Unknown importers::ObservationSelector: "
506 : << local_obs_selector);
507 : }
508 : }},
509 : get<OptionTags::ObservationValue>(options));
510 : if (prev_observation_id.has_value() and
511 : prev_observation_id.value() != observation_id) {
512 : ERROR("Inconsistent selection of observation ID in file "
513 : << file_name
514 : << ". Make sure all files select the same observation ID.");
515 : }
516 : prev_observation_id = observation_id;
517 : observation_value = volume_file.get_observation_value(observation_id);
518 :
519 : // Memory buffer for the tensor data stored in this file. The data is
520 : // loaded lazily when it is needed. We may find that we can skip loading
521 : // some files because none of their data is needed to fill the elements on
522 : // this node.
523 : std::optional<tuples::tagged_tuple_from_typelist<FieldTagsList>>
524 : all_tensor_data{};
525 :
526 : // Retrieve the information needed to reconstruct which element the data
527 : // belongs to
528 : const auto source_grid_names = volume_file.get_grid_names(observation_id);
529 : const auto source_extents = volume_file.get_extents(observation_id);
530 : const auto source_bases = volume_file.get_bases(observation_id);
531 : const auto source_quadratures =
532 : volume_file.get_quadratures(observation_id);
533 : std::vector<ElementId<Dim>> source_element_ids{};
534 : if (not elements_are_identical) {
535 : // Need to parse all source grid names to element IDs
536 : source_element_ids.reserve(source_grid_names.size());
537 : for (const auto& grid_name : source_grid_names) {
538 : source_element_ids.push_back(ElementId<Dim>(grid_name));
539 : }
540 : }
541 : // Reconstruct domain from volume data file
542 : const std::optional<std::vector<char>> serialized_domain =
543 : volume_file.get_domain(observation_id);
544 : if (serialized_domain.has_value()) {
545 : if (source_domain.has_value()) {
546 : #ifdef SPECTRE_DEBUG
547 : // Check that the domain is the same in all files (only in debug
548 : // mode)
549 : const auto deserialized_domain =
550 : deserialize<Domain<Dim>>(serialized_domain->data());
551 : if (*source_domain != deserialized_domain) {
552 : ERROR_NO_TRACE(
553 : "The domain in all volume files must be the same. Domain in "
554 : "file '"
555 : << file_name << volume_file.subfile_path()
556 : << "' differs from a previously read file.");
557 : }
558 : #endif
559 : } else {
560 : source_domain = deserialize<Domain<Dim>>(serialized_domain->data());
561 : }
562 : } else {
563 : if (elements_are_identical) {
564 : Parallel::printf(
565 : "WARNING: No serialized domain found in file. "
566 : "Verification that elements in the source and target domain "
567 : "match will be skipped.\n");
568 : } else {
569 : ERROR_NO_TRACE("No serialized domain found in file '"
570 : << file_name << volume_file.subfile_path()
571 : << "'. The domain is needed for interpolation.");
572 : }
573 : }
574 : // Reconstruct functions of time from volume data file
575 : if (source_domain_functions_of_time.empty() and
576 : source_domain.has_value() and
577 : alg::any_of(source_domain->blocks(), [](const auto& block) {
578 : return block.is_time_dependent();
579 : })) {
580 : const std::optional<std::vector<char>> serialized_functions_of_time =
581 : volume_file.get_functions_of_time(observation_id);
582 : if (not serialized_functions_of_time.has_value()) {
583 : ERROR_NO_TRACE("No domain functions of time found in file '"
584 : << file_name << volume_file.subfile_path()
585 : << "'. The functions of time are needed for "
586 : "interpolating with time-dependent maps.");
587 : }
588 : source_domain_functions_of_time =
589 : deserialize<domain::FunctionsOfTimeMap>(
590 : serialized_functions_of_time->data());
591 : }
592 :
593 : // Distribute the tensor data to the registered (target) elements. We
594 : // erase target elements when they are complete. This allows us to
595 : // search only for incomplete elements in subsequent volume files, and
596 : // to stop early when all registered elements are complete.
597 : std::unordered_set<ElementId<Dim>> completed_target_elements{};
598 : for (const auto& target_element_id : target_element_ids) {
599 : const auto& [target_points, target_mesh] =
600 : get<Tags::RegisteredElements<Dim>>(box).at(
601 : Parallel::make_array_component_id<ReceiveComponent>(
602 : target_element_id));
603 : const auto target_grid_name = get_output(target_element_id);
604 :
605 : // Proceed with the registered element only if it overlaps with the
606 : // volume file. It's possible that the volume file only contains data
607 : // for a subset of elements, e.g., when each node of a simulation
608 : // wrote volume data for its elements to a separate file.
609 : std::vector<ElementId<Dim>> overlapping_source_element_ids{};
610 : std::unordered_map<ElementId<Dim>, ElementLogicalCoordHolder<Dim>>
611 : source_element_logical_coords{};
612 : if (not elements_are_identical) {
613 : // Transform the target points to block logical coords in the source
614 : // domain
615 : const auto source_block_logical_coords = block_logical_coordinates(
616 : *source_domain, target_points, observation_value,
617 : source_domain_functions_of_time);
618 : // Find the target points in the subset of source elements contained
619 : // in this volume file
620 : source_element_logical_coords = element_logical_coordinates(
621 : source_element_ids, source_block_logical_coords);
622 : overlapping_source_element_ids.reserve(
623 : source_element_logical_coords.size());
624 : for (const auto& source_element_id_and_coords :
625 : source_element_logical_coords) {
626 : overlapping_source_element_ids.push_back(
627 : source_element_id_and_coords.first);
628 : }
629 : } else {
630 : // When elements match we process only volume files that contain the
631 : // exact element
632 : if (std::find(source_grid_names.begin(), source_grid_names.end(),
633 : target_grid_name) == source_grid_names.end()) {
634 : continue;
635 : }
636 : overlapping_source_element_ids.push_back(target_element_id);
637 : }
638 :
639 : // Lazily load the tensor data from the file if needed
640 : if (not overlapping_source_element_ids.empty() and
641 : not all_tensor_data.has_value()) {
642 : all_tensor_data = detail::read_tensor_data<FieldTagsList>(
643 : volume_file, observation_id, selected_fields);
644 : }
645 :
646 : // Iterate over the source elements in this volume file that overlap
647 : // with the target element
648 : for (const auto& source_element_id : overlapping_source_element_ids) {
649 : const auto source_grid_name = get_output(source_element_id);
650 : const auto source_mesh = h5::mesh_for_grid<Dim>(
651 : source_grid_name, source_grid_names, source_extents, source_bases,
652 : source_quadratures);
653 : // Find the data offset that corresponds to this element
654 : const auto element_data_offset_and_length =
655 : h5::offset_and_length_for_grid(source_grid_name,
656 : source_grid_names, source_extents);
657 : // Extract this element's data from the read-in dataset
658 : auto source_element_data =
659 : detail::extract_element_data<FieldTagsList>(
660 : element_data_offset_and_length, *all_tensor_data,
661 : selected_fields);
662 :
663 : if (not elements_are_identical) {
664 : const size_t target_num_points = target_points.begin()->size();
665 :
666 : // Get and resize target buffer
667 : auto& target_element_data =
668 : target_element_data_buffer[target_element_id];
669 : tmpl::for_each<FieldTagsList>([&target_element_data,
670 : &target_num_points,
671 : &selected_fields](auto field_tag_v) {
672 : using field_tag = tmpl::type_from<decltype(field_tag_v)>;
673 : if (get<Tags::Selected<field_tag>>(selected_fields).has_value()) {
674 : for (auto& component : get<field_tag>(target_element_data)) {
675 : component.destructive_resize(target_num_points);
676 : }
677 : }
678 : });
679 : auto& indices_of_filled_interp_points =
680 : all_indices_of_filled_interp_points[target_element_id];
681 :
682 : // Interpolate!
683 : const auto& source_logical_coords_of_target_points =
684 : source_element_logical_coords.at(source_element_id);
685 : detail::interpolate_selected_fields<FieldTagsList>(
686 : make_not_null(&target_element_data), source_element_data,
687 : source_mesh,
688 : source_logical_coords_of_target_points.element_logical_coords,
689 : source_logical_coords_of_target_points.offsets,
690 : selected_fields);
691 : indices_of_filled_interp_points.insert(
692 : indices_of_filled_interp_points.end(),
693 : source_logical_coords_of_target_points.offsets.begin(),
694 : source_logical_coords_of_target_points.offsets.end());
695 :
696 : if (indices_of_filled_interp_points.size() == target_num_points) {
697 : // Pass the (interpolated) data to the element. Now it can
698 : // proceed in parallel with transforming the data, taking
699 : // derivatives on the grid, etc.
700 : if constexpr (Parallel::is_dg_element_collection_v<
701 : ReceiveComponent>) {
702 : ERROR("Can't yet do numerical initial data with nodegroups");
703 : } else {
704 : Parallel::receive_data<Tags::VolumeData<FieldTagsList>>(
705 : Parallel::get_parallel_component<ReceiveComponent>(
706 : cache)[target_element_id],
707 : volume_data_id, std::move(target_element_data));
708 : }
709 : completed_target_elements.insert(target_element_id);
710 : target_element_data_buffer.erase(target_element_id);
711 : all_indices_of_filled_interp_points.erase(target_element_id);
712 : }
713 : } else {
714 : // Source and target element are the same (matching domains and
715 : // same h-refinement), so no interpolation across elements is
716 : // needed. We still may have to interpolate between different
717 : // meshes (p-refinement). First, verify this assumption:
718 : if (source_domain.has_value()) {
719 : detail::verify_inertial_coordinates(
720 : *source_domain, observation_value,
721 : source_domain_functions_of_time, target_element_id,
722 : target_mesh, target_points);
723 : }
724 : tuples::tagged_tuple_from_typelist<FieldTagsList>
725 : target_element_data{};
726 : if (source_mesh == target_mesh) {
727 : target_element_data = std::move(source_element_data);
728 : } else {
729 : detail::interpolate_selected_fields<FieldTagsList>(
730 : make_not_null(&target_element_data), source_element_data,
731 : source_mesh, target_mesh, selected_fields);
732 : }
733 : // Pass data directly to the target element
734 : if constexpr (Parallel::is_dg_element_collection_v<
735 : ReceiveComponent>) {
736 : ERROR("Can't yet do numerical initial data with nodegroups");
737 : } else {
738 : Parallel::receive_data<Tags::VolumeData<FieldTagsList>>(
739 : Parallel::get_parallel_component<ReceiveComponent>(
740 : cache)[target_element_id],
741 : volume_data_id, std::move(target_element_data));
742 : }
743 : completed_target_elements.insert(target_element_id);
744 : }
745 : } // loop over overlapping source elements
746 : } // loop over registered elements
747 : for (const auto& completed_element_id : completed_target_elements) {
748 : target_element_ids.erase(completed_element_id);
749 : }
750 : // Stop early when all target elements are complete
751 : if (target_element_ids.empty()) {
752 : break;
753 : }
754 : } // loop over volume files
755 :
756 : // Have we completed all target elements? If we haven't, the target domain
757 : // probably extends outside the source domain. In that case we report the
758 : // coordinates that couldn't be filled.
759 : if (not target_element_ids.empty()) {
760 : std::unordered_map<ElementId<Dim>, std::vector<std::array<double, Dim>>>
761 : missing_coords{};
762 : size_t num_missing_points = 0;
763 : for (const auto& target_element_id : target_element_ids) {
764 : const auto& target_inertial_coords =
765 : get<Tags::RegisteredElements<Dim>>(box)
766 : .at(Parallel::make_array_component_id<ReceiveComponent>(
767 : target_element_id))
768 : .first;
769 : const size_t target_num_points = target_inertial_coords.begin()->size();
770 : const auto& indices_of_filled_interp_points =
771 : all_indices_of_filled_interp_points[target_element_id];
772 : for (size_t i = 0; i < target_num_points; ++i) {
773 : if (alg::find(indices_of_filled_interp_points, i) ==
774 : indices_of_filled_interp_points.end()) {
775 : missing_coords[target_element_id].push_back(
776 : [&target_inertial_coords, &i]() {
777 : std::array<double, Dim> x{};
778 : for (size_t d = 0; d < Dim; ++d) {
779 : x[d] = target_inertial_coords[d][i];
780 : }
781 : return x;
782 : }());
783 : ++num_missing_points;
784 : }
785 : }
786 : }
787 : ERROR_NO_TRACE("The following " << num_missing_points << " point(s) in "
788 : << missing_coords.size()
789 : << " element(s) could not be filled:\n"
790 : << missing_coords);
791 : }
792 : }
793 :
794 : private:
795 : template <typename... LocalFieldTags>
796 : static tuples::TaggedTuple<Tags::Selected<LocalFieldTags>...>
797 0 : select_all_fields(tmpl::list<LocalFieldTags...> /*meta*/) {
798 : return {db::tag_name<LocalFieldTags>()...};
799 : }
800 : };
801 :
802 : } // namespace Actions
803 : } // namespace importers
|