template <int dim, int spacedim>
Triangulation<dim, spacedim>::Triangulation(const MPI_Comm mpi_communicator)
  : parallel::DistributedTriangulationBase<dim, spacedim>(mpi_communicator)
  , settings(TriangulationDescription::Settings::default_setting)
  , partitioner([](dealii::Triangulation<dim, spacedim> &tria,
                   const unsigned int n_partitions) {
      GridTools::partition_triangulation_zorder(n_partitions, tria);
    })
  , currently_processing_create_triangulation_for_internal_usage(false)
  , currently_processing_prepare_coarsening_and_refinement_for_internal_usage(
      false)
{}
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::create_triangulation(
  const TriangulationDescription::Description<dim, spacedim>
    &construction_data)
{
  // The description must have been created for the communicator of this
  // triangulation.
  Assert(construction_data.comm == this->mpi_communicator,
         ExcMessage("MPI communicators do not match!"));

  // Store the settings internally.
  settings = construction_data.settings;

  // Set the mesh smoothing; constructing a multigrid hierarchy requires
  // limiting the level difference at vertices.
  if (settings &
      TriangulationDescription::Settings::construct_multigrid_hierarchy)
    this->set_mesh_smoothing(
      static_cast<typename dealii::Triangulation<dim, spacedim>::MeshSmoothing>(
        dealii::Triangulation<dim, spacedim>::none |
        Triangulation<dim, spacedim>::limit_level_difference_at_vertices));
  else
    this->set_mesh_smoothing(
      static_cast<typename dealii::Triangulation<dim, spacedim>::MeshSmoothing>(
        dealii::Triangulation<dim, spacedim>::none));

  if (construction_data.coarse_cell_vertices.empty())
    {
      // This process owns no cells: create a dummy coarse cell and mark it
      // as artificial.
      // ...
      auto cell = this->begin();
      cell->set_subdomain_id(dealii::numbers::artificial_subdomain_id);
      cell->set_level_subdomain_id(dealii::numbers::artificial_subdomain_id);
      // ...
    }
  else
    {
      // 1) Store the `coarse-cell index -> coarse-cell id` mapping ...
      this->coarse_cell_index_to_coarse_cell_id_vector =
        construction_data.coarse_cell_index_to_coarse_cell_id;

      // 2) ... and build the inverse `id -> index` mapping, sorted by id.
      std::map<types::coarse_cell_id, unsigned int>
        coarse_cell_id_to_coarse_cell_index;
      for (unsigned int i = 0;
           i < construction_data.coarse_cell_index_to_coarse_cell_id.size();
           ++i)
        coarse_cell_id_to_coarse_cell_index
          [construction_data.coarse_cell_index_to_coarse_cell_id[i]] = i;
      for (const auto &i : coarse_cell_id_to_coarse_cell_index)
        this->coarse_cell_id_to_coarse_cell_index_vector.emplace_back(i);

      // 3) Create the local triangulation from the description.
      // ...

      // Work on a copy of cell_infos so that it can be sorted.
      auto cell_infos = construction_data.cell_infos;

      // Sort the cells of each level by coarse-cell index and, within the
      // same coarse cell, by CellId.
      for (auto &cell_info : cell_infos)
        std::sort(cell_info.begin(),
                  cell_info.end(),
                  [&](const auto &a, const auto &b) {
                    const CellId a_id(a.id);
                    const CellId b_id(b.id);
                    const auto a_coarse_cell_index =
                      this->coarse_cell_id_to_coarse_cell_index(
                        a_id.get_coarse_cell_id());
                    const auto b_coarse_cell_index =
                      this->coarse_cell_id_to_coarse_cell_index(
                        b_id.get_coarse_cell_id());
                    if (a_coarse_cell_index != b_coarse_cell_index)
                      return a_coarse_cell_index < b_coarse_cell_index;
                    else
                      return a_id < b_id;
                  });

      // 4a) Mark all cells as artificial ...
      for (const auto &cell : this->cell_iterators())
        {
          if (cell->is_active())
            cell->set_subdomain_id(dealii::numbers::artificial_subdomain_id);
          cell->set_level_subdomain_id(
            dealii::numbers::artificial_subdomain_id);
        }

      // 4b) ... then set the actual (level-)subdomain ids, level by level.
      for (unsigned int level = 0;
           level < cell_infos.size() && !cell_infos[level].empty();
           ++level)
        {
          auto cell      = this->begin(level);
          auto cell_info = cell_infos[level].begin();
          for (; cell_info != cell_infos[level].end(); ++cell_info)
            {
              // Advance to the cell with the matching id.
              while (cell_info->id != cell->id().template to_binary<dim>())
                ++cell;
              if (cell->is_active())
                cell->set_subdomain_id(cell_info->subdomain_id);
              if (settings & TriangulationDescription::Settings::
                               construct_multigrid_hierarchy)
                cell->set_level_subdomain_id(cell_info->level_subdomain_id);
            }
        }
    }
  // ...
}
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::create_triangulation(
  const std::vector<Point<spacedim>>       &vertices,
  const std::vector<dealii::CellData<dim>> &cells,
  const SubCellData                        &subcelldata)
{
  Assert(
    currently_processing_create_triangulation_for_internal_usage,
    ExcMessage(
      "You have called the method parallel::fullydistributed::Triangulation::create_triangulation() \n"
      "that takes 3 arguments. If you have not called this function directly, \n"
      "it might have been called via a function from the GridGenerator or GridIn \n"
      "namespace. To be able to set up a fully-distributed Triangulation with these \n"
      "utility functions nevertheless, please follow these three steps:\n"
      "  1) call the utility function for a (serial) Triangulation, \n"
      "     a parallel::shared::Triangulation, or a parallel::distributed::Triangulation object,\n"
      "  2) use the functions TriangulationDescription::Utilities::create_description_from_triangulation() \n"
      "     or ::create_description_from_triangulation_in_groups() to create the \n"
      "     description of the local partition, and\n"
      "  3) pass the created description to parallel::fullydistributed::Triangulation::create_triangulation()."));

  dealii::Triangulation<dim, spacedim>::create_triangulation(vertices,
                                                             cells,
                                                             subcelldata);
}
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::copy_triangulation(
  const dealii::Triangulation<dim, spacedim> &other_tria)
{
  // Pointer to the triangulation from which the construction data will be
  // created; for a serial input triangulation a partitioned copy is used
  // instead.
  const dealii::Triangulation<dim, spacedim> *other_tria_ptr = &other_tria;

  // ...
  // If the input is not a parallel triangulation, copy it into a temporary
  // serial triangulation and partition that copy.
  if (dynamic_cast<const dealii::parallel::TriangulationBase<dim, spacedim> *>(
        &other_tria) == nullptr)
    {
      // ...copy and partition serial_tria...
      other_tria_ptr = &serial_tria;
    }
  // ...create the construction data from *other_tria_ptr and pass it to
  // create_triangulation()...
}
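With the headers from the sketch above, importing a serial mesh directly therefore reads as follows (mesh and refinement depth are again illustrative):

Triangulation<2> serial_tria;
GridGenerator::hyper_cube(serial_tria);
serial_tria.refine_global(2);

parallel::fullydistributed::Triangulation<2> tria(MPI_COMM_WORLD);
tria.copy_triangulation(serial_tria); // partitions and distributes internally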
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::set_partitioner(
  const std::function<void(dealii::Triangulation<dim, spacedim> &,
                           const unsigned int)> &partitioner,
  const TriangulationDescription::Settings     &settings)
{
  this->partitioner = partitioner;
  this->settings    = settings;
}

template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::set_partitioner(
  const RepartitioningPolicyTools::Base<dim, spacedim> &partitioner,
  const TriangulationDescription::Settings             &settings)
{
  this->partitioner_distributed = &partitioner;
  this->settings                = settings;
}

template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::repartition()
{
  this->signals.pre_distributed_repartition();
  // ...re-create the triangulation from a description generated with the
  // distributed partitioner...
  this->signals.post_distributed_repartition();
}
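A custom partitioning callback can be registered before copy_triangulation(). A sketch, assuming a 2d mesh and that GridTools::partition_triangulation() (METIS-based by default, so deal.II must be configured with METIS) is an acceptable policy:

parallel::fullydistributed::Triangulation<2> tria(MPI_COMM_WORLD);

tria.set_partitioner(
  [](Triangulation<2> &tria_to_partition, const unsigned int n_partitions) {
    GridTools::partition_triangulation(n_partitions, tria_to_partition);
  },
  TriangulationDescription::Settings::default_setting);

// Any subsequent copy_triangulation() from a serial mesh now uses the
// lambda above instead of the default z-order partitioner.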
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::execute_coarsening_and_refinement()
{
  DEAL_II_NOT_IMPLEMENTED();
}

template <int dim, int spacedim>
bool
Triangulation<dim, spacedim>::prepare_coarsening_and_refinement()
{
  Assert(
    currently_processing_prepare_coarsening_and_refinement_for_internal_usage,
    ExcMessage("No coarsening and refinement is supported!"));

  return dealii::Triangulation<dim, spacedim>::
    prepare_coarsening_and_refinement();
}
template <int dim, int spacedim>
std::size_t
Triangulation<dim, spacedim>::memory_consumption() const
{
  const std::size_t mem =
    this->dealii::parallel::TriangulationBase<dim, spacedim>::
      memory_consumption() +
    MemoryConsumption::memory_consumption(
      coarse_cell_id_to_coarse_cell_index_vector) +
    MemoryConsumption::memory_consumption(
      coarse_cell_index_to_coarse_cell_id_vector);
  return mem;
}

template <int dim, int spacedim>
bool
Triangulation<dim, spacedim>::is_multilevel_hierarchy_constructed() const
{
  return settings &
         TriangulationDescription::Settings::construct_multigrid_hierarchy;
}

template <int dim, int spacedim>
unsigned int
Triangulation<dim, spacedim>::coarse_cell_id_to_coarse_cell_index(
  const types::coarse_cell_id coarse_cell_id) const
{
  // Binary search in the vector of (id, index) pairs, which is sorted by id.
  const auto coarse_cell_index = std::lower_bound(
    coarse_cell_id_to_coarse_cell_index_vector.begin(),
    coarse_cell_id_to_coarse_cell_index_vector.end(),
    coarse_cell_id,
    [](const std::pair<types::coarse_cell_id, unsigned int> &pair,
       const types::coarse_cell_id &val) { return pair.first < val; });
  if (coarse_cell_index !=
      coarse_cell_id_to_coarse_cell_index_vector.cend())
    return coarse_cell_index->second;
  else
    Assert(false, ExcMessage("Coarse cell index not found!"));
  return numbers::invalid_unsigned_int;
}
template <int dim, int spacedim>
types::coarse_cell_id
Triangulation<dim, spacedim>::coarse_cell_index_to_coarse_cell_id(
  const unsigned int coarse_cell_index) const
{
  AssertIndexRange(coarse_cell_index,
                   coarse_cell_index_to_coarse_cell_id_vector.size());

  const auto coarse_cell_id =
    coarse_cell_index_to_coarse_cell_id_vector[coarse_cell_index];
  AssertThrow(coarse_cell_id != numbers::invalid_coarse_cell_id,
              ExcMessage("You are trying to access a dummy cell!"));
  return coarse_cell_id;
}
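The two lookups are mutually inverse for every locally relevant coarse cell. As a usage sketch (tria and the non-artificial cell iterator cell are assumed to exist):

const types::coarse_cell_id id    = cell->id().get_coarse_cell_id();
const unsigned int          index = tria.coarse_cell_id_to_coarse_cell_index(id);
Assert(tria.coarse_cell_index_to_coarse_cell_id(index) == id,
       ExcInternalError());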
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::update_cell_relations()
{
  // Rebuild the list of locally owned active cells; in a fully distributed
  // triangulation all of them persist.
  this->local_cell_relations.clear();
  for (const auto &cell : this->active_cell_iterators())
    if (cell->is_locally_owned())
      this->local_cell_relations.emplace_back(
        cell, CellStatus::cell_will_persist);
}
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::save(const std::string &filename) const
{
#ifdef DEAL_II_WITH_MPI
  Assert(this->cell_attached_data.n_attached_deserialize == 0,
         ExcMessage("Not all SolutionTransfer objects have been deserialized "
                    "after the last call to load()."));
  Assert(this->n_cells() > 0,
         ExcMessage("Can not save() an empty Triangulation."));

  const unsigned int myrank =
    Utilities::MPI::this_mpi_process(this->mpi_communicator);
  const unsigned int mpisize =
    Utilities::MPI::n_mpi_processes(this->mpi_communicator);

  // Global index of the first locally owned cell: an exclusive prefix sum
  // over the number of locally owned cells.
  unsigned int global_first_cell     = 0;
  unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();
  int ierr = MPI_Exscan(&n_locally_owned_cells, &global_first_cell, 1,
                        MPI_UNSIGNED, MPI_SUM, this->mpi_communicator);
  AssertThrowMPI(ierr);
  global_first_cell *= sizeof(unsigned int);

  // Rank 0 writes a small human-readable header file.
  if (myrank == 0)
    {
      std::string   fname = std::string(filename) + ".info";
      std::ofstream f(fname);
      f << "version nproc n_attached_fixed_size_objs n_attached_variable_size_objs n_global_active_cells"
        << std::endl;
      // ...followed by the corresponding values...
    }

  // ...save the attached data...

  // Collectively open (and truncate) the data file with MPI I/O.
  MPI_Info info;
  ierr = MPI_Info_create(&info);
  AssertThrowMPI(ierr);
  const std::string fname_tria = filename + "_triangulation.data";
  MPI_File          fh;
  ierr = MPI_File_open(this->mpi_communicator, fname_tria.c_str(),
                       MPI_MODE_CREATE | MPI_MODE_WRONLY, info, &fh);
  AssertThrowMPI(ierr);
  ierr = MPI_File_set_size(fh, 0); // delete the file contents
  AssertThrowMPI(ierr);
  // ...barrier, so that nobody writes before the file has been truncated...
  ierr = MPI_Info_free(&info);
  AssertThrowMPI(ierr);

  // Pack the description of the local partition into a buffer.
  std::vector<char> buffer;
  // ...
  const std::uint64_t buffer_size = buffer.size();

  // An exclusive prefix sum over the buffer sizes yields this rank's offset.
  std::uint64_t offset = 0;
  // ...

  // Each rank writes its buffer size into a table at the head of the file...
  ierr = MPI_File_write_at(fh, myrank * sizeof(std::uint64_t), &buffer_size,
                           1,
                           Utilities::MPI::mpi_type_id_for_type<std::uint64_t>,
                           MPI_STATUS_IGNORE);
  AssertThrowMPI(ierr);

  // ...and its payload behind that table.
  const std::uint64_t global_position =
    mpisize * sizeof(std::uint64_t) + offset;
  // ...write buffer at global_position...

  ierr = MPI_File_close(&fh);
  AssertThrowMPI(ierr);
#else
  (void)filename;
  AssertThrow(false, ExcNeedsMPI());
#endif
}
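The _triangulation.data file thus consists of a table of mpisize 64-bit payload sizes followed by the concatenated per-rank payloads. As a reading aid, a hypothetical helper (not part of deal.II) that computes the byte position of rank r's payload from that table:

#include <cstdint>
#include <vector>

// Hypothetical helper: byte offset of rank r's payload in the data file,
// given the size table that save() writes at the head of the file.
std::uint64_t payload_position(const std::vector<std::uint64_t> &sizes,
                               const unsigned int                r)
{
  std::uint64_t pos = sizes.size() * sizeof(std::uint64_t); // skip the table
  for (unsigned int i = 0; i < r; ++i)                      // exclusive
    pos += sizes[i];                                        // prefix sum
  return pos;
}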
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::load(const std::string &filename)
{
#ifdef DEAL_II_WITH_MPI
  Assert(this->n_cells() == 0,
         ExcMessage("load() only works if the Triangulation is empty!"));

  // Read the header file written by save().
  unsigned int version, numcpus, attached_count_fixed,
    attached_count_variable, n_global_active_cells;
  {
    std::string   fname = std::string(filename) + ".info";
    std::ifstream f(fname);
    AssertThrow(f.fail() == false, ExcIO());
    std::string firstline;
    getline(f, firstline); // skip the description line
    f >> version >> numcpus >> attached_count_fixed >>
      attached_count_variable >> n_global_active_cells;
  }

  AssertThrow(version == 4,
              ExcMessage("Incompatible version found in .info file."));
  // ...

  const unsigned int myrank =
    Utilities::MPI::this_mpi_process(this->mpi_communicator);
  const unsigned int mpisize =
    Utilities::MPI::n_mpi_processes(this->mpi_communicator);

  // Collectively open the data file with MPI I/O.
  MPI_Info info;
  int ierr = MPI_Info_create(&info);
  AssertThrowMPI(ierr);
  const std::string fname_tria = filename + "_triangulation.data";
  MPI_File          fh;
  ierr = MPI_File_open(this->mpi_communicator, fname_tria.c_str(),
                       MPI_MODE_RDONLY, info, &fh);
  AssertThrowMPI(ierr);
  ierr = MPI_Info_free(&info);
  AssertThrowMPI(ierr);

  // Read the size of the local buffer from the table at the head of the
  // file...
  std::uint64_t buffer_size;
  ierr = MPI_File_read_at(fh, myrank * sizeof(std::uint64_t), &buffer_size,
                          1,
                          Utilities::MPI::mpi_type_id_for_type<std::uint64_t>,
                          MPI_STATUS_IGNORE);
  AssertThrowMPI(ierr);

  // ...compute this rank's offset by an exclusive prefix sum over the
  // buffer sizes...
  std::uint64_t offset = 0;
  // ...

  // ...and read the payload from behind the size table.
  const std::uint64_t global_position =
    mpisize * sizeof(std::uint64_t) + offset;
  std::vector<char> buffer(buffer_size);
  // ...read buffer at global_position...

  ierr = MPI_File_close(&fh);
  AssertThrowMPI(ierr);

  // Unpack the description and re-create the triangulation.
  auto construction_data = dealii::Utilities::unpack<
    TriangulationDescription::Description<dim, spacedim>>(buffer, false);
  construction_data.comm = this->mpi_communicator;
  this->create_triangulation(construction_data);

  // Global index of the first locally owned cell, as in save().
  unsigned int global_first_cell     = 0;
  unsigned int n_locally_owned_cells = this->n_locally_owned_active_cells();
  ierr = MPI_Exscan(&n_locally_owned_cells, &global_first_cell, 1,
                    MPI_UNSIGNED, MPI_SUM, this->mpi_communicator);
  AssertThrowMPI(ierr);
  global_first_cell *= sizeof(unsigned int);

  AssertThrow(this->n_global_active_cells() == n_global_active_cells,
              ExcMessage("Number of global active cells differ!"));

  // Finally, load the attached data.
  this->cell_attached_data.n_attached_deserialize =
    attached_count_fixed + attached_count_variable;
  this->load_attached_data(global_first_cell,
                           this->n_global_active_cells(),
                           this->n_locally_owned_active_cells(),
                           filename,
                           attached_count_fixed,
                           attached_count_variable);
  // ...
#else
  (void)filename;
  AssertThrow(false, ExcNeedsMPI());
#endif
}
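save() and load() together provide checkpoint/restart. A minimal sketch (the file base name "checkpoint" is illustrative; since the per-rank payloads are indexed by rank, the mesh should be re-loaded on the same number of ranks that wrote it):

// Writing a checkpoint (collective over the communicator of tria):
tria.save("checkpoint");
// -> creates "checkpoint.info" and "checkpoint_triangulation.data"

// Restarting, e.g. in a new program run:
parallel::fullydistributed::Triangulation<2> tria2(MPI_COMM_WORLD);
tria2.load("checkpoint");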
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::load(const std::string &filename,
                                   const bool         autopartition)
{
  (void)autopartition;
  this->load(filename);
}
template <int dim, int spacedim>
void
Triangulation<dim, spacedim>::update_number_cache()
{
  dealii::parallel::TriangulationBase<dim, spacedim>::update_number_cache();

  // One past the maximum coarse-cell id of any non-artificial cell,
  // reduced over all ranks.
  types::coarse_cell_id number_of_global_coarse_cells = 0;
  for (const auto &cell : this->active_cell_iterators())
    if (!cell->is_artificial())
      number_of_global_coarse_cells =
        std::max(number_of_global_coarse_cells,
                 cell->id().get_coarse_cell_id());
  number_of_global_coarse_cells =
    Utilities::MPI::max(number_of_global_coarse_cells,
                        this->mpi_communicator) +
    1;
  this->number_cache.number_of_global_coarse_cells =
    number_of_global_coarse_cells;
}
#include "fully_distributed_tria.inst"