#include <deal.II/base/vectorization.h>
#include <deal.II/lac/sparse_matrix.h>

#include <limits>
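/*
 * Default constructor: creates an empty SIMD sparsity pattern that is
 * associated with the trivial communicator MPI_COMM_SELF until reinit()
 * is called with a proper partitioner.
 */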
template <int simd_length>
SparsityPatternSIMD<simd_length>::SparsityPatternSIMD()
    : n_internal_dofs(0)
    , mpi_communicator(MPI_COMM_SELF)
{
}
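/*
 * Convenience constructor: takes the same arguments as reinit() and simply
 * forwards to it.
 */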
template <int simd_length>
SparsityPatternSIMD<simd_length>::SparsityPatternSIMD(
    const unsigned int n_internal_dofs,
    const dealii::DynamicSparsityPattern &sparsity,
    const std::shared_ptr<const dealii::Utilities::MPI::Partitioner>
        &partitioner)
    : n_internal_dofs(0)
    , mpi_communicator(MPI_COMM_SELF)
{
  reinit(n_internal_dofs, sparsity, partitioner);
}
template <int simd_length>
void SparsityPatternSIMD<simd_length>::reinit(
    const unsigned int n_internal_dofs,
    const dealii::DynamicSparsityPattern &dsp,
    const std::shared_ptr<const dealii::Utilities::MPI::Partitioner>
        &partitioner)
{
  this->mpi_communicator = partitioner->get_mpi_communicator();

  this->n_internal_dofs = n_internal_dofs;
  this->n_locally_owned_dofs = partitioner->locally_owned_size();
  this->partitioner = partitioner;

  const auto n_locally_relevant_dofs =
      partitioner->locally_owned_size() + partitioner->n_ghost_indices();
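  /*
   * Create a restricted sparsity pattern in local index space that contains
   * only the locally owned rows (plus, for every coupling to a ghost row,
   * the transposed entry), so that all locally relevant entries can be
   * addressed with local indices.
   */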
  dealii::DynamicSparsityPattern dsp_minimal(n_locally_relevant_dofs,
                                             n_locally_relevant_dofs);
  for (unsigned int i = 0; i < n_locally_owned_dofs; ++i) {
    const auto global_row = partitioner->local_to_global(i);
    for (auto it = dsp.begin(global_row); it != dsp.end(global_row); ++it) {
      const auto global_column = it->column();
      const auto j = partitioner->global_to_local(global_column);
      dsp_minimal.add(i, j);
      if (j >= n_locally_owned_dofs) {
        Assert(j < n_locally_relevant_dofs, dealii::ExcInternalError());
        dsp_minimal.add(j, i);
      }
    }
  }
  dealii::SparsityPattern sparsity;
  sparsity.copy_from(dsp_minimal);

  Assert(n_internal_dofs <= sparsity.n_rows(), dealii::ExcInternalError());
  Assert(n_internal_dofs % simd_length == 0, dealii::ExcInternalError());
  Assert(n_internal_dofs <= n_locally_owned_dofs, dealii::ExcInternalError());
  Assert(n_locally_owned_dofs <= sparsity.n_rows(),
         dealii::ExcInternalError());
  row_starts.resize_fast(sparsity.n_rows() + 1);
  column_indices.resize_fast(sparsity.n_nonzero_elements());
  indices_transposed.resize_fast(sparsity.n_nonzero_elements());
  AssertThrow(sparsity.n_nonzero_elements() <
                  std::numeric_limits<unsigned int>::max(),
              dealii::ExcMessage("Transposed indices only support up to 4 "
                                 "billion matrix entries per MPI rank. Try to "
                                 "split into smaller problems with MPI"));
  unsigned int *col_ptr = column_indices.data();
  unsigned int *transposed_ptr = indices_transposed.data();
  for (unsigned int i = 0; i < n_internal_dofs; i += simd_length) {
    auto jts = generate_iterators<simd_length>(
        [&](auto k) { return sparsity.begin(i + k); });

    for (; jts[0] != sparsity.end(i); increment_iterators(jts))
      for (unsigned int k = 0; k < simd_length; ++k) {
        const unsigned int column = jts[k]->column();
        *col_ptr++ = column;

        const std::size_t position = sparsity(column, i + k);
        if (column < n_internal_dofs) {
          const unsigned int my_row_length = sparsity.row_length(column);
          const std::size_t position_diag = sparsity(column, column);
          const std::size_t pos_within_row = position - position_diag;
          const unsigned int simd_offset = column % simd_length;
          *transposed_ptr++ = position - simd_offset * my_row_length -
                              pos_within_row + simd_offset +
                              pos_within_row * simd_length;
        } else
          *transposed_ptr++ = position;
      }

    row_starts[i / simd_length + 1] = col_ptr - column_indices.data();
  }
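  /*
   * Illustration of the interleaved layout built above (hypothetical
   * example, assuming simd_length = 4 and a block of internal rows
   * {0, 1, 2, 3} that all have row_length = 3): writing e_{r,c} for the
   * c-th stored entry of row r, column_indices (and later the matrix data)
   * are laid out as
   *
   *   e_{0,0} e_{1,0} e_{2,0} e_{3,0}   e_{0,1} e_{1,1} e_{2,1} e_{3,1}
   *   e_{0,2} e_{1,2} e_{2,2} e_{3,2}
   *
   * i.e. entry c of row r lives at
   *   row_starts[r / simd_length] + c * simd_length + (r % simd_length).
   * The indices_transposed computation above maps the scalar CSR position
   * of the transposed entry a_ji into this interleaved numbering; for rows
   * outside the internal range the two numberings coincide.
   */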
  /* Switch from block-wise indexing (internal dofs) to row-wise indexing: */
  row_starts[n_internal_dofs] = row_starts[n_internal_dofs / simd_length];
  for (unsigned int i = n_internal_dofs; i < sparsity.n_rows(); ++i) {
    for (auto j = sparsity.begin(i); j != sparsity.end(i); ++j) {
      const unsigned int column = j->column();
      *col_ptr++ = column;

      const std::size_t position = sparsity(column, i);
      if (column < n_internal_dofs) {
        const unsigned int my_row_length = sparsity.row_length(column);
        const std::size_t position_diag = sparsity(column, column);
        const std::size_t pos_within_row = position - position_diag;
        const unsigned int simd_offset = column % simd_length;
        *transposed_ptr++ = position - simd_offset * my_row_length -
                            pos_within_row + simd_offset +
                            pos_within_row * simd_length;
      } else
        *transposed_ptr++ = position;
    }

    row_starts[i + 1] = col_ptr - column_indices.data();
  }
  Assert(col_ptr == column_indices.end(), dealii::ExcInternalError());
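  /*
   * If the locally relevant range contains ghost rows, set up the
   * communication data structures (receive_targets, send_targets,
   * entries_to_be_sent) used to synchronize matrix entries in ghost rows
   * with the neighboring MPI ranks that own them.
   */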
  if (sparsity.n_rows() > n_locally_owned_dofs) {
    const auto &ghost_targets = partitioner->ghost_targets();

    receive_targets.resize(ghost_targets.size());

    for (unsigned int p = 0; p < receive_targets.size(); ++p) {
      receive_targets[p].first = ghost_targets[p].first;
    }
    const auto gt_begin = ghost_targets.begin();
    auto gt_ptr = ghost_targets.begin();
    std::size_t index = 0;
    unsigned int row_count = 0;

    for (unsigned int i = n_locally_owned_dofs; i < sparsity.n_rows(); ++i) {
      index += sparsity.row_length(i);
      ++row_count;

      /* Once all ghost rows of the current target have been visited,
       * record the accumulated entry count and advance to the next one: */
      if (row_count == gt_ptr->second) {
        receive_targets[gt_ptr - gt_begin].second = index;
        ++gt_ptr;
        row_count = 0;
      }
    }

    Assert(gt_ptr == partitioner->ghost_targets().end(),
           dealii::ExcInternalError());
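    /*
     * ghost_ranges[p] .. ghost_ranges[p + 1] enumerates (in local index
     * space) the ghost rows that are owned by the p-th ghost target rank.
     */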
    std::vector<unsigned int> ghost_ranges(ghost_targets.size() + 1);
    ghost_ranges[0] = n_locally_owned_dofs;
    for (unsigned int p = 0; p < receive_targets.size(); ++p) {
      ghost_ranges[p + 1] = ghost_ranges[p] + ghost_targets[p].second;
    }
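    /*
     * Flatten the half-open index ranges returned by
     * Utilities::MPI::Partitioner::import_indices() into an explicit list
     * of locally owned rows that other ranks hold as ghosts.
     */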
    std::vector<unsigned int> import_indices_part;
    for (auto i : partitioner->import_indices())
      for (unsigned int j = i.first; j < i.second; ++j)
        import_indices_part.push_back(j);

    AssertDimension(import_indices_part.size(),
                    partitioner->n_import_indices());
    const auto &import_targets = partitioner->import_targets();
    entries_to_be_sent.clear();
    send_targets.resize(import_targets.size());
    auto idx = import_indices_part.begin();
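    /*
     * For every import target (a rank that holds some of our locally owned
     * rows as ghosts) record which entries of those rows have to be sent:
     * the first entry of the row (the diagonal) and every entry whose
     * column falls into the ghost range owned by that rank.
     */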
    unsigned int p_match = 0;
    for (unsigned int p = 0; p < import_targets.size(); ++p) {
      /* Find the receive target that corresponds to the same rank as
       * import_targets[p] (there may be none): */
      p_match = (p_match == receive_targets.size() ? 0 : p_match);
      while (p_match < receive_targets.size() &&
             receive_targets[p_match].first != import_targets[p].first)
        ++p_match;

      for (unsigned int c = 0; c < import_targets[p].second; ++c, ++idx) {
        /* No matching ghost range, nothing to record for this rank: */
        if (p_match == receive_targets.size())
          continue;

        const unsigned int row = *idx;

        entries_to_be_sent.emplace_back(row, 0);

        for (auto jt = ++sparsity.begin(row); jt != sparsity.end(row); ++jt) {
          if (jt->column() >= ghost_ranges[p_match] &&
              jt->column() < ghost_ranges[p_match + 1]) {
            const unsigned int position_within_column =
                jt - sparsity.begin(row);
            entries_to_be_sent.emplace_back(row, position_within_column);
          }
        }
      }

      send_targets[p].first = partitioner->import_targets()[p].first;
      send_targets[p].second = entries_to_be_sent.size();
    }
  }
}
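/*
 * The companion matrix class stores, for every nonzero and component, one
 * Number in the interleaved order described by SparsityPatternSIMD; the
 * read_in() overloads below populate it from ordinary deal.II sparse
 * matrices.
 */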
template <typename Number, int n_components, int simd_length>
SparseMatrixSIMD<Number, n_components, simd_length>::SparseMatrixSIMD()
    : sparsity(nullptr)
{
}
template <typename Number, int n_components, int simd_length>
SparseMatrixSIMD<Number, n_components, simd_length>::SparseMatrixSIMD(
    const SparsityPatternSIMD<simd_length> &sparsity)
    : sparsity(&sparsity)
{
  data.resize(sparsity.n_nonzero_elements() * n_components);
}
template <typename Number, int n_components, int simd_length>
void SparseMatrixSIMD<Number, n_components, simd_length>::reinit(
    const SparsityPatternSIMD<simd_length> &sparsity)
{
  this->sparsity = &sparsity;
  data.resize(sparsity.n_nonzero_elements() * n_components);
}
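/*
 * read_in() copies matrix entries into the SIMD-interleaved storage, one
 * source matrix per component. If locally_indexed is true the source
 * matrices are addressed with the same local numbering used by the
 * partitioner; otherwise entries are looked up through global indices via
 * SparseMatrix::el().
 */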
template <typename Number, int n_components, int simd_length>
template <typename SparseMatrix>
void SparseMatrixSIMD<Number, n_components, simd_length>::read_in(
    const std::array<SparseMatrix, n_components> &sparse_matrix,
    bool locally_indexed /*= true*/)
{
  RYUJIN_PARALLEL_REGION_BEGIN
  for (unsigned int i = 0; i < sparsity->n_internal_dofs; i += simd_length) {

    const unsigned int row_length = sparsity->row_length(i);

    const unsigned int *js = sparsity->columns(i);
    for (unsigned int col_idx = 0; col_idx < row_length;
         ++col_idx, js += simd_length) {

      dealii::Tensor<1, n_components, VectorizedArray> temp;
      for (unsigned int k = 0; k < simd_length; ++k)
        for (unsigned int d = 0; d < n_components; ++d)
          if (locally_indexed)
            temp[d][k] = sparse_matrix[d](i + k, js[k]);
          else
            temp[d][k] = sparse_matrix[d].el(
                sparsity->partitioner->local_to_global(i + k),
                sparsity->partitioner->local_to_global(js[k]));

      /* ... write the gathered SIMD block into the interleaved data array ... */
    }
  }
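  /*
   * The remaining locally owned rows (from n_internal_dofs on) are not
   * SIMD-vectorized and are copied entry by entry.
   */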
  for (unsigned int i = sparsity->n_internal_dofs;
       i < sparsity->n_locally_owned_dofs;
       ++i) {

    const unsigned int row_length = sparsity->row_length(i);
    const unsigned int *js = sparsity->columns(i);
    for (unsigned int col_idx = 0; col_idx < row_length; ++col_idx, ++js) {

      dealii::Tensor<1, n_components, Number> temp;
      for (unsigned int d = 0; d < n_components; ++d)
        if (locally_indexed)
          temp[d] = sparse_matrix[d](i, js[0]);
        else
          temp[d] = sparse_matrix[d].el(
              sparsity->partitioner->local_to_global(i),
              sparsity->partitioner->local_to_global(js[0]));

      /* ... write the entry into the data array ... */
    }
  }

  RYUJIN_PARALLEL_REGION_END
}
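/*
 * Overload of read_in() for a single (scalar) source matrix; the copy
 * logic mirrors the array version above.
 */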
template <typename Number, int n_components, int simd_length>
template <typename SparseMatrix>
void SparseMatrixSIMD<Number, n_components, simd_length>::read_in(
    const SparseMatrix &sparse_matrix,
    bool locally_indexed /*= true*/)
{
  RYUJIN_PARALLEL_REGION_BEGIN
  for (unsigned int i = 0; i < sparsity->n_internal_dofs; i += simd_length) {

    const unsigned int row_length = sparsity->row_length(i);

    const unsigned int *js = sparsity->columns(i);
    for (unsigned int col_idx = 0; col_idx < row_length;
         ++col_idx, js += simd_length) {

      VectorizedArray temp;
      for (unsigned int k = 0; k < simd_length; ++k)
        if (locally_indexed)
          temp[k] = sparse_matrix(i + k, js[k]);
        else
          temp[k] =
              sparse_matrix.el(sparsity->partitioner->local_to_global(i + k),
                               sparsity->partitioner->local_to_global(js[k]));

      /* ... write the gathered SIMD block into the interleaved data array ... */
    }
  }
  for (unsigned int i = sparsity->n_internal_dofs;
       i < sparsity->n_locally_owned_dofs;
       ++i) {
    const unsigned int row_length = sparsity->row_length(i);
    const unsigned int *js = sparsity->columns(i);
    for (unsigned int col_idx = 0; col_idx < row_length; ++col_idx, ++js) {
      const Number temp =
          locally_indexed
              ? sparse_matrix(i, js[0])
              : sparse_matrix.el(
                    sparsity->partitioner->local_to_global(i),
                    sparsity->partitioner->local_to_global(js[0]));

      /* ... write the entry into the data array ... */
    }
  }

  RYUJIN_PARALLEL_REGION_END
}
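/*
 * Usage sketch (hypothetical driver code, not part of this file): given a
 * DynamicSparsityPattern `dsp`, a matching MPI partitioner `partitioner`,
 * the number of SIMD-vectorizable rows `n_internal_dofs`, and a scalar
 * deal.II matrix `mass_matrix` assembled with global numbering, the two
 * classes are wired up roughly as follows:
 *
 *   constexpr int simd_length = dealii::VectorizedArray<double>::size();
 *
 *   SparsityPatternSIMD<simd_length> sparsity_simd;
 *   sparsity_simd.reinit(n_internal_dofs, dsp, partitioner);
 *
 *   SparseMatrixSIMD<double, 1, simd_length> matrix_simd;
 *   matrix_simd.reinit(sparsity_simd);
 *   matrix_simd.read_in(mass_matrix, false);  // use global indices via el()
 */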