20 namespace ShallowWater
25 using namespace dealii;
// Constructor of HyperbolicModule. Registers the object as a deal.II
// ParameterAcceptor under the given parameter subsection and stores
// non-owning pointers/references to the MPI communicator, timer map,
// offline data, hyperbolic system and initial values (see the full
// declaration in the trailing doxygen dump of this chunk).
// NOTE(review): incomplete scrape — the parameter list and parts of
// the initializer list (embedded source lines 28, 31-33, 37, 46-49)
// are missing here; code kept byte-identical.
27 template <
typename Description,
int dim,
typename Number>
29 const MPI_Comm &mpi_communicator,
30 std::map<std::string, dealii::Timer> &computing_timer,
34 const std::string &subsection )
35 : ParameterAcceptor(subsection)
36 , precompute_only_(false)
// Sub-objects read their run-time parameters from nested subsections.
38 , indicator_parameters_(subsection +
"/indicator")
39 , limiter_parameters_(subsection +
"/limiter")
40 , riemann_solver_parameters_(subsection +
"/riemann solver")
41 , mpi_communicator_(mpi_communicator)
42 , computing_timer_(computing_timer)
43 , offline_data_(&offline_data)
44 , hyperbolic_system_(&hyperbolic_system)
45 , initial_values_(&initial_values)
// Run-time parameter (default false): whether boundary dofs also
// participate in the maximal-admissible-step-size estimate; it is
// read back in step() when computing tau_max.
50 cfl_with_boundary_dofs_ =
false;
51 add_parameter(
"cfl with boundary dofs",
52 cfl_with_boundary_dofs_,
53 "Use also the local wave-speed estimate d_ij of boundary "
54 "dofs when computing the maximal admissible step size");
// HyperbolicModule<Description, dim, Number>::prepare():
// (re)initialize all internal vectors and matrices to the current
// partitioning / sparsity pattern provided by offline_data_, and
// interpolate the "initial precomputed" vector from initial_values_.
// NOTE(review): incomplete scrape — the function signature and a few
// interior lines are missing; code kept byte-identical.
58 template <
typename Description,
int dim,
typename Number>
62 std::cout <<
"HyperbolicModule<Description, dim, Number>::prepare()"
// Only 0, 1, or 2 limiter passes are implemented (see step()).
66 AssertThrow(limiter_parameters_.iterations() <= 2,
68 "The number of limiter iterations must be between [0,2]"));
// Scalar (per-dof, component-wise) quantities:
72 const auto &scalar_partitioner = offline_data_->scalar_partitioner();
73 initial_precomputed_.reinit_with_scalar_partitioner(scalar_partitioner);
74 alpha_.reinit(scalar_partitioner);
75 bounds_.reinit_with_scalar_partitioner(scalar_partitioner);
// Vector-valued (full state) quantities:
77 const auto &vector_partitioner = offline_data_->vector_partitioner();
78 r_.reinit(vector_partitioner);
// Sparse matrices sharing the SIMD-blocked sparsity pattern:
84 const auto &sparsity_simd = offline_data_->sparsity_pattern_simd();
85 dij_matrix_.reinit(sparsity_simd);
86 lij_matrix_.reinit(sparsity_simd);
87 lij_matrix_next_.reinit(sparsity_simd);
88 pij_matrix_.reinit(sparsity_simd);
90 initial_precomputed_ =
91 initial_values_->interpolate_initial_precomputed_vector();
// Helper: given a (possibly SIMD-vectorized) row index i and the
// corresponding array of column indices js, return true if every
// column index lies below the diagonal. Used in step() to skip the
// redundant computation of d_ij on the upper triangle (d_ji is later
// obtained by transposition/symmetrization).
// NOTE(review): incomplete scrape — the scalar branch of the
// "if constexpr" and the closing braces (embedded lines 105-111,
// 117-125) are missing; code kept byte-identical.
101 template <
typename T>
102 bool all_below_diagonal(
unsigned int i,
const unsigned int *js)
104 if constexpr (std::is_same_v<T, typename get_value_type<T>::type>) {
112 constexpr auto simd_length = T::size();
// Vectorized case: lane k of the SIMD row corresponds to row
// index i + k, so compare each js[k] against i + k.
114 bool all_below_diagonal =
true;
115 for (
unsigned int k = 0; k < simd_length; ++k)
116 if (js[k] >= i + k) {
117 all_below_diagonal =
false;
120 return all_below_diagonal;
// HyperbolicModule<Description, dim, Number>::step():
// performs one explicit (sub)step of the convex-limited,
// graph-viscosity based update, optionally blending in earlier
// Runge-Kutta stages via stage_state_vectors / stage_weights.
// NOTE(review): this chunk is an incomplete extraction — the embedded
// source-line numbers jump repeatedly, so the signature, many
// statements, and closing braces are missing below. Code is kept
// byte-identical; comments mark the recognizable phases.
126 template <
typename Description,
int dim,
typename Number>
127 template <
int stages>
130 std::array<std::reference_wrapper<const StateVector>, stages>
132 const std::array<Number, stages> stage_weights,
137 std::cout <<
"HyperbolicModule<Description, dim, Number>::step()"
// Unpack the state-vector tuples (U, precomputed, V).
141 auto &old_U = std::get<0>(old_state_vector);
142 auto &old_precomputed = std::get<1>(old_state_vector);
143 auto &new_U = std::get<0>(new_state_vector);
// Compile-time switch: the shallow-water description gets special
// treatment (affine shift, equilibrated states) further below.
161 constexpr bool shallow_water =
162 std::is_same_v<Description, ShallowWater::Description>;
164 using VA = VectorizedArray<Number>;
// Index ranges: SIMD-vectorized interior [0, n_internal), scalar
// remainder [n_internal, n_owned); see the loop(...) dispatches.
168 constexpr auto simd_length = VA::size();
169 const unsigned int n_export_indices = offline_data_->n_export_indices();
170 const unsigned int n_internal = offline_data_->n_locally_internal();
171 const unsigned int n_owned = offline_data_->n_locally_owned();
175 const auto &sparsity_simd = offline_data_->sparsity_pattern_simd();
177 const auto &lumped_mass_matrix = offline_data_->lumped_mass_matrix();
178 const auto &lumped_mass_matrix_inverse =
179 offline_data_->lumped_mass_matrix_inverse();
180 const auto &mass_matrix = offline_data_->mass_matrix();
181 const auto &betaij_matrix = offline_data_->betaij_matrix();
182 const auto &cij_matrix = offline_data_->cij_matrix();
184 const auto &boundary_map = offline_data_->boundary_map();
185 const auto &coupling_boundary_pairs =
186 offline_data_->coupling_boundary_pairs();
188 const Number measure_of_omega_inverse =
189 Number(1.) / offline_data_->measure_of_omega();
// Channel tag for the nonblocking ghost-value exchanges started in
// the SynchronizationDispatch callbacks below.
192 unsigned int channel = 10;
// Helper producing a timer-section label "time step [H] <no> - name";
// advance == false reuses the current step number instead of
// incrementing it.
196 const auto scoped_name = [&step_no](
const auto &name,
197 const bool advance =
true) {
198 advance || step_no--;
199 return "time step [H] " + std::to_string(++step_no) +
" - " + name;
// Set whenever a computed state fails an admissibility / limiter
// check; acted upon at the end via id_violation_strategy_.
203 std::atomic<bool> restart_needed =
false;
// Phase 0: precompute auxiliary values (only if the Description
// defines precomputation cycles).
211 if constexpr (n_precomputation_cycles != 0) {
212 Scope scope(computing_timer_, scoped_name(
"precompute values"));
214 for (
unsigned int cycle = 0; cycle < n_precomputation_cycles; ++cycle) {
217 old_precomputed.update_ghost_values_start(channel++);
218 old_precomputed.update_ghost_values_finish();
// Generic worker instantiated once for scalar Number and once for
// the vectorized type VA (type dispatch via the sentinel argument).
224 auto loop = [&](
auto sentinel,
unsigned int left,
unsigned int right) {
225 using T =
decltype(sentinel);
228 bool thread_ready =
false;
230 const auto view = hyperbolic_system_->template view<dim, T>();
231 view.precomputation_loop(
233 [&](
const unsigned int i) {
234 synchronization_dispatch.check(
235 thread_ready, i >= n_export_indices && i < n_internal);
244 loop(Number(), n_internal, n_owned);
246 loop(VA(), 0, n_internal);
// Phase 1: compute the off-diagonal graph viscosities d_ij and the
// smoothness indicator alpha_i.
280 Scope scope(computing_timer_, scoped_name(
"compute d_ij, and alpha_i"));
// Kick off the nonblocking export of alpha_ as soon as all export
// indices have been processed.
282 SynchronizationDispatch synchronization_dispatch([&]() {
283 alpha_.update_ghost_values_start(channel++);
284 alpha_.update_ghost_values_finish();
290 auto loop = [&](
auto sentinel,
unsigned int left,
unsigned int right) {
291 using T =
decltype(sentinel);
292 unsigned int stride_size = get_stride_size<T>;
296 using RiemannSolver =
297 typename Description::template RiemannSolver<dim, T>;
298 RiemannSolver riemann_solver(
299 *hyperbolic_system_, riemann_solver_parameters_, old_precomputed);
301 using Indicator =
typename Description::template Indicator<dim, T>;
303 *hyperbolic_system_, indicator_parameters_, old_precomputed);
305 bool thread_ready =
false;
308 for (
unsigned int i = left; i < right; i += stride_size) {
311 const unsigned int row_length = sparsity_simd.row_length(i);
315 synchronization_dispatch.check(
316 thread_ready, i >= n_export_indices && i < n_internal);
318 const auto U_i = old_U.template get_tensor<T>(i);
320 indicator.reset(i, U_i);
322 const unsigned int *js = sparsity_simd.columns(i);
323 for (
unsigned int col_idx = 0; col_idx < row_length;
324 ++col_idx, js += stride_size) {
326 const auto U_j = old_U.template get_tensor<T>(js);
328 const auto c_ij = cij_matrix.template get_tensor<T>(i, col_idx);
330 indicator.accumulate(js, U_j, c_ij);
// Only compute d_ij on the lower triangle; the upper triangle is
// filled by transposition in phase 2.
337 if (all_below_diagonal<T>(i, js))
340 const auto norm = c_ij.norm();
341 const auto n_ij = c_ij / norm;
342 const auto lambda_max =
343 riemann_solver.compute(U_i, U_j, i, js, n_ij);
344 const auto d_ij = norm * lambda_max;
346 dij_matrix_.write_entry(d_ij, i, col_idx,
true);
349 const auto mass = get_entry<T>(lumped_mass_matrix, i);
350 const auto hd_i = mass * measure_of_omega_inverse;
351 write_entry<T>(alpha_, indicator.alpha(hd_i), i);
356 loop(Number(), n_internal, n_owned);
358 loop(VA(), 0, n_internal);
// Phase 2: fix up d_ij at coupling boundary pairs, fill the
// diagonal d_ii = -sum_j d_ij, and reduce the maximal admissible
// time-step size tau_max.
370 std::atomic<Number> tau_max{std::numeric_limits<Number>::max()};
373 Scope scope(computing_timer_,
374 scoped_name(
"compute bdry d_ij, diag d_ii, and tau_max"));
382 using RiemannSolver =
383 typename Description::template RiemannSolver<dim, Number>;
384 RiemannSolver riemann_solver(
385 *hyperbolic_system_, riemann_solver_parameters_, old_precomputed);
387 Number local_tau_max = std::numeric_limits<Number>::max();
// Symmetrize over coupling boundary pairs: recompute d_ji from the
// transposed c_ji and store max(d_ij, d_ji).
395 for (std::size_t k = 0; k < coupling_boundary_pairs.size(); ++k) {
396 const auto &[i, col_idx, j] = coupling_boundary_pairs[k];
398 const auto U_i = old_U.get_tensor(i);
399 const auto U_j = old_U.get_tensor(j);
401 const auto c_ji = cij_matrix.get_transposed_tensor(i, col_idx);
402 Assert(c_ji.norm() > 1.e-12, ExcInternalError());
403 const auto norm_ji = c_ji.norm();
404 const auto n_ji = c_ji / norm_ji;
406 const auto d_ij = dij_matrix_.get_entry(i, col_idx);
408 const auto lambda_max = riemann_solver.compute(U_j, U_i, j, &i, n_ji);
409 const auto d_ji = norm_ji * lambda_max;
411 dij_matrix_.write_entry(std::max(d_ij, d_ji), i, col_idx);
417 for (
unsigned int i = 0; i < n_owned; ++i) {
420 const unsigned int row_length = sparsity_simd.row_length(i);
424 Number d_sum = Number(0.);
427 const unsigned int *js = sparsity_simd.columns(i);
428 for (
unsigned int col_idx = 1; col_idx < row_length; ++col_idx) {
430 *(i < n_internal ? js + col_idx * simd_length : js + col_idx);
// Mirror the transposed (lower-triangular) entry into this row.
434 const auto d_ji = dij_matrix_.get_transposed_entry(i, col_idx);
435 dij_matrix_.write_entry(d_ji, i, col_idx);
438 d_sum -= dij_matrix_.get_entry(i, col_idx);
// Keep d_sum strictly negative to avoid division by zero in the
// tau computation below.
447 std::min(d_sum, Number(-1.e6) * std::numeric_limits<Number>::min());
450 dij_matrix_.write_entry(d_sum, i, 0);
// CFL condition: tau <= cfl_ * m_i / (-2 d_ii). Boundary dofs are
// skipped unless cfl_with_boundary_dofs_ is set.
452 const Number mass = lumped_mass_matrix.local_element(i);
453 const Number tau = cfl_ * mass / (Number(-2.) * d_sum);
454 if (boundary_map.count(i) == 0 || cfl_with_boundary_dofs_) {
455 local_tau_max = std::min(local_tau_max, tau);
// Lock-free min-reduction of the thread-local tau into the atomic.
460 Number current_tau_max = tau_max.load();
461 while (current_tau_max > local_tau_max &&
462 !tau_max.compare_exchange_weak(current_tau_max, local_tau_max))
// Global MPI min-reduction of tau_max.
470 Scope scope(computing_timer_,
471 scoped_name(
"synchronization barrier",
false));
474 tau_max.store(Utilities::MPI::min(tau_max.load(), mpi_communicator_));
477 !std::isnan(tau_max) && !std::isinf(tau_max) && tau_max > 0.,
479 "I'm sorry, Dave. I'm afraid I can't do that.\nWe crashed."));
// A requested tau of 0 means "use the maximal admissible step".
481 tau = (tau == Number(0.) ? tau_max.load() : tau);
484 std::cout <<
" computed tau_max = " << tau_max << std::endl;
485 std::cout <<
" perform time-step with tau = " << tau << std::endl;
// Early exit: only the precomputation/d_ij phases were requested.
488 if (precompute_only_) {
490 std::cout <<
" return early" << std::endl;
498 dij_matrix_.update_ghost_rows();
// Phase 3: low-order update of U, limiter bounds, high-order
// residual r_i (F_iH) and antidiffusive fluxes p_ij.
508 Scope scope(computing_timer_,
509 scoped_name(
"l.-o. update, compute bounds, r_i, and p_ij"));
511 SynchronizationDispatch synchronization_dispatch([&]() {
512 r_.update_ghost_values_start(channel++);
513 r_.update_ghost_values_finish();
// Weight of the current stage chosen so that it and the supplied
// stage_weights sum to one.
516 const Number weight =
517 -std::accumulate(stage_weights.begin(), stage_weights.end(), -1.);
523 auto loop = [&](
auto sentinel,
unsigned int left,
unsigned int right) {
524 using T =
decltype(sentinel);
526 typename Description::template HyperbolicSystemView<dim, T>;
527 using Limiter =
typename Description::template Limiter<dim, T>;
528 using flux_contribution_type =
typename View::flux_contribution_type;
529 using state_type =
typename View::state_type;
531 unsigned int stride_size = get_stride_size<T>;
533 const auto view = hyperbolic_system_->template view<dim, T>();
537 *hyperbolic_system_, limiter_parameters_, old_precomputed);
538 bool thread_ready =
false;
541 for (
unsigned int i = left; i < right; i += stride_size) {
544 const unsigned int row_length = sparsity_simd.row_length(i);
548 synchronization_dispatch.check(
549 thread_ready, i >= n_export_indices && i < n_internal);
551 const auto U_i = old_U.template get_tensor<T>(i);
554 const auto alpha_i = get_entry<T>(alpha_, i);
555 const auto m_i = get_entry<T>(lumped_mass_matrix, i);
556 const auto m_i_inv = get_entry<T>(lumped_mass_matrix_inverse, i);
558 const auto flux_i = view.flux_contribution(
559 old_precomputed, initial_precomputed_, i, U_i);
// Gather flux contributions / nodal sources of earlier stages.
561 std::array<flux_contribution_type, stages> flux_iHs;
562 [[maybe_unused]] state_type S_iH;
564 for (
int s = 0; s < stages; ++s) {
565 const auto &[U_s, prec_s, V_s] = stage_state_vectors[s].get();
567 const auto U_iHs = U_s.template get_tensor<T>(i);
569 view.flux_contribution(prec_s, initial_precomputed_, i, U_iHs);
571 if constexpr (View::have_source_terms) {
573 stage_weights[s] * view.nodal_source(prec_s, i, U_iHs, tau);
577 [[maybe_unused]] state_type S_i;
580 if constexpr (View::have_source_terms) {
581 S_i = view.nodal_source(old_precomputed, i, U_i, tau);
582 S_iH += weight * S_i;
583 U_i_new += tau * S_i;
587 limiter.reset(i, U_i, flux_i);
// Shallow water only: accumulate an affine shift B_ij (applied to
// the limiter bounds — presumably dry-state handling; TODO confirm
// against the complete source).
595 [[maybe_unused]] state_type affine_shift;
597 const unsigned int *js = sparsity_simd.columns(i);
598 if constexpr (shallow_water) {
599 for (
unsigned int col_idx = 0; col_idx < row_length;
600 ++col_idx, js += stride_size) {
602 const auto U_j = old_U.template get_tensor<T>(js);
603 const auto flux_j = view.flux_contribution(
604 old_precomputed, initial_precomputed_, js, U_j);
606 const auto d_ij = dij_matrix_.template get_entry<T>(i, col_idx);
607 const auto c_ij = cij_matrix.template get_tensor<T>(i, col_idx);
609 const auto B_ij = view.affine_shift(flux_i, flux_j, c_ij, d_ij);
610 affine_shift += B_ij;
613 affine_shift *= tau * m_i_inv;
// Main stencil loop: per edge (i,j) accumulate the low-order
// update into U_i_new, the high-order residual into F_iH, and the
// antidiffusive flux into P_ij.
616 js = sparsity_simd.columns(i);
617 for (
unsigned int col_idx = 0; col_idx < row_length;
618 ++col_idx, js += stride_size) {
620 const auto U_j = old_U.template get_tensor<T>(js);
622 const auto alpha_j = get_entry<T>(alpha_, js);
624 const auto d_ij = dij_matrix_.template get_entry<T>(i, col_idx);
// High-order viscosity: d_ij scaled by the averaged indicator.
625 const auto d_ijH = d_ij * (alpha_i + alpha_j) * Number(.5);
// Debug check that the symmetrization in phase 2 worked.
635 dij_matrix_.template get_transposed_entry<T>(i, col_idx);
636 Assert(std::max(std::abs(d_ij - d_ji), T(1.0e-12)) == T(1.0e-12),
637 dealii::ExcMessage(
"d_ij not symmetric"));
640 const auto c_ij = cij_matrix.template get_tensor<T>(i, col_idx);
// Regularize the divisor to avoid division by a zero d_ij.
641 const auto regularization =
642 T(100. * std::numeric_limits<Number>::min());
643 const auto scaled_c_ij = c_ij / std::max(d_ij, regularization);
646 betaij_matrix.template get_entry<T>(i, col_idx);
648 const auto flux_j = view.flux_contribution(
649 old_precomputed, initial_precomputed_, js, U_j);
651 const auto m_ij = mass_matrix.template get_entry<T>(i, col_idx);
657 const auto flux_ij = view.flux_divergence(flux_i, flux_j, c_ij);
658 U_i_new += tau * m_i_inv * flux_ij;
659 auto P_ij = -flux_ij;
661 if constexpr (shallow_water) {
// Shallow water: use equilibrated states U*_ij / U*_ji in the
// graph-viscosity term (well-balancing of the scheme).
666 const auto &[U_star_ij, U_star_ji] =
667 view.equilibrated_states(flux_i, flux_j);
669 U_i_new += tau * m_i_inv * d_ij * (U_star_ji - U_star_ij);
670 F_iH += d_ijH * (U_star_ji - U_star_ij);
671 P_ij += (d_ijH - d_ij) * (U_star_ji - U_star_ij);
673 limiter.accumulate(U_j,
// Generic case: plain graph-viscosity term with U_j - U_i.
682 U_i_new += tau * m_i_inv * d_ij * (U_j - U_i);
683 F_iH += d_ijH * (U_j - U_i);
684 P_ij += (d_ijH - d_ij) * (U_j - U_i);
686 limiter.accumulate(js, U_j, flux_j, scaled_c_ij, beta_ij);
689 if constexpr (View::have_source_terms) {
// High-order flux contribution of the current stage.
698 if constexpr (View::have_high_order_flux) {
699 const auto high_order_flux_ij =
700 view.high_order_flux_divergence(flux_i, flux_j, c_ij);
701 F_iH += weight * high_order_flux_ij;
702 P_ij += weight * high_order_flux_ij;
704 F_iH += weight * flux_ij;
705 P_ij += weight * flux_ij;
708 if constexpr (View::have_source_terms) {
709 const auto S_j = view.nodal_source(old_precomputed, js, U_j, tau);
710 F_iH += weight * m_ij * S_j;
711 P_ij += weight * m_ij * S_j;
// Contributions of the earlier Runge-Kutta stages.
714 for (
int s = 0; s < stages; ++s) {
715 const auto &[U_s, prec_s, V_s] = stage_state_vectors[s].get();
717 const auto U_jHs = U_s.template get_tensor<T>(js);
718 const auto flux_jHs = view.flux_contribution(
719 prec_s, initial_precomputed_, js, U_jHs);
721 if constexpr (View::have_high_order_flux) {
722 const auto high_order_flux_ij = view.high_order_flux_divergence(
723 flux_iHs[s], flux_jHs, c_ij);
724 F_iH += stage_weights[s] * high_order_flux_ij;
725 P_ij += stage_weights[s] * high_order_flux_ij;
728 view.flux_divergence(flux_iHs[s], flux_jHs, c_ij);
729 F_iH += stage_weights[s] * flux_ij;
730 P_ij += stage_weights[s] * flux_ij;
733 if constexpr (View::have_source_terms) {
734 const auto S_js = view.nodal_source(prec_s, js, U_jHs, tau);
735 F_iH += stage_weights[s] * m_ij * S_js;
736 P_ij += stage_weights[s] * m_ij * S_js;
740 pij_matrix_.write_entry(P_ij, i, col_idx,
true);
// The low-order update must be admissible; otherwise request a
// restart (handled at the end of step()).
744 if (!view.is_admissible(U_i_new)) {
745 restart_needed =
true;
749 new_U.template write_tensor<T>(U_i_new, i);
750 r_.template write_tensor<T>(F_iH, i);
752 const auto hd_i = m_i * measure_of_omega_inverse;
753 const auto relaxed_bounds = limiter.bounds(hd_i);
754 bounds_.template write_tensor<T>(relaxed_bounds, i);
759 loop(Number(), n_internal, n_owned);
761 loop(VA(), 0, n_internal);
// Phase 4: compute the limiter coefficients l_ij (first pass),
// skipped entirely when zero limiter iterations are configured.
773 if (limiter_parameters_.iterations() != 0) {
774 Scope scope(computing_timer_, scoped_name(
"compute p_ij, and l_ij"));
776 SynchronizationDispatch synchronization_dispatch([&]() {
777 lij_matrix_.update_ghost_rows_start(channel++);
778 lij_matrix_.update_ghost_rows_finish();
784 auto loop = [&](
auto sentinel,
unsigned int left,
unsigned int right) {
785 using T =
decltype(sentinel);
787 typename Description::template HyperbolicSystemView<dim, T>;
788 using Limiter =
typename Description::template Limiter<dim, T>;
790 unsigned int stride_size = get_stride_size<T>;
794 *hyperbolic_system_, limiter_parameters_, old_precomputed);
795 bool thread_ready =
false;
798 for (
unsigned int i = left; i < right; i += stride_size) {
801 const unsigned int row_length = sparsity_simd.row_length(i);
805 synchronization_dispatch.check(
806 thread_ready, i >= n_export_indices && i < n_internal);
809 bounds_.template get_tensor<T, std::array<T, n_bounds>>(i);
811 const auto m_i_inv = get_entry<T>(lumped_mass_matrix_inverse, i);
813 const auto U_i_new = new_U.template get_tensor<T>(i);
815 const auto F_iH = r_.template get_tensor<T>(i);
817 const auto lambda_inv = Number(row_length - 1);
818 const auto factor = tau * m_i_inv * lambda_inv;
// Skip the diagonal entry (col_idx 0).
821 const unsigned int *js = sparsity_simd.columns(i) + stride_size;
822 for (
unsigned int col_idx = 1; col_idx < row_length;
823 ++col_idx, js += stride_size) {
// Mass-matrix correction coefficients b_ij / b_ji applied to the
// antidiffusive flux.
829 const auto m_j_inv = get_entry<T>(lumped_mass_matrix_inverse, js);
830 const auto m_ij = mass_matrix.template get_entry<T>(i, col_idx);
832 const auto b_ij = (col_idx == 0 ? T(1.) : T(0.)) - m_ij * m_j_inv;
834 const auto b_ji = (col_idx == 0 ? T(1.) : T(0.)) - m_ij * m_i_inv;
836 auto P_ij = pij_matrix_.template get_tensor<T>(i, col_idx);
837 const auto F_jH = r_.template get_tensor<T>(js);
838 P_ij += b_ij * F_jH - b_ji * F_iH;
840 pij_matrix_.write_entry(P_ij, i, col_idx);
846 const auto &[l_ij, success] = limiter.limit(bounds, U_i_new, P_ij);
847 lij_matrix_.template write_entry<T>(l_ij, i, col_idx,
true);
859 restart_needed =
true;
865 loop(Number(), n_internal, n_owned);
867 loop(VA(), 0, n_internal);
// Phases 5..: symmetrize l_ij, perform the high-order update and,
// if another limiter pass remains, recompute l_ij for it.
883 const auto n_iterations = limiter_parameters_.iterations();
884 for (
unsigned int pass = 0; pass < n_iterations; ++pass) {
885 bool last_round = (pass + 1 == n_iterations);
887 std::string additional_step = (last_round ?
"" :
", next l_ij");
890 scoped_name(
"symmetrize l_ij, h.-o. update" + additional_step));
// Second pass works on the coefficients computed in the first.
892 if ((n_iterations == 2) && last_round) {
893 std::swap(lij_matrix_, lij_matrix_next_);
896 SynchronizationDispatch synchronization_dispatch([&]() {
898 lij_matrix_next_.update_ghost_rows_start(channel++);
899 lij_matrix_next_.update_ghost_rows_finish();
906 auto loop = [&](
auto sentinel,
unsigned int left,
unsigned int right) {
907 using T =
decltype(sentinel);
909 typename Description::template HyperbolicSystemView<dim, T>;
910 using Limiter =
typename Description::template Limiter<dim, T>;
912 unsigned int stride_size = get_stride_size<T>;
// Scratch row for the symmetrized l_ij values of the current row.
915 AlignedVector<T> lij_row;
917 *hyperbolic_system_, limiter_parameters_, old_precomputed);
918 bool thread_ready =
false;
921 for (
unsigned int i = left; i < right; i += stride_size) {
924 const unsigned int row_length = sparsity_simd.row_length(i);
928 synchronization_dispatch.check(
929 thread_ready, i >= n_export_indices && i < n_internal);
931 auto U_i_new = new_U.template get_tensor<T>(i);
933 const Number lambda = Number(1.) / Number(row_length - 1);
934 lij_row.resize_fast(row_length);
// Symmetrize l_ij := min(l_ij, l_ji) and apply the limited
// antidiffusive fluxes.
937 for (
unsigned int col_idx = 1; col_idx < row_length; ++col_idx) {
939 const auto l_ij = std::min(
940 lij_matrix_.template get_entry<T>(i, col_idx),
941 lij_matrix_.template get_transposed_entry<T>(i, col_idx));
943 const auto p_ij = pij_matrix_.template get_tensor<T>(i, col_idx);
945 U_i_new += l_ij * lambda * p_ij;
948 lij_row[col_idx] = l_ij;
// The high-order update must also remain admissible.
952 const auto view = hyperbolic_system_->template view<dim, T>();
953 if (!view.is_admissible(U_i_new)) {
954 restart_needed =
true;
958 new_U.template write_tensor<T>(U_i_new, i);
// Not the last round: compute the next limiter coefficients on
// the remaining (1 - l_ij) fraction of p_ij.
965 bounds_.template get_tensor<T, std::array<T, n_bounds>>(i);
967 for (
unsigned int col_idx = 1; col_idx < row_length; ++col_idx) {
969 const auto old_l_ij = lij_row[col_idx];
971 const auto new_p_ij =
973 pij_matrix_.template get_tensor<T>(i, col_idx);
975 const auto &[new_l_ij, success] =
976 limiter.limit(bounds, U_i_new, new_p_ij);
990 restart_needed =
true;
999 const auto entry = (T(1.) - old_l_ij) * new_l_ij;
1000 lij_matrix_next_.write_entry(entry, i, col_idx,
true);
1006 loop(Number(), n_internal, n_owned);
1008 loop(VA(), 0, n_internal);
1016 typename Description::template HyperbolicSystemView<dim, Number>;
// Final: propagate the restart flag across all MPI ranks and react
// according to the configured violation strategy.
1022 restart_needed.store(
1023 Utilities::MPI::logical_or(restart_needed.load(), mpi_communicator_));
1025 if (restart_needed) {
1026 switch (id_violation_strategy_) {
// HyperbolicModule<Description, dim, Number>::apply_boundary_conditions():
// enforce boundary conditions on the state vector at time t by
// iterating over the boundary map and letting the hyperbolic-system
// view fix up each boundary state; ghost values are re-exchanged at
// the end. NOTE(review): incomplete scrape — the function signature
// and several interior lines are missing; code kept byte-identical.
1046 template <
typename Description,
int dim,
typename Number>
1051 std::cout <<
"HyperbolicModule<Description, dim, "
1052 "Number>::apply_boundary_conditions()"
1056 auto &[U, precomputed, V] = state_vector;
// The timer-section index depends on how many sections step()
// created (precomputation cycle plus limiter iterations).
1058 const auto cycle_number = 5 + (n_precomputation_cycles > 0 ? 1 : 0) +
1059 limiter_parameters_.iterations();
1060 Scope scope(computing_timer_,
1061 "time step [H] " + std::to_string(cycle_number) +
1062 " - apply boundary conditions");
1066 const auto &boundary_map = offline_data_->boundary_map();
1068 for (
auto entry : boundary_map) {
1069 const auto i = entry.first;
1071 const auto &[normal, normal_mass, boundary_mass, id, position] =
1082 auto U_i = U.get_tensor(i);
// Dirichlet data is evaluated lazily at (position, t) only when the
// boundary id actually requires it.
1085 auto get_dirichlet_data = [position = position, t = t,
this]() {
1086 return initial_values_->initial_state(position, t);
1089 const auto view = hyperbolic_system_->template view<dim, Number>();
1090 U_i = view.apply_boundary_conditions(
id, U_i, normal, get_dirichlet_data);
1091 U.write_tensor(U_i, i);
// Re-synchronize ghost values after modifying boundary dofs.
1096 U.update_ghost_values();
Number step(StateVector &old_state_vector, std::array< std::reference_wrapper< const StateVector >, stages > stage_state_vectors, const std::array< Number, stages > stage_weights, StateVector &new_state_vector, Number tau=Number(0.)) const
typename Description::HyperbolicSystem HyperbolicSystem
typename View::StateVector StateVector
HyperbolicModule(const MPI_Comm &mpi_communicator, std::map< std::string, dealii::Timer > &computing_timer, const OfflineData< dim, Number > &offline_data, const HyperbolicSystem &hyperbolic_system, const InitialValues< Description, dim, Number > &initial_values, const std::string &subsection="/HyperbolicModule")
typename Description::template HyperbolicSystemView< dim, Number > View
void apply_boundary_conditions(StateVector &state_vector, Number t) const
#define RYUJIN_PARALLEL_REGION_BEGIN
#define RYUJIN_PARALLEL_REGION_END
#define LIKWID_MARKER_START(opt)
#define CALLGRIND_START_INSTRUMENTATION
#define LIKWID_MARKER_STOP(opt)
#define CALLGRIND_STOP_INSTRUMENTATION