
Commit

Merge branch 'main' into feature/NRSE-general-structure
mgovers authored Jan 16, 2024
2 parents bf13dbc + 2dc2839 commit 5197735
Showing 19 changed files with 578 additions and 113 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/check-code-quality.yml
@@ -36,7 +36,7 @@ jobs:
- name: Set up Python
uses: actions/setup-python@v5
with:
python-version: 3.8
python-version: 3.9

- name: Upgrade pip
run: pip install --upgrade pip
3 changes: 2 additions & 1 deletion docs/user_manual/calculations.md
@@ -364,9 +364,10 @@ Multiple appliance measurements (power measurements) on one bus are aggregated as

$$
\begin{eqnarray}
\underline{S} = \sum_{k=1}^{N_{appliance}} \underline{S}_k
\quad\text{and}\quad
\sigma_P^2 = \sum_{k=1}^{N_{appliance}} \sigma_{P,k}^2
\quad\text{and}\quad
\sigma_Q^2 = \sum_{k=1}^{N_{appliance}} \sigma_{Q,k}^2
\end{eqnarray}
$$
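
As a worked example with hypothetical values: two appliance measurements $\underline{S}_1 = (1.0 + 0.5j)\,\mathrm{MVA}$ and $\underline{S}_2 = (2.0 + 1.0j)\,\mathrm{MVA}$ with active-power variances $\sigma_{P,1}^2 = 0.01$ and $\sigma_{P,2}^2 = 0.04$ aggregate to

$$
\underline{S} = 3.0 + 1.5j \,\mathrm{MVA}
\quad\text{and}\quad
\sigma_P^2 = 0.01 + 0.04 = 0.05
$$
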
@@ -83,7 +83,7 @@ class LoadGen final : public std::conditional_t<is_gen, GenericGenerator, Generi
update_real_value<sym>(new_p_specified, ps, scalar);
update_real_value<sym>(new_q_specified, qs, scalar);

s_specified_ = ps + 1.0i * qs;
s_specified_ = ComplexValue<sym>{ps, qs};
}

// update for load_gen
@@ -108,14 +108,28 @@ class LoadGen final : public std::conditional_t<is_gen, GenericGenerator, Generi
}

private:
ComplexValue<sym> s_specified_{}; // specified power injection
ComplexValue<sym> s_specified_{std::complex<double>{nan, nan}}; // specified power injection

// direction of load_gen
static constexpr double direction_ = is_gen ? 1.0 : -1.0;

// override calc_param
ComplexValue<true> sym_calc_param() const override { return mean_val(s_specified_); }
ComplexValue<false> asym_calc_param() const override { return piecewise_complex_value(s_specified_); }
ComplexValue<true> sym_calc_param() const override {
if constexpr (sym) {
if (is_nan(real(s_specified_)) || is_nan(imag(s_specified_))) {
return {nan, nan};
}
}
return mean_val(s_specified_);
}
ComplexValue<false> asym_calc_param() const override {
if constexpr (sym) {
if (is_nan(real(s_specified_)) || is_nan(imag(s_specified_))) {
return ComplexValue<false>{std::complex{nan, nan}};
}
}
return piecewise_complex_value(s_specified_);
}
template <bool sym_calc> ApplianceMathOutput<sym_calc> u2si(ComplexValue<sym_calc> const& u) const {
ApplianceMathOutput<sym_calc> appliance_math_output;
appliance_math_output.s = scale_power<sym_calc>(u);
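
The calc_param overrides above treat a partially specified power (real or imaginary part missing, i.e. NaN) as fully unspecified. A minimal standalone sketch of that rule, using plain std::complex instead of the library's ComplexValue<sym> (the function name here is illustrative only):

// standalone sketch of the NaN-propagation rule, not the library implementation
#include <cmath>
#include <complex>
#include <iostream>
#include <limits>

namespace {
constexpr double nan = std::numeric_limits<double>::quiet_NaN();

// if either component of the specified power is missing (NaN),
// report the whole calculation parameter as NaN
std::complex<double> calc_param(std::complex<double> const& s_specified) {
    if (std::isnan(s_specified.real()) || std::isnan(s_specified.imag())) {
        return {nan, nan};
    }
    return s_specified;
}
} // namespace

int main() {
    std::cout << calc_param({1.5, 0.5}) << '\n'; // (1.5,0.5): fully specified
    std::cout << calc_param({1.5, nan}) << '\n'; // (nan,nan): a missing reactive power invalidates the whole parameter
}
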
@@ -411,21 +411,20 @@ class MainModelImpl<ExtraRetrievableTypes<ExtraRetrievableType...>, ComponentLis
// execute one power flow in the current instance, no batch calculation is needed
// NOTE: if the map is not empty but the datasets inside are empty,
// that is considered a zero batch_size
bool const all_empty = update_data.empty();
if (all_empty) {
if (update_data.empty()) {
calculation_fn(*this, result_data, 0);
return BatchParameter{};
}

// get number of batches (can't be empty, because then all_empty would have been true)
Idx const n_batch = update_data.cbegin()->second.batch_size();
// get batch size (can't be empty; see previous check)
Idx const n_scenarios = update_data.cbegin()->second.batch_size();
// assert that all component types have the same batch size
assert(std::all_of(update_data.cbegin(), update_data.cend(),
[n_batch](auto const& x) { return x.second.batch_size() == n_batch; }));
[n_scenarios](auto const& x) { return x.second.batch_size() == n_scenarios; }));

// a batch_size of zero is a special case: no calculations are done at all
// in this case we consider the batch set independent but not topology cacheable
if (n_batch == 0) {
if (n_scenarios == 0) {
return BatchParameter{};
}

@@ -436,86 +435,153 @@ class MainModelImpl<ExtraRetrievableTypes<ExtraRetrievableType...>, ComponentLis
// missing entries are provided in the update data
}

// error messages
std::vector<std::string> exceptions(n_scenarios, "");
std::vector<CalculationInfo> infos(n_scenarios);

// lambda for sub batch calculation
auto sub_batch = sub_batch_calculation_(calculation_fn, result_data, update_data, exceptions, infos);

batch_dispatch(sub_batch, n_scenarios, threading);

handle_batch_exceptions(exceptions);
calculation_info_ = main_core::merge_calculation_info(infos);

return BatchParameter{};
}

template <typename Calculate>
requires std::invocable<std::remove_cvref_t<Calculate>, MainModelImpl&, Dataset const&, Idx>
auto sub_batch_calculation_(Calculate&& calculation_fn, Dataset const& result_data, ConstDataset const& update_data,
std::vector<std::string>& exceptions, std::vector<CalculationInfo>& infos) {
// const ref of current instance
MainModelImpl const& base_model = *this;

// cache component update order if possible
bool const is_independent = MainModelImpl::is_update_independent(update_data);

SequenceIdx const sequence_idx = is_independent ? get_sequence_idx_map(update_data) : SequenceIdx{};

// error messages
std::vector<std::string> exceptions(n_batch, "");
std::vector<CalculationInfo> infos(n_batch);
return [&base_model, &exceptions, &infos, &calculation_fn, &result_data, &update_data,
is_independent](Idx start, Idx stride, Idx n_scenarios) {
assert(n_scenarios <= narrow_cast<Idx>(exceptions.size()));
assert(n_scenarios <= narrow_cast<Idx>(infos.size()));

// lambda for sub batch calculation
auto sub_batch = [&base_model, &exceptions, &infos, &calculation_fn, &result_data, &update_data, &sequence_idx,
n_batch, is_independent](Idx start, Idx stride) {
Timer const t_total(infos[start], 0000, "Total in thread");

auto model = [&base_model, &infos, start] {
Timer const t_copy_model(infos[start], 1100, "Copy model");
auto copy_model = [&base_model, &infos](Idx scenario_idx) {
Timer const t_copy_model(infos[scenario_idx], 1100, "Copy model");
return MainModelImpl{base_model};
}();
};
auto model = copy_model(start);

auto scenario_sequence = sequence_idx;
SequenceIdx scenario_sequence = is_independent ? model.get_sequence_idx_map(update_data) : SequenceIdx{};

for (Idx batch_number = start; batch_number < n_batch; batch_number += stride) {
Timer const t_total_single(infos[batch_number], 0100, "Total single calculation in thread");
// try to update model and run calculation
try {
{
Timer const t_update_model(infos[batch_number], 1200, "Update model");
if (!is_independent) {
scenario_sequence = model.get_sequence_idx_map(update_data, batch_number);
}
model.template update_component<cached_update_t>(update_data, batch_number, scenario_sequence);
}
calculation_fn(model, result_data, batch_number);
{
Timer const t_update_model(infos[batch_number], 1201, "Restore model");
model.restore_components(scenario_sequence);
if (!is_independent) {
std::ranges::for_each(scenario_sequence, [](auto& comp_seq_idx) { comp_seq_idx.clear(); });
}
}
} catch (std::exception const& ex) {
exceptions[batch_number] = ex.what();
} catch (...) {
exceptions[batch_number] = "unknown exception";
}
auto [setup, winddown] =
scenario_update_restore(model, update_data, is_independent, scenario_sequence, infos);

auto calculate_scenario = MainModelImpl::call_with<Idx>(
[&model, &calculation_fn, &result_data, &infos](Idx scenario_idx) {
calculation_fn(model, result_data, scenario_idx);
infos[scenario_idx].merge(model.calculation_info_);
},
std::move(setup), std::move(winddown), scenario_exception_handler(model, exceptions, infos),
[&model, &copy_model](Idx scenario_idx) { model = copy_model(scenario_idx); });

infos[batch_number].merge(model.calculation_info_);
for (Idx scenario_idx = start; scenario_idx < n_scenarios; scenario_idx += stride) {
Timer const t_total_single(infos[scenario_idx], 0100, "Total single calculation in thread");

calculate_scenario(scenario_idx);
}
};
}

template <typename RunSubBatchFn>
requires std::invocable<std::remove_cvref_t<RunSubBatchFn>, Idx /*start*/, Idx /*stride*/, Idx /*n_scenarios*/>
static void batch_dispatch(RunSubBatchFn sub_batch, Idx n_scenarios, Idx threading) {
// run batches sequentially or in parallel
auto const hardware_thread = static_cast<Idx>(std::thread::hardware_concurrency());
// run sequentially if
//    specified threading < 0, or
//    specified threading == 1, or
//    threading == 0 (use hardware threads) but the hardware concurrency is unknown (0) or only 1
if (threading < 0 || threading == 1 || (threading == 0 && hardware_thread < 2)) {
// run all scenarios sequentially
sub_batch(0, 1);
sub_batch(0, 1, n_scenarios);
} else {
// create parallel threads
Idx const n_thread = std::min(threading == 0 ? hardware_thread : threading, n_batch);
Idx const n_thread = std::min(threading == 0 ? hardware_thread : threading, n_scenarios);
std::vector<std::thread> threads;
threads.reserve(n_thread);
for (Idx thread_number = 0; thread_number < n_thread; ++thread_number) {
// compute each sub batch with stride
threads.emplace_back(sub_batch, thread_number, n_thread);
threads.emplace_back(sub_batch, thread_number, n_thread, n_scenarios);
}
for (auto& thread : threads) {
thread.join();
}
}
}
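
The (start, stride, n_scenarios) convention used above partitions scenarios over threads in a round-robin fashion. A self-contained sketch of the same dispatch scheme (the worker body here is hypothetical):

// standalone sketch of strided scenario dispatch, mirroring batch_dispatch above
#include <iostream>
#include <string>
#include <thread>
#include <vector>

int main() {
    int const n_scenarios = 10;
    int const n_thread = 3;

    // each thread handles scenarios start, start + stride, start + 2 * stride, ...
    auto worker = [](int start, int stride, int n) {
        for (int scenario = start; scenario < n; scenario += stride) {
            std::string const line =
                "thread " + std::to_string(start) + " -> scenario " + std::to_string(scenario) + "\n";
            std::cout << line; // output order across threads is nondeterministic
        }
    };

    std::vector<std::thread> threads;
    threads.reserve(n_thread);
    for (int thread_number = 0; thread_number < n_thread; ++thread_number) {
        threads.emplace_back(worker, thread_number, n_thread, n_scenarios);
    }
    for (auto& thread : threads) {
        thread.join();
    }
}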

template <typename... Args, typename RunFn, typename SetupFn, typename WinddownFn, typename HandleExceptionFn,
typename RecoverFromBadFn>
requires std::invocable<std::remove_cvref_t<RunFn>, Args const&...> &&
std::invocable<std::remove_cvref_t<SetupFn>, Args const&...> &&
std::invocable<std::remove_cvref_t<WinddownFn>, Args const&...> &&
std::invocable<std::remove_cvref_t<HandleExceptionFn>, Args const&...> &&
std::invocable<std::remove_cvref_t<RecoverFromBadFn>, Args const&...>
static auto call_with(RunFn run, SetupFn setup, WinddownFn winddown, HandleExceptionFn handle_exception,
RecoverFromBadFn recover_from_bad) {
return [setup_ = std::move(setup), run_ = std::move(run), winddown_ = std::move(winddown),
handle_exception_ = std::move(handle_exception),
recover_from_bad_ = std::move(recover_from_bad)](Args const&... args) {
try {
setup_(args...);
run_(args...);
winddown_(args...);
} catch (...) {
handle_exception_(args...);
try {
winddown_(args...);
} catch (...) {
recover_from_bad_(args...);
}
}
};
}
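
call_with wraps a scenario run in a setup/run/winddown sequence and still attempts the winddown after a failure, falling back to a recovery step if the winddown itself throws. A reduced, self-contained sketch of that control flow (all lambdas here are placeholders, not the library code):

// standalone sketch of the setup/run/winddown/handle_exception/recover_from_bad flow
#include <iostream>
#include <stdexcept>

int main() {
    auto setup = [](int i) { std::cout << "setup " << i << '\n'; };
    auto run = [](int i) {
        if (i == 1) {
            throw std::runtime_error{"scenario failed"};
        }
        std::cout << "run " << i << '\n';
    };
    auto winddown = [](int i) { std::cout << "winddown " << i << '\n'; };
    auto handle_exception = [](int i) { std::cout << "handle exception for scenario " << i << '\n'; };
    auto recover_from_bad = [](int i) { std::cout << "recover " << i << '\n'; };

    auto call = [&](int i) {
        try {
            setup(i);
            run(i);
            winddown(i);
        } catch (...) {
            handle_exception(i);
            try {
                winddown(i); // still try to restore state after a failure
            } catch (...) {
                recover_from_bad(i); // last resort: rebuild from a clean copy
            }
        }
    };

    call(0); // setup, run, winddown
    call(1); // run throws: exception handled, winddown still attempted
}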

static auto scenario_update_restore(MainModelImpl& model, ConstDataset const& update_data,
bool const is_independent, SequenceIdx& scenario_sequence,
std::vector<CalculationInfo>& infos) {
return std::make_pair(
[&model, &update_data, &scenario_sequence, is_independent, &infos](Idx scenario_idx) {
Timer const t_update_model(infos[scenario_idx], 1200, "Update model");
if (!is_independent) {
scenario_sequence = model.get_sequence_idx_map(update_data, scenario_idx);
}
model.template update_component<cached_update_t>(update_data, scenario_idx, scenario_sequence);
},
[&model, &scenario_sequence, is_independent, &infos](Idx scenario_idx) {
Timer const t_update_model(infos[scenario_idx], 1201, "Restore model");
model.restore_components(scenario_sequence);
if (!is_independent) {
std::ranges::for_each(scenario_sequence, [](auto& comp_seq_idx) { comp_seq_idx.clear(); });
}
});
}

// Lippincott pattern
static auto scenario_exception_handler(MainModelImpl& model, std::vector<std::string>& messages,
std::vector<CalculationInfo>& infos) {
return [&model, &messages, &infos](Idx scenario_idx) {
std::exception_ptr const ex_ptr = std::current_exception();
try {
std::rethrow_exception(ex_ptr);
} catch (std::exception const& ex) {
messages[scenario_idx] = ex.what();
} catch (...) {
messages[scenario_idx] = "unknown exception";
}
infos[scenario_idx].merge(model.calculation_info_);
};
}
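
The Lippincott pattern above centralizes exception-to-message translation by re-throwing the exception that is currently in flight. A minimal self-contained sketch (the function name is illustrative only):

// standalone sketch of the Lippincott pattern: one handler translates any in-flight exception
#include <exception>
#include <iostream>
#include <stdexcept>
#include <string>

namespace {
std::string describe_current_exception() {
    try {
        std::rethrow_exception(std::current_exception()); // only valid inside a catch block
    } catch (std::exception const& ex) {
        return ex.what();
    } catch (...) {
        return "unknown exception";
    }
}
} // namespace

int main() {
    try {
        throw std::runtime_error{"update failed"};
    } catch (...) {
        std::cout << describe_current_exception() << '\n'; // prints: update failed
    }
}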

static void handle_batch_exceptions(std::vector<std::string> const& exceptions) {
@@ -203,7 +203,7 @@ template <class Tensor, class RHSVector, class XVector> class SparseLUSolver {
block_perm_array[pivot_row_col] = {lu_factor.permutationP(), lu_factor.permutationQ()};
return block_perm_array[pivot_row_col];
} else {
if (lu_matrix[pivot_idx] == 0.0) {
if (!is_normal(lu_matrix[pivot_idx])) {
throw SparseMatrixError{};
}
return {};
@@ -52,6 +52,11 @@ template <scalar_value T> class Vector : public Eigen3Vector<T> {
explicit Vector(std::piecewise_construct_t /* tag */, T const& x) { (*this) << x, x, x; }
// constructor of three values
Vector(T const& x1, T const& x2, T const& x3) { (*this) << x1, x2, x3; }
// for complex vectors, it is possible to construct from separate real and imaginary parts
template <std::floating_point U>
requires std::same_as<T, std::complex<U>>
Vector(Vector<U> real_part, Vector<U> imag_part)
: Vector{{real_part(0), imag_part(0)}, {real_part(1), imag_part(1)}, {real_part(2), imag_part(2)}} {}
};

template <scalar_value T> class Tensor : public Eigen3Tensor<T> {
@@ -295,7 +300,16 @@ inline bool is_nan(Enum x) {

// is normal
inline auto is_normal(std::floating_point auto value) { return std::isnormal(value); }
inline auto is_normal(RealValue<false> const& value) {
template <std::floating_point T> inline auto is_normal(std::complex<T> const& value) {
if (value.real() == T{0}) {
return is_normal(value.imag());
}
if (value.imag() == T{0}) {
return is_normal(value.real());
}
return is_normal(value.real()) && is_normal(value.imag());
}
template <class Derived> inline auto is_normal(Eigen::ArrayBase<Derived> const& value) {
return is_normal(value(0)) && is_normal(value(1)) && is_normal(value(2));
}
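
The complex overload of is_normal above deliberately accepts purely real or purely imaginary values: a zero component is ignored as long as the other component is normal, while an all-zero, NaN, or infinite value is rejected. A standalone sketch of the same decision logic using plain std::complex (not the library code):

// standalone sketch of the complex is_normal semantics
#include <cmath>
#include <complex>
#include <iostream>
#include <limits>

namespace {
bool is_normal(std::complex<double> const& value) {
    if (value.real() == 0.0) {
        return std::isnormal(value.imag());
    }
    if (value.imag() == 0.0) {
        return std::isnormal(value.real());
    }
    return std::isnormal(value.real()) && std::isnormal(value.imag());
}
} // namespace

int main() {
    double const nan = std::numeric_limits<double>::quiet_NaN();
    std::cout << std::boolalpha;
    std::cout << is_normal({1.0, 0.0}) << '\n'; // true: purely real, real part is normal
    std::cout << is_normal({0.0, 2.0}) << '\n'; // true: purely imaginary, imaginary part is normal
    std::cout << is_normal({0.0, 0.0}) << '\n'; // false: exact zero
    std::cout << is_normal({1.0, nan}) << '\n'; // false: NaN component
}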

7 changes: 3 additions & 4 deletions pyproject.toml
@@ -31,7 +31,7 @@ classifiers = [
"Operating System :: MacOS",
"Topic :: Scientific/Engineering :: Physics",
]
requires-python = ">=3.8"
requires-python = ">=3.9"
dependencies = [
"numpy>=1.21.0",
]
@@ -79,7 +79,7 @@ addopts = ["--cov=power_grid_model", "--cov-report", "term", "--cov-report", "ht

[tool.black]
line-length = 120
target-version = ['py38']
target-version = ['py39']

[tool.isort]
profile = "black"
@@ -105,9 +105,8 @@ test-extras = ["dev"]
test-command = "pytest {package}/tests"
# we do not support
# PyPy
# musllinux when python < 3.9
# musllinux in aarch64
skip = ["pp*", "cp38-musllinux*", "*-musllinux_aarch64"]
skip = ["pp*", "*-musllinux_aarch64"]

[tool.cibuildwheel.linux]
archs = ["x86_64", "aarch64"]