Fix: solve import issues for performance tests (#22)
* Update test_get_downstream_nodes

Co-authored-by: jaapschoutenalliander <[email protected]>
Signed-off-by: Thijs Baaijen <[email protected]>

* Update performance tests

Co-authored-by: jaapschoutenalliander <[email protected]>
Signed-off-by: Thijs Baaijen <[email protected]>

---------

Signed-off-by: Thijs Baaijen <[email protected]>
Co-authored-by: jaapschoutenalliander <[email protected]>
Thijss and jaapschoutenalliander authored Jan 29, 2025
1 parent 76fadb3 commit 0bb4403
Showing 7 changed files with 127 additions and 123 deletions.
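In short, the fix moves the performance tests off internal _core and deep module paths onto the public API. For example (before/after pairs taken from the diffs below):

    # before (internal paths)
    from power_grid_model_ds.model.grids.base import Grid
    from power_grid_model_ds._core.model.arrays import NodeArray
    import power_grid_model_ds._core.fancypy as fp

    # after (public paths)
    from power_grid_model_ds import Grid
    from power_grid_model_ds.arrays import NodeArray
    import power_grid_model_ds.fancypy as fp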
18 changes: 9 additions & 9 deletions tests/performance/_constants.py
@@ -6,18 +6,18 @@
"dtype = [('id', '<i8'), ('test_int', '<i8'), ('test_float', '<f8'), ('test_str', '<U50'), ('test_bool', '?')]; "
)

SETUP_CODES = {
"structured": "import numpy as np;" + NUMPY_DTYPE + "input_array = np.zeros({array_size}, dtype=dtype)",
"rec": "import numpy as np;" + NUMPY_DTYPE + "input_array = np.recarray(({array_size},),dtype=dtype)",
"fancy": "from tests.conftest import FancyTestArray; input_array=FancyTestArray.zeros({array_size});"
+ "import numpy as np;input_array.id = np.arange({array_size})",
ARRAY_SETUP_CODES = {
"structured": "import numpy as np;" + NUMPY_DTYPE + "input_array = np.zeros({size}, dtype=dtype)",
"rec": "import numpy as np;" + NUMPY_DTYPE + "input_array = np.recarray(({size},),dtype=dtype)",
"fancy": "from tests.conftest import FancyTestArray; input_array=FancyTestArray.zeros({size});"
+ "import numpy as np;input_array.id = np.arange({size})",
}

GRAPH_SETUP_CODES = {
"rustworkx": "from power_grid_model_ds.model.grids.base import Grid;"
+ "from power_grid_model_ds.data_source.generator.grid_generators import RadialGridGenerator;"
+ "from power_grid_model_ds.model.graphs.models import RustworkxGraphModel;"
+ "grid=RadialGridGenerator(nr_nodes={graph_size}, grid_class=Grid, graph_model=RustworkxGraphModel).run()",
"rustworkx": "from power_grid_model_ds import Grid;"
+ "from power_grid_model_ds.generators import RadialGridGenerator;"
+ "from power_grid_model_ds.graph_models import RustworkxGraphModel;"
+ "grid=RadialGridGenerator(nr_nodes={size}, grid_class=Grid, graph_model=RustworkxGraphModel).run()",
}

SINGLE_REPEATS = 1000
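As a rough illustration (not part of the commit) of how these templates are consumed, the sketch below expands the "structured" setup code for an arbitrary size and times it on its own; it assumes it is run from the repository root so that the tests package is importable:

    import timeit

    from tests.performance._constants import ARRAY_SETUP_CODES

    # Fill in the {size} placeholder, exactly as _helpers.do_performance_test does.
    setup = ARRAY_SETUP_CODES["structured"].format(size=1_000)

    # Timing the setup string itself mirrors the "pass" (initialise) case in _helpers.py.
    print(timeit.repeat(setup, number=100))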
101 changes: 29 additions & 72 deletions tests/performance/_helpers.py
@@ -4,100 +4,57 @@

import inspect
import timeit
from typing import Generator
from itertools import product
from typing import Generator, Union

from tests.performance._constants import GRAPH_SETUP_CODES, SETUP_CODES


def do_performance_test(code_to_test: str | dict[str, str], array_sizes: list[int], repeats: int):
"""Run the performance test for the given code."""

def do_performance_test(
code_to_test: Union[str, dict[str, str], list[str]],
size_list: list[int],
repeats: int,
setup_codes: dict[str, str],
):
"""Generalized performance test runner."""
print(f"{'-' * 20} {inspect.stack()[1][3]} {'-' * 20}")

for array_size in array_sizes:
for size in size_list:
formatted_setup_codes = {key: code.format(size=size) for key, code in setup_codes.items()}
if isinstance(code_to_test, dict):
code_to_test_list = [code_to_test[variant].format(array_size=array_size) for variant in SETUP_CODES]
else:
code_to_test_list = [code_to_test.format(array_size=array_size)] * len(SETUP_CODES)
print(f"\n\tArray size: {array_size}\n")
setup_codes = [setup_code.format(array_size=array_size) for setup_code in SETUP_CODES.values()]
timings = _get_timings(setup_codes, code_to_test_list, repeats)

if code_to_test == "pass":
_print_timings(timings, list(SETUP_CODES.keys()), setup_codes)
code_to_test_list = [code_to_test[variant].format(size=size) for variant in setup_codes]
test_generator = zip(formatted_setup_codes.items(), code_to_test_list)
elif isinstance(code_to_test, list):
code_to_test_list = [code.format(size=size) for code in code_to_test]
test_generator = product(formatted_setup_codes.items(), code_to_test_list)
else:
_print_timings(timings, list(SETUP_CODES.keys()), code_to_test_list)
print()
test_generator = product(formatted_setup_codes.items(), [code_to_test.format(size=size)])

print(f"\n\tsize: {size}\n")

def do_graph_test(code_to_test: str | dict[str, str], graph_sizes: list[int], repeats: int):
"""Run the performance test for the given code."""
timings = _get_timings(test_generator, repeats=repeats)
_print_timings(timings)

print(f"{'-' * 20} {inspect.stack()[1][3]} {'-' * 20}")

for graph_size in graph_sizes:
if isinstance(code_to_test, dict):
code_to_test_list = [code_to_test[variant] for variant in GRAPH_SETUP_CODES]
else:
code_to_test_list = [code_to_test] * len(GRAPH_SETUP_CODES)
print(f"\n\tGraph size: {graph_size}\n")
setup_codes = [setup_code.format(graph_size=graph_size) for setup_code in GRAPH_SETUP_CODES.values()]
timings = _get_timings(setup_codes, code_to_test_list, repeats)

if code_to_test == "pass":
_print_graph_timings(timings, list(GRAPH_SETUP_CODES.keys()), setup_codes)
else:
_print_graph_timings(timings, list(GRAPH_SETUP_CODES.keys()), code_to_test_list)
print()


def _print_test_code(code: str | dict[str, str], repeats: int):
print(f"{'-' * 40}")
if isinstance(code, dict):
for variant, code_variant in code.items():
print(f"{variant}")
print(f"\t{code_variant} (x {repeats})")
return
print(f"{code} (x {repeats})")


def _print_graph_timings(timings: Generator, graph_types: list[str], code_list: list[str]):
for graph_type, timing, code in zip(graph_types, timings, code_list):
if ";" in code:
code = code.split(";")[-1]

code = code.replace("\n", " ").replace("\t", " ")
code = f"{graph_type}: " + code

if isinstance(timing, Exception):
print(f"\t\t{code.ljust(100)} | Not supported")
continue
print(f"\t\t{code.ljust(100)} | {sum(timing):.2f}s")


def _print_timings(timings: Generator, array_types: list[str], code_list: list[str]):
for array, timing, code in zip(array_types, timings, code_list):
if ";" in code:
code = code.split(";")[-1]

code = code.replace("\n", " ").replace("\t", " ")
array_name = f"{array}_array"
code = code.replace("input_array", array_name)
def _print_timings(timings: Generator):
for key, code, timing in timings:
code = code.split(";")[-1].replace("\n", " ").replace("\t", " ")
code = f"{key}: {code}"

if isinstance(timing, Exception):
print(f"\t\t{code.ljust(100)} | Not supported")
continue
print(f"\t\t{code.ljust(100)} | {sum(timing):.2f}s")


def _get_timings(setup_codes: list[str], test_codes: list[str], repeats: int):
def _get_timings(test_generator, repeats: int):
"""Return a generator with the timings for each array type."""
for setup_code, test_code in zip(setup_codes, test_codes):
for (key, setup_code), test_code in test_generator:
if test_code == "pass":
yield timeit.repeat(setup_code, number=repeats)
yield key, "intialise", timeit.repeat(setup_code, number=repeats)
else:
try:
yield timeit.repeat(test_code, setup_code, number=repeats)
yield key, test_code, timeit.repeat(test_code, setup_code, number=repeats)
# pylint: disable=broad-exception-caught
except Exception as error: # noqa
yield error
yield key, test_code, error
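A minimal usage sketch of the generalised runner (illustrative only; the sizes and repeat count are arbitrary, and the call mirrors the ones in the test modules below):

    from tests.performance._constants import ARRAY_SETUP_CODES
    from tests.performance._helpers import do_performance_test

    # Times the same statement against every setup variant (structured, rec, fancy).
    do_performance_test(
        "input_array['id'] = 1",
        size_list=[1_000, 10_000],
        repeats=100,
        setup_codes=ARRAY_SETUP_CODES,
    )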
54 changes: 30 additions & 24 deletions tests/performance/array_performance_tests.py
@@ -11,18 +11,24 @@

import logging

from tests.performance._constants import ARRAY_SIZES_LARGE, ARRAY_SIZES_SMALL, LOOP_REPEATS, SINGLE_REPEATS
from tests.performance._constants import (
ARRAY_SETUP_CODES,
ARRAY_SIZES_LARGE,
ARRAY_SIZES_SMALL,
LOOP_REPEATS,
SINGLE_REPEATS,
)
from tests.performance._helpers import do_performance_test

logging.basicConfig(level=logging.INFO)


def perftest_initialize():
do_performance_test("pass", ARRAY_SIZES_LARGE, SINGLE_REPEATS)
do_performance_test("pass", ARRAY_SIZES_LARGE, SINGLE_REPEATS, ARRAY_SETUP_CODES)


def perftest_slice():
do_performance_test("input_array[0:10]", ARRAY_SIZES_LARGE, SINGLE_REPEATS)
do_performance_test("input_array[0:10]", ARRAY_SIZES_LARGE, SINGLE_REPEATS, ARRAY_SETUP_CODES)


def perftest_set_attr():
@@ -31,77 +37,77 @@ def perftest_set_attr():
"rec": "input_array.id = 1",
"fancy": "input_array.id = 1",
}
do_performance_test(code_to_test, ARRAY_SIZES_LARGE, SINGLE_REPEATS)
do_performance_test(code_to_test, ARRAY_SIZES_LARGE, SINGLE_REPEATS, ARRAY_SETUP_CODES)


def perftest_set_field():
do_performance_test("input_array['id'] = 1", ARRAY_SIZES_LARGE, SINGLE_REPEATS)
do_performance_test("input_array['id'] = 1", ARRAY_SIZES_LARGE, SINGLE_REPEATS, ARRAY_SETUP_CODES)


def perftest_loop_slice_1():
code_to_test = "for i in range({array_size}): input_array[i]"
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, LOOP_REPEATS)
code_to_test = "for i in range({size}): input_array[i]"
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, LOOP_REPEATS, ARRAY_SETUP_CODES)


def perftest_loop_data_slice_1():
code_to_test = {
"structured": "for i in range({array_size}): input_array[i]",
"rec": "for i in range({array_size}): input_array[i]",
"fancy": "for i in range({array_size}): input_array.data[i]",
"structured": "for i in range({size}): input_array[i]",
"rec": "for i in range({size}): input_array[i]",
"fancy": "for i in range({size}): input_array.data[i]",
}
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, LOOP_REPEATS)
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, LOOP_REPEATS, ARRAY_SETUP_CODES)


def perftest_loop_slice():
code_to_test = "for i in range({array_size}): input_array[i:i+1]"
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, LOOP_REPEATS)
code_to_test = "for i in range({size}): input_array[i:i+1]"
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, LOOP_REPEATS, ARRAY_SETUP_CODES)


def perftest_loop_set_field():
code_to_test = "for i in range({array_size}): input_array['id'][i] = 1"
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, LOOP_REPEATS)
code_to_test = "for i in range({size}): input_array['id'][i] = 1"
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, LOOP_REPEATS, ARRAY_SETUP_CODES)


def perftest_loop_get_field():
code_to_test = "for row in input_array: row['id']"
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, LOOP_REPEATS)
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, LOOP_REPEATS, ARRAY_SETUP_CODES)


def perftest_loop_data_get_field():
code_to_test = "for row in input_array.data: row['id']"
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, LOOP_REPEATS)
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, LOOP_REPEATS, ARRAY_SETUP_CODES)


def perftest_loop_get_attr():
code_to_test = "for row in input_array: row.id"
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, 100)
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, 100, ARRAY_SETUP_CODES)


def perftest_fancypy_concat():
code_to_test = {
"structured": "import numpy as np;np.concatenate([input_array, input_array])",
"rec": "import numpy as np;np.concatenate([input_array, input_array])",
"fancy": "import power_grid_model_ds._core.fancypy as fp;fp.concatenate(input_array, input_array)",
"fancy": "import power_grid_model_ds.fancypy as fp;fp.concatenate(input_array, input_array)",
}
do_performance_test(code_to_test, ARRAY_SIZES_LARGE, 100)
do_performance_test(code_to_test, ARRAY_SIZES_LARGE, 100, ARRAY_SETUP_CODES)


def perftest_fancypy_unique():
code_to_test = {
"structured": "import numpy as np;np.unique(input_array)",
"rec": "import numpy as np;np.unique(input_array)",
"fancy": "import power_grid_model_ds._core.fancypy as fp;fp.unique(input_array)",
"fancy": "import power_grid_model_ds.fancypy as fp;fp.unique(input_array)",
}
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, 100)
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, 100, ARRAY_SETUP_CODES)


def perftest_fancypy_sort():
code_to_test = {
"structured": "import numpy as np;np.sort(input_array)",
"rec": "import numpy as np;np.sort(input_array)",
"fancy": "import power_grid_model_ds._core.fancypy as fp;fp.sort(input_array)",
"fancy": "import power_grid_model_ds.fancypy as fp;fp.sort(input_array)",
}
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, 100)
do_performance_test(code_to_test, ARRAY_SIZES_SMALL, 100, ARRAY_SETUP_CODES)


if __name__ == "__main__":
14 changes: 7 additions & 7 deletions tests/performance/filter_performance_tests.py
@@ -2,7 +2,7 @@
#
# SPDX-License-Identifier: MPL-2.0

from tests.performance._constants import ARRAY_SIZES_LARGE, SINGLE_REPEATS
from tests.performance._constants import ARRAY_SETUP_CODES, ARRAY_SIZES_LARGE, SINGLE_REPEATS
from tests.performance._helpers import do_performance_test

# pylint: disable=missing-function-docstring
@@ -14,7 +14,7 @@ def perftest_get():
"rec": "input_array[np.isin(input_array['id'], 99)]",
"fancy": "try:\n\tinput_array.get(id=99)\nexcept:\n\tpass",
}
do_performance_test(code_to_test, ARRAY_SIZES_LARGE, SINGLE_REPEATS)
do_performance_test(code_to_test, ARRAY_SIZES_LARGE, SINGLE_REPEATS, ARRAY_SETUP_CODES)


def perftest_filter():
@@ -23,16 +23,16 @@ def perftest_filter():
"rec": "input_array[np.isin(input_array['id'], 99)]",
"fancy": "input_array.filter(id=99)",
}
do_performance_test(code_to_test, ARRAY_SIZES_LARGE, SINGLE_REPEATS)
do_performance_test(code_to_test, ARRAY_SIZES_LARGE, SINGLE_REPEATS, ARRAY_SETUP_CODES)


def perftest_update_by_id():
code_to_test = {
"structured": "input_array['test_float'][np.isin(input_array['id'], np.arange({array_size}))] = 42.0",
"rec": "input_array['test_float'][np.isin(input_array['id'], np.arange({array_size}))] = 42.0",
"fancy": "input_array.update_by_id(ids=np.arange({array_size}), test_float=42.0, allow_missing = False)",
"structured": "input_array['test_float'][np.isin(input_array['id'], np.arange({size}))] = 42.0",
"rec": "input_array['test_float'][np.isin(input_array['id'], np.arange({size}))] = 42.0",
"fancy": "input_array.update_by_id(ids=np.arange({size}), test_float=42.0, allow_missing = False)",
}
do_performance_test(code_to_test, ARRAY_SIZES_LARGE, SINGLE_REPEATS)
do_performance_test(code_to_test, ARRAY_SIZES_LARGE, SINGLE_REPEATS, ARRAY_SETUP_CODES)


if __name__ == "__main__":
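For orientation (an illustrative sketch, not part of the commit), the "fancy" variants above roughly exercise the following calls; running it requires the repository checkout so that tests.conftest.FancyTestArray is importable:

    import numpy as np

    from tests.conftest import FancyTestArray

    # Recreate the fixture that the fancy setup code builds.
    input_array = FancyTestArray.zeros(1_000)
    input_array.id = np.arange(1_000)

    # The calls timed against their plain-NumPy equivalents above.
    input_array.filter(id=99)
    input_array.update_by_id(ids=np.arange(1_000), test_float=42.0, allow_missing=False)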
19 changes: 10 additions & 9 deletions tests/performance/graph_performance_tests.py
@@ -2,7 +2,8 @@
#
# SPDX-License-Identifier: MPL-2.0

from tests.performance._helpers import do_graph_test
from tests.performance._constants import GRAPH_SETUP_CODES
from tests.performance._helpers import do_performance_test

# pylint: disable=missing-function-docstring

@@ -11,40 +12,40 @@


def perftest_initialize():
do_graph_test("pass", [10, 100], 100)
do_performance_test("pass", [10, 100], 100, setup_codes=GRAPH_SETUP_CODES)


def perftest_get_components():
code_to_test = (
"from power_grid_model_ds._core.model.enums.nodes import NodeType;"
"from power_grid_model_ds.enums import NodeType;"
+ "feeder_node_ids=grid.node.filter(node_type=NodeType.SUBSTATION_NODE).id;"
+ "grid.graphs.active_graph.get_components(feeder_node_ids)"
)
do_graph_test(code_to_test, GRAPH_SIZES, 100)
do_performance_test(code_to_test, GRAPH_SIZES, 100, setup_codes=GRAPH_SETUP_CODES)


def perftest_set_feeder_ids():
code_to_test = "grid.set_feeder_ids()"
do_graph_test(code_to_test, GRAPH_SIZES, 100)
do_performance_test(code_to_test, GRAPH_SIZES, 100, setup_codes=GRAPH_SETUP_CODES)


def perftest_delete_node():
code_to_test = "grid.delete_node(grid.node[0]);"
do_graph_test(code_to_test, GRAPH_SIZES, 100)
do_performance_test(code_to_test, GRAPH_SIZES, 100, setup_codes=GRAPH_SETUP_CODES)


def perftest_from_arrays():
code_to_test = "grid.graphs.complete_graph.__class__.from_arrays(grid);"
do_graph_test(code_to_test, GRAPH_SIZES, 100)
do_performance_test(code_to_test, GRAPH_SIZES, 100, setup_codes=GRAPH_SETUP_CODES)


def perftest_add_node():
code_to_test = (
"from power_grid_model_ds._core.model.arrays import NodeArray;"
"from power_grid_model_ds.arrays import NodeArray;"
+ "new_node = NodeArray.zeros(1);"
+ "grid.add_node(node=new_node)"
)
do_graph_test(code_to_test, GRAPH_SIZES, 100)
do_performance_test(code_to_test, GRAPH_SIZES, 100, setup_codes=GRAPH_SETUP_CODES)


if __name__ == "__main__":
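For reference, this is roughly what the "rustworkx" entry of GRAPH_SETUP_CODES expands to once formatted (an illustrative sketch; the node count is arbitrary and the public import paths are the ones introduced by this commit):

    from power_grid_model_ds import Grid
    from power_grid_model_ds.generators import RadialGridGenerator
    from power_grid_model_ds.graph_models import RustworkxGraphModel

    # Build a radial grid with 100 nodes backed by a rustworkx graph model.
    grid = RadialGridGenerator(nr_nodes=100, grid_class=Grid, graph_model=RustworkxGraphModel).run()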
(diffs for the remaining 2 of 7 changed files did not load and are not shown)
