"""Contains an abstract class that serves as a base for defining benchmarks."""
import time
from abc import ABC, abstractmethod
from typing import Union
# import cProfile
class Benchmark(ABC):
"""Abstract class that serves as a base for defining benchmarks."""
# profiler: cProfile.Profile
# """Include a profiler to easily track the causes of bad performance."""
    def __init__(self, size: int = 500, *args, **kwargs):
        """Set up the internal attributes of the benchmark.

        Args:
            size (int): the number of iterations to be performed by the
                benchmark for it to be considered finished.
        """
        super().__init__(*args, **kwargs)
        self._size = size
        self._iter_times = [None] * size
        self._finished = False
        # self.profiler = cProfile.Profile()
    @property
    def started(self) -> bool:
        """Whether the benchmark has been iterated at least once."""
        return self.iterations > 0

    @property
    def finished(self) -> bool:
        """Whether the benchmark has finished all its programmed iterations."""
        return self._finished or self.iterations >= self.size

    @property
    def executed(self) -> bool:
        """True if the benchmark has both started and finished."""
        return self.started and self.finished
    @property
    def duration(self) -> float:
        """The process time consumed by the benchmark so far.

        The process time is measured with the time module from the Python
        Standard Library; see its definition at
        https://docs.python.org/dev/library/time.html#time.process_time .
        """
        return sum(float(x) for x in self._iter_times if x is not None)

    @property
    def iterations(self) -> int:
        """The number of iterations already executed."""
        return sum(1 for x in self._iter_times if x is not None)
    @property
    def iteration(self) -> Union[int, None]:
        """The current iteration.

        Returns:
            Union[int, None]: either the index of the last executed iteration
                or None if no iterations have been run yet.
        """
        if self.iterations > 0:
            return self.iterations - 1
        return None

    @property
    def size(self) -> int:
        """The number of iterations programmed at initialization.

        When the number of executed iterations reaches this value, the
        benchmark is finished.
        """
        return self._size
    def set_up(self):
        """Set up the benchmark. The time spent in the setup is not counted."""
        if not self.started and not self.finished:
            self._benchmark_set_up()
        elif self.started and not self.finished:
            raise RuntimeError("This benchmark has already started.")
        else:  # Both are true.
            raise StopIteration("This benchmark is finished.")

    @abstractmethod
    def _benchmark_set_up(self):
        """Implementation of the setup for a specific benchmark."""

    def tear_down(self):
        """Clean up after the benchmark. The time spent is not counted."""
        self._benchmark_tear_down()

    @abstractmethod
    def _benchmark_tear_down(self):
        """Implementation of the teardown for a specific benchmark."""
    def iterate(self):
        """Perform one iteration of the benchmark.

        Raises:
            StopIteration: when all the iterations of the benchmark have
                already been executed.
        """
        if self.finished:
            raise StopIteration("This benchmark is finished.")
        iteration = self.iterations
        start = time.process_time()
        # self.profiler.enable()
        self._benchmark_iterate(iteration=iteration)
        # self.profiler.disable()
        end = time.process_time()
        self._iter_times[iteration] = end - start

    @abstractmethod
    def _benchmark_iterate(self, iteration: Union[int, None] = None):
        """Implementation of a benchmark iteration for a specific benchmark.

        The time taken to execute any code inside this method is registered.

        Args:
            iteration (int): the number of the iteration being performed.
        """
    def run(self):
        """Run a benchmark from start to finish.

        This method only works on a benchmark that has not already been
        started; it executes all of the programmed iterations.
        """
        self.set_up()
        try:
            for _ in range(self.size):
                self.iterate()
        finally:
            self.tear_down()
    @classmethod
    def iterate_pytest_benchmark(
        cls, benchmark, size: int = 500, *args, **kwargs
    ):
        """Template wrapper function for pytest-benchmark.

        Can be overridden on a per-benchmark basis if desired.
        """
        # Provide sensible defaults without overriding caller-supplied values.
        kwargs.setdefault("iterations", 1)
        kwargs.setdefault("rounds", size)
        kwargs.setdefault("warmup_rounds", 0)
        benchmark_instance = cls(size=size)
        benchmark_instance.set_up()
        try:
            benchmark.pedantic(benchmark_instance.iterate, *args, **kwargs)
        finally:
            benchmark_instance.tear_down()