From 796d3b9db7ddd6cf9378b66f798e824d2c57574e Mon Sep 17 00:00:00 2001
From: UltralyticsAssistant
Date: Sat, 1 Jun 2024 01:33:09 +0000
Subject: [PATCH] Auto-format by https://ultralytics.com/actions

---
 benchmark/evaluate_famous_models.py |  2 +-
 tests/test_conv2d.py                |  2 +-
 tests/test_utils.py                 |  1 -
 thop/fx_profile.py                  |  3 +--
 thop/vision/basic_hooks.py          |  8 ++++----
 thop/vision/efficientnet.py         |  2 --
 thop/vision/onnx_counter.py         | 16 ++++++++--------
 7 files changed, 15 insertions(+), 19 deletions(-)

diff --git a/benchmark/evaluate_famous_models.py b/benchmark/evaluate_famous_models.py
index 299fb88..5fb1cbf 100644
--- a/benchmark/evaluate_famous_models.py
+++ b/benchmark/evaluate_famous_models.py
@@ -25,4 +25,4 @@
         dsize = (1, 3, 299, 299)
     inputs = torch.randn(dsize).to(device)
     total_ops, total_params = profile(model, (inputs,), verbose=False)
-    print("%s | %.2f | %.2f" % (name, total_params / (1000 ** 2), total_ops / (1000 ** 3)))
+    print("%s | %.2f | %.2f" % (name, total_params / (1000**2), total_ops / (1000**3)))
diff --git a/tests/test_conv2d.py b/tests/test_conv2d.py
index cd8efd6..e058e25 100644
--- a/tests/test_conv2d.py
+++ b/tests/test_conv2d.py
@@ -55,5 +55,5 @@ def test_conv2d_random(self):
             flops, params = profile(net, inputs=(data,))
             print(flops, params)
             assert (
-                    flops == n * out_c * oh * ow // g * in_c * kh * kw
+                flops == n * out_c * oh * ow // g * in_c * kh * kw
             ), f"{flops} v.s. {n * out_c * oh * ow // g * in_c * kh * kw}"
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 95f8516..0358e7e 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -1,4 +1,3 @@
-
 from thop import utils
 
 
diff --git a/thop/fx_profile.py b/thop/fx_profile.py
index 1f703cd..288d9ce 100644
--- a/thop/fx_profile.py
+++ b/thop/fx_profile.py
@@ -213,12 +213,12 @@ def fx_profile(mod: nn.Module, input: th.Tensor, verbose=False):
 
 
 if __name__ == "__main__":
+
     class MyOP(nn.Module):
         def forward(self, input):
             """Performs forward pass on given input data."""
             return input / 1
 
-
     class MyModule(torch.nn.Module):
         def __init__(self):
             """Initializes MyModule with two linear layers and a custom MyOP operator."""
@@ -235,7 +235,6 @@ def forward(self, x):
         out2 = self.linear2(x).clamp(min=0.0, max=1.0)
         return self.myop(out1 + out2)
 
-
     net = MyModule()
     data = th.randn(20, 5)
     flops = fx_profile(net, data, verbose=False)
diff --git a/thop/vision/basic_hooks.py b/thop/vision/basic_hooks.py
index 1acfc81..c80bafd 100644
--- a/thop/vision/basic_hooks.py
+++ b/thop/vision/basic_hooks.py
@@ -132,10 +132,10 @@ def count_adap_avgpool(m, x, y):
 def count_upsample(m, x, y):
     """Update the total operations counter in the given module for supported upsampling modes."""
     if m.mode not in (
-            "nearest",
-            "linear",
-            "bilinear",
-            "bicubic",
+        "nearest",
+        "linear",
+        "bilinear",
+        "bicubic",
     ):  # "trilinear"
         logging.warning("mode %s is not implemented yet, take it a zero op" % m.mode)
         m.total_ops += 0
diff --git a/thop/vision/efficientnet.py b/thop/vision/efficientnet.py
index 3666bc8..cbe3052 100644
--- a/thop/vision/efficientnet.py
+++ b/thop/vision/efficientnet.py
@@ -1,3 +1 @@
-
-
 register_hooks = {}
diff --git a/thop/vision/onnx_counter.py b/thop/vision/onnx_counter.py
index 217b0ec..b8b09b5 100644
--- a/thop/vision/onnx_counter.py
+++ b/thop/vision/onnx_counter.py
@@ -67,8 +67,8 @@ def onnx_counter_conv(diction, node):
             group = attr.i
     # print(dim_dil)
     dim_input = diction[node.input[0]]
-    output_size = np.append(dim_input[0: -np.array(dim_kernel).size - 1], dim_weight[0])
-    hw = np.array(dim_input[-np.array(dim_kernel).size:])
+    output_size = np.append(dim_input[0 : -np.array(dim_kernel).size - 1], dim_weight[0])
+    hw = np.array(dim_input[-np.array(dim_kernel).size :])
     for i in range(hw.size):
         hw[i] = int((hw[i] + 2 * dim_pad[i] - dim_dil[i] * (dim_kernel[i] - 1) - 1) / dim_stride[i] + 1)
     output_size = np.append(output_size, hw)
@@ -238,15 +238,15 @@ def onnx_counter_averagepool(diction, node):
             dim_dil = attr.ints
             # print(dim_dil)
     dim_input = diction[node.input[0]]
-    hw = dim_input[-np.array(dim_kernel).size:]
+    hw = dim_input[-np.array(dim_kernel).size :]
     if dim_pad is not None:
         for i in range(hw.size):
             hw[i] = int((hw[i] + 2 * dim_pad[i] - dim_kernel[i]) / dim_stride[i] + 1)
-        output_size = np.append(dim_input[0: -np.array(dim_kernel).size], hw)
+        output_size = np.append(dim_input[0 : -np.array(dim_kernel).size], hw)
     else:
         for i in range(hw.size):
             hw[i] = int((hw[i] - dim_kernel[i]) / dim_stride[i] + 1)
-        output_size = np.append(dim_input[0: -np.array(dim_kernel).size], hw)
+        output_size = np.append(dim_input[0 : -np.array(dim_kernel).size], hw)
     # print(macs, output_size, output_name)
     return macs, output_size, output_name
 
@@ -293,15 +293,15 @@ def onnx_counter_maxpool(diction, node):
             dim_dil = attr.ints
             # print(dim_dil)
     dim_input = diction[node.input[0]]
-    hw = dim_input[-np.array(dim_kernel).size:]
+    hw = dim_input[-np.array(dim_kernel).size :]
     if dim_pad is not None:
         for i in range(hw.size):
             hw[i] = int((hw[i] + 2 * dim_pad[i] - dim_kernel[i]) / dim_stride[i] + 1)
-        output_size = np.append(dim_input[0: -np.array(dim_kernel).size], hw)
+        output_size = np.append(dim_input[0 : -np.array(dim_kernel).size], hw)
     else:
         for i in range(hw.size):
             hw[i] = int((hw[i] - dim_kernel[i]) / dim_stride[i] + 1)
-        output_size = np.append(dim_input[0: -np.array(dim_kernel).size], hw)
+        output_size = np.append(dim_input[0 : -np.array(dim_kernel).size], hw)
     # print(macs, output_size, output_name)
     return macs, output_size, output_name
 