From c967fa78debdd1281e11dc8dd261fe2b29ecee46 Mon Sep 17 00:00:00 2001 From: Fisher Date: Thu, 27 Apr 2023 11:07:56 +0000 Subject: [PATCH 1/3] Add reduce test using new test helper --- python/tests/ops/test_reduce_op_new.py | 210 +++++++++++++++++++++++++ 1 file changed, 210 insertions(+) create mode 100644 python/tests/ops/test_reduce_op_new.py diff --git a/python/tests/ops/test_reduce_op_new.py b/python/tests/ops/test_reduce_op_new.py new file mode 100644 index 0000000000..fc22efe0cc --- /dev/null +++ b/python/tests/ops/test_reduce_op_new.py @@ -0,0 +1,210 @@ +# Copyright (c) 2023 CINN Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import unittest +import numpy as np +from op_test import OpTest, OpTestTool +from op_test_helper import TestCaseHelper +import paddle +import cinn +from cinn.frontend import * +from cinn.common import * + + +@OpTestTool.skip_if(not is_compiled_with_cuda(), + "x86 test will be skipped due to timeout.") +class TestReduceOp(OpTest): + def setUp(self): + print(f"\nRunning {self.__class__.__name__}: {self.case}") + self.prepare_inputs() + + def prepare_inputs(self): + self.x_np = self.random( + shape=self.case["shape"], dtype=self.case["dtype"]) + + def build_paddle_program(self, target): + x = paddle.to_tensor(self.x_np, stop_gradient=True) + if self.case["op_type"] == "sum": + out = paddle.sum( + x, axis=self.case["axis"], keepdim=self.case["keepdim"]) + elif self.case["op_type"] == "prod": + out = paddle.prod( + x, axis=self.case["axis"], keepdim=self.case["keepdim"]) + elif self.case["op_type"] == "max": + out = paddle.max( + x, axis=self.case["axis"], keepdim=self.case["keepdim"]) + elif self.case["op_type"] == "min": + out = paddle.min( + x, axis=self.case["axis"], keepdim=self.case["keepdim"]) + elif self.case["op_type"] == "all": + out = paddle.all( + x, axis=self.case["axis"], keepdim=self.case["keepdim"]) + elif self.case["op_type"] == "any": + out = paddle.any( + x, axis=self.case["axis"], keepdim=self.case["keepdim"]) + else: + out = paddle.nn.Identity(x) + self.paddle_outputs = [out] + + def build_cinn_program(self, target): + builder = NetBuilder("reduce") + x = builder.create_input( + self.nptype2cinntype(self.case["dtype"]), self.case["shape"], "x") + if self.case["op_type"] == "sum": + out = builder.reduce_sum(x, [self.case["axis"]], + self.case["keepdim"]) + elif self.case["op_type"] == "prod": + out = builder.reduce_prod(x, [self.case["axis"]], + self.case["keepdim"]) + elif self.case["op_type"] == "max": + out = builder.reduce_max(x, [self.case["axis"]], + self.case["keepdim"]) + elif self.case["op_type"] == "min": + out = builder.reduce_min(x, [self.case["axis"]], + self.case["keepdim"]) + elif self.case["op_type"] == "all": + out = builder.reduce_all(x, [self.case["axis"]], + self.case["keepdim"]) + elif self.case["op_type"] == "any": + out = builder.reduce_any(x, [self.case["axis"]], + self.case["keepdim"]) + else: + out = builder.identity(x) + prog = builder.build() + res = self.get_cinn_output(prog, target, 
[x], [self.x_np], [out]) + self.cinn_outputs = res + + def test_check_results(self): + max_relative_error = self.case[ + "max_relative_error"] if "max_relative_error" in self.case else 1e-5 + self.check_outputs_and_grads(max_relative_error=max_relative_error) + + +class TestReduceAll(TestCaseHelper): + def init_attrs(self): + self.class_name = "TestReduceOpCase" + self.cls = TestReduceOp + self.inputs = [ + { + "shape": [1], + "axis": -1, + }, + { + "shape": [1024], + "axis": 0, + }, + { + "shape": [512, 256], + "axis": 1, + }, + { + "shape": [128, 64, 32], + "axis": 2, + }, + { + "shape": [16, 8, 4, 2], + "axis": 3, + }, + { + "shape": [16, 8, 4, 2, 1], + "axis": 3, + }, + { + "shape": [1, 1, 1, 1, 1], + "axis": 3, + }, + ] + self.dtypes = [ + { + "dtype": "int16", + }, + { + "dtype": "int32", + }, + { + "dtype": "int64", + }, + { + "dtype": "float16", + }, + { + "dtype": "float32", + }, + { + "dtype": "float64", + }, + ] + self.attrs = [ + { + "op_type": "sum", + "keepdim": True + }, + { + "op_type": "sum", + "keepdim": False + }, + { + "op_type": "prod", + "keepdim": True + }, + { + "op_type": "prod", + "keepdim": False + }, + { + "op_type": "max", + "keepdim": True + }, + { + "op_type": "max", + "keepdim": False + }, + { + "op_type": "min", + "keepdim": True + }, + { + "op_type": "min", + "keepdim": False + }, + ] + + +class TestReduceForBool(TestReduceAll): + def init_attrs(self): + super().init_attrs() + self.dtypes = [{"dtype": "bool"}] + self.attrs = [ + { + "op_type": "all", + "keepdim": True + }, + { + "op_type": "all", + "keepdim": False + }, + { + "op_type": "any", + "keepdim": True + }, + { + "op_type": "any", + "keepdim": False + }, + ] + + +if __name__ == "__main__": + TestReduceAll().run() + TestReduceForBool().run() From d869f5a5d414b625edddfa20a035b3b2633e94c6 Mon Sep 17 00:00:00 2001 From: Fisher Date: Thu, 4 May 2023 07:54:08 +0000 Subject: [PATCH 2/3] Fix output shape error when numel = 1 Add cast op on paddle reduce_sum when dtype is int32 --- cinn/frontend/net_builder.cc | 4 +++- python/tests/ops/test_reduce_op_new.py | 16 ++++++++++------ 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/cinn/frontend/net_builder.cc b/cinn/frontend/net_builder.cc index f61437dd46..9f3a517cd3 100644 --- a/cinn/frontend/net_builder.cc +++ b/cinn/frontend/net_builder.cc @@ -22,6 +22,7 @@ #include "cinn/hlir/pe/broadcast.h" #include "cinn/utils/functional.h" #include "cinn/utils/profiler.h" +#include "glog/logging.h" namespace cinn { namespace frontend { @@ -109,7 +110,8 @@ Variable NetBuilder::Reduce(const std::string& op_type, const Variable& x, const if (keep_dim) { return Identity(x); } else { - int new_rank = dim.empty() ? 1 : x->shape.size() - dim.size() + 1; + CHECK_GE(x->shape.size(), dim.size()) << "The inputs rank should be greater than or equal to axes."; + int new_rank = x->shape.size() == dim.size() ? 
1 : x->shape.size() - dim.size();
     std::vector<int> new_shape(new_rank, 1);
     return Reshape(x, new_shape);
   }
diff --git a/python/tests/ops/test_reduce_op_new.py b/python/tests/ops/test_reduce_op_new.py
index fc22efe0cc..dd2bfac089 100644
--- a/python/tests/ops/test_reduce_op_new.py
+++ b/python/tests/ops/test_reduce_op_new.py
@@ -38,6 +38,8 @@ def build_paddle_program(self, target):
         if self.case["op_type"] == "sum":
             out = paddle.sum(
                 x, axis=self.case["axis"], keepdim=self.case["keepdim"])
+            if self.case["dtype"] == "int32":
+                out = out.cast(self.case["dtype"])
         elif self.case["op_type"] == "prod":
             out = paddle.prod(
                 x, axis=self.case["axis"], keepdim=self.case["keepdim"])
@@ -126,18 +128,20 @@ def init_attrs(self):
             },
         ]
         self.dtypes = [
-            {
-                "dtype": "int16",
-            },
+            # Paddle reduce does not support int16
+            # {
+            #     "dtype": "int16",
+            # },
             {
                 "dtype": "int32",
             },
             {
                 "dtype": "int64",
             },
-            {
-                "dtype": "float16",
-            },
+            # Paddle reduce does not support float16
+            # {
+            #     "dtype": "float16",
+            # },
             {
                 "dtype": "float32",
             },

From 6e7c179a7636fd774f0a7d110ce53f4016197385 Mon Sep 17 00:00:00 2001
From: Fisher
Date: Sat, 6 May 2023 11:06:13 +0000
Subject: [PATCH 3/3] Fix reduce result error when keepdim = True

---
 cinn/hlir/pe/ir_schedule_pe.cc           |  8 ++-
 python/tests/ops/test_reduce_op_new.py   | 53 ++++-----------
 python/tests/ops/test_reduce_op_other.py | 87 ++++++++++++++++++++++++
 3 files changed, 108 insertions(+), 40 deletions(-)
 create mode 100644 python/tests/ops/test_reduce_op_other.py

diff --git a/cinn/hlir/pe/ir_schedule_pe.cc b/cinn/hlir/pe/ir_schedule_pe.cc
index 8621aa2a38..bf75ffe1b8 100644
--- a/cinn/hlir/pe/ir_schedule_pe.cc
+++ b/cinn/hlir/pe/ir_schedule_pe.cc
@@ -31,6 +31,7 @@
 #include "cinn/ir/ir_base.h"
 #include "cinn/optim/ir_simplify.h"
 #include "cinn/poly/isl_utils.h"
+#include "cinn/utils/string.h"

 namespace cinn {
 namespace hlir {
@@ -462,7 +463,12 @@ void IRCudaScheduleBlockReduce(ir::IRSchedule &ir_sch,
     }
   }

-  if (tmp_out->shape.size() == 1) {
+  // Special handling for keepdim = True in reduce stage 1: the rank of tmp_out may be greater than 1, but the loops
+  // still need to be split, otherwise conflicting reads and writes on the same data occur.
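+  // When numel equals the extent of the innermost dimension, every other dimension is 1, so tmp_out is effectively
+  // one-dimensional and can take the same schedule as the original shape.size() == 1 case.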
+ int numel = std::accumulate(tmp_out->shape.begin(), tmp_out->shape.end(), 1, [](const int &num, const ir::Expr &e) { + return num * e.as_int32(); + }); + if (tmp_out->shape.size() == 1 || (numel == tmp_out->shape.back().as_int32())) { CHECK_EQ(out->shape[0], Expr(1)); // block and root diff --git a/python/tests/ops/test_reduce_op_new.py b/python/tests/ops/test_reduce_op_new.py index dd2bfac089..af01ce28fc 100644 --- a/python/tests/ops/test_reduce_op_new.py +++ b/python/tests/ops/test_reduce_op_new.py @@ -56,7 +56,7 @@ def build_paddle_program(self, target): out = paddle.any( x, axis=self.case["axis"], keepdim=self.case["keepdim"]) else: - out = paddle.nn.Identity(x) + out = paddle.assign(x) self.paddle_outputs = [out] def build_cinn_program(self, target): @@ -64,22 +64,22 @@ def build_cinn_program(self, target): x = builder.create_input( self.nptype2cinntype(self.case["dtype"]), self.case["shape"], "x") if self.case["op_type"] == "sum": - out = builder.reduce_sum(x, [self.case["axis"]], + out = builder.reduce_sum(x, self.case["axis"], self.case["keepdim"]) elif self.case["op_type"] == "prod": - out = builder.reduce_prod(x, [self.case["axis"]], + out = builder.reduce_prod(x, self.case["axis"], self.case["keepdim"]) elif self.case["op_type"] == "max": - out = builder.reduce_max(x, [self.case["axis"]], + out = builder.reduce_max(x, self.case["axis"], self.case["keepdim"]) elif self.case["op_type"] == "min": - out = builder.reduce_min(x, [self.case["axis"]], + out = builder.reduce_min(x, self.case["axis"], self.case["keepdim"]) elif self.case["op_type"] == "all": - out = builder.reduce_all(x, [self.case["axis"]], + out = builder.reduce_all(x, self.case["axis"], self.case["keepdim"]) elif self.case["op_type"] == "any": - out = builder.reduce_any(x, [self.case["axis"]], + out = builder.reduce_any(x, self.case["axis"], self.case["keepdim"]) else: out = builder.identity(x) @@ -100,31 +100,31 @@ def init_attrs(self): self.inputs = [ { "shape": [1], - "axis": -1, + "axis": [-1], }, { "shape": [1024], - "axis": 0, + "axis": [0], }, { "shape": [512, 256], - "axis": 1, + "axis": [1], }, { "shape": [128, 64, 32], - "axis": 2, + "axis": [2], }, { "shape": [16, 8, 4, 2], - "axis": 3, + "axis": [3], }, { "shape": [16, 8, 4, 2, 1], - "axis": 3, + "axis": [3], }, { "shape": [1, 1, 1, 1, 1], - "axis": 3, + "axis": [3], }, ] self.dtypes = [ @@ -185,30 +185,5 @@ def init_attrs(self): ] -class TestReduceForBool(TestReduceAll): - def init_attrs(self): - super().init_attrs() - self.dtypes = [{"dtype": "bool"}] - self.attrs = [ - { - "op_type": "all", - "keepdim": True - }, - { - "op_type": "all", - "keepdim": False - }, - { - "op_type": "any", - "keepdim": True - }, - { - "op_type": "any", - "keepdim": False - }, - ] - - if __name__ == "__main__": TestReduceAll().run() - TestReduceForBool().run() diff --git a/python/tests/ops/test_reduce_op_other.py b/python/tests/ops/test_reduce_op_other.py new file mode 100644 index 0000000000..1f54dd3b20 --- /dev/null +++ b/python/tests/ops/test_reduce_op_other.py @@ -0,0 +1,87 @@ +# Copyright (c) 2023 CINN Authors. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from test_reduce_op_new import TestReduceAll + + +class TestReduceForBool(TestReduceAll): + def init_attrs(self): + super().init_attrs() + self.dtypes = [{"dtype": "bool"}] + self.attrs = [ + { + "op_type": "all", + "keepdim": True + }, + { + "op_type": "all", + "keepdim": False + }, + { + "op_type": "any", + "keepdim": True + }, + { + "op_type": "any", + "keepdim": False + }, + ] + + +class TestReduceAxis(TestReduceAll): + def init_attrs(self): + super().init_attrs() + self.inputs = [ + { + "shape": [1, 512, 1], + "axis": [1], + }, + { + "shape": [1, 1024, 1], + "axis": [1], + }, + { + "shape": [1, 2048, 1], + "axis": [1], + }, + { + "shape": [64, 32, 16, 8, 4], + "axis": [0, 2], + }, + { + "shape": [64, 32, 16, 8, 4], + "axis": [1, 2, 3], + }, + { + # No axis, all reduce + "shape": [64, 32, 16, 8, 4], + "axis": [], + }, + ] + self.dtypes = [{"dtype": "float32"}] + self.attrs = [ + { + "op_type": "sum", + "keepdim": True, + }, + { + "op_type": "sum", + "keepdim": False, + }, + ] + + +if __name__ == "__main__": + TestReduceForBool().run() + TestReduceAxis().run()
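
Note: the shape semantics exercised by these tests can be cross-checked outside CINN with plain NumPy. The sketch
below is illustrative only and is not part of the patches; the shapes and axes mirror cases from TestReduceAll and
TestReduceAxis above.

    import numpy as np

    # Patch 2 case: reducing a numel == 1 input with keepdim=False drops exactly
    # one axis per reduced dim (the old formula kept one axis too many).
    x = np.ones([1, 1, 1, 1, 1], dtype="float32")
    assert x.sum(axis=3, keepdims=False).shape == (1, 1, 1, 1)

    # Patch 3 case: keepdim=True preserves the rank, so the reduce intermediate
    # has rank > 1 even though only one dimension is non-unit.
    y = np.ones([1, 1024, 1], dtype="float32")
    assert y.sum(axis=1, keepdims=True).shape == (1, 1, 1)

    # Multi-axis case from TestReduceAxis.
    z = np.ones([64, 32, 16, 8, 4], dtype="float32")
    assert z.sum(axis=(0, 2), keepdims=False).shape == (32, 8, 4)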