Commit

refactor mean op, *test=kunlun (#44000)

* refactor mean op, *test=kunlun

* refactor mean op, *test=kunlun

* refactor mean op,*test=kunlun

* refactor mean op,*test=kunlun

helen88 committed Jul 5, 2022
1 parent 75c975f commit 7d3b08d
Showing 2 changed files with 58 additions and 58 deletions.
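The Python change replaces the hand-written per-dtype classes (TestXPUMeanOp, TestXPUMeanOpFp16) with a single XPUOpTestWrapper subclass whose nested test cases are registered once for every dtype returned by get_xpu_op_support_types('mean'). Below is a minimal sketch of that registration mechanism; XPUOpTestWrapperSketch, create_test_class_sketch and XPUTestFooOp are illustrative stand-ins, not the real xpu.get_test_cover_info helpers and not part of this commit.

import unittest

import numpy as np


class XPUOpTestWrapperSketch:
    """Simplified stand-in for XPUOpTestWrapper (illustrative only)."""
    pass


def create_test_class_sketch(scope, wrapper_cls, dtype_name, np_dtype):
    """Illustrative stand-in for create_test_class: for every unittest case
    nested inside the wrapper, register a dtype-specific concrete class."""
    for name, inner in vars(wrapper_cls).items():
        if isinstance(inner, type) and issubclass(inner, unittest.TestCase):
            concrete_name = '%s_%s' % (name, dtype_name)
            scope[concrete_name] = type(concrete_name, (inner, ),
                                        {'in_type': np_dtype})


class XPUTestFooOp(XPUOpTestWrapperSketch):
    # Nested case, analogous to TestMeanOp inside XPUTestMeanOp in the diff below.
    class TestFoo(unittest.TestCase):

        def test_in_type_injected(self):
            # in_type is what init_dtype() copies into self.dtype in the real test.
            self.assertIn(self.in_type, (np.float32, np.float16))


# Analogous to the loop over get_xpu_op_support_types('mean') at the end of the new file.
for dtype_name, np_dtype in (('float32', np.float32), ('float16', np.float16)):
    create_test_class_sketch(globals(), XPUTestFooOp, dtype_name, np_dtype)

if __name__ == '__main__':
    unittest.main()

With this pattern the test matrix grows automatically with the dtypes the XPU mean kernel reports, which is why the explicit float16 class below can be deleted.
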
3 changes: 0 additions & 3 deletions python/paddle/fluid/tests/unittests/xpu/CMakeLists.txt
@@ -16,9 +16,6 @@ if(WITH_XPU_BKCL)
list(APPEND DIST_TEST_OPS test_gen_bkcl_id_op)
endif()

list(REMOVE_ITEM TEST_OPS test_concat_op_xpu)
list(REMOVE_ITEM TEST_OPS test_mean_op_xpu)

foreach(TEST_OP ${TEST_OPS})
py_test_modules(${TEST_OP} MODULES ${TEST_OP})
endforeach()

113 changes: 58 additions & 55 deletions python/paddle/fluid/tests/unittests/xpu/test_mean_op_xpu.py
@@ -28,29 +28,66 @@

np.random.seed(10)

import op_test
from op_test_xpu import XPUOpTest
from xpu.get_test_cover_info import create_test_class, get_xpu_op_support_types, XPUOpTestWrapper

paddle.enable_static()


class XPUTestMeanOp(XPUOpTestWrapper):

def __init__(self):
self.op_name = 'mean'
self.use_dynamic_create_class = False

class TestMeanOp(XPUOpTest):

def setUp(self):
self.init_dtype()
self.set_xpu()
self.op_type = "mean"
self.place = paddle.XPUPlace(0)
self.set_shape()
self.inputs = {'X': np.random.random(self.shape).astype(self.dtype)}
self.outputs = {'Out': np.mean(self.inputs["X"]).astype(np.float16)}

def init_dtype(self):
self.dtype = self.in_type

def set_shape(self):
self.shape = (10, 10)

def set_xpu(self):
self.__class__.use_xpu = True
self.__class__.no_need_check_grad = True
self.__class__.op_type = self.dtype

def test_check_output(self):
self.check_output_with_place(self.place)

def test_checkout_grad(self):
self.check_grad_with_place(self.place, ['X'], 'Out')

class TestMeanOp(XPUOpTest):
class TestMeanOp1(TestMeanOp):

def setUp(self):
self.op_type = "mean"
self.init_dtype_type()
self.inputs = {'X': np.random.random((10, 10)).astype(self.dtype)}
self.outputs = {'Out': np.mean(self.inputs["X"]).astype(np.float16)}
def set_shape(self):
self.shape = (5)

def init_dtype_type(self):
self.dtype = np.float32
class TestMeanOp2(TestMeanOp):

def test_check_output(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place, atol=2e-3)
def set_shape(self):
self.shape = (5, 7, 8)

def test_checkout_grad(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'], 'Out')
class TestMeanOp3(TestMeanOp):

def set_shape(self):
self.shape = (10, 5, 7, 8)

class TestMeanOp4(TestMeanOp):

def set_shape(self):
self.shape = (2, 2, 3, 3, 3)


class TestMeanOpError(unittest.TestCase):
@@ -71,43 +108,9 @@ def test_errors(self):
fluid.layers.softmax(input3)


class TestXPUMeanOp(TestMeanOp):

def init_dtype_type(self):
self.dtype = np.float32

def test_check_output(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place)

def test_checkout_grad(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'], 'Out')


class TestXPUMeanOpFp16(TestMeanOp):

def init_dtype_type(self):
self.dtype = np.float16

def test_check_output(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_output_with_place(place)

def test_checkout_grad(self):
if paddle.is_compiled_with_xpu():
paddle.enable_static()
place = paddle.XPUPlace(0)
self.check_grad_with_place(place, ['X'],
'Out',
max_relative_error=1.e1)

support_types = get_xpu_op_support_types('mean')
for stype in support_types:
create_test_class(globals(), XPUTestMeanOp, stype)

if __name__ == "__main__":
unittest.main()
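
A note on the structure of the new tests: TestMeanOp1 through TestMeanOp4 override only set_shape and inherit everything else (dtype handling, place, output and gradient checks) from the nested TestMeanOp. A minimal sketch of that override-one-hook pattern in plain unittest, with illustrative names and no XPU dependencies:

import unittest

import numpy as np


class MeanShapeCaseSketch(unittest.TestCase):
    """Base case: shape variants override only set_shape()."""

    def set_shape(self):
        self.shape = (10, 10)

    def test_mean_matches_reference(self):
        self.set_shape()
        x = np.random.random(self.shape).astype(np.float32)
        # Reference semantics of mean: one scalar over all elements.
        self.assertAlmostEqual(float(np.mean(x)),
                               float(x.sum() / x.size),
                               places=5)


class MeanShapeCase3D(MeanShapeCaseSketch):

    def set_shape(self):
        self.shape = (5, 7, 8)


if __name__ == '__main__':
    unittest.main()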
