diff --git a/python/paddle/__init__.py b/python/paddle/__init__.py
index 2e4dc53290226..613744b3529d0 100755
--- a/python/paddle/__init__.py
+++ b/python/paddle/__init__.py
@@ -276,6 +276,7 @@
 from .tensor.math import outer  # noqa: F401
 from .tensor.math import heaviside  # noqa: F401
 from .tensor.math import frac  # noqa: F401
+from .tensor.math import sgn  # noqa: F401
 from .tensor.random import bernoulli  # noqa: F401
 from .tensor.random import poisson  # noqa: F401
@@ -649,4 +650,5 @@
     'put_along_axis',
     'heaviside',
     'tril_indices',
+    'sgn',
 ]
diff --git a/python/paddle/fluid/tests/unittests/test_sgn.py b/python/paddle/fluid/tests/unittests/test_sgn.py
new file mode 100644
index 0000000000000..c4442c30a730e
--- /dev/null
+++ b/python/paddle/fluid/tests/unittests/test_sgn.py
@@ -0,0 +1,85 @@
+# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from __future__ import print_function
+
+import unittest
+import numpy as np
+import paddle
+
+
+def np_sgn(x: np.ndarray):
+    if x.dtype == 'complex128' or x.dtype == 'complex64':
+        x_abs = np.abs(x)
+        eps = np.finfo(x.dtype).eps
+        x_abs = np.maximum(x_abs, eps)
+        out = x / x_abs
+    else:
+        out = np.sign(x)
+    return out
+
+
+class TestSgnError(unittest.TestCase):
+
+    def test_errors(self):
+        # The input dtype of sgn must be float16, float32, float64, complex64, complex128.
+        input2 = paddle.to_tensor(
+            np.random.randint(-10, 10, size=[12, 20]).astype('int32'))
+        input3 = paddle.to_tensor(
+            np.random.randint(-10, 10, size=[12, 20]).astype('int64'))
+
+        self.assertRaises(TypeError, paddle.sgn, input2)
+        self.assertRaises(TypeError, paddle.sgn, input3)
+
+
+class TestSignAPI(unittest.TestCase):
+
+    def setUp(self) -> None:
+        self.support_dtypes = [
+            'float16', 'float32', 'float64', 'complex64', 'complex128'
+        ]
+        if paddle.device.get_device() == 'cpu':
+            self.support_dtypes = [
+                'float32', 'float64', 'complex64', 'complex128'
+            ]
+
+    def test_dtype(self):
+        for dtype in self.support_dtypes:
+            x = paddle.to_tensor(
+                np.random.randint(-10, 10, size=[12, 20, 2]).astype(dtype))
+
+            paddle.sgn(x)
+
+    def test_complex(self):
+        for dtype in ['complex64', 'complex128']:
+            np_x = np.array([[3 + 4j, 7 - 24j, 0, 1 + 2j], [6 + 8j, 3, 0, -2]],
+                            dtype=dtype)
+            x = paddle.to_tensor(np_x)
+            z = paddle.sgn(x)
+            np_z = z.numpy()
+            z_expected = np_sgn(np_x)
+            self.assertTrue(np.allclose(np_z, z_expected))
+
+    def test_float(self):
+        for dtype in self.support_dtypes:
+            np_x = np.random.randint(-10, 10, size=[12, 20, 2]).astype(dtype)
+            x = paddle.to_tensor(np_x)
+            z = paddle.sgn(x)
+            np_z = z.numpy()
+            z_expected = np_sgn(np_x)
+            self.assertTrue(np.allclose(np_z, z_expected))
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/python/paddle/tensor/__init__.py b/python/paddle/tensor/__init__.py
index 08d54734bfa9d..42da4030dec4c 100755
--- a/python/paddle/tensor/__init__.py
+++ b/python/paddle/tensor/__init__.py
@@ -233,6 +233,7 @@
 from .math import outer  # noqa: F401
 from .math import heaviside  # noqa: F401
 from .math import frac  # noqa: F401
+from .math import sgn  # noqa: F401
 from .random import multinomial  # noqa: F401
 from .random import standard_normal  # noqa: F401
@@ -505,6 +506,7 @@
     'exponential_',
     'heaviside',
     'bucketize',
+    'sgn',
 ]
 #this list used in math_op_patch.py for magic_method bind
diff --git a/python/paddle/tensor/math.py b/python/paddle/tensor/math.py
index 86b3b71998a12..b1a719c46b1cd 100644
--- a/python/paddle/tensor/math.py
+++ b/python/paddle/tensor/math.py
@@ -4700,3 +4700,45 @@ def frac(x, name=None):
     helper.append_op(
         type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": y})
     return _elementwise_op(LayerHelper(op_type, **locals()))
+
+
+def sgn(x, name=None):
+    """
+    For a complex input, this API returns a new tensor whose elements have the same angles as the
+    corresponding elements of the input and absolute values of one.
+    For a real floating-point input, this API returns the sign of every element in `x`:
+    1 for positive, -1 for negative and 0 for zero, the same as paddle.sign.
+
+    Args:
+        x (Tensor): The input tensor, whose data type should be float16, float32, float64, complex64 or complex128.
+        name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
+
+    Returns:
+        Tensor: A sign Tensor for a real input, or a normalized Tensor for a complex input; shape and data type are the same as the input.
+
+    Examples:
+        .. code-block:: python
+
+            import paddle
+
+            x = paddle.to_tensor([[3 + 4j, 7 - 24j, 0, 1 + 2j], [6 + 8j, 3, 0, -2]])
+            print(paddle.sgn(x))
+            # [[0.6+0.8j       0.28-0.96j      0.+0.j          0.4472136+0.8944272j]
+            #  [0.6+0.8j       1.+0.j          0.+0.j          -1.+0.j]]
+
+    """
+    if x.dtype not in [
+            paddle.float16, paddle.float32, paddle.float64, paddle.complex64,
+            paddle.complex128
+    ]:
+        raise TypeError(
+            "The data type of input must be one of ['float16', 'float32', 'float64', 'complex64', 'complex128'], but got {}"
+            .format(x.dtype))
+    if paddle.is_complex(x):
+        # View the complex tensor as pairs of real values, scale each pair by
+        # 1/|x|, and map the 0/0 (NaN) entries produced by zero inputs back to 0.
+        expand_x = paddle.as_real(x)
+        x_abs = paddle.abs(x)
+        x_abs = paddle.unsqueeze(x_abs, axis=-1)
+        output = expand_x / x_abs
+        zeros = paddle.zeros_like(output)
+        output = paddle.where(paddle.isnan(output), zeros, output)
+
+        return paddle.as_complex(output)
+    else:
+        return paddle.sign(x)
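
Note: as a quick sanity check outside the unit test, the complex branch can be compared against the same NumPy reference used in test_sgn.py. The sketch below assumes a Paddle build that already contains this patch; np_sgn here mirrors the helper from the test file and is not part of the library API.

    import numpy as np
    import paddle


    def np_sgn(x):
        # Reference semantics: x / max(|x|, eps) for complex input (zeros stay
        # zero, since 0 / eps == 0), np.sign for real input.
        if np.iscomplexobj(x):
            return x / np.maximum(np.abs(x), np.finfo(x.dtype).eps)
        return np.sign(x)


    x = np.array([[3 + 4j, 7 - 24j, 0, 1 + 2j], [6 + 8j, 3, 0, -2]],
                 dtype='complex64')
    out = paddle.sgn(paddle.to_tensor(x)).numpy()
    print(np.allclose(out, np_sgn(x)))  # expected: True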