[Hackathon No.27] Add the frac math API to Paddle #41226

Merged: 5 commits, Apr 12, 2022
Changes from 2 commits
1 change: 1 addition & 0 deletions python/paddle/__init__.py
@@ -263,6 +263,7 @@
from .tensor.math import fmin # noqa: F401
from .tensor.math import inner # noqa: F401
from .tensor.math import outer # noqa: F401
from .tensor.math import frac # noqa: F401
Contributor:
frac also needs to be added to the __all__ list below.
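A minimal sketch of the requested change (the neighboring entries are placeholders; the real __all__ list sits further down in python/paddle/__init__.py):

__all__ = [  # noqa
    'outer',
    'trunc',
    'frac',  # newly exported so the public-API check and `from paddle import *` see it
]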


from .tensor.random import bernoulli # noqa: F401
from .tensor.random import poisson # noqa: F401
57 changes: 57 additions & 0 deletions python/paddle/fluid/tests/unittests/test_frac_api.py
@@ -0,0 +1,57 @@
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard


def ref_frac(x):
    return x - np.trunc(x)


class TestFracAPI(unittest.TestCase):
    def setUp(self):
        self.x_np = np.random.uniform(-3, 3, [2, 3]).astype('float64')
        self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda() \
            else paddle.CPUPlace()

    def test_static_api(self):
        paddle.enable_static()
        with program_guard(Program()):
            input = fluid.data('X', self.x_np.shape, self.x_np.dtype)
            out = paddle.frac(input)
            place = fluid.CPUPlace()
            if fluid.core.is_compiled_with_cuda():
                place = fluid.CUDAPlace(0)
            exe = fluid.Executor(place)
            res = exe.run(feed={'X': self.x_np}, fetch_list=[out])
        out_ref = ref_frac(self.x_np)
        self.assertTrue(np.allclose(out_ref, res))
    def test__dygraph_api(self):
Contributor:
test__dygraph_api -> test_dygraph_api

        paddle.disable_static(self.place)
        x = paddle.to_tensor(self.x_np)
        out = paddle.frac(x)
        out_ref = ref_frac(self.x_np)
        self.assertTrue(np.allclose(out_ref, out.numpy()))
Contributor:
According to the acceptance criteria, please add more test cases (a sketch of such tests follows below):

  • Only float32 is tested so far; please add cases for int32, int64 and float64.
  • A test_errors test for the failure path is also needed; for example, check a dtype outside the above four types.
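A hedged sketch of the requested tests (class names and structure here are assumptions, not code from this PR; it reuses the imports and helpers already in this file):

class TestFracDtypes(TestFracAPI):
    # Sweep the four supported dtypes in dygraph mode and compare
    # against the NumPy reference implementation.
    def test_dtypes(self):
        paddle.disable_static(self.place)
        for dtype in ['int32', 'int64', 'float32', 'float64']:
            x_np = np.random.uniform(-3, 3, [2, 3]).astype(dtype)
            out = paddle.frac(paddle.to_tensor(x_np))
            self.assertTrue(np.allclose(ref_frac(x_np), out.numpy()))


class TestFracError(unittest.TestCase):
    def test_errors(self):
        paddle.enable_static()
        with program_guard(Program()):
            # float16 is outside the four supported dtypes, so the
            # dtype check is expected to raise a TypeError.
            x = fluid.data('X', [2, 3], 'float16')
            self.assertRaises(TypeError, paddle.frac, x)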



if __name__ == '__main__':
unittest.main()
2 changes: 2 additions & 0 deletions python/paddle/tensor/__init__.py
@@ -227,6 +227,7 @@
from .math import fmin # noqa: F401
from .math import inner # noqa: F401
from .math import outer # noqa: F401
from .math import frac # noqa: F401

from .random import multinomial # noqa: F401
from .random import standard_normal # noqa: F401
@@ -452,6 +453,7 @@
    'digamma',
    'diagonal',
    'trunc',
    'frac'
Contributor:

2022-04-07 21:45:56 API Difference is: 
2022-04-07 21:45:56 - paddle.Tensor.bitwise_and (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, varkw=None, defaults=(None, None), kwonlyargs=[], kwonlydefaults=None, annotations={}), ('document', '16c92f81b99632969af31978cf06dcbd'))
2022-04-07 21:45:56 + paddle.bitwise_and (ArgSpec(args=['x', 'y', 'out', 'name'], varargs=None, varkw=None, defaults=(None, None), kwonlyargs=[], kwonlydefaults=None, annotations={}), ('document', '16c92f81b99632969af31978cf06dcbd'))

The static-check pipeline shows this: https://xly.bce.baidu.com/paddlepaddle/paddle/newipipe/detail/5338157/job/13849637
A comma is missing here.
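What the failure comes down to (a sketch, not the PR's actual list): without the trailing comma, Python's implicit string-literal concatenation fuses the two adjacent entries, so 'bitwise_and' silently disappears from __all__ and the public-API check reports the difference above.

# Buggy: 'frac' 'bitwise_and' concatenates into the single entry
# 'fracbitwise_and', dropping bitwise_and from the exported API list.
__all__ = ['trunc', 'frac' 'bitwise_and']

# Fixed: the comma keeps the entries separate.
__all__ = ['trunc', 'frac', 'bitwise_and']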

    'bitwise_and',
    'bitwise_or',
    'bitwise_xor',
51 changes: 51 additions & 0 deletions python/paddle/tensor/math.py
@@ -3927,3 +3927,54 @@ def angle(x, name=None):
    outputs = {"Out": out}
    helper.append_op(type=op_type, inputs=inputs, outputs=outputs)
    return out

def frac(x, name=None):
    """
    Returns the fractional portion of each element in the input, i.e. x - trunc(x).

    Args:
        x (Tensor): The input tensor, whose data type should be int32, int64, float32 or float64.
        name (str, optional): Name for the operation (optional, default is None). For more
            information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor: The output Tensor of frac, with the same shape and data type as x.

    Examples:
        .. code-block:: python

            import paddle

            input = paddle.uniform([3, 3], min=-3., max=3.)
            print(input.numpy())
            # [[ 1.2203873 -1.0035421 -0.35193074]
            # [-0.00928353 0.58917075 -0.8407828 ]
            # [-1.5131804 0.5850153 -0.17597814]]

            output = paddle.frac(input)
            print(output.numpy())
            # [[ 0.22038734 -0.00354207 -0.35193074]
            # [-0.00928353 0.58917075 -0.8407828 ]
            # [-0.5131804 0.5850153 -0.17597814]]
    """
    op_type = 'elementwise_sub'
    axis = -1
    act = None
    if in_dygraph_mode():
        y = _C_ops.final_state_trunc(x)
        return _C_ops.final_state_subtract(x, y)
    else:
        if _in_legacy_dygraph():
            y = _C_ops.trunc(x)
            return _elementwise_op_in_dygraph(
                x, y, axis=axis, act=act, op_name=op_type)
        else:
            inputs = {"X": x}
            attrs = {}

            helper = LayerHelper("trunc", **locals())
            check_variable_and_dtype(
                x, "X", ['int32', 'int64', 'float32', 'float64'], 'trunc')
            y = helper.create_variable_for_type_inference(dtype=x.dtype)
            helper.append_op(
                type="trunc", inputs=inputs, attrs=attrs, outputs={"Out": y})
            return _elementwise_op(LayerHelper(op_type, **locals()))
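A quick dygraph sanity check for the new API (a sketch, not part of this diff; it assumes only NumPy and a Paddle build with this PR applied):

import numpy as np
import paddle

x_np = np.array([[1.5, -2.25], [0.75, -0.1]], dtype='float64')
out = paddle.frac(paddle.to_tensor(x_np))
# frac(x) == x - trunc(x), so the expected result is [[0.5, -0.25], [0.75, -0.1]]
assert np.allclose(out.numpy(), x_np - np.trunc(x_np))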