[Eager] Fix CastPyArg2Scalar for max value of int64 (#42098)
* [Eager] Fix CastPyArg2Scalar in Long case

* Add more test cases for paddle.clip

* Use PyLong_AsLongLong
veyron95 committed Apr 22, 2022
Parent: e49b7b6 · Commit: 281a5be
Showing 3 changed files with 25 additions and 5 deletions.
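
In short: the eager CastPyArg2Scalar path converted Python ints through a 32-bit int, so values wider than 32 bits, including the int64 maximum that the commit title refers to, were silently narrowed. A minimal, Paddle-free sketch of that narrowing (pure Python; narrow_to_int32 is a hypothetical helper that emulates a C cast to a signed 32-bit int):

INT64_MAX = 2**63 - 1  # 9223372036854775807

def narrow_to_int32(v):
    # Keep the low 32 bits and reinterpret them as a signed value,
    # which is what a narrowing C cast from int64 to int does.
    v &= 0xFFFFFFFF
    return v - 0x100000000 if v >= 0x80000000 else v

print(narrow_to_int32(INT64_MAX))  # -1: the maximum int64 value is mangled
print(INT64_MAX)                   # preserved once the receiving type is int64_t
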
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/eager_utils.cc
@@ -1058,7 +1058,7 @@ paddle::experimental::Scalar CastPyArg2Scalar(PyObject* obj,
     bool value = CastPyArg2Boolean(obj, op_type, arg_pos);
     return paddle::experimental::Scalar(value);
   } else if (PyLong_Check(obj)) {
-    int value = CastPyArg2Int(obj, op_type, arg_pos);
+    int64_t value = CastPyArg2Long(obj, op_type, arg_pos);
     return paddle::experimental::Scalar(value);
   } else if (PyFloat_Check(obj)) {
     float value = CastPyArg2Float(obj, op_type, arg_pos);
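
Why the receiving type matters: PyLong_Check accepts Python ints of any magnitude, so it is the C++ side that decides whether a wide value survives. A two-line illustration in plain Python:

# Python ints are arbitrary precision, so PyLong_Check is true even for values
# that need more than 32 bits; the Scalar must therefore be built from an int64_t.
big = 2**63 - 1
print(big.bit_length())  # 63 -> does not fit in a 32-bit signed int
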
2 changes: 1 addition & 1 deletion paddle/fluid/pybind/op_function_common.cc
@@ -153,7 +153,7 @@ void CastPyArg2AttrInt(PyObject* obj,
 int64_t CastPyArg2Long(PyObject* obj, const std::string& op_type,
                        ssize_t arg_pos) {
   if (PyObject_CheckLongOrToLong(&obj)) {
-    return (int64_t)PyLong_AsLong(obj);  // NOLINT
+    return (int64_t)PyLong_AsLongLong(obj);  // NOLINT
   } else {
     PADDLE_THROW(platform::errors::InvalidArgument(
         "%s(): argument (position %d) must be "
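
Why PyLong_AsLongLong rather than PyLong_AsLong: C long is only 32 bits on LLP64 platforms such as 64-bit Windows, while long long is at least 64 bits everywhere, so only the latter can represent every int64_t value. The platform difference can be checked with the standard ctypes module (output depends on the platform):

import ctypes

# Widths of the C return types behind PyLong_AsLong and PyLong_AsLongLong.
print(ctypes.sizeof(ctypes.c_long))      # 4 on 64-bit Windows (LLP64), 8 on typical Linux/macOS
print(ctypes.sizeof(ctypes.c_longlong))  # 8 on all common platforms
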
26 changes: 23 additions & 3 deletions python/paddle/fluid/tests/unittests/test_clip_op.py
@@ -200,7 +200,7 @@ def test_clip(self):
             np.allclose(res11, (data * 10).astype(np.int64).clip(2, 8)))
         paddle.disable_static()

-    def test_clip_dygraph(self):
+    def func_clip_dygraph(self):
         paddle.disable_static()
         place = fluid.CUDAPlace(0) if fluid.core.is_compiled_with_cuda(
         ) else fluid.CPUPlace()
@@ -233,9 +233,29 @@ def test_clip_dygraph(self):
             np.allclose(out_5.numpy(), (data * 10).astype(np.int64).clip(2, 8)))
         self.assertTrue(np.allclose(out_6.numpy(), data.clip(0.2, 0.8)))

-    def test_eager(self):
+    def test_clip_dygraph(self):
+        with _test_eager_guard():
+            self.func_clip_dygraph()
+        self.func_clip_dygraph()
+
+    def test_clip_dygraph_default_max(self):
         paddle.disable_static()
         with _test_eager_guard():
-            self.test_clip_dygraph()
+            x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
+            x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
+            x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
+            egr_out1 = paddle.clip(x_int32, min=1)
+            egr_out2 = paddle.clip(x_int64, min=1)
+            egr_out3 = paddle.clip(x_f32, min=1)
+        x_int32 = paddle.to_tensor([1, 2, 3], dtype="int32")
+        x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
+        x_f32 = paddle.to_tensor([1, 2, 3], dtype="float32")
+        out1 = paddle.clip(x_int32, min=1)
+        out2 = paddle.clip(x_int64, min=1)
+        out3 = paddle.clip(x_f32, min=1)
+        self.assertTrue(np.allclose(out1.numpy(), egr_out1.numpy()))
+        self.assertTrue(np.allclose(out2.numpy(), egr_out2.numpy()))
+        self.assertTrue(np.allclose(out3.numpy(), egr_out3.numpy()))
+
     def test_errors(self):
         paddle.enable_static()
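
For reference, the user-visible path the new test_clip_dygraph_default_max exercises looks roughly like this (a sketch assuming an installed paddle build; with max omitted, clip falls back to a dtype-dependent default upper bound, which for int64 tensors lies far beyond the 32-bit range and is exactly the kind of value the old conversion truncated):

import paddle

x_int64 = paddle.to_tensor([1, 2, 3], dtype="int64")
# max is omitted, so a default upper bound near the int64 maximum travels
# through the Scalar conversion fixed above; before this commit it could be
# narrowed to 32 bits and corrupt the clip range.
out = paddle.clip(x_int64, min=1)
print(out.numpy())  # expected: [1 2 3]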
