[PaddlePaddle Hackathon 2] Task 18: Add paddle.heaviside and paddle.Tensor.heaviside APIs to Paddle #40934

Closed

Commits (25)
299b726
Add Python interface for heaviside
BrilliantYuKaimin Mar 25, 2022
bfde6ff
Add description for the heaviside operator
BrilliantYuKaimin Mar 25, 2022
4c91cfa
Add functors related to heaviside
BrilliantYuKaimin Mar 25, 2022
ea60774
Add implementation of heaviside and its gradient
BrilliantYuKaimin Mar 25, 2022
a439d57
Register kernels for heaviside and its gradient
BrilliantYuKaimin Mar 25, 2022
d1322bc
Add kernel signature for heaviside
BrilliantYuKaimin Mar 25, 2022
99a8433
Add unit tests for heaviside
BrilliantYuKaimin Mar 25, 2022
2106d2c
Update elementwise_heaviside_op.cc
BrilliantYuKaimin Mar 25, 2022
53cdf22
Adjust code format
BrilliantYuKaimin Mar 25, 2022
1d07d18
Update elementwise_functor.h
BrilliantYuKaimin Mar 26, 2022
36448fe
Adjust code format
BrilliantYuKaimin Mar 27, 2022
ea9ba8d
Improve descriptions of the inputs
BrilliantYuKaimin Mar 30, 2022
5397eb8
Update test_elementwise_heaviside_op.py
BrilliantYuKaimin Mar 30, 2022
0cf7567
Merge branch 'develop' into heaviside_2
BrilliantYuKaimin Mar 30, 2022
0ecc4fc
Update test_elementwise_heaviside_op.py
BrilliantYuKaimin Mar 30, 2022
a3b84e1
add elementwise_heaviside
BrilliantYuKaimin Apr 1, 2022
b97fcb9
Update test_elementwise_heaviside_op.py
BrilliantYuKaimin Apr 1, 2022
0739b3d
Merge branch 'PaddlePaddle:develop' into heaviside_2
BrilliantYuKaimin Apr 7, 2022
e3f0831
Update elementwise_sig.cc
BrilliantYuKaimin Apr 11, 2022
570f4c4
add elementwise_heaviside
BrilliantYuKaimin Apr 11, 2022
d7609ee
Update test_elementwise_heaviside_op.py
BrilliantYuKaimin Apr 12, 2022
9ca329e
Update math.py
BrilliantYuKaimin Apr 12, 2022
5bc256d
Update test_elementwise_heaviside_op.py
BrilliantYuKaimin Apr 13, 2022
07d5607
Merge branch 'develop' into heaviside_2
BrilliantYuKaimin Apr 13, 2022
30cf8c6
Update test_elementwise_heaviside_op.py
BrilliantYuKaimin Apr 14, 2022
63 changes: 63 additions & 0 deletions paddle/fluid/operators/elementwise/elementwise_heaviside_op.cc
@@ -0,0 +1,63 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>
#include "paddle/fluid/operators/elementwise/elementwise_op.h"

namespace paddle {
namespace operators {

class ElementwiseHeavisideOpMaker : public ElementwiseOpMaker {
protected:
std::string GetName() const override { return "Heaviside"; }
std::string GetEquation() const override { return "Out = Heaviside(X, Y)"; }

void AddInputX() override {
AddInput("X", "The input tensor of Heaviside step function.");
}

void AddInputY() override {
AddInput("Y", "The tensor determining a Heaviside step function.");
Contributor Author:
Done.
}

std::string GetOpFuntionality() const override {
return "Computes the Heaviside step function determined by Y "
"for each element in X.";
}
};

template <typename T>
class ElementwiseHeavisideGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("elementwise_heaviside_grad");
op->SetInput("X", this->Input("X"));
op->SetInput("Y", this->Input("Y"));
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
op->SetAttrMap(this->Attrs());
}
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(
elementwise_heaviside, ops::ElementwiseOp, ops::ElementwiseHeavisideOpMaker,
ops::ElementwiseHeavisideGradOpMaker<paddle::framework::OpDesc>,
ops::ElementwiseHeavisideGradOpMaker<paddle::imperative::OpBase>);

REGISTER_OPERATOR(elementwise_heaviside_grad, ops::ElementwiseOpGrad);
9 changes: 9 additions & 0 deletions paddle/phi/kernels/cpu/elementwise_grad_kernel.cc
@@ -323,3 +323,12 @@ PD_REGISTER_KERNEL(minimum_grad,
int,
int64_t,
phi::dtype::bfloat16) {}

PD_REGISTER_KERNEL(elementwise_heaviside_grad,
CPU,
ALL_LAYOUT,
phi::ElementwiseHeavisideGradKernel,
float,
double,
int,
int64_t) {}
9 changes: 9 additions & 0 deletions paddle/phi/kernels/cpu/elementwise_kernel.cc
@@ -136,6 +136,15 @@ PD_REGISTER_KERNEL(
PD_REGISTER_KERNEL(
fmin, CPU, ALL_LAYOUT, phi::FMinKernel, float, double, int, int64_t) {}

PD_REGISTER_KERNEL(elementwise_heaviside,
CPU,
ALL_LAYOUT,
phi::ElementwiseHeavisideKernel,
float,
double,
int,
int64_t) {}

PD_REGISTER_KERNEL(add_raw,
CPU,
ALL_LAYOUT,
23 changes: 23 additions & 0 deletions paddle/phi/kernels/funcs/elementwise_functor.h
@@ -538,5 +538,28 @@ struct InverseModuloFunctor<
return res;
}
};

// Heaviside
template <typename T>
struct HeavisideFunctor {
inline HOSTDEVICE T operator()(const T a, const T b) const {
return a == static_cast<T>(0) ? b : static_cast<T>(a > 0);
}
};

template <typename T>
struct HeavisideGradDx {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
return dout * static_cast<T>(0);
}
};

template <typename T>
struct HeavisideGradDy {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
return dout * static_cast<T>(x == static_cast<T>(0));
}
};

} // namespace funcs
} // namespace phi
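
For reference, these functors implement the same semantics as NumPy's np.heaviside: the output is 0 where x < 0, y where x == 0, and 1 where x > 0. A minimal NumPy sketch of the forward functor (heaviside_ref is an illustrative name, not part of the PR):

import numpy as np

def heaviside_ref(x, y):
    # Mirrors HeavisideFunctor: 0 where x < 0, y where x == 0, 1 where x > 0.
    return np.where(x == 0, y, (x > 0).astype(x.dtype))

x = np.array([-1.0, 0.0, 2.0])
y = np.array([0.5, 0.5, 0.5])
print(heaviside_ref(x, y))  # [0.  0.5 1. ]
print(np.heaviside(x, y))   # same result

Correspondingly, HeavisideGradDx is identically zero (the step function is flat almost everywhere), and HeavisideGradDy passes dout through only at the points where x == 0.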
9 changes: 9 additions & 0 deletions paddle/phi/kernels/gpu/elementwise_grad_kernel.cu
@@ -382,3 +382,12 @@ PD_REGISTER_KERNEL(minimum_grad,
int64_t,
phi::dtype::float16,
phi::dtype::bfloat16) {}

PD_REGISTER_KERNEL(elementwise_heaviside_grad,
GPU,
ALL_LAYOUT,
phi::ElementwiseHeavisideGradKernel,
float,
double,
int,
int64_t) {}
9 changes: 9 additions & 0 deletions paddle/phi/kernels/gpu/elementwise_kernel.cu
@@ -69,6 +69,15 @@ PD_REGISTER_KERNEL(
PD_REGISTER_KERNEL(
fmin, GPU, ALL_LAYOUT, phi::FMinKernel, float, double, int, int64_t) {}

PD_REGISTER_KERNEL(elementwise_heaviside,
GPU,
ALL_LAYOUT,
phi::ElementwiseHeavisideKernel,
float,
double,
int,
int64_t) {}

PD_REGISTER_KERNEL(add_raw,
GPU,
ALL_LAYOUT,
50 changes: 50 additions & 0 deletions paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
@@ -666,4 +666,54 @@ struct MinGradDy {
return dout * static_cast<T>(x >= y);
}
};

template <typename T, typename Context>
void ElementwiseHeavisideGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& out_grad,
int axis,
DenseTensor* x_grad,
DenseTensor* y_grad) {
funcs::ElementwiseGradPreProcess(out_grad, x_grad);

auto out = out_grad; // Fake out, not used
auto x_dim = x.dims();
auto y_dim = y.dims();
if (x.dims() == y.dims()) {
funcs::ElemwiseGradComputeNoBroadcast<Context,
T,
funcs::HeavisideGradDx<T>,
funcs::HeavisideGradDy<T>>(
dev_ctx,
x_dim,
y_dim,
x,
y,
out,
out_grad,
axis,
x_grad,
y_grad,
funcs::HeavisideGradDx<T>(),
funcs::HeavisideGradDy<T>());
} else {
funcs::ElemwiseGradComputeWithBroadcast<T,
funcs::HeavisideGradDx<T>,
funcs::HeavisideGradDy<T>>(
dev_ctx,
x_dim,
y_dim,
x,
y,
out,
out_grad,
axis,
x_grad,
y_grad,
funcs::HeavisideGradDx<T>(),
funcs::HeavisideGradDy<T>());
}
}

} // namespace phi
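
The dispatch above separates the equal-shape case from the broadcast case: when y was broadcast against x, the gradient flowing back to y must be reduced (summed) over the broadcast axes. A simplified NumPy sketch of what the broadcast path computes, assuming NumPy-style trailing-aligned broadcasting (Paddle's axis attribute generalizes the alignment; heaviside_grad_ref is an illustrative name):

import numpy as np

def heaviside_grad_ref(x, y, dout):
    # dOut/dX is zero almost everywhere, so x's gradient is all zeros.
    dx = np.zeros_like(x)
    # dOut/dY is 1 exactly where x == 0, else 0.
    dy = dout * (x == 0).astype(dout.dtype)
    # Fold the gradient back to y's shape by summing over broadcast axes.
    while dy.ndim > np.ndim(y):
        dy = dy.sum(axis=0)
    for axis, size in enumerate(np.shape(y)):
        if size == 1 and dy.shape[axis] != 1:
            dy = dy.sum(axis=axis, keepdims=True)
    return dx, dy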
11 changes: 11 additions & 0 deletions paddle/phi/kernels/impl/elementwise_kernel_impl.h
@@ -44,4 +44,15 @@ void FMinKernel(const Context& dev_ctx,
dev_ctx, x, y, axis, funcs::FMinFunctor<T>(), out);
}

template <typename T, typename Context>
void ElementwiseHeavisideKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
dev_ctx.template Alloc<T>(out);
funcs::ElementwiseCompute<funcs::HeavisideFunctor<T>, T, T>(
dev_ctx, x, y, axis, funcs::HeavisideFunctor<T>(), out);
}

} // namespace phi
11 changes: 11 additions & 0 deletions paddle/phi/ops/compat/elementwise_sig.cc
@@ -200,6 +200,15 @@ KernelSignature ElementwiseMinGradOpArgumentMapping(
{"axis"},
{GradVarName("X"), GradVarName("Y")});
}

KernelSignature ElementwiseHeavisideGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("elementwise_heaviside_grad",
{"X", "Y", GradVarName("Out")},
{"axis"},
{GradVarName("X"), GradVarName("Y")});
}

} // namespace phi

PD_REGISTER_BASE_KERNEL_NAME(elementwise_add, add);
@@ -272,3 +281,5 @@ PD_REGISTER_ARG_MAPPING_FN(elementwise_max_grad,
phi::ElementwiseMaxGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_min_grad,
phi::ElementwiseMinGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_heaviside_grad,
phi::ElementwiseHeavisideGradOpArgumentMapping);
1 change: 1 addition & 0 deletions python/paddle/__init__.py
@@ -263,6 +263,7 @@
from .tensor.math import fmin # noqa: F401
from .tensor.math import inner # noqa: F401
from .tensor.math import outer # noqa: F401
from .tensor.math import heaviside # noqa: F401

from .tensor.random import bernoulli # noqa: F401
from .tensor.random import poisson # noqa: F401
112 changes: 112 additions & 0 deletions python/paddle/fluid/tests/unittests/test_elementwise_heaviside_op.py
@@ -0,0 +1,112 @@
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Contributor:
The unit tests need additional cases matching the design doc; coverage for different dtypes, devices, and dynamic graph mode, as well as error checking, is still missing. Also, since check_grad is time-consuming, the number of OpTest cases can be reduced somewhat.

Contributor Author:
The missing tests have been added, and the OpTest cases have been reduced to 3.

#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from __future__ import print_function

import unittest
import numpy as np
from op_test import OpTest
import paddle

paddle.enable_static()


class TestElementwiseOp(OpTest):
def setUp(self):
self.op_type = "elementwise_heaviside"
x = np.random.random((13, 17)).astype("float64")
y = np.random.random((13, 17)).astype("float64")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.heaviside(self.inputs['X'], self.inputs['Y'])}

def test_check_output(self):
self.check_output()

def test_check_grad_normal(self):
self.check_grad(['X', 'Y'], 'Out')


class TestElementwiseHeavisideOp_Vector(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_heaviside"
x = np.random.random((100, )).astype("float64")
y = np.random.random((100, )).astype("float64")
self.inputs = {'X': x, 'Y': y}
self.outputs = {'Out': np.heaviside(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseHeavisideOp_broadcast_0(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_heaviside"
x = np.random.random((100, 5, 2)).astype(np.float64)
y = np.random.random((100, 1, 1)).astype(np.float64)
self.inputs = {'X': x, 'Y': y}

self.attrs = {'axis': 0}
self.outputs = {'Out': np.heaviside(self.inputs['X'], self.inputs['Y'])}


class TestElementwiseHeavisideOp_broadcast_1(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_heaviside"
x = np.random.random((2, 100, 3)).astype(np.float64)
y = np.random.random((100, )).astype(np.float64)
self.inputs = {'X': x, 'Y': y}

self.attrs = {'axis': 1}
self.outputs = {
'Out': np.heaviside(self.inputs['X'],
self.inputs['Y'].reshape(1, 100, 1))
}


class TestElementwiseHeavisideOp_broadcast_2(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_heaviside"
x = np.random.random((1, 3, 100)).astype(np.float64)
y = np.random.random((100, )).astype(np.float64)
self.inputs = {'X': x, 'Y': y}

self.outputs = {
'Out':
np.heaviside(self.inputs['X'], self.inputs['Y'].reshape(1, 1, 100))
}


class TestElementwiseHeavisideOp_broadcast_3(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_heaviside"
x = np.random.random((2, 50, 2, 1)).astype(np.float64)
y = np.random.random((50, 2)).astype(np.float64)
self.inputs = {'X': x, 'Y': y}

self.attrs = {'axis': 1}
self.outputs = {
'Out': np.heaviside(self.inputs['X'],
self.inputs['Y'].reshape(1, 50, 2, 1))
}


class TestElementwiseHeavisideOp_broadcast_4(TestElementwiseOp):
def setUp(self):
self.op_type = "elementwise_heaviside"
x = np.random.random((2, 3, 4, 50)).astype(np.float64)
y = np.random.random((2, 3, 1, 50)).astype(np.float64)
self.inputs = {'X': x, 'Y': y}

self.outputs = {'Out': np.heaviside(self.inputs['X'], self.inputs['Y'])}


if __name__ == '__main__':
unittest.main()
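
Beyond these static-graph OpTest cases, the new API can also be exercised directly in dynamic graph mode. A brief usage sketch (outputs follow the np.heaviside convention; the Tensor-method form comes from the paddle.Tensor.heaviside binding this PR adds):

import paddle

x = paddle.to_tensor([-0.5, 0.0, 0.5])
y = paddle.to_tensor([0.1, 0.2, 0.3])
print(paddle.heaviside(x, y))  # [0.0, 0.2, 1.0]
print(x.heaviside(y))          # Tensor-method form, same result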
2 changes: 2 additions & 0 deletions python/paddle/tensor/__init__.py
@@ -227,6 +227,7 @@
from .math import fmin # noqa: F401
from .math import inner # noqa: F401
from .math import outer # noqa: F401
from .math import heaviside # noqa: F401

from .random import multinomial # noqa: F401
from .random import standard_normal # noqa: F401
@@ -487,6 +488,7 @@
'put_along_axis',
'put_along_axis_',
'exponential_',
'heaviside'
]

#this list used in math_op_patch.py for magic_method bind