
[PaddlePaddle Hackathon 2] Task 18: Add the paddle.heaviside and paddle.Tensor.heaviside APIs to Paddle #40934

Closed

Commits (25)
299b726
Add the Python interface for heaviside
BrilliantYuKaimin Mar 25, 2022
bfde6ff
Add a description for the heaviside operator
BrilliantYuKaimin Mar 25, 2022
4c91cfa
Add the functors for heaviside
BrilliantYuKaimin Mar 25, 2022
ea60774
Add the implementation of heaviside and its gradient
BrilliantYuKaimin Mar 25, 2022
a439d57
Register the kernels for heaviside and its gradient
BrilliantYuKaimin Mar 25, 2022
d1322bc
Add the kernel signature for heaviside
BrilliantYuKaimin Mar 25, 2022
99a8433
Add unit tests for heaviside
BrilliantYuKaimin Mar 25, 2022
2106d2c
Update elementwise_heaviside_op.cc
BrilliantYuKaimin Mar 25, 2022
53cdf22
Adjust code formatting
BrilliantYuKaimin Mar 25, 2022
1d07d18
Update elementwise_functor.h
BrilliantYuKaimin Mar 26, 2022
36448fe
Adjust code formatting
BrilliantYuKaimin Mar 27, 2022
ea9ba8d
Improve the description of the inputs
BrilliantYuKaimin Mar 30, 2022
5397eb8
Update test_elementwise_heaviside_op.py
BrilliantYuKaimin Mar 30, 2022
0cf7567
Merge branch 'develop' into heaviside_2
BrilliantYuKaimin Mar 30, 2022
0ecc4fc
Update test_elementwise_heaviside_op.py
BrilliantYuKaimin Mar 30, 2022
a3b84e1
add elementwise_heaviside
BrilliantYuKaimin Apr 1, 2022
b97fcb9
Update test_elementwise_heaviside_op.py
BrilliantYuKaimin Apr 1, 2022
0739b3d
Merge branch 'PaddlePaddle:develop' into heaviside_2
BrilliantYuKaimin Apr 7, 2022
e3f0831
Update elementwise_sig.cc
BrilliantYuKaimin Apr 11, 2022
570f4c4
add elementwise_heaviside
BrilliantYuKaimin Apr 11, 2022
d7609ee
Update test_elementwise_heaviside_op.py
BrilliantYuKaimin Apr 12, 2022
9ca329e
Update math.py
BrilliantYuKaimin Apr 12, 2022
5bc256d
Update test_elementwise_heaviside_op.py
BrilliantYuKaimin Apr 13, 2022
07d5607
Merge branch 'develop' into heaviside_2
BrilliantYuKaimin Apr 13, 2022
30cf8c6
Update test_elementwise_heaviside_op.py
BrilliantYuKaimin Apr 14, 2022
67 changes: 67 additions & 0 deletions paddle/fluid/operators/elementwise/elementwise_heaviside_op.cc
@@ -0,0 +1,67 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include <string>
#include "paddle/fluid/operators/elementwise/elementwise_op.h"

namespace paddle {
namespace operators {

class ElementwiseHeavisideOpMaker : public ElementwiseOpMaker {
protected:
std::string GetName() const override { return "Heaviside"; }
std::string GetEquation() const override { return "Out = Heaviside(X, Y)"; }

void AddInputX() override {
AddInput("X",
"(Tensor), The input tensor of Heaviside step function. "
"Its dtype can be int32, int64, float32 and float64");
}

void AddInputY() override {
AddInput("Y",
"(Tensor), The tensor determining a Heaviside step function, "
"which is the value when X = 0. Its dtype should be same as X.");
}

// Note: "Funtionality" follows the spelling of the virtual method in the
// ElementwiseOpMaker base class; the override must match it.
std::string GetOpFuntionality() const override {
return "Computes the Heaviside step function determined by Y "
"for each element in X.";
}
};

template <typename T>
class ElementwiseHeavisideGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("elementwise_heaviside_grad");
op->SetInput("X", this->Input("X"));
op->SetInput("Y", this->Input("Y"));
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
op->SetAttrMap(this->Attrs());
}
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(
elementwise_heaviside, ops::ElementwiseOp, ops::ElementwiseHeavisideOpMaker,
ops::ElementwiseHeavisideGradOpMaker<paddle::framework::OpDesc>,
ops::ElementwiseHeavisideGradOpMaker<paddle::imperative::OpBase>);

REGISTER_OPERATOR(elementwise_heaviside_grad, ops::ElementwiseOpGrad);
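
For reference, the elementwise function this operator implements (see `HeavisideFunctor` further down) is the Heaviside step function with its value at zero supplied by `Y`:

$$
\operatorname{heaviside}(x, y) =
\begin{cases}
0, & x < 0 \\
y, & x = 0 \\
1, & x > 0
\end{cases}
$$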
10 changes: 10 additions & 0 deletions paddle/phi/kernels/cpu/elementwise_grad_kernel.cc
@@ -323,6 +323,16 @@ PD_REGISTER_KERNEL(minimum_grad,
int,
int64_t,
phi::dtype::bfloat16) {}

PD_REGISTER_KERNEL(elementwise_heaviside_grad,
CPU,
ALL_LAYOUT,
phi::ElementwiseHeavisideGradKernel,
float,
double,
int,
int64_t) {}

PD_REGISTER_KERNEL(elementwise_pow_grad,
CPU,
ALL_LAYOUT,
9 changes: 9 additions & 0 deletions paddle/phi/kernels/cpu/elementwise_kernel.cc
@@ -166,6 +166,15 @@ PD_REGISTER_KERNEL(
PD_REGISTER_KERNEL(
fmin, CPU, ALL_LAYOUT, phi::FMinKernel, float, double, int, int64_t) {}

PD_REGISTER_KERNEL(elementwise_heaviside,
CPU,
ALL_LAYOUT,
phi::ElementwiseHeavisideKernel,
float,
double,
int,
int64_t) {}

PD_REGISTER_KERNEL(add_raw,
CPU,
ALL_LAYOUT,
22 changes: 22 additions & 0 deletions paddle/phi/kernels/funcs/elementwise_functor.h
@@ -539,6 +539,28 @@ struct InverseModuloFunctor<
}
};

// Heaviside step function: 0 for a < 0, b for a == 0, 1 for a > 0.
template <typename T>
struct HeavisideFunctor {
inline HOSTDEVICE T operator()(const T a, const T b) const {
return a == static_cast<T>(0) ? b : static_cast<T>(a > 0);
}
};

// The derivative with respect to x is zero everywhere (the jump at
// x == 0 is treated as having no gradient).
template <typename T>
struct HeavisideGradDx {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
return dout * static_cast<T>(0);
}
};

// The derivative with respect to y is 1 exactly where x == 0, since y
// determines the output only there.
template <typename T>
struct HeavisideGradDy {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
return dout * static_cast<T>(x == static_cast<T>(0));
}
};

template <typename T>
struct FloorDivideFunctor {
inline HOSTDEVICE T operator()(const T a, const T b) const {
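
As a sanity check on the semantics above, here is a minimal NumPy sketch of what the three functors compute (illustration only, not part of this PR):

```python
import numpy as np

def heaviside(x, y):
    # Mirrors HeavisideFunctor: 0 for x < 0, y for x == 0, 1 for x > 0.
    return np.where(x == 0, y, (x > 0).astype(x.dtype))

def heaviside_grad(x, y, dout):
    # Mirrors HeavisideGradDx / HeavisideGradDy: no gradient ever flows
    # to x, and the gradient flows to y only where x == 0.
    dx = np.zeros_like(dout)
    dy = dout * (x == 0).astype(dout.dtype)
    return dx, dy

x = np.array([-1.0, 0.0, 2.0])
y = np.array([0.5, 0.5, 0.5])
print(heaviside(x, y))                        # [0.  0.5 1. ]
print(heaviside_grad(x, y, np.ones_like(x)))  # ([0., 0., 0.], [0., 1., 0.])
```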
10 changes: 10 additions & 0 deletions paddle/phi/kernels/gpu/elementwise_grad_kernel.cu
@@ -382,6 +382,16 @@ PD_REGISTER_KERNEL(minimum_grad,
int64_t,
phi::dtype::float16,
phi::dtype::bfloat16) {}

PD_REGISTER_KERNEL(elementwise_heaviside_grad,
GPU,
ALL_LAYOUT,
phi::ElementwiseHeavisideGradKernel,
float,
double,
int,
int64_t) {}

PD_REGISTER_KERNEL(elementwise_pow_grad,
GPU,
ALL_LAYOUT,
9 changes: 9 additions & 0 deletions paddle/phi/kernels/gpu/elementwise_kernel.cu
@@ -73,6 +73,15 @@ PD_REGISTER_KERNEL(
PD_REGISTER_KERNEL(
fmin, GPU, ALL_LAYOUT, phi::FMinKernel, float, double, int, int64_t) {}

PD_REGISTER_KERNEL(elementwise_heaviside,
GPU,
ALL_LAYOUT,
phi::ElementwiseHeavisideKernel,
float,
double,
int,
int64_t) {}

PD_REGISTER_KERNEL(add_raw,
GPU,
ALL_LAYOUT,
49 changes: 49 additions & 0 deletions paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
@@ -667,6 +667,55 @@ struct MinGradDy {
}
};

template <typename T, typename Context>
void ElementwiseHeavisideGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& out_grad,
int axis,
DenseTensor* x_grad,
DenseTensor* y_grad) {
funcs::ElementwiseGradPreProcess(out_grad, x_grad);

auto out = out_grad; // Fake out; the Heaviside grad functors ignore `out`, so out_grad is reused
auto x_dim = x.dims();
auto y_dim = y.dims();
if (x.dims() == y.dims()) {
funcs::ElemwiseGradComputeNoBroadcast<Context,
T,
funcs::HeavisideGradDx<T>,
funcs::HeavisideGradDy<T>>(
dev_ctx,
x_dim,
y_dim,
x,
y,
out,
out_grad,
axis,
x_grad,
y_grad,
funcs::HeavisideGradDx<T>(),
funcs::HeavisideGradDy<T>());
} else {
funcs::ElemwiseGradComputeWithBroadcast<T,
funcs::HeavisideGradDx<T>,
funcs::HeavisideGradDy<T>>(
dev_ctx,
x_dim,
y_dim,
x,
y,
out,
out_grad,
axis,
x_grad,
y_grad,
funcs::HeavisideGradDx<T>(),
funcs::HeavisideGradDy<T>());
}
}

template <typename T>
struct PowGradDX {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
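
The kernel dispatches to `ElemwiseGradComputeNoBroadcast` when `x` and `y` have identical dims and to `ElemwiseGradComputeWithBroadcast` otherwise; in the broadcast case the gradient of the smaller operand is reduced back to its original shape. Assuming a build of Paddle that includes this PR, the behavior can be checked from dygraph mode with a sketch like this (hypothetical session; values follow the functors above):

```python
import paddle

x = paddle.to_tensor([-1.0, 0.0, 3.0], stop_gradient=False)
y = paddle.to_tensor([2.0], stop_gradient=False)  # broadcast against x

out = paddle.heaviside(x, y)  # [0., 2., 1.]
out.sum().backward()

print(x.grad)  # [0., 0., 0.] -- HeavisideGradDx is identically zero
print(y.grad)  # [1.] -- per-element grads [0., 1., 0.] reduced over the broadcast dim
```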
11 changes: 11 additions & 0 deletions paddle/phi/kernels/impl/elementwise_kernel_impl.h
@@ -44,4 +44,15 @@ void FMinKernel(const Context& dev_ctx,
dev_ctx, x, y, axis, funcs::FMinFunctor<T>(), out);
}

template <typename T, typename Context>
void ElementwiseHeavisideKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
dev_ctx.template Alloc<T>(out);
funcs::ElementwiseCompute<funcs::HeavisideFunctor<T>, T, T>(
dev_ctx, x, y, axis, funcs::HeavisideFunctor<T>(), out);
}

} // namespace phi
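
`ElementwiseCompute` broadcasts `y` against `x` following Paddle's elementwise convention: `y`'s dimensions are aligned with `x`'s starting at `axis`, and `axis = -1` aligns them from the right. A rough NumPy equivalent of that alignment, purely as an illustration:

```python
import numpy as np

def elementwise_heaviside(x, y, axis=-1):
    # Align y's dims with x's starting at `axis` (-1: align trailing dims),
    # then rely on NumPy broadcasting -- a sketch of what
    # funcs::ElementwiseCompute does for a lower-rank y.
    if axis == -1:
        axis = x.ndim - y.ndim
    shape = (1,) * axis + y.shape + (1,) * (x.ndim - axis - y.ndim)
    y = y.reshape(shape)
    return np.where(x == 0, y, (x > 0).astype(x.dtype))

x = np.zeros((2, 3, 4))
y = np.full((3,), 0.5)
print(elementwise_heaviside(x, y, axis=1).shape)  # (2, 3, 4); every element is 0.5 since x == 0 everywhere
```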
12 changes: 12 additions & 0 deletions paddle/phi/ops/compat/elementwise_sig.cc
@@ -222,13 +222,23 @@ KernelSignature ElementwiseMinGradOpArgumentMapping(
{"axis"},
{GradVarName("X"), GradVarName("Y")});
}

KernelSignature ElementwiseHeavisideGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("elementwise_heaviside_grad",
{"X", "Y", GradVarName("Out")},
{"axis"},
{GradVarName("X"), GradVarName("Y")});
}

KernelSignature ElementwisePowGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("elementwise_pow_grad",
{"X", "Y", GradVarName("Out")},
{"axis"},
{GradVarName("X"), GradVarName("Y")});
}

} // namespace phi

PD_REGISTER_BASE_KERNEL_NAME(elementwise_add, add);
@@ -306,5 +316,7 @@ PD_REGISTER_ARG_MAPPING_FN(elementwise_max_grad,
phi::ElementwiseMaxGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_min_grad,
phi::ElementwiseMinGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_heaviside_grad,
phi::ElementwiseHeavisideGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_pow_grad,
phi::ElementwisePowGradOpArgumentMapping);
1 change: 1 addition & 0 deletions python/paddle/__init__.py
@@ -268,6 +268,7 @@
from .tensor.math import fmin # noqa: F401
from .tensor.math import inner # noqa: F401
from .tensor.math import outer # noqa: F401
from .tensor.math import heaviside # noqa: F401
from .tensor.math import frac # noqa: F401

from .tensor.random import bernoulli # noqa: F401
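
With this import in place, the new function is exposed as `paddle.heaviside` (and, per the PR title, also as the `paddle.Tensor.heaviside` method, which is patched in elsewhere). A quick usage sketch, assuming a wheel built from this branch:

```python
import paddle

x = paddle.to_tensor([-0.5, 0.0, 0.5])
y = paddle.to_tensor([1.0])

print(paddle.heaviside(x, y))  # [0., 1., 1.]
print(x.heaviside(y))          # same result via the Tensor method
```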