[PaddlePaddle Hackathon 2] 18. Add the paddle.heaviside and paddle.Tensor.heaviside APIs to Paddle (#41872)

* Create elementwise_heaviside_op.cc

* add ElementwiseHeavisideFunctor

* Create test_elementwise_heaviside_op.py

* Add the Python API for heaviside

* add heaviside in white list

* Add the kernel signature for heaviside

* Add the heaviside kernels

* Add the heaviside gradient kernels

* Register the heaviside gradient kernels

* Adjust code formatting

* Update elementwise_sig.cc

* add heaviside in __all__

* Update heaviside docs

* Update math.py

* Update math.py

* Update math.py
BrilliantYuKaimin committed May 10, 2022
1 parent 8164414 commit 4892d59
Showing 17 changed files with 468 additions and 0 deletions.
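In short, the new op computes a Heaviside step function whose value at x == 0 is supplied elementwise by a second tensor y. A minimal usage sketch (illustrative, not part of the diff; assumes a Paddle build containing this commit):

import paddle

x = paddle.to_tensor([-1.0, 0.0, 2.0])
y = paddle.to_tensor([0.5])  # broadcastable against x; same dtype as x
out = paddle.heaviside(x, y)
# out -> [0.0, 0.5, 1.0]: 0 where x < 0, y where x == 0, 1 where x > 0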
70 changes: 70 additions & 0 deletions paddle/fluid/operators/elementwise/elementwise_heaviside_op.cc
@@ -0,0 +1,70 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <string>
#include "paddle/fluid/operators/elementwise/elementwise_op.h"

namespace paddle {
namespace operators {

class ElementwiseHeavisideOpMaker : public ElementwiseOpMaker {
protected:
std::string GetName() const override { return "Heaviside"; }
std::string GetEquation() const override { return "Out = Heaviside(X, Y)"; }

void AddInputX() override {
AddInput("X",
"(Tensor), The input tensor of Heaviside step function. "
"Its dtype can be int32, int64, float32 and float64");
}

void AddInputY() override {
AddInput("Y",
"(Tensor), The tensor determining a Heaviside step function, "
"which is the value when X = 0. Its dtype should be same as X.");
}

std::string GetOpFuntionality() const override {
return "Computes the Heaviside step function determined by Y "
"for each element in X.";
}
};

template <typename T>
class ElementwiseHeavisideGradOpMaker : public framework::SingleGradOpMaker<T> {
public:
using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

protected:
void Apply(GradOpPtr<T> op) const override {
op->SetType("elementwise_heaviside_grad");
op->SetInput("X", this->Input("X"));
op->SetInput("Y", this->Input("Y"));
op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
op->SetAttrMap(this->Attrs());
}
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
REGISTER_OPERATOR(
elementwise_heaviside, ops::ElementwiseOp, ops::ElementwiseHeavisideOpMaker,
ops::ElementwiseHeavisideGradOpMaker<paddle::framework::OpDesc>,
ops::ElementwiseHeavisideGradOpMaker<paddle::imperative::OpBase>);

REGISTER_OPERATOR(elementwise_heaviside_grad, ops::ElementwiseOpGrad);
10 changes: 10 additions & 0 deletions paddle/phi/kernels/cpu/elementwise_grad_kernel.cc
@@ -88,6 +88,16 @@ PD_REGISTER_KERNEL(minimum_grad,
int,
int64_t,
phi::dtype::bfloat16) {}

PD_REGISTER_KERNEL(elementwise_heaviside_grad,
CPU,
ALL_LAYOUT,
phi::ElementwiseHeavisideGradKernel,
float,
double,
int,
int64_t) {}

PD_REGISTER_KERNEL(elementwise_pow_grad,
CPU,
ALL_LAYOUT,
20 changes: 20 additions & 0 deletions paddle/phi/kernels/cpu/elementwise_kernel.cc
@@ -95,6 +95,18 @@ void ElementwisePowRawKernel(const Context& dev_ctx,
dev_ctx, x, y, axis, funcs::ElementwisePowFunctor<T>(), out);
}

template <typename T, typename Context>
void ElementwiseHeavisideRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out) {
// allocate memory for out
dev_ctx.template Alloc<T>(out);
funcs::ElementwiseCompute<funcs::ElementwiseHeavisideFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::ElementwiseHeavisideFunctor<T>(), out);
}

} // namespace phi

using complex64 = ::phi::dtype::complex<float>;
@@ -149,3 +161,11 @@ PD_REGISTER_KERNEL(elementwise_pow_raw,
double,
int,
int64_t) {}
PD_REGISTER_KERNEL(elementwise_heaviside_raw,
CPU,
ALL_LAYOUT,
phi::ElementwiseHeavisideRawKernel,
float,
double,
int,
int64_t) {}
9 changes: 9 additions & 0 deletions paddle/phi/kernels/elementwise_grad_kernel.h
@@ -55,6 +55,15 @@ void MinimumGradKernel(const Context& dev_ctx,
DenseTensor* dx,
DenseTensor* dy);

template <typename T, typename Context>
void ElementwiseHeavisideGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& dout,
int axis,
DenseTensor* dx,
DenseTensor* dy);

template <typename T, typename Context>
void ElementwisePowGradKernel(const Context& dev_ctx,
const DenseTensor& x,
25 changes: 25 additions & 0 deletions paddle/phi/kernels/elementwise_kernel.cc
@@ -64,6 +64,15 @@ void ElementwisePowKernel(const Context& dev_ctx,
ElementwisePowRawKernel<T>(dev_ctx, x, y, axis, out);
}

template <typename T, typename Context>
void ElementwiseHeavisideKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
DenseTensor* out) {
int axis = -1;
ElementwiseHeavisideRawKernel<T>(dev_ctx, x, y, axis, out);
}

} // namespace phi

using complex64 = ::phi::dtype::complex<float>;
@@ -91,6 +100,14 @@ PD_REGISTER_KERNEL(
modulo, CPU, ALL_LAYOUT, phi::ModuloKernel, float, double, int, int64_t) {}
PD_REGISTER_KERNEL(
floor_divide, CPU, ALL_LAYOUT, phi::FloorDivideKernel, int, int64_t) {}
PD_REGISTER_KERNEL(elementwise_heaviside,
CPU,
ALL_LAYOUT,
phi::ElementwiseHeavisideKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_KERNEL(elementwise_pow,
CPU,
ALL_LAYOUT,
@@ -126,6 +143,14 @@ PD_REGISTER_KERNEL(
modulo, GPU, ALL_LAYOUT, phi::ModuloKernel, float, double, int, int64_t) {}
PD_REGISTER_KERNEL(
floor_divide, KPS, ALL_LAYOUT, phi::FloorDivideKernel, int, int64_t) {}
PD_REGISTER_KERNEL(elementwise_heaviside,
GPU,
ALL_LAYOUT,
phi::ElementwiseHeavisideKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_KERNEL(elementwise_pow,
KPS,
ALL_LAYOUT,
24 changes: 24 additions & 0 deletions paddle/phi/kernels/elementwise_kernel.h
@@ -98,6 +98,19 @@ void ElementwisePowKernel(const Context& dev_ctx,
const DenseTensor& y,
DenseTensor* out);

template <typename T, typename Context>
void ElementwiseHeavisideRawKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
int axis,
DenseTensor* out);

template <typename T, typename Context>
void ElementwiseHeavisideKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
DenseTensor* out);

template <typename T, typename Context>
DenseTensor Maximum(const Context& dev_ctx,
const DenseTensor& x,
@@ -142,6 +155,17 @@ DenseTensor FloorDivide(const Context& dev_ctx,
return dense_out;
}

template <typename T, typename Context>
DenseTensor ElementwiseHeaviside(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y) {
DenseTensor dense_out;
MetaTensor meta_out(&dense_out);
ElementwiseInferMeta(x, y, &meta_out);
ElementwiseHeavisideKernel<T, Context>(dev_ctx, x, y, &dense_out);
return dense_out;
}

template <typename T, typename Context>
DenseTensor ElementwisePow(const Context& dev_ctx,
const DenseTensor& x,
7 changes: 7 additions & 0 deletions paddle/phi/kernels/funcs/elementwise_functor.h
@@ -543,6 +543,13 @@ struct InverseModuloFunctor<
}
};

template <typename T>
struct ElementwiseHeavisideFunctor {
inline HOSTDEVICE T operator()(const T a, const T b) const {
return a == static_cast<T>(0) ? b : static_cast<T>(a > 0);
}
};

template <typename T>
struct FloorDivideFunctor {
inline HOSTDEVICE T operator()(const T a, const T b) const {
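The functor above is the entire forward rule. For reference, an equivalent NumPy formulation (an illustrative sketch, not part of the commit):

import numpy as np

def heaviside_ref(x, y):
    # Mirrors ElementwiseHeavisideFunctor: y where x == 0, otherwise 1 if x > 0, else 0.
    return np.where(x == 0, y, (x > 0).astype(x.dtype))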
10 changes: 10 additions & 0 deletions paddle/phi/kernels/gpu/elementwise_grad_kernel.cu
@@ -128,6 +128,16 @@ PD_REGISTER_KERNEL(minimum_grad,
int64_t,
phi::dtype::float16,
phi::dtype::bfloat16) {}

PD_REGISTER_KERNEL(elementwise_heaviside_grad,
GPU,
ALL_LAYOUT,
phi::ElementwiseHeavisideGradKernel,
float,
double,
int,
int64_t) {}

PD_REGISTER_KERNEL(elementwise_pow_grad,
GPU,
ALL_LAYOUT,
37 changes: 37 additions & 0 deletions paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
@@ -683,6 +683,43 @@ struct MinGradDy {
}
};

template <typename T>
struct HeavisideGradDx {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
return dout * static_cast<T>(0);
}
};

template <typename T>
struct HeavisideGradDy {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
return dout * static_cast<T>(x == static_cast<T>(0));
}
};

template <typename T, typename Context>
void ElementwiseHeavisideGradKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
const DenseTensor& dout,
int axis,
DenseTensor* dx,
DenseTensor* dy) {
funcs::ElementwiseGradPreProcess(dout, dx);
phi::funcs::
ElemwiseGradCompute<Context, T, HeavisideGradDx<T>, HeavisideGradDy<T>>(
dev_ctx,
x,
y,
dout,
dout,
axis,
dx,
dy,
HeavisideGradDx<T>(),
HeavisideGradDy<T>());
}

template <typename T>
struct PowGradDX {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
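HeavisideGradDx and HeavisideGradDy encode the backward rule: the step output is flat in x, so dx is identically zero, while the gradient reaches y exactly where x == 0. The same rule as a NumPy sketch (illustrative only; it omits the broadcast reduction that ElemwiseGradCompute performs when y is broadcast against x):

import numpy as np

def heaviside_grad_ref(x, y, dout):
    dx = np.zeros_like(dout)                 # HeavisideGradDx: dout * 0
    dy = dout * (x == 0).astype(dout.dtype)  # HeavisideGradDy: dout * 1[x == 0]
    return dx, dy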
10 changes: 10 additions & 0 deletions paddle/phi/kernels/kps/elementwise_kernel.cu
@@ -54,6 +54,8 @@ void FloorDivideKernel(const Context& dev_ctx,
int axis = -1;
FloorDivideRawKernel<T>(dev_ctx, x, y, axis, out);
}
// Create the definition of Heaviside
DEFINE_CUDA_ELEMENTWISE_OP(ElementwiseHeaviside)
// Create the definition of Pow
DEFINE_CUDA_ELEMENTWISE_OP(ElementwisePow)
template <typename T, typename Context>
@@ -130,6 +132,14 @@ PD_REGISTER_KERNEL(floor_divide_raw,
phi::FloorDivideRawKernel,
int,
int64_t) {}
PD_REGISTER_KERNEL(elementwise_heaviside_raw,
KPS,
ALL_LAYOUT,
phi::ElementwiseHeavisideRawKernel,
float,
double,
int,
int64_t) {}
PD_REGISTER_KERNEL(elementwise_pow_raw,
KPS,
ALL_LAYOUT,
23 changes: 23 additions & 0 deletions paddle/phi/ops/compat/elementwise_sig.cc
@@ -95,6 +95,16 @@ KernelSignature ElementwiseFloorDivOpArgumentMapping(
return KernelSignature("floor_divide_raw", {"X", "Y"}, {"axis"}, {"Out"});
}

KernelSignature ElementwiseHeavisideOpArgumentMapping(
const ArgumentMappingContext& ctx) {
int axis = paddle::any_cast<int>(ctx.Attr("axis"));
if (axis == -1) {
return KernelSignature("elementwise_heaviside", {"X", "Y"}, {}, {"Out"});
}
return KernelSignature(
"elementwise_heaviside_raw", {"X", "Y"}, {"axis"}, {"Out"});
}

KernelSignature ElementwisePowOpArgumentMapping(
const ArgumentMappingContext& ctx) {
int axis = paddle::any_cast<int>(ctx.Attr("axis"));
@@ -208,6 +218,15 @@ KernelSignature ElementwiseMinGradOpArgumentMapping(
return KernelSignature(
"minimum_grad", {"X", "Y", "Out@GRAD"}, {"axis"}, {"X@GRAD", "Y@GRAD"});
}

KernelSignature ElementwiseHeavisideGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("elementwise_heaviside_grad",
{"X", "Y", "Out@GRAD"},
{"axis"},
{"X@GRAD", "Y@GRAD"});
}

KernelSignature ElementwisePowGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("elementwise_pow_grad",
@@ -258,6 +277,8 @@ PD_REGISTER_ARG_MAPPING_FN(elementwise_mod,
phi::ElementwiseModOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_floordiv,
phi::ElementwiseFloorDivOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_heaviside,
phi::ElementwiseHeavisideOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_pow,
phi::ElementwisePowOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_add_grad,
@@ -292,5 +313,7 @@ PD_REGISTER_ARG_MAPPING_FN(elementwise_max_grad,
phi::ElementwiseMaxGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_min_grad,
phi::ElementwiseMinGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_heaviside_grad,
phi::ElementwiseHeavisideGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_pow_grad,
phi::ElementwisePowGradOpArgumentMapping);
2 changes: 2 additions & 0 deletions python/paddle/__init__.py
@@ -269,6 +269,7 @@
from .tensor.math import fmin # noqa: F401
from .tensor.math import inner # noqa: F401
from .tensor.math import outer # noqa: F401
from .tensor.math import heaviside # noqa: F401
from .tensor.math import frac # noqa: F401

from .tensor.random import bernoulli # noqa: F401
@@ -635,4 +636,5 @@
'renorm',
'take_along_axis',
'put_along_axis',
'heaviside',
]
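Because the gradient kernels and argument mappings are registered above, the backward behavior is also reachable from Python (a sketch under the same assumptions as the earlier usage example):

import paddle

x = paddle.to_tensor([-1.0, 0.0, 2.0], stop_gradient=False)
y = paddle.to_tensor([0.5, 0.5, 0.5], stop_gradient=False)
paddle.heaviside(x, y).sum().backward()
# x.grad -> [0.0, 0.0, 0.0]  (the output is flat in x)
# y.grad -> [0.0, 1.0, 0.0]  (gradient flows to y only where x == 0)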
