Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

【PaddlePaddle Hackathon 2】18、为 Paddle 新增 paddle.heaviside 和 paddle.Tensor.heaviside API #41872

Merged
merged 19 commits into from May 10, 2022
Merged
Show file tree
Hide file tree
Changes from 12 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
70 changes: 70 additions & 0 deletions paddle/fluid/operators/elementwise/elementwise_heaviside_op.cc
@@ -0,0 +1,70 @@
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

#include <string>
#include "paddle/fluid/operators/elementwise/elementwise_op.h"

namespace paddle {
namespace operators {

// Builds the op-proto description for elementwise_heaviside: the op name,
// its equation string, and the documentation of inputs X and Y.
class ElementwiseHeavisideOpMaker : public ElementwiseOpMaker {
 protected:
  std::string GetName() const override { return "Heaviside"; }
  std::string GetEquation() const override { return "Out = Heaviside(X, Y)"; }

  // Documents input X: the tensor the step function is evaluated at.
  void AddInputX() override {
    AddInput("X",
             "(Tensor), The input tensor of Heaviside step function. "
             "Its dtype can be int32, int64, float32 and float64");
  }

  // Documents input Y: the value the op takes where X == 0.
  void AddInputY() override {
    AddInput("Y",
             "(Tensor), The tensor determining a Heaviside step function, "
             "which is the value when X = 0. Its dtype should be same as X.");
  }

  // NOTE(review): "GetOpFuntionality" (sic) matches the base-class
  // spelling in ElementwiseOpMaker — do not "fix" it here alone.
  std::string GetOpFuntionality() const override {
    return "Computes the Heaviside step function determined by Y "
           "for each element in X.";
  }
};

// Generates the elementwise_heaviside_grad op. Instantiated below for both
// static graph (framework::OpDesc) and dynamic graph (imperative::OpBase).
template <typename T>
class ElementwiseHeavisideGradOpMaker : public framework::SingleGradOpMaker<T> {
 public:
  using framework::SingleGradOpMaker<T>::SingleGradOpMaker;

 protected:
  void Apply(GradOpPtr<T> op) const override {
    op->SetType("elementwise_heaviside_grad");
    // Both forward inputs are wired into the grad op: the grad kernel
    // reads X (dY is nonzero only where X == 0) and Y.
    op->SetInput("X", this->Input("X"));
    op->SetInput("Y", this->Input("Y"));
    op->SetInput(framework::GradVarName("Out"), this->OutputGrad("Out"));
    op->SetOutput(framework::GradVarName("X"), this->InputGrad("X"));
    op->SetOutput(framework::GradVarName("Y"), this->InputGrad("Y"));
    // Forward the forward-op attributes (e.g. the broadcast axis) verbatim.
    op->SetAttrMap(this->Attrs());
  }
};

} // namespace operators
} // namespace paddle

namespace ops = paddle::operators;
// Register the forward op with its maker and the grad-op makers for both
// static-graph (OpDesc) and dynamic-graph (OpBase) modes.
REGISTER_OPERATOR(
    elementwise_heaviside, ops::ElementwiseOp, ops::ElementwiseHeavisideOpMaker,
    ops::ElementwiseHeavisideGradOpMaker<paddle::framework::OpDesc>,
    ops::ElementwiseHeavisideGradOpMaker<paddle::imperative::OpBase>);

// The grad op reuses the shared elementwise grad infrastructure.
REGISTER_OPERATOR(elementwise_heaviside_grad, ops::ElementwiseOpGrad);
10 changes: 10 additions & 0 deletions paddle/phi/kernels/cpu/elementwise_grad_kernel.cc
Expand Up @@ -323,6 +323,16 @@ PD_REGISTER_KERNEL(minimum_grad,
int,
int64_t,
phi::dtype::bfloat16) {}

// CPU backward-kernel registration; dtypes match the forward registration
// (float/double/int/int64_t — no float16/bfloat16).
PD_REGISTER_KERNEL(elementwise_heaviside_grad,
                   CPU,
                   ALL_LAYOUT,
                   phi::ElementwiseHeavisideGradKernel,
                   float,
                   double,
                   int,
                   int64_t) {}

PD_REGISTER_KERNEL(elementwise_pow_grad,
CPU,
ALL_LAYOUT,
Expand Down
21 changes: 21 additions & 0 deletions paddle/phi/kernels/cpu/elementwise_kernel.cc
Expand Up @@ -142,6 +142,19 @@ void ElementwisePowRawKernel(const Context& dev_ctx,
funcs::ElementwiseCompute<funcs::ElementwisePowFunctor<T>, T>(
dev_ctx, x, y, axis, funcs::ElementwisePowFunctor<T>(), out);
}

// CPU kernel: Out = Heaviside(X, Y) elementwise, with broadcasting
// controlled by `axis` (assumed to follow ElementwiseCompute's axis
// convention, -1 meaning default alignment — TODO confirm).
template <typename T, typename Context>
void ElementwiseHeavisideRawKernel(const Context& dev_ctx,
                                   const DenseTensor& x,
                                   const DenseTensor& y,
                                   int axis,
                                   DenseTensor* out) {
  // allocate memory for out
  dev_ctx.template Alloc<T>(out);
  funcs::ElementwiseCompute<funcs::ElementwiseHeavisideFunctor<T>, T>(
      dev_ctx, x, y, axis, funcs::ElementwiseHeavisideFunctor<T>(), out);
}

// Create the definition of Add
DEFINE_CPU_ELEMENTWISE_OP(Add)

Expand Down Expand Up @@ -250,3 +263,11 @@ PD_REGISTER_KERNEL(elementwise_pow_raw,
double,
int,
int64_t) {}
// CPU registration of the raw (explicit-axis) Heaviside kernel.
PD_REGISTER_KERNEL(elementwise_heaviside_raw,
                   CPU,
                   ALL_LAYOUT,
                   phi::ElementwiseHeavisideRawKernel,
                   float,
                   double,
                   int,
                   int64_t) {}
9 changes: 9 additions & 0 deletions paddle/phi/kernels/elementwise_grad_kernel.h
Expand Up @@ -160,6 +160,15 @@ void MinimumGradKernel(const Context& dev_ctx,
DenseTensor* dx,
DenseTensor* dy);

// Backward of the Heaviside step function: fills dx and/or dy from dout.
// Presumably either output pointer may be null when that gradient is not
// requested, following the other grad kernels in this header — confirm.
template <typename T, typename Context>
void ElementwiseHeavisideGradKernel(const Context& dev_ctx,
                                    const DenseTensor& x,
                                    const DenseTensor& y,
                                    const DenseTensor& dout,
                                    int axis,
                                    DenseTensor* dx,
                                    DenseTensor* dy);

template <typename T, typename Context>
void ElementwisePowGradKernel(const Context& dev_ctx,
const DenseTensor& x,
Expand Down
25 changes: 25 additions & 0 deletions paddle/phi/kernels/elementwise_kernel.cc
Expand Up @@ -100,6 +100,15 @@ void ElementwisePowKernel(const Context& dev_ctx,
ElementwisePowRawKernel<T>(dev_ctx, x, y, axis, out);
}

template <typename T, typename Context>
void ElementwiseHeavisideKernel(const Context& dev_ctx,
const DenseTensor& x,
const DenseTensor& y,
DenseTensor* out) {
int axis = -1;
ElementwiseHeavisideRawKernel<T>(dev_ctx, x, y, axis, out);
}

} // namespace phi

using complex64 = ::phi::dtype::complex<float>;
Expand Down Expand Up @@ -172,6 +181,14 @@ PD_REGISTER_KERNEL(
modulo, CPU, ALL_LAYOUT, phi::ModuloKernel, float, double, int, int64_t) {}
PD_REGISTER_KERNEL(
floor_divide, CPU, ALL_LAYOUT, phi::FloorDivideKernel, int, int64_t) {}
// CPU registration of the default-axis Heaviside kernel.
PD_REGISTER_KERNEL(elementwise_heaviside,
                   CPU,
                   ALL_LAYOUT,
                   phi::ElementwiseHeavisideKernel,
                   float,
                   double,
                   int,
                   int64_t) {}
PD_REGISTER_KERNEL(elementwise_pow,
CPU,
ALL_LAYOUT,
Expand Down Expand Up @@ -258,6 +275,14 @@ PD_REGISTER_KERNEL(
modulo, GPU, ALL_LAYOUT, phi::ModuloKernel, float, double, int, int64_t) {}
PD_REGISTER_KERNEL(
floor_divide, GPU, ALL_LAYOUT, phi::FloorDivideKernel, int, int64_t) {}
// GPU registration of the default-axis Heaviside kernel; same dtype list
// as the CPU registration.
PD_REGISTER_KERNEL(elementwise_heaviside,
                   GPU,
                   ALL_LAYOUT,
                   phi::ElementwiseHeavisideKernel,
                   float,
                   double,
                   int,
                   int64_t) {}
PD_REGISTER_KERNEL(elementwise_pow,
GPU,
ALL_LAYOUT,
Expand Down
24 changes: 24 additions & 0 deletions paddle/phi/kernels/elementwise_kernel.h
Expand Up @@ -150,6 +150,19 @@ void ElementwisePowKernel(const Context& dev_ctx,
const DenseTensor& y,
DenseTensor* out);

// Heaviside step function with an explicit broadcast axis.
template <typename T, typename Context>
void ElementwiseHeavisideRawKernel(const Context& dev_ctx,
                                   const DenseTensor& x,
                                   const DenseTensor& y,
                                   int axis,
                                   DenseTensor* out);

// Heaviside step function using the default broadcast axis (-1).
template <typename T, typename Context>
void ElementwiseHeavisideKernel(const Context& dev_ctx,
                                const DenseTensor& x,
                                const DenseTensor& y,
                                DenseTensor* out);

template <typename T, typename Context>
DenseTensor Add(const Context& dev_ctx,
const DenseTensor& x,
Expand Down Expand Up @@ -238,6 +251,17 @@ DenseTensor FloorDivide(const Context& dev_ctx,
return dense_out;
}

// Value-returning convenience wrapper: infers the output meta (shape/dtype)
// from x and y, runs the Heaviside kernel, and returns the result tensor.
template <typename T, typename Context>
DenseTensor ElementwiseHeaviside(const Context& dev_ctx,
                                 const DenseTensor& x,
                                 const DenseTensor& y) {
  DenseTensor dense_out;
  MetaTensor meta_out(&dense_out);
  ElementwiseInferMeta(x, y, &meta_out);
  ElementwiseHeavisideKernel<T, Context>(dev_ctx, x, y, &dense_out);
  return dense_out;
}

template <typename T, typename Context>
DenseTensor ElementwisePow(const Context& dev_ctx,
const DenseTensor& x,
Expand Down
7 changes: 7 additions & 0 deletions paddle/phi/kernels/funcs/elementwise_functor.h
Expand Up @@ -539,6 +539,13 @@ struct InverseModuloFunctor<
}
};

// Heaviside step function with a user-supplied value at zero:
// returns 0 for a < 0, 1 for a > 0, and b when a == 0.
// NOTE(review): a NaN input falls through both comparisons and yields 0,
// same as the original ternary form.
template <typename T>
struct ElementwiseHeavisideFunctor {
  inline HOSTDEVICE T operator()(const T a, const T b) const {
    const T zero = static_cast<T>(0);
    if (a == zero) {
      return b;
    }
    return a > zero ? static_cast<T>(1) : zero;
  }
};

template <typename T>
struct FloorDivideFunctor {
inline HOSTDEVICE T operator()(const T a, const T b) const {
Expand Down
10 changes: 10 additions & 0 deletions paddle/phi/kernels/gpu/elementwise_grad_kernel.cu
Expand Up @@ -382,6 +382,16 @@ PD_REGISTER_KERNEL(minimum_grad,
int64_t,
phi::dtype::float16,
phi::dtype::bfloat16) {}

// GPU backward-kernel registration; dtypes mirror the CPU grad
// registration (no float16/bfloat16, unlike minimum_grad above).
PD_REGISTER_KERNEL(elementwise_heaviside_grad,
                   GPU,
                   ALL_LAYOUT,
                   phi::ElementwiseHeavisideGradKernel,
                   float,
                   double,
                   int,
                   int64_t) {}

PD_REGISTER_KERNEL(elementwise_pow_grad,
GPU,
ALL_LAYOUT,
Expand Down
37 changes: 37 additions & 0 deletions paddle/phi/kernels/impl/elementwise_grad_kernel_impl.h
Expand Up @@ -667,6 +667,43 @@ struct MinGradDy {
}
};

// dX of Heaviside(x, y): zero almost everywhere. Written as dout * 0
// rather than a bare 0 so the functor keeps the uniform
// (x, y, out, dout) shape of the other *GradDx functors in this header
// (and propagates NaN from dout, since NaN * 0 == NaN).
template <typename T>
struct HeavisideGradDx {
  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
    return dout * static_cast<T>(0);
  }
};

// dY of Heaviside(x, y): the output equals y exactly where x == 0, so
// the gradient w.r.t. y is dout there and zero elsewhere.
template <typename T>
struct HeavisideGradDy {
  HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
    return dout * static_cast<T>(x == static_cast<T>(0));
  }
};

// Backward kernel: dispatches to the shared ElemwiseGradCompute with the
// two Heaviside grad functors above. `dout` is passed in both the `out`
// and `dout` slots because neither functor reads `out`.
template <typename T, typename Context>
void ElementwiseHeavisideGradKernel(const Context& dev_ctx,
                                    const DenseTensor& x,
                                    const DenseTensor& y,
                                    const DenseTensor& dout,
                                    int axis,
                                    DenseTensor* dx,
                                    DenseTensor* dy) {
  funcs::ElementwiseGradPreProcess(dout, dx);
  phi::funcs::
      ElemwiseGradCompute<Context, T, HeavisideGradDx<T>, HeavisideGradDy<T>>(
          dev_ctx,
          x,
          y,
          dout,   // stands in for `out`, which the functors ignore
          dout,
          axis,
          dx,
          dy,
          HeavisideGradDx<T>(),
          HeavisideGradDy<T>());
}

template <typename T>
struct PowGradDX {
HOSTDEVICE T operator()(T x, T y, T out, T dout) const {
Expand Down
10 changes: 10 additions & 0 deletions paddle/phi/kernels/kps/elementwise_kernel.cu
Expand Up @@ -58,6 +58,8 @@ DEFINE_CUDA_ELEMENTWISE_OP(Minimum)
DEFINE_CUDA_ELEMENTWISE_OP(Modulo)
// Create the definition of FloorDivide
DEFINE_CUDA_ELEMENTWISE_OP(FloorDivide)
// Create the definition of ElementwiseHeaviside (presumably expands to
// ElementwiseHeavisideRawKernel, matching the registration below — the
// macro body is defined elsewhere in this file).
DEFINE_CUDA_ELEMENTWISE_OP(ElementwiseHeaviside)
// Create the definition of Pow
DEFINE_CUDA_ELEMENTWISE_OP(ElementwisePow)

Expand Down Expand Up @@ -174,6 +176,14 @@ PD_REGISTER_KERNEL(floor_divide_raw,
phi::FloorDivideRawKernel,
int,
int64_t) {}
// KPS-backend registration of the raw (explicit-axis) Heaviside kernel.
PD_REGISTER_KERNEL(elementwise_heaviside_raw,
                   KPS,
                   ALL_LAYOUT,
                   phi::ElementwiseHeavisideRawKernel,
                   float,
                   double,
                   int,
                   int64_t) {}
PD_REGISTER_KERNEL(elementwise_pow_raw,
KPS,
ALL_LAYOUT,
Expand Down
23 changes: 23 additions & 0 deletions paddle/phi/ops/compat/elementwise_sig.cc
Expand Up @@ -95,6 +95,16 @@ KernelSignature ElementwiseFloorDivOpArgumentMapping(
return KernelSignature("floor_divide_raw", {"X", "Y"}, {"axis"}, {"Out"});
}

// Maps the fluid elementwise_heaviside op onto a phi kernel signature.
// An explicit broadcast axis routes to the raw (axis-carrying) kernel;
// the default axis (-1) maps to the attribute-free signature.
KernelSignature ElementwiseHeavisideOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  const int axis = paddle::any_cast<int>(ctx.Attr("axis"));
  if (axis != -1) {
    return KernelSignature(
        "elementwise_heaviside_raw", {"X", "Y"}, {"axis"}, {"Out"});
  }
  return KernelSignature("elementwise_heaviside", {"X", "Y"}, {}, {"Out"});
}

KernelSignature ElementwisePowOpArgumentMapping(
const ArgumentMappingContext& ctx) {
int axis = paddle::any_cast<int>(ctx.Attr("axis"));
Expand Down Expand Up @@ -208,6 +218,15 @@ KernelSignature ElementwiseMinGradOpArgumentMapping(
return KernelSignature(
"minimum_grad", {"X", "Y", "Out@GRAD"}, {"axis"}, {"X@GRAD", "Y@GRAD"});
}

// Maps the fluid elementwise_heaviside_grad op onto the phi grad kernel
// signature. `ctx` is unused but required by the mapping-fn interface.
KernelSignature ElementwiseHeavisideGradOpArgumentMapping(
    const ArgumentMappingContext& ctx) {
  return KernelSignature("elementwise_heaviside_grad",
                         {"X", "Y", "Out@GRAD"},
                         {"axis"},
                         {"X@GRAD", "Y@GRAD"});
}

KernelSignature ElementwisePowGradOpArgumentMapping(
const ArgumentMappingContext& ctx) {
return KernelSignature("elementwise_pow_grad",
Expand Down Expand Up @@ -258,6 +277,8 @@ PD_REGISTER_ARG_MAPPING_FN(elementwise_mod,
phi::ElementwiseModOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_floordiv,
phi::ElementwiseFloorDivOpArgumentMapping);
// Register the forward argument-mapping function.
PD_REGISTER_ARG_MAPPING_FN(elementwise_heaviside,
                           phi::ElementwiseHeavisideOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_pow,
phi::ElementwisePowOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_add_grad,
Expand Down Expand Up @@ -292,5 +313,7 @@ PD_REGISTER_ARG_MAPPING_FN(elementwise_max_grad,
phi::ElementwiseMaxGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_min_grad,
phi::ElementwiseMinGradOpArgumentMapping);
// Register the backward argument-mapping function.
PD_REGISTER_ARG_MAPPING_FN(elementwise_heaviside_grad,
                           phi::ElementwiseHeavisideGradOpArgumentMapping);
PD_REGISTER_ARG_MAPPING_FN(elementwise_pow_grad,
phi::ElementwisePowGradOpArgumentMapping);
1 change: 1 addition & 0 deletions python/paddle/__init__.py
Expand Up @@ -268,6 +268,7 @@
from .tensor.math import fmin # noqa: F401
from .tensor.math import inner # noqa: F401
from .tensor.math import outer # noqa: F401
from .tensor.math import heaviside # noqa: F401
Copy link
Contributor

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

这里请也将heaviside添加到这个文件的__all__列表中,使得paddle.heaviside作为公开API

Copy link
Contributor Author

Choose a reason for hiding this comment

The reason will be displayed to describe this comment to others. Learn more.

完成

from .tensor.math import frac # noqa: F401

from .tensor.random import bernoulli # noqa: F401
Expand Down