Kernel and API name change
gsq7474741 committed Jun 15, 2022
1 parent b1a45ab commit 2be0b15
Showing 9 changed files with 117 additions and 138 deletions.
2 changes: 1 addition & 1 deletion paddle/phi/kernels/activation_kernel.h
@@ -71,7 +71,7 @@ DECLARE_ACTIVATION_KERNEL(Log1p)
 DECLARE_ACTIVATION_KERNEL(Round)
 DECLARE_ACTIVATION_KERNEL(Floor)
 DECLARE_ACTIVATION_KERNEL(Ceil)
-DECLARE_ACTIVATION_KERNEL(Opposite)
+DECLARE_ACTIVATION_KERNEL(Negative)
 
 DECLARE_ACTIVATION_KERNEL_WITH_ONE_ATTRS(LeakyRelu, alpha)
 DECLARE_ACTIVATION_KERNEL_WITH_ONE_ATTRS(ThresholdedRelu, threshold)
6 changes: 3 additions & 3 deletions paddle/phi/kernels/cpu/activation_kernel.cc
@@ -89,7 +89,7 @@ DEFINE_CPU_ACTIVATION_KERNEL(Log1p, Log1pFunctor)
 DEFINE_CPU_ACTIVATION_KERNEL(Round, RoundFunctor)
 DEFINE_CPU_ACTIVATION_KERNEL(Floor, FloorFunctor)
 DEFINE_CPU_ACTIVATION_KERNEL(Ceil, CeilFunctor)
-DEFINE_CPU_ACTIVATION_KERNEL(Opposite, OppositeFunctor)
+DEFINE_CPU_ACTIVATION_KERNEL(Negative, NegativeFunctor)
 
 DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(LeakyRelu, LeakyReluFunctor, alpha)
 DEFINE_CPU_ACT_KERNEL_WITH_ONE_ATTRS(ThresholdedRelu,
@@ -183,10 +183,10 @@ PD_REGISTER_ACTIVATION_KERNEL(swish, SwishKernel)
 PD_REGISTER_ACTIVATION_KERNEL(round, RoundKernel)
 PD_REGISTER_ACTIVATION_KERNEL(floor, FloorKernel)
 PD_REGISTER_ACTIVATION_KERNEL(ceil, CeilKernel)
-PD_REGISTER_KERNEL(opposite,
+PD_REGISTER_KERNEL(negative,
                    CPU,
                    ALL_LAYOUT,
-                   phi::OppositeKernel,
+                   phi::NegativeKernel,
                    float,
                    double,
                    int16_t,
2 changes: 1 addition & 1 deletion paddle/phi/kernels/funcs/activation_functor.h
@@ -1815,7 +1815,7 @@ struct CeilFunctor : public BaseActivationFunctor<T> {
 };
 
 template <typename T>
-struct OppositeFunctor : public BaseActivationFunctor<T> {
+struct NegativeFunctor : public BaseActivationFunctor<T> {
   template <typename Device, typename X, typename Out>
   void operator()(Device d, X x, Out out) const {
     out.device(d) = -x;
24 changes: 12 additions & 12 deletions paddle/phi/kernels/sparse/cpu/sparse_elementwise_grad_kernel.cc
@@ -84,7 +84,7 @@ void ElementWiseSubtractCsrGradCPUKernel(const Context& dev_ctx,
   if (dy) {
     AllocCsrPtr<T, IntT>(dev_ctx, y, dy);
     CopyCsr(dev_ctx, dout, dev_ctx.GetPlace(), false, dy);
-    phi::OppositeKernel<T, Context>(
+    phi::NegativeKernel<T, Context>(
         dev_ctx, dout.non_zero_elements(), dy->mutable_non_zero_elements());
   }
 }
@@ -127,7 +127,7 @@ void ElementWiseDivideCsrGradCPUKernel(const Context& dev_ctx,
     // -dout * out / y
     AllocCsrPtr<T, IntT>(dev_ctx, y, dy);
     CopyCsr(dev_ctx, dout, dev_ctx.GetPlace(), false, dy);
-    phi::OppositeKernel<T, Context>(
+    phi::NegativeKernel<T, Context>(
         dev_ctx, dout.non_zero_elements(), dy->mutable_non_zero_elements());
     auto tmp = sparse::ElementWiseMultiplyCsr<T, Context>(dev_ctx, *dy, out);
     sparse::ElementWiseDivideCsrKernel<T, Context>(dev_ctx, tmp, y, dy);
@@ -173,7 +173,7 @@ void ElementWiseSubtractCooGradCPUKernel(const Context& dev_ctx,
   if (dy) {
     AllocCooPtr<T, IntT>(dev_ctx, y, dy);
     CopyCoo(dev_ctx, dout, dev_ctx.GetPlace(), false, dy);
-    phi::OppositeKernel<T, Context>(
+    phi::NegativeKernel<T, Context>(
         dev_ctx, dout.non_zero_elements(), dy->mutable_non_zero_elements());
   }
 }
@@ -216,7 +216,7 @@ void ElementWiseDivideCooGradCPUKernel(const Context& dev_ctx,
     // -dout * out / y
     AllocCooPtr<T, IntT>(dev_ctx, y, dy);
     CopyCoo(dev_ctx, dout, dev_ctx.GetPlace(), false, dy);
-    phi::OppositeKernel<T, Context>(
+    phi::NegativeKernel<T, Context>(
        dev_ctx, dout.non_zero_elements(), dy->mutable_non_zero_elements());
     auto tmp = sparse::ElementWiseMultiplyCoo<T, Context>(dev_ctx, *dy, out);
     sparse::ElementWiseDivideCooKernel<T, Context>(dev_ctx, tmp, y, dy);
@@ -298,7 +298,7 @@ DEFINE_ELEMENTWISE_GRAD_KERNEL(Multiply)
 }  // namespace sparse
 }  // namespace phi
 
-PD_REGISTER_KERNEL(sparse_elementwise_add_grad_csr,
+PD_REGISTER_KERNEL(sparse_csr_add_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseAddCsrGradKernel,
@@ -312,7 +312,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_add_grad_csr,
   kernel->InputAt(2).SetDataLayout(phi::DataLayout::SPARSE_CSR);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_sub_grad_csr,
+PD_REGISTER_KERNEL(sparse_csr_subtract_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseSubtractCsrGradKernel,
@@ -326,7 +326,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_sub_grad_csr,
   kernel->InputAt(2).SetDataLayout(phi::DataLayout::SPARSE_CSR);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_mul_grad_csr,
+PD_REGISTER_KERNEL(sparse_csr_multiply_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseMultiplyCsrGradKernel,
@@ -340,7 +340,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_mul_grad_csr,
   kernel->InputAt(2).SetDataLayout(phi::DataLayout::SPARSE_CSR);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_div_grad_csr,
+PD_REGISTER_KERNEL(sparse_csr_divide_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseDivideCsrGradKernel,
@@ -355,7 +355,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_div_grad_csr,
   kernel->InputAt(3).SetDataLayout(phi::DataLayout::SPARSE_CSR);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_add_grad_coo,
+PD_REGISTER_KERNEL(sparse_coo_add_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseAddCooGradKernel,
@@ -369,7 +369,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_add_grad_coo,
   kernel->InputAt(2).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_sub_grad_coo,
+PD_REGISTER_KERNEL(sparse_coo_subtract_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseSubtractCooGradKernel,
@@ -383,7 +383,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_sub_grad_coo,
   kernel->InputAt(2).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_mul_grad_coo,
+PD_REGISTER_KERNEL(sparse_coo_multiply_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseMultiplyCooGradKernel,
@@ -397,7 +397,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_mul_grad_coo,
   kernel->InputAt(2).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_div_grad_coo,
+PD_REGISTER_KERNEL(sparse_coo_divide_grad,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseDivideCooGradKernel,
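Note: both the subtract and divide gradients reuse the renamed phi::NegativeKernel on the non-zero elements: for z = x - y, dL/dy = -dout, and for z = x / y, dL/dy = -dout * z / y (the "-dout * out / y" comment above). The dense NumPy sketch below is editor-added and only checks that algebra; the arrays and names are illustrative, not part of the commit.

import numpy as np

x = np.array([4.0, 9.0, 2.0])
y = np.array([2.0, 3.0, 8.0])
dout = np.ones_like(x)  # upstream gradient

# Subtraction: dy is the plain negation of dout (what phi::NegativeKernel computes).
dy_sub = -dout
assert np.allclose(dy_sub, [-1.0, -1.0, -1.0])

# Division: out = x / y, then dy = -dout * out / y, which equals -x / y**2.
out = x / y
dy_div = -dout * out / y
assert np.allclose(dy_div, -x / y**2)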
16 changes: 8 additions & 8 deletions paddle/phi/kernels/sparse/cpu/sparse_elementwise_kernel.cc
@@ -346,7 +346,7 @@ DEFINE_COO_ELEMENTWISE_KERNEL(Divide)
 }  // namespace sparse
 }  // namespace phi
 
-PD_REGISTER_KERNEL(sparse_elementwise_add_csr,
+PD_REGISTER_KERNEL(sparse_csr_add,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseAddCsrKernel,
@@ -359,7 +359,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_add_csr,
   kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_CSR);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_add_coo,
+PD_REGISTER_KERNEL(sparse_coo_add,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseAddCooKernel,
@@ -372,7 +372,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_add_coo,
   kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_sub_csr,
+PD_REGISTER_KERNEL(sparse_csr_subtract,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseSubtractCsrKernel,
@@ -385,7 +385,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_sub_csr,
   kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_CSR);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_sub_coo,
+PD_REGISTER_KERNEL(sparse_coo_subtract,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseSubtractCooKernel,
@@ -398,7 +398,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_sub_coo,
   kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_mul_csr,
+PD_REGISTER_KERNEL(sparse_csr_multiply,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseMultiplyCsrKernel,
@@ -411,7 +411,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_mul_csr,
   kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_CSR);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_mul_coo,
+PD_REGISTER_KERNEL(sparse_coo_multiply,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseMultiplyCooKernel,
@@ -424,7 +424,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_mul_coo,
   kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_COO);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_div_csr,
+PD_REGISTER_KERNEL(sparse_csr_divide,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseDivideCsrKernel,
@@ -437,7 +437,7 @@ PD_REGISTER_KERNEL(sparse_elementwise_div_csr,
   kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_CSR);
 }
 
-PD_REGISTER_KERNEL(sparse_elementwise_div_coo,
+PD_REGISTER_KERNEL(sparse_coo_divide,
                    CPU,
                    ALL_LAYOUT,
                    phi::sparse::ElementWiseDivideCooKernel,
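These registrations back the elementwise ops in paddle.incubate.sparse (the math.py changes below): one kernel per storage format and operation, named sparse_{csr,coo}_{add,subtract,multiply,divide}. An editor-added usage sketch follows, assuming a Paddle build that contains this commit; the tensors are illustrative.

import numpy as np
import paddle
from paddle.incubate import sparse

x = paddle.to_tensor(np.array([[0., 2.], [3., 0.]], dtype='float32'))
y = paddle.to_tensor(np.array([[0., 1.], [4., 0.]], dtype='float32'))

# The same Python call dispatches on the storage format; both inputs must
# use the same format (see the asserts in math.py below).
csr_out = sparse.add(x.to_sparse_csr(), y.to_sparse_csr())         # sparse_csr_add
coo_out = sparse.multiply(x.to_sparse_coo(2), y.to_sparse_coo(2))  # sparse_coo_multiply

print(csr_out.to_dense().numpy())  # [[0. 3.] [7. 0.]]
print(coo_out.to_dense().numpy())  # [[0. 2.] [12. 0.]]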
27 changes: 21 additions & 6 deletions python/paddle/fluid/tests/unittests/test_sparse_elementwise_op.py
@@ -52,20 +52,35 @@ def setUp(self):
 
     def func_test_csr(self, op):
         for dtype in self.support_dtypes:
-            x = np.random.randint(-255, 255, size=self.csr_shape).astype(dtype)
-            y = np.random.randint(-255, 255, size=self.csr_shape).astype(dtype)
-            dense_x = paddle.to_tensor(x).astype(dtype)
-            dense_y = paddle.to_tensor(y).astype(dtype)
-            csr_x = dense_x.to_sparse_csr()
-            csr_y = dense_y.to_sparse_csr()
+            x = np.random.randint(-255, 255, size=self.coo_shape).astype(dtype)
+            y = np.random.randint(-255, 255, size=self.coo_shape).astype(dtype)
+
+            dense_x = paddle.to_tensor(x, dtype=dtype, stop_gradient=False)
+            dense_y = paddle.to_tensor(y, dtype=dtype, stop_gradient=False)
+
+            s_dense_x = paddle.to_tensor(x, dtype=dtype, stop_gradient=False)
+            s_dense_y = paddle.to_tensor(y, dtype=dtype, stop_gradient=False)
+            csr_x = s_dense_x.to_sparse_csr()
+            csr_y = s_dense_y.to_sparse_csr()
+
             actual_res = get_actual_res(csr_x, csr_y, op)
+            actual_res.backward(actual_res)
 
             expect_res = op(dense_x, dense_y)
+            expect_res.backward(expect_res)
 
             self.assertTrue(
                 np.allclose(expect_res.numpy(),
                             actual_res.to_dense().numpy(),
                             equal_nan=True))
+            self.assertTrue(
+                np.allclose(dense_x.grad.numpy(),
+                            csr_x.grad.to_dense().numpy(),
+                            equal_nan=True))
+            self.assertTrue(
+                np.allclose(dense_y.grad.numpy(),
+                            csr_y.grad.to_dense().numpy(),
+                            equal_nan=True))
 
     def func_test_coo(self, op):
         for sparse_dim in range(len(self.coo_shape) - 1, len(self.coo_shape)):
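The rewritten test seeds backward() with the op's own output rather than the default all-ones gradient, then checks sparse gradients against dense ones. Below is an editor-added, dense-only sketch of that seeding pattern, assuming a Paddle 2.x dygraph environment (shapes and values illustrative):

import paddle

x = paddle.to_tensor([3.0, 5.0], stop_gradient=False)
y = paddle.to_tensor([1.0, 2.0], stop_gradient=False)

out = x - y
# backward(out) propagates `out` itself as the upstream gradient, so for
# subtraction x.grad == out and y.grad == -out elementwise.
out.backward(out)

assert (x.grad.numpy() == out.numpy()).all()
assert (y.grad.numpy() == -out.numpy()).all()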
52 changes: 32 additions & 20 deletions python/paddle/incubate/sparse/math.py
@@ -85,10 +85,12 @@ def add(x, y, name=None):
     assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"
     assert x.is_sparse_csr() == y.is_sparse_csr(
     ), f"Expect sparse tensor type to be same"
-    if x.is_sparse_coo() and y.is_sparse_coo():
-        return _C_ops.final_state_sparse_add_coo(x, y)
-
-    return _C_ops.final_state_sparse_add_csr(x, y)
+    if x.is_sparse_coo() or x.is_sparse_csr():
+        return _C_ops.final_state_sparse_add(x, y)
+    else:
+        raise ValueError(
+            "Currently, sparse.add only support the input of SparseCooTensor or SparseCsrTensor"
+        )
 
 
 def subtract(x, y, name=None):
@@ -135,10 +137,12 @@ def subtract(x, y, name=None):
     assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"
     assert x.is_sparse_csr() == y.is_sparse_csr(
     ), f"Expect sparse tensor type to be same"
-    if x.is_sparse_coo() and y.is_sparse_coo():
-        return _C_ops.final_state_sparse_sub_coo(x, y)
-
-    return _C_ops.final_state_sparse_sub_csr(x, y)
+    if x.is_sparse_coo() or x.is_sparse_csr():
+        return _C_ops.final_state_sparse_subtract(x, y)
+    else:
+        raise ValueError(
+            "Currently, sparse.subtract only support the input of SparseCooTensor or SparseCsrTensor"
+        )
 
 
 def multiply(x, y, name=None):
@@ -185,10 +189,12 @@ def multiply(x, y, name=None):
     assert in_dynamic_mode(), "Currently, Sparse API only support dynamic mode"
     assert x.is_sparse_csr() == y.is_sparse_csr(
     ), f"Expect sparse tensor type to be same"
-    if x.is_sparse_coo() and y.is_sparse_coo():
-        return _C_ops.final_state_sparse_mul_coo(x, y)
-
-    return _C_ops.final_state_sparse_mul_csr(x, y)
+    if x.is_sparse_coo() or x.is_sparse_csr():
+        return _C_ops.final_state_sparse_multiply(x, y)
+    else:
+        raise ValueError(
+            "Currently, sparse.multiply only support the input of SparseCooTensor or SparseCsrTensor"
+        )
 
 
 def divide(x, y, name=None):
@@ -237,12 +243,18 @@ def divide(x, y, name=None):
     ), f"Expect sparse tensor type to be same"
 
     if x.dtype in [int32, int64]:
-        cx = _cast(x, 'float32')
-        cy = _cast(y, 'float32')
-        if x.is_sparse_coo() and y.is_sparse_coo():
-            return _C_ops.final_state_sparse_div_coo(cx, cy)
-        return _C_ops.final_state_sparse_div_csr(cx, cy)
+        if x.is_sparse_coo() or x.is_sparse_csr():
+            cx = _cast(x, 'float32')
+            cy = _cast(y, 'float32')
+            return _C_ops.final_state_sparse_divide(cx, cy)
+        else:
+            raise ValueError(
+                "Currently, sparse.divide only support the input of SparseCooTensor or SparseCsrTensor"
+            )
     else:
-        if x.is_sparse_coo() and y.is_sparse_coo():
-            return _C_ops.final_state_sparse_div_coo(x, y)
-        return _C_ops.final_state_sparse_div_csr(x, y)
+        if x.is_sparse_coo() or x.is_sparse_csr():
+            return _C_ops.final_state_sparse_divide(x, y)
+        else:
+            raise ValueError(
+                "Currently, sparse.divide only support the input of SparseCooTensor or SparseCsrTensor"
+            )

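divide keeps one extra dispatch step: int32/int64 inputs are cast to float32 before the kernel call, so integer division returns floating-point values instead of truncating. An editor-added sketch of that behavior, again assuming a build with this commit and operands chosen to share a sparsity pattern:

import paddle
from paddle.incubate import sparse

a = paddle.to_tensor([[0, 4], [9, 0]], dtype='int64').to_sparse_csr()
b = paddle.to_tensor([[0, 2], [2, 0]], dtype='int64').to_sparse_csr()

# Routed through _cast(..., 'float32') first, then final_state_sparse_divide.
out = sparse.divide(a, b)
print(out.dtype)               # paddle.float32
print(out.to_dense().numpy())  # [[0.  2. ] [4.5 0. ]]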