[Sparse] Add sparse unary API (sin/tan/pow/neg/log1p/square/cast...) #44022

Merged (1 commit, Jul 12, 2022)
25 changes: 25 additions & 0 deletions paddle/fluid/pybind/eager_method.cc
@@ -1473,6 +1473,27 @@ static PyObject* tensor_method_get_map_tensor(TensorObject* self,
EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor_method_get_non_zero_nums(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
EAGER_TRY
PADDLE_ENFORCE(
self->tensor.is_sparse_coo_tensor() ||
self->tensor.is_sparse_csr_tensor(),
paddle::platform::errors::Fatal("this method is only effective for "
"SparseCooTensor or SparseCsrTensor"));
if (self->tensor.is_sparse_coo_tensor()) {
auto sparse_coo_tensor =
std::dynamic_pointer_cast<phi::SparseCooTensor>(self->tensor.impl());
return ToPyObject(sparse_coo_tensor->nnz());
} else {
auto sparse_csr_tensor =
std::dynamic_pointer_cast<phi::SparseCsrTensor>(self->tensor.impl());
return ToPyObject(sparse_csr_tensor->nnz());
}
EAGER_CATCH_AND_THROW_RETURN_NULL
}

static PyObject* tensor_method_get_non_zero_indices(TensorObject* self,
PyObject* args,
PyObject* kwargs) {
@@ -1962,6 +1983,10 @@ PyMethodDef variable_methods[] = {
METH_VARARGS | METH_KEYWORDS,
NULL},
/***the method of sparse tensor****/
{"nnz",
(PyCFunction)(void (*)(void))tensor_method_get_non_zero_nums,
METH_VARARGS | METH_KEYWORDS,
NULL},
{"indices",
(PyCFunction)(void (*)(void))tensor_method_get_non_zero_indices,
METH_VARARGS | METH_KEYWORDS,
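For reference, a minimal Python-side sketch of how the newly bound `nnz` method (and the existing `indices` binding) would be exercised. The `paddle.incubate.sparse.sparse_coo_tensor` constructor and module path are assumptions based on Paddle's sparse API at the time; they are not part of this diff:

```python
import paddle

# Assumption: sparse_coo_tensor lives under paddle.incubate.sparse in this release.
indices = [[0, 1, 2], [1, 2, 0]]   # 2 x nnz coordinate list
values = [1.0, 2.0, 3.0]
coo = paddle.incubate.sparse.sparse_coo_tensor(indices, values, shape=[3, 3])

# nnz() is the method registered above via tensor_method_get_non_zero_nums.
print(coo.nnz())      # -> 3, number of stored non-zero elements
print(coo.indices())  # -> 2 x 3 integer tensor of coordinates
```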
1 change: 1 addition & 0 deletions paddle/phi/api/yaml/generator/sparse_bw_api_gen.py
@@ -109,6 +109,7 @@ def source_include(header_file_path):

#include "glog/logging.h"

#include "paddle/phi/api/include/sparse_api.h"
#include "paddle/phi/api/lib/api_gen_utils.h"
#include "paddle/phi/api/lib/kernel_dispatch.h"
#include "paddle/phi/api/lib/sparse_api_custom_impl.h"
168 changes: 151 additions & 17 deletions paddle/phi/api/yaml/sparse_api.yaml
@@ -1,12 +1,85 @@
- api : abs
args : (Tensor x)
output : Tensor(out)
kernel :
func : abs_coo{sparse_coo -> sparse_coo},
abs_csr{sparse_csr -> sparse_csr}
layout : x
backward : abs_grad

- api : acos
args : (Tensor x)
output : Tensor(out)
kernel :
func : acos_coo{sparse_coo -> sparse_coo},
acos_csr{sparse_csr -> sparse_csr}
layout : x
backward : acos_grad

- api : acosh
args : (Tensor x)
output : Tensor(out)
kernel :
func : acosh_coo{sparse_coo -> sparse_coo},
acosh_csr{sparse_csr -> sparse_csr}
layout : x
backward : acosh_grad

- api : add
args : (Tensor x, Tensor y)
output : Tensor(out)
kernel :
func : add_coo_coo{sparse_coo -> sparse_coo},
add_csr_csr{sparse_csr -> sparse_csr}
func : add_coo_coo{sparse_coo, sparse_coo -> sparse_coo},
add_csr_csr{sparse_csr, sparse_csr -> sparse_csr}
layout : x
backward : add_grad

- api : asin
args : (Tensor x)
output : Tensor(out)
kernel :
func : asin_coo{sparse_coo -> sparse_coo},
asin_csr{sparse_csr -> sparse_csr}
layout : x
backward : asin_grad

- api : asinh
args : (Tensor x)
output : Tensor(out)
kernel :
func : asinh_coo{sparse_coo -> sparse_coo},
asinh_csr{sparse_csr -> sparse_csr}
layout : x
backward : asinh_grad

- api : atan
args : (Tensor x)
output : Tensor(out)
kernel :
func : atan_coo{sparse_coo -> sparse_coo},
atan_csr{sparse_csr -> sparse_csr}
layout : x
backward : atan_grad

- api : atanh
args : (Tensor x)
output : Tensor(out)
kernel :
func : atanh_coo{sparse_coo -> sparse_coo},
atanh_csr{sparse_csr -> sparse_csr}
layout : x
backward : atanh_grad

- api : cast
args : (Tensor x, DataType index_dtype=DataType::UNDEFINED, DataType value_dtype=DataType::UNDEFINED)
output : Tensor(out)
kernel :
func : cast_coo{sparse_coo -> sparse_coo},
cast_csr{sparse_csr -> sparse_csr}
layout : x
data_type : x
backward : cast_grad

- api : conv3d
args : (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm)
output : Tensor(out), Tensor(rulebook)
@@ -41,38 +114,81 @@
args : (Tensor x, Tensor y)
output : Tensor(out)
kernel :
func : divide_coo_coo{sparse_coo -> sparse_coo},
divide_csr_csr{sparse_csr -> sparse_csr}
func : divide_coo_coo{sparse_coo, sparse_coo -> sparse_coo},
divide_csr_csr{sparse_csr, sparse_csr -> sparse_csr}
layout : x
backward : divide_grad

- api : divide_scalar
args : (Tensor x, float scalar)
output : Tensor(out)
kernel :
func : divide_coo_scalar{sparse_coo -> sparse_coo},
divide_csr_scalar{sparse_csr -> sparse_csr}
backward : divide_scalar_grad

- api : log1p
args : (Tensor x)
output : Tensor(out)
kernel :
func : log1p_coo{sparse_coo -> sparse_coo},
log1p_csr{sparse_csr -> sparse_csr}
layout : x
backward : log1p_grad

- api : multiply
args : (Tensor x, Tensor y)
output : Tensor(out)
kernel :
func : multiply_coo_coo{sparse_coo -> sparse_coo},
multiply_csr_csr{sparse_csr -> sparse_csr}
func : multiply_coo_coo{sparse_coo, sparse_coo -> sparse_coo},
multiply_csr_csr{sparse_csr, sparse_csr -> sparse_csr}
layout : x
backward : multiply_grad

- api : pow
args : (Tensor x, float factor)
output : Tensor(out)
kernel :
func : pow_coo{sparse_coo -> sparse_coo},
pow_csr{sparse_csr -> sparse_csr}
layout : x
backward : pow_grad

- api : relu
args : (Tensor x)
output : Tensor(out)
kernel :
func : sparse_coo_relu{sparse_coo -> sparse_coo},
sparse_csr_relu{sparse_csr -> sparse_csr}
func : relu_coo{sparse_coo -> sparse_coo},
relu_csr{sparse_csr -> sparse_csr}
layout : x
backward : relu_grad

- api : scale
args : (Tensor x, float scale, float bias, bool bias_after_scale)
output : Tensor(out)
kernel :
func : scale_coo{sparse_coo -> sparse_coo},
scale_csr{sparse_csr -> sparse_csr}
backward : scale_grad

- api : sin
args : (Tensor x)
output : Tensor(out@SparseCooTensor)
output : Tensor(out)
kernel :
func : sparse_coo_sin {sparse_coo -> sparse_coo},
sparse_csr_sin {sparse_csr -> sparse_csr}
func : sin_coo{sparse_coo -> sparse_coo},
sin_csr{sparse_csr -> sparse_csr}
layout : x
backward : sin_grad

- api : sinh
args : (Tensor x)
output : Tensor(out)
kernel :
func : sinh_coo{sparse_coo -> sparse_coo},
sinh_csr{sparse_csr -> sparse_csr}
layout : x
backward : sinh_grad

- api : softmax
args : (Tensor x, int axis=-1)
output : Tensor(out)
@@ -85,26 +201,44 @@
args : (Tensor x)
output : Tensor(out)
kernel :
func : sparse_coo_sqrt{sparse_coo -> sparse_coo},
sparse_csr_sqrt{sparse_csr -> sparse_csr}
func : sqrt_coo{sparse_coo -> sparse_coo},
sqrt_csr{sparse_csr -> sparse_csr}
layout : x
backward : sqrt_grad

- api : square
args : (Tensor x)
output : Tensor(out)
kernel :
func : square_coo{sparse_coo -> sparse_coo},
square_csr{sparse_csr -> sparse_csr}
layout : x
backward : square_grad

- api : subtract
args : (Tensor x, Tensor y)
output : Tensor(out)
kernel :
func : subtract_coo_coo{sparse_coo -> sparse_coo},
subtract_csr_csr{sparse_csr -> sparse_csr}
func : subtract_coo_coo{sparse_coo, sparse_coo -> sparse_coo},
subtract_csr_csr{sparse_csr, sparse_csr -> sparse_csr}
layout : x
backward : subtract_grad

- api : tan
args : (Tensor x)
output : Tensor(out)
kernel :
func : tan_coo{sparse_coo -> sparse_coo},
tan_csr{sparse_csr -> sparse_csr}
layout : x
backward : tan_grad

- api : tanh
args : (Tensor x)
output : Tensor(out)
kernel :
func : sparse_coo_tanh{sparse_coo -> sparse_coo},
sparse_csr_tanh{sparse_csr -> sparse_csr}
func : tanh_coo{sparse_coo -> sparse_coo},
tanh_csr{sparse_csr -> sparse_csr}
layout : x
backward : tanh_grad

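From these YAML entries the code generator emits the C++ forward and backward sparse APIs, which this PR then exposes to Python. A hedged usage sketch, assuming the ops are surfaced under `paddle.incubate.sparse` (the module path, the `to_sparse_coo` helper, and the keyword names are assumptions, not confirmed by this diff):

```python
import paddle

dense = paddle.to_tensor([[0.0, 1.0], [2.0, 0.0]])
coo = dense.to_sparse_coo(2)  # assumption: dense Tensor -> SparseCooTensor helper

# Each unary op maps to a *_coo / *_csr kernel pair declared above and
# operates element-wise on the stored non-zero values.
y_sin = paddle.incubate.sparse.sin(coo)
y_pow = paddle.incubate.sparse.pow(coo, 2.0)
y_cast = paddle.incubate.sparse.cast(coo, value_dtype=paddle.float64)

print(y_sin.to_dense())
print(y_pow.nnz())
```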