Skip to content

Commit

Permalink
[Sparse] Add sparse addmm kernel (dense+coo*dense->dense, dense+csr*dense->dense)

Browse files Browse the repository at this point in the history
  • Loading branch information
zhwesky2010 committed Jul 21, 2022
1 parent 3f70b1d commit 9b98dd8
Show file tree
Hide file tree
Showing 12 changed files with 726 additions and 3 deletions.
11 changes: 11 additions & 0 deletions paddle/phi/api/yaml/sparse_api.yaml
Expand Up @@ -266,6 +266,17 @@
layout : x
backward : values_grad

# Sparse addmm, presumably out = beta * input + alpha * (x @ y)
# (torch-style coefficient placement) -- TODO confirm against the kernels.
# Only the {dense, sparse, dense -> dense} variants have CPU/GPU entries in
# this commit; the all-sparse variants are declared but not yet implemented.
- api: addmm
  args : (Tensor input, Tensor x, Tensor y, float alpha=1.0, float beta=1.0)
  output : Tensor(out)
  kernel :
    func : addmm_csr_dense {dense, sparse_csr, dense -> dense},
           addmm_csr_csr {sparse_csr, sparse_csr, sparse_csr -> sparse_csr},
           addmm_coo_dense {dense, sparse_coo, dense -> dense},
           addmm_coo_coo {sparse_coo, sparse_coo, sparse_coo -> sparse_coo}
  layout : x
  backward: addmm_grad

- api: coalesce
args : (Tensor x)
output : Tensor(out)
Expand Down
12 changes: 11 additions & 1 deletion paddle/phi/api/yaml/sparse_bw_api.yaml
Expand Up @@ -30,6 +30,16 @@
func : add_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
add_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}

# Backward of sparse addmm: produces input_grad, x_grad and y_grad from
# out_grad. Kernel variants mirror the forward dispatch (the layout of each
# grad output matches the layout of the corresponding forward input).
- backward_api : addmm_grad
  forward : addmm(Tensor input, Tensor x, Tensor y, float alpha=1.0, float beta=1.0) -> Tensor(out)
  args : (Tensor input, Tensor x, Tensor y, Tensor out_grad, float alpha=1.0, float beta=1.0)
  output : Tensor(input_grad), Tensor(x_grad), Tensor(y_grad)
  kernel :
    func : addmm_csr_dense_grad {dense, sparse_csr, dense, dense -> dense, sparse_csr, dense},
           addmm_csr_csr_grad {sparse_csr, sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr, sparse_csr},
           addmm_coo_dense_grad {dense, sparse_coo, dense, dense -> dense, sparse_coo, dense},
           addmm_coo_coo_grad {sparse_coo, sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo, sparse_coo}

- backward_api : asin_grad
forward : asin(Tensor x) -> Tensor(out)
args : (Tensor x, Tensor out_grad)
Expand Down Expand Up @@ -255,7 +265,7 @@
- backward_api: fused_attention_grad
forward : fused_attention_csr(Tensor query, Tensor key, Tensor value, Tensor sparse_mask, Tensor key_padding_mask, Tensor attn_mask) -> Tensor(out), Tensor(softmax)
args: (Tensor query, Tensor key, Tensor value, Tensor softmax, Tensor out_grad)
output : Tensor(query_grad), Tensor(key_grad), Tensor(value_grad)
output : Tensor(query_grad), Tensor(key_grad), Tensor(value_grad)
kernel :
func : fused_attention_csr_grad{dense, dense, dense, sparse_csr, dense -> dense, dense, dense}
layout : softmax
Expand Down
77 changes: 77 additions & 0 deletions paddle/phi/kernels/sparse/addmm_grad_kernel.h
@@ -0,0 +1,77 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"

namespace phi {
namespace sparse {

// TODO(zhouwei25): implement Backward of " COO + COO @ COO -> COO"
// Grad of addmm where input/x/y are all COO tensors. Declared so the API
// layer can bind it, but no backend registers an implementation yet.
// dout is the incoming gradient of the forward output; dinput/dx/dy receive
// the gradients w.r.t. the corresponding forward inputs.
template <typename T, typename Context>
void AddmmCooCooGradKernel(const Context& dev_ctx,
                           const SparseCooTensor& input,
                           const SparseCooTensor& x,
                           const SparseCooTensor& y,
                           const SparseCooTensor& dout,
                           float alpha,
                           float beta,
                           SparseCooTensor* dinput,
                           SparseCooTensor* dx,
                           SparseCooTensor* dy);

// Backward of "DENSE + COO @ DENSE -> DENSE"
// dout is the incoming gradient of the forward output; dinput (dense), dx
// (COO, gradient only on x's sparsity pattern) and dy (dense) receive the
// gradients w.r.t. the forward inputs. Forward is assumed to be
// out = beta * input + alpha * (x @ y) -- TODO confirm against the kernels.
template <typename T, typename Context>
void AddmmCooDenseGradKernel(const Context& dev_ctx,
                             const DenseTensor& input,
                             const SparseCooTensor& x,
                             const DenseTensor& y,
                             const DenseTensor& dout,
                             float alpha,
                             float beta,
                             DenseTensor* dinput,
                             SparseCooTensor* dx,
                             DenseTensor* dy);

// TODO(zhouwei25): implement Backward of " CSR + CSR @ CSR -> CSR"
// Grad of addmm where input/x/y are all CSR tensors. Declared so the API
// layer can bind it, but no backend registers an implementation yet.
template <typename T, typename Context>
void AddmmCsrCsrGradKernel(const Context& dev_ctx,
                           const SparseCsrTensor& input,
                           const SparseCsrTensor& x,
                           const SparseCsrTensor& y,
                           const SparseCsrTensor& dout,
                           float alpha,
                           float beta,
                           SparseCsrTensor* dinput,
                           SparseCsrTensor* dx,
                           SparseCsrTensor* dy);

// Backward of "DENSE + CSR @ DENSE -> DENSE"
// dout is the incoming gradient of the forward output; dinput (dense), dx
// (CSR, gradient only on x's sparsity pattern) and dy (dense) receive the
// gradients w.r.t. the forward inputs.
template <typename T, typename Context>
void AddmmCsrDenseGradKernel(const Context& dev_ctx,
                             const DenseTensor& input,
                             const SparseCsrTensor& x,
                             const DenseTensor& y,
                             const DenseTensor& dout,
                             float alpha,
                             float beta,
                             DenseTensor* dinput,
                             SparseCsrTensor* dx,
                             DenseTensor* dy);

} // namespace sparse
} // namespace phi
65 changes: 65 additions & 0 deletions paddle/phi/kernels/sparse/addmm_kernel.h
@@ -0,0 +1,65 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#pragma once

#include "paddle/phi/core/dense_tensor.h"
#include "paddle/phi/core/sparse_coo_tensor.h"
#include "paddle/phi/core/sparse_csr_tensor.h"

namespace phi {
namespace sparse {

// TODO(zhouwei25): implement " COO + COO @ COO -> COO"
// All-COO addmm. Declared so the API layer can bind it, but no backend
// registers an implementation yet.
template <typename T, typename Context>
void AddmmCooCooKernel(const Context& dev_ctx,
                       const SparseCooTensor& input,
                       const SparseCooTensor& x,
                       const SparseCooTensor& y,
                       float alpha,
                       float beta,
                       SparseCooTensor* out);

// DENSE + COO @ DENSE -> DENSE
// Presumably computes out = beta * input + alpha * (x @ y) with x sparse COO
// (torch-style addmm contract) -- TODO confirm against the implementations.
template <typename T, typename Context>
void AddmmCooDenseKernel(const Context& dev_ctx,
                         const DenseTensor& input,
                         const SparseCooTensor& x,
                         const DenseTensor& y,
                         float alpha,
                         float beta,
                         DenseTensor* out);

// TODO(zhouwei25): implement " CSR + CSR @ CSR -> CSR"
// All-CSR addmm. Declared so the API layer can bind it, but no backend
// registers an implementation yet.
template <typename T, typename Context>
void AddmmCsrCsrKernel(const Context& dev_ctx,
                       const SparseCsrTensor& input,
                       const SparseCsrTensor& x,
                       const SparseCsrTensor& y,
                       float alpha,
                       float beta,
                       SparseCsrTensor* out);

// DENSE + CSR @ DENSE -> DENSE
// Presumably computes out = beta * input + alpha * (x @ y) with x sparse CSR
// (torch-style addmm contract) -- TODO confirm against the implementations.
template <typename T, typename Context>
void AddmmCsrDenseKernel(const Context& dev_ctx,
                         const DenseTensor& input,
                         const SparseCsrTensor& x,
                         const DenseTensor& y,
                         float alpha,
                         float beta,
                         DenseTensor* out);

} // namespace sparse
} // namespace phi
72 changes: 72 additions & 0 deletions paddle/phi/kernels/sparse/cpu/addmm_grad_kernel.cc
@@ -0,0 +1,72 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/sparse/addmm_grad_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"

namespace phi {
namespace sparse {

// Backward of "DENSE + COO @ DENSE -> DENSE".
// CPU stub: unconditionally throws Unimplemented so callers fail loudly
// instead of silently producing wrong gradients. All parameters are
// intentionally unused until a real CPU implementation lands.
template <typename T, typename Context>
void AddmmCooDenseGradKernel(const Context& dev_ctx,
                             const DenseTensor& input,
                             const SparseCooTensor& x,
                             const DenseTensor& y,
                             const DenseTensor& dout,
                             float alpha,
                             float beta,
                             DenseTensor* dinput,
                             SparseCooTensor* dx,
                             DenseTensor* dy) {
  PADDLE_THROW(phi::errors::Unimplemented(
      "Not support CPU backward kernel of 'sparse.addmm' now."));
}

// Backward of "DENSE + CSR @ DENSE -> DENSE".
// CPU stub: unconditionally throws Unimplemented so callers fail loudly
// instead of silently producing wrong gradients. All parameters are
// intentionally unused until a real CPU implementation lands.
template <typename T, typename Context>
void AddmmCsrDenseGradKernel(const Context& dev_ctx,
                             const DenseTensor& input,
                             const SparseCsrTensor& x,
                             const DenseTensor& y,
                             const DenseTensor& dout,
                             float alpha,
                             float beta,
                             DenseTensor* dinput,
                             SparseCsrTensor* dx,
                             DenseTensor* dy) {
  PADDLE_THROW(phi::errors::Unimplemented(
      "Not support CPU backward kernel of 'sparse.addmm' now."));
}

} // namespace sparse
} // namespace phi

// Register the CPU stub for addmm's COO backward so dispatch resolves
// (it throws Unimplemented when actually invoked).
PD_REGISTER_KERNEL(addmm_coo_dense_grad,
                   CPU,
                   ALL_LAYOUT,
                   phi::sparse::AddmmCooDenseGradKernel,
                   float,
                   double) {
  // The sparse COO operand is `x`, at argument index 1; index 0 is the dense
  // `input` tensor, so the sparse layout tag must be set on InputAt(1).
  kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_COO);
}

// Register the CPU stub for addmm's CSR backward so dispatch resolves
// (it throws Unimplemented when actually invoked).
PD_REGISTER_KERNEL(addmm_csr_dense_grad,
                   CPU,
                   ALL_LAYOUT,
                   phi::sparse::AddmmCsrDenseGradKernel,
                   float,
                   double) {
  // The sparse CSR operand is `x`, at argument index 1; index 0 is the dense
  // `input` tensor, so the sparse layout tag must be set on InputAt(1).
  kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}
67 changes: 67 additions & 0 deletions paddle/phi/kernels/sparse/cpu/addmm_kernel.cc
@@ -0,0 +1,67 @@
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */

#include "paddle/phi/kernels/sparse/addmm_kernel.h"

#include "paddle/phi/backends/cpu/cpu_context.h"
#include "paddle/phi/core/kernel_registry.h"
namespace phi {
namespace sparse {

// DENSE + COO @ DENSE -> DENSE
// CPU stub: unconditionally throws Unimplemented so callers fail loudly
// instead of silently returning an uncomputed `out`. All parameters are
// intentionally unused until a real CPU implementation lands.
template <typename T, typename Context>
void AddmmCooDenseKernel(const Context& dev_ctx,
                         const DenseTensor& input,
                         const SparseCooTensor& x,
                         const DenseTensor& y,
                         float alpha,
                         float beta,
                         DenseTensor* out) {
  PADDLE_THROW(phi::errors::Unimplemented(
      "Not support CPU kernel of 'sparse.addmm' now."));
}

// DENSE + CSR @ DENSE -> DENSE
// CPU stub: unconditionally throws Unimplemented so callers fail loudly
// instead of silently returning an uncomputed `out`. All parameters are
// intentionally unused until a real CPU implementation lands.
template <typename T, typename Context>
void AddmmCsrDenseKernel(const Context& dev_ctx,
                         const DenseTensor& input,
                         const SparseCsrTensor& x,
                         const DenseTensor& y,
                         float alpha,
                         float beta,
                         DenseTensor* out) {
  PADDLE_THROW(phi::errors::Unimplemented(
      "Not support CPU kernel of 'sparse.addmm' now."));
}

} // namespace sparse
} // namespace phi

// Register the CPU stub for the COO forward so dispatch resolves
// (it throws Unimplemented when actually invoked).
PD_REGISTER_KERNEL(addmm_coo_dense,
                   CPU,
                   ALL_LAYOUT,
                   phi::sparse::AddmmCooDenseKernel,
                   float,
                   double) {
  // The sparse COO operand is `x`, at argument index 1; index 0 is the dense
  // `input` tensor, so the sparse layout tag must be set on InputAt(1).
  kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_COO);
}

// Register the CPU stub for the CSR forward so dispatch resolves
// (it throws Unimplemented when actually invoked).
PD_REGISTER_KERNEL(addmm_csr_dense,
                   CPU,
                   ALL_LAYOUT,
                   phi::sparse::AddmmCsrDenseKernel,
                   float,
                   double) {
  // The sparse CSR operand is `x`, at argument index 1; index 0 is the dense
  // `input` tensor, so the sparse layout tag must be set on InputAt(1).
  kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}
4 changes: 2 additions & 2 deletions paddle/phi/kernels/sparse/cpu/matmul_grad_kernel.cc
Expand Up @@ -29,7 +29,7 @@ void MatmulCsrDenseGradKernel(const Context& dev_ctx,
SparseCsrTensor* dx,
DenseTensor* dy) {
PADDLE_THROW(phi::errors::Unimplemented(
"Not support CPU backward kernel of Sparse Matmul now."));
"Not support CPU backward kernel of 'sparse.matmul' now."));
}

// TODO(zhouwei25): implement CPU kernel of " DENSE @ DENSE * CSR_MASK -> CSR"
Expand All @@ -41,7 +41,7 @@ void MaskedMatmulCsrGradKernel(const Context& dev_ctx,
DenseTensor* dx,
DenseTensor* dy) {
PADDLE_THROW(phi::errors::Unimplemented(
"Not support CPU backward kernel of Matmul Mask As Sparse now."));
"Not support CPU backward kernel of 'sparse.masked_matmul' now."));
}

} // namespace sparse
Expand Down

0 comments on commit 9b98dd8

Please sign in to comment.