Commit: fix kernel name
gsq7474741 committed Jun 22, 2022
1 parent 581f0e2 commit 89d8cc5
Showing 4 changed files with 32 additions and 32 deletions.
16 changes: 8 additions & 8 deletions paddle/phi/kernels/sparse/cpu/elementwise_grad_kernel.cc
@@ -298,7 +298,7 @@ DEFINE_ELEMENTWISE_GRAD_KERNEL(Multiply)
} // namespace sparse
} // namespace phi

-PD_REGISTER_KERNEL(add_grad_csr,
+PD_REGISTER_KERNEL(add_csr_csr_grad,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseAddCsrGradKernel,
@@ -312,7 +312,7 @@ PD_REGISTER_KERNEL(add_grad_csr,
kernel->InputAt(2).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}

-PD_REGISTER_KERNEL(subtract_grad_csr,
+PD_REGISTER_KERNEL(subtract_csr_csr_grad,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseSubtractCsrGradKernel,
@@ -326,7 +326,7 @@ PD_REGISTER_KERNEL(subtract_grad_csr,
kernel->InputAt(2).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}

-PD_REGISTER_KERNEL(multiply_grad_csr,
+PD_REGISTER_KERNEL(multiply_csr_csr_grad,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseMultiplyCsrGradKernel,
@@ -340,7 +340,7 @@ PD_REGISTER_KERNEL(multiply_grad_csr,
kernel->InputAt(2).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}

-PD_REGISTER_KERNEL(divide_grad_csr,
+PD_REGISTER_KERNEL(divide_csr_csr_grad,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseDivideCsrGradKernel,
@@ -355,7 +355,7 @@ PD_REGISTER_KERNEL(divide_grad_csr,
kernel->InputAt(3).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}

-PD_REGISTER_KERNEL(add_grad_coo,
+PD_REGISTER_KERNEL(add_coo_coo_grad,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseAddCooGradKernel,
@@ -369,7 +369,7 @@ PD_REGISTER_KERNEL(add_grad_coo,
kernel->InputAt(2).SetDataLayout(phi::DataLayout::SPARSE_COO);
}

-PD_REGISTER_KERNEL(subtract_grad_coo,
+PD_REGISTER_KERNEL(subtract_coo_coo_grad,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseSubtractCooGradKernel,
@@ -383,7 +383,7 @@ PD_REGISTER_KERNEL(subtract_grad_coo,
kernel->InputAt(2).SetDataLayout(phi::DataLayout::SPARSE_COO);
}

-PD_REGISTER_KERNEL(multiply_grad_coo,
+PD_REGISTER_KERNEL(multiply_coo_coo_grad,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseMultiplyCooGradKernel,
@@ -397,7 +397,7 @@ PD_REGISTER_KERNEL(multiply_grad_coo,
kernel->InputAt(2).SetDataLayout(phi::DataLayout::SPARSE_COO);
}

-PD_REGISTER_KERNEL(divide_grad_coo,
+PD_REGISTER_KERNEL(divide_coo_coo_grad,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseDivideCooGradKernel,
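For context on the renaming: the new kernel names spell out the sparse layout of each input, and gradient kernels keep a trailing _grad. A minimal sketch of the pattern as inferred from this diff (the kernel_name helper below is hypothetical, written only to illustrate the convention; it is not part of Paddle):

    def kernel_name(op, input_layouts, grad=False):
        # Illustrative only: build names such as "add_csr_csr" or
        # "divide_coo_coo_grad" from an op name and its input layouts.
        name = "_".join([op] + list(input_layouts))
        return name + "_grad" if grad else name

    assert kernel_name("add", ["csr", "csr"]) == "add_csr_csr"
    assert kernel_name("divide", ["coo", "coo"], grad=True) == "divide_coo_coo_grad"

The old names (add_grad_csr, divide_grad_coo, ...) encoded the layout once, after the grad marker; naming every input's layout keeps one unambiguous slot per argument, so mixed-layout kernels could be added later without another rename.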
16 changes: 8 additions & 8 deletions paddle/phi/kernels/sparse/cpu/elementwise_kernel.cc
@@ -346,7 +346,7 @@ DEFINE_COO_ELEMENTWISE_KERNEL(Divide)
} // namespace sparse
} // namespace phi

-PD_REGISTER_KERNEL(add_csr,
+PD_REGISTER_KERNEL(add_csr_csr,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseAddCsrKernel,
@@ -359,7 +359,7 @@ PD_REGISTER_KERNEL(add_csr,
kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}

-PD_REGISTER_KERNEL(add_coo,
+PD_REGISTER_KERNEL(add_coo_coo,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseAddCooKernel,
@@ -372,7 +372,7 @@ PD_REGISTER_KERNEL(add_coo,
kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_COO);
}

-PD_REGISTER_KERNEL(subtract_csr,
+PD_REGISTER_KERNEL(subtract_csr_csr,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseSubtractCsrKernel,
@@ -385,7 +385,7 @@ PD_REGISTER_KERNEL(subtract_csr,
kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}

-PD_REGISTER_KERNEL(subtract_coo,
+PD_REGISTER_KERNEL(subtract_coo_coo,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseSubtractCooKernel,
@@ -398,7 +398,7 @@ PD_REGISTER_KERNEL(subtract_coo,
kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_COO);
}

-PD_REGISTER_KERNEL(multiply_csr,
+PD_REGISTER_KERNEL(multiply_csr_csr,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseMultiplyCsrKernel,
@@ -411,7 +411,7 @@ PD_REGISTER_KERNEL(multiply_csr,
kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}

-PD_REGISTER_KERNEL(multiply_coo,
+PD_REGISTER_KERNEL(multiply_coo_coo,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseMultiplyCooKernel,
@@ -424,7 +424,7 @@ PD_REGISTER_KERNEL(multiply_coo,
kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_COO);
}

-PD_REGISTER_KERNEL(divide_csr,
+PD_REGISTER_KERNEL(divide_csr_csr,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseDivideCsrKernel,
@@ -437,7 +437,7 @@ PD_REGISTER_KERNEL(divide_csr,
kernel->InputAt(1).SetDataLayout(phi::DataLayout::SPARSE_CSR);
}

-PD_REGISTER_KERNEL(divide_coo,
+PD_REGISTER_KERNEL(divide_coo_coo,
CPU,
ALL_LAYOUT,
phi::sparse::ElementWiseDivideCooKernel,
16 changes: 8 additions & 8 deletions python/paddle/utils/code_gen/sparse_api.yaml
@@ -2,8 +2,8 @@
args : (Tensor x, Tensor y)
output : Tensor(out)
kernel :
-    func : add_coo{sparse_coo -> sparse_coo},
-           add_csr{sparse_csr -> sparse_csr}
+    func : add_coo_coo{sparse_coo -> sparse_coo},
+           add_csr_csr{sparse_csr -> sparse_csr}
layout : x
backward : add_grad

@@ -41,17 +41,17 @@
args : (Tensor x, Tensor y)
output : Tensor(out)
kernel :
-    func : divide_coo{sparse_coo -> sparse_coo},
-           divide_csr{sparse_csr -> sparse_csr}
+    func : divide_coo_coo{sparse_coo -> sparse_coo},
+           divide_csr_csr{sparse_csr -> sparse_csr}
layout : x
backward : divide_grad

- api : multiply
args : (Tensor x, Tensor y)
output : Tensor(out)
kernel :
-    func : multiply_coo{sparse_coo -> sparse_coo},
-           multiply_csr{sparse_csr -> sparse_csr}
+    func : multiply_coo_coo{sparse_coo -> sparse_coo},
+           multiply_csr_csr{sparse_csr -> sparse_csr}
layout : x
backward : multiply_grad

@@ -86,8 +86,8 @@
args : (Tensor x, Tensor y)
output : Tensor(out)
kernel :
-    func : subtract_coo{sparse_coo -> sparse_coo},
-           subtract_csr{sparse_csr -> sparse_csr}
+    func : subtract_coo_coo{sparse_coo -> sparse_coo},
+           subtract_csr_csr{sparse_csr -> sparse_csr}
layout : x
backward : subtract_grad

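These YAML entries feed Paddle's API code generator: each func line maps one combination of input layouts to one of the kernels registered above. A minimal usage sketch, assuming the paddle.incubate.sparse namespace of Paddle 2.3-era builds (the Python entry point is an assumption; this diff only shows the kernel mapping):

    import paddle

    dense_x = paddle.to_tensor([[0.0, 1.0], [2.0, 0.0]])
    dense_y = paddle.to_tensor([[0.0, 3.0], [4.0, 0.0]])

    # Two SparseCooTensor inputs: per the YAML above, dispatch goes to add_coo_coo.
    x = dense_x.to_sparse_coo(2)
    y = dense_y.to_sparse_coo(2)
    out = paddle.incubate.sparse.add(x, y)

    # Two SparseCsrTensor inputs (via to_sparse_csr()) would route to add_csr_csr.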
16 changes: 8 additions & 8 deletions python/paddle/utils/code_gen/sparse_bw_api.yaml
@@ -3,8 +3,8 @@
args : (Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad)
kernel :
-    func : add_grad_coo{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
-           add_grad_csr{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}
+    func : add_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
+           add_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}

- backward_api : conv3d_grad
forward : conv3d (Tensor x, Tensor kernel, int[] paddings, int[] dilations, int[] strides, int groups, bool subm) -> Tensor(out@SparseCooTensor), Tensor(rulebook@DenseTensor)
@@ -38,8 +38,8 @@
args : (Tensor x, Tensor y, Tensor out, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad)
kernel :
-    func : divide_grad_coo{sparse_coo, sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
-           divide_grad_csr{sparse_csr, sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}
+    func : divide_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
+           divide_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}

- backward_api : masked_matmul_grad
forward : masked_matmul(Tensor x, Tensor y, Tensor mask) -> Tensor(out)
@@ -60,8 +60,8 @@
args : (Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad)
kernel :
-    func : multiply_grad_coo{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
-           multiply_grad_csr{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}
+    func : multiply_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
+           multiply_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}

- backward_api : relu_grad
forward : relu(Tensor x) -> Tensor(out)
@@ -96,8 +96,8 @@
args : (Tensor x, Tensor y, Tensor out_grad)
output : Tensor(x_grad), Tensor(y_grad)
kernel :
-    func : subtract_grad_coo{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
-           subtract_grad_csr{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}
+    func : subtract_coo_coo_grad{sparse_coo, sparse_coo, sparse_coo -> sparse_coo, sparse_coo},
+           subtract_csr_csr_grad{sparse_csr, sparse_csr, sparse_csr -> sparse_csr, sparse_csr}

- backward_api : tanh_grad
forward : tanh(Tensor x) -> Tensor(out)
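The backward entries route (x, y, out_grad) into the matching *_grad kernels; in the bracketed signatures, the layouts before the arrow are inputs and those after it are outputs. A sketch of the gradient path under the same namespace assumption as above; reducing through .values() to a scalar is only one convenient way to trigger backward here and is itself an assumption, not shown in this diff:

    import paddle

    x = paddle.to_tensor([[0.0, 1.0], [2.0, 0.0]]).to_sparse_coo(2)
    y = paddle.to_tensor([[0.0, 3.0], [4.0, 0.0]]).to_sparse_coo(2)
    x.stop_gradient = False
    y.stop_gradient = False

    out = paddle.incubate.sparse.add(x, y)  # forward: add_coo_coo
    out.values().sum().backward()           # backward: add_coo_coo_grad
    print(x.grad)                           # gradient w.r.t. the COO input x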

1 comment on commit 89d8cc5

@paddle-bot-old (bot) commented on 89d8cc5, Jun 22, 2022

🕵️ CI failures summary

🔍 PR: #41857 Commit ID: 89d8cc5 contains failed CI.

🔹 Failed: PR-CI-APPROVAL

Unknown Failed
Unknown Failed
