Commit 422e801: fix docs
zkh2016 committed Apr 21, 2022
1 parent b609c2b commit 422e801
Showing 2 changed files with 19 additions and 4 deletions.
16 changes: 16 additions & 0 deletions python/paddle/fluid/tests/unittests/test_sparse_norm_op.py
@@ -69,3 +69,19 @@ def test_error_layout(self):
                 sparse_batch_norm = paddle.sparse.BatchNorm(
                     3, data_format='NCDHW')
                 sparse_batch_norm(sparse_x)
+
+    def test2(self):
+        with _test_eager_guard():
+            paddle.seed(123)
+            channels = 3
+            x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
+            dense_x = paddle.to_tensor(x_data)
+            sparse_x = dense_x.to_sparse_coo(4)
+            batch_norm = paddle.sparse.BatchNorm(channels)
+            batch_norm_out = batch_norm(sparse_x)
+            print(batch_norm_out.shape)
+            # [1, 6, 6, 6, 3]
+
+
+if __name__ == "__main__":
+    unittest.main()
7 changes: 3 additions & 4 deletions python/paddle/sparse/layer/norm.py
@@ -66,8 +66,8 @@ class BatchNorm(paddle.nn.BatchNorm1D):
     Parameters:
         num_features(int): Indicate the number of channels of the input ``Tensor``.
-        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
         momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
+        epsilon(float, optional): The small value added to the variance to prevent division by zero. Default: 1e-5.
         weight_attr(ParamAttr|bool, optional): The parameter attribute for Parameter `scale`
             of batch_norm. If it is set to None or one attribute of ParamAttr, batch_norm
             will create ParamAttr as weight_attr. If it is set to False, the weight is not learnable.
@@ -92,13 +92,12 @@ class BatchNorm(paddle.nn.BatchNorm1D):
         .. code-block:: python

           import paddle
-          import numpy as np
           from paddle.fluid.framework import _test_eager_guard

           with _test_eager_guard():
-              np.random.seed(123)
+              paddle.seed(123)
               channels = 3
-              x_data = np.random.random(size=(1, 6, 6, 6, channels)).astype('float32')
+              x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
               dense_x = paddle.to_tensor(x_data)
               sparse_x = dense_x.to_sparse_coo(4)
               batch_norm = paddle.sparse.BatchNorm(channels)
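The hunk ends before the example's final lines (presumably the unchanged forward call and print below it). For reference, a self-contained version of the updated docstring example, mirroring the new test2 case above, would look roughly like this; passing momentum and epsilon explicitly is only an illustration of the documented defaults and the reordered parameter list, not part of the actual docstring:

    import paddle
    from paddle.fluid.framework import _test_eager_guard

    with _test_eager_guard():
        paddle.seed(123)
        channels = 3
        # dense NDHWC input, converted to a COO tensor with 4 sparse dims
        x_data = paddle.randn((1, 6, 6, 6, channels)).astype('float32')
        dense_x = paddle.to_tensor(x_data)
        sparse_x = dense_x.to_sparse_coo(4)
        # momentum=0.9 and epsilon=1e-5 are the documented defaults,
        # written out here only for illustration
        batch_norm = paddle.sparse.BatchNorm(channels, momentum=0.9, epsilon=1e-5)
        batch_norm_out = batch_norm(sparse_x)
        print(batch_norm_out.shape)
        # [1, 6, 6, 6, 3]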
