[PaddleHackathon No.14] #41183

Merged · 21 commits · merged Jun 17, 2022

Changes from 1 commit
@@ -0,0 +1,190 @@
import unittest

import numpy as np
import paddle


def call_MultiLabelSoftMarginLoss_layer(input,
                                        label,
                                        weight=None,
                                        reduction='mean'):
    multilabel_margin_loss = paddle.nn.MultiLabelSoftMarginLoss(
        weight=weight, reduction=reduction)
    res = multilabel_margin_loss(input=input, label=label)
    return res


def call_MultiLabelSoftMarginLoss_functional(input,
                                             label,
                                             weight=None,
                                             reduction='mean'):
    res = paddle.nn.functional.multi_label_soft_margin_loss(
        input, label, weight=weight, reduction=reduction)
    return res


def test_static(place,
                input_np,
                label_np,
                weight_np=None,
                reduction='mean',
                functional=False):
    paddle.enable_static()
    prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(prog, startup_prog):
        input = paddle.static.data(
            name='input', shape=input_np.shape, dtype='float64')
        label = paddle.static.data(
            name='label', shape=label_np.shape, dtype='float64')
        feed_dict = {"input": input_np, "label": label_np}
        weight = None
        if weight_np is not None:
            weight = paddle.static.data(
                name='weight', shape=weight_np.shape, dtype='float64')
            feed_dict['weight'] = weight_np

        if functional:
            res = call_MultiLabelSoftMarginLoss_functional(
                input=input, label=label, weight=weight, reduction=reduction)
        else:
            res = call_MultiLabelSoftMarginLoss_layer(
                input=input, label=label, weight=weight, reduction=reduction)

        exe = paddle.static.Executor(place)
        static_result = exe.run(prog, feed=feed_dict, fetch_list=[res])
    return static_result

def test_dygraph(place,
                 input_np,
                 label_np,
                 weight=None,
                 reduction='mean',
                 functional=False):
    # NOTE: `place` is accepted for symmetry with test_static; the guard
    # below runs on the current default device.
    with paddle.fluid.dygraph.base.guard():
        input = paddle.to_tensor(input_np)
        label = paddle.to_tensor(label_np)
        if weight is not None:
            weight = paddle.to_tensor(weight)

        if functional:
            dy_res = call_MultiLabelSoftMarginLoss_functional(
                input=input, label=label, weight=weight, reduction=reduction)
        else:
            dy_res = call_MultiLabelSoftMarginLoss_layer(
                input=input, label=label, weight=weight, reduction=reduction)
        dy_result = dy_res.numpy()
        return dy_result


def calc_multilabel_margin_loss(input,
                                label,
                                weight=None,
                                reduction="mean"):
    # NumPy reference implementation of the one-versus-all log-sigmoid loss.
    def LogSigmoid(x):
        return np.log(1 / (1 + np.exp(-x)))

    loss = -(label * LogSigmoid(input) + (1 - label) * LogSigmoid(-input))

    if weight is not None:
        loss = loss * weight

    loss = loss.mean(axis=-1)  # only return N loss values

    if reduction == "none":
        return loss
    elif reduction == "mean":
        return np.mean(loss)
    elif reduction == "sum":
        return np.sum(loss)
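
(Editorial aside, not part of this PR: the closed-form LogSigmoid above can overflow, since np.exp(-x) blows up for large negative x; it is fine for the bounded uniform(0.1, 0.8) test data used here. A numerically stable variant would use the identity log(sigmoid(x)) = min(x, 0) - log1p(exp(-|x|)), as in the sketch below; the helper name is hypothetical.)

def log_sigmoid_stable(x):
    # Stable log(sigmoid(x)): the exponent -|x| is always non-positive,
    # so np.exp never overflows and log1p stays accurate near zero.
    return np.minimum(x, 0) - np.log1p(np.exp(-np.abs(x)))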


class TestMultiLabelMarginLoss(unittest.TestCase):
    def test_MultiLabelSoftMarginLoss(self):
        input = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64)
        label = np.random.randint(0, 2, size=(20, 30)).astype(np.float64)

        places = ['cpu']
        if paddle.device.is_compiled_with_cuda():
            places.append('gpu')
        reductions = ['sum', 'mean', 'none']
        for place in places:
            for reduction in reductions:
                expected = calc_multilabel_margin_loss(
                    input=input, label=label, reduction=reduction)

                dy_result = test_dygraph(
                    place=place, input_np=input, label_np=label,
                    reduction=reduction)

                static_result = test_static(
                    place=place, input_np=input, label_np=label,
                    reduction=reduction)
                self.assertTrue(np.allclose(static_result, expected))
                self.assertTrue(np.allclose(static_result, dy_result))
                self.assertTrue(np.allclose(dy_result, expected))

                static_functional = test_static(
                    place=place, input_np=input, label_np=label,
                    reduction=reduction, functional=True)
                dy_functional = test_dygraph(
                    place=place, input_np=input, label_np=label,
                    reduction=reduction, functional=True)
                self.assertTrue(np.allclose(static_functional, expected))
                self.assertTrue(np.allclose(static_functional, dy_functional))
                self.assertTrue(np.allclose(dy_functional, expected))

    def test_MultiLabelSoftMarginLoss_error(self):
        paddle.disable_static()
        self.assertRaises(
            ValueError,
            paddle.nn.MultiLabelSoftMarginLoss,
            reduction="unsupport reduction")
        input = paddle.to_tensor([[0.1, 0.3]], dtype='float32')
        label = paddle.to_tensor([[0.0, 1.0]], dtype='float32')
        self.assertRaises(
            ValueError,
            paddle.nn.functional.multi_label_soft_margin_loss,
            input=input,
            label=label,
            reduction="unsupport reduction")
        paddle.enable_static()

    def test_MultiLabelSoftMarginLoss_weights(self):
        input = np.random.uniform(0.1, 0.8, size=(20, 30)).astype(np.float64)
        label = np.random.randint(0, 2, size=(20, 30)).astype(np.float64)
        weight = np.random.randint(0, 2, size=(20, 30)).astype(np.float64)
        place = 'cpu'
        reduction = 'mean'
        expected = calc_multilabel_margin_loss(
            input=input, label=label, weight=weight, reduction=reduction)

        dy_result = test_dygraph(
            place=place, input_np=input, label_np=label, weight=weight,
            reduction=reduction)

        static_result = test_static(
            place=place, input_np=input, label_np=label, weight_np=weight,
            reduction=reduction)
        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))

        static_functional = test_static(
            place=place, input_np=input, label_np=label, weight_np=weight,
            reduction=reduction, functional=True)
        dy_functional = test_dygraph(
            place=place, input_np=input, label_np=label, weight=weight,
            reduction=reduction, functional=True)
        self.assertTrue(np.allclose(static_functional, expected))
        self.assertTrue(np.allclose(static_functional, dy_functional))
        self.assertTrue(np.allclose(dy_functional, expected))

    def test_MultiLabelSoftMarginLoss_dimension(self):
        paddle.disable_static()

        input = paddle.to_tensor([[0.1, 0.3], [1, 2]], dtype='float32')
        label = paddle.to_tensor([[0.2, 0.1]], dtype='float32')
        self.assertRaises(
            ValueError,
            paddle.nn.functional.multi_label_soft_margin_loss,
            input=input,
            label=label)
        paddle.enable_static()

if __name__ == "__main__":
unittest.main()
2 changes: 2 additions & 0 deletions python/paddle/nn/__init__.py
@@ -106,6 +106,7 @@
from .layer.loss import CTCLoss # noqa: F401
from .layer.loss import SmoothL1Loss # noqa: F401
from .layer.loss import HingeEmbeddingLoss # noqa: F401
from .layer.loss import MultiLabelSoftMarginLoss  # noqa: F401
from .layer.norm import BatchNorm # noqa: F401
from .layer.norm import SyncBatchNorm # noqa: F401
from .layer.norm import GroupNorm # noqa: F401
@@ -313,4 +314,5 @@ def weight_norm(*args):
'MaxUnPool3D',
'HingeEmbeddingLoss',
'Identity',
'MultiLabelSoftMarginLoss',
]
2 changes: 2 additions & 0 deletions python/paddle/nn/functional/__init__.py
@@ -89,6 +89,7 @@
from .loss import square_error_cost # noqa: F401
from .loss import ctc_loss # noqa: F401
from .loss import hinge_embedding_loss # noqa: F401
from .loss import multi_label_soft_margin_loss  # noqa: F401
from .norm import batch_norm # noqa: F401
from .norm import instance_norm # noqa: F401
from .norm import layer_norm # noqa: F401
@@ -228,4 +229,5 @@
'class_center_sample',
'sparse_attention',
'fold',
'multi_label_soft_margin_loss',
]
76 changes: 76 additions & 0 deletions python/paddle/nn/functional/loss.py
@@ -2225,3 +2225,79 @@ def hinge_embedding_loss(input, label, margin=1.0, reduction='mean', name=None):
return paddle.sum(loss, name=name)
elif reduction == 'none':
return loss

[Review — TCChenlong (Contributor), May 6, 2022]: The English doc here, like the API below, also needs blank lines in the relevant places. That can be fixed in a separate doc-only PR after this one is merged. LGTM~~
[Author reply]: 1

def multi_label_soft_margin_loss(input,
                                 label,
                                 weight=None,
                                 reduction="mean",
                                 name=None):
r"""
betterpig marked this conversation as resolved.
Show resolved Hide resolved
Parameters:
input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is number of classes, and if shape is more than 2D, this is (N, C, D1, D2,..., Dk), k >= 1.
label (Tensor): Label tensor, the data type is float32 or float64. The shape of label is the same as the shape of input.
weight (Tensor,optional): a manual rescaling weight given to each class.
If given, has to be a Tensor of size C and the data type is float32, float64.
Default is ``'None'`` .
reduction (str, optional): Indicate how to average the loss by batch_size,
the candicates are ``'none'`` | ``'mean'`` | ``'sum'``.
If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
If :attr:`reduction` is ``'sum'``, the summed loss is returned.
Default: ``'mean'``
name (str, optional): Name for the operation (optional, default is None).
For more information, please refer to :ref:`api_guide_Name`.
Shape:
input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operationoperates over all the elements.
label: N-D Tensor, same shape as the input.
weight:N-D Tensor, the shape is [N,1]
output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input.
Returns:
Tensor, The tensor variable storing the multi_label_soft_margin_loss of input and label.
Examples:
.. code-block:: python
import paddle
import paddle.nn.functional as F
input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
# label elements in {1., -1.}
label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)
loss = F.multi_label_soft_margin_loss(input, label, reduction='none')
print(loss)
# Tensor([3.49625897, 0.71111226, 0.43989015])
loss = F.multi_label_soft_margin_loss(input, label, reduction='mean')
print(loss)
# Tensor([1.54908717])
"""
    if reduction not in ['sum', 'mean', 'none']:
        raise ValueError(
            "'reduction' in 'multi_label_soft_margin_loss' should be 'sum', 'mean' or 'none', "
            "but received {}.".format(reduction))

    if not (input.shape == label.shape):
        raise ValueError(
            "The input and label should have the same dimension, "
            "but received {} != {}.".format(input.shape, label.shape))

    if not _non_static_mode():
        check_variable_and_dtype(input, 'input', ['float32', 'float64'],
                                 'multilabel_soft_margin_loss')
        check_variable_and_dtype(label, 'label', ['float32', 'float64'],
                                 'multilabel_soft_margin_loss')

    loss = -(label * paddle.nn.functional.log_sigmoid(input) +
             (1 - label) * paddle.nn.functional.log_sigmoid(-input))

    if weight is not None:
        if not _non_static_mode():
            check_variable_and_dtype(weight, 'weight', ['float32', 'float64'],
                                     'multilabel_soft_margin_loss')
        loss = loss * weight

    loss = loss.mean(axis=-1)  # only return N loss values

    if reduction == "none":
        return loss
    elif reduction == "mean":
        return paddle.mean(loss)
    elif reduction == "sum":
        return paddle.sum(loss)
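
(Editorial sketch, not part of the diff: the docstring example above never exercises `weight`. Assuming a per-class weight of shape [C], which broadcasts against the element-wise loss of shape [N, C] exactly as the `loss = loss * weight` line above does, usage would look like this.)

import paddle
import paddle.nn.functional as F

input = paddle.to_tensor([[1., -2., 3.], [0., -1., 2.]], dtype='float32')
label = paddle.to_tensor([[0., 1., 0.], [1., 0., 1.]], dtype='float32')
weight = paddle.to_tensor([0.5, 1.0, 2.0], dtype='float32')  # assumed: one weight per class

# Weighted element-wise loss, averaged over classes, then over the batch.
loss = F.multi_label_soft_margin_loss(input, label, weight=weight, reduction='mean')
print(loss)  # scalar mean over the 2 per-sample losses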
1 change: 1 addition & 0 deletions python/paddle/nn/layer/__init__.py
@@ -78,6 +78,7 @@
from .loss import CTCLoss # noqa: F401
from .loss import SmoothL1Loss # noqa: F401
from .loss import HingeEmbeddingLoss # noqa: F401
from .loss import MultiLabelSoftMarginLoss  # noqa: F401
from .norm import BatchNorm1D # noqa: F401
from .norm import BatchNorm2D # noqa: F401
from .norm import BatchNorm3D # noqa: F401
66 changes: 66 additions & 0 deletions python/paddle/nn/layer/loss.py
@@ -1302,3 +1302,69 @@ def forward(self, input, label):
reduction=self.reduction,
margin=self.margin,
name=self.name)


[Review — Contributor]: There should be a blank line before the `import paddle` in the example.

class MultiLabelSoftMarginLoss(Layer):
    r"""Creates a criterion that optimizes a multi-label one-versus-all soft margin
    loss between input :math:`x` (a 2D mini-batch `Tensor` of shape (N, C)) and
    label :math:`y` (a 2D `Tensor` of the same shape). For each sample in the
    mini-batch:

    .. math::
        \text{loss}(x, y) = -\frac{1}{C} \sum_{i} \left( y[i] \log\frac{1}{1 + e^{-x[i]}}
            + (1 - y[i]) \log\frac{e^{-x[i]}}{1 + e^{-x[i]}} \right)

    where :math:`C` is the number of classes, and :math:`x` and :math:`y` must have
    the same shape.

    Parameters:
        weight (Tensor, optional): A manual rescaling weight given to each class.
            If given, it has to be a Tensor of size C and the data type is float32 or float64.
            Default is ``None``.
        reduction (str, optional): Indicates how to average the loss by batch size,
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
            Default: ``'mean'``
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Call parameters:
        input (Tensor): Input tensor, the data type is float32 or float64. Shape is (N, C), where C is the number of classes, and if shape is more than 2D, this is (N, C, D1, D2, ..., Dk), k >= 1.
        label (Tensor): Label tensor containing 1 or -1, the data type is float32 or float64. The shape of label is the same as the shape of input.

    Shape:
        input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operation operates over all the elements.
        label: N-D Tensor, same shape as the input.
        output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input.

    Returns:
        Tensor, the tensor variable storing the multi_label_soft_margin_loss of input and label.

    Examples:
        .. code-block:: python

            import paddle
            import paddle.nn as nn

            input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
            # label elements in {1., -1.}
            label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)

            multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='none')
            loss = multi_label_soft_margin_loss(input, label)
            print(loss)
            # Tensor([3.49625897, 0.71111226, 0.43989015])

            multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='mean')
            loss = multi_label_soft_margin_loss(input, label)
            print(loss)
            # Tensor([1.54908717])
    """
def __init__(self, weight=None, reduction="mean", name=None):
super(MultiLabelSoftMarginLoss, self).__init__()
if reduction not in ['sum', 'mean', 'none']:
raise ValueError(
"'reduction' in 'MultiLabelSoftMarginloss' should be 'sum', 'mean' or 'none', "
"but received {}.".format(reduction))
self.weight=weight
self.reduction = reduction
self.name = name

    def forward(self, input, label):
        return F.multi_label_soft_margin_loss(input,
                                              label,
                                              reduction=self.reduction,
                                              weight=self.weight,
                                              name=self.name)

[Review — Contributor]: To be rigorous here, `weight` should come before `reduction`.
[Author reply]: Fixed.
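
(Editorial sketch, not from the PR: the same weighted call through the layer interface; as in the functional sketch earlier, the [C]-shaped weight is an assumption that relies on broadcasting inside the functional implementation.)

import paddle
import paddle.nn as nn

input = paddle.to_tensor([[1., -2., 3.], [0., -1., 2.]], dtype='float32')
label = paddle.to_tensor([[0., 1., 0.], [1., 0., 1.]], dtype='float32')
weight = paddle.to_tensor([0.5, 1.0, 2.0], dtype='float32')  # assumed per-class weights

# The weight is fixed at construction time and reused on every call.
criterion = nn.MultiLabelSoftMarginLoss(weight=weight, reduction='mean')
loss = criterion(input, label)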