[PaddleHackathon No.14] #41183
Changes from 7 commits
@@ -0,0 +1,191 @@
import paddle
import numpy as np
import unittest


def call_MultiLabelSoftMarginLoss_layer(input,
                                        label,
                                        weight=None,
                                        reduction='mean'):
    multilabel_margin_loss = paddle.nn.MultiLabelSoftMarginLoss(
        weight=weight, reduction=reduction)
    res = multilabel_margin_loss(input=input, label=label)
    return res


def call_MultiLabelSoftMarginLoss_functional(input,
                                             label,
                                             weight=None,
                                             reduction='mean'):
    res = paddle.nn.functional.multi_label_soft_margin_loss(
        input,
        label,
        reduction=reduction,
        weight=weight)
    return res


def test_static(place,
                input_np,
                label_np,
                weight_np=None,
                reduction='mean',
                functional=False):
    # Build and run the loss in static-graph mode, returning the fetched result.
    paddle.enable_static()
    prog = paddle.static.Program()
    startup_prog = paddle.static.Program()
    with paddle.static.program_guard(prog, startup_prog):
        input = paddle.static.data(
            name='input', shape=input_np.shape, dtype='float64')
        label = paddle.static.data(
            name='label', shape=label_np.shape, dtype='float64')
        feed_dict = {"input": input_np, "label": label_np}
        weight = None
        if weight_np is not None:
            weight = paddle.static.data(
                name='weight', shape=weight_np.shape, dtype='float64')
            feed_dict['weight'] = weight_np

        if functional:
            res = call_MultiLabelSoftMarginLoss_functional(
                input=input, label=label, weight=weight, reduction=reduction)
        else:
            res = call_MultiLabelSoftMarginLoss_layer(
                input=input, label=label, weight=weight, reduction=reduction)

        exe = paddle.static.Executor(place)
        static_result = exe.run(prog, feed=feed_dict, fetch_list=[res])
    return static_result


def test_dygraph(place,
                 input_np,
                 label_np,
                 weight=None,
                 reduction='mean',
                 functional=False):
    # Run the loss in dynamic-graph mode and return the result as a numpy array.
    with paddle.fluid.dygraph.base.guard():
        input = paddle.to_tensor(input_np)
        label = paddle.to_tensor(label_np)
        if weight is not None:
            weight = paddle.to_tensor(weight)

        if functional:
            dy_res = call_MultiLabelSoftMarginLoss_functional(
                input=input, label=label, weight=weight, reduction=reduction)
        else:
            dy_res = call_MultiLabelSoftMarginLoss_layer(
                input=input, label=label, weight=weight, reduction=reduction)
        dy_result = dy_res.numpy()
        return dy_result


def calc_multilabel_margin_loss(input,
                                label,
                                weight=None,
                                reduction="mean"):
    # NumPy reference implementation used to verify both the Layer and the functional API.

    def LogSigmoid(x):
        # Naive log-sigmoid; adequate for the bounded inputs used in these tests.
        return np.log(1 / (1 + np.exp(-x)))

    loss = -(label * LogSigmoid(input) + (1 - label) * LogSigmoid(-input))

    if weight is not None:
        loss = loss * weight

    loss = loss.mean(axis=-1)  # only return N loss values

    if reduction == "none":
        return loss
    elif reduction == "mean":
        return np.mean(loss)
    elif reduction == "sum":
        return np.sum(loss)


class TestMultiLabelMarginLoss(unittest.TestCase):
    def test_MultiLabelSoftMarginLoss(self):
        input = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
        label = np.random.randint(0, 2, size=(5, 5)).astype(np.float64)

        places = ['cpu']
        if paddle.device.is_compiled_with_cuda():
            places.append('gpu')
        reductions = ['sum', 'mean', 'none']
        for place in places:
            for reduction in reductions:
                expected = calc_multilabel_margin_loss(
                    input=input, label=label, reduction=reduction)

                dy_result = test_dygraph(
                    place=place, input_np=input, label_np=label,
                    reduction=reduction)

                static_result = test_static(
                    place=place, input_np=input, label_np=label,
                    reduction=reduction)
                self.assertTrue(np.allclose(static_result, expected))
                self.assertTrue(np.allclose(static_result, dy_result))
                self.assertTrue(np.allclose(dy_result, expected))
                static_functional = test_static(
                    place=place, input_np=input, label_np=label,
                    reduction=reduction, functional=True)
                dy_functional = test_dygraph(
                    place=place, input_np=input, label_np=label,
                    reduction=reduction, functional=True)
                self.assertTrue(np.allclose(static_functional, expected))
                self.assertTrue(np.allclose(static_functional, dy_functional))
                self.assertTrue(np.allclose(dy_functional, expected))

    def test_MultiLabelSoftMarginLoss_error(self):
        paddle.disable_static()
        self.assertRaises(
            ValueError,
            paddle.nn.MultiLabelSoftMarginLoss,
            reduction="unsupport reduction")
        input = paddle.to_tensor([[0.1, 0.3]], dtype='float32')
        label = paddle.to_tensor([[0.0, 1.0]], dtype='float32')
        self.assertRaises(
            ValueError,
            paddle.nn.functional.multi_label_soft_margin_loss,
            input=input,
            label=label,
            reduction="unsupport reduction")
        paddle.enable_static()

    def test_MultiLabelSoftMarginLoss_weights(self):
        input = np.random.uniform(0.1, 0.8, size=(5, 5)).astype(np.float64)
        label = np.random.randint(0, 2, size=(5, 5)).astype(np.float64)
        weight = np.random.randint(0, 2, size=(5, 5)).astype(np.float64)
        place = 'cpu'
        reduction = 'mean'
        expected = calc_multilabel_margin_loss(
            input=input, label=label, weight=weight, reduction=reduction)

        dy_result = test_dygraph(
            place=place, input_np=input, label_np=label, weight=weight,
            reduction=reduction)

        static_result = test_static(
            place=place, input_np=input, label_np=label, weight_np=weight,
            reduction=reduction)
        self.assertTrue(np.allclose(static_result, expected))
        self.assertTrue(np.allclose(static_result, dy_result))
        self.assertTrue(np.allclose(dy_result, expected))
        static_functional = test_static(
            place=place, input_np=input, label_np=label, weight_np=weight,
            reduction=reduction, functional=True)
        dy_functional = test_dygraph(
            place=place, input_np=input, label_np=label, weight=weight,
            reduction=reduction, functional=True)
        self.assertTrue(np.allclose(static_functional, expected))
        self.assertTrue(np.allclose(static_functional, dy_functional))
        self.assertTrue(np.allclose(dy_functional, expected))

    def test_MultiLabelSoftMarginLoss_dimension(self):
        paddle.disable_static()

        input = paddle.to_tensor([[0.1, 0.3], [1, 2]], dtype='float32')
        label = paddle.to_tensor([[0.2, 0.1]], dtype='float32')
        self.assertRaises(
            ValueError,
            paddle.nn.functional.multi_label_soft_margin_loss,
            input=input,
            label=label)
        paddle.enable_static()


if __name__ == "__main__":
    unittest.main()
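For reference, the LogSigmoid helper above uses the naive form np.log(1 / (1 + np.exp(-x))), which is fine for the small, bounded test inputs but overflows in exp(-x) for large negative x. A numerically stable equivalent (a sketch, not part of this diff) can be written with np.logaddexp:

import numpy as np

def stable_log_sigmoid(x):
    # log(sigmoid(x)) = -log(1 + exp(-x)) = -logaddexp(0, -x),
    # which avoids overflow in exp(-x) for large negative x.
    return -np.logaddexp(0, -x)

# Both forms agree wherever the naive one is well-behaved.
x = np.array([-3.0, 0.0, 3.0])
assert np.allclose(stable_log_sigmoid(x), np.log(1 / (1 + np.exp(-x))))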
@@ -1302,3 +1302,77 @@ def forward(self, input, label):
            reduction=self.reduction,
            margin=self.margin,
            name=self.name)


class MultiLabelSoftMarginLoss(Layer):
    r"""Creates a criterion that optimizes a multi-label soft margin loss
    between input :math:`x` (a 2D mini-batch `Tensor` of shape (N, C)) and
    label :math:`y` (a 2D `Tensor` of the same shape).
    For each sample in the mini-batch:

    .. math::
        \text{loss}(x, y) = - \frac{1}{C} \sum_{i} \left( y[i] \log(\sigma(x[i])) + (1 - y[i]) \log(\sigma(-x[i])) \right)

    where :math:`\sigma` is the sigmoid function, :math:`C` is the number of classes,
    and :math:`i \in \left\{0, \; \cdots , \; C - 1\right\}`.
    :math:`y` and :math:`x` must have the same size.

    Parameters:
        weight (Tensor, optional): A manual rescaling weight applied to the per-class loss,
            broadcastable against a loss of shape (N, C). The data type is float32 or float64.
            Default: ``None``.
        reduction (str, optional): Indicate how to average the loss by batch_size,
            the candidates are ``'none'`` | ``'mean'`` | ``'sum'``.
            If :attr:`reduction` is ``'none'``, the unreduced loss is returned;
            If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned;
            If :attr:`reduction` is ``'sum'``, the summed loss is returned.
            Default: ``'mean'``
        name (str, optional): Name for the operation (optional, default is None).
            For more information, please refer to :ref:`api_guide_Name`.

    Call parameters:
        input (Tensor): Input tensor, the data type is float32 or float64. The shape is (N, C), where C is the number of classes; if the shape is more than 2D, it is (N, C, D1, D2, ..., Dk), k >= 1.
        label (Tensor): Label tensor containing 1 or -1, the data type is float32 or float64. The shape of label is the same as the shape of input.

    Shape:
        input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means the number of classes. Available dtypes are float32 and float64. The sum operation operates over all the elements.
        label: N-D Tensor, same shape as the input.
        output: scalar. If :attr:`reduction` is ``'none'``, the shape is [N].

    Returns:
        Tensor, the tensor variable storing the multi_label_soft_margin_loss of input and label.

    Examples:
        .. code-block:: python

            import paddle

Review comment (on the example above): a blank line is needed before "import paddle".

            import paddle.nn as nn

            input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
            label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)

            multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='none')
            loss = multi_label_soft_margin_loss(input, label)
            print(loss)
            # Tensor([3.49625897, 0.71111226, 0.43989015])

            multi_label_soft_margin_loss = nn.MultiLabelSoftMarginLoss(reduction='mean')
            loss = multi_label_soft_margin_loss(input, label)
            print(loss)
            # Tensor([1.54908717])
    """
    def __init__(self, weight=None, reduction="mean", name=None):
        super(MultiLabelSoftMarginLoss, self).__init__()
        if reduction not in ['sum', 'mean', 'none']:
            raise ValueError(
                "'reduction' in 'MultiLabelSoftMarginLoss' should be 'sum', 'mean' or 'none', "
                "but received {}.".format(reduction))
        self.weight = weight
        self.reduction = reduction
        self.name = name

    def forward(self, input, label):
        return F.multi_label_soft_margin_loss(
            input,
            label,
            reduction=self.reduction,
            weight=self.weight,
            name=self.name)

Review comment (on the keyword order above): to be rigorous, weight should come before reduction.
Author reply: Fixed.
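A minimal sketch of what the call presumably looks like after that fix; only the keyword order changes, the behavior does not:

    def forward(self, input, label):
        return F.multi_label_soft_margin_loss(
            input,
            label,
            weight=self.weight,
            reduction=self.reduction,
            name=self.name)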
Review comment: The English documentation here, like the API below, also needs blank lines in the appropriate places. This can be fixed in a separate PR after this one is merged. LGTM.
Reply: 1
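For completeness, the functional counterpart added in this PR can also be called directly. A minimal sketch mirroring the docstring example, using the keyword arguments exercised by the tests above; with reduction='mean' the result should match the Layer output shown in the docstring (about 1.549):

import paddle
import paddle.nn.functional as F

input = paddle.to_tensor([[1, -2, 3], [0, -1, 2], [1, 0, 1]], dtype=paddle.float32)
label = paddle.to_tensor([[-1, 1, -1], [1, 1, 1], [1, -1, 1]], dtype=paddle.float32)

# weight defaults to None; reduction may be 'none', 'mean', or 'sum'.
loss = F.multi_label_soft_margin_loss(input, label, weight=None, reduction='mean')
print(loss)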