From f06ad6299f6676429278d013564c03e50c9e2e60 Mon Sep 17 00:00:00 2001
From: yangguohao <1901212980@pku.edu.cn>
Date: Wed, 1 Jun 2022 22:49:37 +0800
Subject: [PATCH] 2022-06-01_pre-commit

---
 python/paddle/nn/functional/loss.py | 21 +++++++++---------
 python/paddle/nn/layer/loss.py      | 34 ++++++++++++++++++-----------
 2 files changed, 31 insertions(+), 24 deletions(-)

diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py
index 31185295f834f..4ebf2c472de7d 100755
--- a/python/paddle/nn/functional/loss.py
+++ b/python/paddle/nn/functional/loss.py
@@ -2235,7 +2235,7 @@ def hinge_embedding_loss(input, label, margin=1.0, reduction='mean', name=None):
 def triplet_margin_loss(input,
                         positive,
                         negative,
-                        margin=1.0, 
+                        margin=1.0,
                         p=2,
                         epsilon=1e-6,
                         swap=False,
@@ -2270,16 +2270,16 @@ def triplet_margin_loss(input,
 
         negative (Tensor): Negative tensor, the data type is float32 or float64.
             The shape of label is the same as the shape of input.
 
-        margin (float, Optional): Default: :math:`1`. 
+        margin (float, Optional): Default: :math:`1`.
 
         p (int, Optional): The norm degree for pairwise distance. Default: :math:`2`.
 
         epsilon (float, Optional): Add small value to avoid division by zero,
             default value is 1e-6.
 
-        swap (bool,Optional): The distance swap change the negative distance to the distance between 
+        swap (bool,Optional): The distance swap change the negative distance to the distance between
             positive sample and negative sample. For more details, see `Learning shallow convolutional feature descriptors with triplet losses`.
-            Default: ``False``. 
+            Default: ``False``.
 
         reduction (str, Optional):Indicate how to average the loss by batch_size.
@@ -2318,7 +2318,7 @@ def triplet_margin_loss(input,
         raise ValueError(
             "'reduction' in 'triplet_margin_loss' should be 'sum', 'mean' or 'none', "
             "but received {}.".format(reduction))
-    if margin<0:
+    if margin < 0:
         raise ValueError(
             "The margin between positive samples and negative samples should be greater than 0."
         )
@@ -2330,11 +2330,10 @@ def triplet_margin_loss(input,
     check_variable_and_dtype(negative, 'negative', ['float32', 'float64'],
                              'triplet_margin_loss')
 
-    if not(input.shape==positive.shape==negative.shape):
-        raise ValueError(
-            "input's shape must equal to "
-            "positive's shape and "
-            "negative's shape")
+    if not (input.shape == positive.shape == negative.shape):
+        raise ValueError("input's shape must equal to "
+                         "positive's shape and "
+                         "negative's shape")
 
     distance_function = paddle.nn.PairwiseDistance(p, epsilon=epsilon)
     positive_dist = distance_function(input, positive)
@@ -2344,7 +2343,7 @@ def triplet_margin_loss(input,
         swap_dist = distance_function(positive, negative)
         negative_dist = paddle.minimum(negative_dist, swap_dist)
 
-    loss = paddle.clip(positive_dist-negative_dist+margin, min=0.0)
+    loss = paddle.clip(positive_dist - negative_dist + margin, min=0.0)
 
     if reduction == 'mean':
         return paddle.mean(loss, name=name)
diff --git a/python/paddle/nn/layer/loss.py b/python/paddle/nn/layer/loss.py
index 2ec51706fd03c..3f67639b0ce4e 100644
--- a/python/paddle/nn/layer/loss.py
+++ b/python/paddle/nn/layer/loss.py
@@ -1352,13 +1352,13 @@ class TripletMarginLoss(Layer):
 
     Call Parameters:
         input (Tensor):Input tensor, the data type is float32 or float64.
-            the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64. 
+            the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64.
 
         positive (Tensor):Positive tensor, the data type is float32 or float64.
-            The shape of label is the same as the shape of input. 
+            The shape of label is the same as the shape of input.
 
         negative (Tensor):Negative tensor, the data type is float32 or float64.
-            The shape of label is the same as the shape of input. 
+            The shape of label is the same as the shape of input.
 
     Returns:
         Tensor. The tensor variable storing the triplet_margin_loss of input and positive and negative.
@@ -1383,7 +1383,14 @@ class TripletMarginLoss(Layer):
             # Tensor([0.19165580])
 
     """
-    def __init__(self, margin=1.0, p=2., epsilon= 1e-6, swap=False, reduction='mean', name=None):
+
+    def __init__(self,
+                 margin=1.0,
+                 p=2.,
+                 epsilon=1e-6,
+                 swap=False,
+                 reduction='mean',
+                 name=None):
         super(TripletMarginLoss, self).__init__()
         if reduction not in ['sum', 'mean', 'none']:
             raise ValueError(
@@ -1397,12 +1404,13 @@ def __init__(self, margin=1.0, p=2., epsilon= 1e-6, swap=False, reduction='mean'
         self.name = name
 
     def forward(self, input, positive, negative):
-        return F.triplet_margin_loss(input,
-                                     positive,
-                                     negative,
-                                     margin=self.margin,
-                                     p=self.p,
-                                     epsilon=self.epsilon,
-                                     swap=self.swap,
-                                     reduction=self.reduction,
-                                     name=self.name)
+        return F.triplet_margin_loss(
+            input,
+            positive,
+            negative,
+            margin=self.margin,
+            p=self.p,
+            epsilon=self.epsilon,
+            swap=self.swap,
+            reduction=self.reduction,
+            name=self.name)
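
A minimal usage sketch of the two APIs touched above, assuming a Paddle build that includes `paddle.nn.functional.triplet_margin_loss` and `paddle.nn.TripletMarginLoss`; the shapes and random inputs are illustrative assumptions, not taken from the patch. As the functional implementation shows, the loss clips `d(input, positive) - d(input, negative) + margin` at zero under the p-norm pairwise distance, optionally swapping in the positive-negative distance, then applies the chosen reduction; the layer form simply forwards to the functional form.

import paddle
import paddle.nn.functional as F

# Anchor, positive and negative samples; all three shapes must match,
# otherwise the shape ValueError shown in the patch is raised.
input = paddle.rand([4, 8], dtype='float32')
positive = paddle.rand([4, 8], dtype='float32')
negative = paddle.rand([4, 8], dtype='float32')

# Functional form: L2 pairwise distance (p=2), mean reduction.
loss = F.triplet_margin_loss(input, positive, negative, margin=1.0, p=2,
                             swap=False, reduction='mean')

# Layer form with the same hyperparameters produces the same value.
triplet_margin_loss = paddle.nn.TripletMarginLoss(margin=1.0, p=2,
                                                  reduction='mean')
layer_loss = triplet_margin_loss(input, positive, negative)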