From b02c61bdf1a34908e0250ea56bc9f941de3a227b Mon Sep 17 00:00:00 2001 From: co63oc Date: Wed, 17 Jan 2024 15:36:29 +0800 Subject: [PATCH] Fix --- python/paddle/audio/backends/init_backend.py | 4 +- python/paddle/audio/backends/wave_backend.py | 2 +- python/paddle/audio/features/layers.py | 2 +- python/paddle/nn/functional/conv.py | 12 ++-- python/paddle/nn/functional/distance.py | 2 +- python/paddle/nn/functional/extension.py | 2 +- python/paddle/nn/functional/loss.py | 70 ++++++++++---------- python/paddle/nn/functional/norm.py | 4 +- python/paddle/nn/functional/pooling.py | 16 ++--- python/paddle/nn/functional/vision.py | 10 +-- python/paddle/nn/initializer/initializer.py | 2 +- python/paddle/tensor/einsum.py | 2 +- 12 files changed, 64 insertions(+), 64 deletions(-) diff --git a/python/paddle/audio/backends/init_backend.py b/python/paddle/audio/backends/init_backend.py index 12e3a0d84c9e3..2259fda8b846b 100644 --- a/python/paddle/audio/backends/init_backend.py +++ b/python/paddle/audio/backends/init_backend.py @@ -73,8 +73,8 @@ def list_available_backends() -> List[str]: package = "paddleaudio" warn_msg = ( "Failed importing {}. \n" - "only wave_banckend(only can deal with PCM16 WAV) supportted.\n" - "if want soundfile_backend(more audio type suppported),\n" + "only wave_backend (which can only deal with PCM16 WAV) is supported.\n" + "if you want soundfile_backend (more audio types supported),\n" "please manually installed (usually with `pip install {} >= 1.0.2`). " ).format(package, package) warnings.warn(warn_msg) diff --git a/python/paddle/audio/backends/wave_backend.py b/python/paddle/audio/backends/wave_backend.py index 262ccafeb304a..8b7b241a48cc0 100644 --- a/python/paddle/audio/backends/wave_backend.py +++ b/python/paddle/audio/backends/wave_backend.py @@ -26,7 +26,7 @@ def _error_message(): package = "paddleaudio" warn_msg = ( - "only PCM16 WAV supportted. \n" + "only PCM16 WAV is supported. \n" "if want support more other audio types, please " f"manually installed (usually with `pip install {package}`). \n " "and use paddle.audio.backends.set_backend('soundfile') to set audio backend" diff --git a/python/paddle/audio/features/layers.py b/python/paddle/audio/features/layers.py index 59b8967eb7d71..d966ef9d1391c 100644 --- a/python/paddle/audio/features/layers.py +++ b/python/paddle/audio/features/layers.py @@ -23,7 +23,7 @@ class Spectrogram(nn.Layer): """Compute spectrogram of given signals, typically audio waveforms. - The spectorgram is defined as the complex norm of the short-time Fourier transformation. + The spectrogram is defined as the complex norm of the short-time Fourier transform. Args: n_fft (int, optional): The number of frequency components of the discrete Fourier transform. Defaults to 512.
diff --git a/python/paddle/nn/functional/conv.py b/python/paddle/nn/functional/conv.py index dd2bc6b336f76..4efe50331d4ac 100644 --- a/python/paddle/nn/functional/conv.py +++ b/python/paddle/nn/functional/conv.py @@ -138,7 +138,7 @@ def _conv_nd( new_shape = [1] * len(x.shape) new_shape[channel_dim] = -1 bias = bias.reshape(new_shape) - # TODO(qili93): temporary for ascned npu performance to be removed along with npu_identity op + # TODO(qili93): temporary for ascend npu performance, to be removed along with npu_identity op if ( _global_flags()['FLAGS_npu_storage_format'] and 'npu' in get_all_custom_device_type() ) @@ -452,8 +452,8 @@ def conv1d( l_type = 'depthwise_conv2d' use_cudnn = False - squeeze_aixs = -3 if channel_last else -2 - x = unsqueeze(x, axis=[squeeze_aixs]) + squeeze_axis = -3 if channel_last else -2 + x = unsqueeze(x, axis=[squeeze_axis]) if in_dynamic_or_pir_mode(): if l_type == 'conv2d': @@ -508,7 +508,7 @@ def conv1d( ) if bias is not None: out = _add_with_axis(out, bias, axis=channel_dim) - out = squeeze(out, axis=[squeeze_aixs]) + out = squeeze(out, axis=[squeeze_axis]) return out @@ -714,7 +714,7 @@ def conv2d( + bias.shape + [1 for i in range(len(x.shape) - channel_dim - 1)], ) - # TODO(qili93): temporary for ascned npu performance to be removed along with npu_identity op + # TODO(qili93): temporary for ascend npu performance, to be removed along with npu_identity op if ( _global_flags()['FLAGS_npu_storage_format'] and 'npu' in get_all_custom_device_type() ) @@ -1353,7 +1353,7 @@ def conv3d( and strides, paddings, dilations, groups parameters. Input(Input) and Output(Output) are in NCDHW or NDHWC format. Where N is batch size C is the number of channels, D is the depth of the feature, H is the height of the feature, - and W is the width of the feature. Convlution3D is similar with Convlution2D + and W is the width of the feature. Convolution3D is similar to Convolution2D but adds one dimension(depth). If bias attribution and activation type are provided, bias is added to the output of the convolution, and the corresponding activation function is applied to the final result. diff --git a/python/paddle/nn/functional/distance.py b/python/paddle/nn/functional/distance.py index f14c220c0c0db..33061cb0855cc 100644 --- a/python/paddle/nn/functional/distance.py +++ b/python/paddle/nn/functional/distance.py @@ -26,7 +26,7 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None): r""" It computes the pairwise distance between two vectors. The - distance is calculated by p-oreder norm: + distance is calculated by the p-order norm: .. math:: diff --git a/python/paddle/nn/functional/extension.py b/python/paddle/nn/functional/extension.py index dac7ba30d93fd..099612a04e8c7 100644 --- a/python/paddle/nn/functional/extension.py +++ b/python/paddle/nn/functional/extension.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -# TODO: define the extention functions +# TODO: define the extension functions from paddle import _C_ops, tensor diff --git a/python/paddle/nn/functional/loss.py b/python/paddle/nn/functional/loss.py index 809056cf39aaf..072044e3a19fc 100644 --- a/python/paddle/nn/functional/loss.py +++ b/python/paddle/nn/functional/loss.py @@ -55,7 +55,7 @@ def dice_loss(input, label, epsilon=0.00001, name=None): input (Tensor): Tensor, rank>=2, shape is :math:`[N_1, N_2, ..., N_k, D]`, where :math:`N_1` is the batch_size, :math:`D` is the number of categories.
It is usually the output predictions of sigmoid activation. The data type can be float32 or float64. - label (Tensor): Tensor, the groud truth with the same rank as input, shape is :math:`[N_1, N_2, ..., N_k, 1]`. + label (Tensor): Tensor, the ground truth with the same rank as input, shape is :math:`[N_1, N_2, ..., N_k, 1]`. where :math:`N_1` is the batch_size. The data type can be int32 or int64. epsilon (float): The epsilon will be added to the numerator and denominator. If both input and label are empty, it makes sure dice is 1. @@ -217,7 +217,7 @@ def base_softmax_with_cross_entropy( Label is a ``Tensor`` in the same shape with :attr:`logits`. If :attr:`soft_label` is set to :attr:`True`, Label is a ``Tensor`` in the same shape with :attr:`logits` expect shape in dimension :attr:`axis` as 1. - soft_label (bool, optional): A flag to indicate whether to interpretant the given + soft_label (bool, optional): A flag to indicate whether to interpret the given labels as soft labels. Default False. ignore_index (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. Only valid @@ -626,7 +626,7 @@ def binary_cross_entropy( batch element. If given, has to be a Tensor of size nbatch and the data type is float32, float64. Default is ``'None'``. reduction (str, optional): Indicate how to average the loss by batch_size, - the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'none'``, the unreduced loss is returned; If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; If :attr:`reduction` is ``'sum'``, the summed loss is returned. @@ -765,7 +765,7 @@ def binary_cross_entropy_with_logits( batch element. If given, it has to be a 1D Tensor whose size is `[N, ]`, The data type is float32, float64. Default is ``'None'``. reduction (str, optional): Indicate how to average the loss by batch_size, - the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'none'``, the unreduced loss is returned; If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; If :attr:`reduction` is ``'sum'``, the summed loss is returned. @@ -1074,7 +1074,7 @@ def smooth_l1_loss(input, label, reduction='mean', delta=1.0, name=None): label (Tensor): Label tensor, the data type is float32 or float64. The shape of label is the same as the shape of input. reduction (str, optional): Indicate how to average the loss by batch_size, - the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned. If :attr:`reduction` is ``'none'``, the unreduced loss is returned. @@ -1150,7 +1150,7 @@ def margin_ranking_loss( ): r""" - Calcluate the margin rank loss between the input, other and label, use the math function as follows. + Calculate the margin rank loss between the input, other and label, using the math function as follows. .. math:: margin\_rank\_loss = max(0, -label * (input - other) + margin) @@ -1172,7 +1172,7 @@ other(Tensor): the second input tensor, it's data type should be float32, float64. label(Tensor): the label value corresponding to input, it's data type should be float32, float64.
margin (float, optional): The margin value to add, default value is 0; - reduction (str, optional): Indicate the reduction to apply to the loss, the candicates are ``'none'``, ``'mean'``, ``'sum'``.If :attr:`reduction` is ``'none'``, the unreduced loss is returned; If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned. If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned. Default is ``'mean'``. + reduction (str, optional): Indicate the reduction to apply to the loss, the candidates are ``'none'``, ``'mean'``, ``'sum'``. If :attr:`reduction` is ``'none'``, the unreduced loss is returned; If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned. If :attr:`reduction` is ``'sum'``, the reduced sum loss is returned. Default is ``'mean'``. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: @@ -1286,7 +1286,7 @@ def l1_loss(input, label, reduction='mean', name=None): input (Tensor): The input tensor. The shapes is [N, `*`], where N is batch size and `*` means any number of additional dimensions. It's data type should be float32, float64, int32, int64. label (Tensor): label. The shapes is [N, `*`], same shape as ``input`` . It's data type should be float32, float64, int32, int64. reduction (str, optional): Indicate the reduction to apply to the loss, - the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If `reduction` is ``'none'``, the unreduced loss is returned; If `reduction` is ``'mean'``, the reduced mean loss is returned. If `reduction` is ``'sum'``, the reduced sum loss is returned. @@ -1383,10 +1383,10 @@ def nll_loss( ignore_index (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. Default is -100. reduction (str, optional): Indicate how to average the loss, - the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If `reduction` is ``'mean'``, the reduced mean loss is returned; if `reduction` is ``'sum'``, the reduced sum loss is returned; - if `reduction` is ``'none'``, no reduction will be apllied. + if `reduction` is ``'none'``, no reduction will be applied. Default is ``'mean'``. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. @@ -1436,7 +1436,7 @@ def nll_loss( if input_shape[1] < 1: raise ValueError( - f"Expected 1 or more classess (got num classes{input_shape[1]})" + f"Expected 1 or more classes (got num classes {input_shape[1]})" ) n = input_shape[0] @@ -1520,10 +1520,10 @@ def poisson_nll_loss( A small value to avoid evaluation of :math:`\log(0)` when `log_input`\ =\ ``False``. ``epsilon > 0``. Default: 1e-8. reduction (str, optional): - Indicate how to reduce the loss, the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + Indicate how to reduce the loss, the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If `reduction` is ``'mean'``, the reduced mean loss is returned; if `reduction` is ``'sum'``, the reduced sum loss is returned; - if `reduction` is ``'none'``, no reduction will be apllied. + if `reduction` is ``'none'``, no reduction will be applied. Default is ``'mean'``. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.
@@ -1554,7 +1554,7 @@ def poisson_nll_loss( # check parameter values if epsilon <= 0: raise ValueError( - "The value of `epsilon` in poisson_nll_loss should be positve, but received %f, which is not allowed" + "The value of `epsilon` in poisson_nll_loss should be positive, but received %f, which is not allowed" % epsilon ) @@ -1629,11 +1629,11 @@ def kl_div(input, label, reduction='mean', name=None): any number of additional dimensions. It's data type should be float32, float64. label (Tensor): label. The shapes is [N, *], same shape as ``input`` . It's data type should be float32, float64. reduction (str, optional): Indicate how to average the loss, - the candicates are ``'none'`` | ``'batchmean'`` | ``'mean'`` | ``'sum'``. + the candidates are ``'none'`` | ``'batchmean'`` | ``'mean'`` | ``'sum'``. If `reduction` is ``'mean'``, the reduced mean loss is returned; If `reduction` is ``'batchmean'``, the sum loss divided by batch size is returned; if `reduction` is ``'sum'``, the reduced sum loss is returned; - if `reduction` is ``'none'``, no reduction will be apllied. + if `reduction` is ``'none'``, no reduction will be applied. Default is ``'mean'``. name(str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. @@ -1825,7 +1825,7 @@ def ctc_loss( input_lengths (Tensor): The length for each input sequence, it should have shape [batch_size] and dtype int64. label_lengths (Tensor): The length for each label sequence, it should have shape [batch_size] and dtype int64. blank (int, optional): The blank label index of Connectionist Temporal Classification (CTC) loss, which is in the half-opened interval [0, num_classes + 1). The data type must be int32. Default: 0. - reduction (str, optional): Indicate how to average the loss, the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'mean'``, the output loss will be divided by the label_lengths, and then return the mean of quotient; If :attr:`reduction` is ``'sum'``, return the sum of loss; If :attr:`reduction` is ``'none'``, no reduction will be applied. Default: ``'mean'``. + reduction (str, optional): Indicate how to average the loss, the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'mean'``, the output loss will be divided by the label_lengths, and then return the mean of the quotient; If :attr:`reduction` is ``'sum'``, return the sum of loss; If :attr:`reduction` is ``'none'``, no reduction will be applied. Default: ``'mean'``. norm_by_times (bool, optional): Whether to normalize the gradients by the number of time-step, which is also the sequence's length. There is no need to normalize the gradients if reduction mode is 'mean'. Default: False. Returns: @@ -1972,7 +1972,7 @@ def rnnt_loss( label_lengths (Tensor): The length for each label sequence, it should have shape [batch_size] and dtype int64. blank (int, optional): The blank label index of RNN-T loss, which is in the half-opened interval [0, B). The data type must be int32. Default is 0. fastemit_lambda (float, default 0.001): Regularization parameter for FastEmit (https://arxiv.org/pdf/2010.11148.pdf) - reduction (string, optional): Indicate how to average the loss, the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'mean'``, the output will be sum of loss and be divided by the batch_size; If :attr:`reduction` is ``'sum'``, return the sum of loss; If :attr:`reduction` is ``'none'``, no reduction will be applied.
Default is ``'mean'``. + reduction (string, optional): Indicate how to average the loss, the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'mean'``, the output will be the sum of the loss divided by the batch_size; If :attr:`reduction` is ``'sum'``, return the sum of loss; If :attr:`reduction` is ``'none'``, no reduction will be applied. Default is ``'mean'``. name (str, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`. Returns: @@ -2107,7 +2107,7 @@ def margin_cross_entropy( Args: logits (Tensor): shape[N, local_num_classes], the output of the normalized X multiply the normalized W. The logits is shard_logits when using model parallel. - label (Tensor): shape[N] or shape[N, 1], the groud truth label. + label (Tensor): shape[N] or shape[N, 1], the ground truth label. margin1 (float, optional): m1 of margin loss, default value is `1.0`. margin2 (float, optional): m2 of margin loss, default value is `0.5`. margin3 (float, optional): m3 of margin loss, default value is `0.0`. @@ -2116,7 +2116,7 @@ def margin_cross_entropy( or ``None`` for global default group or ``False`` for data parallel (do not communication cross ranks). Default is ``None``. return_softmax (bool, optional): Whether return softmax probability. Default value is `False`. - reduction (str, optional): The candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + reduction (str, optional): The candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'mean'``, return the average of loss; If :attr:`reduction` is ``'sum'``, return the sum of loss; If :attr:`reduction` is ``'none'``, no reduction will be applied. @@ -2438,7 +2438,7 @@ def softmax_with_cross_entropy( Label is a ``Tensor`` in the same shape with :attr:`logits`. If :attr:`soft_label` is set to :attr:`True`, Label is a ``Tensor`` in the same shape with :attr:`logits` expect shape in dimension :attr:`axis` as 1. - soft_label (bool, optional): A flag to indicate whether to interpretant the given + soft_label (bool, optional): A flag to indicate whether to interpret the given labels as soft labels. Default False. ignore_index (int, optional): Specifies a target value that is ignored and does not contribute to the input gradient. Only valid @@ -2644,7 +2644,7 @@ def cross_entropy( value needs to be ignored. Only valid when soft_label = False. Default is ``-100`` . reduction (str, optional): Indicate how to average the loss by batch_size, - the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; If :attr:`size_average` is ``'sum'``, the reduced sum loss is returned. If :attr:`reduction` is ``'none'``, the unreduced loss is returned. @@ -2740,18 +2740,18 @@ def cross_entropy( >>> reduction='mean' >>> weight = None >>> logits = paddle.uniform(shape, dtype='float64', min=0.1, max=1.0) - >>> interger_labels = paddle.randint(low=0, high=C, shape=[N], dtype='int64') - >>> one_hot_labels = paddle.nn.functional.one_hot(interger_labels, C).astype('float32') + >>> integer_labels = paddle.randint(low=0, high=C, shape=[N], dtype='int64') + >>> one_hot_labels = paddle.nn.functional.one_hot(integer_labels, C).astype('float32') >>> # integer labels - >>> paddle_interger_loss_mean = paddle.nn.functional.cross_entropy( + >>> paddle_integer_loss_mean = paddle.nn.functional.cross_entropy( ... logits, - ... interger_labels, + ...
integer_labels, ... axis=axis, ... weight=weight, ... label_smoothing=label_smoothing, ... reduction=reduction) - >>> print(paddle_interger_loss_mean) + >>> print(paddle_integer_loss_mean) Tensor(shape=[], dtype=float64, place=Place(cpu), stop_gradient=True, 1.08317309) @@ -2784,7 +2784,7 @@ def cross_entropy( input_dims = len(list(input.shape)) if input_dims == 0: - raise ValueError('The dimention of input should be larger than zero!') + raise ValueError('The dimension of input should be larger than zero!') label_dims = len(list(label.shape)) if input_dims - 1 == label_dims: @@ -3125,7 +3125,7 @@ def sigmoid_focal_loss( gamma(int|float, optional): Hyper-parameter to modulate the easy and hard examples. Default value is set to 2.0. reduction (str, optional): Indicate how to average the loss by batch_size, - the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'none'``, the unreduced loss is returned; If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; If :attr:`reduction` is ``'sum'``, the summed loss is returned. @@ -3281,7 +3281,7 @@ def multi_label_soft_margin_loss( If given, has to be a Tensor of size C and the data type is float32, float64. Default is ``'None'`` . reduction (str, optional): Indicate how to average the loss by batch_size, - the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'none'``, the unreduced loss is returned; If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; If :attr:`reduction` is ``'sum'``, the summed loss is returned. @@ -3290,7 +3290,7 @@ def multi_label_soft_margin_loss( For more information, please refer to :ref:`api_guide_Name`. Shape: - input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operationoperates over all the elements. + input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means number of classes, available dtype is float32, float64. The sum operation operates over all the elements. label: N-D Tensor, same shape as the input. weight:N-D Tensor, the shape is [N,1] output: scalar. If :attr:`reduction` is ``'none'``, then same shape as the input. @@ -3401,7 +3401,7 @@ def hinge_embedding_loss(input, label, margin=1.0, reduction='mean', name=None): hinge_embedding_loss. When label is -1, Input smaller than margin are minimized with hinge_embedding_loss. Default = 1.0 reduction (str, optional): Indicate how to average the loss by batch_size. - the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'none'``, the unreduced loss is returned; If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; If :attr:`reduction` is ``'sum'``, the summed loss is returned. @@ -3411,7 +3411,7 @@ def hinge_embedding_loss(input, label, margin=1.0, reduction='mean', name=None): Shape: - input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64. The sum operationoperates over all the elements. + input: N-D Tensor, the shape is [N, \*], N is batch size and `\*` means any number of additional dimensions, available dtype is float32, float64. The sum operation operates over all the elements. label: N-D Tensor, same shape as the input. 
tensor elements should containing 1 or -1, the data type is float32 or float64. @@ -3638,7 +3638,7 @@ def triplet_margin_with_distance_loss( and negative samples) if swap distance smaller than negative distance. Default: ``False``. reduction (str, optional):Indicate how to average the loss by batch_size. - the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'none'``, the unreduced loss is returned; If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; If :attr:`reduction` is ``'sum'``, the summed loss is returned. @@ -3788,7 +3788,7 @@ def triplet_margin_loss( reduction (str, Optional):Indicate how to average the loss by batch_size. - the candicates are ``'none'`` | ``'mean'`` | ``'sum'``. + the candidates are ``'none'`` | ``'mean'`` | ``'sum'``. If :attr:`reduction` is ``'none'``, the unreduced loss is returned; If :attr:`reduction` is ``'mean'``, the reduced mean loss is returned; If :attr:`reduction` is ``'sum'``, the summed loss is returned. diff --git a/python/paddle/nn/functional/norm.py b/python/paddle/nn/functional/norm.py index a8f2ff96c2841..b72a19dc72f69 100644 --- a/python/paddle/nn/functional/norm.py +++ b/python/paddle/nn/functional/norm.py @@ -133,7 +133,7 @@ def batch_norm( nn.functional.batch_norm is used for nn.BatchNorm1D, nn.BatchNorm2D, nn.BatchNorm3D. Please use above API for BatchNorm. Parameters: - x(Tesnor): input value. It's data type should be float32, float64. + x(Tensor): input value. Its data type should be float32, float64. running_mean(Tensor): running mean. running_var(Tensor): running variance. weight(Tensor, optional): The weight tensor of batch_norm. Default: None. @@ -433,7 +433,7 @@ def instance_norm( eps(float, optional): A value added to the denominator for numerical stability. Default is 1e-5. momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9. use_input_stats(bool, optional): Default True. Obsolete (that is, no longer usable). - data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW" or "NCDHW". Defalut "NCHW". + data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW" or "NCDHW". Default "NCHW". name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`.. Returns: diff --git a/python/paddle/nn/functional/pooling.py b/python/paddle/nn/functional/pooling.py index 95aeb623e20f6..64f7aa7070bf8 100755 --- a/python/paddle/nn/functional/pooling.py +++ b/python/paddle/nn/functional/pooling.py @@ -171,7 +171,7 @@ def _expand_low_nd_padding(padding): padding = [0] + padding else: raise ValueError( - f"The size of padding's dimmention should be 1 or 2. But got padding={padding}" + f"The size of padding's dimension should be 1 or 2. But got padding={padding}" ) return padding @@ -250,7 +250,7 @@ def avg_pool1d( padding, 1, channel_last=channel_last, ceil_mode=ceil_mode ) - # use 2d to implenment 1d should expand padding in advance. + # using 2d to implement 1d, padding should be expanded in advance. padding = _expand_low_nd_padding(padding) if in_dynamic_or_pir_mode(): @@ -566,7 +566,7 @@ def max_pool1d( name=None, ): """ - This API implements max pooling 1d opereation. + This API implements max pooling 1d operation. See more details in :ref:`api_paddle_nn_MaxPool1d` . Args: @@ -745,7 +745,7 @@ def max_unpool1d( name=None, ): r""" - This API implements max unpooling 1d opereation.
+ This API implements max unpooling 1d operation. `max_unpool1d` accepts the output of `max_pool1d` as input, including the indices of the maximum value and calculate the partial inverse. All non-maximum values are set to zero. @@ -767,7 +767,7 @@ def max_unpool1d( indices (Tensor): The indices given out by maxpooling1d which is a 3-D tensor with shape [N, C, L]. The format of input tensor is `"NCL"` , where `N` is batch size, `C` is the number of channels, `L` is - the length of the featuree. The data type is float32 or float64. + the length of the feature. The data type is float32 or float64. kernel_size (int|list|tuple): The unpool kernel size. If unpool kernel size is a tuple or list, it must contain an integer. stride (int|list|tuple): The unpool stride size. If unpool stride size is a tuple or list, @@ -880,7 +880,7 @@ def max_unpool2d( name=None, ): r""" - This API implements max unpooling 2d opereation. + This API implements max unpooling 2d operation. See more details in :ref:`api_paddle_nn_MaxUnPool2D` . @@ -924,7 +924,7 @@ def max_unpool2d( Raises: ValueError: If the input is not a 4-D tensor. - ValueError: If indeces shape is not equal input shape. + ValueError: If indices shape is not equal to input shape. Examples: @@ -1030,7 +1030,7 @@ def max_unpool3d( name=None, ): r""" - This API implements max unpooling 3d opereation. + This API implements max unpooling 3d operation. `max_unpool3d` accepts the output of `max_pool3d` as input, including the indices of the maximum value and calculate the partial inverse. All non-maximum values are set to zero. diff --git a/python/paddle/nn/functional/vision.py b/python/paddle/nn/functional/vision.py index d41e5cef8729b..3d7aa2f6fc848 100644 --- a/python/paddle/nn/functional/vision.py +++ b/python/paddle/nn/functional/vision.py @@ -349,7 +349,7 @@ def grid_sample( def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None): """ This API implements pixel shuffle operation. - See more details in :ref:`PixelSuffle ` . + See more details in :ref:`PixelShuffle ` . Parameters: @@ -378,7 +378,7 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'." - f"But recevie Attr(data_format): {data_format} " + f"But received Attr(data_format): {data_format} " ) if in_dygraph_mode(): return _C_ops.pixel_shuffle(x, upscale_factor, data_format) @@ -403,7 +403,7 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None): def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None): """ This API implements pixel unshuffle operation. - See more details in :ref:`PixelUnSuffle ` . + See more details in :ref:`PixelUnShuffle ` . Parameters: x (Tensor): 4-D tensor, the data type should be float32 or float64. @@ -438,7 +438,7 @@ def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'." - f"But recevie Attr(data_format): {data_format} " + f"But received Attr(data_format): {data_format} " ) if in_dygraph_mode(): @@ -516,7 +516,7 @@ def channel_shuffle(x, groups, data_format="NCHW", name=None): if data_format not in ["NCHW", "NHWC"]: raise ValueError( "Attr(data_format) should be 'NCHW' or 'NHWC'."
- f"But recevie Attr(data_format): {data_format} " + f"But receive Attr(data_format): {data_format} " ) if in_dynamic_or_pir_mode(): diff --git a/python/paddle/nn/initializer/initializer.py b/python/paddle/nn/initializer/initializer.py index 7b3901613f9e3..d86d20fe4fede 100644 --- a/python/paddle/nn/initializer/initializer.py +++ b/python/paddle/nn/initializer/initializer.py @@ -168,5 +168,5 @@ def calculate_gain(nonlinearity, param=None): return recommended_gain[nonlinearity] else: raise ValueError( - f"nonlinearity function {nonlinearity} is not suppported now." + f"nonlinearity function {nonlinearity} is not supported now." ) diff --git a/python/paddle/tensor/einsum.py b/python/paddle/tensor/einsum.py index 7b7af555cd04e..3df355d31b36d 100644 --- a/python/paddle/tensor/einsum.py +++ b/python/paddle/tensor/einsum.py @@ -143,7 +143,7 @@ def build_view(in_labels, out_labels): Returns ------- The inverse map from out_labels to in_labels. The length of the inverse map equals that of - out_labels. -1 is filled if there's no matching intput dimension for a specific label. + out_labels. -1 is filled if there's no matching input dimension for a specific label. Examples --------