Fix words #60891

Merged (2 commits, Jan 18, 2024)
4 changes: 2 additions & 2 deletions python/paddle/audio/backends/init_backend.py
@@ -73,8 +73,8 @@ def list_available_backends() -> List[str]:
package = "paddleaudio"
warn_msg = (
"Failed importing {}. \n"
"only wave_banckend(only can deal with PCM16 WAV) supportted.\n"
"if want soundfile_backend(more audio type suppported),\n"
"only wave_backend(only can deal with PCM16 WAV) supported.\n"
"if want soundfile_backend(more audio type supported),\n"
"please manually installed (usually with `pip install {} >= 1.0.2`). "
).format(package, package)
warnings.warn(warn_msg)
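For context on the warning being corrected here, a minimal sketch of how the backend listing is typically exercised (assuming a Paddle build that ships the `paddle.audio` module; the optional `paddleaudio` package named in the warning is only needed for the soundfile backend):

```python
import paddle

# Without the optional paddleaudio package this is expected to return only the
# wave backend, which can read and write PCM16 WAV files.
print(paddle.audio.backends.list_available_backends())

# Select a backend explicitly; 'soundfile' only becomes available after
# installing paddleaudio (usually `pip install paddleaudio>=1.0.2`).
paddle.audio.backends.set_backend('wave_backend')
```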
2 changes: 1 addition & 1 deletion python/paddle/audio/backends/wave_backend.py
@@ -26,7 +26,7 @@
def _error_message():
package = "paddleaudio"
warn_msg = (
"only PCM16 WAV supportted. \n"
"only PCM16 WAV supported. \n"
"if want support more other audio types, please "
f"manually installed (usually with `pip install {package}`). \n "
"and use paddle.audio.backends.set_backend('soundfile') to set audio backend"
2 changes: 1 addition & 1 deletion python/paddle/audio/features/layers.py
@@ -23,7 +23,7 @@

class Spectrogram(nn.Layer):
"""Compute spectrogram of given signals, typically audio waveforms.
The spectorgram is defined as the complex norm of the short-time Fourier transformation.
The spectrogram is defined as the complex norm of the short-time Fourier transformation.

Args:
n_fft (int, optional): The number of frequency components of the discrete Fourier transform. Defaults to 512.
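As a quick illustration of the layer whose docstring is corrected above, a hedged usage sketch (the input shape and defaults are assumptions based on the docstring, not verified here):

```python
import paddle
from paddle.audio.features import Spectrogram

# A batch of 2 mono waveforms, 1 second at 16 kHz.
waveform = paddle.randn([2, 16000])

# Complex norm of the short-time Fourier transform, as the docstring describes.
spec_layer = Spectrogram(n_fft=512)
spec = spec_layer(waveform)
print(spec.shape)  # roughly [2, n_fft // 2 + 1, num_frames]
```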
12 changes: 6 additions & 6 deletions python/paddle/nn/functional/conv.py
@@ -138,7 +138,7 @@ def _conv_nd(
new_shape = [1] * len(x.shape)
new_shape[channel_dim] = -1
bias = bias.reshape(new_shape)
# TODO(qili93): temporary for ascned npu performance to be removed along with npu_identity op
# TODO(qili93): temporary for ascend npu performance to be removed along with npu_identity op
if (
_global_flags()['FLAGS_npu_storage_format']
and 'npu' in get_all_custom_device_type()
@@ -452,8 +452,8 @@ def conv1d(
l_type = 'depthwise_conv2d'
use_cudnn = False

squeeze_aixs = -3 if channel_last else -2
x = unsqueeze(x, axis=[squeeze_aixs])
squeeze_axis = -3 if channel_last else -2
x = unsqueeze(x, axis=[squeeze_axis])

if in_dynamic_or_pir_mode():
if l_type == 'conv2d':
@@ -508,7 +508,7 @@ def conv1d(
)
if bias is not None:
out = _add_with_axis(out, bias, axis=channel_dim)
out = squeeze(out, axis=[squeeze_aixs])
out = squeeze(out, axis=[squeeze_axis])
return out


@@ -714,7 +714,7 @@ def conv2d(
+ bias.shape
+ [1 for i in range(len(x.shape) - channel_dim - 1)],
)
# TODO(qili93): temporary for ascned npu performance to be removed along with npu_identity op
# TODO(qili93): temporary for ascend npu performance to be removed along with npu_identity op
if (
_global_flags()['FLAGS_npu_storage_format']
and 'npu' in get_all_custom_device_type()
@@ -1353,7 +1353,7 @@ def conv3d(
and strides, paddings, dilations, groups parameters. Input(Input) and
Output(Output) are in NCDHW or NDHWC format. Where N is batch size C is the number of
channels, D is the depth of the feature, H is the height of the feature,
and W is the width of the feature. Convlution3D is similar with Convlution2D
and W is the width of the feature. Convolution3D is similar with Convolution2D
but adds one dimension(depth). If bias attribution and activation type are
provided, bias is added to the output of the convolution, and the
corresponding activation function is applied to the final result.
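The conv1d path touched above unsqueezes the input and dispatches to a 2-D convolution; the squeeze_axis rename is part of that bookkeeping. A minimal functional sketch, with shapes assumed from the docstrings:

```python
import paddle
import paddle.nn.functional as F

# NCL input: batch 4, 3 input channels, length 32.
x = paddle.randn([4, 3, 32])
# Weight layout is [out_channels, in_channels / groups, kernel_size].
w = paddle.randn([8, 3, 5])

y = F.conv1d(x, w, stride=1, padding=2)
print(y.shape)  # [4, 8, 32]; padding=2 keeps the length for kernel size 5
```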
2 changes: 1 addition & 1 deletion python/paddle/nn/functional/distance.py
@@ -26,7 +26,7 @@ def pairwise_distance(x, y, p=2.0, epsilon=1e-6, keepdim=False, name=None):
r"""

It computes the pairwise distance between two vectors. The
distance is calculated by p-oreder norm:
distance is calculated by p-order norm:

.. math::

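For reference, the p-order norm mentioned in the corrected docstring, in a short sketch (values chosen only for illustration):

```python
import paddle
import paddle.nn.functional as F

x = paddle.to_tensor([[1.0, 3.0], [3.0, 5.0]])
y = paddle.to_tensor([[5.0, 6.0], [7.0, 8.0]])

# p-order norm of the difference between x and y along the last axis
# (epsilon is added internally for numerical stability).
dist = F.pairwise_distance(x, y, p=2.0)
print(dist)  # shape [2]: the Euclidean distance between each pair of rows
```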
2 changes: 1 addition & 1 deletion python/paddle/nn/functional/extension.py
@@ -12,7 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.

# TODO: define the extention functions
# TODO: define the extension functions


from paddle import _C_ops, tensor
70 changes: 35 additions & 35 deletions python/paddle/nn/functional/loss.py

Large diffs are not rendered by default.

4 changes: 2 additions & 2 deletions python/paddle/nn/functional/norm.py
@@ -133,7 +133,7 @@ def batch_norm(
nn.functional.batch_norm is used for nn.BatchNorm1D, nn.BatchNorm2D, nn.BatchNorm3D. Please use above API for BatchNorm.

Parameters:
x(Tesnor): input value. It's data type should be float32, float64.
x(Tensor): input value. It's data type should be float32, float64.
running_mean(Tensor): running mean.
running_var(Tensor): running variance.
weight(Tensor, optional): The weight tensor of batch_norm. Default: None.
@@ -433,7 +433,7 @@ def instance_norm(
eps(float, optional): A value added to the denominator for numerical stability. Default is 1e-5.
momentum(float, optional): The value used for the moving_mean and moving_var computation. Default: 0.9.
use_input_stats(bool, optional): Default True. Obsolete (that is, no longer usable).
data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW" or "NCDHW". Defalut "NCHW".
data_format(str, optional): Specify the input data format, may be "NC", "NCL", "NCHW" or "NCDHW". Default "NCHW".
name(str, optional): Name for the InstanceNorm, default is None. For more information, please refer to :ref:`api_guide_Name`..

Returns:
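A small functional batch_norm sketch matching the corrected parameter descriptions (tensor shapes here are assumptions for illustration):

```python
import paddle
import paddle.nn.functional as F

# NCHW input with C = 3 channels.
x = paddle.randn([4, 3, 8, 8])
running_mean = paddle.zeros([3])
running_var = paddle.ones([3])
weight = paddle.ones([3])
bias = paddle.zeros([3])

# Inference-mode normalization using the given running statistics.
out = F.batch_norm(
    x, running_mean, running_var, weight, bias,
    training=False, momentum=0.9, epsilon=1e-5, data_format="NCHW",
)
print(out.shape)  # [4, 3, 8, 8]
```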
16 changes: 8 additions & 8 deletions python/paddle/nn/functional/pooling.py
@@ -171,7 +171,7 @@ def _expand_low_nd_padding(padding):
padding = [0] + padding
else:
raise ValueError(
f"The size of padding's dimmention should be 1 or 2. But got padding={padding}"
f"The size of padding's dimension should be 1 or 2. But got padding={padding}"
)
return padding

@@ -250,7 +250,7 @@ def avg_pool1d(
padding, 1, channel_last=channel_last, ceil_mode=ceil_mode
)

# use 2d to implenment 1d should expand padding in advance.
# use 2d to implement 1d should expand padding in advance.
padding = _expand_low_nd_padding(padding)

if in_dynamic_or_pir_mode():
@@ -566,7 +566,7 @@ def max_pool1d(
name=None,
):
"""
This API implements max pooling 1d opereation.
This API implements max pooling 1d operation.
See more details in :ref:`api_paddle_nn_MaxPool1d` .

Args:
@@ -745,7 +745,7 @@ def max_unpool1d(
name=None,
):
r"""
This API implements max unpooling 1d opereation.
This API implements max unpooling 1d operation.
`max_unpool1d` accepts the output of `max_pool1d` as input,
including the indices of the maximum value and calculate the partial inverse.
All non-maximum values are set to zero.
@@ -767,7 +767,7 @@ def max_unpool1d(
indices (Tensor): The indices given out by maxpooling1d which is a 3-D tensor with
shape [N, C, L]. The format of input tensor is `"NCL"` ,
where `N` is batch size, `C` is the number of channels, `L` is
the length of the featuree. The data type is float32 or float64.
the length of the feature. The data type is float32 or float64.
kernel_size (int|list|tuple): The unpool kernel size. If unpool kernel size is a tuple or list,
it must contain an integer.
stride (int|list|tuple): The unpool stride size. If unpool stride size is a tuple or list,
@@ -880,7 +880,7 @@ def max_unpool2d(
name=None,
):
r"""
This API implements max unpooling 2d opereation.
This API implements max unpooling 2d operation.
See more details in :ref:`api_paddle_nn_MaxUnPool2D` .


@@ -924,7 +924,7 @@ def max_unpool2d(

Raises:
ValueError: If the input is not a 4-D tensor.
ValueError: If indeces shape is not equal input shape.
ValueError: If indices shape is not equal input shape.


Examples:
@@ -1030,7 +1030,7 @@ def max_unpool3d(
name=None,
):
r"""
This API implements max unpooling 3d opereation.
This API implements max unpooling 3d operation.
`max_unpool3d` accepts the output of `max_pool3d` as input,
including the indices of the maximum value and calculate the partial inverse.
All non-maximum values are set to zero.
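Because max_unpool1d consumes the indices returned by max_pool1d, a short round-trip sketch may help (3-D NCL input, as the corrected docstring describes; shapes are assumptions):

```python
import paddle
import paddle.nn.functional as F

x = paddle.randn([1, 3, 16])  # NCL

# return_mask=True also returns the argmax indices needed for unpooling.
pooled, indices = F.max_pool1d(x, kernel_size=2, stride=2, return_mask=True)

# Partial inverse: non-maximum positions are filled with zeros.
restored = F.max_unpool1d(pooled, indices, kernel_size=2, stride=2)
print(pooled.shape, restored.shape)  # [1, 3, 8] [1, 3, 16]
```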
10 changes: 5 additions & 5 deletions python/paddle/nn/functional/vision.py
@@ -349,7 +349,7 @@ def grid_sample(
def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
"""
This API implements pixel shuffle operation.
See more details in :ref:`PixelSuffle <api_paddle_nn_PixelShuffle>` .
See more details in :ref:`PixelShuffle <api_paddle_nn_PixelShuffle>` .


Parameters:
@@ -378,7 +378,7 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'."
f"But recevie Attr(data_format): {data_format} "
f"But receive Attr(data_format): {data_format} "
)
if in_dygraph_mode():
return _C_ops.pixel_shuffle(x, upscale_factor, data_format)
@@ -403,7 +403,7 @@ def pixel_shuffle(x, upscale_factor, data_format="NCHW", name=None):
def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None):
"""
This API implements pixel unshuffle operation.
See more details in :ref:`PixelUnSuffle <api_paddle_nn_PixelUnshuffle>` .
See more details in :ref:`PixelUnShuffle <api_paddle_nn_PixelUnshuffle>` .

Parameters:
x (Tensor): 4-D tensor, the data type should be float32 or float64.
@@ -438,7 +438,7 @@ def pixel_unshuffle(x, downscale_factor, data_format="NCHW", name=None):
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'."
f"But recevie Attr(data_format): {data_format} "
f"But receive Attr(data_format): {data_format} "
)

if in_dygraph_mode():
@@ -516,7 +516,7 @@ def channel_shuffle(x, groups, data_format="NCHW", name=None):
if data_format not in ["NCHW", "NHWC"]:
raise ValueError(
"Attr(data_format) should be 'NCHW' or 'NHWC'."
f"But recevie Attr(data_format): {data_format} "
f"But receive Attr(data_format): {data_format} "
)

if in_dynamic_or_pir_mode():
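A brief sketch of pixel_shuffle and its inverse in the NCHW layout the error messages above validate (shapes chosen only for illustration):

```python
import paddle
import paddle.nn.functional as F

# NCHW: the channel count must be divisible by upscale_factor ** 2.
x = paddle.randn([2, 9, 4, 4])

up = F.pixel_shuffle(x, upscale_factor=3)         # -> [2, 1, 12, 12]
down = F.pixel_unshuffle(up, downscale_factor=3)  # -> back to [2, 9, 4, 4]
print(up.shape, down.shape)
```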
2 changes: 1 addition & 1 deletion python/paddle/nn/initializer/initializer.py
@@ -168,5 +168,5 @@ def calculate_gain(nonlinearity, param=None):
return recommended_gain[nonlinearity]
else:
raise ValueError(
f"nonlinearity function {nonlinearity} is not suppported now."
f"nonlinearity function {nonlinearity} is not supported now."
)
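For completeness, a short sketch of querying calculate_gain, whose error message is corrected above (the set of accepted nonlinearity names is assumed, not exhaustively listed):

```python
import paddle

# Recommended gain factors used when scaling weight initializers.
print(paddle.nn.initializer.calculate_gain('relu'))             # sqrt(2) ~= 1.414
print(paddle.nn.initializer.calculate_gain('leaky_relu', 0.2))  # sqrt(2 / (1 + 0.2**2))

# An unrecognized name raises the (now correctly spelled) ValueError.
```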
2 changes: 1 addition & 1 deletion python/paddle/tensor/einsum.py
@@ -143,7 +143,7 @@ def build_view(in_labels, out_labels):
Returns
-------
The inverse map from out_labels to in_labels. The length of the inverse map equals that of
out_labels. -1 is filled if there's no matching intput dimension for a specific label.
out_labels. -1 is filled if there's no matching input dimension for a specific label.

Examples
--------
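To make the corrected docstring concrete, a hypothetical stand-alone sketch of the inverse label map it describes; this is an illustration only, not the actual build_view implementation:

```python
def inverse_label_map(in_labels: str, out_labels: str) -> list:
    # For each output label, record the index of the matching input label,
    # or -1 when there is no matching input dimension.
    return [in_labels.find(label) for label in out_labels]

# 'ik' against input labels 'ij': 'i' maps to position 0, 'k' has no match.
print(inverse_label_map("ij", "ik"))  # [0, -1]
```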