2022-03-28 11:33:07 +08:00
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
2020-03-23 22:01:54 +08:00
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
2024-06-05 17:15:45 +08:00
from __future__ import annotations
2022-11-28 11:52:40 +08:00
2024-12-19 23:51:31 +08:00
import builtins
2022-04-13 18:20:54 +08:00
import math
2025-08-19 12:03:56 +08:00
import numbers
2026-02-25 16:32:17 +08:00
import os
2022-08-24 11:57:30 +08:00
import re
2024-12-06 16:55:30 +08:00
import warnings
2025-06-12 11:47:50 +08:00
from typing import TYPE_CHECKING , overload
2022-11-28 11:52:40 +08:00
import numpy as np
import paddle
2023-02-16 15:50:58 +08:00
from paddle import _C_ops
2025-12-22 10:38:30 +08:00
from paddle . _C_ops import diag , tril , triu # noqa: F401
2025-08-20 10:58:35 +08:00
from paddle . utils import deprecated
2025-08-22 14:23:09 +08:00
from paddle . utils . decorator_utils import (
param_one_alias ,
param_two_alias ,
2025-08-23 00:31:21 +08:00
size_args_decorator ,
2025-08-22 14:23:09 +08:00
)
【inplace api】batch add inplace api paddle.log_, paddle.i0_, paddle.nn.functional.leaky_relu_... (#55576)
* batch add inplace api
* add inplace test
* add activation inplace
* fix test
* remove atan2 ge, gt, le, lt, nq
* remove atan2 ge, gt, le, lt, nq
* fix windows ci error
* rerun ci
* fix typro
* fix bugs
---------
Co-authored-by: zhangrui34 <v_zhangrui34@baidu.com>
2023-07-27 15:33:52 +08:00
from paddle . utils . inplace_utils import inplace_apis_in_dygraph_only
2022-11-28 11:52:40 +08:00
2023-09-07 17:26:19 +08:00
from . . base . data_feeder import (
2022-10-23 20:01:27 +08:00
check_dtype ,
2022-11-28 11:52:40 +08:00
check_type ,
check_variable_and_dtype ,
2022-10-23 20:01:27 +08:00
convert_dtype ,
)
2023-09-07 17:26:19 +08:00
from . . base . framework import Variable , device_guard
from . . base . param_attr import ParamAttr
2022-11-28 11:52:40 +08:00
from . . framework import (
LayerHelper ,
_current_expected_place ,
2023-10-18 15:17:01 +08:00
_current_expected_place_ ,
2022-11-28 11:52:40 +08:00
_get_paddle_place ,
convert_np_dtype_to_dtype_ ,
core ,
2023-09-22 11:14:48 +08:00
dygraph_only ,
2023-05-22 20:56:38 +08:00
in_dynamic_mode ,
2023-09-16 15:30:42 +08:00
in_dynamic_or_pir_mode ,
2023-09-12 18:53:30 +08:00
in_pir_mode ,
2022-11-28 11:52:40 +08:00
)
2020-05-15 07:29:59 +08:00
2024-06-28 20:26:51 +08:00
if TYPE_CHECKING :
2024-08-10 12:05:32 +08:00
from collections . abc import Sequence
[API Compatibilities] Add eq, ne, not_equal, lt, less_than, le, less_equal, gt, greater, ge, greater_equal, Tensor.eq, Tensor.ne, Tensor.not_equal, Tensor.lt, Tensor.less, Tensor.le, Tensor.less_equal, Tensor.gt, Tensor.greater, Tensor.ge, Tensor.greater, from_numpy (#75206)
* See pr
* Refine typo, add todo before gt, add new api from_numpy
* Refine typo, add todo before gt, add new api from_numpy
* Refine on comments
* Implement less as alias for less_than
* Add type hint for out and type check in numpy
* Remove gt for old ir
* fix typing parse error and fix example code
---------
Co-authored-by: SigureMo <sigure.qaq@gmail.com>
2025-09-16 19:36:11 +08:00
from typing import Any
from numpy . typing import NDArray
2024-08-10 12:05:32 +08:00
2024-06-29 14:02:32 +08:00
from paddle . _typing import (
DTypeLike ,
2024-11-05 10:55:10 +08:00
NestedNumericSequence ,
Numeric ,
2024-06-29 14:02:32 +08:00
ParamAttrLike ,
PlaceLike ,
ShapeLike ,
TensorLike ,
)
2024-06-28 20:26:51 +08:00
2021-04-29 19:31:40 +08:00
__all__ = [ ]
2025-08-20 13:31:31 +08:00
_warned_in_tensor = False
2024-12-24 10:48:03 +08:00
2020-04-02 16:40:36 +08:00
2024-06-17 13:25:16 +08:00
def _complex_to_real_dtype(dtype: DTypeLike) -> DTypeLike:
    """Map a complex dtype to its real-valued counterpart.

    Handles both legacy ``VarDesc`` dtypes and PIR ``DataType`` values.
    Any dtype that is not complex is returned unchanged.
    """
    # (complex dtype, matching real dtype) pairs for both dtype systems.
    conversion_table = (
        (core.VarDesc.VarType.COMPLEX64, core.VarDesc.VarType.FP32),
        (core.VarDesc.VarType.COMPLEX128, core.VarDesc.VarType.FP64),
        (paddle.pir.core.DataType.COMPLEX64, paddle.pir.core.DataType.FLOAT32),
        (paddle.pir.core.DataType.COMPLEX128, paddle.pir.core.DataType.FLOAT64),
    )
    for complex_dtype, real_dtype in conversion_table:
        if dtype == complex_dtype:
            return real_dtype
    # Not a complex dtype: pass through untouched.
    return dtype
2024-06-17 13:25:16 +08:00
def _real_to_complex_dtype(dtype: DTypeLike) -> DTypeLike:
    """Map a real floating dtype to its complex counterpart.

    Handles both legacy ``VarDesc`` dtypes and PIR ``DataType`` values.
    Any dtype that has no complex counterpart is returned unchanged.
    """
    # (real dtype, matching complex dtype) pairs for both dtype systems.
    conversion_table = (
        (core.VarDesc.VarType.FP32, core.VarDesc.VarType.COMPLEX64),
        (core.VarDesc.VarType.FP64, core.VarDesc.VarType.COMPLEX128),
        (paddle.pir.core.DataType.FLOAT32, paddle.pir.core.DataType.COMPLEX64),
        (paddle.pir.core.DataType.FLOAT64, paddle.pir.core.DataType.COMPLEX128),
    )
    for real_dtype, complex_dtype in conversion_table:
        if dtype == real_dtype:
            return complex_dtype
    # No complex counterpart (e.g. integer dtypes): pass through untouched.
    return dtype
2022-12-09 14:09:06 +08:00
def create_global_var(
    shape: ShapeLike,
    value: float,
    dtype: DTypeLike,
    persistable: bool = False,
    force_cpu: bool = False,
    name: str | None = None,
) -> paddle.Tensor:
    """
    This function creates a new tensor variable with value in the global block(block 0).

    Args:
        shape (list[int]|tuple[int]): Shape of the variable
        value (float): The value of the variable. The new created
                      variable will be filled with it.
        dtype (str): Data type of the variable
        persistable (bool, optional): If this variable is persistable.
                           Default: False
        force_cpu (bool, optional): Force this variable to be on CPU.
                         Default: False
        name (str|None, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name is no need to set and None by default.

    Returns:
        Variable: The created Variable

    Examples:
        .. code-block:: pycon

            >>> # doctest: +SKIP("paddle.static.create_global_var doesn't support PIR mode")
            >>> import paddle
            >>> paddle.enable_static()
            >>> main_program = paddle.static.Program()
            >>> startup_program = paddle.static.Program()
            >>> with paddle.static.program_guard(main_program, startup_program):
            ...     var = paddle.static.create_global_var(
            ...         shape=[2, 3],
            ...         value=1.0,
            ...         dtype="float32",
            ...         persistable=True,
            ...         force_cpu=True,
            ...     )
            >>> var.shape
            (2, 3)
    """
    # Validate the shape container first, then each dimension entry; numpy
    # integer scalars are accepted alongside plain ints.
    check_type(shape, 'shape', (list, tuple, np.ndarray), 'create_global_var')
    for item in shape:
        check_type(
            item,
            'item of shape',
            (
                int,
                np.uint8,
                np.int8,
                np.int16,
                np.int32,
                np.int64,
            ),
            'create_global_var',
        )

    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
            'uint8',
            'uint16',
        ],
        'create_global_var',
    )

    # NOTE: **locals() forwards the user arguments (shape, value, dtype, ...)
    # to LayerHelper; do not introduce new locals above this line lightly.
    helper = LayerHelper("global_var", **locals())
    # The variable lives in the global block and never needs gradients.
    var = helper.create_global_variable(
        dtype=dtype,
        shape=shape,
        persistable=persistable,
        name=name,
        stop_gradient=True,
    )
    # Fill the variable with a constant; float(value) normalizes int inputs.
    helper.set_variable_initializer(
        var,
        initializer=paddle.nn.initializer.ConstantInitializer(
            value=float(value), force_cpu=force_cpu
        ),
    )

    return var
2022-12-02 12:59:26 +08:00
def create_parameter(
    shape: ShapeLike,
    dtype: DTypeLike,
    name: str | None = None,
    attr: ParamAttrLike | None = None,
    is_bias: bool = False,
    default_initializer: paddle.nn.initializer.Initializer | None = None,
) -> paddle.Tensor:
    """
    This function creates a parameter. The parameter is a learnable variable, which can have
    gradient, and can be optimized.

    Note:
        This is a very low-level API. This API is useful when you create operator by your self, instead of using layers.

    Args:
        shape (list of int): Shape of the parameter
        dtype (str): Data type of the parameter. It can be set as 'float16', 'float32', 'float64'.
        name(str|None, optional): For detailed information, please refer to
           :ref:`api_guide_Name` . Usually name is no need to set and None by default.
        attr (ParamAttr|None, optional): Attribute object of the specified argument. For detailed information, please refer to
           :ref:`api_paddle_ParamAttr` None by default, which means that ParamAttr will be initialized as it is.
        is_bias (bool, optional): This can affect which default initializer is chosen
                       when default_initializer is None. If is_bias,
                       initializer.Constant(0.0) will be used. Otherwise,
                       Xavier() will be used.
        default_initializer (Initializer|None, optional): Initializer for the parameter

    Returns:
        The created parameter.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> W = paddle.create_parameter(shape=[784, 200], dtype='float32')
    """
    # Validate the shape container first, then each dimension entry; numpy
    # integer scalars are accepted alongside plain ints.
    check_type(shape, 'shape', (list, tuple, np.ndarray), 'create_parameter')
    for item in shape:
        check_type(
            item,
            'item of shape',
            (
                int,
                np.uint8,
                np.int8,
                np.int16,
                np.int32,
                np.int64,
            ),
            'create_parameter',
        )
    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'uint16',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
            'uint8',
        ],
        'create_parameter',
    )
    check_type(attr, 'attr', (type(None), ParamAttr), 'create_parameter')
    check_type(
        default_initializer,
        'default_initializer',
        (type(None), paddle.nn.initializer.Initializer),
        'create_parameter',
    )

    # NOTE: **locals() forwards the user arguments to LayerHelper; keep the
    # helper construction before `attr` is rebound so the original value is
    # what gets captured.
    helper = LayerHelper("create_parameter", **locals())
    if attr is None:
        # Default parameter attribute, optionally carrying the user's name.
        attr = ParamAttr(name=name)
    return helper.create_parameter(
        attr, shape, convert_dtype(dtype), is_bias, default_initializer
    )
2024-06-17 13:25:16 +08:00
def create_tensor(
    dtype: DTypeLike, name: str | None = None, persistable: bool = False
) -> paddle.Tensor:
    """
    Create a variable, which will hold a Tensor with data type dtype.

    Args:
        dtype(str|paddle.dtype|np.dtype, optional): the data type of Tensor to be created, the
            data type is bool, float16, float32, float64, int8, int16, int32 and int64.
        name(string, optional): The default value is None.  Normally there is no need for
            user to set this property.  For more information, please refer to :ref:`api_guide_Name`
        persistable(bool): Set the persistable flag of the create tensor.
            default value is False.

    Returns:
        Variable: The tensor to be created according to dtype.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> tensor = paddle.tensor.create_tensor(dtype='float32')
    """
    # BUGFIX: the allow-list previously contained 'int32' twice and omitted
    # 'int16', even though the docstring documents int16 as supported.
    check_dtype(
        dtype,
        'dtype',
        [
            'bool',
            'float16',
            'float32',
            'float64',
            'int8',
            'int16',
            'int32',
            'int64',
        ],
        'create_tensor',
    )
    # **locals() forwards the user arguments (dtype, name, persistable) to
    # LayerHelper, which also generates a unique variable name.
    helper = LayerHelper("create_tensor", **locals())
    return helper.create_variable(
        name=helper.name, dtype=dtype, persistable=persistable
    )
2025-11-18 16:01:28 +08:00
@param_two_alias(["stop", "end"], ["num", "steps"])
def linspace(
    start: float | paddle.Tensor,
    stop: float | paddle.Tensor,
    num: int | paddle.Tensor,
    dtype: DTypeLike | None = None,
    name: str | None = None,
    *,
    out: paddle.Tensor | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
) -> paddle.Tensor:
    r"""
    Return fixed number of evenly spaced values within a given interval. Note: no gradient calculation is performed.

    Args:
        start(int|float|Tensor): The input :attr:`start` is start of range. It is a int, float, \
            or a 0-D Tensor with data type int32, int64, float32 or float64.
        stop(int|float|Tensor): The input :attr:`stop` is end of range. It is a int, float, \
            or a 0-D Tensor with data type int32, int64, float32 or float64.
        num(int|Tensor): The input :attr:`num` is given num of the sequence. It is an int, \
            or a 0-D Tensor with data type int32.
        dtype(str|paddle.dtype|np.dtype|None, optional): The data type of output tensor, it could be
            int32, int64, float32 and float64. Default: if None, the data type is float32.
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        out(Tensor|None, optional): Optional output tensor. If provided, the result will be stored in this tensor. \
            The tensor must have the correct shape and dtype. Default: None.
        device(str|paddle.CUDAPlace|paddle.CPUPlace|None, optional): The device where the output tensor will be placed. \
            It can be a string (e.g., 'cpu', 'gpu:0'), a paddle.CUDAPlace, or a paddle.CPUPlace object. \
            If None, the current device context will be used. Default: None.
        requires_grad(bool, optional): Whether the output tensor should have gradient computation enabled. \
            If True, the output tensor's ``stop_gradient`` attribute will be set to False. Default: False.

    Returns:
        Tensor: the output data type will be float32, float64. The 1-D tensor with fixed number of evenly spaced values, \
        the data shape of this tensor is :math:`[num]` . If the :attr:`num` is set 1, the output tensor just has \
        the value with input :attr:`start`.

    .. note::
        **Alias Support:**
        - The parameter name ``end`` can be used as an alias for ``stop``. \
          For example, ``linspace(start=0, end=10, ...)`` is equivalent to ``linspace(start=0, stop=10, ...)``.
        - The parameter name ``steps`` can be used as an alias for ``num``. \
          For example, ``linspace(start=0, stop=10, steps=5)`` is equivalent to ``linspace(start=0, stop=10, num=5)``.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> data = paddle.linspace(0, 10, 5, 'float32')
            >>> print(data.numpy())
            [0. 2.5 5. 7.5 10.]
            >>> data = paddle.linspace(0, 10, 1, 'float32')
            >>> print(data.numpy())
            [0.]

            >>> # Using device parameter
            >>> data = paddle.linspace(0, 10, 5, device='cpu')
            >>> print(data.numpy())
            [0. 2.5 5. 7.5 10.]
            >>> # Using requires_grad parameter
            >>> data = paddle.linspace(0, 10, 5, requires_grad=True)
            >>> print(data.stop_gradient)
            False
    """
    if dtype is None:
        dtype = paddle.get_default_dtype()
    # Resolve the target place: honor an explicit `device`, otherwise fall
    # back to the current expected place.
    device = (
        _get_paddle_place(device)
        if device is not None
        else _current_expected_place()
    )
    # Scalar arguments are materialized as 1-element CPU tensors below;
    # tensor arguments are passed through unchanged.
    tensor_num = num
    tensor_start = start
    tensor_stop = stop
    if not isinstance(num, (Variable, paddle.pir.Value)):
        check_type(num, 'num', (int), 'linspace')
    if not isinstance(dtype, (core.VarDesc.VarType, paddle.pir.core.DataType)):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if not isinstance(start, (Variable, paddle.pir.Value)):
        with device_guard("cpu"):
            tensor_start = fill_constant([1], dtype, start, force_cpu=True)
    if not isinstance(stop, (Variable, paddle.pir.Value)):
        with device_guard("cpu"):
            tensor_stop = fill_constant([1], dtype, stop, force_cpu=True)
    if not isinstance(num, (Variable, paddle.pir.Value)):
        with device_guard("cpu"):
            # `num` is always int32 regardless of the output dtype.
            tensor_num = fill_constant([1], 'int32', num, force_cpu=True)
    if in_dynamic_mode():
        # Dynamic graph: dispatch straight to the C++ op; `out` and `device`
        # are forwarded to the kernel.
        out_tensor = _C_ops.linspace(
            tensor_start,
            tensor_stop,
            tensor_num,
            dtype,
            device,
            out=out,
        )
        if requires_grad:
            out_tensor.stop_gradient = False
        return out_tensor
    elif in_pir_mode():
        helper = LayerHelper("linspace", **locals())
        start_dtype = convert_dtype(tensor_start.dtype)
        stop_dtype = convert_dtype(tensor_stop.dtype)
        out_dtype = convert_dtype(dtype)
        if isinstance(start, paddle.pir.Value):
            check_dtype(
                start.dtype,
                'start',
                ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
                'linspace',
            )
        else:
            check_type(start, 'start', (int, float), 'linspace')
        if isinstance(stop, paddle.pir.Value):
            check_dtype(
                stop.dtype,
                'stop',
                ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
                'linspace',
            )
        else:
            check_type(stop, 'stop', (int, float), 'linspace')
        if isinstance(num, paddle.pir.Value):
            check_dtype(num.dtype, 'num', ['int32', 'int64'], 'linspace')
        check_dtype(
            dtype,
            'dtype',
            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
            'linspace',
        )
        # Reject narrowing conversions that could overflow (e.g. float64
        # endpoints written to a float32/int32 output).
        if (
            (stop_dtype == "float64" or start_dtype == "float64")
            and out_dtype in ["float32", "int32"]
        ) or (
            (stop_dtype == "int64" or start_dtype == "int64")
            and out_dtype == "int32"
        ):
            raise ValueError(
                f"The dtype of start/stop is {start_dtype}/{stop_dtype} but the attr(dtype) of linspace is {dtype}, "
                "which may cause data type overflows. Please reset attr(dtype) of linspace."
            )
        # PIR requires a DataType, not a legacy VarType.
        if isinstance(dtype, paddle.base.core.VarDesc.VarType):
            dtype = paddle.pir.core.vartype_to_datatype[dtype]
        out_tensor = _C_ops.linspace(
            tensor_start,
            tensor_stop,
            tensor_num,
            dtype,
            device,
            out=out,
        )
        if requires_grad:
            out_tensor.stop_gradient = False
        return out_tensor
    else:
        # Legacy static graph: build a `linspace` op by hand.
        # NOTE(review): `out` is rebound below, so the keyword-only `out`,
        # `device` and `requires_grad` arguments are not applied on this
        # path — presumably intentional for old static mode; confirm.
        helper = LayerHelper("linspace", **locals())
        start_dtype = convert_dtype(tensor_start.dtype)
        stop_dtype = convert_dtype(tensor_stop.dtype)
        out_dtype = convert_dtype(dtype)
        if isinstance(start, Variable):
            check_dtype(
                start.dtype,
                'start',
                ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
                'linspace',
            )
        else:
            check_type(start, 'start', (int, float), 'linspace')

        if isinstance(stop, Variable):
            check_dtype(
                stop.dtype,
                'stop',
                ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
                'linspace',
            )
        else:
            check_type(stop, 'stop', (int, float), 'linspace')
        if isinstance(num, Variable):
            check_dtype(num.dtype, 'num', ['int32'], 'linspace')
        check_dtype(
            dtype,
            'dtype',
            ['float16', 'uint16', 'float32', 'float64', 'int32', 'int64'],
            'linspace',
        )
        # Same narrowing-overflow guard as the PIR branch.
        if (
            (stop_dtype == "float64" or start_dtype == "float64")
            and out_dtype in ["float32", "int32"]
        ) or (
            (stop_dtype == "int64" or start_dtype == "int64")
            and out_dtype == "int32"
        ):
            raise ValueError(
                f"The dtype of start/stop is {start_dtype}/{stop_dtype} but the attr(dtype) of linspace is {dtype}, "
                "which may cause data type overflows. Please reset attr(dtype) of linspace."
            )

        out = helper.create_variable_for_type_inference(dtype=dtype)

        helper.append_op(
            type='linspace',
            inputs={
                'Start': tensor_start,
                'Stop': tensor_stop,
                'Num': tensor_num,
            },
            attrs={'dtype': dtype},
            outputs={'Out': [out]},
        )
        # Static shape is only known when `num` is a plain int.
        if isinstance(num, int):
            out.desc.set_shape((num,))
        return out
2022-04-13 18:20:54 +08:00
2024-06-17 13:25:16 +08:00
def logspace(
    start: float | paddle.Tensor,
    stop: float | paddle.Tensor,
    num: int | paddle.Tensor,
    base: float | paddle.Tensor = 10.0,
    dtype: DTypeLike | None = None,
    name: str | None = None,
) -> paddle.Tensor:
    r"""
    Return fixed number of logarithmically-evenly spaced values within the interval \
    :math:`[base^{start}, base^{stop}]`.

    Notes:
        This API does not compute the gradient.

    Args:
        start(int|float|Tensor): The input :attr:`start` is exponent of first entry in \
            the sequence. It is a scalar, or a 0-D Tensor of shape [] with input data \
            type int32, int64, float32 or float64.
        stop(int|float|Tensor): The input :attr:`stop` is exponent of last entry in the \
            sequence. It is a scalar, or a 0-D Tensor of shape [] with input data \
            type int32, int64, float32 or float64.
        num(int|Tensor): The input :attr:`num` is given number of items in the sequence. \
            It is an int scalar, or a 0-D Tensor of shape [] with data type int32.
        base(int|float|Tensor): The input :attr:`base` is base of the logarithm function. \
            It is a scalar, or a 0-D Tensor of shape [] with input data type int32, int64, \
            float32 or float64.
        dtype(str|paddle.dtype|np.dtype, optional): The data type of output tensor, it could be \
            int32, int64, float32 or float64. Default: if None, the data type is float32. \
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: The output data type will be float32, float64. The 1-D tensor with \
        fixed number of logarithmically-evenly spaced values, the data shape of this \
        tensor is :math:`[num]`. If the :attr:`num` is set 1, the output tensor \
        just has the value with exponential of :attr:`start` with base :attr:`base`.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> data = paddle.logspace(0, 10, 5, 2, 'float32')
            >>> print(data.numpy())
            [1.0000000e+00 5.6568542e+00 3.2000000e+01 1.8101933e+02 1.0240000e+03]
            >>> data = paddle.logspace(0, 10, 1, 2, 'float32')
            >>> print(data.numpy())
            [1.]
    """
    if dtype is None:
        dtype = paddle.get_default_dtype()
    # Scalar arguments are materialized as 1-element CPU tensors below;
    # tensor arguments are passed through unchanged.
    tensor_num = num
    tensor_start = start
    tensor_stop = stop
    tensor_base = base
    if not isinstance(num, (Variable, paddle.pir.Value)):
        check_type(num, 'num', (int), 'logspace')
    if not isinstance(dtype, (core.VarDesc.VarType, core.DataType)):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if not isinstance(start, (Variable, paddle.pir.Value)):
        with device_guard("cpu"):
            tensor_start = fill_constant([1], dtype, start)
    if not isinstance(stop, (Variable, paddle.pir.Value)):
        with device_guard("cpu"):
            tensor_stop = fill_constant([1], dtype, stop)
    if not isinstance(num, (Variable, paddle.pir.Value)):
        with device_guard("cpu"):
            # `num` is always int32 regardless of the output dtype.
            tensor_num = fill_constant([1], 'int32', num)
    if not isinstance(base, (Variable, paddle.pir.Value)):
        with device_guard("cpu"):
            tensor_base = fill_constant([1], dtype, base)
    if in_dynamic_mode():
        # Dynamic graph: dispatch straight to the C++ op.
        return _C_ops.logspace(
            tensor_start,
            tensor_stop,
            tensor_num,
            tensor_base,
            dtype,
            _current_expected_place(),
        )
    elif in_pir_mode():
        start_dtype = convert_dtype(tensor_start.dtype)
        stop_dtype = convert_dtype(tensor_stop.dtype)
        base_dtype = convert_dtype(tensor_base.dtype)
        out_dtype = convert_dtype(dtype)
        # Reject narrowing conversions that could overflow (e.g. float64
        # inputs written to a float32/int32 output).
        if (
            (
                stop_dtype == "float64"
                or start_dtype == "float64"
                or base_dtype == "float64"
            )
            and out_dtype in ["float32", "int32"]
        ) or (
            (
                stop_dtype == "int64"
                or start_dtype == "int64"
                or base_dtype == "int64"
            )
            and out_dtype == "int32"
        ):
            raise ValueError(
                f"The dtype of start/stop/base is {start_dtype}/{stop_dtype}/{base_dtype} but the attr(dtype) of logspace is {dtype}, "
                "which may cause data type overflows. Please reset attr(dtype) of logspace."
            )
        if isinstance(num, paddle.pir.Value):
            check_dtype(num.dtype, 'num', ['int32'], 'logspace')
        return _C_ops.logspace(
            tensor_start,
            tensor_stop,
            tensor_num,
            tensor_base,
            dtype,
            _current_expected_place(),
        )
    else:
        # Legacy static graph: validate inputs and build a `logspace` op.
        helper = LayerHelper("logspace", **locals())

        start_dtype = convert_dtype(tensor_start.dtype)
        stop_dtype = convert_dtype(tensor_stop.dtype)
        base_dtype = convert_dtype(tensor_base.dtype)
        out_dtype = convert_dtype(dtype)
        if isinstance(start, Variable):
            check_dtype(
                start.dtype,
                'start',
                ['float32', 'float64', 'int32', 'int64'],
                'logspace',
            )
        else:
            check_type(start, 'start', (int, float), 'logspace')

        if isinstance(stop, Variable):
            check_dtype(
                stop.dtype,
                'stop',
                ['float32', 'float64', 'int32', 'int64'],
                'logspace',
            )
        else:
            check_type(stop, 'stop', (int, float), 'logspace')

        if isinstance(num, Variable):
            check_dtype(num.dtype, 'num', ['int32'], 'logspace')

        if isinstance(base, Variable):
            check_dtype(
                base.dtype,
                'base',
                ['float32', 'float64', 'int32', 'int64'],
                'logspace',
            )
        else:
            check_type(base, 'base', (int, float), 'logspace')

        check_dtype(
            dtype, 'dtype', ['int32', 'int64', 'float32', 'float64'], 'logspace'
        )
        # Same narrowing-overflow guard as the PIR branch.
        if (
            (
                stop_dtype == "float64"
                or start_dtype == "float64"
                or base_dtype == "float64"
            )
            and out_dtype in ["float32", "int32"]
        ) or (
            (
                stop_dtype == "int64"
                or start_dtype == "int64"
                or base_dtype == "int64"
            )
            and out_dtype == "int32"
        ):
            raise ValueError(
                f"The dtype of start/stop/base is {start_dtype}/{stop_dtype}/{base_dtype} but the attr(dtype) of logspace is {dtype}, "
                "which may cause data type overflows. Please reset attr(dtype) of logspace."
            )

        out = helper.create_variable_for_type_inference(dtype=dtype)

        helper.append_op(
            type='logspace',
            inputs={
                'Start': tensor_start,
                'Stop': tensor_stop,
                'Num': tensor_num,
                'Base': tensor_base,
            },
            attrs={'dtype': dtype},
            outputs={'Out': [out]},
        )
        # Static shape is only known when `num` is a plain int.
        if isinstance(num, int):
            out.desc.set_shape((num,))
        return out
2022-04-20 17:42:50 +08:00
2024-06-17 13:25:16 +08:00
def _to_tensor_non_static(
    data: TensorLike,
    dtype: DTypeLike | None = None,
    place: PlaceLike | None = None,
    stop_gradient: bool = True,
) -> paddle.Tensor:
    """Build an eager (dygraph) ``paddle.Tensor`` from ``data``.

    Normalizes ``data`` (scalar / list / tuple / np.ndarray / paddle.Tensor /
    core.DenseTensor) into either a numpy array or a Tensor, resolves the
    target dtype, and materializes the result on ``place``.

    Args:
        data: Source value to convert.
        dtype: Desired dtype; None means infer from ``data`` using the
            default-dtype rules below.
        place: Target device for the new tensor.
        stop_gradient: Whether the result blocks gradient propagation.

    Returns:
        paddle.Tensor: A new tensor holding ``data``.
    """

    def _handle_tensor_dtype(
        tensor: paddle.Tensor, dtype: DTypeLike
    ) -> paddle.Tensor:
        # Cast only when a target dtype is given and differs from the
        # tensor's current dtype; otherwise return the tensor unchanged.
        if dtype:
            if convert_dtype(dtype) != convert_dtype(tensor.dtype):
                return tensor.astype(convert_dtype(dtype))
        return tensor

    if isinstance(data, np.number):  # Special case for numpy scalars
        data = np.array(data)

    if not isinstance(data, np.ndarray):
        if np.isscalar(data) and not isinstance(data, str):
            data = np.array(data)
        elif isinstance(data, (list, tuple)):
            # Detect Tensor elements so their dtype can be preserved below.
            has_tensor = False
            for d in data:
                if isinstance(d, paddle.Tensor):
                    has_tensor = True
                    break
            if has_tensor:
                if (
                    len(data) == 1
                    and isinstance(data[0], paddle.Tensor)
                    and data[0].dtype == paddle.bfloat16
                ):
                    # Single bfloat16 tensor in a list: go through .numpy()
                    # explicitly — presumably because np.array() cannot
                    # represent bfloat16 directly; confirm.
                    data = np.array([data[0].numpy()])
                else:
                    data = np.array(data)
                # Keep the element tensors' dtype when the caller gave none.
                if not dtype:
                    dtype = data.dtype
            else:
                data = np.array(data)
            if data.dtype == np.object_:
                # object dtype means numpy could not build a regular array
                # (typically ragged nested lists).
                raise ValueError(
                    "\n\tFailed to convert input data to a regular ndarray :\n\t- Usually "
                    "this means the input data contains nested lists with different lengths. "
                )
        elif isinstance(data, paddle.Tensor):
            # Tensor input: copy to the target place, cast, and return early.
            data = data._copy_to(place, False)
            data = _handle_tensor_dtype(data, dtype)
            data.stop_gradient = stop_gradient
            return data
        elif isinstance(data, core.DenseTensor):
            # shouldn't expose it to users, just for internal use.
            # convert core.DenseTensor to Tensor first
            # Currently, there is no copy when places are same
            data = paddle.Tensor(data, place=place)
            data = _handle_tensor_dtype(data, dtype)
            data.stop_gradient = stop_gradient
            return data
        else:
            raise TypeError(
                f"Can't constructs a 'paddle.Tensor' with data type {type(data)}, data type must be scalar|list|tuple|np.ndarray|paddle.Tensor"
            )

    # dtype inference when the caller gave none: floating/complex inputs
    # follow the framework default dtype.
    if not dtype:
        if data.dtype in [
            'float16',
            'float32',
            'float64',
            'complex64',
            'complex128',
        ]:
            default_type = paddle.get_default_dtype()
            if np.iscomplexobj(data):
                # Pick the complex counterpart of the default float dtype.
                default_type = (
                    'complex64'
                    if default_type in ['float16', 'float32']
                    else 'complex128'
                )
            if convert_dtype(default_type) != convert_dtype(data.dtype):
                dtype = default_type
        # Windows default type is 'int32', while Linux/Mac is 'int64'. Unify they.
        if data.dtype in ['int32']:
            data = data.astype("int64")

    if dtype and convert_dtype(dtype) != convert_dtype(data.dtype):
        if convert_dtype(dtype) == 'uint16':
            # 'uint16' here is presumably paddle's bfloat16 storage dtype,
            # which numpy cannot cast to directly — build the tensor first
            # and cast afterwards; confirm.
            tensor = core.eager.Tensor(
                value=data,
                place=place,
                persistable=False,
                zero_copy=False,
                name=None,
                stop_gradient=True,
            )
            tensor = tensor.astype(dtype)
            tensor.stop_gradient = stop_gradient
            return tensor
        else:
            data = data.astype(convert_dtype(dtype))

    if isinstance(data, np.ndarray):
        # Optional float32/complex64 downcast on iluvatar custom devices,
        # gated by the FLAG_FORCE_FLOAT32 environment variable.
        if core.is_compiled_with_custom_device(
            "iluvatar_gpu"
        ) and os.environ.get('FLAG_FORCE_FLOAT32', '').lower() in [
            '1',
            'true',
            'on',
        ]:
            import logging

            if data.dtype == np.float64:
                logging.warning(
                    "Input data type is float64 which is not supported on iluvatar gpu, we will forcibly set tensor dtype to float32!"
                )
                data = data.astype(np.float32)
            elif data.dtype == np.complex128:
                logging.warning(
                    "Input data type is complex128 which is not supported on iluvatar gpu, we will forcibly set tensor dtype to complex64!"
                )
                data = data.astype(np.complex64)
        if (
            data.dtype
            in [
                np.float32,
                np.float64,
                np.int32,
                np.int64,
                np.complex64,
                np.complex128,
            ]
            and data.size == 1
            and (
                isinstance(place, core.CUDAPlace)
                or (isinstance(place, core.Place) and place.is_gpu_place())
            )
        ):
            # Single-element ndarray destined for a GPU place: create the
            # value via paddle.full instead of copying the ndarray.
            ret = paddle.full(data.shape, data.reshape([1])[0], data.dtype)
            ret.stop_gradient = stop_gradient
            return ret
        else:
            return core.eager.Tensor(
                value=data,
                place=place,
                persistable=False,
                zero_copy=False,
                name=None,
                stop_gradient=stop_gradient,
            )
    else:
        return paddle.Tensor(
            value=data,
            place=place,
            persistable=False,
            zero_copy=False,
            stop_gradient=stop_gradient,
        )
def _to_tensor_static(
    data: TensorLike,
    dtype: DTypeLike | None = None,
    stop_gradient: bool = True,
) -> paddle.Tensor:
    """Build a static-graph Tensor from ``data``.

    Args:
        data: Source value; may already be a ``Variable`` / ``pir.Value``,
            or a scalar / list / tuple / numpy array.
        dtype: Target dtype; when None it is inferred from ``data`` (with
            platform defaults unified, see comments below).
        stop_gradient: Whether the resulting Tensor blocks gradients.

    Returns:
        paddle.Tensor: A static-graph Tensor holding ``data``.

    Raises:
        RuntimeError: If ``data`` has an unsupported type.
    """
    if isinstance(data, (Variable, paddle.pir.Value)):
        # Already a graph value: at most insert a cast.
        output = data
        if dtype is not None and dtype != data.dtype:
            output = paddle.cast(output, dtype)
    else:
        if isinstance(data, np.number):  # Special case for numpy scalars
            data = np.array(data)

        if not isinstance(data, np.ndarray):
            if np.isscalar(data) and not isinstance(data, str):
                data = np.array(data)
            elif isinstance(data, (list, tuple)):
                try:
                    '''
                    In numpy version >= 1.24.0, case like:
                        np.array([Variable, 1, 2])
                    is not supported, it will raise error (numpy returns an numpy array with dtype='object' in version <= 1.23.5)
                    Thus, process nested structure in except block
                    '''
                    array_data = np.array(data)
                    # for numpy version <= 1.23.5
                    if array_data.dtype == 'object':
                        raise RuntimeError("Numpy get dtype `object`.")
                    data = array_data
                # BUGFIX: was a bare `except:`, which also swallowed
                # BaseException (KeyboardInterrupt/SystemExit). `Exception`
                # still covers the deliberate RuntimeError above and numpy's
                # ValueError for ragged inputs (numpy >= 1.24).
                except Exception:
                    # Fallback for nested structures containing graph values:
                    # convert each element recursively and stack the results.
                    to_stack_list = [None] * len(data)
                    for idx, d in enumerate(data):
                        to_stack_list[idx] = _to_tensor_static(
                            d, dtype, stop_gradient
                        )
                    data = paddle.stack(to_stack_list)
            else:
                raise RuntimeError(
                    f"Do not support transform type `{type(data)}` to tensor"
                )
        # fix numpy default dtype
        if data.dtype in ['float16', 'float32', 'float64']:
            data = data.astype(paddle.get_default_dtype())
        # Windows default type is 'int32', while Linux/Mac is 'int64'. Unify they.
        elif data.dtype in ['int32']:
            data = data.astype("int64")

        # Resolve the dtype the final Tensor must have.
        if dtype:
            target_dtype = dtype
        elif hasattr(data, 'dtype') and data.dtype != 'object':
            target_dtype = data.dtype
        else:
            target_dtype = paddle.get_default_dtype()
        target_dtype = convert_dtype(target_dtype)

        # assign does not accept int16 input; widen before assigning.
        if data.dtype == "int16":
            data = data.astype("int32")
        output = assign(data)

        if convert_dtype(output.dtype) != target_dtype:
            output = paddle.cast(output, target_dtype)
    output.stop_gradient = stop_gradient
    return output
def tensor(
    data: TensorLike | NestedNumericSequence,
    dtype: DTypeLike | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor:
    r"""
    Constructs a ``paddle.Tensor`` from ``data``,
    which can be scalar, tuple, list, numpy\.ndarray, paddle\.Tensor.

    If the ``data`` is already a Tensor, copy will be performed and a new tensor is returned.
    If you only want to change the stop_gradient property, please call
    ``Tensor.stop_gradient = stop_gradient`` directly.

    .. code-block:: text

        We use the dtype conversion rules following this:
                  Keep dtype
        np.number ───────────► paddle.Tensor
                               (0-D Tensor)
                      default_dtype
        Python Number ───────────────► paddle.Tensor
                                       (0-D Tensor)
                   Keep dtype
        np.ndarray ───────────► paddle.Tensor

    Args:
        data(scalar|tuple|list|ndarray|Tensor): Initial data for the tensor.
            Can be a scalar, list, tuple, numpy\.ndarray, paddle\.Tensor.
        dtype(str|np.dtype, optional): The desired data type of returned tensor. Can be 'bool', 'float16',
            'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8',
            'complex64', 'complex128'. Default: None, infers dtype from ``data``
            except for python float number which gets dtype from ``get_default_type``.
        device(CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional): The place to allocate Tensor. Can be
            CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, means global place. If ``device`` is
            string, It can be ``cpu``, ``gpu:x`` and ``gpu_pinned``, where ``x`` is the index of the GPUs.
        requires_grad(bool, optional): Whether autograd should record operations on the returned
            tensor (the tensor's ``stop_gradient`` is set to ``not requires_grad``). Default: False.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory.
            Works only for CPU tensors. Default: False.

    Returns:
        Tensor: A Tensor constructed from ``data``.

    Examples:
        .. code-block:: pycon

            >>> # type: ignore
            >>> import paddle
            >>> type(paddle.tensor(1))
            <class 'paddle.Tensor'>
            >>> paddle.tensor(1)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            1)
            >>> x = paddle.tensor(1, requires_grad=True)
            >>> print(x)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=False,
            1)
            >>> paddle.tensor(x)  # A new tensor will be created with default stop_gradient=True
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            1)
            >>> paddle.tensor([[0.1, 0.2], [0.3, 0.4]], device=paddle.CPUPlace(), requires_grad=True)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[0.10000000, 0.20000000],
             [0.30000001, 0.40000001]])
            >>> type(paddle.tensor([[1 + 1j, 2], [3 + 2j, 4]], dtype='complex64'))
            <class 'paddle.Tensor'>
            >>> paddle.tensor([[1 + 1j, 2], [3 + 2j, 4]], dtype='complex64')
            Tensor(shape=[2, 2], dtype=complex64, place=Place(cpu), stop_gradient=True,
            [[(1.00000000+1.00000000j), (2.00000000+0.00000000j)],
             [(3.00000000+2.00000000j), (4.00000000+0.00000000j)]])
    """
    # Map the requires_grad flag onto paddle's stop_gradient convention.
    stop_gradient = not requires_grad
    place = _get_paddle_place(device)
    if place is None:
        place = _current_expected_place_()
    # When pinned memory is requested, swap the resolved place for the
    # matching pinned-memory place (CUDA or XPU); other places cannot pin.
    if pin_memory and not isinstance(
        place, (core.CUDAPinnedPlace, core.XPUPinnedPlace)
    ):
        if isinstance(place, core.CUDAPlace):
            place = core.CUDAPinnedPlace()
        elif isinstance(place, core.XPUPlace):
            place = core.XPUPinnedPlace()
        else:
            raise RuntimeError(f"Pinning memory is not supported for {place}.")
    if in_dynamic_mode():
        is_tensor = paddle.is_tensor(data)
        if not is_tensor and hasattr(data, "__cuda_array_interface__"):
            # Interop path for objects exposing the CUDA Array Interface.
            # NOTE(review): ``dtype`` and ``stop_gradient`` are not applied
            # on this path — only pin_memory is honored; confirm intended.
            if not core.is_compiled_with_cuda():
                raise RuntimeError(
                    "PaddlePaddle is not compiled with CUDA, but trying to create a Tensor from a CUDA array."
                )
            tensor = core.tensor_from_cuda_array_interface(data)
            if pin_memory:
                tensor = tensor.pin_memory()
        else:
            if is_tensor:
                # Warn once per process about copy-constructing from a
                # tensor; the module-level flag suppresses repeats.
                global _warned_in_tensor
                if not _warned_in_tensor:
                    warnings.warn(
                        "To copy construct from a tensor, it is recommended to use sourceTensor.clone().detach(), "
                        "rather than paddle.to_tensor(sourceTensor).",
                        stacklevel=2,
                    )
                    _warned_in_tensor = True
            tensor = _to_tensor_non_static(data, dtype, place, stop_gradient)
        return tensor
    # call assign for static graph
    else:
        # Extract the device name from the textual form of the place,
        # e.g. "Place(gpu:0)" -> "gpu:0", for use with device_guard.
        re_exp = re.compile(r'[(](.+?)[)]', re.DOTALL)
        place_str = re.findall(re_exp, str(place))[0]
        with paddle.static.device_guard(place_str):
            tensor = _to_tensor_static(data, dtype, stop_gradient)
            return tensor
def to_tensor(
    data: TensorLike | NestedNumericSequence,
    dtype: DTypeLike | None = None,
    place: PlaceLike | None = None,
    stop_gradient: bool = True,
) -> paddle.Tensor:
    r"""
    Construct a ``paddle.Tensor`` from ``data``, which can be a scalar,
    tuple, list, numpy\.ndarray or paddle\.Tensor.

    A copy is always performed, even when ``data`` is already a Tensor. To
    change only the stop_gradient property of an existing tensor, assign
    ``Tensor.stop_gradient = stop_gradient`` directly instead.

    .. note::
        Alias Support: The parameter name ``device`` can be used as an alias for ``place``.
        For example, ``device=paddle.CUDAPlace(0)`` is equivalent to ``place=paddle.CUDAPlace(0)``.

    .. code-block:: text

        We use the dtype conversion rules following this:
                  Keep dtype
        np.number ───────────► paddle.Tensor
                               (0-D Tensor)
                      default_dtype
        Python Number ───────────────► paddle.Tensor
                                       (0-D Tensor)
                   Keep dtype
        np.ndarray ───────────► paddle.Tensor

    Args:
        data(scalar|tuple|list|ndarray|Tensor): Initial data for the tensor.
            Can be a scalar, list, tuple, numpy\.ndarray, paddle\.Tensor.
        dtype(str|paddle.dtype|np.dtype, optional): The desired data type of returned tensor. Can be 'bool', 'float16',
            'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8',
            'complex64', 'complex128'. Default: None, infers dtype from ``data``
            except for python float number which gets dtype from ``get_default_type``.
        place(CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional): The place to allocate Tensor. Can be
            CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, means global place. If ``place`` is
            string, It can be ``cpu``, ``gpu:x`` and ``gpu_pinned``, where ``x`` is the index of the GPUs.
        device: An alias for ``place``, with identical behavior.
        stop_gradient(bool, optional): Whether to block the gradient propagation of Autograd. Default: True.

    Returns:
        Tensor: A Tensor constructed from ``data``.

    Examples:
        .. code-block:: pycon

            >>> import paddle

            >>> type(paddle.to_tensor(1))
            <class 'paddle.Tensor'>

            >>> paddle.to_tensor(1)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            1)

            >>> x = paddle.to_tensor(1, stop_gradient=False)
            >>> print(x)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=False,
            1)

            >>> paddle.to_tensor(x)  # A new tensor will be created with default stop_gradient=True
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            1)

            >>> paddle.to_tensor([[0.1, 0.2], [0.3, 0.4]], place=paddle.CPUPlace(), stop_gradient=False)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[0.10000000, 0.20000000],
             [0.30000001, 0.40000001]])

            >>> type(paddle.to_tensor([[1 + 1j, 2], [3 + 2j, 4]], dtype='complex64'))
            <class 'paddle.Tensor'>

            >>> paddle.to_tensor([[1 + 1j, 2], [3 + 2j, 4]], dtype='complex64')
            Tensor(shape=[2, 2], dtype=complex64, place=Place(cpu), stop_gradient=True,
            [[(1.00000000+1.00000000j), (2.00000000+0.00000000j)],
             [(3.00000000+2.00000000j), (4.00000000+0.00000000j)]])
    """
    # Thin wrapper over paddle.tensor(): translate the legacy
    # (place, stop_gradient) arguments to (device, requires_grad).
    requires_grad = not stop_gradient
    return tensor(data, dtype=dtype, device=place, requires_grad=requires_grad)
def from_numpy(ndarray: NDArray[Any]) -> paddle.Tensor:
    """
    Creates a ``paddle.Tensor`` from a ``numpy.ndarray``.

    The returned Tensor and the input ``ndarray`` share the same underlying memory.
    Changes to the Tensor will be reflected in the ``ndarray`` and vice versa.

    Args:
        ndarray(numpy.ndarray): The numpy ndarray to be converted to a Tensor.

    Returns:
        Tensor: A Tensor that shares the same memory with the input ``ndarray``.

    Raises:
        TypeError: If ``ndarray`` is not a ``numpy.ndarray``.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> import numpy as np
            >>> np_data = np.array([1, 2, 3]).astype('int64')
            >>> tensor = paddle.from_numpy(np_data)
            >>> print(tensor)
            Tensor(shape=[3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [1, 2, 3])
    """
    if isinstance(ndarray, np.ndarray):
        # NOTE(review): the docstring promises memory sharing with the input
        # ndarray; whether ``tensor()`` actually avoids a copy here depends
        # on its conversion path — confirm before relying on aliasing.
        return tensor(ndarray)
    raise TypeError(
        f"The input type of from_numpy() must be numpy.ndarray, but received {type(ndarray)}. "
        "To convert other types to tensor, please use paddle.tensor() instead."
    )
def asarray(
    obj: TensorLike | NestedNumericSequence,
    *,
    dtype: DTypeLike | None = None,
    device: PlaceLike | None = None,
    copy: bool | None = None,
    requires_grad: bool = False,
):
    r"""
    Construct a ``paddle.Tensor`` from ``obj``, which can be a scalar,
    tuple, list, numpy\.ndarray or paddle\.Tensor.

    If ``obj`` is already a tensor, a copy is performed and a new tensor is
    returned.

    .. note::
        The ``copy`` parameter does not affect this api's behavior. Copy will
        always be performed if ``obj`` is a tensor.

    .. code-block:: text

        We use the dtype conversion rules following this:
                  Keep dtype
        np.number ───────────► paddle.Tensor
                               (0-D Tensor)
                      default_dtype
        Python Number ───────────────► paddle.Tensor
                                       (0-D Tensor)
                   Keep dtype
        np.ndarray ───────────► paddle.Tensor

    Args:
        obj(scalar|tuple|list|ndarray|Tensor): Initial data for the tensor.
            Can be a scalar, list, tuple, numpy\.ndarray, paddle\.Tensor.
        dtype(str|np.dtype, optional): The desired data type of returned tensor. Can be 'bool', 'float16',
            'float32', 'float64', 'int8', 'int16', 'int32', 'int64', 'uint8',
            'complex64', 'complex128'. Default: None, infers dtype from ``obj``
            except for python float number which gets dtype from ``get_default_type``.
        device(CPUPlace|CUDAPinnedPlace|CUDAPlace|str, optional): The place to allocate Tensor. Can be
            CPUPlace, CUDAPinnedPlace, CUDAPlace. Default: None, means global place. If ``device`` is
            string, It can be ``cpu``, ``gpu:x`` and ``gpu_pinned``, where ``x`` is the index of the GPUs.
        copy(bool, optional): This param is ignored and has no effect.
        requires_grad(bool, optional): Whether autograd should record operations on the
            returned tensor. Default: False.

    Returns:
        Tensor: A Tensor constructed from ``obj``.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> type(paddle.asarray(1))
            <class 'paddle.Tensor'>
            >>> paddle.asarray(1)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            1)
            >>> x = paddle.asarray(1, requires_grad=True)
            >>> print(x)
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=False,
            1)
            >>> paddle.asarray(x)  # A new tensor will be created with default stop_gradient=True
            Tensor(shape=[], dtype=int64, place=Place(cpu), stop_gradient=True,
            1)
            >>> paddle.asarray([[0.1, 0.2], [0.3, 0.4]], device=paddle.CPUPlace(), requires_grad=True)
            Tensor(shape=[2, 2], dtype=float32, place=Place(cpu), stop_gradient=False,
            [[0.10000000, 0.20000000],
             [0.30000001, 0.40000001]])
            >>> type(paddle.asarray([[1 + 1j, 2], [3 + 2j, 4]], dtype='complex64'))
            <class 'paddle.Tensor'>
            >>> paddle.asarray([[1 + 1j, 2], [3 + 2j, 4]], dtype='complex64')
            Tensor(shape=[2, 2], dtype=complex64, place=Place(cpu), stop_gradient=True,
            [[(1.00000000+1.00000000j), (2.00000000+0.00000000j)],
             [(3.00000000+2.00000000j), (4.00000000+0.00000000j)]])
    """
    # Delegate all conversion logic to paddle.tensor(); ``copy`` is
    # intentionally unused — a copy always happens for tensor inputs.
    result = tensor(
        data=obj,
        dtype=dtype,
        device=device,
        requires_grad=requires_grad,
    )
    return result
class MmapStorage(paddle.base.core.MmapStorage):
    """
    Memory-map a file so that slices of its raw bytes can be read as tensors.

    All of the mapping work is done by the C++ base class; this class only
    provides a typed Python-facing wrapper.

    Args:
        filename(str): the name of .safetensors file.
        nbytes(int): number of bytes to map into memory.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> shape = [4, 5]
            >>> dtype = paddle.float32
            >>> a = paddle.arange(4 * 5).reshape(shape).astype(dtype)
            >>> a.numpy().tofile("test.pp")
            >>> size = a.size * a.element_size()
            >>> t = paddle.MmapStorage("test.pp", size)
            >>> t.get_slice(dtype=dtype, start=0, stop=a.size).reshape(shape)
            Tensor(shape=[4, 5], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[0. , 1. , 2. , 3. , 4. ],
             [5. , 6. , 7. , 8. , 9. ],
             [10., 11., 12., 13., 14.],
             [15., 16., 17., 18., 19.]])
    """

    def __init__(self, filename: str, nbytes: int) -> None:
        super().__init__(filename, nbytes)

    def get_slice(
        self,
        dtype: DTypeLike | None = "uint8",
        start: int = 0,
        stop: int = -1,
        step: int = 1,
    ) -> paddle.Tensor:
        """
        Slice the tensor from the mmapped file.

        Args:
            dtype (DTypeLike | None): The data type used to interpret the
                mapped bytes. Default: "uint8".
            start (int): The start index of the slice. Default: 0.
            stop (int): The end index of the slice. Default: -1.
            step (int): The step size of the slice. Default: 1.

        Returns:
            Tensor: The sliced tensor.
        """
        # The C++ side expects a framework proto dtype, not a Python dtype.
        proto = paddle.base.framework.convert_to_proto_type(dtype)
        sliced: paddle.base.libpaddle.DenseTensor = super().get_slice(
            proto, start, stop, step
        )
        return sliced
@param_one_alias(["x", "input"])
def full_like(
    x: paddle.Tensor,
    fill_value: Numeric | str,
    dtype: DTypeLike | None = None,
    name: str | None = None,
    *,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor:
    """
    This function creates a tensor filled with ``fill_value`` which has identical shape of ``x`` and ``dtype``.
    If the ``dtype`` is None, the data type of Tensor is same with ``x``.

    .. note::
        Alias Support: The parameter name ``input`` can be used as an alias for ``x``.
        For example, ``full_like(input=tensor_x, ...)`` is equivalent to ``full_like(x=tensor_x, ...)``.

    Args:
        x(Tensor): The input tensor which specifies shape and data type. The data type can be bool, float16, float32, float64, int32, int64.
        fill_value(bool|float|int): The value to fill the tensor with. Note: this value shouldn't exceed the range of the output data type.
        dtype(str|paddle.dtype|np.dtype, optional): The data type of output. The data type can be one
            of bool, float16, float32, float64, int32, int64. The default value is None, which means the output
            data type is the same as input.
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False.

    Returns:
        Tensor: Tensor which is created according to ``x``, ``fill_value`` and ``dtype``.

    Examples:
        .. code-block:: pycon

            >>> import paddle

            >>> input = paddle.full(shape=[2, 3], fill_value=0.0, dtype='float32', name='input')
            >>> output = paddle.full_like(input, 2.0)
            >>> print(output.numpy())
            [[2. 2. 2.]
             [2. 2. 2.]]
    """
    # Include str type check to handle string numeric values like "0.5" that occur in CI tests.
    # The compatible method for fluid operators, may be it can be removed in the future.
    if not isinstance(
        fill_value,
        (numbers.Number, str, core.eager.Tensor, Variable, paddle.pir.Value),
    ):
        raise TypeError(
            f"The fill_value should be int, float, bool, complex, np.number, string numeric value or Tensor, but received {type(fill_value)}."
        )

    # Resolve the output dtype: default to x's dtype, otherwise normalize the
    # user-supplied value to a framework dtype.
    if dtype is None:
        dtype = x.dtype
    else:
        if not isinstance(dtype, (core.VarDesc.VarType, core.DataType)):
            dtype = convert_np_dtype_to_dtype_(dtype)
    if device is None:
        device = x.place

    if in_dynamic_or_pir_mode():
        # NOTE(review): ``device`` was already defaulted to ``x.place`` above,
        # so the ``_current_expected_place()`` fallback here only triggers if
        # ``x.place`` itself is None — confirm whether that can happen.
        device = (
            _get_paddle_place(device)
            if device is not None
            else _current_expected_place()
        )
        # Redirect the target place to pinned memory when requested,
        # mirroring CUDA/XPU places (including generic core.Place wrappers).
        if (
            pin_memory
            and in_dynamic_mode()
            and device is not None
            and not isinstance(
                device, (core.CUDAPinnedPlace, core.XPUPinnedPlace)
            )
        ):
            if isinstance(device, core.CUDAPlace) or (
                isinstance(device, core.Place) and device.is_gpu_place()
            ):
                device = core.CUDAPinnedPlace()
            elif isinstance(device, core.XPUPlace) or (
                isinstance(device, core.Place) and device.is_xpu_place()
            ):
                device = core.XPUPinnedPlace()
            else:
                raise RuntimeError(
                    f"Pinning memory is not supported for {device}"
                )
        tensor = _C_ops.full_like(x, fill_value, dtype, device)
        if requires_grad is True:
            tensor.stop_gradient = False
        if pin_memory and in_dynamic_mode():
            tensor = tensor.pin_memory()
        return tensor
    else:
        # Old static-graph path: emit a fill_any_like op.
        # NOTE(review): ``device``, ``requires_grad`` and ``pin_memory`` are
        # not used on this branch.
        helper = LayerHelper("full_like", **locals())
        check_variable_and_dtype(
            x,
            'x',
            [
                'bool',
                'float16',
                'float32',
                'float64',
                'int16',
                'int32',
                'int64',
                'uint16',
            ],
            'full_like',
        )
        check_dtype(
            dtype,
            'dtype',
            [
                'bool',
                'float16',
                'float32',
                'float64',
                'int16',
                'int32',
                'int64',
                'uint16',
            ],
            'full_like/zeros_like/ones_like',
        )
        out = helper.create_variable_for_type_inference(dtype=dtype)

        helper.append_op(
            type='fill_any_like',
            inputs={'X': [x]},
            attrs={'value': fill_value, "dtype": dtype},
            outputs={'Out': [out]},
        )
        out.stop_gradient = True
        return out
def fill_constant(
    shape: ShapeLike,
    dtype: DTypeLike,
    value: bool | float | paddle.Tensor,
    force_cpu: bool = False,
    out: paddle.Tensor | None = None,
    place: PlaceLike | None = None,
    name: str | None = None,
) -> paddle.Tensor:
    """
    Create a Tensor of the given ``shape`` and ``dtype`` filled with ``value``.

    This is the shared backend for :func:`full`, :func:`zeros` and :func:`ones`;
    it handles the dynamic, PIR and static-graph execution modes.

    Args:
        shape (tuple|list|Tensor|int): Shape of the Tensor to be created. An int
            is treated as a 1-D shape ``[shape]``.
        dtype (str|paddle.dtype|np.dtype): Data type of the output Tensor.
        value (bool|float|int|Tensor): Constant used to fill the Tensor. A Tensor
            value must be a scalar (0-D) Tensor.
        force_cpu (bool, optional): If True, force the result to be allocated on
            CPU regardless of ``place``. Default: False.
        out (Tensor|None, optional): Existing Tensor to fill in place. If given,
            its dtype must equal ``dtype``. Default: None.
        place (PlaceLike|None, optional): Desired device of the returned Tensor;
            if None, the current expected place is used. Only consulted in
            dynamic/PIR mode. Default: None.
        name (str|None, optional): For details, please refer to
            :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: A Tensor of ``shape`` and ``dtype`` with every element set to ``value``.

    Raises:
        TypeError: If ``out`` is given with a dtype different from ``dtype``, or
            if ``shape`` has an unsupported type.
    """
    # Normalize a bare int shape to a 1-D list so downstream helpers see a sequence.
    shape = [shape] if isinstance(shape, int) else shape
    if in_dynamic_or_pir_mode():
        if place is None:
            place = _current_expected_place()
        else:
            place = _get_paddle_place(place)

        if force_cpu:
            place = core.CPUPlace()
        if not isinstance(dtype, (core.VarDesc.VarType, core.DataType)):
            dtype = convert_np_dtype_to_dtype_(dtype)
        if in_pir_mode() and isinstance(dtype, core.VarDesc.VarType):
            # PIR kernels take DataType; translate the legacy VarType enum.
            dtype = paddle.pir.core.vartype_to_datatype[dtype]

        if in_dynamic_mode():
            if isinstance(shape, (list, tuple)):
                shape = paddle.utils.convert_shape_to_list(shape)
        else:
            paddle.utils.check_shape(shape)
            if isinstance(shape, (list, tuple)):
                if paddle.utils._contain_var(shape):
                    # Mixed int/Tensor shapes become a list of int tensors.
                    shape = paddle.utils.get_int_tensor_list(shape)
            elif isinstance(shape, paddle.pir.Value):
                pass
            else:
                raise TypeError("Shape only supports Value, or list, or tuple.")

        if out is None:
            out = _C_ops.full(shape, value, dtype, place)
            out.stop_gradient = True
            return out

        if out.dtype != dtype:
            # Fixed: previously a plain string with stray "f" fragments, so the
            # actual dtypes were never interpolated into the message.
            raise TypeError(
                f"Required out.dtype == dtype if specifying out, but received {out.dtype} != {dtype}"
            )
        out = _C_ops.full_(out, shape, value, dtype, place)
        out.stop_gradient = True
        return out

    else:
        attrs = {'force_cpu': force_cpu}
        dtype = convert_dtype(dtype)
        if not isinstance(value, Variable):
            # Integral dtypes truncate the fill value; others keep float form.
            if dtype in ['int8', 'uint8', 'int16', 'int32', 'int64']:
                attrs['str_value'] = str(int(value))
                attrs['value'] = int(value)
            else:
                attrs['str_value'] = str(float(value))
                attrs['value'] = float(value)
        helper = LayerHelper("fill_constant", **locals())
        inputs = {}
        if isinstance(value, Variable):
            if convert_dtype(value.dtype) != dtype:
                value = paddle.cast(value, dtype)
            inputs['ValueTensor'] = value
        paddle.utils.check_shape(shape)
        check_dtype(
            dtype,
            'dtype',
            [
                'bool',
                'float16',
                'float32',
                'float64',
                'int8',
                'uint8',
                'int16',
                'int32',
                'int64',
                'complex64',
                'complex128',
                'uint16',
                'float8_e4m3fn',
                'float8_e5m2',
            ],
            'fill_constant',
        )
        check_type(shape, 'shape', (Variable, list, tuple), 'fill_constant')
        if out is not None:
            check_variable_and_dtype(
                out, 'out', [convert_dtype(dtype)], 'fill_constant'
            )
        helper = LayerHelper("fill_constant", **locals())
        paddle.utils.get_shape_tensor_inputs(
            inputs=inputs, attrs=attrs, shape=shape, op_type='fill_constant'
        )
        if out is None:
            out = helper.create_variable_for_type_inference(dtype=dtype)
        attrs['dtype'] = out.dtype
        helper.append_op(
            type='fill_constant',
            inputs=inputs,
            outputs={'Out': [out]},
            attrs=attrs,
            stop_gradient=True,
        )
        out.stop_gradient = True
        return out
2026-02-10 15:20:14 +08:00
@overload
def ones(
    shape: ShapeLike,
    dtype: DTypeLike | None = None,
    name: str | None = None,
    *,
    out: paddle.Tensor | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor: ...


@overload
def ones(
    *size: int,
    out: paddle.Tensor | None = None,
    dtype: DTypeLike | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor: ...


@size_args_decorator
def ones(
    shape: ShapeLike,
    dtype: DTypeLike | None = None,
    name: str | None = None,
    *,
    out: paddle.Tensor | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor:
    """
    Create a Tensor of the given :attr:`shape` and :attr:`dtype` with every element set to 1.

    Args:
        shape (tuple|list|Tensor): Shape of the Tensor to be created. The data type is ``int32`` or ``int64`` .
            If ``shape`` is a list or tuple, its elements should be integers or 0-D Tensors with shape [].
            If ``shape`` is a Tensor, it should be a 1-D Tensor which represents a list.
        dtype (np.dtype|str, optional): Data type of the output Tensor, one of
            bool, float16, float32, float64, int32 and int64. If None, float32 is used.
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        out(Tensor, optional): The output tensor.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False

    Returns:
        Tensor: A Tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements are 1.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> # shape is a list/tuple
            >>> data1 = paddle.ones(shape=[3, 2])
            >>> print(data1.numpy())
            [[1. 1.]
             [1. 1.]
             [1. 1.]]
            >>> # shape is a Tensor
            >>> shape = paddle.to_tensor([3, 2])
            >>> data2 = paddle.ones(shape=shape)
            >>> print(data2.numpy())
            [[1. 1.]
             [1. 1.]
             [1. 1.]]
            >>> # shape is a Tensor List
            >>> shape = [paddle.to_tensor(3), paddle.to_tensor(2)]
            >>> data3 = paddle.ones(shape=shape)
            >>> print(data3.numpy())
            [[1. 1.]
             [1. 1.]
             [1. 1.]]
            >>> # shape can be a variable number of arguments
            >>> data4 = paddle.ones(3, 2)
            >>> print(data4.numpy())
            [[1. 1.]
             [1. 1.]
             [1. 1.]]
    """
    # Delegate to full() with a fill value of 1; all device/grad/pinning
    # behavior is handled there.
    return full(
        shape,
        1,
        dtype,
        name=name,
        out=out,
        device=device,
        requires_grad=requires_grad,
        pin_memory=pin_memory,
    )
2020-04-04 12:29:06 +08:00
2026-01-30 20:20:08 +08:00
@param_one_alias(["x", "input"])
def ones_like(
    x: paddle.Tensor,
    dtype: DTypeLike | None = None,
    name: str | None = None,
    *,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor:
    """
    Return a Tensor of ones with the same shape as ``x``, using ``dtype`` when
    given and the dtype of ``x`` otherwise.

    .. note::
        Alias Support: The parameter name ``input`` can be used as an alias for ``x``.
        For example, ``ones_like(input=tensor_x, ...)`` is equivalent to ``ones_like(x=tensor_x, ...)``.

    Args:
        x(Tensor): The input tensor which specifies shape and dtype. The
            dtype of ``x`` can be bool, float16, float32, float64, int32, int64.
        dtype(str|paddle.dtype|np.dtype, optional): The data type of the
            output tensor. Supported data types: bool, float16, float32, float64,
            int32, int64. If ``dtype`` is None, the data type is the same as ``x``.
            Default is None.
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False

    Returns:
        Tensor: A Tensor filled with the value 1, with the same shape and
        data type (use ``dtype`` if ``dtype`` is not None) as ``x``.

    Examples:
        .. code-block:: pycon

            >>> import paddle

            >>> x = paddle.to_tensor([1, 2, 3])
            >>> out1 = paddle.ones_like(x)
            >>> print(out1.numpy())
            [1 1 1]
            >>> out2 = paddle.ones_like(x, dtype='int32')
            >>> print(out2.numpy())
            [1 1 1]

    """
    # Thin wrapper: full_like() carries all of the device / grad / pinning logic.
    return full_like(
        x=x,
        fill_value=1,
        dtype=dtype,
        name=name,
        device=device,
        requires_grad=requires_grad,
        pin_memory=pin_memory,
    )
2020-04-04 12:29:06 +08:00
2026-02-10 15:20:14 +08:00
@overload
def zeros(
    shape: ShapeLike,
    dtype: DTypeLike | None = None,
    name: str | None = None,
    *,
    out: paddle.Tensor | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor: ...


@overload
def zeros(
    *size: int,
    out: paddle.Tensor | None = None,
    dtype: DTypeLike | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor: ...


@size_args_decorator
def zeros(
    shape: ShapeLike,
    dtype: DTypeLike | None = None,
    name: str | None = None,
    *,
    out: paddle.Tensor | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor:
    """
    Creates a tensor of specified :attr:`shape` and :attr:`dtype`, and fills it with 0.

    .. note::
        Alias Support: The parameter name ``size`` can be used as an alias for ``shape``.
        ``shape`` can be a variable number of arguments.
        For example:
        ``paddle.zeros(1, 2, 3, dtype=paddle.float32)``
        ``paddle.zeros(size=[1, 2, 3], dtype=paddle.float32)``

    Args:
        shape (tuple|list|Tensor|variable number of arguments): Shape of the Tensor to be created. The data type is ``int32`` or ``int64`` .
            alias: ``size``.
            If ``shape`` is a list or tuple, each element of it should be integer or 0-D Tensor with shape [].
            If ``shape`` is a Tensor, it should be a 1-D Tensor which represents a list.
        dtype(str|paddle.dtype|np.dtype, optional): Data type of output Tensor, it supports
            bool, float16, float32, float64, int32 and int64. Default: if None, the data type is float32.
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        out(Tensor, optional): The output tensor.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False

    Returns:
        Tensor: A tensor of data type :attr:`dtype` with shape :attr:`shape` and all elements set to 0.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> # shape is a list/tuple
            >>> data1 = paddle.zeros(shape=[3, 2])
            >>> print(data1.numpy())
            [[0. 0.]
             [0. 0.]
             [0. 0.]]
            >>> # shape is a Tensor
            >>> shape = paddle.to_tensor([3, 2])
            >>> data2 = paddle.zeros(shape=shape)
            >>> print(data2.numpy())
            [[0. 0.]
             [0. 0.]
             [0. 0.]]
            >>> # shape is a Tensor List
            >>> shape = [paddle.to_tensor(3), paddle.to_tensor(2)]
            >>> data3 = paddle.zeros(shape=shape)
            >>> print(data3.numpy())
            [[0. 0.]
             [0. 0.]
             [0. 0.]]
            >>> # shape can be a variable number of arguments
            >>> data4 = paddle.zeros(3, 2)
            >>> print(data4.numpy())
            [[0. 0.]
             [0. 0.]
             [0. 0.]]
    """
    # Delegate to full() with a fill value of 0; all device/grad/pinning
    # behavior is handled there.
    return full(
        shape,
        0,
        dtype,
        out=out,
        device=device,
        requires_grad=requires_grad,
        pin_memory=pin_memory,
        name=name,
    )
2020-04-04 12:29:06 +08:00
2026-01-30 20:20:08 +08:00
@param_one_alias(["x", "input"])
def zeros_like(
    x: paddle.Tensor,
    dtype: DTypeLike | None = None,
    name: str | None = None,
    *,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor:
    """
    Return a Tensor of zeros with the same shape as ``x``, using ``dtype`` when
    given and the dtype of ``x`` otherwise.

    .. note::
        Alias Support: The parameter name ``input`` can be used as an alias for ``x``.
        For example, ``zeros_like(input=x, ...)`` is equivalent to ``zeros_like(x=x, ...)``.

    Args:
        x(Tensor): The input tensor which specifies shape and dtype. The
            dtype of ``x`` can be bool, float16, float32, float64, int32, int64.
        dtype(str|paddle.dtype|np.dtype, optional): The data type of the
            output tensor. Supported data types: bool, float16, float32, float64,
            int32, int64. If ``dtype`` is None, the data type is the same as ``x``.
            Default is None.
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False

    Returns:
        Tensor: A Tensor filled with the value 0, with the same shape and
        data type (use ``dtype`` if ``dtype`` is not None) as ``x``.

    Examples:
        .. code-block:: pycon

            >>> import paddle

            >>> x = paddle.to_tensor([1, 2, 3])
            >>> out1 = paddle.zeros_like(x)
            >>> print(out1.numpy())
            [0 0 0]
            >>> out2 = paddle.zeros_like(x, dtype='int32')
            >>> print(out2.numpy())
            [0 0 0]

    """
    # Thin wrapper: full_like() carries all of the device / grad / pinning logic.
    return full_like(
        x=x,
        fill_value=0,
        dtype=dtype,
        name=name,
        device=device,
        requires_grad=requires_grad,
        pin_memory=pin_memory,
    )
2020-04-04 12:29:06 +08:00
2025-08-22 14:23:09 +08:00
@param_two_alias(["num_rows", "n"], ["num_columns", "m"])
def eye(
    num_rows: int | paddle.Tensor,
    num_columns: int | paddle.Tensor | None = None,
    dtype: DTypeLike | None = None,
    name: str | None = None,
    *,
    out: paddle.Tensor | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor:
    """
    Construct a 2-D Tensor with ones on the diagonal and zeros elsewhere.

    .. note::
        Alias Support: The parameter name ``n`` can be used as an alias for ``num_rows``, and ``m`` can be used as an alias for ``num_columns``.
        For example, ``eye(n=tensor_x, m=tensor_y, ...)`` is equivalent to ``eye(num_rows=tensor_x, num_columns=tensor_y, ...)``.

    Args:
        num_rows(int | paddle.Tensor): the number of rows in each batch Tensor.
            Alias: ``n``.
        num_columns(int | paddle.Tensor | None, optional): the number of columns in each batch Tensor.
            If None, default: num_rows.
        dtype(str|paddle.dtype|np.dtype, optional): The data type of the returned Tensor.
            It should be int32, int64, float16, float32, float64, complex64, complex128. Default: if None, the data type
            is float32.
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        out(Tensor, optional): The output tensor.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False

    Returns:
        Tensor: An identity Tensor or DenseTensor of shape [num_rows, num_columns].

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> data = paddle.eye(3, dtype='int32')
            >>> print(data.numpy())
            [[1 0 0]
             [0 1 0]
             [0 0 1]]
            >>> data = paddle.eye(2, 3, dtype='int32')
            >>> print(data.numpy())
            [[1 0 0]
             [0 1 0]]
    """

    def _validate_dim(value, label):
        # A Tensor dimension must be a scalar: 0-D, or 1-D of length 1 (or
        # dynamic -1); a plain value must be a non-negative integer.
        if isinstance(value, (Variable, core.eager.Tensor, paddle.pir.Value)):
            assert len(value.shape) == 0 or (
                len(value.shape) == 1 and value.shape[0] in [1, -1]
            )
        elif not isinstance(value, (int, np.integer)) or value < 0:
            raise TypeError(f"{label} should be a non-negative int.")

    _validate_dim(num_rows, "num_rows")
    if dtype is None:
        dtype = paddle.get_default_dtype()
    if not isinstance(dtype, (core.VarDesc.VarType, paddle.pir.core.DataType)):
        dtype = convert_np_dtype_to_dtype_(dtype)
    if num_columns is None:
        # A square matrix by default.
        num_columns = num_rows
    else:
        _validate_dim(num_columns, "num_columns")

    if in_dynamic_or_pir_mode():
        device = (
            _get_paddle_place(device)
            if device is not None
            else _current_expected_place()
        )
        # When pinning is requested, translate a GPU/XPU placement into the
        # matching pinned-memory place; anything else cannot be pinned.
        if (
            pin_memory
            and in_dynamic_mode()
            and device is not None
            and not isinstance(
                device, (core.CUDAPinnedPlace, core.XPUPinnedPlace)
            )
        ):
            if isinstance(device, core.CUDAPlace) or (
                isinstance(device, core.Place) and device.is_gpu_place()
            ):
                device = core.CUDAPinnedPlace()
            elif isinstance(device, core.XPUPlace) or (
                isinstance(device, core.Place) and device.is_xpu_place()
            ):
                device = core.XPUPinnedPlace()
            else:
                raise RuntimeError(
                    f"Pinning memory is not supported for {device}"
                )

        tensor = _C_ops.eye(
            num_rows,
            num_columns,
            dtype,
            device,
            out=out,
        )
        if requires_grad is True:
            tensor.stop_gradient = False
            if out is not None:
                out.stop_gradient = False
        if pin_memory and in_dynamic_mode():
            tensor = tensor.pin_memory()
        return tensor
    else:
        helper = LayerHelper("eye", **locals())
        check_dtype(
            dtype,
            'dtype',
            [
                'float16',
                'float32',
                'float64',
                'uint16',
                'int32',
                'int64',
                'complex64',
                'complex128',
            ],
            'eye',
        )
        out = helper.create_variable_for_type_inference(dtype=dtype)
        helper.append_op(
            type='eye',
            inputs={},
            outputs={'Out': [out]},
            attrs={
                'num_rows': num_rows,
                'num_columns': num_columns,
                'dtype': dtype,
            },
            stop_gradient=True,
        )
        out.stop_gradient = True
        return out
2020-04-14 00:19:01 +08:00
2026-01-30 20:20:08 +08:00
@param_one_alias(["shape", "size"])
def full(
    shape: ShapeLike,
    fill_value: Numeric | str,
    dtype: DTypeLike | None = None,
    name: str | None = None,
    *,
    out: paddle.Tensor | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor:
    """
    Return a Tensor with the ``fill_value`` which size is same as ``shape``.

    .. note::
        Alias Support: The parameter name ``size`` can be used as an alias for ``shape``.
        For example, ``full(size=[2, 3], ...)`` is equivalent to ``full(shape=[2, 3], ...)``.

    Args:
        shape (tuple|list|Tensor): Shape of the Tensor to be created. The data type is ``int32`` or ``int64`` .
            If ``shape`` is a list or tuple, each element of it should be integer or 0-D Tensor with shape [].
            If ``shape`` is an Tensor, it should be an 1-D Tensor which represents a list.
            Alias: ``size``.
        fill_value(Scalar|Tensor): The constant value used to initialize the Tensor to be created.
            If ``fill_value`` is an Tensor, it should be an 0-D Tensor which represents a scalar.
        dtype(str|paddle.dtype|np.dtype, optional): Data type of the output Tensor
            which can be float16, float32, float64, int32, int64, if dtype is `None`, the data
            type of created Tensor is `float32`.
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        out(Tensor, optional): The output tensor.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False

    Returns:
        Tensor: Tensor which is created according to ``shape``, ``fill_value`` and ``dtype``.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> # shape is a list/tuple
            >>> data1 = paddle.full(shape=[3, 2], fill_value=1.0)
            >>> print(data1.numpy())
            [[1. 1.]
             [1. 1.]
             [1. 1.]]
            >>> # shape is a Tensor
            >>> shape = paddle.to_tensor([3, 2])
            >>> data2 = paddle.full(shape=shape, fill_value=2.0)
            >>> print(data2.numpy())
            [[2. 2.]
             [2. 2.]
             [2. 2.]]
            >>> # shape is a Tensor List
            >>> shape = [paddle.to_tensor(3), paddle.to_tensor(2)]
            >>> data3 = paddle.full(shape=shape, fill_value=3.0)
            >>> print(data3.numpy())
            [[3. 3.]
             [3. 3.]
             [3. 3.]]
            >>> # fill_value is a Tensor.
            >>> val = paddle.full([], 2.0, "float32")
            >>> data5 = paddle.full(shape=[3, 2], fill_value=val)
            >>> print(data5.numpy())
            [[2. 2.]
             [2. 2.]
             [2. 2.]]
    """
    # str is accepted to handle string numeric values like "0.5" that occur in
    # CI tests — a fluid-era compatibility that may be removed in the future.
    _accepted = (
        numbers.Number,
        str,
        core.eager.Tensor,
        Variable,
        paddle.pir.Value,
    )
    if not isinstance(fill_value, _accepted):
        raise TypeError(
            f"The fill_value should be int, float, bool, complex, np.number, string numeric values or Tensor, but received {type(fill_value)}."
        )

    if dtype is None:
        # bool must be tested before the generic default since bool is a
        # subclass of int in Python.
        if isinstance(fill_value, bool):
            dtype = "bool"
        elif isinstance(fill_value, builtins.complex):
            dtype = "complex128"
        else:
            dtype = paddle.get_default_dtype()

    if in_dynamic_or_pir_mode():
        device = (
            _get_paddle_place(device)
            if device is not None
            else _current_expected_place()
        )
        # Translate a GPU/XPU placement into the matching pinned-memory place
        # when pinning is requested; other devices cannot be pinned.
        if (
            pin_memory
            and in_dynamic_mode()
            and device is not None
            and not isinstance(
                device, (core.CUDAPinnedPlace, core.XPUPinnedPlace)
            )
        ):
            if isinstance(device, core.CUDAPlace) or (
                isinstance(device, core.Place) and device.is_gpu_place()
            ):
                device = core.CUDAPinnedPlace()
            elif isinstance(device, core.XPUPlace) or (
                isinstance(device, core.Place) and device.is_xpu_place()
            ):
                device = core.XPUPinnedPlace()
            else:
                raise RuntimeError(
                    f"Pinning memory is not supported for {device}"
                )

    # fill_constant handles dynamic, PIR and static-graph modes itself.
    tensor = fill_constant(
        shape=shape,
        dtype=dtype,
        value=fill_value,
        out=out,
        place=device,
        name=name,
    )
    if requires_grad is True:
        tensor.stop_gradient = False
        if out is not None:
            out.stop_gradient = False
    if pin_memory and in_dynamic_mode():
        tensor = tensor.pin_memory()
    return tensor
2020-04-19 17:57:10 +08:00
2024-06-17 13:25:16 +08:00
def arange(
    start: float | paddle.Tensor = 0,
    end: float | paddle.Tensor | None = None,
    step: float | paddle.Tensor = 1,
    dtype: DTypeLike | None = None,
    *,
    out: paddle.Tensor | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
    name: str | None = None,
) -> paddle.Tensor:
    """
    Returns a 1-D Tensor with spaced values within a given interval.

    Values are generated into the half-open interval [``start``, ``end``) with
    the ``step``. (the interval including ``start`` but excluding ``end``).

    If ``dtype`` is float32 or float64, we advise adding a small epsilon to
    ``end`` to avoid floating point rounding errors when comparing against ``end``.

    Parameters:
        start(float|int|Tensor): Start of interval. The interval includes this
            value. If ``end`` is None, the half-open interval is [0, ``start``).
            If ``start`` is a Tensor, it is a 0-D Tensor which represents a scalar
            and data type is int32, int64, float32, float64. Default is 0.
        end(float|int|Tensor, optional): End of interval. The interval does not
            include this value. If ``end`` is a Tensor, it is a 0-D Tensor which
            represents a scalar and data type is int32, int64, float32, float64.
            If ``end`` is None, the half-open interval is [0, ``start``).
            Default is None.
        step(float|int|Tensor, optional): Spacing between values. For any out,
            it is the distance between two adjacent values, out[i+1] - out[i].
            If ``step`` is a Tensor, it is a 0-D Tensor which represents a scalar
            and data type is int32, int64, float32, float64. Default is 1.
        dtype(str|paddle.dtype|np.dtype, optional): The data type of the
            output tensor. Supported data types: int32, int64, float32, float64.
            If ``dtype`` is None, the data type is float32. Default is None.
        out(Tensor, optional): The output tensor.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: A 1-D Tensor with values from the interval [``start``, ``end``)
            taken with common difference ``step`` beginning from ``start``. Its
            data type is set by ``dtype``.

    Examples:
        .. code-block:: pycon

            >>> import paddle

            >>> out1 = paddle.arange(5)
            >>> print(out1.numpy())
            [0 1 2 3 4]

            >>> out2 = paddle.arange(3, 9, 2.0)
            >>> print(out2.numpy())
            [3. 5. 7.]

            >>> # use 4.999 instead of 5.0 to avoid floating point rounding errors
            >>> out3 = paddle.arange(4.999, dtype='float32')
            >>> print(out3.numpy())
            [0. 1. 2. 3. 4.]

            >>> start_var = paddle.to_tensor(3)
            >>> out4 = paddle.arange(start_var, 7)
            >>> print(out4.numpy())
            [3 4 5 6]

    """
    # A single positional value is interpreted as ``end`` (interval [0, start)),
    # mirroring Python's builtin range().
    if end is None:
        end = start
        start = 0

    # Infer the dtype from the inputs: if every one of start/end/step is an
    # integer (python int / np.integer / integer Tensor), use int64; otherwise
    # fall back to the framework default dtype.
    if dtype is None:
        for val in [start, end, step]:
            if isinstance(val, (Variable, paddle.pir.Value)):
                if not paddle.is_integer(val):
                    dtype = paddle.get_default_dtype()
                    break
                else:
                    dtype = 'int64'
            else:
                if not isinstance(val, np.integer) and not isinstance(val, int):
                    dtype = paddle.get_default_dtype()
                    break
                else:
                    dtype = 'int64'

    out_shape = None
    # True when all of start/end/step are plain numbers (no Tensor inputs).
    is_value_input = (
        not isinstance(start, (Variable, paddle.pir.Value))
        and not isinstance(end, (Variable, paddle.pir.Value))
        and not isinstance(step, (Variable, paddle.pir.Value))
    )
    if not in_dynamic_mode() and is_value_input:
        # Static graph with literal bounds: the output length is known at
        # graph-build time, so record it for shape inference.
        out_shape = [int(math.ceil((end - start) / step))]

    if not isinstance(dtype, (core.VarDesc.VarType, core.DataType)):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if in_dynamic_or_pir_mode():
        device = (
            _get_paddle_place(device)
            if device is not None
            else _current_expected_place()
        )
        # ``pin_memory`` rewrites the resolved place to its pinned-memory
        # counterpart (CUDA or XPU); any other place cannot be pinned.
        if (
            pin_memory
            and in_dynamic_mode()
            and device is not None
            and not isinstance(
                device, (core.CUDAPinnedPlace, core.XPUPinnedPlace)
            )
        ):
            if isinstance(device, core.CUDAPlace) or (
                isinstance(device, core.Place) and device.is_gpu_place()
            ):
                device = core.CUDAPinnedPlace()
            elif isinstance(device, core.XPUPlace) or (
                isinstance(device, core.Place) and device.is_xpu_place()
            ):
                device = core.XPUPinnedPlace()
            else:
                raise RuntimeError(
                    f"Pinning memory is not supported for {device}"
                )

    # Fast path for PIR mode with literal bounds: lower directly to the
    # arange op without materializing start/end/step as tensors.
    if is_value_input and in_pir_mode():
        tensor = _C_ops.arange(
            start,
            end,
            step,
            dtype,
            device,
            out=out,
        )
        tensor.stop_gradient = not requires_grad
        if out is not None:
            out.stop_gradient = not requires_grad
        if pin_memory and in_dynamic_mode():
            tensor = tensor.pin_memory()
        return tensor

    # Materialize plain-number inputs as 1-element CPU tensors of the target
    # dtype; cast Tensor inputs that arrive with a different dtype. Non-finite
    # bounds (inf/nan) are rejected up front.
    if not isinstance(start, (Variable, paddle.pir.Value)):
        with device_guard("cpu"):
            if not np.isfinite(start):
                raise ValueError(
                    f"The value of start must be finite, but received: {start}."
                )
            start = fill_constant([1], dtype, start, force_cpu=True)
    elif start.dtype != dtype:
        if in_dynamic_mode() and not paddle.isfinite(start):
            raise ValueError(
                f"The value of start must be finite, but received: {start}."
            )
        start = paddle.cast(start, dtype)
    if not isinstance(end, (Variable, paddle.pir.Value)):
        with device_guard("cpu"):
            if not np.isfinite(end):
                raise ValueError(
                    f"The value of end must be finite, but received: {end}."
                )
            end = fill_constant([1], dtype, end, force_cpu=True)
    elif end.dtype != dtype:
        if in_dynamic_mode() and not paddle.isfinite(end):
            raise ValueError(
                f"The value of end must be finite, but received: {end}."
            )
        end = paddle.cast(end, dtype)
    if not isinstance(step, (Variable, paddle.pir.Value)):
        with device_guard("cpu"):
            step = fill_constant([1], dtype, step, force_cpu=True)
    elif step.dtype != dtype:
        step = paddle.cast(step, dtype)

    if in_dynamic_or_pir_mode():
        tensor = _C_ops.arange(
            start,
            end,
            step,
            dtype,
            (
                _get_paddle_place(device)
                if device is not None
                else _current_expected_place()
            ),
            out=out,
        )
        tensor.stop_gradient = not requires_grad
        if out is not None:
            out.stop_gradient = not requires_grad
        if pin_memory and in_dynamic_mode():
            tensor = tensor.pin_memory()
        return tensor
    else:
        # Legacy static-graph path: validate dtype and append a ``range`` op.
        check_dtype(
            dtype,
            'dtype',
            ['float32', 'float64', 'int32', 'int64', 'float16', 'uint16'],
            'range/arange',
        )
        helper = LayerHelper('range', **locals())
        out = helper.create_variable_for_type_inference(dtype, shape=out_shape)
        helper.append_op(
            type='range',
            inputs={'Start': start, 'End': end, 'Step': step},
            outputs={'Out': out},
        )
        out.stop_gradient = True
        if out_shape is not None:
            out.desc.set_shape(out_shape)
        return out
2020-04-05 03:01:32 +08:00
2025-08-20 10:58:35 +08:00
@deprecated(
    reason=(
        "paddle.range is deprecated and will be removed in a future release because its behavior is inconsistent with Python's range builtin. "
        "Instead, use paddle.arange, which produces values in [start, end)"
    ),
    level=1,
)
def range(
    start: float | paddle.Tensor = 0,
    end: float | paddle.Tensor | None = None,
    step: float | paddle.Tensor = 1,
    dtype: DTypeLike = None,
    *,
    out: paddle.Tensor | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    name: str | None = None,
):
    r"""
    Returns a 1-D Tensor of size $$ \lfloor \dfrac{end - start}{step} \rfloor + 1 $$ with values
    from ``start`` to ``end`` with ``step``. ``step`` is the gap between two values in the tensor.

    $$
    out_{i+1} = out_{i} + step
    $$

    Values are generated over the closed interval [``start``, ``end``] with
    the ``step`` (the interval includes both ``start`` and ``end``). This is
    inconsistent with Python's builtin ``range``; prefer :ref:`api_paddle_arange`,
    which excludes ``end``.

    If ``dtype`` is float32 or float64, we advise adding a small epsilon to
    ``end`` to avoid floating point rounding errors when comparing against ``end``.

    Parameters:
        start(float|int|Tensor): Start of interval. The interval includes this
            value. If ``end`` is None, the generated interval is [0, ``start``].
            If ``start`` is a Tensor, it is a 0-D Tensor which represents a scalar
            and data type is int32, int64, float32, float64. Default is 0.
        end(float|int|Tensor, optional): End of interval. The interval includes
            this value when it is reachable from ``start`` by a whole number of
            steps. If ``end`` is a Tensor, it is a 0-D Tensor which
            represents a scalar and data type is int32, int64, float32, float64.
            If ``end`` is None, the generated interval is [0, ``start``].
            Default is None.
        step(float|int|Tensor, optional): Spacing between values. For any out,
            it is the distance between two adjacent values, out[i+1] - out[i].
            If ``step`` is a Tensor, it is a 0-D Tensor which represents a scalar
            and data type is int32, int64, float32, float64. Default is 1.
        dtype(str|np.dtype, optional): The data type of the
            output tensor. Supported data types: int32, int64, float32, float64.
            If ``dtype`` is None, the framework default dtype is used (float32
            unless changed via paddle.set_default_dtype). Default is None.
        out(Tensor, optional): The output tensor.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: A 1-D Tensor with values from the interval [``start``, ``end``]
            taken with common difference ``step`` beginning from ``start``. Its
            data type is set by ``dtype``.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> out1 = paddle.range(5)
            >>> print(out1.numpy())
            [0. 1. 2. 3. 4. 5.]
            >>> out2 = paddle.range(3, 9, 2.0)
            >>> print(out2.numpy())
            [3. 5. 7. 9.]
            >>> # use 4.999 instead of 5.0 to avoid floating point rounding errors
            >>> out3 = paddle.range(4.999, dtype='float32')
            >>> print(out3.numpy())
            [0. 1. 2. 3. 4.]
            >>> start_var = paddle.to_tensor(3)
            >>> out4 = paddle.range(start_var, 7)
            >>> print(out4.numpy())
            [3. 4. 5. 6. 7.]
    """
    # A single positional value is interpreted as ``end`` (interval [0, start]).
    if end is None:
        end = start
        start = 0
    if dtype is None:
        dtype = paddle.get_default_dtype()
    # True when all of start/end/step are plain numbers (no Tensor inputs).
    is_value_input = (
        not isinstance(start, (Variable, paddle.pir.Value))
        and not isinstance(end, (Variable, paddle.pir.Value))
        and not isinstance(step, (Variable, paddle.pir.Value))
    )
    if not isinstance(dtype, (core.VarDesc.VarType, core.DataType)):
        dtype = convert_np_dtype_to_dtype_(dtype)
    # Fast path for PIR mode with literal bounds: lower directly to the
    # range_v2 op without materializing start/end/step as tensors.
    if is_value_input and in_pir_mode():
        tensor = _C_ops.range_v2(
            start,
            end,
            step,
            dtype,
            (
                _get_paddle_place(device)
                if device is not None
                else _current_expected_place()
            ),
            out=out,
        )
        tensor.stop_gradient = not requires_grad
        if out is not None:
            out.stop_gradient = not requires_grad
        return tensor
    # Materialize plain-number inputs as 1-element CPU tensors of the target
    # dtype; cast Tensor inputs that arrive with a different dtype.
    if not isinstance(start, (Variable, paddle.pir.Value)):
        with device_guard("cpu"):
            start = fill_constant([1], dtype, start, force_cpu=True)
    elif start.dtype != dtype:
        start = paddle.cast(start, dtype)
    if not isinstance(end, (Variable, paddle.pir.Value)):
        with device_guard("cpu"):
            end = fill_constant([1], dtype, end, force_cpu=True)
    elif end.dtype != dtype:
        end = paddle.cast(end, dtype)
    if not isinstance(step, (Variable, paddle.pir.Value)):
        with device_guard("cpu"):
            step = fill_constant([1], dtype, step, force_cpu=True)
    elif step.dtype != dtype:
        step = paddle.cast(step, dtype)
    tensor = _C_ops.range_v2(
        start,
        end,
        step,
        dtype,
        (
            _get_paddle_place(device)
            if device is not None
            else _current_expected_place()
        ),
        out=out,
    )
    tensor.stop_gradient = not requires_grad
    if out is not None:
        out.stop_gradient = not requires_grad
    return tensor
2024-06-17 13:25:16 +08:00
def _tril_triu_op(helper: LayerHelper) -> paddle.Tensor:
    """Shared static-graph implementation behind ``tril`` and ``triu``.

    Reads ``x``, ``diagonal`` and ``name`` from ``helper.kwargs``, validates
    them, and appends a single ``tril_triu`` op whose ``lower`` attribute is
    derived from the helper's layer type.
    """
    op_type = helper.layer_type
    x = helper.kwargs.get('x', None)
    assert x is not None, f'x cannot be None in {op_type}'

    allowed_dtypes = [
        'float16',
        'uint16',
        'float32',
        'float64',
        'int32',
        'int64',
        'bool',
        'complex64',
        'complex128',
    ]
    check_variable_and_dtype(x, 'x', allowed_dtypes, op_type)

    if len(x.shape) < 2:
        raise ValueError(f"x shape in {op_type} must be at least 2-D")

    diagonal = helper.kwargs.get('diagonal', 0)
    if not isinstance(diagonal, int):
        raise TypeError(f"diagonal in {op_type} must be a python Int")

    # A caller-supplied name pins the output variable; otherwise infer one.
    name = helper.kwargs.get('name', None)
    if name is not None:
        out = helper.create_variable(
            name=name, dtype=x.dtype, persistable=False
        )
    else:
        out = helper.create_variable_for_type_inference(dtype=x.dtype)

    helper.append_op(
        type="tril_triu",
        inputs={"X": x},
        attrs={
            "diagonal": diagonal,
            "lower": op_type == 'tril',
        },
        outputs={"Out": out},
    )
    return out
【inplace api】batch add inplace api paddle.log_, paddle.i0_, paddle.nn.functional.leaky_relu_... (#55576)
* batch add inplace api
* add inplace test
* add activation inplace
* fix test
* remove atan2 ge, gt, le, lt, nq
* remove atan2 ge, gt, le, lt, nq
* fix windows ci error
* rerun ci
* fix typro
* fix bugs
---------
Co-authored-by: zhangrui34 <v_zhangrui34@baidu.com>
2023-07-27 15:33:52 +08:00
@inplace_apis_in_dygraph_only
def tril_(
    x: paddle.Tensor, diagonal: int = 0, name: str | None = None
) -> paddle.Tensor | None:
    r"""
    Inplace version of ``tril`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_tril`.
    """
    # The in-place kernel only exists in dynamic graph mode; otherwise the
    # function falls through and returns None.
    if not in_dynamic_mode():
        return None
    return _C_ops.tril_(x, diagonal)
@inplace_apis_in_dygraph_only
def triu_(
    x: paddle.Tensor, diagonal: int = 0, name: str | None = None
) -> paddle.Tensor | None:
    r"""
    Inplace version of ``triu`` API, the output Tensor will be inplaced with input ``x``.
    Please refer to :ref:`api_paddle_triu`.
    """
    # The in-place kernel only exists in dynamic graph mode; otherwise the
    # function falls through and returns None.
    if not in_dynamic_mode():
        return None
    return _C_ops.triu_(x, diagonal)
2024-06-17 13:25:16 +08:00
@overload
def meshgrid(
    args: Sequence[paddle.Tensor],
    name: str | None = None,
    indexing: str | None = None,
) -> list[paddle.Tensor]: ...


@overload
def meshgrid(
    *args: paddle.Tensor, name: str | None = None, indexing: str | None = None
) -> list[paddle.Tensor]: ...


def meshgrid(*args, **kwargs):
    """
    Takes a list of N tensors as input :attr:`*args`, each of which is 1-dimensional vector, and creates N-dimensional grids.

    Args:
        *args(Tensor|list of Tensor) : tensors (tuple(list) of tensor): the shapes of input k tensors are (N1,),
            (N2,),..., (Nk,). Support data types: ``float64``, ``bfloat16``, ``float16``, ``float32``, ``int32``, ``int64``, ``complex64``, ``complex128``.
        **kwargs (optional): Accepts ``name`` and ``indexing``.
            ``name`` defaults to None. Normally there is no need for
            user to set this property. For more information, please refer to :ref:`api_guide_Name`.
        indexing (Optional[str]) : the indexing mode, either "xy" or "ij", defaults to "ij". If "xy" is selected, the first dimension corresponds to the cardinality
            of the second input and the second dimension corresponds to the cardinality of the first input. If "ij" is selected, the dimensions are in the
            same order as the cardinality of the inputs.

    Returns:
        Tensor: k tensors. The shape of each tensor is (N1, N2, ..., Nk)

    Examples:
        .. code-block:: pycon

            >>> import paddle

            >>> x = paddle.randint(low=0, high=100, shape=[100])
            >>> y = paddle.randint(low=0, high=100, shape=[200])

            >>> grid_x, grid_y = paddle.meshgrid(x, y)

            >>> print(grid_x.shape)
            paddle.Size([100, 200])
            >>> print(grid_y.shape)
            paddle.Size([100, 200])
    """
    name = kwargs.get("name", None)
    indexing = kwargs.pop("indexing", None)
    if indexing is None:
        indexing = "ij"

    # A single list/tuple argument is unpacked into the tensor sequence.
    if len(args) == 1 and isinstance(args[0], (list, tuple)):
        args = args[0]

    if indexing not in ("ij", "xy"):
        raise ValueError(
            f"meshgrid: indexing must be 'ij' or 'xy', but got {indexing}"
        )
    # "xy" indexing is implemented by swapping the first two inputs before the
    # op runs and swapping the first two outputs back afterwards.
    swap_xy = indexing == "xy" and len(args) >= 2
    if swap_xy:
        args = (args[1], args[0], *args[2:])

    if in_dynamic_or_pir_mode():
        out = _C_ops.meshgrid(list(args))
    else:
        # Legacy static-graph path: validate every input dtype and append a
        # ``meshgrid`` op producing one output variable per input.
        helper = LayerHelper('meshgrid', **locals())

        if not isinstance(args, (list, tuple)):
            raise TypeError(
                "The type of input args in meshgrid should be list."
            )

        for id, input_ in enumerate(args):
            check_dtype(
                input_.dtype,
                'create data type',
                [
                    'uint16',
                    'float16',
                    'float32',
                    'float64',
                    'int32',
                    'int64',
                    'complex64',
                    'complex128',
                ],
                'meshgrid',
            )

        num = len(args)
        out = [
            helper.create_variable_for_type_inference(dtype=args[i].dtype)
            # builtins.range: this module shadows ``range`` with paddle.range.
            for i in builtins.range(num)
        ]
        helper.append_op(
            type='meshgrid', inputs={'X': list(args)}, outputs={'Out': out}
        )

    if swap_xy:
        out[0], out[1] = out[1], out[0]
    return out
def split_with_sizes(
    self: paddle.Tensor, split_sizes: list[int], dim: int = 0
) -> list[paddle.Tensor]:
    """
    Splits the input tensor into multiple sub tensors according to given split sizes.

    Args:
        self (Tensor): The input tensor to be split.
        split_sizes (list[int]): A list of non negative integers specifying
            the sizes of each split along dimension ``dim``. The sum of all
            elements in this list must equal the size of ``self`` along ``dim``.
        dim (int, optional): The dimension along which to split the tensor.
            Defaults to 0.

    Returns:
        list[Tensor]: A list of sub tensors resulting from splitting ``self``
            along the specified dimension.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> x = paddle.to_tensor([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]])
            >>> # Split into two parts along the first dimension, of sizes 1 and 2
            >>> splits = paddle.Tensor.split_with_sizes(x, [1, 2], dim=0)
            >>> print(splits)
            [Tensor(shape=[1, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[1, 2, 3, 4]]), Tensor(shape=[2, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[5 , 6 , 7 , 8 ],
            [9 , 10, 11, 12]])]
    """
    # Reject negative entries before doing any slicing work.
    if any(width < 0 for width in split_sizes):
        raise ValueError(
            "split_with_sizes expects split_sizes have only non-negative entries"
        )

    # The sizes must tile the chosen dimension exactly.
    total = sum(split_sizes)
    if total != self.shape[dim]:
        raise ValueError(
            f"Split sizes add up to {total} but got the tensor's size of {self.shape[dim]}"
        )

    # Slice out each chunk at its running offset along ``dim``.
    pieces = []
    offset = 0
    for width in split_sizes:
        pieces.append(
            paddle.slice(
                self, axes=[dim], starts=[offset], ends=[offset + width]
            )
        )
        offset += width
    return pieces
2020-08-22 13:50:53 +08:00
2024-06-17 13:25:16 +08:00
def diag_embed(
    input: TensorLike, offset: int = 0, dim1: int = -2, dim2: int = -1
) -> paddle.Tensor:
    """
    Creates a tensor whose diagonals of certain 2D planes (specified by dim1 and dim2)
    are filled by ``input``. By default, a 2D plane formed by the last two dimensions
    of the returned tensor will be selected.

    The argument ``offset`` determines which diagonal is generated:

    - If offset = 0, it is the main diagonal.
    - If offset > 0, it is above the main diagonal.
    - If offset < 0, it is below the main diagonal.

    Args:
        input(Tensor|numpy.ndarray): The input tensor. Must be at least 1-dimensional. The input data type should be float32, float64, int32, int64.
        offset(int, optional): Which diagonal to consider. Default: 0 (main diagonal).
        dim1(int, optional): The first dimension with respect to which to take diagonal. Default: -2.
        dim2(int, optional): The second dimension with respect to which to take diagonal. Default: -1.

    Returns:
        Tensor, the output data type is the same as input data type.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> diag_embed_input = paddle.arange(6)

            >>> diag_embed_output1 = paddle.diag_embed(diag_embed_input)
            >>> print(diag_embed_output1)
            Tensor(shape=[6, 6], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 0, 0, 0, 0, 0],
             [0, 1, 0, 0, 0, 0],
             [0, 0, 2, 0, 0, 0],
             [0, 0, 0, 3, 0, 0],
             [0, 0, 0, 0, 4, 0],
             [0, 0, 0, 0, 0, 5]])

            >>> diag_embed_output2 = paddle.diag_embed(diag_embed_input, offset=-1, dim1=0, dim2=1)
            >>> print(diag_embed_output2)
            Tensor(shape=[7, 7], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 0, 0, 0, 0, 0, 0],
             [0, 0, 0, 0, 0, 0, 0],
             [0, 1, 0, 0, 0, 0, 0],
             [0, 0, 2, 0, 0, 0, 0],
             [0, 0, 0, 3, 0, 0, 0],
             [0, 0, 0, 0, 4, 0, 0],
             [0, 0, 0, 0, 0, 5, 0]])

            >>> diag_embed_input_2dim = paddle.reshape(diag_embed_input, [2, 3])
            >>> print(diag_embed_input_2dim)
            Tensor(shape=[2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 1, 2],
             [3, 4, 5]])
            >>> diag_embed_output3 = paddle.diag_embed(diag_embed_input_2dim, offset=0, dim1=0, dim2=2)
            >>> print(diag_embed_output3)
            Tensor(shape=[3, 2, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[[0, 0, 0],
              [3, 0, 0]],
             [[0, 1, 0],
              [0, 4, 0]],
             [[0, 0, 2],
              [0, 0, 5]]])
    """
    # Non-Tensor inputs (e.g. numpy arrays) are materialized as Tensors first.
    if not isinstance(input, Variable):
        input = assign(input)

    if in_dynamic_or_pir_mode():
        return _C_ops.diag_embed(input, offset, dim1, dim2)

    # Legacy static-graph path below.
    inputs = {'Input': [input]}
    attrs = {'offset': offset, 'dim1': dim1, 'dim2': dim2}

    def __check_input(input, offset, dim1, dim2):
        # Validate the input dtype and that dim1/dim2 address two distinct
        # axes of the (rank+1)-dimensional output.
        check_dtype(
            input.dtype,
            'Input',
            ['int32', 'int64', 'float16', 'float32', 'float64'],
            'diag_embed',
        )

        input_shape = list(input.shape)
        assert len(input_shape) >= 1, (
            "Input must be at least 1-dimensional, "
            f"But received Input's dimensional: {len(input_shape)}.\n"
        )

        # NOTE(review): the asserts accept [-rank, rank] (abs(dim) <= rank)
        # while the messages claim [-(rank+1), rank] — confirm the intended
        # bound before relying on either.
        assert np.abs(dim1) <= len(input_shape), (
            f"Dim1 is out of range (expected to be in range of [{-(len(input_shape) + 1)}, {len(input_shape)}], but got {dim1}).\n"
        )

        assert np.abs(dim2) <= len(input_shape), (
            f"Dim2 is out of range (expected to be in range of [{-(len(input_shape) + 1)}, {len(input_shape)}], but got {dim2}).\n"
        )

        # Normalize negative axes against the output rank (input rank + 1).
        dim1_ = dim1 if dim1 >= 0 else len(input_shape) + dim1 + 1
        dim2_ = dim2 if dim2 >= 0 else len(input_shape) + dim2 + 1
        assert dim1_ != dim2_, (
            "dim1 and dim2 cannot be the same dimension. "
            f"But received dim1 = {dim1}, dim2 = {dim2}\n"
        )

    __check_input(input, offset, dim1, dim2)
    helper = LayerHelper("diag_embed", **locals())

    out = helper.create_variable_for_type_inference(dtype=input.dtype)

    helper.append_op(
        type='diag_embed',
        inputs={'Input': [input]},
        attrs={'offset': offset, 'dim1': dim1, 'dim2': dim2},
        outputs={'Out': [out]},
    )
    out.stop_gradient = True
    return out
2026-03-26 20:44:28 +08:00
@param_one_alias(['x', 'input'])
def diagflat(
    x: paddle.Tensor, offset: int = 0, name: str | None = None
) -> paddle.Tensor:
    """
    Build a 2-D square tensor whose selected diagonal holds the (flattened) elements of ``x``.

    If ``x`` is a vector (1-D tensor), its elements are placed directly on the
    chosen diagonal. If ``x`` has more than one dimension, it is flattened
    first and the flattened elements are placed on the chosen diagonal.

    The argument ``offset`` controls which diagonal is filled:

    - If ``offset`` = 0, it is the main diagonal.
    - If ``offset`` > 0, it is a superdiagonal.
    - If ``offset`` < 0, it is a subdiagonal.

    Args:
        x (Tensor): The input tensor. It can be any shape. Its data type should be float16, float32, float64, int32, int64.
            Alias: ``input``.
        offset (int, optional): The diagonal offset. A positive value represents superdiagonal, 0 represents the main diagonal, and a negative value represents subdiagonal. Default: 0 (main diagonal).
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor, a square matrix. The output data type is the same as input data type.

    Examples:
        .. code-block:: pycon
            :name: diagflat-example-1

            >>> import paddle
            >>> x = paddle.to_tensor([1, 2, 3])
            >>> y = paddle.diagflat(x)
            >>> print(y)
            Tensor(shape=[3, 3], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[1, 0, 0],
             [0, 2, 0],
             [0, 0, 3]])

            >>> y = paddle.diagflat(x, offset=1)
            >>> print(y)
            Tensor(shape=[4, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 1, 0, 0],
             [0, 0, 2, 0],
             [0, 0, 0, 3],
             [0, 0, 0, 0]])

            >>> y = paddle.diagflat(x, offset=-1)
            >>> print(y)
            Tensor(shape=[4, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 0, 0, 0],
             [1, 0, 0, 0],
             [0, 2, 0, 0],
             [0, 0, 3, 0]])

        .. code-block:: pycon
            :name: diagflat-example-2

            >>> import paddle
            >>> x = paddle.to_tensor([[1, 2], [3, 4]])
            >>> y = paddle.diagflat(x)
            >>> print(y)
            Tensor(shape=[4, 4], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[1, 0, 0, 0],
             [0, 2, 0, 0],
             [0, 0, 3, 0],
             [0, 0, 0, 4]])

            >>> y = paddle.diagflat(x, offset=1)
            >>> print(y)
            Tensor(shape=[5, 5], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 1, 0, 0, 0],
             [0, 0, 2, 0, 0],
             [0, 0, 0, 3, 0],
             [0, 0, 0, 0, 4],
             [0, 0, 0, 0, 0]])

            >>> y = paddle.diagflat(x, offset=-1)
            >>> print(y)
            Tensor(shape=[5, 5], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 0, 0, 0, 0],
             [1, 0, 0, 0, 0],
             [0, 2, 0, 0, 0],
             [0, 0, 3, 0, 0],
             [0, 0, 0, 4, 0]])

    """
    if in_dynamic_or_pir_mode():
        # Rank <= 1 inputs can feed diag directly; anything larger is
        # flattened to 1-D first.
        source = x if len(x.shape) <= 1 else _C_ops.flatten(x, 0, -1)
        return _C_ops.diag(source, offset, 0)

    # Static-graph path: validate the inputs, then assemble the ops by hand.
    padding_value = 0
    check_type(x, 'x', (Variable), 'diagflat')
    check_dtype(
        x.dtype,
        'x',
        ['float16', 'float32', 'float64', 'int32', 'int64', 'uint16'],
        'diagflat',
    )
    check_type(offset, 'offset', (int), 'diagflat')

    helper = LayerHelper("diagflat", **locals())
    flattened = helper.create_variable_for_type_inference(dtype=x.dtype)
    flattened_xshape = helper.create_variable_for_type_inference(x.dtype)
    result = helper.create_variable_for_type_inference(dtype=x.dtype)

    if len(x.shape) <= 1:
        # 1-D input: a single diag_v2 op is enough.
        helper.append_op(
            type='diag_v2',
            inputs={'X': x},
            outputs={'Out': result},
            attrs={'offset': offset, 'padding_value': padding_value},
        )
    else:
        # N-D input: flatten to 1-D, then build the diagonal matrix.
        helper.append_op(
            type='flatten_contiguous_range',
            inputs={'X': x},
            outputs={'Out': flattened, 'XShape': flattened_xshape},
            attrs={'start_axis': 0, 'stop_axis': -1},
        )
        flattened.stop_gradient = True

        helper.append_op(
            type='diag_v2',
            inputs={'X': flattened},
            outputs={'Out': result},
            attrs={'offset': offset, 'padding_value': padding_value},
        )
    result.stop_gradient = True
    return result
2021-06-09 19:04:48 +08:00
2026-02-10 15:20:14 +08:00
# Typing overloads for paddle.empty: the first form takes an explicit
# ``shape`` argument (list/tuple/Tensor); the second accepts the dimensions
# as a variable number of positional ints (e.g. ``paddle.empty(3, 2)``),
# which @size_args_decorator on the real implementation normalizes back
# into a single ``shape`` argument.
@overload
def empty(
    shape: ShapeLike,
    dtype: DTypeLike | None = None,
    name: str | None = None,
    *,
    out: paddle.Tensor | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor: ...
@overload
def empty(
    *size: int,
    out: paddle.Tensor | None = None,
    dtype: DTypeLike | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor: ...
2025-08-23 00:31:21 +08:00
@size_args_decorator
def empty(
    shape: ShapeLike,
    dtype: DTypeLike | None = None,
    name: str | None = None,
    *,
    out: paddle.Tensor | None = None,
    device: PlaceLike | None = None,
    requires_grad: bool = False,
    pin_memory: bool = False,
) -> paddle.Tensor:
    """
    Returns a Tensor with uninitialized data which size is same as ``shape``.

    Args:
        shape (tuple|list|Tensor): Shape of the Tensor to be created. The data type is ``int32`` or ``int64`` .
            If ``shape`` is a list or tuple, each element of it should be integer or 0-D Tensor with shape [].
            If ``shape`` is a Tensor, it should be a 1-D Tensor which represents a list.
        dtype(str|paddle.dtype|np.dtype, optional): Data type of the output Tensor
            which can be bool, float16, float32, float64, int32, int64, complex64, complex128 if dtype is `None`, the data
            type of created Tensor use global default dtype (see ``get_default_dtype``
            for details).
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        out(Tensor, optional): The output tensor.
        device(PlaceLike|None, optional): The desired device of returned tensor.
            if None, uses the current device for the default tensor type (see paddle.device.set_device()).
            device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
        requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.
        pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False

    Returns:
        Tensor: Tensor which is created according to ``shape`` and ``dtype``, and is uninitialized.

    Examples:
        .. code-block:: pycon

            >>> import paddle

            >>> # shape is a list/tuple
            >>> data1 = paddle.empty(shape=[3, 2])
            >>> print(data1.numpy())
            >>> # doctest: +SKIP('change everytime')
            [[1. 1.]
             [1. 1.]
             [1. 1.]]

            >>> # shape is a Tensor
            >>> shape = paddle.to_tensor([3, 2])
            >>> data2 = paddle.empty(shape=shape)
            >>> print(data2.numpy())
            >>> # doctest: +SKIP('change everytime')
            [[1. 1.]
             [1. 1.]
             [1. 1.]]

            >>> # shape is a Tensor List
            >>> shape = [paddle.to_tensor(3), paddle.to_tensor(2)]
            >>> data3 = paddle.empty(shape=shape)
            >>> print(data3.numpy())
            >>> # doctest: +SKIP('change everytime')
            [[1. 1.]
             [1. 1.]
             [1. 1.]]

            >>> # shape can be a variable number of arguments
            >>> data4 = paddle.empty(3, 2)
            >>> print(data4.numpy())
            >>> # doctest: +SKIP('change everytime')
            [[1. 1.]
             [1. 1.]
             [1. 1.]]
    """
    # Fall back to the process-wide default dtype, then normalize to a
    # canonical string form for the checks below.
    if dtype is None:
        dtype = paddle.get_default_dtype()

    dtype = convert_dtype(dtype)

    if in_dynamic_or_pir_mode():
        if in_dynamic_mode():
            # Dygraph: shape may mix ints and 0-D Tensors; flatten to ints.
            shape = paddle.utils.convert_shape_to_list(shape)
        else:
            # PIR mode: validate dtype/shape and convert any Variables in a
            # list/tuple shape into int tensors the C++ op can consume.
            check_dtype(
                dtype,
                'dtype',
                [
                    'bool',
                    'float16',
                    'float32',
                    'float64',
                    'uint16',
                    'int8',
                    'int16',
                    'int32',
                    'int64',
                    'complex64',
                    'complex128',
                    'float8_e4m3fn',
                ],
                'empty',
            )
            paddle.utils.check_shape(shape)
            if isinstance(shape, np.ndarray):
                shape = shape.tolist()
            if isinstance(shape, (list, tuple)):
                if paddle.utils._contain_var(shape):
                    shape = paddle.utils.get_int_tensor_list(shape)
            elif isinstance(shape, paddle.pir.Value):
                pass
            else:
                raise TypeError("Shape only supports Value, or list, or tuple.")

        # Resolve the target place; None means "current expected place".
        device = (
            _get_paddle_place(device)
            if device is not None
            else _current_expected_place()
        )
        # pin_memory + a non-pinned device: map GPU/XPU places onto their
        # pinned counterparts; any other place cannot be pinned.
        if (
            pin_memory
            and in_dynamic_mode()
            and device is not None
            and not isinstance(
                device, (core.CUDAPinnedPlace, core.XPUPinnedPlace)
            )
        ):
            if isinstance(device, core.CUDAPlace) or (
                isinstance(device, core.Place) and device.is_gpu_place()
            ):
                device = core.CUDAPinnedPlace()
            elif isinstance(device, core.XPUPlace) or (
                isinstance(device, core.Place) and device.is_xpu_place()
            ):
                device = core.XPUPinnedPlace()
            else:
                raise RuntimeError(
                    f"Pinning memory is not supported for {device}"
                )

        tensor = _C_ops.empty(
            shape,
            convert_np_dtype_to_dtype_(dtype),
            device,
            out=out,
        )
        # Pin before toggling stop_gradient: pin_memory() yields a new tensor.
        if pin_memory and in_dynamic_mode():
            tensor = tensor.pin_memory()
        if requires_grad is True:
            tensor.stop_gradient = False
            # NOTE(review): `out` is also marked trainable here; presumably
            # _C_ops.empty(out=...) may hand back a distinct handle — confirm.
            if out is not None:
                out.stop_gradient = False
        return tensor
    else:
        # Legacy static-graph path: append an `empty` op to the program.
        helper = LayerHelper("empty", **locals())
        inputs = {}

        check_dtype(
            dtype,
            'dtype',
            [
                'bool',
                'float16',
                'float32',
                'float64',
                'uint16',
                'int8',
                'int16',
                'int32',
                'int64',
                'complex64',
                'complex128',
                'float8_e4m3fn',
            ],
            'empty',
        )
        check_type(shape, 'shape', (Variable, list, tuple), 'empty')

        if isinstance(shape, Variable):
            check_dtype(shape.dtype, 'shape', ['int32', 'int64'], 'empty')

        attrs = {}
        # Populate inputs/attrs from `shape`, whether it is static ints,
        # a Variable, or a mixed list.
        paddle.utils.get_shape_tensor_inputs(
            inputs=inputs, attrs=attrs, shape=shape, op_type='empty'
        )

        out = helper.create_variable_for_type_inference(dtype=dtype)
        attrs['dtype'] = convert_np_dtype_to_dtype_(dtype)
        helper.append_op(
            type='empty',
            inputs=inputs,
            outputs={'Out': [out]},
            attrs=attrs,
            stop_gradient=True,
        )
        out.stop_gradient = True
        return out
2020-09-17 20:34:35 +08:00
2026-01-30 20:20:08 +08:00
@param_one_alias ( [ " x " , " input " ] )
2024-06-17 13:25:16 +08:00
def empty_like (
2025-08-12 10:43:41 +08:00
x : paddle . Tensor ,
dtype : DTypeLike | None = None ,
2025-08-18 18:47:47 +08:00
name : str | None = None ,
2025-08-12 10:43:41 +08:00
* ,
device : PlaceLike | None = None ,
requires_grad : bool = False ,
2025-08-28 15:03:51 +08:00
pin_memory : bool = False ,
2024-06-17 13:25:16 +08:00
) - > paddle . Tensor :
2020-09-17 20:34:35 +08:00
"""
2022-05-19 10:23:41 +08:00
Returns a Tensor with uninitialized data which has identical shape of ``x`` and ``dtype``.
2020-09-17 20:34:35 +08:00
If the ``dtype`` is None, the data type of Tensor is same with ``x``.
2022-09-14 21:56:19 +08:00
[API compatibility] concat, empty_like, full, norm, outer, where, zeros_like (#74548)
* [API compatibility] concat, empty_like, full, norm, outer, where, zeros_like
* fix where
* fix where doctest
2025-08-14 09:58:08 +08:00
.. note::
Alias Support: The parameter name ``input`` can be used as an alias for ``x``.
For example, ``empty_like(input=tensor_x)`` is equivalent to ``empty_like(x=tensor_x)``.
2020-09-17 20:34:35 +08:00
Args:
x(Tensor): The input tensor which specifies shape and data type. The data type can be bool, float16, float32, float64, int32, int64.
2025-09-24 18:55:54 +08:00
dtype(str|paddle.dtype|np.dtype, optional): The data type of output. The data type can be one
2022-09-14 21:56:19 +08:00
of bool, float16, float32, float64, int32, int64. The default value is None, which means the output
2020-09-17 20:34:35 +08:00
data type is the same as input.
2025-08-18 18:47:47 +08:00
name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
2025-08-12 10:43:41 +08:00
device(PlaceLike|None, optional): The desired device of returned tensor.
if None, uses the current device for the default tensor type (see paddle.device.set_device()).
device will be the CPU for CPU tensor types and the current CUDA device for CUDA tensor types. Default: None.
requires_grad(bool, optional): If autograd should record operations on the returned tensor. Default: False.
2025-08-28 15:03:51 +08:00
pin_memory(bool, optional): If set, return tensor would be allocated in the pinned memory. Works only for CPU tensors. Default: False
2022-09-14 21:56:19 +08:00
2020-09-17 20:34:35 +08:00
Returns:
Tensor: Tensor which is created according to ``x`` and ``dtype``, and is uninitialized.
Examples:
2026-01-29 20:44:09 +08:00
.. code-block:: pycon
2020-09-17 20:34:35 +08:00
2023-08-29 14:23:59 +08:00
>>> import paddle
2020-09-17 20:34:35 +08:00
2023-08-29 14:23:59 +08:00
>>> paddle.set_device( " cpu " ) # and use cpu device
2020-09-17 20:34:35 +08:00
2023-08-29 14:23:59 +08:00
>>> x = paddle.randn([2, 3], ' float32 ' )
>>> output = paddle.empty_like(x)
>>> print(output)
>>> # doctest: +SKIP( ' change everytime ' )
[[1.8491974e+20 1.8037303e+28 1.7443726e+28]
[4.9640171e+28 3.0186127e+32 5.6715899e-11]]
2020-09-17 20:34:35 +08:00
"""
if dtype is None :
dtype = x . dtype
2025-08-12 10:43:41 +08:00
if device is None :
device = x . place
2020-09-17 20:34:35 +08:00
dtype = convert_dtype ( dtype )
2025-08-12 10:43:41 +08:00
if in_dynamic_or_pir_mode ( ) :
2025-08-28 15:03:51 +08:00
device = (
_get_paddle_place ( device )
if device is not None
else _current_expected_place ( )
)
if (
pin_memory
and in_dynamic_mode ( )
and device is not None
and not isinstance (
device , ( core . CUDAPinnedPlace , core . XPUPinnedPlace )
)
) :
if isinstance ( device , core . CUDAPlace ) or (
isinstance ( device , core . Place ) and device . is_gpu_place ( )
) :
device = core . CUDAPinnedPlace ( )
elif isinstance ( device , core . XPUPlace ) or (
isinstance ( device , core . Place ) and device . is_xpu_place ( )
) :
device = core . XPUPinnedPlace ( )
else :
raise RuntimeError (
f " Pinning memory is not supported for { device } "
)
2025-08-12 10:43:41 +08:00
if in_dynamic_mode ( ) :
x_shape = x . shape
else :
x_shape = paddle . shape ( x )
tensor = _C_ops . empty (
x_shape ,
2023-11-27 14:48:12 +08:00
convert_np_dtype_to_dtype_ ( dtype ) ,
2025-08-28 15:03:51 +08:00
device ,
2023-11-27 14:48:12 +08:00
)
2025-08-12 10:43:41 +08:00
if requires_grad is True :
tensor . stop_gradient = False
2025-08-28 15:03:51 +08:00
if pin_memory and in_dynamic_mode ( ) :
tensor = tensor . pin_memory ( )
2025-08-12 10:43:41 +08:00
return tensor
2022-12-27 09:06:13 +08:00
else :
helper = LayerHelper ( " empty_like " , * * locals ( ) )
check_variable_and_dtype (
x ,
' x ' ,
2023-04-11 11:19:51 +08:00
[
' bool ' ,
' float16 ' ,
' float32 ' ,
' float64 ' ,
2024-05-10 10:59:58 +08:00
' int8 ' ,
' int16 ' ,
2023-04-11 11:19:51 +08:00
' int32 ' ,
' int64 ' ,
' uint16 ' ,
2024-05-10 10:59:58 +08:00
' complex64 ' ,
' complex128 ' ,
2023-04-11 11:19:51 +08:00
] ,
2022-12-27 09:06:13 +08:00
' empty_like ' ,
)
check_dtype (
dtype ,
' dtype ' ,
2023-04-11 11:19:51 +08:00
[
' bool ' ,
' float16 ' ,
' float32 ' ,
' float64 ' ,
2024-05-10 10:59:58 +08:00
' int8 ' ,
' int16 ' ,
2023-04-11 11:19:51 +08:00
' int32 ' ,
' int64 ' ,
' uint16 ' ,
2024-05-10 10:59:58 +08:00
' complex64 ' ,
' complex128 ' ,
2023-04-11 11:19:51 +08:00
] ,
2022-12-27 09:06:13 +08:00
' empty_like ' ,
)
out = helper . create_variable_for_type_inference ( dtype = dtype )
2022-08-12 17:30:32 +08:00
2022-12-27 09:06:13 +08:00
inputs = { }
attrs = { }
attrs [ ' dtype ' ] = convert_np_dtype_to_dtype_ ( dtype )
shape = paddle . shape ( x )
2023-03-09 20:30:52 +08:00
paddle . utils . get_shape_tensor_inputs (
2022-12-27 09:06:13 +08:00
inputs = inputs , attrs = attrs , shape = shape , op_type = ' empty_like '
)
helper . append_op (
type = ' empty ' ,
inputs = inputs ,
outputs = { ' Out ' : [ out ] } ,
attrs = attrs ,
stop_gradient = True ,
2022-10-23 20:01:27 +08:00
)
2020-09-17 20:34:35 +08:00
out . stop_gradient = True
return out
2020-10-19 16:00:09 +08:00
2024-06-17 13:25:16 +08:00
def assign(x: TensorLike, output: paddle.Tensor | None = None) -> paddle.Tensor:
    """

    Copy value of the :attr:`x` to the :attr:`output`.

    Parameters:
        x (Tensor|np.ndarray|list|tuple|scalar): A Tensor, numpy ndarray, tuple/list of scalar,
            or scalar. Its data type can be float16, float32, float64, int32, int64 or bool. Note: the float64 data will be converted to float32 because of current platform protobuf
            data limitation.
        output (Tensor|None, optional): A Tensor. If :attr:`output` is None, a new Tensor will be created as :attr:`output`. Default: None.

    Returns:
        Tensor: A Tensor with the same shape, data type and value as :attr:`x`.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> import numpy as np
            >>> data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64')
            >>> print(data.numpy())
            [[2.5 2.5]
             [2.5 2.5]
             [2.5 2.5]]

            >>> array = np.array([[1, 1], [3, 4], [1, 3]]).astype(np.int64)
            >>> result1 = paddle.zeros(shape=[3, 3], dtype='float32')
            >>> paddle.assign(array, result1)
            >>> print(result1.numpy())
            [[1 1]
             [3 4]
             [1 3]]
            >>> result2 = paddle.assign(data)
            >>> print(result2.numpy())
            [[2.5 2.5]
             [2.5 2.5]
             [2.5 2.5]]
            >>> result3 = paddle.assign(np.array([[2.5, 2.5], [2.5, 2.5], [2.5, 2.5]], dtype='float32'))
            >>> print(result3.numpy())
            [[2.5 2.5]
             [2.5 2.5]
             [2.5 2.5]]
    """
    # speed up: assigning a graph value/variable to itself is a no-op.
    if x is output and isinstance(x, (Variable, paddle.pir.Value)):
        return x

    input = x
    helper = LayerHelper('assign', **locals())
    check_type(
        input,
        'input',
        (
            Variable,
            paddle.pir.Value,
            np.ndarray,
            list,
            tuple,
            float,
            int,
            bool,
        ),
        'assign',
    )

    # Normalize Python scalars and (possibly nested) lists/tuples to ndarray
    # so they fall into the np.ndarray branch below. Strings are excluded
    # because np.isscalar('s') is True but strings are not assignable.
    if np.isscalar(input) and not isinstance(input, str):
        input = np.array([input])
    elif isinstance(input, (list, tuple)):
        input = np.array(input)
    # NOTE(Aurelius84): Why we judge core.DenseTensor?
    # In case of @to_static, a Tensor can be as input of `assign`,
    # but in_dynamic_mode()==False under @to_static, which means
    # isinstance(Tensor, Variable) == False. It will cause return None
    # after this api.
    if isinstance(input, (Variable, core.eager.Tensor, paddle.pir.Value)):
        if in_dynamic_or_pir_mode():
            if output is None:
                output = _C_ops.assign(input)
            else:
                # In-place assignment into the caller-provided output tensor.
                output = _C_ops.assign_out_(input, output)
        else:
            # Legacy static-graph path: append an `assign` op.
            check_dtype(
                input.dtype,
                'input',
                [
                    'float16',
                    'uint16',
                    'float32',
                    'float64',
                    'int16',
                    'int32',
                    'int64',
                    'uint8',
                    'int8',
                    'bool',
                    'complex64',
                    'complex128',
                ],
                'assign',
                '(When the type of input in assign is Variable.)',
            )
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype
                )
            helper.append_op(
                type='assign', inputs={'X': [input]}, outputs={'Out': [output]}
            )
    elif isinstance(input, np.ndarray):
        # We now support the form of [var, VAR...] if the Var.shape=[1,]
        if len(input.shape) > 0 and any(
            isinstance(x, (Variable, paddle.pir.Value)) for x in input
        ):
            # We only deal with the case where the list is nested one level, convert all scalars into variables, and then use stack to process. It is necessary to ensure the consistency of types.
            if not all(
                x.shape == (1,)
                for x in input
                if isinstance(
                    x, (Variable, core.eager.Tensor, paddle.pir.Value)
                )
            ):
                raise TypeError(
                    "Unsupported paddle.assign([Variable, Variable...]) with non-scalar variable."
                )

            def convert_scalar(x):
                # Wrap plain scalars as tensors so stack sees uniform inputs.
                if not isinstance(
                    x, (Variable, core.eager.Tensor, paddle.pir.Value)
                ):
                    return assign(x)
                return x

            to_stack_list = list(map(convert_scalar, input))
            ret = paddle.stack(to_stack_list)
            # Each element had shape (1,); squeeze the trailing axis back out.
            ret = paddle.squeeze(ret, -1)
            return ret

        if input.dtype == 'object':
            """may be this form [[Var], [Var], [3], [4]], we reject them."""
            raise TypeError(
                "The type of received input == `object`, it is not supported to convert to tensor, such as [[Var], [Var], [3], [4]]"
            )

        dtype = convert_np_dtype_to_dtype_(input.dtype)
        check_dtype(
            dtype,
            'input',
            [
                'float32',
                'float64',
                'int32',
                'int64',
                'bool',
                'complex64',
                'complex128',
            ],
            'assign',
            '(When the type of input in assign is numpy array.)',
        )
        value_name = "values"
        values = input.ravel().tolist()
        max_element_num = 17179869184  # 17179869184 = 2**34
        if input.size > max_element_num:
            # Attach SOT info so symbolic tracing breaks the graph here
            # instead of failing hard inside the traced region.
            from paddle.jit.sot.utils.exceptions import SotExtraInfo

            sot_extra_info = SotExtraInfo(need_breakgraph=True)
            err = ValueError(
                "The size of input is too big. Please consider "
                "saving it to file and 'load_op' to load it"
            )
            sot_extra_info.attach(err)
            raise err
        if in_dynamic_or_pir_mode():
            if output is None:
                output = zeros(list(input.shape), dtype)
            if in_dynamic_mode():
                # Dygraph: assign_value_ mutates `output` in place.
                _C_ops.assign_value_(
                    output,
                    list(input.shape),
                    dtype,
                    values,
                    _current_expected_place(),
                )
            else:
                # PIR: the op returns a new value that must be rebound.
                output = _C_ops.assign_value_(
                    output,
                    list(input.shape),
                    dtype,
                    values,
                    _current_expected_place(),
                )
        else:
            # Legacy static-graph path: bake the values into op attributes.
            if output is None:
                output = helper.create_variable_for_type_inference(
                    dtype=input.dtype
                )
            helper.append_op(
                type='assign_value',
                outputs={'Out': [output]},
                attrs={
                    'dtype': dtype,
                    'shape': list(input.shape),
                    value_name: values,
                },
            )

    return output
2021-06-07 16:50:45 +08:00
2026-03-04 15:59:18 +08:00
@param_one_alias(['x', 'input'])
def clone(x: paddle.Tensor, name: str | None = None) -> paddle.Tensor:
    """
    Return a copy of the input Tensor. A fresh Tensor is always produced.
    The operation is differentiable, so gradients flow from the copy back to the input.

    Parameters:
        x (Tensor): The input Tensor.
            Alias: ``input``.
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor, A Tensor copied from ``input``.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> import numpy as np
            >>> x = paddle.ones([2])
            >>> x.stop_gradient = False
            >>> x.retain_grads()
            >>> clone_x = paddle.clone(x)
            >>> clone_x.retain_grads()
            >>> y = clone_x**3
            >>> y.backward()
            >>> print(clone_x.grad.numpy()) # type: ignore
            [3. 3.]
            >>> print(x.grad.numpy()) # type: ignore
            [3. 3.]
    """
    # Delegate to the Tensor method, which records a differentiable copy.
    copied = x.clone()
    return copied
2022-10-23 20:01:27 +08:00
# NOTE(zhiqiu): not public
def _memcpy(input, place=None, output=None) -> paddle.Tensor:
    """
    Copy :attr:`input` into :attr:`output`, possibly moving it to another place.

    NOTE: currently, only support CUDAPlace <-> CUDAPinnedPlace.

    Parameters:
        input (Tensor): A tensor. Its data type supports float16, float32, float64, int32, int64, and bool.
        place (Place): Target place for the output.
        output (Tensor, optional): A tensor. If :attr:`output` is None, a new tensor will
            be created as :attr:`output`. Default: None.

    Returns:
        Tensor, A tensor with the same shape, data type and value as :attr:`input`.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> data = paddle.full(shape=[3, 2], fill_value=2.5, dtype='float64')
            >>> print(data.numpy())
            [[2.5 2.5]
             [2.5 2.5]
             [2.5 2.5]]
            >>> # doctest: +SKIP('NOTE(zhiqiu): not public')
            >>> result = paddle._memcpy(data, place=paddle.CPUPlace())
            >>> print(result2)
            [[2.5 2.5]
             [2.5 2.5]
             [2.5 2.5]]
    """
    # Map the destination place onto the integer code the memcpy op expects.
    # -1 (no place given, or an unrecognized place) keeps the data where it is.
    dst_place_type = -1
    if place is not None:
        p = core.Place()
        p.set_place(place)
        if p.is_cpu_place():
            dst_place_type = 0
        elif p.is_gpu_place():
            dst_place_type = 1
        elif p.is_cuda_pinned_place():
            dst_place_type = 2
        elif p.is_xpu_place():
            dst_place_type = 3
        elif p.is_custom_place():
            dst_place_type = 4

    if in_pir_mode():
        return _C_ops.memcpy(input, dst_place_type)

    # Legacy static-graph path: append a memcpy op to the program.
    helper = LayerHelper('memcpy', **locals())
    check_type(input, 'input', (Variable), 'memcpy')

    if isinstance(input, (Variable, core.eager.Tensor)):
        check_dtype(
            input.dtype,
            'input',
            [
                'float16',
                'uint16',
                'float32',
                'float64',
                'int32',
                'int64',
                'uint8',
                'int8',
                'bool',
            ],
            'memcpy',
            '(When the type of input in memcpy is Variable.)',
        )
    if output is None:
        output = helper.create_variable_for_type_inference(dtype=input.dtype)
    helper.append_op(
        type='memcpy',
        inputs={'X': [input]},
        outputs={'Out': [output]},
        attrs={'dst_place_type': dst_place_type},
    )
    return output
2021-12-18 15:02:10 +08:00
2024-06-17 13:25:16 +08:00
def complex(
    real: paddle.Tensor,
    imag: paddle.Tensor,
    name: str | None = None,
    *,
    out: paddle.Tensor | None = None,
) -> paddle.Tensor:
    """Return a complex tensor given the real and image component.

    Args:
        real (Tensor): The real component. The data type should be 'float32' or 'float64'.
        imag (Tensor): The image component. The data type should be the same as ``real``.
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        out(Tensor|None, optional): The output tensor. Default: None.

    Returns:
        Tensor, The output tensor. The data type is 'complex64' or 'complex128', with the same precision as ``real`` and ``imag``.

    Note:
        ``paddle.complex`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> x = paddle.arange(2, dtype=paddle.float32).unsqueeze(-1)
            >>> y = paddle.arange(3, dtype=paddle.float32)
            >>> z = paddle.complex(x, y)
            >>> print(z)
            Tensor(shape=[2, 3], dtype=complex64, place=Place(cpu), stop_gradient=True,
            [[(0.00000000+0.00000000j),
              (0.00000000+1.00000000j),
              (0.00000000+2.00000000j)],
             [(1.00000000+0.00000000j),
              (1.00000000+1.00000000j),
              (1.00000000+2.00000000j)]])
    """
    if in_dynamic_or_pir_mode():
        return _C_ops.complex(real, imag, out=out)

    # Legacy static-graph path: validate inputs and append a complex op
    # whose output dtype is the complex counterpart of the real dtype.
    check_variable_and_dtype(real, 'real', ['float32', 'float64'], 'complex')
    check_variable_and_dtype(imag, 'imag', ['float32', 'float64'], 'complex')

    op_type = "complex"
    helper = LayerHelper(op_type, **locals())
    out = helper.create_variable_for_type_inference(
        dtype=_real_to_complex_dtype(real.dtype)
    )
    helper.append_op(
        type=op_type,
        inputs={"X": real, "Y": imag},
        attrs={},
        outputs={"Out": out},
    )
    return out
2022-05-20 15:43:35 +08:00
2024-06-17 13:25:16 +08:00
def tril_indices(
    row: int, col: int | None = None, offset: int = 0, dtype='int64'
) -> paddle.Tensor:
    """
    Return the indices of the lower triangular part of the 2-D matrix
    whose row and col is known. Indices are ordered based on row and then columns.
    The lower triangular part of the matrix is defined as the elements on
    and below the diagonal.

    Args:
        row (int): The input x which is a int number describe the number of row of the matrix.
        col (int|None, optional): The input x which is a int number describe the number of col of the matrix.
            Default value is None, then it will be set equal to row, indicting a square matrix.
        offset (int, optional): The offset to consider, default value is 0.

            - If offset = 0, all elements on and below the main diagonal are retained.
            - If offset > 0, include just as many diagonals above the main diagonal.
            - If offset < 0, excludes just as many diagonals below the main diagonal.

        dtype (str|core.VarDesc.VarType|core.DataType, optional): the data type of the output tensor, can be int32, int64.

    Returns:
        Tensor: Results of the indices of lower triangular part of a row * col matrix,
        where the first row contains row coordinates of and the second row contains column coordinates.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> # example 1, default offset value
            >>> data1 = paddle.tril_indices(4, 4, 0)
            >>> print(data1)
            Tensor(shape=[2, 10], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 1, 1, 2, 2, 2, 3, 3, 3, 3],
             [0, 0, 1, 0, 1, 2, 0, 1, 2, 3]])
            >>> # example 2, positive offset value
            >>> data2 = paddle.tril_indices(4, 4, 2)
            >>> print(data2)
            Tensor(shape=[2, 15], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3],
             [0, 1, 2, 0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2, 3]])
            >>> # example 3, negative offset value
            >>> data3 = paddle.tril_indices(4, 4, -1)
            >>> print(data3)
            Tensor(shape=[2, 6], dtype=int64, place=Place(cpu), stop_gradient=True,
            [[1, 2, 2, 3, 3, 3],
             [0, 0, 1, 0, 1, 2]])
    """
    # Normalize dtype to the internal enum unless it already is one.
    if not isinstance(dtype, (core.VarDesc.VarType, core.DataType)):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if not isinstance(row, int) or row < 0:
        raise TypeError("row should be a non-negative int")
    if col is not None:
        if not isinstance(col, int) or col < 0:
            raise TypeError("col should be a non-negative int")
    else:
        # None means a square matrix: col defaults to row.
        col = row

    if in_dynamic_or_pir_mode():
        # col has already been normalized to a non-None int above.
        out = _C_ops.tril_indices(
            row, col, offset, dtype, _current_expected_place()
        )
        return out
    else:
        if not isinstance(offset, int):
            raise TypeError("offset should be a int")

        helper = LayerHelper("tril_indices", **locals())
        out = helper.create_variable_for_type_inference(dtype=dtype)

        helper.append_op(
            type='tril_indices',
            inputs={},
            outputs={'out': [out]},
            attrs={'rows': row, 'cols': col, 'offset': offset, 'dtype': dtype},
        )
        return out
2022-08-25 17:02:30 +08:00
2024-06-17 13:25:16 +08:00
def triu_indices(
    row: int, col: int | None = None, offset: int = 0, dtype='int64'
) -> paddle.Tensor:
    """
    Return the indices of the upper triangular part of the 2-D matrix
    whose row and col is known. Indices are ordered based on row and then columns.
    The upper triangular part of the matrix is defined as the elements on
    and above the diagonal.

    Args:
        row (int): The input x which is a int number describe the number of row of the matrix.
        col (int|None, optional): The input x which is a int number describe the number of col of the matrix.
            default value for col is None, then it will be set equal to row, indicting a square matrix.
        offset (int, optional): The offset to consider, default value is 0.

            - If offset = 0, all elements on and above the main diagonal are retained.
            - If offset > 0, include just as few diagonals above the main diagonal.
            - If offset < 0, excludes just as few diagonals below the main diagonal.

        dtype (str|np.dtype|core.VarDesc.VarType|core.DataType, optional): the data type of the output tensor,
            can be int32, int64, default value is int64.

    Returns:
        Tensor: Results of the indices of upper triangular part of a row * col matrix,
        where the first row contains row coordinates of and the second row contains column coordinates.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> # example 1, default offset value
            >>> data1 = paddle.triu_indices(4, 4, 0)
            >>> print(data1.numpy())
            [[0 0 0 0 1 1 1 2 2 3]
             [0 1 2 3 1 2 3 2 3 3]]
            >>> # example 2, positive offset value
            >>> data2 = paddle.triu_indices(4, 4, 2)
            >>> print(data2.numpy())
            [[0 0 1]
             [2 3 3]]
            >>> # example 3, negative offset value
            >>> data3 = paddle.triu_indices(4, 4, -1)
            >>> print(data3.numpy())
            [[0 0 0 0 1 1 1 1 2 2 2 3 3]
             [0 1 2 3 0 1 2 3 1 2 3 2 3]]
    """
    # Normalize dtype to the internal enum unless it already is one.
    if not isinstance(dtype, (core.VarDesc.VarType, core.DataType)):
        dtype = convert_np_dtype_to_dtype_(dtype)

    if not isinstance(row, int) or row < 0:
        raise TypeError("row should be a non-negative int")
    if col is not None:
        if not isinstance(col, int) or col < 0:
            raise TypeError("col should be a non-negative int")
    else:
        # None means a square matrix: col defaults to row.
        col = row

    if in_dynamic_or_pir_mode():
        # col has already been normalized to a non-None int above.
        out = _C_ops.triu_indices(
            row, col, offset, dtype, _current_expected_place()
        )
        return out
    else:
        if not isinstance(offset, int):
            raise TypeError("offset should be a int")

        helper = LayerHelper("triu_indices", **locals())
        out = helper.create_variable_for_type_inference(dtype=dtype)

        helper.append_op(
            type='triu_indices',
            inputs={},
            outputs={'out': [out]},
            attrs={'row': row, 'col': col, 'offset': offset, 'dtype': dtype},
        )
        return out
2023-03-17 10:11:11 +08:00
2024-06-17 13:25:16 +08:00
def polar(
    abs: paddle.Tensor,
    angle: paddle.Tensor,
    name: str | None = None,
    *,
    out: paddle.Tensor | None = None,
) -> paddle.Tensor:
    """Return a Cartesian coordinates corresponding to the polar coordinates complex tensor given the ``abs`` and ``angle`` component.

    Args:
        abs (Tensor): The abs component. The data type should be 'float32' or 'float64'.
        angle (Tensor): The angle component. The data type should be the same as ``abs``.
        name (str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.
        out (Tensor, optional): The output tensor. If set, the result will be stored in this tensor. Default is None.

    Returns:
        Tensor, The output tensor. The data type is 'complex64' or 'complex128', with the same precision as ``abs`` and ``angle``.

    Note:
        ``paddle.polar`` supports broadcasting. If you want know more about broadcasting, please refer to `Introduction to Tensor`_ .

        .. _Introduction to Tensor: ../../guides/beginner/tensor_en.html#chapter5-broadcasting-of-tensor

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> import numpy as np

            >>> abs = paddle.to_tensor([1, 2], dtype=paddle.float64)
            >>> angle = paddle.to_tensor([np.pi / 2, 5 * np.pi / 4], dtype=paddle.float64)
            >>> out = paddle.polar(abs, angle)
            >>> print(out)
            Tensor(shape=[2], dtype=complex128, place=Place(cpu), stop_gradient=True,
            [ (0.00000000+1.00000000j), (-1.41421356-1.41421356j)])
    """
    # Validate both polar components before doing any arithmetic; the
    # validation order (abs first, then angle) is part of the error contract.
    check_variable_and_dtype(abs, 'abs', ['float32', 'float64'], 'paddle.polar')
    check_variable_and_dtype(
        angle, 'angle', ['float32', 'float64'], 'paddle.polar'
    )

    # Convert (r, theta) -> (r*cos(theta), r*sin(theta)) and pack the two
    # Cartesian components into a complex tensor.
    real_component = abs * paddle.cos(angle)
    imag_component = abs * paddle.sin(angle)
    return paddle.complex(real_component, imag_component, out=out, name=name)
2023-09-22 11:14:48 +08:00
@dygraph_only
@param_two_alias(["loc", "median"], ["scale", "sigma"])
def cauchy_(
    x: paddle.Tensor,
    loc: Numeric = 0,
    scale: Numeric = 1,
    name: str | None = None,
) -> paddle.Tensor:
    """Fills the tensor with numbers drawn from the Cauchy distribution.

    Args:
        x (Tensor): the tensor will be filled, The data type is float32 or float64.
        loc (scalar, optional): Location of the peak of the distribution. The data type is float32 or float64.
            Alias: ``median``.
        scale (scalar, optional): The half-width at half-maximum (HWHM). The data type is float32 or float64. Must be positive values.
            Alias: ``sigma``.
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: input tensor with numbers drawn from the Cauchy distribution.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> x = paddle.randn([3, 4])
            >>> x.cauchy_(1, 2)
            >>> # doctest: +SKIP('random check')
            >>> print(x)
            Tensor(shape=[3, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
                   [[ 3.80087137,  2.25415039,  2.77960515,  7.64125967],
                    [ 0.76541221,  2.74023032,  1.99383152, -0.12685823],
                    [ 1.45228469,  1.76275957, -4.30458832, 34.74880219]])
    """
    # Draw base samples in-place, then map them through
    # loc + scale * tan(pi * (u - 0.5)).
    # NOTE(review): the textbook inverse-CDF construction of a Cauchy variate
    # applies this transform to Uniform(0, 1) base samples; here the base
    # samples come from normal_() -- confirm this is intentional.
    x.normal_()
    # Cast the distribution parameters to x's dtype so the in-place chain
    # below stays in a single dtype.
    loc = paddle.to_tensor(loc).astype(x.dtype)
    half = paddle.to_tensor(0.5).astype(x.dtype)
    x.subtract_(half).scale_(np.pi).tan_().scale_(scale).add_(loc)
    return x
@dygraph_only
@param_one_alias(['probs', 'p'])
def geometric_(
    x: paddle.Tensor,
    probs: float | paddle.Tensor,
    name: str | None = None,
) -> paddle.Tensor:
    """Fills the tensor with numbers drawn from the Geometric distribution.

    Args:
        x (Tensor): the tensor will be filled, The data type is float32 or float64.
        probs (float|Tensor): Probability parameter.
            The value of probs must be positive. When the parameter is a tensor, probs is probability of success for each trial.
            Alias: ``p``.
        name(str|None, optional): For details, please refer to :ref:`api_guide_Name`. Generally, no setting is required. Default: None.

    Returns:
        Tensor: input tensor with numbers drawn from the Geometric distribution.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> x = paddle.randn([3, 4])
            >>> x.geometric_(0.3)
            >>> # doctest: +SKIP('random check')
            >>> print(x)
            Tensor(shape=[3, 4], dtype=float32, place=Place(cpu), stop_gradient=True,
                   [[2.42739224, 4.78268528, 1.23302543, 3.76555204],
                    [1.38877118, 0.16075331, 0.16401523, 2.47349310],
                    [1.72872102, 2.76533413, 0.33410925, 1.63351011]])
    """
    # Cast the success probability to x's dtype up front.
    success_prob = paddle.to_tensor(probs).astype(x.dtype)
    # Smallest positive normal value of x's dtype -- keeps the uniform draw
    # strictly above zero so log() below is finite.
    lower_bound = float(np.finfo(dtype=convert_dtype(x.dtype)).tiny)
    # Inverse-CDF sampling: u ~ Uniform(tiny, 1), then log(u) / log(1 - p).
    x.uniform_(min=lower_bound, max=float(1))
    x.log_().divide_(paddle.log1p(-success_prob))
    return x
2024-11-06 16:34:24 +08:00
@inplace_apis_in_dygraph_only
def set_(
    x: paddle.Tensor,
    source: paddle.Tensor | None = None,
    shape: Sequence[int] | None = None,
    stride: Sequence[int] | None = None,
    offset: int = 0,
    name: str | None = None,
) -> paddle.Tensor:
    """
    set x with specified source Tensor's underlying storage, shape, stride and offset.

    Note that the ``x`` will share the same data with ``source`` Tensor.

    Args:
        x (Tensor): An arbitrary Tensor. The data type supports ``bfloat16``, ``float16``, ``float32``, ``float64``,
            ``bool``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64`` or ``complex128``.
        source (Tensor|None, optional): Define the target Tensor to use. The data type supports `bfloat16`, ``float16``,
            ``float32``, ``float64``, ``bool``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64`` or
            ``complex128``. Default: None, which means to set ``x`` with an empty source tensor.
        shape (list|tuple|None, optional): Define the target shape. Each element of it should be integer. Default: None,
            which means it will use the specified ``source``'s shape as default value.
        stride (list|tuple|None, optional): Define the target stride. Each element of it should be integer. Default: None,
            and when ``shape`` is also None, it will use the specified ``source``'s stride as default value; when ``shape``
            is specified, it will use the default stride corresponding to the specified ``shape``.
        offset (int, optional): Define the target offset from x's holder in bytes. Default: 0.
        name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the Tensor with the same data type as ``x``.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> src = paddle.to_tensor([[11.0, 22.0, 33.0]])
            >>> src2 = paddle.to_tensor([11.0, 22.0, 33.0, 44.0, 55.0, 66.0])

            >>> x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0, 5.0])
            >>> x.set_()
            >>> print(x)
            Tensor(shape=[0], dtype=float32, place=Place(cpu), stop_gradient=True,
            [])

            >>> x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0, 5.0])
            >>> x.set_(src)
            >>> print(x)
            Tensor(shape=[1, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[11., 22., 33.]])
            >>> print(x._is_shared_buffer_with(src))
            True

            >>> x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0, 5.0])
            >>> x.set_(src, shape=[2, 1])
            >>> print(x)
            Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[11.],
             [22.]])

            >>> x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0, 5.0])
            >>> x.set_(src2, shape=[3], stride=[2])
            >>> print(x)
            Tensor(shape=[3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [11., 33., 55.])

            >>> x = paddle.to_tensor([1.0, 2.0, 3.0, 4.0, 5.0])
            >>> x.set_(src2, shape=[5], offset=4)
            >>> print(x)
            Tensor(shape=[5], dtype=float32, place=Place(cpu), stop_gradient=True,
            [22., 33., 44., 55., 66.])
    """
    if in_dynamic_mode():
        # set_ doesn't have backward op so EagerUtils::CheckInplace will not be
        # called in eager_generator.cc. Here to keep consistent with other inplace
        # op, manually check whether x is leaf node and doesn't stop gradient.
        if x.is_leaf and not x.stop_gradient:
            raise ValueError(
                f"(InvalidArgument) Leaf Tensor {x.name} that doesn't stop gradient can't use "
                "inplace strategy."
            )
        if source is None:
            # No source given: reset x to an empty storage with zero-length
            # shape and stride.
            source = paddle.empty([0], dtype=x.dtype)
            shape = [0]
            stride = [0]
        else:
            if not isinstance(source, (Variable, core.eager.Tensor)):
                raise ValueError(
                    f"Input (source) should be paddle.Tensor but received {type(source)}"
                )
            check_dtype(
                source.dtype,
                'source',
                [
                    'bool',
                    'float16',
                    'uint16',
                    'float32',
                    'float64',
                    'int8',
                    'int16',
                    'int32',
                    'int64',
                    'uint8',
                    'complex64',
                    'complex128',
                ],
                'set',
            )
            if stride is None:
                if shape is None:
                    # Neither shape nor stride given: inherit source's stride
                    # (shape is inherited below).
                    stride = source.strides
                else:
                    # shape given without stride: use the default contiguous
                    # stride for that shape, obtained from a scratch tensor.
                    stride = paddle.empty(shape).strides
            if shape is None:
                shape = source.shape
        return _C_ops.set_(x, source, shape, stride, offset)
2024-12-18 15:46:43 +08:00
@inplace_apis_in_dygraph_only
def resize_(
    x: paddle.Tensor,
    shape: Sequence[int],
    fill_zero: bool = False,
    name: str | None = None,
) -> paddle.Tensor:
    """
    Resize ``x`` with specified ``shape``.

    Args:
        x (Tensor): An arbitrary Tensor. The data type supports ``bfloat16``, ``float16``, ``float32``, ``float64``,
            ``bool``, ``int8``, ``int16``, ``int32``, ``int64``, ``uint8``, ``complex64`` or ``complex128``.
        shape (list|tuple): Define the target shape. Each element of it should be integer.
        fill_zero (bool, optional): If the size of specified ``shape`` is greater than the original Tensor size, the
            new Tensor will be filled with zero if ``fill_zero`` is True. Default: False, which means the filled value
            will be undetermined.
        name (str|None, optional): Name for the operation (optional, default is None). For more information, please refer to :ref:`api_guide_Name`.

    Returns:
        Tensor, the resized Tensor.

    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> x = paddle.to_tensor([1.0, 2.0, 3.0])
            >>> x.resize_([2, 1])
            >>> print(x)
            Tensor(shape=[2, 1], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[1.],
             [2.]])

            >>> x = paddle.to_tensor([1.0, 2.0, 3.0])
            >>> x.resize_([2, 3], fill_zero=True)
            >>> print(x)
            Tensor(shape=[2, 3], dtype=float32, place=Place(cpu), stop_gradient=True,
            [[1., 2., 3.],
             [0., 0., 0.]])
    """
    if in_dynamic_mode():
        check_dtype(
            x.dtype,
            'x',
            [
                'bool',
                'float16',
                'uint16',
                'float32',
                'float64',
                'int8',
                'int16',
                'int32',
                'int64',
                'uint8',
                'complex64',
                'complex128',
            ],
            'resize',
        )
        if not isinstance(shape, (list, tuple)):
            raise ValueError(
                f"Input (shape) should be list or tuple but received {type(shape)}"
            )
        new_size = math.prod(shape)
        old_size = math.prod(x.shape)
        if (new_size > old_size) and fill_zero:
            if old_size == 0:
                # Growing an empty tensor: there is nothing to tile, so the
                # whole result is zeros. (Without this guard the ceil division
                # below would raise ZeroDivisionError.)
                tmp = paddle.zeros([new_size], dtype=x.dtype)
            else:
                repeats = -(-new_size // old_size)  # ceil division
                flatten_x = x.flatten()
                # Pad the flattened data with zeros up to new_size, then trim.
                tmp = paddle.concat(
                    (flatten_x,) + (paddle.zeros_like(flatten_x),) * (repeats - 1)
                )[:new_size]
            return x.set_(tmp, shape)
        # Shrinking, or growing without fill_zero: reuse x's own storage; any
        # extra elements are undetermined.
        return x.set_(x, shape)
2025-08-18 20:08:55 +08:00
def dtype_tensor_factory(dtype):
    """Build a dtype-bound tensor constructor (e.g. the ``FloatTensor`` style
    entry points below), dispatching on the positional-argument pattern."""

    class _DtypeTensorFactory:
        def __new__(cls, *args, **kwargs):
            # No positional arguments: an empty 1-D tensor of the bound dtype.
            # This case must be checked before the all-ints case, which would
            # also (vacuously) match an empty argument tuple.
            if not args:
                return paddle.empty(shape=[0], dtype=dtype)
            # A single sequence: build the tensor from that data.
            if len(args) == 1 and isinstance(args[0], (list, tuple)):
                return paddle.tensor(args[0], dtype=dtype)
            # A run of ints: interpret them as the target shape.
            if all(isinstance(arg, int) for arg in args):
                return paddle.empty(shape=list(args), dtype=dtype)
            # Anything else: defer to the Tensor constructor, defaulting the
            # dtype keyword to the bound dtype.
            kwargs.setdefault('dtype', dtype)
            return paddle.Tensor(*args, **kwargs)

    return _DtypeTensorFactory
FloatTensor = dtype_tensor_factory ( ' float32 ' )
DoubleTensor = dtype_tensor_factory ( ' float64 ' )
HalfTensor = dtype_tensor_factory ( ' float16 ' )
BFloat16Tensor = dtype_tensor_factory ( ' bfloat16 ' )
ByteTensor = dtype_tensor_factory ( ' uint8 ' )
CharTensor = dtype_tensor_factory ( ' int8 ' )
ShortTensor = dtype_tensor_factory ( ' int16 ' )
IntTensor = dtype_tensor_factory ( ' int32 ' )
LongTensor = dtype_tensor_factory ( ' int64 ' )
BoolTensor = dtype_tensor_factory ( ' bool ' )