# PaddlePaddle sparse convolution functional ops (scraped page header removed).
# Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import annotations
from typing import TYPE_CHECKING, Literal
__all__ = []
from paddle import _C_ops
from paddle.base.layer_helper import LayerHelper
from paddle.framework import in_dynamic_or_pir_mode
from paddle.nn.functional.conv import _update_padding_nd
from paddle.utils import convert_to_list
from ...binary import add
if TYPE_CHECKING:
from collections.abc import Sequence
from paddle import Tensor
from paddle._typing import (
DataLayout2D,
DataLayout3D,
Size2,
Size3,
Size4,
Size6,
)
from paddle.nn.functional.common import _PaddingSizeMode
def _conv3d(
    x: Tensor,
    weight: Tensor,
    bias: Tensor | None = None,
    stride: Size3 = 1,
    padding: _PaddingSizeMode | Size3 | Size6 | Sequence[Size2] = 0,
    dilation: Size3 = 1,
    groups: Literal[1] = 1,
    subm: bool = False,
    key: str | None = None,
    data_format: Literal["NDHWC"] = "NDHWC",
    name: str | None = None,
) -> Tensor:
    """Shared implementation behind ``conv3d`` and ``subm_conv3d``.

    Args:
        x: 5-D SparseCooTensor in ``NDHWC`` layout.
        weight: Convolution kernel of shape [kD, kH, kW, C/g, M].
        bias: Optional bias of shape [M], added to the conv output.
        stride: Stride per spatial dim; a scalar is broadcast to 3 dims.
        padding: 'SAME'/'VALID', a scalar, or an explicit per-dim spec.
        dilation: Dilation per spatial dim; a scalar is broadcast to 3 dims.
        groups: Must be 1; grouped sparse conv is not supported yet.
        subm: If True, run submanifold convolution (output keeps the input's
            sparsity pattern).
        key: Rulebook cache key; ``None`` is passed to the C op as "".
        data_format: Only "NDHWC" is supported.
        name: Kept for API symmetry; unused in this helper.

    Returns:
        A SparseCooTensor holding the convolution result (plus bias if given).

    Raises:
        ValueError: If ``data_format`` is not "NDHWC" or ``x`` is not 5-D.
    """
    assert groups == 1, "Currently, only support groups=1"
    dims = 3
    # Currently, only support 'NDHWC'
    if data_format not in ["NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NDHWC'. Received "
            f"Attr(data_format): {data_format}."
        )

    if len(x.shape) != 5:
        raise ValueError(
            f"Input x should be 5D tensor, but received x with the shape of {x.shape}"
        )

    channel_last = data_format == "NDHWC"
    channel_dim = -1 if channel_last else 1
    num_channels = x.shape[channel_dim]
    # Normalize padding (string/int/list forms) to an explicit per-dim list.
    padding, padding_algorithm = _update_padding_nd(padding, channel_last, dims)
    stride = convert_to_list(stride, dims, 'stride')
    dilation = convert_to_list(dilation, dims, 'dilation')
    if in_dynamic_or_pir_mode():
        pre_bias = _C_ops.sparse_conv3d(
            x,
            weight,
            padding,
            dilation,
            stride,
            groups,
            subm,
            key if key is not None else "",
        )
        if bias is not None:
            return add(pre_bias, bias)
        else:
            return pre_bias
    else:
        # Static graph: append the op by hand and let the helper create the
        # output variables (out + rulebook/counter bookkeeping tensors).
        inputs = {'x': x, 'kernel': weight}
        attrs = {
            'paddings': padding,
            'dilations': dilation,
            'strides': stride,
            'groups': groups,
            'subm': subm,
            'key': key,
        }
        op_type = 'sparse_conv3d'
        helper = LayerHelper(op_type, **locals())
        rulebook = helper.create_variable_for_type_inference(
            dtype='int32', stop_gradient=True
        )
        counter = helper.create_variable_for_type_inference(
            dtype='int32', stop_gradient=True
        )
        pre_bias = helper.create_sparse_variable_for_type_inference(x.dtype)
        outputs = {"out": pre_bias, "rulebook": rulebook, "counter": counter}
        helper.append_op(
            type=op_type, inputs=inputs, outputs=outputs, attrs=attrs
        )
        if bias is not None:
            return add(pre_bias, bias)
        else:
            return pre_bias
2022-04-15 11:38:24 +08:00
def _conv2d(
    x: Tensor,
    weight: Tensor,
    bias: Tensor | None = None,
    stride: Size2 = 1,
    padding: _PaddingSizeMode | Size2 | Size4 | Sequence[Size2] = 0,
    dilation: Size2 = 1,
    groups: Literal[1] = 1,
    subm: bool = False,
    key: str | None = None,
    data_format: Literal["NHWC"] = "NHWC",
    name: str | None = None,
) -> Tensor:
    """Shared implementation behind ``conv2d`` and ``subm_conv2d``.

    Args:
        x: 4-D SparseCooTensor in ``NHWC`` layout.
        weight: Convolution kernel of shape [kH, kW, C/g, M].
        bias: Optional bias of shape [M], added to the conv output.
        stride: Stride per spatial dim; a scalar is broadcast to 2 dims.
        padding: 'SAME'/'VALID', a scalar, or an explicit per-dim spec.
        dilation: Dilation per spatial dim; a scalar is broadcast to 2 dims.
        groups: Must be 1; grouped sparse conv is not supported yet.
        subm: If True, run submanifold convolution.
        key: Rulebook cache key; ``None`` is passed to the C op as "".
        data_format: Only "NHWC" is supported.
        name: Kept for API symmetry; unused in this helper.

    Returns:
        A SparseCooTensor holding the convolution result (plus bias if given).

    Raises:
        ValueError: If ``data_format`` is not "NHWC" or ``x`` is not 4-D.
    """
    assert groups == 1, "Currently, only support groups=1"
    dims = 2
    # Currently, only support 'NHWC'
    if data_format not in ["NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NHWC'. Received "
            f"Attr(data_format): {data_format}."
        )
    # (The original validated the rank twice; one check suffices.)
    if len(x.shape) != 4:
        raise ValueError(
            f"Input x should be 4D tensor, but received x with the shape of {x.shape}"
        )
    channel_last = data_format == "NHWC"
    channel_dim = -1 if channel_last else 1
    num_channels = x.shape[channel_dim]
    padding, padding_algorithm = _update_padding_nd(padding, channel_last, dims)
    stride = convert_to_list(stride, dims, 'stride')
    dilation = convert_to_list(dilation, dims, 'dilation')
    if in_dynamic_or_pir_mode():
        # NOTE: the 3D sparse conv op also serves the 2D case — presumably the
        # kernel dispatches on the input rank; confirm against the C op.
        pre_bias = _C_ops.sparse_conv3d(
            x,
            weight,
            padding,
            dilation,
            stride,
            groups,
            subm,
            key if key is not None else "",
        )
    else:
        inputs = {'x': x, 'kernel': weight}
        attrs = {
            'paddings': padding,
            'dilations': dilation,
            'strides': stride,
            'groups': groups,
            'subm': subm,
            'key': key,
        }
        op_type = 'sparse_conv3d'
        helper = LayerHelper(op_type, **locals())
        rulebook = helper.create_variable_for_type_inference(
            dtype='int32', stop_gradient=True
        )
        counter = helper.create_variable_for_type_inference(
            dtype='int32', stop_gradient=True
        )
        pre_bias = helper.create_sparse_variable_for_type_inference(x.dtype)
        outputs = {"out": pre_bias, "rulebook": rulebook, "counter": counter}
        helper.append_op(
            type=op_type, inputs=inputs, outputs=outputs, attrs=attrs
        )
    # Bias handling is shared by the dynamic and static paths.
    if bias is not None:
        return add(pre_bias, bias)
    else:
        return pre_bias
def _conv3d_igemm(
    x: Tensor,
    weight: Tensor,
    bias: Tensor | None = None,
    stride: Size3 = 1,
    padding: _PaddingSizeMode | Size3 | Size6 | Sequence[Size2] = 0,
    dilation: Size3 = 1,
    groups: Literal[1] = 1,
    subm: Literal[True] = True,
    key: str | None = None,
    data_format: Literal["NDHWC"] = "NDHWC",
    name: str | None = None,
) -> Tensor:
    """Implicit-GEMM variant of :func:`_conv3d` (submanifold only).

    Args:
        x: 5-D SparseCooTensor in ``NDHWC`` layout.
        weight: Convolution kernel of shape [kD, kH, kW, C/g, M].
        bias: Optional bias of shape [M], added to the conv output.
        stride: Stride per spatial dim; a scalar is broadcast to 3 dims.
        padding: 'SAME'/'VALID', a scalar, or an explicit per-dim spec.
        dilation: Dilation per spatial dim; a scalar is broadcast to 3 dims.
        groups: Must be 1; grouped sparse conv is not supported yet.
        subm: Must be True — implicit GEMM only supports submanifold conv.
        key: Rulebook cache key; ``None`` is passed to the C op as "".
        data_format: Only "NDHWC" is supported.
        name: Kept for API symmetry; unused in this helper.

    Returns:
        A SparseCooTensor holding the convolution result (plus bias if given).

    Raises:
        ValueError: If ``data_format`` is not "NDHWC" or ``x`` is not 5-D.
    """
    assert groups == 1, "Currently, only support groups=1"
    assert subm is True, "Currently, only support subm=True for implicit gemm"
    dims = 3
    # Currently, only support 'NDHWC'
    if data_format not in ["NDHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NDHWC'. Received "
            f"Attr(data_format): {data_format}."
        )
    # (The original validated the rank twice; one check suffices.)
    if len(x.shape) != 5:
        raise ValueError(
            f"Input x should be 5D tensor, but received x with the shape of {x.shape}"
        )
    channel_last = data_format == "NDHWC"
    channel_dim = -1 if channel_last else 1
    num_channels = x.shape[channel_dim]
    padding, padding_algorithm = _update_padding_nd(padding, channel_last, dims)
    stride = convert_to_list(stride, dims, 'stride')
    dilation = convert_to_list(dilation, dims, 'dilation')
    if in_dynamic_or_pir_mode():
        pre_bias = _C_ops.sparse_conv3d_implicit_gemm(
            x,
            weight,
            padding,
            dilation,
            stride,
            groups,
            subm,
            key if key is not None else "",
        )
        if bias is not None:
            return add(pre_bias, bias)
        else:
            return pre_bias
    else:
        # Static graph: the implicit-GEMM op exposes only the "out" output
        # (no rulebook/counter, unlike the plain sparse_conv3d op).
        inputs = {'x': x, 'kernel': weight}
        attrs = {
            'paddings': padding,
            'dilations': dilation,
            'strides': stride,
            'groups': groups,
            'subm': subm,
            'key': key,
        }
        op_type = 'sparse_conv3d_implicit_gemm'
        helper = LayerHelper(op_type, **locals())
        pre_bias = helper.create_sparse_variable_for_type_inference(x.dtype)
        outputs = {"out": pre_bias}
        helper.append_op(
            type=op_type, inputs=inputs, outputs=outputs, attrs=attrs
        )
        if bias is not None:
            return add(pre_bias, bias)
        else:
            return pre_bias
def _conv2d_igemm(
    x: Tensor,
    weight: Tensor,
    bias: Tensor | None = None,
    stride: Size2 = 1,
    padding: _PaddingSizeMode | Size2 | Size4 | Sequence[Size2] = 0,
    dilation: Size2 = 1,
    groups: Literal[1] = 1,
    subm: Literal[True] = True,
    key: str | None = None,
    data_format: Literal["NHWC"] = "NHWC",
    name: str | None = None,
) -> Tensor:
    """Implicit-GEMM variant of :func:`_conv2d` (submanifold only).

    Args:
        x: 4-D SparseCooTensor in ``NHWC`` layout.
        weight: Convolution kernel of shape [kH, kW, C/g, M].
        bias: Optional bias of shape [M], added to the conv output.
        stride: Stride per spatial dim; a scalar is broadcast to 2 dims.
        padding: 'SAME'/'VALID', a scalar, or an explicit per-dim spec.
        dilation: Dilation per spatial dim; a scalar is broadcast to 2 dims.
        groups: Must be 1; grouped sparse conv is not supported yet.
        subm: Must be True — implicit GEMM only supports submanifold conv.
        key: Rulebook cache key; ``None`` is passed to the C op as "".
        data_format: Only "NHWC" is supported.
        name: Kept for API symmetry; unused in this helper.

    Returns:
        A SparseCooTensor holding the convolution result (plus bias if given).

    Raises:
        ValueError: If ``data_format`` is not "NHWC" or ``x`` is not 4-D.
    """
    assert groups == 1, "Currently, only support groups=1"
    assert subm is True, "Currently, only support subm=True for implicit gemm"
    dims = 2
    # Currently, only support 'NHWC'
    if data_format not in ["NHWC"]:
        raise ValueError(
            "Attr(data_format) should be 'NHWC'. Received "
            f"Attr(data_format): {data_format}."
        )
    # Fixed: the message previously said "5D tensor" while checking rank 4.
    # (The original also validated the rank twice; one check suffices.)
    if len(x.shape) != 4:
        raise ValueError(
            f"Input x should be 4D tensor, but received x with the shape of {x.shape}"
        )
    channel_last = data_format == "NHWC"
    channel_dim = -1 if channel_last else 1
    num_channels = x.shape[channel_dim]
    padding, padding_algorithm = _update_padding_nd(padding, channel_last, dims)
    stride = convert_to_list(stride, dims, 'stride')
    dilation = convert_to_list(dilation, dims, 'dilation')
    if in_dynamic_or_pir_mode():
        # NOTE: the 3D implicit-GEMM op also serves the 2D case — presumably
        # the kernel dispatches on the input rank; confirm against the C op.
        pre_bias = _C_ops.sparse_conv3d_implicit_gemm(
            x,
            weight,
            padding,
            dilation,
            stride,
            groups,
            subm,
            key if key is not None else "",
        )
        if bias is not None:
            return add(pre_bias, bias)
        else:
            return pre_bias
    else:
        inputs = {'x': x, 'kernel': weight}
        attrs = {
            'paddings': padding,
            'dilations': dilation,
            'strides': stride,
            'groups': groups,
            'subm': subm,
            'key': key,
        }
        op_type = 'sparse_conv3d_implicit_gemm'
        helper = LayerHelper(op_type, **locals())
        pre_bias = helper.create_sparse_variable_for_type_inference(x.dtype)
        outputs = {"out": pre_bias}
        helper.append_op(
            type=op_type, inputs=inputs, outputs=outputs, attrs=attrs
        )
        if bias is not None:
            return add(pre_bias, bias)
        else:
            return pre_bias
def conv3d(
    x: Tensor,
    weight: Tensor,
    bias: Tensor | None = None,
    stride: Size3 = 1,
    padding: _PaddingSizeMode | Size3 | Size6 | Sequence[Size2] = 0,
    dilation: Size3 = 1,
    groups: Literal[1] = 1,
    data_format: DataLayout3D = "NDHWC",
    name: str | None = None,
) -> Tensor:
    r"""
    The sparse convolution3d functional calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are multidimensional SparseCooTensors with a shape of
    :math:`[N, D, H, W, C]` . Where N is batch size, C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. If bias attribution is provided,
    bias is added to the output of the convolution.
    For each input :math:`X`, the equation is:
    .. math::
        Out = \sigma (W \ast X + b)
    In the above equation:
    * :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
    * :math:`W`: Filter value, a tensor with MCDHW format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 1-D tensor with shape [M].
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
    Args:
        x (Tensor): The input is 5-D SparseCooTensor with shape [N, D, H, W, C], the data
            type of input is float16 or float32 or float64.
        weight (Tensor): The convolution kernel, a Tensor with shape [kD, kH, kW, C/g, M],
            where M is the number of filters(output channels), g is the number of groups,
            kD, kH, kW are the filter's depth, height and width respectively.
        bias (Tensor|None, optional): The bias, a Tensor of shape [M].
        stride (int|list|tuple, optional): The stride size. It means the stride in convolution. If stride is a
            list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
            Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
        padding (string|int|list|tuple, optional): The padding size. It means the number of zero-paddings
            on both sides for each dimension. If `padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If padding size is a tuple or list,
            it could be in three forms: `[pad_depth, pad_height, pad_width]` or
            `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            and when `data_format` is `"NCDHW"`, `padding` can be in the form
            `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NDHWC"`, `padding` can be in the form
            `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation (int|list|tuple, optional): The dilation size. It means the spacing between the kernel points.
            If dilation is a list/tuple, it must contain three integers, (dilation_depth, dilation_height,
            dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups (int, optional): The groups number of the Conv3D Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: groups=1. Currently, only support groups=1.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NDHWC"`. When it is `"NDHWC"`, the data is stored in the order of:
            `[batch_size, input_depth, input_height, input_width, input_channels]`.
        name(str|None, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
    Returns:
        A SparseCooTensor representing the conv3d, whose data type is the same with input.
    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> indices = [
            ...     [0, 0, 0, 0],
            ...     [0, 0, 0, 0],
            ...     [0, 0, 1, 2],
            ...     [1, 3, 2, 3],
            ... ]
            >>> values = [[1], [2], [3], [4]]
            >>> indices = paddle.to_tensor(indices, dtype='int32')
            >>> values = paddle.to_tensor(values, dtype='float32')
            >>> dense_shape = [1, 1, 3, 4, 1]
            >>> sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
            >>> weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
            >>> y = paddle.sparse.nn.functional.conv3d(sparse_x, weight)
            >>> print(y.shape)
            paddle.Size([1, 1, 1, 2, 1])
    """
    # Plain (non-submanifold) conv: subm=False, no rulebook key.
    return _conv3d(
        x,
        weight,
        bias,
        stride,
        padding,
        dilation,
        groups,
        False,
        None,
        data_format,
        name,
    )
def subm_conv3d(
    x: Tensor,
    weight: Tensor,
    bias: Tensor | None = None,
    stride: Size3 = 1,
    padding: _PaddingSizeMode | Size3 | Size6 | Sequence[Size2] = 0,
    dilation: Size3 = 1,
    groups: Literal[1] = 1,
    data_format: DataLayout3D = "NDHWC",
    key: str | None = None,
    name: str | None = None,
) -> Tensor:
    r"""
    The sparse submanifold convolution3d functional calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are multidimensional SparseCooTensors with a shape of
    :math:`[N, D, H, W, C]` . Where N is batch size, C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. If bias attribution is provided,
    bias is added to the output of the convolution.
    For each input :math:`X`, the equation is:
    .. math::
        Out = W \ast X + b
    In the above equation:
    * :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
    * :math:`W`: Filter value, a tensor with DHWCM format.
    * :math:`\\ast`: Submanifold Convolution operation, refer to the paper: https://arxiv.org/abs/1706.01307.
    * :math:`b`: Bias value, a 1-D tensor with shape [M].
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
    Args:
        x (Tensor): The input is 5-D SparseCooTensor with shape [N, D, H, W, C], the data
            type of input is float16 or float32 or float64.
        weight (Tensor): The convolution kernel, a Tensor with shape [kD, kH, kW, C/g, M],
            where M is the number of filters(output channels), g is the number of groups,
            kD, kH, kW are the filter's depth, height and width respectively.
        bias (Tensor|None, optional): The bias, a Tensor of shape [M].
        stride (int|list|tuple, optional): The stride size. It means the stride in convolution. If stride is a
            list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
            Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
        padding (string|int|list|tuple, optional): The padding size. It means the number of zero-paddings
            on both sides for each dimension. If `padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If padding size is a tuple or list,
            it could be in three forms: `[pad_depth, pad_height, pad_width]` or
            `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            and when `data_format` is `"NCDHW"`, `padding` can be in the form
            `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NDHWC"`, `padding` can be in the form
            `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation (int|list|tuple, optional): The dilation size. It means the spacing between the kernel points.
            If dilation is a list/tuple, it must contain three integers, (dilation_depth, dilation_height,
            dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups (int, optional): The groups number of the Conv3D Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Currently, only support groups=1.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NDHWC"`. When it is `"NDHWC"`, the data is stored in the order of:
            `[batch_size, input_depth, input_height, input_width, input_channels]`.
        key(str|None, optional): the key is used to save or use the same rulebook,
            the definition and role of rulebook refers to
            https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf. The
            default value is None.
        name(str|None, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
    Returns:
        A SparseCooTensor representing the conv3d, whose data type is
        the same with input.
    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> indices = [
            ...     [0, 0, 0, 0],
            ...     [0, 0, 0, 0],
            ...     [0, 0, 1, 2],
            ...     [1, 3, 2, 3],
            ... ]
            >>> values = [[1], [2], [3], [4]]
            >>> indices = paddle.to_tensor(indices, dtype='int32')
            >>> values = paddle.to_tensor(values, dtype='float32')
            >>> dense_shape = [1, 1, 3, 4, 1]
            >>> sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
            >>> weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
            >>> y = paddle.sparse.nn.functional.subm_conv3d(sparse_x, weight)
            >>> print(y.shape)
            paddle.Size([1, 1, 3, 4, 1])
    """
    # Submanifold conv: subm=True; `key` lets callers reuse a cached rulebook.
    return _conv3d(
        x,
        weight,
        bias,
        stride,
        padding,
        dilation,
        groups,
        True,
        key,
        data_format,
        name,
    )
def subm_conv3d_igemm(
    x: Tensor,
    weight: Tensor,
    bias: Tensor | None = None,
    stride: Size3 = 1,
    padding: _PaddingSizeMode | Size3 | Size6 | Sequence[Size2] = 0,
    dilation: Size3 = 1,
    groups: Literal[1] = 1,
    data_format: DataLayout3D = "NDHWC",
    key: str | None = None,
    name: str | None = None,
) -> Tensor:
    r"""
    The sparse submanifold convolution3d functional calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are multidimensional SparseCooTensors with a shape of
    :math:`[N, D, H, W, C]` . Where N is batch size, C is the number of
    channels, D is the depth of the feature, H is the height of the feature,
    and W is the width of the feature. If bias attribution is provided,
    bias is added to the output of the convolution.
    For each input :math:`X`, the equation is:
    .. math::
        Out = W \ast X + b
    In the above equation:
    * :math:`X`: Input value, a tensor with NCDHW or NDHWC format.
    * :math:`W`: Filter value, a tensor with DHWCM format.
    * :math:`\\ast`: Submanifold Convolution operation, refer to the paper: https://arxiv.org/abs/1706.01307.
    * :math:`b`: Bias value, a 1-D tensor with shape [M].
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
    Args:
        x (Tensor): The input is 5-D SparseCooTensor with shape [N, D, H, W, C], the data
            type of input is float16 or float32 or float64.
        weight (Tensor): The convolution kernel, a Tensor with shape [kD, kH, kW, C/g, M],
            where M is the number of filters(output channels), g is the number of groups,
            kD, kH, kW are the filter's depth, height and width respectively.
        bias (Tensor|None, optional): The bias, a Tensor of shape [M].
        stride (int|list|tuple, optional): The stride size. It means the stride in convolution. If stride is a
            list/tuple, it must contain three integers, (stride_depth, stride_height, stride_width).
            Otherwise, stride_depth = stride_height = stride_width = stride. Default: stride = 1.
        padding (string|int|list|tuple, optional): The padding size. It means the number of zero-paddings
            on both sides for each dimension. If `padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If padding size is a tuple or list,
            it could be in three forms: `[pad_depth, pad_height, pad_width]` or
            `[pad_depth_front, pad_depth_back, pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            and when `data_format` is `"NCDHW"`, `padding` can be in the form
            `[[0,0], [0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right]]`.
            when `data_format` is `"NDHWC"`, `padding` can be in the form
            `[[0,0], [pad_depth_front, pad_depth_back], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation (int|list|tuple, optional): The dilation size. It means the spacing between the kernel points.
            If dilation is a list/tuple, it must contain three integers, (dilation_depth, dilation_height,
            dilation_width). Otherwise, dilation_depth = dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups (int, optional): The groups number of the Conv3D Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Currently, only support groups=1.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NCDHW"`, `"NDHWC"`.
            The default is `"NDHWC"`. When it is `"NDHWC"`, the data is stored in the order of:
            `[batch_size, input_depth, input_height, input_width, input_channels]`.
        key(str|None, optional): the key is used to save or use the same rulebook,
            the definition and role of rulebook refers to
            https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf. The
            default value is None.
        name(str|None, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
    Returns:
        A SparseCooTensor representing the conv3d, whose data type is
        the same with input.
    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> indices = [
            ...     [0, 0, 0, 0],
            ...     [0, 0, 0, 0],
            ...     [0, 0, 1, 2],
            ...     [1, 3, 2, 3],
            ... ]
            >>> values = [[1], [2], [3], [4]]
            >>> indices = paddle.to_tensor(indices, dtype='int32')
            >>> values = paddle.to_tensor(values, dtype='float32')
            >>> dense_shape = [1, 1, 3, 4, 1]
            >>> sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
            >>> weight = paddle.randn((1, 3, 3, 1, 1), dtype='float32')
            >>> y = paddle.sparse.nn.functional.subm_conv3d_igemm(sparse_x, weight)
            >>> print(y.shape)
            paddle.Size([1, 1, 3, 4, 1])
    """
    # Submanifold conv via the implicit-GEMM kernel (subm is forced True).
    return _conv3d_igemm(
        x,
        weight,
        bias,
        stride,
        padding,
        dilation,
        groups,
        True,
        key,
        data_format,
        name,
    )
def conv2d(
    x: Tensor,
    weight: Tensor,
    bias: Tensor | None = None,
    stride: Size2 = 1,
    padding: _PaddingSizeMode | Size2 | Size4 | Sequence[Size2] = 0,
    dilation: Size2 = 1,
    groups: Literal[1] = 1,
    data_format: DataLayout2D = "NHWC",
    name: str | None = None,
) -> Tensor:
    r"""
    The sparse convolution2d functional calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are multidimensional SparseCooTensors with a shape of
    :math:`[N, H, W, C]` . Where N is batch size, C is the number of
    channels, H is the height of the feature,
    and W is the width of the feature. If bias attribution is provided,
    bias is added to the output of the convolution.
    For each input :math:`X`, the equation is:
    .. math::
        Out = \sigma (W \ast X + b)
    In the above equation:
    * :math:`X`: Input value, a tensor with NHWC format.
    * :math:`W`: Filter value, a tensor with HWCM format.
    * :math:`\\ast`: Convolution operation.
    * :math:`b`: Bias value, a 1-D tensor with shape [M].
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.
    Args:
        x (Tensor): The input is 4-D SparseCooTensor with shape [N, H, W, C], the data
            type of input is float16 or float32 or float64.
        weight (Tensor): The convolution kernel, a Tensor with shape [kH, kW, C/g, M],
            where M is the number of filters(output channels), g is the number of groups,
            kD, kH, kW are the filter's height and width respectively.
        bias (Tensor|None, optional): The bias, a Tensor of shape [M].
        stride (int|list|tuple, optional): The stride size. It means the stride in convolution. If stride is a
            list/tuple, it must contain two integers, (stride_height, stride_width).
            Otherwise, stride_height = stride_width = stride. Default: stride = 1.
        padding (string|int|list|tuple, optional): The padding size. It means the number of zero-paddings
            on both sides for each dimension. If `padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If padding size is a tuple or list,
            it could be in three forms: `[pad_height, pad_width]` or
            `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            when `data_format` is `"NHWC"`, `padding` can be in the form
            `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation (int|list|tuple, optional): The dilation size. It means the spacing between the kernel points.
            If dilation is a list/tuple, it must contain two integers, (dilation_height,
            dilation_width). Otherwise, dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups (int, optional): The groups number of the Conv2D Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: groups=1. Currently, only support groups=1.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NHWC"`.
            The default is `"NHWC"`. When it is `"NHWC"`, the data is stored in the order of:
            `[batch_size, input_height, input_width, input_channels]`.
        name(str|None, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.
    Returns:
        A SparseCooTensor representing the conv2d, whose data type is the same with input.
    Examples:
        .. code-block:: pycon

            >>> import paddle
            >>> indices = [[0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
            >>> values = [[1], [2], [3], [4]]
            >>> indices = paddle.to_tensor(indices, dtype='int32')
            >>> values = paddle.to_tensor(values, dtype='float32')
            >>> dense_shape = [1, 3, 4, 1]
            >>> sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
            >>> weight = paddle.randn((3, 3, 1, 1), dtype='float32')
            >>> y = paddle.sparse.nn.functional.conv2d(sparse_x, weight)
            >>> print(y.shape)
            paddle.Size([1, 1, 2, 1])
    """
    # Delegate to the shared 2-D implementation as a plain (non-submanifold)
    # convolution; keyword arguments make the subm/key choices explicit.
    return _conv2d(
        x,
        weight,
        bias,
        stride=stride,
        padding=padding,
        dilation=dilation,
        groups=groups,
        subm=False,
        key=None,
        data_format=data_format,
        name=name,
    )
def subm_conv2d(
    x: Tensor,
    weight: Tensor,
    bias: Tensor | None = None,
    stride: Size2 = 1,
    padding: _PaddingSizeMode | Size2 | Size4 | Sequence[Size2] = 0,
    dilation: Size2 = 1,
    groups: Literal[1] = 1,
    data_format: DataLayout2D = "NHWC",
    key: str | None = None,
    name: str | None = None,
) -> Tensor:
    r"""
    The sparse submanifold convolution2d functional calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are multidimensional SparseCooTensors with a shape of
    :math:`[N, H, W, C]` . Where N is batch size, C is the number of
    channels, H is the height of the feature,
    and W is the width of the feature. If bias attribution is provided,
    bias is added to the output of the convolution.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \ast X + b)

    In the above equation:

    * :math:`X`: Input value, a tensor with NHWC format.
    * :math:`W`: Filter value, a tensor with HWCM format.
    * :math:`\ast`: Submanifold Convolution operation, refer to the paper: https://arxiv.org/abs/1706.01307.
    * :math:`b`: Bias value, a 1-D tensor with shape [M].
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Args:
        x (Tensor): The input is 4-D SparseCooTensor with shape [N, H, W, C], the data
            type of input is float16 or float32 or float64.
        weight (Tensor): The convolution kernel, a Tensor with shape [kH, kW, C/g, M],
            where M is the number of filters(output channels), g is the number of groups,
            kH, kW are the filter's height and width respectively.
        bias (Tensor|None, optional): The bias, a Tensor of shape [M].
        stride (int|list|tuple, optional): The stride size. It means the stride in convolution. If stride is a
            list/tuple, it must contain two integers, (stride_height, stride_width).
            Otherwise, stride_height = stride_width = stride. Default: stride = 1.
        padding (string|int|list|tuple, optional): The padding size. It means the number of zero-paddings
            on both sides for each dimension. If `padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If padding size is a tuple or list,
            it could be in three forms: `[pad_height, pad_width]` or
            `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            when `data_format` is `"NHWC"`, `padding` can be in the form
            `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation (int|list|tuple, optional): The dilation size. It means the spacing between the kernel points.
            If dilation is a list/tuple, it must contain two integers, (dilation_height,
            dilation_width). Otherwise, dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups (int, optional): The groups number of the Conv2D Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: groups=1. Currently, only support groups=1.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NHWC"`.
            The default is `"NHWC"`. When it is `"NHWC"`, the data is stored in the order of:
            `[batch_size, input_height, input_width, input_channels]`.
        key(str|None, optional): the key is used to save or use the same rulebook,
            the definition and role of rulebook refers to
            https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf. The
            default value is None.
        name(str|None, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        A SparseCooTensor representing the conv2d, whose data type is the same with input.

    Examples:
        .. code-block:: pycon

            >>> import paddle

            >>> indices = [[0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
            >>> values = [[1], [2], [3], [4]]
            >>> indices = paddle.to_tensor(indices, dtype='int32')
            >>> values = paddle.to_tensor(values, dtype='float32')
            >>> dense_shape = [1, 3, 4, 1]
            >>> sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
            >>> weight = paddle.randn((3, 3, 1, 1), dtype='float32')
            >>> y = paddle.sparse.nn.functional.subm_conv2d(sparse_x, weight)
            >>> print(y.shape)
            paddle.Size([1, 3, 4, 1])
    """
    # Submanifold variant: delegate to the shared 2-D implementation with
    # subm=True so the output keeps the input's sparsity pattern, and pass
    # the rulebook `key` so repeated calls on the same sparsity can reuse it.
    return _conv2d(
        x,
        weight,
        bias,
        stride,
        padding,
        dilation,
        groups,
        True,
        key,
        data_format,
        name,
    )
def subm_conv2d_igemm(
    x: Tensor,
    weight: Tensor,
    bias: Tensor | None = None,
    stride: Size2 = 1,
    padding: _PaddingSizeMode | Size2 | Size4 | Sequence[Size2] = 0,
    dilation: Size2 = 1,
    groups: Literal[1] = 1,
    data_format: DataLayout2D = "NHWC",
    key: str | None = None,
    name: str | None = None,
) -> Tensor:
    r"""
    The sparse submanifold convolution2d functional calculates the output based on the input, filter
    and strides, paddings, dilations, groups parameters. Input(Input) and
    Output(Output) are multidimensional SparseCooTensors with a shape of
    :math:`[N, H, W, C]` . Where N is batch size, C is the number of
    channels, H is the height of the feature,
    and W is the width of the feature. If bias attribution is provided,
    bias is added to the output of the convolution.

    This variant dispatches to the ``_conv2d_igemm`` backend (presumably an
    implicit-GEMM kernel -- verify against the backend implementation);
    otherwise it exposes the same contract as :func:`subm_conv2d`.

    For each input :math:`X`, the equation is:

    .. math::

        Out = \sigma (W \ast X + b)

    In the above equation:

    * :math:`X`: Input value, a tensor with NHWC format.
    * :math:`W`: Filter value, a tensor with HWCM format.
    * :math:`\ast`: Submanifold Convolution operation, refer to the paper: https://arxiv.org/abs/1706.01307.
    * :math:`b`: Bias value, a 1-D tensor with shape [M].
    * :math:`Out`: Output value, the shape of :math:`Out` and :math:`X` may be different.

    Args:
        x (Tensor): The input is 4-D SparseCooTensor with shape [N, H, W, C], the data
            type of input is float16 or float32 or float64.
        weight (Tensor): The convolution kernel, a Tensor with shape [kH, kW, C/g, M],
            where M is the number of filters(output channels), g is the number of groups,
            kH, kW are the filter's height and width respectively.
        bias (Tensor|None, optional): The bias, a Tensor of shape [M].
        stride (int|list|tuple, optional): The stride size. It means the stride in convolution. If stride is a
            list/tuple, it must contain two integers, (stride_height, stride_width).
            Otherwise, stride_height = stride_width = stride. Default: stride = 1.
        padding (string|int|list|tuple, optional): The padding size. It means the number of zero-paddings
            on both sides for each dimension. If `padding` is a string, either 'VALID' or
            'SAME' which is the padding algorithm. If padding size is a tuple or list,
            it could be in three forms: `[pad_height, pad_width]` or
            `[pad_height_top, pad_height_bottom, pad_width_left, pad_width_right]`,
            when `data_format` is `"NHWC"`, `padding` can be in the form
            `[[0,0], [pad_height_top, pad_height_bottom], [pad_width_left, pad_width_right], [0,0]]`.
            Default: padding = 0.
        dilation (int|list|tuple, optional): The dilation size. It means the spacing between the kernel points.
            If dilation is a list/tuple, it must contain two integers, (dilation_height,
            dilation_width). Otherwise, dilation_height = dilation_width = dilation.
            Default: dilation = 1.
        groups (int, optional): The groups number of the Conv2D Layer. According to grouped
            convolution in Alex Krizhevsky's Deep CNN paper: when group=2,
            the first half of the filters is only connected to the first half
            of the input channels, while the second half of the filters is only
            connected to the second half of the input channels. Default: groups=1. Currently, only support groups=1.
        data_format (str, optional): Specify the data format of the input, and the data format of the output
            will be consistent with that of the input. An optional string from: `"NHWC"`.
            The default is `"NHWC"`. When it is `"NHWC"`, the data is stored in the order of:
            `[batch_size, input_height, input_width, input_channels]`.
        key(str|None, optional): the key is used to save or use the same rulebook,
            the definition and role of rulebook refers to
            https://pdfs.semanticscholar.org/5125/a16039cabc6320c908a4764f32596e018ad3.pdf. The
            default value is None.
        name(str|None, optional): For detailed information, please refer
            to :ref:`api_guide_Name`. Usually name is no need to set and
            None by default.

    Returns:
        A SparseCooTensor representing the conv2d, whose data type is the same with input.

    Examples:
        .. code-block:: pycon

            >>> import paddle

            >>> indices = [[0, 0, 0, 0], [0, 0, 1, 2], [1, 3, 2, 3]]
            >>> values = [[1], [2], [3], [4]]
            >>> indices = paddle.to_tensor(indices, dtype='int32')
            >>> values = paddle.to_tensor(values, dtype='float32')
            >>> dense_shape = [1, 3, 4, 1]
            >>> sparse_x = paddle.sparse.sparse_coo_tensor(indices, values, dense_shape, stop_gradient=True)
            >>> weight = paddle.randn((3, 3, 1, 1), dtype='float32')
            >>> y = paddle.sparse.nn.functional.subm_conv2d_igemm(sparse_x, weight)
            >>> print(y.shape)
            paddle.Size([1, 3, 4, 1])
    """
    # subm=True: submanifold convolution preserves the input sparsity pattern;
    # `key` names the rulebook so it can be cached and reused.
    return _conv2d_igemm(
        x,
        weight,
        bias,
        stride,
        padding,
        dilation,
        groups,
        True,
        key,
        data_format,
        name,
    )