The normalize methods in TensorFlow and PyTorch

def normalize(tensor, ord="euclidean", axis=None, name=None):
  """Normalizes tensor along dimension axis using specified norm.

  This uses tf.linalg.norm to compute the norm along axis.

  This function can compute several different vector norms (the 1-norm, the
  Euclidean or 2-norm, the inf-norm, and in general the p-norm for p > 0) and
  matrix norms (Frobenius, 1-norm, 2-norm and inf-norm).

  Args:
    tensor: Tensor of types float32, float64, complex64, complex128
    ord: Order of the norm. Supported values are 'fro', 'euclidean', 1,
      2, np.inf and any positive real number yielding the corresponding
      p-norm. Default is 'euclidean' which is equivalent to Frobenius norm if
      tensor is a matrix and equivalent to 2-norm for vectors.

      Some restrictions apply: a) The Frobenius norm 'fro' is not defined for
        vectors, b) If axis is a 2-tuple (matrix norm), only 'euclidean',
        'fro', 1, 2, np.inf are supported. See the description of axis
        on how to compute norms for a batch of vectors or matrices stored in a
        tensor.

    axis: If axis is None (the default), the input is considered a vector
      and a single vector norm is computed over the entire set of values in the
      tensor, i.e. norm(tensor, ord=ord) is equivalent to
      norm(reshape(tensor, [-1]), ord=ord). If axis is a Python integer, the
      input is considered a batch of vectors, and axis determines the axis in
      tensor over which to compute vector norms. If axis is a 2-tuple of
      Python integers it is considered a batch of matrices and axis determines
      the axes in tensor over which to compute a matrix norm.

      Negative indices are supported. Example: If you are passing a tensor that
        can be either a matrix or a batch of matrices at runtime, pass
        axis=[-2,-1] instead of axis=None to make sure that matrix norms are
        computed.

Args:

tensor: the input.
ord: the normalization method. Both l1 and l2 are available, l2 being the most common; 'euclidean' is the Euclidean norm, which as far as I understand is the same thing as l2. The default here is 'euclidean' (see the quick check after this list).
axis: the axis/dimension to normalize along. Note that this TensorFlow function also accepts a tuple of axes.
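A quick way to see that 'euclidean' really is the plain l2 norm is to compare the two ord values on a small vector. This is a minimal sketch of my own (not from the original post); it also pulls in the imports used by the examples below.

import numpy as np
import tensorflow as tf

v = tf.constant([3.0, 4.0])
n_euclidean, _ = tf.linalg.normalize(v, ord="euclidean")   # divides by sqrt(3**2 + 4**2) = 5
n_l2, _ = tf.linalg.normalize(v, ord=2)
print(n_euclidean.numpy())                                  # [0.6 0.8]
print(np.allclose(n_euclidean.numpy(), n_l2.numpy()))       # True: same result for vectors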

Example:

data = np.array([[[ 1., -1.,  2.],
                  [ 2.,  0.,  0.],
                  [ 0.,  1., -1.]],
                 [[ 1.,  1.,  1.],
                  [ 2.,  2.,  2.],
                  [ 1.,  1., -1.]],
                 [[ 1.,  0.,  0.],
                  [ 2.,  0.,  0.],
                  [ 0.,  0., -1.]]])

data_tensor = tf.convert_to_tensor(data, dtype=tf.float32)

With the defaults (ord='euclidean', i.e. the l2 norm, and axis=None) the tensor is flattened: the squares of all 27 elements sum to 36, so the norm is 6 and every element is divided by 6.

result, _ = tf.linalg.normalize(data_tensor)

print(result)

tf.Tensor(
[[[ 0.16666667 -0.16666667  0.33333334]
  [ 0.33333334  0.          0.        ]
  [ 0.          0.16666667 -0.16666667]]

 [[ 0.16666667  0.16666667  0.16666667]
  [ 0.33333334  0.33333334  0.33333334]
  [ 0.16666667  0.16666667 -0.16666667]]

 [[ 0.16666667  0.          0.        ]
  [ 0.33333334  0.          0.        ]
  [ 0.          0.         -0.16666667]]], shape=(3, 3, 3), dtype=float32)
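A quick check of my own (not from the original post) that the flattened norm really is 6, and that tf.linalg.normalize also hands that norm back as its second return value:

result, norm = tf.linalg.normalize(data_tensor)
print(np.sqrt((data ** 2).sum()))                # 6.0 -- the flattened l2 norm claimed above
print(norm.numpy().item())                       # 6.0 -- normalize() also returns this norm
print(np.allclose(result.numpy() * 6.0, data))   # True: scaling back recovers the input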

axis=0: along the batch dimension.

result, _ = tf.linalg.normalize(data_tensor, axis=0)

print(result)

tf.Tensor(
[[[ 0.57735026 -0.70710677  0.8944272 ]
  [ 0.57735026  0.          0.        ]
  [ 0.          0.70710677 -0.57735026]]

 [[ 0.57735026  0.70710677  0.4472136 ]
  [ 0.57735026  1.          1.        ]
  [ 1.          0.70710677 -0.57735026]]

 [[ 0.57735026  0.          0.        ]
  [ 0.57735026  0.          0.        ]
  [ 0.          0.         -0.57735026]]], shape=(3, 3, 3), dtype=float32)
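To make axis=0 concrete, here is a small check of my own: each "pillar" data[:, i, j] taken across the batch dimension is divided by its own l2 norm.

pillar = data[:, 0, 0]                        # [1., 1., 1.]
print(pillar / np.linalg.norm(pillar))        # [0.5774 0.5774 0.5774] -- matches result[:, 0, 0]
print(np.allclose(result.numpy(),
                  data / np.linalg.norm(data, axis=0, keepdims=True)))   # True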

axis=1: along each column of each matrix.

result, _ = tf.linalg.normalize(data_tensor, axis=1)

print(result)

tf.Tensor(
[[[ 0.4472136  -0.70710677  0.8944272 ]
  [ 0.8944272   0.          0.        ]
  [ 0.          0.70710677 -0.4472136 ]]

 [[ 0.40824828  0.40824828  0.40824828]
  [ 0.81649655  0.81649655  0.81649655]
  [ 0.40824828  0.40824828 -0.40824828]]

 [[ 0.4472136          nan  0.        ]
  [ 0.8944272          nan  0.        ]
  [ 0.                 nan -1.        ]]], shape=(3, 3, 3), dtype=float32)
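The nan values in the last matrix come from the column data[2, :, 1] = [0, 0, 0]: its norm is 0, and 0/0 gives nan, because tf.linalg.normalize has no epsilon guard. A sketch of my own workaround, clamping the norm the way PyTorch's F.normalize does with its eps argument:

col_norm = tf.norm(data_tensor, axis=1, keepdims=True)   # l2 norm of each column, shape (3, 1, 3)
safe = data_tensor / tf.maximum(col_norm, 1e-12)          # all-zero columns stay 0 instead of nan
print(safe.numpy()[2])                                    # third matrix: middle column is now 0.0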

axis=2: along each row of each matrix.

result, _ = tf.linalg.normalize(data_tensor, axis=2)

print(result)

tf.Tensor(
[[[ 0.40824828 -0.40824828  0.81649655]
  [ 1.          0.          0.        ]
  [ 0.          0.70710677 -0.70710677]]

 [[ 0.57735026  0.57735026  0.57735026]
  [ 0.57735026  0.57735026  0.57735026]
  [ 0.57735026  0.57735026 -0.57735026]]

 [[ 1.          0.          0.        ]
  [ 1.          0.          0.        ]
  [ 0.          0.         -1.        ]]], shape=(3, 3, 3), dtype=float32)
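For this row-wise case TensorFlow also provides tf.math.l2_normalize, which divides by sqrt(max(sum(x**2), epsilon)) and is therefore closer in spirit to PyTorch's F.normalize. A quick comparison of my own with the axis=2 result above:

l2n = tf.math.l2_normalize(data_tensor, axis=2)
print(np.allclose(l2n.numpy(), result.numpy()))   # True here, since no row is all zeros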

axis=(1, 2): normalizes each matrix of the batch (indexed by dimension 0) by its Frobenius norm.

result, _ = tf.linalg.normalize(data_tensor, axis=(1, 2))

print(result)

tf.Tensor(
[[[ 0.28867513 -0.28867513  0.57735026]
  [ 0.57735026  0.          0.        ]
  [ 0.          0.28867513 -0.28867513]]

 [[ 0.23570228  0.23570228  0.23570228]
  [ 0.47140455  0.47140455  0.47140455]
  [ 0.23570228  0.23570228 -0.23570228]]

 [[ 0.40824828  0.          0.        ]
  [ 0.81649655  0.          0.        ]
  [ 0.          0.         -0.40824828]]], shape=(3, 3, 3), dtype=float32)
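To confirm what axis=(1, 2) does, a check of my own: each 3x3 matrix is divided by its own Frobenius norm, i.e. the square root of the sum of its squared entries.

fro = np.sqrt((data ** 2).sum(axis=(1, 2), keepdims=True))   # per-matrix Frobenius norms
print(fro.ravel())                                            # [3.4641 4.2426 2.4495]
print(np.allclose(result.numpy(), data / fro))                # True
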
def normalize(input: Tensor, p: float = 2, dim: int = 1, eps: float = 1e-12, out: Optional[Tensor] = None) -> Tensor:
    r"""Performs :math:`L_p` normalization of inputs over specified dimension.

    For a tensor :attr:`input` of sizes :math:`(n_0, ..., n_{dim}, ..., n_k)`, each
    :math:`n_{dim}`-element vector :math:`v` along dimension :attr:`dim` is transformed as

    .. math::
        v = \frac{v}{\max(\lVert v \rVert_p, \epsilon)}.

    With the default arguments it uses the Euclidean norm over vectors along dimension :math:`1` for normalization.

    Args:
        input: input tensor of any shape
        p (float): the exponent value in the norm formulation. Default: 2
        dim (int): the dimension to reduce. Default: 1
        eps (float): small value to avoid division by zero. Default: 1e-12
        out (Tensor, optional): the output tensor. If :attr:`out` is used, this
                                operation won't be differentiable.
    """

Args:

input: the input.
p: the norm order (l1, l2, ...); the default p=2 gives l2 normalization.
dim: the dimension along which to normalize; the default is 1 (see the sketch after this list).
eps: a small constant the norm is clamped to, which avoids the division-by-zero nan seen in the TensorFlow axis=1 example above.
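Before the example, a minimal sketch of my own spelling out the formula from the docstring: every vector v along dim becomes v / max(||v||_p, eps), so an all-zero vector simply stays zero instead of producing nan. The imports here are also the ones used in the example below.

import torch
import torch.nn.functional as F

x = torch.tensor([[3.0, 4.0],
                  [0.0, 0.0]])
manual = x / x.norm(p=2, dim=1, keepdim=True).clamp_min(1e-12)
print(F.normalize(x, p=2, dim=1))                          # [[0.6, 0.8], [0.0, 0.0]]
print(torch.allclose(F.normalize(x, p=2, dim=1), manual))  # True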

data = np.array([[[ 1., -1.,  2.],
                  [ 2.,  0.,  0.],
                  [ 0.,  1., -1.]],
                 [[ 1.,  1.,  1.],
                  [ 2.,  2.,  2.],
                  [ 1.,  1., -1.]],
                 [[ 1.,  0.,  0.],
                  [ 2.,  0.,  0.],
                  [ 0.,  0., -1.]]])
data_tensor = torch.tensor(data, dtype=torch.float32)

dim=0: the result is the same as TensorFlow's axis=0.

result = F.normalize(data_tensor, dim=0)

print(result)

tensor([[[ 0.5774, -0.7071,  0.8944],
         [ 0.5774,  0.0000,  0.0000],
         [ 0.0000,  0.7071, -0.5774]],

        [[ 0.5774,  0.7071,  0.4472],
         [ 0.5774,  1.0000,  1.0000],
         [ 1.0000,  0.7071, -0.5774]],

        [[ 0.5774,  0.0000,  0.0000],
         [ 0.5774,  0.0000,  0.0000],
         [ 0.0000,  0.0000, -0.5774]]])
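A small check of my own that dim=0 here performs the same per-"pillar" division as the TensorFlow axis=0 example, just with the norm clamped by eps:

manual = data_tensor / data_tensor.norm(p=2, dim=0, keepdim=True).clamp_min(1e-12)
print(torch.allclose(result, manual))   # True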

Original: https://blog.csdn.net/henry_xiong030/article/details/123865308
Author: henry_xiong030
Title: The normalize methods in TensorFlow and PyTorch
