Python backend.ndim Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.keras.backend.ndim method in Python. If you have been wondering how exactly backend.ndim is used, how to call it, or what real-world examples of it look like, the curated code examples below may help. You can also explore further usage examples from the tensorflow.keras.backend module that this method belongs to.


The following lists 15 code examples of the backend.ndim method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
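
Before the examples, a minimal sketch of what backend.ndim itself does: it returns the static rank (number of axes) of a tensor as a plain Python integer, which is what lets the snippets below branch on tensor shape.

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.zeros((4, 16, 8))
print(K.ndim(x))  # 3 -- the number of axes of x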

Example 1: amplitude_to_decibel

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def amplitude_to_decibel(x, amin=1e-10, dynamic_range=80.0):
    """[K] Convert (linear) amplitude to decibel (log10(x)).

    Parameters
    ----------
    x: Keras *batch* tensor or variable. It has to be a batch tensor because `K.max()` is applied per sample.

    amin: minimum amplitude; values smaller than `amin` are clipped to `amin`.

    dynamic_range: dynamic range in decibels.

    """
    log_spec = 10 * K.log(K.maximum(x, amin)) / np.log(10).astype(K.floatx())
    if K.ndim(x) > 1:
        axis = tuple(range(K.ndim(x))[1:])
    else:
        axis = None

    log_spec = log_spec - K.max(log_spec, axis=axis, keepdims=True)  # [-?, 0]
    log_spec = K.maximum(log_spec, -1 * dynamic_range)  # [-80, 0]
    return log_spec 
Developer: keunwoochoi, Project: kapre, Lines of code: 23, Source file: backend_keras.py
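
A quick usage sketch (not part of the original project; it assumes the amplitude_to_decibel above is importable and that its module imports numpy as np and the Keras backend as K):

import numpy as np
import tensorflow as tf

# a made-up batch of two "spectrograms"; any batch tensor works because the
# normalization axes are derived from K.ndim(x)
spec = tf.convert_to_tensor(np.random.rand(2, 128, 64).astype('float32'))
db = amplitude_to_decibel(spec, amin=1e-10, dynamic_range=80.0)
# within each sample the maximum maps to 0 dB and values are clipped to [-80, 0]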

Example 2: autodetect_mode

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def autodetect_mode(a, b):
    """
    Returns a code identifying the mode of operation (single, mixed, inverted mixed,
    or batch), given `a` and `b`. See `ops.modes` for the meaning of the codes.
    :param a: Tensor or SparseTensor.
    :param b: Tensor or SparseTensor.
    :return: mode of operation as an integer code.
    """
    a_dim = K.ndim(a)
    b_dim = K.ndim(b)
    if b_dim == 2:
        if a_dim == 2:
            return SINGLE
        elif a_dim == 3:
            return iMIXED
    elif b_dim == 3:
        if a_dim == 2:
            return MIXED
        elif a_dim == 3:
            return BATCH
    return UNKNOWN 
Developer: danielegrattarola, Project: spektral, Lines of code: 23, Source file: modes.py
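
A small sketch of the rank checks that drive the dispatch above; only K.ndim is involved, while the mode constants themselves come from spektral's ops.modes:

import tensorflow as tf
from tensorflow.keras import backend as K

A = tf.zeros((5, 5))         # rank-2 tensor, e.g. a single adjacency matrix
X = tf.zeros((8, 5, 3))      # rank-3 tensor, e.g. batched node features
print(K.ndim(A), K.ndim(X))  # 2 3 -> the (a_dim == 2, b_dim == 3) branch, i.e. MIXED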

Example 3: matmul_AT_B

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def matmul_AT_B(a, b):
    """
    Computes A.T * B, dealing automatically with sparsity and data modes.
    :param a: Tensor or SparseTensor with rank 2 or 3.
    :param b: Tensor or SparseTensor with rank 2 or 3.
    :return: Tensor or SparseTensor with rank = max(rank(a), rank(b)).
    """
    mode = modes.autodetect_mode(a, b)
    if mode == modes.SINGLE or mode == modes.MIXED:
        # Single (rank(a)=2, rank(b)=2)
        # Mixed (rank(a)=2, rank(b)=3)
        a_t = ops.transpose(a)
    elif mode == modes.iMIXED or mode == modes.BATCH:
        # Inverted mixed (rank(a)=3, rank(b)=2)
        # Batch (rank(a)=3, rank(b)=3)
        a_t = ops.transpose(a, (0, 2, 1))
    else:
        raise ValueError('Expected ranks to be 2 or 3, got {} and {}'.format(
            K.ndim(a), K.ndim(b)
        ))

    return matmul_A_B(a_t, b) 
Developer: danielegrattarola, Project: spektral, Lines of code: 24, Source file: matmul.py

Example 4: call

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def call(self, inputs):
        if self.data_mode == 'disjoint':
            X, I = inputs
            if K.ndim(I) == 2:
                I = I[:, 0]
        else:
            X = inputs
        inputs_linear = self.features_layer(X)
        attn = self.attention_layer(X)
        masked_inputs = inputs_linear * attn
        if self.data_mode in {'single', 'batch'}:
            output = K.sum(masked_inputs, axis=-2,
                           keepdims=self.data_mode == 'single')
        else:
            output = tf.math.segment_sum(masked_inputs, I)

        return output 
Developer: danielegrattarola, Project: spektral, Lines of code: 19, Source file: global_pool.py
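
The disjoint-mode branch is the only non-obvious part; below is a self-contained sketch of tf.math.segment_sum pooling node features per graph (the feature values and segment ids are made up):

import tensorflow as tf

X = tf.constant([[1., 1.], [2., 2.], [3., 3.]])  # 3 nodes, 2 features each
I = tf.constant([0, 0, 1])                       # graph id of every node
print(tf.math.segment_sum(X, I))                 # [[3. 3.] [3. 3.]]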

Example 5: _softmax

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def _softmax(x, axis=-1, alpha=1):
    """
    Softmax activation function, building on the Keras implementation with an
    additional `alpha` parameter.
    # Arguments
        x : Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
        alpha: scalar multiplied into `x` before the softmax (temperature-like scaling).
    # Returns
        Tensor, output of softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    x = alpha * x
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D') 
Developer: adalca, Project: neuron, Lines of code: 26, Source file: utils.py
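
A usage sketch, assuming the _softmax above is importable from a module that has the Keras backend in scope as K:

import tensorflow as tf

logits = tf.random.normal((2, 4, 10))         # rank 3, so the K.ndim(x) > 2 branch runs
probs = _softmax(logits, axis=-1, alpha=2.0)  # alpha > 1 sharpens the distribution
# probs sums to 1 along the last axis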

Example 6: sequence_masking

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def sequence_masking(x, mask, mode=0, axis=None):
    """为序列条件mask的函数
    mask: 形如(batch_size, seq_len)的0-1矩阵;
    mode: 如果是0,则直接乘以mask;
          如果是1,则在padding部分减去一个大正数。
    axis: 序列所在轴,默认为1;
    """
    if mask is None or mode not in [0, 1]:
        return x
    else:
        if axis is None:
            axis = 1
        if axis == -1:
            axis = K.ndim(x) - 1
        assert axis > 0, 'axis must be greater than 0'
        for _ in range(axis - 1):
            mask = K.expand_dims(mask, 1)
        for _ in range(K.ndim(x) - K.ndim(mask) - axis + 1):
            mask = K.expand_dims(mask, K.ndim(mask))
        if mode == 0:
            return x * mask
        else:
            return x - (1 - mask) * 1e12 
Developer: bojone, Project: bert4keras, Lines of code: 25, Source file: backend.py
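
A usage sketch with a padded batch (toy values); with the default axis the mask is broadcast over the feature dimension:

import tensorflow as tf

x = tf.random.normal((2, 4, 8))               # (batch_size, seq_len, features)
mask = tf.constant([[1., 1., 1., 0.],
                    [1., 1., 0., 0.]])        # (batch_size, seq_len) 0-1 matrix
x_zeroed = sequence_masking(x, mask, mode=0)  # padded steps multiplied by 0
x_masked = sequence_masking(x, mask, mode=1)  # padded steps pushed down by 1e12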

Example 7: _fftshift

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def _fftshift(self, x, axes=None):
        """
        Shift the zero-frequency component to the center of the spectrum.
        This function swaps half-spaces for all axes listed (defaults to all).
        Note that ``y[0]`` is the Nyquist component only if ``len(x)`` is even.
        Parameters
        ----------
        x : array_like, Tensor
            Input array.
        axes : int or shape tuple, optional
            Axes over which to shift.  Default is None, which shifts all axes.
        Returns
        -------
        y : Tensor.
        """
        if axes is None:
            axes = tuple(range(ndim(x)))
            shift = [dim // 2 for dim in tf.shape(x)]
        elif isinstance(axes, int):
            shift = tf.shape(x)[axes] // 2
        else:
            shift = [tf.shape(x)[ax] // 2 for ax in axes]

        return _roll(x, shift, axes) 
Developer: xulabs, Project: aitom, Lines of code: 26, Source file: RigidTransformation3DImputation.py
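
A hedged sketch of the same shift written directly against tf.roll (the class's _roll helper is assumed to behave like tf.roll; the tensor is a toy example):

import tensorflow as tf
from tensorflow.keras.backend import ndim

x = tf.reshape(tf.range(8, dtype=tf.float32), (2, 4))
axes = tuple(range(ndim(x)))
shift = [dim // 2 for dim in tf.shape(x)]  # half of every axis length
print(tf.roll(x, shift, axes))             # zero-frequency bin moved to the centre of each axis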

Example 8: _ifftshift

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def _ifftshift(self, x, axes=None):
        """
        The inverse of `fftshift`. Although identical for even-length `x`, the
        functions differ by one sample for odd-length `x`.
        Parameters
        ----------
        x : array_like, Tensor.
        axes : int or shape tuple, optional
            Axes over which to calculate.  Defaults to None, which shifts all axes.
        Returns
        -------
        y : Tensor.
        """
        if axes is None:
            axes = tuple(range(ndim(x)))
            shift = [-(dim // 2) for dim in tf.shape(x)]
        elif isinstance(axes, int):
            shift = -(tf.shape(x)[axes] // 2)
        else:
            shift = [-(tf.shape(x)[ax] // 2) for ax in axes]

        return _roll(x, shift, axes) 
Developer: xulabs, Project: aitom, Lines of code: 24, Source file: RigidTransformation3DImputation.py

Example 9: cat_acc

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def cat_acc(y_true, y_pred):
    """Keras loss function for sparse_categorical_accuracy.

    :param y_true: tensor of true class labels.
    :param y_pred: class output scores from network.

    :returns: categorical accuracy.
    """
    # sparse_categorical_accuracy is broken in keras 2.2.4
    #   https://github.com/keras-team/keras/issues/11348#issuecomment-439969957
    # this is taken from e59570ae
    from tensorflow.keras import backend as K
    # reshape in case it's in shape (num_samples, 1) instead of (num_samples,)
    if K.ndim(y_true) == K.ndim(y_pred):
        y_true = K.squeeze(y_true, -1)
    # convert dense predictions to labels
    y_pred_labels = K.argmax(y_pred, axis=-1)
    y_pred_labels = K.cast(y_pred_labels, K.floatx())
    return K.cast(K.equal(y_true, y_pred_labels), K.floatx()) 
Developer: nanoporetech, Project: medaka, Lines of code: 21, Source file: training.py
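
A usage sketch, assuming the cat_acc above is importable; the labels are deliberately shaped (num_samples, 1) to exercise the K.ndim branch:

import tensorflow as tf

y_true = tf.constant([[1.], [0.], [2.]])   # (num_samples, 1)
y_pred = tf.constant([[0.1, 0.8, 0.1],
                      [0.7, 0.2, 0.1],
                      [0.2, 0.2, 0.6]])
print(cat_acc(y_true, y_pred))             # [1. 1. 1.]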

Example 10: gather_indexes

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def gather_indexes(A: tf.Tensor, B: tf.Tensor) -> tf.Tensor:
    """
    Args:
        A: a tensor with data
        B: an integer tensor with indexes

    Returns:
        `answer` a tensor such that ``answer[i, j] = A[i, B[i, j]]``.
        In case `B` is one-dimensional, the output is ``answer[i] = A[i, B[i]]``

    """
    are_indexes_one_dim = (kb.ndim(B) == 1)
    if are_indexes_one_dim:
        B = tf.expand_dims(B, -1)
    first_dim_indexes = tf.expand_dims(tf.range(tf.shape(B)[0]), -1)
    first_dim_indexes = tf.tile(first_dim_indexes, [1, tf.shape(B)[1]])
    indexes = tf.stack([first_dim_indexes, B], axis=-1)
    answer = tf.gather_nd(A, indexes)
    if are_indexes_one_dim:
        answer = answer[:,0]
    return answer 
Developer: deepmipt, Project: DeepPavlov, Lines of code: 23, Source file: network.py
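
A usage sketch with toy tensors, assuming the gather_indexes above is importable:

import tensorflow as tf

A = tf.constant([[10, 11, 12],
                 [20, 21, 22]])
B = tf.constant([[2, 0],
                 [1, 1]])
print(gather_indexes(A, B))  # [[12 10] [21 21]], i.e. answer[i, j] = A[i, B[i, j]]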

Example 11: normalize_A

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def normalize_A(A):
    """
    Computes symmetric normalization of A, dealing with sparse A and batch mode
    automatically.
    :param A: Tensor or SparseTensor with rank k = {2, 3}.
    :return: Tensor or SparseTensor of rank k.
    """
    D = degrees(A)
    D = tf.sqrt(D)[:, None] + K.epsilon()
    perm = (0, 2, 1) if K.ndim(A) == 3 else (1, 0)
    output = (A / D) / ops.transpose(D, perm=perm)

    return output 
Developer: danielegrattarola, Project: spektral, Lines of code: 15, Source file: graph.py
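
A self-contained sketch of the same symmetric normalization for a dense rank-2 adjacency matrix; spektral's degrees and ops.transpose helpers are replaced here by plain TensorFlow calls, so this only mirrors the single-graph case:

import tensorflow as tf
from tensorflow.keras import backend as K

A = tf.constant([[0., 1., 1.],
                 [1., 0., 0.],
                 [1., 0., 0.]])
D = tf.sqrt(tf.reduce_sum(A, axis=-1))[:, None] + K.epsilon()  # sqrt of node degrees
perm = (0, 2, 1) if K.ndim(A) == 3 else (1, 0)
A_norm = (A / D) / tf.transpose(D, perm=perm)                  # D^{-1/2} A D^{-1/2}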

Example 12: degree_matrix

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def degree_matrix(A, return_sparse_batch=False):
    """
    Computes the degree matrix of A, dealing with sparse A and batch mode
    automatically.
    :param A: Tensor or SparseTensor with rank k = {2, 3}.
    :param return_sparse_batch: if operating in batch mode, return a
    SparseTensor. Note that the sparse degree Tensor returned by this function
    cannot be used for sparse matrix multiplication afterwards.
    :return: SparseTensor of rank k.
    """
    D = degrees(A)

    batch_mode = K.ndim(D) == 2
    N = tf.shape(D)[-1]
    batch_size = tf.shape(D)[0] if batch_mode else 1

    inner_index = tf.tile(tf.stack([tf.range(N)] * 2, axis=1), (batch_size, 1))
    if batch_mode:
        if return_sparse_batch:
            outer_index = ops.repeat(
                tf.range(batch_size), tf.ones(batch_size) * tf.cast(N, tf.float32)
            )
            indices = tf.concat([outer_index[:, None], inner_index], 1)
            dense_shape = (batch_size, N, N)
        else:
            return tf.linalg.diag(D)
    else:
        indices = inner_index
        dense_shape = (N, N)

    indices = tf.cast(indices, tf.int64)
    values = tf.reshape(D, (-1,))
    return tf.SparseTensor(indices, values, dense_shape) 
Developer: danielegrattarola, Project: spektral, Lines of code: 35, Source file: graph.py

Example 13: dot

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def dot(a, b, transpose_a=False, transpose_b=False):
    """
    Dot product between `a` and `b`, with automatic handling of batch dimensions.
    Supports both dense and sparse multiplication (including sparse-sparse).
    The innermost dimension of `a` must match the outermost dimension of `b`,
    unless there is a shared batch dimension.
    Note that doing sparse-sparse multiplication of any rank and sparse-dense
    multiplication with rank higher than 2 may result in slower computations.
    :param a: Tensor or SparseTensor with rank 2 or 3.
    :param b: Tensor or SparseTensor with rank 2 or 3.
    :param transpose_a: bool, transpose innermost two dimensions of a.
    :param transpose_b: bool, transpose innermost two dimensions of b.
    :return: Tensor or SparseTensor with rank 2 or 3.
    """
    a_is_sparse_tensor = isinstance(a, tf.SparseTensor)
    b_is_sparse_tensor = isinstance(b, tf.SparseTensor)

    # Handle case where we can use faster sparse-dense matmul
    if K.ndim(a) == 2 and K.ndim(b) == 2:
        if transpose_a:
            a = ops.transpose(a)
        if transpose_b:
            b = ops.transpose(b)
        if a_is_sparse_tensor and not b_is_sparse_tensor:
            return tf.sparse.sparse_dense_matmul(a, b)
        elif not a_is_sparse_tensor and b_is_sparse_tensor:
            return ops.transpose(
                tf.sparse.sparse_dense_matmul(ops.transpose(b), ops.transpose(a))
            )

    # Fallthrough to tfsp implementation
    # Defaults to tf.matmul if neither is sparse
    if a_is_sparse_tensor:
        a = tfsp.CSRSparseMatrix(a)
    if b_is_sparse_tensor:
        b = tfsp.CSRSparseMatrix(b)
    out = tfsp.matmul(a, b, transpose_a=transpose_a, transpose_b=transpose_b)
    if hasattr(out, 'to_sparse_tensor'):
        return out.to_sparse_tensor()

    return out 
Developer: danielegrattarola, Project: spektral, Lines of code: 43, Source file: matmul.py
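
A sketch of the rank-2 fast path that the K.ndim checks above select (toy matrices; the sparse operand is built with tf.sparse.from_dense purely for illustration):

import tensorflow as tf
from tensorflow.keras import backend as K

a = tf.sparse.from_dense(tf.constant([[1., 0.], [0., 2.]]))
b = tf.constant([[3., 4.], [5., 6.]])
if K.ndim(a) == 2 and K.ndim(b) == 2:
    out = tf.sparse.sparse_dense_matmul(a, b)  # [[3. 4.] [10. 12.]]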

Example 14: get_inputs

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def get_inputs(inputs):
        if len(inputs) == 3:
            X, A, E = inputs
            assert K.ndim(E) == 2, 'E must have rank 2'
        elif len(inputs) == 2:
            X, A = inputs
            E = None
        else:
            raise ValueError('Expected 2 or 3 input tensors (X, A, E), got {}.'
                             .format(len(inputs)))
        assert K.ndim(X) == 2, 'X must have rank 2'
        assert K.is_sparse(A), 'A must be a SparseTensor'
        assert K.ndim(A) == 2, 'A must have rank 2'

        return X, A, E 
Developer: danielegrattarola, Project: spektral, Lines of code: 17, Source file: message_passing.py
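
A hedged sketch, assuming the get_inputs above is available as a plain (static) helper; the feature and adjacency tensors are toy values:

import tensorflow as tf

X = tf.random.normal((5, 3))              # node features, rank 2
A = tf.sparse.from_dense(tf.eye(5))       # sparse adjacency, rank 2
X_out, A_out, E_out = get_inputs([X, A])  # passes the rank/sparsity asserts; E_out is None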

Example 15: __init__

# Required import: from tensorflow.keras import backend [as alias]
# Or alternatively: from tensorflow.keras.backend import ndim [as alias]
def __init__(self, filters,
                 kernel_size,
                 strides=(1, 1, 1),
                 padding='valid',
                 data_format=None,
                 activation=None,
                 use_bias=True,
                 kernel_initializer='glorot_uniform',
                 bias_initializer='zeros',
                 kernel_regularizer=None,
                 bias_regularizer=None,
                 activity_regularizer=None,
                 kernel_constraint=None,
                 bias_constraint=None,
                 **kwargs):
        
        super(LocallyConnected3D, self).__init__(**kwargs)
        self.filters = filters
        self.kernel_size = conv_utils.normalize_tuple(
            kernel_size, 3, 'kernel_size')
        self.strides = conv_utils.normalize_tuple(strides, 3, 'strides')
        self.padding = conv_utils.normalize_padding(padding)
        if self.padding != 'valid':
            raise ValueError('Invalid border mode for LocallyConnected3D '
                             '(only "valid" is supported): ' + padding)
        self.data_format = conv_utils.normalize_data_format(data_format)
        self.activation = activations.get(activation)
        self.use_bias = use_bias
        self.kernel_initializer = initializers.get(kernel_initializer)
        self.bias_initializer = initializers.get(bias_initializer)
        self.kernel_regularizer = regularizers.get(kernel_regularizer)
        self.bias_regularizer = regularizers.get(bias_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)
        self.kernel_constraint = constraints.get(kernel_constraint)
        self.bias_constraint = constraints.get(bias_constraint)
        self.input_spec = InputSpec(ndim=5) 
Developer: adalca, Project: neuron, Lines of code: 38, Source file: layers.py


Note: The tensorflow.keras.backend.ndim examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are excerpted from open-source projects contributed by many developers, and the copyright of the source code belongs to the original authors. Please consult the corresponding project's license before redistributing or using the code; do not reproduce without permission.