

Python backend.permute_dimensions Method Code Examples

This article collects typical usage examples of the tensorflow.keras.backend.permute_dimensions method in Python. If you are wondering what exactly backend.permute_dimensions does and how to use it, the curated code examples below may help. You can also explore further usage examples of the enclosing module, tensorflow.keras.backend.


The following presents 15 code examples of the backend.permute_dimensions method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
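
Before the examples, here is a minimal orientation sketch (not taken from any of the projects below; the tensor values are illustrative only): K.permute_dimensions(x, pattern) reorders the axes of a tensor according to a static index pattern, the backend counterpart of np.transpose.

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.arange(24).reshape(2, 3, 4))  # shape (2, 3, 4)
y = K.permute_dimensions(x, (0, 2, 1))          # swap the last two axes
print(K.int_shape(y))                           # (2, 4, 3)
# same result as np.transpose with the same axis pattern
assert np.allclose(K.eval(y), np.transpose(np.arange(24).reshape(2, 3, 4), (0, 2, 1)))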

Example 1: call

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def call(self, x):
        power_spectrogram = super(Melspectrogram, self).call(x)
        # now,  channels_first: (batch_sample, n_ch, n_freq, n_time)
        #       channels_last: (batch_sample, n_freq, n_time, n_ch)
        if self.image_data_format == 'channels_first':
            power_spectrogram = K.permute_dimensions(power_spectrogram, [0, 1, 3, 2])
        else:
            power_spectrogram = K.permute_dimensions(power_spectrogram, [0, 3, 2, 1])
        # now, whatever image_data_format, (batch_sample, n_ch, n_time, n_freq)
        output = K.dot(power_spectrogram, self.freq2mel)
        if self.image_data_format == 'channels_first':
            output = K.permute_dimensions(output, [0, 1, 3, 2])
        else:
            output = K.permute_dimensions(output, [0, 3, 2, 1])
        if self.power_melgram != 2.0:
            output = K.pow(K.sqrt(output), self.power_melgram)
        if self.return_decibel_melgram:
            output = backend_keras.amplitude_to_decibel(output)
        return output 
Developer: keunwoochoi, Project: kapre, Lines of code: 21, Source file: time_frequency.py

Example 2: call

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def call(self, x):

        n = (self.win_length - 1) / 2.0
        denom = n * (n + 1) * (2 * n + 1) / 3  # closed form of sum(k**2 for k in [-n, n]); normalizes the delta kernel

        if self.data_format == 'channels_first':
            x = K.permute_dimensions(x, (0, 2, 3, 1))

        x = tf.pad(x, tf.constant([[0, 0], [0, 0], [int(n), int(n)], [0, 0]]), mode=self.mode)
        kernel = K.arange(-n, n + 1, 1, dtype=K.floatx())
        kernel = K.reshape(kernel, (1, kernel.shape[-1], 1, 1))  # (freq, time)

        x = K.conv2d(x, kernel, 1, data_format='channels_last') / denom

        if self.data_format == 'channels_first':
            x = K.permute_dimensions(x, (0, 3, 1, 2))

        return x 
Developer: keunwoochoi, Project: kapre, Lines of code: 20, Source file: utils.py

Example 3: split_heads_2d

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def split_heads_2d(self, ip):
        tensor_shape = K.shape(ip)

        # batch, height, width, channels for axis = -1
        tensor_shape = [tensor_shape[i] for i in range(len(self._shape))]

        batch = tensor_shape[0]
        height = tensor_shape[1]
        width = tensor_shape[2]
        channels = tensor_shape[3]

        # Save the spatial tensor dimensions
        self._batch = batch
        self._height = height
        self._width = width

        ret_shape = K.stack([batch, height, width,  self.num_heads, channels // self.num_heads])
        split = K.reshape(ip, ret_shape)
        transpose_axes = (0, 3, 1, 2, 4)
        split = K.permute_dimensions(split, transpose_axes)

        return split 
Developer: titu1994, Project: keras-attention-augmented-convs, Lines of code: 24, Source file: attn_augconv.py

Example 4: relative_logits

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def relative_logits(self, q):
        shape = K.shape(q)
        # [batch, num_heads, H, W, depth_v]
        shape = [shape[i] for i in range(5)]

        height = shape[2]
        width = shape[3]

        rel_logits_w = self.relative_logits_1d(q, self.key_relative_w, height, width,
                                               transpose_mask=[0, 1, 2, 4, 3, 5])

        rel_logits_h = self.relative_logits_1d(
            K.permute_dimensions(q, [0, 1, 3, 2, 4]),
            self.key_relative_h, width, height,
            transpose_mask=[0, 1, 4, 2, 5, 3])

        return rel_logits_h, rel_logits_w 
Developer: titu1994, Project: keras-attention-augmented-convs, Lines of code: 19, Source file: attn_augconv.py

Example 5: call

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def call(self, inputx):
        
        if not inputx.dtype in [tf.complex64, tf.complex128]:
            print('Warning: inputx is not complex. Converting.', file=sys.stderr)
        
            # if inputx is float, this will assume 0 imag channel
            inputx = tf.cast(inputx, tf.complex64)

        # get the right fft (note: in TF 2.x these live under tf.signal as tf.signal.fft / fft2d / fft3d)
        if self.ndims == 1:
            fft = tf.fft
        elif self.ndims == 2:
            fft = tf.fft2d
        else:
            fft = tf.fft3d

        perm_dims = [0, self.ndims + 1] + list(range(1, self.ndims + 1))
        invert_perm_ndims = [0] + list(range(2, self.ndims + 2)) + [1]
        
        perm_inputx = K.permute_dimensions(inputx, perm_dims)  # [batch_size, nb_features, *vol_size]
        fft_inputx = fft(perm_inputx)
        return K.permute_dimensions(fft_inputx, invert_perm_ndims) 
Developer: adalca, Project: neuron, Lines of code: 24, Source file: layers.py

Example 6: find_maxima

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def find_maxima(x, coordinate_scale=1, confidence_scale=255.0, data_format=None):
    """Finds the 2D maxima contained in a 4D tensor.
    # Arguments
        x: Tensor or variable.
        data_format: string, `"channels_last"` or `"channels_first"`.
    # Returns
        A tensor.
    # Raises
        ValueError: if `data_format` is neither `"channels_last"` nor `"channels_first"`.
    """
    if data_format == "channels_first":
        x = permute_dimensions(x, [0, 2, 3, 1])
        x = _find_maxima(x, coordinate_scale, confidence_scale)
        x = permute_dimensions(x, [0, 2, 1])
        return x
    elif data_format == "channels_last":
        x = _find_maxima(x, coordinate_scale, confidence_scale)
        x = permute_dimensions(x, [0, 2, 1])
        return x
    else:
        raise ValueError("Invalid data_format:", data_format) 
Developer: jgraving, Project: DeepPoseKit, Lines of code: 23, Source file: backend.py

Example 7: channle_shuffle

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def channle_shuffle(inputs, group):
    """Shuffle the channel
    Args:
        inputs: 4D Tensor
        group: int, number of groups
    Returns:
        Shuffled 4D Tensor
    """
    #in_shape = inputs.get_shape().as_list()
    h, w, in_channel  = K.int_shape(inputs)[1:]
    #h, w, in_channel = in_shape[1:]
    assert(in_channel % group == 0)
    l = K.reshape(inputs, [-1, h, w, in_channel // group, group])
    l = K.permute_dimensions(l, [0, 1, 2, 4, 3])
    l = K.reshape(l, [-1, h, w, in_channel])

    return l 
Developer: hereszsz, Project: thundernet-tensorflow2.0, Lines of code: 19, Source file: layers.py
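
A quick sanity check of the shuffle above (a hedged sketch, not part of the thundernet-tensorflow2.0 source; assumes the channle_shuffle function from Example 7 is in scope): for a 1x1 spatial tensor with 6 channels and group=2, the channel indices are interleaved as shown in the printed result.

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.reshape(tf.range(6, dtype=tf.float32), (1, 1, 1, 6))  # channel values 0..5
shuffled = channle_shuffle(x, group=2)
print(K.eval(shuffled).ravel())  # expected: [0. 2. 4. 1. 3. 5.]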

Example 8: _spectrogram_mono

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def _spectrogram_mono(self, x):
        '''x.shape : (None, 1, len_src),
        returns 2D batch of a mono power-spectrogram'''
        x = K.permute_dimensions(x, [0, 2, 1])
        x = K.expand_dims(x, 3)  # add a dummy dimension (channel axis)
        subsample = (self.n_hop, 1)
        output_real = K.conv2d(
            x,
            self.dft_real_kernels,
            strides=subsample,
            padding=self.padding,
            data_format='channels_last',
        )
        output_imag = K.conv2d(
            x,
            self.dft_imag_kernels,
            strides=subsample,
            padding=self.padding,
            data_format='channels_last',
        )
        output = output_real ** 2 + output_imag ** 2
        # now shape is (batch_sample, n_frame, 1, freq)
        if self.image_data_format == 'channels_last':
            output = K.permute_dimensions(output, [0, 3, 1, 2])
        else:
            output = K.permute_dimensions(output, [0, 2, 3, 1])
        return output 
Developer: keunwoochoi, Project: kapre, Lines of code: 29, Source file: time_frequency.py

Example 9: call

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def call(self, x):
        # reshape so that the last axis is freq axis
        if self.image_data_format == 'channels_first':
            x = K.permute_dimensions(x, [0, 1, 3, 2])
        else:
            x = K.permute_dimensions(x, [0, 3, 2, 1])
        output = K.dot(x, self.filterbank)
        # reshape back
        if self.image_data_format == 'channels_first':
            return K.permute_dimensions(output, [0, 1, 3, 2])
        else:
            return K.permute_dimensions(output, [0, 3, 2, 1]) 
Developer: keunwoochoi, Project: kapre, Lines of code: 14, Source file: filterbank.py

Example 10: relative_logits_1d

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def relative_logits_1d(self, q, rel_k, H, W, transpose_mask):
        rel_logits = tf.einsum('bhxyd,md->bhxym', q, rel_k)
        rel_logits = K.reshape(rel_logits, [-1, self.num_heads * H, W, 2 * W - 1])
        rel_logits = self.rel_to_abs(rel_logits)
        rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H, W, W])
        rel_logits = K.expand_dims(rel_logits, axis=3)
        rel_logits = K.tile(rel_logits, [1, 1, 1, H, 1, 1])
        rel_logits = K.permute_dimensions(rel_logits, transpose_mask)
        rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H * W, H * W])
        return rel_logits 
Developer: titu1994, Project: keras-attention-augmented-convs, Lines of code: 12, Source file: attn_augconv.py

Example 11: combine_heads_2d

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def combine_heads_2d(self, inputs):
        # [batch, num_heads, height, width, depth_v // num_heads]
        transposed = K.permute_dimensions(inputs, [0, 2, 3, 1, 4])
        # [batch, height, width, num_heads, depth_v // num_heads]
        shape = K.shape(transposed)
        shape = [shape[i] for i in range(5)]

        a, b = shape[-2:]
        ret_shape = K.stack(shape[:-2] + [a * b])
        # [batch, height, width, depth_v]
        return K.reshape(transposed, ret_shape) 
Developer: titu1994, Project: keras-attention-augmented-convs, Lines of code: 13, Source file: attn_augconv.py

Example 12: channel_shuffle_2

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def channel_shuffle_2(x):
    dyn_shape = tf.shape(x)
    h, w = dyn_shape[1], dyn_shape[2]
    c = x.shape[3]
    x = K.reshape(x, [-1, h, w, 2, c // 2])
    x = K.permute_dimensions(x, [0, 1, 2, 4, 3])
    x = K.reshape(x, [-1, h, w, c])
    return x 
Developer: mnicnc404, Project: CartoonGan-tensorflow, Lines of code: 10, Source file: layers.py

Example 13: channel_shuffle

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def channel_shuffle(x, groups):
    """
    Parameters
    ----------
    x:
        Input tensor with `channels_last` data format
    groups: int
        number of groups to split the channels into
    Returns
    -------
        channel shuffled output tensor
    Examples
    --------
    Example for a 1D Array with 3 groups
    >>> d = np.array([0,1,2,3,4,5,6,7,8])
    >>> x = np.reshape(d, (3,3))
    >>> x = np.transpose(x, [1,0])
    >>> x = np.reshape(x, (9,))
    '[0 1 2 3 4 5 6 7 8] --> [0 3 6 1 4 7 2 5 8]'
    """
    height, width, in_channels = x.shape.as_list()[1:]
    #handle dynamic image shape case
    if height is None:
        height = K.shape(x)[1]
    if width is None:
        width = K.shape(x)[2]
    channels_per_group = in_channels // groups

    x = K.reshape(x, [-1, height, width, groups, channels_per_group])
    x = K.permute_dimensions(x, (0, 1, 2, 4, 3))  # transpose
    x = K.reshape(x, [-1, height, width, in_channels])

    return x 
Developer: david8862, Project: keras-YOLOv3-model-set, Lines of code: 35, Source file: shufflenet.py

Example 14: channel_shuffle

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def channel_shuffle(x):
    height, width, channels = x.shape.as_list()[1:]
    #handle dynamic image shape case
    if height is None:
        height = K.shape(x)[1]
    if width is None:
        width = K.shape(x)[2]

    channels_per_split = channels // 2
    x = K.reshape(x, [-1, height, width, 2, channels_per_split])
    x = K.permute_dimensions(x, (0,1,2,4,3))
    x = K.reshape(x, [-1, height, width, channels])
    return x 
Developer: david8862, Project: keras-YOLOv3-model-set, Lines of code: 15, Source file: shufflenet_v2.py

Example 15: call

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import permute_dimensions [as alias]
def call(self, inputs, **kwargs):
        if self.axis == 1:
            # If channels first, force it to be channels last for these ops
            inputs = K.permute_dimensions(inputs, [0, 2, 3, 1])

        q, k, v = tf.split(inputs, [self.depth_k, self.depth_k, self.depth_v], axis=-1)

        q = self.split_heads_2d(q)
        k = self.split_heads_2d(k)
        v = self.split_heads_2d(v)

        # scale query
        depth_k_heads = self.depth_k / self.num_heads
        q *= (depth_k_heads ** -0.5)

        # [Batch, num_heads, height * width, depth_k or depth_v] if axis == -1
        qk_shape = [self._batch, self.num_heads, self._height * self._width, self.depth_k // self.num_heads]
        v_shape = [self._batch, self.num_heads, self._height * self._width, self.depth_v // self.num_heads]
        flat_q = K.reshape(q, K.stack(qk_shape))
        flat_k = K.reshape(k, K.stack(qk_shape))
        flat_v = K.reshape(v, K.stack(v_shape))

        # [Batch, num_heads, HW, HW]
        logits = tf.matmul(flat_q, flat_k, transpose_b=True)

        # Apply relative encodings
        if self.relative:
            h_rel_logits, w_rel_logits = self.relative_logits(q)
            logits += h_rel_logits
            logits += w_rel_logits

        weights = K.softmax(logits, axis=-1)
        attn_out = tf.matmul(weights, flat_v)

        attn_out_shape = [self._batch, self.num_heads, self._height, self._width, self.depth_v // self.num_heads]
        attn_out_shape = K.stack(attn_out_shape)
        attn_out = K.reshape(attn_out, attn_out_shape)
        attn_out = self.combine_heads_2d(attn_out)
        # [batch, height, width, depth_v]

        if self.axis == 1:
            # return to [batch, depth_v, height, width] for channels first
            attn_out = K.permute_dimensions(attn_out, [0, 3, 1, 2])

        attn_out.set_shape(self.compute_output_shape(self._shape))

        return attn_out 
Developer: titu1994, Project: keras-attention-augmented-convs, Lines of code: 49, Source file: attn_augconv.py


Note: The tensorflow.keras.backend.permute_dimensions examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.