

Python backend.reshape method code examples

This article collects typical usage examples of the tensorflow.keras.backend.reshape method in Python. If you are wondering how exactly to use backend.reshape, what it is for, or how to call it, the curated code examples below may help. You can also explore further usage examples from the containing module, tensorflow.keras.backend.


The following presents 15 code examples of backend.reshape, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
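
Before turning to the project examples, here is a minimal standalone sketch of the basic call (not taken from any of the projects below), assuming a TensorFlow 2.x eager environment; the tensor values are illustrative only:

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.arange(24.0).reshape(2, 3, 4))  # shape (2, 3, 4)
flat = K.reshape(x, (2, -1))                      # -1 lets the remaining dimension be inferred
print(K.int_shape(flat))                          # (2, 12)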

Example 1: call

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, x):

        n = (self.win_length - 1) / 2.0
        denom = n * (n + 1) * (2 * n + 1) / 3

        if self.data_format == 'channels_first':
            x = K.permute_dimensions(x, (0, 2, 3, 1))

        x = tf.pad(x, tf.constant([[0, 0], [0, 0], [int(n), int(n)], [0, 0]]), mode=self.mode)
        kernel = K.arange(-n, n + 1, 1, dtype=K.floatx())
        kernel = K.reshape(kernel, (1, kernel.shape[-1], 1, 1))  # (freq, time)

        x = K.conv2d(x, kernel, 1, data_format='channels_last') / denom

        if self.data_format == 'channels_first':
            x = K.permute_dimensions(x, (0, 3, 1, 2))

        return x 
Author: keunwoochoi, Project: kapre, Lines: 20, Source: utils.py

Example 2: split_heads_2d

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def split_heads_2d(self, ip):
        tensor_shape = K.shape(ip)

        # batch, height, width, channels for axis = -1
        tensor_shape = [tensor_shape[i] for i in range(len(self._shape))]

        batch = tensor_shape[0]
        height = tensor_shape[1]
        width = tensor_shape[2]
        channels = tensor_shape[3]

        # Save the spatial tensor dimensions
        self._batch = batch
        self._height = height
        self._width = width

        ret_shape = K.stack([batch, height, width,  self.num_heads, channels // self.num_heads])
        split = K.reshape(ip, ret_shape)
        transpose_axes = (0, 3, 1, 2, 4)
        split = K.permute_dimensions(split, transpose_axes)

        return split 
Author: titu1994, Project: keras-attention-augmented-convs, Lines: 24, Source: attn_augconv.py
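
A hedged standalone sketch of the same head-splitting pattern (not part of the original project), with illustrative sizes batch=2, height=4, width=4, channels=8 and num_heads=2, assuming TensorFlow 2.x eager execution:

import tensorflow as tf
from tensorflow.keras import backend as K

num_heads = 2
ip = tf.random.normal((2, 4, 4, 8))                           # (batch, height, width, channels)
split = K.reshape(ip, (-1, 4, 4, num_heads, 8 // num_heads))  # add a heads axis
split = K.permute_dimensions(split, (0, 3, 1, 2, 4))          # (batch, heads, height, width, channels // heads)
print(K.int_shape(split))                                     # (2, 2, 4, 4, 4)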

Example 3: call

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, inputs):
        """
        Parameters
            inputs: a volume, or a list containing a single volume
        """

        # check shapes
        if isinstance(inputs, (list, tuple)):
            assert len(inputs) == 1, "inputs has to be len 1. found: %d" % len(inputs)
            vol = inputs[0]
        else:
            vol = inputs

        # necessary for multi_gpu models...
        vol = K.reshape(vol, [-1, *self.inshape[1:]])

        # map transform across batch
        return tf.map_fn(self._single_resize, vol, dtype=tf.float32) 
Author: adalca, Project: neuron, Lines: 20, Source: layers.py

Example 4: _single_batch_trf

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def _single_batch_trf(self, vol):
        # vol should be vol_shape + [nb_features]
        # self.trf should be vol_shape + [nb_features] + [ndims]

        vol_shape = vol.shape.as_list()
        nb_input_dims = vol_shape[-1]

        # this is inefficient...
        new_vols = [None] * self.output_features
        for j in range(self.output_features):
            new_vols[j] = tf.zeros(vol_shape[:-1], dtype=tf.float32)
            for i in range(nb_input_dims):
                trf_vol = transform(vol[..., i], self.trf[..., i, j, :] * self.trf_mult, interp_method=self.interp_method)
                trf_vol = tf.reshape(trf_vol, vol_shape[:-1])
                new_vols[j] += trf_vol * self.mult[..., i, j]

                if self.use_bias:
                    new_vols[j] += self.bias[..., j]
        
        return tf.stack(new_vols, -1) 
Author: adalca, Project: neuron, Lines: 22, Source: layers.py

Example 5: channle_shuffle

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def channle_shuffle(inputs, group):
    """Shuffle the channel
    Args:
        inputs: 4D Tensor
        group: int, number of groups
    Returns:
        Shuffled 4D Tensor
    """
    #in_shape = inputs.get_shape().as_list()
    h, w, in_channel  = K.int_shape(inputs)[1:]
    #h, w, in_channel = in_shape[1:]
    assert(in_channel % group == 0)
    l = K.reshape(inputs, [-1, h, w, in_channel // group, group])
    l = K.permute_dimensions(l, [0, 1, 2, 4, 3])
    l = K.reshape(l, [-1, h, w, in_channel])

    return l 
Author: hereszsz, Project: thundernet-tensorflow2.0, Lines: 19, Source: layers.py
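
A hedged usage sketch for the function above (not part of the original project), assuming eager execution and a channel count divisible by group:

import tensorflow as tf
from tensorflow.keras import backend as K

inputs = tf.random.normal((1, 8, 8, 6))  # (batch, h, w, channels)
out = channle_shuffle(inputs, group=3)   # channels are regrouped; the shape is preserved
print(K.int_shape(out))                  # (1, 8, 8, 6)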

Example 6: call

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, inputs):
        def brelu(x):
            # get shape of X, we are interested in the last axis, which is constant
            shape = K.int_shape(x)
            # last axis
            dim = shape[-1]
            # half of the last axis (+1 if necessary)
            dim2 = dim // 2
            if dim % 2 != 0:
                dim2 += 1
            # multiplier will be a tensor of alternated +1 and -1
            multiplier = K.ones((dim2,))
            multiplier = K.stack([multiplier, -multiplier], axis=-1)
            if dim % 2 != 0:
                multiplier = multiplier[:-1]
            # adjust multiplier shape to the shape of x
            multiplier = K.reshape(multiplier, tuple(1 for _ in shape[:-1]) + (-1,))
            return multiplier * tf.nn.relu(multiplier * x)

        return Lambda(brelu)(inputs) 
Author: digantamisra98, Project: Echo, Lines: 22, Source: custom_activation.py
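
A hedged sketch of just the multiplier trick used above (not part of the original project): stacking a vector of ones with its negation and flattening it via K.reshape yields the alternating sign pattern; the dimension value is illustrative:

from tensorflow.keras import backend as K

dim = 4
multiplier = K.ones((dim // 2,))
multiplier = K.stack([multiplier, -multiplier], axis=-1)  # shape (dim // 2, 2): [[1, -1], [1, -1]]
multiplier = K.reshape(multiplier, (1, -1))               # shape (1, dim): [[1, -1, 1, -1]]
print(K.eval(multiplier))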

Example 7: relative_logits_1d

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def relative_logits_1d(self, q, rel_k, H, W, transpose_mask):
        rel_logits = tf.einsum('bhxyd,md->bhxym', q, rel_k)
        rel_logits = K.reshape(rel_logits, [-1, self.num_heads * H, W, 2 * W - 1])
        rel_logits = self.rel_to_abs(rel_logits)
        rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H, W, W])
        rel_logits = K.expand_dims(rel_logits, axis=3)
        rel_logits = K.tile(rel_logits, [1, 1, 1, H, 1, 1])
        rel_logits = K.permute_dimensions(rel_logits, transpose_mask)
        rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H * W, H * W])
        return rel_logits 
Author: titu1994, Project: keras-attention-augmented-convs, Lines: 12, Source: attn_augconv.py

Example 8: rel_to_abs

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def rel_to_abs(self, x):
        shape = K.shape(x)
        shape = [shape[i] for i in range(3)]
        B, Nh, L, = shape
        col_pad = K.zeros(K.stack([B, Nh, L, 1]))
        x = K.concatenate([x, col_pad], axis=3)
        flat_x = K.reshape(x, [B, Nh, L * 2 * L])
        flat_pad = K.zeros(K.stack([B, Nh, L - 1]))
        flat_x_padded = K.concatenate([flat_x, flat_pad], axis=2)
        final_x = K.reshape(flat_x_padded, [B, Nh, L + 1, 2 * L - 1])
        final_x = final_x[:, :, :L, L - 1:]
        return final_x 
Author: titu1994, Project: keras-attention-augmented-convs, Lines: 14, Source: attn_augconv.py

Example 9: combine_heads_2d

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def combine_heads_2d(self, inputs):
        # [batch, num_heads, height, width, depth_v // num_heads]
        transposed = K.permute_dimensions(inputs, [0, 2, 3, 1, 4])
        # [batch, height, width, num_heads, depth_v // num_heads]
        shape = K.shape(transposed)
        shape = [shape[i] for i in range(5)]

        a, b = shape[-2:]
        ret_shape = K.stack(shape[:-2] + [a * b])
        # [batch, height, width, depth_v]
        return K.reshape(transposed, ret_shape) 
Author: titu1994, Project: keras-attention-augmented-convs, Lines: 13, Source: attn_augconv.py

Example 10: call

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, inputs):
        X = inputs[0]  # (batch_size, N, F)
        A = inputs[1]  # (batch_size, N, N)
        E = inputs[2]  # (n_edges, S) or (batch_size, N, N, S)

        mode = ops.autodetect_mode(A, X)
        if mode == modes.SINGLE:
            return self._call_single(inputs)

        # Parameters
        N = K.shape(X)[-2]
        F = K.int_shape(X)[-1]
        F_ = self.channels

        # Filter network
        kernel_network = E
        for l in self.kernel_network_layers:
            kernel_network = l(kernel_network)

        # Convolution
        target_shape = (-1, N, N, F_, F) if mode == modes.BATCH else (N, N, F_, F)
        kernel = K.reshape(kernel_network, target_shape)
        output = kernel * A[..., None, None]
        output = tf.einsum('abicf,aif->abc', output, X)

        if self.root:
            output += ops.dot(X, self.root_kernel)
        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)

        return output 
Author: danielegrattarola, Project: spektral, Lines: 35, Source: ecc_conv.py

Example 11: _call_single

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def _call_single(self, inputs):
        X = inputs[0]  # (N, F)
        A = inputs[1]  # (N, N)
        E = inputs[2]  # (n_edges, S)
        assert K.ndim(E) == 2, 'In single mode, E must have shape (n_edges, S).'

        # Enforce sparse representation
        if not K.is_sparse(A):
            A = ops.dense_to_sparse(A)

        # Parameters
        N = tf.shape(X)[-2]
        F = K.int_shape(X)[-1]
        F_ = self.channels

        # Filter network
        kernel_network = E
        for l in self.kernel_network_layers:
            kernel_network = l(kernel_network)  # (n_edges, F * F_)
        target_shape = (-1, F, F_)
        kernel = tf.reshape(kernel_network, target_shape)

        # Propagation
        index_i = A.indices[:, -2]
        index_j = A.indices[:, -1]
        messages = tf.gather(X, index_j)
        messages = ops.dot(messages[:, None, :], kernel)[:, 0, :]
        aggregated = ops.scatter_sum(messages, index_i, N)

        # Update
        output = aggregated
        if self.root:
            output += ops.dot(X, self.root_kernel)
        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)

        return output 
Author: danielegrattarola, Project: spektral, Lines: 41, Source: ecc_conv.py

Example 12: _single_aff_to_shift

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def _single_aff_to_shift(self, trf, volshape):
        if len(trf.shape) == 1:  # go from vector to matrix
            trf = tf.reshape(trf, [self.ndims, self.ndims + 1])

        # note: this builds redundant graph ops, since every batch entry gets its own tf.eye graph
        trf += tf.eye(self.ndims + 1)[:self.ndims, :]  # add identity, so the affine is a shift from the identity
        return affine_to_shift(trf, volshape, shift_center=True) 
Author: adalca, Project: neuron, Lines: 9, Source: layers.py

Example 13: build

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def build(self, input_shape):

        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='mult-kernel',
                                    shape=(np.prod(self.orig_input_shape),
                                           self.output_len),
                                    initializer=self.kernel_initializer,
                                    trainable=True)

        M = K.reshape(self.kernel, [-1, self.output_len])  # D x d
        mt = K.transpose(M) # d x D
        mtm_inv = tf.matrix_inverse(K.dot(mt, M))  # d x d
        self.W = K.dot(mtm_inv, mt) # d x D

        if self.use_bias:
            self.bias = self.add_weight(name='bias-kernel',
                                        shape=(self.output_len, ),
                                        initializer=self.bias_initializer,
                                        trainable=True)

        # self.sigma_sq = self.add_weight(name='bias-kernel',
        #                                 shape=(1, ),
        #                                 initializer=self.initializer,
        #                                 trainable=True)

        super(SpatiallySparse_Dense, self).build(input_shape)  # Be sure to call this somewhere! 
Author: adalca, Project: neuron, Lines: 30, Source: layers.py

Example 14: channel_shuffle_2

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def channel_shuffle_2(x):
    dyn_shape = tf.shape(x)
    h, w = dyn_shape[1], dyn_shape[2]
    c = x.shape[3]
    x = K.reshape(x, [-1, h, w, 2, c // 2])
    x = K.permute_dimensions(x, [0, 1, 2, 4, 3])
    x = K.reshape(x, [-1, h, w, c])
    return x 
Author: mnicnc404, Project: CartoonGan-tensorflow, Lines: 10, Source: layers.py

Example 15: call

# Required module import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, x, **kwargs):
        # (x - y)^2 = x^2 + y^2 - 2 * x * y
        x_sq = K.expand_dims(K.sum(x ** 2, axis=2), axis=-1)
        y_sq = K.reshape(K.sum(self.kernel ** 2, axis=1),
                         (1, 1, self.n_shapelets))
        xy = K.dot(x, K.transpose(self.kernel))
        return (x_sq + y_sq - 2 * xy) / K.int_shape(self.kernel)[1] 
Author: tslearn-team, Project: tslearn, Lines: 9, Source: shapelets.py
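
A hedged standalone sketch of the same squared-distance expansion (x - y)^2 = x^2 + y^2 - 2xy outside the layer (not part of the original project), with illustrative shapes: x is (batch, timesteps, d) and kernel stands in for n_shapelets shapelets of length d:

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.random.normal((2, 5, 3))      # (batch, timesteps, d)
kernel = tf.random.normal((4, 3))    # (n_shapelets, d)

x_sq = K.expand_dims(K.sum(x ** 2, axis=2), axis=-1)     # (2, 5, 1)
y_sq = K.reshape(K.sum(kernel ** 2, axis=1), (1, 1, 4))  # (1, 1, 4)
xy = K.dot(x, K.transpose(kernel))                       # (2, 5, 4)
dist = (x_sq + y_sq - 2 * xy) / 3                        # divide by d, as in the layer above
print(K.int_shape(dist))                                 # (2, 5, 4)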


Note: The tensorflow.keras.backend.reshape method examples in this article were compiled by 純淨天空 from open-source code and documentation hosting platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and the source code copyright belongs to the original authors. For distribution and use, please follow the license of the corresponding project; do not reproduce without permission.