

Python backend.reshape Method: Code Examples

This article collects typical usage examples of the tensorflow.keras.backend.reshape method in Python. If you are wondering what backend.reshape does, how to call it, or how it is used in real code, the curated examples below should help. You can also explore further usage examples of other methods in tensorflow.keras.backend.


Below are 15 code examples of the backend.reshape method, sorted by popularity by default.
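
Before diving into the project-sourced examples, here is a minimal self-contained sketch of the method itself (written for this article, not taken from any project below). backend.reshape mirrors tf.reshape; a -1 entry is a wildcard dimension inferred from the total number of elements:

import numpy as np
from tensorflow.keras import backend as K

x = K.constant(np.arange(12.0))        # shape (12,)
y = K.reshape(x, (3, 4))               # shape (3, 4)
z = K.reshape(x, (-1, 2))              # the -1 is inferred as 6
print(K.int_shape(y), K.int_shape(z))  # (3, 4) (6, 2)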

Example 1: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, x):

        n = (self.win_length - 1) / 2.0
        denom = n * (n + 1) * (2 * n + 1) / 3

        if self.data_format == 'channels_first':
            x = K.permute_dimensions(x, (0, 2, 3, 1))

        x = tf.pad(x, tf.constant([[0, 0], [0, 0], [int(n), int(n)], [0, 0]]), mode=self.mode)
        kernel = K.arange(-n, n + 1, 1, dtype=K.floatx())
        kernel = K.reshape(kernel, (1, kernel.shape[-1], 1, 1))  # (freq, time)

        x = K.conv2d(x, kernel, 1, data_format='channels_last') / denom

        if self.data_format == 'channels_first':
            x = K.permute_dimensions(x, (0, 3, 1, 2))

        return x 
Author: keunwoochoi | Project: kapre | Lines: 20 | Source: utils.py

Example 2: split_heads_2d

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def split_heads_2d(self, ip):
        tensor_shape = K.shape(ip)

        # batch, height, width, channels for axis = -1
        tensor_shape = [tensor_shape[i] for i in range(len(self._shape))]

        batch = tensor_shape[0]
        height = tensor_shape[1]
        width = tensor_shape[2]
        channels = tensor_shape[3]

        # Save the spatial tensor dimensions
        self._batch = batch
        self._height = height
        self._width = width

        ret_shape = K.stack([batch, height, width, self.num_heads, channels // self.num_heads])
        split = K.reshape(ip, ret_shape)
        transpose_axes = (0, 3, 1, 2, 4)
        split = K.permute_dimensions(split, transpose_axes)

        return split 
Author: titu1994 | Project: keras-attention-augmented-convs | Lines: 24 | Source: attn_augconv.py
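
For orientation, a shape-only sketch of what split_heads_2d computes (a standalone example with made-up sizes; the real method reads the sizes dynamically via K.shape):

import tensorflow as tf

batch, H, W, C, num_heads = 2, 8, 8, 16, 4
ip = tf.random.normal((batch, H, W, C))
# [batch, H, W, C] -> [batch, H, W, num_heads, C // num_heads]
split = tf.reshape(ip, (batch, H, W, num_heads, C // num_heads))
# -> [batch, num_heads, H, W, C // num_heads]
split = tf.transpose(split, (0, 3, 1, 2, 4))
print(split.shape)  # (2, 4, 8, 8, 4)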

Example 3: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, inputs):
        """
        Parameters
            inputs: volume of list with one volume
        """

        # check shapes
        if isinstance(inputs, (list, tuple)):
            assert len(inputs) == 1, "inputs has to be len 1. found: %d" % len(inputs)
            vol = inputs[0]
        else:
            vol = inputs

        # necessary for multi_gpu models...
        vol = K.reshape(vol, [-1, *self.inshape[1:]])

        # map transform across batch
        return tf.map_fn(self._single_resize, vol, dtype=tf.float32) 
Author: adalca | Project: neuron | Lines: 20 | Source: layers.py

Example 4: _single_batch_trf

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def _single_batch_trf(self, vol):
        # vol should be vol_shape + [nb_features]
        # self.trf should be vol_shape + [nb_features] + [ndims]

        vol_shape = vol.shape.as_list()
        nb_input_dims = vol_shape[-1]

        # this is inefficient...
        new_vols = [None] * self.output_features
        for j in range(self.output_features):
            new_vols[j] = tf.zeros(vol_shape[:-1], dtype=tf.float32)
            for i in range(nb_input_dims):
                trf_vol = transform(vol[..., i], self.trf[..., i, j, :] * self.trf_mult,
                                    interp_method=self.interp_method)
                trf_vol = tf.reshape(trf_vol, vol_shape[:-1])
                new_vols[j] += trf_vol * self.mult[..., i, j]

                if self.use_bias:
                    new_vols[j] += self.bias[..., j]
        
        return tf.stack(new_vols, -1) 
Author: adalca | Project: neuron | Lines: 22 | Source: layers.py

Example 5: channle_shuffle

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def channle_shuffle(inputs, group):
    """Shuffle the channel
    Args:
        inputs: 4D Tensor
        group: int, number of groups
    Returns:
        Shuffled 4D Tensor
    """
    h, w, in_channel = K.int_shape(inputs)[1:]
    assert in_channel % group == 0, "in_channel must be divisible by group"
    l = K.reshape(inputs, [-1, h, w, in_channel // group, group])
    l = K.permute_dimensions(l, [0, 1, 2, 4, 3])
    l = K.reshape(l, [-1, h, w, in_channel])

    return l 
Author: hereszsz | Project: thundernet-tensorflow2.0 | Lines: 19 | Source: layers.py
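
A quick sanity check of the function above (a hypothetical test written for this article; it assumes channle_shuffle from the snippet is in scope): the shape is preserved while the channel groups are interleaved.

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.random.normal((1, 4, 4, 8))
y = channle_shuffle(x, group=2)  # channle_shuffle as defined above
print(K.int_shape(y))            # (1, 4, 4, 8)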

Example 6: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, inputs):
        def brelu(x):
            # get shape of X, we are interested in the last axis, which is constant
            shape = K.int_shape(x)
            # last axis
            dim = shape[-1]
            # half of the last axis (+1 if necessary)
            dim2 = dim // 2
            if dim % 2 != 0:
                dim2 += 1
            # multiplier will be a tensor of alternated +1 and -1
            multiplier = K.ones((dim2,))
            multiplier = K.stack([multiplier, -multiplier], axis=-1)
            if dim % 2 != 0:
                multiplier = multiplier[:-1]
            # adjust multiplier shape to the shape of x
            multiplier = K.reshape(multiplier, tuple(1 for _ in shape[:-1]) + (-1,))
            return multiplier * tf.nn.relu(multiplier * x)

        return Lambda(brelu)(inputs)  # Lambda from tensorflow.keras.layers
Author: digantamisra98 | Project: Echo | Lines: 22 | Source: custom_activation.py
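
The alternating ±1 multiplier makes this a bipolar ReLU: even-indexed channels pass relu(x) and odd-indexed channels pass -relu(-x), i.e. min(x, 0). A tiny standalone check (hypothetical values, not from the Echo project):

import tensorflow as tf

x = tf.constant([[2.0, 2.0, -2.0, -2.0]])
mult = tf.constant([1.0, -1.0, 1.0, -1.0])    # the alternating multiplier for dim = 4
print((mult * tf.nn.relu(mult * x)).numpy())  # [[ 2.  0.  0. -2.]]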

Example 7: relative_logits_1d

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def relative_logits_1d(self, q, rel_k, H, W, transpose_mask):
        rel_logits = tf.einsum('bhxyd,md->bhxym', q, rel_k)
        rel_logits = K.reshape(rel_logits, [-1, self.num_heads * H, W, 2 * W - 1])
        rel_logits = self.rel_to_abs(rel_logits)
        rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H, W, W])
        rel_logits = K.expand_dims(rel_logits, axis=3)
        rel_logits = K.tile(rel_logits, [1, 1, 1, H, 1, 1])
        rel_logits = K.permute_dimensions(rel_logits, transpose_mask)
        rel_logits = K.reshape(rel_logits, [-1, self.num_heads, H * W, H * W])
        return rel_logits 
Author: titu1994 | Project: keras-attention-augmented-convs | Lines: 12 | Source: attn_augconv.py

Example 8: rel_to_abs

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def rel_to_abs(self, x):
        shape = K.shape(x)
        shape = [shape[i] for i in range(3)]
        B, Nh, L = shape
        col_pad = K.zeros(K.stack([B, Nh, L, 1]))
        x = K.concatenate([x, col_pad], axis=3)
        flat_x = K.reshape(x, [B, Nh, L * 2 * L])
        flat_pad = K.zeros(K.stack([B, Nh, L - 1]))
        flat_x_padded = K.concatenate([flat_x, flat_pad], axis=2)
        final_x = K.reshape(flat_x_padded, [B, Nh, L + 1, 2 * L - 1])
        final_x = final_x[:, :, :L, L - 1:]
        return final_x 
Author: titu1994 | Project: keras-attention-augmented-convs | Lines: 14 | Source: attn_augconv.py
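
rel_to_abs pads one zero column, flattens, pads L - 1 more zeros, and reshapes so each row's relative offsets line up with absolute positions: [B, Nh, L, 2L - 1] becomes [B, Nh, L, L]. A standalone shape check with made-up sizes (plain tf ops, written for this article):

import tensorflow as tf

B, Nh, L = 1, 2, 4
x = tf.random.normal((B, Nh, L, 2 * L - 1))
x = tf.concat([x, tf.zeros((B, Nh, L, 1))], axis=3)             # pad one column
flat_x = tf.reshape(x, [B, Nh, L * 2 * L])                      # flatten
flat_x = tf.concat([flat_x, tf.zeros((B, Nh, L - 1))], axis=2)  # pad L - 1 zeros
final_x = tf.reshape(flat_x, [B, Nh, L + 1, 2 * L - 1])[:, :, :L, L - 1:]
print(final_x.shape)  # (1, 2, 4, 4)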

Example 9: combine_heads_2d

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def combine_heads_2d(self, inputs):
        # [batch, num_heads, height, width, depth_v // num_heads]
        transposed = K.permute_dimensions(inputs, [0, 2, 3, 1, 4])
        # [batch, height, width, num_heads, depth_v // num_heads]
        shape = K.shape(transposed)
        shape = [shape[i] for i in range(5)]

        a, b = shape[-2:]
        ret_shape = K.stack(shape[:-2] + [a * b])
        # [batch, height, width, depth_v]
        return K.reshape(transposed, ret_shape) 
Author: titu1994 | Project: keras-attention-augmented-convs | Lines: 13 | Source: attn_augconv.py

Example 10: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, inputs):
        X = inputs[0]  # (batch_size, N, F)
        A = inputs[1]  # (batch_size, N, N)
        E = inputs[2]  # (n_edges, S) or (batch_size, N, N, S)

        mode = ops.autodetect_mode(A, X)
        if mode == modes.SINGLE:
            return self._call_single(inputs)

        # Parameters
        N = K.shape(X)[-2]
        F = K.int_shape(X)[-1]
        F_ = self.channels

        # Filter network
        kernel_network = E
        for l in self.kernel_network_layers:
            kernel_network = l(kernel_network)

        # Convolution
        target_shape = (-1, N, N, F_, F) if mode == modes.BATCH else (N, N, F_, F)
        kernel = K.reshape(kernel_network, target_shape)
        output = kernel * A[..., None, None]
        output = tf.einsum('abicf,aif->abc', output, X)

        if self.root:
            output += ops.dot(X, self.root_kernel)
        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)

        return output 
Author: danielegrattarola | Project: spektral | Lines: 35 | Source: ecc_conv.py

Example 11: _call_single

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def _call_single(self, inputs):
        X = inputs[0]  # (N, F)
        A = inputs[1]  # (N, N)
        E = inputs[2]  # (n_edges, S)
        assert K.ndim(E) == 2, 'In single mode, E must have shape (n_edges, S).'

        # Enforce sparse representation
        if not K.is_sparse(A):
            A = ops.dense_to_sparse(A)

        # Parameters
        N = tf.shape(X)[-2]
        F = K.int_shape(X)[-1]
        F_ = self.channels

        # Filter network
        kernel_network = E
        for l in self.kernel_network_layers:
            kernel_network = l(kernel_network)  # (n_edges, F * F_)
        target_shape = (-1, F, F_)
        kernel = tf.reshape(kernel_network, target_shape)

        # Propagation
        index_i = A.indices[:, -2]
        index_j = A.indices[:, -1]
        messages = tf.gather(X, index_j)
        messages = ops.dot(messages[:, None, :], kernel)[:, 0, :]
        aggregated = ops.scatter_sum(messages, index_i, N)

        # Update
        output = aggregated
        if self.root:
            output += ops.dot(X, self.root_kernel)
        if self.use_bias:
            output = K.bias_add(output, self.bias)
        if self.activation is not None:
            output = self.activation(output)

        return output 
Author: danielegrattarola | Project: spektral | Lines: 41 | Source: ecc_conv.py

Example 12: _single_aff_to_shift

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def _single_aff_to_shift(self, trf, volshape):
        if len(trf.shape) == 1:  # go from vector to matrix
            trf = tf.reshape(trf, [self.ndims, self.ndims + 1])

        # note: this builds a redundant tf.eye subgraph for every batch entry
        trf += tf.eye(self.ndims + 1)[:self.ndims, :]  # add identity, so the affine is a shift from the identity
        return affine_to_shift(trf, volshape, shift_center=True) 
Author: adalca | Project: neuron | Lines: 9 | Source: layers.py

Example 13: build

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def build(self, input_shape):

        # Create a trainable weight variable for this layer.
        self.kernel = self.add_weight(name='mult-kernel',
                                    shape=(np.prod(self.orig_input_shape),
                                           self.output_len),
                                    initializer=self.kernel_initializer,
                                    trainable=True)

        M = K.reshape(self.kernel, [-1, self.output_len])  # D x d
        mt = K.transpose(M) # d x D
        mtm_inv = tf.linalg.inv(K.dot(mt, M))  # d x d; tf.linalg.inv replaces the TF1-era tf.matrix_inverse
        self.W = K.dot(mtm_inv, mt) # d x D

        if self.use_bias:
            self.bias = self.add_weight(name='bias-kernel',
                                        shape=(self.output_len, ),
                                        initializer=self.bias_initializer,
                                        trainable=True)

        # self.sigma_sq = self.add_weight(name='bias-kernel',
        #                                 shape=(1, ),
        #                                 initializer=self.initializer,
        #                                 trainable=True)

        super(SpatiallySparse_Dense, self).build(input_shape)  # Be sure to call this somewhere! 
Author: adalca | Project: neuron | Lines: 30 | Source: layers.py
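
The W assembled in build is the left pseudo-inverse of M: W = (M^T M)^{-1} M^T, so W·M = I. A quick numpy check of that identity (a standalone sketch, not project code):

import numpy as np

rng = np.random.default_rng(0)
M = rng.standard_normal((6, 3))       # D x d, full column rank
W = np.linalg.inv(M.T @ M) @ M.T      # d x D
print(np.allclose(W @ M, np.eye(3)))  # True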

Example 14: channel_shuffle_2

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def channel_shuffle_2(x):
    dyn_shape = tf.shape(x)
    h, w = dyn_shape[1], dyn_shape[2]
    c = x.shape[3]
    x = K.reshape(x, [-1, h, w, 2, c // 2])
    x = K.permute_dimensions(x, [0, 1, 2, 4, 3])
    x = K.reshape(x, [-1, h, w, c])
    return x 
Author: mnicnc404 | Project: CartoonGan-tensorflow | Lines: 10 | Source: layers.py
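
A tiny numeric check of the interleaving (hypothetical input; assumes channel_shuffle_2 from the snippet above is in scope): with c = 4, channels [0, 1, 2, 3] come out as [0, 2, 1, 3].

import tensorflow as tf

x = tf.reshape(tf.range(4.0), (1, 1, 1, 4))
y = channel_shuffle_2(x)            # channel_shuffle_2 as defined above
print(tf.reshape(y, [-1]).numpy())  # [0. 2. 1. 3.]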

Example 15: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import reshape [as alias]
def call(self, x, **kwargs):
        # (x - y)^2 = x^2 + y^2 - 2 * x * y
        x_sq = K.expand_dims(K.sum(x ** 2, axis=2), axis=-1)
        y_sq = K.reshape(K.sum(self.kernel ** 2, axis=1),
                         (1, 1, self.n_shapelets))
        xy = K.dot(x, K.transpose(self.kernel))
        return (x_sq + y_sq - 2 * xy) / K.int_shape(self.kernel)[1] 
Author: tslearn-team | Project: tslearn | Lines: 9 | Source: shapelets.py
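
The layer relies on the expansion in its first comment to compute all sample-to-shapelet squared distances without forming the differences explicitly. A quick numpy verification of that identity (standalone sketch):

import numpy as np

x = np.array([1.0, 2.0, 3.0])
y = np.array([0.0, 2.0, 5.0])
lhs = np.sum((x - y) ** 2)
rhs = np.sum(x ** 2) + np.sum(y ** 2) - 2 * np.dot(x, y)
print(lhs, rhs)  # 5.0 5.0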


Note: The tensorflow.keras.backend.reshape examples in this article were compiled by 纯净天空 from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are excerpted from open-source projects contributed by their original authors; copyright remains with those authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.