

Python backend.int_shape Method Code Examples

This article collects typical usage examples of the tensorflow.keras.backend.int_shape method in Python. If you are wondering how to use backend.int_shape, or what it is good for in practice, the curated code examples below should help. You can also explore other usage examples from the tensorflow.keras.backend module.


The following presents 15 code examples of the backend.int_shape method, sorted by popularity by default.
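
Before the examples, a minimal sketch (not taken from any of the projects below) may help clarify what int_shape returns: unlike K.shape, which yields a tensor resolved at runtime, K.int_shape returns a plain Python tuple of ints, with None for dimensions that are not known statically.

import tensorflow as tf
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input

x = tf.zeros((8, 28, 28, 3))
print(K.int_shape(x))        # (8, 28, 28, 3) -- a plain Python tuple
print(K.shape(x))            # tf.Tensor([ 8 28 28  3], ...) -- resolved at runtime

inputs = Input(shape=(28, 28, 3))
print(K.int_shape(inputs))   # (None, 28, 28, 3) -- batch dimension is unknown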

Example 1: mixed_mode_dot

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def mixed_mode_dot(a, b):
    """
    Computes the equivalent of `tf.einsum('ij,bjk->bik', a, b)`, but
    works for both dense and sparse inputs.
    :param a: Tensor or SparseTensor with rank 2.
    :param b: Tensor or SparseTensor with rank 3.
    :return: Tensor or SparseTensor with rank 3.
    """
    s_0_, s_1_, s_2_ = K.int_shape(b)
    B_T = ops.transpose(b, (1, 2, 0))
    B_T = ops.reshape(B_T, (s_1_, -1))
    output = dot(a, B_T)
    output = ops.reshape(output, (s_1_, s_2_, -1))
    output = ops.transpose(output, (2, 0, 1))

    return output 
Author: danielegrattarola | Project: spektral | Lines: 18 | Source: matmul.py
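
A hypothetical usage sketch follows. It assumes the helpers used by the snippet (K, ops and dot from spektral's internal modules) are already in scope; the exact import paths differ between spektral releases, so treat them as assumptions.

# Hypothetical usage of mixed_mode_dot: a sparse rank-2 matrix `a` is
# multiplied against a dense rank-3 batch `b`. Shapes are illustrative.
import numpy as np
import tensorflow as tf

a = tf.sparse.from_dense(np.eye(5, dtype=np.float32))   # rank 2: (N, N), sparse
b = tf.random.normal((16, 5, 32))                        # rank 3: (batch, N, F)
out = mixed_mode_dot(a, b)                                # rank 3: (batch, N, F)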

Example 2: sampling

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def sampling(args):
    """Reparameterization trick by sampling 
        fr an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    # Returns:
        z (tensor): sampled latent vector
    """

    z_mean, z_log_var = args
    # K is the keras backend
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Author: PacktPublishing | Project: Advanced-Deep-Learning-with-Keras | Lines: 20 | Source: vae-mlp-mnist-8.1.1.py
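
For context, a minimal sketch of how sampling is typically wired into a VAE encoder through a Lambda layer; the layer sizes below are assumptions for illustration, not the book's exact architecture.

from tensorflow.keras.layers import Input, Dense, Lambda
from tensorflow.keras.models import Model

latent_dim = 2                                     # assumed latent size
inputs = Input(shape=(784,), name='encoder_input')
h = Dense(512, activation='relu')(inputs)
z_mean = Dense(latent_dim, name='z_mean')(h)
z_log_var = Dense(latent_dim, name='z_log_var')(h)
# output_shape is given explicitly so Keras knows the shape produced by `sampling`.
z = Lambda(sampling, output_shape=(latent_dim,), name='z')([z_mean, z_log_var])
encoder = Model(inputs, [z_mean, z_log_var, z], name='encoder')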

Example 3: sampling

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def sampling(args):
    """Implements reparameterization trick by sampling
    from a gaussian with zero mean and std=1.

    Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    Returns:
        sampled latent vector (tensor)
    """

    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Author: PacktPublishing | Project: Advanced-Deep-Learning-with-Keras | Lines: 19 | Source: cvae-cnn-mnist-8.2.1.py

Example 4: sampling

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def sampling(args):
    """Reparameterization trick by sampling 
        fr an isotropic unit Gaussian.

    # Arguments:
        args (tensor): mean and log of variance of Q(z|X)

    # Returns:
        z (tensor): sampled latent vector
    """

    z_mean, z_log_var = args
    batch = K.shape(z_mean)[0]
    dim = K.int_shape(z_mean)[1]
    # by default, random_normal has mean=0 and std=1.0
    epsilon = K.random_normal(shape=(batch, dim))
    return z_mean + K.exp(0.5 * z_log_var) * epsilon 
Author: PacktPublishing | Project: Advanced-Deep-Learning-with-Keras | Lines: 19 | Source: vae-cnn-mnist-8.1.2.py

Example 5: build

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def build(self, input_layer):
        last_layer = input_layer
        input_shape = K.int_shape(input_layer)

        if self.with_embedding:
            if input_shape[-1] != 1:
                raise ValueError("Only one feature (the index) can be used with embeddings, "
                                 "i.e. the input shape should be (num_samples, length, 1). "
                                 "The actual shape was: " + str(input_shape))

            last_layer = Lambda(lambda x: K.squeeze(x, axis=-1),
                                output_shape=K.int_shape(last_layer)[:-1])(last_layer)  # Remove feature dimension.
            last_layer = Embedding(self.embedding_size, self.embedding_dimension,
                                   input_length=input_shape[-2])(last_layer)

        for _ in range(self.num_layers):
            last_layer = Dense(self.num_units, activation=self.activation)(last_layer)
            if self.with_bn:
                last_layer = BatchNormalization()(last_layer)
            if not np.isclose(self.p_dropout, 0):
                last_layer = Dropout(self.p_dropout)(last_layer)
        return last_layer 
Author: d909b | Project: cxplain | Lines: 24 | Source: rnn.py

Example 6: expand_tile

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def expand_tile(units, axis):
    """
    Expand and tile tensor along given axis

    Args:
        units: tf tensor with dimensions [batch_size, time_steps, n_input_features]
        axis: axis along which to expand and tile; must be 1 or 2

    """
    assert axis in (1, 2)
    n_time_steps = K.int_shape(units)[1]
    repetitions = [1, 1, 1, 1]
    repetitions[axis] = n_time_steps
    if axis == 1:
        expanded = Reshape(target_shape=((1,) + K.int_shape(units)[1:]))(units)
    else:
        expanded = Reshape(target_shape=(K.int_shape(units)[1:2] + (1,) + K.int_shape(units)[2:]))(units)
    return K.tile(expanded, repetitions) 
Author: deepmipt | Project: DeepPavlov | Lines: 20 | Source: keras_layers.py
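
A quick eager-mode check of expand_tile; the shapes below are illustrative assumptions, and K and Reshape must already be imported as in the original file.

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.random.normal((4, 7, 16))       # [batch, time_steps, n_input_features]
tiled = expand_tile(x, axis=1)          # insert a new axis at 1, then tile it
print(K.int_shape(tiled))               # (4, 7, 7, 16)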

Example 7: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def call(self, x, **kwargs):
        assert isinstance(x, list)
        inp_a, inp_b = x

        outp_a = K.l2_normalize(inp_a, -1)
        outp_b = K.l2_normalize(inp_b, -1)
        alpha = K.batch_dot(outp_b, outp_a, axes=[1, 1])
        alpha = K.l2_normalize(alpha, 1)
        hmean = K.batch_dot(outp_b, alpha, axes=[2, 1])
        kcon = K.eye(K.int_shape(inp_a)[1], dtype='float32')

        m = []
        for i in range(self.output_dim):
            outp_a = inp_a * self.W[i]
            outp_hmean = hmean * self.W[i]
            outp_a = K.l2_normalize(outp_a, -1)
            outp_hmean = K.l2_normalize(outp_hmean, -1)
            outp = K.batch_dot(outp_hmean, outp_a, axes=[2, 2])
            outp = K.sum(outp * kcon, -1, keepdims=True)
            m.append(outp)
        if self.output_dim > 1:
            persp = K.concatenate(m, 2)
        else:
            persp = m[0]
        return [persp, persp] 
Author: deepmipt | Project: DeepPavlov | Lines: 27 | Source: keras_layers.py

Example 8: __call__

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def __call__(self, inputs):
        identity = inputs
        if K.int_shape(identity)[-1] == self.filters:
            identity = self.identity_bn(identity)
        else:
            identity = self.identity_bn(identity)
            identity = self.identity_1x1(identity)

        x = inputs
        x = self.bottleneck_1x1_bn(x)
        x = self.bottleneck_1x1(x)

        x = self.bottleneck_3x3_bn(x)
        x = self.bottleneck_3x3(x)

        x = self.expansion_1x1_bn(x)
        x = self.expansion_1x1(x)

        x = self.residual_add_bn(x)
        return self.residual_add([identity, x]) 
Author: jgraving | Project: DeepPoseKit | Lines: 22 | Source: hourglass.py

Example 9: residual

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def residual(_x, out_dim, name, stride=1):
  shortcut = _x
  num_channels = K.int_shape(shortcut)[-1]
  _x = ZeroPadding2D(padding=1, name=name + '.pad1')(_x)
  _x = Conv2D(out_dim, 3, strides=stride, use_bias=False, name=name + '.conv1')(_x)
  _x = BatchNormalization(epsilon=1e-5, name=name + '.bn1')(_x)
  _x = Activation('relu', name=name + '.relu1')(_x)

  _x = Conv2D(out_dim, 3, padding='same', use_bias=False, name=name + '.conv2')(_x)
  _x = BatchNormalization(epsilon=1e-5, name=name + '.bn2')(_x)

  if num_channels != out_dim or stride != 1:
    shortcut = Conv2D(out_dim, 1, strides=stride, use_bias=False, name=name + '.shortcut.0')(
        shortcut)
    shortcut = BatchNormalization(epsilon=1e-5, name=name + '.shortcut.1')(shortcut)

  _x = Add(name=name + '.add')([_x, shortcut])
  _x = Activation('relu', name=name + '.relu')(_x)
  return _x 
Author: 1044197988 | Project: Centernet-Tensorflow2.0 | Lines: 21 | Source: hourglass.py
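
A minimal sketch of wrapping residual into a model; the input shape and out_dim are assumptions, and the layers used inside residual (Conv2D, BatchNormalization, Activation, Add, ZeroPadding2D) plus K must already be imported as in the original file.

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

inp = Input(shape=(64, 64, 32))
# stride=2 and a channel change force the 1x1 convolution shortcut branch.
out = residual(inp, out_dim=64, name='res1', stride=2)
model = Model(inp, out)
model.summary()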

Example 10: correct_pad

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def correct_pad(backend, inputs, kernel_size):
    """Returns a tuple for zero-padding for 2D convolution with downsampling.
    # Arguments
        backend: the Keras backend module.
        inputs: input tensor whose spatial dimensions are read via int_shape.
        kernel_size: an integer or tuple/list of 2 integers.
    # Returns
        A tuple.
    """
    img_dim = 2 if backend.image_data_format() == 'channels_first' else 1
    input_size = backend.int_shape(inputs)[img_dim:(img_dim + 2)]

    if isinstance(kernel_size, int):
        kernel_size = (kernel_size, kernel_size)

    if input_size[0] is None:
        adjust = (1, 1)
    else:
        adjust = (1 - input_size[0] % 2, 1 - input_size[1] % 2)

    correct = (kernel_size[0] // 2, kernel_size[1] // 2)

    return ((correct[0] - adjust[0], correct[0]),
            (correct[1] - adjust[1], correct[1])) 
Author: david8862 | Project: keras-YOLOv3-model-set | Lines: 25 | Source: mobilenet_v3.py
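
A sketch of how correct_pad is typically used in front of a strided convolution so that 'valid' padding lines up like 'same' padding would; the input resolution is an assumption.

from tensorflow.keras import backend
from tensorflow.keras.layers import Input, ZeroPadding2D, DepthwiseConv2D

inputs = Input(shape=(224, 224, 3))
# For a 224x224 input and kernel_size=3 this returns ((0, 1), (0, 1)).
x = ZeroPadding2D(padding=correct_pad(backend, inputs, 3))(inputs)
x = DepthwiseConv2D(kernel_size=3, strides=2, padding='valid')(x)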

Example 11: channle_shuffle

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def channle_shuffle(inputs, group):
    """Shuffle the channel
    Args:
        inputs: 4D Tensor
        group: int, number of groups
    Returns:
        Shuffled 4D Tensor
    """
    #in_shape = inputs.get_shape().as_list()
    h, w, in_channel  = K.int_shape(inputs)[1:]
    #h, w, in_channel = in_shape[1:]
    assert(in_channel % group == 0)
    l = K.reshape(inputs, [-1, h, w, in_channel // group, group])
    l = K.permute_dimensions(l, [0, 1, 2, 4, 3])
    l = K.reshape(l, [-1, h, w, in_channel])

    return l 
Author: hereszsz | Project: thundernet-tensorflow2.0 | Lines: 19 | Source: layers.py
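
A quick eager-mode sanity check of the shuffle; the tensor size and group count are assumptions chosen so the channel count divides evenly by the groups.

import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.random.normal((2, 4, 4, 6))       # NHWC, 6 channels
y = channle_shuffle(x, group=3)          # channels interleaved across 3 groups
print(K.int_shape(y))                    # (2, 4, 4, 6) -- shape is preserved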

Example 12: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def call(self, inputs):
        def brelu(x):
            # get shape of X, we are interested in the last axis, which is constant
            shape = K.int_shape(x)
            # last axis
            dim = shape[-1]
            # half of the last axis (+1 if necessary)
            dim2 = dim // 2
            if dim % 2 != 0:
                dim2 += 1
            # multiplier will be a tensor of alternated +1 and -1
            multiplier = K.ones((dim2,))
            multiplier = K.stack([multiplier, -multiplier], axis=-1)
            if dim % 2 != 0:
                multiplier = multiplier[:-1]
            # adjust multiplier shape to the shape of x
            multiplier = K.reshape(multiplier, tuple(1 for _ in shape[:-1]) + (-1,))
            return multiplier * tf.nn.relu(multiplier * x)

        return Lambda(brelu)(inputs) 
Author: digantamisra98 | Project: Echo | Lines: 22 | Source: custom_activation.py

Example 13: augmented_conv2d

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def augmented_conv2d(ip, filters, kernel_size=(3, 3), strides=(1, 1),
                     depth_k=0.2, depth_v=0.2, num_heads=8, relative_encodings=True):
    """
    Builds an Attention Augmented Convolution block.

    Args:
        ip: keras tensor.
        filters: number of output filters.
        kernel_size: convolution kernel size.
        strides: strides of the convolution.
        depth_k: float or int. Number of filters for `k`.
            If passed as a float, computed as `filters * depth_k`.
        depth_v: float or int. Number of filters for `v`.
            If passed as a float, computed as `filters * depth_v`.
        num_heads: int. Number of attention heads.
            Must be set such that `depth_k // num_heads` is > 0.
        num_heads: int. Number of attention heads.
            Must be set such that `depth_k // num_heads` is > 0.
        relative_encodings: bool. Whether to use relative
            encodings or not.

    Returns:
        a keras tensor.
    """
    # input_shape = K.int_shape(ip)
    channel_axis = 1 if K.image_data_format() == 'channels_first' else -1

    depth_k, depth_v = _normalize_depth_vars(depth_k, depth_v, filters)

    conv_out = _conv_layer(filters - depth_v, kernel_size, strides)(ip)

    # Augmented Attention Block
    qkv_conv = _conv_layer(2 * depth_k + depth_v, (1, 1), strides)(ip)
    attn_out = AttentionAugmentation2D(depth_k, depth_v, num_heads, relative_encodings)(qkv_conv)
    attn_out = _conv_layer(depth_v, kernel_size=(1, 1))(attn_out)

    output = concatenate([conv_out, attn_out], axis=channel_axis)
    output = BatchNormalization()(output)
    return output 
Author: titu1994 | Project: keras-attention-augmented-convs | Lines: 41 | Source: attn_augconv.py
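
A hypothetical way to call the block; it assumes AttentionAugmentation2D, _conv_layer and _normalize_depth_vars from the same repository are in scope, and the input shape and hyperparameters are purely illustrative.

from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model

ip = Input(shape=(32, 32, 3))
# depth_k = depth_v = 0.2 * 20 = 4 filters, split across 4 attention heads.
x = augmented_conv2d(ip, filters=20, kernel_size=(3, 3),
                     depth_k=0.2, depth_v=0.2, num_heads=4,
                     relative_encodings=True)
model = Model(ip, x)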

Example 14: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def call(self, inputs):
        F = K.int_shape(inputs)[-1]
        minkowski_prod_mat = np.eye(F)
        minkowski_prod_mat[-1, -1] = -1.
        minkowski_prod_mat = K.constant(minkowski_prod_mat)
        output = K.dot(inputs, minkowski_prod_mat)
        output = K.dot(output, K.transpose(inputs))
        output = K.clip(output, -10e9, -1.)

        if self.activation is not None:
            output = self.activation(output)

        return output 
Author: danielegrattarola | Project: spektral | Lines: 15 | Source: base.py

Example 15: call

# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import int_shape [as alias]
def call(self, inputs):
        X, A, E = self.get_inputs(inputs)
        F = K.int_shape(X)[-1]

        to_pad = self.channels - F
        output = tf.pad(X, [[0, 0], [0, to_pad]])
        for i in range(self.n_layers):
            m = tf.matmul(output, self.kernel[i])
            m = self.propagate(m, A)
            output = self.rnn(m, [output])[0]

        output = self.activation(output)
        return output 
Author: danielegrattarola | Project: spektral | Lines: 15 | Source: gated_graph_conv.py


Note: The tensorflow.keras.backend.int_shape examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Refer to each project's license before distributing or reusing the code; do not reproduce without permission.