

Python backend.ndim Method Code Examples

This article collects typical usage examples of the keras.backend.ndim method in Python. If you are unsure what backend.ndim does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore other usage examples from the keras.backend module, where this method lives.


The following presents 15 code examples of the backend.ndim method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
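Before the examples, here is a minimal orientation sketch (my own illustration, not taken from any of the projects below, and assuming a TensorFlow-1.x-style Keras backend where placeholders are available). It contrasts K.ndim, which returns the number of dimensions as a plain Python integer, with K.shape, which returns a symbolic tensor:

from keras import backend as K

# Hypothetical placeholder: a batch of 32x32 RGB images.
x = K.placeholder(shape=(None, 32, 32, 3))

print(K.ndim(x))   # 4 -- a plain Python int, usable in ordinary control flow
print(K.shape(x))  # a symbolic tensor; its values are only known at run time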

Example 1: get_output

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def get_output(self, train=False):
        def format_shape(shape):
            if K._BACKEND == 'tensorflow':
                def trf(x):
                    try:
                        return int(x)
                    except TypeError:
                        return x

                return list(map(trf, shape))  # wrap in list() so the result can be sliced under Python 3
            return shape

        X = self.get_input(train)

        in_shape = format_shape(K.shape(X))
        batch_flatten_len = K.prod(in_shape[:2])
        cast_in_shape = (batch_flatten_len, ) + tuple(in_shape[i] for i in range(2, K.ndim(X)))
        
        pre_outs = self.layer(K.reshape(X, cast_in_shape))
        
        out_shape = format_shape(K.shape(pre_outs))
        cast_out_shape = (in_shape[0], in_shape[1]) + tuple(out_shape[i] for i in range(1, K.ndim(pre_outs)))
        
        outputs = K.reshape(pre_outs, cast_out_shape)
        return outputs 
Developer: textclf, Project: fancy-cnn, Lines: 27, Source: timedistributed.py

Example 2: get_realpart

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def get_realpart(x):
    image_format = K.image_data_format()
    ndim = K.ndim(x)
    input_shape = K.shape(x)

    if (image_format == 'channels_first' and ndim != 3) or ndim == 2:
        input_dim = input_shape[1] // 2
        return x[:, :input_dim]

    input_dim = input_shape[-1] // 2
    if ndim == 3:
        return x[:, :, :input_dim]
    elif ndim == 4:
        return x[:, :, :, :input_dim]
    elif ndim == 5:
        return x[:, :, :, :, :input_dim] 
Developer: ChihebTrabelsi, Project: deep_complex_networks, Lines: 18, Source: utils.py

Example 3: get_imagpart

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def get_imagpart(x):
    image_format = K.image_data_format()
    ndim = K.ndim(x)
    input_shape = K.shape(x)

    if (image_format == 'channels_first' and ndim != 3) or ndim == 2:
        input_dim = input_shape[1] // 2
        return x[:, input_dim:]

    input_dim = input_shape[-1] // 2
    if ndim == 3:
        return x[:, :, input_dim:]
    elif ndim == 4:
        return x[:, :, :, input_dim:]
    elif ndim == 5:
        return x[:, :, :, :, input_dim:] 
Developer: ChihebTrabelsi, Project: deep_complex_networks, Lines: 18, Source: utils.py
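A minimal usage sketch for Examples 2 and 3 (my own illustration, not from the deep_complex_networks project): a complex-valued tensor is stored with the real halves followed by the imaginary halves along the feature axis, and the two helpers slice it back apart. It assumes the default channels_last image format and a 2D input.

import numpy as np
from keras import backend as K

# Two samples, each with 2 complex features stored as [real_0, real_1, imag_0, imag_1].
x = K.constant(np.array([[1., 2., 10., 20.],
                         [3., 4., 30., 40.]]))

real = get_realpart(x)  # [[1., 2.], [3., 4.]]
imag = get_imagpart(x)  # [[10., 20.], [30., 40.]]
print(K.eval(real), K.eval(imag))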

Example 4: _softmax

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def _softmax(x, axis=-1, alpha=1):
    """
    building on keras implementation, allow alpha parameter

    Softmax activation function.
    # Arguments
        x : Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
        alpha: a scalar factor applied to x before the softmax
    # Returns
        Tensor, output of softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    x = alpha * x
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D') 
Developer: voxelmorph, Project: voxelmorph, Lines: 26, Source: models.py
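A short usage sketch for Example 4 (my own illustration, not from the voxelmorph code): alpha acts as an inverse-temperature factor, so larger values sharpen the distribution along the chosen axis.

import numpy as np
from keras import backend as K

logits = K.constant(np.random.randn(2, 5, 3))      # (batch, locations, classes)

probs       = _softmax(logits, axis=-1, alpha=1)   # standard softmax
probs_sharp = _softmax(logits, axis=-1, alpha=10)  # larger alpha -> more peaked

# Every distribution along the last axis sums to 1.
print(K.eval(K.sum(probs, axis=-1)))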

Example 5: sequence_masking

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def sequence_masking(x, mask, mode=0, axis=None):
    """为序列条件mask的函数
    mask: 形如(batch_size, seq_len)的0-1矩阵;
    mode: 如果是0,则直接乘以mask;
          如果是1,则在padding部分减去一个大正数。
    axis: 序列所在轴,默认为1;
    """
    if mask is None or mode not in [0, 1]:
        return x
    else:
        if axis is None:
            axis = 1
        if axis == -1:
            axis = K.ndim(x) - 1
        assert axis > 0, 'axis must be greater than 0'
        for _ in range(axis - 1):
            mask = K.expand_dims(mask, 1)
        for _ in range(K.ndim(x) - K.ndim(mask) - axis + 1):
            mask = K.expand_dims(mask, K.ndim(mask))
        if mode == 0:
            return x * mask
        else:
            return x - (1 - mask) * 1e12 
Developer: bojone, Project: bert4keras, Lines: 25, Source: backend.py
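A minimal usage sketch for Example 5 (my own illustration, not from bert4keras): masking a (batch_size, seq_len, hidden) tensor with a (batch_size, seq_len) 0/1 mask. mode=0 zeroes the padded steps, while mode=1 pushes them towards minus infinity so that a following softmax ignores them.

import numpy as np
from keras import backend as K

x = K.constant(np.ones((2, 4, 3)))                            # (batch_size, seq_len, hidden)
mask = K.constant(np.array([[1, 1, 1, 0],
                            [1, 1, 0, 0]], dtype='float32'))  # (batch_size, seq_len)

zeroed    = sequence_masking(x, mask, mode=0, axis=1)  # padded steps become 0
penalized = sequence_masking(x, mask, mode=1, axis=1)  # padded steps get -1e12 added
print(K.eval(zeroed)[0, 3])  # [0. 0. 0.]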

Example 6: gram_matrix

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def gram_matrix(x):
	"""
	Computes the outer-product of the input tensor x.

	Input
	-----
	- x: input tensor of shape (C x H x W)

	Returns
	-------
	- x . x^T

	Note that this can be computed efficiently if x is reshaped
	as a tensor of shape (C x H*W).
	"""
	# assert K.ndim(x) == 3
	if K.image_dim_ordering() == 'th':
		features = K.batch_flatten(x)
	else:
		features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
	return K.dot(features, K.transpose(features)) 
Developer: kevinzakka, Project: style-transfer, Lines: 23, Source: losses.py
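A short usage sketch for Example 6 (my own illustration, not from the style-transfer project), assuming the default channels_last ('tf') ordering: a single feature map of shape (H, W, C) is permuted and flattened to (C, H*W), then multiplied by its transpose, giving a (C, C) matrix of channel correlations.

import numpy as np
from keras import backend as K

# Hypothetical feature map of one image: an 8x8 spatial grid with 16 channels.
features = K.constant(np.random.rand(8, 8, 16))

gram = gram_matrix(features)  # shape (16, 16): channel-by-channel inner products
print(K.int_shape(gram))      # (16, 16)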

Example 7: softmax

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def softmax(x, axis=1):
    """Softmax activation function.
    # Arguments
        x : Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
    # Returns
        Tensor, output of softmax transformation.
    # Raises
        ValueError: In case `dim(x) == 1`.
    """
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D') 
Developer: kaka-lin, Project: stock-price-predict, Lines: 21, Source: seq2seq_attention_2.py

Example 8: call

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def call(self, X, mask=None):
        if mask is not None:
            assert K.ndim(mask) == 2, 'Input mask to CRF must have dim 2 if not None'

        if self.test_mode == 'viterbi':
            test_output = self.viterbi_decoding(X, mask)
        else:
            test_output = self.get_marginal_prob(X, mask)

        self.uses_learning_phase = True
        if self.learn_mode == 'join':
            train_output = K.zeros_like(K.dot(X, self.kernel))
            out = K.in_train_phase(train_output, test_output)
        else:
            if self.test_mode == 'viterbi':
                train_output = self.get_marginal_prob(X, mask)
                out = K.in_train_phase(train_output, test_output)
            else:
                out = test_output
        return out 
Developer: keras-team, Project: keras-contrib, Lines: 22, Source: crf.py

Example 9: __init__

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def __init__(self, output_dim, init='glorot_uniform', activation='relu', weights=None,
            W_regularizer=None, b_regularizer=None, activity_regularizer=None,
            W_constraint=None, b_constraint=None, input_dim=None, **kwargs):
        self.W_initializer = initializers.get(init)
        self.b_initializer = initializers.get('zeros')
        self.activation = activations.get(activation)
        self.output_dim = output_dim
        self.input_dim = input_dim

        self.W_regularizer = regularizers.get(W_regularizer)
        self.b_regularizer = regularizers.get(b_regularizer)
        self.activity_regularizer = regularizers.get(activity_regularizer)

        self.W_constraint = constraints.get(W_constraint)
        self.b_constraint = constraints.get(b_constraint)
        self.initial_weights = weights
        self.input_spec = InputSpec(ndim=2)

        if self.input_dim:
            kwargs['input_shape'] = (self.input_dim,)
        super(SparseFullyConnectedLayer, self).__init__(**kwargs) 
Developer: yangliuy, Project: NeuralResponseRanking, Lines: 23, Source: SparseFullyConnectedLayer.py

Example 10: build

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def build(self, input_shape):
        assert len(input_shape) == 2
        input_dim = input_shape[1]
        #self.input_spec = InputSpec(dtype=K.floatx(), shape=(None, input_dim))
        self.input_spec = InputSpec(ndim=2, axes={1: input_dim})

        self.W = self.add_weight(
                shape=(input_dim, self.output_dim),
                initializer=self.W_initializer,
                name='SparseFullyConnected_W',
                regularizer=self.W_regularizer,
                constraint=self.W_constraint)
        self.b = self.add_weight(
                shape=(self.output_dim,),
                initializer=self.b_initializer,
                name='SparseFullyConnected_b',
                regularizer=self.b_regularizer,
                constraint=self.b_constraint)


        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights
        #self.built = True
        #super(SparseFullyConnectedLayer, self).build(input_shape) 
Developer: yangliuy, Project: NeuralResponseRanking, Lines: 27, Source: SparseFullyConnectedLayer.py

Example 11: style_loss

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def style_loss(style_image, target_image, style_masks, target_masks):
    '''Calculate style loss between style_image and target_image,
    in all regions.
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 3 == K.ndim(style_masks) == K.ndim(target_masks)
    loss = K.variable(0)
    for i in range(num_labels):
        if K.image_data_format() == 'channels_first':
            style_mask = style_masks[i, :, :]
            target_mask = target_masks[i, :, :]
        else:
            style_mask = style_masks[:, :, i]
            target_mask = target_masks[:, :, i]
        loss += region_style_loss(style_image,
                                  target_image, style_mask, target_mask)
    return loss 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 19, Source: neural_doodle.py

Example 12: total_variation_loss

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def total_variation_loss(x):
    assert 4 == K.ndim(x)
    if K.image_data_format() == 'channels_first':
        a = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                     x[:, :, 1:, :img_ncols - 1])
        b = K.square(x[:, :, :img_nrows - 1, :img_ncols - 1] -
                     x[:, :, :img_nrows - 1, 1:])
    else:
        a = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                     x[:, 1:, :img_ncols - 1, :])
        b = K.square(x[:, :img_nrows - 1, :img_ncols - 1, :] -
                     x[:, :img_nrows - 1, 1:, :])
    return K.sum(K.pow(a + b, 1.25))

# Overall loss is the weighted sum of content_loss, style_loss and tv_loss
# Each individual loss uses features from image/mask models. 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 18, Source: neural_doodle.py
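A minimal usage sketch for Example 12 (my own illustration, not from the neural_doodle script): img_nrows and img_ncols are module-level globals in the original script and must be set to the image size before calling the function. Assumes channels_last ordering and a batch of one image.

import numpy as np
from keras import backend as K

img_nrows, img_ncols = 64, 64  # globals expected by total_variation_loss

image = K.constant(np.random.rand(1, img_nrows, img_ncols, 3))
tv = K.eval(total_variation_loss(image))
print(tv)  # a scalar; noisier images give larger values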

Example 13: region_style_loss

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def region_style_loss(style_image, target_image, style_mask, target_mask):
    '''Calculate style loss between style_image and target_image,
    for one common region specified by their (boolean) masks
    '''
    assert 3 == K.ndim(style_image) == K.ndim(target_image)
    assert 2 == K.ndim(style_mask) == K.ndim(target_mask)
    if K.image_data_format() == 'channels_first':
        masked_style = style_image * style_mask
        masked_target = target_image * target_mask
        num_channels = K.shape(style_image)[0]
    else:
        masked_style = K.permute_dimensions(
            style_image, (2, 0, 1)) * style_mask
        masked_target = K.permute_dimensions(
            target_image, (2, 0, 1)) * target_mask
        num_channels = K.shape(style_image)[-1]
    num_channels = K.cast(num_channels, dtype='float32')
    s = gram_matrix(masked_style) / K.mean(style_mask) / num_channels
    c = gram_matrix(masked_target) / K.mean(target_mask) / num_channels
    return K.mean(K.square(s - c)) 
Developer: hello-sea, Project: DeepLearning_Wavelet-LSTM, Lines: 22, Source: neural_doodle.py

Example 14: call

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def call(self, x, mask=None):
        sims = []
        for n, sim in zip(self.n, self.similarities):
            for _ in range(n):
                batch_size = K.shape(x)[0]
                idx = K.random_uniform((batch_size,), low=0, high=batch_size,
                                       dtype='int32')
                x_shuffled = K.gather(x, idx)
                pair_sim = sim(x, x_shuffled)
                for _ in range(K.ndim(x) - 1):
                    pair_sim = K.expand_dims(pair_sim, axis=1)  # 'dim' was renamed to 'axis' in Keras 2
                sims.append(pair_sim)

        return K.concatenate(sims, axis=-1) 
Developer: codekansas, Project: gandlf, Lines: 16, Source: core.py

Example 15: call

# Required import: from keras import backend [as alias]
# Alternatively: from keras.backend import ndim [as alias]
def call(self, x, mask=None):

        assert self.built, 'Layer must be built before being called'
        input_shape = K.int_shape(x)

        reduction_axes = list(range(len(input_shape)))
        del reduction_axes[self.axis]
        broadcast_shape = [1] * len(input_shape)
        broadcast_shape[self.axis] = input_shape[self.axis]

        if sorted(reduction_axes) == list(range(K.ndim(x)))[:-1]:  # list() needed for this comparison under Python 3
            x_normed = K.batch_normalization(
                x, self.running_mean, self.running_std,
                self.beta, self.gamma,
                epsilon=self.epsilon)
        else:
            # need broadcasting
            broadcast_running_mean = K.reshape(self.running_mean, broadcast_shape)
            broadcast_running_std = K.reshape(self.running_std, broadcast_shape)
            broadcast_beta = K.reshape(self.beta, broadcast_shape)
            broadcast_gamma = K.reshape(self.gamma, broadcast_shape)
            x_normed = K.batch_normalization(
                x, broadcast_running_mean, broadcast_running_std,
                broadcast_beta, broadcast_gamma,
                epsilon=self.epsilon)

        return x_normed 
Developer: akshaylamba, Project: FasterRCNN_KERAS, Lines: 29, Source: FixedBatchNormalization.py


Note: The keras.backend.ndim examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by many developers; copyright remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.