

Python backend.zeros Method Code Examples

This article collects typical usage examples of the Python method keras.backend.zeros. If you are unsure what backend.zeros does or how to call it, the selected code examples below should help. You can also explore further usage examples from the keras.backend module.


The following presents 15 code examples of the backend.zeros method, ordered by popularity by default.
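
Before the examples, here is a minimal, self-contained sketch of what keras.backend.zeros does; the variable name my_zeros and the shapes are ours, chosen purely for illustration, and the sketch assumes the TensorFlow backend.

import numpy as np
from keras import backend as K

# K.zeros instantiates an all-zeros backend variable with the given shape.
z = K.zeros((2, 3), dtype='float32', name='my_zeros')

print(K.int_shape(z))   # (2, 3)
print(K.dtype(z))       # float32
print(K.eval(z))        # a 2x3 numpy array of zeros

# Like any backend variable, it can be overwritten in place later:
K.set_value(z, np.ones((2, 3)))
print(K.eval(z))        # now all ones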

Example 1: call

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def call(self, inputs, **kwargs):
        # (batch_size, 1, input_num_capsule, input_dim_capsule)
        expand_inputs = K.expand_dims(inputs, axis=1)
        # (batch_size, num_capsule, input_num_capsule, input_dim_capsule)
        expand_inputs = K.tile(expand_inputs, (1, self.num_capsule, 1, 1))
        # (batch_size, num_capsule, input_num_capsule, dim_capsule)
        u_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, axes=[2, 3]), expand_inputs)

        if self.num_routing <= 0:
            self.num_routing = 3
        # (batch_size, num_capsule, input_num_capsule)
        b = K.zeros((K.shape(u_hat)[0], self.num_capsule, self.input_num_capsule))
        for i in range(self.num_routing):
            # (batch_size, num_capsule, input_num_capsule)
            c = softmax(b, axis=1)
            # (batch_size, num_capsule, dim_capsule)
            s = K.batch_dot(c, u_hat, axes=[2, 2])
            squashed_s = squash(s)
            if i < self.num_routing - 1:
                # (batch_size, num_capsule, input_num_capsule)
                b += K.batch_dot(squashed_s, u_hat, axes=[2, 3])
        return squashed_s 
Developer ID: l11x0m7, Project: CapsNet, Lines of code: 24, Source: capsule.py
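
A side note on Example 1, offered as our reading of the Keras 2.2-era TensorFlow backend rather than a guarantee for every version: when the requested shape is fully known, K.zeros returns an all-zeros variable, but when one dimension is a tensor (as with K.shape(u_hat)[0] above) it falls back to returning a plain all-zeros tensor, which is what the routing loop needs for a batch-sized buffer. A small sketch of the difference; the placeholder and all shapes are made up.

from keras import backend as K

x = K.placeholder(shape=(None, 6, 8))           # batch size unknown at graph-build time

b_static = K.zeros((4, 10, 6))                   # fully known shape -> backend variable
b_dynamic = K.zeros((K.shape(x)[0], 10, 6))      # dynamic batch dim -> plain zeros tensor

print(type(b_static))                            # a Variable
print(type(b_dynamic))                           # a Tensor
print(K.int_shape(b_dynamic))                    # (None, 10, 6)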

Example 2: reset_states

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def reset_states(self):
		assert self.stateful, 'Layer must be stateful.'
		input_shape = self.input_spec[0].shape

		if not input_shape[0]:
			raise Exception('If a RNN is stateful, a complete ' +
							'input_shape must be provided (including batch size).')

		if hasattr(self, 'states'):
			K.set_value(self.states[0],
			            np.zeros((input_shape[0], self.hidden_recurrent_dim)))
			K.set_value(self.states[1],
			            np.zeros((input_shape[0], self.input_dim)))
			K.set_value(self.states[2],
			            np.zeros((input_shape[0], self.hidden_dim)))
		else:
			self.states = [K.zeros((input_shape[0], self.hidden_recurrent_dim)),
							K.zeros((input_shape[0], self.input_dim)),
							K.zeros((input_shape[0], self.hidden_dim))] 
Developer ID: bnsnapper, Project: keras_bn_library, Lines of code: 21, Source: rnnrbm.py
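
The reset_states pattern above (also seen in Examples 4, 11 and 14) boils down to: allocate the state buffers once with K.zeros, then zero them in place with K.set_value on later resets. A minimal sketch of that idea outside any layer, with made-up sizes and assuming the TensorFlow backend.

import numpy as np
from keras import backend as K

batch_size, hidden_dim = 4, 8
state = K.zeros((batch_size, hidden_dim))                 # allocated once, reused afterwards

# ... pretend training or prediction has filled the state with something ...
K.set_value(state, np.ones((batch_size, hidden_dim)))

# "reset_states": overwrite the buffer in place instead of creating a new variable.
K.set_value(state, np.zeros((batch_size, hidden_dim)))
print(K.eval(state).sum())                                # 0.0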

Example 3: build

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def build(self, input_shape):
		self.input_spec = [InputSpec(shape=input_shape)]
		self.input_dim = input_shape[2]

		self.W = self.init((self.output_dim, 4 * self.input_dim),
		                   name='{}_W'.format(self.name))
		self.U = self.inner_init((self.input_dim, 4 * self.input_dim),
		                         name='{}_U'.format(self.name))
		self.b = K.variable(np.hstack((np.zeros(self.input_dim),
		                               K.get_value(self.forget_bias_init((self.input_dim,))),
		                               np.zeros(self.input_dim),
		                               np.zeros(self.input_dim))),
		                    name='{}_b'.format(self.name))

		self.A = self.init((self.input_dim, self.output_dim),
		                    name='{}_A'.format(self.name))
		self.ba = K.zeros((self.output_dim,), name='{}_ba'.format(self.name))


		self.trainable_weights = [self.W, self.U, self.b, self.A, self.ba]

		if self.initial_weights is not None:
			self.set_weights(self.initial_weights)
			del self.initial_weights 
Developer ID: bnsnapper, Project: keras_bn_library, Lines of code: 26, Source: recurrent.py
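
Example 3 shows only the build() method. For context, here is a minimal, older-style custom layer in the same spirit, where a zero bias is created with K.zeros and registered by hand in trainable_weights; the class name ZeroBiasDense and all shapes are ours and purely illustrative (Keras 2.x assumed).

import numpy as np
from keras import backend as K
from keras.layers import Layer

class ZeroBiasDense(Layer):
    def __init__(self, output_dim, **kwargs):
        self.output_dim = output_dim
        super(ZeroBiasDense, self).__init__(**kwargs)

    def build(self, input_shape):
        input_dim = input_shape[-1]
        # Kernel as a plain backend variable, bias as an all-zeros variable.
        self.kernel = K.variable(np.random.normal(scale=0.05, size=(input_dim, self.output_dim)),
                                 name='{}_kernel'.format(self.name))
        self.bias = K.zeros((self.output_dim,), name='{}_bias'.format(self.name))
        self.trainable_weights = [self.kernel, self.bias]
        super(ZeroBiasDense, self).build(input_shape)

    def call(self, inputs):
        return K.dot(inputs, self.kernel) + self.bias

    def compute_output_shape(self, input_shape):
        return input_shape[:-1] + (self.output_dim,)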

Example 4: reset_states

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def reset_states(self):
		assert self.stateful, 'Layer must be stateful.'
		input_shape = self.input_spec[0].shape
		if not input_shape[0]:
			raise ValueError('If a RNN is stateful, it needs to know '
			                 'its batch size. Specify the batch size '
			                 'of your input tensors: \n'
			                 '- If using a Sequential model, '
			                 'specify the batch size by passing '
			                 'a `batch_input_shape` '
			                 'argument to your first layer.\n'
			                 '- If using the functional API, specify '
			                 'the time dimension by passing a '
			                 '`batch_shape` argument to your Input layer.')
		if hasattr(self, 'states'):
			K.set_value(self.states[0],
			            np.zeros((input_shape[0], self.input_dim)))
			K.set_value(self.states[1],
			            np.zeros((input_shape[0], self.output_dim)))
		else:
			self.states = [K.zeros((input_shape[0], self.input_dim)),
							K.zeros((input_shape[0], self.output_dim))] 
Developer ID: bnsnapper, Project: keras_bn_library, Lines of code: 24, Source: recurrent.py

Example 5: get_updates

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        t = K.cast(self.iterations, K.floatx()) + 1
        lr_t = self.learning_rate * (K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t)))

        ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]
        self.weights = [self.iterations] + ms + vs

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            p_t = lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))
            self.updates.append(K.update_sub(p, p_t))
        return self.updates 
Developer ID: CyberZHG, Project: keras-lookahead, Lines of code: 21, Source: optimizers.py
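
The two K.zeros list comprehensions above create one zero-initialised accumulator ("slot") per trainable parameter, matching its shape and dtype; the slots then live in self.weights so they are saved and restored with the optimizer. A small standalone sketch of that idea with made-up parameters:

from keras import backend as K

params = [K.variable([[1.0, 2.0], [3.0, 4.0]], name='w'),
          K.variable([0.5, 0.5], name='b')]

ms = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]  # first-moment slots
vs = [K.zeros(K.int_shape(p), dtype=K.dtype(p)) for p in params]  # second-moment slots

for p, m in zip(params, ms):
    print(K.int_shape(p), K.int_shape(m))   # (2, 2) (2, 2), then (2,) (2,)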

Example 6: build

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def build(self, input_shape):
        # Create mean and count
        # These are weights because plain backend variables don't get saved with the model,
        # and we'd like these numbers to be saved along with it.
        # But we need to make sure that the weights are untrainable.
        self.mean = self.add_weight(name='mean', 
                                      shape=input_shape[1:],
                                      initializer='zeros',
                                      trainable=False)
        self.count = self.add_weight(name='count', 
                                      shape=[1],
                                      initializer='zeros',
                                      trainable=False)

        # self.mean = K.zeros(input_shape[1:], name='mean')
        # self.count = K.variable(0.0, name='count')
        super(MeanStream, self).build(input_shape)  # Be sure to call this somewhere! 
Developer ID: voxelmorph, Project: voxelmorph, Lines of code: 19, Source: layers.py
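
The comment in Example 6 is worth a small illustration: a non-trainable weight created with add_weight(initializer='zeros') is tracked by the layer and saved with the model, whereas a bare K.zeros variable (the commented-out lines) would not be. A minimal sketch with a hypothetical layer name, not the actual voxelmorph MeanStream implementation:

from keras.layers import Layer

class RunningMean(Layer):
    def build(self, input_shape):
        # Saved with the model, excluded from gradient updates.
        self.mean = self.add_weight(name='mean',
                                    shape=input_shape[1:],
                                    initializer='zeros',
                                    trainable=False)
        super(RunningMean, self).build(input_shape)

    def call(self, inputs):
        # Toy behaviour: subtract the stored (initially zero) mean.
        return inputs - self.mean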

Example 7: cmc

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def cmc(model):
    
    def cmc_curve(model, camera1, camera2, rank_max=50):
        num = camera1.shape[0]    
        rank = []
        score = []    
        camera_batch1 = np.zeros(camera1.shape)
        for i in range(num):
            for j in range(num):
                camera_batch1[j] = camera1[i]
            similarity_batch = model.predict_on_batch([camera_batch1, camera2])
            sim_trans = similarity_batch.transpose()
            similarity_rate_sorted = np.argsort(sim_trans[0])
            for k in range(num):
                if similarity_rate_sorted[k] == i:
                    rank.append(k+1)
                    break
        rank_val = 0
        for i in range(rank_max):
            rank_val = rank_val + len([j for j in rank if i == j-1])        
            score.append(rank_val / float(num))
        return np.array(score)  
        
    a,b = get_data_for_cmc()
    return cmc_curve(model,a,b) 
Developer ID: Ning-Ding, Project: Implementation-CVPR2015-CNN-for-ReID, Lines of code: 27, Source: model_for_market1501.py

Example 8: call

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def call(self, x, mask=None):

            s = K.shape(x)
            b = s[0]
            r = s[1]
            c = s[2]
            ch = s[3]

            half_n = self.n // 2 # half the local region

            input_sqr = K.square(x) # square the input

            extra_channels = K.zeros((b, r, c, ch + 2 * half_n))
            input_sqr = K.concatenate([extra_channels[:, :, :, :half_n],input_sqr, extra_channels[:, :, :, half_n + ch:]], axis = 3)

            scale = self.k # offset for the scale
            norm_alpha = self.alpha / self.n # normalized alpha
            for i in range(self.n):
                scale += norm_alpha * input_sqr[:, :, :, i:i+ch]
            scale = scale ** self.beta
            x = x / scale
            
            return x 
Developer ID: luigifreda, Project: pyslam, Lines of code: 25, Source: LRN.py
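
The K.zeros call in Example 8 pads the channel axis with half_n all-zero channels on each side so the local sum over n neighbouring channels stays defined at the borders. A toy illustration of that padding trick with made-up sizes (TensorFlow backend assumed); unlike the snippet above, it concatenates two small zero blocks instead of slicing one large one, which has the same effect:

import numpy as np
from keras import backend as K

half_n = 2                                       # half of the local region size n
x = K.variable(np.random.rand(1, 4, 4, 3))       # (batch, rows, cols, channels=3)

pad = K.zeros((1, 4, 4, half_n))                 # all-zero border channels
x_sq_padded = K.concatenate([pad, K.square(x), pad], axis=3)
print(K.int_shape(x_sq_padded))                  # (1, 4, 4, 7), i.e. ch + 2 * half_n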

Example 9: vectorizeData

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def vectorizeData(xContext, xQuestion, xAnswerBeing, xAnswerEnd, word_index, context_maxlen, question_maxlen):
    '''Vectorize the words to their respective index and pad context to max context length and question to max question length.
       Answers vectors are padded to the max context length as well.
    '''
    X = []
    Xq = []
    YBegin = []
    YEnd = []
    for i in range(len(xContext)):
        x = [word_index[w] for w in xContext[i]]
        xq = [word_index[w] for w in xQuestion[i]]
        # map the first and last words of answer span to one-hot representations
        y_Begin =  np.zeros(len(xContext[i]))
        y_Begin[xAnswerBeing[i]] = 1
        y_End = np.zeros(len(xContext[i]))
        y_End[xAnswerEnd[i]] = 1
        X.append(x)
        Xq.append(xq)
        YBegin.append(y_Begin)
        YEnd.append(y_End)
    return pad_sequences(X, maxlen=context_maxlen, padding='post'), pad_sequences(Xq, maxlen=question_maxlen, padding='post'), pad_sequences(YBegin, maxlen=context_maxlen, padding='post'), pad_sequences(YEnd, maxlen=context_maxlen, padding='post')

# Note: you need to download and unzip the GloVe pre-trained model files into the same directory as this script
Developer ID: wentaozhu, Project: recurrent-attention-for-QA-SQUAD-based-on-keras, Lines of code: 25, Source: QnA.py
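
A toy walk-through of the answer-span encoding used above; the sentence and the span indices are made up. Each answer is turned into two one-hot vectors over the context, one marking the first word of the span and one marking the last:

import numpy as np

context = ['the', 'cat', 'sat', 'on', 'the', 'mat']
answer_begin, answer_end = 3, 5                  # the span "on the mat"

y_begin = np.zeros(len(context))
y_begin[answer_begin] = 1                        # [0. 0. 0. 1. 0. 0.]
y_end = np.zeros(len(context))
y_end[answer_end] = 1                            # [0. 0. 0. 0. 0. 1.]
print(y_begin, y_end)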

Example 10: build

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def build(self, input_shape):
        # input shape is (batch_size, num_words, num_senses, num_hyps)
        self.num_senses = input_shape[-2]
        self.num_hyps = input_shape[-1] - 1  # -1 because the last value is a word index
        # embedding of size 1.
        if self.set_sense_priors:
            self.sense_priors = self._get_initial_sense_priors((self.word_index_size, 1), name='{}_sense_priors'.format(self.name))
        else:
            # OntoLSTM makes sense probabilities uniform if the passed sense parameters are zero.
            self.sense_priors = K.zeros((self.word_index_size, 1))  # uniform sense probs
        # Keeping aside the initial weights to not let Embedding set them. It wouldn't know what sense priors are.
        if self.initial_weights is not None:
            self.onto_aware_embedding_weights = self.initial_weights
            self.initial_weights = None
        # The following method will set self.trainable_weights
        super(OntoAwareEmbedding, self).build(input_shape)  # input_shape will not be used by Embedding's build.
        if not self.tune_embedding:
            # Move embedding to non_trainable_weights
            self._non_trainable_weights.append(self._trainable_weights.pop())

        if self.set_sense_priors:
            self._trainable_weights.append(self.sense_priors)

        if self.onto_aware_embedding_weights is not None:
            self.set_weights(self.onto_aware_embedding_weights) 
Developer ID: pdasigi, Project: onto-lstm, Lines of code: 27, Source: embedding.py

Example 11: reset_states

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def reset_states(self):
    assert self.stateful, 'Layer must be stateful.'
    input_shape = self.input_spec[0].shape
    if not input_shape[0]:
      raise Exception('If a RNN is stateful, a complete ' +
                      'input_shape must be provided (including batch size).')
    if hasattr(self, 'states'):
      K.set_value(self.states[0],
                  np.zeros((input_shape[0], self.output_dim)))
    else:
      self.states = [K.zeros((input_shape[0], self.output_dim))] 
Developer ID: LaurentMazare, Project: deep-models, Lines of code: 13, Source: rhn.py

Example 12: build

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def build(self, input_shape):
    super(LSTM_LN, self).build(input_shape)
    self.gs, self.bs = [], []
    for i in range(3):
      f = 1 if i == 2 else 4
      self.gs += [ K.ones((f*self.output_dim,), name='{}_g{}'.format(self.name, i)) ]
      self.bs += [ K.zeros((f*self.output_dim,), name='{}_b{}'.format(self.name, i)) ]
    self.trainable_weights += self.gs + self.bs 
Developer ID: LaurentMazare, Project: deep-models, Lines of code: 10, Source: lstm_ln.py

Example 13: build

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def build(self):
        stack_size = self.input_shape[2]
        dtensor5 = T.TensorType('float32', (False,)*5)
        self.input = dtensor5()
        self.W_shape = (self.nb_filter, stack_size, self.nb_row, self.nb_col)
        self.W = self.init(self.W_shape)
        self.b = shared_zeros((self.nb_filter,))

        self.params = [self.W, self.b]

        self.regularizers = []

        if self.W_regularizer:
            self.W_regularizer.set_param(self.W)
            self.regularizers.append(self.W_regularizer)

        if self.b_regularizer:
            self.b_regularizer.set_param(self.b)
            self.regularizers.append(self.b_regularizer)

        if self.activity_regularizer:
            self.activity_regularizer.set_layer(self)
            self.regularizers.append(self.activity_regularizer)

        if self.initial_weights is not None:
            self.set_weights(self.initial_weights)
            del self.initial_weights 
Developer ID: textclf, Project: fancy-cnn, Lines of code: 29, Source: convolutions.py

Example 14: reset_states

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def reset_states(self):
        assert self.stateful, 'Layer must be stateful.'
        input_shape = self.input_spec[0].shape
        if not input_shape[0]:
            raise Exception('If a RNN is stateful, a complete ' +
                            'input_shape must be provided (including batch size).')
        if hasattr(self, 'states'):
            K.set_value(self.states[0],
                        np.zeros((input_shape[0], self.output_dim)))
            K.set_value(self.states[1],
                        np.zeros((input_shape[0], self.output_dim)))
        else:
            self.states = [K.zeros((input_shape[0], self.output_dim)),
                           K.zeros((input_shape[0], self.output_dim))] 
Developer ID: SigmaQuan, Project: NTM-Keras, Lines of code: 16, Source: lstm2ntm.py

Example 15: initial

# Required module: from keras import backend [as alias]
# Or: from keras.backend import zeros [as alias]
def initial(number_of_memory_locations, memory_vector_size):
    return K.zeros((number_of_memory_locations, memory_vector_size)) 
Developer ID: SigmaQuan, Project: NTM-Keras, Lines of code: 4, Source: memory.py
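
A short usage sketch for the helper in Example 15; the sizes are arbitrary. The returned object is simply an all-zeros (number_of_memory_locations, memory_vector_size) backend variable standing in for a blank NTM memory:

from keras import backend as K

memory = initial(number_of_memory_locations=128, memory_vector_size=20)
print(K.int_shape(memory))    # (128, 20)
print(K.eval(memory).sum())   # 0.0 -- the memory starts out blank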


Note: The keras.backend.zeros method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers, and the copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.