

Python backend.batch_flatten Method Code Examples

This article collects typical usage examples of the Python method keras.backend.batch_flatten. If you are unsure what backend.batch_flatten does, how to call it, or where it is useful, the curated examples below should help. You can also explore the other usage examples available for the keras.backend module.


The following presents 15 code examples of backend.batch_flatten, sorted by popularity by default.
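
Before the examples, here is a minimal sketch of what the method does, assuming a TensorFlow-backed Keras 2.x installation: batch_flatten keeps the batch axis and collapses all remaining axes into one.

import numpy as np
from keras import backend as K

# batch_flatten reshapes (batch, d1, d2, ...) to (batch, d1*d2*...),
# leaving the batch axis untouched.
x = K.constant(np.arange(24).reshape(2, 3, 4))
flat = K.batch_flatten(x)
print(K.int_shape(flat))  # (2, 12)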

Example 1: discriminator_dummy

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
# This excerpt also needs: from keras.layers import Input; from keras.models import Model;
# from keras.optimizers import Adam; from keras import objectives
def discriminator_dummy(img_size, n_filters, init_lr, name='d'):    # identity discriminator: training reduces to a plain U-Net (no GAN)
    # set image specifics
    img_ch=3 # image channels
    out_ch=1 # output channel
    img_height, img_width = img_size[0], img_size[1]

    inputs = Input((img_height, img_width, img_ch + out_ch))

    d = Model(inputs, inputs, name=name)

    def d_loss(y_true, y_pred):
        L = objectives.binary_crossentropy(K.batch_flatten(y_true),
                                            K.batch_flatten(y_pred))
#         L = objectives.mean_squared_error(K.batch_flatten(y_true),
#                                            K.batch_flatten(y_pred))
        return L
    
    d.compile(optimizer=Adam(lr=init_lr, beta_1=0.5), loss=d_loss, metrics=['accuracy'])
    
    return d, d.layers[-1].output_shape[1:] 
Author: jaeminSon | Project: V-GAN | Lines: 22 | Source: model.py
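
A hypothetical call, just to show the return values (the arguments are illustrative, not taken from the V-GAN repo; n_filters is unused by this dummy):

# hypothetical arguments for illustration only
d, out_shape = discriminator_dummy(img_size=(640, 640), n_filters=32, init_lr=2e-4)
print(out_shape)  # (640, 640, 4): the identity model simply echoes its 4-channel input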

Example 2: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def call(self, inputs, **kwargs):
        if isinstance(inputs, list):  # true label is provided with shape = [None, n_classes], i.e. one-hot code.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # if no true label, mask by the max length of capsules. Mainly used for prediction
            # compute lengths of capsules
            x = K.sqrt(K.sum(K.square(inputs), -1))
            # generate the mask which is a one-hot code.
            # mask.shape=[None, n_classes]=[None, num_capsule]
            mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])

        # inputs.shape=[None, num_capsule, dim_capsule]
        # mask.shape=[None, num_capsule]
        # masked.shape=[None, num_capsule * dim_capsule]
        masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
        return masked 
Author: ssrp | Project: Multi-level-DCNet | Lines: 18 | Source: capsulelayers.py
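
A quick sketch of the masking logic on concrete numbers (plain backend ops rather than the layer itself; the shapes are illustrative):

import numpy as np
from keras import backend as K

caps = K.constant([[[0.1, 0.2], [0.9, 0.4], [0.0, 0.3]]])    # (1, num_capsule=3, dim_capsule=2)
lengths = K.sqrt(K.sum(K.square(caps), -1))                  # (1, 3) capsule lengths
mask = K.one_hot(K.argmax(lengths, 1), 3)                    # capsule 1 is longest -> [0, 1, 0]
masked = K.batch_flatten(caps * K.expand_dims(mask, -1))     # (1, 6): [0, 0, 0.9, 0.4, 0, 0]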

Example 3: gram_matrix

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def gram_matrix(x):
	"""
	Computes the Gram matrix of the input tensor x: the matrix of
	inner products between its flattened feature maps.

	Input
	-----
	- x: feature tensor of shape (C x H x W) in 'th' ordering,
	  or (H x W x C) in 'tf' ordering

	Returns
	-------
	- x . x^T of shape (C x C)

	Note that this can be computed efficiently if x is reshaped
	as a tensor of shape (C x H*W).
	"""
	# assert K.ndim(x) == 3
	if K.image_dim_ordering() == 'th':  # legacy API: 'th' corresponds to 'channels_first' in newer Keras
		features = K.batch_flatten(x)
	else:
		features = K.batch_flatten(K.permute_dimensions(x, (2, 0, 1)))
	return K.dot(features, K.transpose(features)) 
Author: kevinzakka | Project: style-transfer | Lines: 23 | Source: losses.py
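
A usage sketch, assuming the legacy K.image_dim_ordering() API is still available (it was replaced by K.image_data_format() in later Keras releases); with 'tf' ordering, an (H, W, C) feature map yields a C x C Gram matrix:

import numpy as np
from keras import backend as K

feat = K.constant(np.random.rand(8, 8, 16).astype('float32'))  # (H, W, C) = (8, 8, 16)
g = gram_matrix(feat)
print(K.int_shape(g))  # (16, 16): inner products between channel activations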

Example 4: nss

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def nss(y_true, y_pred):
    max_y_pred = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.max(K.max(y_pred, axis=2), axis=2)), 
                                                                   shape_r_out, axis=-1)), shape_c_out, axis=-1)
    y_pred /= max_y_pred
    y_pred_flatten = K.batch_flatten(y_pred)

    y_mean = K.mean(y_pred_flatten, axis=-1)
    y_mean = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_mean)), 
                                                               shape_r_out, axis=-1)), shape_c_out, axis=-1)

    y_std = K.std(y_pred_flatten, axis=-1)
    y_std = K.repeat_elements(K.expand_dims(K.repeat_elements(K.expand_dims(K.expand_dims(y_std)), 
                                                              shape_r_out, axis=-1)), shape_c_out, axis=-1)

    y_pred = (y_pred - y_mean) / (y_std + K.epsilon())

    return -(K.sum(K.sum(y_true * y_pred, axis=2), axis=2) / K.sum(K.sum(y_true, axis=2), axis=2))


# Gaussian priors initialization 
Author: marcellacornia | Project: sam | Lines: 22 | Source: models.py
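
For intuition, a NumPy rendering of the same metric on a single map (a hypothetical helper, not part of the sam repo; note the Keras version above returns the negative so it can be minimized as a loss):

import numpy as np

def nss_numpy(fixations, saliency, eps=1e-7):
    # fixations: binary fixation map; saliency: predicted saliency map
    s = saliency / saliency.max()
    s = (s - s.mean()) / (s.std() + eps)   # zero mean, unit std
    return (fixations * s).sum() / fixations.sum()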

Example 5: step

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def step(self, x, states):
        x_shape = K.shape(x)
        h_tm1 = states[0]
        c_tm1 = states[1]

        e = self.V_a(K.tanh(self.W_a(h_tm1) + self.U_a(x)))
        a = K.reshape(K.softmax(K.batch_flatten(e)), (x_shape[0], 1, x_shape[2], x_shape[3]))
        x_tilde = x * K.repeat_elements(a, x_shape[1], 1)

        x_i = self.W_i(x_tilde)
        x_f = self.W_f(x_tilde)
        x_c = self.W_c(x_tilde)
        x_o = self.W_o(x_tilde)

        i = self.inner_activation(x_i + self.U_i(h_tm1))
        f = self.inner_activation(x_f + self.U_f(h_tm1))
        c = f * c_tm1 + i * self.activation(x_c + self.U_c(h_tm1))
        o = self.inner_activation(x_o + self.U_o(h_tm1))

        h = o * self.activation(c)
        return h, [h, c] 
Author: marcellacornia | Project: sam | Lines: 23 | Source: attentive_convlstm.py
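
Here batch_flatten lets a single softmax run over every spatial position of the attention map at once; a shape-only sketch with illustrative dimensions:

import numpy as np
from keras import backend as K

e = K.constant(np.random.rand(2, 1, 4, 4).astype('float32'))  # (B, C=1, H, W)
a = K.softmax(K.batch_flatten(e))       # softmax over all 16 spatial positions
a = K.reshape(a, (2, 1, 4, 4))          # back to a spatial attention map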

Example 6: get_initial_states

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def get_initial_states(self, nse_input, input_mask=None):
        '''
        This method produces the 'read' mask for all timesteps
        and initializes the memory slot mem_0.

        Input: nse_input (batch_size, input_length, input_dim)
        Output: list[Tensors]:
                h_0 (batch_size, output_dim)
                c_0 (batch_size, output_dim)
                flattened_mem_0 (batch_size, input_length * output_dim)
 
        While this method simply copies input to mem_0, variants that inherit from this class can do
        something fancier.
        '''
        input_to_read = nse_input
        mem_0 = input_to_read
        flattened_mem_0 = K.batch_flatten(mem_0)
        initial_states = self.reader.get_initial_states(nse_input)
        initial_states += [flattened_mem_0]
        return initial_states 
Author: pdasigi | Project: onto-lstm | Lines: 22 | Source: nse.py

Example 7: step

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def step(self, inputs, states):
        vP_t = inputs
        hP_tm1 = states[0]
        _ = states[1:3] # ignore internal dropout/masks 
        vP, WP_v, WPP_v, v, W_g2 = states[3:8]
        vP_mask, = states[8:]

        WP_v_Dot = K.dot(vP, WP_v)
        WPP_v_Dot = K.dot(K.expand_dims(vP_t, axis=1), WPP_v)

        s_t_hat = K.tanh(WPP_v_Dot + WP_v_Dot)
        s_t = K.dot(s_t_hat, v)
        s_t = K.batch_flatten(s_t)

        a_t = softmax(s_t, mask=vP_mask, axis=1)

        c_t = K.batch_dot(a_t, vP, axes=[1, 1])
        
        GRU_inputs = K.concatenate([vP_t, c_t])
        g = K.sigmoid(K.dot(GRU_inputs, W_g2))
        GRU_inputs = g * GRU_inputs
        
        hP_t, s = super(SelfAttnGRU, self).step(GRU_inputs, states)

        return hP_t, s 
Author: YerevaNN | Project: R-NET-in-Keras | Lines: 27 | Source: SelfAttnGRU.py
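
In this and the following three R-NET examples, batch_flatten only squeezes the trailing singleton axis left by the score projection, so softmax can act over the passage dimension; a shape-only sketch with illustrative sizes:

import numpy as np
from keras import backend as K

B, P, H = 2, 5, 4                                 # batch, passage length, hidden size
s_hat = K.constant(np.random.rand(B, P, H).astype('float32'))
v = K.constant(np.random.rand(H, 1).astype('float32'))
s = K.batch_flatten(K.dot(s_hat, v))              # (B, P, 1) -> (B, P)
a = K.softmax(s)                                  # attention weights over P
vP = K.constant(np.random.rand(B, P, 2 * H).astype('float32'))
c = K.batch_dot(a, vP, axes=[1, 1])               # (B, 2H) attended context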

Example 8: step

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def step(self, inputs, states):
        # input
        ha_tm1 = states[0] # (B, 2H)
        _ = states[1:3] # ignore internal dropout/masks
        hP, WP_h, Wa_h, v = states[3:7] # (B, P, 2H)
        hP_mask, = states[7:8]

        WP_h_Dot = K.dot(hP, WP_h) # (B, P, H)
        Wa_h_Dot = K.dot(K.expand_dims(ha_tm1, axis=1), Wa_h) # (B, 1, H)

        s_t_hat = K.tanh(WP_h_Dot + Wa_h_Dot) # (B, P, H)
        s_t = K.dot(s_t_hat, v) # (B, P, 1)
        s_t = K.batch_flatten(s_t) # (B, P)
        a_t = softmax(s_t, mask=hP_mask, axis=1) # (B, P)
        c_t = K.batch_dot(hP, a_t, axes=[1, 1]) # (B, 2H)

        GRU_inputs = c_t
        ha_t, (ha_t_,) = super(PointerGRU, self).step(GRU_inputs, states)
        
        return a_t, [ha_t] 
Author: YerevaNN | Project: R-NET-in-Keras | Lines: 22 | Source: PointerGRU.py

Example 9: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def call(self, inputs, mask=None):
        assert(isinstance(inputs, list) and len(inputs) == 5)
        uQ, WQ_u, WQ_v, v, VQ_r = inputs
        uQ_mask = mask[0] if mask is not None else None

        ones = K.ones_like(K.sum(uQ, axis=1, keepdims=True)) # (B, 1, 2H)
        s_hat = K.dot(uQ, WQ_u)
        s_hat += K.dot(ones, K.dot(WQ_v, VQ_r))
        s_hat = K.tanh(s_hat)
        s = K.dot(s_hat, v)
        s = K.batch_flatten(s)

        a = softmax(s, mask=uQ_mask, axis=1)

        rQ = K.batch_dot(uQ, a, axes=[1, 1])

        return rQ 
Author: YerevaNN | Project: R-NET-in-Keras | Lines: 19 | Source: QuestionPooling.py

Example 10: step

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def step(self, inputs, states):
        uP_t = inputs
        vP_tm1 = states[0]
        _ = states[1:3] # ignore internal dropout/masks
        uQ, WQ_u, WP_v, WP_u, v, W_g1 = states[3:9]
        uQ_mask, = states[9:10]

        WQ_u_Dot = K.dot(uQ, WQ_u) #WQ_u
        WP_v_Dot = K.dot(K.expand_dims(vP_tm1, axis=1), WP_v) #WP_v
        WP_u_Dot = K.dot(K.expand_dims(uP_t, axis=1), WP_u) # WP_u

        s_t_hat = K.tanh(WQ_u_Dot + WP_v_Dot + WP_u_Dot)

        s_t = K.dot(s_t_hat, v) # v
        s_t = K.batch_flatten(s_t)
        a_t = softmax(s_t, mask=uQ_mask, axis=1)
        c_t = K.batch_dot(a_t, uQ, axes=[1, 1])

        GRU_inputs = K.concatenate([uP_t, c_t])
        g = K.sigmoid(K.dot(GRU_inputs, W_g1))  # W_g1
        GRU_inputs = g * GRU_inputs
        vP_t, s = super(QuestionAttnGRU, self).step(GRU_inputs, states)

        return vP_t, s 
Author: YerevaNN | Project: R-NET-in-Keras | Lines: 26 | Source: QuestionAttnGRU.py

Example 11: call

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def call(self, inputs, **kwargs):
        if isinstance(inputs, list):  # true label is provided with shape = [None, n_classes], i.e. one-hot code.
            assert len(inputs) == 2
            inputs, mask = inputs
        else:  # if no true label, mask by the max length of capsules. Mainly used for prediction
            # compute lengths of capsules
            x = K.sqrt(K.sum(K.square(inputs), -1))
            # generate the mask which is a one-hot code.
            # mask.shape=[None, n_classes]=[None, num_capsule]
            mask = K.one_hot(indices=K.argmax(x, 1), num_classes=x.get_shape().as_list()[1])

        # inputs.shape=[None, num_capsule, dim_capsule]
        # mask.shape=[None, num_capsule]
        # masked.shape=[None, num_capsule * dim_capsule]
        masked = K.batch_flatten(inputs * K.expand_dims(mask, -1))
        return masked 
Author: brjathu | Project: deepcaps | Lines: 18 | Source: capslayers.py

Example 12: lap1_diff

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def lap1_diff(laplacian, frame_step=1):
    ''' Model which takes the lap-1 distance between frames `frame_step` apart
    in the batch '''
    deltas = []
    for i, lap_level in enumerate(laplacian):
        # Take the difference of the Laplacian pyramid of this layer vs. the next
        diff = Lambda(lambda lap_level, frame_step=frame_step:
                K.batch_flatten(
                    lap_level - K.concatenate([lap_level[frame_step:], lap_level[0:frame_step]], axis=0)))(lap_level)
        # scale for good measure
        diff = Lambda(lambda x, scale = 2.**-(i-1): scale*x)(diff)
        #diff = K.batch_flatten(lap_layer - K.concatenate([lap_layer[frame_step:], lap_layer[0:frame_step]], axis=0))
        deltas.append(diff) # diff: (frames, lap-pixels)

    out = keras.layers.concatenate(deltas, axis=1) # (frames, lap-pixels)
    # I use mean here instead of sum to make it more agnostic to total pixel count.
    out = Lambda(lambda x: K.mean(K.abs(x), axis=1))(out) # (frames,)
    return out 
Author: wxs | Project: subjective-functions | Lines: 20 | Source: gram.py
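
This helper and l2_diff in the next example rely on the same "roll" trick: concatenating lap_level[frame_step:] with lap_level[:frame_step] rotates the batch, so the subtraction pairs each frame with the one frame_step ahead, wrapping at the end. A minimal sketch:

import numpy as np
from keras import backend as K

frames = K.constant(np.arange(4, dtype='float32').reshape(4, 1))  # 4 one-pixel "frames"
rolled = K.concatenate([frames[1:], frames[0:1]], axis=0)         # shift by frame_step=1
diff = K.batch_flatten(frames - rolled)                           # frame i minus frame i+1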

Example 13: l2_diff

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def l2_diff(octaves, frame_step=1):
    ''' Model which takes the l2 distance between frames frame_step apart'''
    octave_diffs = []
    for frames in octaves:
        # Take the difference between the frames
        out = Lambda(lambda frames, frame_step=frame_step:
                K.batch_flatten(frames - K.concatenate([frames[frame_step:], frames[0:frame_step]], axis=0)))(frames)

        # square
        out = Lambda(lambda x: K.square(x), name=make_name("l2_diff_square"))(out)
        
        # mean instead of sum so we can ignore pixel count
        out = Lambda(lambda x: K.mean(x, axis=1), name=make_name("l2_diff_mean"))(out)

        # sqrt
        out = Lambda(lambda x: K.sqrt(x), name=make_name("l2_diff_sqrt"))(out)

        # (frames,) list of l2 distances

        octave_diffs.append(out)
    return octave_diffs # [(frames, ) ...] list of lists of l2 distances 
Author: wxs | Project: subjective-functions | Lines: 23 | Source: gram.py

Example 14: read

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def read(self, nse_input, input_mask=None):
        '''
        This method produces the 'read' output (equation 1 in the paper) for all timesteps
        and initializes the memory slot mem_0.

        Input: nse_input (batch_size, input_length, input_dim)
        Outputs:
            o (batch_size, input_length, output_dim)
            flattened_mem_0 (batch_size, input_length * output_dim)
 
        While this method simply copies input to mem_0, variants that inherit from this class can do
        something fancier.
        '''
        input_to_read = nse_input
        mem_0 = input_to_read
        flattened_mem_0 = K.batch_flatten(mem_0)
        o = self.reader.call(input_to_read, input_mask)
        o_mask = self.reader.compute_mask(input_to_read, input_mask)
        return o, [flattened_mem_0], o_mask 
Author: pdasigi | Project: neural-semantic-encoders | Lines: 21 | Source: nse.py

Example 15: compose_and_write_step

# Required import: from keras import backend [as alias]
# Or: from keras.backend import batch_flatten [as alias]
def compose_and_write_step(self, o_t, states):
        flattened_mem_tm1, flattened_shared_mem_tm1, writer_h_tm1, writer_c_tm1 = states
        input_mem_shape = K.shape(flattened_mem_tm1)
        mem_shape = (input_mem_shape[0], input_mem_shape[1] // self.output_dim, self.output_dim)  # integer division: '/' would yield a float under Python 3 and break K.reshape
        mem_tm1 = K.reshape(flattened_mem_tm1, mem_shape)
        shared_mem_tm1 = K.reshape(flattened_shared_mem_tm1, mem_shape)
        z_t, m_rt = self.summarize_memory(o_t, mem_tm1)
        shared_z_t, shared_m_rt = self.summarize_memory(o_t, shared_mem_tm1)
        c_t = self.compose_memory_and_output([o_t, m_rt, shared_m_rt])
        # Collecting the necessary variables to directly call writer's step function.
        writer_constants = self.writer.get_constants(c_t)  # returns dropouts for W and U (all 1s, see init)
        writer_states = [writer_h_tm1, writer_c_tm1] + writer_constants
        # Making a call to writer's step function, Equation 5
        h_t, [_, writer_c_t] = self.writer.step(c_t, writer_states)  # h_t, writer_c_t: (batch_size, output_dim)
        mem_t = self.update_memory(z_t, h_t, mem_tm1)
        shared_mem_t = self.update_memory(shared_z_t, h_t, shared_mem_tm1)
        return h_t, [K.batch_flatten(mem_t), K.batch_flatten(shared_mem_t), h_t, writer_c_t] 
Author: pdasigi | Project: neural-semantic-encoders | Lines: 19 | Source: nse.py
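
The NSE implementation carries its 3-D memory between timesteps as a flat 2-D state (matching the flattened_mem_0 shapes documented in examples 6 and 14) and rehydrates it with K.reshape at each step; a shape-only round-trip sketch with illustrative dimensions:

import numpy as np
from keras import backend as K

batch, length, dim = 2, 5, 3
mem = K.constant(np.random.rand(batch, length, dim).astype('float32'))
flat = K.batch_flatten(mem)                     # (2, 15): flat state carried by the RNN
back = K.reshape(flat, (batch, length, dim))    # (2, 5, 3) recovered inside the step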


Note: the keras.backend.batch_flatten examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects, and copyright remains with the original authors; consult the corresponding project's License before distributing or using the code. Do not repost without permission.