

Python backend.get_variable_shape Method Code Examples

This article collects typical usage examples of the keras.backend.get_variable_shape method in Python. If you are wondering what backend.get_variable_shape does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also browse further usage examples from the keras.backend module.


Eight code examples of the backend.get_variable_shape method are shown below, sorted by popularity.
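For orientation, here is a minimal sketch of what the method returns (assuming Keras 2.x with the TensorFlow backend, where K.get_variable_shape is available and behaves like K.int_shape):

from keras import backend as K

w = K.zeros((3, 4))             # a backend weight variable
print(K.get_variable_shape(w))  # -> (3, 4), a plain Python tuple of ints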

Example 1: AttentionRefinementModule

# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_variable_shape [as alias]
from keras import backend as K
from keras.layers import (Activation, BatchNormalization, Conv1D,
                          GlobalAveragePooling2D, Multiply, Reshape)
def AttentionRefinementModule(inputs):
    # Global average pooling
    nb_channels = K.get_variable_shape(inputs)[-1]
    net = GlobalAveragePooling2D()(inputs)

    net = Reshape((1, nb_channels))(net)
    net = Conv1D(nb_channels, kernel_size=1,
                 kernel_initializer='he_normal',
                 )(net)
    net = BatchNormalization()(net)
    net = Activation('relu')(net)
    net = Conv1D(nb_channels, kernel_size=1,
                 kernel_initializer='he_normal',
                 )(net)
    net = BatchNormalization()(net)
    net = Activation('sigmoid')(net)  # tf.sigmoid(net)

    net = Multiply()([inputs, net])

    return net 
Author: JACKYLUO1991, Project: Face-skin-hair-segmentaiton-and-skin-color-evaluation, Lines: 22, Source: dfanet.py
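A hypothetical usage sketch (layer imports as above; the input size is made up for illustration — any 4-D feature map with a known channel count works, since K.get_variable_shape(inputs)[-1] reads the static channel dimension):

from keras.layers import Input
from keras.models import Model

inputs = Input(shape=(32, 32, 64))           # H x W x C feature map
outputs = AttentionRefinementModule(inputs)  # channel attention, same shape
model = Model(inputs, outputs)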

Example 2: get_weightnorm_params_and_grads

# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_variable_shape [as alias]
import tensorflow as tf
from keras import backend as K
def get_weightnorm_params_and_grads(p, g):
    ps = K.get_variable_shape(p)

    # construct weight scaler: V_scaler = g/||V||
    V_scaler_shape = (ps[-1],)  # assumes we're using tensorflow!
    V_scaler = K.ones(V_scaler_shape)  # init to ones, so effective parameters don't change

    # get V parameters = ||V||/g * W
    norm_axes = [i for i in range(len(ps) - 1)]
    V = p / tf.reshape(V_scaler, [1] * len(norm_axes) + [-1])

    # split V_scaler into ||V|| and g parameters
    V_norm = tf.sqrt(tf.reduce_sum(tf.square(V), norm_axes))
    g_param = V_scaler * V_norm

    # get grad in V,g parameters
    grad_g = tf.reduce_sum(g * V, norm_axes) / V_norm
    grad_V = tf.reshape(V_scaler, [1] * len(norm_axes) + [-1]) * \
             (g - tf.reshape(grad_g / V_norm, [1] * len(norm_axes) + [-1]) * V)

    return V, V_norm, V_scaler, g_param, grad_g, grad_V 
Author: openai, Project: weightnorm, Lines: 23, Source: weightnorm.py
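For reference, this helper implements the weight normalization reparameterization of Salimans & Kingma (2016). Writing the scale as g_param (note that the function argument g is the gradient of the loss with respect to W, not the scale), the code computes:

W = \frac{g_{param}}{\lVert V \rVert}\, V, \qquad
\nabla_{g_{param}} L = \frac{\langle \nabla_W L,\ V \rangle}{\lVert V \rVert}, \qquad
\nabla_V L = \frac{g_{param}}{\lVert V \rVert} \left( \nabla_W L - \frac{\nabla_{g_{param}} L}{\lVert V \rVert}\, V \right)

with norms and inner products taken over every axis except the last, so there is one scale per output unit.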

Example 3: get_weightnorm_params_and_grads

# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_variable_shape [as alias]
import tensorflow as tf
from keras import backend as K
def get_weightnorm_params_and_grads(p, g):
    ps = K.get_variable_shape(p)
    # construct weight scaler: V_scaler = g/||V||
    V_scaler_shape = (ps[-1],)  # assumes we're using tensorflow!
    V_scaler = K.ones(V_scaler_shape)  # init to ones, so effective parameters don't change
    # get V parameters = ||V||/g * W
    norm_axes = [i for i in range(len(ps) - 1)]
    V = p / tf.reshape(V_scaler, [1] * len(norm_axes) + [-1])
    # split V_scaler into ||V|| and g parameters
    V_norm = tf.sqrt(tf.reduce_sum(tf.square(V), norm_axes))
    g_param = V_scaler * V_norm
    # get grad in V,g parameters
    grad_g = tf.reduce_sum(g * V, norm_axes) / V_norm
    grad_V = tf.reshape(V_scaler, [1] * len(norm_axes) + [-1]) * \
             (g - tf.reshape(grad_g / V_norm, [1] * len(norm_axes) + [-1]) * V)
    return V, V_norm, V_scaler, g_param, grad_g, grad_V 
Author: wmylxmj, Project: Anime-Super-Resolution, Lines: 18, Source: optimizer.py
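A hypothetical sketch of calling the helper directly (shapes are made up; inside the optimizer in Example 7 below, p is a weight variable and g its gradient tensor):

from keras import backend as K

p = K.random_normal_variable((128, 64), mean=0.0, scale=0.05)  # kernel W
g = K.zeros((128, 64))       # stand-in for the gradient of the loss w.r.t. W
V, V_norm, V_scaler, g_param, grad_g, grad_V = get_weightnorm_params_and_grads(p, g)
print(K.int_shape(g_param))  # -> (64,): one scale per output unit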

Example 4: separable_res_block_deep

# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_variable_shape [as alias]
from keras import backend as K
from keras.layers import (Activation, BatchNormalization, Conv2D,
                          SeparableConv2D, add)
def separable_res_block_deep(inputs, nb_filters, filter_size=3, strides=1, dilation=1, ix=0):
    inputs = Activation('relu')(inputs)  # , name=prefix + '_sepconv1_act'

    ip_nb_filter = K.get_variable_shape(inputs)[-1]
    if ip_nb_filter != nb_filters or strides != 1:
        residual = Conv2D(nb_filters, 1, strides=strides, use_bias=False)(inputs)
        residual = BatchNormalization()(residual)
    else:
        residual = inputs

    x = SeparableConv2D(nb_filters // 4, filter_size,
                        dilation_rate=dilation,
                        padding='same',
                        use_bias=False,
                        kernel_initializer='he_normal',
                        )(inputs)
    x = BatchNormalization()(x)  # name=prefix + '_sepconv1_bn'

    x = Activation('relu')(x)  # , name=prefix + '_sepconv2_act'
    x = SeparableConv2D(nb_filters // 4, filter_size,
                        dilation_rate=dilation,
                        padding='same',
                        use_bias=False,
                        kernel_initializer='he_normal',
                        )(x)
    x = BatchNormalization()(x)  # name=prefix + '_sepconv2_bn'
    x = Activation('relu')(x)  # , name=prefix + '_sepconv3_act'
    # if strides != 1:
    x = SeparableConv2D(nb_filters, filter_size,
                        strides=strides,
                        dilation_rate=dilation,
                        padding='same',
                        use_bias=False,
                        )(x)

    x = BatchNormalization()(x)  # name=prefix + '_sepconv3_bn'
    x = add([x, residual])
    return x 
Author: JACKYLUO1991, Project: Face-skin-hair-segmentaiton-and-skin-color-evaluation, Lines: 40, Source: dfanet.py
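A hypothetical usage sketch (sizes made up; note that strides and dilation should not both exceed 1 in the same SeparableConv2D call):

from keras.layers import Input
from keras.models import Model

x = Input(shape=(64, 64, 32))
y = separable_res_block_deep(x, nb_filters=64, strides=2, dilation=1, ix=0)
Model(x, y).summary()  # spatial dims halve, channels grow 32 -> 64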

Example 5: add_weightnorm_param_updates

# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_variable_shape [as alias]
import tensorflow as tf
from keras import backend as K
def add_weightnorm_param_updates(updates, new_V_param, new_g_param, W, V_scaler):
    ps = K.get_variable_shape(new_V_param)
    norm_axes = [i for i in range(len(ps) - 1)]

    # update W and V_scaler
    new_V_norm = tf.sqrt(tf.reduce_sum(tf.square(new_V_param), norm_axes))
    new_V_scaler = new_g_param / new_V_norm
    new_W = tf.reshape(new_V_scaler, [1] * len(norm_axes) + [-1]) * new_V_param
    updates.append(K.update(W, new_W))
    updates.append(K.update(V_scaler, new_V_scaler))


# data based initialization for a given Keras model 
Author: openai, Project: weightnorm, Lines: 15, Source: weightnorm.py
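A hypothetical sketch of what the helper appends (shapes made up; in practice new_V_param and new_g_param come from an optimizer step, as in Example 7 below):

from keras import backend as K

W = K.random_normal_variable((16, 8), mean=0.0, scale=0.05)
V_scaler = K.ones((8,))
new_V = K.random_normal_variable((16, 8), mean=0.0, scale=0.05)
new_g = K.ones((8,))

updates = []
add_weightnorm_param_updates(updates, new_V, new_g, W, V_scaler)
print(len(updates))  # -> 2: one K.update op for W, one for V_scaler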

Example 6: add_weightnorm_param_updates

# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_variable_shape [as alias]
import tensorflow as tf
from keras import backend as K
def add_weightnorm_param_updates(updates, new_V_param, new_g_param, W, V_scaler):
    ps = K.get_variable_shape(new_V_param)
    norm_axes = [i for i in range(len(ps) - 1)]
    # update W and V_scaler
    new_V_norm = tf.sqrt(tf.reduce_sum(tf.square(new_V_param), norm_axes))
    new_V_scaler = new_g_param / new_V_norm
    new_W = tf.reshape(new_V_scaler, [1] * len(norm_axes) + [-1]) * new_V_param
    updates.append(K.update(W, new_W))
    updates.append(K.update(V_scaler, new_V_scaler))
Author: wmylxmj, Project: Anime-Super-Resolution, Lines: 12, Source: optimizer.py

Example 7: get_updates

# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_variable_shape [as alias]
# Method of a keras.optimizers.Optimizer subclass; relies on the weight-norm
# helpers get_weightnorm_params_and_grads and add_weightnorm_param_updates
# shown above.
from keras import backend as K
def get_updates(self, loss, params):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]
        lr = self.lr
        if self.initial_decay > 0:
            lr *= (1. / (1. + self.decay * K.cast(self.iterations, K.floatx())))
        t = K.cast(self.iterations + 1, K.floatx())
        lr_t = lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))
        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]
        self.weights = [self.iterations] + ms + vs
        for p, g, m, v in zip(params, grads, ms, vs):
            # if a weight tensor (len > 1) use weight normalized parameterization
            # this is the only part changed w.r.t. keras.optimizers.Adam
            ps = K.get_variable_shape(p)
            if len(ps) > 1:
                # get weight normalization parameters
                V, V_norm, V_scaler, g_param, grad_g, grad_V = get_weightnorm_params_and_grads(p, g)
                # Adam containers for the 'g' parameter
                V_scaler_shape = K.get_variable_shape(V_scaler)
                m_g = K.zeros(V_scaler_shape)
                v_g = K.zeros(V_scaler_shape)
                # update g parameters
                m_g_t = (self.beta_1 * m_g) + (1. - self.beta_1) * grad_g
                v_g_t = (self.beta_2 * v_g) + (1. - self.beta_2) * K.square(grad_g)
                new_g_param = g_param - lr_t * m_g_t / (K.sqrt(v_g_t) + self.epsilon)
                self.updates.append(K.update(m_g, m_g_t))
                self.updates.append(K.update(v_g, v_g_t))
                # update V parameters
                m_t = (self.beta_1 * m) + (1. - self.beta_1) * grad_V
                v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(grad_V)
                new_V_param = V - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
                self.updates.append(K.update(m, m_t))
                self.updates.append(K.update(v, v_t))
                # if there are constraints we apply them to V, not W
                if getattr(p, 'constraint', None) is not None:
                    new_V_param = p.constraint(new_V_param)
                # wn param updates --> W updates
                add_weightnorm_param_updates(self.updates, new_V_param, new_g_param, p, V_scaler)
            else: # do optimization normally
                m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
                v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
                p_t = p - lr_t * m_t / (K.sqrt(v_t) + self.epsilon)
                self.updates.append(K.update(m, m_t))
                self.updates.append(K.update(v, v_t))
                new_p = p_t
                # apply constraints
                if getattr(p, 'constraint', None) is not None:
                    new_p = p.constraint(new_p)
                self.updates.append(K.update(p, new_p))
        return self.updates 
Author: wmylxmj, Project: Anime-Super-Resolution, Lines: 60, Source: optimizer.py
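Hypothetical usage, assuming the method above is part of an Adam subclass — named AdamWithWeightnorm here, after the class in the original OpenAI weightnorm codebase; the surrounding class definition is not shown on this page:

from keras.models import Sequential
from keras.layers import Dense

model = Sequential([Dense(10, input_shape=(20,))])
model.compile(optimizer=AdamWithWeightnorm(lr=1e-3), loss='mse')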

Example 8: get_updates

# Required import: from keras import backend [as alias]
# Or: from keras.backend import get_variable_shape [as alias]
# Method of a keras.optimizers.Optimizer subclass (the Eve optimizer);
# hyper-parameters such as beta_3, small_k, big_K, and epsilon are assumed
# to be set in the class's __init__ (not shown here).
from keras import backend as K
def get_updates(self, params, loss):
        grads = self.get_gradients(loss, params)
        self.updates = [K.update_add(self.iterations, 1)]

        lr = self.lr
        if self.inital_decay > 0:
            lr *= (1. / (1. + self.decay * self.iterations))

        t = self.iterations + 1
        lr_t = lr * K.sqrt(1. - K.pow(self.beta_2, t)) / (1. - K.pow(self.beta_1, t))

        shapes = [K.get_variable_shape(p) for p in params]
        ms = [K.zeros(shape) for shape in shapes]
        vs = [K.zeros(shape) for shape in shapes]
        f = K.variable(0)
        d = K.variable(1)
        self.weights = [self.iterations] + ms + vs + [f, d]

        cond = K.greater(t, K.variable(1))
        small_delta_t = K.switch(K.greater(loss, f), self.small_k + 1, 1. / (self.big_K + 1))
        big_delta_t = K.switch(K.greater(loss, f), self.big_K + 1, 1. / (self.small_k + 1))

        c_t = K.minimum(K.maximum(small_delta_t, loss / (f + self.epsilon)), big_delta_t)
        f_t = c_t * f
        r_t = K.abs(f_t - f) / (K.minimum(f_t, f))
        d_t = self.beta_3 * d + (1 - self.beta_3) * r_t

        f_t = K.switch(cond, f_t, loss)
        d_t = K.switch(cond, d_t, K.variable(1.))

        self.updates.append(K.update(f, f_t))
        self.updates.append(K.update(d, d_t))

        for p, g, m, v in zip(params, grads, ms, vs):
            m_t = (self.beta_1 * m) + (1. - self.beta_1) * g
            v_t = (self.beta_2 * v) + (1. - self.beta_2) * K.square(g)
            p_t = p - lr_t * m_t / (d_t * K.sqrt(v_t) + self.epsilon)

            self.updates.append(K.update(m, m_t))
            self.updates.append(K.update(v, v_t))

            new_p = p_t
            self.updates.append(K.update(p, new_p))
        return self.updates 
Author: tdeboissiere, Project: DeepLearningImplementations, Lines: 46, Source: Eve.py
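For reference, the feedback scheme this loop implements is the Eve variant of Adam: each Adam step is divided by a coefficient d_t that tracks the relative change of a clipped running objective f_t. In the notation of the code above:

r_t = \frac{\lvert f_t - f_{t-1} \rvert}{\min(f_t,\ f_{t-1})}, \qquad
d_t = \beta_3\, d_{t-1} + (1 - \beta_3)\, r_t, \qquad
p_t = p_{t-1} - \frac{\mathrm{lr}_t\, m_t}{d_t \sqrt{v_t} + \epsilon}

with f_t = c_t f_{t-1}, where c_t clips the ratio loss / f_{t-1} to [k+1, K+1] when the loss grows and to [1/(K+1), 1/(k+1)] when it shrinks (small_k and big_K in the code).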


Note: The keras.backend.get_variable_shape examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.