

Python initializations.normal method code examples

This article collects typical usage examples of the Python method keras.initializations.normal. If you are trying to work out what initializations.normal does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from its parent module, keras.initializations.


The following presents 15 code examples of the initializations.normal method, sorted by popularity by default.
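Before the examples, here is a minimal sketch of the Keras 1.x call they all rely on (keras.initializations exists only in Keras 1.x; Keras 2 replaced it with keras.initializers). The shape, scale, and variable name below are illustrative assumptions rather than values taken from any of the projects:

from keras import backend as K
from keras import initializations

# initializations.normal(shape, scale, name) returns a Keras variable whose
# entries are drawn from a zero-mean Gaussian with standard deviation `scale`.
w = initializations.normal((3, 4), scale=0.05, name="w")
print(K.eval(w).shape)  # -> (3, 4)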

Example 1: generator

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def generator(batch_size, gf_dim, ch, rows, cols):
    model = Sequential()

    model.add(
        Dense(gf_dim * 8 * rows[0] * cols[0], batch_input_shape=(batch_size, z_dim), name="g_h0_lin", init=normal))
    model.add(Reshape((rows[0], cols[0], gf_dim * 8)))
    model.add(BN(mode=2, axis=3, name="g_bn0", gamma_init=mean_normal, epsilon=1e-5))
    model.add(Activation("relu"))

    model.add(Deconvolution2D(gf_dim * 4, 5, 5, output_shape=(batch_size, rows[1], cols[1], gf_dim * 4), subsample=(2, 2),
                              name="g_h1", border_mode="same", init=normal))
    model.add(BN(mode=2, axis=3, name="g_bn1", gamma_init=mean_normal, epsilon=1e-5))
    model.add(Activation("relu"))

    model.add(Deconvolution2D(gf_dim * 2, 5, 5, output_shape=(batch_size, rows[2], cols[2], gf_dim * 2), subsample=(2, 2),
                              name="g_h2", border_mode="same", init=normal))
    model.add(BN(mode=2, axis=3, name="g_bn2", gamma_init=mean_normal, epsilon=1e-5))
    model.add(Activation("relu"))

    model.add(Deconvolution2D(ch, 5, 5, output_shape=(batch_size, rows[3], cols[3], ch), subsample=(2, 2), name="g_h3",
                              border_mode="same", init=normal))
    model.add(Activation("tanh"))

    return model 
Developer: mengli, Project: MachineLearning, Lines of code: 26, Source: vaegan_cifar.py

Example 2: encoder

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def encoder(batch_size, df_dim, ch, rows, cols):
    X = Input(batch_shape=(batch_size, rows[-1], cols[-1], ch))
    model = Convolution2D(df_dim, 5, 5, subsample=(2, 2), border_mode="same",
                          name="e_h0_conv", dim_ordering="tf", init=normal)(X)
    model = LeakyReLU(.2)(model)

    model = Convolution2D(df_dim * 2, 5, 5, subsample=(2, 2), border_mode="same",
                          name="e_h1_conv", dim_ordering="tf")(model)
    model = BN(mode=2, axis=3, name="e_bn1", gamma_init=mean_normal, epsilon=1e-5)(model)
    model = LeakyReLU(.2)(model)

    model = Convolution2D(df_dim * 4, 5, 5, subsample=(2, 2), name="e_h2_conv", border_mode="same",
                          dim_ordering="tf", init=normal)(model)
    model = BN(mode=2, axis=3, name="e_bn2", gamma_init=mean_normal, epsilon=1e-5)(model)
    model = LeakyReLU(.2)(model)
    model = Flatten()(model)

    mean = Dense(z_dim, name="e_h3_lin", init=normal)(model)
    logsigma = Dense(z_dim, name="e_h4_lin", activation="tanh", init=normal)(model)
    meansigma = Model([X], [mean, logsigma])
    return meansigma 
Developer: mengli, Project: MachineLearning, Lines of code: 24, Source: vaegan_cifar.py

Example 3: discriminator

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def discriminator(batch_size, df_dim, ch, rows, cols):
    X = Input(batch_shape=(batch_size, rows[-1], cols[-1], ch))
    model = Convolution2D(df_dim, 5, 5, subsample=(2, 2), border_mode="same",
                          name="d_h0_conv", dim_ordering="tf", init=normal)(X)
    model = LeakyReLU(.2)(model)

    model = Convolution2D(df_dim * 2, 5, 5, subsample=(2, 2), border_mode="same",
                          name="d_h1_conv", dim_ordering="tf", init=normal)(model)
    model = BN(mode=2, axis=3, name="d_bn1", gamma_init=mean_normal, epsilon=1e-5)(model)
    model = LeakyReLU(.2)(model)

    model = Convolution2D(df_dim * 4, 5, 5, subsample=(2, 2), border_mode="same",
                          name="d_h2_conv", dim_ordering="tf", init=normal)(model)

    dec = BN(mode=2, axis=3, name="d_bn3", gamma_init=mean_normal, epsilon=1e-5)(model)
    dec = LeakyReLU(.2)(dec)
    dec = Flatten()(dec)
    dec = Dense(1, name="d_h3_lin", init=normal)(dec)

    output = Model([X], [dec, model])

    return output 
Developer: mengli, Project: MachineLearning, Lines of code: 24, Source: vaegan_cifar.py

Example 4: generator

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def generator(batch_size, gf_dim, ch, rows, cols):

    model = Sequential()

    model.add(Dense(gf_dim*8*rows[0]*cols[0], batch_input_shape=(batch_size, z_dim), name="g_h0_lin", init=normal))
    model.add(Reshape((rows[0], cols[0], gf_dim*8)))
    model.add(BN(mode=2, axis=3, name="g_bn0", gamma_init=mean_normal, epsilon=1e-5))
    model.add(Activation("relu"))

    model.add(Deconv2D(gf_dim*4, 5, 5, subsample=(2, 2), name="g_h1", init=normal))
    model.add(BN(mode=2, axis=3, name="g_bn1", gamma_init=mean_normal, epsilon=1e-5))
    model.add(Activation("relu"))

    model.add(Deconv2D(gf_dim*2, 5, 5, subsample=(2, 2), name="g_h2", init=normal))
    model.add(BN(mode=2, axis=3, name="g_bn2", gamma_init=mean_normal, epsilon=1e-5))
    model.add(Activation("relu"))

    model.add(Deconv2D(gf_dim, 5, 5, subsample=(2, 2), name="g_h3", init=normal))
    model.add(BN(mode=2, axis=3, name="g_bn3", gamma_init=mean_normal, epsilon=1e-5))
    model.add(Activation("relu"))

    model.add(Deconv2D(ch, 5, 5, subsample=(2, 2), name="g_h4", init=normal))
    model.add(Activation("tanh"))

    return model 
Developer: commaai, Project: research, Lines of code: 27, Source: autoencoder.py

Example 5: mean_normal

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def mean_normal(shape, mean=1., scale=0.02, name=None):
    return K.variable(np.random.normal(loc=mean, scale=scale, size=shape), name=name) 
Developer: mengli, Project: MachineLearning, Lines of code: 4, Source: vaegan_cifar.py

Example 6: fetch_next_batch

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def fetch_next_batch(cifar):
    z = np.random.normal(0., 1., (batch_size, z_dim))  # normal dist for GAN
    x = cifar.train.next_batch(batch_size)
    return z, x[0] 
Developer: mengli, Project: MachineLearning, Lines of code: 6, Source: vaegan_cifar.py

Example 7: fetch_next_batch

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def fetch_next_batch(s):
    z = np.random.normal(0., 1., (batch_size, z_dim))  # normal dist for GAN
    x = s.train.next_batch(batch_size)
    return z, x[0] 
Developer: mengli, Project: MachineLearning, Lines of code: 6, Source: vaegan_svhn.py

Example 8: init_normal

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def init_normal(shape, name=None):
    return initializations.normal(shape, scale=0.01, name=name) 
Developer: hexiangnan, Project: neural_collaborative_filtering, Lines of code: 4, Source: MLP.py
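Examples 8, 9, 14, and 15 all follow the same pattern: wrap initializations.normal in a small function matching the (shape, name, ...) signature that Keras 1.x expects from an initializer, then pass that function as a layer's init argument. Below is a minimal usage sketch of that pattern; the layer type and sizes are illustrative assumptions and are not taken from MLP.py:

from keras import initializations
from keras.layers import Dense
from keras.models import Sequential

def init_normal(shape, name=None):
    # weights drawn from a zero-mean Gaussian with std 0.01, as in the example above
    return initializations.normal(shape, scale=0.01, name=name)

model = Sequential()
model.add(Dense(64, input_dim=32, init=init_normal))  # custom initializer passed via `init`
model.compile(loss="mse", optimizer="sgd")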

Example 9: weights_init

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def weights_init(shape, name=None, dim_ordering=None):
    return normal(shape, scale=0.01, name=name) 
Developer: robertomest, Project: neural-style-keras, Lines of code: 4, Source: model.py

Example 10: get_q_network

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def get_q_network(weights_path):
    model = Sequential()
    model.add(Dense(1024, init=lambda shape, name: normal(shape, scale=0.01, name=name), input_shape=(25112,)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(1024, init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('relu'))
    model.add(Dropout(0.2))
    model.add(Dense(6, init=lambda shape, name: normal(shape, scale=0.01, name=name)))
    model.add(Activation('linear'))
    adam = Adam(lr=1e-6)
    model.compile(loss='mse', optimizer=adam)
    if weights_path != "0":
        model.load_weights(weights_path)
    return model 
Developer: imatge-upc, Project: detection-2016-nipsws, Lines of code: 17, Source: reinforcement.py

Example 11: cleanup

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def cleanup(data):
  X = data[0][:64, -1]
  X = np.asarray([cv2.resize(x.transpose(1, 2, 0), (160, 80)) for x in X])
  X = X/127.5 - 1.
  Z = np.random.normal(0, 1, (X.shape[0], z_dim))
  return Z, X 
Developer: commaai, Project: research, Lines of code: 8, Source: autoencoder.py

Example 12: encoder

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def encoder(batch_size, df_dim, ch, rows, cols):

    X = Input(batch_shape=(batch_size, rows[-1], cols[-1], ch))
    model = Convolution2D(df_dim, 5, 5, subsample=(2, 2), border_mode="same",
                          name="e_h0_conv", dim_ordering="tf", init=normal)(X)
    model = LeakyReLU(.2)(model)

    model = Convolution2D(df_dim*2, 5, 5, subsample=(2, 2), border_mode="same",
                          name="e_h1_conv", dim_ordering="tf")(model)
    model = BN(mode=2, axis=3, name="e_bn1", gamma_init=mean_normal, epsilon=1e-5)(model)
    model = LeakyReLU(.2)(model)

    model = Convolution2D(df_dim*4, 5, 5, subsample=(2, 2), name="e_h2_conv", border_mode="same",
                          dim_ordering="tf", init=normal)(model)
    model = BN(mode=2, axis=3, name="e_bn2", gamma_init=mean_normal, epsilon=1e-5)(model)
    model = LeakyReLU(.2)(model)

    model = Convolution2D(df_dim*8, 5, 5, subsample=(2, 2), border_mode="same",
                          name="e_h3_conv", dim_ordering="tf", init=normal)(model)
    model = BN(mode=2, axis=3, name="e_bn3", gamma_init=mean_normal, epsilon=1e-5)(model)
    model = LeakyReLU(.2)(model)
    model = Flatten()(model)

    mean = Dense(z_dim, name="e_h3_lin", init=normal)(model)
    logsigma = Dense(z_dim, name="e_h4_lin", activation="tanh", init=normal)(model)
    meansigma = Model([X], [mean, logsigma])
    return meansigma 
Developer: commaai, Project: research, Lines of code: 30, Source: autoencoder.py

Example 13: cleanup

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def cleanup(data):
  X = data[0]
  sh = X.shape
  X = X.reshape((-1, 3, 160, 320))
  X = np.asarray([cv2.resize(x.transpose(1, 2, 0), (160, 80)) for x in X])
  X = X/127.5 - 1.
  X = X.reshape((sh[0], (time+out_leng)*4, 80, 160, 3))
  Z = np.random.normal(0, 1, (X.shape[0], z_dim))
  return Z, X[:, ::4] 
Developer: commaai, Project: research, Lines of code: 11, Source: transition.py

Example 14: gaussian_init

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def gaussian_init(shape, name=None, dim_ordering=None):
    return initializations.normal(shape, scale=0.001, name=name, dim_ordering=dim_ordering)
Developer: codedecde, Project: Luna2016-Lung-Nodule-Detection, Lines of code: 4, Source: LUNA_unet.py

Example 15: normal_init

# Required module: from keras import initializations [as alias]
# Or: from keras.initializations import normal [as alias]
def normal_init(shape, dim_ordering='tf', name=None):
    return normal(shape, scale=0.0000001, name=name, dim_ordering=dim_ordering) 
Developer: kuleshov, Project: audio-super-res, Lines of code: 4, Source: dnn.py


Note: The keras.initializations.normal examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; the source code copyright belongs to the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.