当前位置: 首页>>代码示例>>Python>>正文


Python Sequential.trainable方法代码示例

本文整理汇总了Python中keras.models.Sequential.trainable方法的典型用法代码示例。如果您正苦于以下问题:Python Sequential.trainable方法的具体用法?Python Sequential.trainable怎么用?Python Sequential.trainable使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在keras.models.Sequential的用法示例。


在下文中一共展示了Sequential.trainable方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: create_synapse

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import trainable [as 别名]
def create_synapse(): 
    """Build one adversarial "synapse": a gated autoencoder plus the
    auxiliary models used to train its parts against each other.

    Returns:
        Tuple of eight Keras models that share layers:
        (memory, projector1, projector2, pretender1, pretender2,
         gate_in, gate_out, operator).
    """
    # NOTE(review): SYN_SIZE, LATENT_DIM, NUM_SYNAPSES, OUT_SIZE and
    # NUM_FLAGS are assumed to be module-level constants defined
    # elsewhere in this file — confirm against the full source.
    memory_input = Input(shape=(SYN_SIZE,))

    # Separate inputs for the two adversarial "pretender" models below.
    pretender1_input = Input(shape=(SYN_SIZE,))
    pretender2_input = Input(shape=(LATENT_DIM,))

    # Encoder gate: SYN_SIZE -> LATENT_DIM. Dropout rate is
    # 1 - 1/NUM_SYNAPSES, i.e. it grows with the number of synapses.
    gate_in = Sequential()
    gate_in.add(Dense(SYN_SIZE, input_shape=(SYN_SIZE,), activation='relu'))
    gate_in.add(Dropout(1-1 / NUM_SYNAPSES))
    gate_in.add(Dense(LATENT_DIM, activation='hard_sigmoid'))

    # Decoder gate: LATENT_DIM -> SYN_SIZE, mirroring gate_in.
    gate_out = Sequential()
    gate_out.add(Dense(SYN_SIZE, input_shape=(LATENT_DIM,), activation='relu'))
    gate_out.add(Dropout(1-1 / NUM_SYNAPSES))
    gate_out.add(Dense(SYN_SIZE, activation='sigmoid'))

    # Task head: maps a reconstructed synapse state to OUT_SIZE outputs.
    task = Sequential()
    task.add(Dense(SYN_SIZE, input_shape=(SYN_SIZE,), activation='relu'))
    task.add(Dense(OUT_SIZE, activation='hard_sigmoid'))

    # Projector critics: judge latent codes (projector1) and
    # reconstructions (projector2), each emitting NUM_FLAGS sigmoids.
    projector1 = Sequential()
    projector1.add(Dense(SYN_SIZE, input_shape=(LATENT_DIM,), activation='tanh'))
    projector1.add(Dense(NUM_FLAGS, activation='sigmoid'))

    projector2 = Sequential()
    projector2.add(Dense(SYN_SIZE, input_shape=(SYN_SIZE,), activation='tanh'))
    projector2.add(Dense(NUM_FLAGS, activation='sigmoid'))

    # Autoencoder over the gates: input -> latent -> reconstruction.
    memory = Model(memory_input, gate_out(gate_in(memory_input)))
    memory.compile(optimizer=SGD(), loss="mean_squared_error")

    # Full pipeline: encode, decode, then apply the task head.
    operator = Model(memory_input, task(gate_out(gate_in(memory_input))))
    operator.compile(optimizer=SGD(), loss="binary_crossentropy")

    # Compile each projector for standalone training FIRST, then set
    # trainable=False so models built afterwards (the pretenders) see a
    # frozen projector — the usual Keras adversarial freezing idiom.
    projector1.compile(optimizer=Adadelta(), loss="mean_absolute_error")
    projector1.trainable = False

    projector2.compile(optimizer=Adadelta(), loss="mean_absolute_error")
    projector2.trainable = False

    # Pretenders: train the gates to fool the (frozen) projectors.
    pretender1 = Model(pretender1_input, projector1(gate_in(pretender1_input)))
    pretender1.compile(optimizer=RMSprop(), loss="binary_crossentropy")

    pretender2 = Model(pretender2_input, projector2(gate_out(pretender2_input)))
    pretender2.compile(optimizer=RMSprop(), loss="binary_crossentropy")

    return memory, projector1, projector2, pretender1, pretender2, gate_in, gate_out, operator
开发者ID:Suley,项目名称:recursive-symmetric-adversarial-ensemble,代码行数:49,代码来源:wakeup_cycle.py

示例2: test_nested_sequential_trainability

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import trainable [as 别名]
def test_nested_sequential_trainability():
    """Freezing a nested Sequential must remove its weights from the
    outer model's trainable set, and unfreezing must restore them."""
    in_dim, hidden_units, n_classes = 20, 10, 2

    child = Sequential()
    child.add(Dense(hidden_units, input_shape=(in_dim,)))

    parent = Sequential()
    parent.add(child)
    parent.add(Dense(n_classes))

    # Each Dense layer contributes a kernel and a bias: 2 weights apiece.
    assert len(parent.trainable_weights) == 4
    child.trainable = False
    assert len(parent.trainable_weights) == 2
    child.trainable = True
    assert len(parent.trainable_weights) == 4
开发者ID:BlakePrice,项目名称:keras,代码行数:19,代码来源:test_sequential_model.py

示例3: test_nested_model_trainability

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import trainable [as 别名]
def test_nested_model_trainability():
    """Trainability of a nested model must propagate to the enclosing
    model for every combination of Sequential and functional Model.

    The original version repeated the identical six-line assertion
    sequence four times; it is factored into one shared helper so each
    nesting scenario only describes how the pair is constructed.
    """

    def _check_trainability(inner_model, outer_model):
        # Shared assertion sequence: the outer model mirrors the inner
        # model's trainable weights; freezing the inner model — or just
        # its last layer, which holds all the weights here — must empty
        # the outer model's trainable set.
        assert outer_model.trainable_weights == inner_model.trainable_weights
        inner_model.trainable = False
        assert outer_model.trainable_weights == []
        inner_model.trainable = True
        inner_model.layers[-1].trainable = False
        assert outer_model.trainable_weights == []

    # a Sequential inside a Model
    inner_model = Sequential()
    inner_model.add(Dense(2, input_dim=1))
    x = Input(shape=(1,))
    outer_model = Model(x, inner_model(x))
    _check_trainability(inner_model, outer_model)

    # a Sequential inside a Sequential
    inner_model = Sequential()
    inner_model.add(Dense(2, input_dim=1))
    outer_model = Sequential()
    outer_model.add(inner_model)
    _check_trainability(inner_model, outer_model)

    # a Model inside a Model
    x = Input(shape=(1,))
    inner_model = Model(x, Dense(2)(x))
    x = Input(shape=(1,))
    outer_model = Model(x, inner_model(x))
    _check_trainability(inner_model, outer_model)

    # a Model inside a Sequential
    x = Input(shape=(1,))
    inner_model = Model(x, Dense(2)(x))
    outer_model = Sequential()
    outer_model.add(inner_model)
    _check_trainability(inner_model, outer_model)
开发者ID:BlakePrice,项目名称:keras,代码行数:55,代码来源:test_dynamic_trainability.py

示例4: get_model

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import trainable [as 别名]
def get_model():
    """Build the MNIST DCGAN triplet.

    Returns:
        (generator, discriminator, gan) — the generator maps a
        `randomDim`-dimensional noise vector to a 1x28x28 image
        (channels-first layout), the discriminator scores such images,
        and `gan` chains the two with the discriminator frozen so that
        training `gan` updates only the generator.
    """
    # Optimizer shared by all three models (DCGAN-paper hyperparameters).
    adam = Adam(lr=0.0002, beta_1=0.5)

    # Generator: noise -> 128x7x7 feature map -> two 2x upsampling +
    # conv stages -> single-channel 28x28 image in [-1, 1] (tanh).
    generator = Sequential()
    generator.add(Dense(128*7*7, input_dim=randomDim, kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    generator.add(LeakyReLU(0.2))
    generator.add(Reshape((128, 7, 7)))
    generator.add(UpSampling2D(size=(2, 2)))
    generator.add(Conv2D(64, kernel_size=(5, 5), padding='same'))
    generator.add(LeakyReLU(0.2))
    generator.add(UpSampling2D(size=(2, 2)))
    generator.add(Conv2D(1, kernel_size=(5, 5), padding='same', activation='tanh'))
    generator.compile(loss='binary_crossentropy', optimizer=adam)

    # Discriminator: two strided conv blocks with dropout, then a
    # single sigmoid real/fake score.
    discriminator = Sequential()
    discriminator.add(Conv2D(64, kernel_size=(5, 5), strides=(2, 2), padding='same', input_shape=(1, 28, 28), kernel_initializer=initializers.RandomNormal(stddev=0.02)))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Conv2D(128, kernel_size=(5, 5), strides=(2, 2), padding='same'))
    discriminator.add(LeakyReLU(0.2))
    discriminator.add(Dropout(0.3))
    discriminator.add(Flatten())
    discriminator.add(Dense(1, activation='sigmoid'))
    discriminator.compile(loss='binary_crossentropy', optimizer=adam)

    # Combined network: freeze the discriminator AFTER compiling it, so
    # it still trains standalone but stays fixed inside `gan` — the
    # standard Keras GAN freezing idiom.
    discriminator.trainable = False
    ganInput = Input(shape=(randomDim,))
    x = generator(ganInput)
    ganOutput = discriminator(x)
    gan = Model(inputs=ganInput, outputs=ganOutput)
    gan.compile(loss='binary_crossentropy', optimizer=adam)

    return generator, discriminator, gan
开发者ID:burakbayramli,项目名称:classnotes,代码行数:39,代码来源:mnist_dcgan.py

示例5: SGD

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import trainable [as 别名]
# NOTE(review): `decoder` (the discriminator network) is defined earlier
# in this file, outside the visible fragment.
sgd = SGD(lr=0.01, momentum=0.1)
decoder.compile(loss='binary_crossentropy', optimizer=sgd)

print("Setting up generator")
# Generator: 1-D input -> two 16-unit ReLU layers -> 1-D linear output.
generator = Sequential()
generator.add(Dense(16, input_dim=1, activation='relu'))
generator.add(Dense(16, activation='relu'))
generator.add(Dense(1, activation='linear'))

generator.compile(loss='binary_crossentropy', optimizer=sgd)

print("Setting up combined net")
# Combined net: generator followed by the decoder, with the decoder
# frozen first so training gen_dec only updates generator weights.
gen_dec = Sequential()
gen_dec.add(generator)
decoder.trainable = False
gen_dec.add(decoder)

'''def inverse_binary_crossentropy(y_true, y_pred):
    if theano.config.floatX == 'float64':
        epsilon = 1.0e-9
    else:
        epsilon = 1.0e-7
    y_pred = T.clip(y_pred, epsilon, 1.0 - epsilon)
    bce = T.nnet.binary_crossentropy(y_pred, y_true).mean(axis=-1)
    return -bce

gen_dec.compile(loss=inverse_binary_crossentropy, optimizer=sgd)'''

gen_dec.compile(loss='binary_crossentropy', optimizer=sgd)
开发者ID:agajews,项目名称:Neural-Network-Dev,代码行数:31,代码来源:simple_gan_nn_keras.py

示例6: Adam

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import trainable [as 别名]
# NOTE(review): `sampler`, `detector`, `lrelu`, `dim` and `mnist_dim` are
# defined earlier in this file, outside the visible fragment. This code
# finishes building the sampler (the GAN generator) and wires the
# adversarial pair together using the legacy Keras 0.x/Theano API.
sampler.add(lrelu())
sampler.add(Dense(dim))
sampler.add(lrelu())
sampler.add(Dense(mnist_dim))
sampler.add(Activation('sigmoid'))

# This is G itself!!! A compiled Theano function: noise -> fake sample.
sample_fake = theano.function([sampler.get_input()], sampler.get_output())

# We add the detector G on top, but it won't be adapted with this cost function.
# But here is a dirty hack: Theano shared variables on the GPU are the same for
# `detector` and `detector_no_grad`, so, when we adapt `detector` the values of
# `detector_no_grad` will be updated as well. But this only happens following the
# correct gradients.
# Don't you love pointers? Aliasing can be our friend sometimes.
detector.trainable = False
sampler.add(detector)

opt_g = Adam(lr=.001) # I got better results when
                      # detector's learning rate is faster
sampler.compile(loss='binary_crossentropy', optimizer=opt_g)

# Unfreeze and compile the detector for its own training updates.
opt_d = Adam(lr=.002)
detector.trainable = True
detector.compile(loss='binary_crossentropy', optimizer=opt_d)
# Smoke-test the detector's forward pass on a dummy batch of 3 samples.
detector.predict(np.ones((3, mnist_dim))).shape



nb_epoch = 1000 # it takes some time to get something recognizable.
开发者ID:nPellejero,项目名称:deepNet,代码行数:33,代码来源:gan.py

示例7: Sequential

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import trainable [as 别名]
 
# NOTE(review): `mid_dim`, `sample_dim`, `data_dim`, `dropout_rate` and
# `discriminator` are defined earlier in this file, outside the fragment.
# Generator: maps `sample_dim`-dimensional noise up through progressively
# wider tanh layers to a `data_dim` sigmoid output.
generator = Sequential()
# FIX: use integer floor division for layer sizes — under Python 3,
# `mid_dim / 4` is a float, which is not a valid Dense unit count.
generator.add(Dense(mid_dim // 4, input_dim=sample_dim, activation='tanh'))
generator.add(Dropout(dropout_rate))
generator.add(Dense(mid_dim // 2, activation='tanh'))
generator.add(Dropout(dropout_rate))
generator.add(Dense(mid_dim, activation='tanh'))
generator.add(Dropout(dropout_rate))
generator.add(Dense(data_dim, activation='sigmoid'))
# generate fake sample
sample_fake = K.function([generator.input, K.learning_phase()], generator.output)


# Freeze the discriminator while stacking it on the generator, so that
# training the combined model updates only the generator's weights.
discriminator.trainable = False
generator.add(Dropout(dropout_rate))
generator.add(discriminator)


opt_g = Adam(lr=.0001)
generator.compile(loss='binary_crossentropy', optimizer=opt_g)

opt_d = Adam(lr=.002) #the learning rate of discriminator should be faster
discriminator.trainable = True
discriminator.compile(loss='binary_crossentropy', optimizer=opt_d)


# Draw uniform noise and run it through the generator in test phase (0).
u_dist = numpy.random.uniform(-1, 1, (1000, sample_dim)).astype('float32')
gn_dist = sample_fake([u_dist, 0])
开发者ID:erichjzhang,项目名称:simplified-deeplearning,代码行数:31,代码来源:mnist_gan.py

示例8: range

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import trainable [as 别名]
# NOTE(review): `X_train`, `np`, `plt`, `generic_utils` and `dim` come
# from earlier in this file, outside the visible fragment.
# Every real training sample is labelled 1 ("real") for the discriminator.
y_train = [1 for i in range(X_train.shape[0])]

descriptive_model = Sequential()
generative_model = Sequential()

# Discriminator: 784 -> 250 -> 1, sigmoid activations throughout.
descriptive_model.add(Dense(input_dim=784, output_dim=250))
descriptive_model.add(Activation('sigmoid'))
descriptive_model.add(Dense(1))
descriptive_model.add(Activation('sigmoid'))

# Generator: 3000-d noise -> 1500 ReLU units -> 784 sigmoid pixels.
generative_model.add(Dense(input_dim=3000, output_dim=1500))
generative_model.add(Activation('relu'))
generative_model.add(Dense(784))
generative_model.add(Activation('sigmoid'))

# Freeze the discriminator before stacking it on the generator, so the
# combined model trains only the generator...
descriptive_model.trainable = False
generative_model.add(descriptive_model)
generative_model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True), metrics=['accuracy'])

# ...then unfreeze and compile the discriminator for its own updates.
descriptive_model.trainable = True
descriptive_model.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True), metrics=['accuracy'])

batch_size = 32
fig = plt.figure()
# Fixed noise vector re-used across epochs to visualize training progress.
fixed_noise = np.random.rand(1, dim).astype('float32')

progbar = generic_utils.Progbar(50)

def run(uh, nb_epoch, id, turnaround=False):
    for e in range(nb_epoch):
        acc0 = 0
开发者ID:AlexKaneRUS,项目名称:a3200-2016-ml,代码行数:33,代码来源:lab13.py

示例9: evaluate

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import trainable [as 别名]
def evaluate(generating_train_percentage, nb_epoch, dim, wanted_digit, save_model=False):
    """Adversarially train a generator to draw one MNIST digit.

    Builds a descriptive model (discriminator) and a generative model
    (generator), alternates their training across epochs, saves a
    rendering of a fixed noise sample after every epoch into a directory
    named after ``wanted_digit``, and stops early once the discriminator
    is fooled at chance level (accuracy <= 0.5).

    Args:
        generating_train_percentage: fraction of epochs devoted to the
            generator; internally inverted to "every N-th epoch".
        nb_epoch: maximum number of training epochs.
        dim: dimensionality of the generator's noise input.
        wanted_digit: MNIST class (0-9) to learn; also the output
            directory name.
        save_model: when True, persist the generator's weights and JSON
            structure into the digit's directory at the end.
    """
    (X_train, y_train), _ = mnist.load_data()

    # Flatten each 28x28 image into a 784-vector and scale into [0, 1].
    X_train = numpy.reshape(X_train, (X_train.shape[0], numpy.multiply(X_train.shape[1], X_train.shape[2])))
    X_train = X_train.astype('float32')
    X_train /= float(255)
    # Keep only the requested digit — vectorized boolean mask instead of
    # the original Python-level filtering loop.
    wanted_digits = X_train[y_train == wanted_digit]

    desc = Sequential()
    gen = Sequential()

    # Discriminator: 784 -> 250 -> 1, sigmoid activations.
    desc.add(Dense(input_dim=784, output_dim=250))
    desc.add(Activation('sigmoid'))
    desc.add(Dense(1))
    desc.add(Activation('sigmoid'))

    # Generator: 3000-d noise -> 1500 ReLU units -> 784 sigmoid pixels.
    # NOTE(review): input_dim is hard-coded to 3000 while noise batches
    # below use `dim` — callers must pass dim == 3000; confirm.
    gen.add(Dense(input_dim=3000, output_dim=1500))
    gen.add(Activation('relu'))
    gen.add(Dense(784))
    gen.add(Activation('sigmoid'))

    # Freeze the discriminator before stacking it on the generator so
    # the combined model only updates generator weights...
    desc.trainable = False
    gen.add(desc)
    gen.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True),
                metrics=['accuracy'])

    # ...then unfreeze and compile the discriminator for its own updates.
    desc.trainable = True
    desc.compile(loss='binary_crossentropy', optimizer=SGD(lr=0.001, momentum=0.9, nesterov=True),
                 metrics=['accuracy'])

    batch_size = 32
    fig = plt.figure()
    fixed_noise = numpy.random.rand(1, dim).astype('float32')
    # Invert the fraction into "train the generator every N-th epoch".
    generating_train_percentage = int(1 / generating_train_percentage)
    if not os.path.exists(str(wanted_digit) + "/"):
        os.makedirs(str(wanted_digit))
    for epoch in range(nb_epoch):  # renamed from `iter`, which shadowed the builtin
        gen_acc = 0
        desc_acc = 0
        gen_count = 0
        desc_count = 0
        for (first, last) in zip(range(0, wanted_digits.shape[0] - batch_size, batch_size),
                                 range(batch_size, wanted_digits.shape[0], batch_size)):
            noise_batch = numpy.random.rand(batch_size, dim).astype('float32')
            fake_samples = passThroughGenerativeModel(noise_batch, gen)
            true_n_fake = numpy.concatenate([wanted_digits[first: last],
                                             fake_samples], axis=0)
            y_batch = numpy.concatenate([numpy.ones((batch_size, 1)),
                                         numpy.zeros((batch_size, 1))], axis=0).astype('float32')
            # Generator batches are labelled "real" (1) to push the
            # frozen discriminator towards accepting fakes.
            all_fake = numpy.ones((batch_size, 1)).astype('float32')
            if epoch % generating_train_percentage == 0 and epoch != 0:
                gen_acc += gen.train_on_batch(noise_batch, all_fake)[1]
                gen_count += 1
            else:
                desc_acc += desc.train_on_batch(true_n_fake, y_batch)[1]
                desc_count += 1
        if gen_count != 0:
            gen_acc /= float(gen_count)
            print("Generative accuracy %s" % gen_acc)
        if desc_count != 0:
            desc_acc /= float(desc_count)
            print("Descriptive accuracy %s" % desc_acc)

        # Render the fixed noise sample to track training progress.
        fixed_fake = passThroughGenerativeModel(fixed_noise, gen)
        fixed_fake *= 255
        plt.clf()
        plt.imshow(fixed_fake.reshape((28, 28)), cmap='gray')
        plt.axis('off')
        fig.canvas.draw()
        plt.savefig(str(wanted_digit) + "/Iter " + str(epoch) + '.png')
        # Stop once the discriminator can no longer beat chance.
        if desc_count != 0 and desc_acc <= 0.5:
            break
    if save_model:
        gen.save_weights(str(wanted_digit) + "/genModel.weights")
        # Context manager closes the handle — the original
        # `open(...).write(...)` leaked it.
        with open(str(wanted_digit) + "/genModel.structure", "w") as structure_file:
            structure_file.write(gen.to_json())

示例10: ImageDataGenerator

# 需要导入模块: from keras.models import Sequential [as 别名]
# 或者: from keras.models.Sequential import trainable [as 别名]
# NOTE(review): `model` (a Sequential under construction), `X_train`,
# `y_train`, `X_val`, `y_val`, `batch_size` and `epochs` are defined
# earlier in this file, outside the visible fragment.

# Deconv 6
model.add(Conv2DTranspose(16, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Deconv6'))

# Final layer - 3 filters, producing a 3-channel output image
model.add(Conv2DTranspose(3, (3, 3), padding='valid', strides=(1,1), activation = 'relu', name = 'Final'))

### End of network ###


# Using a generator to help the model use less data
# Channel shifts help with shadows slightly
datagen = ImageDataGenerator(channel_shift_range=0.2)
datagen.fit(X_train)

# Compiling and training the model
model.compile(optimizer='Adam', loss='mean_squared_error')
model.fit_generator(datagen.flow(X_train, y_train, batch_size=batch_size), steps_per_epoch=len(X_train)/batch_size,
epochs=epochs, verbose=1, validation_data=(X_val, y_val))

# Freeze layers since training is done
model.trainable = False
model.compile(optimizer='Adam', loss='mean_squared_error')

# Save model architecture and weights
model.save('Model.h5')

# Show summary of model
#model.summary()

开发者ID:victorphd,项目名称:LaneDetect,代码行数:30,代码来源:TrainDataSet.py


注:本文中的keras.models.Sequential.trainable方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。