This article collects typical usage examples of the Python method video.Video.append. If you are unsure what Video.append does, how to call it, or what real-world code that uses it looks like, the curated examples below should help. You may also want to look further into the containing class, video.Video.
Seven code examples of Video.append are shown below, listed roughly by popularity.
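Note that these snippets are excerpts from larger training scripts: besides `from video import Video`, they assume imports along the lines of `import os`, `import pickle`, `import numpy as np`, `import scipy as sp`, and `import deeppy as dp`, plus project-local helpers (`architectures`, `output`, `one_hot`, `img_tile`, `random_walk`, and so on). The `video.Video` class itself is project-specific and not shown on this page; as a rough mental model only (an assumption, not the actual implementation), it behaves like an incremental frame writer built on imageio's ffmpeg backend:

import imageio  # writing .mp4 requires the imageio-ffmpeg plugin

class Video(object):
    """Hypothetical stand-in for video.Video -- an incremental frame writer."""

    def __init__(self, filepath, fps=24):
        # The container format is inferred from the file extension.
        self.writer = imageio.get_writer(filepath, fps=fps)

    def append(self, frame):
        # frame: H x W x 3 uint8 array, e.g. the tiled image grid
        # returned by dp.misc.img_tile().
        self.writer.append_data(frame)

    def close(self):
        self.writer.close()

Every example below follows the same pattern: build a Video per output file, then call append() once per epoch (or per walk step) with a tiled image of the current reconstructions or samples.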
Example 1: walk
# Required imports: from video import Video [as alias]
# Or: from video.Video import append [as alias]
def walk(model, samples_z, out_dir, inv_transform=None):
    print('Outputting walk video')
    model.phase = 'test'
    walk_video = Video(os.path.join(out_dir, 'walk.mp4'))
    for z in random_walk(samples_z, 150, n_dir_steps=10, change_fraction=0.1):
        x = model.decode(z)
        if inv_transform is not None:
            x = inv_transform(x)
        walk_video.append(dp.misc.img_tile(x))
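A quick usage sketch (hypothetical values; `model`, `n_hidden`, and `img_inverse_transform` would come from the surrounding training script):

# Hypothetical call -- assumes a trained model exposing encode/decode.
samples_z = np.random.normal(size=(64, n_hidden)).astype(dp.float_)
walk(model, samples_z, out_dir='out/experiment',
     inv_transform=img_inverse_transform)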
Example 2: train
# Required imports: from video import Video [as alias]
# Or: from video.Video import append [as alias]
def train(
    model,
    output_dir,
    train_feed,
    test_feed,
    lr_start=0.01,
    lr_stop=0.00001,
    lr_gamma=0.75,
    n_epochs=150,
    gan_margin=0.35,
):
    n_hidden = model.latent_encoder.n_out
    # For plotting
    original_x = np.array(test_feed.batches().next()[0])
    samples_z = np.random.normal(size=(len(original_x), n_hidden))
    samples_z = samples_z.astype(dp.float_)
    recon_video = Video(os.path.join(output_dir, "convergence_recon.mp4"))
    sample_video = Video(os.path.join(output_dir, "convergence_samples.mp4"))
    original_x_ = img_inverse_transform(original_x)
    sp.misc.imsave(os.path.join(output_dir, "examples.png"),
                   dp.misc.img_tile(original_x_))
    # Train network
    learn_rule = dp.RMSProp()
    annealer = dp.GammaAnnealer(lr_start, lr_stop, n_epochs, gamma=lr_gamma)
    trainer = aegan.GradientDescent(model, train_feed, learn_rule,
                                    margin=gan_margin)
    try:
        for e in range(n_epochs):
            model.phase = "train"
            model.setup(*train_feed.shapes)
            # Scale the annealed learning rate by the batch size
            learn_rule.learn_rate = annealer.value(e) / train_feed.batch_size
            trainer.train_epoch()
            # Record reconstructions and samples for the convergence videos
            model.phase = "test"
            original_z = model.encode(original_x)
            recon_x = model.decode(original_z)
            samples_x = model.decode(samples_z)
            recon_x = img_inverse_transform(recon_x)
            samples_x = img_inverse_transform(samples_x)
            recon_video.append(dp.misc.img_tile(recon_x))
            sample_video.append(dp.misc.img_tile(samples_x))
    except KeyboardInterrupt:
        pass
    model.phase = "test"
    n_examples = 100
    test_feed.reset()
    original_x = np.array(test_feed.batches().next()[0])[:n_examples]
    samples_z = np.random.normal(size=(n_examples, n_hidden))
    output.samples(model, samples_z, output_dir, img_inverse_transform)
    output.reconstructions(model, original_x, output_dir, img_inverse_transform)
    original_z = model.encode(original_x)
    output.walk(model, original_z, output_dir, img_inverse_transform)
    return model
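One detail worth noting in the loop above: the annealed learning rate is divided by `train_feed.batch_size` before being assigned to `learn_rule.learn_rate`. This suggests, though the snippet alone does not prove it, that deeppy accumulates gradients as sums over the batch, so the division keeps the effective per-example step size independent of the batch size. The same pattern recurs in the examples below.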
Example 3: run
# Required imports: from video import Video [as alias]
# Or: from video.Video import append [as alias]
def run():
    n_hidden = 64
    ae_kind = 'variational'
    lr_start = 0.01
    lr_stop = 0.0001
    lr_gamma = 0.75
    n_epochs = 150
    epoch_size = 250
    batch_size = 64
    experiment_name = 'mnist_ae'
    experiment_name += '_nhidden%i' % n_hidden
    out_dir = os.path.join('out', experiment_name)
    arch_path = os.path.join(out_dir, 'arch.pickle')
    # Set start_arch_path = arch_path to resume from a saved model.
    start_arch_path = None
    print('experiment_name', experiment_name)
    print('start_arch_path', start_arch_path)
    print('arch_path', arch_path)
    # Setup network
    if start_arch_path is None:
        print('Creating new model')
        # Keep the discriminator so it can be pickled below.
        encoder, decoder, discriminator = architectures.mnist()
        if ae_kind == 'variational':
            latent_encoder = architectures.vae_latent_encoder(n_hidden)
        elif ae_kind == 'adversarial':
            latent_encoder = architectures.aae_latent_encoder(n_hidden)
    else:
        print('Starting from %s' % start_arch_path)
        with open(start_arch_path, 'rb') as f:
            decoder, discriminator = pickle.load(f)
        # Note: resuming only restores decoder/discriminator; encoder and
        # latent_encoder would still need to be constructed for the model.
    model = ae.Autoencoder(
        encoder=encoder,
        latent_encoder=latent_encoder,
        decoder=decoder,
    )
    model.recon_error = ae.GaussianNegLogLikelihood()
    # Fetch dataset
    dataset = dp.dataset.MNIST()
    x_train, y_train, x_test, y_test = dataset.arrays()
    x_train = mnist_transform(x_train)
    x_test = mnist_transform(x_test)
    # Prepare network inputs
    train_input = dp.Input(x_train, batch_size, epoch_size)
    test_input = dp.Input(x_test, batch_size)
    # Plotting
    n_examples = 64
    batch = test_input.batches().next()
    original_x = np.array(batch['x'])[:n_examples]
    samples_z = np.random.normal(size=(n_examples, n_hidden))
    samples_z = samples_z.astype(dp.float_)
    # Train network
    learn_rule = dp.RMSProp()
    trainer = dp.GradientDescent(model, train_input, learn_rule)
    annealer = dp.GammaAnnealer(lr_start, lr_stop, n_epochs, gamma=lr_gamma)
    try:
        recon_video = Video(os.path.join(out_dir, 'convergence_recon.mp4'))
        sample_video = Video(os.path.join(out_dir, 'convergence_samples.mp4'))
        sp.misc.imsave(os.path.join(out_dir, 'examples.png'),
                       dp.misc.img_tile(mnist_inverse_transform(original_x)))
        for e in range(n_epochs):
            model.phase = 'train'
            model.setup(**train_input.shapes)
            learn_rule.learn_rate = annealer.value(e) / batch_size
            loss = trainer.train_epoch()
            model.phase = 'test'
            original_z = model.encode(original_x)
            recon_x = mnist_inverse_transform(model.decode(original_z))
            samples_x = mnist_inverse_transform(model.decode(samples_z))
            recon_video.append(dp.misc.img_tile(recon_x))
            sample_video.append(dp.misc.img_tile(samples_x))
            likelihood = model.likelihood(test_input)
            print('epoch %i Train loss:%.4f Test likelihood:%.4f' %
                  (e, np.mean(loss), np.mean(likelihood)))
    except KeyboardInterrupt:
        pass
    print('Saving model to disk')
    with open(arch_path, 'wb') as f:
        pickle.dump((decoder, discriminator), f)
    model.phase = 'test'
    n_examples = 100
    samples_z = np.random.normal(size=(n_examples, n_hidden)).astype(dp.float_)
    output.samples(model, samples_z, out_dir, mnist_inverse_transform)
    output.walk(model, samples_z, out_dir, mnist_inverse_transform)
Example 4: run
# Required imports: from video import Video [as alias]
# Or: from video.Video import append [as alias]
def run():
    n_hidden = 128
    real_vs_gen_weight = 0.75
    gan_margin = 0.3
    lr_start = 0.04
    lr_stop = 0.0001
    lr_gamma = 0.75
    n_epochs = 150
    epoch_size = 250
    batch_size = 64
    experiment_name = 'mnist_gan'
    experiment_name += '_nhidden%i' % n_hidden
    out_dir = os.path.join('out', experiment_name)
    arch_path = os.path.join(out_dir, 'arch.pickle')
    # Set start_arch_path = arch_path to resume from a saved model.
    start_arch_path = None
    print('experiment_name', experiment_name)
    print('start_arch_path', start_arch_path)
    print('arch_path', arch_path)
    # Setup network
    if start_arch_path is None:
        print('Creating new model')
        _, decoder, discriminator = architectures.mnist()
    else:
        print('Starting from %s' % start_arch_path)
        with open(start_arch_path, 'rb') as f:
            decoder, discriminator = pickle.load(f)
    model = gan.GAN(
        n_hidden=n_hidden,
        generator=decoder,
        discriminator=discriminator,
        real_vs_gen_weight=real_vs_gen_weight,
    )
    # Fetch dataset
    dataset = dp.dataset.MNIST()
    x_train, y_train, x_test, y_test = dataset.arrays()
    x_train = mnist_transform(x_train)
    x_test = mnist_transform(x_test)
    # Prepare network inputs
    train_input = dp.Input(x_train, batch_size, epoch_size)
    test_input = dp.Input(x_test, batch_size)
    # Plotting
    n_examples = 64
    batch = test_input.batches().next()
    original_x = np.array(batch['x'])[:n_examples]
    samples_z = np.random.normal(size=(n_examples, n_hidden))
    samples_z = samples_z.astype(dp.float_)
    # Train network
    learn_rule = dp.RMSProp()
    trainer = gan.GradientDescent(model, train_input, learn_rule,
                                  margin=gan_margin)
    annealer = dp.GammaAnnealer(lr_start, lr_stop, n_epochs, gamma=lr_gamma)
    try:
        sample_video = Video(os.path.join(out_dir, 'convergence_samples.mp4'))
        sp.misc.imsave(os.path.join(out_dir, 'examples.png'),
                       dp.misc.img_tile(mnist_inverse_transform(original_x)))
        for e in range(n_epochs):
            model.phase = 'train'
            model.setup(**train_input.shapes)
            learn_rule.learn_rate = annealer.value(e) / batch_size
            trainer.train_epoch()
            model.phase = 'test'
            samples_x = mnist_inverse_transform(model.decode(samples_z))
            sample_video.append(dp.misc.img_tile(samples_x))
    except KeyboardInterrupt:
        pass
    print('Saving model to disk')
    with open(arch_path, 'wb') as f:
        pickle.dump((decoder, discriminator), f)
    model.phase = 'test'
    n_examples = 100
    samples_z = np.random.normal(size=(n_examples, n_hidden)).astype(dp.float_)
    output.samples(model, samples_z, out_dir, mnist_inverse_transform)
    output.walk(model, samples_z, out_dir, mnist_inverse_transform)
Example 5: run
# Required imports: from video import Video [as alias]
# Or: from video.Video import append [as alias]
def run():
    mode = 'vaegan'
    vae_grad_scale = 0.0001
    kld_weight = 1.0
    z_gan_prop = False
    experiment_name = mode
    experiment_name += '_scale%.1e' % vae_grad_scale
    experiment_name += '_kld%.2f' % kld_weight
    if z_gan_prop:
        experiment_name += '_zprop'
    filename = 'savestates/lfw_' + experiment_name + '.pickle'
    # Set in_filename = filename to resume from a saved state.
    in_filename = None
    print('experiment_name', experiment_name)
    print('in_filename', in_filename)
    print('filename', filename)
    # Fetch dataset
    x_train = lfw.lfw_imgs(alignment='deepfunneled', size=64, crop=50,
                           shuffle=True)
    img_shape = x_train.shape[1:]
    # Normalize pixel intensities
    scaler = dp.UniformScaler(low=-1, high=1)
    x_train = scaler.fit_transform(x_train)
    # Setup network
    if in_filename is None:
        print('Creating new model')
        expressions = model_expressions(img_shape)
    else:
        print('Starting from %s' % in_filename)
        with open(in_filename, 'rb') as f:
            expressions = pickle.load(f)
    encoder, sampler, generator, discriminator = expressions
    model = vaegan.VAEGAN(
        encoder=encoder,
        sampler=sampler,
        generator=generator,
        discriminator=discriminator,
        mode=mode,
        vae_grad_scale=vae_grad_scale,
        kld_weight=kld_weight,
    )
    # Prepare network inputs
    batch_size = 64
    train_input = dp.Input(x_train, batch_size=batch_size, epoch_size=250)
    # Plotting
    n_examples = 100
    examples = x_train[:n_examples]
    samples_z = np.random.normal(size=(n_examples, model.sampler.n_hidden))
    samples_z = samples_z.astype(dp.float_)
    recon_video = Video('plots/lfw_' + experiment_name + '_reconstruction.mp4')
    sample_video = Video('plots/lfw_' + experiment_name + '_samples.mp4')
    sp.misc.imsave('lfw_examples.png', img_tile(dp.misc.to_b01c(examples)))

    def plot():
        model.phase = 'test'
        examples_z = model.embed(examples)
        reconstructed = clip_range(model.reconstruct(examples_z))
        recon_video.append(img_tile(dp.misc.to_b01c(reconstructed)))
        # Sample from a Gaussian fitted to the embedded training set
        z = model.embed(x_train)
        z_mean = np.mean(z, axis=0)
        z_std = np.std(z, axis=0)
        model.hidden_std = z_std
        samples_z = np.random.multivariate_normal(mean=z_mean,
                                                  cov=np.diagflat(z_std),
                                                  size=(n_examples,))
        samples_z = samples_z.astype(dp.float_)
        samples = clip_range(model.reconstruct(samples_z))
        sample_video.append(img_tile(dp.misc.to_b01c(samples)))
        model.phase = 'train'
        model.setup(**train_input.shapes)

    # Train network
    runs = [
        (150, dp.RMSProp(learn_rate=0.05)),
        (250, dp.RMSProp(learn_rate=0.03)),
        (100, dp.RMSProp(learn_rate=0.01)),
        (15, dp.RMSProp(learn_rate=0.005)),
    ]
    try:
        for n_epochs, learn_rule in runs:
            if mode == 'vae':
                vaegan.train(model, train_input, learn_rule, n_epochs,
                             epoch_callback=plot)
            else:
                vaegan.margin_train(model, train_input, learn_rule, n_epochs,
                                    epoch_callback=plot)
    except KeyboardInterrupt:
# ... the rest of this code is omitted ...
Example 6: run
# Required imports: from video import Video [as alias]
# Or: from video.Video import append [as alias]
def run():
    mode = 'vaegan'
    vae_grad_scale = 0.025
    experiment_name = mode + 'scale_%.5f' % vae_grad_scale
    filename = 'savestates/mnist_' + experiment_name + '.pickle'
    # Set in_filename = filename to resume from a saved state.
    in_filename = None
    print('experiment_name', experiment_name)
    print('in_filename', in_filename)
    print('filename', filename)
    # Fetch dataset
    dataset = dp.dataset.MNIST()
    x_train, y_train, x_test, y_test = dataset.arrays(dp_dtypes=True)
    n_classes = dataset.n_classes
    img_shape = x_train.shape[1:]
    # Normalize pixel intensities
    scaler = dp.UniformScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)
    y_train = one_hot(y_train, n_classes).astype(dp.float_)
    y_test = one_hot(y_test, n_classes).astype(dp.float_)
    x_train = np.reshape(x_train, (x_train.shape[0], -1))
    x_test = np.reshape(x_test, (x_test.shape[0], -1))
    # Setup network
    if in_filename is None:
        print('Creating new model')
        expressions = model_expressions(img_shape)
    else:
        print('Starting from %s' % in_filename)
        with open(in_filename, 'rb') as f:
            expressions = pickle.load(f)
    encoder, sampler, generator, discriminator = expressions
    model = cond_vaegan.ConditionalVAEGAN(
        encoder=encoder,
        sampler=sampler,
        generator=generator,
        discriminator=discriminator,
        mode=mode,
        reconstruct_error=expr.nnet.BinaryCrossEntropy(),
        vae_grad_scale=vae_grad_scale,
    )
    # Prepare network inputs
    batch_size = 128
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size,
                                     epoch_size=250)
    # Plotting
    n_examples = 100
    examples = x_test[:n_examples]
    examples_y = y_test[:n_examples]
    samples_z = np.random.normal(size=(n_examples, model.sampler.n_hidden))
    samples_z = samples_z.astype(dp.float_)
    samples_y = ((np.arange(n_examples) // 10) % n_classes)
    samples_y = one_hot(samples_y, n_classes).astype(dp.float_)
    recon_video = Video('plots/mnist_' + experiment_name +
                        '_reconstruction.mp4')
    sample_video = Video('plots/mnist_' + experiment_name + '_samples.mp4')
    sp.misc.imsave('plots/mnist_examples.png',
                   img_tile(to_b01c(examples, img_shape)))

    def plot():
        model.phase = 'test'
        model.sampler.batch_size = 100
        examples_z = model.embed(examples, examples_y)
        examples_recon = model.reconstruct(examples_z, examples_y)
        recon_video.append(img_tile(to_b01c(examples_recon, img_shape)))
        samples = model.reconstruct(samples_z, samples_y)
        sample_video.append(img_tile(to_b01c(samples, img_shape)))
        model.setup(**train_input.shapes)
        model.phase = 'train'

    # Train network
    runs = [
        (75, dp.RMSProp(learn_rate=0.075)),
        (25, dp.RMSProp(learn_rate=0.05)),
        (5, dp.RMSProp(learn_rate=0.01)),
        (5, dp.RMSProp(learn_rate=0.005)),
    ]
    try:
        for n_epochs, learn_rule in runs:
            if mode == 'vae':
                vaegan.train(model, train_input, learn_rule, n_epochs,
                             epoch_callback=plot)
            else:
                vaegan.margin_train(model, train_input, learn_rule, n_epochs,
                                    epoch_callback=plot)
    except KeyboardInterrupt:
        pass
    raw_input('\n\nsave model to %s?\n' % filename)
    with open(filename, 'wb') as f:
        expressions = encoder, sampler, generator, discriminator
# ... the rest of this code is omitted ...
Example 7: run
# Required imports: from video import Video [as alias]
# Or: from video.Video import append [as alias]
def run():
    mode = 'gan'
    experiment_name = mode + '_stride_local_discrimination'
    filename = 'savestates/cifar_cond_' + experiment_name + '.pickle'
    # Set in_filename = filename to resume from a saved state.
    in_filename = None
    print('experiment_name', experiment_name)
    print('in_filename', in_filename)
    print('filename', filename)
    # Fetch dataset
    dataset = dp.dataset.CIFAR10()
    x_train, y_train, x_test, y_test = dataset.arrays(dp_dtypes=True)
    n_classes = dataset.n_classes
    # Normalize pixel intensities
    scaler = dp.StandardScaler()
    x_train = scaler.fit_transform(x_train)
    x_test = scaler.transform(x_test)
    y_train = one_hot(y_train, n_classes).astype(dp.float_)
    y_test = one_hot(y_test, n_classes).astype(dp.float_)
    # Setup network
    if in_filename is None:
        print('Creating new model')
        img_shape = x_train.shape[1:]
        expressions = model_expressions(img_shape)
    else:
        print('Starting from %s' % in_filename)
        with open(in_filename, 'rb') as f:
            expressions = pickle.load(f)
    encoder, sampler, generator, discriminator = expressions
    model = cond_vaegan.ConditionalVAEGAN(
        encoder=encoder,
        sampler=sampler,
        generator=generator,
        discriminator=discriminator,
        mode=mode,
    )
    # Prepare network inputs
    batch_size = 64
    train_input = dp.SupervisedInput(x_train, y_train, batch_size=batch_size,
                                     epoch_size=150)
    # Plotting
    n_examples = 100
    examples = x_test[:n_examples]
    examples_y = y_test[:n_examples]
    samples_z = np.random.normal(size=(n_examples, model.sampler.n_hidden))
    samples_z = samples_z.astype(dp.float_)
    samples_y = ((np.arange(n_examples) // 10) % n_classes)
    samples_y = one_hot(samples_y, n_classes).astype(dp.float_)
    recon_video = Video('plots/cifar_' + experiment_name +
                        '_reconstruction.mp4')
    sample_video = Video('plots/cifar_' + experiment_name + '_samples.mp4')
    sp.misc.imsave('cifar_examples.png', img_tile(dp.misc.to_b01c(examples)))

    def plot():
        examples_z = model.embed(examples, examples_y)
        examples_recon = model.reconstruct(examples_z, examples_y)
        examples_recon = clip_range(examples_recon)
        recon_video.append(img_tile(dp.misc.to_b01c(examples_recon)))
        samples = clip_range(model.reconstruct(samples_z, samples_y))
        sample_video.append(img_tile(dp.misc.to_b01c(samples)))
        model.setup(**train_input.shapes)

    # Train network
    runs = [
        # (10, dp.RMSProp(learn_rate=0.08)),
        # (25, dp.RMSProp(learn_rate=0.12)),
        # (100, dp.RMSProp(learn_rate=0.1)),
        (150, dp.RMSProp(learn_rate=0.075)),
        (150, dp.RMSProp(learn_rate=0.06)),
        (150, dp.RMSProp(learn_rate=0.05)),
        (150, dp.RMSProp(learn_rate=0.04)),
        (25, dp.RMSProp(learn_rate=0.01)),
    ]
    try:
        for n_epochs, learn_rule in runs:
            if mode == 'vae':
                vaegan.train(model, train_input, learn_rule, n_epochs,
                             epoch_callback=plot)
            else:
                vaegan.margin_train(model, train_input, learn_rule, n_epochs,
                                    epoch_callback=plot)
    except KeyboardInterrupt:
        pass
    raw_input('\n\nsave model to %s?\n' % filename)
    with open(filename, 'wb') as f:
        expressions = encoder, sampler, generator, discriminator
        pickle.dump(expressions, f)
    print('Generating latent space video')
    walk_video = Video('plots/cifar_' + experiment_name + '_walk.mp4')
    for z in random_walk(samples_z, 500, step_std=0.15):
        samples = clip_range(model.reconstruct(z, samples_y))
# ... the rest of this code is omitted ...