This article collects typical usage examples of the Python method model.Model.decode_yz_x. If you are wondering exactly how to use Model.decode_yz_x, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples of the containing class,
model.Model.
The following shows 3 code examples of Model.decode_yz_x, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
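For orientation, here is a minimal sketch of the call pattern the examples rely on. The shapes and the ndim_y / ndim_z attributes are inferred from the examples below, not from any documented API:

import numpy as np
from model import Model

model = Model()
assert model.load("model.hdf5")  # load pretrained weights

# decode_yz_x maps a categorical code y (one-hot) and a continuous latent
# code z back to image space. Shapes inferred from the examples below:
y = np.identity(model.ndim_y, dtype=np.float32)               # (ndim_y, ndim_y)
z = np.zeros((model.ndim_y, model.ndim_z), dtype=np.float32)  # (ndim_y, ndim_z)
x = model.decode_yz_x(y, z).data                              # decoded images in [-1, 1]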
Example 1: plot_clusters
# Required module: from model import Model [as alias]
# Or: from model.Model import decode_yz_x [as alias]
import numpy as np
import chainer
import pylab
from model import Model

def plot_clusters():
    dataset_train, dataset_test = chainer.datasets.get_mnist()
    images_train, labels_train = dataset_train._datasets
    images_test, labels_test = dataset_test._datasets
    dataset_indices = np.arange(0, len(images_test))
    np.random.shuffle(dataset_indices)

    model = Model()
    assert model.load("model.hdf5")

    # normalize pixel values from [0, 1] to [-1, 1]
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2

    num_clusters = model.ndim_y
    num_plots_per_cluster = 11
    image_width = 28
    image_height = 28
    ndim_x = image_width * image_height
    pylab.gray()

    with chainer.no_backprop_mode(), chainer.using_config("train", False):
        # plot cluster heads: decode each one-hot y with z fixed at zero
        head_y = np.identity(model.ndim_y, dtype=np.float32)
        zero_z = np.zeros((model.ndim_y, model.ndim_z), dtype=np.float32)
        head_x = model.decode_yz_x(head_y, zero_z).data
        head_x = (head_x + 1.0) / 2.0
        for n in range(num_clusters):
            pylab.subplot(num_clusters, num_plots_per_cluster + 2, n * (num_plots_per_cluster + 2) + 1)
            pylab.imshow(head_x[n].reshape((image_width, image_height)), interpolation="none")
            pylab.axis("off")

        # plot test images assigned to each cluster
        counts = [0 for i in range(num_clusters)]
        indices = np.arange(len(images_test))
        np.random.shuffle(indices)
        batchsize = 500
        i = 0
        x_batch = np.zeros((batchsize, ndim_x), dtype=np.float32)
        for n in range(len(images_test) // batchsize):
            for b in range(batchsize):
                x_batch[b] = images_test[indices[i]]
                i += 1
            y_batch = model.encode_x_yz(x_batch)[0].data
            labels = np.argmax(y_batch, axis=1)
            for m in range(labels.size):
                cluster = int(labels[m])
                counts[cluster] += 1
                if counts[cluster] <= num_plots_per_cluster:
                    x = (x_batch[m] + 1.0) / 2.0
                    pylab.subplot(num_clusters, num_plots_per_cluster + 2, cluster * (num_plots_per_cluster + 2) + 2 + counts[cluster])
                    pylab.imshow(x.reshape((image_width, image_height)), interpolation="none")
                    pylab.axis("off")

    fig = pylab.gcf()
    fig.set_size_inches(num_plots_per_cluster, num_clusters)
    pylab.savefig("clusters.png")
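Note the trick in the cluster-head block above: because the representation is factored into a categorical part y and a continuous part z, each row of the identity matrix is the one-hot code of one cluster, and decoding it with z fixed at zero yields a single representative "head" image per cluster. The remaining columns of each row then show real test images the encoder assigned to that cluster.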
Example 2: plot_analogy
# Required module: from model import Model [as alias]
# Or: from model.Model import decode_yz_x [as alias]
import numpy as np
import chainer
import pylab
from model import Model

def plot_analogy():
    dataset_train, dataset_test = chainer.datasets.get_mnist()
    images_train, labels_train = dataset_train._datasets
    images_test, labels_test = dataset_test._datasets
    dataset_indices = np.arange(0, len(images_test))
    np.random.shuffle(dataset_indices)

    model = Model()
    assert model.load("model.hdf5")

    # normalize pixel values from [0, 1] to [-1, 1]
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2

    num_analogies = 10
    pylab.gray()

    batch_indices = dataset_indices[:num_analogies]
    x_batch = images_test[batch_indices]
    y_batch = labels_test[batch_indices]
    y_onehot_batch = onehot(y_batch)  # onehot is a project-local helper (see the sketch below)

    with chainer.no_backprop_mode(), chainer.using_config("train", False):
        z_batch = model.encode_x_yz(x_batch)[1].data

        # plot the original image on the left
        x_batch = (x_batch + 1.0) / 2.0
        for m in range(num_analogies):
            pylab.subplot(num_analogies, 10 + 2, m * 12 + 1)
            pylab.imshow(x_batch[m].reshape((28, 28)), interpolation="none")
            pylab.axis("off")

        all_y = np.identity(10, dtype=np.float32)
        for m in range(num_analogies):
            # repeat this sample's z once per class
            fixed_z = np.repeat(z_batch[m].reshape(1, -1), 10, axis=0)
            gen_x = model.decode_yz_x(all_y, fixed_z).data
            gen_x = (gen_x + 1.0) / 2.0
            # plot the images generated from each label
            for n in range(10):
                pylab.subplot(num_analogies, 10 + 2, m * 12 + 3 + n)
                pylab.imshow(gen_x[n].reshape((28, 28)), interpolation="none")
                pylab.axis("off")

    fig = pylab.gcf()
    fig.set_size_inches(num_analogies, 10)
    pylab.savefig("analogy.png")
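plot_analogy calls an onehot helper that is not shown on this page. The sketch below is a hypothetical reconstruction based only on the call site (integer labels in, one-hot float32 matrix out), so the name and signature are assumptions:

import numpy as np

def onehot(labels, ndim=10):
    # hypothetical helper: labels of shape (N,) -> one-hot matrix of shape (N, ndim)
    onehot_labels = np.zeros((labels.size, ndim), dtype=np.float32)
    onehot_labels[np.arange(labels.size), labels] = 1.0
    return onehot_labels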
Example 3: main
# Required module: from model import Model [as alias]
# Or: from model.Model import decode_yz_x [as alias]
import argparse
import time
import numpy as np
import chainer
import chainer.functions as F
from chainer import cuda
from model import Model
# Dataset, Optimizer, GradientClipping and sampler are helpers from the
# example's own project, not from a standard library.

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--batchsize", "-b", type=int, default=64)
    parser.add_argument("--total-epochs", "-e", type=int, default=300)
    parser.add_argument("--gpu-device", "-g", type=int, default=0)
    parser.add_argument("--grad-clip", "-gc", type=float, default=5)
    parser.add_argument("--learning-rate", "-lr", type=float, default=0.0001)
    parser.add_argument("--momentum", "-mo", type=float, default=0.5)
    parser.add_argument("--optimizer", "-opt", type=str, default="adam")
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()

    mnist_train, mnist_test = chainer.datasets.get_mnist()
    images_train, labels_train = mnist_train._datasets
    images_test, labels_test = mnist_test._datasets

    # normalize pixel values from [0, 1] to [-1, 1]
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2

    dataset = Dataset(train=(images_train, labels_train), test=(images_test, labels_test))
    total_iterations_train = len(images_train) // args.batchsize

    model = Model()
    model.load(args.model)

    # optimizers
    optimizer_encoder = Optimizer(args.optimizer, args.learning_rate, args.momentum)
    optimizer_encoder.setup(model.encoder)
    if args.grad_clip > 0:
        optimizer_encoder.add_hook(GradientClipping(args.grad_clip))

    optimizer_decoder = Optimizer(args.optimizer, args.learning_rate, args.momentum)
    optimizer_decoder.setup(model.decoder)
    if args.grad_clip > 0:
        optimizer_decoder.add_hook(GradientClipping(args.grad_clip))

    optimizer_discriminator = Optimizer(args.optimizer, args.learning_rate, args.momentum)
    optimizer_discriminator.setup(model.discriminator)
    if args.grad_clip > 0:
        optimizer_discriminator.add_hook(GradientClipping(args.grad_clip))

    using_gpu = False
    if args.gpu_device >= 0:
        cuda.get_device(args.gpu_device).use()
        model.to_gpu()
        using_gpu = True
    xp = model.xp

    # discriminator targets: 0 -> true sample, 1 -> generated sample
    class_true = np.zeros(args.batchsize, dtype=np.int32)
    class_fake = np.ones(args.batchsize, dtype=np.int32)
    if using_gpu:
        class_true = cuda.to_gpu(class_true)
        class_fake = cuda.to_gpu(class_fake)

    training_start_time = time.time()
    for epoch in range(args.total_epochs):
        sum_loss_generator = 0
        sum_loss_discriminator = 0
        sum_loss_autoencoder = 0
        sum_discriminator_confidence_true = 0
        sum_discriminator_confidence_fake = 0
        epoch_start_time = time.time()
        dataset.shuffle()

        # training
        for itr in range(total_iterations_train):
            # update model parameters
            with chainer.using_config("train", True):
                x_l, y_l, y_onehot_l = dataset.sample_minibatch(args.batchsize, gpu=using_gpu)

                ### reconstruction phase ###
                if True:
                    z_fake_l = model.encode_x_z(x_l)
                    x_reconstruction_l = model.decode_yz_x(y_onehot_l, z_fake_l)
                    loss_reconstruction = F.mean_squared_error(x_l, x_reconstruction_l)

                    model.cleargrads()
                    loss_reconstruction.backward()
                    optimizer_encoder.update()
                    optimizer_decoder.update()

                ### adversarial phase ###
                if True:
                    z_fake_l = model.encode_x_z(x_l)
                    z_true_batch = sampler.gaussian(args.batchsize, model.ndim_z, mean=0, var=1)
                    if using_gpu:
                        z_true_batch = cuda.to_gpu(z_true_batch)

                    dz_true = model.discriminate_z(z_true_batch, apply_softmax=False)
                    dz_fake = model.discriminate_z(z_fake_l, apply_softmax=False)
                    discriminator_confidence_true = float(xp.mean(F.softmax(dz_true).data[:, 0]))
                    discriminator_confidence_fake = float(xp.mean(F.softmax(dz_fake).data[:, 1]))

                    loss_discriminator = F.softmax_cross_entropy(dz_true, class_true) + F.softmax_cross_entropy(dz_fake, class_fake)

                    model.cleargrads()
                    loss_discriminator.backward()
# ... the rest of this code example is omitted ...
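The listing breaks off just before the discriminator's optimizer step. In a standard adversarial autoencoder, the discriminator update is followed by a generator phase that trains the encoder to fool the discriminator, which would also explain the sum_loss_generator accumulator declared above. The continuation below is a hypothetical sketch of that standard pattern, not the example's actual omitted code:

# (continuing inside the same minibatch loop)
optimizer_discriminator.update()

### generator phase (hypothetical) ###
z_fake_l = model.encode_x_z(x_l)
dz_fake = model.discriminate_z(z_fake_l, apply_softmax=False)
# train the encoder so its z is classified as a true sample (class 0)
loss_generator = F.softmax_cross_entropy(dz_fake, class_true)

model.cleargrads()
loss_generator.backward()
optimizer_encoder.update()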