This page collects typical usage examples of the Python method model.Model.linear_transformation. If you are unsure what Model.linear_transformation does or how to call it, the curated examples below may help. You can also read up on the containing class, model.Model.
Three code examples of Model.linear_transformation are shown below, sorted by popularity by default.
Example 1: plot_mapped_cluster_head

# Required module: from model import Model [as alias]
# Or: from model.Model import linear_transformation [as alias]
import argparse
import numpy as np
from model import Model
import plot  # project-local plotting helpers

def plot_mapped_cluster_head():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()
    model = Model()
    assert model.load(args.model)
    # each row of the identity matrix is a one-hot vector, i.e. one cluster head
    identity = np.identity(model.ndim_y, dtype=np.float32)
    mapped_head = model.linear_transformation(identity)
    labels = [i for i in range(model.ndim_y)]
    plot.scatter_labeled_z(mapped_head.data, labels, "cluster_head.png")
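The Model class itself is not reproduced on this page. For orientation only, here is a minimal sketch of what the linear_transformation attribute could be, assuming it is a learnable, bias-free Chainer link projecting the ndim_y-dimensional representation down to 2D; the class layout and constructor arguments below are assumptions, not the project's actual code:

import chainer
import chainer.links as L

class Model(chainer.Chain):
    # hypothetical sketch; the real Model also defines the encoder, decoder,
    # and the two discriminators used in example 3
    def __init__(self, ndim_y=10, ndim_reduction=2):
        super().__init__()
        self.ndim_y = ndim_y
        with self.init_scope():
            # learnable projection from the ndim_y-dimensional representation
            # to 2D plotting coordinates; nobias is an assumption
            self.linear_transformation = L.Linear(ndim_y, ndim_reduction, nobias=True)

Because a chainer.Link is callable, model.linear_transformation(identity) in example 1 returns a Variable whose .data holds the mapped 2D coordinates.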
Example 2: plot_mapped_representation

# Required module: from model import Model [as alias]
# Or: from model.Model import linear_transformation [as alias]
import argparse
import chainer
import numpy as np
from model import Model
import plot  # project-local plotting helpers

def plot_mapped_representation():
    parser = argparse.ArgumentParser()
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()
    dataset_train, dataset_test = chainer.datasets.get_mnist()
    images_train, labels_train = dataset_train._datasets
    images_test, labels_test = dataset_test._datasets
    model = Model()
    assert model.load(args.model)
    # normalize pixel values from [0, 1] to [-1, 1]
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2
    # inference: no gradients, test-mode batch statistics
    # (the two context managers must be combined with a comma; `and` would
    # enter only one of them)
    with chainer.no_backprop_mode(), chainer.using_config("train", False):
        y_onehot, z = model.encode_x_yz(images_test, apply_softmax_y=True)
        representation = model.encode_yz_representation(y_onehot, z)
        mapped_representation = model.linear_transformation(representation)
    plot.scatter_labeled_z(mapped_representation.data, labels_test, "scatter_r.png")
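plot is a project-local module that the page does not show. Below is a minimal matplotlib sketch of what scatter_labeled_z might look like; the function name and argument order are taken from the calls above, while the body and styling are assumptions:

import matplotlib
matplotlib.use("Agg")  # render to files without a display
import matplotlib.pyplot as plt
import numpy as np

def scatter_labeled_z(z_batch, labels, filename="scatter.png"):
    # z_batch: (N, 2) mapped representations; labels: N integer class labels
    z_batch = np.asarray(z_batch)
    labels = np.asarray(labels)
    fig, ax = plt.subplots(figsize=(8, 8))
    for label in np.unique(labels):
        # draw each class in its own color so clusters are distinguishable
        mask = labels == label
        ax.scatter(z_batch[mask, 0], z_batch[mask, 1], s=4, label=str(label))
    ax.legend(loc="upper right")
    fig.savefig(filename)
    plt.close(fig)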
Example 3: main

# Required module: from model import Model [as alias]
# Or: from model.Model import linear_transformation [as alias]
import argparse
import math
import numpy as np
import chainer
from chainer import cuda
from model import Model
# project-local helpers; the exact import paths below are assumptions
from dataset import Dataset
from optim import Optimizer, GradientClipping

def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--batchsize", "-b", type=int, default=64)
    parser.add_argument("--total-epochs", "-e", type=int, default=5000)
    parser.add_argument("--num-labeled-data", "-nl", type=int, default=100)
    parser.add_argument("--gpu-device", "-g", type=int, default=0)
    parser.add_argument("--grad-clip", "-gc", type=float, default=5)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()
    np.random.seed(args.seed)
    model = Model()
    model.load(args.model)
    mnist_train, mnist_test = chainer.datasets.get_mnist()
    images_train, labels_train = mnist_train._datasets
    images_test, labels_test = mnist_test._datasets
    # normalize pixel values from [0, 1] to [-1, 1]
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2
    dataset = Dataset(train=(images_train, labels_train),
                      test=(images_test, labels_test),
                      num_labeled_data=args.num_labeled_data,
                      num_classes=model.ndim_y)
    print("#labeled: {}".format(dataset.get_num_labeled_data()))
    print("#unlabeled: {}".format(dataset.get_num_unlabeled_data()))
    _, labels = dataset.get_labeled_data()
    print("labeled data:", labels)
    total_iterations_train = len(images_train) // args.batchsize

    # optimizers: one per sub-network, all momentum SGD, each with optional
    # gradient clipping
    optimizer_encoder = Optimizer("msgd", 0.01, 0.9)
    optimizer_encoder.setup(model.encoder)
    if args.grad_clip > 0:
        optimizer_encoder.add_hook(GradientClipping(args.grad_clip))

    optimizer_semi_supervised = Optimizer("msgd", 0.1, 0.9)
    optimizer_semi_supervised.setup(model.encoder)
    if args.grad_clip > 0:
        optimizer_semi_supervised.add_hook(GradientClipping(args.grad_clip))

    optimizer_generator = Optimizer("msgd", 0.1, 0.1)
    optimizer_generator.setup(model.encoder)
    if args.grad_clip > 0:
        optimizer_generator.add_hook(GradientClipping(args.grad_clip))

    optimizer_decoder = Optimizer("msgd", 0.01, 0.9)
    optimizer_decoder.setup(model.decoder)
    if args.grad_clip > 0:
        optimizer_decoder.add_hook(GradientClipping(args.grad_clip))

    optimizer_discriminator_z = Optimizer("msgd", 0.1, 0.1)
    optimizer_discriminator_z.setup(model.discriminator_z)
    if args.grad_clip > 0:
        optimizer_discriminator_z.add_hook(GradientClipping(args.grad_clip))

    optimizer_discriminator_y = Optimizer("msgd", 0.1, 0.1)
    optimizer_discriminator_y.setup(model.discriminator_y)
    if args.grad_clip > 0:
        optimizer_discriminator_y.add_hook(GradientClipping(args.grad_clip))

    optimizer_linear_transformation = Optimizer("msgd", 0.01, 0.9)
    optimizer_linear_transformation.setup(model.linear_transformation)
    if args.grad_clip > 0:
        optimizer_linear_transformation.add_hook(GradientClipping(args.grad_clip))

    using_gpu = False
    if args.gpu_device >= 0:
        cuda.get_device(args.gpu_device).use()
        model.to_gpu()
        using_gpu = True
    xp = model.xp

    # discriminator targets: 0 -> true sample, 1 -> generated sample
    class_true = np.zeros(args.batchsize, dtype=np.int32)
    class_fake = np.ones(args.batchsize, dtype=np.int32)
    if using_gpu:
        class_true = cuda.to_gpu(class_true)
        class_fake = cuda.to_gpu(class_fake)

    # 2D circle:
    # we use a linear transformation to map the 10D representation to a 2D
    # space such that the cluster heads are mapped to points placed uniformly
    # on a circle
    rad = math.radians(360 / model.ndim_y)
    radius = 5
    mapped_cluster_head_2d_target = np.zeros((model.ndim_y, 2), dtype=np.float32)
    for n in range(model.ndim_y):
        x = math.cos(rad * n) * radius
        y = math.sin(rad * n) * radius
        mapped_cluster_head_2d_target[n] = (x, y)
    if using_gpu:
        mapped_cluster_head_2d_target = cuda.to_gpu(mapped_cluster_head_2d_target)

    # training loop
    #......... the rest of the code is omitted here .........
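The training loop itself is omitted by the source. Purely to show how the variables above fit together, here is a hedged sketch of the cluster-head alignment update that the setup suggests: map the one-hot cluster heads through linear_transformation and pull them toward the circle targets. The function name and the choice of a mean-squared-error loss are assumptions, not the omitted code, and the sketch assumes the project's Optimizer wrapper updates like a standard Chainer optimizer.

import chainer.functions as F

def update_linear_transformation(model, optimizer_linear_transformation,
                                 mapped_cluster_head_2d_target):
    # hypothetical sketch: align the mapped cluster heads with the 2D circle
    xp = model.xp
    identity = xp.identity(model.ndim_y, dtype=xp.float32)  # one head per row
    mapped_head = model.linear_transformation(identity)
    loss = F.mean_squared_error(mapped_head, mapped_cluster_head_2d_target)
    model.linear_transformation.cleargrads()
    loss.backward()
    optimizer_linear_transformation.update()
    return float(loss.data)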