This article collects typical usage examples of the Model.to_gpu method from the Python module model. If you have been wondering what Model.to_gpu does, how to call it, or what real uses of it look like, the hand-picked code examples below should help. You can also explore further usage examples of the containing class, model.Model.
Below, a total of four code examples of Model.to_gpu are shown, sorted by popularity by default.
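All four examples share the same device-setup pattern: select a CUDA device, move the model's parameters onto it with to_gpu, and mirror any NumPy arrays used as inputs or labels with cuda.to_gpu. Here is a minimal sketch of that pattern, assuming Model behaves like a chainer.Chain (so it provides to_gpu and xp) and that a model.hdf5 checkpoint exists as in the examples below; it is an illustration, not the full training scripts.

# Minimal sketch of the shared GPU-setup pattern (assumptions noted above)
import numpy as np
from chainer import cuda
from model import Model  # the Model class used throughout the examples

model = Model()
model.load("model.hdf5")               # checkpoint name taken from the examples

gpu_device = 0                         # pass a negative value to stay on the CPU
if gpu_device >= 0:
    cuda.get_device(gpu_device).use()  # select the CUDA device
    model.to_gpu()                     # move all model parameters onto it
xp = model.xp                          # numpy on CPU, cupy on GPU

# arrays fed to the model must live on the same device as its parameters
class_labels = np.zeros(64, dtype=np.int32)
if gpu_device >= 0:
    class_labels = cuda.to_gpu(class_labels)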
Example 1: main
# Required import: from model import Model [as alias]
# Or: from model.Model import to_gpu [as alias]
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--batchsize", "-b", type=int, default=64)
    parser.add_argument("--total-epochs", "-e", type=int, default=5000)
    parser.add_argument("--num-labeled-data", "-nl", type=int, default=100)
    parser.add_argument("--gpu-device", "-g", type=int, default=0)
    parser.add_argument("--grad-clip", "-gc", type=float, default=5)
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()

    np.random.seed(args.seed)

    model = Model()
    model.load(args.model)

    mnist_train, mnist_test = chainer.datasets.get_mnist()
    images_train, labels_train = mnist_train._datasets
    images_test, labels_test = mnist_test._datasets

    # normalize
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2

    dataset = Dataset(train=(images_train, labels_train),
                      test=(images_test, labels_test),
                      num_labeled_data=args.num_labeled_data,
                      num_classes=model.ndim_y)
    print("#labeled: {}".format(dataset.get_num_labeled_data()))
    print("#unlabeled: {}".format(dataset.get_num_unlabeled_data()))
    _, labels = dataset.get_labeled_data()
    print("labeled data:", labels)

    total_iterations_train = len(images_train) // args.batchsize

    # optimizers
    optimizer_encoder = Optimizer("msgd", 0.01, 0.9)
    optimizer_encoder.setup(model.encoder)
    if args.grad_clip > 0:
        optimizer_encoder.add_hook(GradientClipping(args.grad_clip))

    optimizer_semi_supervised = Optimizer("msgd", 0.1, 0.9)
    optimizer_semi_supervised.setup(model.encoder)
    if args.grad_clip > 0:
        optimizer_semi_supervised.add_hook(GradientClipping(args.grad_clip))

    optimizer_generator = Optimizer("msgd", 0.1, 0.1)
    optimizer_generator.setup(model.encoder)
    if args.grad_clip > 0:
        optimizer_generator.add_hook(GradientClipping(args.grad_clip))

    optimizer_decoder = Optimizer("msgd", 0.01, 0.9)
    optimizer_decoder.setup(model.decoder)
    if args.grad_clip > 0:
        optimizer_decoder.add_hook(GradientClipping(args.grad_clip))

    optimizer_discriminator_z = Optimizer("msgd", 0.1, 0.1)
    optimizer_discriminator_z.setup(model.discriminator_z)
    if args.grad_clip > 0:
        optimizer_discriminator_z.add_hook(GradientClipping(args.grad_clip))

    optimizer_discriminator_y = Optimizer("msgd", 0.1, 0.1)
    optimizer_discriminator_y.setup(model.discriminator_y)
    if args.grad_clip > 0:
        optimizer_discriminator_y.add_hook(GradientClipping(args.grad_clip))

    optimizer_linear_transformation = Optimizer("msgd", 0.01, 0.9)
    optimizer_linear_transformation.setup(model.linear_transformation)
    if args.grad_clip > 0:
        optimizer_linear_transformation.add_hook(GradientClipping(args.grad_clip))

    using_gpu = False
    if args.gpu_device >= 0:
        cuda.get_device(args.gpu_device).use()
        model.to_gpu()
        using_gpu = True
    xp = model.xp

    # 0 -> true sample
    # 1 -> generated sample
    class_true = np.zeros(args.batchsize, dtype=np.int32)
    class_fake = np.ones(args.batchsize, dtype=np.int32)
    if using_gpu:
        class_true = cuda.to_gpu(class_true)
        class_fake = cuda.to_gpu(class_fake)

    # 2D circle
    # we use a linear transformation to map the 10D representation to a 2D space
    # such that the cluster heads are mapped to points placed uniformly on a 2D circle.
    rad = math.radians(360 / model.ndim_y)
    radius = 5
    # note: assumes model.ndim_y == 10 (one cluster head per MNIST class)
    mapped_cluster_head_2d_target = np.zeros((10, 2), dtype=np.float32)
    for n in range(model.ndim_y):
        x = math.cos(rad * n) * radius
        y = math.sin(rad * n) * radius
        mapped_cluster_head_2d_target[n] = (x, y)
    if using_gpu:
        mapped_cluster_head_2d_target = cuda.to_gpu(mapped_cluster_head_2d_target)

    # training loop
#......... part of the code is omitted here .........
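As a quick standalone check of the 2D-circle comment in Example 1, the snippet below recomputes the cluster-head targets on the CPU and verifies that they sit on the chosen circle at a constant angular spacing. The values ndim_y = 10 and radius = 5 are assumed from the example above.

# Standalone check of the cluster-head targets (assumes ndim_y = 10, radius = 5)
import math
import numpy as np

ndim_y, radius = 10, 5
rad = math.radians(360 / ndim_y)    # 36 degrees between neighbouring heads
targets = np.array([(math.cos(rad * n) * radius, math.sin(rad * n) * radius)
                    for n in range(ndim_y)], dtype=np.float32)

# every target lies on the circle of the chosen radius...
assert np.allclose(np.linalg.norm(targets, axis=1), radius)
# ...and the first few heads are separated by a constant 36-degree angle
angles = np.degrees(np.arctan2(targets[:, 1], targets[:, 0]))
assert np.allclose(np.diff(angles[:5]), 36)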
Example 2: main
# Required import: from model import Model [as alias]
# Or: from model.Model import to_gpu [as alias]
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--batchsize", "-b", type=int, default=64)
    parser.add_argument("--total-epochs", "-e", type=int, default=300)
    parser.add_argument("--gpu-device", "-g", type=int, default=0)
    parser.add_argument("--grad-clip", "-gc", type=float, default=5)
    parser.add_argument("--learning-rate", "-lr", type=float, default=0.0001)
    parser.add_argument("--momentum", "-mo", type=float, default=0.5)
    parser.add_argument("--optimizer", "-opt", type=str, default="adam")
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()

    mnist_train, mnist_test = chainer.datasets.get_mnist()
    images_train, labels_train = mnist_train._datasets
    images_test, labels_test = mnist_test._datasets

    # normalize
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2

    dataset = Dataset(train=(images_train, labels_train), test=(images_test, labels_test))
    total_iterations_train = len(images_train) // args.batchsize

    model = Model()
    model.load(args.model)

    # optimizers
    optimizer_encoder = Optimizer(args.optimizer, args.learning_rate, args.momentum)
    optimizer_encoder.setup(model.encoder)
    if args.grad_clip > 0:
        optimizer_encoder.add_hook(GradientClipping(args.grad_clip))

    optimizer_decoder = Optimizer(args.optimizer, args.learning_rate, args.momentum)
    optimizer_decoder.setup(model.decoder)
    if args.grad_clip > 0:
        optimizer_decoder.add_hook(GradientClipping(args.grad_clip))

    optimizer_discriminator = Optimizer(args.optimizer, args.learning_rate, args.momentum)
    optimizer_discriminator.setup(model.discriminator)
    if args.grad_clip > 0:
        optimizer_discriminator.add_hook(GradientClipping(args.grad_clip))

    using_gpu = False
    if args.gpu_device >= 0:
        cuda.get_device(args.gpu_device).use()
        model.to_gpu()
        using_gpu = True
    xp = model.xp

    # 0 -> true sample
    # 1 -> generated sample
    class_true = np.zeros(args.batchsize, dtype=np.int32)
    class_fake = np.ones(args.batchsize, dtype=np.int32)
    if using_gpu:
        class_true = cuda.to_gpu(class_true)
        class_fake = cuda.to_gpu(class_fake)

    training_start_time = time.time()
    for epoch in range(args.total_epochs):
        sum_loss_generator = 0
        sum_loss_discriminator = 0
        sum_loss_autoencoder = 0
        sum_discriminator_confidence_true = 0
        sum_discriminator_confidence_fake = 0

        epoch_start_time = time.time()
        dataset.shuffle()

        # training
        for itr in range(total_iterations_train):
            # update model parameters
            with chainer.using_config("train", True):
                x_l, y_l, y_onehot_l = dataset.sample_minibatch(args.batchsize, gpu=using_gpu)

                ### reconstruction phase ###
                if True:
                    z_fake_l = model.encode_x_z(x_l)
                    x_reconstruction_l = model.decode_yz_x(y_onehot_l, z_fake_l)
                    loss_reconstruction = F.mean_squared_error(x_l, x_reconstruction_l)

                    model.cleargrads()
                    loss_reconstruction.backward()
                    optimizer_encoder.update()
                    optimizer_decoder.update()

                ### adversarial phase ###
                if True:
                    z_fake_l = model.encode_x_z(x_l)
                    z_true_batch = sampler.gaussian(args.batchsize, model.ndim_z, mean=0, var=1)
                    if using_gpu:
                        z_true_batch = cuda.to_gpu(z_true_batch)

                    dz_true = model.discriminate_z(z_true_batch, apply_softmax=False)
                    dz_fake = model.discriminate_z(z_fake_l, apply_softmax=False)
                    discriminator_confidence_true = float(xp.mean(F.softmax(dz_true).data[:, 0]))
                    discriminator_confidence_fake = float(xp.mean(F.softmax(dz_fake).data[:, 1]))

                    loss_discriminator = F.softmax_cross_entropy(dz_true, class_true) + F.softmax_cross_entropy(dz_fake, class_fake)

                    model.cleargrads()
                    loss_discriminator.backward()
#......... part of the code is omitted here .........
Example 3: main
# Required import: from model import Model [as alias]
# Or: from model.Model import to_gpu [as alias]
def main(args):
    if args.gpu >= 0:
        cuda.check_cuda_available()
    xp = cuda.cupy if args.gpu >= 0 else np

    model_id = build_model_id(args)
    model_path = build_model_path(args, model_id)
    setup_model_dir(args, model_path)
    sys.stdout, sys.stderr = setup_logging(args)

    x_train, y_train = load_model_data(args.train_file,
                                       args.data_name, args.target_name,
                                       n=args.n_train)
    x_validation, y_validation = load_model_data(
            args.validation_file,
            args.data_name, args.target_name,
            n=args.n_validation)

    rng = np.random.RandomState(args.seed)

    N = len(x_train)
    N_validation = len(x_validation)
    n_classes = max(np.unique(y_train)) + 1
    json_cfg = load_model_json(args, x_train, n_classes)

    print('args.model_dir', args.model_dir)
    sys.path.append(args.model_dir)
    from model import Model
    model_cfg = ModelConfig(**json_cfg)
    model = Model(model_cfg)
    setattr(model, 'stop_training', False)

    if args.gpu >= 0:
        cuda.get_device(args.gpu).use()
        model.to_gpu()

    best_accuracy = 0.
    best_epoch = 0

    def keep_training(epoch, best_epoch):
        # stop after the configured number of epochs, or once no improvement
        # has been seen for `patience` epochs (early stopping)
        if model_cfg.n_epochs is not None and epoch > model_cfg.n_epochs:
            return False
        if epoch > 1 and epoch - best_epoch > model_cfg.patience:
            return False
        return True

    epoch = 1
    while True:
        if not keep_training(epoch, best_epoch):
            break

        if args.shuffle:
            perm = np.random.permutation(N)
        else:
            perm = np.arange(N)

        sum_accuracy = 0
        sum_loss = 0
        pbar = progressbar.ProgressBar(term_width=40,
                                       widgets=[' ', progressbar.Percentage(),
                                                ' ', progressbar.ETA()],
                                       maxval=N).start()
        for j, i in enumerate(six.moves.range(0, N, model_cfg.batch_size)):
            pbar.update(j + 1)
            x_batch = xp.asarray(x_train[perm[i:i + model_cfg.batch_size]].flatten())
            y_batch = xp.asarray(y_train[perm[i:i + model_cfg.batch_size]])
            pred, loss, acc = model.fit(x_batch, y_batch)
            sum_loss += float(loss.data) * len(y_batch)
            sum_accuracy += float(acc.data) * len(y_batch)
        pbar.finish()
        print('train epoch={}, mean loss={}, accuracy={}'.format(
            epoch, sum_loss / N, sum_accuracy / N))

        # Validation set evaluation
        sum_accuracy = 0
        sum_loss = 0
        pbar = progressbar.ProgressBar(term_width=40,
                                       widgets=[' ', progressbar.Percentage(),
                                                ' ', progressbar.ETA()],
                                       maxval=N_validation).start()
        for i in six.moves.range(0, N_validation, model_cfg.batch_size):
            pbar.update(i + 1)
            x_batch = xp.asarray(x_validation[i:i + model_cfg.batch_size].flatten())
            y_batch = xp.asarray(y_validation[i:i + model_cfg.batch_size])
            pred, loss, acc = model.predict(x_batch, target=y_batch)
            sum_loss += float(loss.data) * len(y_batch)
            sum_accuracy += float(acc.data) * len(y_batch)
        pbar.finish()

        validation_accuracy = sum_accuracy / N_validation
        validation_loss = sum_loss / N_validation

        if validation_accuracy > best_accuracy:
#......... part of the code is omitted here .........
Example 4: main
# Required import: from model import Model [as alias]
# Or: from model.Model import to_gpu [as alias]
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--batchsize", "-b", type=int, default=64)
    parser.add_argument("--total-epochs", "-e", type=int, default=5000)
    parser.add_argument("--num-labeled-data", "-nl", type=int, default=100)
    parser.add_argument("--gpu-device", "-g", type=int, default=0)
    parser.add_argument("--grad-clip", "-gc", type=float, default=5)
    parser.add_argument("--learning-rate", "-lr", type=float, default=0.0001)
    parser.add_argument("--momentum", "-mo", type=float, default=0.1)
    parser.add_argument("--optimizer", "-opt", type=str, default="adam")
    parser.add_argument("--seed", type=int, default=0)
    parser.add_argument("--model", "-m", type=str, default="model.hdf5")
    args = parser.parse_args()

    np.random.seed(args.seed)

    model = Model()
    model.load(args.model)

    mnist_train, mnist_test = chainer.datasets.get_mnist()
    images_train, labels_train = mnist_train._datasets
    images_test, labels_test = mnist_test._datasets

    # normalize
    images_train = (images_train - 0.5) * 2
    images_test = (images_test - 0.5) * 2

    dataset = Dataset(train=(images_train, labels_train), test=(images_test, labels_test))
    total_iterations_train = len(images_train) // args.batchsize

    # optimizers
    optimizer_encoder = Optimizer(args.optimizer, args.learning_rate, args.momentum)
    optimizer_encoder.setup(model.encoder)
    if args.grad_clip > 0:
        optimizer_encoder.add_hook(GradientClipping(args.grad_clip))

    optimizer_decoder = Optimizer(args.optimizer, args.learning_rate, args.momentum)
    optimizer_decoder.setup(model.decoder)
    if args.grad_clip > 0:
        optimizer_decoder.add_hook(GradientClipping(args.grad_clip))

    optimizer_discriminator_z = Optimizer(args.optimizer, args.learning_rate, args.momentum)
    optimizer_discriminator_z.setup(model.discriminator_z)
    if args.grad_clip > 0:
        optimizer_discriminator_z.add_hook(GradientClipping(args.grad_clip))

    optimizer_discriminator_y = Optimizer(args.optimizer, args.learning_rate, args.momentum)
    optimizer_discriminator_y.setup(model.discriminator_y)
    if args.grad_clip > 0:
        optimizer_discriminator_y.add_hook(GradientClipping(args.grad_clip))

    optimizer_cluster_head = Optimizer(args.optimizer, args.learning_rate, args.momentum)
    optimizer_cluster_head.setup(model.cluster_head)
    if args.grad_clip > 0:
        optimizer_cluster_head.add_hook(GradientClipping(args.grad_clip))

    using_gpu = False
    if args.gpu_device >= 0:
        cuda.get_device(args.gpu_device).use()
        model.to_gpu()
        using_gpu = True
    xp = model.xp

    # 0 -> true sample
    # 1 -> generated sample
    class_true = np.zeros(args.batchsize, dtype=np.int32)
    class_fake = np.ones(args.batchsize, dtype=np.int32)
    if using_gpu:
        class_true = cuda.to_gpu(class_true)
        class_fake = cuda.to_gpu(class_fake)

    training_start_time = time.time()
    for epoch in range(args.total_epochs):
        sum_loss_generator = 0
        sum_loss_discriminator = 0
        sum_loss_autoencoder = 0
        sum_loss_cluster_head = 0
        sum_discriminator_z_confidence_true = 0
        sum_discriminator_z_confidence_fake = 0
        sum_discriminator_y_confidence_true = 0
        sum_discriminator_y_confidence_fake = 0

        epoch_start_time = time.time()
        dataset.shuffle()

        # training
        for itr in range(total_iterations_train):
            # update model parameters
            with chainer.using_config("train", True):
                # sample minibatch
                x_u, _, _ = dataset.sample_minibatch(args.batchsize, gpu=using_gpu)

                ### reconstruction phase ###
                if True:
                    y_onehot_u, z_u = model.encode_x_yz(x_u, apply_softmax_y=True)
                    repr_u = model.encode_yz_representation(y_onehot_u, z_u)
                    x_reconstruction_u = model.decode_representation_x(repr_u)
                    loss_reconstruction = F.mean_squared_error(x_u, x_reconstruction_u)
#......... part of the code is omitted here .........