This page collects typical usage examples of chainer.cuda.cupy in Python. If you are wondering what cuda.cupy is, how to call it, or where it is used in practice, the hand-picked code samples below should help. You can also look further into the module it belongs to, chainer.cuda.
Fifteen code examples of cuda.cupy are shown below, sorted by popularity by default.
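Almost every example below uses the same device-dispatch idiom: bind the name xp to cuda.cupy when a GPU is in use and to numpy otherwise, then write the rest of the code against xp. A minimal, self-contained sketch of that idiom (the get_xp helper and gpu_id argument are illustrative, not taken from any of the examples):

import numpy as np
from chainer import cuda

def get_xp(gpu_id):
    # Pick the array module: CuPy for a GPU device, NumPy for the CPU.
    if gpu_id >= 0:
        cuda.check_cuda_available()
        cuda.get_device_from_id(gpu_id).use()
        return cuda.cupy
    return np

xp = get_xp(gpu_id=-1)
a = xp.zeros((2, 3), dtype=xp.float32)  # same code path for NumPy and CuPy arrays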
Example 1: __init__
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def __init__(self, args):
    xp = cuda.cupy if args.gpu >= 0 else np
    xp.random.seed(args.seed)
    # 2x1 and 1x2 difference kernels (vertical and horizontal image gradients)
    Wh_data = xp.array([[[[1], [-1]]]], dtype='f')
    Ww_data = xp.array([[[[1, -1]]]], dtype='f')
    self.Wh = chainer.Variable(Wh_data)
    self.Ww = chainer.Variable(Ww_data)
    self.args = args
    self.load_model()
    self.create_dir()
    self.get_img_var()
    self.create_target()
    self.create_image_plane()
    self.prepare_optimizer()
    self.create_lr_schedule()
Example 2: update
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def update(self, s, i):
    """Update decoder state

    Args:
        s (any): Current (hidden, cell) states. If ``None`` is specified
            zero-vector is used.
        i (int): input label.

    Return:
        (~chainer.Variable) updated decoder state
    """
    if cuda.get_device_from_array(s[0].data).id >= 0:
        xp = cuda.cupy
    else:
        xp = np
    v = chainer.Variable(xp.array([i], dtype=np.int32))
    x = self.embed(v)
    if s is not None:
        hy, cy, dy = self.lstm(s[0], s[1], [x])
    else:
        hy, cy, dy = self.lstm(None, None, [x])
    return hy, cy, dy
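Example 2 chooses the array module by inspecting which device the state array already lives on. Chainer also provides cuda.get_array_module, which returns cupy or numpy directly from an array and avoids the explicit device check. A minimal sketch of the same dispatch, where state is a stand-in for the example's s[0].data and could equally be a CuPy array:

import numpy as np
import chainer
from chainer import cuda

state = np.zeros((1, 4), dtype=np.float32)           # stand-in for s[0].data
xp = cuda.get_array_module(state)                    # numpy here, cupy for a GPU-resident array
v = chainer.Variable(xp.array([3], dtype=xp.int32))  # build the label Variable with the chosen module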
Example 3: __call__
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def __call__(self, trainer):
    iteration = trainer.updater.iteration
    with cuda.get_device_from_id(trainer.updater.get_optimizer('main').target._device_id), chainer.using_config('train', False):
        self.xp = np if trainer.updater.get_optimizer('main').target._device_id < 0 else cuda.cupy
        image = self.xp.asarray(self.image)
        predictor = trainer.updater.get_optimizer('main').target.predictor
        predictions, rois, bboxes = predictor(image[self.xp.newaxis, ...])

        backprop_visualizations = []
        for visanchor in self.visualization_anchors:
            vis_target = predictor
            for target in visanchor:
                vis_target = getattr(vis_target, target)
            backprop_visualizations.append(self.visual_backprop.perform_visual_backprop(vis_target))

        self.render_rois(predictions, rois, bboxes, iteration, self.image.copy(), backprop_vis=backprop_visualizations)
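Example 3 runs the predictor inside chainer.using_config('train', False) so that train-only behaviour such as dropout is switched off during visualization. A minimal sketch of that inference pattern, where the small Linear link and input array are only stand-ins for the example's trained predictor and image:

import numpy as np
import chainer
import chainer.links as L

predictor = L.Linear(3, 2)                      # stand-in for the trained predictor
x = np.zeros((1, 3), dtype=np.float32)
with chainer.using_config('train', False), chainer.no_backprop_mode():
    y = predictor(x)                            # test-time behaviour, no computational graph retained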
Example 4: backward_gpu
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def backward_gpu(self, inputs, grad_outputs):
    cupy = cuda.cupy
    x, t = inputs[:2]
    y = self.y

    gloss = grad_outputs[0]
    g_log_p = y
    g_log_p[cupy.arange(len(t)), cupy.maximum(t, 0)] -= 1
    g_log_p *= (t != self.ignore_label).reshape((len(t), 1))
    if self.reduce == 'mean':
        g_log_p *= gloss * self._coeff
    else:
        g_log_p *= gloss[:, None]
    ret = super(AdaptiveSoftmaxCrossEntropy, self).backward(
        inputs, (g_log_p, ))
    return ret
Example 5: backprop_check
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def backprop_check():
    xp = cuda.cupy if config.use_gpu else np
    duel = DDQN()

    state = xp.random.uniform(-1.0, 1.0, (2, config.rl_agent_history_length * config.ale_screen_channels, config.ale_scaled_screen_size[1], config.ale_scaled_screen_size[0])).astype(xp.float32)
    reward = [1, 0]
    action = [3, 4]
    episode_ends = [0, 0]
    next_state = xp.random.uniform(-1.0, 1.0, (2, config.rl_agent_history_length * config.ale_screen_channels, config.ale_scaled_screen_size[1], config.ale_scaled_screen_size[0])).astype(xp.float32)

    optimizer_conv = optimizers.Adam(alpha=config.rl_learning_rate, beta1=config.rl_gradient_momentum)
    optimizer_conv.setup(duel.conv)
    optimizer_fc = optimizers.Adam(alpha=config.rl_learning_rate, beta1=config.rl_gradient_momentum)
    optimizer_fc.setup(duel.fc)

    for i in range(10000):
        optimizer_conv.zero_grads()
        optimizer_fc.zero_grads()
        loss, _ = duel.forward_one_step(state, action, reward, next_state, episode_ends)
        loss.backward()
        optimizer_conv.update()
        optimizer_fc.update()
        print(loss.data,
              duel.conv.layer_2.W.data[0, 0, 0, 0],
              duel.fc.layer_2.W.data[0, 0])
Example 6: __init__
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def __init__(self, *, iterator, noise_iterator, optimizer_generator,
             optimizer_critic, device=-1):
    if optimizer_generator.target.name is None:
        optimizer_generator.target.name = 'generator'
    if optimizer_critic.target.name is None:
        optimizer_critic.target.name = 'critic'

    iterators = {'main': iterator, 'z': noise_iterator}
    optimizers = {'generator': optimizer_generator,
                  'critic': optimizer_critic}

    super().__init__(iterators, optimizers, device=device)

    if device >= 0:
        cuda.get_device(device).use()
        [optimizer.target.to_gpu() for optimizer in optimizers.values()]

    self.xp = cuda.cupy if device >= 0 else np
Example 7: __init__
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def __init__(self, in_channels, out_channels, ksize, stride=1, real=0, wscale=1.0):
    super(ConvolutionRBM, self).__init__(
        conv=L.Convolution2D(in_channels, out_channels, ksize, stride=stride, wscale=wscale),
    )
    # if gpu >= 0:
    #     cuda.check_cuda_available()
    #     xp = cuda.cupy  # if gpu >= 0 else np
    self.conv.add_param("a", in_channels)  # dtype=xp.float32
    self.conv.a.data.fill(0.)
    self.in_channels = in_channels
    self.out_channels = out_channels
    self.ksize = ksize
    self.real = real
    self.rbm_train = False  # default value is False
Example 8: sample_h_given_v
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def sample_h_given_v(self, v0_sample):
    """Get a sample of the hiddens by Gibbs sampling.

    :param v0_sample: Variable, see vis above
    :return:
        h1_mean: Variable Matrix(batch_size, out_channels, image_height_out, image_width_out)
        h1_sample: Variable Matrix(batch_size, out_channels, image_height_out, image_width_out)
            - actual sample for hidden units, populated by 0 or 1.
    """
    h1_mean = self.propup(v0_sample)
    xp = cuda.get_array_module(h1_mean.data)
    if xp == cuda.cupy:
        h1_sample = cuda.cupy.random.random_sample(size=h1_mean.data.shape)
        h1_sample[:] = h1_sample[:] < h1_mean.data[:]
    else:  # xp == np
        h1_sample = np.random.binomial(size=h1_mean.data.shape, n=1, p=h1_mean.data)
    return h1_mean, Variable(h1_sample.astype(xp.float32))
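The GPU branch in example 8 draws uniform random numbers and thresholds them against the activation probabilities, which is exactly a Bernoulli (n=1 binomial) sample; presumably the CuPy version in use lacked random.binomial, so this is the usual workaround. A minimal NumPy-only sketch of the same trick:

import numpy as np

p = np.array([[0.1, 0.9], [0.5, 0.5]], dtype=np.float32)  # activation probabilities
u = np.random.random_sample(size=p.shape)                  # uniform samples in [0, 1)
sample = (u < p).astype(np.float32)                        # 1 with probability p, otherwise 0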
Example 9: mean_squared_error
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def mean_squared_error(x0, x1, ignore_nan=False):
    """Mean squared error function.

    This function computes the mean squared error between two variables. The
    mean is taken over the minibatch. Note that the error is not scaled by 1/2.

    Args:
        x0 (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
            :class:`cupy.ndarray`): Input variable.
        x1 (:class:`~chainer.Variable` or :class:`numpy.ndarray` or \
            :class:`cupy.ndarray`): Input variable.
        ignore_nan (bool): If ``True``, this function computes the mean squared
            error ignoring NaNs. The arithmetic mean is the sum of the non-NaN
            elements divided by the total number of elements.

    Returns:
        ~chainer.Variable:
            A variable holding an array representing the mean squared
            error of two inputs.
    """
    return MeanSquaredError(ignore_nan).apply((x0, x1))[0]
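A short usage sketch of this function, assuming MeanSquaredError is the custom function class defined alongside it in the same module (this is not the stock chainer.functions.mean_squared_error, which has no ignore_nan flag):

import numpy as np
import chainer

x0 = chainer.Variable(np.array([1.0, 2.0, np.nan], dtype=np.float32))
x1 = chainer.Variable(np.array([1.5, 2.0, 0.0], dtype=np.float32))
loss = mean_squared_error(x0, x1, ignore_nan=True)  # NaN terms contribute zero to the sum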
Example 10: train
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def train(epoch=10, batch_size=32, gpu=False):
    if gpu:
        cuda.check_cuda_available()
    xp = cuda.cupy if gpu else np

    td = TrainingData(LABEL_FILE, img_root=IMAGES_ROOT, image_property=IMAGE_PROP)

    # make mean image
    if not os.path.isfile(MEAN_IMAGE_FILE):
        print("make mean image...")
        td.make_mean_image(MEAN_IMAGE_FILE)
    else:
        td.mean_image_file = MEAN_IMAGE_FILE

    # train model
    label_def = LabelingMachine.read_label_def(LABEL_DEF_FILE)
    model = alex.Alex(len(label_def))
    optimizer = optimizers.MomentumSGD(lr=0.01, momentum=0.9)
    optimizer.setup(model)
    epoch = epoch
    batch_size = batch_size

    print("Now our model is {0} classification task.".format(len(label_def)))
    print("begin training the model. epoch:{0} batch size:{1}.".format(epoch, batch_size))

    if gpu:
        model.to_gpu()

    for i in range(epoch):
        print("epoch {0}/{1}: (learning rate={2})".format(i + 1, epoch, optimizer.lr))
        td.shuffle(overwrite=True)

        for x_batch, y_batch in td.generate_batches(batch_size):
            x = chainer.Variable(xp.asarray(x_batch))
            t = chainer.Variable(xp.asarray(y_batch))
            optimizer.update(model, x, t)

        print("loss: {0}, accuracy: {1}".format(float(model.loss.data), float(model.accuracy.data)))
        serializers.save_npz(MODEL_FILE, model)
        optimizer.lr *= 0.97
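In the inner loop, optimizer.update(model, x, t) is Chainer's shorthand form: it calls model(x, t) as the loss function, clears the gradients, runs backward, and applies the parameter update. A minimal sketch of the equivalent explicit steps, assuming model(x, t) returns a scalar loss Variable as in the example above:

loss = model(x, t)      # forward pass computes the loss
model.cleargrads()      # zero any accumulated gradients
loss.backward()         # backpropagate
optimizer.update()      # apply the gradients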
Example 11: test_calls_gpu
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def test_calls_gpu(self):
    self.linear.to_gpu(0)
    self._test_calls(cuda.cupy)
Example 12: test_calls_gpu_after_to_gpu
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def test_calls_gpu_after_to_gpu(self):
    mu = self.linear.mu
    mu.to_gpu(0)
    self.linear = noisy_linear.FactorizedNoisyLinear(mu)
    self._test_calls(cuda.cupy)
Example 13: test_randomness_gpu
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def test_randomness_gpu(self):
    self.linear.to_gpu(0)
    self._test_randomness(cuda.cupy)
Example 14: test_non_randomness_gpu
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def test_non_randomness_gpu(self):
    self.linear.to_gpu(0)
    self._test_non_randomness(cuda.cupy)
Example 15: extract_feature
# Required import: from chainer import cuda [as alias]
# Or: from chainer.cuda import cupy [as alias]
def extract_feature(self, x):
    xp = cuda.cupy if self.args.gpu >= 0 else np
    middles = dict(self.model.middle_layers(x))
    middle = middles[self.args.layer]
    if self.args.channels < 0:
        return middle
    else:
        m = middle.data[:, self.args.channels, :, :]
        m = xp.expand_dims(m, axis=1)
        middle = chainer.Variable(m)
        return middle
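Whichever array module produced the feature map, cuda.to_cpu converts it back to a NumPy array for saving or plotting; it is a no-op for arrays that are already on the host. A one-line sketch, assuming middle is the Variable returned above:

from chainer import cuda

feature = cuda.to_cpu(middle.data)  # device-to-host copy for CuPy arrays, no-op for NumPy arrays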