This page collects typical usage examples of the Python method chainer.cuda.to_cpu. If you are unsure what cuda.to_cpu does, how to call it, or what real-world uses look like, the curated examples below should help. You can also browse further usage examples for the module this method lives in, chainer.cuda.
The following shows 15 code examples of cuda.to_cpu, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps surface better Python code examples.
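Before diving into the project-level examples, here is a minimal self-contained sketch of the API itself; the GPU branch is guarded so it only runs when a CUDA-capable CuPy installation is present (an assumption, not a requirement of this page):

import numpy as np
from chainer import cuda

# to_cpu leaves NumPy arrays on the host and copies CuPy arrays back to host memory.
x = np.arange(6, dtype=np.float32)
x_host = cuda.to_cpu(x)            # already on the CPU; returned as a numpy.ndarray

if cuda.available:                 # round-trip through the GPU only if CUDA is usable
    x_gpu = cuda.to_gpu(x)         # numpy.ndarray -> cupy.ndarray on the current device
    x_back = cuda.to_cpu(x_gpu)    # cupy.ndarray -> numpy.ndarray
    np.testing.assert_allclose(x_back, x)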
Example 1: batch_act
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def batch_act(self, batch_obs):
    """Select a batch of actions for evaluation.

    Args:
        batch_obs (Sequence of ~object): Observations.

    Returns:
        Sequence of ~object: Actions.
    """
    with chainer.using_config('train', False), chainer.no_backprop_mode():
        batch_xs = self.batch_states(batch_obs, self.xp, self.phi)
        batch_action = self.policy(batch_xs).sample()
        # Q is not needed here, but log it just for information
        q = self.q_function(batch_xs, batch_action)

    # Update stats
    self.average_q *= self.average_q_decay
    self.average_q += (1 - self.average_q_decay) * float(
        q.array.mean(axis=0))
    self.logger.debug('t:%s a:%s q:%s',
                      self.t, batch_action.array[0], q.array)
    return [cuda.to_cpu(action.array) for action in batch_action]
Example 2: batch_act_and_train
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def batch_act_and_train(self, batch_obs):
    with chainer.using_config('train', False), chainer.no_backprop_mode():
        batch_av = self._evaluate_model_and_update_recurrent_states(
            batch_obs, test=False)
        batch_maxq = batch_av.max.array
        batch_argmax = cuda.to_cpu(batch_av.greedy_actions.array)
    batch_action = [
        self.explorer.select_action(
            self.t, lambda: batch_argmax[i],
            action_value=batch_av[i:i + 1],
        )
        for i in range(len(batch_obs))]
    self.batch_last_obs = list(batch_obs)
    self.batch_last_action = list(batch_action)

    # Update stats
    self.average_q *= self.average_q_decay
    self.average_q += (1 - self.average_q_decay) * float(batch_maxq.mean())
    return batch_action
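The explorer used above follows ChainerRL's Explorer interface: select_action receives the current step, a callable returning the greedy action, and optionally the action values. A typical instantiation looks like the sketch below; the epsilon schedule and the four-action space are illustrative assumptions, not values taken from this example:

from chainerrl import explorers
import numpy as np

explorer = explorers.LinearDecayEpsilonGreedy(
    start_epsilon=1.0, end_epsilon=0.1, decay_steps=10 ** 4,
    random_action_func=lambda: np.random.randint(4))  # hypothetical 4-action discrete space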
Example 3: _compute_loss
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def _compute_loss(self, exp_batch, errors_out=None):
    """Compute a loss.

    Returns:
        chainer.Variable: Scalar loss.
    """
    y, taus = self._compute_y_and_taus(exp_batch)
    with chainer.no_backprop_mode():
        t = self._compute_target_values(exp_batch)

    eltwise_loss = compute_eltwise_huber_quantile_loss(y, t, taus)

    if errors_out is not None:
        del errors_out[:]
        delta = F.mean(eltwise_loss, axis=(1, 2))
        errors_out.extend(cuda.to_cpu(delta.array))

    if 'weights' in exp_batch:
        return compute_weighted_value_loss(
            eltwise_loss, exp_batch['weights'],
            batch_accumulator=self.batch_accumulator)
    else:
        return compute_value_loss(
            eltwise_loss, batch_accumulator=self.batch_accumulator)
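compute_eltwise_huber_quantile_loss itself is not shown on this page. As a rough reference only, the standard quantile Huber loss used by IQN-style agents can be sketched in plain NumPy as follows; the shapes and the kappa default are assumptions for illustration, not the library's actual signature:

import numpy as np

def quantile_huber_loss(y, t, taus, kappa=1.0):
    # y: (batch, n_taus) predicted quantiles; t: (batch, n_target) target samples;
    # taus: (batch, n_taus) quantile fractions in (0, 1).
    u = t[:, None, :] - y[:, :, None]                   # pairwise TD errors
    abs_u = np.abs(u)
    huber = np.where(abs_u <= kappa,
                     0.5 * u ** 2,
                     kappa * (abs_u - 0.5 * kappa))     # element-wise Huber loss
    weight = np.abs(taus[:, :, None] - (u < 0).astype(u.dtype))
    return weight * huber / kappa                       # (batch, n_taus, n_target), matching F.mean(..., axis=(1, 2)) above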
Example 4: _compute_loss
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def _compute_loss(self, exp_batch, errors_out=None):
    """Compute a loss of categorical DQN."""
    y, t = self._compute_y_and_t(exp_batch)
    # Minimize the cross entropy
    # y is clipped to avoid log(0)
    eltwise_loss = -t * F.log(F.clip(y, 1e-10, 1.))

    if errors_out is not None:
        del errors_out[:]
        # The loss per example is the sum of the atom-wise loss
        # Prioritization by KL divergence
        delta = F.sum(eltwise_loss, axis=1)
        delta = cuda.to_cpu(delta.array)
        for e in delta:
            errors_out.append(e)

    if 'weights' in exp_batch:
        return compute_weighted_value_loss(
            eltwise_loss, y.shape[0], exp_batch['weights'],
            batch_accumulator=self.batch_accumulator)
    else:
        return compute_value_loss(
            eltwise_loss, batch_accumulator=self.batch_accumulator)
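For reference, the per-example error used for prioritization above is just the cross entropy between the target atom distribution t and the clipped prediction y; a plain NumPy equivalent of that term (a sketch, not the library function) is:

import numpy as np

def categorical_ce(y, t, eps=1e-10):
    # y, t: (batch, n_atoms) probability distributions over the value support
    return -(t * np.log(np.clip(y, eps, 1.0))).sum(axis=1)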
Example 5: iterate_eos_scores
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def iterate_eos_scores(new_scores, eos_idx, existing_cases=None,
                       beam_width=None) -> Tuple[Sequence, Sequence, Sequence]:
    """
    Return the indices and scores corresponding to the eos word.
    Meaning of returned values is the same as for iterate_best_score
    """
    nb_cases, v_size = new_scores.shape
    num_cases = np.arange(nb_cases, dtype=np.int32)
    scores = -cuda.to_cpu(new_scores[:, eos_idx])
    if existing_cases is not None:
        need_to_return = np.logical_not(np.isin(num_cases, existing_cases))
        num_cases = num_cases[need_to_return]
        scores = scores[need_to_return]

    idx_in_cases = np.full(num_cases.shape[0], eos_idx, dtype=np.int32)

    if beam_width is not None:
        if beam_width < len(scores):
            idx_to_keep = np.argpartition(scores, beam_width)[:beam_width]
            scores = scores[idx_to_keep]
            num_cases = num_cases[idx_to_keep]
            idx_in_cases = idx_in_cases[idx_to_keep]

    return num_cases, idx_in_cases, scores
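The beam pruning above relies on np.argpartition, which selects the beam_width smallest scores in linear time without a full sort. A tiny standalone illustration with toy scores:

import numpy as np

scores = np.array([0.9, 0.1, 0.5, 0.3, 0.7], dtype=np.float32)
beam_width = 3
keep = np.argpartition(scores, beam_width)[:beam_width]  # indices of the 3 smallest scores
print(sorted(scores[keep]))                              # [0.1, 0.3, 0.5]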
Example 6: _pl_sample
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def _pl_sample(t, α):
    """
    Sample from the Plackett-Luce distribution directly

    :param t: The target labels
    :return: A random permutation from the Plackett-Luce distribution
             parameterized by the target labels
    """
    xp = cuda.get_array_module(t)
    t = t[:, 0]

    probs = xp.exp(t * α)
    probs /= xp.sum(probs)
    # Use CPU-based numpy implementation, because cupy.random.choice with
    # replace=False does not work
    probs = cuda.to_cpu(probs)
    result = np.random.choice(probs.shape[0], probs.shape[0], replace=False,
                              p=probs)
    return xp.array(result, copy=False)
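A quick CPU-only usage sketch with made-up relevance labels (the scale value passed as α is arbitrary):

import numpy as np

t = np.array([[2.0], [0.0], [1.0]], dtype=np.float32)  # three items, one label column
perm = _pl_sample(t, 1.0)                              # a permutation of [0, 1, 2]
print(perm)                                            # items with higher labels tend to appear first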
Example 7: generate
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def generate(net, image_model, image_path):
    feature = image_model.feature(image_path)
    net.initialize(feature)
    candidates = [(net, [bos], 0)]
    for i in range(max_length):
        next_candidates = []
        for prev_net, tokens, likelihood in candidates:
            if tokens[-1] == eos:
                next_candidates.append((None, tokens, likelihood))
                continue
            net = prev_net.copy()
            x = xp.asarray([tokens[-1]]).astype(np.int32)
            y = F.softmax(net(x))
            token_likelihood = np.log(cuda.to_cpu(y.data[0]))
            order = token_likelihood.argsort()[-beam_width:][::-1]
            next_candidates.extend(
                [(net, tokens + [i], likelihood + token_likelihood[i]) for i in order])
        candidates = sorted(next_candidates, key=lambda x: -x[2])[:beam_width]
        if all([candidate[1][-1] == eos for candidate in candidates]):
            break
    return [candidate[1] for candidate in candidates]
Example 8: __call__
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def __call__(self, inputs):
    pos_x, pos_y, offset_x, ego_x, ego_y, pose_x, pose_y = self._prepare_input(inputs)
    batch_size, past_len, _ = pos_x.shape

    h_pos = self.pos_encoder(pos_x)
    h_ego = self.ego_encoder(ego_x)
    h = F.concat((h_pos, h_ego), axis=1)  # (B, C, 2)
    h = self.inter(h)
    h_pos = self.pos_decoder(h)
    pred_y = self.last(h_pos)  # (B, 10, C+6+28)
    pred_y = F.swapaxes(pred_y, 1, 2)
    pred_y = pred_y[:, :pos_y.shape[1], :]
    loss = F.mean_squared_error(pred_y, pos_y)
    pred_y = pred_y + F.broadcast_to(F.expand_dims(offset_x, 1), pred_y.shape)
    pred_y = cuda.to_cpu(pred_y.data) * self._std + self._mean
    return loss, pred_y, None
Example 9: save_embeddings
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def save_embeddings(path, epoch, model, vocab, metadata, execution_time):
    path = Path(path)
    embeddings = WordEmbeddingsDense()
    embeddings.vocabulary = vocab
    embeddings.metadata.update(metadata)
    embeddings.metadata["vocabulary"] = vocab.metadata
    embeddings.metadata["epoch"] = epoch
    embeddings.metadata["vecto_version"] = vecto.__version__
    embeddings.matrix = cuda.to_cpu(model.getEmbeddings(gpu=metadata["gpu"]))
    if metadata["out_type"] == 'ns':
        model.matrix_context = cuda.to_cpu(model.getEmbeddings_context())
    else:
        model.matrix_context = None
    embeddings.metadata["execution_time"] = execution_time  # time_end - time_start
    embeddings.metadata["embeddings_type"] = "vanilla"
    path_out = path / f"ep_{epoch:03}"
    embeddings.save_to_dir(path_out)
    # if embeddings.matrix_context is not None:
    #     embeddings.matrix = model.matrix_context
    #     embeddings.metadata["embeddings_type"] = "context"
    #     embeddings.save_to_dir(os.path.join(path_out, 'context'))
Example 10: report
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def report(images, out, name, ch_axis=1, row=0, mode=None, batched=True):
    if isinstance(images, chainer.Variable):
        images = images.data
    images = cuda.to_cpu(images)
    if batched:
        stuck_image = _get_stuck_batched_image(images, ch_axis, row)
    else:
        stuck_image = _get_stuck_image(images, ch_axis)
    now = datetime.datetime.now()
    ts = get_unixtime(now)
    filename = '{}_{}.png'.format(name, get_hash('{}'.format(ts)))
    filepath = os.path.join(out, filename)
    _save_image(_normalize_8bit(stuck_image), filepath, mode=mode)
    return filename, now
Example 11: test_forward_cpu_gpu_equal
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def test_forward_cpu_gpu_equal(self):
    # cpu
    x_cpu = chainer.Variable(self.x)
    rois_cpu = chainer.Variable(self.rois)
    roi_indices_cpu = chainer.Variable(self.roi_indices)
    y_cpu = functions.roi_average_align_2d(
        x_cpu, rois_cpu, roi_indices_cpu, outsize=self.outsize,
        spatial_scale=self.spatial_scale,
        sampling_ratio=self.sampling_ratio,
    )

    # gpu
    x_gpu = chainer.Variable(cuda.to_gpu(self.x))
    rois_gpu = chainer.Variable(cuda.to_gpu(self.rois))
    roi_indices_gpu = chainer.Variable(cuda.to_gpu(self.roi_indices))
    y_gpu = functions.roi_average_align_2d(
        x_gpu, rois_gpu, roi_indices_gpu, outsize=self.outsize,
        spatial_scale=self.spatial_scale,
        sampling_ratio=self.sampling_ratio,
    )

    testing.assert_allclose(y_cpu.data, cuda.to_cpu(y_gpu.data))
Example 12: convert
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def convert(batch, device):
    if device is None:
        def to_device(x):
            return x
    elif device < 0:
        to_device = cuda.to_cpu
    else:
        def to_device(x):
            return cuda.to_gpu(x, device, cuda.Stream.null)

    return tuple(
        [to_device(d['lefts']) for d in batch] +
        [to_device(d['rights']) for d in batch] +
        [to_device(d['dests']) for d in batch] +
        [to_device(d['labels']) for d in batch] +
        [to_device(d['words']) for d in batch] +
        [to_device(d['leaf_labels']) for d in batch]
    )
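A hedged usage sketch, calling the converter directly on a toy one-element batch; the field contents are made up purely for illustration:

import numpy as np

batch = [{
    'lefts': np.array([0], dtype=np.int32),
    'rights': np.array([1], dtype=np.int32),
    'dests': np.array([2], dtype=np.int32),
    'labels': np.array([1], dtype=np.int32),
    'words': np.array([5, 7], dtype=np.int32),
    'leaf_labels': np.array([0, 1], dtype=np.int32),
}]
arrays = convert(batch, device=None)  # device=None keeps every array on the host
print(len(arrays))                    # 6: one array per field for the single example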
Example 13: mean_feature
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def mean_feature(net, paths, image_size, base_feature, top_num, batch_size,
                 clip_rect=None):
    xp = net.xp
    image_num = len(paths)
    features = []
    for i in six.moves.range(0, image_num, batch_size):
        x = [preprocess_image(Image.open(path).convert('RGB'), image_size, clip_rect)
             for path in paths[i:i + batch_size]]
        x = xp.asarray(np.concatenate(x, axis=0))
        y = feature(net, x)
        features.append([cuda.to_cpu(layer.data) for layer in y])
    if image_num > top_num:
        last_features = np.concatenate([f[-1] for f in features], axis=0)
        last_features = last_features.reshape((last_features.shape[0], -1))
        base_feature = cuda.to_cpu(base_feature).reshape((1, -1))
        diff = np.sum((last_features - base_feature) ** 2, axis=1)
        nearest_indices = np.argsort(diff)[:top_num]
        nearests = [np.concatenate(xs, axis=0)[nearest_indices]
                    for xs in zip(*features)]
    else:
        nearests = [np.concatenate(xs, axis=0) for xs in zip(*features)]
    return [xp.asarray(np.mean(f, axis=0, keepdims=True)) for f in nearests]
Example 14: iter_apply
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def iter_apply(Xs, Ms, Ys):
    # fns = [lambda x: np.concatenate(x, 0), lambda x: float(np.sum(x))]
    logits = []
    cost = 0
    with chainer.using_config('train', False), \
            chainer.using_config('enable_backprop', False):
        for xmb, mmb, ymb in iter_data(
                Xs, Ms, Ys, n_batch=n_batch_train, truncate=False, verbose=True):
            n = len(xmb)
            XMB = model.xp.asarray(xmb)
            YMB = model.xp.asarray(ymb)
            MMB = model.xp.asarray(mmb)
            h = model(XMB)
            clf_logits = clf_head(h, XMB)
            clf_logits *= n
            clf_losses = compute_loss_fct(
                XMB, YMB, MMB, clf_logits, only_return_losses=True)
            clf_losses *= n
            logits.append(cuda.to_cpu(clf_logits.array))
            cost += cuda.to_cpu(F.sum(clf_losses).array)
        logits = np.concatenate(logits, 0)
    return logits, cost
Example 15: getAndUpdateBufferY
# Required imports: from chainer import cuda [as alias]
# Or: from chainer.cuda import to_cpu [as alias]
def getAndUpdateBufferY(self, data):
    if self._iter < self._max_buffer_size:
        self._buffer_y[self._iter, :] = data[0]
        return data

    self._buffer_y[0:self._max_buffer_size-2, :] = self._buffer_y[1:self._max_buffer_size-1, :]
    self._buffer_y[self._max_buffer_size-1, :] = data[0]

    if np.random.rand() < 0.5:
        return data
    id = np.random.randint(0, self._max_buffer_size)
    return self._buffer_y[id, :].reshape((1, 3, self._image_size, self._image_size))

"""
def save_images(self, img, w=2, h=3):
    img = cuda.to_cpu(img)
    img = img.reshape((w, h, 3, self._image_size, self._image_size))
    img = img.transpose(0, 1, 3, 4, 2)
    img = (img + 1) * 127.5
    img = np.clip(img, 0, 255)
    img = img.astype(np.uint8)
    img = img.reshape((w, h, self._image_size, self._image_size, 3)).transpose(
        0, 2, 1, 3, 4).reshape((w * self._image_size, h * self._image_size, 3))[:, :, ::-1]
    Image.fromarray(img).save(self._eval_foler + "/iter_" + str(self._iter) + ".jpg")
"""