This article collects typical usage examples of the Python method chainer.Variable.to_gpu. If you have been wondering what Variable.to_gpu does, how to call it, or what real code using it looks like, the curated examples below should help. You can also browse further usage examples of the containing class, chainer.Variable.
The section below shows 15 code examples of Variable.to_gpu, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
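Before the examples, here is a minimal sketch of the basic call pattern, assuming a Chainer installation with CUDA/CuPy support; the array shape and device id are arbitrary placeholders, not taken from any of the examples below.

import numpy as np
from chainer import Variable

# Wrap a host (NumPy) array in a Variable; its .data lives on the CPU at first.
x = Variable(np.zeros((2, 3), dtype=np.float32))

# Move the underlying array to GPU memory in place
# (optionally pass a device id, e.g. x.to_gpu(0)).
x.to_gpu()

# ... run the GPU computation ...

# Copy the data back to host memory when it is needed on the CPU.
x.to_cpu()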
Example 1: transform
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def transform(self, data, test=False):
    # Make sure that data has the right shape.
    if not type(data) == Variable:
        if len(data.shape) < 4:
            data = data[np.newaxis]
        if len(data.shape) != 4:
            raise TypeError("Invalid dimensions for image data. Dim = %s. Must be 4d array." % str(data.shape))
        if data.shape[1] != self.color_channels:
            if data.shape[-1] == self.color_channels:
                data = data.transpose(0, 3, 1, 2)
            else:
                raise TypeError("Invalid dimensions for image data. Dim = %s"
                                % str(data.shape))
        data = Variable(data)
    else:
        if len(data.data.shape) < 4:
            data.data = data.data[np.newaxis]
        if len(data.data.shape) != 4:
            raise TypeError("Invalid dimensions for image data. Dim = %s. Must be 4d array." % str(data.data.shape))
        if data.data.shape[1] != self.color_channels:
            if data.data.shape[-1] == self.color_channels:
                data.data = data.data.transpose(0, 3, 1, 2)
            else:
                raise TypeError("Invalid dimensions for image data. Dim = %s"
                                % str(data.data.shape))

    # Actual transformation.
    if self.flag_gpu:
        data.to_gpu()
    z = self._encode(data, test=test)[0]
    z.to_cpu()

    return z.data
Example 2: AdamLearner
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
class AdamLearner(Link):

    def __init__(self, dim):
        super(AdamLearner, self).__init__(
            beta1=(dim, ),
            beta2=(dim, )
        )
        self.beta1.data.fill(-1e12)
        self.beta2.data.fill(-1e12)
        self.m = Variable(np.zeros_like(self.beta1.data))
        self.v = Variable(np.zeros_like(self.beta2.data))

    def to_gpu(self, device=None):
        super(AdamLearner, self).to_gpu(device)
        self.m.to_gpu(device)
        self.v.to_gpu(device)

    def __call__(self, x):
        f1 = F.sigmoid(self.beta1)
        f2 = F.sigmoid(self.beta2)
        #self.m = f1 * self.m + (1 - f1) * x
        #self.v = f2 * self.v + (1 - f2) * x**2
        self.m = self.beta1 * self.m + (1 - self.beta1) * x
        self.v = self.beta2 * self.v + (1 - self.beta2) * x**2
        g = 1e-3 * self.m / F.sqrt(self.v + 1e-8)
        return g
Example 3: sample_z_from_n_2d_gaussian_mixture
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def sample_z_from_n_2d_gaussian_mixture(batchsize, z_dim, label_indices, n_labels, gpu=False):
    if z_dim % 2 != 0:
        raise Exception("z_dim must be a multiple of 2.")

    def sample(x, y, label, n_labels):
        shift = 1.4
        r = 2.0 * np.pi / float(n_labels) * float(label)
        new_x = x * cos(r) - y * sin(r)
        new_y = x * sin(r) + y * cos(r)
        new_x += shift * cos(r)
        new_y += shift * sin(r)
        return np.array([new_x, new_y]).reshape((2,))

    x_var = 0.5
    y_var = 0.05
    x = np.random.normal(0, x_var, (batchsize, z_dim / 2))
    y = np.random.normal(0, y_var, (batchsize, z_dim / 2))
    z = np.empty((batchsize, z_dim), dtype=np.float32)
    for batch in xrange(batchsize):
        for zi in xrange(z_dim / 2):
            z[batch, zi*2:zi*2+2] = sample(x[batch, zi], y[batch, zi], label_indices[batch], n_labels)

    z = Variable(z)
    if gpu:
        z.to_gpu()
    return z
Example 4: visualize_walkthrough
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def visualize_walkthrough():
    x_batch = sample_x_from_data_distribution(20)
    z_batch = gen(x_batch, test=True)
    if use_gpu:
        z_batch.to_cpu()

    fig = pylab.gcf()
    fig.set_size_inches(16.0, 16.0)
    pylab.clf()
    if config.img_channel == 1:
        pylab.gray()

    z_a = z_batch.data[:10, :]
    z_b = z_batch.data[10:, :]
    for col in range(10):
        _z_batch = z_a * (1 - col / 9.0) + z_b * col / 9.0
        _z_batch = Variable(_z_batch)
        if use_gpu:
            _z_batch.to_gpu()
        _x_batch = dec(_z_batch, test=True)
        if use_gpu:
            _x_batch.to_cpu()

        for row in range(10):
            pylab.subplot(10, 10, row * 10 + col + 1)
            if config.img_channel == 1:
                pylab.imshow(np.clip((_x_batch.data[row] + 1.0) / 2.0, 0.0, 1.0).reshape((config.img_width, config.img_width)), interpolation="none")
            elif config.img_channel == 3:
                pylab.imshow(np.clip((_x_batch.data[row] + 1.0) / 2.0, 0.0, 1.0).reshape((config.img_channel, config.img_width, config.img_width)), interpolation="none")
            pylab.axis("off")

    pylab.savefig("%s/walk_through.png" % args.visualization_dir)
Example 5: forward_one_step
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def forward_one_step(self, state, action, reward, next_state, test=False):
    xp = cuda.cupy if config.use_gpu else np
    n_batch = state.shape[0]
    state = Variable(state.reshape((n_batch, config.rl_history_length * 34)))
    next_state = Variable(next_state.reshape((n_batch, config.rl_history_length * 34)))
    if config.use_gpu:
        state.to_gpu()
        next_state.to_gpu()
    q = self.compute_q_variable(state, test=test)

    q_ = self.compute_q_variable(next_state, test=test)
    max_action_indices = xp.argmax(q_.data, axis=1)
    if config.use_gpu:
        max_action_indices = cuda.to_cpu(max_action_indices)

    target_q = self.compute_target_q_variable(next_state, test=test)

    target = q.data.copy()

    for i in xrange(n_batch):
        max_action_index = max_action_indices[i]
        target_value = reward[i] + config.rl_discount_factor * target_q.data[i][max_action_index]
        action_index = self.get_index_for_action(action[i])
        old_value = target[i, action_index]
        diff = target_value - old_value
        if diff > 1.0:
            target_value = 1.0 + old_value
        elif diff < -1.0:
            target_value = -1.0 + old_value
        target[i, action_index] = target_value

    target = Variable(target)
    loss = F.mean_squared_error(target, q)
    return loss, q
Example 6: inverse_transform
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def inverse_transform(self, data, test=False):
    if not type(data) == Variable:
        if len(data.shape) < 2:
            data = data[np.newaxis]
        if len(data.shape) != 2:
            raise TypeError("Invalid dimensions for latent data. Dim = %s. Must be a 2d array." % str(data.shape))
        data = Variable(data)
    else:
        if len(data.data.shape) < 2:
            data.data = data.data[np.newaxis]
        if len(data.data.shape) != 2:
            raise TypeError("Invalid dimensions for latent data. Dim = %s. Must be a 2d array." % str(data.data.shape))

    assert data.data.shape[-1] == self.latent_width, "Latent shape %d != %d" % (data.data.shape[-1], self.latent_width)
    if self.flag_gpu:
        data.to_gpu()
    out = self._decode(data, test=test)
    out.to_cpu()

    if self.mode == 'linear':
        final = out.data
    else:
        final = out.data.transpose(0, 2, 3, 1)

    return final
Example 7: sample_z_from_n_2d_gaussian_mixture
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def sample_z_from_n_2d_gaussian_mixture(batchsize, z_dim, label_indices, n_labels, gpu=False):
    z = np.zeros((batchsize, z_dim), dtype=np.float32)
    for i in range(batchsize):
        z1 = np.random.normal(0.5, 0.2, 1)
        z2 = np.random.normal(0.5, 0.2, 1)
        z[i] = np.array([z1, z2]).reshape((2,))
    z = Variable(z)
    if gpu:
        z.to_gpu()
    return z
Example 8: encode
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def encode(self, data, test=False):
    x = self.enc(data, test=test)
    mean, ln_var = F.split_axis(x, 2, 1)
    samp = np.random.standard_normal(mean.data.shape).astype('float32')
    samp = Variable(samp)
    if self.flag_gpu:
        samp.to_gpu()
    z = samp * F.exp(0.5*ln_var) + mean
    return z, mean, ln_var
Example 9: multi_box_intersection
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def multi_box_intersection(a, b):
    w = multi_overlap(a.x, a.w, b.x, b.w)
    h = multi_overlap(a.y, a.h, b.y, b.h)
    zeros = Variable(np.zeros(w.shape, dtype=w.data.dtype))
    zeros.to_gpu()

    w = F.maximum(w, zeros)
    h = F.maximum(h, zeros)
    area = w * h
    return area
Example 10: sample_x_from_data_distribution
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def sample_x_from_data_distribution(batchsize):
    shape = config.img_channel * config.img_width * config.img_width
    x_batch = np.zeros((batchsize, shape), dtype=np.float32)
    for j in range(batchsize):
        data_index = np.random.randint(len(dataset))
        img = dataset[data_index]
        x_batch[j] = img.reshape((shape,))
    x_batch = Variable(x_batch)
    if config.use_gpu:
        x_batch.to_gpu()
    return x_batch
Example 11: forward_one_step
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def forward_one_step(self, state, action, reward, next_state, episode_ends, test=False):
    xp = cuda.cupy if config.use_gpu else np
    n_batch = state.shape[0]
    state = Variable(state)
    next_state = Variable(next_state)
    if config.use_gpu:
        state.to_gpu()
        next_state.to_gpu()
    q = self.compute_q_variable(state, test=test)

    q_ = self.compute_q_variable(next_state, test=test)
    max_action_indices = xp.argmax(q_.data, axis=1)
    if config.use_gpu:
        max_action_indices = cuda.to_cpu(max_action_indices)

    # Generate target
    target_q = self.compute_target_q_variable(next_state, test=test)

    # Initialize the target signal with the current Q values
    target = q.data.copy()

    for i in xrange(n_batch):
        # Clip all positive rewards at 1 and all negative rewards at -1
        if episode_ends[i] is True:
            target_value = np.sign(reward[i])
        else:
            max_action_index = max_action_indices[i]
            target_value = np.sign(reward[i]) + config.rl_discount_factor * target_q.data[i][max_action_index]

        action_index = self.get_index_with_action(action[i])
        # Propagate the error only for the action that was actually taken;
        # for every other action the squared error is 0 (because target == q there).
        old_value = target[i, action_index]
        diff = target_value - old_value
        # target differs from q only at the element corresponding to the taken action,
        # so it acts as a one-hot error signal over actions.
        # Clip the error term to the range [-1, 1].
        if diff > 1.0:
            target_value = 1.0 + old_value
        elif diff < -1.0:
            target_value = -1.0 + old_value
        target[i, action_index] = target_value

    target = Variable(target)

    # Compute error
    loss = F.mean_squared_error(target, q)
    return loss, q
Example 12: train_word_embedding_batch
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def train_word_embedding_batch(self, char_ids_batch):
    xp = self.xp
    word_vec = self.encode_word_batch(char_ids_batch)

    batchsize = char_ids_batch.shape[0]
    char_ids_batch = char_ids_batch.T

    # reconstruction loss
    loss_reconstruction = 0
    self.word_decoder_lstm.reset_state()
    prev_y = None
    for i in xrange(char_ids_batch.shape[0]):
        if prev_y is None:
            prev_y = Variable(xp.zeros((batchsize, self.char_embed_size), dtype=xp.float32))
        dec_in = F.concat((word_vec, prev_y))
        y = self.word_decoder_lstm(dec_in, test=False)
        target = Variable(char_ids_batch[i])
        if self.gpu_enabled:
            target.to_gpu()
        loss = F.softmax_cross_entropy(y, target)
        prev_y = self.embed_id(target)
        loss_reconstruction += loss

    self.zero_grads_generator()
    loss_reconstruction.backward()
    self.update_generator()

    # adversarial loss
    ## 0: from encoder
    ## 1: from noise
    real_z = self.sample_z(batchsize, self.word_embed_size)
    fake_z = word_vec
    y_fake = self.discriminator(fake_z, test=False)

    ## train generator
    loss_generator = F.softmax_cross_entropy(y_fake, Variable(xp.ones((batchsize,), dtype=xp.int32)))

    self.zero_grads_generator()
    loss_generator.backward()
    self.update_generator()

    # train discriminator
    y_real = self.discriminator(real_z, test=False)
    loss_discriminator = F.softmax_cross_entropy(y_fake, Variable(xp.zeros((batchsize,), dtype=xp.int32)))
    loss_discriminator += F.softmax_cross_entropy(y_real, Variable(xp.ones((batchsize,), dtype=xp.int32)))

    self.optimizer_discriminator.zero_grads()
    loss_discriminator.backward()
    self.optimizer_discriminator.update()

    return float(loss_reconstruction.data), float(loss_generator.data), float(loss_discriminator.data)
Example 13: sample_x_and_label_from_data_distribution
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def sample_x_and_label_from_data_distribution(batchsize, sequential=False):
    shape = config.img_channel * config.img_width * config.img_width
    x_batch = np.zeros((batchsize, shape), dtype=np.float32)
    label_batch = np.zeros((batchsize, 1), dtype=np.int32)
    for j in range(batchsize):
        data_index = np.random.randint(len(dataset))
        if sequential:
            data_index = j
        img = dataset[data_index]
        x_batch[j] = img.reshape((shape,))
        label_batch[j] = labels[data_index]
    x_batch = Variable(x_batch)
    if use_gpu:
        x_batch.to_gpu()
    return x_batch, label_batch
Example 14: sample_x_and_label_from_data_distribution
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def sample_x_and_label_from_data_distribution(batchsize):
    shape = config.img_channel * config.img_width * config.img_width
    x_batch = np.zeros((batchsize, shape), dtype=np.float32)
    label_index_batch = np.zeros((batchsize, 1), dtype=np.int32)
    label_one_hot = np.zeros((batchsize, 10), dtype=np.float32)
    for j in range(batchsize):
        data_index = np.random.randint(len(dataset))
        img = dataset[data_index]
        x_batch[j] = img.reshape((shape,))
        label_index_batch[j] = labels[data_index]
        label_one_hot[j, labels[data_index]] = 1.0
    x_batch = Variable(x_batch)
    label_one_hot = Variable(label_one_hot)
    if config.use_gpu:
        x_batch.to_gpu()
        label_one_hot.to_gpu()
    return x_batch, label_index_batch, label_one_hot
Example 15: sample_z_from_swiss_roll_distribution
# Required import: from chainer import Variable [as alias]
# Or: from chainer.Variable import to_gpu [as alias]
def sample_z_from_swiss_roll_distribution(batchsize, z_dim, label_indices, n_labels, gpu=False):
    def sample(label, n_labels):
        uni = np.random.uniform(0.0, 1.0) / float(n_labels) + float(label) / float(n_labels)
        r = math.sqrt(uni) * 3.0
        rad = np.pi * 4.0 * math.sqrt(uni)
        x = r * cos(rad)
        y = r * sin(rad)
        return np.array([x, y]).reshape((2,))

    z = np.zeros((batchsize, z_dim), dtype=np.float32)
    for batch in xrange(batchsize):
        for zi in xrange(z_dim / 2):
            z[batch, zi*2:zi*2+2] = sample(label_indices[batch], n_labels)

    z = Variable(z)
    if gpu:
        z.to_gpu()
    return z