This article collects typical usage examples of the Python method chainer.Variable.data. If you have been wondering how Variable.data is used in practice, the curated code examples below may help. You can also read further about the enclosing class, chainer.Variable.
Six code examples of Variable.data are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code samples.
Example 1: transform
# Required import: from chainer import Variable [as alias]
# Alternatively: from chainer.Variable import data [as alias]
def transform(self, data, test=False):
    # Make sure that data has the right shape.
    if not type(data) == Variable:
        if len(data.shape) < 4:
            data = data[np.newaxis]
        if len(data.shape) != 4:
            raise TypeError("Invalid dimensions for image data. Dim = %s. Must be 4d array." % str(data.shape))
        if data.shape[1] != self.color_channels:
            if data.shape[-1] == self.color_channels:
                data = data.transpose(0, 3, 1, 2)
            else:
                raise TypeError("Invalid dimensions for image data. Dim = %s"
                                % str(data.shape))
        data = Variable(data)
    else:
        if len(data.data.shape) < 4:
            data.data = data.data[np.newaxis]
        if len(data.data.shape) != 4:
            raise TypeError("Invalid dimensions for image data. Dim = %s. Must be 4d array." % str(data.data.shape))
        if data.data.shape[1] != self.color_channels:
            if data.data.shape[-1] == self.color_channels:
                data.data = data.data.transpose(0, 3, 1, 2)
            else:
                raise TypeError("Invalid dimensions for image data. Dim = %s"
                                % str(data.data.shape))
    # Actual transformation.
    if self.flag_gpu:
        data.to_gpu()
    z = self._encode(data, test=test)[0]
    z.to_cpu()
    return z.data
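The shape handling in transform boils down to promoting a single image to a batch of one and moving the channel axis to the front. Here is a minimal, NumPy-only sketch of that normalization step (the function name to_nchw and the default of 3 color channels are assumptions made for illustration, not part of the original class):

import numpy as np

def to_nchw(batch, color_channels=3):
    # Mirror the checks in transform: promote to 4-d and move channels to axis 1.
    batch = np.asarray(batch)
    if batch.ndim < 4:
        batch = batch[np.newaxis]            # single image -> batch of one
    if batch.ndim != 4:
        raise TypeError("Invalid dimensions for image data. Dim = %s" % str(batch.shape))
    if batch.shape[1] != color_channels and batch.shape[-1] == color_channels:
        batch = batch.transpose(0, 3, 1, 2)  # NHWC -> NCHW
    return batch

print(to_nchw(np.zeros((64, 64, 3), dtype=np.float32)).shape)  # (1, 3, 64, 64)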
Example 2: forward_eye_states
# Required import: from chainer import Variable [as alias]
# Alternatively: from chainer.Variable import data [as alias]
def forward_eye_states(self, x_batch_curr, y_batch_curr, volatile):
    current_sample = Variable(x_batch_curr, volatile=volatile)
    y_batch_curr = np.asarray(y_batch_curr).reshape(32, -1)
    current_output = Variable(y_batch_curr, volatile=volatile)
    h1_current = F.sigmoid(self.model_to_use.x_h1(current_sample))
    h2_current = F.sigmoid(self.model_to_use.h1_h2(h1_current))
    h3_current = F.sigmoid(self.model_to_use.h2_h3(h2_current))
    h4_current = F.sigmoid(self.model_to_use.h3_h4(h3_current))
    h4 = h4_current
    y = self.model_to_use.h4_y(h4)
    y.data = y.data.reshape(32, -1)
    loss = F.sigmoid_cross_entropy(y, current_output)
    current_output.data = np.squeeze(current_output.data)
    accuracy = F.accuracy(y, current_output)
    return accuracy, loss, y
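F.sigmoid_cross_entropy above treats each output unit as an independent binary label, which is why both the predictions and the targets are reshaped to (32, -1). Roughly, and ignoring Chainer's ignore-label handling, it computes the following (a NumPy sketch with assumed 32x1 shapes):

import numpy as np

def sigmoid_cross_entropy(logits, targets):
    # Element-wise sigmoid cross entropy, averaged over all elements.
    p = 1.0 / (1.0 + np.exp(-logits))
    eps = 1e-7
    return -np.mean(targets * np.log(p + eps) + (1 - targets) * np.log(1 - p + eps))

logits = np.random.randn(32, 1).astype(np.float32)           # one eye-state logit per sample
targets = np.random.randint(0, 2, (32, 1)).astype(np.int32)  # binary labels
print(sigmoid_cross_entropy(logits, targets))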
Example 3: inverse_transform
# Required import: from chainer import Variable [as alias]
# Alternatively: from chainer.Variable import data [as alias]
def inverse_transform(self, data, test=False):
    if not type(data) == Variable:
        if len(data.shape) < 2:
            data = data[np.newaxis]
        if len(data.shape) != 2:
            raise TypeError("Invalid dimensions for latent data. Dim = %s. Must be a 2d array." % str(data.shape))
        data = Variable(data)
    else:
        if len(data.data.shape) < 2:
            data.data = data.data[np.newaxis]
        if len(data.data.shape) != 2:
            raise TypeError("Invalid dimensions for latent data. Dim = %s. Must be a 2d array." % str(data.data.shape))
    assert data.data.shape[-1] == self.latent_width, "Latent shape %d != %d" % (data.data.shape[-1], self.latent_width)
    if self.flag_gpu:
        data.to_gpu()
    out = self._decode(data, test=test)
    out.to_cpu()
    if self.mode == 'linear':
        final = out.data
    else:
        final = out.data.transpose(0, 2, 3, 1)
    return final
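inverse_transform applies the mirror-image checks on the latent side: a single latent vector is promoted to a batch of one and its width is validated against self.latent_width. A standalone NumPy sketch of just that validation (LATENT_WIDTH is an assumed value standing in for self.latent_width):

import numpy as np

LATENT_WIDTH = 100  # assumption; stands in for self.latent_width

def normalize_latent(z):
    # Mirror the checks in inverse_transform: promote to 2-d and verify the width.
    z = np.asarray(z, dtype=np.float32)
    if z.ndim < 2:
        z = z[np.newaxis]                    # (latent_width,) -> (1, latent_width)
    if z.ndim != 2:
        raise TypeError("Invalid dimensions for latent data. Dim = %s" % str(z.shape))
    assert z.shape[-1] == LATENT_WIDTH, "Latent shape %d != %d" % (z.shape[-1], LATENT_WIDTH)
    return z

print(normalize_latent(np.zeros(LATENT_WIDTH)).shape)  # (1, 100)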
Example 4: forward
# Required import: from chainer import Variable [as alias]
# Alternatively: from chainer.Variable import data [as alias]
def forward(self, x_batch_curr, y_batch_curr, volatile=False):
    current_sample = Variable(x_batch_curr, volatile=volatile)
    y_batch_curr = np.asarray(y_batch_curr).reshape(1, -1)
    current_output = Variable(y_batch_curr, volatile=volatile)
    h1_current = F.sigmoid(self.model_to_use.x_h1(current_sample))
    # h1_previous = F.sigmoid(self.model_to_use.x_h1(previous_sample))
    # h1_next = F.sigmoid(self.model_to_use.x_h1(next_sample))
    # h1_diff_previous = h1_current - h1_previous
    # h1_diff_next = h1_next - h1_current
    h2_current = F.sigmoid(self.model_to_use.h1_h2(h1_current))
    # h2_diff_n = F.sigmoid(self.model_to_use.h1_h2(h1_diff_next))
    # h2_diff_p = F.sigmoid(self.model_to_use.h1_h2(h1_diff_previous))
    # h2_diff_next = h2_diff_n - h2_current
    # h2_diff_previous = h2_current - h2_diff_p
    h3_current = F.sigmoid(self.model_to_use.h2_h3(h2_current))
    # h3_diff_p = F.sigmoid(self.model_to_use.h2_h3(h2_diff_previous))
    # h3_diff_n = F.sigmoid(self.model_to_use.h2_h3(h2_diff_next))
    # h3_diff_next = h3_diff_n - h3_current
    # h3_diff_previous = h3_current - h3_diff_p
    h4_current = F.sigmoid(self.model_to_use.h3_h4(h3_current))
    # h4_diff_previous = F.sigmoid(self.model_to_use.h3_h4(h3_diff_previous))
    # h4_diff_next = F.sigmoid(self.model_to_use.h3_h4(h3_diff_next))
    # h4_diff = h4_diff_next + h4_diff_previous
    # h4 = h4_current * h4_diff
    h4 = h4_current
    y = self.model_to_use.h4_y(h4)
    loss = F.sigmoid_cross_entropy(y, current_output)
    current_output.data = np.squeeze(current_output.data)
    y.data = y.data.reshape(-1, 1)
    accuracy = F.accuracy(y, current_output)
    return accuracy, loss, y
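Both this example and Example 2 finish with F.accuracy, which compares the argmax of each prediction row against an integer label. A rough NumPy approximation of what it reports (the shapes below are assumptions chosen only for illustration):

import numpy as np

def accuracy(predictions, labels):
    # Fraction of rows whose argmax matches the integer label.
    predicted_classes = np.argmax(predictions, axis=1)
    return float(np.mean(predicted_classes == np.squeeze(labels)))

y = np.random.randn(4, 2).astype(np.float32)   # four samples, two class scores each
t = np.array([0, 1, 1, 0], dtype=np.int32)
print(accuracy(y, t))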
Example 5: xrange
# Required import: from chainer import Variable [as alias]
# Alternatively: from chainer.Variable import data [as alias]
train_loss = []
train_acc = []
test_loss = []
test_acc = []

# Learning loop
for i in xrange(jump * n_epoch):
    x_batch = np.array([train_data[(jump * j + i) % whole_len]
                        for j in six.moves.range(batchsize)]).astype(np.float32)
    y_batch = np.array([train_target[(jump * j + i + 1) % whole_len]
                        for j in six.moves.range(batchsize)]).astype(np.int32)
    state, loss = model(x_batch, y_batch, state)
    accum_loss.data = accum_loss.data.astype(np.float32)
    accum_loss += loss
    if i % 1000 == 0:
        print('epoch = {} \n\ttrain loss: {}'.format(i, accum_loss.data))
    if (i + 1) % bprop_len == 0:
        optimizer.zero_grads()
        accum_loss.backward()
        accum_loss.unchain_backward()  # truncate
        accum_loss = Variable(mod.zeros(()))
        optimizer.clip_grads(grad_clip)
        optimizer.update()
    epoch += 1
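The loop above is Chainer's truncated backpropagation through time: losses are accumulated for bprop_len steps, backpropagated once, and the history is then cut with unchain_backward so the computational graph does not grow without bound. Below is a self-contained sketch of the same bookkeeping written against the modern Chainer API (cleargrads rather than the deprecated zero_grads); the one-unit recurrent cell and the sine-wave data are assumptions made only for illustration:

import numpy as np
import chainer.functions as F
import chainer.links as L
from chainer import Variable, optimizers

cell = L.Linear(1, 1)                          # stands in for the real recurrent model
optimizer = optimizers.SGD(lr=0.1)
optimizer.setup(cell)

bprop_len = 5
h = Variable(np.zeros((1, 1), dtype=np.float32))
accum_loss = Variable(np.zeros((), dtype=np.float32))

for i in range(20):
    x = np.array([[np.sin(0.3 * i)]], dtype=np.float32)
    h = F.tanh(cell(x) + h)                                        # one recurrent step
    target = np.array([[np.sin(0.3 * (i + 1))]], dtype=np.float32)
    accum_loss += F.mean_squared_error(h, target)                  # accumulate the loss
    if (i + 1) % bprop_len == 0:
        cell.cleargrads()
        accum_loss.backward()
        accum_loss.unchain_backward()                              # truncate the history
        accum_loss = Variable(np.zeros((), dtype=np.float32))
        optimizer.update()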
Example 6: E
# Required import: from chainer import Variable [as alias]
# Alternatively: from chainer.Variable import data [as alias]
loss = F.softmax_cross_entropy(a_y, t)

# Backpropagation
w_1.zerograd()
w_2.zerograd()
b_1.zerograd()
b_2.zerograd()
loss.backward(retain_grad=True)
grad_w_1 = w_1.grad
grad_w_2 = w_2.grad
grad_b_1 = b_1.grad
grad_b_2 = b_2.grad

# Manual SGD update on the raw parameter arrays
w_1.data = w_1.data - learning_rate * grad_w_1
w_2.data = w_2.data - learning_rate * grad_w_2
b_1.data = b_1.data - learning_rate * grad_b_1
b_2.data = b_2.data - learning_rate * grad_b_2

time_finish = time.time()
time_elapsed = time_finish - time_start
print "time_elapsed:", time_elapsed

# Error
# Compute E as 0.5 * (y - t) * (y - t).T; the dimensions are {0.5 * (1xK)(Kx1)}
# E = sum(t * log(y)), with (1xK) shapes
# Print the cross-entropy error and accuracy on the training data set
train_error, train_accuracy = error_and_accuracy(w_1, w_2, b_1, b_2,
                                                 x_train, t_train)
print "[train] Error:", train_error
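error_and_accuracy is called above but its body is not part of this snippet. A hypothetical sketch, assuming the two-layer network implied by w_1, b_1, w_2, b_2 with a sigmoid hidden layer (the exact architecture and activation are assumptions, not taken from the source), might look like this:

import chainer.functions as F
from chainer import Variable

def error_and_accuracy(w_1, w_2, b_1, b_2, x, t):
    # Forward pass through an assumed linear -> sigmoid -> linear network.
    x = Variable(x)
    t = Variable(t)
    h = F.sigmoid(F.linear(x, w_1, b_1))
    a_y = F.linear(h, w_2, b_2)
    error = F.softmax_cross_entropy(a_y, t)
    accuracy = F.accuracy(a_y, t)
    return float(error.data), float(accuracy.data)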