本文整理匯總了Python中chainer.dataset.concat_examples方法的典型用法代碼示例。如果您正苦於以下問題:Python dataset.concat_examples方法的具體用法?Python dataset.concat_examples怎麽用?Python dataset.concat_examples使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在模塊 chainer.dataset 的用法示例。
在下文中一共展示了dataset.concat_examples方法的13個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: test_concat_arrays_padding
# 需要導入模塊: from chainer import dataset [as 別名]
# 或者: from chainer.dataset import concat_examples [as 別名]
def test_concat_arrays_padding(self, backend_config):
    """Padded concatenation of ragged 2-D arrays fills the excess with 0."""
    samples = backend_config.get_array(
        [numpy.random.rand(3, 4),
         numpy.random.rand(2, 5),
         numpy.random.rand(4, 3)])
    batch = dataset.concat_examples(samples, padding=0)
    # The batch shape is the elementwise maximum of the sample shapes.
    self.assertEqual(batch.shape, (3, 4, 5))
    self.assertEqual(type(batch), type(samples[0]))
    # Move everything to the host so numpy can compare values.
    cpu = backend.CpuDevice()
    batch = cpu.send(batch)
    for i, sample in enumerate(cpu.send(a) for a in samples):
        rows, cols = sample.shape
        # Original contents sit in the top-left corner; the rest is 0.
        numpy.testing.assert_array_equal(batch[i, :rows, :cols], sample)
        numpy.testing.assert_array_equal(batch[i, rows:, :], 0)
        numpy.testing.assert_array_equal(batch[i, :, cols:], 0)
示例2: recalculate_bn_statistics
# 需要導入模塊: from chainer import dataset [as 別名]
# 或者: from chainer.dataset import concat_examples [as 別名]
def recalculate_bn_statistics(model, batchsize):
    """Re-estimate batch-normalization statistics on the CamVid train split.

    Runs one full pass over the training set and overwrites each ``*_bn``
    link's ``avg_mean``/``avg_var`` with the mean of the values observed
    after every mini-batch forward pass.

    Returns the (mutated) ``model``.
    """
    loader = chainer.iterators.SerialIterator(
        CamVidDataset(split='train'), batchsize, repeat=False, shuffle=False)
    mean_sums = defaultdict(np.float32)
    var_sums = defaultdict(np.float32)
    num_batches = 0
    for batch in loader:
        images, _ = concat_examples(batch)
        # Forward pass updates the links' running statistics as a side effect.
        model(model.xp.array(images))
        for name, link in model.namedlinks():
            if name.endswith('_bn'):
                mean_sums[name] += link.avg_mean
                var_sums[name] += link.avg_var
        num_batches += 1
    # Replace the running statistics with their across-batch averages.
    for name, link in model.namedlinks():
        if name.endswith('_bn'):
            link.avg_mean = mean_sums[name] / num_batches
            link.avg_var = var_sums[name] / num_batches
    return model
示例3: converter
# 需要導入模塊: from chainer import dataset [as 別名]
# 或者: from chainer.dataset import concat_examples [as 別名]
def converter(batch, device):
    """Concatenate ``batch`` on ``device``, padding ragged entries with 0."""
    inputs, labels = concat_examples(batch, device, 0)
    return inputs, labels
示例4: test_concat_tuples_padding
# 需要導入模塊: from chainer import dataset [as 別名]
# 或者: from chainer.dataset import concat_examples [as 別名]
def test_concat_tuples_padding(self, backend_config):
    """Tuple examples are padded per position: concat_examples returns one
    batch array per tuple slot, each padded with 0 up to that slot's
    per-example maximum shape.
    """
    tuples = [
        backend_config.get_array(
            (numpy.random.rand(3, 4), numpy.random.rand(2, 5))),
        backend_config.get_array(
            (numpy.random.rand(4, 4), numpy.random.rand(3, 4))),
        backend_config.get_array(
            (numpy.random.rand(2, 5), numpy.random.rand(2, 6))),
    ]
    arrays = dataset.concat_examples(tuples, padding=0)
    # One output array per tuple position, shaped to the elementwise
    # maximum of the corresponding input shapes.
    self.assertEqual(len(arrays), 2)
    self.assertEqual(arrays[0].shape, (3, 4, 5))
    self.assertEqual(arrays[1].shape, (3, 3, 6))
    self.assertEqual(type(arrays[0]), type(tuples[0][0]))
    self.assertEqual(type(arrays[1]), type(tuples[0][1]))
    # Move inputs and outputs to the host so numpy can compare values.
    for i in range(len(tuples)):
        tuples[i] = (
            backend.CpuDevice().send(tuples[i][0]),
            backend.CpuDevice().send(tuples[i][1]))
    arrays = tuple(backend.CpuDevice().send(array) for array in arrays)
    # First slot: original contents in the top-left corner, zeros elsewhere.
    numpy.testing.assert_array_equal(arrays[0][0, :3, :4], tuples[0][0])
    numpy.testing.assert_array_equal(arrays[0][0, 3:, :], 0)
    numpy.testing.assert_array_equal(arrays[0][0, :, 4:], 0)
    numpy.testing.assert_array_equal(arrays[0][1, :4, :4], tuples[1][0])
    numpy.testing.assert_array_equal(arrays[0][1, :, 4:], 0)
    numpy.testing.assert_array_equal(arrays[0][2, :2, :5], tuples[2][0])
    numpy.testing.assert_array_equal(arrays[0][2, 2:, :], 0)
    # Second slot: same layout checks for the other tuple element.
    numpy.testing.assert_array_equal(arrays[1][0, :2, :5], tuples[0][1])
    numpy.testing.assert_array_equal(arrays[1][0, 2:, :], 0)
    numpy.testing.assert_array_equal(arrays[1][0, :, 5:], 0)
    numpy.testing.assert_array_equal(arrays[1][1, :3, :4], tuples[1][1])
    numpy.testing.assert_array_equal(arrays[1][1, 3:, :], 0)
    numpy.testing.assert_array_equal(arrays[1][1, :, 4:], 0)
    numpy.testing.assert_array_equal(arrays[1][2, :2, :6], tuples[2][1])
    numpy.testing.assert_array_equal(arrays[1][2, 2:, :], 0)
示例5: test_concat_dicts_padding
# 需要導入模塊: from chainer import dataset [as 別名]
# 或者: from chainer.dataset import concat_examples [as 別名]
def test_concat_dicts_padding(self, backend_config):
    """Dict examples are padded per key: concat_examples returns a dict
    mapping each key to a batch array padded with 0 up to that key's
    per-example maximum shape.
    """
    dicts = [
        {'x': numpy.random.rand(3, 4), 'y': numpy.random.rand(2, 5)},
        {'x': numpy.random.rand(4, 4), 'y': numpy.random.rand(3, 4)},
        {'x': numpy.random.rand(2, 5), 'y': numpy.random.rand(2, 6)},
    ]
    # Transfer the inputs to the backend under test.
    dicts = [
        {key: backend_config.get_array(arr) for key, arr in d.items()}
        for d in dicts]
    arrays = dataset.concat_examples(dicts, padding=0)
    self.assertIn('x', arrays)
    self.assertIn('y', arrays)
    # Each key's batch is shaped to the elementwise maximum of its inputs.
    self.assertEqual(arrays['x'].shape, (3, 4, 5))
    self.assertEqual(arrays['y'].shape, (3, 3, 6))
    self.assertEqual(type(arrays['x']), type(dicts[0]['x']))
    self.assertEqual(type(arrays['y']), type(dicts[0]['y']))
    # Move inputs and outputs to the host so numpy can compare values.
    for d in dicts:
        d['x'] = backend.CpuDevice().send(d['x'])
        d['y'] = backend.CpuDevice().send(d['y'])
    arrays = {
        'x': backend.CpuDevice().send(arrays['x']),
        'y': backend.CpuDevice().send(arrays['y'])}
    # 'x': original contents in the top-left corner, zeros elsewhere.
    numpy.testing.assert_array_equal(arrays['x'][0, :3, :4], dicts[0]['x'])
    numpy.testing.assert_array_equal(arrays['x'][0, 3:, :], 0)
    numpy.testing.assert_array_equal(arrays['x'][0, :, 4:], 0)
    numpy.testing.assert_array_equal(arrays['x'][1, :4, :4], dicts[1]['x'])
    numpy.testing.assert_array_equal(arrays['x'][1, :, 4:], 0)
    numpy.testing.assert_array_equal(arrays['x'][2, :2, :5], dicts[2]['x'])
    numpy.testing.assert_array_equal(arrays['x'][2, 2:, :], 0)
    # 'y': same layout checks for the other key.
    numpy.testing.assert_array_equal(arrays['y'][0, :2, :5], dicts[0]['y'])
    numpy.testing.assert_array_equal(arrays['y'][0, 2:, :], 0)
    numpy.testing.assert_array_equal(arrays['y'][0, :, 5:], 0)
    numpy.testing.assert_array_equal(arrays['y'][1, :3, :4], dicts[1]['y'])
    numpy.testing.assert_array_equal(arrays['y'][1, 3:, :], 0)
    numpy.testing.assert_array_equal(arrays['y'][1, :, 4:], 0)
    numpy.testing.assert_array_equal(arrays['y'][2, :2, :6], dicts[2]['y'])
    numpy.testing.assert_array_equal(arrays['y'][2, 2:, :], 0)
示例6: check_concat_arrays
# 需要導入模塊: from chainer import dataset [as 別名]
# 或者: from chainer.dataset import concat_examples [as 別名]
def check_concat_arrays(
        self, arrays, device, expected_device, expected_dtype):
    """Concatenate scalar examples and verify device, dtype and values."""
    batched = dataset.concat_examples(arrays, device, self.padding)
    # One batch axis, one entry per input example.
    self.assertEqual(batched.shape, (len(arrays),))
    self.check_device(batched, device, expected_device)
    host = backend.CpuDevice().send(batched)
    for got, src in zip(host, arrays):
        assert got.dtype == expected_dtype
        expected = numpy.array(src, dtype=expected_dtype)
        numpy.testing.assert_array_equal(got, expected)
示例7: __init__
# 需要導入模塊: from chainer import dataset [as 別名]
# 或者: from chainer.dataset import concat_examples [as 別名]
def __init__(self, iterator, target, converter=convert.concat_examples,
             device=None, eval_hook=None, eval_func=None, num_iterations=200):
    """Evaluator that additionally remembers how many batches to run.

    All arguments except ``num_iterations`` are forwarded unchanged to the
    parent evaluator; ``num_iterations`` caps the evaluation length.
    """
    super(FastEvaluatorBase, self).__init__(
        iterator, target, converter=converter, device=device,
        eval_hook=eval_hook, eval_func=eval_func)
    self.num_iterations = num_iterations
示例8: get_concat_and_pad_examples
# 需要導入模塊: from chainer import dataset [as 別名]
# 或者: from chainer.dataset import concat_examples [as 別名]
def get_concat_and_pad_examples(padding=-10000):
    """Return a converter that concatenates a batch, padding with ``padding``."""
    def _convert(batch, device=None):
        # Defer to concat_examples with the captured padding value.
        return concat_examples(batch, device=device, padding=padding)
    return _convert
示例9: concat_and_pad_examples
# 需要導入模塊: from chainer import dataset [as 別名]
# 或者: from chainer.dataset import concat_examples [as 別名]
def concat_and_pad_examples(batch, device=None, padding=-10000):
    """Concatenate ``batch`` on ``device``, padding ragged entries with ``padding``."""
    merged = concat_examples(batch, device=device, padding=padding)
    return merged
示例10: concat_and_reshape
# 需要導入模塊: from chainer import dataset [as 別名]
# 或者: from chainer.dataset import concat_examples [as 別名]
def concat_and_reshape(batch, device=None, padding=None):
    """Concatenate ``(input, label)`` examples and add a channel axis.

    Returns ``(x, y)`` where ``x`` is reshaped to ``(N, 1, n_features)``.
    The feature size is inferred with ``-1`` instead of the previously
    hard-coded MNIST value 784, so any flat feature length works;
    behavior is identical for 784-feature inputs.
    """
    x, y = dataset.concat_examples(batch, device, padding)
    return x.reshape(len(x), 1, -1), y
示例11: converter
# 需要導入模塊: from chainer import dataset [as 別名]
# 或者: from chainer.dataset import concat_examples [as 別名]
def converter(batch, device=None):
    """Concatenate ``batch`` and cast every resulting array to float32."""
    parts = concat_examples(batch, device=device)
    # Pick numpy or cupy to match where the data lives.
    xp = cuda.get_array_module(parts[0])
    return tuple(xp.asarray(part, dtype=xp.float32) for part in parts)
示例12: valid
# 需要導入模塊: from chainer import dataset [as 別名]
# 或者: from chainer.dataset import concat_examples [as 別名]
def valid(iterator, gpu, encoder, decoder, rel_send, rel_rec, edge_types, temp, var):
    """Run one validation epoch of the encoder/decoder pair.

    Accumulates per-batch reconstruction NLL, KL divergence, edge accuracy
    and node MSE over one pass of ``iterator`` and reports the epoch means
    via ``put_log`` with the tag 'valid'.
    """
    nll_valid = []
    kl_valid = []
    edge_accuracies = []
    node_mses = []
    # Enter inference mode for the whole pass; restored at the end.
    # NOTE(review): chainer.config is mutated directly instead of using
    # chainer.using_config, so the flags stay flipped if this raises.
    chainer.config.train = False
    chainer.config.enable_backprop = False
    while True:
        inputs = iterator.next()
        node_features, edge_labels = dataset.concat_examples(inputs, device=gpu)
        # logits: [batch_size, num_edges, edge_types]
        logits = encoder(node_features, rel_send, rel_rec) # inverse func. of softmax
        # Relaxed (differentiable) one-hot edge samples and their probabilities.
        edges = F.gumbel_softmax(logits, tau=temp, axis=2)
        edge_probs = F.softmax(logits, axis=2)
        # edges, edge_probs: [batch_size, num_edges, edge_types]
        # validation output uses teacher forcing
        output = decoder(node_features, edges, rel_rec, rel_send, 1)
        # Target is the input shifted one step ahead
        # (assumes axis 2 of node_features is time -- TODO confirm).
        target = node_features[:, :, 1:, :]
        num_nodes = node_features.shape[1]
        loss_nll = get_nll_gaussian(output, target, var)
        loss_kl = get_kl_categorical_uniform(edge_probs, num_nodes, edge_types)
        nll_valid.append(float(loss_nll.array))
        kl_valid.append(float(loss_kl.array))
        edge_accuracy = get_edge_accuracy(logits.array, edge_labels)
        edge_accuracies.append(edge_accuracy)
        node_mse = float(F.mean_squared_error(output, target).array)
        node_mses.append(node_mse)
        if iterator.is_new_epoch:
            break
    put_log(iterator.epoch, np.mean(nll_valid), np.mean(kl_valid),
            np.mean(edge_accuracies), np.mean(node_mses), 'valid')
    # Restore training mode.
    chainer.config.train = True
    chainer.config.enable_backprop = True
示例13: test
# 需要導入模塊: from chainer import dataset [as 別名]
# 或者: from chainer.dataset import concat_examples [as 別名]
def test(iterator, gpu, timesteps, encoder, decoder, rel_send, rel_rec, edge_types, temp, var):
    """Run one test epoch of the encoder/decoder pair.

    Unlike ``valid``, the trajectory is split by ``timesteps``: the encoder
    sees the first ``timesteps`` steps and the decoder is run on the last
    ``timesteps`` steps. Epoch means of NLL, KL, edge accuracy and node MSE
    are reported via ``put_log`` with the tag 'test'.
    """
    nll_test = []
    kl_test = []
    edge_accuracies = []
    node_mses = []
    # Enter inference mode for the whole pass; restored at the end.
    # NOTE(review): chainer.config is mutated directly instead of using
    # chainer.using_config, so the flags stay flipped if this raises.
    chainer.config.train = False
    chainer.config.enable_backprop = False
    while True:
        inputs = iterator.next()
        node_features, edge_labels = dataset.concat_examples(inputs, device=gpu)
        # Encoder input: leading steps; decoder input: trailing steps
        # (assumes axis 2 of node_features is time -- TODO confirm).
        data_encoder = node_features[:, :, :timesteps, :]
        data_decoder = node_features[:, :, -timesteps:, :]
        # logits: [batch_size, num_edges, edge_types]
        logits = encoder(data_encoder, rel_send, rel_rec) # inverse func. of softmax
        # Relaxed (differentiable) one-hot edge samples and their probabilities.
        edges = F.gumbel_softmax(logits, tau=temp, axis=2)
        edge_probs = F.softmax(logits, axis=2)
        # edges, edge_probs: [batch_size, num_edges, edge_types]
        # validation output uses teacher forcing
        output = decoder(data_decoder, edges, rel_rec, rel_send, 1)
        # Target is the decoder input shifted one step ahead in time.
        target = data_decoder[:, :, 1:, :]
        num_nodes = node_features.shape[1]
        loss_nll = get_nll_gaussian(output, target, var)
        loss_kl = get_kl_categorical_uniform(edge_probs, num_nodes, edge_types)
        nll_test.append(float(loss_nll.array))
        kl_test.append(float(loss_kl.array))
        edge_accuracy = get_edge_accuracy(logits.array, edge_labels)
        edge_accuracies.append(edge_accuracy)
        node_mse = float(F.mean_squared_error(output, target).array)
        node_mses.append(node_mse)
        if iterator.is_new_epoch:
            break
    put_log(iterator.epoch, np.mean(nll_test), np.mean(kl_test),
            np.mean(edge_accuracies), np.mean(node_mses), 'test')
    # Restore training mode.
    chainer.config.train = True
    chainer.config.enable_backprop = True