This article collects typical usage examples of the Python method chainer.dataset.convert.concat_examples. If you are wondering what convert.concat_examples does and how to use it, the curated code samples below may help. You can also read more about the containing module, chainer.dataset.convert.
The following 15 code examples of convert.concat_examples are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
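Before diving in, here is a minimal sketch of what concat_examples does: it takes a list of examples (typically (input, label) tuples) and stacks each component along a new batch axis, optionally padding ragged arrays to a common shape and sending the result to a device. The toy arrays below are illustrative only.

import numpy as np
from chainer.dataset import convert

# A toy minibatch of (input, label) pairs.
batch = [(np.zeros(3, dtype=np.float32), np.int32(0)),
         (np.ones(3, dtype=np.float32), np.int32(1))]
x, t = convert.concat_examples(batch)
print(x.shape, t.shape)  # (2, 3) (2,)

# Ragged inputs can be padded to a common shape:
# here inputs are padded with 0 and labels with -1.
ragged = [(np.zeros(2, dtype=np.float32), np.int32(0)),
          (np.ones(4, dtype=np.float32), np.int32(1))]
x, t = convert.concat_examples(ragged, padding=(0, -1))
print(x.shape)  # (2, 4)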
Example 1: evaluate
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def evaluate(self):
    val_iter = self.get_iterator("main")
    target = self.get_target("main")
    loss = 0
    count = 0
    for batch in copy.copy(val_iter):
        x, t = convert.concat_examples(batch, device=self.device, padding=(0, -1))
        xp = chainer.backends.cuda.get_array_module(x)
        state = None
        for i in six.moves.range(len(x[0])):
            state, loss_batch = target(state, x[:, i], t[:, i])
            non_zeros = xp.count_nonzero(x[:, i])
            loss += loss_batch.data * non_zeros
            count += int(non_zeros)
    # report validation loss
    observation = {}
    with reporter.report_scope(observation):
        reporter.report({"loss": float(loss / count)}, target)
    return observation
Example 2: __init__
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def __init__(self, iterator_a, iterator_b, opt_g_a, opt_g_b,
             opt_d_a, opt_d_b, device):
    self._iterators = {'main': iterator_a, 'second': iterator_b}
    self.generator_ab = opt_g_a.target
    self.generator_ba = opt_g_b.target
    self.discriminator_a = opt_d_a.target
    self.discriminator_b = opt_d_b.target
    self._optimizers = {
        'generator_ab': opt_g_a,
        'generator_ba': opt_g_b,
        'discriminator_a': opt_d_a,
        'discriminator_b': opt_d_b,
    }
    self.itr_a = iterator_a
    self.itr_b = iterator_b
    self.opt_g_a = opt_g_a
    self.opt_g_b = opt_g_b
    self.opt_d_a = opt_d_a
    self.opt_d_b = opt_d_b
    self.converter = convert.concat_examples
    self.device = device
    self.iteration = 0
    self.xp = self.generator_ab.xp
    self.bch = iterator_a.batch_size
Example 3: __init__
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def __init__(self, comm, iterator, target, device=None,
             converter=convert.concat_examples, root=0,
             **kwargs):
    progress_hook, = argument.parse_kwargs(kwargs, ('progress_hook', None))
    self.comm = comm
    self.iterator = iterator
    self._targets = {"main": target}
    self.converter = converter
    if device is not None:
        device = backend.get_device(device)
    self.device = device
    self._progress_hook = progress_hook
    assert 0 <= root < self.comm.size
    self.root = root
Example 4: concat_examples
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def concat_examples(batch, device=None, padding=None):
    """Concat examples in minibatch.

    :param np.ndarray batch: The batch to concatenate
    :param int device: The device to send to
    :param Tuple[int, int] padding: The padding to use
    :return: (inputs, targets)
    :rtype: (torch.Tensor, torch.Tensor)
    """
    x, t = convert.concat_examples(batch, padding=padding)
    x = torch.from_numpy(x)
    t = torch.from_numpy(t)
    if device is not None and device >= 0:
        x = x.cuda(device)
        t = t.cuda(device)
    return x, t
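A usage sketch for this Chainer-to-PyTorch bridge, on a toy CPU batch (the arrays are illustrative):

import numpy as np
import torch

batch = [(np.zeros(3, dtype=np.float32), np.int64(0)),
         (np.ones(3, dtype=np.float32), np.int64(1))]
x, t = concat_examples(batch)  # the wrapper defined above
assert isinstance(x, torch.Tensor) and tuple(x.shape) == (2, 3)
assert t.dtype == torch.int64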
Example 5: __init__
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def __init__(
        self, iterator, target, device=None,
        converter=convert.concat_examples, label_names=None,
        filename='segmentation_iter={iteration}_idx={index}.jpg',
        mode='seg', n_processes=None):
    if isinstance(iterator, iterator_module.Iterator):
        iterator = {'main': iterator}
    self.iterators = iterator

    if isinstance(target, link.Link):
        target = {'main': target}
    self.targets = target

    self.device = device
    self.converter = converter
    self.label_names = label_names
    self.filename = filename
    self.mode = mode
    self.n_processes = n_processes or multiprocessing.cpu_count()
Example 6: __init__
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def __init__(self, iterator, target, converter=convert.concat_examples,
             device=None, eval_hook=None, eval_func=None, name=None,
             pos_label=1, ignore_labels=None, raise_value_error=True,
             logger=None, sample_weight=None,
             multioutput='uniform_average', ignore_nan=False):
    metrics_fun = {'r2_score': self.r2_score}
    super(R2ScoreEvaluator, self).__init__(
        iterator, target, converter=converter, device=device,
        eval_hook=eval_hook, eval_func=eval_func, metrics_fun=metrics_fun,
        name=name, logger=logger)
    self.pos_label = pos_label
    self.ignore_labels = ignore_labels
    self.raise_value_error = raise_value_error
    self.sample_weight = sample_weight
    self.multioutput = multioutput
    self.ignore_nan = ignore_nan
Example 7: __init__
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def __init__(self, iterator, target, converter=convert.concat_examples,
             device=None, eval_hook=None, eval_func=None, metrics_fun=None,
             name=None, logger=None):
    super(BatchEvaluator, self).__init__(
        iterator, target, converter=converter, device=device,
        eval_hook=eval_hook, eval_func=eval_func)
    self.name = name
    self.logger = logger or getLogger()
    if callable(metrics_fun):
        # TODO(mottodora): use better name or infer
        self.metrics_fun = {"evaluation": metrics_fun}
    elif isinstance(metrics_fun, dict):
        self.metrics_fun = metrics_fun
    else:
        raise TypeError('Unexpected type: metrics_fun must be callable '
                        'or a dict.')
Example 8: __init__
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def __init__(self, iterator, opt, device, loss_func,
             converter=convert.concat_examples):
    super(MolNvpUpdater, self).__init__(
        iterator=iterator,
        optimizer=opt,
        converter=converter,
        loss_func=loss_func,
        device=device,
        loss_scale=None,
    )
    if isinstance(iterator, iterator_module.Iterator):
        iterator = {'main': iterator}
    self.iterator = iterator
    self.opt = opt
    self.device = device
    self.loss_func = loss_func
    self.model = opt.target
    self.converter = converter
Example 9: __init__
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def __init__(self, iterator, optimizer, class_dim,
             converter=convert.concat_examples,
             device=None, loss_func=None):
    if isinstance(iterator, iterator_module.Iterator):
        iterator = {'main': iterator}
    self._iterators = iterator

    if not isinstance(optimizer, dict):
        optimizer = {'main': optimizer}
    self._optimizers = optimizer

    if device is not None and device >= 0:
        for optimizer in six.itervalues(self._optimizers):
            optimizer.target.to_gpu(device)

    self.converter = converter
    self.loss_func = loss_func
    self.device = device
    self.iteration = 0
    self.class_dim = class_dim
Example 10: preview_convert
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
# Note: `os`, `cp` (cupy), `np` (numpy), `image`, `dataset`, and `padding`
# are module-level names in the original source of this snippet.
def preview_convert(iterator_a, iterator_b, g_a, g_b, device, gla, dst):
    @chainer.training.make_extension()
    def make_preview(trainer):
        with chainer.using_config('train', False):
            with chainer.no_backprop_mode():
                x_a = iterator_a.next()
                x_a = convert.concat_examples(x_a, device)
                x_a = chainer.Variable(x_a)
                x_b = iterator_b.next()
                x_b = convert.concat_examples(x_b, device)
                x_b = chainer.Variable(x_b)

                x_ab = g_a(x_a)
                x_ba = g_b(x_b)
                x_bab = g_a(x_ba)
                x_aba = g_b(x_ab)

                preview_dir = '{}/preview'.format(dst)
                if not os.path.exists(preview_dir):
                    os.makedirs(preview_dir)
                image_dir = '{}/image'.format(dst)
                if not os.path.exists(image_dir):
                    os.makedirs(image_dir)

                names = ['a', 'ab', 'aba', 'b', 'ba', 'bab']
                images = [x_a, x_ab, x_aba, x_b, x_ba, x_bab]
                for n, i in zip(names, images):
                    i = cp.asnumpy(i.data)[:, :, padding:-padding, :].reshape(1, -1, 128)
                    image.save(image_dir + '/{}{}.jpg'.format(trainer.updater.epoch, n), i)
                    w = np.concatenate([gla.inverse(_i) for _i in dataset.reverse(i)])
                    dataset.save(preview_dir + '/{}{}.wav'.format(trainer.updater.epoch, n), 16000, w)

    return make_preview
Example 11: predict
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def predict(self, images, oversample=True):
    """Computes all the probabilities of given images.

    Args:
        images (iterable of PIL.Image or numpy.ndarray): Input images.
        oversample (bool): If ``True``, it averages results across
            center, corners, and mirrors. Otherwise, it uses only the
            center.

    Returns:
        ~chainer.Variable: Output that contains the class probabilities
        of given images.

    """
    x = concat_examples([prepare(img, size=(256, 256)) for img in images])
    if oversample:
        x = imgproc.oversample(x, crop_dims=(224, 224))
    else:
        x = x[:, :, 16:240, 16:240]
    # Use no_backprop_mode to reduce memory consumption
    with function.no_backprop_mode():
        x = Variable(self.xp.asarray(x))
        y = self(x, layers=['prob'])['prob']
        if oversample:
            n = y.data.shape[0] // 10
            y_shape = y.data.shape[1:]
            y = reshape(y, (n, 10) + y_shape)
            y = sum(y, axis=1) / 10
    return y
Example 12: __init__
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def __init__(self, iterator, updater, converter=convert.concat_examples,
             device=None, eval_hook=None):
    if isinstance(iterator, iterator_module.Iterator):
        iterator = {'main': iterator}
    self._iterators = iterator

    if isinstance(updater.model, link.Link):
        self._targets = {'main': updater.model}
    else:
        self._targets = updater.model

    self.updater = updater
    self.converter = converter
    self.device = device
    self.eval_hook = eval_hook
Example 13: eval
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def eval(loss_function, iterator):  # note: shadows Python's built-in eval()
    """Evaluates the mean of the given loss function over all batches
    in the given iterator.

    :param loss_function: The loss function to evaluate
    :param iterator: The iterator over the evaluation data set
    :return: The mean loss value
    """
    iterator.reset()
    results = []
    for batch in iterator:
        input_args = convert.concat_examples(batch)
        results.append(loss_function(*input_args).data)
    return np.mean(results)
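A sketch of calling the eval function above. It assumes loss_function is any callable taking the concatenated arrays (here a hypothetical chainer.links.Classifier named model) and that the iterator was created with repeat=False so the for loop terminates after one pass:

from chainer import iterators

# `model` and `val_data` are assumed to exist; model(x, t) returns a loss.
val_iter = iterators.SerialIterator(val_data, batch_size=32,
                                    repeat=False, shuffle=False)
mean_loss = eval(model, val_iter)
print(float(mean_loss))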
Example 14: run_train_loop
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def run_train_loop(
        optimizer, train_iter, test_iter, test_count, epoch, device):
    model = optimizer.target

    train_count = 0
    sum_accuracy = 0
    sum_loss = 0
    while train_iter.epoch < epoch:
        batch = train_iter.next()
        x_array, t_array = convert.concat_examples(batch, device)
        x = chainer.Variable(x_array)
        t = chainer.Variable(t_array, requires_grad=False)
        optimizer.update(model, x, t)
        train_count += len(t)
        sum_loss += float(model.loss.array) * len(t)
        sum_accuracy += float(model.accuracy.array) * len(t)

        if train_iter.is_new_epoch:
            print('epoch: ', train_iter.epoch)
            print('train mean loss: {}, accuracy: {}'.format(
                sum_loss / train_count, sum_accuracy / train_count))
            # evaluation
            train_count = 0
            sum_accuracy = 0
            sum_loss = 0
            # It is good practice to turn off train mode during evaluation.
            with configuration.using_config('train', False):
                for batch in test_iter:
                    x_array, t_array = convert.concat_examples(
                        batch, device)
                    x = chainer.Variable(x_array)
                    t = chainer.Variable(t_array, requires_grad=False)
                    loss = model(x, t)
                    sum_loss += float(loss.array) * len(t)
                    sum_accuracy += float(model.accuracy.array) * len(t)
            test_iter.reset()
            print('test mean loss: {}, accuracy: {}'.format(
                sum_loss / test_count, sum_accuracy / test_count))
            sum_accuracy = 0
            sum_loss = 0
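A minimal sketch of wiring up run_train_loop, assuming a standard Chainer classifier on MNIST; the MLP class and all hyperparameters below are illustrative, not part of the example above:

import chainer
import chainer.functions as F
import chainer.links as L
from chainer import iterators, optimizers


class MLP(chainer.Chain):
    """A small illustrative network; any chainer.Chain would do."""

    def __init__(self, n_units, n_out):
        super(MLP, self).__init__()
        with self.init_scope():
            self.l1 = L.Linear(None, n_units)
            self.l2 = L.Linear(None, n_out)

    def forward(self, x):
        return self.l2(F.relu(self.l1(x)))


train, test = chainer.datasets.get_mnist()
train_iter = iterators.SerialIterator(train, batch_size=100)
test_iter = iterators.SerialIterator(test, batch_size=100,
                                     repeat=False, shuffle=False)

# L.Classifier exposes the .loss and .accuracy attributes the loop reads.
model = L.Classifier(MLP(100, 10))
optimizer = optimizers.Adam()
optimizer.setup(model)

run_train_loop(optimizer, train_iter, test_iter,
               test_count=len(test), epoch=5, device=-1)  # -1 = CPU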
Example 15: predict
# Required import: from chainer.dataset import convert [as alias]
# Or alternatively: from chainer.dataset.convert import concat_examples [as alias]
def predict(self, images, oversample=True):
    """Computes all the probabilities of given images.

    Args:
        images (iterable of PIL.Image or numpy.ndarray): Input images.
            When you specify a color image as a :class:`numpy.ndarray`,
            make sure that color order is RGB.
        oversample (bool): If ``True``, it averages results across
            center, corners, and mirrors. Otherwise, it uses only the
            center.

    Returns:
        ~chainer.Variable: Output that contains the class probabilities
        of given images.

    """
    x = concat_examples([prepare(img, size=(256, 256)) for img in images])
    if oversample:
        x = imgproc.oversample(x, crop_dims=(224, 224))
    else:
        x = x[:, :, 16:240, 16:240]
    # Use no_backprop_mode to reduce memory consumption
    with function.no_backprop_mode(), chainer.using_config('train', False):
        x = Variable(self.xp.asarray(x))
        y = self(x, layers=['prob'])['prob']
        if oversample:
            n = len(y) // 10
            y_shape = y.shape[1:]
            y = reshape(y, (n, 10) + y_shape)
            y = sum(y, axis=1) / 10
    return y
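This predict method matches the one on Chainer's bundled vision models (for example chainer.links.VGG16Layers). A minimal usage sketch, assuming pretrained weights can be downloaded and a local image file exists (the file name is illustrative):

import numpy as np
from PIL import Image
import chainer.links as L

model = L.VGG16Layers()       # downloads pretrained weights on first use
img = Image.open('cat.jpg')   # hypothetical input image
prob = model.predict([img], oversample=True)
print(prob.shape)                   # (1, 1000) class probabilities
print(int(np.argmax(prob.array)))   # predicted ImageNet class index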