本文整理汇总了Python中torch.utils.data.items方法的典型用法代码示例。如果您正苦于以下问题:Python data.items方法的具体用法?Python data.items怎么用?Python data.items使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类torch.utils.data
的用法示例。
在下文中一共展示了data.items方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: read_object_labels
# 需要导入模块: from torch.utils import data [as 别名]
# 或者: from torch.utils.data import items [as 别名]
def read_object_labels(root, dataset, set):
    """Collect per-image multi-label annotations from VOC ``ImageSets/Main`` files.

    Args:
        root: directory containing the ``VOCdevkit`` folder.
        dataset: VOC release name (e.g. ``'VOC2007'``) -- TODO confirm against callers.
        set: split name used in the per-category file names (``'train'``, ``'val'``, ...).
            NOTE(review): shadows the ``set`` builtin; kept to preserve the interface.

    Returns:
        dict mapping image name -> ``np.ndarray`` of shape ``(len(object_categories),)``,
        where entry ``i`` holds the label read for category ``i``.
    """
    path_labels = os.path.join(root, 'VOCdevkit', dataset, 'ImageSets', 'Main')
    labeled_data = dict()
    num_classes = len(object_categories)
    for i, category in enumerate(object_categories):
        # one "<category>_<set>.txt" file per class
        label_file = os.path.join(path_labels, category + '_' + set + '.txt')
        data = read_image_label(label_file)
        for name, label in data.items():
            # setdefault replaces the original i == 0 special case and is also
            # robust to names absent from the first category file (which
            # previously raised KeyError on later iterations).
            labeled_data.setdefault(name, np.zeros(num_classes))[i] = label
    return labeled_data
示例2: write_object_labels_csv
# 需要导入模块: from torch.utils import data [as 别名]
# 或者: from torch.utils.data import items [as 别名]
def write_object_labels_csv(file, labeled_data):
    """Write per-image multi-label annotations to a CSV file.

    Args:
        file: destination CSV path; one header row (``name`` + categories),
            then one row per image.
        labeled_data: dict mapping image name -> per-category labels,
            indexable by category position (e.g. a numpy array).
    """
    print('[dataset] write file %s' % file)
    # newline='' is the documented way to open files for the csv module
    # (prevents spurious blank rows on Windows).
    with open(file, 'w', newline='') as csvfile:
        fieldnames = ['name']
        fieldnames.extend(object_categories)
        writer = csv.DictWriter(csvfile, fieldnames=fieldnames)
        writer.writeheader()
        for name, labels in labeled_data.items():
            row = {'name': name}
            # iterate the actual category list instead of a hard-coded 20,
            # so the writer stays correct if the category set changes
            for i, category in enumerate(object_categories):
                row[category] = int(labels[i])
            writer.writerow(row)
    # the with-statement closes the file; the original redundant
    # csvfile.close() call is dropped
示例3: load_next_buffer
# 需要导入模块: from torch.utils import data [as 别名]
# 或者: from torch.utils.data import items [as 别名]
def load_next_buffer(self):
    """Replace the current buffer with the next window of data files.

    Advances ``self._buffer_index`` (wrapping around the file list) and
    rebuilds ``self._buffer`` and ``self._cum_size`` from the loaded files.
    """
    start = self._buffer_index
    self._buffer_fnames = self._files[start:start + self._buffer_size]
    # advance and wrap so repeated calls cycle through the whole file list
    self._buffer_index = (start + self._buffer_size) % len(self._files)

    self._buffer = []
    self._cum_size = [0]
    progress = tqdm(total=len(self._buffer_fnames),
                    bar_format='{l_bar}{bar}| {n_fmt}/{total_fmt} {postfix}')
    progress.set_description("Loading file buffer ...")
    for fname in self._buffer_fnames:
        with np.load(fname) as data:
            # copy arrays out of the npz handle so they outlive its closing
            self._buffer.append({key: np.copy(value) for key, value in data.items()})
            self._cum_size.append(
                self._cum_size[-1]
                + self._data_per_sequence(data['rewards'].shape[0]))
        progress.update(1)
    progress.close()
示例4: _apply_to_data
# 需要导入模块: from torch.utils import data [as 别名]
# 或者: from torch.utils.data import items [as 别名]
def _apply_to_data(data, func, unpack_dict=False):
"""Apply a function to data, trying to unpack different data
types.
"""
apply_ = partial(_apply_to_data, func=func, unpack_dict=unpack_dict)
if isinstance(data, dict):
if unpack_dict:
return [apply_(v) for v in data.values()]
return {k: apply_(v) for k, v in data.items()}
if isinstance(data, (list, tuple)):
try:
# e.g.list/tuple of arrays
return [apply_(x) for x in data]
except TypeError:
return func(data)
return func(data)
示例5: unpack_data
# 需要导入模块: from torch.utils import data [as 别名]
# 或者: from torch.utils.data import items [as 别名]
def unpack_data(data):
    """Unpack data returned by the net's iterator into a 2-tuple.
    If the wrong number of items is returned, raise a helpful error
    message.
    """
    # Note: This function cannot detect it when a user only returns 1
    # item that is exactly of length 2 (e.g. because the batch size is
    # 2). In that case, the item will be erroneously split into X and
    # y.
    try:
        features, target = data
    except ValueError:
        is_pairlike = isinstance(data, (tuple, list)) and len(data) >= 2
        if is_pairlike:
            # a tuple/list, but with more than two entries
            raise ValueError(ERROR_MSG_MORE_THAN_2_ITEMS.format(len(data)))
        # a 1-tuple/list or something else like a torch tensor
        raise ValueError(ERROR_MSG_1_ITEM)
    return features, target
示例6: state_dict
# 需要导入模块: from torch.utils import data [as 别名]
# 或者: from torch.utils.data import items [as 别名]
def state_dict(self):
    """Return a per-split dict of each loader's sampler state.

    Each sampler's ``state_dict`` receives the number of already-prefetched
    samples so it can account for items sent to workers but not yet consumed.
    NOTE(review): relies on the private ``_send_idx``/``_rcvd_idx`` iterator
    attributes -- verify against the installed torch version.
    """
    def _prefetched(split):
        if self.loaders[split].num_workers == 0:
            # single-process loading: nothing is prefetched ahead of time
            return 0
        it = self.iters[split]
        return (it._send_idx - it._rcvd_idx) * self.batch_size

    return {
        split: loader.sampler.state_dict(_prefetched(split))
        for split, loader in self.loaders.items()
    }