This article collects typical usage examples of the utils.assert_eq method in Python. If you are unsure what utils.assert_eq does, how to call it, or what real-world uses look like, the curated code examples below may help. You can also explore further usage examples from the utils module.
The following presents 10 code examples of the utils.assert_eq method, ordered by popularity by default.
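For context, the assert_eq helper that every example below calls is a thin wrapper around Python's assert statement. Its exact implementation varies by repository; the following is only a minimal sketch of the form it typically takes in these VQA codebases, and the message format is an assumption.

# utils.py -- minimal sketch of the helper used below (message format assumed)
def assert_eq(real, expected):
    assert real == expected, '%s (true) vs %s (expected)' % (real, expected)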
Example 1: _load_dataset

# Required module: import utils [as alias]
# Or: from utils import assert_eq [as alias]
# (os, json and cPickle are also assumed to be imported at module level)
def _load_dataset(dataroot, name, img_id2val):
    """Load entries.

    img_id2val: dict {img_id -> val}; val can be used to retrieve image or features
    dataroot: root path of the dataset
    name: 'train' or 'val'
    """
    question_path = os.path.join(
        dataroot, 'v2_OpenEnded_mscoco_%s2014_questions.json' % name)
    questions = sorted(json.load(open(question_path))['questions'],
                       key=lambda x: x['question_id'])
    answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)
    answers = cPickle.load(open(answer_path, 'rb'))
    answers = sorted(answers, key=lambda x: x['question_id'])
    utils.assert_eq(len(questions), len(answers))
    entries = []
    for question, answer in zip(questions, answers):
        utils.assert_eq(question['question_id'], answer['question_id'])
        utils.assert_eq(question['image_id'], answer['image_id'])
        img_id = question['image_id']
        entries.append(_create_entry(img_id2val[img_id], question, answer))
    return entries
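The _create_entry helper referenced above is not part of this listing. A minimal sketch of the shape it usually takes in these VQA dataset loaders is shown below; the exact field names are an assumption based on the question and answer dictionaries used in the example.

# Hypothetical sketch of _create_entry: bundle one question/answer pair
# with the index used later to look up the corresponding image features.
def _create_entry(img, question, answer):
    entry = {
        'question_id': question['question_id'],
        'image_id': question['image_id'],
        'image': img,                      # index into the feature file
        'question': question['question'],
        'answer': answer}
    return entry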
Example 2: tokenize

# Required module: import utils [as alias]
# Or: from utils import assert_eq [as alias]
def tokenize(self, max_length=14):
    """Tokenizes the questions.
    This will add q_token to each entry of the dataset.
    -1 represents nil and should be treated as padding_idx in the embedding.
    """
    for entry in self.entries:
        tokens = self.dictionary.tokenize(entry['question'], False)
        tokens = tokens[:max_length]
        if len(tokens) < max_length:
            # Note: here we pad at the back of the sentence
            padding = [self.dictionary.padding_idx] * \
                (max_length - len(tokens))
            tokens = tokens + padding
        utils.assert_eq(len(tokens), max_length)
        entry['q_token'] = tokens
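The truncate-then-pad logic above can be checked in isolation. The snippet below reproduces it with a plain list of token ids and a made-up padding_idx, so those concrete values are illustrative only.

# Standalone illustration of the back-padding used in the example above.
max_length = 14
padding_idx = 19901                       # hypothetical dictionary.padding_idx
tokens = [5, 17, 3, 42]                   # pretend output of dictionary.tokenize
tokens = tokens[:max_length]              # truncate overly long questions
if len(tokens) < max_length:
    tokens = tokens + [padding_idx] * (max_length - len(tokens))
assert len(tokens) == max_length          # what utils.assert_eq enforces here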
Example 3: _load_dataset

# Required module: import utils [as alias]
# Or: from utils import assert_eq [as alias]
def _load_dataset(dataroot, name, img_id2val):
    """Load entries.

    img_id2val: dict {img_id -> val}; val can be used to retrieve image or features
    dataroot: root path of the dataset
    name: 'train' or 'val'
    """
    question_path = os.path.join(
        dataroot, 'v2_OpenEnded_mscoco_%s2014_questions.json' % name)
    questions = sorted(json.load(open(question_path))['questions'],
                       key=lambda x: x['question_id'])
    answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)
    answers = pickle.load(open(answer_path, 'rb'))
    answers = sorted(answers, key=lambda x: x['question_id'])
    utils.assert_eq(len(questions), len(answers))
    entries = []
    for question, answer in zip(questions, answers):
        utils.assert_eq(question['question_id'], answer['question_id'])
        utils.assert_eq(question['image_id'], answer['image_id'])
        img_id = question['image_id']
        entries.append(_create_entry(img_id2val[img_id], question, answer))
    return entries
Example 4: tokenize

# Required module: import utils [as alias]
# Or: from utils import assert_eq [as alias]
def tokenize(self, max_length=14):
    """Tokenizes the questions.
    This will add q_token to each entry of the dataset.
    -1 represents nil and should be treated as padding_idx in the embedding.
    """
    for entry in self.entries:
        tokens = self.dictionary.tokenize(entry['question'], False)
        tokens = tokens[:max_length]
        if len(tokens) < max_length:
            # Note: here we pad in front of the sentence
            padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
            tokens = padding + tokens
        utils.assert_eq(len(tokens), max_length)
        entry['q_token'] = tokens
Example 5: make_json

# Required module: import utils [as alias]
# Or: from utils import assert_eq [as alias]
def make_json(logits, qIds, dataloader):
    utils.assert_eq(logits.size(0), len(qIds))
    results = []
    for i in range(logits.size(0)):
        result = {}
        result['question_id'] = qIds[i].item()
        result['answer'] = get_answer(logits[i], dataloader)
        results.append(result)
    return results
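make_json delegates the label-to-string lookup to a get_answer helper that is not shown in this listing. A plausible sketch, assuming the dataloader's dataset exposes a label2ans list like the loaders in the other examples, might look like this; both the attribute name and the argmax strategy are assumptions.

# Hypothetical sketch of get_answer: pick the highest-scoring label and
# map it back to an answer string via the dataset's label2ans table.
def get_answer(p, dataloader):
    _, idx = p.max(0)                      # argmax over the answer logits
    return dataloader.dataset.label2ans[idx.item()]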
Example 6: _load_dataset

# Required module: import utils [as alias]
# Or: from utils import assert_eq [as alias]
def _load_dataset(dataroot, name, coco_train_img_id2val, coco_val_img_id2val,
                  label2ans):
    """Load entries.

    coco_train_img_id2val / coco_val_img_id2val:
        dict {img_id -> val}; val can be used to retrieve image or features
    dataroot: root path of the dataset
    name: 'train' or 'val'
    """
    question_path = os.path.join(
        dataroot, 'cp_v2_questions/vqacp_v2_%s_questions.json' % name)
    questions = sorted(json.load(open(question_path)),
                       key=lambda x: x['question_id'])
    answer_path = os.path.join(dataroot, 'cache', 'cp_v2_%s_target.pkl' % name)
    answers = pickle.load(open(answer_path, 'rb'))
    answers = sorted(answers, key=lambda x: x['question_id'])
    utils.assert_eq(len(questions), len(answers))
    entries = []
    for question, answer in zip(questions, answers):
        utils.assert_eq(question['question_id'], answer['question_id'])
        utils.assert_eq(question['image_id'], answer['image_id'])
        img_id = question['image_id']
        coco_split = question["coco_split"]
        index = coco_train_img_id2val[img_id] \
            if coco_split == "train2014" else coco_val_img_id2val[img_id]
        if not COUNTING_ONLY \
                or is_howmany(question['question'], answer, label2ans):
            entries.append(_create_entry(index, question, answer))
    return entries
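COUNTING_ONLY and is_howmany are module-level pieces that filter the dataset down to counting questions; their real implementations are repository-specific and not shown here. A rough sketch of the kind of check involved is given below, with the keyword test, the answer dictionary layout, and the digit check all being assumptions.

# Rough sketch only: keep a question if it looks like a counting question
# and (when an answer is available) at least one labelled answer is a number.
COUNTING_ONLY = False

def is_howmany(question, answer, label2ans):
    if 'how many' not in question.lower():
        return False
    if answer is None or label2ans is None:   # test split: no answers available
        return True
    return any(label2ans[l].isdigit() for l in answer['labels'])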
Example 7: _load_dataset

# Required module: import utils [as alias]
# Or: from utils import assert_eq [as alias]
def _load_dataset(dataroot, name, img_id2val, label2ans):
    """Load entries.

    img_id2val: dict {img_id -> val}; val can be used to
        retrieve image or features
    dataroot: root path of the dataset
    name: 'train', 'val', 'test-dev2015', 'test2015'
    """
    question_path = os.path.join(
        dataroot, 'Questions/v2_OpenEnded_mscoco_%s_questions.json' %
        (name + '2014' if 'test' != name[:4] else name))
    questions = sorted(json.load(open(question_path))['questions'],
                       key=lambda x: x['question_id'])
    # train, val
    if 'test' != name[:4]:
        answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)
        answers = pickle.load(open(answer_path, 'rb'))
        answers = sorted(answers, key=lambda x: x['question_id'])
        utils.assert_eq(len(questions), len(answers))
        entries = []
        for question, answer in zip(questions, answers):
            utils.assert_eq(question['question_id'], answer['question_id'])
            utils.assert_eq(question['image_id'], answer['image_id'])
            img_id = question['image_id']
            if not COUNTING_ONLY \
                    or is_howmany(question['question'], answer, label2ans):
                entries.append(_create_entry(img_id2val[img_id],
                                             question, answer))
    # test2015
    else:
        entries = []
        for question in questions:
            img_id = question['image_id']
            if not COUNTING_ONLY \
                    or is_howmany(question['question'], None, None):
                entries.append(_create_entry(img_id2val[img_id],
                                             question, None))
    return entries
Example 8: tokenize

# Required module: import utils [as alias]
# Or: from utils import assert_eq [as alias]
def tokenize(self, max_length=14):
    """Tokenizes the questions.
    This will add q_token to each entry of the dataset.
    -1 represents nil and should be treated as padding_idx in the embedding.
    """
    for entry in self.entries:
        tokens = self.dictionary.tokenize(entry['question'], False)
        tokens = tokens[:max_length]
        if len(tokens) < max_length:
            # Note: here we pad in front of the sentence
            padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
            tokens = padding + tokens
        utils.assert_eq(len(tokens), max_length)
        entry['q_token'] = tokens
Example 9: _load_dataset

# Required module: import utils [as alias]
# Or: from utils import assert_eq [as alias]
def _load_dataset(dataroot, name, img_id2val, label2ans):
    """Load entries.

    img_id2val: dict {img_id -> val}; val can be used to retrieve image or features
    dataroot: root path of the dataset
    name: 'train', 'val', 'test-dev2015', 'test2015'
    """
    question_path = os.path.join(
        dataroot, 'v2_OpenEnded_mscoco_%s_questions.json' %
        (name + '2014' if 'test' != name[:4] else name))
    questions = sorted(json.load(open(question_path))['questions'],
                       key=lambda x: x['question_id'])
    if 'test' != name[:4]:  # train, val
        answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)
        answers = cPickle.load(open(answer_path, 'rb'))
        answers = sorted(answers, key=lambda x: x['question_id'])
        utils.assert_eq(len(questions), len(answers))
        entries = []
        for question, answer in zip(questions, answers):
            utils.assert_eq(question['question_id'], answer['question_id'])
            utils.assert_eq(question['image_id'], answer['image_id'])
            img_id = question['image_id']
            if not COUNTING_ONLY or is_howmany(question['question'], answer, label2ans):
                entries.append(_create_entry(img_id2val[img_id], question, answer))
    else:  # test2015
        entries = []
        for question in questions:
            img_id = question['image_id']
            if not COUNTING_ONLY or is_howmany(question['question'], None, None):
                entries.append(_create_entry(img_id2val[img_id], question, None))
    return entries
Example 10: tokenize

# Required module: import utils [as alias]
# Or: from utils import assert_eq [as alias]
def tokenize(self, max_length=14):
    """Tokenizes the questions.
    This will add q_token to each entry of the dataset.
    -1 represents nil and should be treated as padding_idx in the embedding.
    """
    for entry in self.entries:
        tokens = self.dictionary.tokenize(entry['question'], False)
        tokens = tokens[:max_length]
        if len(tokens) < max_length:
            # Note: here we pad at the back of the sentence
            padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
            tokens = tokens + padding
        utils.assert_eq(len(tokens), max_length)
        entry['q_token'] = tokens