This page collects typical usage examples of the utils.assert_eq method in Python. If you have been wondering what utils.assert_eq does in practice, how to call it, or what real code that uses it looks like, the curated method examples below should help. You can also explore further usage examples from the utils module.
Below are 10 code examples of the utils.assert_eq method, sorted by popularity by default.
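The utils.assert_eq helper itself is not reproduced on this page. In the VQA repositories these snippets come from it is essentially a thin, readable wrapper around assert; the sketch below is an assumption about its general shape, not a verbatim copy of any particular utils module:

# utils.py -- assumed minimal sketch of the helper used throughout the examples below
def assert_eq(real, expected):
    # Raise AssertionError with both values so mismatches are easy to debug.
    assert real == expected, '%s (true) vs %s (expected)' % (real, expected)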
Example 1: _load_dataset
# Required import: import utils [as alias]
# Or: from utils import assert_eq [as alias]
# (The snippet also uses os, json, cPickle, and the module-level helper _create_entry.)
def _load_dataset(dataroot, name, img_id2val):
    """Load entries.

    img_id2val: dict {img_id -> val}; val can be used to retrieve an image or its features
    dataroot: root path of the dataset
    name: 'train' or 'val'
    """
    question_path = os.path.join(
        dataroot, 'v2_OpenEnded_mscoco_%s2014_questions.json' % name)
    questions = sorted(json.load(open(question_path))['questions'],
                       key=lambda x: x['question_id'])
    answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)
    answers = cPickle.load(open(answer_path, 'rb'))
    answers = sorted(answers, key=lambda x: x['question_id'])
    utils.assert_eq(len(questions), len(answers))
    entries = []
    for question, answer in zip(questions, answers):
        utils.assert_eq(question['question_id'], answer['question_id'])
        utils.assert_eq(question['image_id'], answer['image_id'])
        img_id = question['image_id']
        entries.append(_create_entry(img_id2val[img_id], question, answer))
    return entries
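A typical caller of this loader first builds img_id2val from a precomputed mapping of COCO image ids to feature indices and then requests a split. The file name below is a hypothetical placeholder for whatever your feature-extraction step produced:

# Hypothetical usage sketch; 'train_imgid2idx.pkl' is an assumed artifact name.
import os
import pickle

dataroot = 'data'
with open(os.path.join(dataroot, 'train_imgid2idx.pkl'), 'rb') as f:
    img_id2val = pickle.load(f)  # {img_id -> row index into the feature file}

train_entries = _load_dataset(dataroot, 'train', img_id2val)
print('loaded %d entries' % len(train_entries))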
Example 2: tokenize
# Required import: import utils [as alias]
# Or: from utils import assert_eq [as alias]
def tokenize(self, max_length=14):
    """Tokenize the questions.

    This adds a q_token list to each entry of the dataset.
    -1 represents nil and should be treated as padding_idx in the embedding.
    """
    for entry in self.entries:
        tokens = self.dictionary.tokenize(entry['question'], False)
        tokens = tokens[:max_length]
        if len(tokens) < max_length:
            # Note that here we pad at the back of the sentence.
            padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
            tokens = tokens + padding
        utils.assert_eq(len(tokens), max_length)
        entry['q_token'] = tokens
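Once every entry carries a fixed-length q_token list, a companion step usually converts those lists into integer tensors so they can be batched. The tensorize method below is a sketch under that assumption, not part of the example above:

# Assumed companion step: turn the padded token lists into LongTensors.
import torch

def tensorize(self):
    for entry in self.entries:
        # assert_eq above guarantees each list holds exactly max_length ids.
        entry['q_token'] = torch.LongTensor(entry['q_token'])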
Example 3: _load_dataset
# Required import: import utils [as alias]
# Or: from utils import assert_eq [as alias]
# (The snippet also uses os, json, pickle, and the module-level helper _create_entry.)
def _load_dataset(dataroot, name, img_id2val):
    """Load entries.

    img_id2val: dict {img_id -> val}; val can be used to retrieve an image or its features
    dataroot: root path of the dataset
    name: 'train' or 'val'
    """
    question_path = os.path.join(
        dataroot, 'v2_OpenEnded_mscoco_%s2014_questions.json' % name)
    questions = sorted(json.load(open(question_path))['questions'],
                       key=lambda x: x['question_id'])
    answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)
    answers = pickle.load(open(answer_path, 'rb'))
    answers = sorted(answers, key=lambda x: x['question_id'])
    utils.assert_eq(len(questions), len(answers))
    entries = []
    for question, answer in zip(questions, answers):
        utils.assert_eq(question['question_id'], answer['question_id'])
        utils.assert_eq(question['image_id'], answer['image_id'])
        img_id = question['image_id']
        entries.append(_create_entry(img_id2val[img_id], question, answer))
    return entries
Example 4: tokenize
# Required import: import utils [as alias]
# Or: from utils import assert_eq [as alias]
def tokenize(self, max_length=14):
    """Tokenize the questions.

    This adds a q_token list to each entry of the dataset.
    -1 represents nil and should be treated as padding_idx in the embedding.
    """
    for entry in self.entries:
        tokens = self.dictionary.tokenize(entry['question'], False)
        tokens = tokens[:max_length]
        if len(tokens) < max_length:
            # Note that here we pad at the front of the sentence.
            padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
            tokens = padding + tokens
        utils.assert_eq(len(tokens), max_length)
        entry['q_token'] = tokens
Example 5: make_json
# Required import: import utils [as alias]
# Or: from utils import assert_eq [as alias]
# (get_answer is a helper defined elsewhere in the same module.)
def make_json(logits, qIds, dataloader):
    utils.assert_eq(logits.size(0), len(qIds))
    results = []
    for i in range(logits.size(0)):
        result = {}
        result['question_id'] = qIds[i].item()
        result['answer'] = get_answer(logits[i], dataloader)
        results.append(result)
    return results
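The list returned by make_json is typically written straight to disk in the VQA submission format. A hedged usage sketch, assuming logits, qIds, and dataloader come from an evaluation loop that is not shown here:

# Hypothetical usage sketch; the surrounding evaluation loop is assumed.
import json

results = make_json(logits, qIds, dataloader)
with open('test2015_results.json', 'w') as f:
    json.dump(results, f)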
Example 6: _load_dataset
# Required import: import utils [as alias]
# Or: from utils import assert_eq [as alias]
# (The snippet also uses os, json, pickle, and the module-level COUNTING_ONLY, is_howmany, and _create_entry.)
def _load_dataset(dataroot, name, coco_train_img_id2val, coco_val_img_id2val,
                  label2ans):
    """Load entries.

    coco_train_img_id2val / coco_val_img_id2val:
        dict {img_id -> val}; val can be used to retrieve an image or its features
    dataroot: root path of the dataset
    name: 'train' or 'val'
    """
    question_path = os.path.join(
        dataroot, 'cp_v2_questions/vqacp_v2_%s_questions.json' % name)
    questions = sorted(json.load(open(question_path)),
                       key=lambda x: x['question_id'])
    answer_path = os.path.join(dataroot, 'cache', 'cp_v2_%s_target.pkl' % name)
    answers = pickle.load(open(answer_path, 'rb'))
    answers = sorted(answers, key=lambda x: x['question_id'])
    utils.assert_eq(len(questions), len(answers))
    entries = []
    for question, answer in zip(questions, answers):
        utils.assert_eq(question['question_id'], answer['question_id'])
        utils.assert_eq(question['image_id'], answer['image_id'])
        img_id = question['image_id']
        coco_split = question["coco_split"]
        index = (coco_train_img_id2val[img_id]
                 if coco_split == "train2014" else coco_val_img_id2val[img_id])
        if not COUNTING_ONLY or is_howmany(question['question'], answer, label2ans):
            entries.append(_create_entry(index, question, answer))
    return entries
Example 7: _load_dataset
# Required import: import utils [as alias]
# Or: from utils import assert_eq [as alias]
# (The snippet also uses os, json, pickle, and the module-level COUNTING_ONLY, is_howmany, and _create_entry.)
def _load_dataset(dataroot, name, img_id2val, label2ans):
    """Load entries.

    img_id2val: dict {img_id -> val}; val can be used to
        retrieve an image or its features
    dataroot: root path of the dataset
    name: 'train', 'val', 'test-dev2015', or 'test2015'
    """
    question_path = os.path.join(
        dataroot, 'Questions/v2_OpenEnded_mscoco_%s_questions.json' %
        (name + '2014' if 'test' != name[:4] else name))
    questions = sorted(json.load(open(question_path))['questions'],
                       key=lambda x: x['question_id'])
    # train, val
    if 'test' != name[:4]:
        answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)
        answers = pickle.load(open(answer_path, 'rb'))
        answers = sorted(answers, key=lambda x: x['question_id'])
        utils.assert_eq(len(questions), len(answers))
        entries = []
        for question, answer in zip(questions, answers):
            utils.assert_eq(question['question_id'], answer['question_id'])
            utils.assert_eq(question['image_id'], answer['image_id'])
            img_id = question['image_id']
            if not COUNTING_ONLY \
                    or is_howmany(question['question'], answer, label2ans):
                entries.append(_create_entry(img_id2val[img_id],
                                             question, answer))
    # test2015
    else:
        entries = []
        for question in questions:
            img_id = question['image_id']
            if not COUNTING_ONLY \
                    or is_howmany(question['question'], None, None):
                entries.append(_create_entry(img_id2val[img_id],
                                             question, None))
    return entries
Example 8: tokenize
# Required import: import utils [as alias]
# Or: from utils import assert_eq [as alias]
def tokenize(self, max_length=14):
    """Tokenize the questions.

    This adds a q_token list to each entry of the dataset.
    -1 represents nil and should be treated as padding_idx in the embedding.
    """
    for entry in self.entries:
        tokens = self.dictionary.tokenize(entry['question'], False)
        tokens = tokens[:max_length]
        if len(tokens) < max_length:
            # Note that here we pad at the front of the sentence.
            padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
            tokens = padding + tokens
        utils.assert_eq(len(tokens), max_length)
        entry['q_token'] = tokens
Example 9: _load_dataset
# Required import: import utils [as alias]
# Or: from utils import assert_eq [as alias]
# (The snippet also uses os, json, cPickle, and the module-level COUNTING_ONLY, is_howmany, and _create_entry.)
def _load_dataset(dataroot, name, img_id2val, label2ans):
    """Load entries.

    img_id2val: dict {img_id -> val}; val can be used to retrieve an image or its features
    dataroot: root path of the dataset
    name: 'train', 'val', 'test-dev2015', or 'test2015'
    """
    question_path = os.path.join(
        dataroot, 'v2_OpenEnded_mscoco_%s_questions.json' %
        (name + '2014' if 'test' != name[:4] else name))
    questions = sorted(json.load(open(question_path))['questions'],
                       key=lambda x: x['question_id'])
    if 'test' != name[:4]:  # train, val
        answer_path = os.path.join(dataroot, 'cache', '%s_target.pkl' % name)
        answers = cPickle.load(open(answer_path, 'rb'))
        answers = sorted(answers, key=lambda x: x['question_id'])
        utils.assert_eq(len(questions), len(answers))
        entries = []
        for question, answer in zip(questions, answers):
            utils.assert_eq(question['question_id'], answer['question_id'])
            utils.assert_eq(question['image_id'], answer['image_id'])
            img_id = question['image_id']
            if not COUNTING_ONLY or is_howmany(question['question'], answer, label2ans):
                entries.append(_create_entry(img_id2val[img_id], question, answer))
    else:  # test2015
        entries = []
        for question in questions:
            img_id = question['image_id']
            if not COUNTING_ONLY or is_howmany(question['question'], None, None):
                entries.append(_create_entry(img_id2val[img_id], question, None))
    return entries
Example 10: tokenize
# Required import: import utils [as alias]
# Or: from utils import assert_eq [as alias]
def tokenize(self, max_length=14):
    """Tokenize the questions.

    This adds a q_token list to each entry of the dataset.
    -1 represents nil and should be treated as padding_idx in the embedding.
    """
    for entry in self.entries:
        tokens = self.dictionary.tokenize(entry['question'], False)
        tokens = tokens[:max_length]
        if len(tokens) < max_length:
            # Note that here we pad at the back of the sentence.
            padding = [self.dictionary.padding_idx] * (max_length - len(tokens))
            tokens = tokens + padding
        utils.assert_eq(len(tokens), max_length)
        entry['q_token'] = tokens