This page collects typical usage examples of the transformers.Mapping method from the Python fuel library. If you are wondering what transformers.Mapping does, how to call it, or what real code that uses it looks like, the examples selected here should help; you can also explore the rest of the fuel.transformers module for related transformers.

Five code examples of transformers.Mapping are shown below.
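Before the examples, here is a minimal, self-contained sketch of what Mapping does: it wraps a data stream and applies a function to every example (or batch) that flows through it. The toy dataset and the doubling function below are illustrative only; they are not taken from the examples that follow.

from fuel.datasets import IterableDataset
from fuel.streams import DataStream
from fuel.transformers import Mapping

# A tiny dataset with a single 'features' source.
dataset = IterableDataset({'features': [1, 2, 3]})
stream = DataStream(dataset)

# Mapping calls the given function on each example's tuple of source values
# and passes the returned tuple downstream.
doubled = Mapping(stream, lambda data: tuple(2 * x for x in data))

print(list(doubled.get_epoch_iterator()))  # [(2,), (4,), (6,)]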
Example 1: setup_datastream
# Required import: from fuel import transformers [as alias]
# Or: from fuel.transformers import Mapping [as alias]
def setup_datastream(path, vocab_file, config):
    ds = QADataset(path, vocab_file, config.n_entities,
                   need_sep_token=config.concat_ctx_and_question)
    it = QAIterator(path, shuffle=config.shuffle_questions)

    stream = DataStream(ds, iteration_scheme=it)

    if config.concat_ctx_and_question:
        stream = ConcatCtxAndQuestion(stream, config.concat_question_before,
                                      ds.reverse_vocab['<SEP>'])

    # Group sort_batch_count batches together, sort them by length, then
    # re-batch, so that each final batch contains examples of similar length.
    stream = Batch(stream,
                   iteration_scheme=ConstantScheme(config.batch_size * config.sort_batch_count))
    comparison = _balanced_batch_helper(
        stream.sources.index('question' if config.concat_ctx_and_question else 'context'))
    stream = Mapping(stream, SortMapping(comparison))
    stream = Unpack(stream)

    stream = Batch(stream, iteration_scheme=ConstantScheme(config.batch_size))
    # Pad variable-length sources and emit the corresponding *_mask sources.
    stream = Padding(stream,
                     mask_sources=['context', 'question', 'candidates'],
                     mask_dtype='int32')

    return ds, stream
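_balanced_batch_helper is used in Examples 1 and 2 but is not defined on this page. Judging from how it is passed to SortMapping together with a source index, it most likely returns the length of that source for a given example, so that examples within a large batch get sorted by length. The class below is a hypothetical reconstruction, not the original code:

class _balanced_batch_helper(object):
    # Hypothetical sketch: a picklable key function that returns the length
    # of the source at position `key` in an example tuple.
    def __init__(self, key):
        self.key = key

    def __call__(self, data):
        return len(data[self.key])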
Example 2: setup_datastream
# Required import: from fuel import transformers [as alias]
# Or: from fuel.transformers import Mapping [as alias]
def setup_datastream(path, batch_size, sort_batch_count, valid=False):
    # Raw input sequences, phoneme annotations, and per-utterance index ranges.
    A = numpy.load(os.path.join(path, ('valid_x_raw.npy' if valid else 'train_x_raw.npy')))
    B = numpy.load(os.path.join(path, ('valid_phn.npy' if valid else 'train_phn.npy')))
    C = numpy.load(os.path.join(path, ('valid_seq_to_phn.npy' if valid else 'train_seq_to_phn.npy')))

    # For each utterance, take the phoneme-label column of its annotation rows.
    D = [B[x[0]:x[1], 2] for x in C]

    ds = IndexableDataset({'input': A, 'output': D})
    stream = DataStream(ds, iteration_scheme=ShuffledExampleScheme(len(A)))

    # Group sort_batch_count batches together, sort by input length, then
    # re-batch, so that each final batch contains examples of similar length.
    stream = Batch(stream, iteration_scheme=ConstantScheme(batch_size * sort_batch_count))
    comparison = _balanced_batch_helper(stream.sources.index('input'))
    stream = Mapping(stream, SortMapping(comparison))
    stream = Unpack(stream)

    stream = Batch(stream, iteration_scheme=ConstantScheme(batch_size, num_examples=len(A)))
    stream = Padding(stream, mask_sources=['input', 'output'])

    return ds, stream
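A possible way to consume the stream built above; the path and the numbers are placeholders, not values from the original project:

ds, stream = setup_datastream('/path/to/data', batch_size=32, sort_batch_count=20)

for batch in stream.get_epoch_iterator(as_dict=True):
    # Padding adds a '<source>_mask' entry for every masked source.
    x, x_mask = batch['input'], batch['input_mask']
    y, y_mask = batch['output'], batch['output_mask']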
Example 3: get_stream
# Required import: from fuel import transformers [as alias]
# Or: from fuel.transformers import Mapping [as alias]
def get_stream(hdf5_file, which_set, batch_size=None):
    dataset = H5PYDataset(
        hdf5_file, which_sets=(which_set,), load_in_memory=True)
    if batch_size is None:
        batch_size = dataset.num_examples
    stream = DataStream(dataset=dataset, iteration_scheme=ShuffledScheme(
        examples=dataset.num_examples, batch_size=batch_size))
    # Required because Recurrent bricks receive their input as
    # [sequence, batch, features].
    return Mapping(stream, transpose_stream)
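transpose_stream is referenced but not defined on this page. Given the comment about Recurrent bricks expecting [sequence, batch, features], it presumably swaps the batch and time axes of each array in the batch; the function below is an assumed reconstruction, not the original:

import numpy

def transpose_stream(data):
    # Assumed sketch: move the time axis in front of the batch axis for every
    # array in the batch tuple.
    return tuple(numpy.swapaxes(array, 0, 1) for array in data)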
Example 4: wrap_stream
# Required import: from fuel import transformers [as alias]
# Or: from fuel.transformers import Mapping [as alias]
def wrap_stream(self, stream):
    # Apply this object's `apply` method to every batch that passes through.
    return Mapping(stream, Invoke(self, 'apply'))
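Invoke is not part of fuel and is not defined on this page. From this usage it appears to be a small picklable callable that looks up a named method on an object and applies it to each batch; the class below is only a guess at its shape:

class Invoke(object):
    # Hypothetical sketch: call getattr(obj, name)(*data) on every batch.
    def __init__(self, obj, name):
        self.obj = obj
        self.name = name

    def __call__(self, data):
        return getattr(self.obj, self.name)(*data)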
Example 5: test_default_transformer
# Required import: from fuel import transformers [as alias]
# Or: from fuel.transformers import Mapping [as alias]
def test_default_transformer(self):
    class DoublingDataset(IterableDataset):
        def apply_default_transformer(self, stream):
            return Mapping(
                stream, lambda sources: tuple(2 * s for s in sources))

    dataset = DoublingDataset(self.data)
    stream = dataset.apply_default_transformer(DataStream(dataset))
    assert_equal(list(stream.get_epoch_iterator()), [(2,), (4,), (6,)])