This page collects typical usage examples of fuel.transformers.Mapping in Python. If you are unsure what transformers.Mapping does or how to use it, the curated code examples below may help; you can also read further about the fuel.transformers module in which Mapping is defined.
Shown below are 5 code examples of transformers.Mapping, sorted by popularity by default.
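Before the project-specific examples, a minimal self-contained sketch may help show the basic idea: Mapping wraps a DataStream and applies a callable to every example (or batch) that flows through it. The source name and data below are made up for illustration only.

from fuel.datasets import IterableDataset
from fuel.streams import DataStream
from fuel.transformers import Mapping

dataset = IterableDataset({'features': [1, 2, 3]})
stream = DataStream(dataset)

# The callable receives a tuple with one entry per source and must return
# a tuple of the same length.
doubled = Mapping(stream, lambda data: tuple(2 * x for x in data))
print(list(doubled.get_epoch_iterator()))  # [(2,), (4,), (6,)]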
Example 1: setup_datastream
# Required import: from fuel import transformers [as alias]
# Or: from fuel.transformers import Mapping [as alias]
def setup_datastream(path, vocab_file, config):
    ds = QADataset(path, vocab_file, config.n_entities,
                   need_sep_token=config.concat_ctx_and_question)
    it = QAIterator(path, shuffle=config.shuffle_questions)
    stream = DataStream(ds, iteration_scheme=it)

    if config.concat_ctx_and_question:
        stream = ConcatCtxAndQuestion(stream, config.concat_question_before,
                                      ds.reverse_vocab['<SEP>'])

    # Group several batches together and sort them so that each final batch
    # contains sequences of similar length.
    stream = Batch(stream, iteration_scheme=ConstantScheme(
        config.batch_size * config.sort_batch_count))
    comparison = _balanced_batch_helper(stream.sources.index(
        'question' if config.concat_ctx_and_question else 'context'))
    stream = Mapping(stream, SortMapping(comparison))
    stream = Unpack(stream)

    stream = Batch(stream, iteration_scheme=ConstantScheme(config.batch_size))
    stream = Padding(stream,
                     mask_sources=['context', 'question', 'candidates'],
                     mask_dtype='int32')

    return ds, stream
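Example 1 relies on a helper _balanced_batch_helper that is not shown in the snippet. Judging from how it is passed to SortMapping, it must return, for each example, the sort key (typically the sequence length) of the source at a given index. A hypothetical reconstruction, probably written as a class rather than a closure so the stream stays picklable:

# Hypothetical reconstruction of _balanced_batch_helper; the real project
# may define it differently.
class _balanced_batch_helper(object):
    """Return the length of the source at index `key`, so that SortMapping
    orders the examples of a super-batch by sequence length."""
    def __init__(self, key):
        self.key = key

    def __call__(self, data):
        return len(data[self.key])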
Example 2: setup_datastream
# Required import: from fuel import transformers [as alias]
# Or: from fuel.transformers import Mapping [as alias]
def setup_datastream(path, batch_size, sort_batch_count, valid=False):
    A = numpy.load(os.path.join(path, ('valid_x_raw.npy' if valid else 'train_x_raw.npy')))
    B = numpy.load(os.path.join(path, ('valid_phn.npy' if valid else 'train_phn.npy')))
    C = numpy.load(os.path.join(path, ('valid_seq_to_phn.npy' if valid else 'train_seq_to_phn.npy')))
    # Slice out the phoneme labels of each utterance using its (start, end)
    # range from the sequence-to-phoneme index.
    D = [B[x[0]:x[1], 2] for x in C]

    ds = IndexableDataset({'input': A, 'output': D})
    stream = DataStream(ds, iteration_scheme=ShuffledExampleScheme(len(A)))

    # Group several batches together and sort them so that each final batch
    # contains sequences of similar length.
    stream = Batch(stream, iteration_scheme=ConstantScheme(
        batch_size * sort_batch_count))
    comparison = _balanced_batch_helper(stream.sources.index('input'))
    stream = Mapping(stream, SortMapping(comparison))
    stream = Unpack(stream)

    stream = Batch(stream, iteration_scheme=ConstantScheme(
        batch_size, num_examples=len(A)))
    stream = Padding(stream, mask_sources=['input', 'output'])

    return ds, stream
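A hedged usage sketch of the function above; the data directory and the batch parameters are assumptions for illustration only, and the .npy files must already exist at that path:

# Hypothetical call; paths and sizes are not part of the original example.
ds, stream = setup_datastream('data/timit', batch_size=32, sort_batch_count=20)

for batch in stream.get_epoch_iterator(as_dict=True):
    # Padding adds a '<source>_mask' companion array for every masked source.
    print(batch['input'].shape, batch['input_mask'].shape)
    break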
Example 3: get_stream
# Required import: from fuel import transformers [as alias]
# Or: from fuel.transformers import Mapping [as alias]
def get_stream(hdf5_file, which_set, batch_size=None):
    dataset = H5PYDataset(
        hdf5_file, which_sets=(which_set,), load_in_memory=True)
    if batch_size is None:
        batch_size = dataset.num_examples
    stream = DataStream(dataset=dataset, iteration_scheme=ShuffledScheme(
        examples=dataset.num_examples, batch_size=batch_size))
    # Transpose each batch because recurrent bricks expect input shaped as
    # [sequence, batch, features].
    return Mapping(stream, transpose_stream)
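The transpose_stream callable used here is defined elsewhere in that project. Given the comment about recurrent bricks expecting [sequence, batch, features], it presumably swaps the first two axes of every source. A sketch under that assumption:

import numpy

# Hypothetical implementation of transpose_stream; the original project's
# version may differ.
def transpose_stream(data):
    return tuple(numpy.swapaxes(array, 0, 1) for array in data)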
Example 4: wrap_stream
# Required import: from fuel import transformers [as alias]
# Or: from fuel.transformers import Mapping [as alias]
def wrap_stream(self, stream):
    # Apply this object's `apply` method to every batch of the stream.
    return Mapping(stream, Invoke(self, 'apply'))
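Invoke is also defined outside this snippet. Judging from the call Invoke(self, 'apply'), it is most likely a small picklable callable that forwards its arguments to a named method of an object (a plain lambda would not survive pickling when the stream is checkpointed). A hypothetical version:

# Hypothetical reconstruction of the Invoke helper; the real class may differ.
class Invoke(object):
    def __init__(self, obj, name):
        self.obj = obj
        self.name = name

    def __call__(self, *args, **kwargs):
        # Look the method up lazily so the wrapped object can be pickled.
        return getattr(self.obj, self.name)(*args, **kwargs)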
Example 5: test_default_transformer
# Required import: from fuel import transformers [as alias]
# Or: from fuel.transformers import Mapping [as alias]
def test_default_transformer(self):
    class DoublingDataset(IterableDataset):
        def apply_default_transformer(self, stream):
            return Mapping(
                stream, lambda sources: tuple(2 * s for s in sources))

    dataset = DoublingDataset(self.data)
    stream = dataset.apply_default_transformer(DataStream(dataset))
    assert_equal(list(stream.get_epoch_iterator()), [(2,), (4,), (6,)])
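This test method depends on self.data from its test class, which is not shown here; judging from the asserted output [(2,), (4,), (6,)], it is presumably [1, 2, 3]. The example demonstrates that a dataset can override apply_default_transformer to return a Mapping-wrapped stream, so every value drawn from the stream is transformed (here, doubled) transparently.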