This article collects typical usage examples of the Connector.sync_fill method from the Python quagga.connector module. If you are wondering how Connector.sync_fill is used in practice, the curated example below should help. You can also explore further usage examples of the containing class, quagga.connector.Connector.
One code example of the Connector.sync_fill method is shown below.
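Before the full example, here is a minimal sketch of the call pattern. It is an illustration only: the Matrix.empty and Context signatures are inferred from the example below, and device 0 is an assumed device id.

from quagga.context import Context
from quagga.matrix import Matrix
from quagga.connector import Connector

# Minimal sketch (assumptions: signatures mirror the example below; device 0 exists).
context = Context(0)                                 # computation context on device 0
buf = Matrix.empty(4, 8, 'int', context.device_id)   # uninitialized 4x8 int matrix
batch = Connector(buf, context)                      # wrap the matrix for use by downstream blocks
batch.sync_fill(0)                                   # fill every element with 0, blocking until done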
Example 1: PtbMiniBatchesGenerator
# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import sync_fill [as alias]
import numpy as np

from quagga.context import Context
from quagga.matrix import Matrix
from quagga.connector import Connector
# Assumption: List and HomogeneousDataGenerator come from the surrounding
# project code; their import paths are not shown in the source.


class PtbMiniBatchesGenerator(object):
    def __init__(self, ptb_train, ptb_valid, batch_size, sentence_max_len, device_id):
        self.blocking_contexts = None
        self.context = Context(device_id)
        device_id = self.context.device_id
        self.train_offsets = HomogeneousDataGenerator(ptb_train, batch_size, sentence_max_len, randomize=True, infinite=True)
        self.valid_offsets = HomogeneousDataGenerator(ptb_valid, batch_size, sentence_max_len)
        train_sentences = np.array([self.train_offsets.flatten_sentences])
        valid_sentences = np.array([self.valid_offsets.flatten_sentences])
        self.train_sents = Matrix.from_npa(train_sentences, 'int', device_id)
        self.valid_sents = Matrix.from_npa(valid_sentences, 'int', device_id)
        # The [...] view keeps the full buffer reachable through .base, so
        # fprop can shrink the array to the current batch size and grow it back.
        self._sent_lengths = np.empty((batch_size, 1), dtype=np.int32, order='F')[...]
        self.sent_lengths = Matrix.from_npa(self._sent_lengths, device_id=device_id)
        sentence_batch = Matrix.empty(batch_size, sentence_max_len, 'int', device_id)
        self.sentence_batch = Connector(sentence_batch, self.context)
        # Synchronously zero the batch buffer before its first use.
        self.sentence_batch.sync_fill(0)
        self._mask = Matrix.empty(sentence_batch.nrows, self.sentence_batch.ncols, 'float', device_id)
        # One mask connector per time step; the list length tracks sentence_batch.ncols.
        self.mask = List([Connector(self._mask[:, i]) for i in xrange(sentence_max_len)], self.sentence_batch.ncols)
        self.train_offsets_iterator = iter(self.train_offsets)
        self.valid_offsets_iterator = iter(self.valid_offsets)
        self.training_mode = True

    def set_training_mode(self):
        self.training_mode = True

    def set_testing_mode(self):
        self.training_mode = False

    def fprop(self):
        if self.training_mode:
            offsets = next(self.train_offsets_iterator)
            sents = self.train_sents
        else:
            try:
                offsets = next(self.valid_offsets_iterator)
                sents = self.valid_sents
            except StopIteration as e:
                # Reset the validation iterator for the next epoch, then
                # re-raise so the caller can detect that the epoch ended.
                self.valid_offsets_iterator = iter(self.valid_offsets)
                raise e
        # blocking_contexts is expected to be set externally (e.g. to the
        # model's output contexts) before fprop is first called.
        self.context.wait(*self.blocking_contexts)
        self._sent_lengths = self._sent_lengths.base[:len(offsets)]
        self.sentence_batch.nrows = len(offsets)
        for k, offset in enumerate(offsets):
            self.sentence_batch[k].assign(self.context, sents[:, offset[0]:offset[1]])
            self._sent_lengths[k] = offset[1] - offset[0]
        max_sent_len = int(np.max(self._sent_lengths))
        self.sentence_batch.last_modification_context = self.context
        self.sentence_batch.ncols = max_sent_len
        self.sent_lengths.assign_npa(self.context, self._sent_lengths)
        # Row-wise mask: entries at column j are kept only while j < sent_lengths[i].
        self._mask.mask_column_numbers_row_wise(self.context, self.sent_lengths)
        for e in self.mask:
            e.last_modification_context = self.context
        self.sentence_batch.fprop()
        self.mask.fprop()
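For context, a hypothetical driver loop is sketched below. The PTB inputs, the batch dimensions, and the way blocking_contexts is populated are assumptions; in the source they are handled outside this class.

# Hypothetical usage (assumptions: ptb_train/ptb_valid are sentence collections
# in whatever format HomogeneousDataGenerator expects).
data_gen = PtbMiniBatchesGenerator(ptb_train, ptb_valid,
                                   batch_size=32, sentence_max_len=40,
                                   device_id=0)
data_gen.blocking_contexts = []       # normally set to the model's output contexts
data_gen.set_testing_mode()
while True:
    try:
        data_gen.fprop()              # stage the next batch and its mask
    except StopIteration:
        break                         # fprop reset the validation iterator; epoch done
    # ... the model's fprop would consume data_gen.sentence_batch and data_gen.mask ...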