This article collects typical usage examples of the Connector class from the Python module quagga.connector. If you are unsure what the Connector class does or how to use it, the curated examples below may help.
The following shows 15 code examples of the Connector class, sorted by popularity by default.
Example 1: __init__
def __init__(self, train_data, valid_data, batch_size, word_dropout_prob, device_id):
self.train_data = HomogeneousDataIterator(train_data, batch_size, randomize=True, infinite=True)
self.valid_data = HomogeneousDataIterator(valid_data, batch_size)
self.train_data_iterator = iter(self.train_data)
self.valid_data_iterator = iter(self.valid_data)
self.word_keep_prob = 1.0 - word_dropout_prob
self.rnd = RandomState(47571)
self.unk_idx = word_to_idx['<UNK>']  # word_to_idx is assumed to be defined in the enclosing module
self.context = Context(device_id)
c = Counter([len(line) for line in chain(train_data, valid_data)])
print c.most_common()
max_len = max([len(line) for line in chain(train_data, valid_data)])
self.enc_x = Connector(Matrix.empty(batch_size, max_len, 'int', device_id))
self.enc_lengths = Matrix.empty(self.enc_x.nrows, 1, 'int', device_id)
self._enc_mask = Matrix.empty(self.enc_x.nrows, self.enc_x.ncols, 'float', device_id)
self.enc_mask = List([Connector(self._enc_mask[:, i]) for i in xrange(max_len)], self.enc_x.ncols)
self.dec_x = Connector(Matrix.empty(batch_size, max_len + 1, 'int', device_id))
self._dec_y = Matrix.empty(batch_size, max_len + 1, 'int', device_id)
self.dec_y = List([Connector(self._dec_y[:, i]) for i in xrange(max_len + 1)], self._dec_y.ncols)
self.dec_lengths = Matrix.empty(self.dec_x.nrows, 1, 'int', device_id)
self._dec_mask = Matrix.empty(self.dec_x.nrows, self.dec_x.ncols, 'float', device_id)
self.dec_mask = List([Connector(self._dec_mask[:, i]) for i in xrange(max_len + 1)], self.dec_x.ncols)
self.blocking_contexts = None
self.training_mode = True
Example 2: __init__
def __init__(self, W, b, x, device_id=None):
self.f_context = Context(device_id)
device_id = self.f_context.device_id
if W.bpropagable:
self.W, self.dL_dW = W.register_usage(device_id, device_id)
else:
self.W = W.register_usage(device_id)
if b:
if b.bpropagable:
self.b, self.dL_db = b.register_usage(device_id, device_id)
self.ones = Matrix.empty(x.nrows, 1, self.b.dtype, device_id)
self.ones.sync_fill(1.0)
else:
self.b = b.register_usage(device_id)
if x.bpropagable:
self.x, self.dL_dx = x.register_usage(device_id, device_id)
else:
self.x = x.register_usage(device_id)
output = Matrix.empty(x.nrows, self.W.ncols, device_id=device_id)
self.learning = hasattr(self, 'dL_dW') or hasattr(self, 'dL_db') or \
hasattr(self, 'dL_dx')
if self.learning:
self.b_context = Context(device_id)
self.output = Connector(output, device_id)
else:
self.output = Connector(output)
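The constructor above follows the library's common pattern: parameters and inputs arrive wrapped in Connector objects, register_usage hands back the forward view (plus a gradient accumulator when the connector is bpropagable), and the result is published as an output Connector. Below is a minimal wiring sketch, not taken from the library: DotLikeBlock is a hypothetical stand-in for the class owning this __init__, the quagga.matrix/quagga.connector import paths are assumptions, and the F-ordered NumPy arrays follow Example 7.

import numpy as np
import quagga
from quagga.matrix import Matrix
from quagga.connector import Connector

quagga.processor_type = 'cpu'   # as in Example 8
device_id = 0
# A Connector created with a device_id is bpropagable; without one it is fprop-only.
W = Connector(Matrix.from_npa(np.asfortranarray(np.random.rand(5, 3), dtype=np.float32)), device_id)
b = Connector(Matrix.from_npa(np.zeros((1, 3), dtype=np.float32, order='F')), device_id)
x = Connector(Matrix.from_npa(np.asfortranarray(np.random.rand(4, 5), dtype=np.float32)))
block = DotLikeBlock(W, b, x)   # hypothetical class owning the __init__ above
W.fprop(); b.fprop(); x.fprop()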
Example 3: LastSelectorBlock
class LastSelectorBlock(object):
"""
TODO(igor).
Parameters
----------
x : Matrix (GpuMatrix or CpuMatrix)
"""
def __init__(self, x):
device_id = x[0].device_id
learning = x[0].bpropagable
self.context = Context(device_id)
self.output = Matrix.empty_like(x[0])
self.output = Connector(self.output, device_id if learning else None)
if learning:
self.x, self.dL_dx = izip(*x.register_usage(device_id, device_id))
else:
self.x = x.register_usage(device_id)
self.last_idx = x.length - 1
def fprop(self):
self.output.assign(self.context, self.x[self.last_idx])
self.output.fprop()
def bprop(self):
self.dL_dx[self.last_idx].add(self.context, self.output.backward_matrix)
Example 4: ArgmaxBlock
class ArgmaxBlock(object):
"""
Determines argmax values along the specified ``axis`` in the input matrix.
The block returns a vector (a matrix with one of its dimensions equal to 1) of
argmax values.
Parameters
----------
x : Matrix (GpuMatrix or CpuMatrix)
Block's input
axis : int
Axis along which argmax is determined
device_id : int
Defines the device's id on which the computation will take place
Returns
-------
vector
A vector containing argmax values (e.g. argmax for each row if axis == 1).
"""
def __init__(self, x, axis, device_id=None):
if axis != 1:
raise NotImplementedError
self.axis = axis
self.context = Context(device_id)
device_id = self.context.device_id
self.x = x.register_usage(device_id)
self.output = Connector(Matrix.empty(x.nrows, 1, x.dtype, device_id))
def fprop(self):
self.x.argmax(self.context, self.output, self.axis)
self.output.fprop()
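A minimal usage sketch for ArgmaxBlock as defined above. The CPU processor type and the to_host call mirror Example 8; the import paths are assumptions.

import numpy as np
import quagga
from quagga.matrix import Matrix
from quagga.connector import Connector

quagga.processor_type = 'cpu'
scores = np.asfortranarray([[0.1, 0.7, 0.2],
                            [0.6, 0.3, 0.1]], dtype=np.float32)
x = Connector(Matrix.from_npa(scores))   # fprop-only input
argmax_block = ArgmaxBlock(x, axis=1)
x.fprop()
argmax_block.fprop()
print(argmax_block.output.to_host())     # per-row argmax: [[1], [0]]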
Example 5: RepeatBlock
class RepeatBlock(object):
def __init__(self, x, repeats, axis=None, device_id=None):
self.context = Context(device_id)
device_id = self.context.device_id
self.repeats = repeats
self.axis = axis
learning = x.bpropagable
if learning:
self.x, self.dL_dx = x.register_usage(device_id, device_id)
else:
self.x = x.register_usage(device_id)
if axis == 0:
self.output = Matrix.empty(x.nrows * repeats, x.ncols, x.dtype, device_id)
elif axis == 1:
self.output = Matrix.empty(x.nrows, x.ncols * repeats, x.dtype, device_id)
else:
raise ValueError('TODO')
self.output = Connector(self.output, device_id if learning else None)
def fprop(self):
self.output.assign_repeat(self.context, self.x, self.repeats, self.axis)
self.output.fprop()
def bprop(self):
if hasattr(self, 'dL_dx'):
self.dL_dx.add_repeat_derivative(self.context, self.output.backward_matrix, self.repeats, self.axis)
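A sketch of column-wise repetition with RepeatBlock as defined above, under the same assumptions as the previous sketches (CPU mode, assumed import paths); the exact element order of the output is defined by assign_repeat, which is not shown here.

import numpy as np
import quagga
from quagga.matrix import Matrix
from quagga.connector import Connector

quagga.processor_type = 'cpu'
x = Connector(Matrix.from_npa(np.asfortranarray([[1., 2.],
                                                 [3., 4.]], dtype=np.float32)))
repeat_block = RepeatBlock(x, repeats=3, axis=1)
x.fprop()
repeat_block.fprop()
out = repeat_block.output.to_host()   # a 2x6 matrix: the input repeated three times along columns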
Example 6: NonlinearityBlock
class NonlinearityBlock(object):
"""
Applies a nonlinear function (``sigmoid``, ``tanh``, ``relu``) to the input.
Parameters
----------
x : Matrix (GpuMatrix or CpuMatrix)
nonlinearity : string
device_id : int
"""
def __init__(self, x, nonlinearity, device_id=None):
"""
"""
self.f_context = Context(device_id)
device_id = self.f_context.device_id
self.learning = x.bpropagable
if self.learning:
self.b_context = Context(device_id)
self.x, self.dL_dx = x.register_usage(device_id, device_id)
self._df_dpref = Matrix.empty_like(self.x, device_id)
else:
self.x = x.register_usage(device_id)
output = Matrix.empty_like(x, device_id)
self.output = Connector(output, device_id if self.learning else None)
if nonlinearity == "sigmoid":
self.f = self.x.sigmoid
elif nonlinearity == "tanh":
self.f = self.x.tanh
elif nonlinearity == "relu":
self.f = self.x.relu
elif nonlinearity == "softmax":
raise ValueError("For softmax nonlinearity use SoftmaxBlock!")
else:
raise ValueError("TODO!")
self.training_mode = True
@property
def df_dpref(self):
if self.training_mode and self.learning:
return self._df_dpref
def fprop(self):
self.f(self.f_context, self.output, self.df_dpref)
self.output.fprop()
def bprop(self):
if hasattr(self, "dL_dx"):
# dL/dpref = dL/df .* df/dpref
dL_df = self.output.backward_matrix
self.dL_dx.add_hprod(self.b_context, dL_df, self.df_dpref)
def set_training_mode(self):
self.training_mode = True
def set_testing_mode(self):
self.training_mode = False
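A short inference-mode sketch for NonlinearityBlock as defined above, under the same assumptions as the earlier sketches (CPU mode, assumed import paths).

import numpy as np
import quagga
from quagga.matrix import Matrix
from quagga.connector import Connector

quagga.processor_type = 'cpu'
x = Connector(Matrix.from_npa(np.asfortranarray([[-1., 0., 1.]], dtype=np.float32)))
tanh_block = NonlinearityBlock(x, 'tanh')
tanh_block.set_testing_mode()        # inference: the df/dpref buffer is not consulted
x.fprop()
tanh_block.fprop()
print(tanh_block.output.to_host())   # approximately [[-0.7616, 0.0, 0.7616]]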
Example 7: PtbMiniBatchesGenerator
class PtbMiniBatchesGenerator(object):
def __init__(self, ptb_train, ptb_valid, batch_size, sentence_max_len, device_id):
self.blocking_contexts = None
self.context = Context(device_id)
device_id = self.context.device_id
self.train_offsets = HomogeneousDataGenerator(ptb_train, batch_size, sentence_max_len, randomize=True, infinite=True)
self.valid_offsets = HomogeneousDataGenerator(ptb_valid, batch_size, sentence_max_len)
train_sentences = np.array([self.train_offsets.flatten_sentences])
valid_sentences = np.array([self.valid_offsets.flatten_sentences])
self.train_sents = Matrix.from_npa(train_sentences, 'int', device_id)
self.valid_sents = Matrix.from_npa(valid_sentences, 'int', device_id)
self._sent_lengths = np.empty((batch_size, 1), dtype=np.int32, order='F')[...]  # the [...] keeps a view whose .base is the full buffer; fprop re-slices it each call
self.sent_lengths = Matrix.from_npa(self._sent_lengths, device_id=device_id)
sentence_batch = Matrix.empty(batch_size, sentence_max_len, 'int', device_id)
self.sentence_batch = Connector(sentence_batch, self.context)
self.sentence_batch.sync_fill(0)
self._mask = Matrix.empty(sentence_batch.nrows, self.sentence_batch.ncols, 'float', device_id)
self.mask = List([Connector(self._mask[:, i]) for i in xrange(sentence_max_len)], self.sentence_batch.ncols)
self.train_offsets_iterator = iter(self.train_offsets)
self.valid_offsets_iterator = iter(self.valid_offsets)
self.training_mode = True
def set_training_mode(self):
self.training_mode = True
def set_testing_mode(self):
self.training_mode = False
def fprop(self):
if self.training_mode:
offsets = next(self.train_offsets_iterator)
sents = self.train_sents
else:
try:
offsets = next(self.valid_offsets_iterator)
sents = self.valid_sents
except StopIteration as e:
self.valid_offsets_iterator = iter(self.valid_offsets)
raise e
self.context.wait(*self.blocking_contexts)  # blocking_contexts is None until the caller sets it, so it must be supplied before the first fprop
self._sent_lengths = self._sent_lengths.base[:len(offsets)]
self.sentence_batch.nrows = len(offsets)
for k, offset in enumerate(offsets):
self.sentence_batch[k].assign(self.context, sents[:, offset[0]:offset[1]])
self._sent_lengths[k] = offset[1] - offset[0]
max_sent_len = int(np.max(self._sent_lengths))
self.sentence_batch.last_modification_context = self.context
self.sentence_batch.ncols = max_sent_len
self.sent_lengths.assign_npa(self.context, self._sent_lengths)
self._mask.mask_column_numbers_row_wise(self.context, self.sent_lengths)
for e in self.mask:
e.last_modification_context = self.context
self.sentence_batch.fprop()
self.mask.fprop()
Example 8: test_bprop_vector
def test_bprop_vector(self):
r = []
for _ in xrange(self.N):
embd_dim = self.rng.random_integers(10000)
batch_size, output_dim = self.rng.random_integers(2000, size=2)
W = self.get_orthogonal_matrix(embd_dim, output_dim)
row_idxs = self.rng.randint(embd_dim, size=(batch_size, 1)).astype(np.int32)
true_labels = self.rng.randint(output_dim, size=(batch_size, 1)).astype(np.int32)
device_id = 0
output = {}
for processor_type in ['gpu', 'cpu']:
quagga.processor_type = processor_type
qrow_idxs = Connector(Matrix.from_npa(row_idxs))
qtrue_labels = Connector(Matrix.from_npa(true_labels))
qW = Connector(Matrix.from_npa(W), device_id)
row_slicing_block = RowSlicingBlock(qW, qrow_idxs)
sce_block = SoftmaxCeBlock(row_slicing_block.output, qtrue_labels)
qW.fprop()
qrow_idxs.fprop()
row_slicing_block.fprop()
sce_block.fprop()
sce_block.bprop()
row_slicing_block.bprop()
qW.add(Context(), qW.backward_matrix)  # fold the accumulated gradient into W so one array reflects both fprop and bprop
output[processor_type] = qW.to_host()
r.append(np.allclose(output['gpu'], output['cpu']))
self.assertEqual(sum(r), len(r))
Example 9: __init__
def __init__(self, R, b, grad_clipping, mask, prev_c, prev_h, device_id=None):
self.f_context = Context(device_id)
device_id = self.f_context.device_id
if R.bpropagable:
self.R, self.dL_dR = R.register_usage(device_id, device_id)
self.R_b_context = Context(device_id)
else:
self.R = R.register_usage(device_id)
if b.bpropagable:
self.b, self.dL_db = b.register_usage(device_id, device_id)
self.b_b_context = Context(device_id)
else:
self.b = b.register_usage(device_id)
self.grad_clipping = grad_clipping
if mask:
self.mask = mask.register_usage(device_id)
if prev_c.bpropagable:
self.prev_c, self.dL_dprev_c = prev_c.register_usage(device_id, device_id)
else:
self.prev_c = prev_c.register_usage(device_id)
if prev_h.bpropagable:
self.prev_h, self.dL_dprev_h = prev_h.register_usage(device_id, device_id)
else:
self.prev_h = prev_h.register_usage(device_id)
self.learning = R.bpropagable or prev_c.bpropagable or prev_h.bpropagable
if self.learning:
self.b_context = Context(device_id)
dim = self.R.nrows
batch_size = self.prev_c.nrows
self.zifo = Matrix.empty(batch_size, 4 * dim, device_id=device_id)
# views over the concatenated LSTM gate pre-activations
self.z = self.zifo[:, 0*dim:1*dim]  # cell input transform
self.i = self.zifo[:, 1*dim:2*dim]  # input gate
self.f = self.zifo[:, 2*dim:3*dim]  # forget gate
self.o = self.zifo[:, 3*dim:4*dim]  # output gate
self.c = Matrix.empty_like(self.prev_c, device_id)
self.c = Connector(self.c, device_id if self.learning else None)
self.tanh_c = Matrix.empty_like(self.c, device_id)
self.h = Matrix.empty_like(self.c, device_id)
self.h = Connector(self.h, device_id if self.learning else None)
if self.learning:
self._dzifo_dpre_zifo = Matrix.empty_like(self.zifo)
self.dz_dpre_z = self._dzifo_dpre_zifo[:, 0*dim:1*dim]
self.di_dpre_i = self._dzifo_dpre_zifo[:, 1*dim:2*dim]
self.df_dpre_f = self._dzifo_dpre_zifo[:, 2*dim:3*dim]
self.do_dpre_o = self._dzifo_dpre_zifo[:, 3*dim:4*dim]
self.dL_dpre_zifo = self._dzifo_dpre_zifo
self.dL_dpre_z = self.dz_dpre_z
self.dL_dpre_i = self.di_dpre_i
self.dL_dpre_f = self.df_dpre_f
self.dL_dpre_o = self.do_dpre_o
self._dtanh_c_dc = Matrix.empty_like(self.c)
Example 10: SoftmaxCeBlock
class SoftmaxCeBlock(object):
"""
Softmax nonlinearity with mean cross entropy loss
"""
def __init__(self, x, true_labels, mask=None, device_id=None):
self.context = Context(device_id)
device_id = self.context.device_id
if x.bpropagable:
self.x, self.dL_dx = x.register_usage(device_id, device_id)
else:
self.x = x.register_usage(device_id)
self.true_labels = true_labels.register_usage(device_id)
if mask:
self.mask = mask.register_usage(device_id)
self.probs = Connector(Matrix.empty_like(self.x))
self.loss = None
def fprop(self):
self.x.softmax(self.context, self.probs)
self.probs.fprop()
def bprop(self):
if not hasattr(self, 'dL_dx'):
return
# error = (probs - true_labels) / M
if self.true_labels.dtype == 'int':
self.dL_dx.add_softmax_ce_derivative(self.context, self.probs, self.true_labels)
else:
self.dL_dx.add_scaled_subtraction(self.context, 1. / self.probs.nrows, self.probs, self.true_labels)
if hasattr(self, 'mask'):
self.dL_dx.hprod(self.context, self.mask)
def calculate_loss(self, context):
true_labels_np = self.true_labels.to_host(context)
probs_np = self.probs.to_host(context)
if hasattr(self, 'mask'):
mask = self.mask.to_host(context)
context.add_callback(self._calculate_ce_loss, true_labels_np, probs_np, mask)
else:
context.add_callback(self._calculate_ce_loss, true_labels_np, probs_np)
def _calculate_ce_loss(self, true_labels_np, probs_np, mask=None):
if self.true_labels.dtype == 'int':
idxs = range(probs_np.shape[0]), true_labels_np.flatten()
logs = np.log(probs_np[idxs] + 1e-20)
else:
logs = np.log(np.sum(true_labels_np * probs_np, axis=1) + 1e-20)
if mask is not None:
logs *= mask[:, 0]
self.loss = - np.sum(logs) / np.sum(mask)
else:
self.loss = - np.mean(logs)
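A sketch of a forward/backward pass through SoftmaxCeBlock as defined above, with integer class labels. Passing a device_id to the logits Connector makes it bpropagable (compare Example 8); the quagga.context import path is an assumption.

import numpy as np
import quagga
from quagga.matrix import Matrix
from quagga.connector import Connector
from quagga.context import Context   # assumed import path

quagga.processor_type = 'cpu'
logits = Connector(Matrix.from_npa(np.asfortranarray([[2., 0., 0.],
                                                      [0., 0., 3.]], dtype=np.float32)), 0)
labels = Connector(Matrix.from_npa(np.array([[0], [2]], dtype=np.int32, order='F')))
sce_block = SoftmaxCeBlock(logits, labels)
logits.fprop(); labels.fprop()
sce_block.fprop()
sce_block.bprop()               # accumulates (probs - one_hot) / nrows into logits.backward_matrix
ctx = Context()
sce_block.calculate_loss(ctx)   # asynchronous: sce_block.loss is set by a context callback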
Example 11: DataBlock
class DataBlock(object):
def __init__(self, word_to_idx, device_id):
self.context = Context(device_id)
device_id = self.context.device_id
self.word_idx = Connector(Matrix.empty(1, 1, 'int', device_id))
self.word_to_idx = word_to_idx
self.word = None
def fprop(self):
word_npa = np.zeros((1, 1), np.int32, 'F')
word_npa[0][0] = self.word_to_idx[self.word] if self.word in self.word_to_idx else self.word_to_idx['<UNK>']
self.word_idx.assign_npa(self.context, word_npa)
self.word_idx.fprop()
Example 12: DataBlock
class DataBlock(object):
def __init__(self, char_to_idx, device_id):
self.context = Context(device_id)
device_id = self.context.device_id
self.char_idx = Connector(Matrix.empty(1, 1, 'int', device_id))
self.char_to_idx = char_to_idx
self.char = None
def fprop(self):
char_npa = np.zeros((1, 1), np.int32, 'F')
char_npa[0][0] = self.char_to_idx[self.char] if self.char in self.char_to_idx else self.char_to_idx['<unk>']
self.char_idx.assign_npa(self.context, char_npa)
self.char_idx.fprop()
Example 13: DropoutBlock
class DropoutBlock(object):
"""
Sets elements of the input matrix ``x`` to zero with probability
``dropout_prob`` in training mode. Scales ``x`` by a factor of
``1 - dropout_prob`` in testing mode.
Parameters
----------
dropout_prob : float
x : Matrix (GpuMatrix or CpuMatrix)
seed : int
device_id : int
Defines the device's id on which the computation will take place
Notes
-----
The dropout block is a regularizer that randomly sets input values to zero
in training mode. This procedure is supposed to improve generalization.
During testing, the dropout block scales input values.
"""
def __init__(self, dropout_prob, x, seed=42, device_id=None):
self.dropout_prob = dropout_prob
self.f_context = Context(device_id)
device_id = self.f_context.device_id
self.generator = Matrix.get_random_generator(seed)
if x.bpropagable:
self.b_context = Context(device_id)
self.x, self.dL_dx = x.register_usage(device_id, device_id)
else:
self.x = x.register_usage(device_id)
self.output = Matrix.empty_like(self.x)
self.output = Connector(self.output, device_id if x.bpropagable else None)
self.training_mode = True
def fprop(self):
if self.training_mode:
self.x.dropout(self.f_context, self.generator, self.dropout_prob, self.output)
else:
self.x.scale(self.f_context, 1.0 - self.dropout_prob, self.output)
self.output.fprop()
def bprop(self):
if hasattr(self, 'dL_dx') and self.training_mode:
dL_doutput = self.output.backward_matrix
self.dL_dx.add_mask_zeros(self.b_context, dL_doutput, self.output)
def set_training_mode(self):
self.training_mode = True
def set_testing_mode(self):
self.training_mode = False
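A sketch contrasting the two modes of DropoutBlock as defined above, under the same assumptions as the earlier sketches. Note this is the non-inverted dropout variant: outputs are scaled by 1 - dropout_prob at test time rather than rescaled during training.

import numpy as np
import quagga
from quagga.matrix import Matrix
from quagga.connector import Connector

quagga.processor_type = 'cpu'
x = Connector(Matrix.from_npa(np.ones((1, 6), dtype=np.float32, order='F')))
dropout_block = DropoutBlock(0.5, x, seed=42)
dropout_block.set_training_mode()
x.fprop()
dropout_block.fprop()   # roughly half of dropout_block.output is now zero
dropout_block.set_testing_mode()
dropout_block.fprop()   # every element equals 1.0 * (1 - 0.5) = 0.5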
Example 14: AttentionBlock
class AttentionBlock(object):
"""
Location-based attention block:
out = sum_{i=1}^{T} a_i * h_i
a_i = softmax(h_i * u)
"""
def __init__(self, matrices, u, mask=None, device_id=None):
self.context = Context(device_id)
device_id = self.context.device_id
self.output = Matrix.empty_like(matrices[0], device_id)
learning = matrices[0].bpropagable or u.bpropagable
self.output = Connector(self.output, device_id if learning else None)
if matrices[0].bpropagable:
self.matrices, self.dL_dmatrices = \
izip(*matrices.register_usage(device_id, device_id))
else:
self.matrices = matrices.register_usage(device_id)
self.length = matrices.length
if u.bpropagable:
self.u, self.dL_du = u.register_usage(device_id, device_id)
else:
self.u = u.register_usage(device_id)
if mask:
self.mask = mask.register_usage(device_id)
self.a = Matrix.empty(matrices[0].nrows, matrices.length,
'float', device_id)
self.dL_dpre_a = Matrix.empty_like(self.a)
self.a_cols = [self.a[:, i] for i in xrange(len(self.matrices))]
def fprop(self):
for i in xrange(self.length):
self.a_cols[i].assign_dot(self.context, self.matrices[i], self.u)
if hasattr(self, 'mask'):
self.a.fill(self.context, -3.402823466e+38, self.mask, 0.0)  # fill masked-out positions with -FLT_MAX so the softmax gives them ~zero weight
self.a.softmax(self.context, self.a)
self.output.assign_sequential_weighted_sum(self.context, self.a,
self.matrices[:self.length])
self.output.fprop()
def bprop(self):
dL_doutput = self.output.backward_matrix
self.dL_dpre_a.assign_dL_dpre_a(self.context, dL_doutput, self.a,
self.matrices[:self.length])
if hasattr(self, 'dL_dmatrices'):
Matrix.add_attention_tile(self.context, dL_doutput, self.a,
self.dL_dpre_a, self.u,
self.dL_dmatrices[:self.length])
if hasattr(self, 'dL_du'):
self.dL_du.add_attention_derivative(self.context, self.dL_dpre_a,
self.matrices[:self.length])
Example 15: SigmoidCeBlock
class SigmoidCeBlock(object):
"""
Sigmoid nonlinearity with mean cross entropy loss
"""
def __init__(self, x, true_labels, mask=None, device_id=None):
self.context = Context(device_id)
device_id = self.context.device_id
if x.bpropagable:
self.x, self.dL_dx = x.register_usage(device_id, device_id)
else:
self.x = x.register_usage(device_id)
self.true_labels = true_labels.register_usage(device_id)
if mask:
self.mask = mask.register_usage(device_id)
self.probs = Connector(Matrix.empty_like(self.x))
self.loss = None
def fprop(self):
self.x.sigmoid(self.context, self.probs)
self.probs.fprop()
def bprop(self):
if not hasattr(self, 'dL_dx'):
return
# error = (probs - true_labels) / M
self.dL_dx.add_scaled_subtraction(self.context,
1. / float(self.probs.nrows),
self.probs, self.true_labels)
if hasattr(self, 'mask'):
self.dL_dx.hprod(self.context, self.mask)
def calculate_loss(self, context):
true_labels_np = self.true_labels.to_host(context)
probs_np = self.probs.to_host(context)
if hasattr(self, 'mask'):
mask = self.mask.to_host(context)
context.add_callback(self._calculate_ce_loss,
true_labels_np, probs_np, mask)
else:
context.add_callback(self._calculate_ce_loss,
true_labels_np, probs_np)
def _calculate_ce_loss(self, true_labels_np, probs_np, mask=None):
logs = true_labels_np * np.log(probs_np + 1e-20) + \
(1.0 - true_labels_np) * np.log(1. - probs_np + 1e-20)
if mask is not None:
logs *= mask
self.loss = - np.sum(logs) / (np.sum(mask) * logs.shape[1])
else:
self.loss = - np.mean(logs)
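A sketch of SigmoidCeBlock as defined above with multi-label float targets; unlike the integer-label path of SoftmaxCeBlock, true_labels here is a 0/1 float matrix with the same shape as x. Import paths are assumptions, as before.

import numpy as np
import quagga
from quagga.matrix import Matrix
from quagga.connector import Connector

quagga.processor_type = 'cpu'
scores = Connector(Matrix.from_npa(np.asfortranarray([[0.5, -1.0, 2.0]], dtype=np.float32)), 0)
targets = Connector(Matrix.from_npa(np.asfortranarray([[1., 0., 1.]], dtype=np.float32)))
sig_block = SigmoidCeBlock(scores, targets)
scores.fprop(); targets.fprop()
sig_block.fprop()
sig_block.bprop()   # accumulates (probs - targets) / nrows into scores.backward_matrix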