This article collects typical usage examples of the Python method quagga.connector.Connector.to_host. If you have been wondering how Connector.to_host is used in practice, the curated code examples below may help. You can also explore further usage examples of quagga.connector.Connector, the class this method belongs to.
Seven code examples of the Connector.to_host method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
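Before the examples, here is a minimal sketch of the pattern they all share: a Connector wraps a Matrix, fprop() publishes its value to downstream consumers, and to_host() copies the data back into a NumPy array. This is an illustrative sketch only, not taken from the examples; it assumes a working quagga installation and uses the CPU backend so that no GPU is required.

import numpy as np
import quagga
from quagga.matrix import Matrix
from quagga.connector import Connector

quagga.processor_type = 'cpu'          # the examples below switch between 'gpu' and 'cpu'
a = np.arange(6, dtype=np.float32).reshape(2, 3)
qa = Connector(Matrix.from_npa(a))     # wrap a device matrix in a Connector
qa.fprop()                             # propagate the value to any registered users
assert np.allclose(qa.to_host(), a)    # to_host() copies the data back as a NumPy array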
Example 1: test_bprop_vector
# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import to_host [as alias]
def test_bprop_vector(self):
    r = []
    for _ in xrange(self.N):
        embd_dim = self.rng.random_integers(10000)
        batch_size, output_dim = self.rng.random_integers(2000, size=2)
        W = self.get_orthogonal_matrix(embd_dim, output_dim)
        row_idxs = self.rng.randint(embd_dim, size=(batch_size, 1)).astype(np.int32)
        true_labels = self.rng.randint(output_dim, size=(batch_size, 1)).astype(np.int32)
        device_id = 0
        output = {}
        # run the same graph on the GPU and CPU backends and compare the accumulated gradient on W
        for processor_type in ['gpu', 'cpu']:
            quagga.processor_type = processor_type
            qrow_idxs = Connector(Matrix.from_npa(row_idxs))
            qtrue_labels = Connector(Matrix.from_npa(true_labels))
            qW = Connector(Matrix.from_npa(W), device_id)
            row_slicing_block = RowSlicingBlock(qW, qrow_idxs)
            sce_block = SoftmaxCeBlock(row_slicing_block.output, qtrue_labels)
            qW.fprop()
            qrow_idxs.fprop()
            row_slicing_block.fprop()
            sce_block.fprop()
            sce_block.bprop()
            row_slicing_block.bprop()
            qW.add(Context(), qW.backward_matrix)
            output[processor_type] = qW.to_host()
        r.append(np.allclose(output['gpu'], output['cpu']))
    self.assertEqual(sum(r), len(r))
Example 2: SoftmaxCeBlock
# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import to_host [as alias]
class SoftmaxCeBlock(object):
    """
    Softmax nonlinearity with mean cross entropy loss
    """
    def __init__(self, x, true_labels, mask=None, device_id=None):
        self.context = Context(device_id)
        device_id = self.context.device_id
        if x.bpropagable:
            self.x, self.dL_dx = x.register_usage(device_id, device_id)
        else:
            self.x = x.register_usage(device_id)
        self.true_labels = true_labels.register_usage(device_id)
        if mask:
            self.mask = mask.register_usage(device_id)
        self.probs = Connector(Matrix.empty_like(self.x))
        self.loss = None

    def fprop(self):
        self.x.softmax(self.context, self.probs)
        self.probs.fprop()

    def bprop(self):
        if not hasattr(self, 'dL_dx'):
            return
        # error = (probs - true_labels) / M
        if self.true_labels.dtype == 'int':
            self.dL_dx.add_softmax_ce_derivative(self.context, self.probs, self.true_labels)
        else:
            self.dL_dx.add_scaled_subtraction(self.context, 1. / self.probs.nrows, self.probs, self.true_labels)
        if hasattr(self, 'mask'):
            self.dL_dx.hprod(self.context, self.mask)

    def calculate_loss(self, context):
        true_labels_np = self.true_labels.to_host(context)
        probs_np = self.probs.to_host(context)
        if hasattr(self, 'mask'):
            mask = self.mask.to_host(context)
            context.add_callback(self._calculate_ce_loss, true_labels_np, probs_np, mask)
        else:
            context.add_callback(self._calculate_ce_loss, true_labels_np, probs_np)

    def _calculate_ce_loss(self, true_labels_np, probs_np, mask=None):
        if self.true_labels.dtype == 'int':
            idxs = range(probs_np.shape[0]), true_labels_np.flatten()
            logs = np.log(probs_np[idxs] + 1e-20)
        else:
            logs = np.log(np.sum(true_labels_np * probs_np, axis=1) + 1e-20)
        if mask is not None:
            logs *= mask[:, 0]
            self.loss = - np.sum(logs) / np.sum(mask)
        else:
            self.loss = - np.mean(logs)
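The comment in bprop encodes the standard identity: for integer labels, the gradient of the mean cross-entropy loss through a softmax is (probs - one_hot(true_labels)) / M, which is what the comment suggests add_softmax_ce_derivative accumulates. A small self-contained NumPy check of that identity, independent of quagga (the sizes, seed, and tolerance here are arbitrary):

import numpy as np

def softmax(x):
    e = np.exp(x - x.max(axis=1, keepdims=True))
    return e / e.sum(axis=1, keepdims=True)

rng = np.random.RandomState(0)
M, K = 4, 5
x = rng.randn(M, K)
labels = rng.randint(K, size=M)
onehot = np.eye(K)[labels]

# gradient stated by the bprop comment: (probs - true_labels) / M
analytic = (softmax(x) - onehot) / M

def loss(x):
    return -np.mean(np.log(softmax(x)[np.arange(M), labels]))

# central finite differences of the mean cross-entropy loss
numeric = np.zeros_like(x)
eps = 1e-6
for i in range(M):
    for j in range(K):
        d = np.zeros_like(x)
        d[i, j] = eps
        numeric[i, j] = (loss(x + d) - loss(x - d)) / (2 * eps)

assert np.allclose(analytic, numeric, atol=1e-6)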
Example 3: SigmoidCeBlock
# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import to_host [as alias]
class SigmoidCeBlock(object):
    """
    Sigmoid nonlinearity with mean cross entropy loss
    """
    def __init__(self, x, true_labels, mask=None, device_id=None):
        self.context = Context(device_id)
        device_id = self.context.device_id
        if x.bpropagable:
            self.x, self.dL_dx = x.register_usage(device_id, device_id)
        else:
            self.x = x.register_usage(device_id)
        self.true_labels = true_labels.register_usage(device_id)
        if mask:
            self.mask = mask.register_usage(device_id)
        self.probs = Connector(Matrix.empty_like(self.x))
        self.loss = None

    def fprop(self):
        self.x.sigmoid(self.context, self.probs)
        self.probs.fprop()

    def bprop(self):
        # error = (probs - true_labels) / M
        self.dL_dx.add_scaled_subtraction(self.context,
                                          1. / float(self.probs.nrows),
                                          self.probs, self.true_labels)
        if hasattr(self, 'mask'):
            self.dL_dx.hprod(self.context, self.mask)

    def calculate_loss(self, context):
        true_labels_np = self.true_labels.to_host(context)
        probs_np = self.probs.to_host(context)
        if hasattr(self, 'mask'):
            mask = self.mask.to_host(context)
            context.add_callback(self._calculate_ce_loss,
                                 true_labels_np, probs_np, mask)
        else:
            context.add_callback(self._calculate_ce_loss,
                                 true_labels_np, probs_np)

    def _calculate_ce_loss(self, true_labels_np, probs_np, mask=None):
        logs = true_labels_np * np.log(probs_np + 1e-20) + \
               (1.0 - true_labels_np) * np.log(1. - probs_np + 1e-20)
        if mask is not None:
            logs *= mask
            self.loss = - np.sum(logs) / (np.sum(mask) * logs.shape[1])
        else:
            self.loss = - np.mean(logs)
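The same "(probs - true_labels) / M" comment applies here: with a sigmoid output and binary cross-entropy, the gradient with respect to the pre-sigmoid input is (sigmoid(z) - true_labels) / M, matching the add_scaled_subtraction call with its 1/nrows scaling. A quick NumPy finite-difference check of that identity (shapes and seed arbitrary; this assumes the loss is summed over columns and averaged over the M rows, as the 1/nrows factor implies):

import numpy as np

def sigmoid(z):
    return 1. / (1. + np.exp(-z))

rng = np.random.RandomState(1)
M, K = 6, 3
z = rng.randn(M, K)
t = rng.randint(2, size=(M, K)).astype(np.float64)

# gradient accumulated by bprop: (probs - true_labels) / M
analytic = (sigmoid(z) - t) / M

# binary cross entropy summed over columns, averaged over the M rows
def loss(z):
    p = sigmoid(z)
    return -np.sum(t * np.log(p) + (1. - t) * np.log(1. - p)) / M

numeric = np.zeros_like(z)
eps = 1e-6
for i in range(M):
    for j in range(K):
        d = np.zeros_like(z)
        d[i, j] = eps
        numeric[i, j] = (loss(z + d) - loss(z - d)) / (2 * eps)

assert np.allclose(analytic, numeric, atol=1e-6)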
Example 4: test_theano_bprop_matrix
# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import to_host [as alias]
def test_theano_bprop_matrix(self):
    r = []
    for i in xrange(self.N):
        max_input_sequence_len = self.rng.random_integers(300)
        sequence_len = max_input_sequence_len if i == 0 else self.rng.random_integers(2, max_input_sequence_len)
        embd_dim = self.rng.random_integers(10000)
        batch_size = self.rng.random_integers(500)
        output_dim = self.rng.random_integers(2000)
        W = self.get_orthogonal_matrix(embd_dim, output_dim)
        row_idxs = self.rng.randint(embd_dim, size=(batch_size, max_input_sequence_len)).astype(np.int32)
        true_labels = [self.rng.randint(output_dim, size=(batch_size, 1)).astype(np.int32) for _ in xrange(max_input_sequence_len)]
        device_id = 0
        # quagga graph: accumulate the gradient on W
        quagga.processor_type = 'gpu'
        qrow_idxs = Connector(Matrix.from_npa(row_idxs))
        qtrue_labels = List([Connector(Matrix.from_npa(e)) for e in true_labels], qrow_idxs.ncols)
        qW = Connector(Matrix.from_npa(W), device_id)
        row_slicing_block = RowSlicingBlock(qW, qrow_idxs)
        seq_sce_block = SequencerBlock(block_class=SoftmaxCeBlock,
                                       params=[],
                                       sequences=[row_slicing_block.output, qtrue_labels])
        qW.fprop()
        qrow_idxs.ncols = sequence_len
        qrow_idxs.fprop()
        row_slicing_block.fprop()
        seq_sce_block.fprop()
        seq_sce_block.bprop()
        row_slicing_block.bprop()
        qW.add(Context(), qW.backward_matrix)
        # Theano reference: apply the same gradient update to its copy of W
        th_row_idxs = T.imatrix()
        th_true_labels = T.imatrix()
        row_slicing_layer = RowSlicingLayer(W)
        toutput = row_slicing_layer.get_output_expr(th_row_idxs)
        loss = SequentialSoftmaxLayer.get_loss(toutput, th_true_labels)
        dL_dW = T.grad(loss, row_slicing_layer.W)
        fun = theano.function([th_row_idxs, th_true_labels],
                              updates=[(row_slicing_layer.W, row_slicing_layer.W + dL_dW)])
        fun(row_idxs, np.hstack(true_labels[:sequence_len]))
        r.append(np.allclose(qW.to_host(), row_slicing_layer.W.get_value(), atol=1e-5))
    self.assertEqual(sum(r), len(r))
Example 5: test_theano_bprop_vector
# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import to_host [as alias]
def test_theano_bprop_vector(self):
    r = []
    for _ in xrange(self.N):
        embd_dim = self.rng.random_integers(10000)
        batch_size, output_dim = self.rng.random_integers(2000, size=2)
        W = self.get_orthogonal_matrix(embd_dim, output_dim)
        row_idxs = self.rng.randint(embd_dim, size=(batch_size, 1)).astype(np.int32)
        true_labels = self.rng.randint(output_dim, size=(batch_size, 1)).astype(np.int32)
        device_id = 0
        # quagga graph: accumulate the gradient on W
        quagga.processor_type = 'gpu'
        qrow_idxs = Connector(Matrix.from_npa(row_idxs))
        qW = Connector(Matrix.from_npa(W), device_id)
        qtrue_labels = Connector(Matrix.from_npa(true_labels))
        row_slicing_block = RowSlicingBlock(qW, qrow_idxs)
        sce_block = SoftmaxCeBlock(row_slicing_block.output, qtrue_labels)
        qtrue_labels.fprop()
        qW.fprop()
        qrow_idxs.fprop()
        row_slicing_block.fprop()
        sce_block.fprop()
        sce_block.bprop()
        row_slicing_block.bprop()
        qW.add(Context(), qW.backward_matrix)
        # Theano reference: apply the same gradient update to its copy of W
        th_row_idxs = T.ivector()
        th_true_labels = T.ivector()
        row_slicing_layer = RowSlicingLayer(W)
        toutput = row_slicing_layer.get_output_expr(th_row_idxs)
        loss = SoftmaxLayer.get_loss(toutput, th_true_labels)
        dL_dW = T.grad(loss, row_slicing_layer.W)
        fun = theano.function([th_row_idxs, th_true_labels],
                              updates=[(row_slicing_layer.W, row_slicing_layer.W + dL_dW)])
        fun(row_idxs[:, 0], true_labels[:, 0])
        r.append(np.allclose(qW.to_host(), row_slicing_layer.W.get_value()))
    self.assertEqual(sum(r), len(r))
Example 6: test_bprop_matrix
# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import to_host [as alias]
def test_bprop_matrix(self):
    r = []
    for i in xrange(self.N):
        max_input_sequence_len = self.rng.random_integers(500)
        sequence_len = max_input_sequence_len if i == 0 else self.rng.random_integers(max_input_sequence_len)
        embd_dim = self.rng.random_integers(10000)
        batch_size = self.rng.random_integers(500)
        output_dim = self.rng.random_integers(2000)
        W = self.get_orthogonal_matrix(embd_dim, output_dim)
        row_idxs = self.rng.randint(embd_dim, size=(batch_size, max_input_sequence_len)).astype(np.int32)
        true_labels = [self.rng.randint(output_dim, size=(batch_size, 1)).astype(np.int32) for _ in xrange(max_input_sequence_len)]
        device_id = 0
        output = {}
        # run the same sequential graph on the GPU and CPU backends and compare the gradients
        for processor_type in ['gpu', 'cpu']:
            quagga.processor_type = processor_type
            qrow_idxs = Connector(Matrix.from_npa(row_idxs))
            qtrue_labels = List([Connector(Matrix.from_npa(e)) for e in true_labels], qrow_idxs.ncols)
            qW = Connector(Matrix.from_npa(W), device_id)
            row_slicing_block = RowSlicingBlock(qW, qrow_idxs)
            seq_sce_block = SequencerBlock(block_class=SoftmaxCeBlock,
                                           params=[],
                                           sequences=[row_slicing_block.output, qtrue_labels])
            qW.fprop()
            qrow_idxs.ncols = sequence_len
            qrow_idxs.fprop()
            row_slicing_block.fprop()
            seq_sce_block.fprop()
            seq_sce_block.bprop()
            row_slicing_block.bprop()
            qW.add(Context(), qW.backward_matrix)
            output[processor_type] = qW.to_host()
        r.append(np.allclose(output['gpu'], output['cpu']))
    self.assertEqual(sum(r), len(r))
Example 7: test_theano_grad
# Required import: from quagga.connector import Connector [as alias]
# Or: from quagga.connector.Connector import to_host [as alias]
def test_theano_grad(self):
    class SequentialMeanPoolingLayer(object):
        def get_output_expr(self, input_sequence):
            return T.mean(input_sequence, axis=2)

    class LogisticRegressionLayer(object):
        def __init__(self, W_init, b_init):
            self.W = theano.shared(value=W_init())
            self.b = theano.shared(value=b_init())

        def get_output_expr(self, input_expr):
            return T.nnet.sigmoid(T.dot(input_expr, self.W) + self.b)

    quagga.processor_type = 'gpu'
    r = []
    for i in xrange(self.N):
        max_input_sequence_len = self.rng.random_integers(500)
        sequence_len = max_input_sequence_len if i == 0 else self.rng.random_integers(max_input_sequence_len)
        batch_size = self.rng.random_integers(512)
        dim = self.rng.random_integers(1500)
        x = [self.rng.rand(batch_size, dim).astype(dtype=np.float32) for _ in xrange(max_input_sequence_len)]
        true_labels = self.rng.randint(1, size=(batch_size, 1)).astype(dtype=np.float32)
        W_init = self.get_orthogonal_initializer(dim, 1)
        b_init = lambda: self.rng.rand(1, 1).astype(dtype=np.float32)

        # Theano model
        state = self.rng.get_state()
        th_x = T.ftensor3()
        th_true_labels = T.fmatrix()
        smp_layer = SequentialMeanPoolingLayer()
        lr_layer = LogisticRegressionLayer(W_init, lambda: b_init()[0])
        probs = lr_layer.get_output_expr(smp_layer.get_output_expr(th_x))
        loss = T.mean(T.nnet.binary_crossentropy(probs, th_true_labels))
        grad_x = T.grad(loss, wrt=th_x)
        get_grad_x = theano.function([th_x, th_true_labels], grad_x)

        # quagga model
        self.rng.set_state(state)
        context = Context()
        x = List([Connector(Matrix.from_npa(e), context, context) for e in x])
        true_labels = Connector(Matrix.from_npa(true_labels))
        smp_block = SequentialMeanPoolingBlock(x)
        dot_block = DotBlock(W_init, b_init, smp_block.output)
        sce_block = SigmoidCeBlock(dot_block.output, true_labels)
        x.set_length(sequence_len)
        smp_block.fprop()
        dot_block.fprop()
        sce_block.fprop()
        sce_block.bprop()
        dot_block.bprop()
        smp_block.bprop()
        dL_dx = [e.backward_matrix.to_host() for e in x]
        dL_dx_th = get_grad_x(np.dstack([e.to_host() for e in x]), true_labels.to_host())
        for i in xrange(dL_dx_th.shape[-1]):
            if not np.allclose(dL_dx[i], dL_dx_th[..., i]):
                r.append(False)
                break
        else:
            r.append(True)
    self.assertEqual(sum(r), self.N)
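A small layout detail makes the final comparison in this example line up: np.dstack turns the list of (batch_size, dim) step matrices into a single (batch_size, dim, sequence_len) tensor, so the Theano gradient slice dL_dx_th[..., i] corresponds to the quagga per-step gradient dL_dx[i]. A standalone illustration of that layout (the shapes here are arbitrary):

import numpy as np

steps = [np.full((2, 3), step, dtype=np.float32) for step in range(4)]
stacked = np.dstack(steps)   # shape (2, 3, 4): sequence steps end up on the last axis
assert stacked.shape == (2, 3, 4)
assert np.array_equal(stacked[..., 2], steps[2])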