This article collects typical usage examples of the Python function theano.tensor.ftensor3. If you are wondering what ftensor3 does, how to call it, or what real-world uses look like, the curated code examples below may help.
The sections that follow show 15 code examples of ftensor3, ordered by popularity.
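Before the collected examples, here is a minimal, self-contained sketch of what T.ftensor3 provides: a symbolic 3-D tensor of dtype float32 (in the recurrent-network code below it typically holds data laid out as time x batch x features). The names and shapes in this sketch are illustrative and do not come from any of the examples.

import numpy as np
import theano
import theano.tensor as T

# Declare a symbolic 3-D float32 tensor; the string is only a debug name.
x = T.ftensor3('x')

# Build a symbolic expression and compile it into a callable function.
mean_over_time = x.mean(axis=0)  # collapse the first (e.g. time) axis
f = theano.function([x], mean_over_time)

# Concrete inputs must be 3-D float32 arrays.
val = np.random.rand(5, 2, 4).astype('float32')  # (time, batch, features)
print(f(val).shape)  # -> (2, 4)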
Example 1: testSNLIExample
def testSNLIExample():
"""
Test the LSTM pipeline on an example actually taken from the SNLI dataset.
"""
start = time.time()
table = EmbeddingTable(dataPath+"glove.6B.50d.txt.gz")
dataStats= "/Users/mihaileric/Documents/Research/LSTM-NLI/data/" \
"test_dataStats.json"
dataJSONFile= "/Users/mihaileric/Documents/Research/LSTM-NLI/data/" \
"snli_1.0_test.jsonl"
premiseTensor, hypothesisTensor = table.convertDataToEmbeddingTensors(
dataJSONFile, dataStats)
symPremise = T.ftensor3("inputPremise")
symHypothesis = T.ftensor3("inputHypothesis")
premiseSent = premiseTensor[:, 0:3, :]
hypothesisSent = hypothesisTensor[:, 0:3, :]
network = LSTMP2H(numTimestepsPremise=57, numTimestepsHypothesis=30,
dimInput=10, embedData="/Users/mihaileric/Documents/Research/"
"LSTM-NLI/data/glove.6B.50d.txt.gz")
network.printLSTMP2HParams()
predictFunc = network.predictFunc(symPremise, symHypothesis)
labels = network.predict(premiseSent, hypothesisSent, predictFunc)
for l in labels:
print "Label: %s" %(l)
print "Time for evaluation: %f" %(time.time() - start)
Example 2: test_pycuda_elemwise_kernel
def test_pycuda_elemwise_kernel():
x=T.fmatrix('x')
y=T.fmatrix('y')
f=theano.function([x,y],x+y, mode=mode_with_gpu)
print f.maker.env.toposort()
f2 = theano.function([x,y],x+y, mode=mode_with_gpu.including("local_pycuda_gpu_elemwise_kernel"))
print f2.maker.env.toposort()
assert any([ isinstance(node.op, theano.sandbox.cuda.GpuElemwise) for node in f.maker.env.toposort()])
assert any([ isinstance(node.op, PycudaElemwiseKernelOp) for node in f2.maker.env.toposort()])
val1 = numpy.asarray(numpy.random.rand(5,5), dtype='float32')
val2 = numpy.asarray(numpy.random.rand(5,5), dtype='float32')
#val1 = numpy.ones((5,5))
#val2 = numpy.arange(25).reshape(5,5)
assert (f(val1,val2) == f2(val1,val2)).all()
print f(val1,val2)
print f2(val1,val2)
x3=T.ftensor3('x')
y3=T.ftensor3('y')
z3=T.ftensor3('z')
f4 = theano.function([x3,y3,z3],x3*y3+z3, mode=mode_with_gpu.including("local_pycuda_gpu_elemwise_kernel"))
print f4.maker.env.toposort()
assert any([ isinstance(node.op, PycudaElemwiseKernelOp) for node in f4.maker.env.toposort()])
val1 = numpy.random.rand(2,2,2).astype('float32')
print val1
print f4(val1,val1,val1)
assert numpy.allclose(f4(val1,val1,val1),val1*val1+val1)
Example 3: theano_vars
def theano_vars(self):
if self.cond:
return [T.ftensor3('x'), T.fmatrix('mask'),
T.ftensor3('y'), T.fmatrix('label_mask')]
else:
return [T.ftensor3('x'), T.fmatrix('mask')]
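The ftensor3/fmatrix pairing in this example follows the common Theano RNN convention of a (time, batch, features) data tensor plus a (time, batch) padding mask; that layout is an assumption based on the variable names, not something the quoted code states. A minimal numpy sketch of how such inputs are typically assembled:

import numpy as np

# Two variable-length sequences of 4-dim feature vectors, padded to a common length.
seqs = [np.random.rand(5, 4), np.random.rand(3, 4)]  # lengths 5 and 3
max_len, batch, feat = 5, len(seqs), 4

x = np.zeros((max_len, batch, feat), dtype='float32')  # would feed the ftensor3 'x'
mask = np.zeros((max_len, batch), dtype='float32')  # would feed the fmatrix 'mask'
for b, s in enumerate(seqs):
    x[:len(s), b, :] = s  # copy the real timesteps
    mask[:len(s), b] = 1.0  # mark them as valid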
Example 4: test_infer_shape
def test_infer_shape(self):
# only matrix / matrix is supported
admat = tensor.ftensor3()
bdmat = tensor.ftensor3()
admat_val = my_rand(7, 4, 5)
bdmat_val = my_rand(7, 5, 3)
self._compile_and_check([admat, bdmat], [GpuBatchedDot()(admat, bdmat)], [admat_val, bdmat_val], GpuBatchedDot)
Example 5: test_batched_dot
def test_batched_dot():
a = T.ftensor3('a')
b = T.ftensor3('b')
c = my_batched_dot(a, b)
# Test with concrete values
dim1, dim2, dim3, dim4 = 10, 12, 15, 20
A_shape = (dim1, dim2, dim3)
B_shape = (dim1, dim3, dim4)
C_shape = (dim1, dim2, dim4)
A = np.arange(np.prod(A_shape)).reshape(A_shape).astype(floatX)
B = np.arange(np.prod(B_shape)).reshape(B_shape).astype(floatX)
C = c.eval({a: A, b: B})
# check shape
assert C.shape == C_shape
# check content
C_ = np.zeros((dim1, dim2, dim4))
for i in range(dim1):
C_[i] = np.dot(A[i], B[i])
assert np.allclose(C, C_)
Example 6: _setup_vars
def _setup_vars(self, sparse_input):
'''Set up Theano variables for our network.
Parameters
----------
sparse_input : bool
Not used -- sparse inputs are not supported for recurrent networks.
Returns
-------
vars : list of theano variables
A list of the variables that this network requires as inputs.
'''
_warn_dimshuffle()
assert not sparse_input, 'Theanets does not support sparse recurrent models!'
self.src = TT.ftensor3('src')
#self.src_mask = TT.imatrix('src_mask')
self.src_mask = TT.matrix('src_mask')
self.dst = TT.ftensor3('dst')
self.labels = TT.imatrix('labels')
self.weights = TT.matrix('weights')
if self.weighted:
return [self.src, self.src_mask, self.dst, self.labels, self.weights]
return [self.src, self.dst]
Example 7: cmp
def cmp(a_shp, b_shp):
a = numpy.random.randn(* a_shp).astype(numpy.float32)
b = numpy.random.randn(* b_shp).astype(numpy.float32)
x = tensor.ftensor3()
y = tensor.ftensor3()
f = theano.function([x, y],
batched_dot(x, y),
mode=mode_with_gpu)
z0 = numpy.asarray(f(a, b))
ga = cuda_ndarray.CudaNdarray(a)
gb = cuda_ndarray.CudaNdarray(b)
z1 = numpy.asarray(f(ga, gb))
z_test = numpy.sum(
a[:, :, :, None] * b[:, None, :, :], axis=-2)
unittest_tools.assert_allclose(z0, z_test)
unittest_tools.assert_allclose(z1, z_test)
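The z_test expression above is the broadcasting form of a per-batch matrix product: a[:, :, :, None] * b[:, None, :, :] has shape (batch, M, K, N), and summing over axis=-2 contracts the shared K axis. A tiny numpy-only check of that identity (shapes are made up; this is not part of the quoted test):

import numpy as np

B, M, K, N = 3, 4, 5, 6
a = np.random.randn(B, M, K).astype('float32')
b = np.random.randn(B, K, N).astype('float32')

ref = np.sum(a[:, :, :, None] * b[:, None, :, :], axis=-2)  # (B, M, N)
assert np.allclose(ref, np.einsum('bmk,bkn->bmn', a, b), atol=1e-5)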
Example 8: random_search_gpu
def random_search_gpu(
modal_names, train_probs, val_probs,
target_train, target_val, numpy_rng, n_iter=400):
n_modal = train_probs.shape[0]
n_cls = train_probs.shape[2]
# sample random weights and normalize so the modalities sum to 1
# for each class
weight_samples = T.ftensor3('weight_samples')
probs = T.ftensor3('probs')
targets = T.ivector('targets')
preds = T.argmax(
T.sum(probs.dimshuffle('x',0,1,2) * weight_samples.dimshuffle(0,1,'x',2), axis=1),
axis=2)
accs = T.mean(T.eq(preds, targets.dimshuffle('x',0)), axis=1)
best_index = T.argmax(accs)
best_acc = accs[best_index]
best_weights = weight_samples[best_index]
print 'compiling function'
fn = theano.function([weight_samples, probs, targets],
[best_weights, best_index, best_acc])
print 'done'
weight_samples_np = numpy_rng.rand(n_iter, n_modal, n_cls).astype(np.float32)
weight_samples_np /= weight_samples_np.sum(1)[:, None, :]
return fn(weight_samples_np, val_probs, target_val)
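For readers puzzled by the dimshuffle/broadcast expression above, the prediction it computes is equivalent to the following numpy sketch (the shapes are invented for illustration; this is not part of the quoted function):

import numpy as np

n_iter, n_modal, n_samples, n_cls = 50, 4, 100, 10
weights = np.random.rand(n_iter, n_modal, n_cls).astype('float32')
weights /= weights.sum(1, keepdims=True)  # modalities sum to 1 per class
probs = np.random.rand(n_modal, n_samples, n_cls).astype('float32')
targets = np.random.randint(0, n_cls, size=n_samples)

# combined[w, s, c] = sum_m weights[w, m, c] * probs[m, s, c]
combined = np.einsum('wmc,msc->wsc', weights, probs)
preds = combined.argmax(axis=2)  # (n_iter, n_samples)
accs = (preds == targets[None, :]).mean(axis=1)  # accuracy of each weighting
best_index = accs.argmax()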
Example 9: test_attention_dot_does_not_crash
def test_attention_dot_does_not_crash():
Z = T.ftensor3('Z')
B = T.ftensor3('B') #base
W_re = T.fmatrix('W_re')
W_att_quadr = T.fmatrix("W_att_quadr")
W_att_in = T.fmatrix('W_att_in')
c = T.fmatrix('c') #initial state
y0 = T.fmatrix('y0') #initial activation
i = T.matrix('i',dtype='int8')
Y, H, d = LSTMCustomDotAttentionOpNoInplaceInstance(Z, c, y0, i, W_re, B, W_att_in, W_att_quadr)
f = theano.function(inputs=[Z, B, c, y0, i, W_re, W_att_in, W_att_quadr], outputs=Y)
n_B = 8
n_T = 5
n_batch = 4
n_cells = 8
numpy.random.seed(1234)
Z_val = numpy.random.ranf((n_T,n_batch,4*n_cells)).astype('float32')
B_val = numpy.random.ranf((n_B,n_batch,n_cells)).astype('float32')
W_re_val = numpy.random.ranf((n_cells, 4 * n_cells)).astype('float32')
W_att_quadr_val = numpy.eye(n_B).astype('float32')
W_att_in_val = numpy.random.ranf((n_cells, 4 * n_cells)).astype('float32')
c_val = numpy.random.ranf((n_batch, n_cells)).astype('float32')
y0_val = numpy.random.ranf((n_batch, n_cells)).astype('float32')
#i_val = numpy.ones((n_T, n_batch), dtype='int8')
i_val = numpy.array([[1,1,1,1,1], [0,0,1,1,1], [0,0,1,1,1], [0,0,1,0,0]], dtype='int8').T
Y_val = numpy.asarray(f(Z_val, B_val, c_val, y0_val, i_val, W_re_val, W_att_in_val, W_att_quadr_val))
#print Y_val
print("success")
Example 10: make_node
def make_node(self, x, x2, x3, x4, x5):
# check that the theano version has support for __props__.
# This next line looks like it has a typo,
# but it's actually a way to detect the theano version
# is sufficiently recent to support the use of __props__.
assert hasattr(self, '_props'), "Your version of theano is too old to support __props__."
x = tensor.as_tensor_variable(x)
x2 = tensor.as_tensor_variable(x2)
x3 = tensor.as_tensor_variable(x3)
x4 = tensor.as_tensor_variable(x4)
x5 = tensor.as_tensor_variable(x5)
if prm.att_doc:
if prm.compute_emb:
td = tensor.itensor4().type()
else:
td = tensor.ftensor4().type()
tm = tensor.ftensor3().type()
else:
if prm.compute_emb:
td = tensor.itensor3().type()
else:
td = tensor.ftensor3().type()
tm = tensor.fmatrix().type()
return theano.Apply(self, [x,x2,x3,x4,x5], [td, tm, \
tensor.fmatrix().type(), tensor.ivector().type()])
Example 11: test_multiple_inputs
def test_multiple_inputs():
X = T.ftensor3('X')
X2 = T.ftensor3('X2')
W = T.fmatrix('W')
V_h = T.fmatrix('V_h')
b = T.fvector('b')
c = T.fmatrix('c') #initial state
i = T.matrix('i',dtype='int8')
X_val_mat0 = 0.1 * numpy.array([[1,2,3], [4,5,6]], dtype='float32')
X_val_mat1 = 0.1 * numpy.array([[5,1,8], [7,0,1]], dtype='float32')
X_val_mat2 = 0.1 * numpy.array([[2,1,1], [-7,0,-1]], dtype='float32')
X_val = numpy.zeros((3,2,3), dtype='float32')
X_val[0, :, :] = X_val_mat0
X_val[1, :, :] = X_val_mat1
X_val[2, :, :] = X_val_mat2
X_val2 = numpy.zeros_like(X_val)
# should be divisible by 4 for lstm, attention: note the .T
W_val = 0.1 * numpy.array([[3,1,2], [4,8,0], [7,7,1], [4,2,-5],
[6,-1,-2], [-4,8,0], [-7,2,1], [4,-2,-5],
[6,5,-2], [-4,8,-6], [-7,3,-1], [4,2,-5]], dtype='float32').T
#(for lstm) size 1/4th
V_h_val = 0.1 * numpy.array([[1,3,5], [2,-1,-1], [4, 8,-5], [0,-2,3],
[7,7,7], [1,2,3], [5,2,1], [-4,8,-4],
[-3,7,-7], [2,-2,-3], [-5,2,1], [-4,-5,-4]],
dtype='float32').T
b_val = 0.1 * numpy.array([1,2,3,4,5,6,7,8,9,10,11,12], dtype='float32')
c_val = numpy.zeros((2,3), dtype='float32')
i_val = numpy.ones((3,2),dtype='int8')
Z1, H1, d1 = LSTMOp2Instance(V_h, c, b, i, X, W)
Z2, H2, d2 = LSTMOp2Instance(V_h, c, b, i, X, X2, W, W)
Z3, H3, d3 = LSTMOp2Instance(V_h, c, b, i) # no inputs!
DX1 = T.grad(Z1.sum(), X)
DW1 = T.grad(Z1.sum(), W)
DV_h1 = T.grad(Z1.sum(), V_h)
Db1 = T.grad(Z1.sum(), b)
Dc1 = T.grad(Z1.sum(), c)
DX2 = T.grad(Z2.sum(), X)
DW2 = T.grad(Z2.sum(), W)
DV_h2 = T.grad(Z2.sum(), V_h)
Db2 = T.grad(Z2.sum(), b)
Dc2 = T.grad(Z2.sum(), c)
DV_h3 = T.grad(Z3.sum(), V_h)
f = theano.function(inputs=[X, W, V_h, c, b, i], outputs=[Z1, DX1, DW1])
g = theano.function(inputs=[X, X2, W, V_h, c, b, i], outputs=[Z2, DX2, DW2])
h = theano.function(inputs=[V_h, c, b, i], outputs=[Z3, DV_h3])
h_res = [numpy.asarray(A, dtype='float32') for A in h(V_h_val, c_val, b_val, i_val)]
#print h_res[0], h_res[1]
f_res = [numpy.asarray(A, dtype='float32') for A in f(X_val, W_val, V_h_val, c_val, b_val, i_val)]
g_res = [numpy.asarray(A, dtype='float32') for A in g(X_val, X_val2, W_val, V_h_val, c_val, b_val, i_val)]
for A1, A2 in zip(f_res, g_res):
assert numpy.allclose(A1, A2)
#print f_res[0], g_res[0]
print "success"
Example 12: test_outer_infershape
def test_outer_infershape(self):
o = tensor.ftensor4()
x = tensor.ftensor3()
y = tensor.ftensor3()
xIdx = tensor.imatrix()
yIdx = tensor.imatrix()
self._compile_and_check(
[o, x, y, xIdx, yIdx], [self.outer_op(o, x, y, xIdx, yIdx)], self.outer_data(), self.outer_class
)
Example 13: test_attention_time_gauss
def test_attention_time_gauss():
n_T = 4
n_batch = 2
n_inp_dim = 3
n_cells = 5
n_B = 5
custom_op = get_attention(RecurrentTransform.AttentionTimeGauss,
n_out=n_cells, n_batches=n_batch, n_input_t=n_B, n_input_dim=n_inp_dim)
att = custom_op.recurrent_transform
Z_val = numpy.random.ranf((n_T,n_batch,4*n_cells)).astype('float32')
W_re_val = numpy.random.ranf((n_cells, 4 * n_cells)).astype('float32')
W_att_quadr_val = numpy.eye(n_B).astype('float32')
W_att_in_val = numpy.random.ranf((n_cells, 4 * n_cells)).astype('float32')
B_val = numpy.random.ranf((n_B,n_batch,n_cells)).astype('float32')
c_val = numpy.random.ranf((n_batch, n_cells)).astype('float32')
y0_val = numpy.random.ranf((n_batch, n_cells)).astype('float32')
i_val = numpy.ones((n_T, n_batch), dtype='int8')
Z = T.ftensor3('Z')
B = T.ftensor3('B') #base
W_re = T.fmatrix('W_re')
W_att_quadr = T.fmatrix("W_att_quadr")
W_att_in = T.fmatrix('W_att_in')
c = T.fmatrix('c') #initial state
y0 = T.fmatrix('y0') #initial activation
i = T.matrix('i',dtype='int8')
t0 = T.fvector('t0')
custom_vars = att.get_sorted_custom_vars()
initial_state_vars = att.get_sorted_state_vars_initial()
custom_op_inputs = [Z, c, y0, i, W_re] + custom_vars + initial_state_vars
print("input args num:", len(custom_op_inputs))
print("input args:", custom_op_inputs)
custom_op_outputs = custom_op(*custom_op_inputs)
print("output args num:", len(custom_op_outputs))
custom_op_outputs = [cuda.host_from_gpu(v) for v in custom_op_outputs]
f = theano.function(inputs=[Z, c, y0, i, W_re], outputs=custom_op_outputs)
res = f(Z_val, c_val, y0_val, i_val, W_re_val)
#print res
# res: (output) Y, (gates and cell state) H, (final cell state) d, state vars sequences
(Y, H, d), state_var_seqs = res[:3], res[3:]
# print "running custom dumped data"
# custom_op_inputs = [theano.shared(numpy.load("../op.i.%i" % i)) for i in range(12)]
# custom_op_outputs = custom_op(*custom_op_inputs)
# custom_op_outputs = [cuda.host_from_gpu(v) for v in custom_op_outputs]
# f = theano.function(inputs=[], outputs=custom_op_outputs)
# res = f()
print(res)
assert False
Example 14: fail
def fail(a_shp, b_shp):
a=numpy.random.randn(*a_shp).astype(numpy.float32)
b=numpy.random.randn(*b_shp).astype(numpy.float32)
x=tensor.ftensor3()
y=tensor.ftensor3()
f=theano.function([x,y], batched_dot(x,y), mode=mode_with_gpu)
z = f(a,b)
Example 15: test_tensor3_roc_auc_scores
def test_tensor3_roc_auc_scores():
true = np.random.binomial(n=1, p=.5, size=(20, 30, 40)).astype('float32')
predicted = np.random.random((20, 30, 40)).astype('float32')
yt, yp = T.ftensor3('yt'), T.ftensor3('yp')
refscore = tmetrics.classification.last_axis_roc_auc_scores(true, predicted)
roc_auc_scores = tmetrics.classification.roc_auc_scores(yt, yp)
f = theano.function([yt, yp], roc_auc_scores)
score = f(true, predicted)
print 'refscore'
print refscore
print 'score'
print score
assert np.allclose(refscore, score, equal_nan=True)