This article collects typical usage examples of the Python function theano.tensor.fvector. If you are unsure what fvector does or how to use it, the hand-picked examples below should help.

The 15 code examples that follow are drawn from open-source projects and are ordered by popularity by default.
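Before the project examples, here is a minimal self-contained sketch of what T.fvector actually declares: a symbolic one-dimensional float32 tensor that acts as an input placeholder for a compiled Theano function. (This sketch is ours, not drawn from any of the projects below; the names x and f are invented for illustration.)

import numpy as np
import theano
import theano.tensor as T

# 'x' is a symbolic float32 vector; it holds no data itself.
x = T.fvector('x')
# Build a symbolic expression over x, then compile it into a callable.
f = theano.function([x], (x ** 2).sum())
# Call with a concrete float32 array matching the declared dtype.
print(f(np.arange(4, dtype='float32')))  # -> 14.0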

Example 1: test_vector_clf_curve

def test_vector_clf_curve():
    yt = T.fvector('yt')
    yp = T.fvector('yp')
    tps = tmetrics.classification._vector_clf_curve(yt, yp)
    f = theano.function([yt, yp], tps, allow_input_downcast=True)
    true, predicted = (np.random.binomial(n=1, p=.5, size=10).astype('float32'),
                       np.random.random(10).astype('float32'))
    fps, tps, _ = f(true, predicted)
    s_fps, s_tps, s_ = sklearn.metrics.ranking._binary_clf_curve(true, predicted)
    np.set_printoptions(suppress=True)
    print('true')
    print(true)
    print('predicted')
    print(predicted)
    print('fps')
    print(fps)
    print('sklearn fps')
    print(s_fps)
    print('tps')
    print(tps)
    print('sklearn tps')
    print(s_tps)
    print('threshold values')
    print(_)
    print('sklearn threshold values')
    print(s_)
    assert np.allclose(fps, s_fps)
    assert np.allclose(tps, s_tps)
    assert np.allclose(_, s_)

Example 2: test_cudnn_softmax_grad_opt

def test_cudnn_softmax_grad_opt(self):
    # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad optimization is
    # applied when cuDNN is required.
    y = T.fvector("y")
    f = theano.function([y], T.grad(T.nnet.softmax(y).mean(), y),
                        mode=mode_with_gpu)
    sorted_f = f.maker.fgraph.toposort()
    assert len([i for i in sorted_f
                if isinstance(i.op, theano.sandbox.cuda.dnn.GpuDnnSoftmaxGrad)]) == 1
    assert len([i for i in sorted_f
                if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 0

    # Verify that the optimization is not applied when cuDNN is excluded
    # or unavailable.
    mode_wo_cudnn = mode_with_gpu.excluding("cudnn")
    y = T.fvector("y")
    f = theano.function([y], T.grad(T.nnet.softmax(y).mean(), y),
                        mode=mode_wo_cudnn)
    sorted_f = f.maker.fgraph.toposort()
    assert len([i for i in sorted_f
                if isinstance(i.op, theano.sandbox.cuda.dnn.GpuDnnSoftmaxGrad)]) == 0
    assert len([i for i in sorted_f
                if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 1

    # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad replacement does not
    # crash on a manually built graph.
    y = T.fvector("y")
    o = theano.tensor.nnet.SoftmaxGrad()(y, y * 2)
    f = theano.function([y], o, mode=mode_with_gpu)
    sorted_f = f.maker.fgraph.toposort()
    assert len([i for i in sorted_f
                if isinstance(i.op, theano.sandbox.cuda.dnn.GpuDnnSoftmaxGrad)]) == 1
    assert len([i for i in sorted_f
                if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 0

Example 3: setUp

def setUp(self):
    self.x_true = np.random.uniform(low=0, high=1, size=5).astype('float32')
    self.x_false_list = [np.random.uniform(low=0, high=1, size=5).astype('float32')
                         for i in range(10)]
    x_true_var = T.fvector()
    x_false_var_list = [T.fvector() for t in self.x_false_list]
    self.test = function(inputs=[x_true_var] + x_false_var_list,
                         outputs=negative_sampling_loss(x_true_var, x_false_var_list))

Example 4: __init__

def __init__(self, name, path, learning_rate=0.001):
    self.r_symbol = T.fvector('r')
    self.gamma_symbol = T.fscalar('gamma')
    self.action_symbol = T.fmatrix('action')
    self.y_symbol = T.fvector('y')
    super(ReinforcementModel, self).__init__(
        name, path, learning_rate=learning_rate)

Example 5: test_0

def test_0():
    N = 16 * 1000 * 10 * 1
    if 1:
        aval = abs(numpy.random.randn(N).astype('float32')) + .1
        bval = numpy.random.randn(N).astype('float32')
        a = T.fvector()
        b = T.fvector()
    else:
        aval = abs(numpy.random.randn(N)) + .1
        bval = numpy.random.randn(N)
        a = T.dvector()
        b = T.dvector()
    f = theano.function([a, b], T.pow(a, b), mode='LAZY')
    theano_opencl.elemwise.swap_impls = False
    g = theano.function([a, b], T.pow(a, b), mode='LAZY')
    print('ocl time', timeit.Timer(lambda: f(aval, bval)).repeat(3, 3))
    print('gcc time', timeit.Timer(lambda: g(aval, bval)).repeat(3, 3))
    print('numpy time', timeit.Timer(lambda: aval ** bval).repeat(3, 3))
    assert ((f(aval, bval) - aval ** bval) ** 2).sum() < 1.1
    assert ((g(aval, bval) - aval ** bval) ** 2).sum() < 1.1

Example 6: optimize

def optimize(self, train_data, lam, fixed_length=3):
    i = T.iscalar('i')
    lr = T.fscalar('lr')
    Xl = T.fvector('Xl')
    Xr = T.fvector('Xr')
    cost = self.ae.cost(Xl, Xr)  # + lam * self.ae.penalty()
    grads = T.grad(cost, self.ae.params)
    update_vars = []
    for var, gvar in zip(self.ae.params, grads):
        if var.get_value().ndim == 1:
            update_vars.append((var, var - 0.1 * lr * gvar))
        # elif var.get_value().ndim > 1:
        #     new_param = var - lr * gvar
        #     len_W = T.sqrt(T.sum(new_param ** 2, axis=0))
        #     desired_W = T.clip(len_W, 0., fixed_length)
        #     ratio = desired_W / (len_W + 1e-7)
        #     new_param = new_param * ratio
        #     update_vars.append((var, new_param))
        else:
            update_vars.append((var, var - lr * gvar))
    opt = theano.function([i, lr], cost, updates=update_vars,
                          givens={Xl: train_data[i, 0], Xr: train_data[i, 1]})
    # get_grad = theano.function([], grads[3], givens={X: train_data[0]}, allow_input_downcast=True)
    # get_gradb = theano.function([], grads[-1], givens={X: train_data[0]}, allow_input_downcast=True)
    return opt  # , get_grad, get_gradb
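
The givens pattern in optimize is worth isolating: the symbolic fvector inputs never appear in the compiled function's argument list; instead they are substituted at compile time by slices of a shared variable, selected by the index argument. Below is a minimal sketch of that mechanism, assuming invented data (this is our illustration, not project code):

import numpy as np
import theano
import theano.tensor as T

data = theano.shared(np.random.rand(100, 5).astype('float32'))
i = T.iscalar('i')
X = T.fvector('X')
# X is not an input of f; `givens` replaces it with the i-th row
# of the shared data, so each call passes only the index.
f = theano.function([i], (X ** 2).sum(), givens={X: data[i]})
print(f(0))  # sum of squares of row 0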

Example 7: test_brier_score_loss_from_scikit_learn_example

def test_brier_score_loss_from_scikit_learn_example():
    """
    From the sklearn docs:

    Examples
    --------
    >>> import numpy as np
    >>> from sklearn.metrics import brier_score_loss
    >>> y_true = np.array([0, 1, 1, 0])
    >>> y_prob = np.array([0.1, 0.9, 0.8, 0.3])
    >>> brier_score_loss(y_true, y_prob)
    0.037...
    """
    y_true = T.fvector('y_true')
    y_predicted = T.fvector('y_predicted')
    brier_score = tmetrics.brier_score_loss(y_true, y_predicted)
    f = theano.function([y_true, y_predicted], brier_score)
    yt = np.array([0, 1, 1, 0], 'float32')
    yp = np.array([.1, .9, .8, .3], theano.config.floatX)
    refscore = sklearn.metrics.brier_score_loss(yt, yp)
    tol = .01
    score = f(yt, yp)
    assert (refscore - tol) < score < (refscore + tol)
    # Also check that the function is numpy/pandas compatible.
    assert (refscore - tol) < tmetrics.brier_score_loss(yt, yp) < (refscore + tol)

Example 8: __init__

def __init__(self, input_layers, *args, **kwargs):
    super(RMSEObjective, self).__init__(input_layers, *args, **kwargs)
    self.input_systole = input_layers["systole:value"]
    self.input_diastole = input_layers["diastole:value"]
    self.target_vars["systole:value"] = T.fvector("systole_target_value")
    self.target_vars["diastole:value"] = T.fvector("diastole_target_value")

Example 9: theanoVecVecMul

def theanoVecVecMul(In1, In2, opt):
    var1 = T.fvector('var1')
    var2 = T.fvector('var2')
    if opt == 'M':
        var3 = T.dot(var1, var2)  # inner (dot) product; 'T.fot' was a typo
    else:
        var3 = T.mul(var1, var2)  # elementwise product
    DivVec = function([var1, var2], var3)
    return DivVec(In1, In2)
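
A quick usage sketch for the helper above (the input arrays are invented for illustration; both branches expect float32 vectors of equal length, and any opt other than 'M' selects the elementwise branch):

import numpy as np

a = np.array([1., 2., 3.], dtype='float32')
b = np.array([4., 5., 6.], dtype='float32')
print(theanoVecVecMul(a, b, 'M'))  # inner product   -> 32.0
print(theanoVecVecMul(a, b, 'E'))  # elementwise mul -> [ 4. 10. 18.]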

Example 10: __init__

def __init__(self, num_emb, emb_dim, hidden_dim, output_dim,
             degree=2, learning_rate=0.01, momentum=0.9,
             trainable_embeddings=True,
             labels_on_nonroot_nodes=False):
    assert emb_dim > 1 and hidden_dim > 1
    self.num_emb = num_emb
    self.emb_dim = emb_dim
    self.hidden_dim = hidden_dim
    self.output_dim = output_dim
    self.degree = degree
    self.learning_rate = learning_rate
    self.momentum = momentum

    self.params = []
    self.embeddings = theano.shared(self.init_matrix([self.num_emb, self.emb_dim]))
    if trainable_embeddings:
        self.params.append(self.embeddings)

    self.x = T.ivector(name='x')  # word indices
    self.tree = T.imatrix(name='tree')  # shape [None, self.degree]
    if labels_on_nonroot_nodes:
        self.y = T.fmatrix(name='y')  # output shape [None, self.output_dim]
        self.y_exists = T.fvector(name='y_exists')  # shape [None]
    else:
        self.y = T.fvector(name='y')  # output shape [self.output_dim]

    self.num_words = self.x.shape[0]  # total number of nodes (leaves + internal) in tree
    emb_x = self.embeddings[self.x]
    emb_x = emb_x * T.neq(self.x, -1).dimshuffle(0, 'x')  # zero out non-existent embeddings
    self.tree_states = self.compute_tree(emb_x, self.tree)
    self.final_state = self.tree_states[-1]
    if labels_on_nonroot_nodes:
        self.output_fn = self.create_output_fn_multi()
        self.pred_y = self.output_fn(self.tree_states)
        self.loss = self.loss_fn_multi(self.y, self.pred_y, self.y_exists)
    else:
        self.output_fn = self.create_output_fn()
        self.pred_y = self.output_fn(self.final_state)
        self.loss = self.loss_fn(self.y, self.pred_y)

    updates = self.gradient_descent(self.loss)
    train_inputs = [self.x, self.tree, self.y]
    if labels_on_nonroot_nodes:
        train_inputs.append(self.y_exists)
    self._train = theano.function(train_inputs,
                                  [self.loss, self.pred_y],
                                  updates=updates)
    self._evaluate = theano.function([self.x, self.tree],
                                     self.final_state)
    self._predict = theano.function([self.x, self.tree],
                                    self.pred_y)

Example 11: test_roc_auc_score

def test_roc_auc_score():
    true = np.random.binomial(n=1, p=.5, size=50).astype('float32')
    # true = np.array([0, 0, 1, 1]).astype('float32')
    predicted = np.random.random(size=50).astype('float32')
    # predicted = np.array([0.1, 0.4, 0.35, 0.8]).astype('float32')
    yt = T.fvector('y_true')
    yp = T.fvector('y_predicted')
    roc_auc_score_expr = tmetrics.classification.roc_auc_score(yt, yp)
    refscore = sklearn.metrics.roc_auc_score(true, predicted)
    print('refscore')
    print(refscore)
    f = theano.function([yt, yp], roc_auc_score_expr)
    score = f(true, predicted)
    print('score')
    print(score)
    try:
        assert np.allclose(refscore, score)
    except AssertionError:
        fps, tps, thresholds = tmetrics.classification._binary_clf_curve(yt, yp)
        fpr, tpr, _thresh = tmetrics.classification.roc_curve(yt, yp)
        f = theano.function([yt, yp], [fps, tps, thresholds, fpr, tpr, _thresh,
                                       roc_auc_score_expr])
        result = f(true, predicted)
        print('** tmetrics **')
        print('fps')
        print(result[0])
        print('tps')
        print(result[1])
        print('thresholds')
        print(result[2])
        print('fpr')
        print(result[3])
        print('tpr')
        print(result[4])
        print('_thresh')
        print(result[5])
        print('roc score')
        print(result[6])
        print('** refscore **')
        curve = sklearn.metrics.ranking._binary_clf_curve(true, predicted)
        print('fpr')
        print(curve[0])
        print('tpr')
        print(curve[1])
        print('thresholds')
        print(curve[2])
        trapz = np.trapz(curve[1], curve[0])
        print('trapz')
        print(trapz)
        print('auc')
        print(sklearn.metrics.ranking.auc(curve[0], curve[1]))
        print('roc_curve')
        print(sklearn.metrics.roc_curve(true, predicted))
        raise

Example 12: main

def main():
    # Load the data set.
    dataset_for_error = '/vega/stats/users/sl3368/Data_LC/NormData/LC_stim_15.mat'
    stimuli = load_class_data_batch(dataset_for_error)
    stim = stimuli[0]
    data = theano.shared(stim, borrow=True)
    print('Number of rows: ')
    print(stim.shape[0])

    # Shared variable accumulating the error.
    init = numpy.float64(0.0)
    mean_error = shared(init)

    # Theano expressions for the mean squared error at one lag.
    prediction = T.fvector('predict')  # 60-element row vector representing time t
    real = T.fvector('real')  # row representing time t+1
    cost = T.mean((real - prediction) ** 2)

    # Compiled function that also accumulates the error.
    batch_error = theano.function([prediction, real], cost,
                                  updates=[(mean_error, mean_error + cost)])

    increment = stim.shape[0] // 100
    # Iterate over the batch and accumulate the error.
    for index in range(stim.shape[0] - 1):
        if index % increment == 0:
            print(str(index // increment) + '% done...')
        recent = batch_error(stim[index], stim[index + 1])
    # m_e_avg = mean_error / 9000000

    # Print the result.
    print('Total error: ')
    print(mean_error.get_value())

    print('Finding padding amount...')
    num_zero = float(0.0)
    # Count the all-zero (padding) rows.
    for index in range(stim.shape[0]):
        is_zero = True
        for i in range(60):
            if stim[index][i] != 0:
                is_zero = False
        if is_zero:
            num_zero = num_zero + 1
    print('Percent Zero: ' + str(float(num_zero / (increment * 100))))

Example 13: test_softmax_grad

def test_softmax_grad(self):
    def cmp(n, m, f, f_gpu):
        data = numpy.arange(n * m, dtype="float32").reshape(n, m)
        gdata = numpy.asarray(data)[:, :, None, None]
        out = f(data)
        gout = numpy.asarray(f_gpu(gdata))[:, :, 0, 0]
        utt.assert_allclose(out, gout)

    x = T.matrix("x", "float32")
    x_gpu = T.tensor4("x_gpu", "float32")
    f_z = T.nnet.softmax_op
    f_gpu = dnn.GpuDnnSoftmax("accurate", "channel")

    # Verify the grad operation.
    dims = (2, 3, 4, 5)
    gdata = numpy.arange(numpy.product(dims), dtype="float32").reshape(dims)
    T.verify_grad(f_gpu, [gdata], rng=numpy.random, mode=mode_with_gpu)

    # Verify that the CPU and GPU implementations return the same results
    # up to a tolerance.
    self._test_softmax(x, x_gpu, f_z, f_gpu, cmp)
    self._test_softmax(x, x, f_z, f_z, self._cmp)

    # Verify that the SoftmaxGrad -> Gpu[Dnn]SoftmaxGrad optimization
    # is applied when cuDNN is required.
    y = T.fvector("y")
    f = theano.function([y], T.grad(T.nnet.softmax(y).mean(), y),
                        mode=mode_with_gpu)
    sorted_f = f.maker.fgraph.toposort()
    assert len([i for i in sorted_f if isinstance(i.op, self.gpu_grad_op)]) == 1
    assert len([i for i in sorted_f
                if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 0

    # Verify that the optimization is not applied when cuDNN is excluded
    # or unavailable.
    mode_wo_cudnn = mode_with_gpu.excluding("cudnn")
    y = T.fvector("y")
    f = theano.function([y], T.grad(T.nnet.softmax(y).mean(), y),
                        mode=mode_wo_cudnn)
    sorted_f = f.maker.fgraph.toposort()
    assert len([i for i in sorted_f if isinstance(i.op, self.gpu_grad_op)]) == 0
    assert len([i for i in sorted_f
                if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 1

    # Verify that the SoftmaxGrad -> GpuDnnSoftmaxGrad replacement does not
    # crash on a manually built graph.
    y = T.fvector("y")
    o = theano.tensor.nnet.SoftmaxGrad()(y, y * 2)
    f = theano.function([y], o, mode=mode_with_gpu)
    sorted_f = f.maker.fgraph.toposort()
    assert len([i for i in sorted_f if isinstance(i.op, self.gpu_grad_op)]) == 1
    assert len([i for i in sorted_f
                if isinstance(i.op, theano.tensor.nnet.SoftmaxGrad)]) == 0

Example 14: get_div_function

def get_div_function(self):
    tind = T.ivector('ind')
    if self.NMF_updates == 'beta':
        self.div = theano.function(inputs=[tind],
                                   outputs=costs.beta_div(self.X_buff[tind[1]:tind[2], ],
                                                          self.W[tind[0]].T,
                                                          self.H[tind[3]:tind[4], ],
                                                          self.beta),
                                   name="div",
                                   allow_input_downcast=True)
    if self.NMF_updates == 'groupNMF':
        tcomp = T.ivector('comp')
        tlambda = T.fvector('lambda')
        tSc = T.ivector('Sc')
        tCs = T.ivector('Cs')
        tparams = [tind, tcomp, tlambda, tSc, tCs]
        cost, beta_div, cls_dist, ses_dist = costs.group_div(self.X_buff[tind[1]:tind[2], ],
                                                             self.W,
                                                             self.H[tind[3]:tind[4], ],
                                                             self.beta,
                                                             tparams)
        self.div = theano.function(inputs=[tind, tcomp, tlambda, tSc, tCs],
                                   outputs=[cost, beta_div, cls_dist, ses_dist],
                                   name="div",
                                   allow_input_downcast=True,
                                   on_unused_input='ignore')
    if self.NMF_updates == 'noiseNMF':
        tcomp = T.ivector('comp')
        tlambda = T.fvector('lambda')
        tSc = T.ivector('Sc')
        tparams = [tind, tcomp, tlambda, tSc]
        cost, beta_div, cls_dist, ses_dist = costs.noise_div(self.X_buff[tind[1]:tind[2], ],
                                                             self.W,
                                                             self.Wn,
                                                             self.H[tind[3]:tind[4], ],
                                                             self.beta,
                                                             tparams)
        self.div = theano.function(inputs=[tind, tcomp, tlambda, tSc],
                                   outputs=[cost, beta_div, cls_dist, ses_dist],
                                   name="div",
                                   allow_input_downcast=True,
                                   on_unused_input='ignore')

Example 15: test_1D_roc_auc_scores

def test_1D_roc_auc_scores():
    yt = T.fvector('yt')
    yp = T.fvector('yp')
    y = np.array([0, 0, 1, 1]).astype('float32')
    scores = np.array([0.1, 0.4, 0.35, 0.8]).astype('float32')
    ref_fpr, ref_tpr, ref_thresh = sklearn.metrics.roc_curve(y, scores)
    roc_auc_scores = tmetrics.classification.roc_auc_scores(yt, yp)
    fpr, tpr, thresh = tmetrics.classification.roc_curves(yt, yp)
    f = theano.function([yt, yp], [fpr, tpr, thresh, roc_auc_scores])
    score_fpr, score_tpr, score_thresh, score_auc = f(y, scores)
    assert np.allclose(ref_fpr, score_fpr)
    assert np.allclose(ref_tpr, score_tpr)
    assert np.allclose(ref_thresh, score_thresh)
    assert np.allclose(sklearn.metrics.roc_auc_score(y, scores), score_auc)