This article collects typical usage examples of the Python function theano.tensor.dvector. If you have been wondering what exactly dvector does, how it is called, or what real-world usage looks like, the curated code examples here should help.
The sections below show 15 code examples of the dvector function, sorted by popularity by default.
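Before the examples themselves, here is a minimal sketch (not taken from any of the examples below) of what dvector provides: it declares a symbolic one-dimensional vector of float64 values that can be combined into expressions and compiled with theano.function.

import theano
import theano.tensor as T

x = T.dvector('x')                   # symbolic float64 vector
y = T.dvector('y')
f = theano.function([x, y], x + y)   # compile the symbolic graph into a callable
print(f([1.0, 2.0], [3.0, 4.0]))     # -> array([4., 6.])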
Example 1: test_profiling
def test_profiling():
    old1 = theano.config.profile
    old2 = theano.config.profile_memory
    try:
        theano.config.profile = True
        theano.config.profile_memory = True

        x = T.dvector("x")
        y = T.dvector("y")
        z = x + y

        p = theano.ProfileStats(False)
        if theano.config.mode in ["DebugMode", "DEBUG_MODE"]:
            m = "FAST_RUN"
        else:
            m = None
        f = theano.function([x, y], z, profile=p, name="test_profiling",
                            mode=m)
        output = f([1, 2, 3, 4], [1, 1, 1, 1])

        buf = StringIO.StringIO()
        f.profile.summary(buf)
    finally:
        theano.config.profile = old1
        theano.config.profile_memory = old2
Example 2: theano_setup
def theano_setup(self):
    W = T.dmatrix('W')
    b = T.dvector('b')
    c = T.dvector('c')
    x = T.dmatrix('x')

    s = T.dot(x, W) + c
    # h = 1 / (1 + T.exp(-s))
    # h = T.nnet.sigmoid(s)
    h = T.tanh(s)
    # r = T.dot(h,W.T) + b
    # r = theano.printing.Print("r=")(2*T.tanh(T.dot(h,W.T) + b))
    ract = T.dot(h, W.T) + b
    r = self.output_scaling_factor * T.tanh(ract)

    # g = function([W,b,c,x], h)
    # f = function([W,b,c,h], r)
    # fg = function([W,b,c,x], r)

    # Another variable to be able to call a function
    # with a noisy x and compare it to a reference x.
    y = T.dmatrix('y')

    all_losses = ((r - y)**2)
    loss = T.sum(all_losses)
    # loss = ((r - y)**2).sum()

    self.theano_encode_decode = function([W, b, c, x], r)
    self.theano_all_losses = function([W, b, c, x, y], [all_losses, T.abs_(s), T.abs_(ract)])
    self.theano_gradients = function([W, b, c, x, y], [T.grad(loss, W), T.grad(loss, b), T.grad(loss, c)])
Example 3: dtw
def dtw(array1, array2):
    """
    Accepts: two one dimensional arrays
    Returns: (float) DTW distance between them.
    """
    s = np.zeros((array1.size + 1, array2.size + 1))
    s[:, 0] = 1e6
    s[0, :] = 1e6
    s[0, 0] = 0.0

    # Set up symbolic variables
    square = T.dmatrix('square')
    vec1 = T.dvector('vec1')
    vec2 = T.dvector('vec2')
    vec1_length = T.dscalar('vec1_length')
    vec2_length = T.dscalar('vec2_length')
    outer_loop = T.arange(vec1_length, dtype='int64')
    inner_loop = T.arange(vec2_length, dtype='int64')

    # Run the outer loop
    path, _ = scan(fn=outer,
                   outputs_info=[dict(initial=square, taps=[-1])],
                   non_sequences=[inner_loop, vec1, vec2],
                   sequences=outer_loop)

    # Compile the function
    theano_square = function([vec1, vec2, square, vec1_length, vec2_length],
                             path, on_unused_input='warn')

    # Call the compiled function and return the actual distance
    return theano_square(array1, array2, s, array1.size, array2.size)[-1][array1.size, array2.size]
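As a hedged usage sketch (not part of the original example; it assumes numpy is imported as np and that the outer step function referenced in the scan call is defined elsewhere in the module), dtw is called directly on two one-dimensional numpy arrays:

a = np.array([0.0, 1.0, 2.0, 4.0])
b = np.array([0.0, 1.5, 3.5])
distance = dtw(a, b)   # DTW distance as a float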
Example 4: make_minimizer
def make_minimizer(Model):
    L, y = T.ivector('L'), T.dvector('y')
    mu, eps = T.dscalar('mu'), T.dscalar('eps')
    R, eta = T.dtensor3('R'), T.dvector('eta')

    model = Model(L, y, mu, R, eta, eps)
    return theano.function([L, y, mu, R, eta, eps], model.minimize())
Example 5: __init__
def __init__(self, N, Nsub, NRGC, prior=1):
    self.N = N
    self.Nsub = Nsub
    self.NRGC = NRGC

    # SYMBOLIC variables
    U   = Th.dmatrix()
    V1  = Th.dvector()
    V2  = Th.dvector()
    STA = Th.dvector()
    STC = Th.dmatrix()

    theta  = Th.dot(U.T, V1)
    UV1U   = Th.dot(U, theta)
    UV1V2U = Th.dot(V1 * U.T, (V2 * U.T).T)
    posterior = -0.5  * Th.sum(V1 * V2 * U.T * U.T) \
                - 0.25 * Th.sum(UV1V2U.T * UV1V2U) \
                - 0.5  * Th.sum(UV1U * UV1U * UV1U * V2 * V2 * V1) \
                - 0.5  * Th.sum(UV1U * UV1U * V2 * V1) \
                - 0.5  * Th.sum(theta * theta) \
                + Th.dot(theta.T, STA) \
                + Th.sum(Th.dot(V1 * V2 * U.T, U)
                         * (STC + STA.T * STA))

    dpost_dU  = Th.grad(cost=posterior, wrt=U)
    dpost_dV1 = Th.grad(cost=posterior, wrt=V1)
    dpost_dV2 = Th.grad(cost=posterior, wrt=V2)

    # self.posterior = function([U, V2, V1, STA, STC], UV1V2U)
    self.posterior  = function([U, V2, V1, STA, STC], posterior)
    self.dpost_dU   = function([U, V2, V1, STA, STC], dpost_dU)
    self.dpost_dV1  = function([U, V2, V1, STA, STC], dpost_dV1)
    self.dpost_dV2  = function([U, V2, V1, STA, STC], dpost_dV2)
Example 6: __init__
def __init__(self, first_W):
    self.log_regression = LogisticRegression(first_W)
    st = T.dvector('st')
    ac = T.dvector('ac')
    z = ac * ac
    self.q_ = th.function(
        inputs=[st, ac],
        outputs=[self.log_regression.cost(T.concatenate([ac, z, st, ac[:-1] * st[:-1]]))])
Example 7: LQLEP_wBarrier
def LQLEP_wBarrier( LQLEP   = Th.dscalar(), ldet = Th.dscalar(), v1 = Th.dvector(),
                    N_spike = Th.dscalar(), ImM  = Th.dmatrix(), U  = Th.dmatrix(),
                    V2      = Th.dvector(), u    = Th.dvector(), C  = Th.dmatrix(),
                    **other):
    '''
    The actual Linear-Quadratic-Exponential-Poisson log-likelihood,
    as a function of theta and M,
    with a barrier on the log-det term and a prior.
    '''
    sq_nonlinearity = V2**2. * Th.sum(Th.dot(U, C) * U, axis=[1])        # Th.sum(U**2,axis=[1])
    nonlinearity = V2 * Th.sqrt(Th.sum(Th.dot(U, C) * U, axis=[1]))      # Th.sum(U**2,axis=[1])
    if other.has_key('uc'):
        LQLEP_wPrior = LQLEP + 0.5 * N_spike * ( 1./(ldet+250.)**2. \
                     - 0.000001 * Th.sum(Th.log(1.-4*sq_nonlinearity))) \
                     + 10. * Th.sum( (u[2:]+u[:-2]-2*u[1:-1])**2. ) \
                     + 10. * Th.sum( (other['uc'][2:]+other['uc'][:-2]-2*other['uc'][1:-1])**2. ) \
                     + 0.000000001 * Th.sum( v1**2. )
        # + 100. * Th.sum( v1 )
        # + 0.0001*Th.sum( V2**2 )
    else:
        LQLEP_wPrior = LQLEP + 0.5 * N_spike * ( 1./(ldet+250.)**2. \
                     - 0.000001 * Th.sum(Th.log(1.-4*sq_nonlinearity))) \
                     + 10. * Th.sum( (u[2:]+u[:-2]-2*u[1:-1])**2. ) \
                     + 0.000000001 * Th.sum( v1**2. )
        # + 100. * Th.sum( v1 )
        # + 0.0001*Th.sum( V2**2 )
    eigsImM, barrier = eig( ImM )
    barrier = 1 - (Th.sum(Th.log(eigsImM)) > -250) * \
              (Th.min(eigsImM) > 0) * (Th.max(4*sq_nonlinearity) < 1)
    other.update(locals())
    return named( **other )
Example 8: test_uniform_vector
def test_uniform_vector(self):
    random = RandomStreams(utt.fetch_seed())
    low = tensor.dvector()
    high = tensor.dvector()
    out = random.uniform(low=low, high=high)
    assert out.ndim == 1
    f = function([low, high], out)

    low_val = [.1, .2, .3]
    high_val = [1.1, 2.2, 3.3]
    seed_gen = numpy.random.RandomState(utt.fetch_seed())
    numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))

    # Arguments of size (3,)
    val0 = f(low_val, high_val)
    numpy_val0 = numpy_rng.uniform(low=low_val, high=high_val)
    print('THEANO', val0)
    print('NUMPY', numpy_val0)
    assert numpy.all(val0 == numpy_val0)

    # Arguments of size (2,)
    val1 = f(low_val[:-1], high_val[:-1])
    numpy_val1 = numpy_rng.uniform(low=low_val[:-1], high=high_val[:-1])
    print('THEANO', val1)
    print('NUMPY', numpy_val1)
    assert numpy.all(val1 == numpy_val1)

    # Specifying the size explicitly
    g = function([low, high], random.uniform(low=low, high=high, size=(3,)))
    val2 = g(low_val, high_val)
    numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
    numpy_val2 = numpy_rng.uniform(low=low_val, high=high_val, size=(3,))
    assert numpy.all(val2 == numpy_val2)
    self.assertRaises(ValueError, g, low_val[:-1], high_val[:-1])
Example 9: init_propagate_function
def init_propagate_function(self):
    x = T.dvector()
    y = T.dmatrix()
    b = T.dvector()
    z = T.dot(x, y) + b
    f = theano.function([x, y, b], z)
    return f
Example 10: neural_net
def neural_net(
        x=T.dmatrix(),       # our points, one point per row
        y=T.dmatrix(),       # our targets
        w=T.dmatrix(),       # first layer weights
        b=T.dvector(),       # first layer bias
        v=T.dmatrix(),       # second layer weights
        c=T.dvector(),       # second layer bias
        step=T.dscalar(),    # step size for gradient descent
        l2_coef=T.dscalar()  # l2 regularization amount
        ):
    """Idea A:
    """
    hid = T.tanh(T.dot(x, w) + b)
    pred = T.dot(hid, v) + c
    sse = T.sum((pred - y) * (pred - y))
    w_l2 = T.sum(T.sum(w * w))
    v_l2 = T.sum(T.sum(v * v))
    loss = sse + l2_coef * (w_l2 + v_l2)

    def symbolic_params(cls):
        return [cls.w, cls.b, cls.v, cls.c]

    def update(cls, x, y, **kwargs):
        params = cls.symbolic_params()
        gp = T.grad(cls.loss, params)
        return [], [In(p, update=p - cls.step * g) for p, g in zip(params, gp)]

    def predict(cls, x, **kwargs):
        return cls.pred, []

    return locals()
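A rough usage sketch (an assumption, not part of the original source): because neural_net returns its locals(), the symbolic graph can be pulled out of the returned dict and compiled, for example to evaluate the regularized loss on concrete arrays.

nn = neural_net()
# step is not used by the loss, so it is omitted from the input list
f_loss = theano.function(
    [nn['x'], nn['y'], nn['w'], nn['b'], nn['v'], nn['c'], nn['l2_coef']],
    nn['loss'])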
Example 11: test_0
def test_0():
    N = 16 * 1000 * 10 * 1

    if 1:
        aval = abs(numpy.random.randn(N).astype('float32')) + .1
        bval = numpy.random.randn(N).astype('float32')
        a = T.fvector()
        b = T.fvector()
    else:
        aval = abs(numpy.random.randn(N)) + .1
        bval = numpy.random.randn(N)
        a = T.dvector()
        b = T.dvector()

    f = theano.function([a, b], T.pow(a, b), mode='LAZY')
    theano_opencl.elemwise.swap_impls = False
    g = theano.function([a, b], T.pow(a, b), mode='LAZY')
    print 'ocl time', timeit.Timer(lambda: f(aval, bval)).repeat(3, 3)
    print 'gcc time', timeit.Timer(lambda: g(aval, bval)).repeat(3, 3)
    print 'numpy time', timeit.Timer(lambda: aval**bval).repeat(3, 3)
    assert ((f(aval, bval) - aval**bval)**2).sum() < 1.1
    assert ((g(aval, bval) - aval**bval)**2).sum() < 1.1
Example 12: test_loss_updates_one_layer_positive_relu
def test_loss_updates_one_layer_positive_relu(self):
    n_vis = 4
    n_hid = 2
    hidden_layer = HiddenLayer(n_vis=n_vis, n_hid=n_hid, layer_name='h',
                               activation='relu', param_init_range=0, alpha=0)
    # W = theano.shared(value=np.ones((n_vis, n_hid)), name='h_W', borrow=True)
    # hidden_layer.W = W
    mlp = QNetwork([hidden_layer], discount=1, learning_rate=1)

    features = T.dvector('features')
    action = T.lscalar('action')
    reward = T.dscalar('reward')
    next_features = T.dvector('next_features')

    loss, updates = mlp.get_loss_and_updates(features, action, reward, next_features)
    train = theano.function(
        [features, action, reward, next_features],
        outputs=loss,
        updates=updates,
        mode='FAST_COMPILE')

    features = [1, 1, 1, 1]
    action = 0
    reward = 1
    next_features = [1, 1, 1, 1]

    actual_loss = train(features, action, reward, next_features)
    expected_loss = 0.5
    actual_weights = list(mlp.layers[0].W.eval())
    expected_weights = [[1, 0], [1, 0], [1, 0], [1, 0]]

    self.assertEqual(actual_loss, expected_loss)
    self.assertTrue(np.array_equal(actual_weights, expected_weights))
Example 13: UV
def UV( U = Th.dmatrix('U'), V1 = Th.dvector('V1'), V2 = Th.dvector('V2'), **result):
    '''
    Reparameterize theta and M as a function of U, V1 and V2.
    '''
    result['theta'] = Th.dot(U.T, V1)
    result['M']     = Th.dot(V1 * U.T, (V2 * U.T).T)
    return result
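A small usage sketch (assumed, not from the original code; it presumes theano and numpy are imported): fresh symbolic variables are passed through UV and the reparameterized quantities are compiled so they can be evaluated on concrete arrays.

U, V1, V2 = Th.dmatrix('U'), Th.dvector('V1'), Th.dvector('V2')
out = UV(U=U, V1=V1, V2=V2)
f = theano.function([U, V1, V2], [out['theta'], out['M']])
theta_val, M_val = f(np.random.randn(3, 5), np.random.randn(3), np.random.randn(3))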
Example 14: test_normal_vector
def test_normal_vector(self):
    random = RandomStreams(utt.fetch_seed())
    avg = tensor.dvector()
    std = tensor.dvector()
    out = random.normal(avg=avg, std=std)
    assert out.ndim == 1
    f = function([avg, std], out)

    avg_val = [1, 2, 3]
    std_val = [.1, .2, .3]
    seed_gen = numpy.random.RandomState(utt.fetch_seed())
    numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))

    # Arguments of size (3,)
    val0 = f(avg_val, std_val)
    numpy_val0 = numpy_rng.normal(loc=avg_val, scale=std_val)
    assert numpy.allclose(val0, numpy_val0)

    # Arguments of size (2,)
    val1 = f(avg_val[:-1], std_val[:-1])
    numpy_val1 = numpy_rng.normal(loc=avg_val[:-1], scale=std_val[:-1])
    assert numpy.allclose(val1, numpy_val1)

    # Specifying the size explicitly
    g = function([avg, std], random.normal(avg=avg, std=std, size=(3,)))
    val2 = g(avg_val, std_val)
    numpy_rng = numpy.random.RandomState(int(seed_gen.randint(2**30)))
    numpy_val2 = numpy_rng.normal(loc=avg_val, scale=std_val, size=(3,))
    assert numpy.allclose(val2, numpy_val2)
    self.assertRaises(ValueError, g, avg_val[:-1], std_val[:-1])
Example 15: __init__
def __init__(self, sizes, input_dim, output_dim):
    self.layers = len(sizes) + 1
    in_dim = [input_dim] + sizes
    out_dim = sizes + [output_dim]

    x = T.dvector('x')
    y = T.dvector('y')
    self.hyp_params = []
    for i, (r, c) in enumerate(zip(in_dim, out_dim)):
        if i == 0:
            obj = HiddenLayer(x, r, c)
        else:
            obj = HiddenLayer(obj.output, r, c)
        self.hyp_params.append(obj.params)
    yhat = obj.output
    prediction = T.argmax(yhat)
    self.predict = theano.function([x], [yhat])

    o_error = T.sum(T.sqr(yhat - y))
    # o_error = T.sum(T.nnet.categorical_crossentropy(yhat, y))

    updates = []
    learning_rate = T.scalar('learning_rate')
    for param in self.hyp_params:
        updates.append((param['W'], param['W'] - learning_rate * T.grad(o_error, param['W'])))
        updates.append((param['b'], param['b'] - learning_rate * T.grad(o_error, param['b'])))

    self.train_step = theano.function([x, y, learning_rate], [o_error],
                                      updates=updates)