This article collects typical usage examples of the theano.tensor.dscalars function in Python. If you are looking for how to use dscalars, what it does, or concrete examples of it in real code, the hand-picked examples below may help.
A total of 15 code examples of the dscalars function are shown below, sorted by popularity by default.
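Before the examples, here is a minimal sketch (not taken from any of the projects below) showing what dscalars does: it creates several float64 ("double") scalar symbolic variables in one call, which can then be combined into expressions and compiled with theano.function.

import theano
import theano.tensor as T

# dscalars creates several float64 scalar symbolic variables at once.
a, b = T.dscalars('a', 'b')

# Build an expression from the symbolic scalars and compile it.
expr = a * b + 1
f = theano.function([a, b], expr)

print(f(2.0, 3.0))  # expected output: 7.0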
Example 1: test_scalars
def test_scalars(self):
    try:
        import theano.tensor as T
        from theano import function
    except ImportError:
        return
    # Set up variables and function
    vals = [1, 2, 3, 4, 5]
    f = lambda a, b, c, d, e: a + (b * c) - d ** e
    # Set up our objects
    Cs = [ch.Ch(v) for v in vals]
    C_result = f(*Cs)
    # Set up Theano's equivalents
    Ts = T.dscalars('T1', 'T2', 'T3', 'T4', 'T5')
    TF = f(*Ts)
    T_result = function(Ts, TF)
    # Make sure values and derivatives are equal
    self.assertEqual(C_result.r, T_result(*vals))
    for k in range(len(vals)):
        theano_derivative = function(Ts, T.grad(TF, Ts[k]))(*vals)
        # print(C_result.dr_wrt(Cs[k]))
        our_derivative = C_result.dr_wrt(Cs[k])[0, 0]
        # print(theano_derivative, our_derivative)
        self.assertEqual(theano_derivative, our_derivative)
Example 2: sample_gradient
def sample_gradient():
    print("differentiation")
    x, y = T.dscalars("x", "y")
    z = (x + 2 * y) ** 2
    # dz/dx
    gx = T.grad(z, x)
    fgx = theano.function([x, y], gx)
    print(fgx(1.0, 1.0))
    # dz/dy
    gy = T.grad(z, y)
    fgy = theano.function([x, y], gy)
    print(fgy(1.0, 1.0))
    # d{sigmoid(x)}/dx
    # (sigmoid is assumed to come from the surrounding module,
    #  e.g. theano.tensor.nnet.sigmoid)
    x = T.dscalar("x")
    sig = sigmoid(x)
    dsig = T.grad(sig, x)
    f = theano.function([x], dsig)
    print(f(0.0))
    print(f(1.0))
    # d{sigmoid(<x,w>)}/dx
    w = T.dscalar("w")
    sig = sigmoid(T.dot(x, w))
    dsig = T.grad(sig, x)
    f = theano.function([x, w], dsig)
    print(f(1.0, 2.0))
    print(f(3.0, 4.0))
    print()
Example 3: test_examples_6
def test_examples_6(self):
    from theano import Param
    x, y = T.dscalars('x', 'y')
    z = x + y
    f = function([x, Param(y, default=1)], z)
    assert f(33) == array(34.0)
    assert f(33, 2) == array(35.0)
Example 4: defaultValue
def defaultValue(*arg):
    x, y, w = T.dscalars('x', 'y', 'w')
    z = x + y + w
    f = th.function([x, th.In(y, value=1), th.In(w, value=2, name='wName')], z)
    if len(arg) == 3:
        print(f(arg[0], wName=arg[1], y=arg[2]))
    elif len(arg) == 2:
        print(f(arg[0], arg[1]))
    else:
        print(f(arg[0]))
Example 5: test_examples_7
def test_examples_7(self):
    from theano import Param
    x, y, w = T.dscalars('x', 'y', 'w')
    z = (x + y) * w
    f = function([x, Param(y, default=1), Param(w, default=2, name='w_by_name')], z)
    assert f(33) == array(68.0)
    assert f(33, 2) == array(70.0)
    assert f(33, 0, 1) == array(33.0)
    assert f(33, w_by_name=1) == array(34.0)
    assert f(33, w_by_name=1, y=0) == array(33.0)
Example 6: test_default_values
def test_default_values(self):
    # Check that default values are restored
    # when an exception occurs in interactive mode.
    a, b = T.dscalars('a', 'b')
    c = a + b
    func = theano.function([theano.In(a, name='first'), theano.In(b, value=1, name='second')], c)
    x = func(first=1)
    try:
        func(second=2)
    except TypeError:
        assert(func(first=1) == x)
Example 7: test_deepcopy_trust_input
def test_deepcopy_trust_input(self):
    a = T.dscalar()  # the a is for 'anonymous' (un-named).
    x, s = T.dscalars('xs')  # the single string 'xs' expands to two scalars named 'x' and 's'
    f = function([x, In(a, value=1.0, name='a'),
                  In(s, value=0.0, update=s + a * x, mutable=True)],
                 s + a * x)
    f.trust_input = True
    try:
        g = copy.deepcopy(f)
    except NotImplementedError as e:
        if str(e).startswith('DebugMode is not picklable'):
            return
        else:
            raise
    self.assertTrue(f.trust_input is g.trust_input)
    f(np.asarray(2.))
    self.assertRaises((ValueError, AttributeError), f, 2.)
    g(np.asarray(2.))
    self.assertRaises((ValueError, AttributeError), g, 2.)
Example 8: test
def test():
    # multiple inputs, multiple outputs
    a, b = T.dmatrices('a', 'b')
    diff = a - b
    abs_diff = T.abs_(diff)
    sqr_diff = diff ** 2
    f = function([a, b], [diff, abs_diff, sqr_diff])
    h, i, j = f([[0, 1], [2, 3]], [[4, 5], [6, 7]])
    # default value for function arguments
    a, b = T.dscalars('a', 'b')
    z = a + b
    f = function([a, Param(b, default=1)], z)
    print(f(1, b=2))
    print(f(1))
    print(f(1, 2))
    # shared variable
    state = shared(0)
    inc = T.lscalar('inc')  # state is int64 by default
    accumulator = function([inc], state, updates=[(state, state + inc)])
    print(accumulator(300))
    print(state.get_value())
Example 9: brachistochrone_functional
def brachistochrone_functional():
    # define all symbols
    lx, ly = T.dscalars('lx', 'ly')
    fseq = T.dvector('fseq')
    N = fseq.size + 1
    delta_x = lx / N
    iseq = T.arange(N - 1)
    # functional term
    functional_ithterm = lambda i: T.switch(
        T.eq(i, 0),
        T.sqrt(0.5 * (delta_x ** 2 + (fseq[0] - ly) ** 2) / (ly - 0.5 * (fseq[0] + ly))),
        T.sqrt(0.5 * (delta_x ** 2 + (fseq[i] - fseq[i - 1]) ** 2) / (ly - 0.5 * (fseq[i] + fseq[i - 1])))
    )
    # defining the functions
    functional_parts, _ = theano.map(fn=lambda k: functional_ithterm(k), sequences=[iseq])
    functional = functional_parts.sum() + T.sqrt(0.5 * (delta_x ** 2 + (0 - fseq[N - 2]) ** 2) / (ly - 0.5 * (0 + fseq[N - 2])))
    gfunc = T.grad(functional, fseq)
    # compile the functions
    time_fcn = theano.function(inputs=[fseq, lx, ly], outputs=functional)
    grad_time_fcn = theano.function(inputs=[fseq, lx, ly], outputs=gfunc)
    return time_fcn, grad_time_fcn
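A possible way to call the two compiled functions returned above; the endpoint values and the straight-line initial guess are illustrative assumptions, not part of the original example.

import numpy as np

time_fcn, grad_time_fcn = brachistochrone_functional()
lx, ly = 1.0, 1.0                          # assumed horizontal span and starting height
fseq0 = np.linspace(ly, 0.0, 12)[1:-1]     # interior heights of a straight-line initial guess
print(time_fcn(fseq0, lx, ly))             # travel-time functional for the guess
print(grad_time_fcn(fseq0, lx, ly))        # its gradient, e.g. for use with a scipy.optimize minimizer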
Example 10: f
'''
Created on Jun 1, 2015
@author: xujian
'''
import theano
from theano import Param
import theano.tensor as T

a, b, c = T.dscalars('a', 'b', 'c')
z = (a + b) * c
f = theano.function([a, Param(b, default=0), Param(c, default=1, name="third_var")], z)
print(f(1))
print(f(1, 2))
print(f(1, third_var=2))
Example 11: abs
'''
Executing multiple functions
'''
a, b = T.dmatrices('a', 'b')
diff = a - b
abs_diff = abs(a - b)
diff_sq = diff ** 2
mult = function([a, b], [diff, abs_diff, diff_sq])
print(mult([[0, 1], [1, 2]], [[-1, 2], [5, 7]]))
# print(pp(diff))
# print(pp(abs_diff))
'''
Setting a default value for an argument
If an argument is not given, its default value is used; otherwise the given value is used.
'''
x, y = T.dscalars("x", "y")
z = x + y
add = function([x, Param(y, default=1)], z)
print(add(33.0))
print(add(2, 6))
'''
Setting names to parameters
'''
x, y, w = T.dscalars("x", "y", "w")
z = (x + y) * w
add_par = function([x, Param(y, default=1), Param(w, default=2, name="debalu")], z)
print(add_par(33))
print(add_par(33, 6, debalu=5))
Example 12: defaultValue
def defaultValue():
    x, y, z = T.dscalars('x', 'y', 'z')
    return function([x, In(y, value=1), In(z, value=2, name='namedZ')], (x + y) * z)
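A short usage sketch for the compiled function returned by this defaultValue; the call values are illustrative.

f = defaultValue()
print(f(3))                  # (3 + 1) * 2 = 8, both defaults used
print(f(3, 5))               # (3 + 5) * 2 = 16
print(f(3, 5, namedZ=10))    # (3 + 5) * 10 = 80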
Example 13: tuto
def tuto():
    print("\nLogistic Function 1")
    print("---------------------")
    x = T.dmatrix('x')
    s = 1 / (1 + T.exp(-x))
    logistic = theano.function([x], s)
    print(logistic([[0, 1], [-1, -2]]))

    print("\nLogistic Function 2")
    print("---------------------")
    s2 = (1 + T.tanh(x / 2)) / 2
    logistic2 = theano.function([x], s2)
    print(logistic2([[0, 1], [-1, -2]]))

    print("\nComputing More than one Thing at the Same Time")
    print("------------------------------------------------")
    a, b = T.dmatrices('a', 'b')
    diff = a - b
    abs_diff = abs(diff)
    diff_squared = diff ** 2
    f = theano.function([a, b], [diff, abs_diff, diff_squared])
    print(f([[1, 1], [1, 1]], [[0, 1], [2, 3]]))

    print("\nSetting a Default Value for an Argument")
    print("---------------------------------------")
    x, y = T.dscalars('x', 'y')
    z = x + y
    f = function([x, In(y, value=1)], z)
    print(f(33))
    print(f(33, 2))

    print("A Real Example: Logistic Regression")
    print("-----------------------------------")
    rng = numpy.random
    N = 400       # training sample size
    feats = 784   # number of input variables
    # generate a dataset: D = (input_values, target_class)
    D = (rng.randn(N, feats), rng.randint(size=N, low=0, high=2))
    training_steps = 10000

    # Declare Theano symbolic variables
    x = T.dmatrix("x")
    y = T.dvector("y")
    # initialize the weight vector w randomly
    #
    # this and the following bias variable b
    # are shared so they keep their values
    # between training iterations (updates)
    w = theano.shared(rng.randn(feats), name="w")
    # initialize the bias term
    b = theano.shared(0., name="b")
    print("Initial model:")
    print(w.get_value())
    print(b.get_value())

    # Construct Theano expression graph
    p_1 = 1 / (1 + T.exp(-T.dot(x, w) - b))             # Probability that target = 1
    prediction = p_1 > 0.5                              # The prediction thresholded
    xent = -y * T.log(p_1) - (1 - y) * T.log(1 - p_1)   # Cross-entropy loss function
    cost = xent.mean() + 0.01 * (w ** 2).sum()          # The cost to minimize
    gw, gb = T.grad(cost, [w, b])                       # Compute the gradient of the cost
                                                        # w.r.t. weight vector w and
                                                        # bias term b (we shall return to
                                                        # this in a following section of
                                                        # this tutorial)
    # Compile
    train = theano.function(
        inputs=[x, y],
        outputs=[prediction, xent],
        updates=((w, w - 0.1 * gw), (b, b - 0.1 * gb)))
    predict = theano.function(inputs=[x], outputs=prediction)

    # Train
    for i in range(training_steps):
        pred, err = train(D[0], D[1])

    print("Final model:")
    print(w.get_value())
    print(b.get_value())
    print("target values for D:")
    print(D[1])
    print("prediction on D:")
    print(predict(D[0]))
Example 14: test_1_examples_param_default
def test_1_examples_param_default():
    x, y = T.dscalars('x', 'y')
    f = theano.function([x, theano.Param(y, default=1)], x + y)
    assert f(1, 2) == 3
    assert f(1) == 2
Example 15: dscalars
from time import clock
from numpy import ones
from theano import Mode
from theano import function
from theano.tensor import dscalars
from theano.tensor import dmatrices
from theano.tensor import lt
from theano.tensor import mean
from theano.tensor import switch
from theano.ifelse import ifelse
a_dscalar, b_dscalar = dscalars('a', 'b')
x_dmatrix, y_dmatrix = dmatrices('x', 'y')
z_switch_dmatrix = switch(lt(a_dscalar, b_dscalar), mean(x_dmatrix), mean(y_dmatrix))
z_ifelse_dmatrix = ifelse(lt(a_dscalar, b_dscalar), mean(x_dmatrix), mean(y_dmatrix))
# Both ops build a condition over symbolic variables. IfElse takes a boolean condition and two variables as inputs.
# Switch evaluates both output variables, ifelse is lazy and only evaluates one variable with respect to the condition.
# Unless linker='vm' or linker='cvm' are used, ifelse will compute both variables and take the same computation time as switch.
f_switch = function([a_dscalar, b_dscalar, x_dmatrix, y_dmatrix], z_switch_dmatrix, mode=Mode(linker='vm'))
f_ifelse = function([a_dscalar, b_dscalar, x_dmatrix, y_dmatrix], z_ifelse_dmatrix, mode=Mode(linker='vm'))
var1 = 0.
var2 = 1.
big_mat1 = ones((10000, 1000))
big_mat2 = ones((10000, 1000))
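The excerpt stops here. A plausible continuation, in the spirit of the standard Theano conditions tutorial, would time both compiled functions on the same inputs; the number of calls below is an assumption, and on Python 3.8+ time.perf_counter would be used in place of clock.

n_calls = 100  # assumed number of repetitions per function
tic = clock()
for _ in range(n_calls):
    f_switch(var1, var2, big_mat1, big_mat2)
print('time spent evaluating switch:', clock() - tic)

tic = clock()
for _ in range(n_calls):
    f_ifelse(var1, var2, big_mat1, big_mat2)
print('time spent evaluating ifelse:', clock() - tic)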