This article collects typical usage examples of the theano.function function in Python. If you have been wondering how exactly theano.function is used, how to call it, or where to find working examples, the curated code samples here should help.
Fifteen code examples of theano.function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
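Before the examples, a minimal sketch of the pattern they all share: theano.function compiles a symbolic expression into an ordinary Python callable (the variable names here are illustrative):

import theano
import theano.tensor as T

# Declare symbolic inputs, build an expression, and compile it into a callable.
x = T.dscalar('x')
y = T.dscalar('y')
f = theano.function([x, y], x + y)
print(f(2.0, 3.0))  # prints 5.0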
Example 1: test_in_transit
def test_in_transit():
t = np.linspace(-20, 20, 1000)
m_planet = np.array([0.3, 0.5])
m_star = 1.45
r_star = 1.5
orbit = KeplerianOrbit(
m_star=m_star,
r_star=r_star,
t0=np.array([0.5, 17.4]),
period=np.array([10.0, 5.3]),
ecc=np.array([0.1, 0.8]),
omega=np.array([0.5, 1.3]),
m_planet=m_planet,
)
r_pl = np.array([0.1, 0.03])
coords = theano.function([], orbit.get_relative_position(t))()
r2 = coords[0]**2 + coords[1]**2
inds = theano.function([], orbit.in_transit(t, r=r_pl))()
m = np.isin(np.arange(len(t)), inds)
in_ = r2[inds] <= ((r_star + r_pl)**2)[None, :]
in_ &= coords[2][inds] > 0
assert np.all(np.any(in_, axis=1))
out = r2[~m] > ((r_star + r_pl)**2)[None, :]
out |= coords[2][~m] <= 0
assert np.all(out)
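The theano.function([], expr)() idiom used above compiles a zero-argument function and calls it immediately to evaluate the graph once; a minimal sketch:

import theano

s = theano.shared(3.0)
value = theano.function([], s * 2)()  # compile with no inputs, evaluate once
# value == 6.0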
Example 2: define_train_test_funcs
def define_train_test_funcs(self):
activation = self.layers[-1].activation
self.Y = T.matrix("Y")
pYs = T.reshape(activation, (self.maskY.shape[0] * self.batch_size, self.out_size))
tYs = T.reshape(self.Y, (self.maskY.shape[0] * self.batch_size, self.out_size))
cost = self.categorical_crossentropy(pYs, tYs)
gparams = []
for param in self.params:
#gparam = T.grad(cost, param)
gparam = T.clip(T.grad(cost, param), -10, 10)
gparams.append(gparam)
lr = T.scalar("lr")
# eval() resolves the optimizer name (a string such as "adam") to the function object
optimizer = eval(self.optimizer)
updates = optimizer(self.params, gparams, lr)
#updates = sgd(self.params, gparams, lr)
#updates = momentum(self.params, gparams, lr)
#updates = rmsprop(self.params, gparams, lr)
#updates = adagrad(self.params, gparams, lr)
#updates = adadelta(self.params, gparams, lr)
#updates = adam(self.params, gparams, lr)
self.train = theano.function(inputs = [self.X, self.maskX, self.Y, self.maskY, lr, self.batch_size],
givens = {self.is_train : np.cast['int32'](1)},
outputs = cost,
updates = updates)
self.predict = theano.function(inputs = [self.X, self.maskX, self.batch_size],
givens = {self.is_train : np.cast['int32'](0)},
outputs = activation)
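A note on the eval() call above: a dictionary dispatch avoids evaluating arbitrary strings. A minimal sketch, assuming the update functions named in the commented-out lines (sgd, momentum, rmsprop, adagrad, adadelta, adam) are in scope:

# Hypothetical dispatch table over the optimizers named in the comments above.
OPTIMIZERS = {
    "sgd": sgd, "momentum": momentum, "rmsprop": rmsprop,
    "adagrad": adagrad, "adadelta": adadelta, "adam": adam,
}
optimizer = OPTIMIZERS[self.optimizer]  # raises KeyError on unknown names, unlike eval()
updates = optimizer(self.params, gparams, lr)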
Example 3: test_bilinear_kernel_1D
def test_bilinear_kernel_1D(self):
"""Test 1D kernels used in bilinear upsampling
This method tests the correctness of the
1D kernel values used in bilinear upsampling
for some upsampling ratios.
"""
rat = tensor.iscalar()
kernel_ten = bilinear_kernel_1D(ratio=rat, normalize=False)
f_ten = theano.function([rat], kernel_ten)
kernel_ten_norm = bilinear_kernel_1D(ratio=rat, normalize=True)
f_ten_norm = theano.function([rat], kernel_ten_norm)
for ratio in [2, 3, 4, 5, 6, 7, 8, 9]:
# getting the un-normalized kernel
kernel = bilinear_kernel_1D(ratio=ratio, normalize=False)
f = theano.function([], kernel)
kernel_1D = self.numerical_kernel_1D(ratio)
utt.assert_allclose(kernel_1D, f())
utt.assert_allclose(kernel_1D, f_ten(ratio))
# getting the normalized kernel
kernel = bilinear_kernel_1D(ratio=ratio, normalize=True)
f = theano.function([], kernel)
kernel_1D = kernel_1D / float(ratio)
utt.assert_allclose(kernel_1D, f())
utt.assert_allclose(kernel_1D, f_ten_norm(ratio))
Example 4: adam
def adam(lr, tparams, grads, inp, cost):
gshared = [theano.shared(p.get_value() * 0., name='%s_grad' % k) for k, p in tparams.items()]
gsup = [(gs, g) for gs, g in zip(gshared, grads)]
f_grad_shared = theano.function(inp, cost, updates=gsup)
lr0 = 0.0002
b1 = 0.1
b2 = 0.001
e = 1e-8
updates = []
i = theano.shared(numpy.float32(0.))
i_t = i + 1.
fix1 = 1. - b1**(i_t)
fix2 = 1. - b2**(i_t)
lr_t = lr0 * (tensor.sqrt(fix2) / fix1)
for p, g in zip(tparams.values(), gshared):
m = theano.shared(p.get_value() * 0.)
v = theano.shared(p.get_value() * 0.)
m_t = (b1 * g) + ((1. - b1) * m)
v_t = (b2 * tensor.sqr(g)) + ((1. - b2) * v)
g_t = m_t / (tensor.sqrt(v_t) + e)
p_t = p - (lr_t * g_t)
updates.append((m, m_t))
updates.append((v, v_t))
updates.append((p, p_t))
updates.append((i, i_t))
f_update = theano.function([lr], [], updates=updates, on_unused_input='ignore')
return f_grad_shared, f_update
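A hedged sketch of how the returned pair is typically driven in a training loop (minibatches is a hypothetical iterator, and inp is assumed to be a two-element input list). Note that this variant hard-codes lr0 = 0.0002 internally, so the value passed to f_update is ignored, which is why it compiles with on_unused_input='ignore':

f_grad_shared, f_update = adam(lr, tparams, grads, inp, cost)
for xb, yb in minibatches:              # hypothetical minibatch iterator
    batch_cost = f_grad_shared(xb, yb)  # forward/backward; gradients land in gshared
    f_update(0.0)                       # apply the Adam step; the lr argument is unused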
Example 5: test_copy_random_state
def test_copy_random_state(self):
class Graph():
def __init__(self, seed=123):
self.rng = RandomStreams(seed)
self.y = self.rng.uniform(size=(1,))
g1 = Graph(seed=123)
f1 = theano.function([], g1.y)
g2 = Graph(seed=987)
f2 = theano.function([], g2.y)
# By default, the two functions are out of sync.
v1 = f1()
v2 = f2()
def copy_random_state(g1, g2):
if isinstance(g1.rng, MRG_RandomStreams):
g2.rng.rstate = g1.rng.rstate
for (su1, su2) in zip(g1.rng.state_updates, g2.rng.state_updates):
su2[0].set_value(su1[0].get_value())
# We now copy the state of the theano random number generators.
copy_random_state(g1, g2)
v3 = f1()
v4 = f2()
assert numpy.allclose(v1, 0.72803009)
assert numpy.allclose(v2, 0.55056769)
assert numpy.allclose(v3, 0.59044123)
assert numpy.allclose(v4, 0.59044123)
Example 6: test_pooling_opt
def test_pooling_opt():
if not dnn.dnn_available(test_ctx_name):
raise SkipTest(dnn.dnn_available.msg)
x = T.fmatrix()
f = theano.function(
[x],
pool_2d(x, ds=(2, 2), mode='average_inc_pad',
ignore_border=True),
mode=mode_with_gpu)
assert any([isinstance(n.op, dnn.GpuDnnPool)
for n in f.maker.fgraph.toposort()])
f(numpy.zeros((10, 10), dtype='float32'))
f = theano.function(
[x],
T.grad(pool_2d(x, ds=(2, 2), mode='average_inc_pad',
ignore_border=True).sum(),
x),
mode=mode_with_gpu.including("cudnn"))
assert any([isinstance(n.op, dnn.GpuDnnPoolGrad)
for n in f.maker.fgraph.toposort()])
f(numpy.zeros((10, 10), dtype='float32'))
Example 7: test_flatten
def test_flatten():
m = theano.tensor.fmatrix()
f = theano.function([m], m.flatten(), mode=mode_with_gpu)
val = numpy.random.rand(10, 11).astype("float32")
res = f(val)
utt.assert_allclose(res, val.flatten())
assert res.shape == val.flatten().shape
assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()]
val = numpy.random.rand(10, 11).astype("float32")
res = f(val)
utt.assert_allclose(res, val.flatten())
assert res.shape == val.flatten().shape
assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()]
f = theano.function([m], m.flatten(ndim=2), mode=mode_with_gpu)
val = numpy.random.rand(10, 11).astype("float32")
res = f(val)
utt.assert_allclose(res, val)
assert res.shape == val.shape
assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()]
m = theano.tensor.tensor3()
f = theano.function([m], m.flatten(ndim=2), mode=mode_with_gpu)
val = numpy.random.rand(10, 11, 12).astype("float32")
res = f(val)
utt.assert_allclose(res, val.reshape(10, -1))
assert res.shape == val.reshape(10, -1).shape
assert GpuReshape in [type(node.op) for node in f.maker.fgraph.toposort()]
Example 8: time_linker
def time_linker(name, linker):
steps_a = 5
steps_b = 100
x = tensor.vector()
a = build_graph(x, steps_a)
b = build_graph(x, steps_b)
f_a = function([x], a,
mode=Mode(optimizer=None, linker=linker()),
#profile='f_a speed test %s'%name,
)
f_b = function([x], b,
mode=Mode(optimizer=None, linker=linker()),
#profile='f_b speed test %s'%name,
)
print(f_a([2.0, 3.0]))
t0 = time.time()
print(f_a([2.0, 3.0]))
t1 = time.time()
print(f_b([2.0, 3.0]))
t2 = time.time()
print(f_b([2.0, 3.0]))
t3 = time.time()
t_a = t1 - t0
t_b = t3 - t2
print("%s takes %f s/Kop" % (
name,
(1000 * (t_b - t_a) / (steps_b - steps_a))))
Example 9: test_pattern_output
def test_pattern_output(self):
#print (self.phi.W.get_value(borrow=True))
assert (self.phi.W.get_value(borrow=True).shape == (self.n,self.d))
self.phi.W.set_value(np.array([[1, 0]]).T)
assert (self.phi.W.get_value(borrow=True).shape == (self.n,self.d))
assert (self.psi.W.get_value(borrow=True).shape == (self.d,self.num_classes))
self.psi.W.set_value(np.array([[1]]))
assert (self.psi.W.get_value(borrow=True).shape == (self.d,self.num_classes))
# assert (self.beta.W.get_value(borrow=True).shape == (self.d, self.m))
# # [1,1] means that we will project the intermediate representation
# # onto both dimensions of the output representation
# self.beta.W.set_value ( np.array([[1,1]]) )
# assert (self.beta.W.get_value(borrow=True).shape == (self.d, self.m))
test_prediction = lasagne.layers.get_output(self.pattern, deterministic=True)
test_fn = theano.function([self.input_var], test_prediction)
X_hat = test_fn(self.X)
assert ( np.all(X_hat == self.S) )
# self.phi1 = test_prediction
# self.phi2 = lasagne.layers.get_output(self.pattern, self.side_var, deterministic=True)
beta_prediction = self.pattern.get_beta_output_for(self.input_var, self.side_var, deterministic=True)
beta_fn = theano.function([self.input_var, self.side_var], beta_prediction)
C_hat = beta_fn(self.X, self.CX)
assert ( np.all(C_hat == self.Cy) )
Example 10: test_opt_gpujoin_onlyajoin
def test_opt_gpujoin_onlyajoin():
# from a bug in normal sampling
_a = numpy.asarray([[1, 2], [3, 4]], dtype='float32')
_b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float32')
a = cuda.shared_constructor(_a)
b = cuda.shared_constructor(_b)
c = tensor.join(1, a, b)
f = theano.function([], c, mode=mode_with_gpu)
f()
graph_nodes = f.maker.fgraph.toposort()
assert isinstance(graph_nodes[-1].op, cuda.HostFromGpu)
assert isinstance(graph_nodes[-2].op, cuda.GpuJoin)
assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1))
# test mixed dtype
_b = numpy.asarray([[5, 6, 7], [8, 9, 10]], dtype='float64')
b = theano.tensor.constant(_b)
c = tensor.join(1, a, b)
f = theano.function([], c, mode=mode_with_gpu)
f()
graph_nodes = f.maker.fgraph.toposort()
assert isinstance(graph_nodes[-1].op, theano.tensor.Join)
assert numpy.all(f() == numpy.concatenate([_a, _b], axis=1))
Example 11: architecture
def architecture(self, cons, code_layer):
"""Build up the architecture by theano"""
for i in range(len(self.layers)-1):
# Initialize shared variables
init_w = cons*np.random.randn(self.layers[i], self.layers[i+1])
self.weights.append(th.shared(init_w))
init_bias = cons*np.random.randn(self.layers[i+1])
self.biases.append(th.shared(init_bias))
# Building architecture
a_before = T.dot(self.a_n[i], self.weights[i]) + \
self.biases[i].dimshuffle('x', 0)
a_next = self.activ(a_before)
self.a_n.append(a_next)
# help the optimization
for param in (self.weights+self.biases):
self.auxiliary.append(th.shared(np.zeros(param.get_value().shape)))
self.encode = th.function([self.x], self.a_n[code_layer])
self.decode = th.function([self.a_n[code_layer]], self.a_n[-1])
# Calculate the cost and gradients
Cost = (T.sum((self.a_n[-1]-self.y_hat)**2))/self.batch
params = self.weights + self.biases
grads = T.grad(Cost, params, disconnected_inputs='ignore')
# Update parameters
update_query = self.update(params, grads, self.auxiliary)
self.gradient_2 = th.function(inputs=[self.x, self.y_hat],
updates=update_query, outputs=Cost)
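A hedged usage sketch for the functions compiled above (net, X, and the argument values are illustrative; the instance is assumed to define self.x, self.y_hat, and self.layers as the method expects):

net.architecture(cons=0.1, code_layer=2)  # illustrative constructor arguments
cost = net.gradient_2(X, X)               # autoencoder step: target equals input
codes = net.encode(X)                     # activations at the code layer
recon = net.decode(codes)                 # reconstruction from those codes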
Example 12: test_alloc_memset_0
def test_alloc_memset_0():
i = tensor.iscalar()
z = numpy.zeros((1,), dtype='float32')
o = numpy.ones((1,), dtype='float32')
ones = numpy.ones((2,), dtype='float32')
# Test with 0
a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(z)), i)
f = theano.function([i], a, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, basic_ops.GpuAlloc) and topo[0].op.memset_0
assert (numpy.asarray(f(6)) == 0).all()
# Test with 1
a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(o)), i)
f = theano.function([i], a, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, basic_ops.GpuAlloc)
assert not topo[0].op.memset_0
assert (numpy.asarray(f(6)) == 1).all()
# Test with 1, 1
a = basic_ops.gpu_alloc(cuda.gpu_from_host(tensor.constant(ones)), i)
f = theano.function([i], a, mode=mode_with_gpu)
topo = f.maker.fgraph.toposort()
assert len(topo) == 1
assert isinstance(topo[0].op, basic_ops.GpuAlloc)
assert not topo[0].op.memset_0
assert (numpy.asarray(f(2)) == 1).all()
Example 13: test_local_assert_no_cpu_op
def test_local_assert_no_cpu_op():
numpy.random.seed(1)
m = numpy.random.uniform(-1, 1, (10, 10)).astype("float32")
ms = cuda.shared_constructor(m, name="m_shared")
out = theano.tensor.tanh(ms).dot(ms.T)
mode_local_assert = mode_with_gpu.including("assert_no_cpu_op")
mode_local_assert = mode_local_assert.excluding("local_gpu_elemwise_0")
mode_local_assert = mode_local_assert.excluding("local_gpu_elemwise_1")
old = config.assert_no_cpu_op
old2 = config.on_opt_error
# If the flag is raise
try:
config.assert_no_cpu_op = 'raise'
config.on_opt_error = 'ignore'
assert_raises(AssertionError, theano.function,
[], out, mode=mode_local_assert)
finally:
config.assert_no_cpu_op = old
config.on_opt_error = old2
# If the flag is ignore
try:
config.assert_no_cpu_op = 'ignore'
theano.function([], out, mode=mode_local_assert)
finally:
config.assert_no_cpu_op = old
Example 14: test_in_transit_circ
def test_in_transit_circ():
t = np.linspace(-20, 20, 1000)
m_planet = np.array([0.3, 0.5])
m_star = 1.45
r_star = 1.5
orbit = KeplerianOrbit(
m_star=m_star,
r_star=r_star,
t0=np.array([0.5, 17.4]),
period=np.array([10.0, 5.3]),
ecc=np.array([0.0, 0.0]),
omega=np.array([0.0, 0.0]),
m_planet=m_planet,
)
orbit_circ = KeplerianOrbit(
m_star=m_star,
r_star=r_star,
t0=np.array([0.5, 17.4]),
period=np.array([10.0, 5.3]),
m_planet=m_planet,
)
r_pl = np.array([0.1, 0.03])
inds = theano.function([], orbit.in_transit(t, r=r_pl))()
inds_circ = theano.function([], orbit_circ.in_transit(t, r=r_pl))()
assert np.all(inds == inds_circ)
Example 15: adadelta
def adadelta(lr, tparams, grads, x, mask, y, cost):
zipped_grads = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_grad' % k)
for k, p in tparams.items()]
running_up2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rup2' % k)
for k, p in tparams.items()]
running_grads2 = [theano.shared(p.get_value() * numpy_floatX(0.),
name='%s_rgrad2' % k)
for k, p in tparams.items()]
zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
for rg2, g in zip(running_grads2, grads)]
f_grad_shared = theano.function([x, mask, y], cost, updates=zgup + rg2up,
name='adadelta_f_grad_shared')
updir = [-T.sqrt(ru2 + 1e-6) / T.sqrt(rg2 + 1e-6) * zg
for zg, ru2, rg2 in zip(zipped_grads,
running_up2,
running_grads2)]
ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
for ru2, ud in zip(running_up2, updir)]
# parameter update pairs: apply the computed step to each parameter
param_up = [(p, p + ud) for p, ud in zip(tparams.values(), updir)]
f_update = theano.function([lr], [], updates=ru2up + param_up,
on_unused_input='ignore',
name='adadelta_f_update')
return f_grad_shared, f_update