本文整理汇总了Python中theano.function方法的典型用法代码示例。如果您正苦于以下问题:Python theano.function方法的具体用法?Python theano.function怎么用?Python theano.function使用的例子?那么,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块theano的其他用法示例。
在下文中一共展示了theano.function方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: batch_normalization
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    '''Normalize x with the given mean/var and scale/shift it by gamma/beta.

    Dispatches to Theano's ``batch_normalization_test`` when available and
    falls back to the legacy implementation otherwise.
    '''
    # Older Theano builds lack batch_normalization_test; use the legacy path.
    # TODO remove this fallback once such Theano versions are deprecated.
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if mean.ndim == 1:
        # TensorFlow-style default: normalize along every axis but the last.
        axes = range(x.ndim - 1)
    else:
        # Normalize over each axis that is broadcastable in `mean`.
        axes = [axis for axis in range(x.ndim) if mean.broadcastable[axis]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, axes, epsilon)
# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated
示例2: test_cmrnorm
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def test_cmrnorm():
    """Smoke-test cmrnorm forward and backward in DEBUG_MODE, then verify
    gradients numerically."""
    from theano.tests.unittest_tools import verify_grad

    xtest = np.random.rand(2, 8, 3, 4)
    xtest = xtest.astype(theano.config.floatX)

    x = T.tensor4('x', dtype=theano.config.floatX)
    x.tag.test_value = xtest

    y = cmrnorm(x, input_shape=xtest.shape[1:])
    # Forward pass under DEBUG_MODE catches graph/shape errors early.
    f = theano.function([x], y, mode='DEBUG_MODE')
    f(xtest)

    # Backward pass: compile the gradient and run it once.
    f = theano.function([x], gpu_from_host(T.grad(T.sum(y), wrt=x)),
                        mode='DEBUG_MODE')
    f(xtest)
    theano.printing.debugprint(f)

    # Numerical gradient check.
    T.verify_grad(lambda x: cmrnorm(x, input_shape=xtest.shape[1:]),
                  (xtest,),
                  rng=np.random.RandomState(0))

    # Fixed: the original used a Python 2 print statement, which is a
    # SyntaxError under Python 3; the call form works on both.
    print('cmrnorm passed')
示例3: build_cost
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def build_cost(logits, targets):
    """
    Build a classification cost function.

    Returns a (mean cross-entropy cost, accuracy) pair for the given
    logits and integer targets.
    """
    # Clip gradients flowing back from the cost into the logits.
    clip = FLAGS.clipping_max_value
    logits = theano.gradient.grad_clip(logits, -1. * clip, clip)

    probs = T.nnet.softmax(logits)
    cost = T.nnet.categorical_crossentropy(probs, targets).mean()

    predictions = T.argmax(logits, axis=1)
    errors = T.cast(T.neq(predictions, targets), theano.config.floatX)
    acc = 1. - T.mean(errors)
    return cost, acc
示例4: test_speed
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def test_speed(self):
    """Time a full forward + backward pass of the stack over 10 batches."""
    top = self.stack.final_stack[-self.batch_size:]
    cost = self._make_cost(top)
    error_signal = T.grad(cost, top)

    # Build automatic backprop function.
    self.stack.make_backprop_scan(error_signal, [self.y],
                                  compute_embedding_gradients=False)
    # Fixed: wrap .values() in list() — on Python 3 dict.values() is a
    # view and cannot be concatenated to a list with `+`.
    f = theano.function(
        [self.X, self.transitions, self.y],
        [cost] + list(self.stack.gradients.values()),
        updates=self.stack.scan_updates + self.stack.bscan_updates)

    theano.printing.debugprint(f.maker.fgraph.outputs[1])

    for _ in range(10):
        self._run_batch(f)
示例5: get_corrupted_input
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def get_corrupted_input(self, input, corruption_level):
    """Randomly zero out a ``corruption_level`` fraction of ``input``.

    Each entry is kept with probability ``1 - corruption_level`` and
    zeroed otherwise, using a binomial mask drawn from
    ``self.theano_rng``.

    Notes on the binomial call:
      - ``size`` is the shape of the random array to draw,
      - ``n=1`` means a single trial per entry (so values are 0 or 1),
      - ``p`` is the per-entry keep probability.
    The binomial returns int64 by default, and int64 multiplied by the
    floatX input would upcast to float64; we therefore force
    ``dtype=floatX``. This is safe because the mask only contains 0s and
    1s, and it keeps everything in float32 so the GPU (which currently
    supports only float32) can be used.
    """
    mask = self.theano_rng.binomial(size=input.shape,
                                    n=1,
                                    p=1 - corruption_level,
                                    dtype=theano.config.floatX)
    return mask * input
示例6: save
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def save(self, training_losses, save_dir, step):
    '''Persist the current network parameters under ``save_dir`` and
    refresh the ``weights.npy`` symlink so the training function can
    always load the latest model. Also writes ``training_losses`` to
    ``loss.<step>.txt``, one value per line.'''
    weights_path = os.path.join(save_dir, 'weights.%d' % (step))
    self.net.save(weights_path)

    # Refresh the weights.npy symlink: drop any stale link first.
    link_path = os.path.join(save_dir, 'weights.npy')
    if os.path.lexists(link_path):
        os.remove(link_path)
    # Point the symlink at the latest network params.
    os.symlink("%s.npy" % os.path.abspath(weights_path), link_path)

    # Write the losses.
    loss_path = os.path.join(save_dir, 'loss.%d.txt' % step)
    with open(loss_path, 'w') as f:
        f.write('\n'.join([str(l) for l in training_losses]))
示例7: return_network
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def return_network(self):
    '''Collect the trained parameters of every hidden layer.

    Returns three parallel lists over ``self.dA_layers``: the weight
    matrices, the bias vectors, and the reconstruction (prime) bias
    vectors of the final network after training.'''
    weights_all_layer = [layer.W.get_value(borrow=True)
                         for layer in self.dA_layers]
    bias_all_layer = [layer.b.get_value(borrow=True)
                      for layer in self.dA_layers]
    bias_prime_all_layer = [layer.b_prime.get_value(borrow=True)
                            for layer in self.dA_layers]
    return weights_all_layer, bias_all_layer, bias_prime_all_layer
示例8: test_softmax
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def test_softmax():
    """Check keras' softmax against a plain numpy reference implementation."""
    from keras.activations import softmax as s

    def reference_softmax(values):
        # Subtract the max for numerical stability, then normalize exp().
        shifted = numpy.array(values) - max(values)
        exps = numpy.exp(shifted)
        return list(exps / numpy.sum(exps))

    x = T.vector()
    f = theano.function([x], s(x))

    test_values = get_standard_values()
    result = f(test_values)
    expected = reference_softmax(test_values)
    print(str(result))
    print(str(expected))
    list_assert_equal(result, expected)
示例9: test_tanh
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def test_tanh():
    """Check keras' tanh activation against math.tanh element-wise."""
    from keras.activations import tanh as t

    test_values = get_standard_values()

    inp = T.vector()
    f = theano.function([inp], t(inp))

    result = f(test_values)
    expected = [math.tanh(v) for v in test_values]
    print(result)
    print(expected)
    list_assert_equal(result, expected)
示例10: adadelta
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def adadelta(lr, tparams, grads, inp, cost):
    """Compile an Adadelta update function.

    Args:
        lr: learning-rate symbol (ignored by Adadelta itself; accepted so
            the signature matches the other optimizers).
        tparams: ordered dict of name -> shared parameter.
        grads: gradient expressions aligned with tparams.
        inp: list of input symbols for the compiled function.
        cost: cost expression returned by the compiled function.

    Returns:
        A theano function that computes the cost and applies the updates.
    """
    running_up2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                 name='%s_rup2' % k)
                   for k, p in tparams.iteritems()]
    running_grads2 = [theano.shared(p.get_value() * numpy.float32(0.),
                                    name='%s_rgrad2' % k)
                      for k, p in tparams.iteritems()]

    # Exponential moving average of squared gradients.
    rg2_new = [0.95 * rg2 + 0.05 * (g ** 2)
               for rg2, g in zip(running_grads2, grads)]
    rg2up = [(rg2, new) for rg2, new in zip(running_grads2, rg2_new)]

    # Adadelta step: ratio of RMS(update) to RMS(gradient) scales each grad.
    updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * g
             for g, ru2, rg2 in zip(grads, running_up2, rg2_new)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
             for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]

    inp += [lr]
    f_update = theano.function(inp, cost, updates=rg2up + ru2up + param_up,
                               on_unused_input='ignore', profile=profile)
    return f_update
示例11: debugging_adadelta
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def debugging_adadelta(lr, tparams, grads, inp, cost):
    """Adadelta split into two compiled functions for debugging.

    ``f_grad_shared`` computes the cost and stores the gradients and
    their running statistics in shared variables; ``f_update`` then
    applies the parameter updates.
    """
    def shared_zeros(suffix):
        # One zero-initialized shared variable per parameter.
        return [theano.shared(p.get_value() * numpy.float32(0.),
                              name='%s_%s' % (k, suffix))
                for k, p in tparams.iteritems()]

    zipped_grads = shared_zeros('grad')
    running_up2 = shared_zeros('rup2')
    running_grads2 = shared_zeros('rgrad2')

    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
             for rg2, g in zip(running_grads2, grads)]
    f_grad_shared = theano.function(inp, cost, updates=zgup + rg2up,
                                    profile=profile)

    # Adadelta step computed from the stored gradients and statistics.
    updir = [-tensor.sqrt(ru2 + 1e-6) / tensor.sqrt(rg2 + 1e-6) * zg
             for zg, ru2, rg2 in zip(zipped_grads, running_up2,
                                     running_grads2)]
    ru2up = [(ru2, 0.95 * ru2 + 0.05 * (ud ** 2))
             for ru2, ud in zip(running_up2, updir)]
    param_up = [(p, p + ud) for p, ud in zip(itemlist(tparams), updir)]
    f_update = theano.function([lr], [], updates=ru2up + param_up,
                               on_unused_input='ignore', profile=profile)
    return f_grad_shared, f_update
示例12: rmsprop
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def rmsprop(lr, tparams, grads, inp, cost):
    """RMSProp with momentum, compiled as two theano functions.

    ``f_grad_shared`` computes the cost and accumulates gradient
    statistics in shared variables; ``f_update`` then applies the
    momentum-smoothed parameter updates scaled by the learning rate
    symbol ``lr``.
    """
    def shared_zeros(suffix):
        # One zero-initialized shared variable per parameter.
        return [theano.shared(p.get_value() * numpy.float32(0.),
                              name='%s_%s' % (k, suffix))
                for k, p in tparams.iteritems()]

    zipped_grads = shared_zeros('grad')
    running_grads = shared_zeros('rgrad')
    running_grads2 = shared_zeros('rgrad2')

    zgup = [(zg, g) for zg, g in zip(zipped_grads, grads)]
    rgup = [(rg, 0.95 * rg + 0.05 * g)
            for rg, g in zip(running_grads, grads)]
    rg2up = [(rg2, 0.95 * rg2 + 0.05 * (g ** 2))
             for rg2, g in zip(running_grads2, grads)]
    f_grad_shared = theano.function(inp, cost, updates=zgup + rgup + rg2up,
                                    profile=profile)

    # Momentum term: grad scaled by 1/sqrt(variance estimate).
    updir = shared_zeros('updir')
    updir_new = [(ud, 0.9 * ud - 1e-4 * zg / tensor.sqrt(rg2 - rg ** 2 + 1e-4))
                 for ud, zg, rg, rg2 in zip(updir, zipped_grads,
                                            running_grads, running_grads2)]
    param_up = [(p, p + udn[1])
                for p, udn in zip(itemlist(tparams), updir_new)]
    f_update = theano.function([lr], [], updates=updir_new + param_up,
                               on_unused_input='ignore', profile=profile)
    return f_grad_shared, f_update
示例13: modelScore
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def modelScore(self, tag_ids, scores, s_len):
    # {{{
    """ATTENTION: this function is symbolic (Theano) programming.

    Score one fixed sentence label sequence under the model.

    Args:
        scores: emission score matrix, the output of the model.
        tag_ids: numpy-style array representing one sentence's labels.
        s_len: scalar, the true sentence length. Labels are padded to the
            maximum sentence length, so only the first ``s_len`` entries
            are the original label sequence.

    Returns:
        A scalar symbolic expression: the path score divided by
        ``s_len`` (length-normalized so T.exp(score) cannot overflow).
    """
    # {{{
    n_tags = self.output_dim
    trans = self.transitions

    # Emission contribution: score of the gold tag at each position.
    emit_score = scores[T.arange(s_len), tag_ids].sum()

    # Transition contribution, with virtual begin (n_tags) and
    # end (n_tags + 1) tags padding the sequence.
    begin_id = theano.shared(value=np.array([n_tags], dtype=np.int32))
    end_id = theano.shared(value=np.array([n_tags + 1], dtype=np.int32))
    padded_tag_ids = T.concatenate([begin_id, tag_ids, end_id], axis=0)
    trans_score = trans[
        padded_tag_ids[T.arange(s_len + 1)],
        padded_tag_ids[T.arange(s_len + 1) + 1]
    ].sum()

    # Normalize by sentence length to keep T.exp(path score) finite.
    return (emit_score + trans_score) / s_len
    # }}}
    # }}}
示例14: arange
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def arange(start, stop=None, step=1, dtype='int32'):
    '''Create a 1-D tensor containing an integer sequence.

    Follows Theano's own ``arange`` convention: when only one argument is
    given, it is interpreted as ``stop``. The returned tensor defaults to
    dtype 'int32' to match TensorFlow's default.
    '''
    return T.arange(start, stop=stop, step=step, dtype=dtype)
示例15: __init__
# 需要导入模块: import theano [as 别名]
# 或者: from theano import function [as 别名]
def __init__(self, inputs, outputs, updates=None, **kwargs):
    """Compile a theano function wrapper.

    Args:
        inputs: list of input variables.
        outputs: output expression(s).
        updates: iterable of (variable, new_value) pairs; duplicate
            variables are dropped, keeping the first occurrence of each.
        **kwargs: forwarded to ``theano.function``.
    """
    # Fixed: `updates=[]` was a mutable default argument (shared across
    # calls); use None as the sentinel instead.
    if updates is None:
        updates = []
    # Deduplicate update targets, keeping the first new value for each
    # variable so theano.function never sees conflicting updates.
    unique_variables_to_update = {}
    for v, nv in updates:
        if v not in unique_variables_to_update:
            unique_variables_to_update[v] = nv
    # list(...) so this also works on Python 3, where items() is a view.
    updates = list(unique_variables_to_update.items())
    self.function = theano.function(inputs, outputs, updates=updates,
                                    allow_input_downcast=True,
                                    on_unused_input='ignore',
                                    **kwargs)