This article collects typical usage examples of the theano.pp function in Python. If you have been wondering what pp does, how to call it, or what real uses of it look like, the curated examples below should help. Fifteen code examples are shown, sorted by popularity by default.
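Before the project examples, here is a minimal sketch of what pp does: it renders a symbolic Theano expression as a human-readable infix string, which is the capability every example below relies on.

import theano
import theano.tensor as T

x = T.dscalar('x')      # a named float64 scalar variable
expr = (x + 1) ** 2
print(theano.pp(expr))  # prints something like '((x + 1) ** 2)'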
Example 1: custom_svrg2
from collections import OrderedDict

import theano
import theano.tensor as T


def custom_svrg2(loss, params, m, learning_rate=0.01, objective=None, data=None, target=None, getpred=None):
    theano.pp(loss)  # render the symbolic loss for inspection

    grads = theano.grad(loss, params)
    n = data.shape[0]
    updates = OrderedDict()
    rng = T.shared_randomstreams.RandomStreams(seed=149)

    for param, grad in zip(params, grads):
        mu = grad / n  # full-gradient correction term of the SVRG update

        def oneStep(w):
            # draw one random sample index
            t = rng.choice(size=(1,), a=n)

            # gradient of the sampled loss at the snapshot parameters
            loss_part_tilde = objective(getpred(data[t], param), target[t]).mean()
            g_tilde = theano.grad(loss_part_tilde, param)

            # gradient of the sampled loss at the current inner iterate w
            loss_part = objective(getpred(data[t], w), target[t]).mean()
            g = theano.grad(loss_part, w)

            # variance-reduced gradient step
            return w - learning_rate * (g - g_tilde + mu)

        w_tilde, scan_updates = theano.scan(fn=oneStep, outputs_info=param, n_steps=m)
        updates.update(scan_updates)
        updates[param] = w_tilde[-1]

    return updates
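The per-parameter inner loop above is Theano's standard scan-with-updates pattern: run the step function m times, merge scan's own updates, and commit the final iterate back to the shared variable. A minimal, self-contained sketch of that pattern (the quadratic objective, step size, and step count are illustrative, not from the source):

import theano
import theano.tensor as T
from collections import OrderedDict

w0 = theano.shared(5.0, name='w')

def oneStep(w):
    return w - 0.1 * (2 * w)  # one gradient step on f(w) = w ** 2

w_seq, scan_updates = theano.scan(fn=oneStep, outputs_info=w0, n_steps=10)
updates = OrderedDict(scan_updates)
updates[w0] = w_seq[-1]       # commit the last inner iterate, as custom_svrg2 does

train = theano.function([], w_seq[-1], updates=updates)
print(train())                # w shrinks toward the minimum at 0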
Example 2: compute_gradients
def compute_gradients(self):
    # maybe doesn't need to be a class variable
    self.grads = T.grad(self.cost, wrt=list(self.tparams.values()))

    # lrate: learning rate
    self.f_populate_gradients, self.f_update_params = self.optimizer()

    # =====================================================================
    # print out the computational graph and make an image of it too
    if self.debug and False:  # change to just self.debug to re-enable these dumps
        # util.colorprint("Following is the graph of the final hidden layer:", "blue")
        # final_activation_fn = theano.function([self.input], final_activation)
        # theano.printing.debugprint(final_activation_fn.maker.fgraph.outputs[0])
        # util.colorprint("Also, saving png of computational graph:", "blue")
        # theano.printing.pydotprint(final_activation_fn,
        #                            outfile="output/lmlp_final_act_viz.png",
        #                            compact=True,
        #                            scan_graphs=True,
        #                            var_with_name_simple=True)
        util.colorprint("Following is the graph of the first of the derivatives:", "blue")
        final_grad_fn = theano.function([self.input, self.y], self.grads[0])
        theano.printing.debugprint(final_grad_fn.maker.fgraph.outputs[0])

        util.colorprint("Yay colorprinted:", "blue")
        print(theano.pp(self.final_activation))

        util.colorprint("Also, saving png of computational graph:", "blue")
        theano.printing.pydotprint(final_grad_fn,
                                   outfile="output/lmlp_final_grad_viz.png",
                                   compact=True,
                                   scan_graphs=True,
                                   var_with_name_simple=True)
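For reference, this example exercises all three of Theano's graph-inspection tools: theano.pp returns a compact infix string, theano.printing.debugprint writes an indented node-by-node tree to stdout, and theano.printing.pydotprint renders the graph to an image file (the last one requires pydot and graphviz to be installed).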
Example 3: bsgd1
def bsgd1(nn, data, name='sgd', lr=0.022, alpha=0.3, batch_size=500, epochs=10):
    train_set_x, train_set_y = data[0]
    valid_set_x, valid_set_y = data[1]
    test_set_x, test_set_y = data[2]

    # The next lines reference variables that are undefined in this excerpt
    # (valid_y_numpy / test_y_numpy were evidently derived from a y_numpy that
    # is not shown), so they are left disabled:
    # valid_y_numpy = y_numpy[0]
    # test_y_numpy = y_numpy[1]
    # test_y_numpy = map_48_to_39(test_y_numpy)
    # valid_y_numpy = map_48_to_39(valid_y_numpy)
    # print(test_y_numpy)

    num_samples = train_set_x.get_value(borrow=True).shape[0]
    num_batches = num_samples // batch_size

    layers = nn.layers
    x = T.matrix('x')
    y = T.ivector('y')
    y_eval = T.ivector('y_eval')

    cost = nn.cost(x, y)
    accuracy = nn.calcAccuracy(x, y)
    params = nn.params
    delta_params = nn.delta_params
    print(theano.pp(cost))

    p_grads = [T.grad(cost=cost, wrt=p) for p in params]
    print(p_grads)

    # gradient descent with momentum
    updates = OrderedDict()
    for dp, gp in zip(delta_params, p_grads):
        updates[dp] = dp * alpha - gp * lr
    for p, dp in zip(params, delta_params):
        updates[p] = p + updates[dp]
    # plain SGD alternative:
    # updates = [(p, p - lr * gp) for p, gp in zip(params, p_grads)]

    index = T.ivector('index')
    batch_sgd_train = theano.function(inputs=[index], outputs=[cost, accuracy], updates=updates,
                                      givens={x: train_set_x[index], y: train_set_y[index]})
    batch_sgd_valid = theano.function(inputs=[], outputs=[nn.calcAccuracy(x, y), nn.calcAccuracyTimit(x, y)],
                                      givens={x: valid_set_x, y: valid_set_y})
    batch_sgd_test = theano.function(inputs=[], outputs=nn.calcAccuracy(x, y),
                                     givens={x: test_set_x, y: test_set_y})

    indices = np.arange(num_samples, dtype=np.dtype('int32'))
    for n in range(epochs):
        np.random.shuffle(indices)
        for i in range(num_batches):
            batch = indices[i * batch_size: (i + 1) * batch_size]
            batch_sgd_train(batch)
        print("epoch:", n, " validation accuracy:", batch_sgd_valid())
    print(batch_sgd_test())
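The two update dictionaries above implement classical momentum: first the velocity update v <- alpha * v - lr * g, then the parameter update p <- p + v, with the current velocity carried between batches in the delta_params shared variables.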
Example 4: test_examples_4
def test_examples_4(self):
    from theano import pp
    x = T.dscalar('x')
    y = x ** 2
    gy = T.grad(y, x)
    # the gradient prior to any graph optimization:
    assert pp(gy) == '((fill((x ** 2), 1.0) * 2) * (x ** (2 - 1)))'
    f = function([x], gy)
    assert f(4) == array(8.0)
    assert f(94.2) == array(188.40000000000001)
Example 5: derivative
def derivative():
    x = T.dscalar('x')
    y = x ** 2
    gy = T.grad(y, x)
    print(pp(gy))  # unoptimized symbolic gradient
    f = function([x], gy)
    print(f(4))  # 8.0
    print(np.allclose(f(94.2), 94.2 * 2))  # True
    print(pp(f.maker.fgraph.outputs[0]))  # optimized graph: '(TensorConstant{2.0} * x)'
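In the unoptimized string from Example 4, fill((x ** 2), 1.0) denotes an array of ones with the shape of x ** 2 (the seed of the backward pass), so the expression reads as 1.0 * 2 * x ** (2 - 1). After compilation the graph rewrites fold this down, which is why pp on the compiled output prints just the constant 2 times x.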
Example 6: gradient
def gradient(a):
    x = T.dscalar('x')
    y = x ** 2
    z = 1 / x
    gy = T.grad(y, x)  # dy/dx = 2 * x
    gz = T.grad(z, x)  # dz/dx = -1 / x ** 2
    print(th.pp(gy))
    print(th.pp(gz))
    f = th.function([x], gy)
    g = th.function([x], gz)
    print(f(a))
    print(g(a))
Example 7: ppth
def ppth(obj, fancy=True, graph=False, fid="/Users/keithd/temp/pydot_graph", fmt="pdf"):
    if graph:
        theano.printing.pydotprint(obj, outfile=fid, format=fmt)
    elif fancy:
        theano.printing.debugprint(obj)
    else:
        return theano.pp(obj)
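A hypothetical call pattern for ppth (the cost expression here is illustrative, not from the source):

import theano
import theano.tensor as T

x = T.dvector('x')
cost = T.sum(x ** 2)

print(ppth(cost, fancy=False))  # compact infix string from theano.pp
ppth(cost)                      # default: debugprint tree to stdout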
Example 8: cached_function
def cached_function(inputs, outputs):
    import theano
    with Message("Hashing theano fn"):
        if hasattr(outputs, "__len__"):
            hash_content = tuple(map(theano.pp, outputs))
        else:
            hash_content = theano.pp(outputs)
        # [:-1] strips the trailing 'L' that hex() appends to Python 2 longs
        cache_key = hex(hash(hash_content) & (2 ** 64 - 1))[:-1]
        cache_dir = Path("~/.hierctrl_cache")
        cache_dir = cache_dir.expanduser()
        cache_dir.mkdir_p()
        cache_file = cache_dir / ("%s.pkl" % cache_key)
        if cache_file.exists():
            with Message("unpickling"):
                with open(cache_file, "rb") as f:
                    try:
                        return pickle.load(f)
                    except Exception:
                        pass
        with Message("compiling"):
            fun = compile_function(inputs, outputs)
        with Message("pickling"):
            with open(cache_file, "wb") as f:
                pickle.dump(fun, f, protocol=pickle.HIGHEST_PROTOCOL)
        return fun
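A note on the design: the cache key is derived from theano.pp's string rendering of the output graph, so two calls that build structurally identical expressions map to the same pickle file. Be aware that Python 3 randomizes hash() for strings per process, so this scheme only yields stable keys under Python 2 or with a fixed PYTHONHASHSEED; a content digest such as hashlib.md5 over the pp string would be a portable alternative.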
Example 9: bsgd
def bsgd(nn, data, name='sgd', lr=0.03, epochs=120, batch_size=500, momentum=0):
    train_set_x, train_set_y = data[0]
    valid_set_x, valid_set_y = data[1]
    test_set_x, test_set_y = data[2]

    num_samples = train_set_x.get_value(borrow=True).shape[0]
    num_batches = num_samples // batch_size

    layers = nn.layers
    x = T.matrix('x')
    y = T.ivector('y')

    cost = nn.cost(x, y)
    accuracy = nn.calcAccuracy(x, y)
    params = nn.params
    print(theano.pp(cost))

    p_grads = [T.grad(cost=cost, wrt=p) for p in params]
    print(p_grads)

    # plain SGD: step each parameter against its gradient
    updates = [(p, p - lr * gp) for p, gp in zip(nn.params, p_grads)]

    index = T.ivector('index')
    batch_sgd_train = theano.function(inputs=[index], outputs=[cost, accuracy], updates=updates,
                                      givens={x: train_set_x[index], y: train_set_y[index]})
    batch_sgd_valid = theano.function(inputs=[], outputs=nn.calcAccuracy(x, y),
                                      givens={x: valid_set_x, y: valid_set_y})
    batch_sgd_test = theano.function(inputs=[], outputs=nn.calcAccuracy(x, y),
                                     givens={x: test_set_x, y: test_set_y})

    indices = np.arange(num_samples, dtype=np.dtype('int32'))
    for n in range(epochs):
        np.random.shuffle(indices)
        for nb in range(num_batches):
            batch = indices[nb * batch_size: (nb + 1) * batch_size]
            batch_sgd_train(batch)
        print("Validation Accuracy:", batch_sgd_valid())
    print("Final Test Accuracy:", batch_sgd_test())
Example 10: getp
def getp(si, tli, tri, tai, x_tm1, e, l, Wl, Wr, Wv):
    # append self.x0 so the index lookups below can reach it
    xx = T.concatenate([x_tm1, [self.x0]], axis=0)
    xsi = T.dot(e[si], Wv)
    xsi = xsi[0]

    # accumulate contributions indexed by tli, each weighted by l
    pl, pl_ = theano.scan(lambda j, Wl, x, l, tli: T.dot(x[tli[j]], Wl[j]) * l[tli[j]],
                          sequences=T.arange(tli.shape[0]), non_sequences=[Wl, xx, l, tli])
    xsi += T.sum(pl, axis=0)[0]

    # same pattern for the tri indices
    pr, pr_ = theano.scan(lambda j, Wr, x, l, tri: T.dot(x[tri[j]], Wr[j]) * l[tri[j]],
                          sequences=T.arange(tri.shape[0]), non_sequences=[Wr, xx, l, tri])
    xsi += T.sum(pr, axis=0)[0]

    # tai contributions carry no weight matrix
    pa, pa_ = theano.scan(lambda j, x, l, tai: x[tai[j]] * l[tai[j]],
                          sequences=T.arange(tai.shape[0]), non_sequences=[xx, l, tai])
    xsi += T.sum(pa, axis=0)[0]

    xsi /= l[si]

    # pp() returns a string; wrap it in print() to actually see the graphs
    print(pp(xsi))
    print(pp(x_tm1))

    x_t = T.set_subtensor(x_tm1[si], T.tanh(xsi))
    return x_t
Example 11: main
def main():
    x = T.dscalar('x')
    y = T.dscalar('y')
    z = x + y
    f = function([x, y], z)

    xm = T.dmatrix('x')
    ym = T.dmatrix('y')
    fm = function([xm, ym], xm * ym)
    print(pp(xm * ym + 4 / ym))

    print(f(2, 3), fm([[1, 2], [3, 4]], [[5, 6], [7, 8]]))

    xv = T.vector()
    yv = T.vector()
    fv = function([xv, yv], xv ** 2 + yv ** 2 + 2 * xv * yv)
    print(fv([1, 2], [3, 4]))  # elementwise (xv + yv) ** 2 -> [16. 36.]
Example 12: test_subtensor
def test_subtensor():
    x = theano.tensor.dvector()
    y = x[1]
    assert theano.pp(y) == "<TensorType(float64, vector)>[Constant{1}]"
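Because the dvector here is created without a name, pp falls back to printing the variable's type, which is why the expected string contains <TensorType(float64, vector)> rather than a name; had it been created as theano.tensor.dvector('x'), the same expression should pretty-print as "x[Constant{1}]".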
Example 13: f
# coding: utf-8
import numpy
import theano
import theano.tensor as T

x = T.dscalar('x')
y = (T.sqrt(x) + 1) ** 3
dy = T.grad(cost=y, wrt=x)

f = theano.function(inputs=[x], outputs=dy)
print(theano.pp(f.maker.fgraph.outputs[0]))
print(f(2))
print(f(3))
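By the chain rule, d/dx (sqrt(x) + 1) ** 3 = 3 * (sqrt(x) + 1) ** 2 / (2 * sqrt(x)), so f(2) and f(3) should print roughly 6.1820 and 6.4641; the pp line shows the compiled graph Theano built for exactly this expression.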
Example 14: function
import numpy
import theano.tensor as T
from theano import function
from theano import pp
from theano import In
from theano import shared

x = numpy.asarray([[1, 2], [3, 4], [5, 6]])
print(x.shape)  # (3, 2)

x = T.dscalar()
y = T.dscalar()
w = T.dscalar()
z = (x + y) * w
g = 10
f = function([x, In(y, value=1), In(w, value=2, name='w_by_name')], z)
print(f(2, 3, w_by_name=g))                 # (2 + 3) * 10 = 50.0
print(numpy.allclose(f(16.3, 12.1), 56.8))  # (16.3 + 12.1) * 2 -> True
print(pp(z))

a = T.vector()
b = T.vector()
target = a ** 2 + b ** 2 + 2 * a * b
f1 = function([a, b], target)
print(f1([1, 2], [4, 5]))  # elementwise (a + b) ** 2 -> [25. 49.]

x = T.dmatrix()
s = 1 / (1 + T.exp(-x))
logistic = function([x], s)
m = [[1, 2], [3, 4], [5, 6]]
print(logistic(m))

s2 = (1 + T.tanh(x / 2)) / 2
logistic2 = function([x], s2)
Example 15: TensorType
# >>> x.type
# TensorType(float64, scalar)
# >>> T.dscalar
# TensorType(float64, scalar)
# >>> x.type is T.dscalar
# True
# By calling T.dscalar with a string argument, you create a Variable representing
# a floating-point scalar quantity with the given name.
x = T.dmatrix('x')
y = T.dmatrix('y')
z = x + y
f = function([x, y], z)
print(f([[1, 2], [3, 4]], [[10, 20], [30, 40]]))  # [[11. 22.] [33. 44.]]
print(pp(z))  # '(x + y)'

a = T.vector()          # declare variable
out = a + a ** 10       # build symbolic expression
f = function([a], out)  # compile function
print(f([0, 1, 2]))     # [0. 2. 1026.]

x = T.dmatrix('x')
s = 1 / (1 + T.exp(-x))
logistic = function([x], s)
print(logistic([[0, 1], [-1, -2]]))

s2 = (1 + T.tanh(x / 2)) / 2
logistic2 = function([x], s2)
print(logistic2([[0, 1], [-1, -2]]))
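The two logistic formulations agree because of the identity (1 + tanh(x / 2)) / 2 = 1 / (1 + exp(-x)). A quick check, assuming the two functions compiled above:

import numpy as np

m = [[0, 1], [-1, -2]]
print(np.allclose(logistic(m), logistic2(m)))  # True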