This article collects typical usage examples of the shared function from Python's theano.compile.sharedvalue module. If you have been wondering what shared is for and how it is used in practice, the curated examples below should help.
Fifteen code examples of the shared function are shown, drawn from real projects and listed roughly in order of popularity.
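Before looking at the project code, here is a minimal, self-contained sketch (independent of the examples that follow) of what shared does: it wraps a value in a symbolic variable whose state persists across compiled function calls and can be read and written with get_value/set_value or through a function's updates list.

# Minimal sketch of theano.shared, independent of the examples below.
import numpy
import theano
import theano.tensor as tt

state = theano.shared(numpy.float64(0.0), name='state')   # shared scalar
inc = tt.dscalar('inc')

# Each call returns the current state and then adds `inc` to it.
accumulate = theano.function([inc], state, updates=[(state, state + inc)])

accumulate(1.0)                      # returns array(0.0); state becomes 1.0
print(state.get_value())             # -> 1.0
state.set_value(numpy.float64(5.0))
print(state.get_value())             # -> 5.0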
Example 1: __init__
def __init__(self, anchors, K, D, alpha=1.0, beta=1.0, *args, **kwargs):
    self.alpha = shared(alpha)
    self.beta = shared(beta)
    # mask contains zeros for elements fixed at 1e-6
    mask = np.ones((K, D))
    for anchor in anchors:
        for hold in anchor[1]:
            mask[:, hold] = 0
            mask[anchor[0], hold] = 1
    self.mask = TT.as_tensor_variable(mask)
    super(Beta_with_anchors, self).__init__(
        transform=anchored_betas(mask=self.mask, K=K, D=D,
                                 alpha=alpha, beta=beta),
        *args, **kwargs)
    # TODO: should this be a numpy array, like ratematrix?
    self.mean = TT.ones_like(self.mask) * 1e-6
    self.mean = TT.set_subtensor(self.mean[self.mask.nonzero()],
                                 (alpha / (alpha + beta)))
Example 2: __init__
def __init__(self,
             input=tensor.dvector('input'),
             target=tensor.dvector('target'),
             n_input=1, n_hidden=1, n_output=1, lr=1e-3, **kw):
    super(NNet, self).__init__(**kw)

    self.input = input
    self.target = target
    self.lr = shared(lr, 'learning_rate')
    self.w1 = shared(numpy.zeros((n_hidden, n_input)), 'w1')
    self.w2 = shared(numpy.zeros((n_output, n_hidden)), 'w2')

    self.hidden = sigmoid(tensor.dot(self.w1, self.input))
    self.output = tensor.dot(self.w2, self.hidden)
    self.cost = tensor.sum((self.output - self.target) ** 2)

    self.sgd_updates = {
        self.w1: self.w1 - self.lr * tensor.grad(self.cost, self.w1),
        self.w2: self.w2 - self.lr * tensor.grad(self.cost, self.w2)}

    self.sgd_step = pfunc(
        params=[self.input, self.target],
        outputs=[self.output, self.cost],
        updates=self.sgd_updates)

    self.compute_output = pfunc([self.input], self.output)
    self.output_from_hidden = pfunc([self.hidden], self.output)
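For orientation, a hypothetical driver for the NNet class above might look like the sketch below; the input values, dimensions, and step count are illustrative only and are not part of the original project.

# Hypothetical usage of the NNet class above; values are illustrative only.
import numpy
net = NNet(n_input=3, n_hidden=4, n_output=2, lr=1e-2)
x = numpy.array([0.5, -1.0, 2.0])
t = numpy.array([1.0, 0.0])
for _ in range(100):
    out, cost = net.sgd_step(x, t)    # one SGD update of w1 and w2
print(net.compute_output(x))          # prediction after training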
Example 3: __init__
def __init__(self, hyperparameters):
    self.hyperparameters = hyperparameters
    numpy.random.seed()
    self.embeddings = numpy.asarray((numpy.random.rand(self.hyperparameters.vocab_size, self.hyperparameters.embedding_size) - 0.5) * 2 * 0.01, dtype=floatX)
    self.hidden_weights = shared(numpy.asarray(random_weights(self.hyperparameters.input_size, self.hyperparameters.hidden_size, scale_by=1), dtype=floatX))
    self.output_weights = shared(numpy.asarray(random_weights(self.hyperparameters.hidden_size, self.hyperparameters.output_size, scale_by=1), dtype=floatX))
    self.hidden_biases = shared(numpy.asarray(numpy.zeros((self.hyperparameters.hidden_size,)), dtype=floatX))
    self.output_biases = shared(numpy.asarray(numpy.zeros((self.hyperparameters.output_size,)), dtype=floatX))
Example 4: test_strict_generic
def test_strict_generic(self):
    # this should work, because generic can hold anything,
    # even when strict=True
    u = shared('asdf', strict=False)
    v = shared('asdf', strict=True)
    u.set_value(88)
    v.set_value(88)
Example 5: run_nnet
def run_nnet(use_gpu, n_batch=60, n_in=1024, n_hid=2048, n_out=10,
             n_train=100):

    if config.mode == 'DEBUG_MODE':
        n_train = 1

    if use_gpu:
        w = tcn.shared_constructor(0.01 * (my_rand(n_in, n_hid) - 0.5), 'w')
        b = tcn.shared_constructor(my_zeros(n_hid), 'b')
        v = tcn.shared_constructor(my_zeros((n_hid, n_out)), 'v')
        c = tcn.shared_constructor(my_zeros(n_out), 'c')
    else:
        w = shared(0.01 * (my_rand(n_in, n_hid) - 0.5), 'w')
        b = shared(my_zeros(n_hid), 'b')
        v = shared(my_zeros((n_hid, n_out)), 'v')
        c = shared(my_zeros(n_out), 'c')

    x = tensor.fmatrix('x')
    y = tensor.fmatrix('y')
    lr = tensor.fscalar('lr')

    hid = tensor.tanh(tensor.dot(x, w) + b)
    out = tensor.tanh(tensor.dot(hid, v) + c)
    loss = tensor.sum(0.5 * (out - y) ** 2 * lr)

    params = [w, b, v, c]
    gparams = tensor.grad(loss, params)

    mode = get_mode(use_gpu)
    train = pfunc([x, y, lr], [loss], mode=mode,
                  updates=[(p, p - g) for p, g in izip(params, gparams)])

    xval = my_rand(n_batch, n_in)
    yval = my_rand(n_batch, n_out)
    lr = theano._asarray(0.01, dtype='float32')

    t0 = time.time()
    rval = []
    for i in xrange(n_train):
        rval.append(train(xval, yval, lr))
    dt = time.time() - t0

    print_mode(mode)
    return numpy.asarray(rval), dt
Example 6: test_scalar_floatX
def test_scalar_floatX(self):
    # The test should ensure that floatX is not used in the shared
    # constructor for scalars.  Shared values can change, and since we
    # don't know the range they might take, we should keep the same
    # bit width / precision as the original value used to create the
    # shared variable.
    # Since downcasting of a value now raises an exception, it is allowed
    # explicitly here with allow_downcast=True.
    def f(var, val):
        var.set_value(val)

    b = shared(numpy.int64(7), allow_downcast=True)
    assert b.type == theano.tensor.lscalar
    f(b, 8.23)
    assert b.get_value() == 8

    b = shared(numpy.int32(7), allow_downcast=True)
    assert b.type == theano.tensor.iscalar
    f(b, 8.23)
    assert b.get_value() == 8

    b = shared(numpy.int16(7), allow_downcast=True)
    assert b.type == theano.tensor.wscalar
    f(b, 8.23)
    assert b.get_value() == 8

    b = shared(numpy.int8(7), allow_downcast=True)
    assert b.type == theano.tensor.bscalar
    f(b, 8.23)
    assert b.get_value() == 8

    b = shared(numpy.float64(7.234), allow_downcast=True)
    assert b.type == theano.tensor.dscalar
    f(b, 8)
    assert b.get_value() == 8

    b = shared(numpy.float32(7.234), allow_downcast=True)
    assert b.type == theano.tensor.fscalar
    f(b, 8)
    assert b.get_value() == 8

    b = shared(numpy.float(7.234), allow_downcast=True)
    assert b.type == theano.tensor.dscalar
    f(b, 8)
    assert b.get_value() == 8

    b = shared(7.234, allow_downcast=True)
    assert b.type == theano.tensor.dscalar
    f(b, 8)
    assert b.get_value() == 8

    b = shared(numpy.zeros((5, 5), dtype='float32'))
    self.assertRaises(TypeError, f, b, numpy.random.rand(5, 5))
Example 7: gen
def gen(self, op, *args, **kwargs):
    """Create a new random stream in this container.

    :param op: the RandomFunction instance to apply
    :param args: interpreted by `op`
    :param kwargs: interpreted by `op`

    :returns: The symbolic random draw that is part of op()'s return
        value.  This function stores the updated RandomStateType
        variable for use at `build` time.
    :rtype: TensorVariable
    """
    seed = int(self.gen_seedgen.randint(2 ** 30))
    random_state_variable = shared(numpy.random.RandomState(seed))
    # Add a tag to distinguish it from other shared variables.
    random_state_variable.tag.is_rng = True
    new_r, out = op(random_state_variable, *args, **kwargs)
    out.rng = random_state_variable
    out.update = (random_state_variable, new_r)
    self.state_updates.append(out.update)
    random_state_variable.default_update = new_r
    return out
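gen is normally not called directly; it is the path that the drawing methods of a shared random-streams container go through. A short sketch with Theano's RandomStreams, assuming the gen above belongs to such a container:

# Sketch: drawing from a shared random-streams container whose draw methods
# route through a gen() like the one above.
import theano
from theano.tensor.shared_randomstreams import RandomStreams

srng = RandomStreams(seed=234)
u = srng.uniform((2, 2))        # symbolic draw; a shared RandomState is created
f = theano.function([], u)      # the RNG state is updated on every call
print(f())                      # different 2x2 samples each call
print(f())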
Example 8: train
def train(self, train_set_x, pretraining_epochs=15,
          pretrain_lr=0.001, batch_size=1, n_ins=784,
          hidden_layers_sizes=[500, 500]):
    """
    Train the StackedAutoEncoder.
    """
    if not isinstance(train_set_x, TensorSharedVariable):
        train_set_x = shared(train_set_x)

    n_train_batches = train_set_x.get_value(borrow=True).shape[0]
    n_train_batches /= batch_size

    print "hidden_layers_sizes: ", hidden_layers_sizes
    print "... building the model"
    numpy_rng = numpy.random.RandomState(89677)
    self.sda = AdvancedStackedAutoEncoder(
        numpy_rng=numpy_rng,
        n_ins=n_ins,
        hidden_layers_sizes=hidden_layers_sizes,
    )

    print "... getting the pretraining function"
    pretraining_fns = self.sda.pretraining_functions(train_set_x=train_set_x,
                                                     batch_size=batch_size)

    print '... pre-training the model'
    for i in xrange(self.sda.n_layers):
        # go through pretraining epochs
        for epoch in xrange(pretraining_epochs):
            # go through the training set
            c = []
            for batch_index in xrange(n_train_batches):
                c.append(pretraining_fns[i](index=batch_index, lr=pretrain_lr))
            print 'Pre-training layer %i, epoch %d, cost ' % (i, epoch),
            print numpy.mean(c)
Example 9: mv_shared
def mv_shared(*args, **kwargs):
    '''mv_shared works the same way as `theano.shared`.

    It calls `theano.shared` to create the SharedVariable and uses
    MVSharedVariable to wrap it.
    '''
    var = shared(*args, **kwargs)
    mv_shared.shared_vars.append(MVSharedVariable(var))
    return var
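A hypothetical usage sketch follows. MVSharedVariable and the shared_vars registry come from the surrounding project; the stand-in class below exists only to keep the sketch self-contained and is not the project's implementation.

# Hypothetical usage sketch for mv_shared above.
import numpy

class MVSharedVariable(object):          # stand-in, not the project's class
    def __init__(self, var):
        self.var = var

mv_shared.shared_vars = []               # registry assumed to exist before use

W = mv_shared(numpy.zeros((3, 4)), name='W')   # created via theano.shared
b = mv_shared(numpy.zeros(4), name='b')
print(len(mv_shared.shared_vars))               # -> 2 wrapped shared variables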
Example 10: test_tensor_floatX
def test_tensor_floatX(self):
    def f(var, val):
        var.set_value(val)

    b = shared(numpy.int64([7]), allow_downcast=True)
    assert b.type == theano.tensor.lvector
    f(b, [8.23])
    assert b.get_value() == 8

    b = shared(numpy.int32([7]), allow_downcast=True)
    assert b.type == theano.tensor.ivector
    f(b, [8.23])
    assert b.get_value() == 8

    b = shared(numpy.int16([7]), allow_downcast=True)
    assert b.type == theano.tensor.wvector
    f(b, [8.23])
    assert b.get_value() == 8

    b = shared(numpy.int8([7]), allow_downcast=True)
    assert b.type == theano.tensor.bvector
    f(b, [8.23])
    assert b.get_value() == 8

    b = shared(numpy.float64([7.234]), allow_downcast=True)
    assert b.type == theano.tensor.dvector
    f(b, [8])
    assert b.get_value() == 8

    b = shared(numpy.float32([7.234]), allow_downcast=True)
    assert b.type == theano.tensor.fvector
    f(b, [8])
    assert b.get_value() == 8

    # numpy.float([7.234]) doesn't work:
    # b = shared(numpy.float([7.234]))
    # assert b.type == theano.tensor.dvector
    # f(b, [8])

    # This creates a generic type.  Should we cast?  Probably not:
    # b = shared([7.234])
    # assert b.type == theano.tensor.dvector
    # f(b, [8])

    b = shared(numpy.asarray([7.234], dtype=theano.config.floatX),
               allow_downcast=True)
    assert b.dtype == theano.config.floatX
    f(b, [8])
    assert b.get_value() == 8

    b = shared(numpy.zeros((5, 5), dtype='float32'))
    self.assertRaises(TypeError, f, b, numpy.random.rand(5, 5))
Example 11: test_ctors
def test_ctors(self):
    if theano.configdefaults.python_int_bitwidth() == 32:
        assert shared(7).type == theano.tensor.iscalar, shared(7).type
    else:
        assert shared(7).type == theano.tensor.lscalar, shared(7).type
    assert shared(7.0).type == theano.tensor.dscalar
    assert shared(numpy.float32(7)).type == theano.tensor.fscalar

    # test tensor constructor
    b = shared(numpy.zeros((5, 5), dtype='int32'))
    assert b.type == TensorType('int32', broadcastable=[False, False])

    b = shared(numpy.random.rand(4, 5))
    assert b.type == TensorType('float64', broadcastable=[False, False])

    b = shared(numpy.random.rand(5, 1, 2))
    assert b.type == TensorType('float64', broadcastable=[False, False, False])

    assert shared([]).type == generic

    def badfunc():
        shared(7, bad_kw=False)

    self.assertRaises(TypeError, badfunc)
Example 12: __init__
def __init__(self, window_size, vocab_size, embedding_size, hidden_size, seed, initial_embeddings, two_hidden_layers):
    """
    Initialize L{Model} parameters.
    """
    self.vocab_size = vocab_size
    self.window_size = window_size
    self.embedding_size = embedding_size
    self.two_hidden_layers = two_hidden_layers
    if LBL:
        self.hidden_size = hidden_size
        self.output_size = self.embedding_size
    else:
        self.hidden_size = hidden_size
        self.output_size = 1

    import numpy
    import hyperparameters
    from pylearn.algorithms.weights import random_weights
    numpy.random.seed(seed)

    if initial_embeddings is None:
        self.embeddings = numpy.asarray((numpy.random.rand(self.vocab_size, HYPERPARAMETERS["EMBEDDING_SIZE"]) - 0.5) * 2 * HYPERPARAMETERS["INITIAL_EMBEDDING_RANGE"], dtype=floatX)
    else:
        assert initial_embeddings.shape == (self.vocab_size, HYPERPARAMETERS["EMBEDDING_SIZE"])
        self.embeddings = copy.copy(initial_embeddings)

    if HYPERPARAMETERS["NORMALIZE_EMBEDDINGS"]:
        self.normalize(range(self.vocab_size))

    if LBL:
        self.output_weights = shared(numpy.asarray(random_weights(self.input_size, self.output_size, scale_by=HYPERPARAMETERS["SCALE_INITIAL_WEIGHTS_BY"]), dtype=floatX))
        self.output_biases = shared(numpy.asarray(numpy.zeros((1, self.output_size)), dtype=floatX))
        self.score_biases = shared(numpy.asarray(numpy.zeros(self.vocab_size), dtype=floatX))
        assert not self.two_hidden_layers
    else:
        self.hidden_weights = shared(numpy.asarray(random_weights(self.input_size, self.hidden_size, scale_by=HYPERPARAMETERS["SCALE_INITIAL_WEIGHTS_BY"]), dtype=floatX))
        self.hidden_biases = shared(numpy.asarray(numpy.zeros((self.hidden_size,)), dtype=floatX))
        if self.two_hidden_layers:
            self.hidden2_weights = shared(numpy.asarray(random_weights(self.hidden_size, self.hidden_size, scale_by=HYPERPARAMETERS["SCALE_INITIAL_WEIGHTS_BY"]), dtype=floatX))
            self.hidden2_biases = shared(numpy.asarray(numpy.zeros((self.hidden_size,)), dtype=floatX))
        self.output_weights = shared(numpy.asarray(random_weights(self.hidden_size, self.output_size, scale_by=HYPERPARAMETERS["SCALE_INITIAL_WEIGHTS_BY"]), dtype=floatX))
        self.output_biases = shared(numpy.asarray(numpy.zeros((self.output_size,)), dtype=floatX))
Example 13: test_scalar_strict
def test_scalar_strict(self):
    def f(var, val):
        var.set_value(val)

    b = shared(numpy.int64(7), strict=True)
    assert b.type == theano.tensor.lscalar
    self.assertRaises(TypeError, f, b, 8.23)

    b = shared(numpy.int32(7), strict=True)
    assert b.type == theano.tensor.iscalar
    self.assertRaises(TypeError, f, b, 8.23)

    b = shared(numpy.int16(7), strict=True)
    assert b.type == theano.tensor.wscalar
    self.assertRaises(TypeError, f, b, 8.23)

    b = shared(numpy.int8(7), strict=True)
    assert b.type == theano.tensor.bscalar
    self.assertRaises(TypeError, f, b, 8.23)

    b = shared(numpy.float64(7.234), strict=True)
    assert b.type == theano.tensor.dscalar
    self.assertRaises(TypeError, f, b, 8)

    b = shared(numpy.float32(7.234), strict=True)
    assert b.type == theano.tensor.fscalar
    self.assertRaises(TypeError, f, b, 8)

    b = shared(numpy.float(7.234), strict=True)
    assert b.type == theano.tensor.dscalar
    self.assertRaises(TypeError, f, b, 8)

    b = shared(7.234, strict=True)
    assert b.type == theano.tensor.dscalar
    self.assertRaises(TypeError, f, b, 8)

    b = shared(numpy.zeros((5, 5), dtype='float32'))
    self.assertRaises(TypeError, f, b, numpy.random.rand(5, 5))
Example 14: test_tensor_strict
def test_tensor_strict(self):
    def f(var, val):
        var.set_value(val)

    b = shared(numpy.int64([7]), strict=True)
    assert b.type == theano.tensor.lvector
    self.assertRaises(TypeError, f, b, 8.23)

    b = shared(numpy.int32([7]), strict=True)
    assert b.type == theano.tensor.ivector
    self.assertRaises(TypeError, f, b, 8.23)

    b = shared(numpy.int16([7]), strict=True)
    assert b.type == theano.tensor.wvector
    self.assertRaises(TypeError, f, b, 8.23)

    b = shared(numpy.int8([7]), strict=True)
    assert b.type == theano.tensor.bvector
    self.assertRaises(TypeError, f, b, 8.23)

    b = shared(numpy.float64([7.234]), strict=True)
    assert b.type == theano.tensor.dvector
    self.assertRaises(TypeError, f, b, 8)

    b = shared(numpy.float32([7.234]), strict=True)
    assert b.type == theano.tensor.fvector
    self.assertRaises(TypeError, f, b, 8)

    # numpy.float([7.234]) doesn't work:
    # b = shared(numpy.float([7.234]), strict=True)
    # assert b.type == theano.tensor.dvector
    # self.assertRaises(TypeError, f, b, 8)

    # This creates a generic type.  Should we cast?  Probably not:
    # b = shared([7.234], strict=True)
    # assert b.type == theano.tensor.dvector
    # self.assertRaises(TypeError, f, b, 8)

    b = shared(numpy.zeros((5, 5), dtype='float32'))
    self.assertRaises(TypeError, f, b, numpy.random.rand(5, 5))
Example 15: test_SdA
def test_SdA():
    """
    Test AdvancedStackedAutoEncoder.
    """
    # Testing the SdA is straightforward: to get features you only need
    # two calls, train() and get_features().
    train_sets = [
        [1., 1., 1.],
        [2., 2., 2.],
    ]
    train_set_x = numpy.asarray(train_sets)
    train_set_x = shared(train_set_x)

    # test_set = [4.] * (28 * 28)
    test_set = [4.] * 3

    driver = StackedAutoEncoderDriver()
    driver.train(train_set_x, n_ins=3, hidden_layers_sizes=[2, 1])
    # driver.train_with_mnist()

    params = driver.sda.params
    features = driver.get_features(test_set)
    print features