This article collects typical usage examples of the theano.tensor.ivector method in Python. If you are wondering what tensor.ivector does, how to call it, and what it looks like in real code, the curated examples below may help. You can also explore the other methods of the theano.tensor module.
The following presents 15 code examples of the tensor.ivector method, sorted by popularity by default.
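As a quick orientation before the examples: tensor.ivector creates a symbolic one-dimensional int32 vector, typically used as an input to a compiled Theano function (for example, as a vector of integer class labels). A minimal, self-contained sketch; the variable name and the doubling operation are purely illustrative:

import numpy as np
import theano
import theano.tensor as T

v = T.ivector('v')               # symbolic 1-D int32 vector
f = theano.function([v], v * 2)  # compile a graph that doubles it
print(f(np.array([1, 2, 3], dtype=np.int32)))  # -> [2 4 6]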
Example 1: setUp
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def setUp(self):
    if 'gpu' not in theano.config.device:
        raise RuntimeError("Thin stack only defined for GPU usage")

    self.embedding_dim = self.model_dim = 2
    self.vocab_size = 5
    self.batch_size = 2
    self.num_classes = 2
    self.vs = VariableStore()
    self.compose_network = util.TreeLSTMLayer
    self.embedding_proj = IdentityLayer
    self.skip_embeddings = False

    self.X = T.imatrix("X")
    self.transitions = T.imatrix("transitions")
    self.y = T.ivector("y")
Example 2: test_local_csm_properties_csm
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def test_local_csm_properties_csm():
    data = tensor.vector()
    indices, indptr, shape = (tensor.ivector(), tensor.ivector(),
                              tensor.ivector())
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including("specialize", "local_csm_properties_csm")
    for CS, cast in [(sparse.CSC, sp.csc_matrix),
                     (sparse.CSR, sp.csr_matrix)]:
        f = theano.function([data, indices, indptr, shape],
                            sparse.csm_properties(
                                CS(data, indices, indptr, shape)),
                            mode=mode)
        assert not any(
            isinstance(node.op, (sparse.CSM, sparse.CSMProperties))
            for node in f.maker.fgraph.toposort())
        v = cast(random_lil((10, 40), config.floatX, 3))
        f(v.data, v.indices, v.indptr, v.shape)
Example 3: test_local_csm_grad_c
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def test_local_csm_grad_c():
    # The unconditional raise below disables the whole test.
    raise SkipTest("Opt disabled as it doesn't support unsorted indices")
    if not theano.config.cxx:
        raise SkipTest("G++ not available, so we need to skip this test.")
    data = tensor.vector()
    indices, indptr, shape = (tensor.ivector(), tensor.ivector(),
                              tensor.ivector())
    mode = theano.compile.mode.get_default_mode()
    if theano.config.mode == 'FAST_COMPILE':
        mode = theano.compile.Mode(linker='c|py', optimizer='fast_compile')
    mode = mode.including("specialize", "local_csm_grad_c")
    for CS, cast in [(sparse.CSC, sp.csc_matrix), (sparse.CSR, sp.csr_matrix)]:
        cost = tensor.sum(sparse.DenseFromSparse()(CS(data, indices, indptr, shape)))
        f = theano.function(
            [data, indices, indptr, shape],
            tensor.grad(cost, data),
            mode=mode)
        assert not any(isinstance(node.op, sparse.CSMGrad) for node
                       in f.maker.fgraph.toposort())
        v = cast(random_lil((10, 40), config.floatX, 3))
        f(v.data, v.indices, v.indptr, v.shape)
Example 4: test_csm_grad
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def test_csm_grad(self):
    for sparsetype in ('csr', 'csc'):
        x = tensor.vector()
        y = tensor.ivector()
        z = tensor.ivector()
        s = tensor.ivector()
        call = getattr(sp, sparsetype + '_matrix')
        spm = call(random_lil((300, 400), config.floatX, 5))
        out = tensor.grad(dense_from_sparse(
            CSM(sparsetype)(x, y, z, s)).sum(), x)
        self._compile_and_check([x, y, z, s],
                                [out],
                                [spm.data, spm.indices, spm.indptr,
                                 spm.shape],
                                (CSMGrad, CSMGradC))
Example 5: test_csm_unsorted
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def test_csm_unsorted(self):
    """Test support for gradients of unsorted inputs."""
    sp_types = {'csc': sp.csc_matrix,
                'csr': sp.csr_matrix}
    for format in ['csr', 'csc']:
        for dtype in ['float32', 'float64']:
            x = tensor.tensor(dtype=dtype, broadcastable=(False,))
            y = tensor.ivector()
            z = tensor.ivector()
            s = tensor.ivector()
            # Sparse advanced indexing produces unsorted sparse matrices
            a = sparse_random_inputs(format, (4, 3), out_dtype=dtype,
                                     unsorted_indices=True)[1][0]
            # Make sure it's unsorted
            assert not a.has_sorted_indices

            def my_op(x):
                y = tensor.constant(a.indices)
                z = tensor.constant(a.indptr)
                s = tensor.constant(a.shape)
                return tensor.sum(
                    dense_from_sparse(CSM(format)(x, y, z, s) * a))

            verify_grad_sparse(my_op, [a.data])
Example 6: test_csm
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def test_csm(self):
    sp_types = {'csc': sp.csc_matrix,
                'csr': sp.csr_matrix}
    for format in ['csc', 'csr']:
        for dtype in ['float32', 'float64']:
            x = tensor.tensor(dtype=dtype, broadcastable=(False,))
            y = tensor.ivector()
            z = tensor.ivector()
            s = tensor.ivector()
            f = theano.function([x, y, z, s], CSM(format)(x, y, z, s))
            spmat = sp_types[format](random_lil((4, 3), dtype, 3))
            res = f(spmat.data, spmat.indices, spmat.indptr,
                    numpy.asarray(spmat.shape, 'int32'))
            assert numpy.all(res.data == spmat.data)
            assert numpy.all(res.indices == spmat.indices)
            assert numpy.all(res.indptr == spmat.indptr)
            assert numpy.all(res.shape == spmat.shape)
Example 7: __init__
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def __init__(self, seq_len, n_feature):
    import theano.tensor as T
    self.Input = lasagne.layers.InputLayer(shape=(None, seq_len, n_feature))
    self.buildNetwork()
    self.output = lasagne.layers.get_output(self.network)
    self.params = lasagne.layers.get_all_params(self.network, trainable=True)
    self.output_fn = theano.function([self.Input.input_var], self.output)

    fx = T.dvector()       # float64 per-sample weights for the cost
    choices = T.ivector()  # chosen index for each sample in the batch
    px = self.output[T.arange(self.output.shape[0]), choices]
    log_px = T.log(px)
    cost = -fx.dot(log_px)
    updates = lasagne.updates.adagrad(cost, self.params, 0.0008)
    self.train_fn = theano.function([self.Input.input_var, choices, fx],
                                    [cost, px, log_px], updates=updates)
Example 8: __init__
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def __init__(self, layers, mini_batch_size):
    """Takes a list of `layers`, describing the network architecture, and
    a value for the `mini_batch_size` to be used during training
    by stochastic gradient descent.
    """
    self.layers = layers
    self.mini_batch_size = mini_batch_size
    self.params = [param for layer in self.layers for param in layer.params]
    self.x = T.matrix("x")
    self.y = T.ivector("y")
    init_layer = self.layers[0]
    init_layer.set_inpt(self.x, self.x, self.mini_batch_size)
    for j in xrange(1, len(self.layers)):
        prev_layer, layer = self.layers[j-1], self.layers[j]
        layer.set_inpt(
            prev_layer.output, prev_layer.output_dropout, self.mini_batch_size)
    self.output = self.layers[-1].output
    self.output_dropout = self.layers[-1].output_dropout
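A hedged usage sketch for this constructor (note the xrange call: this is a Python 2 codebase). The layer classes below are assumptions about the surrounding code and are not shown in this example; only the constructor signature above comes from the source.

# Hypothetical usage; FullyConnectedLayer and SoftmaxLayer are assumed
# to be defined elsewhere in the same codebase.
net = Network([FullyConnectedLayer(n_in=784, n_out=100),
               SoftmaxLayer(n_in=100, n_out=10)],
              mini_batch_size=10)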
Example 9: __init__
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def __init__(self):
    metric_names = ['Loss', 'L2', 'Accuracy']
    super(Fr3dNetTrainer, self).__init__(metric_names)

    tensor5 = T.TensorType(theano.config.floatX, (False,) * 5)
    input_var = tensor5('inputs')
    target_var = T.ivector('targets')

    logging.info("Defining network")
    net = fr3dnet.define_network(input_var)
    self.network = net
    train_fn, val_fn, l_r = fr3dnet.define_updates(net, input_var, target_var)
    self.train_fn = train_fn
    self.val_fn = val_fn
    self.l_r = l_r
Example 10: __init__
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def __init__(self, id, data, hp):
    self.type = 'LM'
    self.id = id
    self.filename = 'savedmodels/model_' + id + '.pkl'
    self.hp = hp

    self.X = T.imatrix()
    self.Y = T.ivector()
    self.seed_idx = T.iscalar()
    self.X.tag.test_value = np.random.randn(hp.seq_size, hp.batch_size).astype(dtype=np.int32)

    self.data = copy.copy(data)
    for key in ('tr_X', 'va_X', 'te_X', 'tr_Y', 'va_Y', 'te_Y'):
        if key in self.data:
            self.data['len_' + key] = len(self.data[key])
            self.data[key] = shared(self.data[key], borrow=True, dtype=np.int32)

    if hp['debug']:
        theano.config.optimizer = 'None'
        theano.config.compute_test_value = 'ignore'
        theano.config.exception_verbosity = 'high'
Example 11: test_givens
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def test_givens(self):
    x = shared(0)
    assign = pfunc([], x, givens={x: 3})
    assert assign() == 3
    assert x.get_value(borrow=True) == 0

    y = tensor.ivector()
    f = pfunc([y], (y * x), givens={x: 6})
    assert numpy.all(f([1, 1, 1]) == [6, 6, 6])
    assert x.get_value() == 0

    z = tensor.ivector()
    c = z * y
    f = pfunc([y], (c + 7),
              givens={z: theano._asarray([4, 4, 4], dtype='int32')})
    assert numpy.all(f([1, 1, 1]) == [11, 11, 11])
    assert x.get_value() == 0
Example 12: get_SGD_trainer
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def get_SGD_trainer(self):
    """ Returns a plain SGD minibatch trainer with learning rate as param. """
    batch_x = T.fmatrix('batch_x')
    batch_y = T.ivector('batch_y')
    learning_rate = T.fscalar('lr')  # learning rate
    gparams = T.grad(self.mean_cost, self.params)  # all the gradients
    updates = OrderedDict()
    for param, gparam in zip(self.params, gparams):
        updates[param] = param - gparam * learning_rate

    train_fn = theano.function(inputs=[theano.Param(batch_x),
                                       theano.Param(batch_y),
                                       theano.Param(learning_rate)],
                               outputs=self.mean_cost,
                               updates=updates,
                               givens={self.x: batch_x, self.y: batch_y})
    return train_fn
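A hedged usage sketch of the returned trainer: it takes a float32 feature matrix, int32 labels (matching the ivector target), and a scalar learning rate, and returns the mean cost. The model instance, shapes, and hyperparameters below are illustrative assumptions, not part of the source.

# Hypothetical training step; 'model' is an assumed instance of the
# class that defines get_SGD_trainer above.
import numpy as np

train_fn = model.get_SGD_trainer()
batch_x = np.random.rand(32, 100).astype('float32')         # 32 samples, 100 features
batch_y = np.random.randint(0, 5, size=32).astype('int32')  # int32 labels for the ivector
cost = train_fn(batch_x, batch_y, 0.01)                     # one SGD step at lr=0.01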
Example 13: get_adagrad_trainer
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def get_adagrad_trainer(self):
    """ Returns an Adagrad (Duchi et al. 2010) trainer using a learning rate.
    """
    batch_x = T.fmatrix('batch_x')
    batch_y = T.ivector('batch_y')
    learning_rate = T.fscalar('lr')  # learning rate
    gparams = T.grad(self.mean_cost, self.params)  # all the gradients
    updates = OrderedDict()
    for accugrad, param, gparam in zip(self._accugrads, self.params, gparams):
        # c.f. Algorithm 1 in the Adadelta paper (Zeiler 2012)
        agrad = accugrad + gparam * gparam
        dx = - (learning_rate / T.sqrt(agrad + self._eps)) * gparam
        updates[param] = param + dx
        updates[accugrad] = agrad

    train_fn = theano.function(inputs=[theano.Param(batch_x),
                                       theano.Param(batch_y),
                                       theano.Param(learning_rate)],
                               outputs=self.mean_cost,
                               updates=updates,
                               givens={self.x: batch_x, self.y: batch_y})
    return train_fn
Example 14: get_adadelta_trainer
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def get_adadelta_trainer(self):
    """ Returns an Adadelta (Zeiler 2012) trainer using self._rho and
    self._eps params. """
    batch_x = T.fmatrix('batch_x')
    batch_y = T.ivector('batch_y')
    gparams = T.grad(self.mean_cost, self.params)
    updates = OrderedDict()
    for accugrad, accudelta, param, gparam in zip(self._accugrads,
                                                  self._accudeltas,
                                                  self.params, gparams):
        # c.f. Algorithm 1 in the Adadelta paper (Zeiler 2012)
        agrad = self._rho * accugrad + (1 - self._rho) * gparam * gparam
        dx = - T.sqrt((accudelta + self._eps)
                      / (agrad + self._eps)) * gparam
        updates[accudelta] = (self._rho * accudelta
                              + (1 - self._rho) * dx * dx)
        updates[param] = param + dx
        updates[accugrad] = agrad

    train_fn = theano.function(inputs=[theano.Param(batch_x),
                                       theano.Param(batch_y)],
                               outputs=self.mean_cost,
                               updates=updates,
                               givens={self.x: batch_x, self.y: batch_y})
    return train_fn
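To make the update rule above concrete, here is a minimal NumPy sketch of a single Adadelta step, mirroring the symbolic graph built in the loop (Zeiler 2012, Algorithm 1); the scalar values stand in for self._rho, self._eps, one parameter, and its gradient, and are illustrative assumptions.

import numpy as np

rho, eps = 0.95, 1e-6            # assumed values for self._rho and self._eps
accugrad, accudelta = 0.0, 0.0   # running averages E[g^2] and E[dx^2]
param, gparam = 1.0, 0.3         # one scalar parameter and its gradient

accugrad = rho * accugrad + (1 - rho) * gparam * gparam       # accumulate E[g^2]
dx = -np.sqrt((accudelta + eps) / (accugrad + eps)) * gparam  # RMS-scaled step
accudelta = rho * accudelta + (1 - rho) * dx * dx             # accumulate E[dx^2]
param = param + dx                                            # apply the update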
Example 15: __init__
# Module to import: from theano import tensor [as alias]
# or: from theano.tensor import ivector [as alias]
def __init__(self, dataset, dictionary_size=50000, embedding_size=50,
             skip_window=5, learning_rate=0.1, negative_sampling=25):
    self.ds = dictionary_size
    self.es = embedding_size
    self.sw = skip_window
    self.lr = learning_rate
    self.ns = negative_sampling
    self._tokenize(dataset)

    # nn architecture
    self.input = T.matrix()
    self.w1 = theano.shared((np.random.rand(self.ds, self.es).astype(theano.config.floatX) - 0.5),
                            borrow=True)
    self.activeidx = T.ivector()  # rows of w1 active for the current batch
    self.activew1 = T.take(self.w1, self.activeidx, axis=0)
    self.l1out = T.dot(self.input, self.activew1)
    self.w2 = theano.shared((np.random.rand(self.es, self.ds).astype(theano.config.floatX) - 0.5),
                            borrow=True)
    self.sampidx = T.ivector()  # sampled output columns (negative sampling)
    self.sampw2 = T.take(self.w2, self.sampidx, axis=1)
    self.l2out = T.nnet.softmax(T.dot(self.l1out, self.sampw2))
    self.target = T.matrix()

    # nn functions
    self.z = (self.l2out - self.target).T
    self.w1update = T.set_subtensor(
        self.w1[self.activeidx, :],
        self.w1[self.activeidx, :] - T.dot(self.sampw2, self.z).flatten() * self.lr)
    self.w2update = T.set_subtensor(
        self.w2[:, self.sampidx],
        self.w2[:, self.sampidx] - T.outer(self.z, self.l1out).T * self.lr)
    self.propogate = theano.function(
        [self.input, self.target, self.activeidx, self.sampidx],
        updates=[(self.w1, self.w1update), (self.w2, self.w2update)],
        allow_input_downcast=True)