This article collects typical usage examples of the Python function theano.tensor.iscalar. If you are wondering what iscalar does, how it is used, or want to see it in real code, the curated examples below should help.
The following 15 code examples of iscalar are shown, ordered by popularity by default.
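Before the examples, a minimal sketch of what T.iscalar itself does may be useful: it creates a symbolic int32 scalar that becomes an input of a compiled Theano function, most often a minibatch index. The variable names below are illustrative.

    import theano
    import theano.tensor as T

    i = T.iscalar('i')                      # symbolic int32 scalar
    j = T.iscalar('j')
    add = theano.function([i, j], i + j)    # compile a function of two integer scalars
    print add(2, 3)                         # prints 5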
Example 1: build_model
def build_model(self, train_set, test_set, validation_set):
    """
    Building the model should be done prior to training. It will implement the training, testing and validation
    functions.
    This method should be called by any inheriting model.
    :param loss: The loss function applied to training (cf. updates.py), e.g. mse.
    :param update: The update function (optimization framework) used for training (cf. updates.py), e.g. sgd.
    :param update_args: The args for the update function applied to training, e.g. (0.001,).
    """
    print "### BUILDING MODEL ###"
    self.train_args = {}
    self.train_args['inputs'] = OrderedDict({})
    self.train_args['outputs'] = OrderedDict({})
    self.test_args = {}
    self.test_args['inputs'] = OrderedDict({})
    self.test_args['outputs'] = OrderedDict({})
    self.validate_args = {}
    self.validate_args['inputs'] = OrderedDict({})
    self.validate_args['outputs'] = OrderedDict({})
    # Symbolic minibatch index, batch size and learning rate.
    self.sym_index = T.iscalar('index')
    self.sym_batchsize = T.iscalar('batchsize')
    self.sym_lr = T.scalar('learningrate')
    self.batch_slice = slice(self.sym_index * self.sym_batchsize, (self.sym_index + 1) * self.sym_batchsize)
    self.sh_train_x = theano.shared(np.asarray(train_set[0], dtype=theano.config.floatX), borrow=True)
    self.sh_train_t = theano.shared(np.asarray(train_set[1], dtype=theano.config.floatX), borrow=True)
    self.sh_test_x = theano.shared(np.asarray(test_set[0], dtype=theano.config.floatX), borrow=True)
    self.sh_test_t = theano.shared(np.asarray(test_set[1], dtype=theano.config.floatX), borrow=True)
    if validation_set is not None:
        self.sh_valid_x = theano.shared(np.asarray(validation_set[0], dtype=theano.config.floatX), borrow=True)
        self.sh_valid_t = theano.shared(np.asarray(validation_set[1], dtype=theano.config.floatX), borrow=True)
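The method above only declares the symbolic minibatch index and batch slice; compiling the actual training function presumably happens elsewhere in the class. A minimal sketch of how such a slice is typically wired into theano.function via givens (sym_x, sym_t, cost and updates are illustrative names, not taken from the original class):

    # Hypothetical compilation step, assuming sym_x/sym_t are the model's symbolic
    # inputs and cost/updates have been built from the loss and update functions.
    train_model = theano.function(
        inputs=[self.sym_index, self.sym_batchsize, self.sym_lr],
        outputs=cost,
        updates=updates,
        givens={sym_x: self.sh_train_x[self.batch_slice],   # slice one minibatch out of the shared data
                sym_t: self.sh_train_t[self.batch_slice]})
    # train_model(batch_index, batch_size, learning_rate) then runs one minibatch update.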
Example 2: getMinibatchTrainer
def getMinibatchTrainer(self, costFunction, variableToData, rms=True):
    # define params
    lr = T.fscalar('lr')
    start = T.iscalar('start')
    end = T.iscalar('end')
    # Get the cost and its parameters.
    params = costFunction[0]
    cost = costFunction[1]
    # Get the updates.
    updates = self.getUpdates(cost, params, lr, rms)
    # Store all state variables.
    stateManager = StateManager([u[0] for u in updates])
    # Slice the data.
    givens = dict()
    for item in variableToData:
        givens[item.variable] = item.slice(start, end)
    # Define the training function.
    train_model = theano.function(inputs=[theano.Param(start, borrow=True),
                                          theano.Param(end, borrow=True),
                                          theano.Param(lr, borrow=True)],
                                  outputs=theano.Out(cost, borrow=True),
                                  updates=updates,
                                  givens=givens)
    return train_model, stateManager
Example 3: f_train
def f_train(self, t_x, t_corrupt=0.2, t_rate=0.1):
    """ return a training function with the following signature:
    input:
        lower and upper indices into the training data
        alternative training data
    return:
        likelihood-based cost
        squared distance between training data and prediction
    """
    x = T.matrix('x')            # pipe data through this symbol
    q = self.t_corrupt(x, t_corrupt)
    h = self.t_encode(q)
    z = self.t_decode(h)
    L = - T.sum(x * T.log(z) + (1 - x) * T.log(1 - z), axis=1)
    cost = T.mean(L)             # to be returned
    dist = T.mean(T.sqrt(T.sum((x - z) ** 2, axis=1)))   # to be returned
    grad = T.grad(cost, self.parm)
    diff = [(p, p - t_rate * g) for p, g in zip(self.parm, grad)]
    t_fr = T.iscalar()
    t_to = T.iscalar()
    return theano.function(
        [t_fr, t_to],
        [cost, dist],
        updates=diff,
        givens={x: t_x[t_fr:t_to]},
        name="DA_trainer")
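The compiled function takes the two iscalar index bounds and slices the shared dataset t_x through givens. A hedged usage sketch; the names da, shared_x and n_samples are illustrative, not from the original code:

    # Assuming da is an instance of the autoencoder class above, shared_x is a
    # theano.shared matrix of training data and n_samples its number of rows.
    trainer = da.f_train(shared_x, t_corrupt=0.3, t_rate=0.1)
    for epoch in range(10):
        for start in range(0, n_samples, 128):
            cost, dist = trainer(start, start + 128)   # train on rows [start, start + 128)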
Example 4: getTrainer
def getTrainer(self, lossType="NLL"):
    '''
    return a function to do MBSGD on (trainX, trainY)
    '''
    trainY = T.ivector('y')
    alpha = T.dscalar('a')
    lowIdx = T.iscalar()
    highIdx = T.iscalar()
    trainX = T.matrix()
    if lossType == "aNLL":
        loss = self.aNLL(trainY)
    elif lossType == 'MSE':
        loss = self.MSE(trainY)
    else:
        loss = self.NLL(trainY)
    dW = T.grad(cost=loss, wrt=self.W)
    db = T.grad(cost=loss, wrt=self.b)
    updates = [(self.W, self.W - alpha * dW), (self.b, self.b - alpha * db)]
    trainer = theano.function(
        inputs=[trainX, trainY, alpha],
        outputs=loss,
        updates=updates,
        givens={
            self.input: trainX,
        },
        allow_input_downcast=True
    )
    return trainer
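Here lowIdx and highIdx are declared but unused; the compiled trainer takes the minibatch arrays directly. A hedged usage sketch (model, batch_x and batch_y are illustrative names):

    # Assuming model exposes getTrainer() and batch_x/batch_y are numpy arrays
    # holding one minibatch of features and integer labels.
    trainer = model.getTrainer(lossType="NLL")
    loss = trainer(batch_x, batch_y, 0.01)   # one MBSGD step with learning rate 0.01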
Example 5: SimFnIdx
def SimFnIdx(fnsim, embeddings, leftop, rightop):
    """
    This function returns a Theano function to measure the similarity score
    for a given triplet of entity indexes.

    :param fnsim: similarity function (on Theano variables).
    :param embeddings: an Embeddings instance.
    :param leftop: class for the 'left' operator.
    :param rightop: class for the 'right' operator.
    """
    embedding, relationl, relationr = parse_embeddings(embeddings)

    # Inputs
    idxo = T.iscalar('idxo')
    idxr = T.iscalar('idxr')
    idxl = T.iscalar('idxl')
    # Graph
    lhs = (embedding.E[:, idxl]).reshape((1, embedding.D))
    rhs = (embedding.E[:, idxr]).reshape((1, embedding.D))
    rell = (relationl.E[:, idxo]).reshape((1, relationl.D))
    relr = (relationr.E[:, idxo]).reshape((1, relationr.D))
    simi = fnsim(leftop(lhs, rell), rightop(rhs, relr))
    """
    Theano function inputs.
    :input idxl: index value of the 'left' member.
    :input idxr: index value of the 'right' member.
    :input idxo: index value of the relation member.

    Theano function output.
    :output simi: score value.
    """
    return theano.function([idxl, idxr, idxo], [simi],
                           on_unused_input='ignore')
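The returned function takes the three iscalar indexes (left entity, right entity, relation) and returns the similarity score as a single output. A hedged usage sketch with illustrative index values:

    # Assuming fnsim, embeddings, leftop and rightop come from the surrounding
    # knowledge-graph embedding code.
    simfn = SimFnIdx(fnsim, embeddings, leftop, rightop)
    score = simfn(12, 47, 3)[0]   # similarity of the triple (left=12, relation=3, right=47)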
Example 6: multMatVect
def multMatVect(v, A, m1, B, m2):
    # TODO : need description for parameter and return
    """
    Multiply the first half of v by A with a modulo of m1 and the second half
    by B with a modulo of m2.

    Notes
    -----
    The parameters of dot_modulo are passed implicitly because passing them
    explicitly takes more time than running the function's C-code.
    """
    if multMatVect.dot_modulo is None:
        A_sym = tensor.lmatrix('A')
        s_sym = tensor.ivector('s')
        m_sym = tensor.iscalar('m')
        A2_sym = tensor.lmatrix('A2')
        s2_sym = tensor.ivector('s2')
        m2_sym = tensor.iscalar('m2')
        o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
        multMatVect.dot_modulo = function(
            [A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o, profile=False)

    # This way of calling the Theano fct is done to bypass Theano overhead.
    f = multMatVect.dot_modulo
    f.input_storage[0].storage[0] = A
    f.input_storage[1].storage[0] = v[:3]
    f.input_storage[2].storage[0] = m1
    f.input_storage[3].storage[0] = B
    f.input_storage[4].storage[0] = v[3:]
    f.input_storage[5].storage[0] = m2
    f.fn()
    r = f.output_storage[0].storage[0]
    return r
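The compiled dot_modulo function is cached as an attribute on multMatVect itself, so that attribute must exist before the first call. In the Theano source this initialization sits right after the function definition; a one-line sketch of it (hedged, reproduced from memory of theano/sandbox/rng_mrg.py):

    # Lazily-built cache for the compiled dot_modulo function.
    multMatVect.dot_modulo = None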
Example 7: RankRightFnIdx_filtered
def RankRightFnIdx_filtered(fnsim, embeddings, leftop, rightop, subtensorspec=None):
    """
    This function returns a Theano function to measure the similarity score of
    all 'right' entities given couples of relation and 'left' entities (as
    index values).
    """
    embedding, relationl, relationr = parse_embeddings(embeddings)

    # Inputs
    idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo')
    rightparts = T.ivector('rightparts')

    # Graph
    lhs = (embedding.E[:, idxl]).reshape((1, embedding.D))    # lhs: 1xD vector containing the embedding of idxl
    if subtensorspec is not None:
        # We compute the score only for a subset of entities
        rhs = (embedding.E[:, :subtensorspec]).T
    else:
        rhs = embedding.E.T                                   # rhs: NxD embedding matrix
    rhs = rhs[rightparts, :]    # select the right parts not appearing
                                # in the train/valid/test sets
    rell = (relationl.E[:, idxo]).reshape((1, relationl.D))   # rell: 1xD vector containing the embedding of idxo (relationl)
    relr = (relationr.E[:, idxo]).reshape((1, relationr.D))   # relr: 1xD vector containing the embedding of idxo (relationr)
    tmp = leftop(lhs, rell)                                   # a = rell(lhs)
                                                              # b = relr(rhs)
    simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr))   # simi = fnsim(a, b)
    return theano.function([idxl, idxo, rightparts], [simi], on_unused_input='ignore')
Example 8: RankRightFnIdx_Schema
def RankRightFnIdx_Schema(fnsim, embeddings, prior, leftop, rightop, subtensorspec=None):
    embedding, relationl, relationr = parse_embeddings(embeddings)

    # Inputs
    idxl, idxo = T.iscalar('idxl'), T.iscalar('idxo')
    g = T.matrix('g')

    # Graph
    lhs = (embedding.E[:, idxl]).reshape((1, embedding.D))    # lhs: 1xD vector containing the embedding of idxl
    if subtensorspec is not None:
        # We compute the score only for a subset of entities
        rhs = (embedding.E[:, :subtensorspec]).T
    else:
        rhs = embedding.E.T                                   # rhs: NxD embedding matrix
    rell = (relationl.E[:, idxo]).reshape((1, relationl.D))   # rell: 1xD vector containing the embedding of idxo (relationl)
    relr = (relationr.E[:, idxo]).reshape((1, relationr.D))   # relr: 1xD vector containing the embedding of idxo (relationr)
    tmp = leftop(lhs, rell)                                   # a = rell(lhs)
                                                              # b = relr(rhs)
    # Negative Energy
    simi = fnsim(tmp.reshape((1, tmp.shape[1])), rightop(rhs, relr))   # simi = fnsim(a, b)
    pen_simi = g[0, :].T * prior.P[idxo, 0].T + g[1, :].T * prior.P[idxo, 1].T
    simi = simi - pen_simi
    return theano.function([idxl, idxo, g], [simi], on_unused_input='ignore')
Example 9: compile_bn
def compile_bn(data_set, model, make_updates):
    """
    Put the data into shared variables, build the computational graph from the
    model and the optimizer, and compile the Theano functions.

    Parameters
    -----------
    data_set : list of numpy.ndarray
        feature_vec : ndarray
            (n_pixels, D, n_tensors)
        gt_vec : ndarray
            (n_pixels, D)
        test_feature_vec, test_gt_vec
    model : e.g. models.Rcn1layer
    optimizer : e.g. optimizers.SGD
    """
    s_input, s_target, s_test_input, s_test_target = share_data_sets(*data_set)
    nn, obj, train_mse, model_updates, model_param_l = model.make_graph_train()
    test_mse, test_out = model.make_graph_test()
    updates, opt_param_list = make_updates(loss=obj, param_list=nn.param_l)

    i_batch = T.iscalar("i_batch")
    index_list = T.ivector("index_list")
    batch_size = T.iscalar("batch_size")

    od = OrderedDict()
    for k, e in updates.items() + model_updates.items():
        od[k] = e

    f_train = theano.function(
        inputs=[i_batch, index_list, batch_size] + opt_param_list + model_param_l,
        updates=od,
        givens=[(nn.x_t3, s_input[index_list[i_batch*batch_size: i_batch*batch_size + batch_size]]),
                (nn.t_mat, s_target[index_list[i_batch*batch_size: i_batch*batch_size + batch_size]])],
        on_unused_input='warn')
    f_training_error = theano.function(
        inputs=[i_batch, index_list, batch_size] + model_param_l,
        outputs=[train_mse],
        givens=[(nn.x_t3, s_input[index_list[i_batch*batch_size: i_batch*batch_size + batch_size]]),
                (nn.t_mat, s_target[index_list[i_batch*batch_size: i_batch*batch_size + batch_size]])],
        on_unused_input='warn')
    f_test_error = theano.function(
        inputs=[i_batch, index_list, batch_size],
        outputs=[test_mse],
        givens=[(nn.x_t3, s_test_input[index_list[i_batch*batch_size: i_batch*batch_size + batch_size]]),
                (nn.t_mat, s_test_target[index_list[i_batch*batch_size: i_batch*batch_size + batch_size]])])
    f_output = theano.function(
        inputs=[nn.x_t3],
        outputs=[test_out])

    result = [f_train, f_training_error, f_test_error, f_output, s_input,
              s_target, s_test_input, s_test_target, nn.param_l]
    return result
Example 10: test_compute_lnZ
def test_compute_lnZ(self):
    v = T.matrix('v')
    z = T.iscalar('z')
    V = cartesian([(0, 1)] * self.input_size, dtype=config.floatX)
    #H = cartesian([(0, 1)] * self.hidden_size, dtype=config.floatX)

    # We simulate having an infinite number of hidden units by adding a lot of hidden units with parameters set to 0.
    nb_hidden_units_to_add = 10000
    model = iRBM(input_size=self.model.input_size,
                 hidden_size=self.model.hidden_size + nb_hidden_units_to_add,
                 beta=self.model.beta.get_value())

    model.W.set_value(np.r_[self.model.W.get_value(), np.zeros((nb_hidden_units_to_add, model.input_size), dtype=theano.config.floatX)])
    model.b.set_value(np.r_[self.model.b.get_value(), np.zeros((nb_hidden_units_to_add,), dtype=theano.config.floatX)])
    model.c.set_value(self.model.c.get_value())

    v = T.matrix('v')
    z = T.iscalar('z')
    F_vz = theano.function([v, z], model.F(v, z))

    energies = []
    for z in range(1, model.hidden_size + 1):
        energies.append(F_vz(V, z))

    lnZ = logsumexp(-np.array(energies)).eval()

    lnZ_using_free_energy = theano.function([v], logsumexp(-self.model.free_energy(v)))
    assert_almost_equal(lnZ_using_free_energy(V), lnZ, decimal=5)  # decimal=5 needed for float32
Example 11: compile_functions
def compile_functions(self, opt, **args):
    print '... compiling training functions'
    gen_cost, gen_show_cost, dis_cost, cost_pfake, cost_ptrue = self.get_cost()
    self.opt = opt
    gen_updates = self.opt.get_updates(gen_cost, self.gen_params)
    dis_updates = self.opt.get_updates(dis_cost, self.dis_params)

    self.get_noise = theano.function([],
                                     self.theano_rng.uniform(size=(self.batch_size, self.num_z),
                                                             low=-1, high=1))

    start_index = T.iscalar('start_index')
    end_index = T.iscalar('end_index')

    if self.uint8_data:
        given_train_x = T.cast(self.shared_train[start_index:end_index], dtype='float32')
    else:
        given_train_x = self.shared_train[start_index:end_index]

    self.train_gen_model = theano.function(
        [self.z],
        gen_show_cost,
        updates=gen_updates,
    )
    self.train_dis_model = theano.function(
        [start_index, end_index, self.z],
        [cost_pfake, cost_ptrue],
        updates=dis_updates,
        givens={self.x: given_train_x}
    )
Example 12: multMatVect
def multMatVect(v, A, m1, B, m2):
    """
    Multiply the first half of v by A with a modulo of m1
    and the second half by B with a modulo of m2.

    Note: The parameters of dot_modulo are passed implicitly because passing
    them explicitly takes more time than running the function's C-code.
    """
    if multMatVect.dot_modulo is None:
        A_sym = tensor.lmatrix("A")
        s_sym = tensor.ivector("s")
        m_sym = tensor.iscalar("m")
        A2_sym = tensor.lmatrix("A2")
        s2_sym = tensor.ivector("s2")
        m2_sym = tensor.iscalar("m2")
        o = DotModulo()(A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym)
        multMatVect.dot_modulo = function([A_sym, s_sym, m_sym, A2_sym, s2_sym, m2_sym], o)

    # This way of calling the Theano fct is done to bypass Theano overhead.
    f = multMatVect.dot_modulo
    f.input_storage[0].storage[0] = A
    f.input_storage[1].storage[0] = v[:3]
    f.input_storage[2].storage[0] = m1
    f.input_storage[3].storage[0] = B
    f.input_storage[4].storage[0] = v[3:]
    f.input_storage[5].storage[0] = m2
    f.fn()
    r = f.output_storage[0].storage[0]
    return r
Example 13: init_nnet
def init_nnet(W, n_classes, vec_dim):
    """Initialize neural network.

    Args:
      W (theano.shared): embedding matrix
      n_classes: number of classes to be predicted
      vec_dim: dimensionality of the embeddings
    """
    w_idx = TT.iscalar(name="w_idx")
    y_gold = TT.iscalar(name="y_gold")
    embs = W[w_idx]
    Theta = theano.shared(value=ORTHOGONAL.sample((n_classes, vec_dim)),
                          name="Theta")
    beta = theano.shared(value=HE_UNIFORM.sample((1, n_classes)), name="beta")
    y_probs = TT.nnet.softmax(TT.dot(Theta, embs.T).flatten() + beta).flatten()
    params = [Theta]
    cost = -TT.mean(TT.log(y_probs[y_gold]))
    updates = sgd_updates_adadelta(params, cost)
    train = theano.function([w_idx, y_gold], cost, updates=updates)
    y_pred = TT.argmax(y_probs)
    y_score = y_probs[y_pred]
    predict = theano.function([w_idx], (y_pred, y_score))
    acc = TT.eq(y_gold, y_pred)
    validate = theano.function([w_idx, y_gold], acc)
    return (train, validate, predict, params)
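Both iscalar inputs here are plain word and label indexes, so the returned functions are called with single integers. A hedged usage sketch (W_emb and the index values are illustrative):

    # Assuming W_emb is a theano.shared embedding matrix of shape (vocab_size, vec_dim).
    train, validate, predict, params = init_nnet(W_emb, n_classes=3, vec_dim=100)
    cost = train(42, 1)           # one adadelta step on word index 42 with gold label 1
    label, score = predict(42)    # predicted class and its probability
    accuracy = validate(42, 1)    # 1 if the prediction matches the gold label, else 0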
Example 14: build_model
def build_model(shared_params, options, other_params):
    """
    Build the complete neural network model and return the symbolic variables
    """
    # symbolic variables
    x = tensor.matrix(name="x", dtype=floatX)
    y1 = tensor.iscalar(name="y1")
    y2 = tensor.iscalar(name="y2")

    # lstm cell
    (ht, ct) = lstm_cell(x, shared_params, options, other_params)  # gets the ht, ct

    # softmax 1 i.e. frame type prediction
    activation = tensor.dot(shared_params['softmax1_W'], ht).transpose() + shared_params['softmax1_b']
    frame_pred = tensor.nnet.softmax(activation)  # .transpose()

    # softmax 2 i.e. gesture class prediction
    #

    # predicted probability for frame type
    f_pred_prob = theano.function([x], frame_pred, name="f_pred_prob")
    # predicted frame type
    f_pred = theano.function([x], frame_pred.argmax(), name="f_pred")

    # cost
    cost = ifelse(tensor.eq(y1, 1), -tensor.log(frame_pred[0, 0] + options['log_offset'])
                  * other_params['begin_cost_factor'],
                  ifelse(tensor.eq(y1, 2), -tensor.log(frame_pred[0, 1] + options['log_offset'])
                         * other_params['end_cost_factor'],
                         ifelse(tensor.eq(y1, 3), -tensor.log(frame_pred[0, 2] + options['log_offset']),
                                tensor.abs_(tensor.log(y1)))), name='ifelse_cost')

    # function for output of the current lstm cell and softmax prediction
    f_model_cell_output = theano.function([x], (ht, ct, frame_pred), name="f_model_cell_output")

    # return the model symbolic variables and theano functions
    return x, y1, y2, f_pred_prob, f_pred, cost, f_model_cell_output
Example 15: __init__
def __init__(self, in_size, out_size, dim_y, dim_pos, hidden_size_encoder, hidden_size_decoder,
             cell="gru", optimizer="rmsprop", p=0.5, num_sents=1):
    self.X = T.matrix("X")
    self.Y_y = T.matrix("Y_y")
    self.Y_pos = T.matrix("Y_pos")
    self.in_size = in_size
    self.out_size = out_size
    self.dim_y = dim_y
    self.dim_pos = dim_pos
    self.hidden_size_encoder = hidden_size_encoder
    self.hidden_size_decoder = hidden_size_decoder
    self.cell = cell
    self.drop_rate = p
    self.num_sents = num_sents
    self.is_train = T.iscalar('is_train')        # for dropout
    self.batch_size = T.iscalar('batch_size')    # for mini-batch training
    self.mask = T.matrix("mask")
    self.mask_y = T.matrix("mask_y")
    self.optimizer = optimizer
    print "seq2seq out size ", self.out_size
    if self.out_size == self.dim_y + self.dim_pos:
        print "size right !"
    self.define_layers()
    self.define_train_test_funcs()