This article collects typical usage examples of the Python method theano.map. If you are wondering what exactly theano.map does, how to call it, or how it is used in practice, the curated examples below may help; you can also read further into the theano module it belongs to.
The following shows 9 code examples of theano.map, sorted by popularity by default.
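Before diving into the examples, here is a minimal self-contained sketch of theano.map itself: like theano.scan, it returns a pair (outputs, updates), and the mapped function is applied to one element of each sequence per step. The toy matrix below is invented for illustration.

import numpy as np
import theano
import theano.tensor as T

X = T.matrix('X')
# sum each row of X; theano.map returns (outputs, updates) like theano.scan
row_sums, updates = theano.map(fn=lambda row: row.sum(), sequences=[X])
f = theano.function([X], row_sums, updates=updates)

print(f(np.arange(6, dtype=theano.config.floatX).reshape(3, 2)))  # [1. 5. 9.]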
Example 1: in_top_k
# Required import: import theano [as alias]
# Or: from theano import map [as alias]
def in_top_k(predictions, targets, k):
    '''Returns whether the `targets` are in the top `k` `predictions`.

    # Arguments
        predictions: A tensor of shape batch_size x classes and type float32.
        targets: A tensor of shape batch_size and type int32 or int64.
        k: An int, the number of top elements to consider.

    # Returns
        A tensor of shape batch_size and type int. output_i is 1 if
        targets_i is within the top-k values of predictions_i.
    '''
    predictions_top_k = T.argsort(predictions)[:, -k:]
    # `any` and `equal` were backend helpers in the original module;
    # T.any and T.eq are the underlying Theano ops.
    result, _ = theano.map(lambda prediction, target: T.any(T.eq(prediction, target)),
                           sequences=[predictions_top_k, targets])
    return result
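A hypothetical way to exercise in_top_k above; the scores and labels are made up for illustration:

import numpy as np
import theano
import theano.tensor as T

preds = T.matrix('preds')     # (batch_size, classes), float
labels = T.ivector('labels')  # (batch_size,), int32
check = theano.function([preds, labels], in_top_k(preds, labels, k=2),
                        allow_input_downcast=True)
scores = np.array([[0.1, 0.5, 0.4],
                   [0.8, 0.1, 0.1]])
print(check(scores, [0, 0]))  # [0 1]: class 0 is top-2 only in the second row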
Example 2: stop_gradient
# Required import: import theano [as alias]
# Or: from theano import map [as alias]
def stop_gradient(variables):
    """Returns `variables` but with zero gradient w.r.t. every other variable.

    # Arguments
        variables: tensor or list of tensors to consider constant with respect
            to any other variable.

    # Returns
        A single tensor or a list of tensors (depending on the passed argument)
        that has a constant gradient with respect to any other variable.
    """
    if isinstance(variables, (list, tuple)):
        # list() is needed on Python 3, where map() returns an iterator
        return list(map(theano.gradient.disconnected_grad, variables))
    else:
        return theano.gradient.disconnected_grad(variables)
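A quick sanity check that stop_gradient above blocks backpropagation; the quadratic expression is just an illustration:

import theano
import theano.tensor as T

x = T.vector('x')
y = (x ** 2).sum() + (stop_gradient(x) ** 2).sum()
g = theano.function([x], T.grad(y, x), allow_input_downcast=True)
print(g([1.0, 2.0]))  # [2. 4.]: only the non-detached term contributes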
Example 3: gradient_descent
# Required import: import theano [as alias]
# Or: from theano import map [as alias]
# Also uses: from collections import OrderedDict
def gradient_descent(self, loss):
    """Momentum GD with gradient clipping."""
    grads = T.grad(loss, self.params)
    self.momentum_velocity_ = [0.] * len(grads)
    grad_norm = T.sqrt(sum(T.sqr(g).sum() for g in grads))
    updates = OrderedDict()
    not_finite = T.or_(T.isnan(grad_norm), T.isinf(grad_norm))
    scaling_den = T.maximum(5.0, grad_norm)
    for n, (param, grad) in enumerate(zip(self.params, grads)):
        # Replace non-finite gradients with a small step back towards zero;
        # otherwise rescale so the global gradient norm is at most 5.0.
        grad = T.switch(not_finite, 0.1 * param,
                        grad * (5.0 / scaling_den))
        velocity = self.momentum_velocity_[n]
        update_step = self.momentum * velocity - self.learning_rate * grad
        self.momentum_velocity_[n] = update_step
        updates[param] = param + update_step
    return updates
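A minimal sketch of wiring gradient_descent above into a training function. TinyModel, its single parameter w, and the quadratic loss are all invented for illustration; only the params / learning_rate / momentum attributes that the method reads are required:

import numpy as np
import theano
import theano.tensor as T
from collections import OrderedDict

class TinyModel(object):
    def __init__(self):
        self.w = theano.shared(np.ones(3, dtype=theano.config.floatX), name='w')
        self.params = [self.w]
        self.learning_rate = 0.1
        self.momentum = 0.9
    gradient_descent = gradient_descent  # reuse the function defined above as a method

x = T.vector('x')
model = TinyModel()
loss = ((model.w - x) ** 2).sum()
train = theano.function([x], loss, updates=model.gradient_descent(loss),
                        allow_input_downcast=True)
print(train([0.0, 0.0, 0.0]))  # one clipped momentum step; prints the loss 3.0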
Example 4: map_fn
# Required import: import theano [as alias]
# Or: from theano import map [as alias]
def map_fn(fn, elems, name=None):
    '''Map the function fn over the elements elems and return the outputs.

    # Arguments
        fn: Callable that will be called upon each element in elems
        elems: tensor, at least 2 dimensional
        name: A string name for the map node in the graph

    # Returns
        Tensor with first dimension equal to that of `elems`; the remaining
        dimensions depend on `fn`.
    '''
    return theano.map(fn, elems, name=name)[0]
Example 5: map_fn
# Required import: import theano [as alias]
# Or: from theano import map [as alias]
def map_fn(fn, elems, name=None, dtype=None):
    """Map the function fn over the elements elems and return the outputs.

    # Arguments
        fn: Callable that will be called upon each element in elems
        elems: tensor, at least 2 dimensional
        name: A string name for the map node in the graph
        dtype: Output data type (accepted for API parity; unused in this body)

    # Returns
        Tensor with first dimension equal to that of `elems`; the remaining
        dimensions depend on `fn`.
    """
    return theano.map(fn, elems, name=name)[0]
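A hypothetical call of map_fn above (either variant): normalize each row of a matrix.

import numpy as np
import theano
import theano.tensor as T

m = T.matrix('m')
normed = map_fn(lambda row: row / row.sum(), m)
f = theano.function([m], normed, allow_input_downcast=True)
print(f(np.array([[1.0, 3.0], [2.0, 2.0]])))  # [[0.25 0.75] [0.5 0.5]]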
Example 6: compute_tree
# Required import: import theano [as alias]
# Or: from theano import map [as alias]
def compute_tree(self, x_word, x_index, tree):
    self.recursive_unit = self.create_recursive_unit()
    self.leaf_unit = self.create_leaf_unit()
    num_parents = tree.shape[0]  # number of internal nodes
    num_leaves = self.num_nodes - num_parents

    # compute leaf hidden states
    leaf_h, _ = theano.map(
        fn=self.leaf_unit,
        sequences=[x_word[:num_leaves], x_index[:num_leaves]])
    if self.irregular_tree:
        init_node_h = T.concatenate([leaf_h, leaf_h, leaf_h], axis=0)
    else:
        init_node_h = leaf_h

    # use recurrence to compute internal node hidden states
    def _recurrence(x_word, x_index, node_info, t, node_h, last_h):
        child_exists = node_info > -1
        # node_h drops its first row at every step, so the state stored for
        # original node i sits at position i - t; irregular trees also carry
        # 2 * num_leaves rows of padding in front.
        offset = 2 * num_leaves * int(self.irregular_tree) - child_exists * t
        # dimshuffle(0, 'x') turns the 0/1 child mask into a column so it
        # broadcasts over hidden units, zeroing the rows of missing children.
        child_h = node_h[node_info + offset] * child_exists.dimshuffle(0, 'x')
        parent_h = self.recursive_unit(x_word, x_index, child_h, child_exists)
        node_h = T.concatenate([node_h,
                                parent_h.reshape([1, self.hidden_dim])])
        return node_h[1:], parent_h

    dummy = theano.shared(self.init_vector([self.hidden_dim]))
    (_, parent_h), _ = theano.scan(
        fn=_recurrence,
        outputs_info=[init_node_h, dummy],
        sequences=[x_word[num_leaves:], x_index[num_leaves:], tree,
                   T.arange(num_parents)],
        n_steps=num_parents)

    return T.concatenate([leaf_h, parent_h], axis=0)
Example 7: __init__
# Required import: import theano [as alias]
# Or: from theano import map [as alias]
def __init__(self, rng,
             input,
             vocab_size,
             embed_dm,
             embeddings=None,
             ):
    """
    input: integer matrix of word indices, (number of instances, sentence word number)
    vocab_size: integer, the size of the vocabulary
    embed_dm: integer, the dimension of the word vector representation
    embeddings: theano.tensor.TensorType
        pretrained embeddings
    """
    if embeddings is not None:
        print("Use pretrained embeddings: ON")
        assert embeddings.get_value().shape == (vocab_size, embed_dm), "%r != %r" % (
            embeddings.get_value().shape,
            (vocab_size, embed_dm)
        )
        self.embeddings = embeddings
    else:
        print("Use pretrained embeddings: OFF")
        embedding_val = np.asarray(
            rng.normal(0, 0.05, size=(vocab_size, embed_dm)),
            dtype=theano.config.floatX
        )
        embedding_val[vocab_size - 1, :] = 0  # the <PADDING> token is initialized to 0
        self.embeddings = theano.shared(
            np.asarray(embedding_val,
                       dtype=theano.config.floatX),
            borrow=True,
            name='embeddings'
        )

    self.params = [self.embeddings]
    self.param_shapes = [(vocab_size, embed_dm)]

    # Output:
    #   a theano.tensor.tensor4 of shape (instances, 1, embedding dimension,
    #   words per sentence), made 4D to fit the convolution operation downstream
    sent_embedding_list, updates = theano.map(lambda sent: self.embeddings[sent],
                                              input)
    sent_embedding_tensor = T.stacklists(sent_embedding_list)  # make it a 3D tensor
    self.output = sent_embedding_tensor.dimshuffle(0, 'x', 2, 1)  # make it a 4D tensor
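A hypothetical instantiation of the layer above; the class name WordEmbeddingLayer is assumed, since the snippet only shows __init__:

import numpy as np
import theano
import theano.tensor as T

rng = np.random.RandomState(0)
sents = T.imatrix('sents')  # (batch, words per sentence)
layer = WordEmbeddingLayer(rng, sents, vocab_size=10, embed_dm=5)  # name assumed
out = theano.function([sents], layer.output)
print(out(np.zeros((2, 4), dtype='int32')).shape)  # (2, 1, 5, 4)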
Example 8: gen_full_alignment
# Required import: import theano [as alias]
# Or: from theano import map [as alias]
def gen_full_alignment(self):
    # Get only the focus columns
    for seq_name, sequence in self.seq_name_to_sequence.items():
        # Replace periods with dashes (the uppercase equivalent)
        sequence = sequence.replace(".", "-")
        # then keep only the focus columns
        self.seq_name_to_sequence[seq_name] = [sequence[ix].upper() for ix in self.focus_cols]

    # Remove sequences that have bad characters
    alphabet_set = set(list(self.alphabet))
    seq_names_to_remove = []
    for seq_name, sequence in self.seq_name_to_sequence.items():
        for letter in sequence:
            if letter not in alphabet_set and letter != "-":
                seq_names_to_remove.append(seq_name)
    seq_names_to_remove = list(set(seq_names_to_remove))
    for seq_name in seq_names_to_remove:
        del self.seq_name_to_sequence[seq_name]

    # Encode the sequences as one-hot vectors
    print("Encoding sequences")
    self.x_train = np.zeros((len(self.seq_name_to_sequence.keys()), len(self.focus_cols), len(self.alphabet)))
    self.x_train_name_list = []
    for i, seq_name in enumerate(self.seq_name_to_sequence.keys()):
        sequence = self.seq_name_to_sequence[seq_name]
        self.x_train_name_list.append(seq_name)
        for j, letter in enumerate(sequence):
            if letter in self.aa_dict:
                k = self.aa_dict[letter]
                self.x_train[i, j, k] = 1.0

    # Fast sequence weights with Theano
    if self.calc_weights:
        print("Computing sequence weights")
        # NumPy version:
        # import scipy
        # from scipy.spatial.distance import pdist, squareform
        # self.weights = scale / np.sum(squareform(pdist(seq_index_array, metric="hamming")) < theta, axis=0)
        #
        # Theano weights: each sequence is weighted by the inverse of the number
        # of sequences whose normalized overlap with it exceeds 1 - theta
        X = T.tensor3("x")
        cutoff = T.scalar("theta")
        X_flat = X.reshape((X.shape[0], X.shape[1] * X.shape[2]))
        N_list, updates = theano.map(lambda x: 1.0 / T.sum(T.dot(X_flat, x) / T.dot(x, x) > 1 - cutoff), X_flat)
        weightfun = theano.function(inputs=[X, cutoff], outputs=[N_list], allow_input_downcast=True)
        self.weights = weightfun(self.x_train, self.theta)[0]
    else:
        # If not reweighting, give every sequence unit weight
        self.weights = np.ones(self.x_train.shape[0])

    self.Neff = np.sum(self.weights)
    print("Neff =", str(self.Neff))
    print("Data Shape =", self.x_train.shape)
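For reference, the commented-out NumPy version above can be spelled out as follows. Treat it as a rough cross-check of the Theano weights, not an exact equivalent: argmax maps the all-zero one-hot rows of gap positions onto letter 0, and the scale factor is taken as 1.0 (an assumption, not from the source).

import numpy as np
from scipy.spatial.distance import pdist, squareform

def sequence_weights_numpy(x_train, theta):
    # (N, L, A) one-hot -> (N, L) integer codes
    seq_index_array = x_train.argmax(axis=-1)
    # each weight is 1 / (number of sequences within Hamming distance theta)
    neighbours = np.sum(squareform(pdist(seq_index_array, metric='hamming')) < theta, axis=0)
    return 1.0 / neighbours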
Example 9: compute_tree
# Required import: import theano [as alias]
# Or: from theano import map [as alias]
def compute_tree(self, x_word, x_index, num_parent, tree):
    self.recursive_unit = self.create_recursive_unit()

    # initialize every node's hidden state with a fresh shared vector
    def ini_unit(x):
        return theano.shared(self.init_vector([self.hidden_dim]))
    init_node_h, _ = theano.scan(
        fn=ini_unit,
        sequences=[x_word])

    # top-down recurrence: read the parent state at row node_info[0] and
    # write the computed child state into row node_info[1]
    def _recurrence(x_word, x_index, node_info, node_h, last_h):
        parent_h = node_h[node_info[0]]
        child_h = self.recursive_unit(x_word, x_index, parent_h)
        node_h = T.concatenate([node_h[:node_info[1]],
                                child_h.reshape([1, self.hidden_dim]),
                                node_h[node_info[1] + 1:]])
        return node_h, child_h

    dummy = theano.shared(self.init_vector([self.hidden_dim]))
    (_, child_hs), _ = theano.scan(
        fn=_recurrence,
        outputs_info=[init_node_h, dummy],
        sequences=[x_word[:-1], x_index, tree])
    return child_hs[num_parent - 1:]
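Compared with Example 6, which computes leaf states first and folds children into parents bottom-up, this variant pre-allocates one state row per node and appears to propagate top-down: each scan step reads the parent state at node_info[0], runs the recursive unit, writes the resulting child state into row node_info[1], and the method returns the per-step child states from position num_parent-1 onwards.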