This page collects typical usage examples of the Python method blocks.bricks.recurrent.LSTM.apply. If you are wondering what LSTM.apply does, how to call it, or what working code that uses it looks like, the curated examples below may help. You can also read more about the containing class, blocks.bricks.recurrent.LSTM.
The following 15 code examples of LSTM.apply are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
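All of the examples share one calling convention: a Linear brick first projects the input sequence to 4 * dim features (the stacked pre-activations for the input, forget, cell, and output gates), and LSTM.apply then returns two sequences, the hidden states and the memory cells. As a minimal sketch of that pattern (the dimensions and names here are illustrative, not taken from any example below):

import numpy
import theano
from theano import tensor
from blocks.bricks import Linear, Tanh
from blocks.bricks.recurrent import LSTM
from blocks.initialization import IsotropicGaussian, Constant

x = tensor.tensor3('x')  # (time, batch, features)
dim = 8  # illustrative hidden size

x_to_gates = Linear(input_dim=5, output_dim=4 * dim, name='x_to_gates',
                    weights_init=IsotropicGaussian(0.01), biases_init=Constant(0))
lstm = LSTM(dim=dim, activation=Tanh(), name='lstm',
            weights_init=IsotropicGaussian(0.01), biases_init=Constant(0))
x_to_gates.initialize()
lstm.initialize()

hidden, cells = lstm.apply(x_to_gates.apply(x))  # each is (time, batch, dim)
f = theano.function([x], hidden)
print(f(numpy.ones((7, 2, 5), dtype=theano.config.floatX)).shape)  # (7, 2, 8)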
Example 1: make_bidir_lstm_stack

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
def make_bidir_lstm_stack(seq, seq_dim, mask, sizes, skip=True, name=''):
    bricks = []
    curr_dim = [seq_dim]
    curr_hidden = [seq]
    hidden_list = []
    for k, dim in enumerate(sizes):
        fwd_lstm_ins = [Linear(input_dim=d, output_dim=4 * dim,
                               name='%s_fwd_lstm_in_%d_%d' % (name, k, l))
                        for l, d in enumerate(curr_dim)]
        fwd_lstm = LSTM(dim=dim, activation=Tanh(), name='%s_fwd_lstm_%d' % (name, k))
        bwd_lstm_ins = [Linear(input_dim=d, output_dim=4 * dim,
                               name='%s_bwd_lstm_in_%d_%d' % (name, k, l))
                        for l, d in enumerate(curr_dim)]
        bwd_lstm = LSTM(dim=dim, activation=Tanh(), name='%s_bwd_lstm_%d' % (name, k))
        bricks = bricks + [fwd_lstm, bwd_lstm] + fwd_lstm_ins + bwd_lstm_ins
        # Sum the linear projections of every input stream into the gate pre-activations
        fwd_tmp = sum(x.apply(v) for x, v in zip(fwd_lstm_ins, curr_hidden))
        bwd_tmp = sum(x.apply(v) for x, v in zip(bwd_lstm_ins, curr_hidden))
        fwd_hidden, _ = fwd_lstm.apply(fwd_tmp, mask=mask)
        # The backward pass runs over the time-reversed sequence and mask
        bwd_hidden, _ = bwd_lstm.apply(bwd_tmp[::-1], mask=mask[::-1])
        hidden_list = hidden_list + [fwd_hidden, bwd_hidden]
        if skip:
            curr_hidden = [seq, fwd_hidden, bwd_hidden[::-1]]
            curr_dim = [seq_dim, dim, dim]
        else:
            curr_hidden = [fwd_hidden, bwd_hidden[::-1]]
            curr_dim = [dim, dim]
    return bricks, hidden_list
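One plausible way to drive this helper (a sketch; the sizes and name are assumptions for illustration, and the imports are those from the sketch above): seq is a (time, batch, features) tensor and mask a (time, batch) matrix, which is what the [::-1] time reversals require.

seq = tensor.tensor3('seq')
mask = tensor.matrix('seq_mask')
bricks, hiddens = make_bidir_lstm_stack(seq, seq_dim=100, mask=mask,
                                        sizes=[128, 128], name='enc')
for brick in bricks:
    brick.weights_init = IsotropicGaussian(0.01)
    brick.biases_init = Constant(0)
    brick.initialize()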
Example 2: CoreNetwork

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
class CoreNetwork(BaseRecurrent, Initializable):
    def __init__(self, input_dim, dim, **kwargs):
        super(CoreNetwork, self).__init__(**kwargs)
        self.input_dim = input_dim
        self.dim = dim
        self.lstm = LSTM(dim=dim, name=self.name + '_lstm',
                         weights_init=self.weights_init,
                         biases_init=self.biases_init)
        self.proj = Linear(input_dim=input_dim, output_dim=dim * 4,
                           name=self.name + '_proj',
                           weights_init=self.weights_init,
                           biases_init=self.biases_init)
        self.children = [self.lstm, self.proj]

    def get_dim(self, name):
        if name == 'inputs':
            return self.input_dim
        elif name in ['state', 'cell']:
            return self.dim
        else:
            raise ValueError

    @recurrent(sequences=['inputs'], states=['state', 'cell'], contexts=[],
               outputs=['state', 'cell'])
    def apply(self, inputs, state, cell):
        # iterate=False performs a single LSTM step; the outer @recurrent
        # decorator takes care of iterating over the sequence.
        state, cell = self.lstm.apply(self.proj.apply(inputs), state, cell,
                                      iterate=False)
        return state, cell
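A sketch of how such a brick might be used (the dimensions are illustrative assumptions; because the class is Initializable, weights_init and biases_init must be passed so they are available when the children are built in __init__):

core = CoreNetwork(input_dim=10, dim=20, name='core',
                   weights_init=IsotropicGaussian(0.01),
                   biases_init=Constant(0))
core.initialize()
inputs = tensor.tensor3('inputs')
state, cell = core.apply(inputs)  # @recurrent iterates over the time axis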
Example 3: __init__

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
def __init__(self, input_size, hidden_size, output_size):
    self.input_size = input_size
    self.hidden_size = hidden_size
    self.output_size = output_size
    x = tensor.tensor3('x', dtype=floatX)
    y = tensor.tensor3('y', dtype=floatX)
    x_to_lstm = Linear(name="x_to_lstm", input_dim=input_size,
                       output_dim=4 * hidden_size,
                       weights_init=IsotropicGaussian(), biases_init=Constant(0))
    lstm = LSTM(dim=hidden_size, name="lstm",
                weights_init=IsotropicGaussian(), biases_init=Constant(0))
    lstm_to_output = Linear(name="lstm_to_output", input_dim=hidden_size,
                            output_dim=output_size,
                            weights_init=IsotropicGaussian(), biases_init=Constant(0))
    x_transform = x_to_lstm.apply(x)
    h, c = lstm.apply(x_transform)
    y_hat = lstm_to_output.apply(h)
    y_hat = Logistic(name="y_hat").apply(y_hat)
    self.cost = BinaryCrossEntropy(name="cost").apply(y, y_hat)
    x_to_lstm.initialize()
    lstm.initialize()
    lstm_to_output.initialize()
    self.computation_graph = ComputationGraph(self.cost)
Example 4: main

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
def main(max_seq_length, lstm_dim, batch_size, num_batches, num_epochs):
    dataset_train = IterableDataset(generate_data(max_seq_length, batch_size,
                                                  num_batches))
    dataset_test = IterableDataset(generate_data(max_seq_length, batch_size,
                                                 100))
    stream_train = DataStream(dataset=dataset_train)
    stream_test = DataStream(dataset=dataset_test)
    x = T.tensor3('x')
    y = T.matrix('y')
    # We need to feed the LSTM layer inputs of size 4 * lstm_dim; see the
    # LSTM layer documentation for the explanation.
    x_to_h = Linear(1, lstm_dim * 4, name='x_to_h',
                    weights_init=IsotropicGaussian(),
                    biases_init=Constant(0.0))
    lstm = LSTM(lstm_dim, name='lstm',
                weights_init=IsotropicGaussian(),
                biases_init=Constant(0.0))
    h_to_o = Linear(lstm_dim, 1, name='h_to_o',
                    weights_init=IsotropicGaussian(),
                    biases_init=Constant(0.0))
    x_transform = x_to_h.apply(x)
    h, c = lstm.apply(x_transform)
    # Only the hidden units of the last time step are used for
    # the classification.
    y_hat = h_to_o.apply(h[-1])
    y_hat = Logistic().apply(y_hat)
    cost = BinaryCrossEntropy().apply(y, y_hat)
    cost.name = 'cost'
    lstm.initialize()
    x_to_h.initialize()
    h_to_o.initialize()
    cg = ComputationGraph(cost)
    algorithm = GradientDescent(cost=cost, parameters=cg.parameters,
                                step_rule=Adam())
    test_monitor = DataStreamMonitoring(variables=[cost],
                                        data_stream=stream_test, prefix="test")
    train_monitor = TrainingDataMonitoring(variables=[cost], prefix="train",
                                           after_epoch=True)
    main_loop = MainLoop(algorithm, stream_train,
                         extensions=[test_monitor, train_monitor,
                                     FinishAfter(after_n_epochs=num_epochs),
                                     Printing(), ProgressBar()])
    main_loop.run()
    print('Learned weights:')
    for layer in (x_to_h, lstm, h_to_o):
        print("Layer '%s':" % layer.name)
        for param in layer.parameters:
            print(param.name, ':', param.get_value())
        print()
Example 5: apply

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
def apply(self, input_, target):
    x_to_h = Linear(name='x_to_h',
                    input_dim=self.dims[0],
                    output_dim=self.dims[1] * 4)
    pre_rnn = x_to_h.apply(input_)
    pre_rnn.name = 'pre_rnn'
    rnn = LSTM(activation=Tanh(),
               dim=self.dims[1], name=self.name)
    h, _ = rnn.apply(pre_rnn)
    h.name = 'h'
    h_to_y = Linear(name='h_to_y',
                    input_dim=self.dims[1],
                    output_dim=self.dims[2])
    y_hat = h_to_y.apply(h)
    y_hat.name = 'y_hat'
    cost = SquaredError().apply(target, y_hat)
    cost.name = 'MSE'
    self.outputs = {}
    self.outputs['y_hat'] = y_hat
    self.outputs['cost'] = cost
    self.outputs['pre_rnn'] = pre_rnn
    self.outputs['h'] = h
    # Initialization
    for brick in (rnn, x_to_h, h_to_y):
        brick.weights_init = IsotropicGaussian(0.01)
        brick.biases_init = Constant(0)
        brick.initialize()
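Note the order here: the bricks are applied first and initialized last. Blocks bricks are lazy, so configuring weights_init and biases_init after apply has already been called is fine, as long as initialize() runs before the parameter values are actually used.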
Example 6: create_rnn

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
def create_rnn(hidden_dim, vocab_dim, mode="rnn"):
    # input
    x = tensor.imatrix('inchar')
    y = tensor.imatrix('outchar')
    W = LookupTable(
        name="W1",
        # an LSTM expects gate pre-activations of size 4 * hidden_dim
        dim=hidden_dim * 4 if mode == "lstm" else hidden_dim,
        length=vocab_dim,
        weights_init=initialization.IsotropicGaussian(0.01),
        biases_init=initialization.Constant(0)
    )
    if mode == "lstm":
        # Long Short-Term Memory
        H = LSTM(
            hidden_dim,
            name='H',
            weights_init=initialization.IsotropicGaussian(0.01),
            biases_init=initialization.Constant(0.0)
        )
    else:
        # recurrent history weight
        H = SimpleRecurrent(
            name="H",
            dim=hidden_dim,
            activation=Tanh(),
            weights_init=initialization.IsotropicGaussian(0.01)
        )
    S = Linear(
        name="W2",
        input_dim=hidden_dim,
        output_dim=vocab_dim,
        weights_init=initialization.IsotropicGaussian(0.01),
        biases_init=initialization.Constant(0)
    )
    A = NDimensionalSoftmax(name="softmax")
    initLayers([W, H, S])
    activations = W.apply(x)
    if mode == "lstm":
        hiddens = H.apply(activations)[0]  # LSTM.apply returns (states, cells)
    else:
        hiddens = H.apply(activations)
    activations2 = S.apply(hiddens)
    y_hat = A.apply(activations2, extra_ndim=1)
    cost = A.categorical_cross_entropy(y, activations2, extra_ndim=1).mean()
    cg = ComputationGraph(cost)
    layers = (x, W, H, S, A, y)
    return cg, layers, y_hat, cost
Example 7: add_lstm

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
def add_lstm(input_dim, input_var):
    linear = Linear(input_dim=input_dim, output_dim=input_dim * 4,
                    name="linear_layer")
    lstm = LSTM(dim=input_dim, name="lstm_layer")
    testing_init(linear)
    default_init(lstm)
    h = linear.apply(input_var)
    return lstm.apply(h)  # returns (hidden states, cells)
Example 8: construct_model

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
def construct_model(activation_function, r_dim, hidden_dim, out_dim):
    # Construct the model
    r = tensor.fmatrix('r')
    x = tensor.fmatrix('x')
    y = tensor.ivector('y')
    nx = x.shape[0]
    nj = x.shape[1]  # == r.shape[0]
    nr = r.shape[1]
    # r is nj x nr
    # x is nx x nj
    # y is nx
    # Get a representation of r of size r_dim
    r = DAE(r)
    # r is now nj x r_dim
    # r_rep is nx x nj x r_dim
    r_rep = r[None, :, :].repeat(axis=0, repeats=nx)
    # x3 is nx x nj x 1
    x3 = x[:, :, None]
    # concat is nx x nj x (r_dim + 1)
    concat = tensor.concatenate([r_rep, x3], axis=2)
    # Change concat from Batch x Time x Features to T x B x F
    rnn_input = concat.dimshuffle(1, 0, 2)
    linear = Linear(input_dim=r_dim + 1, output_dim=4 * hidden_dim,
                    name="input_linear")
    lstm = LSTM(dim=hidden_dim, activation=activation_function,
                name="hidden_recurrent")
    top_linear = Linear(input_dim=hidden_dim, output_dim=out_dim,
                        name="out_linear")
    pre_rnn = linear.apply(rnn_input)
    states = lstm.apply(pre_rnn)[0]
    activations = top_linear.apply(states)
    activations = tensor.mean(activations, axis=0)
    cost = Softmax().categorical_cross_entropy(y, activations)
    pred = activations.argmax(axis=1)
    error_rate = tensor.neq(y, pred).mean()
    # Initialize parameters
    for brick in (linear, lstm, top_linear):
        brick.weights_init = IsotropicGaussian(0.1)
        brick.biases_init = Constant(0.)
        brick.initialize()
    return cost, error_rate
Example 9: example4

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
def example4():
    """LSTM -> Crashes when initializing the LSTM."""
    x = tensor.tensor3('x')
    dim = 3
    gate_inputs = Linear(input_dim=dim, output_dim=dim * 4, name="linear",
                         weights_init=initialization.Identity(),
                         biases_init=Constant(2))
    lstm = LSTM(dim=dim, activation=Tanh(),
                weights_init=IsotropicGaussian(), biases_init=Constant(0))
    gate_inputs.initialize()
    hg = gate_inputs.apply(x)
    lstm.initialize()
    h, cells = lstm.apply(hg)
    print(lstm.parameters)
    f = theano.function([x], h)
    print(f(np.ones((dim, 1, dim), dtype=theano.config.floatX)))
    print(f(np.ones((dim, 1, dim), dtype=theano.config.floatX)))
    print(f(4 * np.ones((dim, 1, dim), dtype=theano.config.floatX)))
    print("Good Job!")
    # Explicit initial state and cells; the inputs must again be the
    # 4 * dim gate pre-activations (hg), not the raw x.
    h0 = tensor.matrix('h0')
    c = tensor.matrix('cells')
    h, c1 = lstm.apply(inputs=hg, states=h0, cells=c)
    f = theano.function([x, h0, c], h)
    print("a")
    print(f(np.ones((3, 1, 3), dtype=theano.config.floatX),
            np.ones((1, 3), dtype=theano.config.floatX),
            np.ones((1, 3), dtype=theano.config.floatX)))
Example 10: lstm_layer

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
def lstm_layer(self, h, n):
    """
    Performs the LSTM update for a batch of word sequences.

    :param h: the word embeddings for this update
    :param n: the layer number of the LSTM (used to name the bricks)
    """
    # Maps the word embeddings to the 4 * hidden_size gate pre-activations
    linear = Linear(input_dim=self.hidden_size, output_dim=self.hidden_size * 4,
                    name='linear_lstm' + str(n))
    initialize(linear, sqrt(6.0 / (5 * self.hidden_size)))
    lstm = LSTM(dim=self.hidden_size, name='lstm' + str(n))
    initialize(lstm, 0.08)
    return lstm.apply(linear.apply(h))
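Presumably the surrounding model calls this once per layer, feeding the hidden states returned for layer n back in as h for layer n + 1 to build a deep LSTM stack; note that lstm.apply returns both hidden states and cells, so the caller has to unpack the pair.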
Example 11: __init__

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
def __init__(self, input1_size, input2_size, lookup1_dim=200, lookup2_dim=200, hidden_size=512):
    self.hidden_size = hidden_size
    self.input1_size = input1_size
    self.input2_size = input2_size
    self.lookup1_dim = lookup1_dim
    self.lookup2_dim = lookup2_dim
    x1 = tensor.lmatrix('durations')
    x2 = tensor.lmatrix('syllables')
    y = tensor.lmatrix('pitches')
    lookup1 = LookupTable(dim=self.lookup1_dim, length=self.input1_size, name='lookup1',
                          weights_init=initialization.Uniform(width=0.01),
                          biases_init=Constant(0))
    lookup1.initialize()
    lookup2 = LookupTable(dim=self.lookup2_dim, length=self.input2_size, name='lookup2',
                          weights_init=initialization.Uniform(width=0.01),
                          biases_init=Constant(0))
    lookup2.initialize()
    # The merged projection feeds the LSTM, which expects inputs of size
    # 4 * hidden_size (the stacked gate pre-activations).
    merge = Merge(['lookup1', 'lookup2'], [self.lookup1_dim, self.lookup2_dim],
                  self.hidden_size * 4,
                  weights_init=initialization.Uniform(width=0.01),
                  biases_init=Constant(0))
    merge.initialize()
    # Alternatively: RecurrentStack([LSTM(dim=self.hidden_size, activation=Tanh())] * 3)
    recurrent_block = LSTM(dim=self.hidden_size, activation=Tanh(),
                           weights_init=initialization.Uniform(width=0.01))
    recurrent_block.initialize()
    linear = Linear(input_dim=self.hidden_size, output_dim=self.input1_size,
                    weights_init=initialization.Uniform(width=0.01),
                    biases_init=Constant(0))
    linear.initialize()
    softmax = NDimensionalSoftmax()
    l1 = lookup1.apply(x1)
    l2 = lookup2.apply(x2)
    m = merge.apply(l1, l2)
    h, c = recurrent_block.apply(m)  # LSTM.apply returns (states, cells)
    a = linear.apply(h)
    y_hat = softmax.apply(a, extra_ndim=1)
    # categorical_cross_entropy is applied to the pre-softmax activations with
    # extra_ndim=1, since it cannot take a 3-D tensor of probabilities directly
    # (ValueError: x must be 1-d or 2-d tensor of floats).
    self.Cost = softmax.categorical_cross_entropy(y, a, extra_ndim=1).mean()
    self.ComputationGraph = ComputationGraph(self.Cost)
    self.Model = Model(y_hat)
Example 12: Encoder

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
class Encoder(Initializable):
    def __init__(self, image_feature_dim, embedding_dim, **kwargs):
        super(Encoder, self).__init__(**kwargs)
        self.image_embedding = Linear(
            input_dim=image_feature_dim,
            output_dim=embedding_dim,
            # weights_init=IsotropicGaussian(0.02),
            # biases_init=Constant(0.),
            name="image_embedding"
        )
        self.to_inputs = Linear(
            input_dim=embedding_dim,
            # gate_inputs = vstack(input, forget, cell, hidden)
            output_dim=embedding_dim * 4,
            # weights_init=IsotropicGaussian(0.02),
            # biases_init=Constant(0.),
            name="to_inputs"
        )
        # The LSTM dim does not have to equal embedding_dim; the choice is arbitrary.
        self.transition = LSTM(dim=embedding_dim, name="transition")
        self.children = [self.image_embedding, self.to_inputs, self.transition]

    @application(inputs=['image_vects', 'word_vects'],
                 outputs=['image_embedding', 'sentence_embedding'])
    def apply(self, image_vects, word_vects):
        image_embedding = self.image_embedding.apply(image_vects)
        inputs = self.to_inputs.apply(word_vects)
        inputs = inputs.dimshuffle(1, 0, 2)
        hidden, cells = self.transition.apply(inputs=inputs, mask=None)
        # The last hidden state represents the accumulation of all the words
        # (i.e. the sentence): all batches, last time step, all features.
        sentence_embedding = hidden[-1]
        # sentence_embedding = inputs.mean(axis=0)
        return image_embedding, sentence_embedding
Example 13: Encoder

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
class Encoder(Initializable):
    def __init__(self, image_feature_dim, embedding_dim, **kwargs):
        super(Encoder, self).__init__(**kwargs)
        self.image_embedding = Linear(
            input_dim=image_feature_dim,
            output_dim=embedding_dim,
            name="image_embedding"
        )
        self.to_inputs = Linear(
            input_dim=embedding_dim,
            # times 4 because gate_inputs = vstack(input, forget, cell, hidden)
            output_dim=embedding_dim * 4,
            name="to_inputs"
        )
        self.transition = LSTM(dim=embedding_dim, name="transition")
        self.children = [self.image_embedding, self.to_inputs, self.transition]

    @application(
        inputs=['image_vects', 'word_vects'],
        outputs=['image_embedding', 'sentence_embedding']
    )
    def apply(self, image_vects, word_vects):
        image_embedding = self.image_embedding.apply(image_vects)
        inputs = self.to_inputs.apply(word_vects)
        # shuffle dimensions to correspond to (sequence, batch, features)
        inputs = inputs.dimshuffle(1, 0, 2)
        hidden, cells = self.transition.apply(inputs=inputs, mask=None)
        # The last hidden state represents the accumulation of word embeddings
        # (i.e. the sentence embedding).
        sentence_embedding = hidden[-1]
        return image_embedding, sentence_embedding
Example 14: build_theano_functions

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
def build_theano_functions(self):
    x = T.fmatrix('x')
    s = T.fvector('s')
    mu = T.fvector('mu')
    mu = T.reshape(mu, (self.number_of_mix, 1))
    pi = T.fvector('pi')
    lstm = LSTM(
        dim=self.input_dim // 4,  # inputs of width input_dim give 4 * dim gates
        weights_init=IsotropicGaussian(0.5),
        biases_init=Constant(1))
    lstm.initialize()
    h, c = lstm.apply(x)
    h = h[0][0][-1]  # scalar: last hidden unit of the first step and batch element
    # Gaussian mixture likelihood of h under means mu, scales s, weights pi
    LL = T.sum(pi * (1. / (T.sqrt(2. * np.pi) * s)) *
               T.exp(-0.5 * (h - mu) ** 2 /
                     T.reshape(s, (self.number_of_mix, 1)) ** 2.).sum(axis=1))
    cost = -T.log(LL)
    model = Model(cost)
    self.model = model
    parameters = model.parameters
    # Plain SGD updates
    grads = T.grad(cost, parameters)
    updates = []
    for i in range(len(grads)):
        updates.append((parameters[i], parameters[i] - self.lr * grads[i]))
    gradf = theano.function([x, s, mu, pi], [cost], updates=updates)
    f = theano.function([x], [h])
    return gradf, f
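The cost here is the negative log-likelihood of a univariate Gaussian mixture: pi are the mixture weights, mu the means, s the standard deviations, and h (the last hidden unit of the LSTM output) the point at which the density is evaluated. The update loop implements plain stochastic gradient descent with learning rate self.lr.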
Example 15: Model

# Required import: from blocks.bricks.recurrent import LSTM [as alias]
# Or: from blocks.bricks.recurrent.LSTM import apply [as alias]
class Model(Initializable):
    @lazy()
    def __init__(self, config, **kwargs):
        super(Model, self).__init__(**kwargs)
        self.config = config
        self.pre_context_embedder = ContextEmbedder(config.pre_embedder, name='pre_context_embedder')
        self.post_context_embedder = ContextEmbedder(config.post_embedder, name='post_context_embedder')
        in1 = 2 + sum(x[2] for x in config.pre_embedder.dim_embeddings)
        self.input_to_rec = MLP(activations=[Tanh()], dims=[in1, config.hidden_state_dim], name='input_to_rec')
        self.rec = LSTM(dim=config.hidden_state_dim, name='recurrent')
        in2 = config.hidden_state_dim + sum(x[2] for x in config.post_embedder.dim_embeddings)
        self.rec_to_output = MLP(activations=[Tanh()], dims=[in2, 2], name='rec_to_output')
        self.sequences = ['latitude', 'latitude_mask', 'longitude']
        self.context = self.pre_context_embedder.inputs + self.post_context_embedder.inputs
        self.inputs = self.sequences + self.context
        self.children = [self.pre_context_embedder, self.post_context_embedder,
                         self.input_to_rec, self.rec, self.rec_to_output]
        self.initial_state_ = shared_floatx_zeros((config.hidden_state_dim,),
                                                  name="initial_state")
        self.initial_cells = shared_floatx_zeros((config.hidden_state_dim,),
                                                 name="initial_cells")

    def _push_initialization_config(self):
        for mlp in [self.input_to_rec, self.rec_to_output]:
            mlp.weights_init = self.config.weights_init
            mlp.biases_init = self.config.biases_init
        self.rec.weights_init = self.config.weights_init

    def get_dim(self, name):
        return self.rec.get_dim(name)

    @application
    def initial_state(self, *args, **kwargs):
        return self.rec.initial_state(*args, **kwargs)

    @recurrent(states=['states', 'cells'], outputs=['destination', 'states', 'cells'],
               sequences=['latitude', 'longitude', 'latitude_mask'])
    def predict_all(self, latitude, longitude, latitude_mask, **kwargs):
        # Normalize the GPS coordinates
        latitude = (latitude - data.train_gps_mean[0]) / data.train_gps_std[0]
        longitude = (longitude - data.train_gps_mean[1]) / data.train_gps_std[1]
        pre_emb = tuple(self.pre_context_embedder.apply(**kwargs))
        latitude = tensor.shape_padright(latitude)
        longitude = tensor.shape_padright(longitude)
        itr = self.input_to_rec.apply(tensor.concatenate(pre_emb + (latitude, longitude), axis=1))
        # Repeat the MLP output to the 4 * dim gate pre-activation width the LSTM expects
        itr = itr.repeat(4, axis=1)
        (next_states, next_cells) = self.rec.apply(itr, kwargs['states'], kwargs['cells'],
                                                   mask=latitude_mask, iterate=False)
        post_emb = tuple(self.post_context_embedder.apply(**kwargs))
        rto = self.rec_to_output.apply(tensor.concatenate(post_emb + (next_states,), axis=1))
        # Un-normalize back to GPS coordinates
        rto = (rto * data.train_gps_std) + data.train_gps_mean
        return (rto, next_states, next_cells)

    @predict_all.property('contexts')
    def predict_all_inputs(self):
        return self.context

    @application(outputs=['destination'])
    def predict(self, latitude, longitude, latitude_mask, **kwargs):
        latitude = latitude.T
        longitude = longitude.T
        latitude_mask = latitude_mask.T
        res = self.predict_all(latitude, longitude, latitude_mask, **kwargs)[0]
        return res[-1]

    @predict.property('inputs')
    def predict_inputs(self):
        return self.inputs

    @application(outputs=['cost_matrix'])
    def cost_matrix(self, latitude, longitude, latitude_mask, **kwargs):
        latitude = latitude.T
        longitude = longitude.T
        latitude_mask = latitude_mask.T
        res = self.predict_all(latitude, longitude, latitude_mask, **kwargs)[0]
        target = tensor.concatenate(
            (kwargs['destination_latitude'].dimshuffle('x', 0, 'x'),
             kwargs['destination_longitude'].dimshuffle('x', 0, 'x')),
            axis=2)
        target = target.repeat(latitude.shape[0], axis=0)
        ce = error.erdist(target.reshape((-1, 2)), res.reshape((-1, 2)))
        ce = ce.reshape(latitude.shape)
        return ce * latitude_mask

    @cost_matrix.property('inputs')
    def cost_matrix_inputs(self):
        return self.inputs + ['destination_latitude', 'destination_longitude']

    @application(outputs=['cost'])
    def cost(self, latitude_mask, **kwargs):
        return self.cost_matrix(latitude_mask=latitude_mask, **kwargs).sum() / latitude_mask.sum()

# ... the rest of the code is omitted here ...