This article summarizes typical usage examples of the Python method blocks.model.Model.get_theano_function. If you are wondering what Model.get_theano_function does, how to use it, or where to find examples of it, the curated code samples below may help. You can also read further about the containing class, blocks.model.Model.
The following presents 5 code examples of Model.get_theano_function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
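Before the examples, here is a minimal sketch of the general pattern (an illustration written for this summary, not taken from the examples below): Model extends ComputationGraph, so get_theano_function() compiles theano.function over the graph's inputs and outputs and returns a callable that maps numpy arrays to a list of output arrays.
import numpy
import theano
from theano import tensor
from blocks.model import Model

x = tensor.matrix('x')                    # symbolic input
y = (x ** 2).sum(axis=1)                  # symbolic output
model = Model(y)                          # inputs are inferred from the graph
fn = model.get_theano_function()          # theano.function(model.inputs, model.outputs)
print(fn(numpy.ones((2, 3), dtype=theano.config.floatX)))  # -> [array([ 3.,  3.])]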
Example 1: test_sampling
# Required import: from blocks.model import Model [as alias]
# Or: from blocks.model.Model import get_theano_function [as alias]
# Additional imports assumed by this snippet (BidirectionalEncoder and Decoder
# are the encoder/decoder bricks from the blocks-examples machine-translation
# model; their exact module path is an assumption):
import numpy
import theano
from blocks.initialization import Constant, IsotropicGaussian, Orthogonal

def test_sampling():
    # Create Theano variables
    sampling_input = theano.tensor.lmatrix("input")
    # Construct model
    encoder = BidirectionalEncoder(vocab_size=10, embedding_dim=5, state_dim=8)
    decoder = Decoder(vocab_size=12, embedding_dim=6, state_dim=8,
                      representation_dim=16, theano_seed=1234)
    # A mask of all ones: every position in the toy batch is real input
    sampling_representation = encoder.apply(
        sampling_input, theano.tensor.ones(sampling_input.shape))
    generateds = decoder.generate(sampling_input, sampling_representation)
    model = Model(generateds[1])
    # Initialize model
    encoder.weights_init = decoder.weights_init = IsotropicGaussian(0.01)
    encoder.biases_init = decoder.biases_init = Constant(0)
    encoder.push_initialization_config()
    decoder.push_initialization_config()
    encoder.bidir.prototype.weights_init = Orthogonal()
    decoder.transition.weights_init = Orthogonal()
    encoder.initialize()
    decoder.initialize()
    # Compile a function for the generated outputs
    sampling_fn = model.get_theano_function()
    # Create literal variables
    numpy.random.seed(1234)
    x = numpy.random.randint(0, 10, size=(1, 2))
    # Call function and check result
    generated_step = sampling_fn(x)
    assert len(generated_step[0].flatten()) == 4
Example 2: __init__
# Required import: from blocks.model import Model [as alias]
# Or: from blocks.model.Model import get_theano_function [as alias]
def __init__(self, model_name, model, stream, **kwargs):
    super(RunOnTest, self).__init__(**kwargs)
    self.model_name = model_name
    # Wrap the prediction graph in a Model to recover its input variables
    cg = Model(model.predict(**stream.inputs()))
    self.inputs = cg.inputs
    self.outputs = model.predict.outputs
    # The test stream must also provide the trip_id of each example
    req_vars_test = model.predict.inputs + ['trip_id']
    self.test_stream = stream.test(req_vars_test)
    # Compile the prediction function once, up front
    self.function = cg.get_theano_function()
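For context, a hypothetical sketch of how such an extension might later consume the compiled function (the do method, the as_dict iteration, and the result handling are assumptions, not part of the original snippet):
def do(self, which_callback, *args):
    for batch in self.test_stream.get_epoch_iterator(as_dict=True):
        feed = [batch[v.name] for v in self.inputs]   # order must match cg.inputs
        outputs = self.function(*feed)                # one array per output variable
        # ... pair outputs with batch['trip_id'] and write the predictions out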
Example 3: print
# Required import: from blocks.model import Model [as alias]
# Or: from blocks.model.Model import get_theano_function [as alias]
print(cg.inputs)
# Strangely, all the examples use DataStreamMonitoring in the MainLoop
model = Model(labels)
print("Model.dict_of_inputs():")
print(model.dict_of_inputs())
print("Model list inputs:")
print([v.name for v in model.inputs])
## Model loading from saved file
model.set_parameter_values(load_parameter_values(save_state_path))
examine_embedding(lookup.W.get_value())
label_ner = model.get_theano_function()
print(model.inputs)
print("printed label_ner.params")
for test_data in data_stream.get_epoch_iterator():
    ordered_batch = test_data[0:3]  # Explicitly strip off the pre-defined labels
    #print(ordered_batch)
    results = label_ner(*ordered_batch)
    #print(results)  # This is a pure array of labels
    inputs = _transpose(ordered_batch)
    for tokens, mask, labels in zip(inputs[0], inputs[1], np.transpose(results)):
        #print(labels)
        for (token, m, label) in zip(tokens, mask, labels.tolist()):
            if m < 0.1:
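The example breaks off at the padding test. A hypothetical completion of the token loop (the break-on-padding and the printed pairing are assumptions about the original intent, based on the Fuel convention that mask values near 0 mark padded positions):
        for (token, m, label) in zip(tokens, mask, labels.tolist()):
            if m < 0.1:
                break              # mask ~0 marks padding: past the sequence end
            print(token, label)    # one (token id, predicted label) pair per step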
Example 4: main
# Required import: from blocks.model import Model [as alias]
# Or: from blocks.model.Model import get_theano_function [as alias]
# ......... (some code omitted here) .........
    # (these lines sit inside a dropout `if` branch whose opening is elided above)
    from blocks.roles import INPUT
    inputs = VariableFilter(roles=[INPUT])(cg.variables)
    # dropout_target = [v for k, v in newmodel.get_params().iteritems()
    #                   if k.find('MLP') >= 0 and k.endswith('.W')
    #                   and not k.endswith('MLP_enc/linear_0.W')]
    dropout_target = [v for v in inputs if v.name.startswith('linear_')]
    cg = apply_dropout(cg, dropout_target, 0.5)
    target_cost = cg.outputs[0]
else:
    target_cost = cost

if name == 'mnist':
    if predict:
        train_ds = MNIST("train")
    else:
        train_ds = MNIST("train", sources=['features'])
    test_ds = MNIST("test")
else:
    datasource_dir = os.path.join(fuel.config.data_path, name)
    datasource_fname = os.path.join(datasource_dir, name + '.hdf5')
    if predict:
        train_ds = H5PYDataset(datasource_fname, which_set='train')
    else:
        train_ds = H5PYDataset(datasource_fname, which_set='train',
                               sources=['features'])
    test_ds = H5PYDataset(datasource_fname, which_set='test')
train_s = Flatten(DataStream(train_ds,
                             iteration_scheme=ShuffledScheme(
                                 train_ds.num_examples, batch_size)))
test_s = Flatten(DataStream(test_ds,
                            iteration_scheme=ShuffledScheme(
                                test_ds.num_examples, batch_size)))

if predict:
    from itertools import chain
    fprop = newmodel.get_theano_function()
    allpdata = None
    alledata = None
    f = train_s.sources.index('features')
    assert f == test_s.sources.index('features')
    sources = test_s.sources
    alllabels = {s: [] for s in sources if s != 'features'}
    for data in chain(train_s.get_epoch_iterator(), test_s.get_epoch_iterator()):
        for s, d in zip(sources, data):
            if s != 'features':
                alllabels[s].extend(list(d))
        pdata, edata = fprop(data[f])
        if allpdata is None:
            allpdata = pdata
        else:
            allpdata = np.vstack((allpdata, pdata))
        if alledata is None:
            alledata = edata
        else:
            alledata = np.vstack((alledata, edata))
    print('Saving', allpdata.shape, 'intermediate layer,',
          'for all training and test examples, to', name + '_z.npy')
    np.save(name + '_z', allpdata)
    print('Saving', alledata.shape, 'last encoder layer to', name + '_e.npy')
    np.save(name + '_e', alledata)
    print('Saving additional labels/targets:', ','.join(alllabels.keys()),
          'of size', ','.join(str(len(x)) for x in alllabels.values()),
          'to', name + '_labels.pkl')
    with open(name + '_labels.pkl', 'wb') as fp:
        pickle.dump(alllabels, fp, -1)
else:
    cg = ComputationGraph([target_cost])
    algorithm = GradientDescent(
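Example 4 breaks off while constructing the training algorithm. For orientation, a hedged sketch of how a Blocks training setup of this shape typically continues (the step rule, extension list, and MainLoop wiring below are assumptions based on the standard Blocks API, not the omitted code):
from blocks.algorithms import Adam, GradientDescent
from blocks.extensions import FinishAfter, Printing
from blocks.extensions.monitoring import TrainingDataMonitoring
from blocks.main_loop import MainLoop

algorithm = GradientDescent(
    cost=target_cost,
    parameters=cg.parameters,    # called `params` in older Blocks releases
    step_rule=Adam())
main_loop = MainLoop(
    algorithm=algorithm,
    data_stream=train_s,
    model=Model(target_cost),
    extensions=[TrainingDataMonitoring([target_cost], after_epoch=True),
                FinishAfter(after_n_epochs=10),
                Printing()])
main_loop.run()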
Example 5: main
# Required import: from blocks.model import Model [as alias]
# Or: from blocks.model.Model import get_theano_function [as alias]
# ......... (some code omitted here) .........
# Add sampling
if config['hook_samples'] >= 1:
    logger.info("Building sampler")
    extensions.append(sample)
# Add early stopping based on bleu
if config['bleu_script'] is not None:
    logger.info("Building bleu validator")
    extensions.append(
        BleuValidator(sampling_input, samples=samples, config=config,
                      model=search_model, data_stream=dev_stream,
                      normalize=config['normalized_bleu'],
                      every_n_batches=config['bleu_val_freq']))
# Reload model if necessary
if config['reload']:
    extensions.append(LoadNMT(config['saveto']))
# Plot cost in bokeh if necessary
if use_bokeh and BOKEH_AVAILABLE:
    extensions.append(
        Plot('Cs-En', channels=[['decoder_cost_cost']],
             after_batch=True))
sampling_fn = search_model.get_theano_function()
print(" - - - - - - - - - - - - - - ")
sort_k_batches = 12
batch_size = 80
seq_len = 50
trg_ivocab = None
src_vocab_size = config['src_vocab_size']
trg_vocab_size = config['trg_vocab_size']
unk_id = config['unk_id']
src_vocab = config['src_vocab']
trg_vocab = config['trg_vocab']
src_vocab = ensure_special_tokens(
    src_vocab if isinstance(src_vocab, dict)
    else cPickle.load(open(src_vocab)),
    bos_idx=0, eos_idx=src_vocab_size - 1, unk_idx=unk_id)
trg_vocab = ensure_special_tokens(
    trg_vocab if isinstance(trg_vocab, dict) else
    cPickle.load(open(trg_vocab)),
    bos_idx=0, eos_idx=trg_vocab_size - 1, unk_idx=unk_id)
if not trg_ivocab:
    trg_ivocab = {v: k for k, v in trg_vocab.items()}
src_data = config['src_data']
trg_data = config['trg_data']
src_dataset = TextFile([src_data], src_vocab, None)
trg_dataset = TextFile([trg_data], trg_vocab, None)
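Downstream (not shown here), the compiled sampling_fn is typically fed one encoded source sentence at a time. A hypothetical sketch of that call (the 1 x seq_len batch shape follows the NMT convention, but the example token ids and the position of the token outputs in the returned list are assumptions):
import numpy
seq = [0, 4, 7, src_vocab_size - 1]           # token ids of one source sentence (made up)
input_ = numpy.array([seq], dtype='int64')    # a batch containing a single sentence
outputs = sampling_fn(input_)                 # one numpy array per generator output
sample_ids = outputs[1].flatten()             # which output holds tokens is an assumption
print(' '.join(trg_ivocab[int(i)] for i in sample_ids))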