This article collects typical usage examples of blocks.filter.VariableFilter in Python. If you are wondering what VariableFilter does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further examples from its containing module, blocks.filter.
The following 15 code examples of filter.VariableFilter are shown below, sorted by popularity by default.
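As a quick orientation before the examples, here is a minimal sketch of the common pattern: build a ComputationGraph and use VariableFilter to select variables by role, brick, or application. The brick dimensions and variable names below are illustrative and not taken from any example on this page.

from theano import tensor
from blocks.bricks import MLP, Logistic
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import PARAMETER, OUTPUT

x = tensor.matrix('features')
mlp = MLP(activations=[Logistic(), Logistic()], dims=[784, 100, 10])
cg = ComputationGraph(mlp.apply(x))

# All parameter variables (weights and biases) of the graph
parameters = VariableFilter(roles=[PARAMETER])(cg.variables)

# Outputs of the Linear bricks inside the MLP
linear_outputs = VariableFilter(
    roles=[OUTPUT], bricks=mlp.linear_transformations)(cg.variables)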
Example 1: init_beam_search

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def init_beam_search(self, beam_size):
    """Compile beam search and set the beam size.

    See Blocks issue #500.
    """
    if hasattr(self, '_beam_search') and self.beam_size == beam_size:
        # Only recompile if the user wants a different beam size
        return
    self.beam_size = beam_size
    generated = self.get_generate_graph(use_mask=False, n_steps=3)
    cg = ComputationGraph(generated.values())
    samples, = VariableFilter(
        applications=[self.generator.generate], name="outputs")(cg)
    self._beam_search = BeamSearch(beam_size, samples)
    self._beam_search.compile()
Example 2: test_saved_inner_graph

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def test_saved_inner_graph():
    """Make sure that the original inner graph is saved."""
    x = tensor.tensor3()
    recurrent = SimpleRecurrent(dim=3, activation=Tanh())
    y = recurrent.apply(x)

    application_call = get_application_call(y)
    assert application_call.inner_inputs
    assert application_call.inner_outputs

    cg = ComputationGraph(application_call.inner_outputs)
    # Check that the inner scan graph is annotated
    # with `recurrent.apply`
    assert len(VariableFilter(applications=[recurrent.apply])(cg)) == 3
    # Check that the inner graph is equivalent to the one
    # produced by a stand-alone of `recurrent.apply`
    assert is_same_graph(application_call.inner_outputs[0],
                         recurrent.apply(*application_call.inner_inputs,
                                         iterate=False))
Example 3: test_collect

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def test_collect():
    x = tensor.matrix()
    mlp = MLP(activations=[Logistic(), Logistic()], dims=[784, 100, 784],
              use_bias=False)
    cost = SquaredError().apply(x, mlp.apply(x))
    cg = ComputationGraph(cost)
    var_filter = VariableFilter(roles=[PARAMETER])
    W1, W2 = var_filter(cg.variables)
    for i, W in enumerate([W1, W2]):
        W.set_value(numpy.ones_like(W.get_value()) * (i + 1))

    new_cg = collect_parameters(cg, cg.shared_variables)
    collected_parameters, = new_cg.shared_variables
    assert numpy.all(collected_parameters.get_value()[:784 * 100] == 1.)
    assert numpy.all(collected_parameters.get_value()[784 * 100:] == 2.)
    assert collected_parameters.ndim == 1

    W1, W2 = VariableFilter(roles=[COLLECTED])(new_cg.variables)
    assert W1.eval().shape == (784, 100)
    assert numpy.all(W1.eval() == 1.)
    assert W2.eval().shape == (100, 784)
    assert numpy.all(W2.eval() == 2.)
Example 4: setup_model

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def setup_model(p):
    ladder = LadderAE(p)

    # Setup inputs
    input_type = TensorType('float32', [False] * (len(p.encoder_layers[0]) + 1))
    x_only = input_type('features_unlabeled')
    x = input_type('features_labeled')
    y = theano.tensor.lvector('targets_labeled')
    ladder.apply(x, y, x_only)

    # Load parameters if requested
    if p.get('load_from'):
        with open(p.load_from + '/trained_params.npz') as f:
            loaded = numpy.load(f)
        cg = ComputationGraph([ladder.costs.total])
        current_params = VariableFilter(roles=[PARAMETER])(cg.variables)
        logger.info('Loading parameters: %s' % ', '.join(loaded.keys()))
        for param in current_params:
            assert param.get_value().shape == loaded[param.name].shape
            param.set_value(loaded[param.name])

    return ladder
Example 5: create_training_computation_graphs

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def create_training_computation_graphs():
    x = tensor.tensor4('features')
    y = tensor.imatrix('targets')

    convnet, mlp = create_model_bricks()
    y_hat = mlp.apply(convnet.apply(x).flatten(ndim=2))
    cost = BinaryCrossEntropy().apply(y, y_hat)
    accuracy = 1 - tensor.neq(y > 0.5, y_hat > 0.5).mean()
    cg = ComputationGraph([cost, accuracy])

    # Create a graph which uses batch statistics for batch normalization
    # as well as dropout on selected variables
    bn_cg = apply_batch_normalization(cg)
    bricks_to_drop = ([convnet.layers[i] for i in (5, 11, 17)] +
                      [mlp.application_methods[1].brick])
    variables_to_drop = VariableFilter(
        roles=[OUTPUT], bricks=bricks_to_drop)(bn_cg.variables)
    bn_dropout_cg = apply_dropout(bn_cg, variables_to_drop, 0.5)

    return cg, bn_dropout_cg
Example 6: __init__

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def __init__(self, inputs, cg, reward_emitter, data, **kwargs):
    self.input_accumulator = shared_floatx_zeros((2, 2), dtype='int64')
    self.gain_accumulator = shared_floatx_zeros((2, 2, 2))
    self.reward_accumulator = shared_floatx_zeros((2, 2, 2), dtype='int64')
    self.dataset = data.get_dataset('train')
    self.inputs = inputs
    self.gains, = VariableFilter(
        applications=[reward_emitter.cost],
        roles=[INPUT], name='readouts')(cg.variables)
    self.reward, = VariableFilter(
        theano_name=reward_emitter.GAIN_MATRIX)(cg.variables)
    kwargs.setdefault('before_training', True)
    kwargs.setdefault('after_batch', True)
    super(LogInputsGains, self).__init__(**kwargs)
Example 7: get_cost_graph

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def get_cost_graph(self, batch=True,
                   prediction=None, prediction_mask=None):
    if batch:
        inputs = self.inputs
        inputs_mask = self.inputs_mask
        groundtruth = self.labels
        groundtruth_mask = self.labels_mask
    else:
        inputs, inputs_mask = self.bottom.single_to_batch_inputs(
            self.single_inputs)
        groundtruth = self.single_labels[:, None]
        groundtruth_mask = None

    if not prediction:
        prediction = groundtruth
    if not prediction_mask:
        prediction_mask = groundtruth_mask

    cost = self.cost(inputs_mask=inputs_mask,
                     labels=prediction,
                     labels_mask=prediction_mask,
                     **inputs)
    cost_cg = ComputationGraph(cost)
    if self.criterion['name'].startswith("mse"):
        placeholder, = VariableFilter(theano_name='groundtruth')(cost_cg)
        cost_cg = cost_cg.replace({placeholder: groundtruth})
    return cost_cg
Example 8: __init__

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def __init__(self, beam_size, samples):
    self.beam_size = beam_size

    # Extracting information from the sampling computation graph
    cg = ComputationGraph(samples)
    self.inputs = cg.inputs
    self.generator = get_brick(samples)
    if not isinstance(self.generator, BaseSequenceGenerator):
        raise ValueError
    self.generate_call = get_application_call(samples)
    if (not self.generate_call.application ==
            self.generator.generate):
        raise ValueError
    self.inner_cg = ComputationGraph(self.generate_call.inner_outputs)

    # Fetching names from the sequence generator
    self.context_names = self.generator.generate.contexts
    self.state_names = self.generator.generate.states

    # Parsing the inner computation graph of sampling scan
    self.contexts = [
        VariableFilter(bricks=[self.generator],
                       name=name,
                       roles=[INPUT])(self.inner_cg)[0]
        for name in self.context_names]
    self.input_states = []
    # Includes only those state names that were actually used
    # in 'generate'
    self.input_state_names = []
    for name in self.generator.generate.states:
        var = VariableFilter(
            bricks=[self.generator], name=name,
            roles=[INPUT])(self.inner_cg)
        if var:
            self.input_state_names.append(name)
            self.input_states.append(var[0])

    self.compiled = False
Example 9: _compile_logprobs_computer

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def _compile_logprobs_computer(self):
    # This filtering should return variables that are identical
    # in terms of computations, so we do not care which one to use.
    readouts = VariableFilter(
        applications=[self.generator.readout.readout],
        roles=[OUTPUT])(self.inner_cg)[0]
    costs = self.generator.readout.costs(readouts)
    self.logprobs_computer = function(
        self.contexts + self.input_states, costs,
        on_unused_input='ignore')
Example 10: test_variable_filter_roles_error

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def test_variable_filter_roles_error():
    # Creating computation graph
    brick1 = Linear(input_dim=2, output_dim=2, name='linear1')
    x = tensor.vector()
    h1 = brick1.apply(x)
    cg = ComputationGraph(h1)
    # Testing role error: `roles` is given a bare role instead of a list
    VariableFilter(roles=PARAMETER)(cg.variables)
Example 11: test_variable_filter_applications_error

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def test_variable_filter_applications_error():
    # Creating computation graph
    brick1 = Linear(input_dim=2, output_dim=2, name='linear1')
    x = tensor.vector()
    h1 = brick1.apply(x)
    cg = ComputationGraph(h1)
    # `applications` is given a bare application instead of a list
    VariableFilter(applications=brick1.apply)(cg.variables)
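The two error tests above pass a bare role or application object where the other examples on this page consistently pass a list, so they presumably exercise VariableFilter's argument checking. For contrast, here is a minimal sketch of the list-wrapped calls on the same setup (the result variable names are illustrative):

from theano import tensor
from blocks.bricks import Linear
from blocks.filter import VariableFilter
from blocks.graph import ComputationGraph
from blocks.roles import PARAMETER

brick1 = Linear(input_dim=2, output_dim=2, name='linear1')
x = tensor.vector()
cg = ComputationGraph(brick1.apply(x))

# Roles and applications wrapped in lists, as in the other examples
parameters = VariableFilter(roles=[PARAMETER])(cg.variables)
apply_variables = VariableFilter(applications=[brick1.apply])(cg.variables)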
Example 12: test_many_steps

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def test_many_steps(self):
    x = tensor.tensor3('x')
    mask = tensor.matrix('mask')
    h = self.simple.apply(x, mask=mask, iterate=True)
    calc_h = theano.function(inputs=[x, mask], outputs=[h])

    x_val = 0.1 * numpy.asarray(list(itertools.permutations(range(4))),
                                dtype=theano.config.floatX)
    x_val = numpy.ones((24, 4, 3),
                       dtype=theano.config.floatX) * x_val[..., None]
    mask_val = numpy.ones((24, 4), dtype=theano.config.floatX)
    mask_val[12:24, 3] = 0
    h_val = numpy.zeros((25, 4, 3), dtype=theano.config.floatX)
    for i in range(1, 25):
        h_val[i] = numpy.tanh(h_val[i - 1].dot(
            2 * numpy.ones((3, 3))) + x_val[i - 1])
        h_val[i] = (mask_val[i - 1, :, None] * h_val[i] +
                    (1 - mask_val[i - 1, :, None]) * h_val[i - 1])
    h_val = h_val[1:]
    assert_allclose(h_val, calc_h(x_val, mask_val)[0], rtol=1e-04)

    # Also test that initial state is a parameter
    initial_state, = VariableFilter(roles=[INITIAL_STATE])(
        ComputationGraph(h))
    assert is_shared_variable(initial_state)
    assert initial_state.name == 'initial_state'
Example 13: _get_bn_params

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def _get_bn_params(self, output_vars):
    # Pick out the nodes with batch normalization vars
    cg = ComputationGraph(output_vars)
    var_filter = VariableFilter(roles=[BNPARAM])
    bn_ps = var_filter(cg.variables)

    if len(bn_ps) == 0:
        logger.warn('No batch normalization parameters found - is' +
                    ' batch normalization turned off?')
        self._bn = False
        self._counter = None
        self._counter_max = None
        bn_share = []
        output_vars_replaced = output_vars
    else:
        self._bn = True
        assert len(set([p.name for p in bn_ps])) == len(bn_ps), \
            'Some batch norm params have the same name'
        logger.info('Batch norm parameters: %s' %
                    ', '.join([p.name for p in bn_ps]))

        # Filter out the shared variables from the model updates
        def filter_share(par):
            lst = [up for up in cg.updates
                   if up.name == 'shared_%s' % par.name]
            assert len(lst) == 1
            return lst[0]
        bn_share = map(filter_share, bn_ps)

        # Replace the BN coefficients in the test data model - replace the
        # Theano variables in the test graph with the shared variables
        output_vars_replaced = cg.replace(zip(bn_ps, bn_share)).outputs

        # Pick out the counter
        self._counter = self._param_from_updates(cg.updates, 'counter')
        self._counter_max = self._param_from_updates(cg.updates, 'counter_max')

    return bn_ps, bn_share, output_vars_replaced
Example 14: _param_from_updates

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def _param_from_updates(self, updates, p_name):
    var_filter = VariableFilter(roles=[BNPARAM])
    bn_ps = var_filter(updates.keys())
    p = [p for p in bn_ps if p.name == p_name]
    assert len(p) == 1, 'No %s or more than one %s' % (p_name, p_name)
    return p[0]
Example 15: __init__

# Required import: from blocks import filter [as alias]
# Or: from blocks.filter import VariableFilter [as alias]
def __init__(self, samples):
    # Extracting information from the sampling computation graph
    self.cg = ComputationGraph(samples)
    self.inputs = self.cg.inputs
    self.generator = get_brick(samples)
    if not isinstance(self.generator, SequenceGenerator):
        raise ValueError
    self.generate_call = get_application_call(samples)
    if (not self.generate_call.application ==
            self.generator.generate):
        raise ValueError
    self.inner_cg = ComputationGraph(self.generate_call.inner_outputs)

    # Fetching names from the sequence generator
    self.context_names = self.generator.generate.contexts
    self.state_names = self.generator.generate.states

    # Parsing the inner computation graph of sampling scan
    self.contexts = [
        VariableFilter(bricks=[self.generator],
                       name=name,
                       roles=[INPUT])(self.inner_cg)[0]
        for name in self.context_names]
    self.input_states = []
    # Includes only those state names that were actually used
    # in 'generate'
    self.input_state_names = []
    for name in self.generator.generate.states:
        var = VariableFilter(
            bricks=[self.generator], name=name,
            roles=[INPUT])(self.inner_cg)
        if var:
            self.input_state_names.append(name)
            self.input_states.append(var[0])

    self.compiled = False