This article collects typical usage examples of the Python function theano.gof.op.get_debug_values. If you are wondering exactly what get_debug_values does, how to call it, or what it looks like in real code, the curated examples here should help.
The sections below present 15 code examples of get_debug_values, drawn from open-source projects and ordered by popularity.
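Before the examples, here is a minimal standalone sketch of what get_debug_values does (this snippet is illustrative and not taken from any of the projects below): when Theano's compute_test_value mode is enabled, it returns the concrete test values attached to the given symbolic variables, so code can run eager shape and value checks while a graph is being built; when the mode is 'off' it returns an empty list and the loop body simply never runs.

import numpy as np
import theano
import theano.tensor as T
from theano.gof.op import get_debug_values

theano.config.compute_test_value = 'warn'  # enable test values

x = T.matrix('x')
x.tag.test_value = np.zeros((5, 3), dtype=theano.config.floatX)
y = T.dot(x, np.ones((3, 2), dtype=theano.config.floatX))

# With test values enabled, the loop sees one concrete numpy array per
# variable; with them disabled, get_debug_values returns [] and the
# checks are skipped at no cost.
for x_val, y_val in get_debug_values(x, y):
    assert x_val.shape == (5, 3)
    assert y_val.shape == (5, 2)

This "check only when test values are available" pattern is exactly what the examples below rely on.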
Example 1: fprop
def fprop(self, state_below):
    self.input_space.validate(state_below)

    if self.needs_reformat:
        state_below = self.input_space.format_as(state_below, self.desired_space)

    # Runs only when test values are attached; checks the concrete batch size.
    for value in get_debug_values(state_below):
        if self.mlp.batch_size is not None and value.shape[0] != self.mlp.batch_size:
            raise ValueError("state_below should have batch size " +
                             str(self.mlp.batch_size) + " but has " +
                             str(value.shape[0]))

    self.desired_space.validate(state_below)
    assert state_below.ndim == 2

    if not hasattr(self, 'no_affine'):
        self.no_affine = False

    if self.no_affine:
        rval = state_below
    else:
        assert self.W.ndim == 2
        b = self.b
        W = self.W
        rval = T.dot(state_below, W) + b

    for value in get_debug_values(rval):
        if self.mlp.batch_size is not None:
            assert value.shape[0] == self.mlp.batch_size

    return rval
Example 2: entropy_h
def entropy_h(self, H_hat):
    """
    Entropy of the hidden layers under the mean field distribution
    defined by H_hat.
    """
    # Mean field parameters must be valid probabilities.
    for Hv in get_debug_values(H_hat[0]):
        assert Hv.min() >= 0.0
        assert Hv.max() <= 1.0

    total = entropy_binary_vector(H_hat[0])

    for H in H_hat[1:]:
        for Hv in get_debug_values(H):
            assert Hv.min() >= 0.0
            assert Hv.max() <= 1.0
        total += entropy_binary_vector(H)

    return total
Example 3: fprop
def fprop(self, state_below):
    self.input_space.validate(state_below)

    if self.needs_reformat:
        state_below = self.input_space.format_as(state_below, self.desired_space)

    for value in get_debug_values(state_below):
        if self.mlp.batch_size is not None and value.shape[0] != self.mlp.batch_size:
            raise ValueError("state_below should have batch size " +
                             str(self.mlp.batch_size) + " but has " +
                             str(value.shape[0]))

    self.desired_space.validate(state_below)
    assert state_below.ndim == 2
    assert self.W.ndim == 3

    Z = T.tensordot(state_below, self.W, axes=[[1], [0]]) + self.b
    rval = batched_softmax(Z)

    for value in get_debug_values(rval):
        if self.mlp.batch_size is not None:
            assert value.shape[0] == self.mlp.batch_size

    return rval
Example 4: fprop
def fprop(self, state_below):
    self.input_space.validate(state_below)

    if self.needs_reformat:
        state_below = self.input_space.format_as(state_below, self.desired_space)

    for value in get_debug_values(state_below):
        if value.shape[0] != self.mlp.batch_size:
            raise ValueError("state_below should have batch size " +
                             str(self.mlp.batch_size) + " but has " +
                             str(value.shape[0]))

    self.desired_space.validate(state_below)

    assert self.W.ndim == 2
    assert state_below.ndim == 2

    b = self.b
    Z = T.dot(state_below, self.W) + b
    rval = T.nnet.softmax(Z)

    for value in get_debug_values(rval):
        assert value.shape[0] == self.mlp.batch_size

    return rval
Example 5: fprop
def fprop(self, state_below):
    self.input_space.validate(state_below)

    if self.needs_reformat:
        state_below = self.input_space.format_as(state_below, self.desired_space)

    for value in get_debug_values(state_below):
        if self.mlp.batch_size is not None and value.shape[0] != self.mlp.batch_size:
            raise ValueError("state_below should have batch size " +
                             str(self.mlp.batch_size) + " but has " +
                             str(value.shape[0]))

    self.desired_space.validate(state_below)
    assert state_below.ndim == 2

    # Low-rank factorization of the weight matrix: W = V U
    W = T.dot(self.V, self.U)
    assert W.ndim == 2

    Z = T.dot(state_below, W.T)
    rval = Z

    for value in get_debug_values(rval):
        if self.mlp.batch_size is not None:
            assert value.shape[0] == self.mlp.batch_size

    return (rval, state_below)
Example 6: fprop
def fprop(self, state_below, targets):
    self.input_space.validate(state_below)

    if self.needs_reformat:
        state_below = self.input_space.format_as(state_below, self.desired_space)

    for value in get_debug_values(state_below):
        if self.mlp.batch_size is not None and value.shape[0] != self.mlp.batch_size:
            raise ValueError("state_below should have batch size " +
                             str(self.mlp.batch_size) + " but has " +
                             str(value.shape[0]))

    self.desired_space.validate(state_below)
    assert state_below.ndim == 2

    if not hasattr(self, 'no_affine'):
        self.no_affine = False

    if self.no_affine:
        raise NotImplementedError()

    assert self.W_class.ndim == 3
    assert self.W_cluster.ndim == 2

    # Cluster probabilities: softmax(h W_cluster + b_cluster)
    probcluster = T.dot(state_below, self.W_cluster) + self.b_cluster
    probcluster = T.nnet.softmax(probcluster)

    # TODO: check this line again
    batch_clusters = self.array_clusters[T.cast(T.argmax(targets).flatten(), 'int32')]

    # Within-cluster class scores, computed per example with GroupDot
    Z = T.nnet.GroupDot(self.n_clusters)(state_below,
                                         self.W_class,
                                         self.b_class,
                                         T.cast(batch_clusters, 'int32'))
    probclass = T.nnet.softmax(Z)

    for value in get_debug_values(probclass):
        if self.mlp.batch_size is not None:
            assert value.shape[0] == self.mlp.batch_size

    return probclass, probcluster
Example 7: lrn_same_map
def lrn_same_map(c01b, size, pow, scale, image_side):
    mx = None

    for c01bv in get_debug_values(c01b):
        assert not np.any(np.isinf(c01bv))
        assert c01bv.shape[1] == image_side
        assert c01bv.shape[2] == image_side

    # Zero-padded buffers for the squared activations and for a count of
    # how many window positions cover each pixel.
    new_side = size - 1 + image_side
    wide_infinity = T.alloc(0.0,
                            c01b.shape[0],
                            new_side,
                            new_side,
                            c01b.shape[3])
    c01b_pad = T.set_subtensor(
        wide_infinity[:, 1:1 + image_side, 1:1 + image_side, :],
        T.sqr(c01b))

    wide_infinity_count = T.alloc(0, c01b.shape[0], new_side,
                                  new_side, c01b.shape[3])
    c01b_count = T.set_subtensor(
        wide_infinity_count[:, 1:1 + image_side, 1:1 + image_side, :], 1)

    # Accumulate the squared activations over the size x size neighborhood.
    for row_within_pool in xrange(size):
        row_stop = image_side + row_within_pool
        for col_within_pool in xrange(size):
            col_stop = image_side + col_within_pool
            cur = c01b_pad[:,
                           row_within_pool:row_stop:1,
                           col_within_pool:col_stop:1,
                           :]
            cur_count = c01b_count[:,
                                   row_within_pool:row_stop:1,
                                   col_within_pool:col_stop:1,
                                   :]
            if mx is None:
                mx = cur
                count = cur_count
            else:
                mx = mx + cur
                count = count + cur_count

    # Normalizer: (1 + scale * mean(sqr)) ** pow
    mx /= count
    mx = scale * mx
    mx = mx + 1

    for mxv in get_debug_values(mx):
        assert not np.any(np.isnan(mxv))
        assert not np.any(np.isinf(mxv))

    new_c01b = c01b / T.pow(mx, pow)
    return new_c01b
Example 8: expected_energy_term
def expected_energy_term(self, state, average, state_below, average_below):
    # state = Print('h_state', attrs=['min', 'max'])(state)
    self.input_space.validate(state_below)

    if self.requires_reformat:
        if not isinstance(state_below, tuple):
            for sb in get_debug_values(state_below):
                if sb.shape[0] != self.dbm.batch_size:
                    raise ValueError("self.dbm.batch_size is %d but got "
                                     "shape of %d" %
                                     (self.dbm.batch_size, sb.shape[0]))
                assert reduce(lambda x, y: x * y, sb.shape[1:]) == self.input_dim
        state_below = self.input_space.format_as(state_below, self.desired_space)

    # The energy function is linear, so it does not matter whether we are
    # averaging or not. Specifically, the terms are -u^T W d - b^T d, where
    # u is the upward state of the layer below and d is the downward state
    # of this layer.
    bias_term = T.dot(state, self.b)
    weights_term = (self.transformer.lmul(state_below) * state).sum(axis=1)

    rval = -bias_term - weights_term

    assert rval.ndim == 1

    return rval
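The comment in the example above carries the whole trick: because the energy is linear in the downward state d, the expectation of the energy equals the energy of the expected state. A quick standalone NumPy check of that claim (all names here are ad hoc, not pylearn2 code):

import numpy as np

rng = np.random.RandomState(0)
W = rng.randn(4, 3)   # weights between the two layers
b = rng.randn(3)      # biases of this layer
u = rng.randn(4)      # upward state of the layer below

# 1000 sampled binary downward states d
d = (rng.rand(1000, 3) < 0.5).astype(float)

# -u^T W d - b^T d, evaluated per sample
energies = -np.dot(d, np.dot(u, W)) - np.dot(d, b)
mean_d = d.mean(axis=0)

# Linearity: the mean of the energies equals the energy of the mean state.
assert np.allclose(energies.mean(),
                   -np.dot(mean_d, np.dot(u, W)) - np.dot(mean_d, b))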
Example 9: kl
def kl(Y, Y_hat, batch_axis):
    """
    Warning: This function expects a sigmoid nonlinearity in the
    output layer. It returns a batch (vector) containing, for each
    example, the mean across units of the KL divergence KL(P || Q),
    where P is defined by Y and Q by Y_hat:

        p log p - p log q + (1 - p) log (1 - p) - (1 - p) log (1 - q)

    For binary p, the p log p and (1 - p) log (1 - p) terms are zero,
    leaving

        - p log q - (1 - p) log (1 - q)
        = - p log sigmoid(z) - (1 - p) log sigmoid(-z)
        = p softplus(-z) + (1 - p) softplus(z)

    Parameters
    ----------
    Y : Variable
        Targets for the sigmoid outputs. Currently Y must be purely
        binary. If it is not, you will still get the right gradient,
        but the value in the monitoring channel will be wrong.
    Y_hat : Variable
        Predictions made by the sigmoid layer. Y_hat must be generated
        by fprop, i.e., it must be a symbolic sigmoid.
    batch_axis : int
        The axis that indexes examples in the batch; the KL divergence
        is averaged across all other axes.

    Returns
    -------
    ave : Variable
        Average KL divergence between Y and Y_hat.
    """
    assert hasattr(Y_hat, 'owner')
    assert batch_axis is not None

    owner = Y_hat.owner
    assert owner is not None
    op = owner.op

    if not hasattr(op, 'scalar_op'):
        raise ValueError("Expected Y_hat to be generated by an Elemwise "
                         "op, got " + str(op) + " of type " + str(type(op)))
    assert isinstance(op.scalar_op, T.nnet.sigm.ScalarSigmoid)

    for Yv in get_debug_values(Y):
        if not (Yv.min() >= 0.0 and Yv.max() <= 1.0):
            raise ValueError("Expected Y to be between 0 and 1. Either "
                             "Y < 0 or Y > 1 was found in the input.")

    # Recover the pre-sigmoid activation z and use the numerically
    # stable softplus form.
    z, = owner.inputs

    term_1 = Y * T.nnet.softplus(-z)
    term_2 = (1 - Y) * T.nnet.softplus(z)

    total = term_1 + term_2
    naxes = total.ndim
    axes_to_reduce = list(range(naxes))
    del axes_to_reduce[batch_axis]
    ave = total.mean(axis=axes_to_reduce)

    return ave
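The softplus rewrite in the docstring above is easy to verify numerically. A minimal standalone NumPy check (the softplus helper here is ad hoc, not part of pylearn2):

import numpy as np

def softplus(z):
    # numerically stable log(1 + exp(z))
    return np.logaddexp(0.0, z)

z = np.array([-3.0, -0.5, 0.0, 0.5, 3.0])  # pre-sigmoid activations
q = 1.0 / (1.0 + np.exp(-z))               # q = sigmoid(z)
p = np.array([0.0, 1.0, 1.0, 0.0, 1.0])    # binary targets

direct = -p * np.log(q) - (1 - p) * np.log(1 - q)
via_softplus = p * softplus(-z) + (1 - p) * softplus(z)

assert np.allclose(direct, via_softplus)

The softplus form is what the code computes: it avoids taking the log of a sigmoid that has saturated to 0 or 1.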
Example 10: expand_2d
def expand_2d(b01c, expand_shape, expand_stride, image_shape):
    for b01cv in get_debug_values(b01c):
        assert not np.any(np.isinf(b01cv))
        assert b01cv.shape[1] == image_shape[0]
        assert b01cv.shape[2] == image_shape[1]
        assert b01cv.shape[3] == np.prod(expand_shape)

    for i in range(len(expand_shape)):
        assert expand_shape[i] % expand_stride[i] == 0

    b0101 = b01c.reshape((b01c.shape[0], image_shape[0], image_shape[1],
                          expand_shape[0], expand_shape[1]))

    required_r = (image_shape[0] - 1) * expand_stride[0] + expand_shape[0]
    required_c = (image_shape[1] - 1) * expand_stride[1] + expand_shape[1]

    wide_b01 = T.alloc(0., b01c.shape[0], required_r, required_c)

    for row_within_expand in xrange(expand_shape[0]):
        row_stop = (image_shape[0] - 1) * expand_stride[0] + \
            row_within_expand + 1
        for col_within_expand in xrange(expand_shape[1]):
            col_stop = (image_shape[1] - 1) * expand_stride[1] + \
                col_within_expand + 1
            wide_b01 = T.inc_subtensor(
                wide_b01[:,
                         row_within_expand:row_stop:expand_stride[0],
                         col_within_expand:col_stop:expand_stride[1]],
                b0101[:, :, :, row_within_expand, col_within_expand])

    wide_b01 = wide_b01 / (expand_shape[0] / expand_stride[0]) ** 2

    wide_b01c = wide_b01.reshape((b01c.shape[0], required_r, required_c, 1))
    return wide_b01c
Example 11: _validate_impl
def _validate_impl(self, is_numeric, batch):
    # checks that batch isn't a tuple, checks batch.type against self.dtype
    super(IndexSequenceSpace, self)._validate_impl(is_numeric, batch)

    if is_numeric:
        # Use the 'CudaNdarray' string to avoid importing
        # theano.sandbox.cuda when it is not available
        if not isinstance(batch, np.ndarray) \
           and str(type(batch)) != "<type 'CudaNdarray'>":
            raise TypeError("The value of an IndexSequenceSpace batch "
                            "should be a numpy.ndarray or CudaNdarray, "
                            "but is %s." % str(type(batch)))
        if batch.ndim != 2:
            raise ValueError("The value of an IndexSequenceSpace batch "
                             "must be 2D, got %d dimensions for %s." %
                             (batch.ndim, batch))
        if batch.shape[1] != self.dim:
            raise ValueError("The width of an IndexSequenceSpace batch "
                             "must match the space's dimension, but "
                             "batch has shape %s and dim = %d." %
                             (str(batch.shape), self.dim))
    else:
        if not isinstance(batch, theano.gof.Variable):
            raise TypeError("IndexSequenceSpace batch should be a theano "
                            "Variable, got " + str(type(batch)))
        if not isinstance(batch.type, (theano.tensor.TensorType,
                                       CudaNdarrayType)):
            raise TypeError("IndexSequenceSpace batch should be "
                            "TensorType or CudaNdarrayType, got " +
                            str(batch.type))
        if batch.ndim != 2:
            raise ValueError('IndexSequenceSpace batches must be 2D, got '
                             '%d dimensions' % batch.ndim)
        for val in get_debug_values(batch):
            self.np_validate(val)
Example 12: entropy_binary_vector
def entropy_binary_vector(P):
    """
    If P[i, j] represents the probability of some binary random
    variable X[i, j] being 1, then rval[i] gives the entropy of the
    random vector X[i, :].
    """
    oneMinusP = 1. - P

    PlogP = xlogx(P)
    omPlogOmP = xlogx(oneMinusP)

    term1 = -T.sum(PlogP, axis=1)
    assert len(term1.type.broadcastable) == 1

    term2 = -T.sum(omPlogOmP, axis=1)
    assert len(term2.type.broadcastable) == 1

    rval = term1 + term2

    # Check every intermediate term for NaN/Inf when test values exist.
    for plp, olo, t1, t2, rv in get_debug_values(PlogP, omPlogOmP,
                                                 term1, term2, rval):
        debug_assert(not np.any(np.isnan(plp)))
        debug_assert(not np.any(np.isinf(plp)))
        debug_assert(not np.any(np.isnan(olo)))
        debug_assert(not np.any(np.isinf(olo)))
        debug_assert(not np.any(np.isnan(t1)))
        debug_assert(not np.any(np.isnan(t2)))
        debug_assert(not np.any(np.isnan(rv)))

    return rval
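As a quick sanity check on the formula above, a row of n independent Bernoulli(0.5) units should have entropy n * log 2. A standalone NumPy sketch (the helper name is ad hoc, not pylearn2 code):

import numpy as np

def entropy_binary_vector_np(P):
    # Sum over units of the binary entropy -p log p - (1-p) log (1-p),
    # with the convention 0 log 0 = 0 (which is what xlogx implements).
    with np.errstate(divide='ignore', invalid='ignore'):
        plogp = np.where(P > 0, P * np.log(P), 0.0)
        omplogomp = np.where(P < 1, (1 - P) * np.log(1 - P), 0.0)
    return -(plogp + omplogomp).sum(axis=1)

P = np.full((1, 4), 0.5)  # 4 fair coin flips per row
assert np.allclose(entropy_binary_vector_np(P), 4 * np.log(2))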
Example 13: mf_update
def mf_update(self, state_below, state_above, layer_above=None,
              double_weights=False, iter_name=None):
    self.input_space.validate(state_below)

    if self.requires_reformat:
        if not isinstance(state_below, tuple):
            for sb in get_debug_values(state_below):
                if sb.shape[0] != self.dbm.batch_size:
                    raise ValueError("self.dbm.batch_size is %d but got "
                                     "shape of %d" %
                                     (self.dbm.batch_size, sb.shape[0]))
                assert reduce(lambda x, y: x * y, sb.shape[1:]) == self.input_dim
        state_below = self.input_space.format_as(state_below, self.desired_space)

    if iter_name is None:
        iter_name = 'anon'

    if state_above is not None:
        assert layer_above is not None
        msg = layer_above.downward_message(state_above)
        msg.name = ('msg_from_' + layer_above.layer_name + '_to_' +
                    self.layer_name + '[' + iter_name + ']')
    else:
        msg = None

    if double_weights:
        state_below = 2. * state_below
        state_below.name = self.layer_name + '_' + iter_name + '_2state'

    z = self.transformer.lmul(state_below) + self.b

    if self.layer_name is not None and iter_name is not None:
        z.name = self.layer_name + '_' + iter_name + '_z'

    if msg is not None:
        z = z + msg

    h = T.tanh(z)

    return h
Example 14: test_get_debug_values_exc
def test_get_debug_values_exc():
    """tests that get_debug_values raises an exception when
    compute_test_value is set to 'raise' and a test value is missing"""

    prev_value = config.compute_test_value
    try:
        config.compute_test_value = 'raise'

        x = T.vector()

        try:
            for x_val in op.get_debug_values(x):
                # this assert catches the case where we
                # erroneously get a value returned
                assert False
            raised = False
        except AttributeError:
            raised = True

        # this assert catches the case where we got []
        # returned, and possibly issued a warning,
        # rather than raising an exception
        assert raised
    finally:
        config.compute_test_value = prev_value
Example 15: test_kl
def test_kl():
    """
    Test whether function kl() has properly processed the input.
    """
    init_mode = theano.config.compute_test_value
    theano.config.compute_test_value = 'raise'

    try:
        mlp = MLP(layers=[Sigmoid(dim=10, layer_name='Y', irange=0.1)],
                  nvis=10)
        X = mlp.get_input_space().make_theano_batch()
        Y = mlp.get_output_space().make_theano_batch()
        X.tag.test_value = np.random.random(
            get_debug_values(X)[0].shape).astype(theano.config.floatX)
        Y_hat = mlp.fprop(X)

        # This call should not raise any error:
        ave = kl(Y, Y_hat, 1)

        # The following calls should raise ValueError exceptions:
        Y.tag.test_value[2][3] = 1.1
        np.testing.assert_raises(ValueError, kl, Y, Y_hat, 1)
        Y.tag.test_value[2][3] = -0.1
        np.testing.assert_raises(ValueError, kl, Y, Y_hat, 1)
    finally:
        theano.config.compute_test_value = init_mode