This article collects typical usage examples of the pylearn2.utils.as_floatX function in Python. If you have been wondering what exactly as_floatX does, how to call it, or where to find examples of it in use, the curated code samples here may help.
Below are 15 code examples of the as_floatX function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
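Before the examples, a note on what the function does: as_floatX casts its argument to Theano's configured floating-point dtype (theano.config.floatX), so the same code runs under both float32 and float64 setups. A minimal sketch of that behavior (the import path is the real one; the surrounding values are illustrative):

    import numpy as np
    from theano import config
    from pylearn2.utils import as_floatX

    x = np.arange(4)                    # integer dtype on most platforms
    x_f = as_floatX(x)                  # cast to config.floatX
    assert x_f.dtype == config.floatX   # e.g. 'float32' when floatX=float32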
Example 1: test_mean_H_given_V
def test_mean_H_given_V(self):
    tol = 1e-6
    # P(h_1 | v) / P(h_2 | v) = a
    # => exp(-E(v, h_1)) / exp(-E(v, h_2)) = a
    # => exp(E(v, h_2) - E(v, h_1)) = a
    # => E(v, h_2) - E(v, h_1) = log(a)
    # also log P(h_1 | v) - log P(h_2 | v) = log(a)
    rng = N.random.RandomState([1, 2, 3])
    m = 5
    Vv = as_floatX(N.zeros((m, self.nv)) + rng.randn(self.nv))
    Hv = as_floatX(rng.randn(m, self.nh) > 0.)
    log_Pv = self.log_P_H_given_V_func(Hv, Vv)
    Ev = self.E_func(Vv, Hv)
    for i in xrange(m):
        for j in xrange(i + 1, m):
            log_a = log_Pv[i] - log_Pv[j]
            e = Ev[j] - Ev[i]
            assert abs(e - log_a) < tol
Example 2: test_triangle_code
def test_triangle_code():
    rng = np.random.RandomState([20, 18, 9])
    m = 5
    n = 6
    k = 7
    X = as_floatX(rng.randn(m, n))
    D = as_floatX(rng.randn(k, n))
    D_norm_squared = np.sum(D ** 2, axis=1)
    X_norm_squared = np.sum(X ** 2, axis=1)
    sq_distance = -2.0 * np.dot(X, D.T) + D_norm_squared + np.atleast_2d(X_norm_squared).T
    distance = np.sqrt(sq_distance)
    mu = np.mean(distance, axis=1)
    expected = np.maximum(0.0, mu.reshape(mu.size, 1) - distance)
    Xv = T.matrix()
    Dv = T.matrix()
    code = triangle_code(X=Xv, centroids=Dv)
    actual = function([Xv, Dv], code)(X, D)
    assert np.allclose(expected, actual)
Example 3: test_d_negent_h_d_h
def test_d_negent_h_d_h(self):
    "Tests that the gradient of the negative entropy of h with respect to \hat{h} matches the analytical version of it."
    model = self.model
    ip = self.model.e_step
    X = self.X
    assert X.shape[0] == self.m
    H = np.cast[config.floatX](self.model.rng.uniform(0.001, .999, (self.m, self.N)))
    S = np.cast[config.floatX](self.model.rng.uniform(-5., 5., (self.m, self.N)))
    H_var = T.matrix(name='H_var')
    H_var.tag.test_value = H
    S_var = T.matrix(name='S_var')
    S_var.tag.test_value = S
    sigma0 = ip.infer_var_s0_hat()
    Sigma1 = ip.infer_var_s1_hat()
    mu0 = T.zeros_like(model.mu)
    negent = -self.model.entropy_h(H_hat=H_var).sum()
    assert len(negent.type.broadcastable) == 0
    grad_H = T.grad(negent, H_var)
    grad_func = function([H_var, S_var], grad_H, on_unused_input='ignore')
    grad_theano = grad_func(H, S)
    # Most of the model quantities below are fetched for parity with the full
    # derivation; only term1 and term2 enter the analytical gradient here.
    half = as_floatX(0.5)
    one = as_floatX(1.)
    two = as_floatX(2.)
    pi = as_floatX(np.pi)
    e = as_floatX(np.e)
    mu = self.model.mu
    alpha = self.model.alpha
    W = self.model.W
    B = self.model.B
    w = self.model.w
    term1 = T.log(H_var)
    term2 = -T.log(one - H_var)
    analytical = term1 + term2
    grad_analytical = function([H_var, S_var], analytical, on_unused_input='ignore')(H, S)
    if not np.allclose(grad_theano, grad_analytical):
        print 'grad theano: ', (grad_theano.min(), grad_theano.mean(), grad_theano.max())
        print 'grad analytical: ', (grad_analytical.min(), grad_analytical.mean(), grad_analytical.max())
        ad = np.abs(grad_theano - grad_analytical)
        print 'abs diff: ', (ad.min(), ad.mean(), ad.max())
        assert False
Example 4: create_colors
def create_colors(n_colors):
    """
    Create an array of n_colors distinct colors.

    Parameters
    ----------
    n_colors : int
        The number of colors to create

    Returns
    -------
    colors_rgb : np.array
        An array of shape (n_colors, 3) in RGB format
    """
    # Create the list of color hues
    colors_hue = np.arange(n_colors)
    colors_hue = as_floatX(colors_hue)
    colors_hue *= 1. / n_colors
    # Set the colors in HSV format
    colors_hsv = np.ones((n_colors, 3))
    colors_hsv[:, 2] *= .75
    colors_hsv[:, 0] = colors_hue
    # Put in a matplotlib-friendly format
    colors_hsv = colors_hsv.reshape((1,) + colors_hsv.shape)
    # Convert to RGB
    colors_rgb = matplotlib.colors.hsv_to_rgb(colors_hsv)
    colors_rgb = colors_rgb[0]
    return colors_rgb
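A quick usage sketch (illustrative only; it assumes numpy and matplotlib are imported as in the function above):

    colors = create_colors(4)
    print colors.shape                 # (4, 3)
    print colors.min(), colors.max()   # RGB components all lie in [0, 1]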
Example 5: test_convolutional_compatible
def test_convolutional_compatible():
    """
    VAE allows convolutional encoding networks
    """
    encoding_model = MLP(
        layers=[
            SpaceConverter(layer_name="conv2d_converter",
                           output_space=Conv2DSpace(shape=[4, 4], num_channels=1)),
            ConvRectifiedLinear(
                layer_name="h",
                output_channels=2,
                kernel_shape=[2, 2],
                kernel_stride=[1, 1],
                pool_shape=[1, 1],
                pool_stride=[1, 1],
                pool_type="max",
                irange=0.01,
            ),
        ]
    )
    decoding_model = MLP(layers=[Linear(layer_name="h", dim=16, irange=0.01)])
    prior = DiagonalGaussianPrior()
    conditional = BernoulliVector(mlp=decoding_model, name="conditional")
    posterior = DiagonalGaussian(mlp=encoding_model, name="posterior")
    vae = VAE(nvis=16, prior=prior, conditional=conditional, posterior=posterior, nhid=16)
    X = T.matrix("X")
    lower_bound = vae.log_likelihood_lower_bound(X, num_samples=10)
    f = theano.function(inputs=[X], outputs=lower_bound)
    rng = make_np_rng(default_seed=11223)
    f(as_floatX(rng.uniform(size=(10, 16))))
Example 6: get_monitoring_channels
def get_monitoring_channels(self, data):
    X, Y = data
    rval = OrderedDict()
    nll = self.nll(data)
    rval['perplexity'] = as_floatX(10 ** (nll / np.log(10)))
    return rval
Example 7: learning_rate_updates
def learning_rate_updates(self):
    """
    Compute a dictionary of shared variable updates related to annealing
    the learning rate.

    Returns
    -------
    ups : dict
        A dictionary with the shared variables representing SGD metadata
        as keys and a symbolic expression of how they are to be updated as
        values.
    learn_rates : list
        The annealed learning rate for each parameter, in the order the
        parameters appear in self.params.
    """
    ups = {}
    # Annealing coefficient. Here we're using a formula of
    # min(base_lr, anneal_start / (iteration + 1))
    if self.anneal_start is None:
        annealed = sharedX(self.base_lr)
    else:
        frac = self.anneal_start / (self.iteration + 1.)
        annealed = tensor.minimum(
            as_floatX(frac),
            self.base_lr  # maximum learning rate
        )
    # Update the shared variable for the annealed learning rate.
    ups[self.annealed] = annealed
    ups[self.iteration] = self.iteration + 1
    # Calculate the learning rates for each parameter, in the order
    # they appear in self.params
    learn_rates = [annealed * self.learning_rates[p] for p in self.params]
    return ups, learn_rates
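The min(base_lr, anneal_start / (iteration + 1)) schedule from the comment above is easy to trace numerically. A small sketch with made-up values for base_lr and anneal_start:

    base_lr = 0.1       # hypothetical values, chosen so the decay is visible
    anneal_start = 0.5
    for t in xrange(8):
        print t, min(base_lr, anneal_start / (t + 1.))
    # the rate holds at base_lr while 0.5 / (t + 1) >= 0.1 (t <= 4),
    # then decays like 1 / t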
Example 8: get_gradients
def get_gradients(self, model, data, **kwargs):
    v = data
    mean_matrix = model.propup(v)
    # ======================================================
    part_j = self.p - mean_matrix.mean(axis=0)
    part_i1_matrix = mean_matrix * (1. - mean_matrix)
    # part_i = T.dot(v.T, part_i1_matrix)
    # part_orin = part_i * part_j  # right-multiply the matrix by a row vector
    # coeff_w = -2. * v.shape[0]
    # gW = coeff_w * part_orin  # gradient from the HL sparsity term, excluding lambda_
    # =======================================================
    part_j1 = part_j
    part_j2 = part_i1_matrix.mean(axis=0)
    gc = -2. * part_j1 * part_j2
    W, c, b = list(model.get_params())
    # gradients = OrderedDict(izip([W, c], [1 / self.p * gW, 1 / self.p * gc]))
    gradients = OrderedDict(izip([c], [as_floatX(1 / self.p * gc)]))
    updates = OrderedDict()
    return gradients, updates
Example 9: gibbs_step_for_v
def gibbs_step_for_v(self, v, rng):
    # Sometimes, the number of examples in the data set is not a
    # multiple of self.batch_size.
    batch_size = v.shape[0]
    # sample h given v
    h_mean = self.mean_h_given_v(v)
    h_mean_shape = (batch_size, self.nhid)
    h_sample = as_floatX(rng.uniform(size=h_mean_shape) < h_mean)
    # sample s given (v, h)
    s_mu, s_var = self.mean_var_s_given_v_h1(v)
    # s_mu_shape = (batch_size, self.nslab)
    s_mu_shape = (16, self.nslab)  # @dave: THEANO HACK (bugfix for rita2)
    s_sample = s_mu + rng.normal(size=s_mu_shape) * tensor.sqrt(s_var)
    # s_sample = (s_sample.reshape() * h_sample.dimshuffle(0, 1, 'x')).flatten(2)
    # sample v given (s, h)
    v_mean, v_var = self.mean_var_v_given_h_s(h_sample, s_sample)
    # v_mean_shape = (batch_size, self.nvis)
    v_mean_shape = (16, int(self.nvis))  # @dave: THEANO HACK (bugfix for rita2)
    v_sample = rng.normal(size=v_mean_shape) * tensor.sqrt(v_var) + v_mean
    del batch_size
    return v_sample, locals()
Example 10: setup
def setup(self):
    """
    We use a small predefined 8x5 matrix for
    which we know the ZCA transform.
    """
    self.X = np.array([[-10.0, 3.0, 19.0, 9.0, -15.0],
                       [7.0, 26.0, 26.0, 26.0, -3.0],
                       [17.0, -17.0, -37.0, -36.0, -11.0],
                       [19.0, 15.0, -2.0, 5.0, 9.0],
                       [-3.0, -8.0, -35.0, -25.0, -8.0],
                       [-18.0, 3.0, 4.0, 15.0, 14.0],
                       [5.0, -4.0, -5.0, -7.0, -11.0],
                       [23.0, 22.0, 15.0, 20.0, 12.0]])
    self.dataset = DenseDesignMatrix(X=as_floatX(self.X),
                                     y=as_floatX(np.ones((8, 1))))
    self.num_components = self.dataset.get_design_matrix().shape[1] - 1
Example 11: cost
def cost(self, Y, q_h):
    z = self.score(q_h)
    z = z - z.max(axis=1).dimshuffle(0, 'x')
    log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
    log_prob_of = (Y * log_prob).sum(axis=1)
    assert log_prob_of.ndim == 1
    rval = as_floatX(log_prob_of.mean())
    return -rval
Example 12: cost_from_X
def cost_from_X(self, data):
    X, Y = data
    z = self.score(X)
    z = z - z.max(axis=1).dimshuffle(0, 'x')
    log_prob = z - T.log(T.exp(z).sum(axis=1).dimshuffle(0, 'x'))
    log_prob_of = (Y * log_prob).sum(axis=1)
    assert log_prob_of.ndim == 1
    rval = as_floatX(log_prob_of.mean())
    return -rval
Example 13: test_score
def test_score(self):
    rng = N.random.RandomState([1, 2, 3])
    m = 10
    Vv = as_floatX(rng.randn(m, self.nv))
    Sv = self.score_func(Vv)
    gSv = self.generic_score_func(Vv)
    assert N.allclose(Sv, gSv)
Example 14: theano_norms
def theano_norms(W):
    """
    .. todo::

        WRITEME properly

    Returns a vector containing the L2 norm of each
    column of W, where W and the return value are symbolic
    Theano variables.
    """
    return T.sqrt(as_floatX(1e-8) + T.sqr(W).sum(axis=0))
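A brief usage sketch, assuming the same imports as the other examples on this page (numpy as np, theano.tensor as T, and Theano's function); the matrix values are made up:

    W_sym = T.matrix('W')
    norms_func = function([W_sym], theano_norms(W_sym))
    W_val = as_floatX(np.array([[3., 0.], [4., 1.]]))
    print norms_func(W_val)   # approximately [5. 1.]: the L2 norm of each column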
Example 15: test_free_energy
def test_free_energy(self):
    rng = N.random.RandomState([1, 2, 3])
    m = 2 ** self.nh
    Vv = as_floatX(N.zeros((m, self.nv)) + rng.randn(self.nv))
    F, = self.F_func(Vv[0:1, :])
    Hv = as_floatX(N.zeros((m, self.nh)))
    for i in xrange(m):
        for j in xrange(self.nh):
            Hv[i, j] = (i & (2 ** j)) / (2 ** j)
    Ev = self.E_func(Vv, Hv)
    Fv = -N.log(N.exp(-Ev).sum())
    assert abs(F - Fv) < 1e-6