This article collects typical usage examples of the Python method theano.tensor.floor. If you are wondering what exactly tensor.floor does, how to call it, or where it is used, the curated code examples below may help. You can also explore the other methods of the theano.tensor module.
Nine code examples of tensor.floor are shown below, sorted by popularity by default. You can upvote any example you like or find useful; your votes help the system recommend better Python code samples.
Example 1: discretized_laplace
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import floor [as alias]
def discretized_laplace(mean, logscale, binsize, sample=None):
    scale = .5 * T.exp(logscale)
    if sample is None:
        u = G.rng_curand.uniform(size=mean.shape) - .5
        sample = mean - scale * T.sgn(u) * T.log(1 - 2 * abs(u))  # inverse-CDF sample from the Laplace
        sample = T.floor(sample / binsize) * binsize  # discretize the sample
    d = .5 * binsize
    def cdf(x):
        z = x - mean
        return .5 + .5 * T.sgn(z) * (1. - T.exp(-abs(z) / scale))
    def logmass1(x):
        # general method for probability mass, but numerically unstable for large |x-mean|/scale
        return T.log(cdf(x + d) - cdf(x - d) + 1e-7)
    def logmass2(x):
        # only valid for |x-mean| >= d
        return -abs(x - mean) / scale + T.log(T.exp(d / scale) - T.exp(-d / scale)) - np.log(2.).astype(G.floatX)
    def logmass_stable(x):
        switch = (abs(x - mean) < d)
        return switch * logmass1(x) + (1 - switch) * logmass2(x)
    logp = logmass_stable(sample).flatten(2).sum(axis=1)
    entr = None  # (1 + logscale).flatten(2).sum(axis=1)
    return RandomVariable(sample, logp, entr, mean=mean, scale=scale)
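A minimal call sketch (hypothetical variable names; an explicit sample is passed so the CURAND random stream is not exercised, and the original file's module-level helpers G, np and RandomVariable are assumed to be importable):

import theano.tensor as T
mean = T.matrix('mean')
logscale = T.matrix('logscale')
x = T.matrix('x')  # observations, already rounded to the binsize grid
rv = discretized_laplace(mean, logscale, binsize=1. / 255, sample=x)
# rv.logp is the per-example log-mass of x under the discretized Laplace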
Example 2: compute_sub_all_scores
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import floor [as alias]
def compute_sub_all_scores(self, start_end):
    plu = softmax(T.dot(self.trained_users[start_end], self.trained_items.T))[:, :-1]  # (n_batch, n_item)
    length = T.max(T.sum(self.tes_masks[start_end], axis=1))  # e.g. 253
    cidx = T.arange(length).reshape((1, length)) + self.tra_accum_lens[start_end][:, 0].reshape((len(start_end), 1))
    cl = T.sum(self.trained_items[self.tra_context_masks[cidx]], axis=2)  # (n_batch, seq_length, n_size)
    cl = cl.dimshuffle(1, 2, 0)
    pb = self.trained_branch[self.routes]  # (n_item, 4, tree_depth, n_size)
    shp0, shp1, shp2 = self.lrs.shape
    lrs = self.lrs.reshape((shp0, shp1, shp2, 1, 1))
    pr_bc = T.dot(pb, cl)
    br = sigmoid(pr_bc * lrs) * T.ceil(abs(pr_bc))  # (n_item, 4, tree_depth, seq_length, n_batch)
    path = T.prod(br, axis=2) * self.probs.reshape((shp0, shp1, 1, 1))
    del cl, pb, br, lrs
    # paths = T.prod((T.floor(1 - path) + path), axis=1)  # (n_item, seq_length, n_batch)
    paths = T.sum(path, axis=1)
    paths = T.floor(1 - paths) + paths  # replace zero path probabilities with 1
    p = paths[:-1].T * plu.reshape((plu.shape[0], 1, plu.shape[1]))  # (n_batch x n_item)
    # p = plu.reshape((plu.shape[0], 1, plu.shape[1])) * T.ones((plu.shape[0], length, plu.shape[1]))
    return T.reshape(p, (p.shape[0] * p.shape[1], p.shape[2])).eval()
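The T.floor(1 - paths) + paths idiom is worth a note: for a probability in (0, 1] the floor term is 0 and the value passes through unchanged, while an exactly-zero path probability is replaced by 1, so items without a tree path contribute a neutral factor. A quick NumPy check of the identity:

import numpy as np
paths = np.array([0.0, 0.3, 1.0])
print(np.floor(1 - paths) + paths)  # -> [1.0, 0.3, 1.0]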
Example 3: floor
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import floor [as alias]
def floor(x):
    """
    Elemwise floor of `x`.
    """
    # see decorator for function body
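This backend-style stub gets its body injected by a decorator (hence the placeholder comment); the op it forwards to behaves like an elementwise math.floor. A minimal standalone check, calling theano.tensor directly rather than the stub above:

import theano
import theano.tensor as T
x = T.vector('x')
f = theano.function([x], T.floor(x), allow_input_downcast=True)
print(f([1.7, -0.2]))  # -> [ 1. -1.]  (floor rounds toward negative infinity)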
Example 4: discretized_logistic
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import floor [as alias]
def discretized_logistic(mean, logscale, binsize, sample=None):
    scale = T.exp(logscale)
    if sample is None:
        u = G.rng_curand.uniform(size=mean.shape)
        _y = T.log(-u / (u - 1))  # inverse CDF of the logistic
        sample = mean + scale * _y  # sample from the actual logistic
        sample = T.floor(sample / binsize) * binsize  # discretize the sample
    _sample = (T.floor(sample / binsize) * binsize - mean) / scale
    logps = T.log(T.nnet.sigmoid(_sample + binsize / scale) - T.nnet.sigmoid(_sample) + 1e-7)
    logp = logps.flatten(2).sum(axis=1)
    entr = logscale.flatten(2)
    entr = entr.sum(axis=1) + 2. * entr.shape[1].astype(G.floatX)
    return RandomVariable(sample, logp, entr, mean=mean, logscale=logscale, logps=logps)
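The logps line computes P(sample <= X < sample + binsize) for a logistic distribution, since the sigmoid is exactly the logistic CDF. A NumPy sanity check of the same bin mass, with hypothetical parameter values:

import numpy as np
def logistic_cdf(x, mean, scale):
    return 1. / (1. + np.exp(-(x - mean) / scale))
mean, scale, binsize = 0.0, 0.5, 1. / 255
x = np.floor(0.3 / binsize) * binsize  # a value on the binsize grid
mass = logistic_cdf(x + binsize, mean, scale) - logistic_cdf(x, mean, scale)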
Example 5: discretized_gaussian
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import floor [as alias]
def discretized_gaussian(mean, logvar, binsize, sample=None):
    scale = T.exp(.5 * logvar)
    if sample is None:
        _y = G.rng_curand.normal(size=mean.shape)
        sample = mean + scale * _y  # sample from the actual Gaussian
        sample = T.floor(sample / binsize) * binsize  # discretize the sample
    _sample = (T.floor(sample / binsize) * binsize - mean) / scale
    def _erf(x):
        return T.erf(x / T.sqrt(2.))
    logp = T.log(_erf(_sample + binsize / scale) - _erf(_sample) + 1e-7) + T.log(.5)
    logp = logp.flatten(2).sum(axis=1)
    entr = (.5 * (T.log(2 * math.pi) + 1 + logvar)).flatten(2).sum(axis=1)
    return RandomVariable(sample, logp, entr, mean=mean, logvar=logvar)
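Here .5 * (erf(b / sqrt(2)) - erf(a / sqrt(2))) equals Phi(b) - Phi(a), the standard-normal mass of the bin. An equivalent check with the standard library, using hypothetical parameter values:

import math
def gauss_cdf(x, mean, scale):
    return .5 * (1. + math.erf((x - mean) / (scale * math.sqrt(2.))))
mean, scale, binsize = 0.0, 0.5, 1. / 255
x = math.floor(0.3 / binsize) * binsize
mass = gauss_cdf(x + binsize, mean, scale) - gauss_cdf(x, mean, scale)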
Example 6: floor
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import floor [as alias]
def floor(x):
    return T.floor(x)

# UPDATES OPS
Example 7: generate_forward_diffusion_sample
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import floor [as alias]
def generate_forward_diffusion_sample(self, X_noiseless):
    """
    Corrupt a training image with t steps worth of Gaussian noise, and
    return the corrupted image, as well as the mean and covariance of the
    posterior q(x^{t-1}|x^t, x^0).
    """
    X_noiseless = X_noiseless.reshape(
        (-1, self.n_colors, self.spatial_width, self.spatial_width))
    n_images = X_noiseless.shape[0].astype('int16')
    rng = Random().theano_rng
    # choose a timestep in [1, self.trajectory_length-1].
    # note the reverse process is fixed for the very
    # first timestep, so we skip it.
    # TODO for some reason random_integer is missing from the Blocks
    # theano random number generator.
    t = T.floor(rng.uniform(size=(1, 1), low=1, high=self.trajectory_length,
                            dtype=theano.config.floatX))
    t_weights = self.get_t_weights(t)
    N = rng.normal(size=(n_images, self.n_colors, self.spatial_width, self.spatial_width),
                   dtype=theano.config.floatX)
    # noise added this time step
    beta_forward = self.get_beta_forward(t)
    # decay in noise variance due to original signal this step
    alpha_forward = 1. - beta_forward
    # compute total decay in the fraction of the variance due to X_noiseless
    alpha_arr = 1. - self.beta_arr
    alpha_cum_forward_arr = T.extra_ops.cumprod(alpha_arr).reshape((self.trajectory_length, 1))
    alpha_cum_forward = T.dot(t_weights.T, alpha_cum_forward_arr)
    # total fraction of the variance due to noise being mixed in
    beta_cumulative = 1. - alpha_cum_forward
    # total fraction of the variance due to noise being mixed in one step ago
    beta_cumulative_prior_step = 1. - alpha_cum_forward / alpha_forward
    # generate the corrupted training data
    X_uniformnoise = X_noiseless + (rng.uniform(size=(n_images, self.n_colors, self.spatial_width, self.spatial_width),
                                                dtype=theano.config.floatX) - T.constant(0.5, dtype=theano.config.floatX)) * T.constant(self.uniform_noise, dtype=theano.config.floatX)
    X_noisy = X_uniformnoise * T.sqrt(alpha_cum_forward) + N * T.sqrt(1. - alpha_cum_forward)
    # compute the mean and covariance of the posterior distribution
    mu1_scl = T.sqrt(alpha_cum_forward / alpha_forward)
    mu2_scl = 1. / T.sqrt(alpha_forward)
    cov1 = 1. - alpha_cum_forward / alpha_forward
    cov2 = beta_forward / alpha_forward
    lam = 1. / cov1 + 1. / cov2
    mu = (
        X_uniformnoise * mu1_scl / cov1 +
        X_noisy * mu2_scl / cov2
    ) / lam
    sigma = T.sqrt(1. / lam)
    sigma = sigma.reshape((1, 1, 1, 1))
    mu.name = 'mu q posterior'
    sigma.name = 'sigma q posterior'
    X_noisy.name = 'X_noisy'
    t.name = 't'
    return X_noisy, t, mu, sigma
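The cumulative-alpha bookkeeping follows the standard forward-diffusion identity: after t steps the signal retains a fraction prod_{s<=t}(1 - beta_s) of the variance, and the rest is noise. A small NumPy illustration with a hypothetical linear beta schedule (the real schedule lives in self.beta_arr):

import numpy as np
trajectory_length = 1000
beta_arr = np.linspace(1e-4, 0.02, trajectory_length)  # hypothetical schedule
alpha_cum = np.cumprod(1. - beta_arr)  # signal fraction remaining after each step
beta_cumulative = 1. - alpha_cum       # noise fraction mixed in after each step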
Example 8: get_updates_sgd_momentum
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import floor [as alias]
def get_updates_sgd_momentum(self, cost, params,
                             decay_mode=None, decay=0.,
                             momentum=0.9, nesterov=False,
                             grad_clip=None, constant_clip=True):
    print(' - SGD: lr = %.2e' % (self.lr.get_value(borrow=True)), end='')
    print(', decay = %.2f' % (decay), end='')
    print(', momentum = %.2f' % (momentum), end='')
    print(', nesterov =', nesterov, end='')
    print(', grad_clip =', grad_clip)
    self.grad_clip = grad_clip
    self.constant_clip = constant_clip
    self.iterations = theano.shared(
        np.asarray(0., dtype=theano.config.floatX), borrow=True)
    # lr = self.lr_float
    lr = self.lr * (1.0 / (1.0 + decay * self.iterations))
    # lr = self.lr * (decay ** T.floor(self.iterations / decay_step))
    updates = [(self.iterations, self.iterations + 1.)]

    # Get gradients and apply clipping
    if self.grad_clip is None:
        grads = T.grad(cost, params)
    else:
        assert self.grad_clip > 0
        if self.constant_clip:
            # Constant clipping using theano.gradient.grad_clip
            clip = self.grad_clip
            grads = T.grad(
                theano.gradient.grad_clip(cost, -clip, clip),
                params)
        else:
            # Adaptive clipping
            clip = self.grad_clip / lr
            grads_ = T.grad(cost, params)
            grads = [T.clip(g, -clip, clip) for g in grads_]

    for p, g in zip(params, grads):
        # v_prev = theano.shared(p.get_value(borrow=True) * 0.)
        p_val = p.get_value(borrow=True)
        v_prev = theano.shared(np.zeros(p_val.shape, dtype=p_val.dtype),
                               broadcastable=p.broadcastable)
        v = momentum * v_prev - lr * g
        updates.append((v_prev, v))
        if nesterov:
            new_p = p + momentum * v - lr * g
        else:
            new_p = p + v
        updates.append((p, new_p))
    return updates
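A hypothetical call sketch, assuming opt is an instance of the optimizer class above with its learning rate stored in the shared variable opt.lr (every name beyond the example's own is illustrative):

import numpy as np
import theano
import theano.tensor as T
w = theano.shared(np.zeros(3, dtype=theano.config.floatX), name='w')
cost = T.sum((w - 1.) ** 2)
updates = opt.get_updates_sgd_momentum(cost, [w], momentum=0.9, nesterov=True)
train = theano.function([], cost, updates=updates)
for _ in range(100):
    train()  # cost decreases as w is pulled toward 1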
Example 9: __theano_train__
# Required module: from theano import tensor [as alias]
# Or: from theano.tensor import floor [as alias]
def __theano_train__(self, n_size):
    """
    Pr(l|u, C(l)) = Pr(l|u) * Pr(l|C(l))
    Pr(u, l, t) = Pr(l|u, C(l)) if C(l) exists,
                  Pr(l|u) otherwise.
    $Theta$ = argmax Pr(u, l, t)
    """
    tra_mask = T.ivector()
    seq_length = T.sum(tra_mask)  # effective sequence length
    wl = T.concatenate((self.wl, self.wl_m))
    tidx, cidx, bidx, userid = T.ivector(), T.imatrix(), T.itensor3(), T.iscalar()
    pb = self.pb[bidx]  # (seq_length, 4, depth, n_size)
    lrs = self.lrs[tidx]  # (seq_length, 4, depth)
    # user preference
    xu = self.xu[userid]
    plu = softmax(T.dot(xu, self.wl.T))
    # geographical influence
    cl = T.sum(wl[cidx], axis=1)  # (seq_length, n_size)
    cl = cl.reshape((cl.shape[0], 1, 1, cl.shape[1]))
    br = sigmoid(T.sum(pb[:seq_length] * cl, axis=3) * lrs[:seq_length]) * T.ceil(abs(T.mean(cl, axis=3)))
    path = T.prod(br, axis=2) * self.probs[tidx][:seq_length]
    # paths = T.prod((T.floor(1-path) + path), axis=1)
    paths = T.sum(path, axis=1)
    paths = T.floor(1 - paths) + paths  # replace zero path probabilities with 1
    # ----------------------------------------------------------------------------
    # cost, gradients, learning rate, l2 regularization
    lr, l2 = self.alpha_lambda[0], self.alpha_lambda[1]
    seq_l2_sq = T.sum([T.sum(par ** 2) for par in [xu, self.wl]])
    upq = -1 * T.sum(T.log(plu[tidx[:seq_length]] * paths)) / seq_length
    seq_costs = (
        upq +
        0.5 * l2 * seq_l2_sq)
    seq_grads = T.grad(seq_costs, self.params)
    seq_updates = [(par, par - lr * gra) for par, gra in zip(self.params, seq_grads)]
    pars_subs = [(self.xu, xu), (self.pb, pb)]
    seq_updates.extend([(par, T.set_subtensor(sub, sub - lr * T.grad(seq_costs, sub)))
                        for par, sub in pars_subs])
    # ----------------------------------------------------------------------------
    uidx = T.iscalar()  # the type of T.iscalar() is TensorType(int32, )
    self.seq_train = theano.function(
        inputs=[uidx],
        outputs=upq,
        updates=seq_updates,
        givens={
            userid: uidx,
            tidx: self.tra_target_masks[uidx],
            cidx: self.tra_context_masks[T.arange(self.tra_accum_lens[uidx][0], self.tra_accum_lens[uidx][1])],
            bidx: self.routes[self.tra_target_masks[uidx]],
            tra_mask: self.tra_masks[uidx]
            # tra_mask_cot: self.tra_masks_cot[T.arange(self.tra_accum_lens[uidx][0], self.tra_accum_lens[uidx][1])]
        })
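Once __theano_train__ has built the compiled function, training reduces to repeatedly invoking it with one user index at a time. A hypothetical driver loop (model, n_users and n_epochs are illustrative names):

for epoch in range(n_epochs):
    for uidx in range(n_users):
        loss = model.seq_train(uidx)  # one SGD step on this user's sequence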