This article collects typical code examples showing how the theano.tensor.cast function is used in Python. If you have been wondering what the cast function does, how to call it, or what real-world usage looks like, the curated examples below should help.
Fifteen code examples of the cast function are presented, sorted by popularity by default.
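All of the snippets are excerpts from larger projects, so they assume the usual aliases (import theano, import theano.tensor as T, import numpy as np) plus whatever project-specific helpers they reference (utils, lasagne, rho, _meshgrid, and so on). As a quick orientation, here is a minimal self-contained sketch of what T.cast itself does: it returns a new symbolic variable with the requested dtype, truncating toward zero on float-to-int conversion.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')          # dtype defaults to theano.config.floatX
i = T.cast(x, 'int32')     # symbolic element-wise cast; x itself is unchanged
print(i.dtype)             # 'int32'

f = theano.function([x], i)
print(f(np.array([[1.7, -2.2]], dtype=theano.config.floatX)))  # [[ 1 -2]]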
Example 1: load_data
def load_data(random_state=1066, n=1000, max_phrase_length=100):
    data = utils.load_data(random_state=random_state,
                           n=n,
                           max_phrase_length=max_phrase_length)
    X_train, y_train = data[0]
    X_valid, y_valid = data[1]
    X_test, y_test = data[2]

    X_train = X_train.reshape((-1, max_phrase_length, 67)).transpose(0, 2, 1)
    X_valid = X_valid.reshape((-1, max_phrase_length, 67)).transpose(0, 2, 1)
    X_test = X_test.reshape((-1, max_phrase_length, 67)).transpose(0, 2, 1)

    # Robert: what about reshaping this data for 1D convs?
    # hstack() instead of hstack() when creating X in utils?
    return dict(
        X_train=theano.shared(lasagne.utils.floatX(X_train)),
        y_train=T.cast(theano.shared(y_train), 'int32'),
        X_valid=theano.shared(lasagne.utils.floatX(X_valid)),
        y_valid=T.cast(theano.shared(y_valid), 'int32'),
        X_test=theano.shared(lasagne.utils.floatX(X_test)),
        y_test=T.cast(theano.shared(y_test), 'int32'),
        num_examples_train=X_train.shape[0],
        num_examples_valid=X_valid.shape[0],
        num_examples_test=X_test.shape[0],
        # input_height=X_train.shape[2],  # what's the equivalent in our vectors?
        # input_width=X_train.shape[3],
        output_dim=5,  # five sentiment classes
    )
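The y_train=T.cast(theano.shared(y_train), 'int32') pattern deserves a note: it is the classic Theano tutorial trick. GPU shared variables had to be float, so the labels are stored as a float shared variable and downstream ops receive an int32 cast view. A minimal sketch of just that trick (the array shape and class count here are made up):

import numpy as np
import theano
import theano.tensor as T

labels = np.random.randint(0, 5, size=1000).astype(theano.config.floatX)
y_shared = theano.shared(labels)   # float storage, GPU-friendly
y = T.cast(y_shared, 'int32')      # int32 symbolic view for indexing/losses
print(y.dtype)                     # 'int32'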
Example 2: _transform_affine
def _transform_affine(theta, input, downsample_factor):
    num_batch, num_channels, height, width = input.shape
    theta = T.reshape(theta, (-1, 2, 3))

    # grid of (x_t, y_t, 1), eq (1) in ref [1]
    out_height = T.cast(height / downsample_factor[0], 'int64')
    out_width = T.cast(width / downsample_factor[1], 'int64')
    grid = _meshgrid(out_height, out_width)

    # Transform A x (x_t, y_t, 1)^T -> (x_s, y_s)
    T_g = T.dot(theta, grid)
    x_s = T_g[:, 0]
    y_s = T_g[:, 1]
    x_s_flat = x_s.flatten()
    y_s_flat = y_s.flatten()

    # dimshuffle input to (bs, height, width, channels)
    input_dim = input.dimshuffle(0, 2, 3, 1)
    input_transformed = _interpolate(
        input_dim, x_s_flat, y_s_flat,
        out_height, out_width)

    output = T.reshape(
        input_transformed, (num_batch, out_height, out_width, num_channels))
    output = output.dimshuffle(0, 3, 1, 2)  # dimshuffle to conv format
    return output
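The two casts at the top exist because height and width come from input.shape (int64), and Theano's / is true division, so dividing them by the downsample factor promotes the result to float64; T.cast brings it back to an integer usable as a size. A stripped-down illustration, assuming a fixed (2, 2) factor:

import theano.tensor as T

x = T.tensor4('x')                        # (batch, channels, height, width)
height, width = x.shape[2], x.shape[3]    # symbolic int64 shapes
factor = (2, 2)                           # hypothetical downsample factor
out_h = T.cast(height / factor[0], 'int64')   # division upcasts to float64 ...
out_w = T.cast(width / factor[1], 'int64')    # ... so cast back for usable sizes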
Example 3: load_data
def load_data(self):
    data = self._load_data()
    X_train, y_train = data[0]
    X_valid, y_valid = data[1]
    X_test, y_test = data[2]

    # reshape for convolutions
    X_train = X_train.reshape((X_train.shape[0], 1, 28, 28))
    X_valid = X_valid.reshape((X_valid.shape[0], 1, 28, 28))
    X_test = X_test.reshape((X_test.shape[0], 1, 28, 28))

    return dict(
        X_train=theano.shared(lasagne.utils.floatX(X_train)),
        y_train=T.cast(theano.shared(y_train), 'int32'),
        X_valid=theano.shared(lasagne.utils.floatX(X_valid)),
        y_valid=T.cast(theano.shared(y_valid), 'int32'),
        valid_set=X_valid,
        y_valid_raw=y_valid,
        X_test=theano.shared(lasagne.utils.floatX(X_test)),
        y_test=T.cast(theano.shared(y_test), 'int32'),
        num_examples_train=X_train.shape[0],
        num_examples_valid=X_valid.shape[0],
        num_examples_test=X_test.shape[0],
        input_height=X_train.shape[2],
        input_width=X_train.shape[3],
        input_dim=[X_train.shape[2], X_train.shape[3]],
        output_dim=10,
    )
Example 4: binarization
def binarization(W, H, binary=True, deterministic=False, stochastic=False, srng=None):
    # (deterministic == True) <-> test-time <-> inference-time
    if not binary or (deterministic and stochastic):
        # print("not binary")
        Wb = W
    else:
        # [-1, 1] -> [0, 1]
        Wb = hard_sigmoid(W / H)
        # Wb = T.clip(W/H, -1, 1)

        # Stochastic BinaryConnect
        if stochastic:
            # print("stoch")
            Wb = T.cast(srng.binomial(n=1, p=Wb, size=T.shape(Wb)),
                        theano.config.floatX)
        # Deterministic BinaryConnect (round to nearest)
        else:
            # print("det")
            Wb = T.round(Wb)

        # 0 or 1 -> -1 or 1
        Wb = T.cast(T.switch(Wb, H, -H), theano.config.floatX)

    return Wb
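In the stochastic branch, srng.binomial draws 0/1 samples (int64 by default), so the first T.cast keeps the binarized weights in floatX; T.switch then maps {0, 1} to {-H, +H}. A self-contained sketch of that branch, assuming H = 1.0 and BinaryConnect's hard sigmoid T.clip((x + 1.) / 2., 0, 1):

import theano
import theano.tensor as T
from theano.sandbox.rng_mrg import MRG_RandomStreams

srng = MRG_RandomStreams(seed=42)
W = T.matrix('W')
H = 1.0
p = T.clip((W / H + 1.) / 2., 0, 1)      # hard sigmoid: [-1, 1] -> [0, 1]
Wb = T.cast(srng.binomial(n=1, p=p, size=T.shape(W)),
            theano.config.floatX)        # {0, 1} samples back in floatX
Wb = T.cast(T.switch(Wb, H, -H), theano.config.floatX)  # {0, 1} -> {-H, +H}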
Example 5: get_monitoring_channels
def get_monitoring_channels(self, model, X, Y=None):
    rval = OrderedDict()

    history = model.mf(X, return_history=True)
    q = history[-1]

    if self.supervised:
        assert Y is not None
        Y_hat = q[-1]
        true = T.argmax(Y, axis=1)
        pred = T.argmax(Y_hat, axis=1)
        # true = Print('true')(true)
        # pred = Print('pred')(pred)
        wrong = T.neq(true, pred)
        err = T.cast(wrong.mean(), X.dtype)
        rval['misclass'] = err

        if len(model.hidden_layers) > 1:
            q = model.mf(X, Y=Y)
            pen = model.hidden_layers[-2].upward_state(q[-2])
            Y_recons = model.hidden_layers[-1].mf_update(state_below=pen)
            pred = T.argmax(Y_recons, axis=1)
            wrong = T.neq(true, pred)
            rval['recons_misclass'] = T.cast(wrong.mean(), X.dtype)

    return rval
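The cast on wrong.mean() matters: T.neq produces int8, and taking the mean of an integer tensor yields float64, which would break a float32 monitoring pipeline. A minimal standalone version of the misclassification channel:

import theano
import theano.tensor as T

Y = T.matrix('Y')                 # one-hot targets
Y_hat = T.matrix('Y_hat')         # predicted class scores
wrong = T.neq(T.argmax(Y, axis=1), T.argmax(Y_hat, axis=1))  # int8 vector
misclass = T.cast(wrong.mean(), Y.dtype)   # mean is float64; cast back
f = theano.function([Y, Y_hat], misclass)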
Example 6: get_cost_grads_updates
def get_cost_grads_updates(self, x):
    ha, h = self.network.propup(x, noisestd=self.train_hypers['noise_std'])
    q = 0.9 * self.q + 0.1 * h.mean(axis=0)

    ### get correlation matrix for examples
    # C = T.dot(x.T, h) / x.shape[0]
    x_std = x.std(axis=0)
    h_std = h.std(axis=0)
    xz = (x - x.mean(0)) / (x.std(0) + 1e-2)
    hz = (h - h.mean(0)) / (h.std(0) + 1e-2)
    # C = T.dot(xz.T, hz) / x.shape[0]
    C = T.dot(xz.T, hz)

    lamb = T.cast(self.train_hypers['lamb'], self.dtype)
    rho = T.cast(self.train_hypers['rho'], self.dtype)
    # cost = (C**2).sum() + lamb*(T.abs_(q - rho)).sum()
    # cost = (C**2).sum() / x.shape[0]**2 + lamb*(T.abs_(q - rho)).sum()
    cost = (C**2).sum() / x.shape[0]**2 + lamb * ((q - rho)**2).sum()

    # lamb = T.cast(self.train_hypers['lamb'], self.dtype)
    # rho = T.cast(self.train_hypers['rho'], self.dtype)
    # cost = ((x - y)**2).mean(axis=0).sum() + lamb*(T.abs_(q - rho)).sum()

    updates = {self.q: q}
    return cost, self.grads(cost), updates
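Here T.cast is applied to plain Python hyperparameters to pin them to the model's dtype explicitly, rather than relying on Theano's constant-casting rules. In isolation, with made-up values:

import theano
import theano.tensor as T

train_hypers = {'lamb': 0.1, 'rho': 0.05}   # hypothetical hyperparameters
dtype = theano.config.floatX
lamb = T.cast(train_hypers['lamb'], dtype)  # explicit-dtype constants
rho = T.cast(train_hypers['rho'], dtype)
print(lamb.dtype, rho.dtype)                # both floatX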
Example 7: compute_hard_windows
def compute_hard_windows(self, image_shape, location, scale):
    # find topleft (front) and bottomright (back) corners for each patch
    a = location - 0.5 * (T.cast(self.patch_shape, theano.config.floatX) / scale)
    b = location + 0.5 * (T.cast(self.patch_shape, theano.config.floatX) / scale)

    # grow by three patch pixels
    a -= self.kernel.k_sigma_radius(self.cutoff, scale)
    b += self.kernel.k_sigma_radius(self.cutoff, scale)

    # clip to fit inside image and have nonempty window
    a = T.clip(a, 0, image_shape - 1)
    b = T.clip(b, a + 1, image_shape)

    if self.batched_window:
        # take the bounding box of all windows; now the slices
        # will have the same length for each sample and scan can
        # be avoided. comes at the cost of typically selecting
        # more of the input.
        a = a.min(axis=0, keepdims=True)
        b = b.max(axis=0, keepdims=True)

    # make integer
    a = T.cast(T.floor(a), 'int16')
    b = T.cast(T.ceil(b), 'int16')

    return a, b
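The final pair of casts turns real-valued window corners into integers that can actually slice a tensor: floor for the near corner, ceil for the far one, then int16 (large enough for typical image coordinates). In isolation, assuming a hypothetical 100-pixel image and a made-up window extent:

import theano.tensor as T

loc = T.vector('loc')                  # real-valued corner coordinates
a = T.clip(loc, 0, 100 - 1)            # keep inside the image
b = T.clip(loc + 5.0, a + 1, 100)      # far corner, nonempty window
a_int = T.cast(T.floor(a), 'int16')    # round outward, then make integer
b_int = T.cast(T.ceil(b), 'int16')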
Example 8: resample_step
def resample_step(self):
    idx = self.theano_rng.multinomial(pvals=T.reshape(self.weights_now, (1, self.npcl))).T
    s_samp = T.sum(self.s_now * T.addbroadcast(idx, 1), axis=0)
    h_samp = T.sum(self.h_now * T.addbroadcast(idx, 1), axis=0)
    return T.cast(s_samp, 'float32'), T.cast(h_samp, 'float32')
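Here the casts undo an accidental upcast: multinomial draws are int64, and multiplying float32 particle states by an int64 mask promotes the sum to float64. A standalone version of the resampling step with hypothetical shapes:

import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

rng = RandomStreams(seed=0)
npcl = 100                                  # hypothetical particle count
w = T.vector('w')                           # normalized weights, length npcl
s = T.matrix('s')                           # particle states, (npcl, dim), float32
idx = rng.multinomial(pvals=T.reshape(w, (1, npcl))).T  # int64 one-hot, (npcl, 1)
s_samp = T.sum(s * T.addbroadcast(idx, 1), axis=0)      # float64 after the upcast
s_samp = T.cast(s_samp, 'float32')          # back to float32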
Example 9: get_monitoring_channels
def get_monitoring_channels(self, model, data, **kwargs):
    rval = OrderedDict()

    space, sources = self.get_data_specs(model)
    X_data, X_condition = data
    m = X_data.shape[space.get_batch_axis()]

    G, D = model.generator, model.discriminator

    # Compute false negatives w/ empirical samples
    y_hat = D.fprop((X_data, X_condition))
    rval["false_negatives"] = T.cast((y_hat < 0.5).mean(), "float32")

    # Compute false positives w/ generated sample
    G_conditional_data = self.condition_distribution.sample(m)
    samples = G.sample(G_conditional_data)
    y_hat = D.fprop((samples, G_conditional_data))
    rval["false_positives"] = T.cast((y_hat > 0.5).mean(), "float32")

    # y = T.alloc(0., m, 1)
    cost = D.cost_from_X(((samples, G_conditional_data), y_hat))
    sample_grad = T.grad(-cost, samples)
    rval["sample_grad_norm"] = T.sqrt(T.sqr(sample_grad).sum())

    _S, d_obj, g_obj, i_obj = self.get_samples_and_objectives(model, data)
    if model.monitor_inference and i_obj != 0:
        rval["objective_i"] = i_obj
    if model.monitor_discriminator:
        rval["objective_d"] = d_obj
    if model.monitor_generator:
        rval["objective_g"] = g_obj

    rval["now_train_generator"] = self.now_train_generator
    return rval
Example 10: _transform
def _transform(theta, input, downsample_factor):
    num_batch, num_channels, height, width = input.shape
    theta = T.reshape(theta, (-1, 1))

    # grid of (x_t, y_t, 1), eq (1) in ref [1]
    out_height = T.cast(height / downsample_factor[0], 'int64')
    out_width = T.cast(width / downsample_factor[1], 'int64')
    grid = _meshgrid(out_height, out_width)

    zeros = T.zeros_like(theta)
    padded_theta = T.concatenate([theta, zeros], axis=1)
    T_g = padded_theta.dimshuffle(0, 1, 'x') + grid.dimshuffle('x', 0, 1)
    x_s = T_g[:, 0]
    y_s = T_g[:, 1]
    x_s_flat = x_s.flatten()
    y_s_flat = y_s.flatten()

    # dimshuffle input to (bs, height, width, channels)
    input_dim = input.dimshuffle(0, 2, 3, 1)
    input_transformed = _interpolate(
        input_dim, x_s_flat, y_s_flat,
        out_height, out_width)

    output = T.reshape(
        input_transformed, (num_batch, out_height, out_width, num_channels))
    output = output.dimshuffle(0, 3, 1, 2)  # dimshuffle to conv format
    return output
Example 11: __build_backprop
def __build_backprop(self):
    y_init = self.outside_world.y_data_one_hot  # initialize y = y_data
    h_init = my_op(2 * (T.dot(rho(y_init), self.W2.T) + self.bh))  # initialize h by backward propagation
    x_init = my_op(T.dot(rho(h_init), self.W1.T) + self.bx)  # initialize x by backward propagation

    Delta_y = y_init - self.y
    Delta_h = h_init - self.h
    Delta_x = x_init - self.x

    by_dot = T.mean(Delta_y, axis=0)
    W2_dot = T.dot(self.rho_h.T, Delta_y) / T.cast(self.x.shape[0], dtype=theano.config.floatX)
    bh_dot = T.mean(Delta_h, axis=0)
    W1_dot = T.dot(self.rho_x.T, Delta_h) / T.cast(self.x.shape[0], dtype=theano.config.floatX)
    bx_dot = T.mean(Delta_x, axis=0)

    alpha = T.fscalar('alpha')
    by_new = self.by + alpha * by_dot
    W2_new = self.W2 + alpha * W2_dot
    bh_new = self.bh + alpha * bh_dot
    W1_new = self.W1 + alpha * W1_dot
    bx_new = self.bx + alpha * bx_dot

    updates_states = [(self.x, x_init), (self.h, h_init), (self.y, y_init)]
    # NB: bx_new is computed above but is not applied in updates_params in the
    # original snippet.
    updates_params = [(self.by, by_new), (self.W2, W2_new), (self.bh, bh_new), (self.W1, W1_new)]

    backprop = theano.function(
        inputs=[alpha],
        outputs=[],
        updates=updates_states + updates_params
    )
    return backprop
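T.cast(self.x.shape[0], ...) is how the batch size (a symbolic int64) enters a floatX division without dragging the gradient expressions up to float64. Reduced to its essence:

import theano
import theano.tensor as T

h = T.matrix('h')          # activations, (batch, hidden)
delta = T.matrix('delta')  # errors, (batch, out)
batch = T.cast(h.shape[0], dtype=theano.config.floatX)  # int64 -> floatX
W_dot = T.dot(h.T, delta) / batch   # mean outer product, stays in floatX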
Example 12: _step
def _step(self, x_tm1, u_tm1, inputs, x_prior, u_prior, *args):
    # x_prior are previous states
    # u_prior are causes from above
    outputs = self.activation(T.dot(x_tm1, self.W))
    rec_error = T.sqr(inputs - outputs).sum()
    causes = (1 + T.exp(-T.dot(u_tm1, self.V))) * .5

    if self.pool_flag:
        batch_size = inputs.shape[0]
        dim = causes.shape[1]
        imgs = T.cast(T.sqrt(dim), 'int64')
        causes_up = causes.reshape(
            (batch_size, 1, imgs, imgs)).repeat(
                self.pool_size, axis=2).repeat(self.pool_size,
                                               axis=3).flatten(ndim=2)
    else:
        causes_up = causes

    x = _IstaStep(rec_error, x_tm1, lambdav=self.gamma * causes_up,
                  x_prior=x_prior)

    if self.pool_flag:
        dim = T.cast(T.sqrt(x.shape[1]), 'int64')
        x_pool = x.reshape((batch_size, 1, dim, dim))
        x_pool = max_pool_2d(x_pool, ds=(self.pool_size, ) * 2).flatten(ndim=2)
    else:
        x_pool = x

    prev_u_cost = .01 * self.gamma * T.sqr(u_tm1 - u_prior).sum()
    u_cost = causes * abs(x_pool) * self.gamma + prev_u_cost
    u = _IstaStep(u_cost.sum(), u_tm1, lambdav=self.gamma)
    causes = (1 + T.exp(-T.dot(u, self.V))) * .5
    u_cost = causes * abs(x_pool) * self.gamma

    return (x, u, u_cost, outputs)
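T.cast(T.sqrt(dim), 'int64') recovers an integer side length from a symbolic dimension: shapes are int64, T.sqrt promotes them to float64, and reshape needs integers again. This only makes sense when the dimension is a perfect square, as it is here; a minimal sketch:

import theano.tensor as T

x = T.matrix('x')                            # (batch, dim), dim a perfect square
side = T.cast(T.sqrt(x.shape[1]), 'int64')   # float64 sqrt -> usable int size
imgs = x.reshape((x.shape[0], 1, side, side))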
Example 13: compute_f_mu
def compute_f_mu(x, t, params):
    [centers, spreads, biases, M, b] = params
    diffs = x.dimshuffle(0, 1, 2, 'x') - centers.dimshuffle('x', 'x', 0, 1)
    scaled_diffs = (diffs**2) * T.exp(spreads).dimshuffle('x', 'x', 0, 1)
    exp_terms = T.sum(scaled_diffs, axis=2) + biases.dimshuffle('x', 'x', 0) * 0.0
    h = T.exp(-exp_terms)
    sumact = T.sum(h, axis=2)

    # Normalization
    hnorm = h / sumact.dimshuffle(0, 1, 'x')
    z = T.dot(hnorm, M)
    z = T.reshape(z, (t.shape[0], t.shape[1], ntgates, nx)) + b.dimshuffle('x', 'x', 0, 1)  # nt by nb by ntgates by nx
    # z = z + T.reshape(x, (t.shape[0], t.shape[1], 1, nx))

    tpoints = T.cast(T.arange(ntgates), 'float32') / T.cast(ntgates - 1, 'float32')
    tpoints = T.reshape(tpoints, (1, 1, ntgates))
    # tgating = T.exp(T.dot(t, muWT) + mubT)  # nt by nb by ntgates
    tgating = T.exp(-kT * (tpoints - t)**2)
    tgating = tgating / T.reshape(T.sum(tgating, axis=2), (t.shape[0], t.shape[1], 1))
    tgating = T.reshape(tgating, (t.shape[0], t.shape[1], ntgates, 1))

    mult = z * tgating
    out = T.sum(mult, axis=2)
    # out = out + x
    return T.cast(out, 'float32')
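T.arange yields int64, so both operands of the tpoints division are cast to float32 to keep the normalized time grid (and everything downstream) out of float64. In isolation, with a made-up gate count:

import theano.tensor as T

ntgates = 8   # hypothetical number of time gates
tpoints = T.cast(T.arange(ntgates), 'float32') / T.cast(ntgates - 1, 'float32')
print(tpoints.dtype)   # 'float32': an evenly spaced grid on [0, 1]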
Example 14: loss
def loss(x_0, n, t, params):
    muparams = params[:5]
    covparams = params[5:10]
    tpoints = T.cast(T.arange(nsteps), 'float32') / T.cast(nsteps, 'float32')
    betas = compute_betas(params[-1], tpoints)

    def step(nt, bt, xt):
        mean = xt * T.sqrt(1.0 - bt)
        xnew = T.cast(mean + T.sqrt(bt) * nt, 'float32')
        losst = T.cast(0.5 * T.mean(T.sum((((mean - xnew)**2) / bt + T.log(np.pi * 2.0 * bt)), axis=1)), 'float32')
        return xnew, losst

    [xhist, fwdlosshist], updates = theano.scan(fn=step,
                                                outputs_info=[x_0, None],
                                                sequences=[n, betas],
                                                n_steps=nsteps)

    forward_loss = -T.mean(fwdlosshist) + 0.5 * T.mean(T.sum((xhist[-1]**2 + T.log(np.pi * 2.0)), axis=1))

    # f_mu = compute_f_mu(xhist, t, muparams)
    # f_cov = compute_f_cov(xhist, t, covparams)
    # diffs = (f_mu[2:] - xhist[:-1])**2
    # gaussian_terms = T.sum(diffs * (1.0 / f_cov[1:].dimshuffle(0, 1, 'x')), axis=2)
    # det_terms = T.sum(T.log(f_cov[1:].dimshuffle(0, 1, 'x')), axis=2)

    f_mu = compute_f_mu(xhist, t, muparams) + xhist * (T.sqrt(1.0 - betas)).dimshuffle(0, 'x', 'x')
    f_cov = compute_f_cov(xhist, t, covparams) * betas.dimshuffle(0, 'x')
    xhist = T.concatenate([x_0.dimshuffle('x', 0, 1), xhist], axis=0)
    diffs = (f_mu - xhist[:-1])**2
    gaussian_terms = T.sum(diffs * (1.0 / f_cov.dimshuffle(0, 1, 'x')), axis=2)
    det_terms = T.sum(T.log(f_cov.dimshuffle(0, 1, 'x')), axis=2)
    reverse_loss = T.mean(T.mean(gaussian_terms + det_terms))

    return reverse_loss + forward_loss
Example 15: test_elemwise_composite_float64
def test_elemwise_composite_float64():
    # Test that we don't fuse composite elemwise ops with float64 somewhere
    # inside: nvcc by default downcasts float64 to float32. We would need to
    # tell it not to do so, but that is possible only on some devices.
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')

    def get_all_basic_scalar(composite_op):
        l = []
        for i in composite_op.env.toposort():
            if isinstance(i, theano.scalar.Composite):
                l += get_all_basic_scalar(i)
            else:
                l.append(i)
        return l

    for mode in [mode_with_gpu, mode_with_gpu.excluding('gpu_after_fusion'),
                 mode_with_gpu.excluding('elemwise_fusion')]:
        f = pfunc([a, b],
                  tensor.cast(tensor.lt(tensor.cast(a, 'float64') ** 2,
                                        b),
                              'float32'), mode=mode)

        out = f(av, bv)
        assert numpy.all(out == ((av ** 2) < bv))
        for node in f.maker.env.toposort():
            if isinstance(node.op, cuda.GpuElemwise):
                if isinstance(node.op.scalar_op, theano.scalar.Composite):
                    scals = get_all_basic_scalar(node.op.scalar_op)
                    for s in scals:
                        assert not any([i.type.dtype == 'float64'
                                        for i in s.inputs + s.outputs])
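The pattern under test, stripped of the GPU-mode machinery, is the cast sandwich itself: upcast to float64 for the computation, compare, and cast the (int8) comparison result back to float32 so the output dtype stays GPU-friendly. A CPU-only sketch of the same graph:

import numpy
import theano
import theano.tensor as T

a = T.fmatrix('a')
b = T.fmatrix('b')
out = T.cast(T.lt(T.cast(a, 'float64') ** 2, b), 'float32')  # float64 inside, float32 out
f = theano.function([a, b], out)
av = numpy.random.rand(4, 4).astype('float32')
bv = numpy.ones((4, 4), dtype='float32')
assert numpy.all(f(av, bv) == ((av ** 2) < bv))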