This article collects and summarizes typical usage examples of the theano.tensor.cast method in Python. If you have been wondering how exactly to use tensor.cast and where it applies, the curated code samples below may help you. You can also explore further usage examples from its containing module, theano.tensor.
The following presents 15 code examples of the tensor.cast method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
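Before the examples, a minimal sketch of the basic API: T.cast returns a new symbolic variable with the requested dtype rather than casting in place. The variable names below are illustrative.

import numpy as np
import theano
import theano.tensor as T

x = T.dvector('x')                    # a float64 vector
y = T.cast(x, 'int32')                # symbolic dtype conversion
f = theano.function([x], y)
print(f(np.array([1.7, -2.2, 3.0])))  # float -> int truncates toward zero: [ 1 -2  3]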
Example 1: ctc_update_log_p

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def ctc_update_log_p(skip_idxs, zeros, active, log_p_curr, log_p_prev):
    active_skip_idxs = skip_idxs[(skip_idxs < active).nonzero()]
    active_next = T.cast(T.minimum(
        T.maximum(
            active + 1,
            T.max(T.concatenate([active_skip_idxs, [-1]])) + 2 + 1
        ), log_p_curr.shape[0]), 'int32')
    common_factor = T.max(log_p_prev[:active])
    p_prev = T.exp(log_p_prev[:active] - common_factor)
    _p_prev = zeros[:active_next]
    # copy over
    _p_prev = T.set_subtensor(_p_prev[:active], p_prev)
    # previous transitions
    _p_prev = T.inc_subtensor(_p_prev[1:], _p_prev[:-1])
    # skip transitions
    _p_prev = T.inc_subtensor(_p_prev[active_skip_idxs + 2], p_prev[active_skip_idxs])
    updated_log_p_prev = T.log(_p_prev) + common_factor
    log_p_next = T.set_subtensor(
        zeros[:active_next],
        log_p_curr[:active_next] + updated_log_p_prev
    )
    return active_next, log_p_next
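The update above leans on T.set_subtensor and T.inc_subtensor, which build new tensors rather than mutating their input. A small self-contained sketch with illustrative names:

import numpy as np
import theano
import theano.tensor as T

v = T.dvector('v')
added = T.inc_subtensor(v[1:], v[:-1])   # add each element to its right neighbour
zeroed = T.set_subtensor(v[:2], 0.)      # overwrite the first two entries
f = theano.function([v], [added, zeroed])
print(f(np.array([1., 2., 3.])))         # [array([1., 3., 5.]), array([0., 0., 3.])]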
Example 2: make_node

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def make_node(self, x, ilist):
    x_ = as_cuda_ndarray_variable(x)
    ilist_ = gpu_contiguous(T.cast(ilist, dtype=config.floatX))  # T.as_tensor_variable(ilist)
    # if ilist_.type.dtype[:3] not in ('int', 'uin'):
    #     raise TypeError('index must be integers')
    if ilist_.type.ndim != 1:
        raise TypeError('index must be a vector')
    if x_.type.ndim == 0:
        raise TypeError('cannot index into a scalar')
    # # the C code assumes it is int64
    # if x.ndim in [1, 2, 3] and ilist_.dtype in [
    #         'int8', 'int16', 'int32', 'uint8', 'uint16', 'uint32']:
    #     ilist_ = tensor.cast(ilist_, 'int64')
    bcast = (ilist_.broadcastable[0],) + x_.broadcastable[1:]
    return theano.gof.Apply(self, [x_, ilist_],
                            [CudaNdarrayType(dtype=x.dtype,
                                             broadcastable=bcast)()])
Example 3: adadelta

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def adadelta(self, param, grad, updates, sample_idx=None, epsilon=1e-6):
    v1 = np.float32(self.decay)
    v2 = np.float32(1.0 - self.decay)
    acc = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    upd = theano.shared(param.get_value(borrow=False) * 0., borrow=True)
    if sample_idx is None:
        acc_new = acc + grad ** 2
        updates[acc] = acc_new
        grad = T.sqrt(upd + epsilon) * grad
        upd_new = v1 * upd + v2 * grad ** 2
        updates[upd] = upd_new
    else:
        acc_s = acc[sample_idx]
        acc_new = acc_s + grad ** 2
        updates[acc] = T.set_subtensor(acc_s, acc_new)
        upd_s = upd[sample_idx]
        upd_new = v1 * upd_s + v2 * grad ** 2
        updates[upd] = T.set_subtensor(upd_s, upd_new)
        grad = T.sqrt(upd_s + epsilon) * grad
    gradient_scaling = T.cast(T.sqrt(acc_new + epsilon), theano.config.floatX)
    return grad / gradient_scaling
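A minimal sketch of how the returned update might be wired into a training function. It assumes a hypothetical object opt exposing the adadelta method above (with a decay attribute; the surrounding class is not shown in the snippet), and the learning rate 0.05 is illustrative:

import numpy as np
import theano
import theano.tensor as T

W = theano.shared(np.zeros((4, 2), dtype=theano.config.floatX), name='W')
x = T.matrix('x')
cost = T.sum((T.dot(x, W) - 1.) ** 2)
grad = T.grad(cost, W)

updates = {}                                  # filled in place with the acc/upd updates
scaled_grad = opt.adadelta(W, grad, updates)  # hypothetical `opt` instance
updates[W] = W - 0.05 * scaled_grad
train = theano.function([x], cost, updates=updates)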
Example 4: get_monitoring_channels

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def get_monitoring_channels(self, data):
    if data is None:
        m = 100
    else:
        m = data.shape[0]
    n = self.mlp.get_input_space().get_total_dimension()
    noise = self.get_noise((m, n))
    rval = OrderedDict()
    try:
        rval.update(self.mlp.get_monitoring_channels((noise, None)))
    except Exception:
        warnings.warn("something went wrong with generator.mlp's monitoring channels")
    if self.monitor_ll:
        rval['ll'] = T.cast(self.ll(data, self.ll_n_samples, self.ll_sigma),
                            theano.config.floatX).mean()
        rval['nll'] = -rval['ll']
    return rval
Example 5: adam_updates

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def adam_updates(params, cost, lr=0.001, mom1=0.9, mom2=0.999):
    updates = []
    grads = T.grad(cost, params)
    t = th.shared(np.cast[th.config.floatX](1.))
    for p, g in zip(params, grads):
        v = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        mg = th.shared(np.cast[th.config.floatX](p.get_value() * 0.))
        v_t = mom1 * v + (1. - mom1) * g
        mg_t = mom2 * mg + (1. - mom2) * T.square(g)
        v_hat = v_t / (1. - mom1 ** t)
        mg_hat = mg_t / (1. - mom2 ** t)
        g_t = v_hat / T.sqrt(mg_hat + 1e-8)
        p_t = p - lr * g_t
        updates.append((v, v_t))
        updates.append((mg, mg_t))
        updates.append((p, p_t))
    updates.append((t, t + 1))
    return updates
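A toy usage sketch, assuming adam_updates from this example is in scope along with its imports (numpy as np, theano as th, theano.tensor as T); the quadratic cost is illustrative:

w = th.shared(np.zeros(3, dtype=th.config.floatX), name='w')
x = T.vector('x')
cost = T.sum((w - x) ** 2)
train = th.function([x], cost, updates=adam_updates([w], cost, lr=0.1))
for _ in range(100):
    train(np.ones(3, dtype=th.config.floatX))
print(w.get_value())  # converges toward [1., 1., 1.]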
Example 6: get_output_for

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def get_output_for(self, input, deterministic=False, **kwargs):
    if deterministic:
        norm_features = (input - self.avg_batch_mean.dimshuffle(*self.dimshuffle_args)) / T.sqrt(1e-6 + self.avg_batch_var).dimshuffle(*self.dimshuffle_args)
    else:
        batch_mean = T.mean(input, axis=self.axes_to_sum).flatten()
        centered_input = input - batch_mean.dimshuffle(*self.dimshuffle_args)
        batch_var = T.mean(T.square(centered_input), axis=self.axes_to_sum).flatten()
        batch_stdv = T.sqrt(1e-6 + batch_var)
        norm_features = centered_input / batch_stdv.dimshuffle(*self.dimshuffle_args)
        # BN updates
        new_m = 0.9 * self.avg_batch_mean + 0.1 * batch_mean
        new_v = 0.9 * self.avg_batch_var + T.cast((0.1 * input.shape[0]) / (input.shape[0] - 1), th.config.floatX) * batch_var
        self.bn_updates = [(self.avg_batch_mean, new_m), (self.avg_batch_var, new_v)]
    if hasattr(self, 'g'):
        activation = norm_features * self.g.dimshuffle(*self.dimshuffle_args)
    else:
        activation = norm_features
    if hasattr(self, 'b'):
        activation += self.b.dimshuffle(*self.dimshuffle_args)
    return self.nonlinearity(activation)
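The T.cast in the running-variance update deserves a note: input.shape[0] is a symbolic int64, and the division promotes the expression to float64, so it is cast back to floatX to keep the graph single precision; the n/(n-1) factor is Bessel's correction for the biased batch variance. A standalone sketch of just that factor:

import theano
import theano.tensor as T

n = T.iscalar('n')  # symbolic batch size
factor = T.cast((0.1 * n) / (n - 1), theano.config.floatX)
print(theano.function([n], factor)(64))  # ~0.1016 for a batch of 64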
Example 7: make_node

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def make_node(self, acts, labels, input_lengths):
    # Unless specified, assume all sequences have the full sequence length, i.e. acts.shape[0]
    if input_lengths is None:
        input_lengths = T.cast(acts.shape[0], dtype="int32") * T.ones_like(acts[0, :, 0], dtype=np.int32)
    # acts.shape = [seqLen, batchN, outputUnit]
    if acts.dtype != "float32":
        raise Exception("acts must be float32 instead of %s" % acts.dtype)
    # labels.shape = [batchN, labelLen]
    if labels.dtype != "int32":
        raise Exception("labels must be int32 instead of %s" % labels.dtype)
    # input_lengths.shape = [batchN]
    if input_lengths.dtype != "int32":
        raise Exception("input_lengths must be int32 instead of %s" % input_lengths.dtype)
    applyNode = theano.Apply(self, inputs=[acts, input_lengths, labels],
                             outputs=[self.costs, self.gradients])
    # Return only the cost. The gradient will be returned by grad()
    self.default_output = 0
    return applyNode
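The default-lengths expression broadcasts the int32-cast sequence length over the batch dimension. A self-contained check of that one line, with illustrative shapes:

import numpy as np
import theano
import theano.tensor as T

acts = T.ftensor3('acts')  # [seqLen, batchN, outputUnit]
lengths = T.cast(acts.shape[0], dtype='int32') * T.ones_like(acts[0, :, 0], dtype=np.int32)
f = theano.function([acts], lengths)
print(f(np.zeros((5, 3, 4), dtype='float32')))  # [5 5 5]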
Example 8: get_cost_updates

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def get_cost_updates(self, lr=0.1, persistent=None, k=1):
    pre_sigmoid_ph, ph_mean, ph_sample = self.sample_h_given_v(self.input)
    if persistent is None:
        chain_start = ph_sample
    else:
        chain_start = persistent
    ([pre_sigmoid_nvs, nv_means, nv_samples,
      pre_sigmoid_nhs, nh_means, nh_samples], updates) = theano.scan(
        self.gibbs_step,
        outputs_info=[None, None, None, None, None, chain_start],
        n_steps=k, name="gibbs_step")
    chain_end = nv_samples[-1]
    cost = T.mean(self.free_energy(self.input)) - T.mean(self.free_energy(chain_end))
    gparams = T.grad(cost, self.params, consider_constant=[chain_end])
    for gparam, param in zip(gparams, self.params):
        updates[param] = param - gparam * T.cast(lr, dtype=theano.config.floatX)
    if persistent:
        updates[persistent] = nh_samples[-1]
        monitoring_cost = self.get_pseudo_likelihood_cost(updates)
    else:
        monitoring_cost = self.get_reconstruction_cost(updates, pre_sigmoid_nvs[-1])
    return monitoring_cost, updates
Example 9: make_node

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def make_node(self, x, y, p_data, p_ind, p_ptr, p_ncols):
    x = tensor.as_tensor_variable(x)
    y = tensor.as_tensor_variable(y)
    p_data = tensor.as_tensor_variable(p_data)
    p_ind = tensor.as_tensor_variable(p_ind)
    p_ptr = tensor.as_tensor_variable(p_ptr)
    p_ncols = tensor.as_tensor_variable(p_ncols)
    assert p_ncols.dtype == 'int32'
    dtype_out = scalar.upcast(x.type.dtype, y.type.dtype,
                              p_data.type.dtype)
    dot_out = scalar.upcast(x.type.dtype, y.type.dtype)
    # We call the BLAS ?dot function, which only takes parameters of the same type
    x = tensor.cast(x, dot_out)
    y = tensor.cast(y, dot_out)
    return gof.Apply(self, [x, y, p_data, p_ind, p_ptr, p_ncols], [
        tensor.tensor(dtype=dtype_out, broadcastable=(False,)),
        tensor.tensor(dtype=p_ind.type.dtype, broadcastable=(False,)),
        tensor.tensor(dtype=p_ptr.type.dtype, broadcastable=(False,))
    ])
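scalar.upcast computes the NumPy-style promoted dtype of its arguments, which is why both operands can then be cast to a common type before the BLAS dot. A two-line illustration:

from theano import scalar

print(scalar.upcast('float32', 'float64'))  # 'float64'
print(scalar.upcast('int32', 'int64'))      # 'int64'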
Example 10: test_stabilize_log_softmax

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def test_stabilize_log_softmax():
    mode = theano.compile.mode.get_default_mode()
    mode = mode.including('local_log_softmax', 'specialize')
    x = matrix()
    y = softmax(x)
    z = theano.tensor.log(y)
    f = theano.function([x], z, mode=mode)
    assert hasattr(f.maker.fgraph.outputs[0].tag, 'trace')
    # check that the softmax has been optimized out
    for node in f.maker.fgraph.toposort():
        assert not isinstance(node.op, y.owner.op.__class__)
    # call the function so debug mode can verify the optimized
    # version matches the unoptimized version
    rng = numpy.random.RandomState([2012, 8, 22])
    f(numpy.cast[config.floatX](rng.randn(2, 3)))
Example 11: make_node

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def make_node(self, x, ilist):
    x_ = as_cuda_ndarray_variable(x)
    ilist_ = tensor.as_tensor_variable(ilist)
    if ilist_.type.dtype[:3] not in ('int', 'uin'):
        raise TypeError('index must be integers')
    if ilist_.type.ndim != 1:
        raise TypeError('index must be a vector')
    if x_.type.ndim == 0:
        raise TypeError('cannot index into a scalar')
    # the C code assumes it is int64
    if x.ndim in [1, 2, 3] and ilist_.dtype in [
            'int8', 'int16', 'int32', 'uint8', 'uint16', 'uint32']:
        ilist_ = tensor.cast(ilist_, 'int64')
    bcast = (ilist_.broadcastable[0],) + x_.broadcastable[1:]
    return Apply(self, [x_, ilist_],
                 [CudaNdarrayType(dtype=x.dtype,
                                  broadcastable=bcast)()])
Example 12: local_gpu_crossentorpy_softmax_argmax_1hot_with_bias

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def local_gpu_crossentorpy_softmax_argmax_1hot_with_bias(node):
    if isinstance(node.op, tensor.nnet.CrossentropySoftmaxArgmax1HotWithBias):
        x, b, y = node.inputs
        if x.owner and isinstance(x.owner.op, HostFromGpu):
            gpu_x, = x.owner.inputs
            # if y is a cast to integers, we can go to the underlying
            # thing if we want, since this gpu op will cast to integers
            # internally anyway
            int_cast_ops = (
                tensor.basic._convert_to_int32,
                tensor.basic._convert_to_int8,
                tensor.basic._convert_to_int16,
                tensor.basic._convert_to_int64)
            while y.owner and y.owner.op in int_cast_ops:
                y = y.owner.inputs[0]
            gpu_nll, gpu_sm, gpu_am = \
                GpuCrossentropySoftmaxArgmax1HotWithBias()(
                    gpu_x,
                    as_cuda_ndarray_variable(b),
                    as_cuda_ndarray_variable(cast(y, 'float32')))
            am_dtype = node.outputs[2].type.dtype
            return [host_from_gpu(gpu_nll),
                    host_from_gpu(gpu_sm),
                    cast(host_from_gpu(gpu_am), am_dtype)]
    return False
Example 13: test_elemwise_comparaison_cast

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def test_elemwise_comparaison_cast():
    """
    Test that an elemwise comparison followed by a cast to float32 is
    pushed to the GPU.
    """
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')
    for g, ans in [(tensor.lt, av < bv), (tensor.gt, av > bv),
                   (tensor.le, av <= bv), (tensor.ge, av >= bv)]:
        f = pfunc([a, b], tensor.cast(g(a, b), 'float32'), mode=mode_with_gpu)
        out = f(av, bv)
        assert numpy.all(out == ans)
        assert any([isinstance(node.op, cuda.GpuElemwise)
                    for node in f.maker.fgraph.toposort()])
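The same pattern is useful on its own: Theano comparisons return int8, so a cast is needed before using the result in floating-point arithmetic. A minimal CPU-only sketch with illustrative names:

import numpy as np
import theano
import theano.tensor as T

a = T.fvector('a')
mask = T.cast(T.gt(a, 0), 'float32')   # int8 comparison result -> float32
relu = theano.function([a], mask * a)  # zeroes out non-positive entries
print(relu(np.array([-1., 2., -3., 4.], dtype='float32')))  # [0. 2. 0. 4.]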
Example 14: make_node

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def make_node(self, x, ilist):
    ctx_name = infer_context_name(x, ilist)
    x_ = as_gpuarray_variable(x, ctx_name)
    ilist__ = tensor.as_tensor_variable(ilist)
    if ilist__.type.dtype[:3] not in ('int', 'uin'):
        raise TypeError('index must be integers')
    if ilist__.type.dtype != 'int64':
        ilist__ = tensor.cast(ilist__, 'int64')
    ilist_ = as_gpuarray_variable(ilist__, ctx_name)
    if ilist_.type.dtype != 'int64':
        raise TypeError('index must be int64')
    if ilist_.type.ndim != 1:
        raise TypeError('index must be a vector')
    if x_.type.ndim == 0:
        raise TypeError('cannot index into a scalar')
    bcast = ilist_.broadcastable + x_.broadcastable[1:]
    return gof.Apply(self, [x_, ilist_],
                     [GpuArrayType(dtype=x.dtype,
                                   context_name=ctx_name,
                                   broadcastable=bcast)()])
Example 15: errors4one

# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import cast [as alias]
def errors4one(self, z, out, weight=None, distLabelType='12C'):
    distBins = config.distCutoffs[distLabelType]
    label8 = DistanceUtils.LabelsOfOneDistance(config.ContactDefinition, distBins)
    label15 = DistanceUtils.LabelsOfOneDistance(config.InteractionLimit, distBins)
    z3C = T.cast(T.ge(z, label8), 'int32') + T.cast(T.ge(z, label15), 'int32')
    o3C = T.cast(T.ge(out, label8), 'int32') + T.cast(T.ge(out, label15), 'int32')
    if weight is not None:
        err = T.sum(T.mul(weight, T.neq(o3C, z3C))) * 1. / T.sum(weight)
    else:
        err = T.mean(T.neq(o3C, z3C))
    ## err is a scalar; convert it to a tensor with ndim=1
    return T.stack([err])

## this function returns a vector of errors; the size of this vector equals the sum of ValueDims over all the responses
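The summed-comparison trick in this example turns two thresholds into a three-way class label (0, 1, or 2). A self-contained sketch with hypothetical cutoff values standing in for label8 and label15:

import numpy as np
import theano
import theano.tensor as T

z = T.dvector('z')
t_lo, t_hi = 8.0, 15.0  # hypothetical cutoffs in place of label8/label15
z3C = T.cast(T.ge(z, t_lo), 'int32') + T.cast(T.ge(z, t_hi), 'int32')
f = theano.function([z], z3C)
print(f(np.array([5., 10., 20.])))  # [0 1 2]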