This article collects typical usage examples of the Python method theano.tensor.ones_like. If you have been wondering what exactly tensor.ones_like does and how to use it, the curated code examples below may help. You can also explore further usage examples from theano.tensor, the module this method belongs to.
The following shows 15 code examples of tensor.ones_like, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
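Before the examples, here is a minimal, self-contained sketch of what tensor.ones_like does (variable names are illustrative, not taken from any example below): it builds a symbolic tensor of ones with the same shape and dtype as its argument.

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')                        # symbolic 2-D input
f = theano.function([x], T.ones_like(x))
print(f(np.zeros((2, 3), dtype=theano.config.floatX)))
# -> [[1. 1. 1.]
#     [1. 1. 1.]]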
Example 1: make_node
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def make_node(self, acts, labels, input_lengths):
    # Unless specified, assume all sequences have full sequence length, i.e. acts_.shape[0]
    if input_lengths is None:
        input_lengths = T.cast(acts.shape[0], dtype="int32") * T.ones_like(acts[0, :, 0], dtype=np.int32)

    # acts.shape = [seqLen, batchN, outputUnit]
    if acts.dtype != "float32":
        raise Exception("acts must be float32 instead of %s" % acts.dtype)
    # labels.shape = [batchN, labelLen]
    if labels.dtype != "int32":
        raise Exception("labels must be int32 instead of %s" % labels.dtype)
    # input_lengths.shape = [batchN]
    if input_lengths.dtype != "int32":
        raise Exception("input_lengths must be int32 instead of %s" % input_lengths.dtype)

    applyNode = theano.Apply(self, inputs=[acts, input_lengths, labels],
                             outputs=[self.costs, self.gradients])
    # Return only the cost. Gradient will be returned by grad()
    self.default_output = 0
    return applyNode
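As a standalone illustration of the default-length trick above, this hedged sketch (the input array is illustrative) shows how multiplying a scalar by ones_like yields a per-batch vector of full sequence lengths:

import numpy as np
import theano
import theano.tensor as T

acts = T.tensor3('acts')  # [seqLen, batchN, outputUnit]
input_lengths = T.cast(acts.shape[0], dtype='int32') * T.ones_like(acts[0, :, 0], dtype=np.int32)
f = theano.function([acts], input_lengths)
print(f(np.zeros((7, 3, 5), dtype='float32')))  # -> [7 7 7]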
Example 2: test_int32_dtype
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def test_int32_dtype(self):
    # Reported on the theano-users mailing list:
    # https://groups.google.com/d/msg/theano-users/MT9ui8LtTsY/rwatwEF9zWAJ
    size = 9
    intX = 'int32'

    C = tensor.matrix('C', dtype=intX)
    I = tensor.matrix('I', dtype=intX)

    fI = I.flatten()
    data = tensor.ones_like(fI)
    indptr = tensor.arange(data.shape[0] + 1, dtype='int32')

    m1 = sparse.CSR(data, fI, indptr, (8, size))
    m2 = sparse.dot(m1, C)
    y = m2.reshape(shape=(2, 4, 9), ndim=3)

    f = theano.function(inputs=[I, C], outputs=y)
    i = numpy.asarray([[4, 3, 7, 7], [2, 8, 4, 5]], dtype=intX)
    a = numpy.asarray(numpy.random.randint(0, 100, (size, size)),
                      dtype=intX)
    f(i, a)
Example 3: test_structured_add_s_v
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def test_structured_add_s_v(self):
    sp_types = {'csc': sp.csc_matrix,
                'csr': sp.csr_matrix}

    for format in ['csr', 'csc']:
        for dtype in ['float32', 'float64']:
            x = theano.sparse.SparseType(format, dtype=dtype)()
            y = tensor.vector(dtype=dtype)
            f = theano.function([x, y], structured_add_s_v(x, y))

            spmat = sp_types[format](random_lil((4, 3), dtype, 3))
            spones = spmat.copy()
            spones.data = numpy.ones_like(spones.data)
            mat = numpy.asarray(numpy.random.rand(3), dtype=dtype)

            out = f(spmat, mat)

            utt.assert_allclose(as_ndarray(spones.multiply(spmat + mat)),
                                out.toarray())
Example 4: sp_ones_like
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def sp_ones_like(x):
    """
    Construct a sparse matrix of ones with the same sparsity pattern.

    Parameters
    ----------
    x
        Sparse matrix to take the sparsity pattern from.

    Returns
    -------
    A sparse matrix
        The same as `x`, with its data replaced by ones.

    """
    # TODO: don't restrict to CSM formats
    data, indices, indptr, shape = csm_properties(x)
    return CSM(format=x.format)(tensor.ones_like(data), indices, indptr, shape)
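A hedged usage sketch for sp_ones_like (the input matrix is illustrative; it assumes the definition above is in scope, along with its csm_properties/CSM imports from theano.sparse.basic):

import scipy.sparse as sp
import theano
import theano.sparse

x = theano.sparse.csr_matrix('x')        # symbolic CSR matrix
f = theano.function([x], sp_ones_like(x))
m = sp.csr_matrix([[0., 2., 0.], [3., 0., 4.]])
print(f(m).toarray())
# -> [[0. 1. 0.]
#     [1. 0. 1.]]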
Example 5: test_gpujoin_gpualloc
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def test_gpujoin_gpualloc():
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')

    f = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                             mode=mode_with_gpu)

    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == T.join for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, B.GpuAlloc)
                for node in f_gpu.maker.fgraph.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, B.GpuAlloc)
                for node in f_gpu2.maker.fgraph.toposort()]) == 2
    assert sum([node.op == B.gpu_join
                for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
Example 6: test_gpualloc_output_to_gpu
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def test_gpualloc_output_to_gpu():
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    a = tcn.shared_constructor(a_val)
    b = T.fscalar()

    f = theano.function([b], T.ones_like(a) + b, mode=mode_without_gpu)
    f_gpu = theano.function([b], B.gpu_from_host(T.ones_like(a)) + b,
                            mode=mode_with_gpu)

    f(2)
    f_gpu(2)

    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 1
    assert sum([node.op == B.gpu_alloc
                for node in f_gpu.maker.fgraph.toposort()]) == 1

    assert numpy.allclose(numpy.ones(a.get_value(borrow=True).shape) + 9,
                          f_gpu(9))
    assert numpy.allclose(f(5), f_gpu(5))
Example 7: compute_kernel
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def compute_kernel(lls, lsf, x, z):
    ls = T.exp(lls)
    sf = T.exp(lsf)

    if x.ndim == 1:
        x = x[None, :]
    if z.ndim == 1:
        z = z[None, :]

    lsre = T.outer(T.ones_like(x[:, 0]), ls)
    r2 = T.outer(T.sum(x * x / lsre, 1), T.ones_like(z[:, 0:1])) - \
        np.float32(2) * T.dot(x / lsre, T.transpose(z)) + \
        T.dot(np.float32(1.0) / lsre, T.transpose(z)**2)
    k = sf * T.exp(-np.float32(0.5) * r2)
    return k
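compute_kernel evaluates an automatic-relevance-determination squared-exponential kernel, k(x, z) = sf * exp(-0.5 * sum_d (x_d - z_d)^2 / ls_d), with the pairwise squared distances assembled via ones_like outer products. A hedged usage sketch (the hyperparameter values and input shapes are illustrative):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
z = T.matrix('z')
lls = theano.shared(np.zeros(2, dtype='float32'))  # log length-scales, one per input dimension
lsf = theano.shared(np.float32(0.0))               # log signal variance
f = theano.function([x, z], compute_kernel(lls, lsf, x, z))
print(f(np.random.rand(5, 2).astype('float32'),
        np.random.rand(3, 2).astype('float32')).shape)  # -> (5, 3)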
Example 8: compute_psi1
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def compute_psi1(lls, lsf, xmean, xvar, z):
    if xmean.ndim == 1:
        xmean = xmean[None, :]

    ls = T.exp(lls)
    sf = T.exp(lsf)
    lspxvar = ls + xvar
    constterm1 = ls / lspxvar
    constterm2 = T.prod(T.sqrt(constterm1), 1)
    r2_psi1 = T.outer(T.sum(xmean * xmean / lspxvar, 1), T.ones_like(z[:, 0:1])) - \
        np.float32(2) * T.dot(xmean / lspxvar, T.transpose(z)) + \
        T.dot(np.float32(1.0) / lspxvar, T.transpose(z)**2)
    psi1 = sf * T.outer(constterm2, T.ones_like(z[:, 0:1])) * T.exp(-np.float32(0.5) * r2_psi1)
    return psi1
Example 9: compute_log_ei
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def compute_log_ei(self, x, incumbent):
    Kzz = compute_kernel(self.lls, self.lsf, self.z, self.z) + \
        T.eye(self.z.shape[0]) * self.jitter * T.exp(self.lsf)
    KzzInv = T.nlinalg.MatrixInversePSD()(Kzz)
    LLt = T.dot(self.LParamPost, T.transpose(self.LParamPost))
    covCavityInv = KzzInv + LLt * casting(self.n_points - self.set_for_training) / casting(self.n_points)
    covCavity = T.nlinalg.MatrixInversePSD()(covCavityInv)
    meanCavity = T.dot(covCavity,
                       casting(self.n_points - self.set_for_training) / casting(self.n_points) * self.mParamPost)
    KzzInvcovCavity = T.dot(KzzInv, covCavity)
    KzzInvmeanCavity = T.dot(KzzInv, meanCavity)

    Kxz = compute_kernel(self.lls, self.lsf, x, self.z)
    B = T.dot(KzzInvcovCavity, KzzInv) - KzzInv
    v_out = T.exp(self.lsf) + T.dot(Kxz * T.dot(Kxz, B),
                                    T.ones_like(self.z[:, 0:1]))  # + T.exp(self.lvar_noise)
    m_out = T.dot(Kxz, KzzInvmeanCavity)
    s = (incumbent - m_out) / T.sqrt(v_out)
    log_ei = T.log((incumbent - m_out) * ratio(s) + T.sqrt(v_out)) + log_n_pdf(s)
    return log_ei
Example 10: normalize_batch_in_training
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def normalize_batch_in_training(x, gamma, beta,
                                reduction_axes, epsilon=1e-3):
    """Compute the mean and std for the batch, then apply batch normalization to it.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_train is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_train'):
        return _old_normalize_batch_in_training(x, gamma, beta,
                                                reduction_axes, epsilon)

    if gamma is None:
        if beta is None:
            gamma = ones_like(x)
        else:
            gamma = ones_like(beta)
    if beta is None:
        if gamma is None:
            beta = zeros_like(x)
        else:
            beta = zeros_like(gamma)

    normed, mean, stdinv = T.nnet.bn.batch_normalization_train(
        x, gamma, beta, reduction_axes, epsilon)

    return normed, mean, T.inv(stdinv ** 2)
Example 11: batch_normalization
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated
Example 12: apply
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def apply(self, inputs, states, cells, mask=None):
    def slice_last(x, no):
        return x[:, no * self.dim: (no + 1) * self.dim]

    activation = tensor.dot(states, self.W_state) + inputs
    in_gate = self.gate_activation.apply(
        slice_last(activation, 0))
    pre = slice_last(activation, 1)
    forget_gate = self.gate_activation.apply(
        pre + self.bias * tensor.ones_like(pre))
    next_cells = (
        forget_gate * cells +
        in_gate * self.activation.apply(slice_last(activation, 2)))
    out_gate = self.gate_activation.apply(
        slice_last(activation, 3))
    next_states = out_gate * self.activation.apply(next_cells)

    if mask is not None:
        next_states = (mask[:, None] * next_states +
                       (1 - mask[:, None]) * states)
        next_cells = (mask[:, None] * next_cells +
                      (1 - mask[:, None]) * cells)
    return next_states, next_cells
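The forget gate above adds self.bias through ones_like so a scalar bias broadcasts over the batch; a positive value biases the gate toward remembering. A standalone hedged sketch of just that trick (the bias value and shapes are illustrative, not Blocks' defaults):

import numpy as np
import theano
import theano.tensor as T

pre = T.matrix('pre')          # pre-activation slice for the forget gate
bias = 1.0                     # a common positive initialization
forget_gate = T.nnet.sigmoid(pre + bias * T.ones_like(pre))
f = theano.function([pre], forget_gate)
print(f(np.zeros((2, 4), dtype=theano.config.floatX)))  # sigmoid(1.0) ~= 0.731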
Example 13: test_gpujoin_gpualloc
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def test_gpujoin_gpualloc():
    a = T.fmatrix('a')
    a_val = numpy.asarray(numpy.random.rand(4, 5), dtype='float32')
    b = T.fmatrix('b')
    b_val = numpy.asarray(numpy.random.rand(3, 5), dtype='float32')

    f = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                        mode=mode_without_gpu)
    f_gpu = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)),
                            mode=mode_with_gpu)
    f_gpu2 = theano.function([a, b], T.join(0, T.zeros_like(a), T.ones_like(b)) + 4,
                             mode=mode_with_gpu)

    assert sum([node.op == T.alloc for node in f.maker.fgraph.toposort()]) == 2
    assert sum([node.op == T.join for node in f.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu.maker.fgraph.toposort()]) == 1
    assert sum([isinstance(node.op, GpuAlloc)
                for node in f_gpu2.maker.fgraph.toposort()]) == 2
    assert sum([node.op == gpu_join
                for node in f_gpu2.maker.fgraph.toposort()]) == 1
    assert numpy.allclose(f(a_val, b_val), f_gpu2(a_val, b_val))
Example 14: _compute_training_statistics
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def _compute_training_statistics(self, input_):
    axes = (0,) + tuple((i + 1) for i, b in
                        enumerate(self.population_mean.broadcastable)
                        if b)
    mean = input_.mean(axis=axes, keepdims=True)
    assert mean.broadcastable[1:] == self.population_mean.broadcastable
    add_role(mean, BATCH_NORM_MINIBATCH_ESTIMATE)
    if self.mean_only:
        stdev = tensor.ones_like(mean)
    else:
        stdev = tensor.sqrt(tensor.var(input_, axis=axes, keepdims=True) +
                            numpy.cast[theano.config.floatX](self.epsilon))
    assert (stdev.broadcastable[1:] ==
            self.population_stdev.broadcastable)
    add_role(stdev, BATCH_NORM_MINIBATCH_ESTIMATE)
    return mean, stdev
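When mean_only is set, ones_like supplies a unit standard deviation so the same normalization expression works in both modes. A minimal standalone sketch of that mean-only path (just the arithmetic, not Blocks' full brick):

import numpy as np
import theano
import theano.tensor as T

x = T.matrix('x')
mean = x.mean(axis=0, keepdims=True)
stdev = T.ones_like(mean)            # mean-only mode: dividing by ones leaves the scale untouched
f = theano.function([x], (x - mean) / stdev)
print(f(np.arange(6, dtype=theano.config.floatX).reshape(3, 2)))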
Example 15: batch_normalization
# Required import: from theano import tensor [as alias]
# Or: from theano.tensor import ones_like [as alias]
def batch_normalization(x, mean, var, beta, gamma, epsilon=1e-3):
    """Apply batch normalization on x given mean, var, beta and gamma.
    """
    # TODO remove this if statement when Theano without
    # T.nnet.bn.batch_normalization_test is deprecated
    if not hasattr(T.nnet.bn, 'batch_normalization_test'):
        return _old_batch_normalization(x, mean, var, beta, gamma, epsilon)

    if gamma is None:
        gamma = ones_like(var)
    if beta is None:
        beta = zeros_like(mean)

    if mean.ndim == 1:
        # based on TensorFlow's default: normalize along rightmost dimension
        reduction_axes = list(range(x.ndim - 1))
    else:
        reduction_axes = [i for i in range(x.ndim) if mean.broadcastable[i]]

    return T.nnet.bn.batch_normalization_test(
        x, gamma, beta, mean, var, reduction_axes, epsilon)


# TODO remove this function when Theano without
# T.nnet.bn.batch_normalization_train is deprecated