This article collects typical usage examples of the Python function theano.tensor.log. If you are wondering what T.log does, how to call it, or what real code that uses it looks like, the curated examples below should help.
A total of 15 code examples of the log function are shown below, ordered by popularity.
Example 1: cross_entropy
def cross_entropy(self, y):
    # return (-(y * T.log(self.y) + (1.0 - y) * T.log(1.0 - self.y))).mean()
    # return T.nnet.binary_crossentropy(self.y, y).mean()
    # clip the predictions away from 0 and 1 so the logs below stay finite
    y_used = T.clip(self.y, 0.0000001, 0.999999999)
    return T.mean(-y * T.log(y_used) - (1 - y) * T.log(1 - y_used))
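A minimal standalone sketch (hypothetical tensors, not part of the class above) of why the clip matters: a prediction of exactly 0 or 1 makes one of the log terms evaluate 0 * log(0), which turns the mean cost into NaN.

import numpy as np
import theano
import theano.tensor as T

# standalone illustration; variable names are made up for this sketch
y_true = T.matrix('y_true')
y_pred = T.matrix('y_pred')
y_clipped = T.clip(y_pred, 1e-7, 1 - 1e-7)   # keep log() away from 0 and 1
xent = T.mean(-y_true * T.log(y_clipped) - (1 - y_true) * T.log(1 - y_clipped))
f = theano.function([y_true, y_pred], xent)

t = np.array([[1.0, 0.0]], dtype=theano.config.floatX)
p = np.array([[0.9, 0.0]], dtype=theano.config.floatX)   # the hard 0 would break the unclipped version
print(f(t, p))   # finite, thanks to the clip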
Example 2: compileFunctions
def compileFunctions(self, x_image_global, examples, ib, B, K, corrupt):
    if x_image_global is None:
        x_image_global = self.x
    if corrupt == 0.0:
        self.x_c = self.x
    else:
        self.x_c = self.theano_rng.binomial(
            size=self.x.shape, n=1, p=1-corrupt,
            dtype=theano.config.floatX) * self.x

    self.h = self.g(T.dot(self.x_c, self.W_hl) + self.b_hl)
    self.x_r = self.o(T.dot(self.h, self.W_ol) + self.b_ol)

    self.params = [self.W_hl, self.b_hl, self.b_ol]
    self.cost = \
        (- T.sum(
            self.x * T.log(self.x_r) + (1 - self.x) * T.log(1 - self.x_r),
            axis=(0, 1)))

    gparams = T.grad(self.cost, self.params)
    updates = [
        (param, param - K * gparam)
        for param, gparam in zip(self.params, gparams)
    ]
    fun_train = theano.function(
        inputs=[ib],
        outputs=(self.cost, self.x_r, self.x_c),
        updates=updates,
        givens={
            x_image_global: examples[ib*B: (ib+1)*B]
        }
    )
    return fun_train
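For context, a small self-contained sketch (made-up sizes, not tied to the class above) of the corruption step: the input is multiplied elementwise by a Bernoulli(1 - corrupt) mask drawn from a RandomStreams object, so roughly a `corrupt` fraction of entries is zeroed.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

# standalone sketch; seed and corruption level are arbitrary
theano_rng = RandomStreams(seed=123)
corrupt = 0.3
x = T.matrix('x')
x_c = theano_rng.binomial(size=x.shape, n=1, p=1 - corrupt,
                          dtype=theano.config.floatX) * x
f = theano.function([x], x_c)
print(f(np.ones((2, 5), dtype=theano.config.floatX)))  # roughly 30% of the entries zeroed out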
Example 3: get_reconstruction_cost
def get_reconstruction_cost(self, updates, pre_sigmoid_nv):
    """Approximation to the reconstruction error.

    Note that this function requires the pre-sigmoid activation as input. To
    understand why, you need to know a bit about how Theano works. Whenever you
    compile a Theano function, the computational graph that you pass as input
    gets optimized for speed and stability, by replacing several parts of the
    subgraph with others. One such optimization expresses terms of the form
    log(sigmoid(x)) in terms of softplus. We need this optimization for the
    cross-entropy because the sigmoid of numbers larger than 30 (or even less
    than that) rounds to 1, and the sigmoid of numbers smaller than -30 rounds
    to 0, which in turn forces Theano to compute log(0) and therefore yields
    either -inf or NaN as cost. If the value is expressed in terms of softplus,
    we do not get this undesirable behaviour. This optimization usually works
    fine, but here we have a special case: the sigmoid is applied inside the
    scan op, while the log is outside. Therefore Theano will only see
    log(scan(...)) instead of log(sigmoid(...)) and will not apply the wanted
    optimization. We cannot simply replace the sigmoid inside scan with
    something else, because it only needs to be done on the last step.
    Therefore the easiest and most efficient way is to also return the
    pre-sigmoid activation as an output of scan, and to apply both the log and
    the sigmoid outside scan, such that Theano can catch and optimize the
    expression.
    """
    cross_entropy = T.mean(
        T.sum(
            self.input * T.log(T.nnet.sigmoid(pre_sigmoid_nv)) +
            (1 - self.input) * T.log(1 - T.nnet.sigmoid(pre_sigmoid_nv)),
            axis=1
        )
    )
    return cross_entropy
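As a rough standalone illustration (not part of the RBM class above) of the rewrite the docstring describes: log(1 - sigmoid(x)) equals -softplus(x), and the softplus form stays finite even where sigmoid(x) has already rounded to exactly 1 in float32.

import numpy as np
import theano
import theano.tensor as T

# standalone sketch with a made-up value; x = 30 is roughly where float32 sigmoid saturates
x_val = np.array([30.0], dtype=np.float32)
sig = 1.0 / (1.0 + np.exp(-x_val))     # rounds to exactly 1.0 in float32
print(np.log(1.0 - sig))               # [-inf] (plus a numpy warning): the failure mode above

x = T.fvector('x')
stable = -T.nnet.softplus(x)           # the form Theano rewrites log(1 - sigmoid(x)) into
print(theano.function([x], stable)(x_val))   # about -30, finite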
Example 4: logp
def logp(self, value):
    alpha = self.alpha
    beta = self.beta
    return bound(T.log(alpha) - T.log(beta)
                 + (alpha - 1) * T.log(value/beta)
                 - (value/beta)**alpha,
                 value >= 0, alpha > 0, beta > 0)
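A quick standalone numpy/scipy check (made-up parameter values, not part of the distribution class above) that the expression is the Weibull log-density with shape alpha and scale beta:

import numpy as np
from scipy import stats

# arbitrary values chosen only for this check
alpha, beta, value = 1.5, 2.0, 0.7
logp = (np.log(alpha) - np.log(beta)
        + (alpha - 1) * np.log(value / beta)
        - (value / beta) ** alpha)
print(np.allclose(logp, stats.weibull_min.logpdf(value, c=alpha, scale=beta)))  # True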
Example 5: _elbo_t
def _elbo_t(logp, uw, inarray, n_mcsamples, random_seed):
    """Create Theano tensor of approximate ELBO by Monte Carlo sampling."""
    l = (uw.size / 2)
    l_int = l.astype('int64')
    u = uw[:l_int]
    w = uw[l_int:]

    # Callable tensor
    def logp_(input):
        return theano.clone(logp, {inarray: input}, strict=False)

    # Naive Monte-Carlo
    if random_seed is None:
        r = MRG_RandomStreams(gen_random_state())
    else:
        r = MRG_RandomStreams(seed=random_seed)

    if n_mcsamples == 1:
        n = r.normal(size=inarray.tag.test_value.shape)
        q = n * tt.exp(w) + u
        elbo = logp_(q) + tt.sum(w) + 0.5 * l * (1 + tt.log(2.0 * np.pi))
    else:
        n = r.normal(size=(n_mcsamples, u.tag.test_value.shape[0]))
        qs = n * tt.exp(w) + u
        logps, _ = theano.scan(fn=lambda q: logp_(q),
                               outputs_info=None,
                               sequences=[qs])
        elbo = tt.mean(logps) + tt.sum(w) + 0.5 * l * (1 + tt.log(2.0 * np.pi))
    return elbo
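The constant term added to logp_(q) matches the entropy of the diagonal Gaussian q = N(u, exp(w)^2). A small standalone numpy check of that identity (arbitrary w, not part of the module above):

import numpy as np

w = np.array([0.1, -0.3, 0.7])   # hypothetical log-standard-deviations
l = w.size
closed_form = 0.5 * l * (1 + np.log(2 * np.pi)) + w.sum()
# entropy of a univariate normal with standard deviation exp(w_i), summed over dimensions
per_dim = 0.5 * np.log(2 * np.pi * np.e * np.exp(2 * w))
print(np.allclose(closed_form, per_dim.sum()))  # True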
Example 6: get_model
def get_model(Ws, bs, dropout=False):
    v = T.matrix('input')
    m = T.matrix('missing')
    q = T.matrix('target')
    k = T.vector('normalization factor')

    # Set all missing/target values to 0.5
    keep_mask = (1-m) * (1-q)
    h = keep_mask * (v * 2 - 1)  # Convert to +1, -1

    # Normalize layer 0
    h *= k.dimshuffle(0, 'x')

    for l in xrange(len(Ws)):
        h = T.dot(h, Ws[l]) + bs[l]
        if l < len(Ws) - 1:
            h = h * (h > 0)  # relu
            if dropout:
                mask = srng.binomial(n=1, p=0.5, size=h.shape)
                h = h * mask * 2

    output = sigmoid(h)
    LL = v * T.log(output) + (1 - v) * T.log(1 - output)
    # loss = -(q * LL).sum() / q.sum()
    loss = -((1 - m) * LL).sum() / (1 - m).sum()
    return v, m, q, k, output, loss
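A quick standalone check (not from the model above) that the `h * (h > 0)` trick is just a ReLU, which T.maximum(h, 0) expresses directly:

import numpy as np
import theano
import theano.tensor as T

# standalone sketch with a tiny made-up matrix
h = T.matrix('h')
f = theano.function([h], [h * (h > 0), T.maximum(h, 0)])
a, b = f(np.array([[-1.0, 0.5, 2.0]], dtype=theano.config.floatX))
print(np.allclose(a, b))  # True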
Example 7: get_cost
def get_cost(self, p=0, sigma=1):
    # the last layer
    z = self.sigmoid_layers[-1].output
    L = -T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
    p_idx = len(self.sigmoid_layers)/2 - 1  # penalty layer, the middle layer

    if p == 0:
        cost = T.mean(L)
        # cost = T.mean(T.sqrt(T.mean(self.errors, axis=1)))  # Log Spectral Distance (LSD)
    elif (p != 0) and (sigma == 0):  # for square penalty
        square_cost = self.get_square_cost(self.sigmoid_layers[p_idx].output, p)
        cost = T.mean(L) + T.mean(square_cost)
    elif (p != 0) and (sigma != 0):  # for Gaussian penalty
        gaussian_cost = self.get_gaussian_cost(self.sigmoid_layers[p_idx].output, p, sigma)
        cost = T.mean(L) + T.mean(gaussian_cost)
    # elif (p == -1) and (sigma == 0):  # binary
    #     code_val = self.sigmoid_layers[p_idx].output
    #     binary_val = code_val >= 0.5
    #     self.sigmoid_layers[p_idx+1].input = binary_val
    #     z = self.sigmoid_layers[-1].output
    #     L = -T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
    #     cost = T.mean(L)
    # elif (p == -1) and (sigma != 0):  # add gaussian noise
    #     gaussian_data = self.theano_rng.normal(size=self.sigmoid_layers[p_idx-1].output.shape,
    #                                            std=sigma, dtype=theano.config.floatX)
    #     self.sigmoid_layers[p_idx].input = self.sigmoid_layers[p_idx-1].output + gaussian_data
    #     z = self.sigmoid_layers[-1].output
    #     L = -T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
    #     cost = T.mean(L)
    else:
        cost = T.mean(L)
    return cost
Example 8: negative_log_likelihood
def negative_log_likelihood(self, y):
    r"""Return the mean of the negative log-likelihood of the prediction
    of this model under a given target distribution.

    .. math::

        \frac{1}{|\mathcal{D}|} \mathcal{L} (\theta=\{W,b\}, \mathcal{D}) =
        \frac{1}{|\mathcal{D}|} \sum_{i=0}^{|\mathcal{D}|} \log(P(Y=y^{(i)}|x^{(i)}, W,b)) \\
        \ell (\theta=\{W,b\}, \mathcal{D})

    :type y: theano.tensor.TensorType
    :param y: corresponds to a vector that gives for each example the
              correct label

    Note: we use the mean instead of the sum so that
    the learning rate is less dependent on the batch size.
    """
    # y.shape[0] is (symbolically) the number of rows in y, i.e. the
    # number of examples (call it n) in the minibatch.
    # T.arange(y.shape[0]) is a symbolic vector which will contain
    # [0, 1, 2, ..., n-1]. T.log(self.p_y_given_x) is a matrix of
    # log-probabilities (call it LP) with one row per example and one
    # column per class. LP[T.arange(y.shape[0]), y] is a vector v
    # containing [LP[0, y[0]], LP[1, y[1]], ..., LP[n-1, y[n-1]]], and
    # T.mean(LP[T.arange(y.shape[0]), y]) is the mean (across minibatch
    # examples) of the elements in v, i.e. the mean log-likelihood
    # across the minibatch.
    if self.is_binary:
        return -T.mean(T.log(self.p_y_given_x))
    return -T.mean(T.log(self.p_y_given_x)[T.arange(y.shape[0]), y])
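A tiny standalone demo (hypothetical log-probabilities, not tied to the class above) of the indexing trick described in the comment: LP[T.arange(n), y] picks, for each row, the log-probability of that row's correct label.

import numpy as np
import theano
import theano.tensor as T

# standalone sketch; two examples, two classes
LP = T.matrix('LP')    # log-probabilities: one row per example, one column per class
y = T.ivector('y')     # correct label of each example
picked = LP[T.arange(y.shape[0]), y]
f = theano.function([LP, y], picked)

lp_val = np.log(np.array([[0.7, 0.3], [0.2, 0.8]], dtype=theano.config.floatX))
print(f(lp_val, np.array([0, 1], dtype='int32')))  # [log(0.7), log(0.8)]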
Example 9: get_sparsity_cost
def get_sparsity_cost(self):
    # update mean activation using exponential moving average
    hack_h = self.h_given_v(self.sp_pos_v)

    # define loss based on value of sp_type
    if self.sp_type == 'kl':
        eps = npy_floatX(1./self.batch_size)
        loss = lambda targ, val: - npy_floatX(targ) * T.log(eps + val) \
                                 - npy_floatX(1-targ) * T.log(1 - val + eps)
    else:
        raise NotImplementedError('Sparsity type %s is not implemented' % self.sp_type)

    cost = T.zeros((), dtype=floatX)
    params = []
    if self.sp_weight['h']:
        cost += self.sp_weight['h'] * T.sum(loss(self.sp_targ['h'], hack_h.mean(axis=0)))
        params += [self.hbias]
    if self.sp_type in ['kl'] and self.sp_weight['h']:
        params += [self.Wv, self.alpha, self.mu]
        if self.flags['split_norm']:
            params += [self.scalar_norms]
    return costmod.Cost(cost, params)
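For reference, a standalone numpy check (arbitrary numbers, not part of the class above) that the 'kl' loss is the binary KL divergence KL(targ || val) up to an additive constant that does not depend on val (the entropy of targ):

import numpy as np

# hypothetical target activation, observed mean activation, and smoothing epsilon
targ, val, eps = 0.05, 0.2, 1e-4
cross_entropy = -targ * np.log(eps + val) - (1 - targ) * np.log(1 - val + eps)
kl = (targ * np.log(targ / (eps + val))
      + (1 - targ) * np.log((1 - targ) / (1 - val + eps)))
entropy = -targ * np.log(targ) - (1 - targ) * np.log(1 - targ)
print(np.allclose(cross_entropy, kl + entropy))  # True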
Example 10: simple_RNN
def simple_RNN(nh):
    Wx = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0, (1, nh)).astype(theano.config.floatX))
    Wh = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0, (nh, nh)).astype(theano.config.floatX))
    Wy = theano.shared(0.2 * numpy.random.uniform(-1.0, 1.0, (nh, 1)).astype(theano.config.floatX))
    bh = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
    by = theano.shared(numpy.zeros(1, dtype=theano.config.floatX))
    h0 = theano.shared(numpy.zeros(nh, dtype=theano.config.floatX))
    p = [Wx, Wh, Wy, bh, by, h0]

    x = T.matrix()

    def recurrence(x_t, h_tm1):
        h_t = T.tanh(T.dot(x_t, Wx) + T.dot(h_tm1, Wh) + bh)
        s_t = T.dot(h_t, Wy) + by
        return [h_t, s_t]

    ([h, activations], updates) = theano.scan(fn=recurrence, sequences=x, outputs_info=[h0, dict()])

    t = x[0, 0]
    s = activations[-1, 0]
    y = T.nnet.sigmoid(s)
    loss = -t*T.log(y + 1e-14) - (1-t)*T.log((1-y) + 1e-14)
    acc = T.neq(T.round(y), t)
    return p, [x], s, [loss, acc], h
Example 11: get_cost_updates
def get_cost_updates(self, contraction_level, learning_rate, cost_measure="cross_entropy"):
    """This function computes the cost and the updates for one training
    step of the cA."""
    y = self.get_hidden_values(self.x)
    z = self.get_reconstructed_input(y)
    J = self.get_jacobian(y, self.W)

    if cost_measure == "cross_entropy":
        # self.L_rec = - T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1)
        self.L_rec = T.mean(- T.sum(self.x * T.log(z) + (1 - self.x) * T.log(1 - z), axis=1))
    elif cost_measure == "euclidean":
        self.L_rec = T.mean(T.sum((self.x - z)**2, axis=1))

    # Compute the jacobian and average over the number of samples/minibatch
    self.L_jacob = T.mean(T.sum(J ** 2) / self.n_batchsize)

    cost = self.L_rec + contraction_level * self.L_jacob

    # compute the gradients of the cost of the `cA` with respect
    # to its parameters
    gparams = T.grad(cost, self.params)
    # generate the list of updates
    updates = []
    for param, gparam in zip(self.params, gparams):
        updates.append((param, param - learning_rate * gparam))

    return (cost, updates)
Example 12: forward_jacobian_log_det
def forward_jacobian_log_det(self, x):
    y_sum = self.forward_map(x).sum()
    dy_dx = th.grad(y_sum, x)
    if self.fudge != 0.:
        return tt.log(dy_dx + self.fudge).sum()
    else:
        return tt.log(dy_dx).sum()
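A standalone sketch (using an element-wise exp map, not the class above) of the trick used here: for an element-wise map, the gradient of y.sum() with respect to x is the vector of diagonal Jacobian entries dy_i/dx_i, so the log-determinant of the Jacobian is the sum of their logs.

import numpy as np
import theano as th
import theano.tensor as tt

# standalone sketch; the map y = exp(x) is chosen only because its Jacobian is easy to check
x = tt.vector('x')
y = tt.exp(x)                    # element-wise map with dy_i/dx_i = exp(x_i)
dy_dx = th.grad(y.sum(), x)
log_det = tt.log(dy_dx).sum()

f = th.function([x], log_det)
x_val = np.array([0.0, 1.0, 2.0], dtype=th.config.floatX)
print(np.allclose(f(x_val), x_val.sum()))  # log|det J| = sum(x) for y = exp(x)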
Example 13: GMM
def GMM(y, mu, sig, coeff):
    """Gaussian mixture model negative log-likelihood.

    Parameters
    ----------
    y     : TensorVariable
    mu    : FullyConnected (Linear)
    sig   : FullyConnected (Softplus)
    coeff : FullyConnected (Softmax)
    """
    n_dim = y.ndim
    shape_y = y.shape
    y = y.reshape((-1, shape_y[-1]))
    y = y.dimshuffle(0, 1, "x")

    mu = mu.reshape((-1, mu.shape[-1] / coeff.shape[-1], coeff.shape[-1]))
    sig = sig.reshape((-1, sig.shape[-1] / coeff.shape[-1], coeff.shape[-1]))
    coeff = coeff.reshape((-1, coeff.shape[-1]))

    inner = -0.5 * T.sum(T.sqr(y - mu) / sig ** 2 + 2 * T.log(sig) + T.log(2 * np.pi), axis=-2)
    nll = -logsumexp(T.log(coeff) + inner, axis=-1)

    # Adjust dimension
    new_dim = T.set_subtensor(shape_y[-1], 1)
    nll = nll.reshape(new_dim, ndim=n_dim)
    nll = nll.flatten(n_dim - 1)
    return nll
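A standalone numpy check (made-up values: one observation, two mixture components, not part of the module above) of the quantity computed here: the negative log-likelihood of a diagonal-covariance Gaussian mixture, -log sum_k coeff_k N(y | mu_k, diag(sig_k^2)).

import numpy as np

# hypothetical values for a 2-dimensional observation and a 2-component mixture
y = np.array([0.5, -1.0])
mu = np.array([[0.0, 0.0], [1.0, -1.0]])
sig = np.array([[1.0, 1.0], [0.5, 0.5]])
coeff = np.array([0.3, 0.7])

log_comp = -0.5 * np.sum(((y - mu) / sig) ** 2 + 2 * np.log(sig) + np.log(2 * np.pi), axis=1)
nll = -np.logaddexp.reduce(np.log(coeff) + log_comp)
print(nll)   # negative log-likelihood of y under the mixture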
Example 14: test_log1msigm_to_softplus
def test_log1msigm_to_softplus(self):
    x = T.matrix()

    out = T.log(1 - sigmoid(x))
    f = theano.function([x], out, mode=self.m)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 2
    assert isinstance(topo[0].op.scalar_op,
                      theano.tensor.nnet.sigm.ScalarSoftplus)
    assert isinstance(topo[1].op.scalar_op, theano.scalar.Neg)
    f(numpy.random.rand(54, 11).astype(config.floatX))

    # Same test with a flatten
    out = T.log(1 - T.flatten(sigmoid(x)))
    f = theano.function([x], out, mode=self.m)
    topo = f.maker.fgraph.toposort()
    assert len(topo) == 3
    assert isinstance(topo[0].op, T.Flatten)
    assert isinstance(topo[1].op.scalar_op,
                      theano.tensor.nnet.sigm.ScalarSoftplus)
    assert isinstance(topo[2].op.scalar_op, theano.scalar.Neg)
    f(numpy.random.rand(54, 11).astype(config.floatX))

    # Same test with a reshape
    out = T.log(1 - sigmoid(x).reshape([x.size]))
    f = theano.function([x], out, mode=self.m)
    topo = f.maker.fgraph.toposort()
    # assert len(topo) == 3
    assert any(isinstance(node.op, T.Reshape) for node in topo)
    assert any(isinstance(getattr(node.op, 'scalar_op', None),
                          theano.tensor.nnet.sigm.ScalarSoftplus)
               for node in topo)
    f(numpy.random.rand(54, 11).astype(config.floatX))
Example 15: unet_crossentropy_loss_sampled
def unet_crossentropy_loss_sampled(y_true, y_pred):
    print 'unet_crossentropy_loss_sampled'
    epsilon = 1.0e-4
    y_pred_clipped = T.flatten(T.clip(y_pred, epsilon, 1.0-epsilon))
    y_true = T.flatten(y_true)
    # this seems to work
    # it is super ugly though and I am sure there is a better way to do it
    # but I am struggling to get theano to cooperate
    # filter the right indices
    indPos = T.nonzero(y_true)[0]    # T.nonzero returns a tuple of index tensors, one per dimension
    indNeg = T.nonzero(1-y_true)[0]
    # shuffle
    n = indPos.shape[0]
    indPos = indPos[srng.permutation(n=n)]
    n = indNeg.shape[0]
    indNeg = indNeg[srng.permutation(n=n)]
    # take an equal number of samples from whichever class has fewer
    n_samples = T.cast(T.min([T.sum(y_true), T.sum(1-y_true)]), dtype='int64')
    indPos = indPos[:n_samples]
    indNeg = indNeg[:n_samples]
    loss_vector = -T.mean(T.log(y_pred_clipped[indPos])) - T.mean(T.log(1-y_pred_clipped[indNeg]))
    average_loss = T.mean(loss_vector)
    print 'average_loss:', average_loss
    return average_loss
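A small standalone demo of the index filtering and shuffling step, assuming srng is a theano.tensor.shared_randomstreams.RandomStreams instance (the original snippet does not show how srng is constructed): T.nonzero returns a tuple of index tensors, and srng.permutation reorders the chosen indices before truncation.

import numpy as np
import theano
import theano.tensor as T
from theano.tensor.shared_randomstreams import RandomStreams

# standalone sketch with toy labels; srng here is an assumption, not taken from the original code
srng = RandomStreams(seed=0)
y_true = T.vector('y_true')
indPos = T.nonzero(y_true)[0]                          # indices of the positive labels
indPos = indPos[srng.permutation(n=indPos.shape[0])]   # shuffled
f = theano.function([y_true], indPos)
print(f(np.array([0, 1, 1, 0, 1], dtype=theano.config.floatX)))  # indices 1, 2, 4 in some random order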