This article collects typical usage examples of the autograd.numpy.log method in Python. If you are wondering what numpy.log does, how to call it, or simply want working examples, the curated code samples below should help. You can also read more about the enclosing module, autograd.numpy.
The following shows 15 code examples of numpy.log, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
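Before the examples, here is a minimal standalone sketch (not taken from any of the projects below; the function and data are made up) of why code uses autograd.numpy.log instead of plain numpy.log: the autograd wrapper records the operation so autograd.grad can differentiate through it.

import autograd.numpy as anp
from autograd import grad

def neg_log_likelihood(rate, counts):
    # Poisson-style term: -sum(k * log(rate) - rate)
    return -anp.sum(counts * anp.log(rate) - rate)

counts = anp.array([3., 1., 4.])
d_rate = grad(neg_log_likelihood)(2.0, counts)
# Analytic check: -(counts.sum() / rate - len(counts)) = -(8 / 2 - 3) = -1.0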
Example 1: __init__
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def __init__(self, n_var=2, n_constr=2, **kwargs):
    super().__init__(n_var, n_constr, **kwargs)

    a, b = anp.zeros(n_constr + 1), anp.zeros(n_constr + 1)
    a[0], b[0] = 1, 1
    delta = 1 / (n_constr + 1)
    alpha = delta

    for j in range(n_constr):
        beta = a[j] * anp.exp(-b[j] * alpha)
        a[j + 1] = (a[j] + beta) / 2
        b[j + 1] = -1 / alpha * anp.log(beta / a[j + 1])
        alpha += delta

    self.a = a[1:]
    self.b = b[1:]
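Here anp.log inverts the exponential in the recurrence: b[j + 1] is chosen so that a[j + 1] * exp(-b[j + 1] * alpha) reproduces beta. A standalone check of that identity (toy values, for illustration only):

import autograd.numpy as anp

a_j, b_j, alpha = 1.0, 1.0, 1.0 / 3
beta = a_j * anp.exp(-b_j * alpha)
a_next = (a_j + beta) / 2
b_next = -1 / alpha * anp.log(beta / a_next)
assert abs(a_next * anp.exp(-b_next * alpha) - beta) < 1e-12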
Example 2: log_norm
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def log_norm(self):
    try:
        return self._log_norm
    except AttributeError:
        if self.frame != self.model_frame:
            images_ = self.images[self.slices_for_images]
            weights_ = self.weights[self.slices_for_images]
        else:
            images_ = self.images
            weights_ = self.weights

        # Normalization of the single-pixel likelihood:
        #   1 / [(2 pi)^1/2 (sigma^2)^1/2]
        # with inverse-variance weights: sigma^2 = 1 / weight.
        # The full likelihood is the sum over all data samples (pixels in images).
        # NOTE: this assumes that all pixels are used in the likelihood!
        log_sigma = np.zeros(weights_.shape, dtype=self.weights.dtype)
        cuts = weights_ > 0
        log_sigma[cuts] = np.log(1 / weights_[cuts])

        self._log_norm = (
            np.prod(images_.shape) / 2 * np.log(2 * np.pi)
            + np.sum(log_sigma) / 2
        )
    return self._log_norm
Example 3: get_loss
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def get_loss(self, model):
    """Computes the loss/fidelity of a given model with respect to the observation.

    Parameters
    ----------
    model: array
        A model from `Blend`

    Returns
    -------
    loss: float
        Loss of the model
    """
    model_ = self.render(model)
    images_ = self.images
    weights_ = self.weights

    # properly normalized likelihood
    log_sigma = np.zeros(weights_.shape, dtype=weights_.dtype)
    cuts = weights_ > 0
    log_sigma[cuts] = np.log(1 / weights_[cuts])
    log_norm = (
        np.prod(images_.shape) / 2 * np.log(2 * np.pi)
        + np.sum(log_sigma) / 2
    )

    return log_norm + 0.5 * np.sum(weights_ * (model_ - images_) ** 2)
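As a sanity check (a standalone sketch with toy data and plain NumPy/SciPy, independent of the class above): with inverse-variance weights, this loss equals the negative log-likelihood of a diagonal Gaussian with sigma = 1 / sqrt(weight).

import numpy as np
from scipy.stats import norm

images = np.array([1.0, 2.0, 3.0])
model = np.array([1.5, 1.5, 2.0])
weights = np.array([4.0, 1.0, 0.25])     # inverse variances

log_sigma = np.log(1 / weights)
log_norm = images.size / 2 * np.log(2 * np.pi) + log_sigma.sum() / 2
loss = log_norm + 0.5 * np.sum(weights * (model - images) ** 2)

nll = -norm.logpdf(images, loc=model, scale=1 / np.sqrt(weights)).sum()
assert np.isclose(loss, nll)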
Example 4: objective
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def objective(self, w):
    obj = 0
    N = float(sum([np.sum(d[1]) for d in self.data_list]))
    for F, S in self.data_list:
        psi = np.dot(F, w)
        lam = self.link(psi)
        obj -= np.sum(S * np.log(lam) - lam * self.dt) / N
        # assert np.isfinite(ll)

    # Add penalties
    obj += (0.5 * np.sum(w[1:] ** 2) / self.sigma ** 2) / N
    obj += np.sum(np.abs(w[1:]) * self.lmbda) / N

    # assert np.isfinite(obj)
    return obj
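Because the objective is written with autograd.numpy, an optimizer can obtain its gradient directly from autograd.grad. A simplified standalone sketch (the design matrix F, counts S, bin width dt, and the exponential link are all made-up stand-ins for the attributes above):

import autograd.numpy as np
from autograd import grad

dt = 0.1
F = np.array([[1.0, 0.5], [1.0, -0.3], [1.0, 0.8]])   # hypothetical design matrix
S = np.array([2.0, 0.0, 1.0])                          # hypothetical event counts
N = S.sum()

def objective(w, sigma=1.0, lmbda=0.1):
    lam = np.exp(np.dot(F, w))                          # exponential link
    obj = -np.sum(S * np.log(lam) - lam * dt) / N       # Poisson-style term
    obj += (0.5 * np.sum(w[1:] ** 2) / sigma ** 2) / N  # L2 penalty
    obj += np.sum(np.abs(w[1:]) * lmbda) / N            # L1 penalty
    return obj

g = grad(objective)(np.zeros(2))                        # gradient for an optimizer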
Example 5: goto_time
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def goto_time(self, t, add_time=True):
    # If exponentially growing, add extra time points whenever
    # the population size doubles.
    if self.curr_g != 0 and t < float('inf'):
        halflife = np.abs(np.log(.5) / self.curr_g)
        add_t = self.curr_t + halflife
        while add_t < t:
            self._push_time(add_t)
            add_t += halflife

    while self.time_stack and self.time_stack[0] < t:
        self.step_time(hq.heappop(self.time_stack))
    self.step_time(t, add=False)

    if add_time:
        # Put t on the queue to be added when processing the next event
        # (allows further events to change the population size before plotting).
        self._push_time(t)
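The halflife term is the doubling time implied by the growth rate. A standalone check with a made-up rate:

import autograd.numpy as np

g = np.log(2.0)                     # rate at which the size doubles per unit time
halflife = np.abs(np.log(.5) / g)   # -> 1.0, the spacing of the extra time points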
Example 6: _entropy
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def _entropy(self):
    counts = self._total_freqs
    n_snps = float(self.n_snps())
    p = counts / n_snps
    # return np.sum(p * np.log(p))
    ret = np.sum(p * np.log(p))

    # correct for missing data
    sampled_n = np.sum(self.configs.value, axis=2)
    sampled_n_counts = co.Counter()
    assert len(counts) == len(sampled_n)
    for c, n in zip(counts, sampled_n):
        n = tuple(n)
        sampled_n_counts[n] += c
    sampled_n_counts = np.array(
        list(sampled_n_counts.values()), dtype=float)

    ret = ret + np.sum(sampled_n_counts / n_snps *
                       np.log(n_snps / sampled_n_counts))
    assert not np.isnan(ret)
    return ret
Example 7: _composite_log_likelihood
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def _composite_log_likelihood(data, demo, mut_rate=None, truncate_probs=0.0,
                              vector=False, p_missing=None,
                              use_pairwise_diffs=False, **kwargs):
    try:
        sfs = data.sfs
    except AttributeError:
        sfs = data

    sfs_probs = np.maximum(expected_sfs(demo, sfs.configs, normalized=True, **kwargs),
                           truncate_probs)
    log_lik = sfs._integrate_sfs(np.log(sfs_probs), vector=vector)

    # Add the log likelihood of the Poisson distribution for the total number of SNPs
    if mut_rate is not None:
        log_lik = log_lik + _mut_factor(sfs, demo, mut_rate, vector,
                                        p_missing, use_pairwise_diffs)

    if not vector:
        log_lik = np.squeeze(log_lik)
    return log_lik
Example 8: simple_nea_admixture_demo
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def simple_nea_admixture_demo(N_chb_bottom, N_chb_top, pulse_t, pulse_p,
                              ej_chb, ej_yri, sampled_n=(14, 10)):
    ej_chb = pulse_t + ej_chb
    ej_yri = ej_chb + ej_yri

    G_chb = -np.log(N_chb_top / N_chb_bottom) / ej_chb

    model = momi.DemographicModel(1., .25)
    model.add_leaf("yri")
    model.add_leaf("chb")
    model.set_size("chb", 0., N=N_chb_bottom, g=G_chb)
    model.move_lineages("chb", "nea", t=pulse_t, p=pulse_p)
    model.move_lineages("chb", "yri", t=ej_chb)
    model.move_lineages("yri", "nea", t=ej_yri)
    return model

    #events = [('-en', 0., 'chb', N_chb_bottom),
    #          ('-eg', 0, 'chb', G_chb),
    #          ('-ep', pulse_t, 'chb', 'nea', pulse_p),
    #          ('-ej', ej_chb, 'chb', 'yri'),
    #          ('-ej', ej_yri, 'yri', 'nea'),
    #          ]
    #return make_demo_hist(events, ('yri', 'chb'), sampled_n)
    ## return make_demography(events, ('yri', 'chb'), sampled_n)
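The log here turns the ratio of start and end population sizes into an exponential growth rate: G_chb satisfies N_chb_bottom * exp(-G_chb * ej_chb) = N_chb_top. A standalone algebraic check with toy numbers (not momi API):

import autograd.numpy as np

N_bottom, N_top, t = 5000.0, 1000.0, 0.5
G = -np.log(N_top / N_bottom) / t
assert abs(N_bottom * np.exp(-G * t) - N_top) < 1e-9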
Example 9: _compute_loss
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def _compute_loss(self):
    """Compute and store loss value for the given batch of examples."""
    if self._loss_computed:
        return
    self._compute_distances()

    # NLL loss from the NIPS paper.
    exp_negative_distances = np.exp(-self.euclidean_dists)  # (1 + neg_size, batch_size)
    # Remove the value for the true edge (u, v) from the partition function
    Z = exp_negative_distances[1:].sum(axis=0)  # (batch_size,)

    self.exp_negative_distances = exp_negative_distances  # (1 + neg_size, batch_size)
    self.Z = Z  # (batch_size,)

    self.pos_loss = self.euclidean_dists[0].sum()
    self.neg_loss = np.log(self.Z).sum()
    self.loss = self.pos_loss + self.neg_loss  # scalar

    self._loss_computed = True
Example 10: _nll_loss_fn
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def _nll_loss_fn(poincare_dists):
    """
    Parameters
    ----------
    poincare_dists : numpy.array
        All distances d(u, v) and d(u, v'), where v' is negative. Shape (1 + negative_size,).

    Returns
    -------
        Log-likelihood loss from the NIPS paper, Eq. (6).
    """
    exp_negative_distances = grad_np.exp(-poincare_dists)
    # Remove the value for the true edge (u, v) from the partition function
    # return -grad_np.log(exp_negative_distances[0] / (- exp_negative_distances[0] + exp_negative_distances.sum()))
    return poincare_dists[0] + grad_np.log(exp_negative_distances[1:].sum())
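Since the loss is written with autograd-wrapped numpy (grad_np above), its gradient with respect to the distance vector can be taken with autograd.grad. A standalone sketch with toy distances (the function is restated so the block runs on its own):

import autograd.numpy as grad_np
from autograd import grad

def nll_loss_fn(poincare_dists):
    exp_negative_distances = grad_np.exp(-poincare_dists)
    return poincare_dists[0] + grad_np.log(exp_negative_distances[1:].sum())

dists = grad_np.array([0.5, 1.0, 2.0])   # d(u, v) followed by two negative distances
d_dists = grad(nll_loss_fn)(dists)
# d/d dists[0] = 1; d/d dists[k] = -exp(-dists[k]) / sum of negative terms, for k >= 1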
Example 11: _loglikelihood
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def _loglikelihood(params, x, tx, T):
    """Log likelihood for optimizer."""
    warnings.simplefilter(action="ignore", category=FutureWarning)

    alpha, beta, gamma, delta = params

    betaln_ab = betaln(alpha, beta)
    betaln_gd = betaln(gamma, delta)

    A = betaln(alpha + x, beta + T - x) - betaln_ab + betaln(gamma, delta + T) - betaln_gd

    B = 1e-15 * np.ones_like(T)
    recency_T = T - tx - 1
    for j in np.arange(recency_T.max() + 1):
        ix = recency_T >= j
        B = B + ix * betaf(alpha + x, beta + tx - x + j) * betaf(gamma + 1, delta + tx + j)

    B = log(B) - betaln_gd - betaln_ab
    return logaddexp(A, B)
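The last two lines combine the two likelihood terms on the log scale: logaddexp(A, log(B)) equals log(exp(A) + B) without exponentiating A explicitly. A quick standalone check in plain NumPy (toy values):

import numpy as np

A, B = np.log(0.2), 0.05
assert np.isclose(np.logaddexp(A, np.log(B)), np.log(0.2 + 0.05))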
Example 12: _build_errors_df
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def _build_errors_df(name_errors, label):
    """Helper to build errors DataFrame."""
    series = []
    percentiles = np.linspace(0, 100, 21)
    index = percentiles / 100
    for name, errors in name_errors:
        series.append(pd.Series(
            np.nanpercentile(errors, q=percentiles), index=index, name=name))
    df = pd.concat(series, axis=1)
    df.columns.name = 'derivative'
    df.index.name = 'quantile'
    df = df.stack().rename('error').reset_index()
    with np.errstate(divide='ignore'):
        df['log(error)'] = np.log(df['error'])
    if label is not None:
        df['label'] = label
    return df
Example 13: softplus
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def softplus(x):
    """Numerically stable transform from the real line to the positive reals.

    Returns np.log(1.0 + np.exp(x)).
    Autograd friendly and fully vectorized.

    @param x: array of values in (-inf, +inf)
    @return ans: array of values in (0, +inf), same size as x
    """
    if not isinstance(x, float):
        mask1 = x > 0
        mask0 = np.logical_not(mask1)
        out = np.zeros_like(x)
        out[mask0] = np.log1p(np.exp(x[mask0]))
        out[mask1] = x[mask1] + np.log1p(np.exp(-x[mask1]))
        return out
    if x > 0:
        return x + np.log1p(np.exp(-x))
    else:
        return np.log1p(np.exp(x))
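A usage sketch for the function above (toy values; assumes the softplus just defined is in scope): it matches the naive formula for moderate inputs and stays finite where np.exp(x) would overflow.

import autograd.numpy as np

x = np.array([-800.0, -1.0, 0.0, 1.0, 800.0])
out = softplus(x)

naive = np.log1p(np.exp(np.array([-1.0, 0.0, 1.0])))  # safe only for moderate x
assert np.allclose(out[1:4], naive)
assert np.all(np.isfinite(out))                       # no overflow at x = 800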
Example 14: EM
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def EM(init_params, data, callback=None):
    def EM_update(params):
        natural_params = list(map(np.log, params))
        loglike, E_stats = vgrad(log_partition_function)(natural_params, data)  # E step
        if callback: callback(loglike, params)
        return list(map(normalize, E_stats))  # M step

    def fixed_point(f, x0):
        x1 = f(x0)
        while different(x0, x1):
            x0, x1 = x1, f(x1)
        return x1

    def different(params1, params2):
        allclose = partial(np.allclose, atol=1e-3, rtol=1e-3)
        return not all(map(allclose, params1, params2))

    return fixed_point(EM_update, init_params)
Example 15: make_nn_funs
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import log [as alias]
def make_nn_funs(input_shape, layer_specs, L2_reg):
    parser = WeightsParser()
    cur_shape = input_shape
    for layer in layer_specs:
        N_weights, cur_shape = layer.build_weights_dict(cur_shape)
        parser.add_weights(layer, (N_weights,))

    def predictions(W_vect, inputs):
        """Outputs normalized log-probabilities.
        shape of inputs : [data, color, y, x]"""
        cur_units = inputs
        for layer in layer_specs:
            cur_weights = parser.get(W_vect, layer)
            cur_units = layer.forward_pass(cur_units, cur_weights)
        return cur_units

    def loss(W_vect, X, T):
        log_prior = -L2_reg * np.dot(W_vect, W_vect)
        log_lik = np.sum(predictions(W_vect, X) * T)
        return - log_prior - log_lik

    def frac_err(W_vect, X, T):
        return np.mean(np.argmax(T, axis=1) != np.argmax(predictions(W_vect, X), axis=1))

    return parser.N, predictions, loss, frac_err
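In the loss above, predictions returns normalized log-probabilities, so np.sum(predictions(W_vect, X) * T) picks out the log-probability of each true class when T is one-hot. A tiny standalone illustration with toy values:

import autograd.numpy as np

log_probs = np.log(np.array([[0.7, 0.2, 0.1]]))  # normalized log-probabilities for one example
T = np.array([[0.0, 1.0, 0.0]])                  # one-hot target: class 1
log_lik = np.sum(log_probs * T)                  # == np.log(0.2)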