本文整理汇总了Python中autograd.numpy.abs方法的典型用法代码示例。如果您正苦于以下问题:Python numpy.abs方法的具体用法?Python numpy.abs怎么用?Python numpy.abs使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块autograd.numpy的用法示例。
在下文中一共展示了numpy.abs方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: goto_time
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def goto_time(self, t, add_time=True):
    """Step the model's internal time forward to ``t``.

    Pops and steps through every queued time point earlier than ``t``,
    then steps to ``t`` itself.  While the current population is growing
    or shrinking exponentially (``self.curr_g != 0``), extra time points
    are queued at every population-size doubling/halving so the size
    trajectory is sampled finely enough (e.g. for plotting).

    t: target time; may be ``float('inf')``.
    add_time: if True, also push ``t`` onto the time queue so it is added
        when the next event is processed.
    """
    # if exponentially growing, add extra time points whenever
    # the population size doubles
    if self.curr_g != 0 and t < float('inf'):
        # time for the population size to change by a factor of 2
        # under exponential growth rate curr_g
        halflife = np.abs(np.log(.5) / self.curr_g)
        add_t = self.curr_t + halflife
        while add_t < t:
            self._push_time(add_t)
            add_t += halflife
    # process queued time points before t in increasing order
    # (NOTE(review): hq presumably is heapq -- time_stack looks like a min-heap)
    while self.time_stack and self.time_stack[0] < t:
        self.step_time(hq.heappop(self.time_stack))
    self.step_time(t, add=False)
    if add_time:
        # put t on queue to be added when processing next event
        # (allows further events to change population size before plotting)
        self._push_time(t)
示例2: area_projected
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def area_projected(self):
    """Return the wing area projected onto the XY plane (top-down view).

    Sums, over each pair of adjacent cross-sections, the product of the
    mean chord and the mean leading/trailing-edge y-span; the total is
    doubled for symmetric wings.
    """
    total = 0
    for inner, outer in zip(self.xsecs[:-1], self.xsecs[1:]):
        mean_chord = (inner.chord + outer.chord) / 2
        # spanwise (y) extent at the leading and trailing edges
        le_span = np.abs(inner.xyz_le[1] - outer.xyz_le[1])
        te_span = np.abs(inner.xyz_te()[1] - outer.xyz_te()[1])
        mean_span = (le_span + te_span) / 2
        total += mean_chord * mean_span
    # a symmetric wing definition only describes one half
    return total * 2 if self.symmetric else total
示例3: accel_gradient
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def accel_gradient(eps_arr, mode='max'):
    """Objective for the accelerator-gradient optimization (to be minimized).

    eps_arr: flat permittivity array; reshaped to (Nx, Ny) and written
        into the FDFD simulation object ``F`` before solving.
    mode: 'max' -> normalize by the maximum field, 'avg' -> by the
        average field, anything else -> by the constant ``E0``.

    Returns the negative normalized acceleration gradient, so minimizing
    this value maximizes the gradient.

    NOTE(review): relies on module-level names not visible in this chunk
    (F, source, Nx, Ny, eta, eps_r, Emax, Eavg, E0, npa) and mixes the
    ``np`` and ``npa`` (autograd.numpy) aliases -- confirm both are the
    same module here, otherwise autograd may not trace np.abs.
    """
    # set the permittivity of the FDFD and solve the fields
    F.eps_r = eps_arr.reshape((Nx, Ny))
    Ex, Ey, Hz = F.solve(source)
    # compute the gradient and normalize if you want
    G = npa.sum(Ey * eta / Ny)
    if mode == 'max':
        return -np.abs(G) / Emax(Ex, Ey, eps_r)
    elif mode == 'avg':
        return -np.abs(G) / Eavg(Ex, Ey)
    else:
        return -np.abs(G / E0)
# define the gradient for autograd
示例4: one_of_K_code
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def one_of_K_code(arr):
    """Return a one-of-K (one-hot) encoding of the values in ``arr``.

    For example, arr = ([0, 1, 0, 2]) yields the 2d array
        [[1, 0, 0],
         [0, 1, 0],
         [1, 0, 0],
         [0, 0, 1]]
    """
    levels = np.unique(arr)
    code = np.zeros((len(arr), len(levels)))
    for col, level in enumerate(levels):
        # tolerance-based match so float level values still hit their rows
        rows = np.where(np.abs(arr - level) < 1e-8)
        code[rows[0], col] = 1
    return code
示例5: __init__
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def __init__(self, means, variances, pmix=None):
    """
    means: a k x d 2d array specifying the means.
    variances: a one-dimensional length-k array of variances
    pmix: a one-dimensional length-k array of mixture weights. Sum to one.
        Defaults to uniform weights 1/k when omitted.

    Raises ValueError if the component counts disagree or the weights do
    not sum to 1 (within 1e-8).
    """
    k, d = means.shape
    if k != len(variances):
        raise ValueError('Number of components in means and variances do not match.')
    if pmix is None:
        # uniform weights; plain / replaces the Python-2 past.utils.old_div
        # shim (numpy division is already true division in Python 3)
        pmix = np.ones(k) / k
    if np.abs(np.sum(pmix) - 1) > 1e-8:
        raise ValueError('Mixture weights do not sum to 1.')
    self.pmix = pmix
    self.means = means
    self.variances = variances
示例6: _blocked_gibbs_next
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def _blocked_gibbs_next(self, X, H):
    """
    Sample from the mutual conditional distributions.

    One blocked-Gibbs sweep for a Gaussian-Bernoulli RBM: redraw the
    hidden units H given the visibles X, then redraw X given the new H.

    X: n x dx matrix of visible variables.
    H: n x dh matrix of hidden variables; only its shape is used here,
        since H is redrawn from scratch.
    Returns the updated (X, H); H entries are in {-1, +1}.
    """
    dh = H.shape[1]
    n, dx = X.shape
    B = self.B
    b = self.b
    # Draw H.
    XB2C = np.dot(X, self.B) + 2.0*self.c
    # Ph: n x dh matrix
    Ph = DSGaussBernRBM.sigmoid(XB2C)
    # H: n x dh
    H = (np.random.rand(n, dh) <= Ph)*2 - 1.0
    # sanity check: hidden units must be +/-1 valued
    assert np.all(np.abs(H) - 1 <= 1e-6 )
    # Draw X.
    # mean: n x dx
    mean = old_div(np.dot(H, B.T),2.0) + b
    # X | H is Gaussian with unit variance around the mean
    X = np.random.randn(n, dx) + mean
    return X, H
示例7: _evaluate
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def _evaluate(self, x, out, *args, **kwargs):
    """Schwefel function: f(x) = 418.9829*n - sum_i x_i * sin(sqrt(|x_i|)),
    vectorized over the rows of x."""
    row_sums = np.sum(x * np.sin(np.sqrt(np.abs(x))), axis=1)
    out["F"] = 418.9829 * self.n_var - row_sums
示例8: _evaluate
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def _evaluate(self, x, out, *args, **kwargs):
    """Kursawe bi-objective test problem, vectorized over the rows of x
    (expects at least 3 decision variables per row)."""
    # f1: sum over consecutive variable pairs of -10*exp(-0.2*||(x_i, x_i+1)||)
    pair_terms = [
        -10 * anp.exp(-0.2 * anp.sqrt(anp.square(x[:, i]) + anp.square(x[:, i + 1])))
        for i in range(2)
    ]
    f1 = anp.sum(anp.column_stack(pair_terms), axis=1)
    # f2: sum of |x_i|^0.8 + 5*sin(x_i^3)
    f2 = anp.sum(anp.power(anp.abs(x), 0.8) + 5 * anp.sin(anp.power(x, 3)), axis=1)
    out["F"] = anp.column_stack([f1, f2])
示例9: calc_constraint
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def calc_constraint(self, theta, a, b, c, d, e, f1, f2):
    """Rotated sinusoidal constraint boundary (CTP-style test problems).

    Rotates the shifted objective point (f1, f2 - e) by ``theta`` and
    compares the rotated f2 component against a sine wave of amplitude
    ``a``, frequency ``b``, and shape exponents ``c``/``d``.  Negative
    return values mean the constraint is satisfied.
    """
    rotated_f2 = anp.cos(theta) * (f2 - e) - anp.sin(theta) * f1
    rotated_f1 = anp.sin(theta) * (f2 - e) + anp.cos(theta) * f1
    wave = a * anp.abs(anp.sin(b * anp.pi * rotated_f1 ** c)) ** d
    return -(rotated_f2 - wave)
示例10: _do
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def _do(self, F, weights, **kwargs):
    """Weighted Tchebycheff scalarization of the objective matrix F
    with respect to the utopian point (row-wise max weighted deviation)."""
    weighted_dev = weights * anp.abs(F - self.utopian_point)
    return weighted_dev.max(axis=1)
示例11: _callback
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def _callback(self, *parameters, it=None, e_rel=1e-3, callback=None, min_iter=1):
    """Per-iteration hook: validate parameters, test convergence, chain callback.

    Raises StopIteration once the change between the last two loss values
    falls below ``e_rel`` relative to the latest loss (after ``min_iter``
    iterations).  Any user ``callback`` is invoked afterwards.
    """
    # raises ArithmeticError if some parameter has become inf/nan
    self.check_parameters()
    if it > min_iter:
        loss_delta = abs(self.loss[-2] - self.loss[-1])
        if loss_delta < e_rel * np.abs(self.loss[-1]):
            raise StopIteration("scarlet.Blend.fit() converged")
    if callback is not None:
        callback(*parameters, it=it)
示例12: transformed_expi
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def transformed_expi(x):
    """Piecewise evaluation of the transformed exponential integral.

    Entries with |x| < 1/45 go through the series expansion; the rest use
    the naive formula.  The two pieces are rejoined with np.concatenate
    because autograd does not support array assignment (ret[mask] = ...),
    which is why |x| must arrive sorted in decreasing order -- the concat
    then preserves the original element order.
    """
    magnitude = np.abs(x)
    use_series = magnitude < 1. / 45.
    use_naive = np.logical_not(use_series)
    # precondition: decreasing |x|, so naive entries all precede series ones
    assert np.all(magnitude[:-1] >= magnitude[1:])
    return np.concatenate((transformed_expi_naive(x[use_naive]),
                           transformed_expi_series(x[use_series])))
示例13: expm1d
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def expm1d(x, eps=1e-6):
    """Evaluate the expm1d helpers elementwise, switching to the Taylor
    branch for |x| < eps (presumably expm1(x)/x, which cancels badly near
    0 -- see expm1d_taylor / expm1d_naive).

    Scalars are dispatched directly.  Array inputs are split by magnitude
    and rejoined with np.concatenate because autograd does not support
    array assignment; that is why |x| must be sorted increasing.
    """
    x = np.array(x)
    magnitude = np.abs(x)
    if not x.shape:
        # 0-d input: pick the branch directly
        return expm1d_taylor(x) if magnitude < eps else expm1d_naive(x)
    # FIXME: don't require abs_x to be increasing
    assert np.all(magnitude[1:] >= magnitude[:-1])
    near_zero = magnitude < eps
    return np.concatenate([expm1d_taylor(x[near_zero]),
                           expm1d_naive(x[~near_zero])])
示例14: check_num_snps
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def check_num_snps(sampled_n_dict, demo, num_loci, mut_rate, ascertainment_pop=None, error_matrices=None):
    """Simulate data from ``demo`` and check the observed SNP count against
    the theoretical expectation (expected branch length * mutation rate).

    Asserts that a two-sided z-test p-value is >= .05.

    sampled_n_dict: mapping of population -> number of sampled lineages.
    demo: demographic model exposing simulate_data / expected_branchlen.
    num_loci: number of independent replicate loci to simulate.
    mut_rate: mutation rate per generation for the whole locus.
    ascertainment_pop, error_matrices: not supported; passing either
        raises NotImplementedError.
    """
    if error_matrices is not None or ascertainment_pop is not None:
        # TODO
        raise NotImplementedError
    #seg_sites = momi.simulate_ms(
    #    ms_path, demo, num_loci=num_loci, mut_rate=mut_rate)
    #sfs = seg_sites.sfs
    num_bases = 1000
    sfs = demo.simulate_data(
        sampled_n_dict=sampled_n_dict,
        muts_per_gen=mut_rate/num_bases,
        recoms_per_gen=0,
        length=num_bases,
        num_replicates=num_loci)._sfs
    # per-locus SNP counts; compare their mean against theory below
    n_sites = sfs.n_snps(vector=True)
    n_sites_mean = np.mean(n_sites)
    n_sites_sd = np.std(n_sites)
    # TODO this test isn't very useful because expected_branchlen is not used anywhere internally anymore
    n_sites_theoretical = demo.expected_branchlen(sampled_n_dict) * mut_rate
    #n_sites_theoretical = momi.expected_total_branch_len(
    #    demo, ascertainment_pop=ascertainment_pop, error_matrices=error_matrices) * mut_rate
    # negated |z| makes cdf(z)*2 a two-sided p-value
    zscore = -np.abs(n_sites_mean - n_sites_theoretical) / n_sites_sd
    pval = scipy.stats.norm.cdf(zscore) * 2.0
    assert pval >= .05
示例15: my_t_test
# 需要导入模块: from autograd import numpy [as 别名]
# 或者: from autograd.numpy import abs [as 别名]
def my_t_test(labels, theoretical, observed, min_samples=25):
    """One-sample t-tests of each observed row against its theoretical mean.

    labels: length-k sequence of row labels.
    theoretical: length-k 1d array of theoretical means.
    observed: k x runs 2d array of repeated observations per label.
    min_samples: rows with <= min_samples positive observations are dropped.

    Prints a table sorted by descending p-value, then returns
    beta.cdf(min(p_vals), 1, len(p_vals)) -- under the null the p-values
    are uniform, so their minimum is Beta(1, n) distributed.
    """
    assert theoretical.ndim == 1 and observed.ndim == 2
    assert len(theoretical) == observed.shape[
        0] and len(theoretical) == len(labels)

    # keep only rows with enough nonzero observations
    n_observed = np.sum(observed > 0, axis=1)
    keep = n_observed > min_samples
    theoretical = theoretical[keep]
    observed = observed[keep, :]
    labels = np.array(list(map(str, labels)))[keep]
    n_observed = n_observed[keep]

    runs = observed.shape[1]
    observed_mean = np.mean(observed, axis=1)
    bias = observed_mean - theoretical
    # NOTE(review): np.var defaults to ddof=0 (population variance); the
    # textbook t statistic uses ddof=1 -- kept as-is to preserve behavior.
    t_vals = bias / np.sqrt(np.var(observed, axis=1)) * np.sqrt(runs)

    # two-sided p-values from the t distribution
    p_vals = 2.0 * scipy.stats.t.sf(np.abs(t_vals), df=runs - 1)

    print("# labels, p-values, empirical-mean, theoretical-mean, nonzero-counts")
    table = np.array([labels, p_vals, observed_mean,
                      theoretical, n_observed]).transpose()
    order = np.array(table[:, 1], dtype='float').argsort()[::-1]  # descending p
    print(table[order])
    print("Note p-values are for t-distribution, which may not be a good approximation to the true distribution")

    # p-values should be uniformly distributed
    # so then the min p-value should be beta distributed
    return scipy.stats.beta.cdf(np.min(p_vals), 1, len(p_vals))