This article collects typical usage examples of the Python method autograd.numpy.mean. If you have been wondering what exactly numpy.mean does, how to use it, or what example code for it looks like, the curated examples below may help. You can also explore further usage of the containing module, autograd.numpy.
In the following, 15 code examples of the numpy.mean method are presented, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
Example 1: _many_score_cov
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def _many_score_cov(params, data, demo_func, **kwargs):
    params = np.array(params)

    def f_vec(x):
        ret = _composite_log_likelihood(
            data, demo_func(*x), vector=True, **kwargs)
        # centralize
        return ret - np.mean(ret)

    # g_out = einsum('ij,ik', jacobian(f_vec)(params), jacobian(f_vec)(params))
    # but computed in a roundabout way because jacobian implementation is slow
    def _g_out_antihess(x):
        l = f_vec(x)
        lc = make_constant(l)
        return np.sum(0.5 * (l**2 - l * lc - lc * l))

    return autograd.hessian(_g_out_antihess)(params)
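As a sanity check, the anti-Hessian trick above can be verified against the direct Jacobian product on a toy function. A minimal sketch (the toy f_vec and the closure-based stand-in for make_constant are assumptions for illustration):

import autograd
import autograd.numpy as np

def toy_f_vec(x):
    # toy vector-valued function standing in for the per-datum log-likelihoods
    return np.array([x[0]**2, x[0] * x[1], np.sin(x[1])])

def g_out_direct(x):
    # J^T J via the (slower) explicit-jacobian route
    j = autograd.jacobian(toy_f_vec)(x)
    return np.einsum('ij,ik', j, j)

def make_g_out_antihess(x0):
    lc = toy_f_vec(x0)  # plain array: plays the role of make_constant(l)
    def g(x):
        l = toy_f_vec(x)
        return np.sum(0.5 * (l**2 - 2 * l * lc))
    return g

x0 = np.array([0.7, -1.3])
# At x0 the residual l - lc vanishes, so the Hessian reduces to J^T J.
assert np.allclose(autograd.hessian(make_g_out_antihess(x0))(x0), g_out_direct(x0))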
Example 2: get_mcl_normal_direction_at_chord_fraction
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def get_mcl_normal_direction_at_chord_fraction(self, chord_fraction):
    # Returns the normal direction of the mean camber line at a specified chord fraction.
    # If you input a single value, returns a 1D numpy array with 2 elements (x, y).
    # If you input a vector of values, returns a 2D numpy array; the first index is
    # the point number, the second index is (x, y).
    # Right now, does it by finite differencing camber values :(
    # When I'm less lazy I'll make it do it in a proper, more efficient way
    # TODO make this not finite difference
    epsilon = np.sqrt(np.finfo(float).eps)
    cambers = self.get_camber_at_chord_fraction(chord_fraction)
    cambers_incremented = self.get_camber_at_chord_fraction(chord_fraction + epsilon)
    dydx = (cambers_incremented - cambers) / epsilon
    if np.ndim(dydx) == 0:  # single point
        normal = np.hstack((-dydx, 1))
        normal /= np.linalg.norm(normal)
        return normal
    else:  # multiple points, vectorized
        normal = np.column_stack((-dydx, np.ones(dydx.shape)))
        normal /= np.expand_dims(np.linalg.norm(normal, axis=1), axis=1)  # normalize each row
        return normal
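A hedged usage sketch follows, using a hypothetical stand-in class with a parabolic mean camber line (it assumes the function above is defined at module level so it can be attached as a method):

import autograd.numpy as np

class ParabolicCamberFoil:
    # Hypothetical stand-in: parabolic camber line y = 4*m*x*(1-x).
    def __init__(self, m=0.02):
        self.m = m
    def get_camber_at_chord_fraction(self, x):
        return 4 * self.m * x * (1 - x)
    # reuse the module-level function above as a method
    get_mcl_normal_direction_at_chord_fraction = get_mcl_normal_direction_at_chord_fraction

foil = ParabolicCamberFoil()
print(foil.get_mcl_normal_direction_at_chord_fraction(0.5))  # ~[0, 1] at max camber
print(foil.get_mcl_normal_direction_at_chord_fraction(np.linspace(0.2, 0.8, 4)).shape)  # (4, 2)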
Example 3: make_nn_funs
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def make_nn_funs(input_shape, layer_specs, L2_reg):
    parser = WeightsParser()
    cur_shape = input_shape
    for layer in layer_specs:
        N_weights, cur_shape = layer.build_weights_dict(cur_shape)
        parser.add_weights(layer, (N_weights,))

    def predictions(W_vect, inputs):
        """Outputs normalized log-probabilities.
        shape of inputs : [data, color, y, x]"""
        cur_units = inputs
        for layer in layer_specs:
            cur_weights = parser.get(W_vect, layer)
            cur_units = layer.forward_pass(cur_units, cur_weights)
        return cur_units

    def loss(W_vect, X, T):
        log_prior = -L2_reg * np.dot(W_vect, W_vect)
        log_lik = np.sum(predictions(W_vect, X) * T)
        return -log_prior - log_lik

    def frac_err(W_vect, X, T):
        return np.mean(np.argmax(T, axis=1) != np.argmax(predictions(W_vect, X), axis=1))

    return parser.N, predictions, loss, frac_err
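A hedged sketch of how the returned functions might be driven in a plain gradient-descent loop; input_shape, layer_specs, and the training arrays are assumed to come from the surrounding convnet example and are not defined here:

from autograd import grad
import autograd.numpy.random as npr

N_weights, predictions, loss, frac_err = make_nn_funs(input_shape, layer_specs, L2_reg=1.0)
loss_grad = grad(loss)                 # gradient w.r.t. the flat weight vector
W = npr.RandomState(0).randn(N_weights) * 0.1
for i in range(100):
    W = W - 0.01 * loss_grad(W, train_images, train_labels)
    # frac_err(W, test_images, test_labels) gives the current test error rate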
Example 4: black_box_variational_inference
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def black_box_variational_inference(logprob, D, num_samples):
    """Implements http://arxiv.org/abs/1401.0118, and uses the
    local reparameterization trick from http://arxiv.org/abs/1506.02557"""
    # assumes: import autograd.numpy.random as npr; from autograd import grad

    def unpack_params(params):
        # Variational dist is a diagonal Gaussian.
        mean, log_std = params[:D], params[D:]
        return mean, log_std

    def gaussian_entropy(log_std):
        return 0.5 * D * (1.0 + np.log(2*np.pi)) + np.sum(log_std)

    rs = npr.RandomState(0)

    def variational_objective(params, t):
        """Provides a stochastic estimate of the variational lower bound."""
        mean, log_std = unpack_params(params)
        samples = rs.randn(num_samples, D) * np.exp(log_std) + mean
        lower_bound = gaussian_entropy(log_std) + np.mean(logprob(samples, t))
        return -lower_bound

    gradient = grad(variational_objective)
    return variational_objective, gradient, unpack_params
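A hedged usage sketch: fit a diagonal Gaussian to a toy two-dimensional target with autograd's built-in Adam optimizer (the toy logprob is an assumption for illustration):

import autograd.numpy as np
from autograd.misc.optimizers import adam

D = 2
def logprob(samples, t):
    # log-density (up to a constant) of an isotropic Gaussian centered at (1, -1)
    return -0.5 * np.sum((samples - np.array([1.0, -1.0]))**2, axis=1)

objective, gradient, unpack_params = black_box_variational_inference(logprob, D, num_samples=200)
opt_params = adam(gradient, np.zeros(2 * D), step_size=0.1, num_iters=500)
mean, log_std = unpack_params(opt_params)  # mean should approach (1, -1)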
Example 5: fit_gaussian_draw
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points
    from the fit.
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise eigenvalues of the covariance matrix to this power to
      construct a new covariance matrix before drawing samples. Useful to
      shrink the spread of the variance.
    """
    # NumpySeedContext is a seed-scoping context manager from the snippet's package.
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d == 1:
            cov_x = np.array([[cov_x]])
        [evals, evecs] = np.linalg.eig(cov_x)
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V
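A hedged usage sketch (it assumes NumpySeedContext is importable from the snippet's surrounding package):

import autograd.numpy as np

X = np.random.randn(200, 3) * np.array([1.0, 2.0, 0.5])  # anisotropic toy cloud
V = fit_gaussian_draw(X, J=10, seed=3, eig_pow=0.5)       # eig_pow < 1 shrinks the spread
print(V.shape)  # (10, 3)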
Example 6: _blocked_gibbs_next
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def _blocked_gibbs_next(self, X, H):
    """
    Sample from the mutual conditional distributions.
    """
    # assumes: from past.utils import old_div; DSGaussBernRBM is the enclosing class
    dh = H.shape[1]
    n, dx = X.shape
    B = self.B
    b = self.b
    # Draw H.
    XB2C = np.dot(X, self.B) + 2.0*self.c
    # Ph: n x dh matrix
    Ph = DSGaussBernRBM.sigmoid(XB2C)
    # H: n x dh, entries in {-1, +1}
    H = (np.random.rand(n, dh) <= Ph)*2 - 1.0
    assert np.all(np.abs(H) - 1 <= 1e-6)
    # Draw X.
    # mean: n x dx
    mean = old_div(np.dot(H, B.T), 2.0) + b
    X = np.random.randn(n, dx) + mean
    return X, H
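A hedged sketch of driving this kernel as a sampler; rbm is a hypothetical instance of the RBM data-source class this method belongs to:

import autograd.numpy as np

n, dx, dh = 100, 4, 3
X = np.random.randn(n, dx)            # arbitrary initial visibles
H = np.sign(np.random.randn(n, dh))   # arbitrary initial hiddens in {-1, +1}
for _ in range(500):                  # burn-in sweeps
    X, H = rbm._blocked_gibbs_next(X, H)
# X now approximates samples from the RBM's marginal over the visibles.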
Example 7: gmm_sample
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def gmm_sample(self, mean=None, w=None, N=10000, n=10, d=2, seed=10):
    # assumes: from scipy import stats; from past.utils import old_div
    np.random.seed(seed)
    self.d = d
    if mean is None:
        mean = np.random.randn(n, d)*10
    if w is None:
        w = np.random.rand(n)
    w = old_div(w, sum(w))
    multi = np.random.multinomial(N, w)
    X = np.zeros((N, d))
    base = 0
    for i in range(n):
        X[base:base+multi[i], :] = np.random.multivariate_normal(mean[i, :], np.eye(self.d), multi[i])
        base += multi[i]
    llh = np.zeros(N)
    for i in range(n):
        llh += w[i] * stats.multivariate_normal.pdf(X, mean[i, :], np.eye(self.d))
    #llh = llh/sum(llh)
    return X, llh
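A hedged usage sketch; src is a hypothetical instance of the class this method belongs to:

X, llh = src.gmm_sample(N=5000, n=3, d=2, seed=0)
print(X.shape, llh.shape)   # (5000, 2) (5000,)
print(np.all(llh > 0))      # the mixture density is strictly positive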
Example 8: constraint_c4_cylindrical
# Required import: from autograd import numpy [as anp]
# Or: from autograd.numpy import mean [as alias]
def constraint_c4_cylindrical(f, r):  # cylindrical
    l = anp.mean(f, axis=1)
    l = anp.expand_dims(l, axis=1)
    g = -anp.sum(anp.power(f - l, 2), axis=1) + anp.power(r, 2)
    return g
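This constraint is fully self-contained, so a runnable sketch is easy. Under the usual g(x) <= 0 feasibility convention, it keeps solutions outside a cylinder of radius r around the all-objectives-equal line:

import autograd.numpy as anp

F = anp.array([[0.5, 0.5, 0.5],   # on the diagonal: squared distance 0
               [1.0, 0.0, 0.0]])  # off the diagonal: squared distance 2/3
print(constraint_c4_cylindrical(F, r=0.4))
# [0.16, -0.507]: positive = constraint violated, negative = satisfied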
Example 9: loss_fcn
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def loss_fcn(params, inputs, targets):
    return np.mean(np.square(predict_fcn(params, inputs) - targets))

# Iterator over mini-batches
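A hedged, self-contained sketch with a minimal linear predict_fcn (a hypothetical stand-in for whatever model the surrounding example defines):

import autograd.numpy as np
from autograd import grad

def predict_fcn(params, inputs):   # minimal linear model
    W, b = params
    return np.dot(inputs, W) + b

params = (np.zeros((3, 1)), np.zeros(1))
inputs, targets = np.ones((8, 3)), np.ones((8, 1))
print(loss_fcn(params, inputs, targets))        # 1.0 for the all-zero model
print(grad(loss_fcn)(params, inputs, targets))  # gradients w.r.t. (W, b)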
Example 10: godambe
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def godambe(self, inverse=False):
    """
    Returns the Godambe information.

    If the true params are in the interior of the parameter space,
    the composite MLE will be approximately Gaussian, centered at the
    true params, with covariance given by the inverse of the Godambe
    information. Concretely, with sensitivity (Fisher) matrix H and
    score covariance S, this computes the sandwich covariance
    H^{-1} S H^{-1} and, unless inverse=True, returns its inverse
    G = H S^{-1} H, the Godambe information.
    """
    fisher_inv = inv_psd(self.fisher, tol=self.psd_rtol)
    ret = check_psd(np.dot(fisher_inv, np.dot(
        self.score_cov, fisher_inv)), tol=self.psd_rtol)
    if not inverse:
        ret = inv_psd(ret, tol=self.psd_rtol)
    return ret
Example 11: sd
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def sd(self):
    """
    Standard deviation of the statistic, estimated via jackknife
    """
    resids = self.jackknifed_array - self.observed
    return np.sqrt(np.mean(resids**2) * (
        len(self.jackknifed_array) - 1))
Example 12: bias
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def bias(self):
    return np.mean(self.jackknifed_array - self.observed) * (
        len(self.jackknifed_array) - 1)
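A hedged, self-contained sketch of the two jackknife formulas above, applied to the sample mean of a toy data set (a case where the bias estimate should come out at zero):

import autograd.numpy as np

data = np.array([2.0, 4.0, 6.0, 8.0])
observed = np.mean(data)
jackknifed = np.array([np.mean(np.delete(data, i)) for i in range(len(data))])

resids = jackknifed - observed
sd = np.sqrt(np.mean(resids**2) * (len(jackknifed) - 1))
bias = np.mean(resids) * (len(jackknifed) - 1)
print(sd, bias)  # sd matches the usual s/sqrt(n) standard error; bias is 0 here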
Example 13: _score_cov
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def _score_cov(self, params):
    params = np.array(params)

    def f_vec(x):
        ret = self._log_lik(x, vector=True)
        # centralize
        return ret - np.mean(ret)

    j = ag.jacobian(f_vec)(params)
    return np.einsum('ij, ik', j, j)
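Note that this computes the same score covariance as Example 1: einsum('ij, ik', j, j) contracts the Jacobian over the data index i, giving the sum of per-datum score outer products J^T J, whereas Example 1 obtained the identical matrix via the anti-Hessian trick to sidestep autograd's slower jacobian.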
Example 14: __init__
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def __init__(self, full_surface, pieces, rgen):
    try:
        assert pieces > 0 and pieces == int(pieces)
    except (TypeError, AssertionError):
        raise ValueError("pieces should be a positive integer")
    self.pieces = full_surface._get_stochastic_pieces(pieces, rgen)
    self.total_snp_counts = full_surface.sfs._total_freqs
    logger.info(
        "Created {n_batches} minibatches, with an average of {n_snps} SNPs "
        "and {n_sfs} unique SFS entries per batch".format(
            n_batches=len(self.pieces),
            n_snps=full_surface.sfs.n_snps() / float(len(self.pieces)),
            n_sfs=np.mean([len(piece.sfs.configs) for piece in self.pieces])))
    self.rgen = rgen
    self.full_surface = full_surface
Example 15: check_num_snps
# Required import: from autograd import numpy [as np]
# Or: from autograd.numpy import mean [as alias]
def check_num_snps(sampled_n_dict, demo, num_loci, mut_rate,
                   ascertainment_pop=None, error_matrices=None):
    # assumes: import scipy.stats
    if error_matrices is not None or ascertainment_pop is not None:
        # TODO
        raise NotImplementedError
        #seg_sites = momi.simulate_ms(
        #    ms_path, demo, num_loci=num_loci, mut_rate=mut_rate)
        #sfs = seg_sites.sfs

    num_bases = 1000
    sfs = demo.simulate_data(
        sampled_n_dict=sampled_n_dict,
        muts_per_gen=mut_rate/num_bases,
        recoms_per_gen=0,
        length=num_bases,
        num_replicates=num_loci)._sfs

    n_sites = sfs.n_snps(vector=True)
    n_sites_mean = np.mean(n_sites)
    n_sites_sd = np.std(n_sites)

    # TODO: this test isn't very useful because expected_branchlen is
    # not used anywhere internally anymore
    n_sites_theoretical = demo.expected_branchlen(sampled_n_dict) * mut_rate
    #n_sites_theoretical = momi.expected_total_branch_len(
    #    demo, ascertainment_pop=ascertainment_pop, error_matrices=error_matrices) * mut_rate

    zscore = -np.abs(n_sites_mean - n_sites_theoretical) / n_sites_sd
    pval = scipy.stats.norm.cdf(zscore) * 2.0
    assert pval >= .05
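The final three lines implement a standard two-sided z-test, p = 2 * Phi(-|mean - expected| / sd); a hedged, self-contained sketch:

import scipy.stats
import autograd.numpy as np

def two_sided_z_pval(observed_mean, expected, sd):
    zscore = -np.abs(observed_mean - expected) / sd
    return scipy.stats.norm.cdf(zscore) * 2.0

print(two_sided_z_pval(10.2, 10.0, 1.0))  # ~0.84, comfortably above the 0.05 cutoff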