This article collects typical usage examples of the autograd.numpy.ones method in Python. If you have been wondering what numpy.ones does, how to use it, or what it looks like in real code, the hand-picked examples below may help. You can also browse further usage examples from the enclosing module, autograd.numpy.
Below are 15 code examples of numpy.ones, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: expected_tmrca
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def expected_tmrca(demography, sampled_pops=None, sampled_n=None):
"""
The expected time to most recent common ancestor of the sample.
Parameters
----------
demography : Demography
Returns
-------
tmrca : float-like
See Also
--------
expected_deme_tmrca : tmrca of subsample within a deme
expected_sfs_tensor_prod : compute general class of summary statistics
"""
vecs = [np.ones(n + 1) for n in demography.sampled_n]
n0 = len(vecs[0]) - 1.0
vecs[0] = np.arange(n0 + 1) / n0
return np.squeeze(expected_sfs_tensor_prod(vecs, demography))
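To make the vector construction concrete, here is a standalone sketch of how the leading vectors are assembled (the sampled_n values are made up for illustration; only numpy.ones and numpy.arange are exercised):

import autograd.numpy as np

sampled_n = [4, 6]  # hypothetical sample sizes, one per population
vecs = [np.ones(n + 1) for n in sampled_n]  # all-ones vectors sum over every allele count
n0 = len(vecs[0]) - 1.0
vecs[0] = np.arange(n0 + 1) / n0  # [0.0, 0.25, 0.5, 0.75, 1.0] reweights the first population
# passing vecs to expected_sfs_tensor_prod then yields the expected TMRCA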
Example 2: _mut_factor_het
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def _mut_factor_het(sfs, demo, mut_rate, vector, p_missing):
mut_rate = mut_rate * np.ones(sfs.n_loci)
E_het = expected_heterozygosity(
demo,
restrict_to_pops=np.array(
sfs.sampled_pops)[sfs.ascertainment_pop])
p_missing = p_missing * np.ones(len(sfs.ascertainment_pop))
p_missing = p_missing[sfs.ascertainment_pop]
lambd = np.einsum("i,j->ij", mut_rate, E_het * (1.0 - p_missing))
counts = sfs.avg_pairwise_hets[:, sfs.ascertainment_pop]
ret = -lambd + counts * np.log(lambd) - scipy.special.gammaln(counts + 1)
ret = ret * sfs.sampled_n[sfs.ascertainment_pop] / float(
np.sum(sfs.sampled_n[sfs.ascertainment_pop]))
if not vector:
ret = np.sum(ret)
else:
ret = np.sum(ret, axis=1)
return ret
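The ret expression above is the elementwise Poisson log-likelihood, -lambda + k*log(lambda) - log(k!). A quick standalone sanity check against scipy (the lambd and counts values here are illustrative):

import numpy as np
import scipy.special
import scipy.stats

lambd = np.array([0.5, 1.0, 2.0])
counts = np.array([0.0, 1.0, 3.0])
manual = -lambd + counts * np.log(lambd) - scipy.special.gammaln(counts + 1)
assert np.allclose(manual, scipy.stats.poisson.logpmf(counts, lambd))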
Example 3: get_mcl_normal_direction_at_chord_fraction
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def get_mcl_normal_direction_at_chord_fraction(self, chord_fraction):
# Returns the normal direction of the mean camber line at a specified chord fraction.
# If you input a single value, returns a 1D numpy array with 2 elements (x,y).
# If you input a vector of values, returns a 2D numpy array. First index is the point number, second index is (x,y)
# Right now, does it by finite differencing camber values :(
# When I'm less lazy I'll make it do it in a proper, more efficient way
# TODO make this not finite difference
    epsilon = np.sqrt(np.finfo(float).eps)
    cambers = self.get_camber_at_chord_fraction(chord_fraction)
    cambers_incremented = self.get_camber_at_chord_fraction(chord_fraction + epsilon)
    dydx = (cambers_incremented - cambers) / epsilon
    if np.ndim(dydx) == 0:  # single point
        normal = np.hstack((-dydx, 1))
        normal /= np.linalg.norm(normal)
        return normal
    else:  # multiple points vectorized
        normal = np.column_stack((-dydx, np.ones(dydx.shape)))
        normal /= np.expand_dims(np.linalg.norm(normal, axis=1), axis=1)  # normalize
        return normal
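A minimal standalone check of the same finite-difference recipe, using a made-up parabolic camber line in place of get_camber_at_chord_fraction (the resulting normals should be perpendicular to the local tangents):

import numpy as np

def camber(x):
    return x * (1 - x)  # stand-in camber line, slope 1 - 2x

epsilon = np.sqrt(np.finfo(float).eps)
x = np.array([0.25, 0.5, 0.75])
dydx = (camber(x + epsilon) - camber(x)) / epsilon
normal = np.column_stack((-dydx, np.ones(dydx.shape)))
normal /= np.expand_dims(np.linalg.norm(normal, axis=1), axis=1)
tangent = np.column_stack((np.ones(dydx.shape), dydx))
assert np.allclose(np.sum(normal * tangent, axis=1), 0)  # normals are perpendicular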
Example 4: optimize_and_lls
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def optimize_and_lls(optfun):
num_iters = 200
elbos = []
def callback(params, t, g):
elbo_val = -objective(params, t)
elbos.append(elbo_val)
if t % 50 == 0:
print("Iteration {} lower bound {}".format(t, elbo_val))
init_mean = -1 * np.ones(D)
init_log_std = -5 * np.ones(D)
init_var_params = np.concatenate([init_mean, init_log_std])
variational_params = optfun(num_iters, init_var_params, callback)
return np.array(elbos)
# let's optimize this with a few different step sizes
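This snippet relies on names defined elsewhere in the surrounding script (objective and D, from autograd's black-box variational inference example). A hedged stand-in that makes it runnable end to end; the toy objective below is purely illustrative, not the real ELBO:

import autograd.numpy as np
from autograd import grad
from autograd.misc.optimizers import adam

D = 2  # dimensionality of the variational mean (and log-std)

def objective(params, t):
    return np.sum(params ** 2)  # toy stand-in for the negative ELBO

def adam_optfun(num_iters, init_params, callback):
    return adam(grad(objective), init_params,
                step_size=0.1, num_iters=num_iters, callback=callback)

elbos = optimize_and_lls(adam_optfun)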
Example 5: test_getter
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def test_getter():
def fun(input_list):
A = np.sum(input_list[0])
B = np.sum(input_list[1])
C = np.sum(input_list[1])
return A + B + C
d_fun = grad(fun)
input_list = [npr.randn(5, 6),
npr.randn(4, 3),
npr.randn(2, 4)]
result = d_fun(input_list)
assert np.allclose(result[0], np.ones((5, 6)))
assert np.allclose(result[1], 2 * np.ones((4, 3)))
assert np.allclose(result[2], np.zeros((2, 4)))
Example 6: test_getter
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def test_getter():
def fun(input_tuple):
A = np.sum(input_tuple[0])
B = np.sum(input_tuple[1])
C = np.sum(input_tuple[1])
return A + B + C
d_fun = grad(fun)
input_tuple = (npr.randn(5, 6),
npr.randn(4, 3),
npr.randn(2, 4))
result = d_fun(input_tuple)
assert np.allclose(result[0], np.ones((5, 6)))
assert np.allclose(result[1], 2 * np.ones((4, 3)))
assert np.allclose(result[2], np.zeros((2, 4)))
Example 7: test_getter
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def test_getter():
def fun(input_dict):
A = np.sum(input_dict['item_1'])
B = np.sum(input_dict['item_2'])
C = np.sum(input_dict['item_2'])
return A + B + C
d_fun = grad(fun)
input_dict = {'item_1' : npr.randn(5, 6),
'item_2' : npr.randn(4, 3),
'item_X' : npr.randn(2, 4)}
result = d_fun(input_dict)
assert np.allclose(result['item_1'], np.ones((5, 6)))
assert np.allclose(result['item_2'], 2 * np.ones((4, 3)))
assert np.allclose(result['item_X'], np.zeros((2, 4)))
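The three test_getter variants above are identical apart from the container type (list, tuple, dict): an argument used once gets a gradient of ones, one used twice gets 2 * ones, and an unused one gets zeros. They presumably run under autograd's usual test imports:

import autograd.numpy as np
import autograd.numpy.random as npr
from autograd import grad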
Example 8: make_IO_matrices
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def make_IO_matrices(indices, N):
""" Makes matrices that relate the sparse matrix entries to their locations in the matrix
The kth column of I is a 'one hot' vector specifing the k-th entries row index into A
The kth column of J is a 'one hot' vector specifing the k-th entries columnn index into A
O = J^T is for notational convenience.
Armed with a vector of M entries 'a', we can construct the sparse matrix 'A' as:
A = I @ diag(a) @ O
where 'diag(a)' is a (MxM) matrix with vector 'a' along its diagonal.
In index notation:
A_ij = I_ik * a_k * O_kj
In an opposite way, given sparse matrix 'A' we can strip out the entries `a` using the IO matrices as follows:
a = diag(I^T @ A @ O^T)
In index notation:
a_k = I_ik * A_ij * O_kj
"""
M = indices.shape[1] # number of indices in the matrix
entries_1 = npa.ones(M) # M entries of all 1's
ik, jk = indices # separate i and j components of the indices
indices_I = npa.vstack((ik, npa.arange(M))) # indices into the I matrix
indices_J = npa.vstack((jk, npa.arange(M))) # indices into the J matrix
I = make_sparse(entries_1, indices_I, shape=(N, M)) # construct the I matrix
J = make_sparse(entries_1, indices_J, shape=(N, M)) # construct the J matrix
O = J.T # make O = J^T matrix for consistency with my notes.
return I, O
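A small worked example of the A = I @ diag(a) @ O identity, using scipy.sparse in place of the library's make_sparse helper (which is assumed here to build an equivalent CSR matrix from entries and indices):

import numpy as np
import scipy.sparse as sp

indices = np.array([[0, 1, 1],    # row indices i_k
                    [2, 0, 1]])   # column indices j_k
a = np.array([10.0, 20.0, 30.0])  # the M = 3 sparse entries
N, M = 3, indices.shape[1]

I = sp.csr_matrix((np.ones(M), (indices[0], np.arange(M))), shape=(N, M))
J = sp.csr_matrix((np.ones(M), (indices[1], np.arange(M))), shape=(N, M))
O = J.T

A = I @ sp.diags(a) @ O  # reassemble the sparse matrix from its entries
assert A[0, 2] == 10.0 and A[1, 0] == 20.0 and A[1, 1] == 30.0

a_recovered = (I.T @ A @ O.T).diagonal()  # strip the entries back out
assert np.allclose(a_recovered, a)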
Example 9: _make_A
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def _make_A(self, eps_vec):
    eps_vec_xx, eps_vec_yy = self._grid_average_2d(eps_vec)
    eps_vec_xx_inv = 1 / (eps_vec_xx + 1e-5)  # the 1e-5 is for numerical stability;
    eps_vec_yy_inv = 1 / (eps_vec_yy + 1e-5)  # autograd throws 'divide by zero' errors otherwise.
    indices_diag = npa.vstack((npa.arange(self.N), npa.arange(self.N)))
    entries_DxEpsy, indices_DxEpsy = spsp_mult(self.entries_Dxb, self.indices_Dxb, eps_vec_yy_inv, indices_diag, self.N)
    entries_DxEpsyDx, indices_DxEpsyDx = spsp_mult(entries_DxEpsy, indices_DxEpsy, self.entries_Dxf, self.indices_Dxf, self.N)
    entries_DyEpsx, indices_DyEpsx = spsp_mult(self.entries_Dyb, self.indices_Dyb, eps_vec_xx_inv, indices_diag, self.N)
    entries_DyEpsxDy, indices_DyEpsxDy = spsp_mult(entries_DyEpsx, indices_DyEpsx, self.entries_Dyf, self.indices_Dyf, self.N)
    entries_d = 1 / EPSILON_0 * npa.hstack((entries_DxEpsyDx, entries_DyEpsxDy))
    indices_d = npa.hstack((indices_DxEpsyDx, indices_DyEpsxDy))
    entries_diag = MU_0 * self.omega**2 * npa.ones(self.N)
    entries_a = npa.hstack((entries_d, entries_diag))
    indices_a = npa.hstack((indices_d, indices_diag))
    return entries_a, indices_a
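Reading the assembly off the code, the operator these entries represent appears to be the Hz-polarization FDFD system (a hedged reconstruction from the snippet, not quoted from the library's docs):

A = (1 / EPSILON_0) * (Dxb @ diag(1 / eps_yy) @ Dxf + Dyb @ diag(1 / eps_xx) @ Dyf) + MU_0 * omega**2 * I

where the npa.ones(self.N) call supplies the uniform diagonal MU_0 * omega**2 block.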
Example 10: test_preconditioning
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def test_preconditioning():
def f(x, y, z, a, b):
return np.sum(x ** 2) + np.sum((y - 3) ** 2) + np.sum((z + a) ** 4)
a = 2
b = 5
shapes = [(2, 3), (2, 2), (3,)]
optim_vars_init = [np.ones(shape) for shape in shapes]
def precon_fwd(x, y, z, a, b):
return 3 * x, y / 2, z * 4
def precon_bwd(x, y, z, a, b):
return x / 3, 2 * y, z / 4
optim_vars, res = minimize(f, optim_vars_init, args=(a, b),
precon_fwd=precon_fwd, precon_bwd=precon_bwd)
assert res['success']
assert [var.shape for var in optim_vars] == shapes
for var, target in zip(optim_vars, [0, 3, -a]):
assert_allclose(var, target, atol=1e-1)
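For the recovered minimizers to land back in the original coordinates, precon_bwd must invert precon_fwd exactly; a quick standalone round-trip check (minimize itself is assumed to come from a library such as autoptim):

import numpy as np

def precon_fwd(x, y, z, a, b):
    return 3 * x, y / 2, z * 4

def precon_bwd(x, y, z, a, b):
    return x / 3, 2 * y, z / 4

x, y, z = np.ones((2, 3)), np.ones((2, 2)), np.ones(3)
round_trip = precon_bwd(*precon_fwd(x, y, z, 2, 5), 2, 5)
for original, recovered in zip((x, y, z), round_trip):
    assert np.allclose(original, recovered)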
Example 11: __init__
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def __init__(self, means, variances, pmix=None):
"""
means: a k x d 2d array specifying the means.
variances: a one-dimensional length-k array of variances
pmix: a one-dimensional length-k array of mixture weights. Sum to one.
"""
    k, d = means.shape
    if k != len(variances):
        raise ValueError('Numbers of components in means and variances do not match.')
    if pmix is None:
        pmix = old_div(np.ones(k), float(k))  # old_div comes from past.utils
    if np.abs(np.sum(pmix) - 1) > 1e-8:
        raise ValueError('Mixture weights do not sum to 1.')
    self.pmix = pmix
    self.means = means
    self.variances = variances
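A hedged sketch of drawing samples from this isotropic mixture using the stored attributes (the sample_mixture helper is illustrative; no such method is shown in the snippet above):

import numpy as np

def sample_mixture(pmix, means, variances, n, seed=0):
    rng = np.random.RandomState(seed)
    k, d = means.shape
    components = rng.choice(k, size=n, p=pmix)  # pick a component for each draw
    noise = rng.randn(n, d) * np.sqrt(variances)[components, None]
    return means[components] + noise

X = sample_mixture(np.array([0.5, 0.5]),
                   np.array([[0.0, 0.0], [5.0, 5.0]]),
                   np.array([1.0, 0.25]), n=100)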
Example 12: constraint_c2
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def constraint_c2(f, r):
n_obj = f.shape[1]
v1 = anp.inf * anp.ones(f.shape[0])
for i in range(n_obj):
temp = (f[:, i] - 1) ** 2 + (anp.sum(f ** 2, axis=1) - f[:, i] ** 2) - r ** 2
v1 = anp.minimum(temp.flatten(), v1)
a = 1 / anp.sqrt(n_obj)
v2 = anp.sum((f - a) ** 2, axis=1) - r ** 2
g = anp.minimum(v1, v2.flatten())
return g
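Geometrically, v1 measures feasibility with respect to spheres of radius r centered on each objective axis, and v2 with respect to a sphere centered at (1/sqrt(n_obj), ...); a point is feasible when g <= 0 (pymoo's convention). A quick illustrative evaluation with made-up objective vectors:

import autograd.numpy as anp

f = anp.array([[1.0, 0.0, 0.0],          # on a corner of the front
               [0.577, 0.577, 0.577]])   # near the center direction
g = constraint_c2(f, r=0.4)
print(g)  # both values should be <= 0, i.e. feasible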
Example 13: __init__
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def __init__(self):
self.n_var = 20
self.n_constr = 2
self.n_obj = 1
self.func = self._evaluate
self.xl = anp.zeros(self.n_var)
self.xu = 10 * anp.ones(self.n_var)
super(G2, self).__init__(n_var=self.n_var, n_obj=self.n_obj, n_constr=self.n_constr, xl=self.xl, xu=self.xu,
type_var=anp.double)
Example 14: __init__
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def __init__(self, n_var=10):
super().__init__(n_var)
self.xl = -5 * anp.ones(self.n_var)
self.xl[0] = 0.0
self.xu = 5 * anp.ones(self.n_var)
self.xu[0] = 1.0
self.func = self._evaluate
Example 15: __init__
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import ones [as alias]
def __init__(self, const_1=5, const_2=0.1):
    # define lower and upper bounds - 1d arrays with length equal to the number of variables
xl = -5 * anp.ones(10)
xu = 5 * anp.ones(10)
super().__init__(n_var=10, n_obj=1, n_constr=2, xl=xl, xu=xu, evaluation_of="auto")
# store custom variables needed for evaluation
self.const_1 = const_1
self.const_2 = const_2
    # implement the function evaluation below - the arrays to fill are provided directly
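The trailing comment refers to pymoo's evaluation hook, which the snippet truncates; a hedged sketch of what such a method typically looks like inside this class (the objective and constraints below are placeholders, not the problem's real definitions):

import autograd.numpy as anp

def _evaluate(self, x, out, *args, **kwargs):
    # placeholder objective: a shifted sphere function
    out["F"] = anp.sum(anp.square(x - self.const_2), axis=1)
    # two placeholder constraints, feasible when <= 0
    g1 = self.const_1 - anp.sum(x, axis=1)
    g2 = anp.sum(x, axis=1) - 2 * self.const_1
    out["G"] = anp.column_stack([g1, g2])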