This article collects and summarizes typical code examples of the autograd.numpy.array method in Python. If you have been wondering what numpy.array does, how it is used, or what real-world examples of numpy.array look like, the curated method examples below may help. You can also explore further usage examples from autograd.numpy, the module this method belongs to.
The following presents 15 code examples of the numpy.array method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: fast_zero_pad
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def fast_zero_pad(arr, pad_width):
    """Fast version of numpy.pad when `mode="constant"`
    Executing `numpy.pad` with zeros is ~1000 times slower
    because it doesn't make use of the `zeros` method for padding.
    Parameters
    ----------
    arr: array
        The array to pad
    pad_width: tuple
        Number of values padded to the edges of each axis.
        See numpy docs for more.
    Returns
    -------
    result: array
        The array padded with zeros
    """
    newshape = tuple([a + ps[0] + ps[1] for a, ps in zip(arr.shape, pad_width)])
    result = np.zeros(newshape, dtype=arr.dtype)
    slices = tuple([slice(start, s - end) for s, (start, end) in zip(result.shape, pad_width)])
    result[slices] = arr
    return result
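A quick usage sketch with values of my own choosing, assuming `autograd.numpy` is imported as `np` and `fast_zero_pad` is defined as above:
import autograd.numpy as np

arr = np.arange(6).reshape(2, 3)
# one row of zeros before/after axis 0, two columns before/after axis 1
padded = fast_zero_pad(arr, ((1, 1), (2, 2)))
print(padded.shape)                                                        # (4, 7)
print(np.array_equal(padded, np.pad(arr, ((1, 1), (2, 2)), mode="constant")))  # True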
Example 2: __init__
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def __init__(self, image, image_fft=None):
    """Initialize the object
    Parameters
    ----------
    image: array
        The real space image.
    image_fft: dict
        A dictionary of {shape: fft_value} for which each different
        shape has a precalculated FFT.
    """
    if image_fft is None:
        self._fft = {}
    else:
        self._fft = image_fft
    self._image = image
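A rough sketch of how such a cache dictionary might be filled before being passed in (hypothetical image and shape; the class that owns this __init__ is not shown in the excerpt, and fast_zero_pad is the helper from Example 1):
import autograd.numpy as np

image = np.random.randn(8, 8)                        # real-space image
fft_cache = {}                                       # {shape: precalculated FFT}
shape = (16, 16)
if shape not in fft_cache:
    padded = fast_zero_pad(image, ((4, 4), (4, 4)))  # zero-pad 8x8 up to 16x16
    fft_cache[shape] = np.fft.fftn(padded)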
Example 3: render
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def render(self, model):
    """Convolve a model to the observation frame
    Parameters
    ----------
    model: array
        The model from `Blend`
    Returns
    -------
    image_model: array
        `model` mapped into the observation frame
    """
    if self._diff_kernels is not None:
        model_images = self.convolve(model)
    else:
        model_images = model
    return model_images[self.slices_for_model]
Example 4: get_loss
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def get_loss(self, model):
    """Computes the loss/fidelity of a given model with respect to the observation
    Parameters
    ----------
    model: array
        The model from `Blend`
    Returns
    -------
    result: array
        Scalar tensor with the likelihood of the model
        given the image data
    """
    model_ = self.render(model)
    if self.frame != self.model_frame:
        images_ = self.images[self.slices_for_images]
        weights_ = self.weights[self.slices_for_images]
    else:
        images_ = self.images
        weights_ = self.weights
    return self.log_norm + np.sum(weights_ * (model_ - images_) ** 2) / 2
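Seen in isolation, the returned value is a weighted Gaussian chi-square term plus a normalization constant. A standalone sketch with made-up arrays (the log_norm convention below is my assumption, not the library's own definition):
import autograd.numpy as np

images = np.random.randn(3, 4)                          # observed pixels
weights = np.full(images.shape, 4.0)                    # inverse-variance weights
model = np.zeros_like(images)                           # candidate model
log_norm = 0.5 * np.sum(np.log(2 * np.pi / weights))    # assumed Gaussian normalization
loss = log_norm + np.sum(weights * (model - images) ** 2) / 2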
Example 5: G
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def G(self):
    full_W = np.array([node.w for node in self.nodes])
    WB = full_W[:, 1:].reshape((self.K, self.K, self.B))
    # Weight matrix is summed over impulse response functions
    WT = WB.sum(axis=2)
    # Impulse response weights are normalized weights
    GT = WB / WT[:, :, None]
    # Then we transpose so that the impulse matrix is (outgoing x incoming x basis)
    G = np.transpose(GT, [1, 0, 2])
    # TODO: Decide if this is still necessary
    for k1 in range(self.K):
        for k2 in range(self.K):
            if G[k1, k2, :].sum() < 1e-2:
                G[k1, k2, :] = 1.0 / self.B
    return G
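The normalization and transpose can be checked on their own with a small random tensor (hypothetical sizes standing in for `self.K` and `self.B`):
import autograd.numpy as np

K, B = 2, 3
WB = np.abs(np.random.randn(K, K, B)) + 0.1   # unnormalized impulse-response weights
WT = WB.sum(axis=2)                            # total weight per node pair
GT = WB / WT[:, :, None]                       # basis weights now sum to 1 per pair
G = np.transpose(GT, [1, 0, 2])                # (outgoing x incoming x basis)
assert np.allclose(G.sum(axis=2), 1.0)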
Example 6: get_treeseq_configs
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def get_treeseq_configs(treeseq, sampled_n):
    mat = np.zeros((len(sampled_n), sum(sampled_n)), dtype=int)
    j = 0
    for i, n in enumerate(sampled_n):
        for _ in range(n):
            mat[i, j] = 1
            j += 1
    mat = scipy.sparse.csr_matrix(mat)
    def get_config(genos):
        derived_counts = mat.dot(genos)
        return np.array([
            sampled_n - derived_counts,
            derived_counts
        ]).T
    for v in treeseq.variants():
        yield get_config(v.genotypes)
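A toy, self-contained run of the same aggregation idea (made-up genotypes; no tree sequence object is needed here):
import autograd.numpy as np
import scipy.sparse

sampled_n = np.array([2, 3])                  # two populations with 2 and 3 samples
mat = scipy.sparse.csr_matrix(
    [[1, 1, 0, 0, 0],                         # columns 0-1 belong to population 0
     [0, 0, 1, 1, 1]])                        # columns 2-4 belong to population 1
genos = np.array([0, 1, 1, 0, 1])             # genotypes at one hypothetical variant
derived = mat.dot(genos)                      # derived allele count per population
config = np.array([sampled_n - derived, derived]).T
print(config.tolist())                        # [[1, 1], [1, 2]]: rows are (ancestral, derived)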
Example 7: _entropy
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def _entropy(self):
    counts = self._total_freqs
    n_snps = float(self.n_snps())
    p = counts / n_snps
    # return np.sum(p * np.log(p))
    ret = np.sum(p * np.log(p))
    # correct for missing data
    sampled_n = np.sum(self.configs.value, axis=2)
    sampled_n_counts = co.Counter()
    assert len(counts) == len(sampled_n)
    for c, n in zip(counts, sampled_n):
        n = tuple(n)
        sampled_n_counts[n] += c
    sampled_n_counts = np.array(
        list(sampled_n_counts.values()), dtype=float)
    ret = ret + np.sum(sampled_n_counts / n_snps *
                       np.log(n_snps / sampled_n_counts))
    assert not np.isnan(ret)
    return ret
Example 8: build_config_list
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def build_config_list(sampled_pops, counts, sampled_n=None, ascertainment_pop=None):
    """
    if sampled_n is not None, counts is the derived allele counts
    if sampled_n is None, counts has an extra trailing axis:
        counts[...,0] is ancestral allele count,
        counts[...,1] is derived allele count
    """
    if sampled_n is not None:
        sampled_n = np.array(sampled_n, dtype=int)
        counts1 = np.array(counts, dtype=int, ndmin=2)
        counts0 = sampled_n - counts1
        counts = np.array([counts0, counts1], dtype=int)
        counts = np.transpose(counts, axes=[1, 2, 0])
    counts = np.array(counts, ndmin=3, dtype=int)
    assert counts.shape[1:] == (len(sampled_pops), 2)
    counts.setflags(write=False)
    return ConfigList(sampled_pops, counts, sampled_n, ascertainment_pop)
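Just the counts reshaping, with made-up numbers (the final `ConfigList` construction from the library is omitted):
import autograd.numpy as np

sampled_n = np.array([4, 6], dtype=int)                    # samples per population
counts1 = np.array([[1, 3], [0, 2]], dtype=int, ndmin=2)   # derived counts at two sites
counts0 = sampled_n - counts1                              # ancestral counts
counts = np.transpose(np.array([counts0, counts1], dtype=int), axes=[1, 2, 0])
print(counts.shape)                                        # (2, 2, 2): site x population x (ancestral, derived)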
Example 9: build_full_config_list
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def build_full_config_list(sampled_pops, sampled_n, ascertainment_pop=None):
    sampled_n = np.array(sampled_n)
    if ascertainment_pop is None:
        ascertainment_pop = [True] * len(sampled_pops)
    ascertainment_pop = np.array(ascertainment_pop)
    ranges = [list(range(n + 1)) for n in sampled_n]
    config_list = []
    for x in it.product(*ranges):
        x = np.array(x, dtype=int)
        if not (np.all(x[ascertainment_pop] == 0) or np.all(
                x[ascertainment_pop] == sampled_n[ascertainment_pop])):
            config_list.append(x)
    return build_config_list(
        sampled_pops, np.array(config_list, dtype=int), sampled_n,
        ascertainment_pop=ascertainment_pop)
Example 10: subsample_probs
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def subsample_probs(self, subconfig):
    """
    Returns the probability of subsampling subconfig
    from each config.
    """
    subconfig = np.array(subconfig)
    total_counts_dict = {p: n for p, n in zip(self.sampled_pops,
                                              subconfig.sum(axis=1))
                         if n > 0}
    derived_counts_dict = {p: [0] * (n + 1)
                           for p, n in total_counts_dict.items()}
    for p, d in zip(self.sampled_pops, subconfig[:, 1]):
        if p in derived_counts_dict:
            derived_counts_dict[p][d] = 1
    num = self.count_subsets(derived_counts_dict, total_counts_dict)
    denom = self.count_subsets({}, total_counts_dict)
    # avoid 0/0
    assert np.all(num[denom == 0] == 0)
    denom[denom == 0] = 1
    return num / denom
# TODO: remove this method (and self.sampled_n attribute)
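The "avoid 0/0" trick above is generic and can be tried on its own with toy numbers:
import autograd.numpy as np

num = np.array([0.0, 2.0, 0.0])
denom = np.array([0.0, 4.0, 5.0])
assert np.all(num[denom == 0] == 0)   # a zero denominator must come with a zero numerator
denom[denom == 0] = 1                 # so 0/0 becomes 0/1 = 0
print(num / denom)                    # [0.  0.5 0. ]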
Example 11: _build_old_new_idxs
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def _build_old_new_idxs(self, folded):
    idxs = self.full_configs._augmented_idxs(folded)
    denom_idx_key = 'denom_idx'
    denom_idx = idxs[denom_idx_key]
    idxs = {k: v[self.sub_idxs]
            for k, v in list(idxs.items()) if k != denom_idx_key}
    old_idxs = np.array(
        list(set(sum(map(list, idxs.values()), [denom_idx]))))
    old_2_new_idxs = {old_id: new_id for new_id,
                      old_id in enumerate(old_idxs)}
    idxs = {k: np.array([old_2_new_idxs[old_id]
                         for old_id in v], dtype=int)
            for k, v in list(idxs.items())}
    idxs[denom_idx_key] = old_2_new_idxs[denom_idx]
    return old_idxs, idxs
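The old-to-new index remapping is a common pattern; a tiny standalone version with made-up indices:
import autograd.numpy as np

old_idxs = np.array([7, 2, 9])                            # scattered indices into a large array
old_2_new_idxs = {old: new for new, old in enumerate(old_idxs)}
remapped = np.array([old_2_new_idxs[i] for i in [9, 7, 2]], dtype=int)
print(remapped)                                           # [2 0 1]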
Example 12: _many_score_cov
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def _many_score_cov(params, data, demo_func, **kwargs):
    params = np.array(params)
    def f_vec(x):
        ret = _composite_log_likelihood(
            data, demo_func(*x), vector=True, **kwargs)
        # centralize
        return ret - np.mean(ret)
    # g_out = einsum('ij,ik', jacobian(f_vec)(params), jacobian(f_vec)(params))
    # but computed in a roundabout way because jacobian implementation is slow
    def _g_out_antihess(x):
        l = f_vec(x)
        lc = make_constant(l)
        return np.sum(0.5 * (l**2 - l * lc - lc * l))
    return autograd.hessian(_g_out_antihess)(params)
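The commented-out einsum hints at what the anti-Hessian trick computes: the sum of outer products of per-observation gradients, i.e. J(x).T @ J(x). A slower but direct version, with a hypothetical f_vec, might look like this:
import autograd
import autograd.numpy as np

def f_vec(x):                                  # hypothetical stand-in for per-locus log-likelihoods
    return np.array([x[0] * x[1], x[0] ** 2, np.sin(x[1])])

x0 = np.array([1.0, 2.0])
J = autograd.jacobian(f_vec)(x0)               # shape (3, 2)
g_out = np.einsum('ki,kj->ij', J, J)           # J.T @ J, what the Hessian trick returns for its own f_vec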
Example 13: sfs
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def sfs(self, n):
    if n == 0:
        return np.array([0.])
    Et_jj = self.etjj(n)
    #assert np.all(Et_jj[:-1] - Et_jj[1:] >= 0.0) and np.all(Et_jj >= 0.0) and np.all(Et_jj <= self.tau)
    ret = np.sum(Et_jj[:, None] * Wmatrix(n), axis=0)
    before_tmrca = self.tau - np.sum(ret * np.arange(1, n) / n)
    # ignore branch length above untruncated TMRCA
    if self.tau == float('inf'):
        before_tmrca = 0.0
    ret = np.concatenate((np.array([0.0]), ret, np.array([before_tmrca])))
    return ret
# def transition_prob(self, v, axis=0):
#     return moran_model.moran_action(self.scaled_time, v, axis=axis)
Example 14: __init__
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def __init__(self):
    super().__init__(n_var=4, n_obj=1, n_constr=4, type_var=anp.double)
    self.xl = anp.array([1, 1, 10.0, 10.0])
    self.xu = anp.array([99, 99, 200.0, 200.0])
Example 15: __init__
# Required import: from autograd import numpy [as alias]
# Or: from autograd.numpy import array [as alias]
def __init__(self):
    self.n_var = 13
    self.n_constr = 9
    self.n_obj = 1
    self.xl = anp.zeros(self.n_var)
    self.xu = anp.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 100, 100, 100, 1])
    super(G1, self).__init__(n_var=self.n_var, n_obj=self.n_obj, n_constr=self.n_constr, xl=self.xl, xu=self.xu,
                             type_var=anp.double)