This article collects typical usage examples of the numpy.nditer function in Python. If you have been wondering what exactly nditer does, how to call it, and what real-world nditer code looks like, the curated examples below may help.
Fifteen code examples of the nditer function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps recommend better Python code examples.
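Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below, just plain NumPy) of the two nditer patterns that recur throughout them: tracking the current position with the multi_index flag, and modifying elements in place through the readwrite op flag.

import numpy as np

a = np.arange(6).reshape(2, 3)

# Read-only iteration that also tracks the current position.
it = np.nditer(a, flags=['multi_index'])
for x in it:
    print(it.multi_index, x)

# In-place modification: request 'readwrite' access and assign
# through the zero-dimensional view via x[...].
with np.nditer(a, op_flags=['readwrite']) as it:
    for x in it:
        x[...] = 2 * x

print(a)  # every element has been doubled in place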
Example 1: group_ref_color_atom_overlaps
def group_ref_color_atom_overlaps(results):
    """
    Create a 3D masked array containing all overlap scores.

    Parameters
    ----------
    results : array_like
        2D array containing reference molecule color atom overlap results.
    """
    # get maximum number of ref color atoms
    # don't use `for result in it` because that gives an array of size 1
    max_size = 0
    it = np.nditer(results, flags=['multi_index', 'refs_ok'])
    for _ in it:
        max_size = max(max_size, len(results[it.multi_index]))
    # build a masked array containing results
    # don't use data[it.multi_index][:result.size] because that assigns
    # to a view and not to data
    data = np.ma.masked_all((results.shape[:2] + (max_size,)), dtype=float)
    it = np.nditer(results, flags=['multi_index', 'refs_ok'])
    for _ in it:
        i, j = it.multi_index
        result = results[i, j]
        data[i, j, :result.size] = result
    return data
Example 2: test_iter_allocate_output_subtype
def test_iter_allocate_output_subtype():
    # Make sure that the subtype with priority wins
    # 2018-04-29: moved here from core.tests.test_nditer, given the
    # matrix specific shape test.
    # matrix vs ndarray
    a = np.matrix([[1, 2], [3, 4]])
    b = np.arange(4).reshape(2, 2).T
    i = np.nditer([a, b, None], [],
                  [['readonly'], ['readonly'], ['writeonly', 'allocate']])
    assert_(type(i.operands[2]) is np.matrix)
    assert_(type(i.operands[2]) is not np.ndarray)
    assert_equal(i.operands[2].shape, (2, 2))
    # matrix always wants things to be 2D
    b = np.arange(4).reshape(1, 2, 2)
    assert_raises(RuntimeError, np.nditer, [a, b, None], [],
                  [['readonly'], ['readonly'], ['writeonly', 'allocate']])
    # but if subtypes are disabled, the result can still work
    i = np.nditer([a, b, None], [],
                  [['readonly'], ['readonly'],
                   ['writeonly', 'allocate', 'no_subtype']])
    assert_(type(i.operands[2]) is np.ndarray)
    assert_(type(i.operands[2]) is not np.matrix)
    assert_equal(i.operands[2].shape, (1, 2, 2))
Example 3: launch_boolean_query
def launch_boolean_query(self, query, num_results):
    doc_relevance_vector = np.zeros(len(self.doc_index.index))
    query_feature_vector = \
        helpers.create_doc_index(self.dictionary, helpers.docs2bows([query], self.dictionary)).index[0]
    iter_count = 0
    for doc_feature_vector in self.doc_index.index:
        if np.sum(query_feature_vector) > 0 and np.array_equal(
                np.where((query_feature_vector > 0) & (doc_feature_vector > 0)),
                np.where(query_feature_vector > 0)):
            doc_relevance_vector[iter_count] = 1
        iter_count += 1
    relevant_docs = np.where(doc_relevance_vector == 1)[0]
    if relevant_docs.size == 0:
        return []
    else:
        results_shown = 0
        for doc in np.nditer(relevant_docs):
            if results_shown < num_results:
                print('[ID: ' + str(doc + 1) + '] ' + self.corpus[doc])
                results_shown += 1
        ranking = []
        for doc in np.nditer(relevant_docs):
            ranking.append((doc, 1))
        return ranking
Example 4: _form_slip_xyz_file_string
def _form_slip_xyz_file_string(self):
    _txt = ''
    for lon, lat, s in zip(np.nditer(self.lons),
                           np.nditer(self.lats),
                           np.nditer(self.slip)):
        _txt += '%f %f %f\n' % (lon, lat, s)
    return _txt
Example 5: var_inner
def var_inner(self, var_v1, var_v2):
    v1 = []
    v2 = []
    for m1, m2 in zip(var_v1, var_v2):
        v1 = v1 + [x for x in np.nditer(m1, op_flags=['readwrite'])]
        v2 = v2 + [x for x in np.nditer(m2, op_flags=['readwrite'])]
    return np.inner(v1, v2)
Example 6: descend_weights_numeric
def descend_weights_numeric(cost, weights, reg, learn, step):
    """
    Numerical gradient descent over the weights.

    cost    - objective function, taking no arguments, without regularisation
    weights - arrays whose derivatives will be approximated
    reg     - regularisation factor
    learn   - (negative) learning rate
    step    - step size for the numerical derivative
    """
    # `zeros` and `nditer` are used unqualified, so the module is assumed to
    # import them directly from numpy; `add_reg` and `descend` are helpers
    # defined elsewhere in the same module.
    # First pass: approximate each partial derivative with a forward
    # difference, (cost(w + step) - cost(w)) / step, plus regularisation.
    derivative = []
    for arr in weights:
        der = zeros(arr.shape)
        it = nditer(arr, flags=['multi_index'], op_flags=['readwrite'])
        for value in it:
            old_val = value.copy()
            old_obj = cost()
            value[...] += step
            new_obj = cost()
            value[...] = old_val
            grad = (new_obj - old_obj) / step
            grad = add_reg(old_val, grad, reg)
            der[it.multi_index] = grad
        derivative.append(der)
    # Second pass: apply the update to the weights in place.
    for n, arr in enumerate(weights):
        der = derivative[n]
        it = nditer(arr, flags=['multi_index'], op_flags=['readwrite'])
        for value in it:
            value[...] = descend(value[...], der[it.multi_index] * learn)
Example 7: search
def search(self, fn, top_n=10, sim_thresh=None):
    """
    Retrieve faces from the database.

    Returns the imgIDs of the top_n most similar faces, or None on failure.
    """
    if top_n > len(self.data):
        top_n = len(self.data)
    aligned_fn = send2align(fn)
    aligned_arr = path2arr(aligned_fn)
    if aligned_arr is None:
        print("align none.")
        return None
    deepIDfea = self.model.getID([aligned_arr])[0]
    sims = [cosine_similarity(deepIDfea, item[1])[0][0] for item in self.data]
    # print(len(self.data), len(sims))
    for i in range(len(sims)):
        print(sims[i], self.data[i][0])
    sort_index = np.argsort(-np.array(sims))
    result = []
    if sim_thresh is None:
        for index in np.nditer(sort_index):
            cur_id = self.data[index][0].split("-")[0]
            if cur_id not in result and len(result) < top_n:
                result.append(cur_id)
        return result
    else:
        for index in np.nditer(sort_index):
            if sims[index] < sim_thresh:
                break
            cur_id = self.data[index][0].split("-")[0]
            if cur_id not in result:
                result.append(cur_id)
        return result
Example 8: __init__
def __init__(self, maxResult=10, gridSpec=None, verbose=True):
    self.gridSpec = gridSpec
    self.maxResult = maxResult
    self.enableGrid = False
    self.verbose = verbose
    # Calculate exact grid
    self.grid = []
    gsTau = self.gridSpec[0]
    gsS = self.gridSpec[1]
    if len(gsTau) > 1 and len(gsS) > 1:
        self.enableGrid = True
        countTau = 5
        countS = 5
        if len(gsTau) > 2:
            countTau = int(gsTau[2])
        if len(gsS) > 2:
            countS = int(gsS[2])
        minTau = gsTau[0] - gsTau[1]
        maxTau = (gsTau[0] + gsTau[1]) * (1 + (1 / (2 * countTau)))
        minS = gsS[0] - gsS[1]
        maxS = (gsS[0] + gsS[1]) * (1 + (1 / (2 * countS)))
        tau = np.arange(minTau, maxTau, (gsTau[1] * 2.0) / countTau)
        S = np.arange(minS, maxS, (gsS[1] * 2.0) / countS)
        for t in np.nditer(tau):
            for s in np.nditer(S):
                self.grid.append(np.array([t, s]))
        self.dTau = tau[1] - tau[0]
        self.dS = S[1] - S[0]
        self.bounds = [[minTau, maxTau], [minS, maxS]]
Example 9: run
def run(self):
    # delta mu iteration (outer loop)
    for dmu in np.nditer(self.delta_mu):
        data = []
        self.mu[0] += dmu
        self.mu[1] = -self.mu[0]
        self.x_[1] = self.x_1
        self.x_[0] = 1 - self.x_1
        print(' mu = {:06.4f}:'.format(self.mu[0].item(0)))
        # temperature iteration (inner loop)
        for temp in np.nditer(self.temp):
            self.beta = np.float64(pow(self.bzc * temp, -1))
            # calculate
            self.__run()
            # push result into data
            data.append({'temp': temp.item(0), 'c': self.x_[1].item(0)})
            print(' T = {:06.3f}K, c = {:06.6f}, count = {}'.
                  format(temp.item(0), self.x_[1].item(0), self.count))
        print('\n')
        # save result to output
        self.output['Results'].append(
            {'mu': self.mu[0].item(0), 'data': data})
        self.mu[0] -= dmu
Example 10: calc
def calc(self, input):
    """
    Calculates the network output for the given input
    @param input An array of inputs [in1, in2, ...]
    @return lastNetResult
    """
    lastNetResult = np.array(input)
    # save each layer's in/output for training
    self.inputs = []
    self.outputs = []
    for i in range(len(self.layout) - 1):
        # append bias
        # self.outputFun(lastNetResult)
        lastNetResult = np.hstack((lastNetResult, [1]))
        self.inputs.append(lastNetResult)
        # calc result
        lastNetResult = np.dot(self.weights[i], lastNetResult)
        if i == len(self.layout) - 2:
            # different activation function for last layer
            lastNetResult = np.array(list(map(
                self.last_layer_transfer, np.nditer(lastNetResult))))
        else:
            # lastNetResult = self.layer_transfer(lastNetResult)
            lastNetResult = np.array(list(map(
                self.layer_transfer, np.nditer(lastNetResult))))
        self.outputs.append(lastNetResult)
    return lastNetResult
Example 11: test_external_loop
def test_external_loop(self):
    from numpy import arange, nditer, array
    a = arange(24).reshape(2, 3, 4)
    import sys
    if '__pypy__' in sys.builtin_module_names:
        raises(NotImplementedError, nditer, a, flags=['external_loop'])
        skip('nditer external_loop not implemented')
    r = []
    n = 0
    for x in nditer(a, flags=['external_loop']):
        r.append(x)
        n += 1
    assert n == 1
    assert (array(r) == range(24)).all()
    r = []
    n = 0
    for x in nditer(a, flags=['external_loop'], order='F'):
        r.append(x)
        n += 1
    assert n == 12
    assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21],
                         [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23]]).all()
    e = raises(ValueError, 'r[0][0] = 0')
    assert str(e.value) == 'assignment destination is read-only'
    r = []
    for x in nditer(a.T, flags=['external_loop'], order='F'):
        r.append(x)
    array_r = array(r)
    assert len(array_r.shape) == 2
    assert array_r.shape == (1, 24)
    assert (array(r) == arange(24)).all()
Example 12: process
def process(self):
    # counts
    self.count += 1
    # calculate eta
    eta_sum = np.float64(0)
    dt_ = np.zeros((2, 2, 2, 2, 2, 2), np.float64)
    it = np.nditer(dt_, flags=['multi_index'])
    while not it.finished:
        i, j, k, l, m, n = it.multi_index
        dt_[i, j, k, l, m, n] = __eta_dt(self, i, j, k, l, m, n)
        eta_sum += dt_[i, j, k, l, m, n]
        it.iternext()
    ############################
    # normalization
    ############################
    self.checker = np.float64(0)
    # 4-body
    self.m41_ = np.zeros((2, 2, 2, 2), np.float64)
    # 3-body
    self.m31_ = np.zeros((2, 2, 2), np.float64)
    # pair
    self.m21_ = np.zeros((2, 2), np.float64)
    self.m22_ = np.zeros((2, 2), np.float64)
    m22_ = np.zeros((2, 2), np.float64)
    # point
    self.x_ = np.zeros((2), np.float64)
    it = np.nditer(dt_, flags=['multi_index'])
    while not it.finished:
        i, j, k, l, m, n = it.multi_index
        # print('self.zt_{} is: {}'.format(it.multi_index, self.zt_[i, j, k]))
        dt_[i, j, k, l, m, n] /= eta_sum
        self.checker += np.absolute(dt_[i, j, k, l, m, n] -
                                    self.dt_[i, j, k, l, m, n])
        # dt_
        self.dt_[i, j, k, l, m, n] = dt_[i, j, k, l, m, n]
        # m41_
        self.m41_[i, j, k, l] += self.dt_[i, j, k, l, m, n]
        # m31_
        self.m31_[i, m, k] += self.dt_[i, j, k, l, m, n]
        # m21_
        self.m21_[i, j] += self.dt_[i, j, k, l, m, n]
        # m22_
        self.m22_[j, n] += self.dt_[i, j, k, l, m, n]
        m22_[i, m] += self.dt_[i, j, k, l, m, n]
        # x_
        self.x_[i] += self.dt_[i, j, k, l, m, n]
        it.iternext()
Example 13: buildDistanceMatrix
def buildDistanceMatrix(self):
    for head, ngrams in self.head_clusters.items():
        word_indices = []
        stmt_indices = []
        priority_indices = []
        feature_words = []
        sections = []
        dm_w_rows = []
        dm_s_rows = []
        dm_p_rows = []
        for ngram in ngrams:
            word_indices.append(ngram[3][1])
            stmt_indices.append(ngram[3][0])
            priority_indices.append(ngram[1])
            feature_words.append(ngram[0])
            sections.append(ngram[-1])
        word_indices_clone = word_indices
        stmt_indices_clone = stmt_indices
        priority_indices_clone = priority_indices
        for word_index, stmt_index, priority_index in zip(word_indices, stmt_indices, priority_indices):
            dm_w_row = []
            dm_s_row = []
            dm_p_row = []
            for word_index_clone, stmt_index_clone, priority_index_clone in zip(word_indices_clone, stmt_indices_clone, priority_indices_clone):
                dm_w_row.append(fabs(((1 + word_index) * (1 + stmt_index)) - ((1 + word_index_clone) * (1 + stmt_index_clone))))
                dm_s_row.append(fabs((1 + stmt_index) - (1 + stmt_index_clone)))
                dm_p_row.append(fabs(float(priority_index) - float(priority_index_clone)))
            dm_w_rows.append(dm_w_row)
            dm_s_rows.append(dm_s_row)
            dm_p_rows.append(dm_p_row)
        dm_w = np.array(dm_w_rows)
        dm_s = np.array(dm_s_rows)
        dm_p = np.array(dm_p_rows)
        # print(dm_w)
        # print(dm_s)
        # print(dm_p)
        prox_mat = []
        for w_dist, s_dist, PI in zip(np.nditer(dm_w), np.nditer(dm_s), np.nditer(dm_p)):
            if PI == 0.0:
                proximity_score = ((w_dist + len(np.unique(dm_s) * s_dist)) / (dm_w.shape[0] * len(np.unique(dm_s))))
                prox_mat.append(proximity_score)
            else:
                proximity_score = ((w_dist + len(np.unique(dm_s) * s_dist)) / (dm_w.shape[0] * len(np.unique(dm_s)))) * log10(10 * PI)
                prox_mat.append(proximity_score)
        ps = np.array(prox_mat)
        ps = np.reshape(ps, dm_w.shape)
        # print(ps)
        for r, row in enumerate(ps):
            for i, ele in enumerate(row):
                if ele == min(row):
                    self.f2.writerow([feature_words[r], priority_indices[r], 1 - np.min(row), feature_words[i], sections[r]])
Example 14: test_external_loop
def test_external_loop(self):
    from numpy import arange, nditer, array
    a = arange(24).reshape(2, 3, 4)
    import sys
    r = []
    for x in nditer(a, flags=['external_loop']):
        r.append(x)
    assert len(r) == 1
    assert r[0].shape == (24,)
    assert (array(r) == range(24)).all()
    r = []
    for x in nditer(a, flags=['external_loop'], order='F'):
        r.append(x)
    assert len(r) == 12
    assert (array(r) == [[ 0, 12], [ 4, 16], [ 8, 20], [ 1, 13], [ 5, 17], [ 9, 21],
                         [ 2, 14], [ 6, 18], [10, 22], [ 3, 15], [ 7, 19], [11, 23],
                         ]).all()
    e = raises(ValueError, 'r[0][0] = 0')
    assert str(e.value) == 'assignment destination is read-only'
    r = []
    for x in nditer(a.T, flags=['external_loop'], order='F'):
        r.append(x)
    array_r = array(r)
    assert len(array_r.shape) == 2
    assert array_r.shape == (1, 24)
    assert (array(r) == arange(24)).all()
Example 15: rvs
def rvs(self, loc=0, scale=1, size=1):
    """Random variates.

    Parameters
    ----------
    loc : float or np.ndarray
        0-D or 1-D tensor.
    scale : float or np.ndarray
        0-D or 1-D tensor, with all elements constrained to
        :math:`scale > 0`.
    size : int
        Number of random variable samples to return.

    Returns
    -------
    np.ndarray
        A np.ndarray of dimensions size x shape.
    """
    if not isinstance(loc, np.ndarray):
        loc = np.asarray(loc)
    if not isinstance(scale, np.ndarray):
        scale = np.asarray(scale)
    if len(loc.shape) == 0:
        return stats.norm.rvs(loc, scale, size=size)
    x = []
    for locidx, scaleidx in zip(np.nditer(loc), np.nditer(scale)):
        x += [stats.norm.rvs(locidx, scaleidx, size=size)]
    # Note this doesn't work for multi-dimensional sizes.
    x = np.asarray(x).transpose()
    return x