This article collects typical usage examples of the numpy.fromiter function in Python. If you are unsure what fromiter does, how to call it, or what real-world usage looks like, the curated examples below should help.
Fifteen code examples of the fromiter function are shown below, drawn from real projects and ordered by popularity.
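Before the project examples, a minimal self-contained sketch of what np.fromiter does: it consumes any iterable and builds a one-dimensional array of the given dtype, optionally preallocating when count is known in advance (a pattern several examples below rely on).

import numpy as np

# Build an array directly from a generator, with no intermediate list.
squares = np.fromiter((i * i for i in range(6)), dtype=np.int64)
print(squares)   # [ 0  1  4  9 16 25]

# Passing count lets fromiter preallocate instead of growing its buffer.
fast = np.fromiter((i * i for i in range(6)), dtype=np.int64, count=6)
assert (squares == fast).all()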
Example 1: decode4js
def decode4js(obj):
    """
    return decoded Python object from encoded object.
    """
    out = obj
    if isinstance(obj, dict):
        classname = obj.pop('__class__', None)
        if classname is None:
            return obj
        elif classname == 'Complex':
            out = obj['value'][0] + 1j*obj['value'][1]
        elif classname in ('List', 'Tuple'):
            out = []
            for item in obj['value']:
                out.append(decode4js(item))
            if classname == 'Tuple':
                out = tuple(out)
        elif classname == 'Array':
            if obj['__dtype__'].startswith('complex'):
                re = np.fromiter(obj['value'][0], dtype='double')
                im = np.fromiter(obj['value'][1], dtype='double')
                out = re + 1j*im
            else:
                out = np.fromiter(obj['value'], dtype=obj['__dtype__'])
            out.shape = obj['__shape__']
        elif classname in ('Dict', 'Parameter', 'Group'):
            out = {}
            for key, val in obj.items():
                out[key] = decode4js(val)
            if classname == 'Parameter':
                out = Parameter(**out)
            elif classname == 'Group':
                out = Group(**out)
    return out
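A minimal sketch of exercising the Array branch above. The encoded-dict layout ('__class__', '__dtype__', '__shape__', 'value') is inferred from the branches themselves; Parameter and Group belong to the original project, so only the array path is shown:

import numpy as np

encoded = {'__class__': 'Array', '__dtype__': 'float64',
           '__shape__': (2, 2), 'value': [1.0, 2.0, 3.0, 4.0]}
arr = decode4js(encoded)   # decode4js as defined above
print(arr)
# [[1. 2.]
#  [3. 4.]]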
Example 2: calculate_switch_stats
def calculate_switch_stats(mappable, linkage_map_file, linkage_map_format, MST_grouping_threshold):
    genotypes_of_locus = mappable
    if linkage_map_format.lower() == 'mst':
        ini_map, loci_on_lg = parse_map_file_MST(linkage_map_file)
    elif linkage_map_format.lower() == 'rqtl':
        ini_map, loci_on_lg = parse_map_file_rqtl(linkage_map_file)
    else:
        raise ValueError("unknown linkage_map_format")
    int_arr = convert_genotypes_to_int_array(genotypes_of_locus, ini_map)
    num_loci = int_arr.shape[0]
    num_pairs = int((num_loci * (num_loci - 1)) / 2)
    pairs = itertools.combinations(int_arr, 2)
    R = numpy.fromiter(getR(pairs), dtype=numpy.float64, count=num_pairs)
    # the first fromiter call exhausted the generator, so rebuild it
    pairs = itertools.combinations(int_arr, 2)
    NR = numpy.fromiter(getNR(pairs), dtype=numpy.float64, count=num_pairs)
    ml_R_frac = get_ml_R_frac(R=R, NR=NR)
    Z = get_LOD(R=R, NR=NR, R_frac=ml_R_frac)
    NR_matrix = get_NR_matrix(NR)
    #rf = get_rf_matrix(ml_R_frac)
    lod = get_lod_matrix(Z)
    index_of_lg = get_index_of_LG(loci_on_lg)
    lgs_longer_than_1 = find_LGs_with_multiple_loci(index_of_lg, loci_on_lg)
    #mean_rf = get_LG_pairwise_mean_rf(lgs_longer_than_1, rf, index_of_lg)
    #mean_lod = get_LG_pairwise_mean_lod(lgs_longer_than_1, lod, index_of_lg)
    sum_lod = get_LG_pairwise_sum_lod(lgs_longer_than_1, lod, index_of_lg)
    sq_sum_lod = get_square_form(sum_lod, lgs_longer_than_1)
    n = len(next(iter(mappable.values())))  # number of individuals (Python 3; was mappable.items()[0][1])
    NR_threshold = get_threshold_recombinants_for_same_LGs(n, MST_grouping_threshold)
    NR_under_threshold = get_LG_pairwise_count_NR_threshold(lgs_longer_than_1, NR_matrix, index_of_lg, threshold=NR_threshold)
    sq_NR_matrix = get_square_form(NR_under_threshold, lgs_longer_than_1)
    return ini_map, sq_sum_lod, sq_NR_matrix, R, NR, lgs_longer_than_1
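The instructive pattern here is materializing a pairwise statistic from itertools.combinations without building an intermediate list, precomputing count as n(n-1)/2 so fromiter can preallocate. A self-contained sketch (getR/getNR and the map parsers are project code, so a simple pairwise-difference count stands in):

import itertools
import numpy as np

rows = np.array([[0, 1, 0], [1, 1, 0], [0, 0, 1]])
num_pairs = len(rows) * (len(rows) - 1) // 2

# Number of positions at which each pair of rows differs.
diffs = np.fromiter(((a != b).sum() for a, b in itertools.combinations(rows, 2)),
                    dtype=np.float64, count=num_pairs)
print(diffs)   # [1. 2. 3.]

Note that the original rebuilds the combinations generator before the second fromiter call: the first pass consumes it entirely.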
Example 3: _stats_to_movie_results
def _stats_to_movie_results(bam_stats, movie_names):
    """
    Separate out per-movie results from process stats.
    """
    results = []
    movies = sorted(list(movie_names))
    for movie_name in movies:
        def _base_calls():
            for r in bam_stats:
                if r.movieName == movie_name:
                    yield r.qLen
        def _num_passes():
            for r in bam_stats:
                if r.movieName == movie_name:
                    yield r.numPasses
        def _accuracy():
            for r in bam_stats:
                if r.movieName == movie_name:
                    yield r.readScore
        read_lengths = np.fromiter(_base_calls(), dtype=np.int64, count=-1)
        num_passes = np.fromiter(_num_passes(), dtype=np.int64, count=-1)
        # np.float was removed in NumPy 1.24; np.float64 is the equivalent dtype
        accuracy = np.fromiter(_accuracy(), dtype=np.float64, count=-1)
        results.append(MovieResult(
            movie_name, read_lengths, accuracy, num_passes))
    return results
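The pattern shown is filtering records by attribute inside a generator function and letting count=-1 tell fromiter to size the array itself. A minimal sketch with a made-up Record stand-in for the BAM statistics objects:

from collections import namedtuple
import numpy as np

Record = namedtuple('Record', ['movieName', 'qLen'])
records = [Record('m1', 500), Record('m2', 800), Record('m1', 650)]

read_lengths = np.fromiter((r.qLen for r in records if r.movieName == 'm1'),
                           dtype=np.int64, count=-1)
print(read_lengths)   # [500 650]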
Example 4: _bam_file_to_movie_results
def _bam_file_to_movie_results(file_name):
    """
    Read what is assumed to be a single BAM file (as a ConsensusReadSet).
    """
    from pbcore.io import IndexedBamReader
    results = []
    with IndexedBamReader(file_name) as bam:
        for rg in bam.readGroupTable:
            assert rg["ReadType"] == "CCS"
        movies = list(set([rg["MovieName"] for rg in bam.readGroupTable]))
        for movie_name in movies:
            def _base_calls():
                for r in bam:
                    if r.movieName == movie_name:
                        yield r.peer.query_length
            def _num_passes():
                for r in bam:
                    if r.movieName == movie_name:
                        yield r.numPasses
            def _accuracy():
                for r in bam:
                    if r.movieName == movie_name:
                        yield r.readScore
            read_lengths = np.fromiter(_base_calls(), dtype=np.int64, count=-1)
            num_passes = np.fromiter(_num_passes(), dtype=np.int64, count=-1)
            # np.float was removed in NumPy 1.24; np.float64 is the equivalent dtype
            accuracy = np.fromiter(_accuracy(), dtype=np.float64, count=-1)
            results.append(MovieResult(
                file_name, movie_name, read_lengths, accuracy, num_passes))
    return results
Example 5: _listparser
def _listparser(dlist, freq=None):
    "Constructs a DateArray from a list."
    dlist = np.array(dlist, copy=False, ndmin=1)
    # Case #1: dates as strings .................
    if dlist.dtype.kind in 'SU':
        #...construct a list of dates
        dlist = np.fromiter((Date(freq, string=s).value for s in dlist),
                            dtype=int)
    # Case #2: dates as numbers .................
    elif dlist.dtype.kind in 'if':
        #...hopefully, they are values
        pass
    # Case #3: dates as objects .................
    elif dlist.dtype.kind == 'O':
        template = dlist[0]
        #...as Date objects
        if isinstance(template, Date):
            dlist = np.fromiter((d.value for d in dlist), dtype=int)
            if freq in (_c.FR_UND, None):
                freq = template.freq
        #...as mx.DateTime objects
        elif hasattr(template, 'absdays'):
            dlist = np.fromiter((Date(freq, datetime=m) for m in dlist),
                                dtype=int)
        #...as datetime objects
        elif hasattr(template, 'toordinal'):
            dlist = np.fromiter((Date(freq, datetime=d) for d in dlist),
                                dtype=int)
    #
    result = dlist.view(DateArray)
    result.freq = freq
    return result
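Date, DateArray, and _c come from the (now retired) scikits.timeseries package; the reusable idea is converting a list of objects to their integer representations in one pass. A sketch of the same idiom using standard-library dates:

import datetime
import numpy as np

dates = [datetime.date(2024, 1, 1), datetime.date(2024, 1, 15)]
ordinals = np.fromiter((d.toordinal() for d in dates), dtype=int)
print(ordinals)   # [738886 738900]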
Example 6: _fit_once
def _fit_once(self, X, initial_order):
    adj_matrix = self.adj_matrix_strategy(X)
    N = adj_matrix.shape[0]
    degrees = adj_matrix.sum(axis=1)
    boundary = np.zeros(N)
    ordering = prc.createOrder(initial_order)
    policy = prc.tiloPolicyStruct()
    if self.refine_order:
        prc.RefineTILO(adj_matrix, ordering, policy)
    else:
        prc.TILO(adj_matrix, ordering, policy)
    boundary = np.fromiter(ordering.b.b, dtype=float)[:-1]
    ordering = np.fromiter(ordering.vdata, dtype=int)
    #print 'BDR', boundary
    #print 'PRS', pinch_ratios(boundary)
    #print 'ORD', ordering
    pinch_ratios, clusters = self._find_clusters(ordering, boundary)
    labels = np.zeros(N, dtype=int)
    for i, cluster in enumerate(clusters):
        labels[cluster] = i
    return ordering, boundary, labels, pinch_ratios
Example 7: system_values
def system_values(self, when: Union[Real, Sequence[Real]], which: Union[str, Sequence[str]] = None):
    which = self._observable_names if which is None else which
    max_when = when if isinstance(when, Real) else max(when)
    self.integrate_to(max_when)
    if len(self.solution_times) == 1:
        # Handle scipy bug when there is only one time point
        # TODO (drhagen): super hacky solution here
        state_interpolator = lambda t: self.solution_states[0]
    else:
        state_interpolator = interp1d(self.solution_times, self.solution_states, axis=0,
                                      assume_sorted=True, copy=False)
    # Extract values from solution
    output_fun = self.ode_system.outputs
    if isinstance(which, str) and isinstance(when, Real):
        states = state_interpolator(when)
        return output_fun(which, when, states)
    elif isinstance(which, str):
        return np.fromiter((output_fun(which, when_i, state_interpolator(when_i)) for when_i in when),
                           'float', count=len(when))
    elif isinstance(when, Real):
        states = state_interpolator(when)
        return np.fromiter((output_fun(which_i, when, states) for which_i in which),
                           'float', count=len(which))
    else:
        def values():
            for when_i in when:
                states = state_interpolator(when_i)
                for which_i in which:
                    yield output_fun(which_i, when_i, states)
        values = np.fromiter(values(), 'float', count=len(which)*len(when))
        return np.reshape(values, [len(when), len(which)])
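Because np.fromiter builds one-dimensional arrays, the final branch flattens the (time, output) grid with a nested generator and reshapes afterwards. A self-contained sketch of that idiom, with a stand-in output function:

import numpy as np

times = [0.0, 0.5, 1.0]
names = ['a', 'b']

def f(name, t):   # stand-in for the ODE system's output function
    return t + (0.0 if name == 'a' else 100.0)

flat = np.fromiter((f(n, t) for t in times for n in names),
                   dtype=float, count=len(times) * len(names))
grid = flat.reshape(len(times), len(names))
print(grid)
# [[  0.  100. ]
#  [  0.5 100.5]
#  [  1.  101. ]]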
Example 8: _computeNormalizations
def _computeNormalizations(self):
    # Use a generator instead of a list to gain speed
    # (list() added for Python 3, where dict views cannot be sliced)
    generator1 = (x.value for x in list(self.parameters.values())[1::2])
    self.alphas = numpy.fromiter(generator1, float)
    #alphasDiff = self.alphas[:-1] - self.alphas[1:]
    generator2 = (x.value for x in list(self.parameters.values())[2::2])
    self.betas = numpy.fromiter(generator2, float)
    #betasDiff = self.betas[:-1] - self.betas[1:]
    #bLogEpivot = self.betas * self.logPivotEnergies
    #bLogEpivotDiff = bLogEpivot[1:] - bLogEpivot[:-1]
    self.normalizations[0] = self.parameters['K'].value
    self.normalizations[1:-1] = (self._logP(self.energyBreaks, self.alphas[:-1], self.betas[:-1], self.pivotEnergies[:-1]) /
                                 self._logP(self.energyBreaks, self.alphas[1:], self.betas[1:], self.pivotEnergies[1:]))
    self.normalizations[-1] = 1.0
    # This computes the cumulative product of the array
    # (i.e., the first element is a0, the second a0*a1,
    # the third a0*a1*a2, and so on...)
    self.products = numpy.cumprod(self.normalizations)
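The trick worth copying is pulling every other parameter value into an array via a generator plus fromiter. A minimal sketch with a plain dict (the real code holds parameter objects exposing .value; plain floats stand in here):

import numpy as np

params = {'K': 1.0, 'alpha1': -1.5, 'beta1': 2.0, 'alpha2': -2.0, 'beta2': 2.3}
vals = list(params.values())   # insertion order is guaranteed in Python 3.7+

alphas = np.fromiter((v for v in vals[1::2]), float)   # every other entry, from index 1
betas = np.fromiter((v for v in vals[2::2]), float)    # every other entry, from index 2
print(alphas, betas)   # [-1.5 -2. ] [2.  2.3]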
Example 9: get_world_endpoints
def get_world_endpoints(edges, pos, scale):
    """Returns the edge endpoints in homogeneous world coordinates.

    Parameters
    ----------
    edges : iterable of Edge
    pos : numpy array
    scale : float

    Returns
    -------
    tuple of iterable of points
        a value in the form `(start_points, end_points)`, where
        `start_points` and `end_points` are in the form of a numpy matrix
    """
    edge_starts = (coord
                   for edge in edges
                   for coord in chain(scale * edge.start + pos, (1.0, )))
    edge_ends = (coord
                 for edge in edges
                 for coord in chain(scale * edge.end + pos, (1.0, )))
    # np.float was removed in NumPy 1.24; np.float64 is the equivalent dtype
    homo_starts = np.fromiter(edge_starts, np.float64, count=4 * len(edges))
    homo_ends = np.fromiter(edge_ends, np.float64, count=4 * len(edges))
    homo_starts = homo_starts.reshape((len(edges), 4))
    homo_ends = homo_ends.reshape((len(edges), 4))
    return homo_starts, homo_ends
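A sketch of the flatten-then-reshape idiom above with a minimal Edge stand-in (the real Edge class belongs to the source project); itertools.chain appends the homogeneous 1.0 after each transformed 3-vector:

from itertools import chain
from collections import namedtuple
import numpy as np

Edge = namedtuple('Edge', ['start', 'end'])
edges = [Edge(np.array([0.0, 0.0, 0.0]), np.array([1.0, 0.0, 0.0]))]
pos, scale = np.array([10.0, 0.0, 0.0]), 2.0

starts = (c for e in edges for c in chain(scale * e.start + pos, (1.0,)))
homo = np.fromiter(starts, np.float64, count=4 * len(edges)).reshape(len(edges), 4)
print(homo)   # [[10.  0.  0.  1.]]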
Example 10: test_vector
def test_vector(self):
    v1 = Vector(self.list1)
    v2 = Vector(2*x for x in self.list1)
    self.assertEqual(2*v1, v2)
    n1 = np.fromiter(v1, int)
    n2 = np.fromiter(v2, int)
    self.assertEqual(v1.dot(v2), np.dot(n1, n2))
Example 11: get_charge_resolution
def get_charge_resolution(self):
    """
    Calculate and obtain the charge resolution graph arrays.

    Returns
    -------
    true_charge : ndarray
        The X axis true charges.
    chargeres : ndarray
        The Y axis charge resolution values.
    chargeres_error : ndarray
        The error on the charge resolution.
    scaled_chargeres : ndarray
        The Y axis charge resolution divided by the Goal.
    scaled_chargeres_error : ndarray
        The error on the charge resolution divided by the Goal.
    """
    log.debug('[chargeres] Calculating charge resolution')
    true_charge = np.fromiter(iter(self.sum_dict.keys()), dtype=int)
    summed_charge = np.fromiter(iter(self.sum_dict.values()), dtype=float)
    num = np.fromiter(iter(self.n_dict.values()), dtype=int)
    chargeres = np.sqrt((summed_charge / num) + true_charge) / true_charge
    chargeres_error = chargeres * (1 / np.sqrt(2 * num))
    scale = self.goal(true_charge)
    scaled_chargeres = chargeres / scale
    scaled_chargeres_error = chargeres_error / scale
    return (true_charge, chargeres, chargeres_error,
            scaled_chargeres, scaled_chargeres_error)
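Pulling parallel arrays out of a dict, as above, relies on keys() and values() iterating in the same order, which Python guarantees; the iter() wrappers are redundant in modern Python, since fromiter accepts any iterable. A minimal sketch:

import numpy as np

sum_dict = {10: 4.0, 50: 9.0, 100: 16.0}   # true charge -> summed squared error
true_charge = np.fromiter(sum_dict.keys(), dtype=int)
summed = np.fromiter(sum_dict.values(), dtype=float)
print(true_charge, summed)   # [ 10  50 100] [ 4.  9. 16.]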
Example 12: __init__
def __init__(self, image):
    # number of points
    self.nx = int(image.shape[0])
    self.ny = int(image.shape[1])
    # spacing
    self.dx = 1.0
    self.dy = 1.0
    # limits
    self.xmin = 0
    self.ymin = 0
    self.xmax = float(self.nx)
    self.ymax = float(self.ny)
    # lengths
    self.lx = abs(self.xmax - self.xmin)
    self.ly = abs(self.ymax - self.ymin)
    # mesh
    self.y, self.x = np.meshgrid(
        np.fromiter(((0.5 + i) * self.dx for i in range(self.nx)),
                    dtype=np.float64, count=self.nx),
        np.fromiter(((0.5 + i) * self.dy for i in range(self.ny)),
                    dtype=np.float64, count=self.ny))
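The fromiter calls above build cell-centred coordinates; under the same assumptions np.arange expresses it more directly, which is the more common idiom. A sketch showing the two are equivalent:

import numpy as np

nx, dx = 4, 1.0
centers_fromiter = np.fromiter(((0.5 + i) * dx for i in range(nx)),
                               dtype=np.float64, count=nx)
centers_arange = (np.arange(nx) + 0.5) * dx
assert np.allclose(centers_fromiter, centers_arange)
print(centers_arange)   # [0.5 1.5 2.5 3.5]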
Example 13: extract_surf
def extract_surf(jpgfile):
    start = time.time()
    out = os.path.join(os.path.dirname(jpgfile),
                       os.path.basename(jpgfile)[:-4] + 'surf.npy')
    if os.path.exists(out):
        INFO('%s already exists' % out)
        return
    im = cv.LoadImageM(jpgfile, cv.CV_LOAD_IMAGE_GRAYSCALE)
    INFO('cv loaded %dx%d image' % (im.rows, im.cols))
    g, features = cv.ExtractSURF(im, None, cv.CreateMemStorage(), (0, 500, 3, 4))
    data = np.ndarray(len(features), SURFReader.surf_dtype)
    for i in range(len(features)):
        data[i]['vec'] = np.fromiter(features[i], np.float32)
        data[i]['geom'] = np.fromiter([g[i][0][0], g[i][0][1], g[i][2]], np.uint16)
        data[i]['index'] = 0
    ## Simple quantization into bytes
    # for i in range(len(features)):
    #     surfvalues = np.fromiter(features[i], np.float64)
    #
    #     assert max(surfvalues) <= 1.0
    #     assert min(surfvalues) >= -1.0
    #
    #     data[i]['vec'] = np.int8(127*surfvalues)
    #     data[i]['geom'] = np.fromiter([g[i][0][0], g[i][0][1], g[i][2]], np.uint16)
    #     data[i]['index'] = 0
    save_atomic(lambda d: np.save(d, data), out)
    INFO('cv wrote %d features' % len(features))
    INFO_TIMING('took %f seconds' % (time.time() - start))
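Filling structured-array fields from iterators, as above, works because each field is itself a small fixed-size array. A self-contained sketch (the 64-float 'vec' mirrors a SURF descriptor; SURFReader.surf_dtype is project code, so an assumed equivalent dtype is spelled out here):

import numpy as np

surf_dtype = np.dtype([('vec', np.float32, 64),
                       ('geom', np.uint16, 3),
                       ('index', np.uint16)])

data = np.ndarray(2, surf_dtype)
for i in range(2):
    data[i]['vec'] = np.fromiter((0.01 * i * k for k in range(64)), np.float32, count=64)
    data[i]['geom'] = np.fromiter([10 * i, 20 * i, 3], np.uint16)
    data[i]['index'] = 0
print(data['geom'])   # [[ 0  0  3]
                      #  [10 20  3]]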
Example 14: decodePacket
def decodePacket(bin_data, data_size=None, packet_mode='i64u', track_t0=False):
    # Works only for i64bit unpacked mode
    global compressed_t0
    #assert (packet_mode == 'i64u')
    if packet_mode == 'i64u':
        data_size = len(bin_data)//ctypes.sizeof(Timetag_I64)
        t = ctypes.cast(bin_data, timetag_I64_p)
        time = np.fromiter((i.time for i in t), np.int64, data_size)
        channel = np.fromiter((i.channel for i in t), np.int8, data_size)
    elif packet_mode == 'i64c':
        data_size = len(bin_data)//ctypes.sizeof(Timetag_I64c)
        t = ctypes.cast(bin_data, timetag_I64c_p)
        #if t[0].highlow == 0:
        #    ctypes.cast(bin_data, timetag_I64c_p)
        highlow = np.fromiter((i.highlow for i in t), np.uint64, data_size)
        time = np.fromiter((i.timehigh for i in t), np.uint64, data_size) + np.cumsum(highlow)*2**27
        channel = np.fromiter((i.channel for i in t), np.uint8, data_size)
        time = time[highlow == 0]
        channel = channel[highlow == 0]
        if track_t0:
            time = time + compressed_t0
            compressed_t0 += sum(highlow)*2**27
        else:
            track_t0 = 0
    return time, channel
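A runnable sketch of the ctypes-plus-fromiter idiom above, with a simplified two-field record (the Timetag structures and pointer types are project-specific); from_buffer_copy replaces the pointer cast so the sketch stays safe and self-contained:

import ctypes
import numpy as np

class Tag(ctypes.Structure):
    _fields_ = [('time', ctypes.c_int64), ('channel', ctypes.c_int8)]

# Pretend this buffer arrived off the wire.
raw = bytes((Tag * 3)(Tag(100, 1), Tag(250, 2), Tag(400, 1)))

n = len(raw) // ctypes.sizeof(Tag)
records = (Tag * n).from_buffer_copy(raw)
time = np.fromiter((r.time for r in records), np.int64, n)
channel = np.fromiter((r.channel for r in records), np.int8, n)
print(time, channel)   # [100 250 400] [1 2 1]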
Example 15: token_type
def token_type(disc_clsdict, wrd_corpus, fragments_within, fragments_cross,
               dest, verbose, n_jobs):
    if verbose:
        print(banner('TOKEN/TYPE'))
    ptoc, rtoc, ptyc, rtyc = _token_type_sub(disc_clsdict, wrd_corpus,
                                             fragments_cross, 'cross',
                                             verbose, n_jobs)
    ftoc = np.fromiter((fscore(ptoc[i], rtoc[i]) for i in range(ptoc.shape[0])),
                       dtype=np.double)
    ftyc = np.fromiter((fscore(ptyc[i], rtyc[i]) for i in range(ptyc.shape[0])),
                       dtype=np.double)
    ptow, rtow, ptyw, rtyw = _token_type_sub(disc_clsdict, wrd_corpus,
                                             fragments_within, 'within',
                                             verbose, n_jobs)
    ftow = np.fromiter((fscore(ptow[i], rtow[i]) for i in range(ptow.shape[0])),
                       dtype=np.double)
    ftyw = np.fromiter((fscore(ptyw[i], rtyw[i]) for i in range(ptyw.shape[0])),
                       dtype=np.double)
    with open(path.join(dest, 'token_type'), 'w') as fid:
        fid.write(pretty_score_f(ptoc, rtoc, ftoc, 'token total',
                                 len(fragments_cross),
                                 sum(map(len, fragments_cross))))
        fid.write('\n')
        fid.write(pretty_score_f(ptyc, rtyc, ftyc, 'type total',
                                 len(fragments_cross),
                                 sum(map(len, fragments_cross))))
        fid.write('\n')
        fid.write(pretty_score_f(ptow, rtow, ftow, 'token within-speaker only',
                                 len(fragments_within),
                                 sum(map(len, fragments_within))))
        fid.write('\n')
        fid.write(pretty_score_f(ptyw, rtyw, ftyw, 'type within-speaker only',
                                 len(fragments_within),
                                 sum(map(len, fragments_within))))
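The fromiter calls above compute an elementwise F-score from precision/recall arrays. Assuming fscore is the usual 2pr/(p+r), the same result comes from vectorized NumPy arithmetic without the generator, as this sketch shows:

import numpy as np

def fscore(p, r):   # assumed definition of the project's fscore helper
    return 2 * p * r / (p + r)

p = np.array([0.5, 0.8, 1.0])
r = np.array([0.4, 0.6, 0.5])

f_iter = np.fromiter((fscore(p[i], r[i]) for i in range(p.shape[0])),
                     dtype=np.double)
f_vec = 2 * p * r / (p + r)
assert np.allclose(f_iter, f_vec)
print(f_vec)   # [0.44444444 0.68571429 0.66666667]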