本文整理汇总了Python中builtins.zip方法的典型用法代码示例。如果您正苦于以下问题:Python builtins.zip方法的具体用法?Python builtins.zip怎么用?Python builtins.zip使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类builtins
的用法示例。
在下文中一共展示了builtins.zip方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_row
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def get_row(self, *row, **options):
    """Format the row into a single Cell spanning all output columns.

    Args:
      *row: A list of objects to render in the same order as columns are
        defined.
      **options: Per-call rendering options; they override each column's
        own default options.

    Returns:
      A single Cell object spanning the entire row.
    """
    result = []
    for column, value in zip(self.columns, row):
        # Column defaults first, then per-call options take precedence.
        merged_opts = column.options.copy()
        merged_opts.update(options)
        if not merged_opts.get("hidden"):
            # BUG FIX: previously the raw call options were forwarded,
            # discarding the merged column defaults computed above.
            result.append(column.render_row(value, **merged_opts) or Cell(""))
    return JoinedCell(
        *result, tablesep=self.options.get("tablesep", " "))
示例2: ParseMemoryRuns
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def ParseMemoryRuns(self, fhandle):
    """Query the pmem driver for the physical-memory layout.

    Issues two DeviceIoControl calls on the open driver handle: one to
    switch the driver into PTE-remapping acquisition mode and one to read
    the info structure describing memory geometry.  The parsed header is
    stored in ``self.memory_parameters`` and one address run is registered
    per physical-memory range.

    Args:
      fhandle: An open win32 handle to the pmem device.
    """
    # Set acquisition mode. If the driver does not support this mode it will
    # just fall back to the default.
    win32file.DeviceIoControl(
        fhandle, CTRL_IOCTRL,
        struct.pack("I", PMEM_MODE_PTE), 4, None)

    result = win32file.DeviceIoControl(
        fhandle, INFO_IOCTRL, b"", 102400, None)

    # The info blob begins with len(self.FIELDS) unsigned 64-bit values,
    # one per named field, in FIELDS order.
    fmt_string = "Q" * len(self.FIELDS)
    self.memory_parameters = dict(zip(self.FIELDS, struct.unpack_from(
        fmt_string, result)))

    # After the header each run is a (start, length) pair of uint64s,
    # i.e. 16 bytes per run.
    offset = struct.calcsize(fmt_string)
    for x in range(self.memory_parameters["NumberOfRuns"]):
        start, length = struct.unpack_from("QQ", result, x * 16 + offset)
        self.add_run(start, start, length, self.fhandle_as)
示例3: _get_as_histograms
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def _get_as_histograms(self):
    """Fetch ttl/objsz histogram dumps from every node.

    Returns:
        dict mapping node -> {histogram_key: dump}; nodes whose dump is
        empty or an Exception get an entry with that key omitted.
    """
    requested = [('ttl', 'ttl', False),
                 ('objsz', 'objsz', False),
                 ('objsz', 'object-size', True)]

    # Kick off all histogram fetches concurrently.
    futures = [util.Future(self.cluster.info_histogram, name,
                           logarithmic=logarithmic,
                           raw_output=True,
                           nodes=self.nodes).start()
               for name, _, logarithmic in requested]

    histogram_map = {}
    for (_, key, _), future in zip(requested, futures):
        dump = future.result()
        for node in dump:
            node_bucket = histogram_map.setdefault(node, {})
            node_dump = dump[node]
            if not node_dump or isinstance(node_dump, Exception):
                continue
            node_bucket[key] = node_dump
    return histogram_map
示例4: _get_value_and_diff
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def _get_value_and_diff(self, prev, slice_val):
diff = []
value = []
under_limit = True
if self.upper_limit_check:
under_limit = False
if prev:
temp = ([b - a for b, a in zip(slice_val, prev)])
if not self.upper_limit_check or any(i >= self.upper_limit_check for i in temp):
diff = ([b for b in temp])
under_limit = True
else:
if not self.upper_limit_check or any(i >= self.upper_limit_check for i in slice_val):
diff = ([b for b in slice_val])
under_limit = True
if under_limit:
value = ([b for b in slice_val])
return value, diff
示例5: parse_line
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def parse_line(cls, line):
    """Parse one comma-separated record into a normalized flow dict.

    Splits the raw bytes line on commas, pairs values with cls.fields,
    converts ports (decimal or 0x-prefixed hex), renames address/volume
    fields to the canonical src/dst/cs*/sc* names, and converts the
    start/end timestamps to datetime objects.
    """
    fields = {name: raw.strip().decode()
              for name, raw in zip(cls.fields, line.split(b","))}

    # Ports may be decimal or 0x-prefixed hex.  A non-numeric, non-empty
    # value is kept verbatim; an empty value is dropped entirely.
    for port_field in ["sport", "dport"]:
        text = fields[port_field]
        base = 16 if text.startswith("0x") else 10
        try:
            fields[port_field] = int(text, base)
        except ValueError:
            if not text:
                del fields[port_field]

    fields["src"] = fields.pop("saddr")
    fields["dst"] = fields.pop("daddr")
    fields["csbytes"] = int(fields.pop("sbytes"))
    fields["cspkts"] = int(fields.pop("spkts"))
    fields["scbytes"] = int(fields.pop("dbytes"))
    fields["scpkts"] = int(fields.pop("dpkts"))
    fields["start_time"] = datetime.datetime.fromtimestamp(
        float(fields.pop("stime"))
    )
    fields["end_time"] = datetime.datetime.fromtimestamp(
        float(fields.pop("ltime"))
    )
    return fields
示例6: parse_line
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def parse_line(self, line):
    """Parse one CSV line of a multi-block dump.

    A blank line marks the end of a block: the next non-empty line is
    treated as a header row that resets self.fields / self.cur_types.
    Header and blank lines are skipped by recursing into next(self);
    data lines are returned as a field-name -> converted-value dict.
    """
    text = line.decode().rstrip('\r\n')
    if not text:
        # Blank separator: the following line carries column headers.
        self.nextline_headers = True
        return next(self)

    cells = [cell.strip() for cell in text.split(',')]
    if self.nextline_headers:
        self.fields = cells
        self.cur_types = [self.types.get(name) for name in cells]
        self.nextline_headers = False
        return next(self)

    converted = (self.converters.get(self.cur_types[idx])(cell)
                 for idx, cell in enumerate(cells))
    return dict(zip(self.fields, converted))
示例7: get_fuzz_target_weights
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def get_fuzz_target_weights():
    """Get a list of fuzz target weights based on the current fuzzer."""
    job_type = environment.get_value('JOB_NAME')

    target_jobs = list(fuzz_target_utils.get_fuzz_target_jobs(job=job_type))
    fuzz_targets = fuzz_target_utils.get_fuzz_targets_for_target_jobs(
        target_jobs)

    weights = {}
    for target, target_job in zip(fuzz_targets, target_jobs):
        if not target:
            # No FuzzTarget entity was found for this job; skip it, but
            # record the anomaly.
            logs.log_error('Skipping weight assignment for fuzz target %s.' %
                           target_job.fuzz_target_name)
            continue
        weights[target.binary] = target_job.weight

    return weights
示例8: setUp
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def setUp(self):
    """Set up an untrusted-runner engine fuzzer test environment.

    Registers a libFuzzer ASan job whose environment points at the public
    test-builds bucket, creates a scratch directory under FUZZ_INPUTS, and
    disables minijail.
    """
    super(UntrustedRunEngineFuzzerTest, self).setUp()
    environment.set_value('JOB_NAME', 'libfuzzer_asan_job')

    # The ([0-9]+) group in the bucket path is the revision number embedded
    # in the build archive name.
    job = data_types.Job(
        name='libfuzzer_asan_job',
        environment_string=(
            'RELEASE_BUILD_BUCKET_PATH = '
            'gs://clusterfuzz-test-data/test_libfuzzer_builds/'
            'test-libfuzzer-build-([0-9]+).zip\n'
            'REVISION_VARS_URL = https://commondatastorage.googleapis.com/'
            'clusterfuzz-test-data/test_libfuzzer_builds/'
            'test-libfuzzer-build-%s.srcmap.json\n'))
    job.put()

    # Scratch directory for fuzzing artifacts; FUZZ_INPUTS is presumably
    # set by the base fixture -- TODO confirm.
    self.temp_dir = tempfile.mkdtemp(dir=environment.get_value('FUZZ_INPUTS'))
    environment.set_value('USE_MINIJAIL', False)
示例9: is_valid_flatten_or_unflatten
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def is_valid_flatten_or_unflatten(src_axes, dst_axes):
    """
    Checks whether we can flatten OR unflatten from src_axes to dst_axes.

    The requirements are that the components of axes should all be
    present in new_axes and that they should be laid out in the same
    order. This check is symmetric.
    """
    # Inflate both sides to flat axis lists so they can be compared
    # position by position.
    flat_src = Axes.as_flattened_list(src_axes)
    flat_dst = Axes.as_flattened_list(dst_axes)

    if len(flat_src) != len(flat_dst):
        return False
    return all(s == d for s, d in zip(flat_src, flat_dst))
示例10: _make_strides
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def _make_strides(inner_size, axes, full_sizes):
    """
    Generates a tuple of strides for a set of axes. See _make_stride
    for a description of the stride given to each axis.

    Arguments:
        inner_size: The total size of all dimensions smaller than
        the axes.
        axes: The axes for which we are generating strides.
        full_sizes: The size of each axis.

    Returns:
        inner_size: The total size of these axes and all smaller dimensions.
        strides: The strides generated for the axes.
    """
    # Walk the axes innermost-first so each stride can build on the
    # accumulated inner size, then restore the original order.
    strides_reversed = []
    for axis, axis_size in reversed(list(zip(axes, full_sizes))):
        inner_size, stride = _make_stride(inner_size, axis, axis_size)
        strides_reversed.append(stride)
    strides_reversed.reverse()
    return inner_size, tuple(strides_reversed)
示例11: expand_offsets
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def expand_offsets(cur_rect_l, cur_rect_u, offsets):
    '''
    Expand per-level offsets along each dimension into the final offsets
    for all candidate ranges, by summing each tuple in the cross product
    of the per-level offset arrays.

    e.g. for some dimension, two-level offsets [[0, 1, 0], [2, 4, 2]]
    expand to [2 4 2 3 5 3 2 4 2].

    cur_rect_l, cur_rect_u: lower/upper corner coordinates of the range.
    offsets: nested array of offsets indexed by dimension, then hierarchy
    level.
    '''
    # Discard levels that carry no queries along each dimension.
    pruned = [[level for level in dim if len(level) > 0] for dim in offsets]

    assert all(len(dim) == len(pruned[0]) for dim in pruned), \
        "Shape of offsets along each dimension should match."
    if len(pruned[0]) < 1:
        return [], []

    # Cross-product expansion across levels, one array per dimension.
    expanded = [HierarchicalRanges.quick_product(*dim).sum(axis=0)
                for dim in pruned]
    lower = np.vstack([low + off for low, off in zip(cur_rect_l, expanded)]).T
    upper = np.vstack([high + off for high, off in zip(cur_rect_u, expanded)]).T
    return lower, upper
示例12: select
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def select(self):
    """Build the hierarchical strategy matrix selected greedily for W.

    Runs the greedy per-level selection over the workload Gram matrix,
    keeps only queries with positive weight, and returns the resulting
    strategy as an EkteloMatrix backed by a CSR sparse matrix.

    Returns:
        matrix.EkteloMatrix wrapping the weighted query rows.
    """
    QtQ = self.W.gram().dense_matrix()
    n = self.domain_shape[0]
    err, inv, weights, queries = self._GreedyHierByLv(
        QtQ, n, 0, withRoot=False)

    # Form matrix from queries and weights; each selected range query
    # [q0, q1] becomes a row of weight w over that interval.
    row_list = []
    for q, w in zip(queries, weights):
        if w > 0:
            row = np.zeros(self.domain_shape[0])
            row[q[0]:q[1] + 1] = w
            row_list.append(row)
    mat = np.vstack(row_list)
    # np.vstack yields a dense ndarray, so convert to CSR unless already
    # sparse.  (FIX: replaced the non-idiomatic `issparse(mat) is False`
    # identity comparison with a plain truth test; behavior unchanged.)
    if not sparse.issparse(mat):
        mat = sparse.csr_matrix(mat)
    return matrix.EkteloMatrix(mat)
示例13: canonical_ordering
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def canonical_ordering(mapping):
    """ remap according to the canonical order.

    if bins are noncontiguous, use position of first occurrence.
    e.g. [3,4,1,1] => [1,2,3,3]; [3,4,1,1,0,1]=>[0,1,2,2,3,2]
    """
    _, _, inverse, _ = mapping_statistics(mapping)

    # Position of the first occurrence of each label in `inverse`.
    labels, first_positions = np.unique(inverse, return_index=True)
    first_positions.sort()
    appearance_order = inverse[first_positions]

    # Relabel so labels are numbered by order of first appearance.
    pairs = sorted(zip(labels, appearance_order), key=lambda pair: pair[1])
    relabel = np.array([label for label, _ in pairs])
    return relabel[inverse].reshape(mapping.shape)
示例14: calc_mean_lifetime
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def calc_mean_lifetime(dx, t1=0, t2=np.inf, ph_sel=Ph_sel('all')):
    """Compute the mean lifetime in each burst.

    Arguments:
        t1, t2 (floats): min and max value (in TCSPC bin units) for the
            nanotime to be included in the mean
        ph_sel (Ph_sel object): object defining the photon selection.
            See :mod:`fretbursts.ph_sel` for details.

    Returns:
        List of arrays of per-burst mean lifetime. One array per channel.
    """
    mean_lifetimes = []
    channels = zip(dx.mburst, dx.nanotimes, dx.iter_ph_masks(ph_sel))
    for bursts, nanotimes, ph_mask in channels:
        # Photons must be in ph_sel AND have nanotime inside (t1, t2).
        selected = (nanotimes > t1) * (nanotimes < t2)
        if isarray(ph_mask):
            selected *= ph_mask
        per_burst_mean = burstlib.burst_ph_stats(nanotimes, bursts,
                                                 mask=selected, func=np.mean)
        mean_lifetimes.append(per_burst_mean - t1)
    return mean_lifetimes
示例15: burst_data_period_mean
# 需要导入模块: import builtins [as 别名]
# 或者: from builtins import zip [as 别名]
def burst_data_period_mean(dx, burst_data):
    """Compute mean `burst_data` in each period.

    Arguments:
        dx (Data object): contains the burst data to process
        burst_data (list of arrays): one array per channel, each array
            has one element of "burst data" per burst.

    Returns:
        2D of arrays with shape (nch, nperiods).

    Example:
        burst_period_mean(dx, dx.nt)
    """
    out = np.zeros((dx.nch, dx.nperiods))
    for ch, (data_ch, periods_ch) in enumerate(zip(burst_data, dx.bp)):
        # Average the bursts belonging to each period of this channel.
        out[ch] = [data_ch[periods_ch == period].mean()
                   for period in range(dx.nperiods)]
    return out