This article collects typical usage examples of the itertools.compress method in Python. If you have been wondering what exactly itertools.compress does and how to use it, the curated code examples below may help. You can also explore further usage examples from the itertools
module.
The following presents 15 code examples of the itertools.compress method, sorted by popularity by default.
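Before the project-level examples, here is a minimal sketch of the core behavior: itertools.compress(data, selectors) yields the items of data whose corresponding entry in selectors is truthy, and stops as soon as either iterable is exhausted.

import itertools

data = ['a', 'b', 'c', 'd']
selectors = [1, 0, 1, 0]

# Keep the items of data whose matching selector is truthy
print(list(itertools.compress(data, selectors)))  # ['a', 'c']

# The shorter of the two iterables determines the output length
print(list(itertools.compress(data, [True])))     # ['a']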
Example 1: subset_cells
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def subset_cells(self, keep_cells):
"""
Write a file reduced to just the given cells
"""
keep_cells = set(keep_cells)
subset_file_name = self.tag_file_name(c_SUBSET_POSTFIX)
if subset_file_name is None:
        return None
with self.get_write_handle(subset_file_name) as file_writer:
csv_writer = csv.writer(file_writer, delimiter=self.delimiter)
check_handle = self.csv_handle
keep_cells.add(c_EXPRESSION_00_ELEMENT)
header = next(check_handle)
row_1 = next(check_handle)
if (len(header) == (len(row_1) - 1)) and (c_EXPRESSION_00_ELEMENT not in header):
header = [c_EXPRESSION_00_ELEMENT] + header
header_index = [cell in keep_cells for cell in header]
# Need to add the header rows
        csv_writer.writerow(list(itertools.compress(header, header_index)))
        csv_writer.writerow(list(itertools.compress(row_1, header_index)))
        for file_line in check_handle:
            csv_writer.writerow(list(itertools.compress(file_line, header_index)))
    return subset_file_name
Example 2: analyze_strings
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def analyze_strings(mystrings):
"""
A list of mystrings gets classified and only the predicted API keys are returned
:param mystrings: a list of mystrings to be analyzed
:return: a list of valid api keys
:rtype: list
"""
# for performance it's better to create a new list instead of removing elements from list
smali_strings_filtered = []
strings_features = []
for string in mystrings:
features = string_classifier.calculate_all_features(string.value)
if features:
features_list = list(features)
smali_strings_filtered.append(string)
strings_features.append(features_list)
if len(strings_features) > 0:
        prediction = classifier.predict(np.array(strings_features))
        # The 0/1 prediction array acts as a bitmask over the filtered strings
        api_keys_strings = list(itertools.compress(smali_strings_filtered, prediction))
        return api_keys_strings
    return []
Example 3: nearby_now
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def nearby_now(self) -> List[Tuple[str, Pos, float]]:
now = datetime.utcnow()
t1 = time()
self.last_query_t = t1
lons, lats, alts, errors = self.orbs.get_lonlatalt(now)
t2 = time()
rough_near = np.logical_and(np.abs(lats - self.loc.lat) < 3, np.abs(lons - self.loc.long) < 3)
valid_satpos = list(
zip(self.satnames[~errors][rough_near], lats[rough_near], lons[rough_near], alts[rough_near]))
nearby = [(name, Pos(lat=lat, long=lon), alt) for name, lat, lon, alt in valid_satpos if
distance.distance(self.loc, (lat, lon)).km < 200]
t3 = time()
print("loc:{:.2f}s dist: {:.2f}s tot: {:.2f}s, sats: {:02d}".format(t2 - t1, t3 - t2, t3 - t1, len(nearby)))
if not self.filtered_errors:
print("filtering errors")
self.satnames = self.satnames[~errors]
            self.tles = list(itertools.compress(self.tles, ~errors))  # materialize so the TLE list can be iterated again later
self.create_orbitals()
self.filtered_errors = True
return nearby
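Note that compress accepts any iterable of truthy values as selectors, not just Python bools: above, ~errors is a NumPy boolean array. A minimal illustration of that pattern, with made-up arrays:

import itertools
import numpy as np

tles = ['tle_a', 'tle_b', 'tle_c']
errors = np.array([False, True, False])
print(list(itertools.compress(tles, ~errors)))  # ['tle_a', 'tle_c']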
Example 4: _unpack_available_edges
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def _unpack_available_edges(avail, weight=None, G=None):
"""Helper to separate avail into edges and corresponding weights"""
if weight is None:
weight = 'weight'
if isinstance(avail, dict):
avail_uv = list(avail.keys())
avail_w = list(avail.values())
else:
def _try_getitem(d):
try:
return d[weight]
except TypeError:
return d
avail_uv = [tup[0:2] for tup in avail]
avail_w = [1 if len(tup) == 2 else _try_getitem(tup[-1])
for tup in avail]
if G is not None:
# Edges already in the graph are filtered
# flags = [(G.has_node(u) and G.has_node(v) and not G.has_edge(u, v))
# for u, v in avail_uv]
flags = [not G.has_edge(u, v) for u, v in avail_uv]
avail_uv = list(it.compress(avail_uv, flags))
avail_w = list(it.compress(avail_w, flags))
return avail_uv, avail_w
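A rough usage sketch of this helper (it is a private function in networkx's edge-augmentation module, so treat the calls below as illustrative rather than public API):

import networkx as nx

# avail as a dict mapping candidate edges to weights
avail_uv, avail_w = _unpack_available_edges({(1, 2): 5, (2, 3): 2})
# avail_uv == [(1, 2), (2, 3)]; avail_w == [5, 2]

# avail as (u, v, weight) triples, filtering out edges already present in G
G = nx.Graph([(1, 2)])
avail_uv, avail_w = _unpack_available_edges([(1, 2, 5), (2, 3, 2)], G=G)
# avail_uv == [(2, 3)]; avail_w == [2]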
Example 5: _generate_columns_and_selector
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def _generate_columns_and_selector(self, parsed_args, column_names):
"""Generate included columns and selector according to parsed args.
:param parsed_args: argparse.Namespace instance with argument values
:param column_names: sequence of strings containing names
of output columns
"""
if not parsed_args.columns:
columns_to_include = column_names
selector = None
else:
columns_to_include = [c for c in column_names
if c in parsed_args.columns]
if not columns_to_include:
raise ValueError('No recognized column names in %s. '
'Recognized columns are %s.' %
(str(parsed_args.columns), str(column_names)))
# Set up argument to compress()
selector = [(c in columns_to_include)
for c in column_names]
return columns_to_include, selector
Example 6: test_compress
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def test_compress():
yield verify_same, compress, itertools.compress, None, [1, 2, 3], [1, 2, 3]
yield verify_same, compress, itertools.compress, None, [1, 2, 3], [1, 0, 0]
yield verify_same, compress, itertools.compress, None, [1, 2, 3], [1, 0]
yield verify_same, compress, itertools.compress, None, [1, 2], [1, 0, 1]
yield verify_same, compress, itertools.compress, None, [1, 2], [0, 0]
yield verify_same, compress, itertools.compress, None, [1, 2], [0]
yield verify_same, compress, itertools.compress, None, [1, 2], [0, 0, 0]
yield (verify_pickle, compress, itertools.compress, 3, 1, [1, 2, 3],
[1, 2, 3])
yield (verify_pickle, compress, itertools.compress, 3, 0, [1, 2, 3],
[1, 2, 3])
yield (verify_pickle, compress, itertools.compress, 1, 0, [1, 2, 3],
[1, 0, 0])
yield (verify_pickle, compress, itertools.compress, 1, 0, [1, 2, 3],
[1, 0])
yield (verify_pickle, compress, itertools.compress, 1, 0, [1, 2],
[1, 0, 1])
Example 7: compress
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def compress(items, flags):
"""
Selects from ``items`` where the corresponding value in ``flags`` is True.
This is similar to :func:`numpy.compress`.
This is actually a simple alias for :func:`itertools.compress`.
Args:
items (Iterable[Any]): a sequence to select items from
flags (Iterable[bool]): corresponding sequence of bools
Returns:
Iterable[Any]: a subset of masked items
Example:
>>> import ubelt as ub
>>> items = [1, 2, 3, 4, 5]
>>> flags = [False, True, True, False, True]
>>> list(ub.compress(items, flags))
[2, 3, 5]
"""
return it.compress(items, flags)
Example 8: trim
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def trim(table: Table, blank=None) -> Table:
def isempty(v):
return v is None or str(v).strip(blank) == ""
table = iter2seq(table)
nonemptyflags = [
any(not isempty(v) for v in col)
for col in transpose(table)
]
for row in table:
if all(isempty(v) for v in row):
continue
yield list(itertools.compress(row, nonemptyflags))
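A hedged usage sketch (assuming the iter2seq and transpose helpers behave like list() and zip(*table), as their names suggest):

table = [
    ["name", "", "age"],
    ["",     "", ""   ],  # all-empty row: dropped
    ["ann",  "", "34" ],
]
print(list(trim(table)))
# [['name', 'age'], ['ann', '34']]  -- the all-empty middle column is removed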
Example 9: assert_args_presence
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def assert_args_presence(args, doc, member, name):
args_not_in_doc = [arg not in doc for arg in args]
if any(args_not_in_doc):
        raise ValueError(
            "{} {} arguments are not present in documentation".format(
                name, list(compress(args, args_not_in_doc))),
            member.__module__)
words = doc.replace('*', '').split()
# Check arguments styling
styles = [arg + ":" not in words for arg in args]
if any(styles):
        raise ValueError(
            "{} {} are not styled properly as 'argument: documentation'".format(
                name,
                list(compress(args, styles))),
            member.__module__)
# Check arguments order
indexes = [words.index(arg + ":") for arg in args]
if indexes != sorted(indexes):
raise ValueError(
"{} arguments order is different from the documentation".format(name),
member.__module__)
Example 10: select_subclassdata
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def select_subclassdata(X, y, totalClassNum, SubClassNum, subClassIndexList, normalize=True):
    # Keep only the samples whose label is in subClassIndexList
    mask = [c in subClassIndexList for c in y]
    X = np.array(list(itertools.compress(X, mask)))
    y = np.array(list(itertools.compress(y, mask)))
    # Relabel the kept classes to 0..SubClassNum-1 in two passes, going through
    # temporary labels totalClassNum+i so old and new labels cannot collide
    d = {subClassIndexList[i]: totalClassNum + i for i in range(SubClassNum)}
    d1 = {totalClassNum + i: i for i in range(SubClassNum)}
    for k, v in d.items():
        np.place(y, y == k, v)
    for k, v in d1.items():
        np.place(y, y == k, v)
    return X, y
Example 11: cut_off_drivers_of
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def cut_off_drivers_of(dstSignal, statements):
"""
Cut off drivers from statements
"""
    separated = []
    stm_filter = []
    for stm in statements:
        stm._clean_signal_meta()
        d = stm._cut_off_drivers_of(dstSignal)
        if d is not None:
            separated.append(d)
        # Compute the flag for every statement so the mask stays aligned
        # with the statements list that compress() filters below
        f = d is not stm
        stm_filter.append(f)

    return list(compress(statements, stm_filter)), separated
Example 12: main
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def main(args):
argc = len(args)
ratio = 0.05
if argc < 3:
print("Usage:", args[0], "<if> <of> <ratio={}>".format(ratio))
return -1
elif argc > 3:
ratio = float(args[3])
with open(args[1], encoding='utf-8') as ifh, open(args[2], 'w', encoding='utf-8') as ofh:
        for line in ifh:
            words = line.rstrip().split()
            # Boolean mask: each word is kept with probability (1 - ratio)
            mask = np.random.random_sample((len(words),)) > ratio
            selected_words = list(compress(words, mask))
            ofh.write(" ".join(selected_words) + "\n")
return 0
Example 13: import_json
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def import_json(path='json/MPI_annotations.json', order='json/MPI_order.npy'):
"""Get the json file containing the dataset.
We want the data to be shuffled, however the training has to be repeatable.
This means that once shuffled the order has to me mantained."""
with open(path) as data_file:
data_this = json.load(data_file)
data_this = np.array(data_this['root'])
num_samples = len(data_this)
if os.path.exists(order):
idx = np.load(order)
else:
idx = np.random.permutation(num_samples).tolist()
np.save(order, idx)
    # Align the validation flags with the shuffled order so that compress()
    # pairs each shuffled index with the flag of the sample it points to
    is_not_validation = [not data_this[i]['isValidation'] for i in idx]
    keep_data_idx = list(compress(idx, is_not_validation))
data = data_this[keep_data_idx]
return data, len(keep_data_idx)
Example 14: handle_crash
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def handle_crash(varr, vpath, ssname, vlist, varr_list, frame_dict):
    # Boolean masks aligned with vlist (and thus with varr_list), so that
    # compress() selects the arrays whose names contain 'seg1' / 'seg2'
    seg1_flags = [bool(re.search('seg1', v)) for v in vlist]
    seg2_flags = [bool(re.search('seg2', v)) for v in vlist]
    if any(seg1_flags) and any(seg2_flags):
        tframe = frame_dict[ssname]
        varr1 = darr.concatenate(
            list(compress(varr_list, seg1_flags)),
            axis=0)
        varr2 = darr.concatenate(
            list(compress(varr_list, seg2_flags)),
            axis=0)
fm1, fm2 = varr1.shape[0], varr2.shape[0]
fm_crds = varr.coords['frame']
fm_crds1 = fm_crds.sel(frame=slice(None, fm1 - 1)).values
fm_crds2 = fm_crds.sel(frame=slice(fm1, None)).values
fm_crds2 = fm_crds2 + (tframe - fm_crds2.max())
fm_crds_new = np.concatenate([fm_crds1, fm_crds2], axis=0)
return varr.assign_coords(frame=fm_crds_new)
else:
return varr
Example 15: micc_dictionary
# Required import: import itertools [as alias]
# Or: from itertools import compress [as alias]
def micc_dictionary(paper):
    '''
    Analogous to citation_number_dictionary, but for MICCs rather than the number of citations.
    Co-citations occur when two or more citations are included in the same end note (e.g., '[3-5]').
    :return: dict of counts for co-citation occurrences
    '''
all_groups = [group_cleaner(g) for g in citation_grouper(paper)]
references = paper.find_all("ref")
max_ref_num = len(references)
results = {}
for i in range(1, max_ref_num + 1):
counts = [g.count(i) for g in all_groups]
cite_groups = compress(all_groups, counts)
cocite_counts = [len(g) - 1 for g in cite_groups]
if len(cocite_counts) == 0:
cocite_counts = [-1]
results[i] = median(cocite_counts)
return results
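Note how compress(all_groups, counts) relies on truthiness: any group in which reference i appears a nonzero number of times is kept, so plain integer counts work directly as selectors. A minimal illustration:

from itertools import compress

groups = [[1, 2], [3], [1, 4, 5]]
counts = [g.count(1) for g in groups]  # [1, 0, 1]
print(list(compress(groups, counts)))  # [[1, 2], [1, 4, 5]]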