本文整理汇总了Python中six.moves.filter函数的典型用法代码示例。如果您正苦于以下问题:Python filter函数的具体用法?Python filter怎么用?Python filter使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了filter函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: get_content_snippet2
def get_content_snippet2(content, keyword, max_lines=10):
    """Return an HTML snippet of *content* centered on *keyword*.

    Up to ``max_lines`` non-empty stripped lines surrounding the first
    occurrence of ``keyword`` are joined with ``<br/>`` and every keyword
    occurrence is wrapped in ``<strong>`` tags.  If the keyword is not
    found, the first ``max_lines`` words of the stripped content are
    returned instead.
    """
    max_lines = int(max_lines)
    p = re.compile(
        r'(?P<before>.*)%s(?P<after>.*)' %
        re.escape(keyword),
        re.MULTILINE | re.IGNORECASE | re.DOTALL)
    m = p.search(content)
    html = ""
    if m:
        # Non-empty lines preceding the hit; keep at most half the budget.
        words = list(filter(
            lambda x: x != "",
            striptags(
                m.group("before")).split("\n")))
        before_lines = words[-max_lines // 2:]
        # Non-empty lines after the hit fill the remaining budget.
        words = list(filter(
            lambda x: x != "",
            striptags(
                m.group("after")).split("\n")))
        after = "<br/>".join(words[:max_lines - len(before_lines)])
        before = "<br/>".join(before_lines)
        html = "%s %s %s" % (before, striptags(keyword), after)
        # BUG FIX: escape the keyword here as well (the search pattern above
        # already does).  Without re.escape a keyword containing regex
        # metacharacters either raises or highlights the wrong text.
        kw_p = re.compile(r'(%s)' % re.escape(keyword), re.IGNORECASE)
        html = kw_p.sub(r"<strong>\1</strong>", html)
        html = mark_safe(html)
    else:
        # No match: fall back to the first words of the stripped content.
        html = " ".join(
            list(filter(
                lambda x: x != "",
                striptags(content).replace(
                    "\n",
                    " ").split(" ")))[
                :max_lines])
    return html
示例2: parse_celery_workers
def parse_celery_workers(celery_workers):
    """
    Parses the response from the flower get workers api into a list of hosts
    we expect to be running and a list of hosts we expect to be stopped
    """
    def _strip_timestamp(hostname):
        # Drop the trailing ".<timestamp>" segment of the worker hostname.
        return '.'.join(hostname.split('.')[:-1])

    expect_running = [
        worker for worker in celery_workers
        if not worker.endswith('_timestamp')
    ]
    expect_stopped = []

    stamped = sorted(
        (worker for worker in celery_workers
         if worker.endswith('_timestamp')),
        key=_strip_timestamp,
    )
    for _, group in groupby(stamped, _strip_timestamp):
        # Per host, only the newest (lexicographically greatest) timestamped
        # worker is expected to run; older ones should be stopped.
        newest_first = sorted(group, reverse=True)
        expect_running.append(newest_first[0])
        expect_stopped.extend(newest_first[1:])

    return expect_running, expect_stopped
示例3: startup
def startup(self, group):
    """ Prepare for a new run.

    Args
    ----
    group : `Group`
        Group that owns this recorder.
    """
    # Chained assignment: all three names initially bind the *same* empty
    # set; safe only because each is re-bound (never mutated) below.
    myparams = myunknowns = myresids = set()

    if MPI:
        rank = group.comm.rank
        owned = group._owning_ranks
        # NOTE(review): `rank` and `owned` are computed but never used in
        # this excerpt -- presumably code omitted from this snippet relies
        # on them; confirm against the full source before removing.

    # Compute the inclusion lists for recording
    if self.options['record_params']:
        myparams = set(filter(self._check_path, group.params))
    if self.options['record_unknowns']:
        myunknowns = set(filter(self._check_path, group.unknowns))
    if self.options['record_resids']:
        myresids = set(filter(self._check_path, group.resids))

    # Cache the per-group inclusion sets, keyed by the group's pathname.
    self._filtered[group.pathname] = {
        'p': myparams,
        'u': myunknowns,
        'r': myresids
    }
示例4: _get_eligible_broker_pair
def _get_eligible_broker_pair(self, under_loaded_rg, eligible_partition):
"""Evaluate and return source and destination broker-pair from over-loaded
and under-loaded replication-group if possible, return None otherwise.
Return source broker with maximum partitions and destination broker with
minimum partitions based on following conditions:-
1) At-least one broker in under-loaded group which does not have
victim-partition. This is because a broker cannot have duplicate replica.
2) At-least one broker in over-loaded group which has victim-partition
"""
under_brokers = list(filter(
lambda b: eligible_partition not in b.partitions,
under_loaded_rg.brokers,
))
over_brokers = list(filter(
lambda b: eligible_partition in b.partitions,
self.brokers,
))
# Get source and destination broker
source_broker, dest_broker = None, None
if over_brokers:
source_broker = max(
over_brokers,
key=lambda broker: len(broker.partitions),
)
if under_brokers:
dest_broker = min(
under_brokers,
key=lambda broker: len(broker.partitions),
)
return (source_broker, dest_broker)
示例5: process
def process(self):
    """
    Process the file upload and add products to the range
    """
    all_ids = set(self.extract_ids())

    # Identifiers (partner SKUs and UPCs) already present in the range;
    # filter(bool, ...) drops None/empty values from the querysets.
    products = self.range.included_products.all()
    existing_skus = products.values_list('stockrecord__partner_sku',
                                         flat=True)
    existing_skus = set(filter(bool, existing_skus))
    existing_upcs = products.values_list('upc', flat=True)
    existing_upcs = set(filter(bool, existing_upcs))
    existing_ids = existing_skus.union(existing_upcs)
    # Only try to add identifiers not already in the range.
    new_ids = all_ids - existing_ids

    products = Product._default_manager.filter(
        models.Q(stockrecord__partner_sku__in=new_ids) |
        models.Q(upc__in=new_ids))
    for product in products:
        self.range.add_product(product)

    # Processing stats
    found_skus = products.values_list('stockrecord__partner_sku',
                                      flat=True)
    found_skus = set(filter(bool, found_skus))
    found_upcs = set(filter(bool, products.values_list('upc', flat=True)))
    found_ids = found_skus.union(found_upcs)
    # Uploaded identifiers that matched no product at all.
    missing_ids = new_ids - found_ids
    # Uploaded identifiers that were already in the range.
    dupes = set(all_ids).intersection(existing_ids)

    self.mark_as_processed(products.count(), len(missing_ids), len(dupes))
示例6: check
def check(process_output, judge_output, split_on='lines', **kwargs):
    """Compare *process_output* against *judge_output* ignoring ordering.

    ``split_on`` selects the splitting mode: 'lines' splits on newlines and
    additionally ignores token order within each line; 'whitespace' splits
    on any whitespace.  Both sides are sorted before comparison, so segment
    order never matters.  Any other ``split_on`` raises ``InternalError``.
    Returns True when the outputs match, False otherwise.
    """
    split_pattern = {
        # BUG FIX: raw byte-strings -- b'[\\s]' contained the invalid
        # escape '\\s' (DeprecationWarning, a SyntaxError on newer Pythons).
        'lines': br'[\r\n]',
        'whitespace': br'[\s]',
    }.get(split_on)

    if not split_pattern:
        raise InternalError('invalid `split_on` mode')

    # Drop empty segments produced by consecutive separators.
    process_lines = list(filter(None, resplit(split_pattern, utf8bytes(process_output))))
    judge_lines = list(filter(None, resplit(split_pattern, utf8bytes(judge_output))))

    if len(process_lines) != len(judge_lines):
        return False

    if split_on == 'lines':
        # In 'lines' mode, token order within each line is also irrelevant.
        process_lines = list(map(six.binary_type.split, process_lines))
        judge_lines = list(map(six.binary_type.split, judge_lines))

    process_lines.sort()
    judge_lines.sort()

    for process_line, judge_line in zip(process_lines, judge_lines):
        if process_line != judge_line:
            return False

    return True
示例7: process_trade
def process_trade(self, trade_event):
    """Match open orders for this event's sid against *trade_event*,
    yielding (transaction, order) pairs and pruning orders that close."""
    if trade_event.sid not in self.open_orders:
        return

    if trade_event.volume < 1:
        # there are zero volume trade_events bc some stocks trade
        # less frequently than once per minute.
        return

    orders = self.open_orders[trade_event.sid]
    orders.sort(key=lambda o: o.dt)
    # Only use orders for the current day or before
    # NOTE(review): with six.moves.filter this is a *lazy* iterator over
    # `orders`, which is mutated below via orders.remove(); this assumes
    # process_transactions fully consumes it before any removal -- confirm.
    current_orders = filter(
        lambda o: o.dt <= trade_event.dt,
        orders)

    processed_orders = []
    for txn, order in self.process_transactions(trade_event,
                                                current_orders):
        processed_orders.append(order)
        yield txn, order

    # remove closed orders. we should only have to check
    # processed orders
    def not_open(order):
        return not order.open
    closed_orders = filter(not_open, processed_orders)
    for order in closed_orders:
        orders.remove(order)

    # No open orders left for this sid: drop the bookkeeping entry.
    if len(orders) == 0:
        del self.open_orders[trade_event.sid]
示例8: test_simple_plan_add_on_creation
def test_simple_plan_add_on_creation(self):
    """Add-ons created via the recurly client should be persisted in the
    mocurly plan add-ons backend and linked to their parent plan."""
    # add a sample plan to the plans backend
    mocurly.backend.plans_backend.add_object(self.base_backed_plan_data['plan_code'], self.base_backed_plan_data)
    self.assertEqual(len(mocurly.backend.plan_add_ons_backend.datastore), 0)

    # now create some addons
    plan = recurly.Plan.get(self.base_backed_plan_data['plan_code'])
    for add_on in self.base_add_on_data:
        add_on['name'] = add_on['add_on_code'].upper()
        add_on['unit_amount_in_cents'] = recurly.Money(**add_on['unit_amount_in_cents'])
        plan.create_add_on(recurly.AddOn(**add_on))
    self.assertEqual(len(mocurly.backend.plan_add_ons_backend.datastore), 2)

    # Verify each backed add-on against its source data.  (De-duplicated
    # from two copy-pasted blocks for 'foo' and 'bar'.)
    for add_on_code in ('foo', 'bar'):
        backed = mocurly.backend.plan_add_ons_backend.get_object(
            self.base_backed_plan_data['plan_code'] + '__' + add_on_code)
        expected = next(filter(
            lambda add_on, code=add_on_code: add_on['add_on_code'] == code,
            self.base_add_on_data))
        for k, v in expected.items():
            if k == 'unit_amount_in_cents':
                # Money stores per-currency amounts; backend stores strings.
                self.assertEqual(backed[k], dict((curr, str(amt)) for curr, amt in v.currencies.items()))
            else:
                self.assertEqual(backed[k], v)

    # make sure foreign keys are linked properly
    self.assertEqual(len(plan.add_ons()), 2)
示例9: test_get_questions_with_repeats
def test_get_questions_with_repeats(self):
    """
    This test ensures that questions that start with the repeat group id
    do not get marked as repeats. For example:

        /data/repeat_name       <-- repeat group path
        /data/repeat_name_count <-- question path

    Before /data/repeat_name_count would be tagged as a repeat incorrectly.

    See http://manage.dimagi.com/default.asp?234108 for context
    """
    form = self.app.get_form(self.form_with_repeats_unique_id)
    questions = form.wrapped_xform().get_questions(
        ['en'],
        include_groups=True,
    )

    def question_with_value(path):
        # First question whose 'value' equals the given path.
        return [q for q in questions if q['value'] == path][0]

    repeat_name_count = question_with_value('/data/repeat_name_count')
    self.assertIsNone(repeat_name_count['repeat'])

    repeat_question = question_with_value('/data/repeat_name/question5')
    self.assertEqual(repeat_question['repeat'], '/data/repeat_name')
示例10: _leaf_versions
def _leaf_versions(tree, rc):
    '''
    Recursively traverse the versions tree in a depth-first fashion,
    and collect the last node of each branch, i.e. leaf versions.
    '''
    versions = []
    if _is_iterable(tree):
        for subtree in tree:
            versions.extend(_leaf_versions(subtree, rc))
        if not versions:
            # No nested branches produced anything: `tree` itself is a flat
            # branch of version objects, ordered oldest-to-newest.
            if rc:
                # Newest RC and newest production version in this branch.
                last_rc = next(filter(lambda v: v.is_rc, reversed(tree)), None)
                last_prod = next(
                    filter(lambda v: not v.is_rc, reversed(tree)), None)
                if last_rc and last_prod and (last_prod < last_rc):
                    # The RC is newer than the latest production: keep both.
                    versions.extend([last_prod, last_rc])
                elif not last_prod:
                    # Branch contains only RCs.
                    versions.append(last_rc)
                else:
                    # Either there is no RC, or we ignore the RC as older than
                    # the latest production version:
                    versions.append(last_prod)
            else:
                versions.append(tree[-1])
    return versions
示例11: retrieve_keys
def retrieve_keys(bucket, key, prefix='', postfix='', delim='/',
                  directories=False, recursive=False):
    """
    Retrieve keys from a bucket
    """
    if key and prefix:
        assert key.endswith(delim)
        key += prefix

    # If `key` names a directory (a matching prefix exists), make sure it
    # carries a trailing delimiter before listing.
    if not key.endswith(delim) and key:
        if BotoClient.check_prefix(bucket, key + delim, delim=delim):
            key += delim

    # A delimiter makes the listing shallow; recursion needs it unset.
    listdelim = None if recursive else delim
    results = bucket.list(prefix=key, delimiter=listdelim)

    if postfix:
        # Keep only keys matching the postfix.
        return filter(
            lambda k_: BotoClient.filter_predicate(k_, postfix, inclusive=True),
            results)
    if not directories:
        # Exclude directory placeholder keys (those ending in the delimiter).
        return filter(
            lambda k_: BotoClient.filter_predicate(k_, delim, inclusive=False),
            results)
    return results
示例12: removePyc
def removePyc(folder, only_excess=True, show_logs=True):
    """Delete stale .pyc files under *folder*, then prune empty directories.

    When ``only_excess`` is True (the default) only .pyc files whose .py
    source no longer exists are removed; otherwise every .pyc is removed.
    """
    folder = sp(folder)

    for root, dirs, files in os.walk(folder):

        pyc_files = filter(lambda filename: filename.endswith(".pyc"), files)
        py_files = set(filter(lambda filename: filename.endswith(".py"), files))
        # "name.pyc"[:-1] == "name.py": a .pyc is excess when its twin is gone.
        excess_pyc_files = (
            filter(lambda pyc_filename: pyc_filename[:-1] not in py_files, pyc_files) if only_excess else pyc_files
        )

        for excess_pyc_file in excess_pyc_files:
            full_path = os.path.join(root, excess_pyc_file)
            if show_logs:
                log.debug("Removing old PYC file: %s", full_path)
            try:
                os.remove(full_path)
            except OSError:
                # BUG FIX: the original passed ONE tuple to a two-placeholder
                # %-format ("not enough arguments" at log time); pass the
                # args separately.  Bare except also narrowed to OSError.
                log.error("Couldn't remove %s: %s", full_path, traceback.format_exc())

        for dir_name in dirs:
            full_path = os.path.join(root, dir_name)
            if len(os.listdir(full_path)) == 0:
                try:
                    os.rmdir(full_path)
                except OSError:
                    log.error("Couldn't remove empty directory %s: %s", full_path, traceback.format_exc())
示例13: test_in
def test_in(self):
    """The 'in' filter accepts exactly the configured membership values."""
    members = ['a', 'b', 'c']
    # Renamed from `filter` to avoid shadowing the builtin.
    predicate = self.get_filter('in', members)
    # Every listed value must pass ...
    for value in members:
        self.assertTrue(predicate({'foo': value}))
    # ... and anything outside the list must not.
    for value in ['d', 'e', 'f']:
        self.assertFalse(predicate({'foo': value}))
示例14: get_revert_migrations
def get_revert_migrations(self, current_migrations, backup_migrations):
    """For each app, determine the migration to revert to so the current
    state can be rolled back to the backup state."""
    # Newest-first iterators over the current migrations; blank lines are
    # dropped by filter(None, ...).  tee() yields two iterators over the
    # same parsed sequence: one to walk, one to build the membership set.
    current_migrations, all_migrations = itertools.tee(reversed(list(map(
        Migration,
        filter(None, current_migrations.splitlines()),
    ))))
    all_migrations = utils.OrderedSet(all_migrations)
    backup_migrations = reversed(list(map(
        Migration,
        filter(None, backup_migrations.splitlines()),
    )))

    # app name -> migration to revert that app to.
    revert_migrations = collections.OrderedDict()

    while True:
        # Advance to the next backup migration that also exists in the
        # current state (None once the backup list is exhausted).
        while True:
            backup_migration = next(backup_migrations, None)
            if not backup_migration or backup_migration in all_migrations:
                break
        # Walk current migrations up to that shared point; each migration
        # passed on the way exists only in the current state, so its app
        # must be reverted to the migration's parent.
        for current_migration in current_migrations:
            if current_migration == backup_migration:
                break
            revert_migration = self._get_parent_migration(
                current_migration,
                migrations=all_migrations,
            )
            revert_migrations[current_migration.app] = revert_migration
        if backup_migration is None:
            return revert_migrations.values()
示例15: label_and_sentence
def label_and_sentence(line, clean_fn):
    """Split a TSV line into (label, token list).

    The label is the first field; the remaining fields are each cleaned
    with *clean_fn* (empty results dropped), joined, and re-tokenized on
    whitespace.  Returns ``(label, tokens)``.
    """
    label_text = re.split(TSVSeqLabelReader.SPLIT_ON, line)
    label = label_text[0]
    text = label_text[1:]
    # Clean each field, dropping any that become empty after cleaning.
    text = ' '.join(list(filter(lambda s: len(s) != 0, [clean_fn(w) for w in text])))
    # BUG FIX: raw string for the regex -- '\s+' was an invalid escape
    # sequence (DeprecationWarning, SyntaxError on newer Pythons).
    text = list(filter(lambda s: len(s) != 0, re.split(r'\s+', text)))
    return label, text