本文整理匯總了Python中filter函數的典型用法代碼示例。如果您正苦於以下問題:Python filter函數的具體用法?Python filter怎麽用?Python filter使用的例子?那麽, 這裏精選的函數代碼示例或許可以為您提供幫助。
在下文中一共展示了filter函數的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: run
def run(self, entity, discourse, syntax):
    """Produce a referring expression (a name) for *entity*.

    :param entity: key into self.dbpedia (assumes each entry maps
        'givenNames'/'surnames'/'birthNames' to lists of name strings --
        TODO confirm against the loader).
    :param discourse: 'new' selects a full name; anything else a surname.
    :param syntax: passed through to self.realize.
    :return: (prep.get_label(name, entry), name) tuple.
    """
    props = self.dbpedia[entity]
    # The original selected the first minimum-length string via
    # filter(lambda x: len(x) == min(map(len, seq)), seq)[0], which both
    # recomputes min() per element (O(n^2)) and breaks under Python 3
    # (filter objects are not subscriptable). min(seq, key=len) picks the
    # same element (first of minimal length) in O(n).
    if discourse == 'new':
        if len(props['givenNames']) > 0:
            first = min(props['givenNames'], key=len)
            last = min(props['surnames'], key=len)
            name = str(first).strip() + ' ' + str(last).strip()
        else:
            name = str(min(props['birthNames'], key=len)).strip()
    else:
        if len(props['surnames']) > 0:
            name = str(min(props['surnames'], key=len)).strip()
        else:
            # No surname recorded: use the last token of the shortest birth name.
            name = str(min(props['birthNames'], key=len)).strip().split()[-1]
    name = self.realize(name, syntax)
    return prep.get_label(name, props), name
示例2: _EXPERIMENTAL_VERBAL_PREDICATE_FEATURE_Infinitive
def _EXPERIMENTAL_VERBAL_PREDICATE_FEATURE_Infinitive(self):
    """Collect infinitive constructions headed by this predicate.

    Walks clausal-complement (xcomp) children that carry a TO auxiliary
    and builds "<head> to <complement>" strings, chaining recursively
    through nested infinitives.

    :return: ([words, ...], [token ids]) on success, or (False, False)
        when an xcomp child lacks a TO auxiliary.
    """
    # List comprehensions instead of filter(): under Python 3 a filter
    # object is always truthy (so `if not to_children` never fired) and
    # has no len()/indexing; lists behave identically on both versions.
    xcomp_children = [c for c in self.children
                      if c.get_parent_relation() in clausal_complement]
    ret = ([], [])
    for xcomp_child in xcomp_children:
        aux_children = [c for c in xcomp_child.children
                        if c.get_parent_relation() in aux_dependencies]
        to_children = [c for c in aux_children if c.pos == TO]
        if not to_children:
            return (False, False)
        assert len(to_children) == 1
        to_child = to_children[0]
        words = " ".join([self.word, to_child.word, xcomp_child.word])
        ret[1].extend([self.id, to_child.id, xcomp_child.id])
        # Chaining: fold nested infinitives of the complement into the phrase.
        childRes = xcomp_child._VERBAL_PREDICATE_FEATURE_Infinitive()
        if childRes[0]:
            words += " " + " ".join(childRes[0][0].split(" ")[1:])
        ret[0].append(words)
    return ret
示例3: calculateSparseDictCOO
def calculateSparseDictCOO(data_set, data_label_hash, jump=1, valid_flag=False):
    """Build COO-style (rows, cols, data, labels) tuples for train/validation.

    :param data_set: iterable of (doc_id, term_id, count) triples
        (values may be strings; they are int()-converted).
    :param data_label_hash: mapping doc_id -> label.
    :param jump: keep only documents whose id is a multiple of `jump`.
    :param valid_flag: when True, carve every `validation_perc`-th kept
        document out into a validation split.
    :return: (train, valid), each being (translate(rows), cols, data, labels).
    """
    row, col, data = [], [], []
    row_valid, col_valid, data_valid = [], [], []
    # Sorted, de-duplicated document ids. The original filtered a *set*,
    # whose iteration order is arbitrary, making the validation split
    # nondeterministic; a sorted list is deterministic. It also indexed
    # the result of filter(), which fails under Python 3.
    doc_ids = sorted({int(r[0]) for r in data_set})
    base_ids_list = [i for i in doc_ids if i % jump == 0]
    train_ids = base_ids_list
    valid_ids = []
    if valid_flag:
        # Indices with i % validation_perc == 0 are exactly a stride slice.
        valid_ids = base_ids_list[::validation_perc]
        train_ids = sorted(set(base_ids_list) - set(valid_ids))
    labels = [int(data_label_hash[tid]) for tid in train_ids]
    labels_valid = [int(data_label_hash[vid]) for vid in valid_ids]
    # Sets give O(1) membership; the original tested `in` against lists,
    # which is O(n) per row.
    train_set, valid_set = set(train_ids), set(valid_ids)
    for entry in data_set:
        doc_id = int(entry[0])
        if doc_id in train_set:
            row.append(doc_id)
            col.append(int(entry[1]) - 1)
            data.append(int(entry[2]))
        elif doc_id in valid_set:
            row_valid.append(doc_id)
            col_valid.append(int(entry[1]) - 1)
            data_valid.append(int(entry[2]))
    train = translate(row), col, data, labels
    valid = translate(row_valid), col_valid, data_valid, labels_valid
    return train, valid
示例4: _all_commands
def _all_commands(self):
path = builtins.__xonsh_env__.get('PATH', [])
# did PATH change?
path_hash = hash(tuple(path))
cache_valid = path_hash == self._path_checksum
self._path_checksum = path_hash
# did aliases change?
al_hash = hash(tuple(sorted(builtins.aliases.keys())))
self._alias_checksum = al_hash
cache_valid = cache_valid and al_hash == self._alias_checksum
pm = self._path_mtime
# did the contents of any directory in PATH change?
for d in filter(os.path.isdir, path):
m = os.stat(d).st_mtime
if m > pm:
pm = m
cache_valid = False
self._path_mtime = pm
if cache_valid:
return self._cmds_cache
allcmds = set()
for d in filter(os.path.isdir, path):
allcmds |= set(os.listdir(d))
allcmds |= set(builtins.aliases.keys())
self._cmds_cache = frozenset(allcmds)
return self._cmds_cache
示例5: fetch_production
def fetch_production(country_code='SE', session=None):
    """Fetch the latest production mix for *country_code* from Statnett.

    :param country_code: two-letter country code used in the API's
        translation ids (default 'SE').
    :param session: optional requests session to reuse.
    :return: dict with countryCode, production breakdown, storage,
        source and datetime keys.
    """
    r = session or requests.session()
    timestamp = arrow.now().timestamp * 1000
    url = 'http://driftsdata.statnett.no/restapi/ProductionConsumption/GetLatestDetailedOverview?timestamp=%d' % timestamp
    response = r.get(url)
    obj = response.json()

    def _extract(kind, rows):
        # First row whose translation id matches this production kind.
        # A list comprehension (not lazy filter()) keeps this Py3-safe and
        # preserves the original IndexError when the row is missing.
        title = 'ProductionConsumption.%s%sDesc' % (kind, country_code)
        matches = [x for x in rows if x['titleTranslationId'] == title]
        # Values use NBSP as thousands separator; strip it before float().
        return float(matches[0]['value'].replace(u'\xa0', ''))

    data = {
        'countryCode': country_code,
        'production': {
            'nuclear': _extract('Nuclear', obj['NuclearData']),
            'hydro': _extract('Hydro', obj['HydroData']),
            'wind': _extract('Wind', obj['WindData']),
            # Thermal and unspecified generation are lumped as 'unknown'.
            'unknown': (_extract('Thermal', obj['ThermalData']) +
                        _extract('NotSpecified', obj['NotSpecifiedData'])),
        },
        'storage': {},
        # NOTE(review): 'stattnet' looks like a typo for 'statnett' --
        # confirm whether downstream consumers match this exact string
        # before changing it.
        'source': 'driftsdata.stattnet.no',
    }
    data['datetime'] = arrow.get(obj['MeasuredAt'] / 1000).datetime
    return data
示例6: test_search_filter_expired
def test_search_filter_expired(self):
    """Account.search() with expire_start / expire_stop arguments.

    For each parameter combination, the result must be a superset of the
    expected account ids and disjoint from the opposite partition.
    """
    # Renamed from `all` so the builtin all() is not shadowed.
    all_ids = _set_of_ids(self._accounts)
    non_expired = _set_of_ids(filter(nonexpired_filter, self._accounts))
    expired = _set_of_ids(filter(expired_filter, self._accounts))
    # Test criterias: both partitions must be non-empty for the test to mean anything.
    self.assertGreaterEqual(len(non_expired), 1)
    self.assertGreaterEqual(len(expired), 1)
    # Tests: search params, must match
    for params, match_set, fail_set in (
            ({'expire_start': None, 'expire_stop': None,
              'owner_id': self.db_tools.get_initial_group_id()},
             all_ids, set()),
            ({'expire_start': '[:now]', 'expire_stop': None,
              'owner_id': self.db_tools.get_initial_group_id()},
             non_expired, expired),
            ({'expire_start': None, 'expire_stop': '[:now]',
              'owner_id': self.db_tools.get_initial_group_id()},
             expired, non_expired),):
        result = _set_of_ids(self._ac.search(**params))
        self.assertGreaterEqual(len(result), len(match_set))
        self.assertTrue(result.issuperset(match_set))
        self.assertSetEqual(result.intersection(fail_set), set())
示例7: main
def main(self, argv):
    """
    Discover and load all chipsec command-line extension modules.

    Scans chipsec/utilcmd (inside library.zip when running as a frozen
    executable, otherwise on disk) and merges each module's `commands`
    dict into self.commands.

    NOTE(review): Python 2-only `except/raise ImportError, msg` syntax.
    """
    global _cs
    #import traceback
    # Build the candidate module-name list from the appropriate location.
    if self.CHIPSEC_LOADED_AS_EXE:
        import zipfile
        myzip = zipfile.ZipFile("library.zip")
        cmds = map( self.map_modname_zip, filter(self.f_mod_zip, myzip.namelist()) )
    else:
        #traceback.print_stack()
        # Locate the installed chipsec package and list utilcmd modules.
        mydir = imp.find_module('chipsec')[1]
        cmds_dir = os.path.join(mydir,os.path.join("utilcmd"))
        cmds = map( self.map_modname, filter(self.f_mod, os.listdir(cmds_dir)) )
    if logger().VERBOSE:
        logger().log( '[CHIPSEC] Loaded command-line extensions:' )
        logger().log( '   %s' % cmds )
    module = None
    for cmd in cmds:
        try:
            #exec 'from chipsec.utilcmd.' + cmd + ' import *'
            # Import each extension and register its `commands` table.
            cmd_path = 'chipsec.utilcmd.' + cmd
            module = importlib.import_module( cmd_path )
            cu = getattr(module, 'commands')
            self.commands.update(cu)
        except ImportError, msg:
            # Log which extension failed, then propagate the error.
            logger().error( "Couldn't import util command extension '%s'" % cmd )
            raise ImportError, msg
示例8: remove_chain
def remove_chain(self, name, wrap=True):
    """Remove named chain.

    This removal "cascades": all rules in the chain are removed, as are
    all rules in other chains that jump to it.
    If the chain is not found, this is merely logged.
    """
    name = get_chain_name(name, wrap)
    chain_set = self._select_chain_set(wrap)
    if name not in chain_set:
        LOG.warn(_('Attempted to remove chain %s which does not exist'),
                 name)
        return
    chain_set.remove(name)
    # List comprehensions instead of filter(): under Python 3 a lazy
    # filter object stored on self.rules would be exhausted after one
    # iteration; a list keeps self.rules repeatedly iterable (and is
    # byte-identical behavior on Python 2).
    self.rules = [r for r in self.rules if r.chain != name]
    # Also drop any rule in other chains that jumps into the removed chain.
    if wrap:
        jump_snippet = '-j %s-%s' % (binary_name, name)
    else:
        jump_snippet = '-j %s' % (name,)
    self.rules = [r for r in self.rules if jump_snippet not in r.rule]
示例9: get_vw_nvalues
def get_vw_nvalues(model_run_uuid):
    """
    Given a model run uuid that contains the lookup table and ESRI .asc with
    vegetation codes, return an ascii file that has the n-values properly
    assigned.
    """
    vwc = default_vw_client()
    records = vwc.dataset_search(model_run_uuid=model_run_uuid).records
    downloads = [r['downloads'][0] for r in records]

    def _url_for(fmt):
        # Each download dict is assumed to have a single key naming its
        # format (the original popped the only key) -- TODO confirm.
        # List comprehension + pop() keeps the original "last match wins"
        # behavior and works on Python 3, where filter(...).pop() and
        # dict.keys().pop() both fail.
        matches = [d for d in downloads if list(d.keys())[-1] == fmt]
        return matches.pop()[fmt]

    asc_url = _url_for('ascii')
    xlsx_url = _url_for('xlsx')
    # Download both inputs to uniquely-named temp files.
    asc_path = 'tmp_' + str(uuid4()) + '.asc'
    vwc.download(asc_url, asc_path)
    xlsx_path = 'tmp_' + str(uuid4()) + '.xlsx'
    vwc.download(xlsx_url, xlsx_path)
    asc_nvals = vegcode_to_nvalue(asc_path, xlsx_path)
    # Clean up the temp files before returning.
    os.remove(asc_path)
    os.remove(xlsx_path)
    return asc_nvals
示例10: collectintargz
def collectintargz(target, source, env):
    """ Puts all source files into a tar.gz file.

    NOTE(review): SCons RPM-packaging code using Python 2 idioms (the
    `string` module, `except KeyError, e`). The function appears
    truncated in this excerpt: the tarball name is computed but the
    archive creation presumably follows in the full file.
    """
    # the rpm tool depends on a source package; until this is changed,
    # this hack needs to be here that tries to pack all sources in.
    sources = env.FindSourceFiles()
    # filter out the target we are building the source list for.
    #sources = [s for s in sources if not (s in target)]
    sources = filter(lambda s, t=target: not (s in t), sources)
    # find the .spec file for rpm and add it since it is not necessarily found
    # by the FindSourceFiles function.
    #sources.extend( [s for s in source if str(s).rfind('.spec')!=-1] )
    spec_file = lambda s: string.rfind(str(s), '.spec') != -1
    sources.extend( filter(spec_file, source) )
    # as the source contains the url of the source package this rpm package
    # is built from, we extract the target name
    #tarball = (str(target[0])+".tar.gz").replace('.rpm', '')
    tarball = string.replace(str(target[0])+".tar.gz", '.rpm', '')
    try:
        #tarball = env['SOURCE_URL'].split('/')[-1]
        # Prefer the basename of SOURCE_URL as the tarball name when set.
        tarball = string.split(env['SOURCE_URL'], '/')[-1]
    except KeyError, e:
        raise SCons.Errors.UserError( "Missing PackageTag '%s' for RPM packager" % e.args[0] )
示例11: getPrefLabel
def getPrefLabel(self):
    """Select the preferred label string for this concept's atoms.

    When loading on CUIs, prefer the unique label, then the unique
    ISPREF=Y atom, then ISPREF=Y with STT=PF, then any STT=PF atom,
    falling back to the first atom. Otherwise rank atoms by MRRANK, or
    by a 'P'-type TTY when no rank data exists.

    :raises AttributeError: when no preferred label can be selected.
    """
    # List comprehensions replace Py2 filter(): under Python 3 a filter
    # object supports neither len() nor indexing.
    if self.load_on_cuis:
        if len(self.atoms) == 1:
            return self.atoms[0][MRCONSO_STR]
        labels = set(x[MRCONSO_STR] for x in self.atoms)
        if len(labels) == 1:
            return labels.pop()
        # if there's only one ISPREF=Y then that one.
        is_pref_atoms = [x for x in self.atoms if x[MRCONSO_ISPREF] == 'Y']
        if len(is_pref_atoms) == 1:
            return is_pref_atoms[0][MRCONSO_STR]
        elif len(is_pref_atoms) > 1:
            # Break the tie with STT=PF among the ISPREF=Y atoms.
            is_pref_atoms = [x for x in is_pref_atoms if x[MRCONSO_STT] == 'PF']
            if len(is_pref_atoms) > 0:
                return is_pref_atoms[0][MRCONSO_STR]
        is_pref_atoms = [x for x in self.atoms if x[MRCONSO_STT] == 'PF']
        if len(is_pref_atoms) == 1:
            return is_pref_atoms[0][MRCONSO_STR]
        return self.atoms[0][MRCONSO_STR]
    else:
        # if ISPREF=Y is not usable we look into MRRANK.
        if len(self.rank) > 0:
            sort_key = \
                lambda x: int(self.rank[self.rank_by_tty[x[MRCONSO_TTY]][0]][MRRANK_RANK])
            mmrank_sorted_atoms = sorted(self.atoms, key=sort_key, reverse=True)
            return mmrank_sorted_atoms[0][MRCONSO_STR]
        # there is no rank to use
        else:
            pref_atom = [x for x in self.atoms if 'P' in x[MRCONSO_TTY]]
            if len(pref_atom) == 1:
                return pref_atom[0][MRCONSO_STR]
        # Call form instead of Py2 `raise AttributeError, "..."` --
        # identical on Python 2 and valid on Python 3.
        raise AttributeError("Unable to select pref label")
示例12: run_filters
def run_filters(self, check_removed=True):
    """
    Run all the filters in self.filters.

    :param check_removed: (bool) Skip a station's remaining filters once a
        previous filter has added it to removed_station_ids.
    :return: None
    """
    # Check if filters have been initialized before running.
    if not bool(self.filters):
        sys.exit("ERROR run_filters: no filters have been initialized.")
    # Work from inside the time-series folder; restore the cwd at the end.
    o_dir = os.getcwd()
    os.chdir(self.ts_path)
    # Iterate through all the stations and apply filters
    for i, stat in enumerate(self.stations):
        print('Processing station: %s' % stat)
        if self.iter_time_seris:  # Only open and process time series if necessary
            self.ts_df = pd.read_csv('./%s/time_series.csv' % stat, index_col='Timestamp')
            self.ts_df['date'] = [d[0:10] for d in self.ts_df.index]
            self.ts_df['hour'] = [d[-8:-6] for d in self.ts_df.index]
        # Apply all the configured filters. Loop variable renamed so the
        # builtin filter() is no longer shadowed.
        for station_filter in self.filters:
            # TODO setting check_removed to False will cause the OneClass_SVM filtering to break due to empty features (Andrew 16/07/25)
            if check_removed and stat in self.removed_station_ids:
                break
            station_filter(str(stat))
    # BUG FIX: the original wrapped a side-effect list comprehension of
    # remove() calls in a single try/except KeyError, so one missing id
    # aborted all remaining removals. discard() skips missing ids while
    # removing the rest (assumes cleaned_station_ids is a set -- the
    # original caught KeyError, which is set semantics).
    for s in self.removed_station_ids:
        self.cleaned_station_ids.discard(s)
    os.chdir(o_dir)
示例13: get_exportable_members
def get_exportable_members( self, sort=None ):
    """Return the internal declarations that should\\could be exported."""
    #TODO: obviously this function should be shorter. Almost all logic of this class
    # should be spread between decl_wrapper classes
    # Start from public members that are neither ignored nor unexportable.
    candidates = [m for m in self.public_members
                  if m.ignore == False and m.exportable]
    # All protected call-definitions are kept at this stage.
    for decl in self.protected_members:
        if isinstance(decl, declarations.calldef_t):
            candidates.append(decl)
    # Of the private members, only pure virtual member functions are kept;
    # non-overridable, non-pure private virtuals should not be exported.
    def _is_pure_virtual(decl):
        return isinstance(decl, declarations.member_function_t) \
               and decl.virtuality == declarations.VIRTUALITY_TYPES.PURE_VIRTUAL
    candidates.extend(d for d in self.private_members if _is_pure_virtual(d))

    def _is_exportable(decl):
        # Non-public member operators - `Py++` does not support them right now.
        if isinstance(decl, declarations.member_operator_t) \
           and decl.access_type != declarations.ACCESS_TYPES.PUBLIC:
            return False
        # Compiler-generated (artificial) constructors are dropped.
        if isinstance(decl, declarations.constructor_t) and decl.is_artificial:
            return False
        if decl.ignore == True or decl.exportable == False:
            return False
        return True
    #-#if declarations.has_destructor( self ) \
    #-#    and not declarations.has_public_destructor( self ):
    exportable = [d for d in candidates if _is_exportable(d)]
    return sort(exportable) if sort else exportable
示例14: find_max_match
def find_max_match(self, options_list, el_value):
    """
    Finds the Longest Word Trimmed Match for selecting text in options field.

    @param options_list: The list of options in the options field (each
        option exposes a ``text`` attribute).
    @param el_value: The text to be matched in the options.
    @return: The option sharing the most words (of length > 1) with
        el_value, or None when nothing matches.
    """
    # Materialize as a list. The original kept a lazy filter() object,
    # which under Python 3 is exhausted by the first intersection() call,
    # silently making every later option score zero.
    # Words of length 1 (e.g. hyphens) are ignored.
    target_words = [w for w in el_value.split() if len(w) > 1]
    best_option = None
    best_count = 0
    for option in options_list:
        option_words = [w for w in option.text.split() if len(w) > 1]
        # Number of distinct words shared with the target text.
        shared = set(target_words).intersection(option_words)
        if len(shared) > best_count:
            best_option = option
            best_count = len(shared)
    return best_option
示例15: getWeekdayMeals
def getWeekdayMeals(day_id):
    "Takes an int in range [0-4] and returns a dict of all meals that day."
    def _clean(cells):
        # Non-empty cell texts -> split comma-separated lists -> flatten
        # -> ascii-encode (dropping non-ascii) and strip each item.
        # NOTE(review): encode() yields bytes on Python 3; this mirrors
        # the original Python 2 pipeline exactly.
        texts = [c.text for c in cells if c.text]
        items = [part for text in texts for part in text.split(', ')]
        return [item.encode('ascii', 'ignore').strip() for item in items]
    # One shared pipeline instead of the original's triplicated
    # filter/map chain (which under Python 3 would also leave lazy map
    # objects in the returned dict).
    meals_dict = {}
    for meal in ('breakfast', 'lunch', 'dinner'):
        cells = tables[day_id].findAll('td', class_=meal)
        meals_dict[meal] = _clean(cells)
    return meals_dict