本文整理汇总了Python中filter.Filter类的典型用法代码示例。如果您正苦于以下问题:Python Filter类的具体用法?Python Filter怎么用?Python Filter使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Filter类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: Initialize
def Initialize(credentials=None, opt_url=None):
    """Initialize the EE library.

    If this hasn't been called by the time any object constructor is used,
    it will be called then. Calling it a second time with a different URL
    does not un-initialize previously loaded Algorithms; it overwrites them
    and points them at the alternate servers.

    Args:
      credentials: OAuth2 credentials.
      opt_url: The base url for the EarthEngine REST API to connect to.
    """
    api_url = opt_url + '/api' if opt_url else None
    data.initialize(credentials, api_url, opt_url)
    # Wire up the dynamically loaded server functions on every class that
    # wants them, in the same order as before.
    for klass in (ApiFunction, Element, Image, Feature, Collection,
                  ImageCollection, FeatureCollection, Filter, Geometry,
                  List, Number, String, Date, Dictionary):
        klass.initialize()
    _InitializeGeneratedClasses()
    _InitializeUnboundMethods()
示例2: run
def run(self):
    """Scrape all requested result pages concurrently and return results."""
    stopwatch = Timer()
    stopwatch.start()
    # One search URL per requested result page (10 results per page).
    page_filter = Filter(self.filter, self.startpage, self.maxpages)
    last_page = self.startpage + self.maxpages
    search_urls = [page_filter.create_filter_url((page - 1) * 10)
                   for page in range(self.startpage, last_page)]
    # Fan the URL fetches out over a small pool of worker threads.
    pool = ThreadPool(4)
    organisaties = pool.map(unwrap_self_process_search,
                            zip([self] * len(search_urls), search_urls))
    pool.close()
    pool.join()
    consolidated = self.consolidate(organisaties)
    stopwatch.stop()
    return {
        "organisaties": consolidated,
        "stats": {
            "exectime": stopwatch.exectime(),
            "matches": {
                "total": str(self.search_results["results"]),
                "pages": str(self.search_results["pages"]),
            },
            "read": {
                "page_from": str(self.startpage),
                "page_to": str(self.maxpages),
            },
        },
    }
示例3: __getPlaintext
def __getPlaintext(self):
    """Extract and normalize plaintext from the PDF; raise if too short."""
    # Pull raw text out of the PDF, skipping the guessed front matter.
    paper = PdfLib(self.wd + os.sep + self.filename)
    start_page = self.__guessDocBegining(self.filename)
    raw_text = paper.pdf2txt(start_page, "max")
    # Normalize via the fluent Filter cleanup chain.
    cleaner = Filter(asString=raw_text)
    plaintext = (
        cleaner.substitutions()
        .oneCharPerLine()
        .normalizeCaracters()
        .lower()
        .uselessCharacters()
        .multipleDots()
        .listEnum()
        .digits()
        .shortTokens()
        .multipleSpaces()
        .getResult()
    )
    # Experience shows that fewer than 6000 characters is mostly waste.
    if len(plaintext) <= 6000:
        raise Exception(u"Document is too short.")
    return {
        self.langKey: self.__guessLang(plaintext),
        self.plaintextKey: plaintext,
        self.filenameKey: self.filename,
    }
示例4: Initialize
def Initialize(credentials="persistent", opt_url=None):
    """Initialize the EE library.

    If this hasn't been called by the time any object constructor is used,
    it will be called then. Calling it a second time with a different URL
    does not un-initialize previously loaded Algorithms; it overwrites them
    and points them at the alternate servers.

    Args:
      credentials: OAuth2 credentials. 'persistent' (default) means use
        credentials already stored in the filesystem, or raise an
        explanatory exception guiding the user to create those credentials.
      opt_url: The base url for the EarthEngine REST API to connect to.
    """
    if credentials == "persistent":
        credentials = _GetPersistentCredentials()
    api_url = opt_url + "/api" if opt_url else None
    data.initialize(credentials, api_url, opt_url)
    # Wire up the dynamically loaded server functions on every class that
    # wants them, in the same order as before.
    for klass in (ApiFunction, Element, Image, Feature, Collection,
                  ImageCollection, FeatureCollection, Filter, Geometry,
                  List, Number, String, Date, Dictionary, Terrain):
        klass.initialize()
    _InitializeGeneratedClasses()
    _InitializeUnboundMethods()
示例5: __init__
def __init__(self, jail):
    """Set up a polling-based filter bound to *jail*."""
    # Delegate the common filter setup to the base class.
    Filter.__init__(self, jail)
    # Per-file bookkeeping used by the polling loop.
    self.__modified = False
    self.__lastModTime = {}
    self.__file404Cnt = {}
    logSys.info("Created FilterPoll")
示例6: addLogPath
def addLogPath(self, path):
    """Start monitoring *path*, refusing duplicates."""
    if self.containsLogPath(path):
        logSys.error(path + " already exists")
        return
    # Register with the file monitor before recording the path.
    self.monitor.watch_file(path, self.callback)
    Filter.addLogPath(self, path)
    logSys.info("Added logfile = %s" % path)
示例7: delLogPath
def delLogPath(self, path):
    """Stop monitoring *path*, ignoring unknown paths with an error log."""
    if not self.containsLogPath(path):
        logSys.error(path + " is not monitored")
        return
    # Unregister from the file monitor before forgetting the path.
    self.monitor.stop_watch(path)
    Filter.delLogPath(self, path)
    logSys.info("Removed logfile = %s" % path)
示例8: _generateArtifactList
def _logArtifactList(artifactList):
    """Debug-log every artifact as 'gat:version' for each priority bucket."""
    for gat in artifactList:
        priorityList = artifactList[gat]
        for priority in priorityList:
            for version in priorityList[priority]:
                logging.debug(" %s:%s", gat, version)


def _generateArtifactList(options):
    """Build and filter the artifact list described by *options*.

    Returns the filtered artifact list (gat -> priority -> versions).
    """
    # Load configuration.
    logging.info("Loading configuration...")
    config = Configuration()
    config.load(options)
    # Build the raw list.
    logging.info("Building artifact list...")
    artifactList = ArtifactListBuilder(config).buildList()
    logging.debug("Generated list contents:")
    _logArtifactList(artifactList)
    # Filter the list; the identical nested logging loop that used to be
    # duplicated here is now shared via _logArtifactList.
    logging.info("Filtering artifact list...")
    artifactList = Filter(config).filter(artifactList)
    logging.debug("Filtered list contents:")
    _logArtifactList(artifactList)
    logging.info("Artifact list generation done")
    return artifactList
示例9: config_analysis
def config_analysis(alg_list):
    """Append an event filter requiring exactly one status-3 tau."""
    tau_filter = Filter('FilterTauEvent')
    tau_filter.cut_nTau = '== 1'
    tau_filter.cut_tauStatus = ' == 3'
    alg_list.append(tau_filter)
示例10: __init__
def __init__(self, **options):
    """Initialize the filter and require at least one predicate."""
    # Let the base Filter consume the shared options first.
    Filter.__init__(self, **options)
    # Fall back to the built-in predicate list when none were supplied.
    if not self.predicates:
        self.predicates = self.build_predicate_list()
    if len(self.predicates) == 0:
        raise NotImplementedError("Must provide at least one predicate")
    self.normalize_predicates()
    self.normalize_counted_features()
    self.setup_counts()
示例11: get_entry
def get_entry(self, dn, attributes=None):
    """Fetch the entry at *dn*; default to requesting every attribute."""
    requested = attributes or ['*']
    # Match any object class at the base scope.
    query = Filter()
    query.add_equal('objectClass', '*')
    results = self.search(dn, query.build(), ldap.SCOPE_BASE, requested)
    return results[0][1]
示例12: foveation_sequence
def foveation_sequence():
    """Plot a 5-row foveation sequence for KITTI multi-view index 15.

    Shows every other frame of the sequence with the fovea rectangle
    outlined in white.
    """
    frame_down_factor = 1
    mem_down_factor = 2  # relative to the frame down factor
    coarse_down_factor = 2  # for the coarse comparison
    fs = 80
    fovea_shape = (fs, fs)
    full_values = 128
    # Floor division: this is integer math; '/' would produce a float under
    # Python 3 and break the slice below and the subplot index.
    values = full_values // 2 ** frame_down_factor
    index = 15
    n_frames = 10
    source = KittiMultiViewSource(index, test=False, n_frames=n_frames)
    full_shape = source.frame_ten[0].shape
    frame_ten = [downsample(source.frame_ten[0], frame_down_factor),
                 downsample(source.frame_ten[1], frame_down_factor)]
    frame_shape = frame_ten[0].shape
    # Upsample the average disparity prior and crop it to the frame extent.
    average_disp = source.get_average_disparity()
    average_disp = cv2.pyrUp(average_disp)[:frame_shape[0], :frame_shape[1] - values]
    filter = Filter(average_disp, frame_down_factor, mem_down_factor,
                    fovea_shape, frame_shape, values, verbose=False,
                    memory_length=0)
    plt.figure()
    import matplotlib.cm as cm
    for i in range(0, 10, 2):
        frame = [downsample(source.frame_sequence[i][0], frame_down_factor),
                 downsample(source.frame_sequence[i][1], frame_down_factor)]
        filter_disp, fovea_corner = filter.process_frame(None, frame)
        edge = 5
        # '//' keeps the subplot index an int (plt.subplot rejects floats
        # on Python 3); identical to '/' under Python 2 for these ints.
        plt.subplot(5, 1, i // 2 + 1)
        plt.imshow(trim(frame[0], values, edge), cmap=cm.Greys_r)
        # Only the first fovea corner is drawn.
        fovea_corner = fovea_corner[0]
        fi, fj = fovea_corner
        fm = fs
        fn = fs
        # Outline the fovea rectangle in white.
        plt.plot([fj, fj + fn, fj + fn, fj, fj],
                 [fi, fi, fi + fm, fi + fm, fi], 'white')
        remove_axes()
    plt.tight_layout(-1)
    plt.show()
示例13: JobFilter
class JobFilter(object):
    """Collects today's job postings from *sites* and filters them.

    Jobs are first filtered by title, then the surviving postings are
    fetched and filtered by content.
    """

    def __init__(self, sites):
        self._sites = sites
        self._jobs = []
        self._bad_titled_jobs = []
        self._bad_content_jobs = []
        self._filter = Filter()

    def get_todays_jobs(self):
        """Run the full pipeline and return (good, bad_content, bad_title)."""
        self._get_todays_links()
        self._filter_on_titles()
        self._get_postings_content()
        self._filter_on_content()
        return (self._jobs, self._bad_content_jobs, self._bad_titled_jobs)

    def _get_todays_links(self):
        # Accumulate jobs across ALL sites; resetting the list inside the
        # per-site loop would silently drop every site but the last.
        jobs = []
        for site in self._sites:
            list_soup = self._beautiful_soupify_url(site.get_job_listing_url())
            links = site.get_todays_links(list_soup)
            for link in links:
                jobs.append(Job(link.string, link['href'], site))
        self._jobs = jobs

    def _filter_on_titles(self):
        # Partition jobs by whether their title passes the filter.
        get_posting_jobs = []
        for job in self._jobs:
            if self._filter.title(job.get_title()):
                get_posting_jobs.append(job)
            else:
                self._bad_titled_jobs.append(job)
        self._jobs = get_posting_jobs

    def _get_postings_content(self):
        # Fetch and attach the posting page for each surviving job.
        for job in self._jobs:
            content_soup = self._beautiful_soupify_url(job.get_link())
            job.set_content(content_soup)

    def _filter_on_content(self):
        # Partition jobs by whether their content passes the filter.
        good_jobs = []
        for job in self._jobs:
            if self._filter.content(job.get_content()):
                good_jobs.append(job)
            else:
                self._bad_content_jobs.append(job)
        self._jobs = good_jobs

    def _beautiful_soupify_url(self, url):
        html = urllib2.urlopen(url).read()
        return BeautifulSoup(html)
示例14: filter
def filter(self, q):
    """Apply the query string *q* to this object's sites.

    Raises:
        TypeError: if *q* is not a string.
    """
    if not isinstance(q, str):
        message = "filter: invalid query argument. You must use a string."
        self.logger.error(message)
        raise TypeError(message)
    query_filter = Filter(self.sites, q)
    query_filter.apply()
示例15: __init__
def __init__(self):
    """Configure the 'Outlier' filter and its tunable parameters."""
    Filter.__init__(self)
    self._name = "Outlier"
    # No data wired up until the filter is connected.
    self._input = None
    self._dimensions = None
    self._output = None
    # cutoff: the neighborhood average is compared to the current datum;
    # a datum at or above this percentile (default 95th) is an outlier.
    # Range-checked float in [0, 1].
    self.set_parameter_floatrange("cutoff", 0.95, 0.0, 1.0)
    # inclusive: whether the neighborhood average includes the datum itself.
    self.set_parameter("inclusive", False, bool)