本文整理汇总了Python中util.elapsed函数的典型用法代码示例。如果您正苦于以下问题:Python elapsed函数的具体用法?Python elapsed怎么用?Python elapsed使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了elapsed函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: gets_a_pdf
def gets_a_pdf(link, base_url):
    """Return True if following *link* (resolved against *base_url*) yields a PDF.

    Purchase links are rejected immediately. Otherwise the target is fetched
    (streaming, 5s timeout, TLS verification disabled) and judged first by its
    HTTP headers, then by publisher-specific HTML heuristics.
    """
    if is_purchase_link(link):
        return False

    absolute_url = get_link_target(link, base_url)
    start = time()
    # stream=True so we can decide from the headers without downloading the body;
    # closing() guarantees the connection is released.
    with closing(requests.get(absolute_url, stream=True, timeout=5, verify=False)) as r:
        if resp_is_pdf(r):
            print u"http header says this is a PDF. took {}s from {}".format(elapsed(start), absolute_url)
            return True

        # some publishers send a pdf back wrapped in an HTML page using frames.
        # this is where we detect that, using each publisher's idiosyncratic templates.
        # we only check based on a whitelist of publishers, because downloading this whole
        # page (r.content) is expensive to do for everyone.
        if 'onlinelibrary.wiley.com' in absolute_url:
            # = closed journal http://doi.org/10.1111/ele.12585
            # = open journal http://doi.org/10.1111/ele.12587
            if '<iframe' in r.content:
                print u"this is a Wiley 'enhanced PDF' page. took {}s".format(elapsed(start))
                return True

        elif 'ieeexplore' in absolute_url:
            # (this is a good example of one dissem.in misses)
            # = open journal http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6740844
            # = closed journal http://ieeexplore.ieee.org/xpl/articleDetails.jsp?arnumber=6045214
            if '<frame' in r.content:
                print u"this is a IEEE 'enhanced PDF' page. took {}s".format(elapsed(start))
                return True

        print u"we've decided this ain't a PDF. took {}s".format(elapsed(start))
        return False
示例2: run_ica
def run_ica():
    """Run FastICA on the voxel data and compute, for a sample of voxels,
    the maximum correlation between each voxel time course and any of the
    64 extracted component time courses.

    Side effects only: results are written to the log via log().
    """
    log('loading data')
    start = util.now()
    voxels, xdim, ydim, zdim = load_data()
    log(' elapsed: {}'.format(util.elapsed(start)))

    log('running independent component analysis')
    start = util.now()
    ica = decomposition.FastICA(n_components=64, max_iter=200)
    sources = ica.fit_transform(voxels)
    sources = to_dataframe(sources, load_subject_ids(), ['X{}'.format(i) for i in range(64)])
    log(' elapsed: {}'.format(util.elapsed(start)))

    log('calculating correlations between voxel and component time courses')
    start = util.now()
    correlations = []
    # only the first 32 voxels are sampled to keep this tractable
    for voxel in voxels.columns[:32]:
        voxel = voxels[voxel]
        max_correlation = 0
        for source in sources.columns:
            source = sources[source]
            # BUG FIX: np.corrcoef returns a 2x2 correlation matrix, so the
            # old scalar comparison compared a whole array against a number.
            # The off-diagonal entry [0, 1] is the correlation between the
            # voxel and the component time courses.
            correlation = np.corrcoef(voxel, source)[0, 1]
            if correlation > max_correlation:
                max_correlation = correlation
        correlations.append(max_correlation)
    log(' elapsed: {}'.format(util.elapsed(start)))
示例3: build_granules
def build_granules(model):
    '''Construct one granule cell per gid in model.granule_gids and report timing.'''
    model.granules = {}
    for cell_gid in model.granule_gids:
        model.granules[cell_gid] = mkgranule(cell_gid)
    # allreduce gives the total granule count across all ranks
    elapsed('%d granules built'%int(pc.allreduce(len(model.granules),1)))
示例4: mk_mitrals
def mk_mitrals(model):
    '''Build every mitral cell named in model.mitral_gids and report timing.'''
    model.mitrals = {}
    for cell_gid in model.mitral_gids:
        model.mitrals[cell_gid] = mkmitral.mkmitral(cell_gid)
    # allreduce gives the total mitral count across all ranks
    util.elapsed('%d mitrals created and connections to mitrals determined'%int(pc.allreduce(len(model.mitrals),1)))
示例5: load_campaign
def load_campaign(filename, campaign=None, limit=None):
with open("data/" + filename, "r") as f:
lines = f.read().split("\n")
print "found {} ORCID lines".format(len(lines))
print len(lines)
if limit:
lines = lines[:limit]
total_start = time()
row_num = 0
for line in lines:
row_num += 1
# can have # as comments
if line.startswith("#"):
print "skipping comment line"
continue
loop_start = time()
email = None
if "," in line:
(dirty_orcid, email, twitter) = line.split(",")
else:
dirty_orcid = line
try:
orcid_id = clean_orcid(dirty_orcid)
except NoOrcidException:
try:
print u"\n\nWARNING: no valid orcid_id in line {}; skipping\n\n".format(line)
except UnicodeDecodeError:
print u"\n\nWARNING: no valid orcid_id and line throws UnicodeDecodeError; skipping\n\n"
continue
my_person = Person.query.filter_by(orcid_id=orcid_id).first()
if my_person:
print u"row {}, already have person {}, skipping".format(row_num, orcid_id)
else:
print u"row {}, making person {}".format(row_num, orcid_id)
my_person = make_person(orcid_id, store_in_db=True)
my_person.campaign = campaign
my_person.email = email
my_person.twitter = twitter
db.session.merge(my_person)
commit_success = safe_commit(db)
if not commit_success:
print u"COMMIT fail on {}".format(my_person.orcid_id)
print "row {}: finished {} in {}s\n".format(row_num, orcid_id, elapsed(loop_start))
print "finished load_campaign on {} profiles in {}s\n".format(len(lines), elapsed(total_start))
示例6: register_mitrals
def register_mitrals(model):
    '''register mitrals'''
    # Register each local mitral with the ParallelContext so its spikes can be
    # delivered to other ranks.
    for gid in model.mitrals:
        # only cells that own an "initialseg" section on this rank can act as
        # spike sources here
        if h.section_exists("initialseg", model.mitrals[gid]):
            s = model.mitrals[gid].initialseg
            pc.set_gid2node(gid, rank)
            # spike detection watches membrane voltage at the distal end (x=1)
            # of the initial segment
            pc.cell(gid, h.NetCon(s(1)._ref_v, None, sec=s))
            if not mpiece_exists(gid): # must not be doing multisplit
                wholemitral(gid, model.mitrals[gid])
示例7: update_fn
def update_fn(self, cls, method_name, objects, index=1):
    """Run ``method_name`` on every object in *objects*, then commit once.

    Intended to run inside a forked worker: the SQLAlchemy engine is disposed
    first so the fork gets a fresh connection. Objects that are not Pub.update
    calls get a ``finished`` UTC timestamp (queue bookkeeping). Always returns
    None (important when used as an RQ job).

    :param index: batch index, used only to offset the logged item counter
    """
    # we are in a fork! dispose of our engine.
    # will get a new one automatically
    # if is pooling, need to do .dispose() instead
    db.engine.dispose()

    start = time()
    num_obj_rows = len(objects)

    # logger.info(u"{pid} {repr}.{method_name}() got {num_obj_rows} objects in {elapsed} seconds".format(
    #     pid=os.getpid(),
    #     repr=cls.__name__,
    #     method_name=method_name,
    #     num_obj_rows=num_obj_rows,
    #     elapsed=elapsed(start)
    # ))

    for count, obj in enumerate(objects):
        start_time = time()

        # a None in the batch aborts the whole run
        if obj is None:
            return None

        method_to_run = getattr(obj, method_name)

        # logger.info(u"***")
        logger.info(u"*** #{count} starting {repr}.{method_name}() method".format(
            count=count + (num_obj_rows*index),
            repr=obj,
            method_name=method_name
        ))

        method_to_run()

        logger.info(u"finished {repr}.{method_name}(). took {elapsed} seconds".format(
            repr=obj,
            method_name=method_name,
            elapsed=elapsed(start_time, 4)
        ))

        # for handling the queue
        if not (method_name == "update" and obj.__class__.__name__ == "Pub"):
            obj.finished = datetime.datetime.utcnow().isoformat()
        # db.session.merge(obj)

    # NOTE(review): commit placement reconstructed as one batch commit after
    # the loop — confirm against the original file's indentation.
    start_time = time()
    commit_success = safe_commit(db)
    if not commit_success:
        logger.info(u"COMMIT fail")
    logger.info(u"commit took {} seconds".format(elapsed(start_time, 2)))
    db.session.remove()  # close connection nicely
    return None  # important for if we use this on RQ
示例8: build_net_round_robin
def build_net_round_robin(model, connection_file):
    """Build the mitral/granule network, distributing gids round-robin.

    Order matters: cells are created, connection info read/derived, then
    cells are registered with the ParallelContext before synapses are built.
    """
    enter = h.startsw()
    dc.mk_mitrals(model)
    read_mconnection_info(model, connection_file)
    dc.mk_gconnection_info(model)
    # the full gid set is the union of mitral and granule gids
    model.gids = model.mitral_gids.copy()
    model.gids.update(model.granule_gids)
    register_mitrals(model)
    build_granules(model)
    register_granules(model)
    build_synapses(model)
    elapsed('build_net_round_robin')
    # t_begin is a module-level start time; only rank 0 reports
    if rank == 0: print "round robin setuptime ", h.startsw() - t_begin
示例9: mk_gconnection_info
def mk_gconnection_info(model):
    """Derive granule connection info in two phases and report timing."""
    for build_phase in (mk_gconnection_info_part1, mk_gconnection_info_part2):
        build_phase(model)
    # allreduce gives the granule count summed across all ranks
    granule_total = int(pc.allreduce(len(model.granule_gids), 1))
    util.elapsed('mk_gconnection_info (#granules = %d)' % granule_total)
示例10: scroll_through_all_dois
def scroll_through_all_dois(query_doi=None, first=None, last=None, today=False, week=False, chunk_size=1000):
    """Page through the Crossref works API by created-date and add new pubs.

    Uses Crossref deep-paging cursors; keeps requesting while full pages of
    ``chunk_size`` come back. Returns the number of pubs added (also returned
    early on any non-200 response).
    """
    # needs a mailto, see https://github.com/CrossRef/rest-api-doc#good-manners--more-reliable-service
    headers={"Accept": "application/json", "User-Agent": "mailto:[email protected]"}

    if first:
        base_url = "https://api.crossref.org/works?filter=from-created-date:{first},until-created-date:{last}&rows={rows}&select=DOI&cursor={next_cursor}"
    else:
        base_url = "https://api.crossref.org/works?filter=until-created-date:{last}&rows={rows}&select=DOI&cursor={next_cursor}"

    # "*" is Crossref's initial cursor value
    next_cursor = "*"
    has_more_responses = True
    number_added = 0

    while has_more_responses:
        # assume this is the last page unless a full chunk comes back below
        has_more_responses = False
        start_time = time()
        url = base_url.format(
            first=first,
            last=last,
            rows=chunk_size,
            next_cursor=next_cursor)
        logger.info(u"calling url: {}".format(url))

        resp = requests.get(url, headers=headers)
        logger.info(u"getting crossref response took {} seconds. url: {}".format(elapsed(start_time, 2), url))
        if resp.status_code != 200:
            logger.info(u"error in crossref call, status_code = {}".format(resp.status_code))
            return number_added

        resp_data = resp.json()["message"]
        next_cursor = resp_data.get("next-cursor", None)
        if next_cursor:
            # cursor must be url-encoded before reuse
            next_cursor = quote(next_cursor)

        # a full page means there may be more results to fetch
        if resp_data["items"] and len(resp_data["items"]) == chunk_size:
            has_more_responses = True

        dois_from_api = [clean_doi(api_raw["DOI"]) for api_raw in resp_data["items"]]
        added_pubs = add_new_pubs_from_dois(dois_from_api)
        if dois_from_api:
            logger.info(u"got {} dois from api".format(len(dois_from_api)))
        if added_pubs:
            logger.info(u"{}: saved {} new pubs, including {}".format(
                first, len(added_pubs), added_pubs[-2:]))

        number_added += len(added_pubs)

        logger.info(u"loop done in {} seconds".format(elapsed(start_time, 2)))

    return number_added
示例11: update_fn
def update_fn(cls, method_name, obj_id_list, shortcut_data=None, index=1):
    """Load rows of *cls* by id and run ``method_name`` on each, committing as it goes.

    Intended to run inside a forked worker: the engine is disposed first so
    the fork gets a fresh DB connection. Always returns None (important when
    used as an RQ job).

    :param shortcut_data: if given, passed as the sole argument to the method
    :param index: batch index, used only to offset the logged item counter
    """
    # we are in a fork! dispose of our engine.
    # will get a new one automatically
    db.engine.dispose()

    start = time()
    # undefer('*') loads all deferred columns up front
    q = db.session.query(cls).options(orm.undefer('*')).filter(cls.id.in_(obj_id_list))
    obj_rows = q.all()
    num_obj_rows = len(obj_rows)
    print "{repr}.{method_name}() got {num_obj_rows} objects in {elapsed}sec".format(
        repr=cls.__name__,
        method_name=method_name,
        num_obj_rows=num_obj_rows,
        elapsed=elapsed(start)
    )

    for count, obj in enumerate(obj_rows):
        start_time = time()

        # a None row aborts the whole run
        if obj is None:
            return None

        method_to_run = getattr(obj, method_name)

        print u"\n***\n{count}: starting {repr}.{method_name}() method".format(
            count=count + (num_obj_rows*index),
            repr=obj,
            method_name=method_name
        )

        if shortcut_data:
            method_to_run(shortcut_data)
        else:
            method_to_run()

        print u"finished {repr}.{method_name}(). took {elapsed}sec".format(
            repr=obj,
            method_name=method_name,
            elapsed=elapsed(start_time, 4)
        )

        # NOTE(review): commit placement reconstructed as per-object — confirm
        # against the original file's indentation.
        commit_success = safe_commit(db)
        if not commit_success:
            print u"COMMIT fail"

    db.session.remove()  # close connection nicely
    return None  # important for if we use this on RQ
示例12: update_fn
def update_fn(cls, method_name, obj_id_list, shortcut_data=None):
    """Load rows of *cls* by id and run ``method_name`` on each, committing as it goes.

    Intended to run inside a forked worker: the engine is disposed first so
    the fork gets a fresh DB connection. Person queries get extra eager-load
    options. Always returns None (important when used as an RQ job).

    :param shortcut_data: if given, passed as the sole argument to the method
    """
    # we are in a fork! dispose of our engine.
    # will get a new one automatically
    db.engine.dispose()

    start = time()
    q = db.session.query(cls).filter(cls.id.in_(obj_id_list))
    if cls.__name__ == "Person":
        q = q.options(person_load_options())
    obj_rows = q.all()
    num_obj_rows = len(obj_rows)
    print "{repr}.{method_name}() got {num_obj_rows} objects in {elapsed}sec".format(
        repr=cls.__name__,
        method_name=method_name,
        num_obj_rows=num_obj_rows,
        elapsed=elapsed(start)
    )

    for obj in obj_rows:
        start_time = time()

        # a None row aborts the whole run
        if obj is None:
            return None

        method_to_run = getattr(obj, method_name)

        print u"\nstarting {repr}.{method_name}() method".format(
            repr=obj,
            method_name=method_name
        )

        if shortcut_data:
            method_to_run(shortcut_data)
        else:
            method_to_run()

        print u"finished {repr}.{method_name}(). took {elapsed}sec".format(
            repr=obj,
            method_name=method_name,
            elapsed=elapsed(start_time, 4)
        )

        # NOTE(review): commit placement reconstructed as per-object — confirm
        # against the original file's indentation.
        db.session.commit()

    db.session.remove()  # close connection nicely
    return None  # important for if we use this on RQ
示例13: set_data_for_all_products
def set_data_for_all_products(self, method_name, high_priority=False, include_products=None):
start_time = time()
threads = []
# use all products unless passed a specific set
if not include_products:
include_products = self.all_products
# start a thread for each product
for work in include_products:
method = getattr(work, method_name)
process = threading.Thread(target=method, args=[high_priority])
process.start()
threads.append(process)
# wait till all work is done
for process in threads:
process.join()
# now go see if any of them had errors
# need to do it this way because can't catch thread failures; have to check
# object afterwards instead to see if they logged failures
for work in include_products:
if work.error:
# don't print out doi here because that could cause another bug
# print u"setting person error; {} for product {}".format(work.error, work.id)
self.error = work.error
print u"finished {method_name} on {num} products in {sec}s".format(
method_name=method_name.upper(),
num = len(include_products),
sec = elapsed(start_time, 2)
)
示例14: harvest
def harvest(self, **kwargs): # pragma: no cover
    """Make HTTP requests to the OAI server.

    Retries up to ``self.max_retries`` times on HTTP 503, sleeping
    ``self.RETRY_SECONDS`` between attempts. Any other status raises via
    ``raise_for_status`` or is wrapped in an OAIResponse.

    :param kwargs: OAI HTTP parameters.
    :rtype: :class:`sickle.OAIResponse`
    """
    start_time = time()
    for _ in range(self.max_retries):
        if self.http_method == 'GET':
            # build the query string by hand (values are not url-encoded here)
            payload_str = "&".join("%s=%s" % (k,v) for k,v in kwargs.items())
            url_without_encoding = u"{}?{}".format(self.endpoint, payload_str)
            http_response = requests.get(url_without_encoding,
                                         **self.request_args)
            self.http_response_url = http_response.url
        else:
            http_response = requests.post(self.endpoint, data=kwargs,
                                          **self.request_args)
            self.http_response_url = http_response.url

        if http_response.status_code == 503:
            retry_after = self.RETRY_SECONDS
            logger.info("HTTP 503! Retrying after %d seconds..." % retry_after)
            sleep(retry_after)
        else:
            logger.info("took {} seconds to call pmh url: {}".format(elapsed(start_time), http_response.url))
            http_response.raise_for_status()
            if self.encoding:
                http_response.encoding = self.encoding
            return OAIResponse(http_response, params=kwargs)
    # NOTE(review): if every attempt returns 503 the loop exhausts and this
    # function implicitly returns None — callers should be prepared for that.
示例15: _grep_for_dep_lines
def _grep_for_dep_lines(self, query_str, include_globs, exclude_globs):
arg_list =['zipgrep', query_str, self.temp_file_name]
arg_list += include_globs
arg_list.append("-x")
arg_list += exclude_globs
start = time()
try:
print "Running zipgrep: '{}'".format(" ".join(arg_list))
self.dep_lines = subprocess32.check_output(
arg_list,
timeout=90
)
except subprocess32.CalledProcessError:
# heroku throws an error here when there are no dep lines to find.
# but it's fine. there just aren't no lines.
pass
except subprocess32.TimeoutExpired:
# too many files, we'll skip it and move on.
self.error = "grep_timeout"
pass
finally:
self.grep_elapsed = elapsed(start, 4)
#print "found these dep lines: {}".format(self.dep_lines)
print "finished dep lines search in {} sec".format(self.grep_elapsed)