This article collects typical usage examples of the timeit.default_timer function in Python. If you have been wondering what default_timer does, how to call it, or where it is useful, the curated examples below may help.
15 code examples of default_timer are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
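Every example below follows the same basic pattern: take a default_timer() reading before and after the code under measurement and subtract the two floats. Here is a minimal, self-contained sketch of that pattern; the workload function slow_work and its argument are illustrative placeholders, not taken from any example below.

import timeit

def slow_work(n):
    """Placeholder workload: sum the first n squares."""
    return sum(i * i for i in range(n))

start = timeit.default_timer()               # high-resolution timestamp before the work
result = slow_work(500000)                   # the code being timed
elapsed = timeit.default_timer() - start     # elapsed wall-clock time in seconds (float)
print("slow_work(500000) -> %d in %.3f seconds" % (result, elapsed))

On Python 3.3+, default_timer is an alias for time.perf_counter, so differences between successive readings are monotonic and meaningful; the absolute value of a single reading has no defined meaning on its own.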
Example 1: __download_range
def __download_range(self, k, dst):
    try:
        _, ext = os.path.splitext(dst)
        ds = []
        parts = []
        logging.info("Download %s start", k.name)
        # split the key into self.splitMB-sized byte ranges and download them in parallel
        for startByte in range(0, k.size, self.splitMB):
            output_part = self.new_temp_file(suffix=ext)
            parts.append(output_part)
            endByte = min(startByte + self.splitMB - 1, k.size)
            logging.debug(
                "deferToThreadPool %s start=%d end=%d size=%d cnt=%d",
                k.name,
                startByte,
                endByte,
                endByte - startByte,
                len(ds),
            )
            d = twisted.internet.threads.deferToThreadPool(
                reactor,
                reactor.getThreadPool(),  # @UndefinedVariable
                self.__downloadOne,
                k,
                startByte,
                endByte,
                output_part,
                len(ds),
            )
            ds.append(d)
        if os.path.exists(dst):
            os.remove(dst)
        fout = open(dst, "wb")
        start = timeit.default_timer()
        # stitch the parts together in order, logging progress and throughput
        for cnt, p in enumerate(parts):
            yield ds[cnt]
            shutil.copyfileobj(open(p, "rb"), fout)
            size = min(k.size, (cnt + 1) * self.splitMB)
            elapsed = timeit.default_timer() - start
            speedstr = formatFileSize(size / elapsed)
            sizestr = formatFileSize(size)
            percent = (float(cnt) / len(parts)) * 100.0
            logging.info(
                "%03d/%03d (%.2f%%) speed=%s/s, elapsed=%.2f, size=%s",
                cnt,
                len(parts),
                percent,
                speedstr,
                elapsed,
                sizestr,
            )
        fout.close()
    except Exception:
        logging.error("download error", exc_info=True)
        raise
Example 2: __execEvent__
def __execEvent__(self, eventName, ntime, commandHandler):
    last = self.__events__[eventName]["lastExecTime"]
    timeInterval = self.__events__[eventName]["timeInterval"]
    if ntime - last >= timeInterval:
        start = default_timer()
        self.__events__[eventName]["function"](commandHandler, self.__events__[eventName]["channels"])
        timeTaken = default_timer() - start
        # update the min/max/average execution-time statistics for this event
        stats = self.__events__[eventName]["stats"]
        if stats["average"] is None:
            stats["average"] = timeTaken
            stats["min"] = timeTaken
            stats["max"] = timeTaken
        else:
            stats["average"] = (stats["average"] + timeTaken) / 2.0
            if timeTaken < stats["min"]:
                stats["min"] = timeTaken
            if timeTaken > stats["max"]:
                stats["max"] = timeTaken
        self.__events__[eventName]["lastExecTime"] = time.time()
Example 3: main
def main():
    start_time = timeit.default_timer()
    proxies = []
    targets = ['http://www.google-proxy.net/', 'http://free-proxy-list.net/']
    for target in targets:
        for p in proxy_scraper(target):
            proxy_found = str(p['ip']) + ":" + str(p['port'])
            if proxy_found not in proxies:
                if not is_proxy_existed(p['ip']):
                    print(p['ip'] + " - " + p['port'] + " - " + p['hostname'])
                    create_proxy(p['ip'], p['port'], p['hostname'], p['service'], p['latitude'], p['longitude'], p['city'], p['country'])
                    proxies.append(proxy_found)
    # save to a file
    file_name = "data_proxies.cfg"
    write_file(file_name, "\n".join(proxies))
    print("\n%s proxies found. File saved. You can find it under '%s'." % (len(proxies), file_name))
    # measure elapsed time
    print("\nElapsed time: %d sec" % (timeit.default_timer() - start_time))
Example 4: trim_data
def trim_data(crime_data, part, total_parts):
    print('Trimming unnecessary data...', end=' ')
    time1 = tm.default_timer()
    # keep only 2006-2015 records that have a neighbourhood assigned
    crime_data = crime_data[crime_data['YEAR'] >= 2006]
    crime_data = crime_data[crime_data['YEAR'] <= 2015]
    crime_data = crime_data[pd.notnull(crime_data['NEIGHBOURHOOD'])]
    crime_data = crime_data.drop('HUNDRED_BLOCK', axis=1)
    crime_data = crime_data.sort_index()
    if TEST_VAL:
        print('Taking subset of crime data (1000 row sample)...', end=' ')
        crime_data = crime_data.head(1005)
    if part is not None and total_parts is not None:
        # slice out this part's share of the rows
        start_index = int(1.0 * (part - 1) / total_parts * crime_data['YEAR'].count())
        end_index = int(1.0 * part / total_parts * crime_data['YEAR'].count())
        if part == total_parts:
            end_index = crime_data['YEAR'].count()
        crime_data = crime_data[start_index:end_index]
        print('Start index, end index, size:', start_index, end_index, crime_data['YEAR'].count())
    print('Finished')
    print('Time taken:', tm.default_timer() - time1, ' seconds\n')
    return crime_data
Example 5: read_features
def read_features(features):
    """
    Read all the feature files in FT_DIR and return numpy arrays;
    currently only computes the grand mean and std of each feature.
    """
    start = timeit.default_timer()
    x = []
    y = []
    for fn in glob.glob(os.path.join(FT_DIR, "*.npy")):
        # extract the genre label from the file name
        head = fn.rfind('/')
        tail = fn.rfind('.')
        ext = fn[head + 1:tail]
        genre, _ = ext.split('_')
        data = np.load(fn)
        surface_ft = data[:-1]  # 5 surface features
        ft_vec = [np.mean(ft) for ft in surface_ft] + [np.std(ft) for ft in surface_ft]
        ceps = data[-1]  # MFCC features
        cep_len = len(ceps)
        # use the middle 80% of the MFCC frames
        ft_vec += np.mean(ceps[int(cep_len / 10.):int(cep_len * 9 / 10.)], axis=0).tolist()
        x.append(ft_vec)
        y.append(GENRE_DICT[genre])
    end = timeit.default_timer()
    print("reading all features takes: ", (end - start))
    return np.array(x), np.array(y)
Example 6: evaluate
def evaluate(im, algo, gt_illuminant, i, range_thresh, bin_num, dst_folder):
    new_im = None
    start_time = timeit.default_timer()
    if algo == "grayworld":
        new_im = cv2.xphoto.autowbGrayworld(im, 0.95)
    elif algo == "nothing":
        new_im = im
    elif algo == "learning_based":
        new_im = cv2.xphoto.autowbLearningBased(im, None, range_thresh, 0.98, bin_num)
    elif algo == "GT":
        gains = gt_illuminant / min(gt_illuminant)
        g1 = float(1.0 / gains[2])
        g2 = float(1.0 / gains[1])
        g3 = float(1.0 / gains[0])
        new_im = cv2.xphoto.applyChannelGains(im, g1, g2, g3)
    time = 1000 * (timeit.default_timer() - start_time)  # time in ms
    if len(dst_folder) > 0:
        if not os.path.exists(dst_folder):
            os.makedirs(dst_folder)
        im_name = ("%04d_" % i) + algo + ".jpg"
        cv2.imwrite(os.path.join(dst_folder, im_name), stretch_to_8bit(new_im))
    # recover the illuminant from the color balancing result, assuming the standard model:
    estimated_illuminant = [0, 0, 0]
    eps = 0.01
    estimated_illuminant[2] = np.percentile((im[:, :, 0] + eps) / (new_im[:, :, 0] + eps), 50)
    estimated_illuminant[1] = np.percentile((im[:, :, 1] + eps) / (new_im[:, :, 1] + eps), 50)
    estimated_illuminant[0] = np.percentile((im[:, :, 2] + eps) / (new_im[:, :, 2] + eps), 50)
    res = np.arccos(np.dot(gt_illuminant, estimated_illuminant) /
                    (np.linalg.norm(gt_illuminant) * np.linalg.norm(estimated_illuminant)))
    return (time, (res / np.pi) * 180)
Example 7: spawn_runpy
def spawn_runpy(cp, wait=60, cb=check_rst):
    "as decorator to run job"
    global WAITQ, RUNQ, CFG
    pool = Pool(processes=CFG['MAXJOBS'])
    while len(WAITQ) > 0 or len(RUNQ) > 0:
        if len(RUNQ) <= CFG['MAXJOBS'] and len(WAITQ) > 0:
            path, test = WAITQ.pop()
            rst = pool.apply_async(call_runpy, (cp, path, test,))
            RUNQ.append((rst, test, timeit.default_timer()))
        else:
            for r in list(RUNQ):  # iterate over a copy so finished jobs can be removed
                usec = float("%.2f" % (timeit.default_timer() - r[2]))
                # successful() is a method and may only be called once the result is ready
                if r[0].ready() and r[0].successful():
                    print("[{0}] success, took {1} sec".format(r[1], usec))
                    RUNQ.remove(r)
                    if cb:
                        cb(r[1], 'pass', usec)
                else:
                    if usec > CFG['TIMEOUT']:
                        print("[{0}] unsuccessful, timed out after {1} sec".format(r[1], usec))
                        r[0].terminate()
                        if cb:
                            cb(r[1], 'fail', usec)
        time.sleep(float(wait))
Example 8: load_indicators_to_mongo_zh
def load_indicators_to_mongo_zh(is_incremental):
    print("start loading indicator data(zh) from JSON file to MongoDB...")
    all_start = timeit.default_timer()
    static = Static()
    f = io.open(static.output_folder + '/worldbank_wdi_indicators_zh.json', 'r', encoding='utf8', errors='ignore')
    json_str = f.readline()
    indicator_array = json.loads(json_str)
    f.close()
    client = MongoClient(static.mongo_url, static.mongo_port)
    db = client[static.database_name]
    ## print(db.collection_names())
    indicator_col = db[static.indicator_col_name]
    if not is_incremental:
        indicator_col.drop()
    for ind in indicator_array:
        indicator_key = ind['id'].replace('.', '_') + '_ZH'
        data_type = 'number'
        if ind['name'].find('百分比') > -1:  # '百分比' means "percentage"
            data_type = 'percentage'
        topics = []
        for topic in ind['topics']:
            topics.append(topic['value'])
        indicator_rec = {'indicator_key': indicator_key, 'original_id': ind['id'], 'indicator_text': ind['name'], 'data_type': data_type, 'sourceOrganization': ind['sourceOrganization'], 'sourceNote': ind['sourceNote'], 'topics': topics, 'data_source': '世界发展指标', 'dimension': [{'dimension_key': 'year', 'dimension_text': '年'}, {'dimension_key': 'region', 'dimension_text': '区域'}, {'dimension_key': 'country', 'dimension_text': '国家'}]}
        pk = indicator_col.insert(indicator_rec)
        print(indicator_key + ' ' + ind['name'] + ' inserted.')
    print("job is complete.")
    print("total records: " + str(indicator_col.count()))
    print("total time cost: " + str(round(timeit.default_timer() - all_start)) + 's')
Example 9: main
def main():
    """Time the parallel and serial alignment pipelines on a sample video."""
    logging.info("Reading file:%s", "data/sample.avi")
    vid = AoRecording.AoRecording(filepath="data/sample.avi")
    vid.load_video()
    logging.info("Starting parallel processing")
    tic = timeit.default_timer()
    vid.filter_frames()
    vid.fixed_align_frames()
    vid.complete_align_parallel()
    vid.create_average_frame()
    vid.create_stdev_frame()
    toc = timeit.default_timer()
    print("Parallel Process took {}:".format(toc - tic))
    vid.create_stdev_frame()
    logging.info("writing output")
    vid.write_video("output/output_parallel.avi")
    vid.write_average_frame("output/lucky_average_parallel.png")
    vid.write_frame("output/lucky_stdev.png", "stdev")
    logging.info("Starting serial processing")
    tic = timeit.default_timer()
    vid.filter_frames()
    vid.fixed_align_frames()
    vid.complete_align()
    vid.create_average_frame()
    toc = timeit.default_timer()
    print("Serial Process took {}:".format(toc - tic))
    logging.info("writing output")
    vid.write_video("output/output_serial.avi")
    vid.write_frame("output/lucky_average_serial.png", "average")
Example 10: _run_analyzers_on_event
def _run_analyzers_on_event(self):
    '''Run all analyzers on the current event, self.event.
    Returns a tuple (success?, last_analyzer_name).
    '''
    for i, analyzer in enumerate(self._analyzers):
        if not analyzer.beginLoopCalled:
            analyzer.beginLoop(self.setup)
        start = timeit.default_timer()
        # iEv is the index of the current event, maintained by the enclosing event loop
        if self.memReportFirstEvent >= 0 and iEv >= self.memReportFirstEvent:
            memNow = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            if memNow > self.memLast:
                print("Mem Jump detected before analyzer %s at event %s. RSS(before,after,difference) %s %s %s" % (analyzer.name, iEv, self.memLast, memNow, memNow - self.memLast))
            self.memLast = memNow
        ret = analyzer.process(self.event)
        if self.memReportFirstEvent >= 0 and iEv >= self.memReportFirstEvent:
            memNow = resource.getrusage(resource.RUSAGE_SELF).ru_maxrss
            if memNow > self.memLast:
                print("Mem Jump detected in analyzer %s at event %s. RSS(before,after,difference) %s %s %s" % (analyzer.name, iEv, self.memLast, memNow, memNow - self.memLast))
            self.memLast = memNow
        if self.timeReport:
            self.timeReport[i]['events'] += 1
            if self.timeReport[i]['events'] > 0:
                self.timeReport[i]['time'] += timeit.default_timer() - start
        if ret == False:
            return (False, analyzer.name)
    return (True, analyzer.name)
Example 11: load_rowdata_to_mongo_zh
def load_rowdata_to_mongo_zh(is_incremental):
    print("start loading row data(zh) from JSON file to MongoDB...")
    all_start = timeit.default_timer()
    static = Static()
    bydim_dir = static.output_folder + static.dataset_bydim_folder
    client = MongoClient(static.mongo_url, static.mongo_port)
    db = client[static.database_name]
    dataset_col = db[static.dataset_col_name]
    if not is_incremental:
        dataset_col.drop()
    file_path_array = []
    for fname in os.listdir(bydim_dir):
        file_path = os.path.join(bydim_dir, fname)
        if os.path.isfile(file_path):
            file_path_array.append(file_path)
    print(str(len(file_path_array)) + " files are loaded")
    counter = []
    mapfunc = partial(insert_by_dim, counter=counter, dataset_col=dataset_col, all_start=all_start)
    pool = ThreadPool(12)
    pool.map(mapfunc, file_path_array)
    pool.close()
    pool.join()
    print("All the threads are completed. Total number is " + str(len(counter)) + "\n")
    print("total time cost: " + str(round(timeit.default_timer() - all_start)) + 's')
Example 12: worker
def worker(F, chargers, sensors, p_list, sensors_p, p_list_p):
    """worker function, used to create a process"""
    result = {}
    tic = timeit.default_timer()
    answer = reconfiguration.iaa.solution(chargers, sensors, p_list, args['B'], sensors_p, p_list_p, F, args['p_min'])
    toc = timeit.default_timer()
    result['IAA'] = (toc - tic, answer)
    if DEBUG:
        print("=============================================")
        print("#               solution IAA                #")
        print("=============================================")
        pprint(answer)
    tic = timeit.default_timer()
    answer = solution.solutionOpt.solution(chargers, sensors_p, p_list_p)
    toc = timeit.default_timer()
    result['Opt'] = (toc - tic, answer)
    if DEBUG:
        print("=============================================")
        print("#               solution Opt                #")
        print("=============================================")
        pprint(answer)
    return result
Example 13: loop_sd_mean
def loop_sd_mean(alphabet):
    print("======== sd-mean test ===========")
    start = timeit.default_timer()
    count = 0
    letters_number_list = []
    entropy_list = []
    for i in range(1, 101):  # this is sd
        alphabet1 = eliminate_sd(alphabet, i)
        for j in range(1, 101):  # this is mean
            alphabet2 = eliminate_mean(alphabet1, j)
            letters_number = len(alphabet2)
            letters_number_list.append((i, j, letters_number))
            balanced_alphabet = rebalance(alphabet2)
            entropy = calculate_entropy(balanced_alphabet)
            entropy_list.append((i, j, entropy))
            count = count + 1
            print(count)
    stop = timeit.default_timer()
    time = stop - start
    print(letters_number_list)
    print(entropy_list)
    print("======== sd-mean test ===========")
    print('Running Time (s): %f' % time)
Example 14: analyze_files
def analyze_files(self, iterCount, loci_classes, adapt_threshold):
    Rmodel = VRmodel.VregMRmodel(iterCount, loci_classes, adapt_threshold)
    print("len(Rmodel.rfmodels)=", len(Rmodel.rfmodels))
    ofile = open("bkg_out.dat", "a+")
    Rmodel.set_bckgoutfile(ofile)
    for species in self.speciesList:
        fbar = self.S[species]["WGS"]
        print(fbar)
        outFile = self.outDir + os.path.basename(fbar).replace(".fasta", "_" + str(iterCount) + "_outRF.fasta")
        ofile = open(outFile, "w")
        Rmodel.set_outfile(ofile)
        fb = self.outDir + os.path.basename(fbar).replace(".fasta", "_" + str(iterCount) + "_exon.fasta")
        exfile1 = open(fb, "w")
        Rmodel.set_exon_outfiles(exfile1)
        start_time = timeit.default_timer()
        gene_cnt = 0
        # scan both strands of every record in the species' FASTA file
        for strand in [1, -1]:
            qbar = deepcopy(self.contigs)
            print("STRAND=", strand)
            for record in SeqIO.parse(fbar, "fasta"):
                if self.check_contigs:
                    if record.id.split("|")[3] not in self.contigs:
                        continue
                print("record.id=", record.id)
                print("cnts=", record.id.split("|")[3])
                print("qbar=", qbar)
                if self.check_contigs:
                    qbar.remove(record.id.split("|")[3])
                if strand == 1:
                    seq = record.seq
                else:
                    seq = record.seq.reverse_complement()
                Rmodel.set_record(record.id, record.name, record.description)
                seq_size = len(seq)
                res = self.mapper(divide_work(seq))
                # debug output, kept disabled:
                # print("len(res)=", len(res))
                # for ix in range(2):
                #     print(res[ix][0], res[ix][1], type(res[ix][2]))
                Elist = Rmodel.exon_MRprobabilities(res)
                gene_cnt = Rmodel.V_exon_model(gene_cnt, seq, strand, Elist)
                # res = None
                # Elist = None
                if len(qbar) == 0:
                    break
        ofile.close()
        elapsed = timeit.default_timer() - start_time
        print("ELAPSED TIME =", elapsed)
Example 15: runTestCode
def runTestCode(self):
    """
    This function ties into the debug menu. It is meant to allow execution
    of some test code. Feel free to change the contents of this function.
    """
    start = timeit.default_timer()
    monsters = []
    lib = Libraries.MonsterLibrary()
    stop = timeit.default_timer()
    time = stop - start
    print("Created library in " + str(time) + " seconds")
    for i in range(0, 10000):
        myRandom = lib.getRandomMonster(random.randint(0, 80))
        monsters.append(myRandom)
    # lib = Libraries.ItemLibrary()
    # myItem = lib.createItem('heal')
    # print(myItem)
    # myItem = lib.createItem('sword')
    # print(myItem)
    # myItem = lib.createItem('cloak')
    # print(myItem)
    # myItem = lib.createItem('fireball')
    # print(myItem)
    stop = timeit.default_timer()
    time = stop - start
    print("Created " + str(len(monsters)) + " monsters in " + str(time) + " seconds")