本文整理汇总了Python中threadpool.makeRequests函数的典型用法代码示例。如果您正苦于以下问题:Python makeRequests函数的具体用法?Python makeRequests怎么用?Python makeRequests使用的例子?那么恭喜您,这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了makeRequests函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: crawl
def crawl(self, para_file):
    """Crawl every parameter listed in *para_file* using a thread pool.

    Parameters are read one per line; batches of 11 lines (``len > 10``)
    are submitted to the pool and flushed so memory stays bounded on large
    inputs.  Honors ``self.stopped`` for cooperative cancellation.
    """
    # NOTE(review): this guard returns only when BOTH writer and method are
    # unset, yet self.writer is used unconditionally below -- confirm a
    # writer is always configured whenever a method is.
    if not self.writer and not self.method:
        return
    pool = threadpool.ThreadPool(self.poolsize)
    batch = []
    # 'with' guarantees the parameter file is closed even if retrieval raises
    # (the original leaked the handle on an exception).
    with open(para_file, 'r') as fpara:
        for line in fpara:
            if self.stopped:
                break  # stop current crawling
            batch.append(line.strip())
            if len(batch) > 10:
                self._submit_batch(pool, batch)
                del batch[:]
        # flush the last partial batch
        if not self.stopped:
            self._submit_batch(pool, batch)
    self.writer.close()
    if not self.stopped:
        logging.info('Retrieving finished.')
    else:
        logging.info('Retrieving interrupted.')
    return

def _submit_batch(self, pool, batch):
    """Submit one batch of parameters to *pool*, wait, and flush the writer."""
    for request in threadpool.makeRequests(self.retrieve, batch):
        pool.putRequest(request)
    pool.wait()
    self.writer.flush()
示例2: start
def start(self, jenkinsHost,
          jenkinsUser="root",
          jenkinsKey=globals.configJenkinsKey):
    """Record the Jenkins connection settings and warm up the thread pool.

    Args:
        jenkinsHost: host/URL of the Jenkins master.
        jenkinsUser: user account used for the connection.
        jenkinsKey: credential key (defaults to the configured key).
    """
    self.__jenkinsHost = jenkinsHost
    self.__jenkinsUser = jenkinsUser
    self.__jenkinsKey = jenkinsKey
    # Keep startup times short. Only connect a few threads to Jenkins
    # upfront; the rest are added dynamically as required.  This also lets
    # authentication issues be resolved by the system admin or developer
    # without taking the tool down.
    # (Idiom fix: plain for-loops instead of list comprehensions used only
    # for their side effects.)
    for i in range(globals.threadPoolSize):
        for req in makeRequests(self.connectInit, [((i,), {})]):
            globals.threadPool.putRequest(req)
    # Do 1 connect to validate the connection.  This is synchronous.
    for req in makeRequests(self.connect, [((0,), {})]):
        globals.threadPool.putRequest(req)
    globals.threadPool.wait()
    # Thread queueing results in new thread connections later, so do them
    # all now asynchronously so that they are ready when needed without
    # having to wait.  This exercises the connectRetry logic at startup.
    for i in range(globals.threadPoolSize - 1):
        for req in makeRequests(self.connectRetry, [((), {})]):
            globals.threadPool.putRequest(req)
示例3: __init__
def __init__(self):
    """Load the [autoLevel] settings, build the alarm node tree, and start
    the message-pump and time-scan worker tasks on the thread pool."""
    self.logger = logger
    parser = ConfigParser.SafeConfigParser()
    parser.read('config\config.ini')

    def conf_int(option):
        # every numeric option in [autoLevel] is stored as a string
        return int(parser.get('autoLevel', option))

    self.req_que_size = conf_int('req_que_size')
    self.resp_que_size = conf_int('resp_que_size')
    self.threadpoolSize = conf_int('threadpoolSize')
    self.accu_threshold = conf_int('accu_threshold')
    self.max_alarm_level = conf_int('max_alarm_level')
    self.threadpo = threadpool.ThreadPool(self.threadpoolSize)
    self.root = Node()
    self.reqQueue = Queue.Queue(maxsize=self.req_que_size)
    self.respQueue = Queue.Queue(maxsize=self.resp_que_size)
    # "alarm" is a comma-separated list of "<level>-<duration>" pairs
    self.alarmDura = {}
    for pair in parser.get('autoLevel', 'alarm').split(','):
        level, duration = pair.split('-')[0], pair.split('-')[1]
        self.alarmDura[int(level)] = int(duration)
    # one child AlarmNode per configured organisation id
    children = {}
    for org_key in parser.get('autoLevel', 'org').split(','):
        org_id = int(org_key)
        children[org_id] = AlarmNode(org=org_id, req_que_size=self.req_que_size,
                                     resp_que_size=self.resp_que_size, threadpo=self.threadpo,
                                     alarmDura=self.alarmDura, accu_threshold=self.accu_threshold,
                                     max_alarm_level=self.max_alarm_level, parentRespQueue=self.respQueue)
    self.logger.debug('init: %s, %s' % (self.alarmDura, children))
    self.root.setChildren(children)
    # long-running pool tasks: alarm message pump and periodic time scanner
    self.alaMsgTask = threadpool.makeRequests(self.alaMsg, [(None, None)])
    self.scanTimeTask = threadpool.makeRequests(self.scanTime, [(None, None)])
    self.threadpo.putRequest(self.alaMsgTask[0])
    self.threadpo.putRequest(self.scanTimeTask[0])
示例4: simulateStep
def simulateStep(self):
    """Run one simulation step: count neighbours for every row band in
    parallel, then apply the life/death update in a second parallel pass."""
    threads = self.threadCount
    band = self.size / threads  # rows handled by each worker
    # one ([], {firstRow, lastRow}) argument pair per worker band
    arg_sets = []
    for t in range(threads):
        first = band * t
        arg_sets.append(([], {'firstRow': first, 'lastRow': first + band}))
    for req in threadpool.makeRequests(self.countAllNeighbours, arg_sets):
        self.pool.putRequest(req)
    self.pool.wait()
    for req in threadpool.makeRequests(self.simulateLifeAndDeath, arg_sets):
        self.pool.putRequest(req)
    self.pool.wait()
示例5: thread_use
def thread_use():
    """Demonstrate threadpool with single- and multi-argument workers."""
    pool = threadpool.ThreadPool(10)
    # single parameter: each element of the list becomes one call argument
    single_args = [1, 2]
    for work in threadpool.makeRequests(thread_sing_parameter, single_args):
        pool.putRequest(work)
    pool.wait()
    # multiple parameters: (positional-args, keyword-args) tuples
    multi_args = [([1, 2, 3], None), ([4, 5, 6], None)]
    for work in threadpool.makeRequests(thread_multiple_parameter, multi_args):
        pool.putRequest(work)
    pool.wait()
示例6: check_all_instance
def check_all_instance(instance_ports):
    """Check every instance port concurrently on the module-level pool.

    Results and failures are delivered through the run_success / run_fault
    callbacks registered with each request.
    """
    call_args = [(port_info, {}) for port_info in instance_ports]
    work_items = threadpool.makeRequests(check_one_instance, call_args,
                                         run_success, run_fault)
    for work in work_items:
        pool.putRequest(work)
    log.debug("pool.wait for check instance")
    pool.wait()
    log.debug("pool.wait end")
示例7: sumbit
def sumbit(self, record_time):
    """Queue an asynchronous collect job for *record_time* on the pool.

    NOTE(review): the name "sumbit" looks like a typo of "submit" but is
    kept unchanged so existing callers keep working.
    """
    log.info("i did at %s %d" % (util.get_local_time(), record_time))
    collector = Collector()
    # record_time is passed as the single positional argument to collect;
    # print_result is invoked with each result.
    for work in threadpool.makeRequests(collector.collect, [record_time],
                                        self.print_result):
        log.info("get request")
        self.pool.putRequest(work)
示例8: pattern4
def pattern4(fundinfolist4, maxreturn, threadnum):
    """Rank the pattern3 result list by the current fund manager's performance.

    Using *threadnum* worker threads, checks whether each fund is buyable and
    fetches its manager's performance, then prints up to *maxreturn* funds
    sorted by that performance (best first) with their detail-page links.

    NOTE(review): the top entries are appended back onto *fundinfolist4*
    itself, so the input list grows -- confirm callers expect that.
    """
    #Check fund manager perf
    print 'Fund list 4: 基于pattern3的结果,排序当前基金经理业绩'
    #Get manager perf if buyable
    def checkBuyableAndGetPerf(fund):
        # isBuyable presumably sets fund.buyable as a side effect -- TODO confirm
        isBuyable(fund)
        if fund.buyable:
            getManagerPerf(fund)
        else:
            fund.managerperf = '-1000' #With this, no need do extra filter on buyable
    #Get fund manager perf (in parallel)
    pool2 = threadpool.ThreadPool(threadnum)
    requests2 = threadpool.makeRequests(checkBuyableAndGetPerf, fundinfolist4)
    [pool2.putRequest(req) for req in requests2]
    pool2.wait()
    # sort by manager performance, best first (managerperf is a numeric string)
    fundInfoListOrdered4 = sorted(fundinfolist4, key=lambda fund:string.atof(fund.managerperf),reverse=True)
    meetnum = 0  # NOTE(review): never updated or read below -- likely dead
    fundlinkTemp = 'http://fund.eastmoney.com/%s.html'
    jjjllinkTemp = 'http://fund.eastmoney.com/f10/jjjl_%s.html'
    for i in range(0, min(maxreturn,len(fundInfoListOrdered4))):
        fundinfolist4.append(fundInfoListOrdered4[i])
        fundlink = fundlinkTemp % fundInfoListOrdered4[i].fundcode
        jjjllink = jjjllinkTemp % fundInfoListOrdered4[i].fundcode
        print ' %s %s %s 净值:%s 业绩:%s Duration:%s' % (fundlink, jjjllink, fundInfoListOrdered4[i].name,
                                                     fundInfoListOrdered4[i].latestvalue,
                                                     fundInfoListOrdered4[i].managerperf.encode('utf-8'),
                                                     fundInfoListOrdered4[i].managerduration.encode('utf-8'))
示例9: start
def start(self, norm_target_func=None, *args, **kwargs):
    """Dispatch every registered poc function against every seed target.

    For each (name, func) pair in self.funcs2run, one argument dict is built
    per seed target (optionally normalised by *norm_target_func*, which gets
    the extra *args/**kwargs) and queued on self.tp.  self.handle_result is
    used as both the success and the exception callback.

    Returns:
        (total_num, success_num) after the pool drains.
    """
    def args_generator(poc_name):
        # Template dict reused across iterations; the deepcopy below gives
        # each request its own independent snapshot.
        func_args = {
            'poc_name': poc_name,
            'options': self.options,
            'success': None,
            'poc_ret': {},
        }
        for target in self.seed_targets:
            if norm_target_func:
                # NOTE(review): this writes into self.options itself (the
                # dict is shared, not copied), leaving 'target' set on
                # self.options after the run -- confirm that is intended.
                func_args['options']['target'] = norm_target_func(target.strip(), *args, **kwargs)
            else:
                func_args['options']['target'] = target.strip()
            yield deepcopy(func_args)
    for name, func2run in self.funcs2run:
        requests = threadpool.makeRequests(callable_=func2run,
                                           args_list=args_generator(name),
                                           callback=self.handle_result,
                                           exc_callback=self.handle_result)
        [self.tp.putRequest(req) for req in requests]
        self.total_num += requests.__len__()
    self.tp.wait()
    self.tp.dismissWorkers(100, do_join=True)
    return self.total_num, self.success_num
示例10: getIDS
def getIDS(names):
cnt=[]
poolsize=60
def f(name):
for i in [1,2]:
try:
us_set=['Mozilla','IE','Opera','Chrome','Magic','theSage','Iceweasel','Rockmelt']
ua=random.choice(us_set)
req=urllib2.Request('http://graph.facebook.com/'+name+'?fields=id,name',headers={'User-Agent':ua})
r=urllib2.urlopen(req)
r=r.read()
r=eval(r)
result.append((r['id'],r['name']))
cnt.append(True)
if len(names)>550:
time.sleep(60)# to prevent blocking of ip address by limiting rate
except Exception as e:
print e.reason,len(cnt)
if 'Nont Found' in e.reason:
if len(names)>550:
time.sleep(60)# to prevent blocking of ip address by limiting rate
break
if 'Forbidden' in e.reason:
time.sleep(600)
if len(names)>550:
time.sleep(60)# to prevent blocking of ip address by limiting rate
pass
result=[]
pool=tp.ThreadPool(poolsize)
requests=tp.makeRequests(f,names)
r=[pool.putRequest(req) for req in requests]
pool.wait()
pool.dismissWorkers(poolsize)
return result
示例11: batchTest
def batchTest(self, norm_target_func=None, *args, **kwds):
    """Run self.func2run (the run() function of a poc class) on every seed.

    Each seed from self.seed_iter is stripped, optionally normalised by
    *norm_target_func* (which receives the extra *args/**kwds), wrapped in
    its own argument dict, and dispatched on the thread pool with
    cbSaveResult / cbHandleErr as the result and error callbacks.

    Returns:
        (total_num, finished_num, err_num) after the pool drains.
    """
    base_args = {
        'options': self.options,
        'success': None,
        'poc_ret': {},
    }

    def generate_args():
        for seed in self.seed_iter:
            target = seed.strip()
            if norm_target_func:
                target = norm_target_func(target, *args, **kwds)
            base_args['options']['target'] = target
            # deepcopy so every request owns an independent snapshot
            yield deepcopy(base_args)

    work_items = threadpool.makeRequests(callable_=self.func2run,
                                         args_list=generate_args(),
                                         callback=self.cbSaveResult,
                                         exc_callback=self.cbHandleErr)
    for work in work_items:
        self.tp.putRequest(work)
    self.tp.wait()
    self.tp.dismissWorkers(100, do_join=True)
    return self.total_num, self.finished_num, self.err_num
示例12: run
def run(self):
    """Scan every entry of self.dicts concurrently and return self.result.

    Each dictionary entry is handed to self._scan_start on a 10-worker
    thread pool; workers populate self.result as a side effect.
    """
    worker_count = 10
    pool = threadpool.ThreadPool(worker_count)
    for work in threadpool.makeRequests(self._scan_start, self.dicts):
        pool.putRequest(work)
    pool.wait()
    # Bug fix: the pool only has 10 workers but the original dismissed 20,
    # leaving 10 phantom dismiss requests queued on the pool.
    pool.dismissWorkers(worker_count, do_join=True)
    return self.result
示例13: create_records_cache
def create_records_cache(self, provider, path):
    """Build the Records cache by parsing every record found under *path*.

    Collects each subdirectory's record.json path, then parses them on a
    thread pool -- a single worker when *provider* is "local" (to avoid
    thrashing the disk), otherwise up to default_number workers.  Parsed
    records arrive via self.append_records_cache; failures go to
    self.handle_exception.  Assumes a valid session.
    """
    records = []
    # If we are in `/' then get all records
    # NOTE(review): directory listing uses self.provider while the
    # thread-count decision below uses the *provider* argument -- confirm
    # the two always agree.
    for d in self.provider.metadata(path).lsdirs():
        recpath = d + "/record.json"
        log.debug(recpath)
        records.append(recpath)
    requests = threadpool.makeRequests(self.records_worker, records, self.append_records_cache, self.handle_exception)
    #insert the requests into the threadpool
    # This is ugly but will need serious refactoring for the local provider.
    # basically: if using local storage then just use one thread to avoid choking on the HD.
    # For dropbox and other remote providers use multi-threading
    if ( provider == "local" ):
        pool = threadpool.ThreadPool(1)
    else:
        # default_number is presumably a module-level cap on pool size -- TODO confirm
        pool = threadpool.ThreadPool(min(len(requests), default_number))
    for req in requests:
        pool.putRequest(req)
        log.debug("Work request #%s added." % req.requestID)
    #wait for them to finish (or you could go and do something else)
    pool.wait()
    # NOTE(review): dismiss count is capped at 20 while pool size is capped
    # at default_number -- verify these caps were meant to match.
    pool.dismissWorkers(min(len(requests), 20), do_join=True)
    log.debug("workers length: %s" % len(pool.workers))
示例14: winconf_multi
def winconf_multi(nb_jobs = 4):
from ranwinconf.list_AD import get_all_servers
pattern = conf_get_IFP(config, "GENERAL", "PATTERN", "operatingSystem='*Server*'")
print "Retrieving server list from AD based on pattern: %s" % pattern
server_list = get_all_servers("objectClass='computer' AND %s" % pattern)
server_name_list = [ computer.cn for computer in server_list]
print "%d servers were retrieved" % len(server_name_list)
import threadpool
pool = threadpool.ThreadPool(nb_jobs)
requests = threadpool.makeRequests(thread_work, server_name_list)
for req in requests:
pool.putRequest(req)
pool.wait()
for server_name in server_name_list:
manage_vcs_and_notification(server_name, "%s.txt" % server_name)
示例15: main
def main():
print "=="
f=open('delay_dist_fail.txt','r',102400);
lines = f.readlines()
length = len(lines)
total = length
print length
logging.info("length:%d",length)
index = 0
process_cnt=0;
pool = threadpool.ThreadPool(100)
for line in lines :
index +=1;
logging.info(line)
process_cnt +=1 ;
#print line
requests = threadpool.makeRequests(appendStatusAndTime,[line], asyn_callback)
for req in requests:
req.total = length;
req.process = process_cnt
pool.putRequest(req)
if index == 200:
pool.wait()
index =0
#print "====="
pool.wait()