本文整理汇总了Python中multiprocessing.dummy.Pool.join方法的典型用法代码示例。如果您正苦于以下问题:Python Pool.join方法的具体用法?Python Pool.join怎么用?Python Pool.join使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类multiprocessing.dummy.Pool
的用法示例。
在下文中一共展示了Pool.join方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: check_and_rank_ip
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def check_and_rank_ip(session):
    # Probe every stored proxy IP against jd.com, rank the responsive ones by
    # latency, delete the dead ones, and return how many survive.
    def ping_jd(ip):
        # Time one proxied search request; rank becomes the round trip in
        # hundredths of a second, or None when the proxy fails/returns junk.
        t = time.time()
        try:
            respond = requests.post('http://so.m.jd.com/ware/searchList.action',
                                    data={'_format_': 'json', 'stock': 1, 'page': 1, 'keyword': '手机'},
                                    proxies=ip.to_proxy(), timeout=5).content
            # A body that isn't valid JSON means the proxy mangled the response.
            json.loads(respond)
            ip.rank = int(100 * (time.time() - t))
        except Exception:
            ip.rank = None
        return ip
    print datetime.now(), '开始判断ip活性'
    from multiprocessing.dummy import Pool as ThreadPool
    all_ip = session.query(IP).all()
    # 100 threads: the work is pure network wait, so heavy oversubscription is fine.
    pool = ThreadPool(100)
    ips = pool.map(ping_jd, all_ip)
    for ip in ips:
        session.add(ip)
    # `== None` is intentional here: SQLAlchemy overloads it into IS NULL.
    session.query(IP).filter(IP.rank == None).delete()
    session.commit()
    pool.close()
    pool.join()
    return session.query(IP).count()
示例2: parallel_bulk
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def parallel_bulk(client, actions, thread_count=4, chunk_size=500,
                  max_chunk_bytes=100 * 1024 * 1024,
                  expand_action_callback=expand_action, **kwargs):
    """
    Parallel version of the bulk helper run in multiple threads at once.
    :arg client: instance of :class:`~elasticsearch.Elasticsearch` to use
    :arg actions: iterator containing the actions
    :arg thread_count: size of the threadpool to use for the bulk requests
    :arg chunk_size: number of docs in one chunk sent to es (default: 500)
    :arg max_chunk_bytes: the maximum size of the request in bytes (default: 100MB)
    :arg raise_on_error: raise ``BulkIndexError`` containing errors (as `.errors`)
        from the execution of the last chunk when some occur. By default we raise.
    :arg raise_on_exception: if ``False`` then don't propagate exceptions from
        call to ``bulk`` and just report the items that failed as failed.
    :arg expand_action_callback: callback executed on each action passed in,
        should return a tuple containing the action line and the data line
        (`None` if data line should be omitted).
    """
    # BUG FIX: the default was 100 * 1014 * 1024, which is ~99.02MB and
    # contradicts the documented "100MB"; 1014 was a typo for 1024.
    # Avoid importing multiprocessing unless parallel_bulk is used
    # to avoid exceptions on restricted environments like App Engine
    from multiprocessing.dummy import Pool
    actions = map(expand_action_callback, actions)
    pool = Pool(thread_count)
    try:
        for result in pool.imap(
            lambda chunk: list(_process_bulk_chunk(client, chunk, **kwargs)),
            _chunk_actions(actions, chunk_size, max_chunk_bytes,
                           client.transport.serializer)
        ):
            for item in result:
                yield item
    finally:
        # Runs even when the consumer abandons the generator early, so the
        # worker threads are always reclaimed.
        pool.close()
        pool.join()
示例3: createDemo
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def createDemo(self):
    """Create 1000 demo customer accounts concurrently via a thread pool.

    Builds pseudo-random user records and fans them out to self.createUser.
    """
    usersData = []
    event = Event()
    # BUG FIX: the original constructed ThreadPool(cpu_count() * 2) and then
    # immediately rebound the name to ThreadPool(5), leaking the first pool's
    # threads. Only the 5-worker pool is created now.
    pool = ThreadPool(5)
    for i in range(0, 1000):
        # Re-hash a random value each time to derive unique-looking names.
        string = hashlib.sha224()
        string.update('{}'.format(random.random()))
        first = 'first{}'.format(string.hexdigest()[0:10])
        string.update('{}'.format(random.random()))
        last = 'last{}'.format(string.hexdigest()[0:10])
        tel = '{}'.format(8005550000 + i)
        email = 'email{}@localhost.email'.format(string.hexdigest()[0:10])
        postData = {
            'first': first,
            'last': last,
            'tel': tel,
            'email': email,
            'pass': 'password',
            'type': 'customer',
            'event': event
        }
        usersData.append(postData)
    results = pool.map(self.createUser, usersData)
    pool.close()
    pool.join()
示例4: build_words_weight
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def build_words_weight():
    # Recompute and persist a 0-100 "words weight" score for every BigV,
    # blending the posted-article accuracy rate with human judgements.
    st = time.time()
    bigvs = BigVs.objects.all()
    def _build(b):
        # Correct/incorrect counts for this v's posted articles.
        data = ArticlePostedResults.active_objects.filter(bigv__v_id=b.v_id, is_correct__in=(0, 1)).values('is_correct').annotate(count=Count('is_correct')).order_by('is_correct')
        sum_c , w, c = 0, 0, 0
        for d in data:
            if d['is_correct'] == 1:
                c = d['count']
            sum_c += d['count']
        if sum_c:
            # Normalize the article signal to a fixed weight of 200 samples so
            # it doesn't drown out (or get drowned out by) the judgement data.
            w = c * 1.0 / sum_c
            c = w * 200
            sum_c = 200
        # Fold in human judgements ('right' counts toward the score).
        data = Judgement.objects.filter(article__bigv=b, judge__isnull=False).values('judge').annotate(count=Count('judge')).order_by('judge')
        for d in data:
            if d['judge'] == 'right':
                c += d['count']
            sum_c += d['count']
        if sum_c:
            # Final score: rounded percentage of "right" over total evidence.
            w = int(round(c * 1.0 / sum_c * 100))
        b.words_weight = w
        b.save()
        print b.name, c, sum_c, w
    pool = Pool(8)
    pool.map(_build, bigvs)
    pool.close()
    pool.join()
    ed = time.time()
    debug('build_words_weight', ed - st)
示例5: run
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def run(threads):
    """Download a fixed list of python.org pages with `threads` workers and
    print the total number of characters read."""
    urls = ['http://www.python.org',
            'http://www.python.org/about/',
            'http://www.onlamp.com/pub/a/python/2003/04/17/metaclasses.html',
            'http://www.python.org/doc/',
            'http://www.python.org/download/',
            'http://www.python.org/getit/',
            'http://www.python.org/community/',
            'https://wiki.python.org/moin/',
            'http://planet.python.org/',
            'https://wiki.python.org/moin/LocalUserGroups',
            'http://www.python.org/psf/',
            'http://docs.python.org/devguide/',
            'http://www.python.org/community/awards/'
            ]
    # TLSv1 context for the HTTPS URLs in the list.
    tls_ctx = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
    browser_ua = ' Mozilla/5.0 (Windows NT 6.1; WOW64; rv:12.0) Gecko/20100101 Firefox/12.0'
    prepared = [urllib.request.Request(url=url, data=b'None',
                                       headers={'User-Agent': browser_ua})
                for url in urls]
    workers = ThreadPool(threads)
    pages = list(workers.map(
        lambda req: urllib.request.urlopen(req, context=tls_ctx), prepared))
    workers.close()
    workers.join()
    sizes = [len(page.read().decode('utf-8')) for page in pages]
    print(threads, 'поток(ов), прочитано', sum(sizes), 'байт')
示例6: Producer
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def Producer():
    # Fetch a small set of pages concurrently and report the elapsed time.
    # urls = [
    # 'http://www.python.org',
    # 'http://www.python.org/about/',
    # 'http://www.onlamp.com/pub/a/python/2003/04/17/metaclasses.html',
    # 'http://www.python.org/doc/',
    # 'http://www.python.org/download/',
    # 'http://www.python.org/getit/',
    # 'http://www.python.org/community/',
    # 'https://wiki.python.org/moin/',
    # 'http://planet.python.org/',
    # 'https://wiki.python.org/moin/LocalUserGroups',
    # 'http://www.python.org/psf/',
    # 'http://docs.python.org/devguide/',
    # 'http://www.python.org/community/awards/'
    # # etc..
    # ]
    # 'http://wwww.qq.com','http://www.baidu.com'
    urls = [
        'http://www.taobao.com','http://www.sina.com.cn'
    ]
    start_time = time.time()
    # Make the Pool of workers
    pool = ThreadPool(4)
    # Open the urls in their own threads
    # and return the results
    results = pool.map(urllib2.urlopen, urls)
    #close the pool and wait for the work to finish
    pool.close()
    pool.join()
    print "Done! time Taken()",format(time.time()-start_time)
示例7: load
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def load(cls, docs, ignore_errors=False):
    """Force load the provided docs to read from file system."""
    if not docs:
        return
    pod = docs[0].pod

    def force_read(doc):
        """Force the doc to read the source file."""
        try:
            # pylint: disable=pointless-statement
            doc.has_serving_path()  # Using doc fields forces file read.
        except document_front_matter.BadFormatError:
            if not ignore_errors:
                raise

    with pod.profile.timer('DocsLoader.load'):
        # Serial path: threading unavailable or too few docs to pay for a pool.
        if ThreadPool is None or len(docs) < cls.MIN_POOL_COUNT:
            for doc in docs:
                force_read(doc)
            return
        worker_count = int(round(min(cls.MAX_POOL_SIZE,
                                     len(docs) * cls.POOL_RATIO)))
        pool = ThreadPool(worker_count)
        # imap_unordered is lazy; drain it so every read actually happens.
        for _ in pool.imap_unordered(force_read, docs):
            pass
        pool.close()
        pool.join()
示例8: multiRunuser
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def multiRunuser():
    # Run `runuser` against every host in the module-level ip_list, using
    # 8 worker threads per CPU core (the work is presumably I/O-bound).
    pool = ThreadPool(cpu_count() * 8)
    global ip_list
    global results
    # NOTE(review): map_async stores an AsyncResult object in the global, not
    # the values themselves — callers must use results.get(); confirm that is
    # the intent, since join() below already waits for all tasks.
    results = pool.map_async(runuser, ip_list)
    pool.close()
    pool.join()
示例9: main
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def main():
    """CLI entry point: request every known icon URL against a target server."""
    parser = argparse.ArgumentParser(usage='%(prog)s [options] SERVER_URL',
                                     description=__doc__)
    parser.add_argument(
        '-t', '--threads',
        help='Number of threads (simultaneous connections)',
        dest='threads', default=1, type=int)
    parser.add_argument('server', help='URL of server')
    args = parser.parse_args()
    # Allow a bare hostname on the command line.
    server = args.server
    if not server.startswith('http://'):
        server = 'http://{}'.format(server)
    # One (font, char, url) entry per icon, in deterministic sorted order.
    icons = sorted(
        (font_id, char, os.path.join(server, 'icon', font_id, '000', char))
        for font_id, font in fonts.items()
        for char in font['characters'])
    print('{} icons to test on {} ...'.format(len(icons), args.server))
    if MAX_ICONS:
        # Optional cap for quick smoke-test runs.
        icons = icons[:MAX_ICONS]
    workers = Pool(args.threads)
    workers.map(check_icon, icons)
    workers.close()
    workers.join()
示例10: e_cal
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def e_cal(l, cores):
    # Compute e = sum(1/n!) to `l` decimal digits, processing `cores` terms
    # per iteration in a worker pool, until the sum stops changing at the
    # requested precision. Prints a loop counter while running.
    global LOOPS
    '''
    e calculator
    this function will recive digits of float
    and calculate and print status during working.
    This function will return value of e.
    '''
    # NOTE(review): Pool() uses the default worker count; `cores` only sets
    # the batch width per iteration — confirm that is intended.
    p = Pool()
    getcontext().prec = l
    e = Decimal(0)
    i = 0
    temp = 0
    c = 0
    while True:
        fact = p.map(math.factorial, range(i, i+cores)) #parallel process factorial
        e += sum(p.map(one_div, fact)) #processed factorial will total in here
        i += cores
        c += 1
        LOOPS += 1
        sys.stdout.write("\r%i loops passed." % (c) ) #Print Loop status
        sys.stdout.flush()
        #print i, "loops passed."
        # Converged: adding the latest batch of terms changed nothing at
        # precision l, so stop.
        if e == temp:
            break
        temp = e
    sys.stdout.write("\r%i loops passed.\n" % (c) )
    print i
    p.close()
    p.join()
    return e
示例11: BurstUc
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def BurstUc(host, path, passfile):
hostuser = host.split('.')
hostuser = hostuser[len(hostuser)-2]
hostdir = [hostuser,hostuser+hostuser,'admin'+hostuser,hostuser+'123','manage'+hostuser,hostuser+'123456',hostuser+'admin','123'+hostuser]
opts_list = []
f = open(passfile, 'r')
password = f.read().split()
dic = password+hostdir
pool = ThreadPool(10)
host1 = host+path
for x in range(len(dic)):
mima = dic[x]
opts = {
'host': host1,
'password': mima
}
opts_list.append(opts)
pool.map(LoginUc,opts_list)
pool.join()
print "All PassWord Run Over"
示例12: eval_dir
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def eval_dir(fn, files_list):
    """Apply `fn` to every entry of `files_list` in parallel and return the
    sum of the per-file results."""
    workers = ThreadPool(WORKER_NUM)
    per_file = workers.map(fn, files_list)
    # Shut the pool down and wait for all workers before aggregating.
    workers.close()
    workers.join()
    return sum(per_file)
示例13: getAllSecrets
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def getAllSecrets(version="", region=None, table="credential-store",
                  context=None, credential=None, session=None, **kwargs):
    '''
    fetch and decrypt all secrets
    '''
    if session is None:
        session = get_session(**kwargs)
    dynamodb = session.resource('dynamodb', region_name=region)
    kms = session.client('kms', region_name=region)
    secrets = listSecrets(region, table, **kwargs)
    # Only return the secrets that match the pattern in `credential`
    # This already works out of the box with the CLI get action,
    # but that action doesn't support wildcards when using as library
    if credential and WILDCARD_CHAR in credential:
        names = set(expand_wildcard(credential,
                                    [x["name"]
                                     for x in secrets]))
    else:
        names = set(x["name"] for x in secrets)
    # BUG FIX: ThreadPool(0) raises ValueError; with no matching names there
    # is nothing to fetch, so return an empty mapping directly.
    if not names:
        return {}
    pool = ThreadPool(min(len(names), THREAD_POOL_MAX_SIZE))
    try:
        # Renamed the lambda parameter: the original reused `credential`,
        # shadowing the wildcard-pattern argument above.
        results = pool.map(
            lambda name: getSecret(name, version, region, table, context,
                                   dynamodb, kms, **kwargs),
            names)
    finally:
        pool.close()
        pool.join()
    # map preserves the iteration order of `names`, so zip pairs correctly.
    return dict(zip(names, results))
示例14: get_proxys
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def get_proxys(file_name, thread_num=5):
"""这里的文件内容可以是从cn-proxy.com复制过来的数据"""
proxys = []
ip_reg = re.compile(r'^\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}', re.I)
try:
with open(file_name, 'r') as fd_proxy:
for line in fd_proxy:
if line and line.strip():
print 'line',line.strip()
if ip_reg.match(line.strip()):
ip, port = line.strip().split()[0], line.strip().split()[1]
proxy = '%s:%s' %(ip, port)
print 'proxy',proxy
# if test_connection(proxy):
if proxy:
proxys.append(proxy)
pool = ThreadPool(thread_num)
results = pool.map(test_connection,proxys)
pool.close()
pool.join()
proxys = list(set(results))
proxys = sorted(proxys,key=lambda x:x.split(".")[0])
except Exception,e:
print 'error',e
示例15: create_all_preflop_two_hand_equity
# 需要导入模块: from multiprocessing.dummy import Pool [as 别名]
# 或者: from multiprocessing.dummy.Pool import join [as 别名]
def create_all_preflop_two_hand_equity(verbose=False, save=False, distributed=False, nb_process=4):
"""returns preflop_two_hand_equity for all two hand preflop combinations"""
global all_preflop_two_hands
print '\n--------------- start create_all_preflop_two_hand_equity'
print 'all preflop two hands = \nstart = {}\nend = {}\nnb of elements = {}'.format(all_preflop_two_hands[:5], all_preflop_two_hands[-5:], len(all_preflop_two_hands))
t0 = timer()
if (distributed):
pool = ThreadPool(nb_process)
equity = pool.map(preflop_two_hand_equity, all_preflop_two_hands[:])
pool.close()
pool.join()
else:
equity = []
for k, p in enumerate(all_preflop_two_hands[:]):
if (verbose):
# print k,' - ', p
sys.stdout.write('\rk=%5d / %5d : {}' % (k+1, len(all_preflop_two_hands)), p)
sys.stdout.flush()
equity.append(preflop_two_hand_equity(p))
t1 = timer()
print 'all_preflop_two_hand_equity time = {:9.4f} s'.format(t1-t0)
print 'exact number of distinct (rankwise) pairs of preflop hands = {}'.format(np.array([len(e) for e in equity]).sum())
if (save):
cPickle.dump(equity, open(os.path.join('Tables', 'all_preflop_two_hand_equity.pk'), 'wb'))
print '{} saved to disk as {}'.format('equity', os.path.join('Tables', 'all_preflop_two_hand_equity.pk'))
return equity