This article collects typical usage examples of Python's threading.active_count function: what active_count does, how to call it, and real-world code that uses it.
Fifteen code examples of active_count are shown below, sorted by popularity by default.
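Before the project examples, here is a minimal standalone sketch of what threading.active_count() reports: the number of Thread objects currently alive, including the main thread. The worker function, the 0.5-second sleep, and the expected counts in the comments are illustrative assumptions, not taken from any of the examples below.

import threading
import time

def worker():
    # keep the thread alive long enough to be counted
    time.sleep(0.5)

print(threading.active_count())   # 1: only the main thread is alive

threads = [threading.Thread(target=worker) for _ in range(3)]
for t in threads:
    t.start()
print(threading.active_count())   # typically 4: the main thread plus the three workers

for t in threads:
    t.join()
print(threading.active_count())   # back to 1 once every worker has exited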
Example 1: run_tcp_server
def run_tcp_server(host='', port=21567):
    server_addr = (host, port)
    tcp_server_socket = skt.socket(skt.AF_INET, skt.SOCK_STREAM)
    tcp_server_socket.bind(server_addr)
    tcp_server_socket.listen(5)
    client_skt_addr_dict = {}
    print 'waiting for connection...'
    try:
        while True:
            tcp_client_socket, cli_addr = tcp_server_socket.accept()
            t = MyThread(tcp_client_socket, cli_addr)
            client_skt_addr_dict[(tcp_client_socket, cli_addr)] = t
            t.start()
            print '=' * 20
            for d in client_skt_addr_dict:
                print client_skt_addr_dict[d]
    except (KeyboardInterrupt, EOFError), err:
        print 'got error,cnt=', err, threading.active_count()
        for d in client_skt_addr_dict:
            t = client_skt_addr_dict[d]
            print 'cnt=', threading.active_count()
            t.quit()
        tcp_client_socket.close()
    tcp_server_socket.close()
    print client_skt_addr_dict, threading.active_count()
Example 2: percentage_fancy
def percentage_fancy(self):
    if not DNSreverse._percentage_bound:
        DNSreverse._percentage_bound = DNSreverse.ip_amount / 10.0
        if not int(DNSreverse._percentage_bound):
            DNSreverse._percentage_bound = 1.0
    if not DNSreverse.ip_done:
        return
    sync_on_disk = False
    if not DNSreverse.ip_done % int(DNSreverse._percentage_bound):
        print "%d\t%d%%\t%s\tT%d" % (DNSreverse.ip_done,
                                     (DNSreverse.ip_done * (10 / DNSreverse._percentage_bound)),
                                     time.ctime(), threading.active_count())
        sync_on_disk = True
    # other random possibility based on birthday paradox to show counters...
    if random.randint(0, int(DNSreverse._percentage_bound * 10)) == DNSreverse.ip_done:
        print "%d\t%d%%\t%s\tT%d" % (DNSreverse.ip_done,
                                     (DNSreverse.ip_done * (10 / DNSreverse._percentage_bound)),
                                     time.ctime(), threading.active_count())
        sync_on_disk = True
    if sync_on_disk:
        DNSreverse.save_status(mandatory=False)
Example 3: main
def main():
    global xtime
    global success
    global fail
    success = 0
    fail = 0
    xtime = time.strftime("%Y-%m-%d[%H.%M.%S]")
    print xtime
    MaxThreads = sys.argv[1]
    MaxThreads = int(MaxThreads)
    mythreads = Queue.Queue(maxsize=0)
    for i in open("u.txt").readlines():  # load every worker thread into the queue first, to be taken out later
        i = i.strip('\n')
        t = threading.Thread(target=post, args=(i,))
        t.setDaemon(True)
        mythreads.put(t)
    print 'Total Threads:%d' % MaxThreads
    print 'Total URLs:%d' % mythreads.qsize()
    time.sleep(2)
    while True:  # loop until one of the exit conditions below is met
        if threading.active_count() == 1 and mythreads.qsize() == 0:  # exit when no URLs remain and only the main thread is active (the main thread counts as one)  # 2015-9-28 00:43 Fixed
            print 'Done at %s' % time.strftime("%Y-%m-%d[%H.%M.%S]")
            break
        elif threading.active_count() < MaxThreads:  # check how many threads are running; keep adding threads while the count is below the input value
            if mythreads.qsize() == 0:  # if no URLs remain, do not read from the queue (otherwise this would block forever); update the window title to tell the user  # 2015-9-28 20:15 Fixed
                os.system("title No URL left,waiting to exit,Current threads: %d,Success:%d,Failed:%d" % (threading.active_count(), success, fail))
                time.sleep(60)  # after 60 seconds, go back to the previous if to check whether all threads have finished
            else:
                os.system("title Current threads: %d,URLs left: %d,Success:%d,Failed:%d" % (threading.active_count(), mythreads.qsize(), success, fail))  # update the window title; comment out if it uses too much CPU  # thread/URL counts only change when a thread is started  2015-10-2 22:30 Fixed
                t = mythreads.get()  # take one thread from the queue
                t.start()  # start that thread
                t.join(1)  # block for one second before starting the next thread; comment out if you don't want to wait
    print 'Success:%d,Failed:%d' % (success, fail)
Example 4: test_kafka_producer_gc_cleanup
def test_kafka_producer_gc_cleanup():
    threads = threading.active_count()
    producer = KafkaProducer(api_version='0.9')  # set api_version explicitly to avoid auto-detection
    assert threading.active_count() == threads + 1
    del(producer)
    gc.collect()
    assert threading.active_count() == threads
Example 5: loop
def loop():
    while True:
        print threading.current_thread().name
        print threading.active_count()
        print '%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%'
        if len(iidbuf) != 0:
            print 10000000 - len(iidbuf)
            iid = iidbuf[0]
            iidbuf.pop(0)
            print iid
            try:
                if imdb_ids.find_one({'imdbID': iid}) == None:
                    url = 'http://www.imdb.com/title/' + iid + '/'
                    req = urllib2.Request(url, headers={'User-Agent': 'Mozilla/5.0'})
                    cont = urllib2.urlopen(req).read()
                    soup = BeautifulSoup(cont, "lxml")
                    data = {}
                    data['imdbID'] = iid
                    data['html_full'] = cont
                    imdb_ids.insert_one(data)
                    print bcolors.OKGREEN + "ADDED" + bcolors.ENDC
            except:
                print '!!!'
        end = time.time()
        print bcolors.OKBLUE + str((end - start) / 60) + "M" + bcolors.ENDC
Example 6: active_thread_counter
def active_thread_counter():
    with self.thread_watcher.register(u'线程计数器'):
        current_count = threading.active_count()
        while True:
            if current_count != threading.active_count():
                current_count = threading.active_count()
                self.emit(SIGNAL('activeThreadCountChanged(int)'), current_count)
Example 7: event_chessboardClick
def event_chessboardClick(self, sq):
    print("ANZ THREADS", active_count())
    if type(self.players[self.game.playerTurn]) is players.Human:
        self.players[self.game.playerTurn].processInput(self.game, sq)
    else:
        if active_count() == 1:
            Thread(target=self.players[self.game.playerTurn].doMove, args=(copy.deepcopy(self.game),)).start()
Example 8: run
def run(self):
    db = MySQLdb.connect(host=PsikonOptions.TASK_DB_HOST, user=PsikonOptions.TASK_DB_USER, passwd=PsikonOptions.TASK_DB_PASS, db=PsikonOptions.TASK_DB_NAME)
    counter = 0
    while True:
        counter = counter + 1
        totalThreads = threading.active_count() - 1
        if totalThreads < PsikonOptions.THREAD_LIMIT:
            realLimit = PsikonOptions.THREAD_LIMIT - totalThreads
            cur = db.cursor()
            cur.execute("""SELECT id FROM sys_tasks WHERE task_status=%s LIMIT %s""", (BlogTask.STATUS_PENDING, realLimit))
            while True:
                row = cur.fetchone()
                if row == False or row == None:
                    break
                logger.info("New task ID: " + str(row[0]))
                TaskThread(row[0]).start()
        if counter == 10:
            logger.info("Threads: " + str(threading.active_count()))
            counter = 0
        time.sleep(1)
Example 9: test_active_count_py2
def test_active_count_py2(self):
    self.assertEqual(threading.active_count(), 1)

    class TestingThread(threading.Thread):
        def __init__(self, _condition):
            super(TestingThread, self).__init__()
            self.__condition = _condition

        def run(self):
            self.__condition.acquire()
            try:  # could also use 'with self.__condition'
                self.__condition.wait()
            finally:
                self.__condition.release()

    condition = threading.Condition()
    thread = TestingThread(condition)
    thread.start()
    self.assertEqual(threading.active_count(), 2)
    condition.acquire()
    try:  # could also use 'with condition'
        condition.notify()
    finally:
        condition.release()
    thread.join()
Example 10: run
def run(target_amount):
    if target_amount < 1:
        target_amount = 10
    ip_list = ip_getter.read_from_file()
    if len(ip_list) < 1:
        return
    result_queue = Queue.PriorityQueue()
    while result_queue.qsize() < target_amount:
        cur_ip = get_random_ip_from_list(ip_list)
        print 'checking %s...' % cur_ip
        threading.Thread(target=do_test, args=(cur_ip, result_queue)).start()
        while threading.active_count() > k_max_thread_count:
            time.sleep(2)
    while threading.active_count() > 1:
        time.sleep(2)
    result_list = []
    while not result_queue.empty():
        c_ip = result_queue.get()
        c_info = c_ip.info()
        result_list.append(c_info[0])
        print "ip:%s delay:%d" % (c_info[0], c_info[1])
    for i in result_list:
        sys.stdout.write(i + '|')
Example 11: scrape_all_disease_contents_once
def scrape_all_disease_contents_once(disease_db):
    common.get_logger().warning("Scraping all disease contents once more...")
    main_thread = threading.currentThread()
    disease_names = list(disease_db[common.ALL_DISEASES_VIEW].keys())
    n_diseases = len(disease_names)
    i_disease = 0
    is_still_need_to_scrape = False
    # create threads to scrape disease contents
    while i_disease < n_diseases:
        if (threading.active_count() - 1 < common.MAX_THREADS) and \
           (i_disease < n_diseases):
            disease_name = disease_names[i_disease]
            the_disease = disease_db[common.ALL_DISEASES_VIEW][disease_name]
            if not the_disease.is_already_scraped:
                is_still_need_to_scrape = True
                t = threading.Thread(target=scrape_all_contents_of_a_disease,
                                     args=(the_disease, i_disease, n_diseases))
                t.start()
            i_disease += 1
        if threading.active_count() - 1 >= common.MAX_THREADS:
            time.sleep(common.TIMEOUT_WAIT_THREAD_FINISH)
    # wait for all the threads to finish
    while threading.active_count() > 1:
        time.sleep(common.TIMEOUT_WAIT_THREAD_FINISH)
    return is_still_need_to_scrape
Example 12: error_cleanup
def error_cleanup():
    global vm
    global schds
    for schd_job in schd_jobs:
        thread = threading.Thread(target=delete_scheduler_job, args=(schd_job.uuid, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()
    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)
    for schd_trigger in schd_triggers:
        thread = threading.Thread(target=delete_scheduler_trigger, args=(schd_trigger.uuid, ))
        while threading.active_count() > 10:
            time.sleep(0.5)
        exc = sys.exc_info()
        thread.start()
    while threading.activeCount() > 1:
        exc = sys.exc_info()
        time.sleep(0.1)
    if vm:
        try:
            vm.destroy()
        except:
            test_util.test_logger('expected exception when destroy VM since too many queued task')
Example 13: run
def run(self):
    """
    This method creates a pool of threads, starts them, and waits for the
    'input_queue' to be empty before asking them to stop.
    Results, if any, will be available in the 'output_queue'.
    """
    self._set_pool_size()
    log.debug('Active threads: %d' % threading.active_count())  #@UndefinedVariable
    # Create threads and add them to the pool
    for i in range(self.pool_size):  #@UnusedVariable
        thread = self.thread_class(self.in_queue, self.out_queue,
                                   **self._thread_args)
        thread.name = 'Worker-%02d' % i
        self._thread_pool.append(thread)
        thread.start()
    log.debug('Active threads: %d' % threading.active_count())  #@UndefinedVariable
    # Wait for the threads to process all the clients in the queue
    while not self.in_queue.empty():
        pass
    # Ask threads to stop
    for thread in self._thread_pool:
        thread.join()
    self.finished = True
Example 14: test_thread_count
def test_thread_count():
    """
    Test how many threads get created
    """
    before_num = threading.active_count()
    server = APNSProxyServer(dummy_setting)
    server.create_workers({
        "myApp1": {
            "application_id": "myApp1",
            "name": "My App1",
            "sandbox": False,
            "cert_file": "sample.cert",
            "key_file": "sample.key"
        },
        "myApp2": {
            "application_id": "myApp2",
            "name": "My App2",
            "sandbox": False,
            "cert_file": "sample.cert",
            "key_file": "sample.key"
        },
    }, 3)
    after_num = threading.active_count()
    eq_(before_num + 6, after_num)
Example 15: test
def test():
    global session_uuid
    global session_to
    global session_mc
    vm_num = os.environ.get('ZSTACK_TEST_NUM')
    if not vm_num:
        vm_num = 0
    else:
        vm_num = int(vm_num)
    test_util.test_logger('ZSTACK_THREAD_THRESHOLD is %d' % thread_threshold)
    test_util.test_logger('ZSTACK_TEST_NUM is %d' % vm_num)
    org_num = vm_num
    vm_creation_option = test_util.VmOption()
    image_name = os.environ.get('imageName_s')
    image_uuid = test_lib.lib_get_image_by_name(image_name).uuid
    l3_name = os.environ.get('l3VlanNetworkName1')
    conditions = res_ops.gen_query_conditions('name', '=', l3_name)
    l3_uuid = res_ops.query_resource_with_num(res_ops.L3_NETWORK, conditions, \
            session_uuid, start = 0, limit = 1)[0].uuid
    vm_creation_option.set_l3_uuids([l3_uuid])
    conditions = res_ops.gen_query_conditions('type', '=', 'UserVm')
    instance_offering_uuid = res_ops.query_resource(res_ops.INSTANCE_OFFERING, conditions)[0].uuid
    vm_creation_option.set_image_uuid(image_uuid)
    vm_creation_option.set_instance_offering_uuid(instance_offering_uuid)
    session_uuid = acc_ops.login_as_admin()
    # change account session timeout.
    session_to = con_ops.change_global_config('identity', 'session.timeout', '720000', session_uuid)
    session_mc = con_ops.change_global_config('identity', 'session.maxConcurrent', '10000', session_uuid)
    vm_creation_option.set_session_uuid(session_uuid)
    vm = test_vm_header.ZstackTestVm()
    random_name = random.random()
    vm_name = 'multihost_basic_vm_%s' % str(random_name)
    vm_creation_option.set_name(vm_name)
    while vm_num > 0:
        check_thread_exception()
        vm.set_creation_option(vm_creation_option)
        vm_num -= 1
        thread = threading.Thread(target=create_vm, args=(vm,))
        while threading.active_count() > thread_threshold:
            time.sleep(1)
        thread.start()
    while threading.active_count() > 1:
        time.sleep(0.01)
    cond = res_ops.gen_query_conditions('name', '=', vm_name)
    vms = res_ops.query_resource_count(res_ops.VM_INSTANCE, cond, session_uuid)
    con_ops.change_global_config('identity', 'session.timeout', session_to, session_uuid)
    con_ops.change_global_config('identity', 'session.maxConcurrent', session_mc, session_uuid)
    acc_ops.logout(session_uuid)
    if vms == org_num:
        test_util.test_pass('Create %d VMs Test Success' % org_num)
    else:
        test_util.test_fail('Create %d VMs Test Failed. Only find %d VMs.' % (org_num, vms))