This article collects typical usage examples of the multiprocessing.current_process function in Python. If you are unsure what exactly current_process does, how to call it, or what it looks like in real code, the curated examples below should help.
The following presents 15 code examples of the current_process function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
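Before the project-level examples, here is a minimal, self-contained sketch of what current_process() actually returns: the current Process object, whose name and pid attributes appear throughout the examples below.

import multiprocessing
from multiprocessing import current_process

def show_identity():
    # prints "MainProcess" in the parent, the child's name (e.g. "demo-worker") otherwise
    p = current_process()
    print(p.name, p.pid)

if __name__ == '__main__':
    show_identity()  # runs in the main process
    worker = multiprocessing.Process(target=show_identity, name='demo-worker')
    worker.start()
    worker.join()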
Example 1: choice_set_worker
import time
from multiprocessing import current_process

def choice_set_worker(work_queue, done_queue, network, trip_data, master_config, trip_times, ext_bound):
    this_network = network
    # initialize the link randomizer
    link_randomizer = None
    if master_config.choice_set_config['method'] == 'doubly_stochastic' and not master_config.choice_set_config['randomize_after']:
        link_randomizer = master_config.choice_set_config.get_link_randomizer(this_network, master_config)
        if '1' in current_process().name:
            for var in link_randomizer['variables']:
                print(var, " zero p: ", link_randomizer['zero']['probs'][var])
                print(var, " posi m: ", link_randomizer['pos']['means'][var])
    if master_config.choice_set_config['method'] == 'doubly_stochastic' and master_config.choice_set_config['randomize_after']:
        link_randomizer = master_config.choice_set_config['randomize_after_dev']
    idx = 0
    for trip_id in iter(work_queue.get, 'STOP'):
        idx += 1
        print(time.asctime(time.localtime()), "-", current_process().name, "-", idx, ". trip_id:", trip_id[0], ", sub_trip:", trip_id[1], ", stage:", trip_id[2])
        the_set, chosen_overlap = generate_choice_set(this_network, trip_data[trip_id], master_config.choice_set_config, link_randomizer, master_config['time_dependent_relation'], trip_times[trip_id[0]], ext_bound)
        done_queue.put((trip_id[0], the_set, chosen_overlap))
done_queue.put('STOP')
return True
Example 2: TestCustomLogging
import multiprocessing
import time

def TestCustomLogging(l, n, s):
    for i in range(n):
        l.info("Info - {} - {}".format(multiprocessing.current_process().name, time.strftime("%d.%m.%Y %H:%M:%S", time.gmtime())))
        l.error("Error - {} - {}".format(multiprocessing.current_process().name, time.strftime("%d.%m.%Y %H:%M:%S", time.gmtime())))
time.sleep(0.2)
if not s.is_set():
s.set()
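A function like this needs a logger that can be shared across processes. Below is a minimal sketch of driving the TestCustomLogging function above with the standard library's multiprocessing.log_to_stderr, assuming the default fork start method on Unix so the configured logger is inherited by the children; the worker count and iteration count are illustrative.

import logging
import multiprocessing

if __name__ == '__main__':
    logger = multiprocessing.log_to_stderr(logging.INFO)  # process-aware logger writing to stderr
    stop = multiprocessing.Event()
    workers = [multiprocessing.Process(target=TestCustomLogging, args=(logger, 3, stop))
               for _ in range(2)]
    for w in workers:
        w.start()
    for w in workers:
        w.join()
    print('stop event set:', stop.is_set())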
Example 3: scheduler
def scheduler(list_key=Conf.Q_LIST):
"""
Creates a task from a schedule at the scheduled time and schedules next run
"""
for s in Schedule.objects.exclude(repeats=0).filter(next_run__lt=timezone.now()):
args = ()
kwargs = {}
# get args, kwargs and hook
if s.kwargs:
try:
                # eval is acceptable here because the expression is wrapped in dict()
                kwargs = eval('dict({})'.format(s.kwargs))
except SyntaxError:
kwargs = {}
if s.args:
args = ast.literal_eval(s.args)
# single value won't eval to tuple, so:
            if not isinstance(args, tuple):
args = (args,)
q_options = kwargs.get('q_options', {})
if s.hook:
q_options['hook'] = s.hook
# set up the next run time
        if s.schedule_type != s.ONCE:
next_run = arrow.get(s.next_run)
if s.schedule_type == s.HOURLY:
next_run = next_run.replace(hours=+1)
elif s.schedule_type == s.DAILY:
next_run = next_run.replace(days=+1)
elif s.schedule_type == s.WEEKLY:
next_run = next_run.replace(weeks=+1)
elif s.schedule_type == s.MONTHLY:
next_run = next_run.replace(months=+1)
elif s.schedule_type == s.QUARTERLY:
next_run = next_run.replace(months=+3)
elif s.schedule_type == s.YEARLY:
next_run = next_run.replace(years=+1)
s.next_run = next_run.datetime
        s.repeats -= 1
# send it to the cluster
q_options['list_key'] = list_key
q_options['group'] = s.name or s.id
kwargs['q_options'] = q_options
        # note: later django-q releases renamed this to tasks.async_task ("async" became a reserved word in Python 3.7)
        s.task = tasks.async(s.func, *args, **kwargs)
# log it
if not s.task:
logger.error(
_('{} failed to create a task from schedule [{}]').format(current_process().name, s.name or s.id))
else:
logger.info(
_('{} created a task from schedule [{}]').format(current_process().name, s.name or s.id))
# default behavior is to delete a ONCE schedule
if s.schedule_type == s.ONCE:
if s.repeats < 0:
s.delete()
return
            # a positive repeats value keeps the schedule but prevents further runs
            s.repeats = 0
# save the schedule
s.save()
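A side note on the next_run.replace(hours=+1) calls above: they use an older arrow API, and recent arrow releases moved period arithmetic to shift(). A small sketch of the modern equivalent, assuming a current arrow version:

import arrow

next_run = arrow.utcnow()
hourly = next_run.shift(hours=1)      # older arrow: next_run.replace(hours=+1)
quarterly = next_run.shift(months=3)  # the QUARTERLY branch above
print(hourly, quarterly)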
Example 4: pusher
def pusher(task_queue, event, broker=None):
"""
    Pulls tasks off the broker and puts them in the task queue
:type task_queue: multiprocessing.Queue
:type event: multiprocessing.Event
"""
if not broker:
broker = get_broker()
logger.info(_('{} pushing tasks at {}').format(current_process().name, current_process().pid))
while True:
try:
task_set = broker.dequeue()
except Exception as e:
logger.error(e)
# broker probably crashed. Let the sentinel handle it.
sleep(10)
break
if task_set:
for task in task_set:
ack_id = task[0]
# unpack the task
try:
task = signing.SignedPackage.loads(task[1])
except (TypeError, signing.BadSignature) as e:
logger.error(e)
broker.fail(ack_id)
continue
task['ack_id'] = ack_id
task_queue.put(task)
logger.debug(_('queueing from {}').format(broker.list_key))
if event.is_set():
break
logger.info(_("{} stopped pushing tasks").format(current_process().name))
Example 5: cluster_tuples_parallel
def cluster_tuples_parallel(self, patterns, matched_tuples, child_conn):
updated_patterns = list(patterns)
count = 0
for t in matched_tuples:
count += 1
if count % 500 == 0:
            print(multiprocessing.current_process(), count, "tuples processed")
# go through all patterns(clusters of tuples) and find the one with
# the highest similarity score
max_similarity = 0
max_similarity_cluster_index = 0
for i in range(0, len(updated_patterns)):
extraction_pattern = updated_patterns[i]
accept, score = self.similarity_all(t, extraction_pattern)
if accept is True and score > max_similarity:
max_similarity = score
max_similarity_cluster_index = i
# if max_similarity < min_degree_match create a new cluster
if max_similarity < self.config.threshold_similarity:
c = Pattern(t)
updated_patterns.append(c)
# if max_similarity >= min_degree_match add to the cluster with
# the highest similarity
else:
updated_patterns[max_similarity_cluster_index].add_tuple(t)
        # eliminate clusters with five or fewer supporting tuples
        new_patterns = [p for p in updated_patterns if len(p.tuples) > 5]
pid = multiprocessing.current_process().pid
print(multiprocessing.current_process(), "Patterns: ", len(new_patterns))
child_conn.send((pid, new_patterns))
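The method above hands its results back through one end of a multiprocessing.Pipe, tagged with the worker's pid. A minimal, self-contained sketch of that pattern (the payload is a placeholder rather than the Pattern objects above):

import multiprocessing

def work(child_conn):
    pid = multiprocessing.current_process().pid
    child_conn.send((pid, ['placeholder-results']))  # ship (pid, results) back to the parent
    child_conn.close()

if __name__ == '__main__':
    parent_conn, child_conn = multiprocessing.Pipe()
    p = multiprocessing.Process(target=work, args=(child_conn,))
    p.start()
    print(parent_conn.recv())  # e.g. (12345, ['placeholder-results'])
    p.join()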
Example 6: main
def main(OBJECTID, lck, count, length, getArea=False):
"""
OBJECTID - the objectid of the feature from the wfs service
lck - multiprocess lock
count - how many features have been processed
length - the total number of features to be processed
getArea - boolean flag to indicate whether to capture the area of intersection
"""
try:
logging.basicConfig(filename=LOG_FILENAME, level=logging.DEBUG, format='%(asctime)s %(levelname)s %(message)s',)
logging.info(str(os.getpid()) + " OBJECTID " + str(OBJECTID) + " (" + str(count) + " out of " + str(length) + ")")
        multiprocessing.current_process().cnt += 1  # assumes a custom 'cnt' attribute was set on this Process before start()
conn = dbconnect('species_especies_schema') # connect to PostGIS
# intersect the species range features with the intersectingfeature features
if getArea: # populate the area using the intersection area between the wdpa and the species
conn.cur.execute("SELECT * from especies.intersect_species_wdpa_area(%s,false)" % OBJECTID)
else:
conn.cur.execute("SELECT * from especies.intersect_species_wdpa(%s,false)" % OBJECTID)
intersectingfeatures = conn.cur.fetchall() # get all of the intersecting PAs for the species
if len(intersectingfeatures) > 0:
for intersectingfeature in intersectingfeatures: # iterate through the intersectingfeatures
if getArea: # populate the output table
conn.cur.execute("SELECT especies.insert_species_wdpa_area(%s,%s,%s,%s)" % (OBJECTID, intersectingfeature[1], intersectingfeature[2], intersectingfeature[3]))
else:
conn.cur.execute("SELECT especies.insert_species_wdpa(%s,%s,%s)" % (OBJECTID, intersectingfeature[1], intersectingfeature[2]))
else:
raise Exception("No intersecting features for OBJECTID %s" % OBJECTID)
except Exception as inst:
logging.error(str(os.getpid()) + " " + inst.args[0])
    finally:
        if 'conn' in locals():  # dbconnect may have raised before conn was bound
            conn.cur.close()
            del conn
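The current_process().cnt += 1 line only works because each worker carries a custom cnt attribute; a plain Process has no such field. One way to arrange that is a small subclass, sketched below under the fork start method; only the attribute name cnt comes from the example, the rest is illustrative.

import multiprocessing

class CountingProcess(multiprocessing.Process):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.cnt = 0  # per-process counter, reachable via current_process().cnt

def task():
    multiprocessing.current_process().cnt += 1
    print(multiprocessing.current_process().name, 'cnt =', multiprocessing.current_process().cnt)

if __name__ == '__main__':
    p = CountingProcess(target=task)
    p.start()
    p.join()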
Example 7: monitor
def monitor(result_queue, broker=None):
"""
Gets finished tasks from the result queue and saves them to Django
:type result_queue: multiprocessing.Queue
"""
if not broker:
broker = get_broker()
name = current_process().name
logger.info(_("{} monitoring at {}").format(name, current_process().pid))
for task in iter(result_queue.get, "STOP"):
# acknowledge
ack_id = task.pop("ack_id", False)
if ack_id:
broker.acknowledge(ack_id)
# save the result
if task.get("cached", False):
save_cached(task, broker)
else:
save_task(task)
# log the result
if task["success"]:
logger.info(_("Processed [{}]").format(task["name"]))
else:
logger.error(_("Failed [{}] - {}").format(task["name"], task["result"]))
logger.info(_("{} stopped monitoring results").format(name))
Example 8: pusher
def pusher(task_queue, event, list_key=Conf.Q_LIST, r=redis_client):
"""
    Pulls tasks off the Redis list and puts them in the task queue
:type task_queue: multiprocessing.Queue
:type event: multiprocessing.Event
:type list_key: str
"""
logger.info(_("{} pushing tasks at {}").format(current_process().name, current_process().pid))
while True:
try:
task = r.blpop(list_key, 1)
except Exception as e:
logger.error(e)
# redis probably crashed. Let the sentinel handle it.
sleep(10)
break
if task:
# unpack the task
try:
task = signing.SignedPackage.loads(task[1])
except (TypeError, signing.BadSignature) as e:
logger.error(e)
continue
task_queue.put(task)
logger.debug(_("queueing from {}").format(list_key))
if event.is_set():
break
logger.info(_("{} stopped pushing tasks").format(current_process().name))
Example 9: run
def run(queue, params):
""" Start a publisher thread to publish forever """
try:
(dbFile, dbBackupDir, times, submitType) = getStartParams(params)
log.info('{0} started'.format(current_process().name))
        commandQueue = Queue.Queue()  # Python 2 stdlib; the "queue" module on Python 3
interruptRequest = threading.Event()
publishThread = threading.Thread(target=publishForever,
args=(commandQueue, interruptRequest, times, submitType, dbFile, dbBackupDir),
name="PublishThread")
publishThread.start()
dispatch = {'stop' : stopCommandHandler, 'set' : setCommandHandler}
        while not stopRequest.is_set():  # stopRequest: a module-level threading.Event defined elsewhere
(command, params) = queue.get()
            log.info('{0} received command: [{1}]'.format(current_process().name, str(command)))
try:
dispatch[command](interruptRequest, commandQueue, params)
queue.put(Response())
except Exception as ex:
queue.put(Response(message=str(ex), status=ResponseStatus.ERROR))
log.info('{0} Waiting for {1} to stop'.format(current_process().name, publishThread.name))
publishThread.join()
log.info('{0} ... OK'.format(current_process().name))
except Exception as ex:
log.exception(repr(ex))
log.info('{0} terminated'.format(__name__))
Example 10: reducer
def reducer(q_manager, project_drs, options):
_logger.info(multiprocessing.current_process().name + ' with pid ' +
str(os.getpid()))
reducer_queue_consume(q_manager, project_drs, options)
_logger.info(multiprocessing.current_process().name + ' with pid ' +
str(os.getpid()) + ' finished cleanly.')
return
Example 11: getTaskProcess
def getTaskProcess(self):
    while True:
        array = []
        if self.taskleft() > 0:
            for i in range(10):
                try:
                    req = self.q_request.get(block=True, timeout=1000)
                    array.append(req)
                except Exception:
                    continue
            with self.lock:  # keep this update atomic: we are entering a critical section
                self.running = self.running + 1
                threadname = multiprocessing.current_process().name
                print('Process ' + threadname + ' issues a request')
                ans = self.do_job(self.job, req, threadname)
                # ans = self.connectpool.getConnect(req)
            if self.needfinishqueue > 0:
                self.q_finish.put((req, ans))
            with self.lock:
                self.running = self.running - 1
                threadname = multiprocessing.current_process().name
                print('Process ' + threadname + ' finished the request')
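Note that self.running above is only meaningful across processes if it lives in shared memory; an ordinary attribute is copied into each child. A minimal sketch of a lock-protected shared counter with multiprocessing.Value (illustrative, not the original class):

import multiprocessing

def work(running):
    with running.get_lock():  # Value carries its own lock
        running.value += 1
    print(multiprocessing.current_process().name, 'running =', running.value)

if __name__ == '__main__':
    running = multiprocessing.Value('i', 0)  # shared int, initialized to 0
    procs = [multiprocessing.Process(target=work, args=(running,)) for _ in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print('final:', running.value)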
Example 12: _remove_adapters
def _remove_adapters(self, adapter, info_file, sum_file, tmp_decontam_fastq, tmp_rmadapter_fastq):
    sys.stdout.write("[Preqc] %s removing adapters from %s\n" % (multiprocessing.current_process().name, tmp_decontam_fastq))
    cutadapt_cmd = "cutadapt -b %s -O %d -m %d --info-file %s -o %s %s" % (adapter, self.min_overlap, self.min_readlen, info_file, tmp_rmadapter_fastq, tmp_decontam_fastq)
    sys.stdout.write(multiprocessing.current_process().name + "\t" + cutadapt_cmd + "\n")
p = subprocess.Popen(shlex.split(cutadapt_cmd), stdout=open(sum_file, 'w'))
p.wait()
return p.returncode
Example 13: worker
from multiprocessing import current_process

def worker(work_queue, done_queue):
    try:
        for url in iter(work_queue.get, 'STOP'):
            status_code = print_site_status(url)
            done_queue.put("%s | %s | %s" % (current_process().name, url, status_code))
    except Exception as e:  # Python 3 syntax; the original used the Python 2 "except Exception, e"
        done_queue.put("%s | %s | %s | %s" % (current_process().name, url, 5000, str(e)))
Example 14: Map
def Map(L):
sentence_max = 0
#temp_file = get_tempfile(L)
text = prepjob(L)
#data_file = load(temp_file)
local_words = {}
    print(multiprocessing.current_process().name, 'to map region:', L[1], "to", L[2])
    while True:
        raw_line = text.readline()
        if not raw_line:
            break
for sentence in splitSentence(raw_line):
for (i, word) in enumerate(sentence.split()):
if i > sentence_max:
sentence_max = i
if not word.isspace():
sanitized = sanitize(word).lower()
local_words[sanitized] = incrementTuple(i,
local_words.get(sanitized, (0, {})))
    out = []
    total = 0  # renamed from "sum", which shadowed the builtin
    for (key, value) in local_words.items():
        if key != '' and value is not None:  # the original "key is not ''" tested identity, not equality
            total += value[0]
            out.append((key, value))
    print(multiprocessing.current_process().name, 'mapped tokens:', total, 'sentence max:', sentence_max)
    #data_file.close()
    #os.remove(temp_file)
    return out
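A Map function like this is typically fanned out over regions of the input with a process pool, with current_process().name distinguishing the workers. A minimal sketch of that pattern (the region tuples are illustrative):

import multiprocessing

def map_region(region):
    # each pool worker reports which region it handled
    return (multiprocessing.current_process().name, region)

if __name__ == '__main__':
    regions = [('corpus.txt', 0, 1000), ('corpus.txt', 1000, 2000)]
    with multiprocessing.Pool(processes=2) as pool:
        for name, region in pool.map(map_region, regions):
            print(name, 'mapped', region)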
Example 15: doStuff
def doStuff(self):
"""
This is the method that does the work
"""
while (not self.stop_event.is_set()) and (not self.waiting_queue.empty()):
# Get a job from the queue
        try:
            self.waiting_lock.acquire()
            job = self.waiting_queue.get(block=False)  # non-blocking, so Empty can actually be raised
        except queue.Empty:  # stdlib "queue" module; the original's queue.Queue.Empty does not exist
            break
        finally:
            self.waiting_lock.release()
# Do the work
print("{0}: Starting {1}".format(multiprocessing.current_process(), job))
time.sleep(1)
print("{0}: Finished {1}".format(multiprocessing.current_process(), job))
time.sleep(1)
# Put the result back on the result Queue. (Doesn't have to be the same object as Source Q)
        try:
            self.complete_lock.acquire()
            self.complete_queue.put(job)
        except queue.Full:  # put() raises Full; the original caught a nonexistent queue.Queue.Empty
            break
        finally:
            self.complete_lock.release()