This article collects typical usage examples of the Python method multiprocessing.Process.start. If you have been wondering what Process.start does, how to call it, or where to find real-world uses of it, the curated examples below should help. You can also read further about the class it belongs to, multiprocessing.Process.
The following shows 15 code examples of Process.start, sorted by popularity by default.
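Before the examples, a minimal sketch of the basic pattern (standard library only; the function name work is illustrative, not taken from the examples below): start() spawns the child process and returns immediately, while join() blocks until the child exits.

from multiprocessing import Process

def work(name):
    # runs in the child process
    print("hello from %s" % name)

if __name__ == '__main__':
    p = Process(target=work, args=('worker-1',))
    p.start()   # spawn the child and return immediately
    p.join()    # wait for the child to finish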
Example 1: KeepAliveClientTest
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
class KeepAliveClientTest(TestCase):

    server_address = ("127.0.0.1", 65535)

    def __init__(self, *args, **kwargs):
        super(KeepAliveClientTest, self).__init__(*args, **kwargs)
        self.server_process = Process(target=self._run_server)

    def setUp(self):
        super(KeepAliveClientTest, self).setUp()
        self.client = Client(["%s:%d" % self.server_address])
        self.server_process.start()
        time.sleep(.10)

    def tearDown(self):
        self.server_process.terminate()
        super(KeepAliveClientTest, self).tearDown()

    def _run_server(self):
        self.server = BaseHTTPServer.HTTPServer(self.server_address, ClientAddressRequestHandler)
        self.server.handle_request()

    def test_client_keepalive(self):
        for x in range(10):
            result = self.client.sql("select * from fake")
            another_result = self.client.sql("select again from fake")
            self.assertEqual(result, another_result)
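The test depends on a ClientAddressRequestHandler defined elsewhere in the module. A plausible sketch (hypothetical, not the original handler) would echo the caller's address back, so that two requests served over one kept-alive connection report the same client port:

import json

class ClientAddressRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler):
    protocol_version = "HTTP/1.1"   # required for keep-alive

    def do_POST(self):
        length = int(self.headers.getheader('Content-Length', 0))
        self.rfile.read(length)   # consume the request body so the connection can be reused
        body = json.dumps({"client": self.client_address})
        self.send_response(200)
        self.send_header("Content-Length", str(len(body)))
        self.end_headers()
        self.wfile.write(body)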
Example 2: _find_active_serial_ports_from
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
def _find_active_serial_ports_from(self, wait_duration, device_files):
    """
    Finds and returns a list of active USB serial ports.

    This spawns a process that actually does the work.

    Args:
        wait_duration (float):
            Time in seconds to wait for the serial ports to respond.
        device_files (list of strings):
            List of device files that will be checked for serial ports.
            Note that any device file other than ttyUSBx will be ignored.

    Returns:
        List of device files that have an active serial port.
        Example: ["ttyUSB2", "ttyUSB4", "ttyUSB7"]
    """
    serial_results = Queue()
    serial_finder = Process(
        target=TopologyBuilder._get_active_serial_device_files,
        args=(self, serial_results, wait_duration, device_files))
    if self._verbose:
        print "Serial thread - Finding active serial ports"
    logging.info("Finding active serial ports")
    serial_finder.start()
    return serial_results
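Note that the Queue is returned while the spawned process is still probing the ports; the caller is expected to block on it. A usage sketch, assuming the worker puts its final list on the queue and that builder is a TopologyBuilder instance:

queue = builder._find_active_serial_ports_from(wait_duration=5, device_files=["ttyUSB0", "ttyUSB2"])
active_ports = queue.get()   # blocks until _get_active_serial_device_files publishes its result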
Example 3: benchmark
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
def benchmark(self, request, pk):
    queryset = Attempt.objects.all()
    attempt = get_object_or_404(queryset, id=pk)
    serializer = AttemptSerializer(attempt)

    # check payload: both keys are used below, so reject the request if either is missing
    payload = dict(request.data)
    if 'database' not in payload or 'benchmark' not in payload:
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)

    # run benchmark in a background process
    process = Process(target=utils.run_benchmark, args=(pk, payload['database'], payload['benchmark']))
    process.start()
    # utils.run_benchmark(pk, payload['database'], payload['benchmark'])

    # should know the deployer id
    deployer_id = 1
    log_file_path = os.path.join(os.path.dirname(__file__), os.pardir, 'vagrant', str(deployer_id) + '.log')

    def stream_response_generator():
        # tail the log file while the benchmark process is alive
        last_line_no = 0
        while process.is_alive():
            time.sleep(1)
            with open(log_file_path, 'r') as log_file:
                content = log_file.readlines()
                line_no = len(content)
                if line_no > last_line_no:
                    yield ''.join(content[last_line_no:])
                    last_line_no = line_no
        # flush any output written after the process exited
        time.sleep(1)
        with open(log_file_path, 'r') as log_file:
            content = log_file.readlines()
            line_no = len(content)
            if line_no > last_line_no:
                yield ''.join(content[last_line_no:])

    return StreamingHttpResponse(stream_response_generator())
Example 4: nct_tagging
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
def nct_tagging(index_name, host, port_no, process_ids,
                stopwords, umls, pos, nprocs=1):
    # open the clinical trial ids file to process
    nct_ids = []
    for line in open(process_ids, 'rb'):
        nct_ids.append(line.strip())

    # check if the index exists
    index = es_index.ElasticSearch_Index(index_name, host=host, port=port_no)
    index.add_field('ec_tags_umls', term_vector=True)

    # get the clinical trials: process each one and store it to an XML file
    log.info('processing clinical trials')
    procs = []
    chunksize = int(math.ceil(len(nct_ids) / float(nprocs)))
    for i in xrange(nprocs):
        p = Process(target=_worker, args=(nct_ids[chunksize * i:chunksize * (i + 1)],
                                          index_name, host, port_no,
                                          stopwords, umls, pos, (i + 1)))
        procs.append(p)
        p.start()

    for p in procs:
        p.join()
Example 5: start_echo_server_process
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
def start_echo_server_process():
    # XXX DO NOT FORGET TO KILL THE PROCESS IF THE TEST DOES NOT SUCCEED
    p = Process(target=start_echo_server)
    p.start()
    sleep(1.5)
    return p
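The fixed sleep(1.5) is a guess at server startup time. A more robust variant (a sketch, assuming the echo server listens on a known host and port) polls the socket until it accepts connections:

import socket
import time

def wait_until_listening(host='127.0.0.1', port=7777, timeout=5.0):
    # poll the port instead of sleeping for a fixed amount of time
    deadline = time.time() + timeout
    while time.time() < deadline:
        try:
            socket.create_connection((host, port), timeout=0.2).close()
            return True
        except socket.error:
            time.sleep(0.05)
    return False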
Example 6: processFiles
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
def processFiles(patch_dir):
    root = os.getcwd()
    glbl.data_dirs = {}
    if root != patch_dir:
        working_path = root + "/" + patch_dir
    else:
        working_path = root

    for path, dirs, files in os.walk(working_path):
        if len(dirs) == 0:
            glbl.data_dirs[path] = ''

    # Multiprocessing Section
    #########################################
    Qids = glbl.data_dirs.keys()
    manager = Manager()        # create shared memory manager object
    results = manager.dict()   # managed dict, accessible across processes
    nextid = Queue()           # Queue serves as shared id generator across processes
    for qid in Qids:
        nextid.put(qid)        # load the ids to be tested into the Queue
    for x in range(0, multiprocessing.cpu_count()):   # create one process per logical CPU
        p = Process(target=processData, args=(nextid, results))   # each worker runs processData with the Queue and shared dict
        glbl.jobs.append(p)    # add the process to a list of running processes
        p.start()              # start the process running
    for j in glbl.jobs:
        j.join()               # join each process back to main, blocking until all are finished

    # write out results
    c = 1
    sets = results.keys()
    sets.sort()
    for x in sets:
        if results[x] != 'None':
            FINAL = open('result' + str(c) + '.txt', 'w')
            n = "\n************************************************************************************************\n"
            FINAL.write(n + "* " + x + ' *\n' + n + results[x] + "\n")
            FINAL.close()
            c += 1
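The worker target processData is not shown here. A minimal sketch of the pattern it has to follow (hypothetical; analyze stands in for the real per-directory work) drains the shared Queue and records results in the managed dict:

import Queue  # Python 2 module name; on Python 3 this is `queue`

def processData(nextid, results):
    while True:
        try:
            qid = nextid.get_nowait()   # grab the next directory id
        except Queue.Empty:
            break                       # queue drained: this worker is done
        results[qid] = analyze(qid)     # hypothetical per-directory analysis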
Example 7: serve
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
def serve(self):
    """Start a fixed number of worker processes and put clients into a queue"""
    # shared state that can tell the workers to exit when set to false
    self.isRunning.value = True
    # first bind and listen to the port
    self.serverTransport.listen()
    # fork the children
    for i in range(self.numWorkers):
        try:
            w = Process(target=self.workerProcess)
            w.daemon = True
            w.start()
            self.workers.append(w)
        except Exception as x:
            logging.exception(x)
    # wait until the condition is set by stop()
    while True:
        self.stopCondition.acquire()
        try:
            self.stopCondition.wait()
            break
        except (SystemExit, KeyboardInterrupt):
            break
        except Exception as x:
            logging.exception(x)
    self.isRunning.value = False
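The workerProcess target is defined elsewhere (this serve() follows the Thrift process-pool server pattern). A simplified sketch of the loop it typically runs (hypothetical; serveClient stands in for the real per-connection handler) re-checks the shared isRunning flag between clients:

def workerProcess(self):
    while self.isRunning.value:
        try:
            client = self.serverTransport.accept()   # block until a client connects
            self.serveClient(client)                 # handle one client, then loop
        except Exception as x:
            logging.exception(x)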
Example 8: run_stock_parser
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
def run_stock_parser():
    symbol_q = Queue()
    price_q = Queue()

    stock_symbols = []
    with open('symbols.txt', 'r') as symfile:
        for n, line in enumerate(symfile):
            sym = line.strip()
            if sym:
                stock_symbols.append(sym)

    ncpu = len([x for x in open('/proc/cpuinfo').read().split('\n')
                if x.find('processor') == 0])

    pool = [Process(target=read_stock_worker, args=(symbol_q, price_q, )) for _ in range(ncpu * 4)]
    for p in pool:
        p.start()
    output = Process(target=write_output_file, args=(price_q, ))
    output.start()

    for symbol in stock_symbols:
        symbol_q.put(symbol)
    symbol_q.put(_sentinel)

    for p in pool:
        p.join()
    price_q.put(_sentinel)
    output.join()
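Only one sentinel is queued for ncpu * 4 workers, so each worker has to put the sentinel back before exiting, or the others would block forever. A sketch of read_stock_worker under that assumption (fetch_price is hypothetical; _sentinel is assumed to be a picklable value that compares equal across processes, e.g. a unique string):

def read_stock_worker(symbol_q, price_q):
    while True:
        symbol = symbol_q.get()
        if symbol == _sentinel:
            symbol_q.put(_sentinel)   # pass the sentinel on so the other workers also stop
            break
        price_q.put((symbol, fetch_price(symbol)))   # hypothetical price lookup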
Example 9: start_parser_process
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
def start_parser_process(self):
    if self.mp_mode:
        from multiprocessing import Process, Event
    else:
        from multiprocessing.dummy import Process, Event
    waiting_shutdown_event = Event()
    if self.mp_mode:
        bot = self.bot.__class__(
            network_result_queue=self.network_result_queue,
            parser_result_queue=self.parser_result_queue,
            waiting_shutdown_event=waiting_shutdown_event,
            shutdown_event=self.shutdown_event,
            parser_mode=True,
            meta=self.bot.meta)
    else:
        # In non-multiprocess mode we start the `run_parser`
        # method in a semi-process (actually a thread).
        # Because we reuse the main spider instance, all changes
        # made in handlers are applied to that instance, which
        # allows supporting deprecated spiders that do not know
        # about multiprocessing mode.
        bot = self.bot
        bot.network_result_queue = self.network_result_queue
        bot.parser_result_queue = self.parser_result_queue
        bot.waiting_shutdown_event = waiting_shutdown_event
        bot.shutdown_event = self.shutdown_event
        bot.meta = self.bot.meta
    proc = Process(target=bot.run_parser)
    if not self.mp_mode:
        proc.daemon = True
    proc.start()
    return waiting_shutdown_event, proc
Example 10: MultiProcessPlot
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
class MultiProcessPlot(object):
    ## Initialization
    def __init__(self):
        self.plotpipe, PlotterPipe = Pipe()
        ## Called process for plotting
        self.plotter = ProcessPlotter()
        ## Process holder
        self.plotprocess = Process(target=self.plotter, args=(PlotterPipe, ))
        self.plotprocess.daemon = True
        self.plotprocess.start()

    ## Plot function
    def plot(self, finished=False):
        global flagSubscriber1, flagSubscriber2   # reset below, so they must be declared global
        send = self.plotpipe.send
        if finished:
            send(None)
        else:
            if not LoopCounter % plotRefreshPeriod:
                reset = 1
            else:
                reset = 0
            ## Compose data for pipe
            data = [reset,
                    MessageMeasurement.pose2d.x, MessageMeasurement.pose2d.y, MessageMeasurement.pose2d.theta,
                    MessageEKF.odompose2d.x, MessageEKF.odompose2d.y, MessageEKF.odompose2d.theta,
                    MessageEKF.ekfpose2d.x, MessageEKF.ekfpose2d.y, MessageEKF.ekfpose2d.theta]
            # print(MessageEKF.ekfpose2d.x, MessageEKF.ekfpose2d.y, MessageEKF.ekfpose2d.theta)  # //VB
            # print(MessageEKF.odompose2d.x, MessageEKF.odompose2d.y, MessageEKF.odompose2d.theta)  # //VB
            ## Send data through pipe
            send(data)
            ## Reset global flags to receive new input
            flagSubscriber1 = False
            flagSubscriber2 = False
Example 11: apply_update
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
def apply_update(fname, status):
    # As soon as python-apt closes its opened files on object deletion,
    # we can drop this fork workaround. As long as it keeps its files
    # open, we run the code in a separate fork, so that the files are
    # closed on process termination and we can remount the filesystem
    # read-only without errors.
    p = Process(target=_apply_update, args=(fname, status))
    with rw_access("/", status):
        try:
            t_ver = get_target_version(fname)
        except BaseException:
            status.log('Reading xml-file failed!')
            return
        try:
            c_ver = get_current_version()
        except IOError as e:
            status.log('get current version failed: ' + str(e))
            c_ver = ""
        pre_sh(c_ver, t_ver, status)
        p.start()
        p.join()
        status.log("cleanup /var/cache/apt/archives")
        # don't use execute() here, it results in an error that the apt-cache
        # is locked. We currently don't understand this behaviour :(
        os.system("apt-get clean")
        if p.exitcode != 0:
            raise Exception(
                "Applying update failed. See logfile for more information")
        post_sh(c_ver, t_ver, status)
Example 12: connect_multiprocess
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
def connect_multiprocess(service=VoidService, config={}, remote_service=VoidService, remote_config={}, args={}):
    """starts an rpyc server on a new process, bound to an arbitrary port,
    and connects to it over a socket. Basically a copy of connect_thread().
    However, if `args` contains shared-memory objects, changes will be
    bi-directional; that is, we now have access to shared memory.

    :param service: the local service to expose (defaults to Void)
    :param config: configuration dict
    :param remote_service: the remote service to expose (of the server; defaults to Void)
    :param remote_config: remote configuration dict (of the server)
    :param args: dict of local vars to pass to the new connection, in the form {'name': var}

    Contributed by *@tvanzyl*
    """
    from multiprocessing import Process

    listener = socket.socket()
    listener.bind(("localhost", 0))
    listener.listen(1)

    def server(listener=listener, args=args):
        client = listener.accept()[0]
        listener.close()
        conn = connect_stream(SocketStream(client), service=remote_service, config=remote_config)
        try:
            for k in args:
                conn._local_root.exposed_namespace[k] = args[k]
            conn.serve_all()
        except KeyboardInterrupt:
            interrupt_main()

    t = Process(target=server)
    t.start()
    host, port = listener.getsockname()
    return connect(host, port, service=service, config=config)
Example 13: ArtBox
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
class ArtBox(object):
    def __init__(self, width, height):
        self._pen_comms = Pipe()
        self._paper_comms = Pipe()
        self._pen_ear, self._pen_mouth = Pipe()
        self._paper_ear, self._paper_mouth = Pipe()
        self._pen = pen.Pen()
        self._paper = paper.Paper(width=width, height=height)
        self._proc = Process(target=self._pen, args=(self._pen_comms, self._paper_comms))
        self._proc.daemon = True

    def add_resource_folder(self, folder_name):
        pyglet.resource.path.append(folder_name)
        pyglet.resource.reindex()

    def precache(self, asset_dict):
        for key in asset_dict:
            attributes = asset_dict[key]
            if len(attributes) == 1:
                self._paper._handle_command(Nibs.Cache(key, attributes[0]))
            elif len(attributes) == 2:
                self._paper._handle_command(Nibs.Cache(key, attributes[0], attributes[1]))

    def open(self):
        self._proc.start()
        self._paper.unfurl(self._pen_comms, self._paper_comms)
        self._proc.join(1)
        if self._proc.exitcode is None:
            self._proc.terminate()
Example 14: start_workers
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
def start_workers(config):
    '''
    Picks up all the external system configuration from the config file and
    starts up as many processes as there are non-default sections in the config.

    The following elements are required in the default configuration section:
    - solr_url : base URL of the solr server.
    - nova_db_server : IP or hostname of the nova controller.
    - nova_db_port : port of the nova db to which the workers should connect. For nova+mysql this would be 3306.
    - nova_db_creds : credentials in the format user:password
    - amqp_server : IP or hostname of the amqp server. Usually, this is the same as the nova controller.
    - amqp_port : port of the AMQP server. If using RMQ this should be 5672.
    - amqp_creds : credentials in the format user:password

    Each non-default section of the config should represent a resource type
    that this system monitors. Each individual worker corresponds to a
    resource type and is run in a separate python process.
    '''
    logUtils.setup_logging(config)
    global _LOGGER
    _LOGGER = logUtils.get_logger(__name__)
    for section in config.sections():
        process = Process(target=worker.run, args=(config, section,))
        process.daemon = True
        process.start()
        _LOGGER.info('Started worker process - ' + str(process.pid))
        _PROCESSES.append(process)
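For reference, a minimal config sketch matching the docstring above (all hosts, ports, and credentials are hypothetical placeholders):

[DEFAULT]
solr_url = http://localhost:8983/solr
nova_db_server = 192.168.0.10
nova_db_port = 3306
nova_db_creds = nova:secret
amqp_server = 192.168.0.10
amqp_port = 5672
amqp_creds = guest:guest

[instances]
; each non-default section like this one starts a dedicated worker process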
Example 15: start_schedulers
# Required import: from multiprocessing import Process
# Or: from multiprocessing.Process import start
def start_schedulers(options):
    apps = [app.strip() for app in options.scheduler.split(',')]
    try:
        from multiprocessing import Process
    except ImportError:
        sys.stderr.write('Sorry, -K only supported for python 2.6-2.7\n')
        return
    processes = []
    code = "from gluon import current;current._scheduler.loop()"
    for app in apps:
        if not check_existent_app(options, app):
            print "Application '%s' doesn't exist, skipping" % app
            continue
        print 'starting scheduler for "%s"...' % app
        args = (app, True, True, None, False, code)
        logging.getLogger().setLevel(options.debuglevel)
        p = Process(target=run, args=args)
        processes.append(p)
        print "Currently running %s scheduler processes" % (len(processes))
        p.start()
        print "Processes started"
    for p in processes:
        try:
            p.join()
        except (KeyboardInterrupt, SystemExit):
            print "Processes stopped"
        except:
            p.terminate()
            p.join()