本文整理汇总了Python中multiprocessing.Pipe类的典型用法代码示例。如果您正苦于以下问题:Python Pipe类的具体用法?Python Pipe怎么用?Python Pipe使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Pipe类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_request_with_fork
def test_request_with_fork(self):
    """Verify the connection pool survives an os.fork inside a request.

    Skips when the multiprocessing module is unavailable. After forking,
    the child must still be able to query and must observe the resumed
    request state; it reports success back over a Pipe.
    """
    try:
        from multiprocessing import Process, Pipe
    except ImportError:
        raise SkipTest("No multiprocessing module")

    coll = self.c.test.test
    coll.remove(safe=True)
    coll.insert({'_id': 1}, safe=True)
    coll.find_one()
    self.assert_pool_size(1)
    self.c.start_request()
    self.assert_pool_size(1)
    coll.find_one()
    self.assert_pool_size(0)
    self.assert_request_with_socket()

    def child_task(conn):
        # The forked child can still query the server without error.
        self.assertEqual({'_id': 1}, coll.find_one())
        # The pool detected the fork but kept the request socket.
        self.assert_request_with_socket()
        self.assert_pool_size(0)
        conn.send("success")

    reader, writer = Pipe()
    worker = Process(target=child_task, args=(writer,))
    worker.start()
    worker.join(1)
    worker.terminate()
    writer.close()
    self.assertEqual("success", reader.recv())
示例2: PluginRunner
class PluginRunner:
    """Supervises a plugin child process, relaunching it until stopped.

    A background thread (`run`) keeps spawning the plugin process; callers
    talk to the plugin over the local end of a multiprocessing Pipe.
    """

    def __init__(self, plugin):
        self.name = plugin
        self.proc, self.running = None, False
        # local end stays with us; remote end is handed to the child process
        self.local_pipe, self.remote_pipe = Pipe()

    def getConnection(self):
        # Connection endpoint callers use to exchange messages with the plugin.
        return self.local_pipe

    def start(self):
        assert not self.running, "Already running."
        self.running = True
        self.thread = Thread(target=self.run)
        self.thread.start()

    def restart(self):
        # Killing the process makes run()'s loop immediately relaunch it.
        self.proc.terminate()

    def stop(self):
        assert self.running, "Running"
        self.running = False
        self.proc.terminate()
        self.thread.join()
        self.remote_pipe.close()
        self.local_pipe.close()

    def run(self):
        # Relaunch loop: keeps the plugin process alive while running is set.
        while self.running:
            self.proc = Process(target=launch, args=('repeat', self.remote_pipe))
            self.proc.start()
            print("Waiting on proc to end")
            self.proc.join()
示例3: ProcessStarter
class ProcessStarter(object):
    """Launches and tears down the GUI process and the OR server process.

    The two processes are connected to this starter through the two ends
    of a single Pipe (pipeGUI / pipeOR).
    """

    def __init__(self):
        '''
        Set up the pipe shared between the GUI and OR processes and
        immediately launch both of them.
        '''
        self.running = True
        self.orprocess = None
        self.guiprocess = None
        self.pipeGUI, self.pipeOR = Pipe()
        self.StartProcesses()

    def StartProcesses(self):
        # GUI first, then tell it to open the viewer, then the OR server.
        self.guiprocess = Process(target=self.__startGUI__)
        self.guiprocess.start()
        self.pipeGUI.send(["StartViewer", None])
        self.orprocess = Process(target=ORServer, args=(self.pipeOR,))
        self.orprocess.start()
        return True

    def terminate(self):
        # Best effort: ask the GUI to stop, then kill both processes.
        # Any failure (already dead, pipe closed, ...) is ignored.
        try:
            self.pipeGUI.send(["Stop", None])
            self.guiprocess.terminate()
            self.orprocess.terminate()
        except:
            pass

    def __startGUI__(self):
        # Runs inside the GUI child process: start the Qt event loop.
        app = QtGui.QApplication(sys.argv)
        form = pnpApp(self.pipeGUI, self)
        form.show()
        sys.exit(app.exec_())
示例4: get_resource
def get_resource(url, is_ajax=0, ajax_timeout=2, js_timeout=5, timeout_retry_times=0, jsengine_type=1000):
    """Fetch *url* in a dedicated crawler process and return the HTML it sends back.

    NOTE(review): every parameter except *url* is currently unused; they are
    kept for backward compatibility with existing callers.
    """
    parent_conn, child_conn = Pipe()
    worker = Process(target=CrawlerProcess, args=(child_conn, url))
    worker.start()
    # Receive BEFORE joining: joining first can deadlock if the child blocks
    # while sending a payload larger than the pipe's OS buffer.
    html = parent_conn.recv()
    worker.join()
    return html
示例5: resolve_list
def resolve_list(hostname_list, qtype='A', tokens=300, flag=False, servers=[('8.8.8.8', 53)], timeout=(1, 3, 5, 7)):
    """Resolve *hostname_list* in a worker process and return the result.

    The heavy lifting happens in _resolve_list, which writes its answer to
    the child end of a Pipe; we read it here before joining the worker.

    NOTE(review): the mutable default for *servers* is a Python anti-pattern;
    kept as-is because it is never mutated here and callers may rely on the
    exact default value.
    """
    rx_end, tx_end = Pipe()
    worker = Process(target=_resolve_list,
                     args=(tx_end, hostname_list, qtype, tokens, flag, servers, timeout))
    worker.start()
    # recv before join so the child never blocks on a full pipe buffer
    answers = rx_end.recv()
    worker.join()
    return answers
示例6: fn_with_timeout
def fn_with_timeout(*args, **kwargs):
    """Run *fn* in a subprocess, enforcing self.trial_timeout.

    The subprocess reports ("raise", exc) or ("return", result-dict) over a
    Pipe. On timeout the worker is killed and a STATUS_FAIL result is
    substituted. Large fitted objects are popped off a successful result
    (tracking the best seen so far) so the Trials object stays small.
    """
    here, there = Pipe()
    kwargs["_conn"] = there
    worker = Process(target=partial(fn, best_loss=self._best_loss),
                     args=args, kwargs=kwargs)
    worker.start()
    if here.poll(self.trial_timeout):
        fn_rval = here.recv()
        worker.join()
    else:
        self.info("TERMINATING DUE TO TIMEOUT")
        worker.terminate()
        worker.join()
        fn_rval = "return", {"status": hyperopt.STATUS_FAIL, "failure": "TimeOut"}

    assert fn_rval[0] in ("raise", "return")
    if fn_rval[0] == "raise":
        raise fn_rval[1]

    # Strip potentially large objects from the result so the Trials()
    # object below stays small; they can be recomputed if ever needed.
    result = fn_rval[1]
    if result["status"] == hyperopt.STATUS_OK:
        loss = float(result.get("loss"))
        preprocs = result.pop("preprocs")
        classif = result.pop("classifier")
        iters = result.pop("iterations")
        if loss < self._best_loss:
            self._best_preprocs = preprocs
            self._best_classif = classif
            self._best_loss = loss
            self._best_iters = iters
    return result
示例7: go
def go():
    """Endless display loop: receive data frames from a producer process,
    render each as a spectrogram strip, and append it to the scene.

    Runs forever; the status label shows strip height, per-frame speed,
    and total elapsed time.
    """
    global graphic_view, status_label
    # one-way pipe: producer writes, we read
    rx_conn, tx_conn = Pipe(duplex=False)
    producer = Process(target=generate_data, args=(tx_conn,))
    producer.daemon = True
    producer.start()

    scene = QGraphicsScene()
    graphic_view.setScene(scene)
    scene.setSceneRect(0, 0, 1024, 1024)
    x_pos, y_pos = 0, 0
    started_at = time.time()

    while True:
        frame_start = time.time()
        samples = rx_conn.recv()
        spectrogram = Spectrogram(samples)
        pixmap = QPixmap.fromImage(spectrogram.create_spectrogram_image(transpose=True))
        # grow the scene downward and place the new strip at the bottom
        scene.setSceneRect(scene.sceneRect().adjusted(0, 0, 0, pixmap.height()))
        item = scene.addPixmap(pixmap)
        item.setPos(x_pos, y_pos)
        y_pos += pixmap.height()
        graphic_view.fitInView(scene.sceneRect())
        status_label.setText("Height: {0:.0f} // Speed: {1:.2f} // Total Time: {2:.2f}".format(scene.sceneRect().height(),
                                                                                              1/(time.time()-frame_start),
                                                                                              time.time()-started_at))
        # keep the UI responsive without returning to the Qt event loop
        QApplication.instance().processEvents()
示例8: take_lock
def take_lock(fd, timeout=None, shared=False):
    '''Take an advisory flock() on a file descriptor.

    timeout == 0 tries the lock without blocking, timeout is None blocks
    indefinitely, and a positive timeout gives up after that many seconds
    (implemented by delegating to a helper process that arms an alarm).
    shared=True takes a shared lock (compatible with other shared locks);
    shared=False takes an exclusive one.

    with open(path, 'r') as lock:
        take_lock(lock.fileno(), timeout, shared)
    '''
    # Fast paths: block forever, or fail immediately with LOCK_NB.
    if timeout is None or timeout == 0:
        flags = LOCK_SH if shared else LOCK_EX
        if timeout == 0:
            flags |= LOCK_NB
        flock(fd, flags)
        return

    # Timed path: a child process arms an alarm and attempts the lock,
    # reporting any error (or None) back over a one-way pipe.
    r_end, w_end = Pipe(duplex=False)
    locker = Process(target=_set_alarm_and_lock,
                     args=(fd, w_end, timeout, shared))
    locker.start()
    err = r_end.recv()
    locker.join()
    if err:
        # An alarm-interrupted flock surfaces as EINTR; translate it to
        # EAGAIN so callers see a conventional "would block" error.
        if isinstance(err, IOError) and err.errno == EINTR:
            raise IOError(EAGAIN, strerror(EAGAIN))
        raise err
示例9: QuickSortMPListArray
def QuickSortMPListArray(A, conn, NumProcs):
    """Parallel quicksort over a list of sequences, keyed on element [0].

    Sends the sorted list over *conn* and closes it. Each recursion level
    spends two processes from the NumProcs budget; once the budget runs
    out, the serial QuickSortListArray takes over.
    """
    if len(A) <= 1:
        # trivially sorted
        conn.send(A)
        conn.close()
    elif int(NumProcs) < 1:
        # process budget exhausted: fall back to the serial sort
        conn.send(QuickSortListArray(A))
        conn.close()
    else:
        pivot = A.pop(0)
        pivot_key = int(pivot[0])
        # three-way partition around the pivot key
        smaller = [item for item in A if item[0] < pivot_key]
        larger = [item for item in A if item[0] > pivot_key]
        equals = [item for item in A if item[0] == pivot_key]
        equals.append(pivot)

        remaining = int(NumProcs) - 1
        left_rx, left_tx = Pipe()
        left_worker = Process(target=QuickSortMPListArray,
                              args=(smaller, left_tx, remaining))
        right_rx, right_tx = Pipe()
        right_worker = Process(target=QuickSortMPListArray,
                               args=(larger, right_tx, remaining))
        left_worker.start()
        right_worker.start()
        # concatenate child results around the pivot group
        conn.send(left_rx.recv() + equals + right_rx.recv())
        conn.close()
        left_worker.join()
        right_worker.join()
    return
示例10: process_pipe
def process_pipe():
parent_conn, child_conn = Pipe()
p = Process(target=pipe_test, args=(child_conn,))
p.start()
print parent_conn.recv()
p.join()
parent_conn.close()
示例11: main
def main():
startup_checks()
parser = argparse.ArgumentParser()
parser.add_argument("repourl", nargs="?", default=REPOURL, help="URL of "
"repository to crawl")
arguments = parser.parse_args(sys.argv[1:])
parent_conn, child_conn = Pipe()
crawler = Process(target=crawl, args=(arguments.repourl, child_conn))
crawler.start()
while True:
package_url = parent_conn.recv()
if package_url is None:
break
print "Processing %s" % package_url
tempdir = tempfile.mkdtemp()
p_obj = Package(package_url, tempdir)
database = pymongo.Connection("localhost", 27017).symdex
collection = database.libraries
for key, value in p_obj.libs_and_syms.items():
mongo_dict = {"package": os.path.basename(package_url),
"library": key, "symbols": value}
collection.insert(mongo_dict)
shutil.rmtree(tempdir)
os.remove(package_url)
crawler.join()
示例12: transcode
def transcode(self, path, format='mp3', bitrate=False):
    """Generator: yield chunks of *path* transcoded by a worker process.

    A transcode_process child writes chunks to the child end of a Pipe;
    this generator reads and yields them until an empty chunk arrives or
    either the global stopping event or the per-call stop event is set.
    If the consumer abandons the generator (GeneratorExit) or anything
    else goes wrong, the stop event tells the worker to quit.

    NOTE(review): if an exception fired before parent_conn/process were
    assigned (e.g. Pipe() or Process() failing), the finally block would
    raise NameError — presumably accepted as unlikely; verify.
    """
    if self.stopping.is_set():
        # server is shutting down: produce nothing
        return
    try:
        stop = Event()
        start_time = time.time()
        parent_conn, child_conn = Pipe()
        process = Process(target=transcode_process,
                          args=(child_conn, path, stop, format, bitrate))
        process.start()
        while not (self.stopping.is_set() or stop.is_set()):
            data = parent_conn.recv()
            if not data:
                # empty chunk == worker finished
                break
            yield data
        logger.debug("Transcoded %s in %0.2f seconds." % (path.encode(cfg['ENCODING']), time.time() - start_time))
    except GeneratorExit:
        # consumer closed the generator mid-stream; tell the worker to stop
        stop.set()
        logger.debug("User canceled the request during transcoding.")
    except:
        # bare except: any other failure also stops the worker (best effort)
        stop.set()
        logger.warn("Some type of error occured during transcoding.")
    finally:
        parent_conn.close()
        process.join()
示例13: StubExecuteTestsFunc
class StubExecuteTestsFunc:
    """Test double for an execute-tests callable running in another process.

    The main process holds main_conn; the stubbed callable (invoked in the
    worker) holds func_conn. CALLED/COMPLETE messages synchronize the two
    sides. Also usable as a context manager: entering resets the stub,
    exiting signals completion.
    """

    def __init__(self):
        self.main_conn, self.func_conn = Pipe()
        self._called = self._complete = None
        self.stub_reset()

    def stub_reset(self):
        # fresh state before each use
        self._called = self._complete = False

    def stub_complete(self):
        # main side: release the worker blocked in __call__
        self._complete = True
        self.main_conn.send(StubExecuteTestsFuncConnMessages.COMPLETE)

    def stub_called(self):
        # main side: poll (non-blocking) for the worker's CALLED message
        if not self._called and self.main_conn.poll():
            if self.main_conn.recv() == StubExecuteTestsFuncConnMessages.CALLED:
                self._called = True
        return self._called

    def __enter__(self):
        self.stub_reset()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.stub_complete()

    def __call__(self, *_):
        # worker side: announce the call, then block until COMPLETE arrives
        self._called = True
        self.func_conn.send(StubExecuteTestsFuncConnMessages.CALLED)
        while not self._complete:
            if self.func_conn.recv() == StubExecuteTestsFuncConnMessages.COMPLETE:
                self._complete = True
示例14: run
def run(self):
    """Visualizer worker loop: drain self.job_backlog, rendering each job
    in a child process, with at most max_process renders in flight.

    Children report completion over a Pipe; each message frees one slot.
    Runs until self.stop is set AND the backlog is empty. Jobs whose
    run_name appears in self.remove_run_name are skipped.
    """
    logging.info('Visualizer thread started')
    result_rx, result_tx = Pipe()
    # Sensible default value for max_process
    max_process = 2
    in_flight = 0
    while not self.stop or not self.job_backlog.empty():
        # reap completion notices (payload itself is currently unused)
        while result_rx.poll(0.1):
            result_rx.recv()
            # NOTE(review): a completed-job hook for the viewer could go here
            in_flight -= 1
        if self.job_backlog.empty():
            time.sleep(1)
        elif in_flight < max_process:
            in_flight += 1
            run_name, function, snapshot = self.job_backlog.get_nowait()
            if run_name not in self.remove_run_name:
                logging.info('Added job to visuzalizer Que: ' + str(run_name))
                logging.info('No. of jobs in Que: ' + str(in_flight))
                worker = Process(target=self.render_graph,
                                 args=(function, snapshot, run_name, result_tx))
                worker.start()
    logging.info('Visualizer Finished')
示例15: main
def main():
logging.basicConfig(level=logging.INFO)
args = parse_args()
print args
echo_server = Process(target=EchoServer('ipc://ECHO:1',
args.echo_delay).run)
client = Process(target=JsonClient('ipc://PRODUCER_REP:1',
'ipc://PRODUCER_PUB:1',
args.request_count,
request_delay=args.request_delay).run)
parent_pipe, child_pipe = Pipe(duplex=True)
async_server_adapter = Process(
target=AsyncJsonServerAdapter('ipc://ECHO:1', 'ipc://PRODUCER_REP:1',
'ipc://PRODUCER_PUB:1', child_pipe).run
)
try:
echo_server.start()
async_server_adapter.start()
client.start()
client.join()
parent_pipe.send('close')
async_server_adapter.join()
except KeyboardInterrupt:
pass
client.terminate()
async_server_adapter.terminate()
echo_server.terminate()
# Since ipc:// creates files, delete them on exit
cleanup_ipc_uris(('ipc://ECHO:1', 'ipc://PRODUCER_REP:1',
'ipc://PRODUCER_PUB:1'))