This page collects typical usage examples of the spawn function from the Python module pyon.util. If you are wondering exactly what spawn does and how to call it, the curated examples below should help.
The 15 code examples of spawn shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
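Before the individual examples, here is a minimal sketch of the pattern they all share: spawn runs a callable on a gevent greenlet, returns the greenlet object immediately, and results are typically handed back through an AsyncResult. This sketch assumes spawn behaves like gevent.spawn and is imported from pyon.util.async (this page shows only call sites, not imports); compute is a hypothetical placeholder function.

from gevent.event import AsyncResult
from pyon.util.async import spawn   # assumed import path; not shown on this page

def compute(res):
    # Runs on the spawned greenlet; hand the value back via the AsyncResult.
    res.set(21 * 2)

res = AsyncResult()
gl = spawn(compute, res)     # returns a greenlet object immediately
print res.get(timeout=5)     # blocks (up to 5 s) until compute() calls res.set()
gl.join(timeout=2)           # wait for the greenlet to exit; gl.kill() would force it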
Example 1: _es_call
def _es_call(es, *args, **kwargs):
    res = AsyncResult()

    def async_call(es, *args, **kwargs):
        res.set(es(*args, **kwargs))

    spawn(async_call, es, *args, **kwargs)
    try:
        retval = res.get(timeout=10)
    except Timeout:
        raise exceptions.Timeout("Call to ElasticSearch timed out.")
    return retval
Example 2: get_ready_event
def get_ready_event(self):
    """
    Returns an Event that is set when all the listeners in this Process are running.
    """
    ev = Event()

    def allready(ev):
        waitall([x.get_ready_event() for x in self.listeners])
        ev.set()

    spawn(allready, ev)
    return ev
Example 3: get_dirty_values_async_result
def get_dirty_values_async_result(self):
    dirty_async_res = AsyncResult()

    def dirty_check(self, res):
        while True:
            if self.is_dirty():
                time.sleep(0.1)
            else:
                res.set(True)
                break

    spawn(dirty_check, self, dirty_async_res)
    return dirty_async_res
Example 4: run_test_dispatcher
def run_test_dispatcher(work_count, num_workers=1):
    # Set up temporary directories to save data
    import shutil
    BASE_DIR = tempfile.mkdtemp()
    PIDANTIC_DIR = tempfile.mkdtemp()

    WORK_KEYS = ['a', 'b', 'c', 'd', 'e']

    for x in [x for x in os.listdir(BASE_DIR) if x.endswith('.h5')]:
        os.remove(os.path.join(BASE_DIR, x))

    fps = {}
    for k in WORK_KEYS:
        fps[k] = os.path.join(BASE_DIR, '{0}.h5'.format(k))
        # with h5py.File(fps[k], 'a'):
        #     pass

    bD = (50,)
    cD = (5,)
    fv = -9999
    dtype = 'f'

    def fcb(message, work):
        log.error('WORK DISCARDED!!!; %s: %s', message, work)

    disp = BrickWriterDispatcher(fcb, num_workers=num_workers, pidantic_dir=PIDANTIC_DIR)
    disp.run()

    def make_work():
        for x in xrange(work_count):
            bk = random.choice(WORK_KEYS)
            brick_metrics = (fps[bk], bD, cD, dtype, fv)
            if np.random.random_sample(1)[0] > 0.5:
                sl = int(np.random.randint(0, 10, 1)[0])
                w = np.random.random_sample(1)[0]
            else:
                strt = int(np.random.randint(0, bD[0] - 2, 1)[0])
                stp = int(np.random.randint(strt + 1, bD[0], 1)[0])
                sl = slice(strt, stp)
                w = np.random.random_sample(stp - strt)
            disp.put_work(work_key=bk, work_metrics=brick_metrics, work=([sl], w))
            time.sleep(0.1)

    spawn(make_work)

    # Remove temporary directories
    shutil.rmtree(BASE_DIR)
    shutil.rmtree(PIDANTIC_DIR)

    return disp
Example 5: on_start
def on_start(self):
    # Persister thread
    self._persist_greenlet = spawn(self._persister_loop, self.persist_interval)
    log.debug('EventPersister persist greenlet started in "%s" (interval %s)', self.__class__.__name__, self.persist_interval)

    # View trigger thread
    self._refresh_greenlet = spawn(self._refresher_loop, self.refresh_interval)
    log.debug('EventPersister view refresher greenlet started in "%s" (interval %s)', self.__class__.__name__, self.refresh_interval)

    # Event subscription
    self.event_sub = EventSubscriber(pattern=EventSubscriber.ALL_EVENTS,
                                     callback=self._on_event,
                                     queue_name="event_persister")
    self.event_sub.start()
Example 6: start
def start(self):
    # Create our own queue for container heartbeats and broadcasts
    topic = get_safe(self._pd_core.pd_cfg, "aggregator.container_topic") or "bx_containers"
    queue_name = "pd_aggregator_%s_%s" % (topic, create_valid_identifier(self.container.id, dot_sub="_"))
    self.sub_cont = Subscriber(binding=topic, from_name=queue_name, auto_delete=True,
                               callback=self._receive_container_info)
    self.sub_cont_gl = spawn(self.sub_cont.listen)
    self.sub_cont.get_ready_event().wait()

    self.evt_sub = EventSubscriber(event_type=OT.ContainerLifecycleEvent, callback=self._receive_event)
    self.evt_sub.add_event_subscription(event_type=OT.ProcessLifecycleEvent)
    self.evt_sub_gl = spawn(self.evt_sub.listen)
    self.evt_sub.get_ready_event().wait()

    log.info("PD Aggregator - event and heartbeat subscribers started")
Example 7: test_rpc_with_xn
def test_rpc_with_xn(self):
    # get an xn to use for send/recv
    xn = self.container.ex_manager.create_xn_service('hello')
    self.addCleanup(xn.delete)

    # create an RPCServer for a hello service
    hs = HelloService()
    rpcs = RPCServer(from_name=xn, service=hs)

    # spawn the listener, kill on test exit (success/fail/error should cover?)
    gl_listen = spawn(rpcs.listen)

    def cleanup():
        rpcs.close()
        gl_listen.join(timeout=2)
        gl_listen.kill()
    self.addCleanup(cleanup)

    # wait for listen to be ready
    rpcs.get_ready_event().wait(timeout=5)

    # ok, now create a client using same xn
    hsc = HelloServiceClient(to_name=xn)

    # try to message it!
    ret = hsc.hello('hi there')

    # did we get back what we expected?
    self.assertEquals(ret, 'BACK:hi there')
Example 8: start
def start(self):
    queue_name = get_safe(self._pd_core.pd_cfg, "command_queue") or "pd_command"
    self.sub_cont = Subscriber(binding=queue_name, from_name=queue_name, callback=self._receive_command)
    self.sub_cont_gl = spawn(self.sub_cont.listen, activate=False)
    self.sub_cont.get_ready_event().wait()

    self.pub_result = Publisher()
Example 9: execute_acquire_data
def execute_acquire_data(self, *args):
    """
    Spawns a greenlet to perform a data acquisition
    Calls BaseDataHandler._acquire_data
    Disallows multiple "new data" (unconstrained) requests using BaseDataHandler._semaphore lock
    Called from:
    InstrumentAgent._handler_observatory_execute_resource
     |--> ExternalDataAgent._handler_streaming_execute_resource
    @parameter args First argument should be a config dictionary
    """
    try:
        config = args[0]
    except IndexError:
        raise ParameterError('\'acquire_data\' command requires a config dict.')

    if not isinstance(config, dict):
        raise TypeError('args[0] of \'acquire_data\' is not a dict.')
    else:
        if get_safe(config, 'constraints') is None and not self._semaphore.acquire(blocking=False):
            log.warn('Already acquiring new data - action not duplicated')
            return

        g = spawn(self._acquire_data, config, self._unlock_new_data_callback)
        log.debug('** Spawned {0}'.format(g))

        self._glet_queue.append(g)
Example 10: _spawn
def _spawn(self):
    """ Spawn a gevent greenlet using defined target method and args.
    """
    gl = spawn(self.target, *self.spawn_args, **self.spawn_kwargs)
    gl.link(lambda _: self.ev_exit.set())   # Set exit event when we terminate
    gl._glname = "ION Thread %s" % str(self.target)
    return gl
Example 11: test_pub_speed
def test_pub_speed(self):
    pub = Publisher(node=self.container.node, name="i_no_exist")

    print >>sys.stderr, ""

    self.counter = 0
    self.alive = True

    def sendem():
        while self.alive:
            self.counter += 1
            pub.publish('meh')

    start_time = time.time()

    sendgl = spawn(sendem)
    time.sleep(5)

    end_time = time.time()

    self.alive = False
    sendgl.join(timeout=2)
    sendgl.kill()

    diff = end_time - start_time
    mps = float(self.counter) / diff

    print >>sys.stderr, "Published messages per second:", mps, "(", self.counter, "messages in", diff, "seconds)"
Example 12: test_rpc_speed
def test_rpc_speed(self):
    hsc = HelloServiceClient()

    print >>sys.stderr, ""

    self.counter = 0
    self.alive = True

    def sendem():
        while self.alive:
            hsc.noop('data')
            self.counter += 1

    start_time = time.time()

    sendgl = spawn(sendem)
    time.sleep(5)

    end_time = time.time()

    self.alive = False
    sendgl.join(timeout=2)
    sendgl.kill()

    diff = end_time - start_time
    mps = float(self.counter) / diff

    print >>sys.stderr, "Requests per second (RPC):", mps, "(", self.counter, "messages in", diff, "seconds)"
Example 13: test_known_error
def test_known_error(self):
    # IonExceptions and TypeErrors get forwarded back intact
    svc = self._make_service()
    p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
    p.start()
    p.get_ready_event().wait(timeout=5)
    self.addCleanup(p.stop)

    def proc_call():
        raise NotFound("didn't find it")

    def client_call(p=None, ar=None):
        try:
            ca = p._routing_call(proc_call, None)
            ca.get(timeout=5)
        except IonException as e:
            ar.set(e)

    ar = AsyncResult()
    gl_call = spawn(client_call, p=p, ar=ar)

    e = ar.get(timeout=5)
    self.assertIsInstance(e, NotFound)
Example 14: test_unknown_error
def test_unknown_error(self):
    # Unhandled exceptions get handled and then converted to ContainerErrors
    svc = self._make_service()
    p = IonProcessThread(name=sentinel.name, listeners=[], service=svc)
    p.start()
    p.get_ready_event().wait(timeout=5)
    self.addCleanup(p.stop)

    def proc_call():
        raise self.ExpectedError("didn't find it")

    def client_call(p=None, ar=None):
        try:
            ca = p._routing_call(proc_call, None)
            ca.get(timeout=5)
        except IonException as e:
            ar.set(e)

    ar = AsyncResult()
    gl_call = spawn(client_call, p=p, ar=ar)

    e = ar.get(timeout=5)
    self.assertIsInstance(e, ContainerError)
    self.assertEquals(len(p._errors), 1)
Example 15: on_start
def on_start(self):
    super(IngestionWorker, self).on_start()
    #----------------------------------------------
    # Start up couch
    #----------------------------------------------
    self.couch_config = self.CFG.get('couch_storage')
    self.hdf_storage = self.CFG.get('hdf_storage')

    self.number_of_workers = self.CFG.get('number_of_workers')
    self.description = self.CFG.get('description')

    self.ingest_config_id = self.CFG.get('configuration_id')

    self.datastore_name = self.couch_config.get('datastore_name', None) or 'dm_datastore'
    try:
        self.datastore_profile = getattr(DataStore.DS_PROFILE, self.couch_config.get('datastore_profile', 'SCIDATA'))
    except AttributeError:
        log.exception('Invalid datastore profile passed to ingestion worker. Defaulting to SCIDATA')
        self.datastore_profile = DataStore.DS_PROFILE.SCIDATA
    log.debug('datastore_profile %s' % self.datastore_profile)

    self.db = self.container.datastore_manager.get_datastore(ds_name=self.datastore_name, profile=self.datastore_profile, config=self.CFG)

    self.resource_reg_client = ResourceRegistryServiceClient(node=self.container.node)

    self.dataset_configs = {}

    # update the policy
    def receive_dataset_config_event(event_msg, headers):
        log.info('Updating dataset config in ingestion worker: %s', event_msg)

        if event_msg.type != DatasetIngestionTypeEnum.DATASETINGESTIONBYSTREAM:
            raise IngestionWorkerException('Received invalid type in dataset config event.')

        stream_id = event_msg.configuration.stream_id
        if event_msg.deleted:
            try:
                del self.dataset_configs[stream_id]
            except KeyError:
                log.info('Tried to remove dataset config that does not exist!')
        else:
            self.dataset_configs[stream_id] = event_msg

        # Hook to override just before processing is complete
        self.dataset_configs_event_test_hook(event_msg, headers)

    # Start the event subscriber - really - what a mess!
    self.event_subscriber = EventSubscriber(
        event_type="DatasetIngestionConfigurationEvent",
        origin=self.ingest_config_id,
        callback=receive_dataset_config_event
    )

    self.gl = spawn(self.event_subscriber.listen)
    self.event_subscriber._ready_event.wait(timeout=5)

    log.info(str(self.db))