本文整理汇总了Python中pyon.core.bootstrap.get_sys_name函数的典型用法代码示例。如果您正苦于以下问题:Python get_sys_name函数的具体用法?Python get_sys_name怎么用?Python get_sys_name使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_sys_name函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _force_clean
def _force_clean(cls, recreate=False, initial=False):
    """Destroy all system artifacts scoped to the current sysname.

    Cleans, in order: datastores prefixed with the sysname, RabbitMQ
    exchanges/queues for the sysname, and the scratch file system.

    @param recreate  if True, recreate each deleted datastore (empty)
    @param initial   if True, initialize the management database first
    """
    # Database resources
    from pyon.core.bootstrap import get_sys_name, CFG
    from pyon.datastore.datastore_common import DatastoreFactory
    datastore = DatastoreFactory.get_datastore(config=CFG, variant=DatastoreFactory.DS_BASE, scope=get_sys_name())
    if initial:
        datastore._init_database(datastore.database)

    dbs = datastore.list_datastores()
    # Only touch datastores namespaced by this sysname
    clean_prefix = '%s_' % get_sys_name().lower()
    things_to_clean = [x for x in dbs if x.startswith(clean_prefix)]

    try:
        for thing in things_to_clean:
            datastore.delete_datastore(datastore_name=thing)
            if recreate:
                datastore.create_datastore(datastore_name=thing)
    finally:
        # Always release the datastore connection, even if a delete failed
        datastore.close()

    # Broker resources
    from putil.rabbitmq.rabbit_util import RabbitManagementUtil
    # FIX: was bootstrap.get_sys_name(); this function imports get_sys_name
    # directly above and nothing here guarantees a module-level `bootstrap`
    # name — use the locally imported function for consistency.
    rabbit_util = RabbitManagementUtil(CFG, sysname=get_sys_name())
    deleted_exchanges, deleted_queues = rabbit_util.clean_by_sysname()
    log.info("Deleted %s exchanges, %s queues" % (len(deleted_exchanges), len(deleted_queues)))

    # File system
    from pyon.util.file_sys import FileSystem
    FileSystem._clean(CFG)
示例2: __init__
def __init__(
    self, xp_name=None, event_type=None, origin=None, queue_name=None, sub_type=None, origin_type=None, pattern=None
):
    """Initialize event-subscription state: topic binding and receive name.

    Queue names are always sysname-prefixed; when no queue_name is given,
    a unique anonymous one is generated (optionally process-scoped).
    """
    self.event_type = event_type
    self.sub_type = sub_type
    self.origin_type = origin_type
    self.origin = origin

    xp_name = xp_name or get_events_exchange_point()
    # An explicit pattern wins over the topic computed from the event fields
    self.binding = pattern if pattern else self._topic(event_type, origin, sub_type, origin_type)
    # TODO: Provide a case where we can have multiple bindings (e.g. different event_types)

    # prefix the queue_name, if specified, with the sysname
    sysname = bootstrap.get_sys_name()
    if queue_name is None:
        # Anonymous subscriber: build a unique, sysname-scoped queue name
        queue_name = create_simple_unique_id()
        if hasattr(self, "_process") and self._process:
            queue_name = "%s_%s" % (self._process._proc_name, queue_name)
        queue_name = "%s.%s" % (sysname, queue_name)
    elif not queue_name.startswith(sysname):
        # Caller supplied a name: ensure it carries the sysname prefix once
        queue_name = "%s.%s" % (sysname, queue_name)

    # set this name to be picked up by inherited folks
    self._ev_recv_name = (xp_name, queue_name)
示例3: __init__
def __init__(self, xp_name=None, event_type=None, origin=None, queue_name=None,
             sub_type=None, origin_type=None, pattern=None):
    """Initialize event-subscription state: topic binding and receive name.

    A caller-supplied queue_name is sysname-prefixed (queue names cross
    xp boundaries); if no queue_name is given it is left as None.
    """
    self.event_type = event_type
    self.sub_type = sub_type
    self.origin_type = origin_type
    self.origin = origin

    xp_name = xp_name or get_events_exchange_point()
    # An explicit pattern wins over the topic computed from the event fields
    self.binding = pattern if pattern else self._topic(event_type, origin, sub_type, origin_type)
    # TODO: Provide a case where we can have multiple bindings (e.g. different event_types)

    # prefix the queue_name, if specified, with the sysname
    # this is because queue names transcend xp boundaries (see R1 OOIION-477)
    if queue_name is not None and not queue_name.startswith(bootstrap.get_sys_name()):
        queue_name = "%s.%s" % (bootstrap.get_sys_name(), queue_name)
        log.warn("queue_name specified, prepending sys_name to it: %s", queue_name)

    # set this name to be picked up by inherited folks
    self._ev_recv_name = (xp_name, queue_name)
示例4: es_cleanup
def es_cleanup():
    """Delete this sysname's ElasticSearch indexes and their CouchDB rivers.

    Removes every standard index plus the sysname-scoped resources/events
    indexes; each delete is routed through _es_call (best-effort wrapper).
    """
    es_host = CFG.get_safe("server.elasticsearch.host", "localhost")
    es_port = CFG.get_safe("server.elasticsearch.port", "9200")
    es = ep.ElasticSearch(host=es_host, port=es_port, timeout=10)
    # FIX: materialize keys() as a list — on Python 3 dict.keys() returns a
    # view with no append(); list(...) is equally correct on Python 2.
    indexes = list(STD_INDEXES.keys())
    indexes.append("%s_resources_index" % get_sys_name().lower())
    indexes.append("%s_events_index" % get_sys_name().lower())
    for index in indexes:
        # Remove the river first, then the index itself
        IndexManagementService._es_call(es.river_couchdb_delete, index)
        IndexManagementService._es_call(es.index_delete, index)
示例5: on_start
def on_start(self):
    """Start the benchmark transform: wildcard subscriber plus publisher."""
    TransformDataProcess.on_start(self)

    # Subscribe to everything on the bench queue, feeding each message to
    # call_process (headers discarded)
    self._bt_sub = Subscriber(
        callback=lambda msg, hdrs: self.call_process(msg),
        from_name=NameTrio(get_sys_name(), 'bench_queue', '*'))
    # Run the listener in its own greenlet
    self._sub_gl = spawn(self._bt_sub.listen)

    # Publisher with a random 6-character queue suffix
    self._bt_pub = Publisher(to_name=NameTrio(get_sys_name(), str(uuid.uuid4())[:6]))
示例6: _cleanup_method
def _cleanup_method(self, queue_name, ep=None):
    """
    Common method to be passed to each spawned ION process to clean up their process-queue.
    @TODO Leaks implementation detail, should be using XOs
    """
    # FIX: guard against ep._chan being None (endpoint never got a channel
    # attached), matching the hardened variant of this method elsewhere —
    # the unguarded attribute access raised AttributeError in that case.
    if ep._chan is not None and not ep._chan._queue_auto_delete:
        # only need to delete if AMQP didn't handle it for us already!
        # @TODO this will not work with XOs (future)
        ch = self.container.node.channel(RecvChannel)
        ch._recv_name = NameTrio(get_sys_name(), "%s.%s" % (get_sys_name(), queue_name))
        ch._destroy_queue()
示例7: _cleanup_method
def _cleanup_method(self, queue_name, ep=None):
    """
    Common method to be passed to each spawned ION process to clean up their process-queue.
    @TODO Leaks implementation detail, should be using XOs
    """
    chan = ep._chan
    if chan is None or chan._queue_auto_delete:
        # Nothing to delete: no channel attached, or AMQP auto-delete
        # already handled it for us
        return
    # @TODO this will not work with XOs (future)
    try:
        recv_chan = self.container.node.channel(RecvChannel)
        recv_chan._recv_name = NameTrio(get_sys_name(), "%s.%s" % (get_sys_name(), queue_name))
        recv_chan._destroy_queue()
    except TransportError as ex:
        log.warn("Cleanup method triggered an error, ignoring: %s", ex)
示例8: test_init
def test_init(self):
    """Verify EventPublisher send-name defaults and xp override."""
    # Default publisher targets the sysname-scoped "pyon.events" exchange
    # with no fixed queue
    self.assertEquals(self._pub._send_name.exchange, "%s.pyon.events" % bootstrap.get_sys_name())
    self.assertEquals(self._pub._send_name.queue, None)
    # An explicit xp argument is used verbatim as the exchange name
    pub = EventPublisher(node=self._node, xp=sentinel.xp)
    self.assertEquals(pub._send_name.exchange, sentinel.xp)
    self.assertEquals(pub._send_name.queue, None)
示例9: setUp
def setUp(self):
    """Start a container with the r2dm deployment and resolve the science-data XS/XP names."""
    super(DataRetrieverIntTestAlpha, self).setUp()
    self._start_container()

    # Point the ingestion/replay bootstrap at the "alpha" implementations
    config = DotDict()
    config.bootstrap.processes.ingestion.module = 'ion.processes.data.ingestion.ingestion_worker_a'
    config.bootstrap.processes.replay.module = 'ion.processes.data.replay.replay_process_a'
    self.container.start_rel_from_url('res/deploy/r2dm.yml', config)

    self.datastore_name = 'test_datasets'
    self.datastore = self.container.datastore_manager.get_datastore(
        self.datastore_name, profile=DataStore.DS_PROFILE.SCIDATA)

    # Service clients exercised by the tests
    self.data_retriever = DataRetrieverServiceClient()
    self.dataset_management = DatasetManagementServiceClient()
    self.resource_registry = ResourceRegistryServiceClient()

    # CFG value must look like "<xs>.<xp>"; the XP gets sysname-scoped
    xs_dot_xp = CFG.core_xps.science_data
    try:
        self.XS, xp_base = xs_dot_xp.split('.')
        self.XP = '.'.join([get_sys_name(), xp_base])
    except ValueError:
        raise StandardError('Invalid CFG for core_xps.science_data: "%s"; must have "xs.xp" structure' % xs_dot_xp)
示例10: on_start
def on_start(self):
    """Bootstrap or clean the ElasticSearch indexes, selected via the 'op' config key."""
    if not self.CFG.get_safe('system.elasticsearch', False):
        text = 'Can not initialize indexes without ElasticSearch enabled. Please enable system.elasticsearch.'
        log.error(text)
        raise BadRequest(text)

    self.sysname = get_sys_name().lower()

    # Connection settings
    self.es_host = self.CFG.get_safe('server.elasticsearch.host', 'localhost')
    self.es_port = self.CFG.get_safe('server.elasticsearch.port', '9200')
    # Index and river shard/replica configuration
    self.index_shards = self.CFG.get_safe('server.elasticsearch.shards', 5)
    self.index_replicas = self.CFG.get_safe('server.elasticsearch.replicas', 1)
    self.river_shards = self.CFG.get_safe('server.elasticsearch.river_shards', 5)
    self.river_replicas = self.CFG.get_safe('server.elasticsearch.river_replicas', 1)

    self.es = ep.ElasticSearch(host=self.es_host, port=self.es_port, timeout=10)

    # Dispatch on the requested operation
    handlers = {
        'index_bootstrap': self.index_bootstrap,
        'clean_bootstrap': self.clean_bootstrap,
    }
    op = self.CFG.get('op', None)
    if op not in handlers:
        raise BadRequest('Operation Unknown')
    handlers[op]()
示例11: on_start
def on_start(self):
    """Run one datastore admin operation (load/dump/blame/clear) chosen by CFG 'op'."""
    # print env temporarily to debug cei
    import os
    log.info('ENV vars: %s' % str(os.environ))

    op = self.CFG.get("op", None)
    datastore = self.CFG.get("datastore", None)
    path = self.CFG.get("path", None)
    # Default the prefix to the sysname; comparisons are lowercase
    prefix = self.CFG.get("prefix", get_sys_name()).lower()
    log.info("DatastoreLoader: {op=%s, datastore=%s, path=%s, prefix=%s}" % (op, datastore, path, prefix))

    self.da = datastore_admin.DatastoreAdmin()

    if not op:
        raise iex.BadRequest("No operation specified")

    if op == "load":
        self.da.load_datastore(path, datastore, ignore_errors=False)
    elif op == "dump":
        self.da.dump_datastore(path, datastore)
    elif op == "blame":
        # TODO make generic
        self.da.get_blame_objects()
    elif op == "clear":
        self.da.clear_datastore(datastore, prefix)
    else:
        raise iex.BadRequest("Operation unknown")
示例12: launch_benchmark
def launch_benchmark(transform_number=1, primer=1, message_length=4):
    """Spawn TransformInPlace benchmark processes and prime each with messages.

    @param transform_number  how many transform processes to start
    @param primer            messages published to prime each transform
    @param message_length    length of each published list payload
    """
    import gevent
    from gevent.greenlet import Greenlet
    from pyon.util.containers import DotDict
    from pyon.net.transport import NameTrio
    from pyon.net.endpoint import Publisher
    import uuid

    TransformBenchTesting.message_length = message_length
    cc = Container.instance
    # Publisher with a random 6-character queue suffix
    pub = Publisher(to_name=NameTrio(get_sys_name(), str(uuid.uuid4())[0:6]))

    transforms = []
    pid = 1
    for _ in xrange(transform_number):
        tbt = cc.proc_manager._create_service_instance(
            str(pid), 'tbt', 'prototype.transforms.linear', 'TransformInPlace',
            DotDict({'process': {'name': 'tbt%d' % pid, 'transform_id': pid}}))
        tbt.init()
        tbt.start()
        gevent.sleep(0.2)
        # Prime the pipeline before starting the perf greenlet
        for _ in xrange(primer):
            pub.publish(list(xrange(message_length)))
        perf_gl = Greenlet(tbt.perf)
        perf_gl.start()
        transforms.append(tbt)
        pid += 1
示例13: on_initial_bootstrap
def on_initial_bootstrap(self, process, config, **kwargs):
    """Launch the QC Post Processor once per system, woken by an interval timer.

    Skipped for test launches and when a qc_post_processor process already
    exists, making the bootstrap idempotent.
    """
    if 'test' in get_sys_name():
        # If this is a launch designed for tests, don't launch the QC Post Processor
        return
    if self.process_exists(process, 'qc_post_processor'):
        # Short circuit the bootstrap to make sure not more than one is ever started
        return

    self.scheduler_service = SchedulerServiceProcessClient(process=process)
    self.process_dispatcher = ProcessDispatcherServiceProcessClient(process=process)

    # Interval in hours (default 24); converted to seconds for the timer below
    self.run_interval = CFG.get_safe('service.qc_processing.run_interval', 24)

    interval_key = uuid4().hex  # Unique identifier for this process
    # Note: shadows the incoming `config` parameter with the process config
    config = DotDict()
    config.process.interval_key = interval_key

    # Define, create, and schedule the QC post-processing process
    process_definition = ProcessDefinition(name='qc_post_processor',
        executable={'module':'ion.processes.data.transforms.qc_post_processing', 'class':'QCPostProcessing'})
    process_definition_id = self.process_dispatcher.create_process_definition(process_definition)

    process_id = self.process_dispatcher.create_process(process_definition_id)
    self.process_dispatcher.schedule_process(process_definition_id, process_id=process_id, configuration=config)

    # Timer events (origin = interval_key) wake the process every run_interval hours
    timer_id = self.scheduler_service.create_interval_timer(start_time=str(time.time()),
        end_time='-1', #Run FOREVER
        interval=3600*self.run_interval,
        event_origin=interval_key)
示例14: listen
def listen(lch):
    """
    The purpose of this listen method is to trigger waits in code below.
    By setting up a listener that subscribes to both 3 and 5, and putting received
    messages into the appropriate gevent-queues client side, we can assume that
    the channel we're actually testing with get_stats etc has had the message delivered
    too.
    """
    # Keep the queue after the consumer disconnects so it remains inspectable
    lch._queue_auto_delete = False
    lch.setup_listener(NameTrio(bootstrap.get_sys_name(), 'alternate_listener'), 'routed.3')
    # Also consume messages routed with the "routed.5" key on the same queue
    lch._bind('routed.5')
    lch.start_consume()
    while True:
        try:
            newchan = lch.accept()
            m, h, d = newchan.recv()
            # Message bodies look like "<route>,<count>" — take the trailing count
            count = m.rsplit(',', 1)[-1]
            if m.startswith('5,'):
                self.five_events.put(int(count))
                newchan.ack(d)
            elif m.startswith('3,'):
                self.three_events.put(int(count))
                newchan.ack(d)
            else:
                raise StandardError("unknown message: %s" % m)
        except ChannelClosedError:
            # Channel torn down by the enclosing test — exit the consume loop
            break
示例15: __init__
def __init__(self, orgname=None, datastore_manager=None, events_enabled=False):
    """Directory facade over the directory-profile datastore.

    Registers the org's root entry on auto-bootstrap and, when events are
    enabled, wires a change-event publisher and subscriber.
    """
    # Get an instance of datastore configured as directory.
    datastore_manager = datastore_manager or bootstrap.container_instance.datastore_manager
    self.dir_store = datastore_manager.get_datastore(DataStore.DS_DIRECTORY)

    self.orgname = orgname or CFG.system.root_org
    self.is_root = self.orgname == CFG.system.root_org

    self.events_enabled = events_enabled
    self.event_pub = None
    self.event_sub = None

    # Create directory root entry (for current org) if not existing
    if CFG.system.auto_bootstrap:
        root_de = self.register("/", "DIR", sys_name=bootstrap.get_sys_name())
        if root_de is None:
            pass  # We created this directory just now

    if self.events_enabled:
        # init change event publisher
        self.event_pub = EventPublisher()
        # Register to receive directory changes
        self.event_sub = EventSubscriber(
            event_type="ContainerConfigModifiedEvent",
            origin="Directory",
            callback=self.receive_directory_change_event)