This article collects typical usage examples of the Python function yakonfig.get_global_config. If you are looking for concrete answers to "what does get_global_config do, how is it called, and what does real code using it look like", the examples selected here should help.
The following shows 15 code examples of the get_global_config function, sorted by popularity by default.
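All of the examples share one pattern: configuration is loaded once into a process-global dictionary, and get_global_config() then returns either the whole dictionary or a single named sub-section of it. Below is a minimal sketch of that pattern; it uses only calls that appear in the examples that follow, and passing yakonfig itself as the configured module is simply the smallest possible choice (a real application would list its own configurable modules instead).

import yakonfig

# Minimal sketch, assuming only the calls used in the examples below.
yakonfig.set_default_config([yakonfig])   # load defaults into the global config
try:
    config = yakonfig.get_global_config()           # the whole configuration dict
    sub = yakonfig.get_global_config('yakonfig')    # one named sub-section
    assert sub is config['yakonfig']                # sub-sections are returned by name
                                                    # (see Examples 2 and 6)
finally:
    yakonfig.clear_global_config()                  # reset the global state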
Example 1: test_kvlayer_simple
def test_kvlayer_simple(configurator, tmpdir):
    si = streamcorpus.make_stream_item('2000-01-01T12:34:00.000123Z',
                                       'test://test.stream.item/')
    chunkfile = str(tmpdir.join('chunk.sc.xz'))
    with streamcorpus.Chunk(path=chunkfile, mode='wb') as chunk:
        chunk.add(si)

    with configurator():
        writer = to_kvlayer(yakonfig.get_global_config(
            'streamcorpus_pipeline', 'to_kvlayer'))
        writer(chunkfile, {}, '')

        kvlclient = kvlayer.client()
        kvlclient.setup_namespace({'stream_items': 2})
        print repr(list(kvlclient.scan_keys('stream_items')))
        for (k, v) in kvlclient.get(
                'stream_items',
                (uuid.UUID(int=946730040),
                 uuid.UUID(hex='985c1e3ed73256cd9a399919fe93cf76'))):
            assert v is not None

        reader = from_kvlayer(yakonfig.get_global_config(
            'streamcorpus_pipeline', 'from_kvlayer'))
        sis = list(reader(''))
        assert len(sis) == 1
        assert sis[0].stream_time.epoch_ticks == si.stream_time.epoch_ticks
        assert sis[0].abs_url == si.abs_url
Example 2: test_include_real_paths
def test_include_real_paths(reset_globals):
    t1 = tempfile.NamedTemporaryFile()
    t2 = tempfile.NamedTemporaryFile()
    t3 = tempfile.NamedTemporaryFile()

    y1 = u'''
t1:
  k3: !include_yaml %s
  k4: !include_yaml %s
''' % (t2.name, os.path.basename(t3.name))
    print(y1)
    y2 = u'dog'
    y3 = u'two'

    t1.write(y1.encode('utf-8'))
    t2.write(y2.encode('utf-8'))
    t3.write(y3.encode('utf-8'))
    t1.flush()
    t2.flush()
    t3.flush()

    config = set_global_config(t1.name)
    assert get_global_config() is config
    print(config)

    sub_config = get_global_config('t1')
    assert sub_config is config['t1']
    assert sub_config['k3'] == y2
    assert sub_config['k4'] == y3
Example 3: test_normalize
def test_normalize():
    with yakonfig.defaulted_config([Normalized]):
        assert yakonfig.get_global_config('normalized')['k'] == 'v'
    with yakonfig.defaulted_config([Normalized], {'k': 'foo'}):
        assert yakonfig.get_global_config('normalized')['k'] == 'f'
    with yakonfig.defaulted_config([Normalized],
                                   yaml='''
normalized:
  k: zoom!
'''):
        assert yakonfig.get_global_config('normalized')['k'] == 'z'
Example 4: v1_folder_extract_post
def v1_folder_extract_post(fid, sid):
    conf = yakonfig.get_global_config('coordinate')
    tm = coordinate.TaskMaster(conf)
    key = cbor.dumps((fid, sid))
    wu_status = tm.get_work_unit_status('ingest', key)
    if wu_status and wu_status['status'] in (AVAILABLE, BLOCKED, PENDING):
        return {'state': 'pending'}
    else:
        logger.info('launching async work unit for %r', (fid, sid))
        conf = yakonfig.get_global_config('coordinate')
        tm = coordinate.TaskMaster(conf)
        tm.add_work_units('ingest', [(cbor.dumps((fid, sid)), {})])
        return {'state': 'submitted'}
Example 5: main
def main():
    parser = argparse.ArgumentParser(
        'Command line interface to the office TREC DD jig.',
        usage=usage,
        conflict_handler='resolve')
    parser.add_argument('command', help='must be "load", "init", "start", "step", or "stop"')
    parser.add_argument('args', help='input for given command',
                        nargs=argparse.REMAINDER)
    modules = [yakonfig, kvlayer, Harness]
    args = yakonfig.parse_args(parser, modules)
    logging.basicConfig(level=logging.DEBUG)

    if args.command not in set(['load', 'init', 'start', 'step', 'stop']):
        sys.exit('The only valid commands are "load", "init", "start", "step", and "stop".')

    kvl = kvlayer.client()
    label_store = LabelStore(kvl)
    config = yakonfig.get_global_config('harness')
    harness = Harness(config, kvl, label_store)

    if args.command == 'load':
        if not config.get('truth_data_path'):
            sys.exit('Must provide --truth-data-path as an argument')
        if not os.path.exists(config['truth_data_path']):
            sys.exit('%r does not exist' % config['truth_data_path'])
        parse_truth_data(label_store, config['truth_data_path'])
        logger.info('Done! The truth data was loaded into this '
                    'kvlayer backend:\n%s',
                    json.dumps(yakonfig.get_global_config('kvlayer'),
                               indent=4, sort_keys=True))
    elif args.command == 'init':
        response = harness.init()
        print(json.dumps(response))
    elif args.command == 'start':
        response = harness.start()
        print(json.dumps(response))
    elif args.command == 'stop':
        response = harness.stop(args.args[0])
        print(json.dumps(response))
    elif args.command == 'step':
        parts = args.args
        topic_id = parts.pop(0)
        feedback = harness.step(topic_id, parts)
        print(json.dumps(feedback))
Example 6: test_yakonfig_default
def test_yakonfig_default():
    yakonfig.set_default_config([yakonfig])
    try:
        c = yakonfig.get_global_config()
        assert 'yakonfig' in c
    finally:
        yakonfig.clear_global_config()
Example 7: v1_folder_extract_get
def v1_folder_extract_get(request, response, kvlclient, store, fid, sid):
    conf = yakonfig.get_global_config('coordinate')
    tm = coordinate.TaskMaster(conf)
    key = cbor.dumps((fid, sid))
    wu_status = tm.get_work_unit_status('ingest', key)
    status = wu_status['status']
    if status in (AVAILABLE, BLOCKED, PENDING):
        return {'state': 'pending'}
    elif status in (FINISHED,):
        kvlclient.setup_namespace({'openquery': (str,)})
        data = None
        try:
            data = list(kvlclient.get('openquery', (key,)))
            assert len(data) == 1, data
            logger.info('got data of len 1: %r', data)
            assert data[0], data
            assert data[0][1], data
            data = data[0][1]
            data = json.loads(data)
            data['state'] = 'done'
            return data
        except:
            logger.info('kvlclient: %r', kvlclient)
            logger.error('Failed to get openquery data: %r', data, exc_info=True)
            return {'state': 'failed'}
    else:
        return {'state': 'failed'}
Example 8: test_pipeline
def test_pipeline(request, test_data_dir):
    filename = str(request.fspath.dirpath('test_dedup_chunk_counts.yaml'))
    with yakonfig.defaulted_config([streamcorpus_pipeline], filename=filename):
        ## config says read from stdin, so make that have what we want
        stdin = sys.stdin
        sys.stdin = StringIO(get_test_chunk_path(test_data_dir))

        ## run the pipeline
        stages = PipelineStages()
        pf = PipelineFactory(stages)
        p = pf(yakonfig.get_global_config('streamcorpus_pipeline'))

        from streamcorpus_pipeline.run import SimpleWorkUnit
        work_unit = SimpleWorkUnit('long string indicating source of text')
        work_unit.data['start_chunk_time'] = time.time()
        work_unit.data['start_count'] = 0
        g = gevent.spawn(p._process_task, work_unit)
        gevent.sleep(5)
        with pytest.raises(SystemExit):  # pylint: disable=E1101
            p.shutdown(sig=signal.SIGTERM)
        logger.debug('now joining...')
        timeout = gevent.Timeout(1)
        g.join(timeout=timeout)
Example 9: main
def main(options):
    """Run the recommender system on a sequence of topics.
    """
    description = "System using LDA, Kmeans and Solr to optimize diversification and exploitation of different topics"
    parser = argparse.ArgumentParser(description=description)
    parser.add_argument("--overwrite", action="store_true")
    args = yakonfig.parse_args(parser, [yakonfig])
    logging.basicConfig(level=logging.DEBUG)

    config = yakonfig.get_global_config("harness")
    batch_size = config.get("batch_size", 5)
    run_file_path = config["run_file_path"]
    if os.path.exists(run_file_path):
        if args.overwrite:
            os.remove(run_file_path)
        else:
            os.remove(run_file_path)
            # sys.exit('%r already exists' % run_file_path)

    kvl_config = {"storage_type": "local", "namespace": "test", "app_name": "test"}
    kvl = kvlayer.client(kvl_config)

    method, feedback_options, poids, id_config = options[0], options[1], options[2], options[3]
    print method, poids
    system = SearchSystem([], method, feedback_options, poids)
    print args.config
    args.config = "config" + str(id_config) + ".yaml"
    print args.config

    ambassador = HarnessAmbassadorCLI(system, args.config, batch_size)
    ambassador.run()
Example 10: dragnet_status
def dragnet_status():
    conf = yakonfig.get_global_config('coordinate')
    tm = coordinate.TaskMaster(conf)
    wu_status = tm.get_work_unit_status('dragnet', DRAGNET_KEY)
    if not wu_status:
        return None
    status = wu_status['status']
    return status
Example 11: test_include_abstract
def test_include_abstract(reset_globals, monkeypatch_open):
    YAML_TEXT_TWO = StringIO('''
app_one:
  one: car
app_two:
  bad: [cat, horse]
  good: !include /some-path-that-will-not-be-used
''')
    config = set_global_config(YAML_TEXT_TWO)
    assert get_global_config() is config

    sub_config = get_global_config('app_two')
    assert sub_config is config['app_two']
    assert sub_config['good'] == dict(k1='v1', k2=['v21'])
Example 12: test_archive_by_count
def test_archive_by_count(xconfig, jobqueue_conf):
    config = dict(yakonfig.get_global_config('coordinate', 'job_queue'))
    config['limit_completed_count'] = 2
    config.update(jobqueue_conf)
    job_queue = JobQueue(config)
    if job_queue.postgres_connect_string:
        pytest.skip('TODO: postgres has not implemented archive by count')

    job_queue.set_work_spec({'name': 'ws1'})
    job_queue.add_work_units('ws1', [('wu1', {'x': 1}),
                                     ('wu2', {'x': 1}),
                                     ('wu3', {'x': 1})])

    # Bump all three work units to "finished"
    for wu in ['wu1', 'wu2', 'wu3']:
        wu_parts, msg = job_queue.get_work('id1', {})
        assert wu_parts[0] == 'ws1'
        assert wu_parts[1] == wu
        job_queue.update_work_unit(wu_parts[0], wu_parts[1],
                                   {'status': FINISHED})

    # Archiving hasn't happened, so we should see the finished count
    # is 3, and all three work units are there
    counts, msg = job_queue.count_work_units('ws1')
    assert counts[FINISHED] == 3
    wus, msg = job_queue.get_work_units('ws1', {})
    assert [wu[0] for wu in wus] == ['wu1', 'wu2', 'wu3']

    job_queue.archive()

    # Now we should still see the same count, but the one that ran
    # first (wu1) is off the list
    counts, msg = job_queue.count_work_units('ws1')
    assert counts[FINISHED] == 3
    wus, msg = job_queue.get_work_units('ws1', {})
    assert [wu[0] for wu in wus] == ['wu2', 'wu3']
Example 13: main
def main():
    ap = argparse.ArgumentParser()
    ap.add_argument('--host', default=None,  # NOT -h, that's help
                    help='host that coordinated will listen on, '
                    '0.0.0.0 for any input interface')
    ap.add_argument('--port', '-p', type=int, default=None,
                    help='port number that coordinated will listen on')
    ap.add_argument('--pid', default=None,
                    help='file to write pid to')
    ap.add_argument('--snapshot-dir', default=None,
                    help='directory to write snapshots to')
    ap.add_argument('--httpd', default=None,
                    help='ip:port or :port to serve http info on')
    if yappi is not None:
        ap.add_argument('--yappi', default=None,
                        help='file to write yappi profiling to. '
                        'will be suffixed by {timestamp}.txt')
    args = yakonfig.parse_args(ap, [yakonfig, dblogger, coordinate])

    if args.pid:
        with open(args.pid, 'w') as f:
            f.write(str(os.getpid()))

    if args.snapshot_dir is not None:
        cjqconfig = yakonfig.get_global_config('coordinate', 'job_queue')
        # (This modifies the global configuration in place)
        cjqconfig['snapshot_path_format'] = os.path.join(
            args.snapshot_dir, 'snapshot_{timestamp}')

    if (yappi is not None) and args.yappi:
        yappi.start()
        yt = threading.Thread(target=yappi_logger, args=(args.yappi,))
        yt.daemon = True
        yt.start()

    daemon = CoordinateServer(host=args.host, port=args.port, httpd=args.httpd)
    daemon.run()
Example 14: test_kvlayer_reader_and_writer
def test_kvlayer_reader_and_writer(configurator, test_data_dir):
    with chunks(configurator, test_data_dir) as (path, client):
        ## check that index table was created
        all_doc_ids = set()
        all_epoch_ticks = set()
        for (doc_id, epoch_ticks), empty_data in client.scan('stream_items_doc_id_epoch_ticks'):
            all_doc_ids.add(doc_id)
            all_epoch_ticks.add(epoch_ticks)
        all_doc_ids = sorted(all_doc_ids)
        all_epoch_ticks = sorted(all_epoch_ticks)
        logger.info('%d doc_ids', len(all_doc_ids))

        ## make a reader
        config = yakonfig.get_global_config('streamcorpus_pipeline',
                                            'from_kvlayer')
        reader = from_kvlayer(config)

        ## test it with different i_str inputs:
        for i_str in ['', '0,,%d,' % 10**10, '%d,%s,%d,%s' %
                      (all_epoch_ticks[0], all_doc_ids[0],
                       all_epoch_ticks[-1], all_doc_ids[-1])]:
            stream_ids = []
            for si in reader(i_str):
                stream_ids.append(si.stream_id)
            _input_chunk_ids = [si.stream_id for si in streamcorpus.Chunk(path)]
            input_chunk_ids = list(set(_input_chunk_ids))
            logger.info('%d inserts, %d unique',
                        len(_input_chunk_ids), len(input_chunk_ids))
            input_chunk_ids.sort()
            stream_ids.sort()
            assert len(input_chunk_ids) == len(stream_ids)
            assert input_chunk_ids == stream_ids
Example 15: __init__
def __init__(self, host=None, port=None, config=None, httpd=None):
    self.config = config or yakonfig.get_global_config(
        'coordinate', 'server')
    self.host = host or self._cfget('host')
    self.port = port or self._cfget('port')
    self.do_rpclog = self._cfget('rpclog')

    # TCPServer
    self.server = None

    # There is one server state instance which is the target
    # for all request handlers.
    # Methods should synchronize as needed and be thread safe.
    self.pobj = MultiBackendProxyObject(
        self.do_rpclog,
        module_config=self._cfget('modules'),
        module_instances=[JobQueue()]
    )

    self.httpd_params = httpd or self._cfget('httpd')
    if self.httpd_params is not None and ':' not in self.httpd_params:
        raise ProgrammerError(
            'httpd config needs ip:port or :port to serve on, got {!r}'
            .format(self.httpd_params))
    self.httpd = None
    self.httpd_thread = None