本文整理汇总了Python中utils.get_logger函数的典型用法代码示例。如果您正苦于以下问题:Python get_logger函数的具体用法?Python get_logger怎么用?Python get_logger使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get_logger函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: global_r_call
def global_r_call(function_name, args=None, r_file=PATIENT_MODULE_FILE_NAME):
    """
    Call a function defined in the R global environment.

    Writing shortcut for all previous function using this same code.

    :param function_name: name of the ro.globalenv function to call
    :type function_name: str
    :param args: argument(s) forwarded to the R function, usually a selector
        like GroupID; a single non-tuple value is wrapped into a 1-tuple
    :param r_file: The file to be loaded for the call
    :type r_file: str
    :return: whatever the R function returns, or an empty list when the
        R runtime raised an error
    """
    # Source & export R code
    source_file(r_file)
    # Look up the function exported by the sourced script
    r_getter_func = ro.globalenv[function_name]
    data = list()
    try:
        # Use an explicit None test so falsy-but-valid selectors
        # (0, empty string) are still forwarded to R.
        if args is not None:
            if not isinstance(args, tuple):
                args = (args,)
            data = r_getter_func(*args)
        else:
            data = r_getter_func()
    except RRuntimeError as e:
        # Log and fall through: callers receive an empty list on R errors.
        get_logger().error(e)
    return data
示例2: auto_register
def auto_register(mng_cls, cls, proxytype=None, init_args=(),
                  init_kwargs=None, **kwargs):
    '''
    Register shared object classes with a default proxytype.

    Parameters
    ----------
    mng_cls : multiprocessing.managers.BaseManager subclass
        manager class the shared object class is registered with
    cls : type
        class which is to be registered with the manager for use
        as a shared object
    proxytype : subclass of multiprocessing.managers.BaseProxy
        Proxy object used to communicate with a shared instance of cls.
        If None, then the following steps are attempted:
        1) an attempt is made to call the class' build_proxy method which
           is expected to provision and return a proxy object as well as
           register with the manager any sub-proxies which it expects to
           utilize.
        2) failing that, a default -> make_inst_proxy(cls) will be used.
    init_args : tuple
        positional arguments baked into the registered constructor
    init_kwargs : dict, optional
        keyword arguments baked into the registered constructor;
        None (the default) means no extra keyword arguments
    '''
    # FIX: the default used to be a shared mutable dict ({}), which lets
    # state leak between calls; use the None sentinel instead.
    if init_kwargs is None:
        init_kwargs = {}
    # isinstance accepts classes built by metaclasses (ABCs, etc.) that a
    # strict `type(cls) == type` check would reject.
    assert isinstance(cls, type)
    typeid = cls.__name__
    if proxytype is None:
        try:  # to use cls defined proxy
            proxytype = cls.build_proxy(mng_cls)
        except AttributeError:
            proxytype = make_inst_proxy(cls)
            get_logger().debug("no proxy was provided for '{}' using "
                               "default '{}'".format(cls, proxytype))
    # Bake construction arguments into the callable the manager invokes.
    cls = functools.partial(cls, *init_args, **init_kwargs)
    mng_cls.register(typeid, cls, proxytype=proxytype, **kwargs)
示例3: __init__
def __init__(self, ml_name, data, split_dict, **kwargs):
    """
    Build (or reload from cache) a classifier for the given dataset.

    :param ml_name: short name of the ML model, used in the pickle file name
    :param data: object exposing ``sample_feats`` and ``label_feats``
        (fed through ``self.preprocessing`` with the module-level INDEX)
    :param split_dict: keyword arguments forwarded to train_test_split
    :param kwargs: classifier hyper-parameters forwarded to self.get_clf;
        also encoded into the pickle file name
    """
    self.sample_feats, self.label_feat = self.preprocessing(
        data.sample_feats, data.label_feats, INDEX)
    assert np.all(np.isfinite(self.label_feat))
    self.ml_name = ml_name
    # training and test split
    self.Xtrain, self.Xtest, self.ytrain, self.ytest = \
        train_test_split(self.sample_feats, self.label_feat, **split_dict)
    # Pickle name encodes index, model name, split params and (optionally)
    # the classifier params.  NOTE: `**kwargs` always collects into a dict,
    # so the previous `kwargs is not None` check was dead code.
    parts = [str(INDEX), self.ml_name, dict_to_str(split_dict)]
    if kwargs:
        parts.append(dict_to_str(kwargs))
    pickle_name = '-'.join(parts) + '.pkl'
    model_pickle = os.path.join(model_dir, pickle_name)
    if os.path.exists(model_pickle):
        # Cached model: load it instead of re-fitting.
        utils.get_logger().warning('{} exists'.format(model_pickle))
        with open(model_pickle, 'rb') as model:
            self.clf = pickle.load(model)
    else:
        # No cache: fit a fresh classifier and persist it.
        utils.get_logger().warning('{} does not exist'.format(model_pickle))
        self.clf = self.get_clf(kwargs)
        self.clf.fit(self.Xtrain, self.ytrain)
        with open(model_pickle, 'wb') as model:
            pickle.dump(self.clf, model)
示例4: _stop_service
def _stop_service(self, container_id):
    """
    Stop a service container and clean up its service-discovery entries.

    Crashed / not-recovered services only get their KV entry removed;
    running ones are first asked to stop their agents via supervisorctl.
    The docker stop + KV cleanup is retried up to 3 times; if the
    container is still running afterwards, the last exception is re-raised.

    :param container_id: id of the container to stop
    :raises Exception: the last error seen, when the container cannot be stopped
    """
    ship = get_ship_name()
    service_dict = None
    key = None  # KV key of this container's service entry, if any
    service_list = kv_list('ships/{}/service/'.format(ship))
    if service_list:
        matches = fnmatch.filter(service_list, '*/{}'.format(container_id))
        # FIX: previously `key` stayed unbound when service_list was empty
        # and `key[0]` raised IndexError on an empty match list.
        key = matches[0] if matches else None
        service_dict = kv_get(key) if key else None
    if service_dict and service_dict['Status'] in ['crashed', 'not-recovered']:
        kv_remove(key)
    else:
        run_command_in_container('supervisorctl stop armada_agent', container_id)
        # TODO: Compatibility with old microservice images. Should be removed in future armada version.
        run_command_in_container('supervisorctl stop register_in_service_discovery', container_id)
    docker_api = docker_client.api()
    last_exception = None
    try:
        deregister_services(container_id)
    except Exception:
        # Best-effort deregistration: log and continue with the stop.
        traceback.print_exc()
    for i in range(3):
        try:
            docker_api.stop(container_id)
            if key:
                kv_remove(key)
        except Exception as e:
            last_exception = e
            traceback.print_exc()
        if not is_container_running(container_id):
            break
    if is_container_running(container_id):
        get_logger().error('Could not stop container: {}'.format(container_id))
        raise last_exception
示例5: get_event_time
def get_event_time(event, epoch=0.0):
    '''Convert the event's micro-second timestamp header to seconds.

    Returns the timestamp in seconds relative to *epoch*, or None (after
    logging a warning) when the event carries no timestamp header.
    '''
    stamp = event.getHeader('Event-Date-Timestamp')
    if stamp is None:
        get_logger().warning("Event '{}' has no timestamp!?".format(
            event.getHeader("Event-Name")))
        return None
    seconds = float(stamp) / 1e6
    return seconds - epoch
示例6: __init__
def __init__(self):
    """Load the dataset, preferring the pickled cache over the CSV source.

    When the pickle exists it is loaded directly; otherwise the CSV file
    is parsed with numpy and the result is pickled for next time.
    """
    if os.path.isfile(data_pickle):
        # Fast path: a cached pickle is already on disk.
        utils.get_logger().warning('{} exists'.format(data_pickle))
        with open(data_pickle, 'rb') as raw_data:
            self.data = pickle.load(raw_data)
        return
    # Slow path: parse the CSV and write the cache.
    utils.get_logger().warning('{} does not exist'.format(data_pickle))
    assert os.path.isfile(data_file)
    self.data = np.genfromtxt(data_file, delimiter=',')
    with open(data_pickle, 'wb') as raw_data:
        pickle.dump(self.data, raw_data)
示例7: connect_callback
def connect_callback(timestamp):
    """Connect to the slave and history databases and sync their schemas.

    Connection parameters for both databases are taken from the command
    line.  Builds the history tables, records *timestamp*, then mirrors
    every schema/table of the slave into the history database.

    :param timestamp: value passed to populator.update()
    :return: (slave connection, SlaveInspector, HistoryPopulator)
    """
    # One (field, help-template) row per connection parameter; the same
    # five options exist for both the slave and the history database.
    specs = [
        ('host', 'Hostname or IP of the {} database'),
        ('port', 'Port number for the {} database'),
        ('user', 'Username for the {} database'),
        ('password', 'Password for the {} database'),
        ('database', 'Name of the {} database'),
    ]
    parser = argparse.ArgumentParser()
    for role in ('slave', 'history'):
        for field, template in specs:
            parser.add_argument('--{}-{}'.format(role, field),
                                help=template.format(role))
    args = parser.parse_args()
    slave = {field: getattr(args, 'slave_' + field) for field, _ in specs}
    history = {field: getattr(args, 'history_' + field) for field, _ in specs}
    slavecon, histcon = connect(slave, history)
    inspector = SlaveInspector(slavecon, logger=get_logger('inspector'))
    populator = HistoryPopulator(histcon, logger=get_logger('populator'))
    populator.create_tables()
    populator.update(timestamp)
    # Mirror every slave schema and table into the history database.
    for schema in inspector.schemas():
        populator.add_schema(schema)
        for table in inspector.tables(schema):
            inspector.columns(table)
            populator.add_table(table)
            populator.create_table(table)
            populator.fill_table(table)
    return slavecon, inspector, populator
示例8: main
def main():
    """Write the consul agent config and exec the consul process.

    Builds the config from runtime settings plus SHIP_EXTERNAL_IP, writes
    it to consul_config.CONFIG_PATH, then replaces this process with the
    consul agent via os.execv (this function never returns on success).
    """
    consul_mode, ship_ips, datacenter = _get_runtime_settings()
    external_ip = os.environ.get('SHIP_EXTERNAL_IP', '')
    config_content = consul_config.get_consul_config(
        consul_mode, ship_ips, datacenter, external_ip)
    with open(consul_config.CONFIG_PATH, 'w') as config_file:
        config_file.write(config_content)
    command = '/usr/local/bin/consul agent -config-file {config_path}'.format(config_path=consul_config.CONFIG_PATH)
    get_logger().info('RUNNING: {}'.format(command))
    argv = command.split()
    os.execv(argv[0], argv)
示例9: test_rora_connect
def test_rora_connect():
    """
    Test if RORA server is online and connection can be made successfully
    :return: True|False
    :rtype: bool
    """
    # Open and immediately close a connection via the RORA R library.
    r_code = ''.join([
        'source("%sconnection.R");' % settings.RORA_LIB,
        'link <- roraConnect();',
        'dbDisconnect(link);',
    ])
    try:
        ro.r(r_code)
    except RRuntimeError as e:
        # Any R-side failure means the server is unreachable.
        get_logger().error(e)
        return False
    return True
示例10: __init__
def __init__(self, qid, event_queue):
    """Initialize the web-QQ client session state.

    :param qid: QQ account number this client logs in with
    :param event_queue: queue this client publishes events to
    """
    self.logger = get_logger()
    self.qid = qid
    # NOTE(review): 1003903 looks like a fixed protocol appid — confirm.
    self.aid = 1003903
    # Random per-session client/message identifiers.
    self.clientid = random.randrange(11111111, 99999999)
    self.msg_id = random.randrange(1111111, 99999999)
    self.group_map = {}  # group mapping
    self.group_m_map = {}  # mapping from group to its members
    self.uin_qid_map = {}  # mapping from uin to QQ number
    # Login/session tokens, filled in during the login handshake.
    self.check_code = None
    self.skey = None
    self.ptwebqq = None
    self.require_check = False  # whether a captcha check is required
    self.QUIT = False
    self.last_msg = {}
    self.event_queue = event_queue
    self.check_data = None  # data returned by CheckHanlder
    self.blogin_data = None  # data returned before login (pre-login response)
    self.rc = 1
    # Timestamps for heartbeat and poll bookkeeping.
    self.start_time = time.time()
    self.hb_last_time = self.start_time
    self.poll_last_time = self.start_time
    self._helper = HttpHelper()
    # Connection state flags.
    self.connected = False
    self.polled = False
    self.heartbeated = False
    self.group_lst_updated = False
示例11: setup
def setup():
    """Parse command-line options and initialize module globals.

    Populates the module-level Options, Log, RootDir and VerifDir.
    Exits the process on a usage error (status 1) or when the current
    directory is not inside an Octeon tree (status 255).

    NOTE(review): this is Python 2 code (print statement; the
    ``version=`` ArgumentParser kwarg was removed in Python 3).
    """
    global Options
    global Log
    global RootDir, VerifDir
    p = argparse.ArgumentParser(
        prog='untb',
        formatter_class=argparse.RawDescriptionHelpFormatter,
        usage="untb <tb_name>",
        version="%(prog)s v"+str(__version__),
        description=__description__)
    p.add_argument('tb_name', action='append', nargs='+', default=None)
    p.add_argument('-f', '--force', action='store_true', default=False, help="Force creation of new testbench even if it already exists.")
    p.add_argument('-d', '--dbg', action='store_true', default=False, help="Turns on debug lines.")
    try:
        Options = p.parse_args()
    # Bare except also catches the SystemExit argparse raises on bad usage.
    except:
        print "Usage error: See untb -h for more information."
        sys.exit(1)
    # -d/--dbg switches the logger from INFO to DEBUG verbosity.
    verbosity = {False: logging.INFO, True: logging.DEBUG}[Options.dbg]
    Log = utils.get_logger('log', verbosity)
    utg.Log = Log
    try:
        RootDir = utils.calc_root_dir()
    except utils.AreaError:
        Log.critical("CWD is not in an Octeon Tree.")
        sys.exit(255)
    VerifDir = os.path.join(RootDir, "verif")
示例12: user_network
def user_network():
    """
    Start iteration process for load data to user_network.

    Reads batch settings from the load_settings table, then loops forever:
    load up to LOAD_ROWS rows starting at LOAD_DATE_START, persist the new
    start date back to load_settings, and sleep between iterations (longer
    when no new data arrived).  Never returns.
    """
    log = get_logger()
    # Two connections: the source ip-table DB and the destination DB.
    with get_connect() as con, get_connect_iptable() as con_ip:
        cur = con.cursor()
        cur_ip = con_ip.cursor()
        # get settings
        settings = get_settings(cur)
        load_date_start = datetime.strptime(settings['LOAD_DATE_START'],
                                            '%Y.%m.%d %H:%M:%S.%f')
        load_pause = int(settings['LOAD_PAUSE'])
        load_pause_empty = int(settings['LOAD_PAUSE_EMPTY'])
        cnt_rows = int(settings['LOAD_ROWS'])
        while True:
            # Returns the timestamp reached by this batch.
            date = user_network_load(cur_ip, cur, load_date_start, cnt_rows)
            log.info("Load data. load_date_start: %s", date)
            # save date between iterations (so progress survives restarts)
            if date > load_date_start:
                sql = "update load_settings set value=%s where param=%s"
                date_str = datetime.strftime(date, '%Y.%m.%d %H:%M:%S.%f')
                cur.execute(sql, [date_str, 'LOAD_DATE_START'])
            else:
                # sleep if not new data
                log.debug("No new data sleep( %s )", load_pause_empty)
                time.sleep(load_pause_empty)
            if load_pause:
                log.debug("sleep between loads sleep( %s )", load_pause)
                time.sleep(load_pause)
            load_date_start = date
示例13: __init__
def __init__(self, modules):
    """Called when module is loaded.

    Stores the module registry, derives a default name, wires up logging,
    and reads the mandatory ``enable`` and ``interval`` config options.

    :raises Exception: when the config section lacks enable or interval
    """
    self._modules = modules
    # Fall back to the class name when no explicit name was set.
    if not self.name:
        self.name = class_name(self)
    # Logger for this module, writing to stdout.
    self.logger = get_logger(self.name, [stdout])
    section = self.get_config(False)
    # Scan for 'enable' (case-insensitive key; last occurrence wins).
    self.enable = None
    for option, raw in section:
        if option.lower() == "enable":
            self.enable = raw.lower() == "true"
    if self.enable is None:
        raise Exception("Config for module '%s' doesn't have enable = true/false" % self.name)
    # Scan again for 'interval', same matching rules.
    self.interval = None
    for option, raw in section:
        if option.lower() == "interval":
            self.interval = float(raw)
    if self.interval is None:
        raise Exception("Config for module '%s' doesn't have interval = secs, use 0 for running once" % self.name)
示例14: extract_from_file
def extract_from_file(filename, num_process):
    """
    Queue per-target-word extraction jobs from an index file and run them.

    :param filename: UTF-8 file whose lines start with
        "<target_word> <page_title> <offset>"
    :param num_process: number of worker processes; 1 runs sequentially
    """
    import utils
    global LOGGER
    LOGGER = utils.get_logger()
    dataset_path = u'../datasets/wiki'
    # get processed words so already-done targets are skipped
    processed_words = get_target_words(dataset_path)
    jobs = dd(list)
    for line in codecs.open(filename, encoding='utf-8'):
        line = line.split()
        target_word, page_title, offset = line[:3]
        if target_word not in processed_words:
            jobs[target_word].append(dict(word=target_word, page_title=page_title, offset=offset, fetch_links=True))
    LOGGER.info("Total {} of jobs available. Num of consumer = {}".format(len(jobs), num_process))
    if num_process > 1:
        pool = Pool(num_process)
        pool.map(extract_instances_for_word, jobs.values())
    else:
        # FIX: debug leftover iterated only the hard-coded [jobs['milk']]
        # job; process every queued word sequentially as intended.
        for v in jobs.values():
            extract_instances_for_word(v)
    LOGGER.info("Done.")
示例15: proc_luna_running
def proc_luna_running(**kw):
""" Fetch test statuses from Redis, if test finisged notify service via API
and call reduce job.
"""
ext = {'test_id': kw.get('redis_value', {}).get('id')}
logger = get_logger(**ext)
if not ('t_fqdn' in kw and 't_tank_id' in kw):
logger.erro('proc_luna_running call. Malformed params:{}'.format(kw))
try:
except TankClientError as e:
logger.error('Tank API call failed: {}'.format(e))
raise
if tank_msg['status_code'] != 'FINISHED':
if kw.get('status') and TestStateTr.tank_by_port(kw.get('status')) != tank_msg['status_code']:
# test state changes since last check, need to notify
port_state = TestStateTr.port_by_tank(tank_msg['status_code'])
redis_value = kw['redis_value']
redis_value.update({'status': port_state})
redis.hset(r_adr['monitor_finish'], kw['id'],
msgpack.packb(redis_value))
diff = {
'status': port_state,
}