本文整理汇总了Python中scalrpy.LOG.info方法的典型用法代码示例。如果您正苦于以下问题:Python LOG.info方法的具体用法?Python LOG.info怎么用?Python LOG.info使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类scalrpy.LOG的用法示例。
在下文中一共展示了LOG.info方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _get_szr_conn_info
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def _get_szr_conn_info(server, port, instances_connection_policy):
ip = {
'public': server['remote_ip'],
'local': server['local_ip'],
'auto': server['remote_ip'] if server['remote_ip'] else server['local_ip'],
}[instances_connection_policy]
headers = {}
if server['platform'] == 'ec2' and 'ec2.vpc.id' in server and 'router.vpc.ip' in server:
if server['remote_ip']:
ip = server['remote_ip']
else:
headers.update({
'X-Receiver-Host': server['local_ip'],
'X-Receiver-Port': port,
})
ip = server['router.vpc.ip']
port = 80
# Start - Added by Chen Leji
if not server['remote_ip']:
LOG.info("=============Apply FLOATINGIP_PROXY Patch=============")
ip = "localhost"
proxy = floatingip_proxy.szrProxy(port, server['server_id'])
port = proxy.get_proxy_port()
return ip, port, headers
示例2: __call__
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def __call__(self):
    """Run self.task every self.period seconds until self._stop is set."""
    self._stop = False
    while not self._stop:
        greenlet = None
        try:
            self.start_time = time.time()
            LOG.debug('Start periodical task ({})'.format(self.task_name))
            self.iteration_number += 1
            # Expose scheduling metadata to the task callable.
            self.task.task_info = {
                'period': self.period,
                'timeout': self.timeout,
                'start_time': self.start_time,
                'iteration_number': self.iteration_number,
            }
            if callable(self.before):
                message = 'Periodical task ({}) call before ({})'
                message = message.format(self.task_name, self.before.__name__)
                LOG.debug(message)
                self.before(self)
            greenlet = gevent.spawn(self.task)
            greenlet.get(timeout=self.timeout)
        except:
            # Kill the task greenlet if it is still running (e.g. timeout).
            if greenlet and not greenlet.ready():
                greenlet.kill()
            try:
                if callable(self.on_error):
                    self.on_error(self)
            except:
                message = 'Periodical task ({}) on error ({}) failed'
                message = message.format(self.task_name, self.on_error.__name__)
                handle_error(message=message)
            message = 'Periodical task ({}) error: {}'
            message = message.format(self.task_name, exc_info(where=False))
            handle_error(message=message)
        finally:
            try:
                if callable(self.after):
                    try:
                        message = 'Periodical task ({}) call after ({})'
                        message = message.format(self.task_name, self.after.__name__)
                        LOG.debug(message)
                        self.after(self)
                    except:
                        message = 'After ({0}) failed'.format(self.after.__name__)
                        handle_error(message=message)
                self.end_time = time.time()
                task_time = self.end_time - self.start_time
                message = 'End task ({0}): {1:.1f} seconds'
                message = message.format(self.task_name, task_time)
                LOG.info(message)
                # Sleep in small slices so a stop request is noticed quickly.
                if self.period:
                    next_time = self.start_time + self.period
                    while time.time() < next_time and not self._stop:
                        time.sleep(0.5)
            except:
                message = 'Task ({}) finally failed'.format(self.task_name)
                handle_error(message=message)
示例3: main
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def main():
    """Entry point: configure and run the AnalyticsProcessing application."""
    application = AnalyticsProcessing()
    try:
        application.load_config()
        application.configure()
        application.run()
    except exceptions.AlreadyRunningError:
        # Another instance already holds the pid file — log and exit quietly.
        LOG.info(helper.exc_info())
    except (SystemExit, KeyboardInterrupt):
        pass
    except:
        LOG.exception('Oops')
示例4: main
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def main():
    """Entry point: configure and run the LoadStatistics application."""
    global app  # module-level handle; presumably used by signal handlers — confirm
    app = LoadStatistics()
    try:
        app.load_config()
        app.configure()
        app.run()
    except exceptions.AlreadyRunningError:
        # Another instance already holds the pid file — log and exit quietly.
        LOG.info(helper.exc_info(where=False))
    except (SystemExit, KeyboardInterrupt):
        pass
    except:
        LOG.exception('Oops')
示例5: _start
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def _start(self):
    """Daemonize (if requested), write the pid file, then run the service.

    Raises AlreadyRunningError when the pid file points at a live process.
    """
    pid_file = self.config['pid_file']
    if helper.check_pid(pid_file):
        raise exceptions.AlreadyRunningError(pid_file)
    LOG.debug(self._starting_msg)
    if self.args['--daemon']:
        helper.daemonize()
    helper.create_pid_file(pid_file)
    # Ensure the pid file is removed however the process exits.
    atexit.register(helper.delete_file, pid_file)
    helper.set_proc_name(self.name)
    self.start_dtime = datetime.datetime.utcnow()
    LOG.info('Started')
    self()  # blocks until the service loop returns
    LOG.info('Stopped')
示例6: process_aws_billing
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def process_aws_billing(self):
    """Fetch AWS detailed billing per credential, then rebuild farm_usage_d."""
    if self.args['--recalculate']:
        return
    dtime_from, dtime_to = self.get_aws_billing_interval()
    message = 'AWS billing interval: {0} - {1}'
    message = message.format(dtime_from, dtime_to)
    LOG.info(message)
    for envs in self.analytics.load_envs():
        # Group environments by EC2 credential so each AWS account is
        # processed once.
        by_credential = {}
        for env in envs:
            if env.get('ec2.detailed_billing.enabled', '0') != '1':
                continue
            bucket_name = env['ec2.detailed_billing.bucket']
            creds = self.analytics.get_creds([env])
            cred = next(cred for cred in creds if cred.platform == 'ec2')
            by_credential.setdefault(cred.unique,
                                     {'envs_ids': [], 'cred': cred, 'bucket_name': bucket_name})
            by_credential[cred.unique]['envs_ids'].append(env['id'])
        for account_data in by_credential.values():
            # Throttle: keep the pool at most half full before scheduling more.
            while len(self.pool) > self.config['pool_size'] * 5 / 10:
                gevent.sleep(0.1)
            self.pool.apply_async(self.process_aws_account,
                                  args=(account_data, dtime_from, dtime_to))
    self.pool.join()
    if not self.aws_billing_dtime_from:
        return
    dtime_from = self.aws_billing_dtime_from
    if self.config['dtime_to']:
        dtime_to = self.config['dtime_to']
    else:
        # Default upper bound: the end of the previous full hour.
        dtime_hour_ago = datetime.datetime.utcnow() - datetime.timedelta(hours=1)
        dtime_to = dtime_hour_ago.replace(minute=59, second=59, microsecond=999999)
    # Fill the farm_usage_d table hour by hour.
    dtime_cur = dtime_from
    message = 'AWS fill_farm_usage_d interval: {0} - {1}'
    LOG.info(message.format(dtime_cur, dtime_to))
    while dtime_cur <= dtime_to:
        date, hour = dtime_cur.date(), dtime_cur.hour
        try:
            self.analytics.fill_farm_usage_d(date, hour, platform='ec2')
        except:
            message = 'Unable to fill farm_usage_d table for date {0}, hour {1}'.format(date, hour)
            LOG.exception(message)
        dtime_cur += datetime.timedelta(hours=1)
示例7: __call__
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def __call__(self):
    """Start the plotter web server and/or the poller loop per CLI flags."""
    poller_ps, plotter_ps = None, None
    if self.args['--plotter']:
        plotter = Plotter(self.config)
        plotter_ps = plotter.run_in_process()
        time.sleep(5)  # give CherryPy a moment to bind before checking
        if not plotter_ps.is_alive():
            LOG.critical('Failed to start CherryPy web server')
            sys.exit(1)
    self.change_permissions()
    if self.args['--poller']:
        poller = Poller(self.config, self.scalr_config)
        while True:
            start_time = time.time()
            try:
                LOG.info('Start poller iteration')
                rrdcached_sock_file = self.config['rrd']['rrdcached_sock_path']
                if not os.path.exists(rrdcached_sock_file):
                    raise Exception('rrdcached process is not running')
                poller_ps = poller.run_in_process()
                # Allow up to twice the polling interval before terminating.
                poller_ps.join(self.config['interval'] * 2)
                if poller_ps.is_alive():
                    LOG.error('Poller iteration timeout. Terminating')
                    try:
                        poller_ps.terminate()
                    except:
                        msg = 'Unable to terminate, reason: {error}'.format(
                            error=helper.exc_info())
                        raise Exception(msg)
                LOG.info('Poller iteration time: %.2f' % (time.time() - start_time))
            except KeyboardInterrupt:
                raise
            except:
                msg = 'Poller iteration failed, reason: {error}'.format(
                    error=helper.exc_info())
                LOG.error(msg)
            finally:
                # Pace the loop to one iteration per configured interval.
                sleep_time = start_time + self.config['interval'] - time.time() - 0.1
                if sleep_time > 0:
                    time.sleep(sleep_time)
    if plotter_ps:
        plotter_ps.join()
示例8: __call__
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def __call__(self):
    """Schedule AWS detailed-billing processing for all relevant envs."""
    try:
        dtime_from, dtime_to = self.get_billing_interval()
        message = 'AWS billing interval: {} - {}'.format(dtime_from, dtime_to)
        LOG.info(message)
        self._create_cache_dir()
        # Pass 1: regular accounts (those without a payer account).
        aws_accounts_ids = self.analytics.load_aws_accounts_ids()
        for batch in helper.chunks(aws_accounts_ids, 100):
            envs = self.analytics.load_aws_accounts_ids_envs(batch)
            envs = [env for env in envs if env.get('ec2.is_enabled', '0') == '1']
            self.analytics.load_env_credentials(envs, platform='ec2')
            envs = [env for env in envs if
                    env.get('ec2.detailed_billing.enabled', '0') == '1' and
                    env.get('ec2.detailed_billing.payer_account') in (None, '')]
            if not envs:
                continue
            self._wait_pool()
            self.pool.apply_async(self.process_envs, args=(envs, dtime_from, dtime_to))
        # Pass 2: consolidated-billing payer accounts.
        aws_payers_accounts = self.analytics.load_aws_payers_accounts()
        for batch in helper.chunks(aws_payers_accounts, 100):
            envs = self.analytics.load_aws_payers_accounts_envs(batch)
            envs = [env for env in envs if env.get('ec2.is_enabled', '0') == '1']
            self.analytics.load_env_credentials(envs, platform='ec2')
            envs = [env for env in envs if
                    env.get('ec2.detailed_billing.enabled', '0') == '1']
            if not envs:
                continue
            self._wait_pool()
            self.pool.apply_async(self.process_envs, args=(envs, dtime_from, dtime_to))
        self.pool.join()
    except:
        self.pool.kill()
        helper.handle_error(message='AWS billing failed')
        raise
    finally:
        self.downloading_locks = {}
        try:
            self._remove_cache_dir()
        except:
            message = 'Unable to remove cache dir {}'
            message = message.format(self.cache_dir)
            helper.handle_error(message=message, level='error')
示例9: __call__
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def __call__(self):
    """Schedule Azure billing processing for all subscriptions."""
    try:
        dtime_from, dtime_to = self.get_billing_interval()
        message = 'Azure billing interval: {} - {}'.format(dtime_from, dtime_to)
        LOG.info(message)
        azure_subscriptions_ids = self.analytics.load_azure_subscriptions_ids()
        for batch in helper.chunks(azure_subscriptions_ids, 100):
            envs = self.analytics.load_azure_subscriptions_ids_envs(batch)
            self.analytics.load_env_credentials(envs, platform='azure')
            if not envs:
                continue
            self._wait_pool()
            self.pool.apply_async(self.process_envs, args=(envs, dtime_from, dtime_to))
        self.pool.join()
    except:
        self.pool.kill()
        helper.handle_error(message='Azure billing failed')
        raise
示例10: run
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def run(self):
    """Start the plotter server thread, drop privileges, then wait on it."""
    try:
        self.configure()
        server_thread = self._serve_forever()
        while not server_thread.is_alive():
            time.sleep(0.5)
        # Wait before changing permissions so CherryPy can still read the
        # certificates as the original user.
        time.sleep(2)
        # Drop privileges to the configured group/user, if any.
        if self.config['group']:
            helper.set_gid(self.config['group'])
        if self.config['user']:
            helper.set_uid(self.config['user'])
        LOG.info('Plotter started')
        server_thread.join()
    except:
        LOG.exception(helper.exc_info())
示例11: recalculate
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def recalculate(self, date, hour):
    """Recompute costs in usage_h / nm_usage_h for one hour, then refresh
    farm_usage_d. Any failure is re-raised as a generic Exception."""
    try:
        message = "Recalculate hourly tables for date {0}, hour {1}".format(date, hour)
        LOG.info(message)
        # izip_longest: the two record streams may have different lengths
        # (Python 2 itertools).
        for usage_h_records, nm_usage_h_records in itertools.izip_longest(
                self.analytics.get_usage_h_records(date, hour, self.config['platform']),
                self.analytics.get_nm_usage_h_records(date, hour, self.config['platform'])):
            usage_h_records = usage_h_records or []
            LOG.info('usage_h records for recalculating: %s' % len(usage_h_records))
            nm_usage_h_records = nm_usage_h_records or []
            LOG.info('nm_usage_h records for recalculating: %s' % len(nm_usage_h_records))
            self._set_usage_cost(usage_h_records + nm_usage_h_records)
            for record in usage_h_records:
                self._pool.wait()
                self._pool.apply_async(self.analytics.update_usage_h, (record,))
                gevent.sleep(0)  # force switch
            for record in nm_usage_h_records:
                self._pool.wait()
                self._pool.apply_async(self.analytics.update_nm_usage_h, (record,))
                gevent.sleep(0)  # force switch
        self._pool.join()
        self.analytics.fill_farm_usage_d(date, hour, platform=self.config['platform'])
    except:
        message = "Unable to recalculate date {date}, hour {hour}, reason: {error}".format(
            date=date, hour=hour, error=helper.exc_info())
        raise Exception(message)
示例12: calculate
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def calculate(self, date, hour):
    """Compute server costs for one hour and insert them, then refresh
    farm_usage_d. Any failure is re-raised as a generic Exception."""
    try:
        message = "Calculate date {0}, hour {1}".format(date, hour)
        LOG.info(message)
        # izip_longest: the two server streams may have different lengths
        # (Python 2 itertools).
        for managed_servers, not_managed_servers in itertools.izip_longest(
                self.analytics.get_managed_servers(date, hour),
                self.analytics.get_not_managed_servers(date, hour)):
            managed_servers = managed_servers or []
            LOG.info('Managed servers for processing: %s' % len(managed_servers))
            not_managed_servers = not_managed_servers or []
            LOG.info('Not managed servers for processing: %s' % len(not_managed_servers))
            self._set_servers_cost(managed_servers + not_managed_servers)
            for server in managed_servers:
                self._pool.wait()
                self._pool.apply_async(self.analytics.insert_managed_server, (server,))
                gevent.sleep(0)  # force switch
            for server in not_managed_servers:
                self._pool.wait()
                self._pool.apply_async(self.analytics.insert_not_managed_server, (server,))
                gevent.sleep(0)  # force switch
        self._pool.join()
        self.analytics.fill_farm_usage_d(date, hour)
    except:
        message = "Unable to calculate date {date}, hour {hour}, reason: {error}".format(
            date=date, hour=hour, error=helper.exc_info())
        raise Exception(message)
示例13: _stop
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def _stop(self):
    """Stop the running service found via the pid file; raise on failure."""
    LOG.debug(self._stopping_msg)
    try:
        pid_file = self.config['pid_file']
        if not os.path.exists(pid_file):
            msg = "Can't stop, pid file %s doesn't exist\n" % pid_file
            sys.stderr.write(helper.colorize(helper.Color.FAIL, msg))
            return
        with file(pid_file, 'r') as pf:  # Python 2 builtin file()
            pid = int(pf.read().strip())
        # proc names are truncated to 15 chars by the kernel, hence [0:15].
        for ps in psutil.process_iter():
            if ps.name() == self.name[0:15]:
                # TODO
                # SIGINT
                helper.kill_children(pid)
                helper.kill(pid)
                break
        else:
            msg = "Process with name {0} doesn't exists".format(self.name)
            raise Exception(msg)
        LOG.info('Stopped')
        helper.delete_file(pid_file)
    except:
        msg = "Can't stop, reason: {error}".format(error=helper.exc_info())
        raise Exception(msg)
示例14: do_iteration
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def do_iteration(self):
    """One scheduling pass: fetch pending messages, shard them across
    workers, and dispatch each to the processing pool."""
    global debug_rate_counter
    global debug_rate_timestamp
    # Report and reset the debug throughput counters.
    debug_rate_time = time.time() - debug_rate_timestamp
    rate = round(debug_rate_counter / debug_rate_time, 2)
    LOG.info('Average rate: %s, %s' % (rate, rate * 60))
    debug_rate_counter = 0
    debug_rate_timestamp = time.time()
    # Back off while too many messages are still in flight.
    while len(self._processing_messages) > self._max_processing_messages:
        LOG.warning('Reached the limit of simultaneously processed messages')
        time.sleep(1)
    messages = self.get_messages()
    messages = [m for m in messages if m['message_id'] not in self._processing_messages]
    num, idx = int(self.config['workers']), int(self.config['index'])

    def filter_messages(message):
        # Shard by farm_id across workers; messages without server/farm ids
        # all go to worker 1.
        if message.get('server_id') and message.get('farm_id'):
            return int(message['farm_id']) % num == idx - 1
        else:
            return idx == 1

    if num > 1:
        messages = filter(filter_messages, messages)
    if not messages:
        time.sleep(self.nothing_todo_sleep)
        return
    self.load_servers_data(messages)
    server_statuses = [
        'Running',
        'Initializing',
        'Importing',
        'Temporary',
        'Pending terminate',
        'Pending suspend',
    ]
    for message in messages:
        try:
            self._processing_messages.add(message['message_id'])
            if message.get('server_id') is None or \
                    message['server_status'] not in server_statuses or (
                    message['server_status'] in ('Pending terminate', 'Pending suspend') and
                    int(message['handle_attempts']) >= 1):
                msg = (
                    "Server {message_server_id} doesn't exist or not in right status, "
                    "set message {message_id} status to 3").format(**message)
                LOG.warning(msg)
                message['status'] = 3
                self._pool.wait()
                self._pool.apply_async(self.update, (message,))
            else:
                self._pool.wait()
                self._pool.apply_async(self.process_message, (message,))
        except:
            msg = "Unable to process message: {message_id}, reason: {error}"
            msg = msg.format(message_id=message['message_id'], error=helper.exc_info())
            LOG.warning(msg)
    LOG.info('Messages still in processing: %s' % len(self._processing_messages))
示例15: delete_data
# 需要导入模块: from scalrpy import LOG [as 别名]
# 或者: from scalrpy.LOG import info [as 别名]
def delete_data(self, csv_file, envs, period):
    """Delete AWS detailed-billing rows for the given environments/period
    from aws_billing_records, usage_h/usage_servers_h, usage_d and
    farm_usage_d, in one transaction (rolled back on any error)."""
    envs_ids = list(set(int(env['id']) for env in envs))
    dtime_from, dtime_to = period
    message = 'Deleting AWS detailed billing data for environments: {}, period: {} - {}'
    message = message.format(envs_ids, dtime_from, dtime_to)
    LOG.info(message)
    with self.analytics.lock:
        db = self.analytics.analytics_db
        db.autocommit(False)
        try:
            # aws_billing_records: delete by RecordId in batches of 1000.
            for rows in self.csv_reader(csv_file, envs, dtime_from=dtime_from, dtime_to=dtime_to):
                records_ids = [row['RecordId'] for row in rows]
                for chunk in helper.chunks(records_ids, 1000):
                    if chunk:
                        query = (
                            "DELETE FROM aws_billing_records "
                            "WHERE record_id IN ({record_id})"
                        ).format(record_id=str(chunk)[1:-1])
                        db.execute(query)
            # Walk the period in 15-day steps to keep each DELETE bounded.
            _dtime_from = dtime_from
            step_days = 15
            while _dtime_from < dtime_to:
                _dtime_to = min(_dtime_from + datetime.timedelta(days=step_days), dtime_to)
                # usage_servers_h, usage_h
                query = (
                    "DELETE uh, us "
                    "FROM usage_h uh "
                    "LEFT JOIN usage_servers_h us ON uh.usage_id=us.usage_id "
                    "WHERE uh.platform='ec2' "
                    "AND uh.dtime BETWEEN '{dtime_from}' AND '{dtime_to}' "
                    "AND uh.env_id IN ({env_id})"
                ).format(env_id=str(envs_ids)[1:-1], dtime_from=_dtime_from, dtime_to=_dtime_to)
                db.execute(query)
                # usage_d
                query = (
                    "DELETE FROM usage_d "
                    "WHERE platform='ec2' "
                    "AND date BETWEEN '{date_from}' AND '{date_to}' "
                    "AND env_id IN ({env_id})"
                ).format(env_id=str(envs_ids)[1:-1], date_from=_dtime_from.date(), date_to=_dtime_to.date())
                db.execute(query)
                # farm_usage_d
                query = (
                    "DELETE FROM farm_usage_d "
                    "WHERE platform='ec2' "
                    "AND date BETWEEN '{date_from}' AND '{date_to}' "
                    "AND env_id IN ({env_id})"
                ).format(env_id=str(envs_ids)[1:-1], date_from=_dtime_from.date(), date_to=_dtime_to.date())
                db.execute(query)
                _dtime_from += datetime.timedelta(days=step_days)
            db.commit()
        except:
            db.rollback()
            raise
        finally:
            db.autocommit(True)