This page collects typical usage examples of the join function on utils.path.log_path in Python. If you are unsure what join does, how to call it, or what real-world calls look like, the curated examples below should help.
In total, 15 code examples of the join function are shown, ordered by popularity by default.
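In all of the examples below, log_path is a py.path.local object pointing at the project's log directory, so join returns another path object rather than a plain string. A minimal sketch of the assumed setup (the project layout here is illustrative; the real utils.path module may derive the root differently):

    # Assumption: log_path is a py.path.local rooted at <project>/log.
    from py.path import local

    project_path = local(__file__).dirpath()  # hypothetical project root
    log_path = project_path.join('log')

    # join() chains path segments and returns another py.path.local,
    # so the result can be converted with strpath or opened directly:
    stats_csv = log_path.join('csv_output', 'stats.csv')
    print(stats_csv.strpath)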
Example 1: test_queue_infrastructure
def test_queue_infrastructure(request, ssh_client, enable_candu):
    local_evm_gz = str(log_path.join('evm.perf.log.gz'))
    local_evm = str(log_path.join('evm.perf.log'))
    local_top_gz = str(log_path.join('top_output.perf.log.gz'))
    local_top = str(log_path.join('top_output.perf.log'))

    def clean_up_log_files(files):
        for clean_file in files:
            # Clean up collected log files as they can be huge in case of exception
            if os.path.exists(clean_file):
                logger.info('Removing: %s', clean_file)
                os.remove(clean_file)
    request.addfinalizer(lambda: clean_up_log_files([local_evm, local_evm_gz, local_top,
                                                     local_top_gz]))

    sleep_time = perf_tests['test_queue']['infra_time']
    logger.info('Waiting: %s', sleep_time)
    time.sleep(sleep_time)

    collect_log(ssh_client, 'evm', local_evm_gz)
    collect_log(ssh_client, 'top_output', local_top_gz, strip_whitespace=True)

    logger.info('Calling gunzip %s', local_evm_gz)
    subprocess.call(['gunzip', local_evm_gz])
    logger.info('Calling gunzip %s', local_top_gz)
    subprocess.call(['gunzip', local_top_gz])

    # Post process evm log and top_output log for charts and csvs
    perf_process_evm(local_evm, local_top)
Example 2: pytest_sessionfinish
def pytest_sessionfinish(self, exitstatus):
    # Now master/standalone needs to move all the reports to an appliance for the source report
    if store.parallelizer_role != 'master':
        manager().collect()
    # for slaves, everything is done at this point
    if store.parallelizer_role == 'slave':
        return
    # on master/standalone, merge all the collected reports and bring them back
    manager().merge()
    try:
        global ui_coverage_percent
        last_run = json.load(log_path.join('coverage', 'merged', '.last_run.json').open())
        ui_coverage_percent = last_run['result']['covered_percent']
        style = {'bold': True}
        if ui_coverage_percent > 40:
            style['green'] = True
        else:
            style['red'] = True
        store.write_line('UI Coverage Result: {}%'.format(ui_coverage_percent), **style)
    except Exception as ex:
        logger.error('Error printing coverage report to terminal')
        logger.exception(ex)
Example 3: run
def run(port, run_id=None):
    art_config = env.get('artifactor', {})
    art_config['server_port'] = int(port)
    art = Artifactor(None)
    if 'log_dir' not in art_config:
        art_config['log_dir'] = log_path.strpath
    if 'artifact_dir' not in art_config:
        art_config['artifact_dir'] = log_path.join('artifacts').strpath
    art.set_config(art_config)

    art.register_plugin(merkyl.Merkyl, "merkyl")
    art.register_plugin(logger.Logger, "logger")
    art.register_plugin(video.Video, "video")
    art.register_plugin(filedump.Filedump, "filedump")
    art.register_plugin(reporter.Reporter, "reporter")
    art.register_plugin(post_result.PostResult, "post-result")
    art.register_plugin(ostriz.Ostriz, "ostriz")
    initialize(art)

    art.configure_plugin('merkyl')
    art.configure_plugin('logger')
    art.configure_plugin('video')
    art.configure_plugin('filedump')
    art.configure_plugin('reporter')
    art.configure_plugin('post-result')
    art.configure_plugin('ostriz')
    art.fire_hook('start_session', run_id=run_id)
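This run helper reappears in Examples 5 and 8 with slightly different plugin sets. A hypothetical invocation (the port and run_id below are made up; the real script presumably reads them from the command line):

    # Start the artifactor server on an arbitrary free port.
    run(21212, run_id='nightly-01')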
Example 4: messages_to_statistics_csv
def messages_to_statistics_csv(messages, statistics_file_name):
    all_statistics = []
    for msg_id in messages:
        msg = messages[msg_id]

        added = False
        if len(all_statistics) > 0:
            for msg_statistics in all_statistics:
                if msg_statistics.cmd == msg.msg_cmd:
                    if msg.del_time > 0:
                        msg_statistics.delivertimes.append(float(msg.del_time))
                        msg_statistics.gets += 1
                    msg_statistics.dequeuetimes.append(float(msg.deq_time))
                    msg_statistics.totaltimes.append(float(msg.total_time))
                    msg_statistics.puts += 1
                    added = True
                    break
        if not added:
            msg_statistics = MiqMsgLists()
            msg_statistics.cmd = msg.msg_cmd
            if msg.del_time > 0:
                msg_statistics.delivertimes.append(float(msg.del_time))
                msg_statistics.gets = 1
            msg_statistics.dequeuetimes.append(float(msg.deq_time))
            msg_statistics.totaltimes.append(float(msg.total_time))
            msg_statistics.puts = 1
            all_statistics.append(msg_statistics)

    csvdata_path = log_path.join('csv_output', statistics_file_name)
    outputfile = csvdata_path.open('w', ensure=True)

    try:
        csvfile = csv.writer(outputfile)

        metrics = ['samples', 'min', 'avg', 'median', 'max', 'std', '90', '99']
        measurements = ['deq_time', 'del_time', 'total_time']
        headers = ['cmd', 'puts', 'gets']
        for measurement in measurements:
            for metric in metrics:
                headers.append('{}_{}'.format(measurement, metric))
        csvfile.writerow(headers)

        # Contents of CSV
        for msg_statistics in sorted(all_statistics, key=lambda x: x.cmd):
            if msg_statistics.gets > 1:
                logger.debug('Samples/Avg/90th/Std: {} : {} : {} : {},Cmd: {}'.format(
                    str(len(msg_statistics.totaltimes)).rjust(7),
                    str(round(numpy.average(msg_statistics.totaltimes), 3)).rjust(7),
                    str(round(numpy.percentile(msg_statistics.totaltimes, 90), 3)).rjust(7),
                    str(round(numpy.std(msg_statistics.totaltimes), 3)).rjust(7),
                    msg_statistics.cmd))
            stats = [msg_statistics.cmd, msg_statistics.puts, msg_statistics.gets]
            stats.extend(generate_statistics(msg_statistics.dequeuetimes, 3))
            stats.extend(generate_statistics(msg_statistics.delivertimes, 3))
            stats.extend(generate_statistics(msg_statistics.totaltimes, 3))
            csvfile.writerow(stats)
    finally:
        outputfile.close()
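The open('w', ensure=True) call above is a py.path convenience: it creates the missing csv_output directory before opening the file for writing. A small illustrative sketch (the file name is made up):

    demo_csv = log_path.join('csv_output', 'demo.csv')
    with demo_csv.open('w', ensure=True) as f:  # creates csv_output/ if absent
        f.write('cmd,puts,gets\n')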
Example 5: run
def run(port, run_id=None):
    art_config = env.get('artifactor', {})
    art_config['server_port'] = int(port)
    art = Artifactor(None)
    if 'log_dir' not in art_config:
        art_config['log_dir'] = log_path.join('artifacts').strpath
    art.set_config(art_config)

    art.register_plugin(merkyl.Merkyl, "merkyl")
    art.register_plugin(logger.Logger, "logger")
    art.register_plugin(video.Video, "video")
    art.register_plugin(filedump.Filedump, "filedump")
    art.register_plugin(softassert.SoftAssert, "softassert")
    art.register_plugin(reporter.Reporter, "reporter")
    art.register_plugin(post_result.PostResult, "post-result")
    art.register_hook_callback('filedump', 'pre', parse_setup_dir,
                               name="filedump_dir_setup")
    initialize(art)

    art.configure_plugin('merkyl')
    art.configure_plugin('logger')
    art.configure_plugin('video')
    art.configure_plugin('filedump')
    art.configure_plugin('softassert')
    art.configure_plugin('reporter')
    art.configure_plugin('post-result')
    art.fire_hook('start_session', run_id=run_id)
Example 6: update_template_log
def update_template_log(appliance_template, action, provider=None, failed_providers=None):
    try:
        trackerbot_ip = re.findall(r'[0-9]+(?:\.[0-9]+){3}', args.trackerbot_url)[0]
        creds = credentials['host_default']
        sshclient = make_ssh_client(trackerbot_ip, creds['username'], creds['password'])
        template_resultlog = log_path.join('template_result.log').strpath
        if action == 'create':
            command = 'mkdir -p /home/amavinag/{}'.format(appliance_template)
            sshclient.run_command(command)
            sshclient.put_file(template_resultlog, remote_file='/home/amavinag/{}/{}'.format(
                appliance_template, provider))
        if action == 'merge':
            with open(template_resultlog, 'w') as report:
                command = 'cd /home/amavinag/{}/&&cat {}'.format(appliance_template,
                                                                 ' '.join(failed_providers))
                status, output = sshclient.run_command(command)
                if 'No such file or directory' in output:
                    command = 'cd /home/amavinag/{}/&&cat *'.format(appliance_template)
                    status, output = sshclient.run_command(command)
                report.write(output)
        elif action == 'remove':
            sshclient.run_command('cd /home/amavinag/&&rm -rf {}'.format(
                appliance_template))
        sshclient.close()
    except Exception as e:
        print(e)
        return False
Example 7: pytest_collection_modifyitems
def pytest_collection_modifyitems(session, config, items):
    from fixtures.pytest_store import store
    len_collected = len(items)

    new_items = []
    from utils.path import log_path
    with log_path.join('uncollected.log').open('w') as f:
        for item in items:
            # First filter out all items that have the uncollect mark
            if item.get_marker('uncollect') or not uncollectif(item):
                # if an uncollect marker has been added,
                # give it priority for the explanation
                uncollect = item.get_marker('uncollect')
                marker = uncollect or item.get_marker('uncollectif')
                if marker:
                    reason = marker.kwargs.get('reason', "No reason given")
                else:
                    reason = None
                f.write("{} - {}\n".format(item.name, reason))
            else:
                new_items.append(item)
    items[:] = new_items

    len_filtered = len(items)
    filtered_count = len_collected - len_filtered
    store.uncollection_stats['uncollectif'] = filtered_count
Example 8: run
def run(port, run_id=None):
    art_config = env.get('artifactor', {})
    art_config['server_port'] = int(port)
    art = Artifactor(None)
    if 'log_dir' not in art_config:
        art_config['log_dir'] = log_path.join('artifacts').strpath
    art.set_config(art_config)

    art.register_plugin(merkyl.Merkyl, "merkyl")
    art.register_plugin(logger.Logger, "logger")
    art.register_plugin(video.Video, "video")
    art.register_plugin(filedump.Filedump, "filedump")
    art.register_plugin(reporter.Reporter, "reporter")
    art.register_hook_callback('filedump', 'pre', parse_setup_dir,
                               name="filedump_dir_setup")
    initialize(art)

    ip = urlparse(env['base_url']).hostname
    art.configure_plugin('merkyl', ip=ip)
    art.configure_plugin('logger')
    art.configure_plugin('video')
    art.configure_plugin('filedump')
    art.configure_plugin('reporter')
    art.fire_hook('start_session', run_id=run_id)
Example 9: pytest_sessionfinish
def pytest_sessionfinish(session, exitstatus):
    udf_log_file = log_path.join('unused_data_files.log')

    if udf_log_file.check():
        # Clean up old udf log if it exists
        udf_log_file.remove()

    if session.config.option.udf_report is False:
        # Short out here if not making a report
        return

    # Output an unused data files log after a test run
    data_files = set()
    for dirpath, dirnames, filenames in os.walk(str(data_path)):
        for filename in filenames:
            filepath = os.path.join(dirpath, filename)
            data_files.add(filepath)
    unused_data_files = data_files - seen_data_files

    if unused_data_files:
        # Write the log of unused data files out, minus the data dir prefix
        udf_log = ''.join(
            (line[len(str(data_path)):] + '\n' for line in unused_data_files)
        )
        udf_log_file.write(udf_log + '\n')

        # Throw a notice into the terminal reporter to check the log
        tr = reporter()
        tr.write_line('')
        tr.write_sep(
            '-',
            '%d unused data files after test run, check %s' % (
                len(unused_data_files), udf_log_file.basename
            )
        )
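Two more py.path conveniences appear in this example: check() tests for existence without raising, and basename gives the final path component. Illustrative only:

    udf_log = log_path.join('unused_data_files.log')
    if udf_log.check():           # True if the file exists
        print(udf_log.basename)   # 'unused_data_files.log'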
Example 10: parse_config
def parse_config(self):
    """
    Reads the config data and sets up values
    """
    if not self.config:
        return False
    self.log_dir = local(self.config.get('log_dir', log_path))
    self.log_dir.ensure(dir=True)
    self.artifact_dir = local(self.config.get('artifact_dir', log_path.join('artifacts')))
    self.artifact_dir.ensure(dir=True)
    self.logger = create_logger('artifactor', self.log_dir.join('artifactor.log').strpath)
    self.squash_exceptions = self.config.get('squash_exceptions', False)
    if not self.log_dir:
        print "!!! Log dir must be specified in yaml"
        sys.exit(127)
    if not self.artifact_dir:
        print "!!! Artifact dir must be specified in yaml"
        sys.exit(127)
    self.config['zmq_socket_address'] = 'tcp://127.0.0.1:{}'.format(random_port())
    self.setup_plugin_instances()
    self.start_server()
    self.global_data = {
        'artifactor_config': self.config,
        'log_dir': self.log_dir.strpath,
        'artifact_dir': self.artifact_dir.strpath,
        'artifacts': dict(),
        'old_artifacts': dict()
    }
Example 11: generate_hourly_charts_and_csvs
def generate_hourly_charts_and_csvs(hourly_buckets, charts_dir):
    for cmd in sorted(hourly_buckets):
        current_csv = 'hourly_' + cmd + '.csv'
        csv_rawdata_path = log_path.join('csv_output', current_csv)

        logger.info('Writing {} csvs/charts'.format(cmd))
        output_file = csv_rawdata_path.open('w', ensure=True)
        csvwriter = csv.DictWriter(output_file, fieldnames=MiqMsgBucket().headers,
                                   delimiter=',', quotechar='\'', quoting=csv.QUOTE_MINIMAL)
        csvwriter.writeheader()
        for dt in sorted(hourly_buckets[cmd].keys()):
            linechartxaxis = []
            avgdeqtimings = []
            mindeqtimings = []
            maxdeqtimings = []
            avgdeltimings = []
            mindeltimings = []
            maxdeltimings = []
            cmd_put = []
            cmd_get = []

            sortedhr = sorted(hourly_buckets[cmd][dt].keys())
            for hr in sortedhr:
                linechartxaxis.append(str(hr))
                bk = hourly_buckets[cmd][dt][hr]

                avgdeqtimings.append(round(bk.avg_deq, 2))
                mindeqtimings.append(round(bk.min_deq, 2))
                maxdeqtimings.append(round(bk.max_deq, 2))
                avgdeltimings.append(round(bk.avg_del, 2))
                mindeltimings.append(round(bk.min_del, 2))
                maxdeltimings.append(round(bk.max_del, 2))
                cmd_put.append(bk.total_put)
                cmd_get.append(bk.total_get)
                bk.date = dt
                bk.hour = hr
                csvwriter.writerow(dict(bk))

            lines = {}
            lines['Put ' + cmd] = cmd_put
            lines['Get ' + cmd] = cmd_get
            line_chart_render(cmd + ' Command Put/Get Count', 'Hour during ' + dt,
                              '# Count of Commands', linechartxaxis, lines,
                              charts_dir.join('/{}-{}-cmdcnt.svg'.format(cmd, dt)))

            lines = {}
            lines['Average Dequeue Timing'] = avgdeqtimings
            lines['Min Dequeue Timing'] = mindeqtimings
            lines['Max Dequeue Timing'] = maxdeqtimings
            line_chart_render(cmd + ' Dequeue Timings', 'Hour during ' + dt, 'Time (s)',
                              linechartxaxis, lines,
                              charts_dir.join('/{}-{}-dequeue.svg'.format(cmd, dt)))

            lines = {}
            lines['Average Deliver Timing'] = avgdeltimings
            lines['Min Deliver Timing'] = mindeltimings
            lines['Max Deliver Timing'] = maxdeltimings
            line_chart_render(cmd + ' Deliver Timings', 'Hour during ' + dt, 'Time (s)',
                              linechartxaxis, lines,
                              charts_dir.join('/{}-{}-deliver.svg'.format(cmd, dt)))
        output_file.close()
Example 12: _inc_test_count
def _inc_test_count(test):
    error = ""
    if 'statuses' in test:
        test_counts[test['statuses']['overall']] += 1
    else:
        error += str(test)
    with log_path.join('no_status.log').open('a') as f:
        f.write(error)
Example 13: pages_to_csv
def pages_to_csv(pages, file_name):
    csvdata_path = log_path.join('csv_output', file_name)
    outputfile = csvdata_path.open('w', ensure=True)
    csvwriter = csv.DictWriter(outputfile, fieldnames=PageStat().headers, delimiter=',',
                               quotechar='\'', quoting=csv.QUOTE_MINIMAL)
    csvwriter.writeheader()
    for page in pages:
        csvwriter.writerow(dict(page))
Example 14: generate_raw_data_csv
def generate_raw_data_csv(rawdata_dict, csv_file_name):
    csv_rawdata_path = log_path.join('csv_output', csv_file_name)
    output_file = csv_rawdata_path.open('w', ensure=True)
    # Note: indexing rawdata_dict.keys() only works on Python 2;
    # the headers come from one of the stored objects.
    csvwriter = csv.DictWriter(output_file,
                               fieldnames=rawdata_dict[rawdata_dict.keys()[0]].headers,
                               delimiter=',', quotechar='\'', quoting=csv.QUOTE_MINIMAL)
    csvwriter.writeheader()
    sorted_rd_keys = sorted(rawdata_dict.keys())
    for key in sorted_rd_keys:
        csvwriter.writerow(dict(rawdata_dict[key]))
Example 15: create_logger
def create_logger(logger_name, filename=None, max_file_size=None, max_backups=None):
    """Creates and returns the named logger

    If the logger already exists, it will be destroyed and recreated
    with the current config in env.yaml
    """
    # If the logger already exists, destroy it
    if logger_name in logging.root.manager.loggerDict:
        del logging.root.manager.loggerDict[logger_name]

    # Grab the logging conf
    conf = _load_conf(logger_name)

    log_path.ensure(dir=True)
    if filename:
        log_file = filename
    else:
        log_file = str(log_path.join("{}.log".format(logger_name)))

    # log_file is dynamic, so we can't use logging.config.dictConfig here without creating
    # a custom RotatingFileHandler class. At some point, we should do that, and move the
    # entire logging config into env.yaml
    file_formatter = logging.Formatter(conf["file_format"])
    file_handler = RotatingFileHandler(
        log_file,
        maxBytes=max_file_size or conf["max_file_size"],
        backupCount=max_backups or conf["max_file_backups"],
        encoding="utf8",
    )
    file_handler.setFormatter(file_formatter)

    logger = logging.getLogger(logger_name)
    logger.addHandler(file_handler)

    syslog_settings = _get_syslog_settings()
    if syslog_settings:
        lid = fauxfactory.gen_alphanumeric(8)
        fmt = "%(asctime)s [" + lid + "] %(message)s"
        syslog_formatter = SyslogMsecFormatter(fmt=fmt)
        syslog_handler = SysLogHandler(address=syslog_settings)
        syslog_handler.setFormatter(syslog_formatter)
        logger.addHandler(syslog_handler)

    logger.setLevel(conf["level"])
    if conf["errors_to_console"]:
        stream_formatter = logging.Formatter(conf["stream_format"])
        stream_handler = logging.StreamHandler()
        stream_handler.setLevel(logging.ERROR)
        stream_handler.setFormatter(stream_formatter)
        logger.addHandler(stream_handler)

    logger.addFilter(_RelpathFilter())
    return logger
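A hypothetical call, assuming _load_conf can supply the keys used above (file_format, max_file_size, and so on) for the given name:

    # Rotate at roughly 10 MB, keep five backups; the file lands under log_path.
    perf_logger = create_logger('perf', max_file_size=10 * 1024 * 1024, max_backups=5)
    perf_logger.info('written to %s', log_path.join('perf.log'))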