本文整理汇总了Python中mozlog.structured.reader.read函数的典型用法代码示例。如果您正苦于以下问题:Python read函数的具体用法?Python read怎么用?Python read使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了read函数的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: log_is_unstable
def log_is_unstable(self, log_f):
    """Check whether the wptrunner log in *log_f* shows unstable results
    for ``self.target``.

    Returns True if any (sub)test reported more than one distinct status,
    False when every (sub)test was consistent, or None when the log
    contained no status data at all (in which case the raw entries are
    dumped at debug level for inspection).
    """
    log_f.seek(0)

    # Map subtest name (None for the top-level test result) -> set of
    # distinct statuses observed for it.
    statuses = defaultdict(set)

    def handle_status(item):
        if item["test"] == self.target:
            statuses[item["subtest"]].add(item["status"])

    def handle_end(item):
        if item["test"] == self.target:
            statuses[None].add(item["status"])

    reader.each_log(reader.read(log_f),
                    {"test_status": handle_status,
                     "test_end": handle_end})

    logger.debug(str(statuses))

    if not statuses:
        logger.error("Didn't get any useful output from wptrunner")
        log_f.seek(0)
        for item in reader.read(log_f):
            logger.debug(item)
        return None

    # .values() (rather than the Python-2-only .itervalues()) keeps this
    # working on both Python 2 and Python 3.
    return any(len(item) > 1 for item in statuses.values())
示例2: test_handler
def test_handler(self):
    """A LogHandler subclass dispatches each log entry to the method named
    after its "action" key; verify both handler methods run exactly once.
    """
    data = [{"action": "action_0", "data": "data_0"},
            {"action": "action_1", "data": "data_1"}]
    f = self.to_file_like(data)

    # Make the TestCase visible inside the nested handler methods, where
    # ``self`` is rebound to the handler instance.
    test = self

    class ReaderTestHandler(reader.LogHandler):
        def __init__(self):
            self.action_0_count = 0
            self.action_1_count = 0

        def action_0(self, item):
            # assertEqual, not the deprecated assertEquals alias.
            test.assertEqual(item["action"], "action_0")
            self.action_0_count += 1

        def action_1(self, item):
            test.assertEqual(item["action"], "action_1")
            self.action_1_count += 1

    handler = ReaderTestHandler()
    reader.handle_log(reader.read(f), handler)

    self.assertEqual(handler.action_0_count, 1)
    self.assertEqual(handler.action_1_count, 1)
示例3: process_test_job
def process_test_job(data):
    """Download the structured log for the test job described by *data*,
    replay it into the configured storage backend, then delete the
    temporary download.

    *data* is expected to carry 'platform', 'buildtype', 'test' and a
    'blobber_files' mapping of file name -> URL.
    """
    global logger
    logger = logger or structuredlog.get_default_logger()

    build_name = "{}-{} {}".format(data['platform'], data['buildtype'], data['test'])
    logger.debug("now processing a '{}' job".format(build_name))

    # Find the first blobber file whose name is a known structured log.
    # .items() (not the Python-2-only .iteritems()) works on 2 and 3.
    log_url = None
    for name, url in data['blobber_files'].items():
        if name in settings['structured_log_names']:
            log_url = url
            break

    if log_url is None:
        # Fail loudly here rather than passing None into _download_log.
        raise ValueError("no structured log found for job '{}'".format(build_name))

    log_path = _download_log(log_url)
    try:
        backend = settings['datastore']
        db_args = config.database
        store = get_storage_backend(backend, **db_args)
        # TODO commit metadata about the test run
        handler = StoreResultsHandler(store)
        with open(log_path, 'r') as log:
            iterator = reader.read(log)
            reader.handle_log(iterator, handler)
    finally:
        # Always remove the downloaded file, even if processing failed.
        mozfile.remove(log_path)
示例4: to_json
def to_json(*log_files):
    """Feed every structured log file in *log_files* through a single
    LogHandler and return the accumulated result."""
    handler = LogHandler()
    for log_file in log_files:
        reader.handle_log(reader.read(log_file), handler)
    return handler.rv
示例5: get_statuses
def get_statuses(filenames):
    """Replay each named structured log into one StatusHandler and
    return the statuses it collected."""
    handler = StatusHandler()
    for name in filenames:
        with open(name) as log_file:
            reader.handle_log(reader.read(log_file), handler)
    return handler.statuses
示例6: get_test_failures
def get_test_failures(raw_log):
    """
    Return the list of test failures contained within a structured log file.
    """
    failures = []

    def test_status(data):
        # Collect every test_status entry that reports a failure.
        if data['status'] == 'FAIL':
            failures.append(data)

    with open(raw_log, 'r') as f:
        # XXX: bug 985606: map_action is a generator, so it must be
        # drained for the callbacks to actually run.
        list(reader.map_action(reader.read(f),
                               {"test_status": test_status}))
    return failures
示例7: main
def main(**kwargs):
    """Merge the structured logs named in kwargs["files"] into a single
    stream, written to kwargs["output"] (or stdout when that is None).

    Each input is read up to its suite_start event; the start events are
    validated and merged into one, the suite_end events are processed, and
    finally the remaining entries of every log are appended.
    """
    if kwargs["output"] is None:
        output = sys.stdout
        close_output = False
    else:
        output = open(kwargs["output"], "w")
        close_output = True

    # Keep handles so every opened file is closed (the original leaked
    # both the inputs and the output file).
    input_files = [open(filename, 'r') for filename in kwargs["files"]]
    try:
        readers = [read(f) for f in input_files]

        start_events = [process_until_suite_start(log_reader, output)
                        for log_reader in readers]
        validate_start_events(start_events)
        merged_start_event = merge_start_events(start_events)
        dump_entry(fill_process_info(merged_start_event), output)

        end_events = [process_until_suite_end(log_reader, output)
                      for log_reader in readers]
        # Emit a single suite_end; the first one stands in for all.
        dump_entry(fill_process_info(end_events[0]), output)

        for log_reader in readers:
            for entry in log_reader:
                dump_entry(entry, output)
    finally:
        for f in input_files:
            f.close()
        # Never close sys.stdout.
        if close_output:
            output.close()
示例8: parse_log
def parse_log(self):
    """
    Parse the entire log with ``mozlog``.

    This presumes that the log at ``log_url`` is a gzipped structured
    log generated by ``mozlog``.
    """
    handler = self.SummaryHandler()
    with self.get_log_handle(self.url) as in_f:
        # The handler raises StopIteration to cap how many error lines we
        # store in the artifact; treat that as "truncated", not a failure.
        truncated = False
        try:
            reader.handle_log(reader.read(in_f), handler)
        except StopIteration:
            truncated = True
        self.artifact["errors_truncated"] = truncated
    self.artifact["all_errors"] = handler.lines
示例9: record_results
def record_results(*log_files):
    """Each argument is a "product:logfile" pair; replay every log through
    a shared LogHandler and return its accumulated data, with the results
    mapping flattened to its values."""
    handler = LogHandler()

    # Split each "product:logfile" argument on the first colon only.
    pairs = [name.split(":", 1) for name in log_files]
    handler.set_products([product for product, _ in pairs])

    for product, logfile in pairs:
        handler.set_product(product)
        with open(logfile) as f:
            reader.handle_log(reader.read(f), handler)

    data = handler.data
    data["results"] = data["results"].values()
    return data
示例10: process_test_job
def process_test_job(data):
    """Download the structured log for the test job described by *data*,
    replay it into the configured storage backend, then clean up the
    temporary file."""
    global logger
    if not logger:
        logger = utils.get_logger(name='catalog-worker')

    build_name = "{}-{} {}".format(data['platform'], data['buildtype'], data['test'])
    logger.info("now processing a '{}' job".format(build_name))

    log_url = utils.get_structured_log(data['blobber_files'])
    log_path = _download_log(log_url)
    try:
        store = get_storage_backend(settings['datastore'], **config.database)
        # TODO commit metadata about the test run
        handler = StoreResultsHandler(store)
        with open(log_path, 'r') as log:
            reader.handle_log(reader.read(log), handler)
    finally:
        # Remove the downloaded log even if processing raised.
        mozfile.remove(log_path)
示例11: test_read
def test_read(self):
    """reader.read should yield back exactly the entries written out."""
    data = [{"action": "action_0", "data": "data_0"},
            {"action": "action_1", "data": "data_1"}]
    f = self.to_file_like(data)
    # assertEqual rather than the deprecated assertEquals alias.
    self.assertEqual(data, list(reader.read(f)))
示例12: update_from_log
def update_from_log(self, log_file):
    """Reset ``run_info`` and replay *log_file* through this object's
    ``action_map``."""
    self.run_info = None
    reader.each_log(reader.read(log_file), self.action_map)
示例13: parse_log
def parse_log(path):
    """Parse the structured log at *path* and return the regressions the
    results.LogHandler collected."""
    regression_handler = results.LogHandler()
    with open(path) as log_file:
        reader.handle_log(reader.read(log_file), regression_handler)
    return regression_handler.results
示例14: worker_thread
def worker_thread(action_map, context):
    """Consume structured-log entries from a DataStream built on *context*
    and dispatch each one through *action_map*."""
    stream = DataStream(context)
    try:
        reader.each_log(reader.read(stream), action_map)
    finally:
        # Close the socket even if log processing raised, so the thread
        # never leaks its connection (the original skipped close on error).
        stream.socket.close()