This article collects typical usage examples of the Python method bzt.utils.FileReader.get_lines. If you are wondering what FileReader.get_lines does, how exactly to use it, or want to see it in real code, the curated examples below should help. You can also read further about the containing class, bzt.utils.FileReader.
Below, 9 code examples of FileReader.get_lines are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
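All nine examples share one core pattern: construct a FileReader over a growing result file, then poll get_lines() repeatedly so each call yields only the lines appended since the previous call. Here is a minimal sketch of that pattern; the size/last_pass semantics described in the comments are inferred from the examples on this page rather than quoted from the bzt documentation, and the file name is hypothetical.

from bzt.utils import FileReader

reader = FileReader(filename="results.csv")  # hypothetical result file

# Inferred from the examples below: each call yields only the lines appended
# since the previous call, reading at most `size` bytes per call.
for line in reader.get_lines(size=1024 * 1024, last_pass=False):
    print(line.rstrip())

# Final poll (e.g. after the load tool exits): last_pass=True drains the rest.
for line in reader.get_lines(last_pass=True):
    print(line.rstrip())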
Example 1: DataLogReader
# Required import: from bzt.utils import FileReader [as alias]
# Or: from bzt.utils.FileReader import get_lines [as alias]
class DataLogReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(DataLogReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.concurrency = None

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if line.count(chr(0x1b)) != 2:  # skip garbage
                continue
            l_start = line.index('m') + 1
            l_end = line.index(chr(0x1b), l_start)
            line = line[l_start:l_end]
            log_vals = [val.strip() for val in line.split(',')]

            # _mark = log_vals[0]  # 0. current test mark, defined by --mark key
            # _http = log_vals[1]  # 1. http protocol
            _rstatus = log_vals[2]  # 2. response status code
            _etime = float(log_vals[3])  # 3. elapsed time (total time - connection time)
            _rsize = int(log_vals[4])  # 4. size of response
            _url = log_vals[5]  # 6. long or short URL value
            # _url_id = int(log_vals[7])  # 7. url number
            _tstamp = time.strptime(log_vals[7], "%Y-%m-%d %H:%M:%S")
            _tstamp = int(time.mktime(_tstamp))  # 8. moment of request sending

            _con_time = 0
            _latency = 0
            _error = None
            _concur = self.concurrency

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _rsize
Example 2: test_requests
# Required import: from bzt.utils import FileReader [as alias]
# Or: from bzt.utils.FileReader import get_lines [as alias]
def test_requests(self):
    self.configure(yaml.load(open(RESOURCES_DIR + "yaml/selenium_executor_requests.yml").read()))
    self.obj.prepare()
    self.obj.get_widget()
    self.obj.startup()
    while not self.obj.check():
        time.sleep(self.obj.engine.check_interval)
    self.obj.shutdown()

    reader = FileReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.csv"))
    lines = reader.get_lines(last_pass=True)

    self.assertEquals(4, len(list(lines)))
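Two details make this assertion work: get_lines() returns a generator, so wrapping it in list() is what actually forces the read, and last_pass=True appears to lift the per-call size cap so the whole remaining file is consumed at once — a reading inferred from how the readers below pass last_pass through from their final polling cycle.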
Example 3: TSVDataReader
# Required import: from bzt.utils import FileReader [as alias]
# Or: from bzt.utils.FileReader import get_lines [as alias]
class TSVDataReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(TSVDataReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.skipped_header = False
        self.concurrency = None
        self.url_label = None

    def setup(self, concurrency, url_label):
        self.concurrency = concurrency
        self.url_label = url_label

        return True

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if not self.skipped_header:
                self.skipped_header = True
                continue
            log_vals = [val.strip() for val in line.split('\t')]

            _error = None
            _rstatus = None

            _url = self.url_label
            _concur = self.concurrency
            _tstamp = int(log_vals[1])  # timestamp - moment of request sending
            _con_time = float(log_vals[2]) / 1000  # connection time
            _etime = float(log_vals[4]) / 1000  # elapsed time
            _latency = float(log_vals[5]) / 1000  # latency (aka waittime)
            _bytes = None

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _bytes
Example 4: PBenchKPIReader
# Required import: from bzt.utils import FileReader [as alias]
# Or: from bzt.utils.FileReader import get_lines [as alias]
class PBenchKPIReader(ResultsReader):
    """
    Class to read KPI

    :type stats_reader: PBenchStatsReader
    """

    def __init__(self, filename, parent_logger, stats_filename):
        super(PBenchKPIReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.stats_reader = PBenchStatsReader(stats_filename, parent_logger)

    def _read(self, last_pass=False):
        """
        Generator method that returns next portion of data

        :type last_pass: bool
        """

        def mcs2sec(val):
            return int(val) / 1000000.0

        self.stats_reader.read_file()

        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        fields = ("timeStamp", "label", "elapsed",
                  "Connect", "Send", "Latency", "Receive",
                  "internal",
                  "bsent", "brecv",
                  "opretcode", "responseCode")
        dialect = csv.excel_tab()
        rows = csv.DictReader(lines, fields, dialect=dialect)

        for row in rows:
            label = row["label"]

            try:
                rtm = mcs2sec(row["elapsed"])
                ltc = mcs2sec(row["Latency"])
                cnn = mcs2sec(row["Connect"])
                # NOTE: actually we have precise send and receive time here...
            except BaseException:
                raise ToolError("PBench reader: failed record: %s" % row)

            if row["opretcode"] != "0":
                error = strerror(int(row["opretcode"]))
                rcd = error
            else:
                error = None
                rcd = row["responseCode"]

            tstmp = int(float(row["timeStamp"]) + rtm)
            byte_count = int(row["brecv"])
            concur = 0
            yield tstmp, label, concur, rtm, cnn, ltc, rcd, error, '', byte_count

    def _calculate_datapoints(self, final_pass=False):
        for point in super(PBenchKPIReader, self)._calculate_datapoints(final_pass):
            concurrency = self.stats_reader.get_data(point[DataPoint.TIMESTAMP])

            for label_data in viewvalues(point[DataPoint.CURRENT]):
                label_data[KPISet.CONCURRENCY] = concurrency

            yield point
Example 5: DataLogReader
# Required import: from bzt.utils import FileReader [as alias]
# Or: from bzt.utils.FileReader import get_lines [as alias]
class DataLogReader(ResultsReader):
    """ Class to read KPI from data log """
    DELIMITER = ","
    DETAILS_REGEX = re.compile(r"worker\.(\S+) (.+) -> (\S+) (.+), (\d+) bytes")

    def __init__(self, filename, parent_logger):
        super(DataLogReader, self).__init__()
        self.report_by_url = False
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.idx = {}
        self.partial_buffer = ""
        self.start_time = 0
        self.end_time = 0
        self.concurrency = 0
        self.test_names = {}
        self.known_threads = set()

    def _read(self, last_pass=False):
        """
        Generator method that returns next portion of data

        :param last_pass:
        """
        self.log.debug("Reading grinder results...")
        self.lines = list(self.file.get_lines(size=1024 * 1024, last_pass=last_pass))

        lnum = None
        start = time.time()

        for lnum, line in enumerate(self.lines):
            if not self.idx:
                if not line.startswith('data.'):
                    self.__split(line)  # to capture early test name records
                    continue

                line = line[line.find(' '):]

                header_list = line.strip().split(self.DELIMITER)
                for _ix, field in enumerate(header_list):
                    self.idx[field.strip()] = _ix

            data_fields, worker_id = self.__split(line)
            if not data_fields:
                self.log.debug("Skipping line: %s", line.strip())
                continue

            yield self.parse_line(data_fields, worker_id, lnum)

        if lnum is not None:
            duration = time.time() - start
            if duration < 0.001:
                duration = 0.001
            self.log.debug("Log reading speed: %s lines/s", (lnum + 1) / duration)

    def parse_line(self, data_fields, worker_id, lnum):
        worker_id = worker_id.split('.')[1]
        t_stamp = int(int(data_fields[self.idx["Start time (ms since Epoch)"]]) / 1000.0)
        r_time = int(data_fields[self.idx["Test time"]]) / 1000.0
        latency = int(data_fields[self.idx["Time to first byte"]]) / 1000.0
        r_code = data_fields[self.idx["HTTP response code"]].strip()
        con_time = int(data_fields[self.idx["Time to resolve host"]]) / 1000.0
        con_time += int(data_fields[self.idx["Time to establish connection"]]) / 1000.0
        bytes_count = int(data_fields[self.idx["HTTP response length"]].strip())
        test_id = data_fields[self.idx["Test"]].strip()
        thread_id = worker_id + '/' + data_fields[self.idx["Thread"]].strip()

        if thread_id not in self.known_threads:
            self.known_threads.add(thread_id)
            self.concurrency += 1

        url, error_msg = self.__parse_prev_lines(worker_id, lnum, r_code, bytes_count)

        if int(data_fields[self.idx["Errors"]]) or int(data_fields[self.idx['HTTP response errors']]):
            if not error_msg:
                if r_code != '0':
                    error_msg = "HTTP %s" % r_code
                else:
                    error_msg = "Java exception calling TestRunner"
        else:
            error_msg = None  # suppress errors

        if self.report_by_url:
            label = url
        elif test_id in self.test_names:
            label = self.test_names[test_id]
        else:
            label = "Test #%s" % test_id

        source_id = ''  # maybe use worker_id somehow?
        return t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error_msg, source_id, bytes_count

    def __split(self, line):
        if not line.endswith("\n"):
            self.partial_buffer += line
            return None, None

        line = "%s%s" % (self.partial_buffer, line)
        self.partial_buffer = ""
#......... some code omitted here .........
Example 6: TsungStatsReader
# Required import: from bzt.utils import FileReader [as alias]
# Or: from bzt.utils.FileReader import get_lines [as alias]
class TsungStatsReader(ResultsReader):
    def __init__(self, tsung_basedir, parent_logger):
        super(TsungStatsReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.tsung_basedir = tsung_basedir
        self.stats_file = FileReader(parent_logger=self.log, file_opener=self.open_stats)
        self.log_file = FileReader(parent_logger=self.log, file_opener=self.open_log)
        self.delimiter = ";"
        self.partial_buffer = ""
        self.skipped_header = False
        self.concurrency = 0

    def open_stats(self, filename):
        return self.open_file(ext='dump')

    def open_log(self, filename):
        return self.open_file(ext='log')

    def open_file(self, ext):
        basedir_contents = os.listdir(self.tsung_basedir)

        if not basedir_contents:
            self.log.debug("Tsung artifacts not appeared yet")
            return

        if len(basedir_contents) != 1:
            self.log.warning("Multiple files in Tsung basedir %s, this shouldn't happen", self.tsung_basedir)
            return

        filename = os.path.join(self.tsung_basedir, basedir_contents[0], "tsung." + ext)

        if not os.path.isfile(filename):
            self.log.debug("File not appeared yet: %s", filename)
            return

        if not os.path.getsize(filename):
            self.log.debug("File is empty: %s", filename)
            return

        self.log.debug('Opening file: %s', filename)
        return open(filename, mode='rb')

    def _read_concurrency(self, last_pass):
        lines = self.log_file.get_lines(size=1024 * 1024, last_pass=last_pass)
        extractor = re.compile(r'^stats: users (\d+) (\d+)$')

        for line in lines:
            match = extractor.match(line.strip())
            if not match:
                continue
            self.concurrency = int(match.group(2))
            self.log.debug("Actual Tsung concurrency: %s", self.concurrency)

    def _read(self, last_pass=False):
        self.log.debug("Reading Tsung results")
        self._read_concurrency(last_pass)
        lines = self.stats_file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if not line.endswith("\n"):
                self.partial_buffer += line
                continue

            if not self.skipped_header and line.startswith("#"):
                self.skipped_header = True
                continue

            line = "%s%s" % (self.partial_buffer, line)
            self.partial_buffer = ""
            line = line.strip()
            fields = line.split(self.delimiter)

            tstamp = int(float(fields[0]))
            url = fields[4] + fields[5]
            rstatus = fields[6]
            rsize = int(fields[7])
            etime = float(fields[8]) / 1000
            trname = fields[9]
            error = fields[10] or None

            con_time = 0
            latency = 0

            yield tstamp, url, self.concurrency, etime, con_time, latency, rstatus, error, trname, rsize
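Example 6 is also the clearest illustration of FileReader's file_opener hook: instead of a fixed filename, the reader gets a callback that is invoked until it returns a real file object, which lets Taurus start polling before the tool has even created its output. Here is a minimal sketch of the same idea; the path and readiness checks are hypothetical, and the retry-on-falsy behavior is inferred from open_file() above returning None while the file is absent or empty.

import os

from bzt.utils import FileReader

def open_when_ready(filename):
    # hypothetical path; Example 6 derives it from the tool's base directory
    path = "/tmp/results/output.log"
    if not os.path.isfile(path) or not os.path.getsize(path):
        return None  # not there yet; FileReader should retry on a later read
    return open(path, mode='rb')

reader = FileReader(file_opener=open_when_ready)
for line in reader.get_lines(size=1024 * 1024):
    print(line.rstrip())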
Example 7: ApiritifNoseExecutor
# Required import: from bzt.utils import FileReader [as alias]
# Or: from bzt.utils.FileReader import get_lines [as alias]
#......... some code omitted here .........
                                           wd_addr=remote, test_mode=test_mode)
        builder.build_source_code()
        builder.save(filename)
        if isinstance(self.engine.aggregator, ConsolidatingAggregator) and isinstance(builder, ApiritifScriptGenerator):
            self.engine.aggregator.ignored_labels.extend(builder.service_methods)
        return filename

    def startup(self):
        executable = self.settings.get("interpreter", sys.executable)

        report_type = ".ldjson" if self.engine.is_functional_mode() else ".csv"
        report_tpl = self.engine.create_artifact("apiritif", ".") + "%s" + report_type
        cmdline = [executable, "-m", "apiritif.loadgen", '--result-file-template', report_tpl]

        load = self.get_load()
        if load.concurrency:
            cmdline += ['--concurrency', str(load.concurrency)]

        if load.iterations:
            cmdline += ['--iterations', str(load.iterations)]

        if load.hold:
            cmdline += ['--hold-for', str(load.hold)]

        if load.ramp_up:
            cmdline += ['--ramp-up', str(load.ramp_up)]

        if load.steps:
            cmdline += ['--steps', str(load.steps)]

        if self.__is_verbose():
            cmdline += ['--verbose']

        cmdline += [self.script]
        self.process = self._execute(cmdline)
        self._tailer = FileReader(filename=self.stdout.name, parent_logger=self.log)

    def has_results(self):
        if not self.reader:
            return False
        return self.reader.read_records

    @staticmethod
    def _normalize_label(label):
        for char in ":/":
            if char in label:
                label = label.replace(char, '_')
        return label

    def _check_stdout(self):
        for line in self._tailer.get_lines():
            if "Adding worker" in line:
                marker = "results="
                pos = line.index(marker)
                fname = line[pos + len(marker):].strip()
                self.log.debug("Adding result reader for %s", fname)
                self.reader.register_file(fname)
            elif "Transaction started" in line:
                colon = line.index('::')
                values = {
                    part.split('=')[0]: part.split('=')[1]
                    for part in line[colon + 2:].strip().split(',')
                }
                label = self._normalize_label(values['name'])
                start_time = float(values['start_time'])
                self.transaction_started(label, start_time)
            elif "Transaction ended" in line:
                colon = line.index('::')
                values = {
                    part.split('=')[0]: part.split('=')[1]
                    for part in line[colon + 2:].strip().split(',')
                }
                label = self._normalize_label(values['name'])
                duration = float(values['duration'])
                self.transaction_ended(label, duration)

    def check(self):
        self._check_stdout()
        return super(ApiritifNoseExecutor, self).check()

    def __log_lines(self):
        lines = []
        for line in self._tailer.get_lines():
            if not IGNORED_LINE.match(line):
                lines.append(line)

        if lines:
            self.log.info("\n".join(lines))

    def post_process(self):
        self._check_stdout()
        self.__log_lines()
        self._tailer.close()
        super(ApiritifNoseExecutor, self).post_process()

    def __is_verbose(self):
        engine_verbose = self.engine.config.get(SETTINGS).get("verbose", False)
        executor_verbose = self.settings.get("verbose", engine_verbose)
        return executor_verbose
Example 8: PyTestExecutor
# Required import: from bzt.utils import FileReader [as alias]
# Or: from bzt.utils.FileReader import get_lines [as alias]
class PyTestExecutor(SubprocessedExecutor, HavingInstallableTools):
    def __init__(self):
        super(PyTestExecutor, self).__init__()
        self.runner_path = os.path.join(RESOURCES_DIR, "pytest_runner.py")
        self._tailer = FileReader('', file_opener=lambda _: None, parent_logger=self.log)
        self._additional_args = []

    def prepare(self):
        super(PyTestExecutor, self).prepare()
        self.install_required_tools()
        self.script = self.get_script_path()
        if not self.script:
            raise TaurusConfigError("'script' should be present for pytest executor")

        scenario = self.get_scenario()
        if "additional-args" in scenario:
            argv = scenario.get("additional-args")
            self._additional_args = shlex.split(argv)

        self.reporting_setup(suffix=".ldjson")

    def __is_verbose(self):
        engine_verbose = self.engine.config.get(SETTINGS).get("verbose", False)
        executor_verbose = self.settings.get("verbose", engine_verbose)
        return executor_verbose

    def install_required_tools(self):
        """
        we need installed nose plugin
        """
        if sys.version >= '3':
            self.log.warning("You are using Python 3, make sure that your scripts are able to run in Python 3")

        self._check_tools([self._get_tool(TaurusPytestRunner, tool_path=self.runner_path)])

    def startup(self):
        """
        run python tests
        """
        executable = self.settings.get("interpreter", sys.executable)

        cmdline = [executable, self.runner_path, '--report-file', self.report_file]

        load = self.get_load()
        if load.iterations:
            cmdline += ['-i', str(load.iterations)]

        if load.hold:
            cmdline += ['-d', str(load.hold)]

        cmdline += self._additional_args
        cmdline += [self.script]
        self.process = self._execute(cmdline)

        if self.__is_verbose():
            self._tailer = FileReader(filename=self.stdout.name, parent_logger=self.log)

    def check(self):
        self.__log_lines()
        return super(PyTestExecutor, self).check()

    def post_process(self):
        super(PyTestExecutor, self).post_process()
        self.__log_lines()

    def __log_lines(self):
        lines = []
        for line in self._tailer.get_lines():
            if not IGNORED_LINE.match(line):
                lines.append(line)

        if lines:
            self.log.info("\n".join(lines))
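Examples 7 and 8 show a second idiom: pointing FileReader at a live subprocess's stdout file and calling get_lines() on every check() tick, so marker lines ("Adding worker", "Transaction started") are parsed while the process is still running. Note the defensive default in PyTestExecutor.__init__: the tailer starts as FileReader('', file_opener=lambda _: None, ...), so get_lines() yields nothing until verbose mode swaps in a reader bound to the real stdout file.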
Example 9: DataLogReader
# Required import: from bzt.utils import FileReader [as alias]
# Or: from bzt.utils.FileReader import get_lines [as alias]
#......... some code omitted here .........
        if fields[3].strip() == "START":
            self.concurrency += 1
        elif fields[3].strip() == "END":
            self.concurrency -= 1

        if fields[0].strip() != "REQUEST":
            return None

        label = fields[4]
        t_stamp = int(fields[6]) / 1000.0

        r_time = (int(fields[6]) - int(fields[5])) / 1000.0
        latency = 0.0
        con_time = 0.0

        if fields[7] == 'OK':
            r_code = '200'
        else:
            _tmp_rc = fields[-1].split(" ")[-1]
            r_code = _tmp_rc if _tmp_rc.isdigit() else 'No RC'

        if len(fields) >= 9 and fields[8]:
            error = fields[8]
        else:
            error = None

        return int(t_stamp), label, r_time, con_time, latency, r_code, error

    def _guess_gatling_version(self, fields):
        if fields[0].strip() in ["USER", "REQUEST", "RUN"]:
            self.log.debug("Parsing Gatling 2.2+ stats")
            return "2.2+"
        elif len(fields) >= 3 and fields[2].strip() in ["USER", "REQUEST", "RUN"]:
            self.log.debug("Parsing Gatling 2.1 stats")
            return "2.1"
        else:
            return None

    def _extract_log_data(self, fields):
        if self.guessed_gatling_version is None:
            self.guessed_gatling_version = self._guess_gatling_version(fields)

        if self.guessed_gatling_version == "2.1":
            return self._extract_log_gatling_21(fields)
        elif self.guessed_gatling_version == "2.2+":
            return self._extract_log_gatling_22(fields)
        else:
            return None

    def _read(self, last_pass=False):
        """
        Generator method that returns next portion of data

        :param last_pass:
        """
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if not line.endswith("\n"):
                self.partial_buffer += line
                continue

            line = "%s%s" % (self.partial_buffer, line)
            self.partial_buffer = ""

            line = line.strip()
            fields = line.split(self.delimiter)

            data = self._extract_log_data(fields)
            if data is None:
                continue

            t_stamp, label, r_time, con_time, latency, r_code, error = data
            bytes_count = None
            yield t_stamp, label, self.concurrency, r_time, con_time, latency, r_code, error, '', bytes_count

    def open_fds(self, filename):
        """
        open gatling simulation.log
        """
        if os.path.isdir(self.basedir):
            prog = re.compile("^%s-[0-9]+$" % self.dir_prefix)

            for fname in os.listdir(self.basedir):
                if prog.match(fname):
                    filename = os.path.join(self.basedir, fname, "simulation.log")
                    break

            if not filename or not os.path.isfile(filename):
                self.log.debug('simulation.log not found')
                return
        elif os.path.isfile(self.basedir):
            filename = self.basedir
        else:
            self.log.debug('Path not found: %s', self.basedir)
            return

        if not os.path.getsize(filename):
            self.log.debug('simulation.log is empty')
        else:
            return open(filename, 'rb')