This article collects typical usage examples of the Python class bzt.utils.FileReader. If you are wondering what exactly FileReader does, how to use it, or what it looks like in real code, the curated examples below should help.
The following shows fifteen code examples of the FileReader class, sorted by popularity by default.
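To orient the examples, here is a minimal sketch of the FileReader API they all rely on (a sketch only, assuming bzt is installed; the logger name and "results.csv" are hypothetical): the constructor takes a filename or a file_opener callback plus a parent_logger, and get_lines()/get_bytes() return what was appended to the file since the previous call.

import logging

from bzt.utils import FileReader

log = logging.getLogger("demo")  # hypothetical logger name
reader = FileReader(filename="results.csv", parent_logger=log)  # hypothetical file

# get_lines() yields only newly appended lines on each call;
# last_pass=True drains whatever remains in the file.
for line in reader.get_lines(last_pass=True):
    print(line.strip())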

Example 1: __init__
def __init__(self, tsung_basedir, parent_logger):
    super(TsungStatsReader, self).__init__()
    self.log = parent_logger.getChild(self.__class__.__name__)
    self.tsung_basedir = tsung_basedir
    self.stats_file = FileReader(parent_logger=self.log, file_opener=self.open_stats)
    self.log_file = FileReader(parent_logger=self.log, file_opener=self.open_log)
    self.delimiter = ";"
    self.partial_buffer = ""
    self.skipped_header = False
    self.concurrency = 0
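Example 1 (and Example 14 below, which shows the full class) passes a file_opener callback instead of a fixed filename. A minimal sketch of that pattern, assuming the reader polls the opener until it returns a file object (the opener name and path here are hypothetical):

import logging
import os

from bzt.utils import FileReader

def open_when_ready(filename):  # hypothetical opener; FileReader passes its filename attribute
    path = "artifacts/tool.log"  # hypothetical path resolved at open time
    if os.path.isfile(path) and os.path.getsize(path):
        return open(path, mode='rb')
    return None  # file not there yet; the reader will try again later

reader = FileReader(file_opener=open_when_ready,
                    parent_logger=logging.getLogger("demo"))

This is useful when the file to tail is produced by another tool and does not exist at construction time, as Example 14's open_file() demonstrates in practice.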

Example 2: test_requests
def test_requests(self):
    self.configure(yaml.load(open(RESOURCES_DIR + "yaml/selenium_executor_requests.yml").read()))
    self.obj.prepare()
    self.obj.get_widget()
    self.obj.startup()
    while not self.obj.check():
        time.sleep(self.obj.engine.check_interval)
    self.obj.shutdown()

    reader = FileReader(os.path.join(self.obj.engine.artifacts_dir, "apiritif-0.csv"))
    lines = reader.get_lines(last_pass=True)
    self.assertEquals(4, len(list(lines)))

Example 3: __init__
def __init__(self, filename, parent_logger):
    super(PBenchStatsReader, self).__init__()
    self.log = parent_logger.getChild(self.__class__.__name__)
    self.file = FileReader(filename=filename, parent_logger=self.log)
    self.buffer = ''
    self.data = {}
    self.last_data = 0

Example 4: __init__
def __init__(self, filename, parent_logger):
    super(TSVDataReader, self).__init__()
    self.log = parent_logger.getChild(self.__class__.__name__)
    self.file = FileReader(filename=filename, parent_logger=self.log)
    self.skipped_header = False
    self.concurrency = None
    self.url_label = None

Example 5: DataLogReader
class DataLogReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(DataLogReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.concurrency = None

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if line.count(chr(0x1b)) != 2:  # skip garbage
                continue
            l_start = line.index('m') + 1
            l_end = line.index(chr(0x1b), l_start)
            line = line[l_start:l_end]
            log_vals = [val.strip() for val in line.split(',')]

            # _mark = log_vals[0]          # 0. current test mark, defined by --mark key
            # _http = log_vals[1]          # 1. http protocol
            _rstatus = log_vals[2]         # 2. response status code
            _etime = float(log_vals[3])    # 3. elapsed time (total time - connection time)
            _rsize = int(log_vals[4])      # 4. size of response
            _url = log_vals[5]             # 5. long or short URL value
            # _url_id = int(log_vals[6])   # 6. url number
            _tstamp = time.strptime(log_vals[7], "%Y-%m-%d %H:%M:%S")
            _tstamp = int(time.mktime(_tstamp))  # 7. moment of request sending

            _con_time = 0
            _latency = 0
            _error = None
            _concur = self.concurrency

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _rsize

Example 6: startup
def startup(self):
    executable = self.settings.get("interpreter", sys.executable)

    report_type = ".ldjson" if self.engine.is_functional_mode() else ".csv"
    report_tpl = self.engine.create_artifact("apiritif", ".") + "%s" + report_type
    cmdline = [executable, "-m", "apiritif.loadgen", '--result-file-template', report_tpl]

    load = self.get_load()
    if load.concurrency:
        cmdline += ['--concurrency', str(load.concurrency)]
    if load.iterations:
        cmdline += ['--iterations', str(load.iterations)]
    if load.hold:
        cmdline += ['--hold-for', str(load.hold)]
    if load.ramp_up:
        cmdline += ['--ramp-up', str(load.ramp_up)]
    if load.steps:
        cmdline += ['--steps', str(load.steps)]

    if self.__is_verbose():
        cmdline += ['--verbose']

    cmdline += [self.script]
    self.process = self._execute(cmdline)
    self._tailer = FileReader(filename=self.stdout.name, parent_logger=self.log)

Example 7: __init__
def __init__(self, basedir, parent_logger, dir_prefix):
    super(DataLogReader, self).__init__()
    self.concurrency = 0
    self.log = parent_logger.getChild(self.__class__.__name__)
    self.basedir = basedir
    self.file = FileReader(file_opener=self.open_fds, parent_logger=self.log)
    self.partial_buffer = ""
    self.delimiter = "\t"
    self.dir_prefix = dir_prefix
    self.guessed_gatling_version = None

Example 8: __init__
def __init__(self, filename, parent_logger):
    super(DataLogReader, self).__init__()
    self.report_by_url = False
    self.log = parent_logger.getChild(self.__class__.__name__)
    self.file = FileReader(filename=filename, parent_logger=self.log)
    self.idx = {}
    self.partial_buffer = ""
    self.start_time = 0
    self.end_time = 0
    self.concurrency = 0
    self.test_names = {}
    self.known_threads = set()

Example 9: __init__
def __init__(self, filename, num_slaves, parent_logger):
    """
    :type filename: str
    :type num_slaves: int
    :type parent_logger: logging.Logger
    """
    super(SlavesReader, self).__init__()
    self.log = parent_logger.getChild(self.__class__.__name__)
    self.join_buffer = {}
    self.num_slaves = num_slaves
    self.file = FileReader(filename=filename, parent_logger=self.log)
    self.read_buffer = ""

Example 10: PBenchStatsReader
class PBenchStatsReader(object):
    MARKER = "\n},"

    def __init__(self, filename, parent_logger):
        super(PBenchStatsReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.buffer = ''
        self.data = {}
        self.last_data = 0

    def read_file(self):
        _bytes = self.file.get_bytes()
        if _bytes:
            self.buffer += _bytes

        while self.MARKER in self.buffer:
            idx = self.buffer.find(self.MARKER) + len(self.MARKER)
            chunk_str = self.buffer[:idx - 1]
            self.buffer = self.buffer[idx + 1:]
            chunk = json.loads("{%s}" % chunk_str)

            for date_str in chunk.keys():
                statistics = chunk[date_str]

                date_obj = datetime.datetime.strptime(date_str.split(".")[0], '%Y-%m-%d %H:%M:%S')
                date = int(time.mktime(date_obj.timetuple()))
                self.data[date] = 0

                for benchmark_name in statistics.keys():
                    if not benchmark_name.startswith("benchmark_io"):
                        continue
                    benchmark = statistics[benchmark_name]
                    for method in benchmark:
                        meth_obj = benchmark[method]
                        if "mmtasks" in meth_obj:
                            self.data[date] += meth_obj["mmtasks"][2]

                self.log.debug("Active instances stats for %s: %s", date, self.data[date])

    def get_data(self, tstmp):
        if tstmp in self.data:
            self.last_data = self.data[tstmp]
            return self.data[tstmp]
        else:
            self.log.debug("No active instances info for %s", tstmp)
            return self.last_data
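The pattern above is worth noting: read_file() consumes only complete JSON chunks from a growing stats file, splitting on the MARKER delimiter and buffering the rest, while get_data() falls back to the last seen value when a timestamp has no entry yet. A hypothetical driving loop (the filename and timestamp are made up for illustration):

import logging

# Hypothetical usage of the PBenchStatsReader from Example 10.
stats = PBenchStatsReader("pbench-stats.json", logging.getLogger("demo"))
stats.read_file()                   # parse whatever complete chunks have arrived so far
print(stats.get_data(1500000000))   # active-instances count for that second, or last known value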

Example 11: TSVDataReader
class TSVDataReader(ResultsReader):
    def __init__(self, filename, parent_logger):
        super(TSVDataReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.skipped_header = False
        self.concurrency = None
        self.url_label = None

    def setup(self, concurrency, url_label):
        self.concurrency = concurrency
        self.url_label = url_label

        return True

    def _read(self, last_pass=False):
        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if not self.skipped_header:
                self.skipped_header = True
                continue
            log_vals = [val.strip() for val in line.split('\t')]

            _error = None
            _rstatus = None

            _url = self.url_label
            _concur = self.concurrency
            _tstamp = int(log_vals[1])             # timestamp - moment of request sending
            _con_time = float(log_vals[2]) / 1000  # connection time
            _etime = float(log_vals[4]) / 1000     # elapsed time
            _latency = float(log_vals[5]) / 1000   # latency (aka waittime)
            _bytes = None

            yield _tstamp, _url, _concur, _etime, _con_time, _latency, _rstatus, _error, '', _bytes

Example 12: __init__
def __init__(self):
    super(PyTestExecutor, self).__init__()
    self.runner_path = os.path.join(RESOURCES_DIR, "pytest_runner.py")
    self._tailer = FileReader('', file_opener=lambda _: None, parent_logger=self.log)
    self._additional_args = []

Example 13: SlavesReader
class SlavesReader(ResultsProvider):
    def __init__(self, filename, num_slaves, parent_logger):
        """
        :type filename: str
        :type num_slaves: int
        :type parent_logger: logging.Logger
        """
        super(SlavesReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.join_buffer = {}
        self.num_slaves = num_slaves
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.read_buffer = ""

    def _calculate_datapoints(self, final_pass=False):
        read = self.file.get_bytes(size=1024 * 1024, last_pass=final_pass)
        if not read or not read.strip():
            return
        self.read_buffer += read
        while "\n" in self.read_buffer:
            _line = self.read_buffer[:self.read_buffer.index("\n") + 1]
            self.read_buffer = self.read_buffer[len(_line):]
            self.fill_join_buffer(json.loads(_line))

        max_full_ts = self.get_max_full_ts()

        if max_full_ts is not None:
            for point in self.merge_datapoints(max_full_ts):
                yield point

    def merge_datapoints(self, max_full_ts):
        reader_id = self.file.name + "@" + str(id(self))
        for key in sorted(self.join_buffer.keys(), key=int):
            if int(key) <= max_full_ts:
                sec_data = self.join_buffer.pop(key)
                self.log.debug("Processing complete second: %s", key)
                point = DataPoint(int(key))
                point[DataPoint.SOURCE_ID] = reader_id
                for sid, item in iteritems(sec_data):
                    point.merge_point(self.point_from_locust(key, sid, item))
                point.recalculate()
                yield point

    def get_max_full_ts(self):
        max_full_ts = None
        for key in sorted(self.join_buffer.keys(), key=int):
            if len(self.join_buffer[key]) >= self.num_slaves:  # all slaves reported for this second
                max_full_ts = int(key)
        return max_full_ts

    def fill_join_buffer(self, data):
        self.log.debug("Got slave data: %s", data)
        for stats_item in data['stats']:
            for timestamp in stats_item['num_reqs_per_sec'].keys():
                if timestamp not in self.join_buffer:
                    self.join_buffer[timestamp] = {}
                self.join_buffer[timestamp][data['client_id']] = data

    @staticmethod
    def point_from_locust(timestamp, sid, data):
        """
        :type timestamp: str
        :type sid: str
        :type data: dict
        :rtype: DataPoint
        """
        point = DataPoint(int(timestamp))
        point[DataPoint.SOURCE_ID] = sid
        overall = KPISet()
        for item in data['stats']:
            if timestamp not in item['num_reqs_per_sec']:
                continue

            kpiset = KPISet()
            kpiset[KPISet.SAMPLE_COUNT] = item['num_reqs_per_sec'][timestamp]
            kpiset[KPISet.CONCURRENCY] = data['user_count']
            kpiset[KPISet.BYTE_COUNT] = item['total_content_length']
            if item['num_requests']:
                avg_rt = (item['total_response_time'] / 1000.0) / item['num_requests']
                kpiset.sum_rt = item['num_reqs_per_sec'][timestamp] * avg_rt

            for err in data['errors'].values():
                if err['name'] == item['name']:
                    new_err = KPISet.error_item_skel(err['error'], None, err['occurences'], KPISet.ERRTYPE_ERROR,
                                                     Counter(), None)
                    KPISet.inc_list(kpiset[KPISet.ERRORS], ("msg", err['error']), new_err)
                    kpiset[KPISet.FAILURES] += err['occurences']

            kpiset[KPISet.SUCCESSES] = kpiset[KPISet.SAMPLE_COUNT] - kpiset[KPISet.FAILURES]
            point[DataPoint.CURRENT][item['name']] = kpiset
            overall.merge_kpis(kpiset, sid)

        point[DataPoint.CURRENT][''] = overall
        point.recalculate()
        return point

Example 14: TsungStatsReader
class TsungStatsReader(ResultsReader):
    def __init__(self, tsung_basedir, parent_logger):
        super(TsungStatsReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.tsung_basedir = tsung_basedir
        self.stats_file = FileReader(parent_logger=self.log, file_opener=self.open_stats)
        self.log_file = FileReader(parent_logger=self.log, file_opener=self.open_log)
        self.delimiter = ";"
        self.partial_buffer = ""
        self.skipped_header = False
        self.concurrency = 0

    def open_stats(self, filename):
        return self.open_file(ext='dump')

    def open_log(self, filename):
        return self.open_file(ext='log')

    def open_file(self, ext):
        basedir_contents = os.listdir(self.tsung_basedir)

        if not basedir_contents:
            self.log.debug("Tsung artifacts not appeared yet")
            return

        if len(basedir_contents) != 1:
            self.log.warning("Multiple files in Tsung basedir %s, this shouldn't happen", self.tsung_basedir)
            return

        filename = os.path.join(self.tsung_basedir, basedir_contents[0], "tsung." + ext)

        if not os.path.isfile(filename):
            self.log.debug("File not appeared yet: %s", filename)
            return

        if not os.path.getsize(filename):
            self.log.debug("File is empty: %s", filename)
            return

        self.log.debug('Opening file: %s', filename)
        return open(filename, mode='rb')

    def _read_concurrency(self, last_pass):
        lines = self.log_file.get_lines(size=1024 * 1024, last_pass=last_pass)

        extractor = re.compile(r'^stats: users (\d+) (\d+)$')

        for line in lines:
            match = extractor.match(line.strip())
            if not match:
                continue
            self.concurrency = int(match.group(2))
            self.log.debug("Actual Tsung concurrency: %s", self.concurrency)

    def _read(self, last_pass=False):
        self.log.debug("Reading Tsung results")
        self._read_concurrency(last_pass)
        lines = self.stats_file.get_lines(size=1024 * 1024, last_pass=last_pass)

        for line in lines:
            if not line.endswith("\n"):
                self.partial_buffer += line
                continue

            if not self.skipped_header and line.startswith("#"):
                self.skipped_header = True
                continue

            line = "%s%s" % (self.partial_buffer, line)
            self.partial_buffer = ""
            line = line.strip()
            fields = line.split(self.delimiter)

            tstamp = int(float(fields[0]))
            url = fields[4] + fields[5]
            rstatus = fields[6]
            rsize = int(fields[7])
            etime = float(fields[8]) / 1000
            trname = fields[9]
            error = fields[10] or None

            con_time = 0
            latency = 0

            yield tstamp, url, self.concurrency, etime, con_time, latency, rstatus, error, trname, rsize

Example 15: PBenchKPIReader
class PBenchKPIReader(ResultsReader):
    """
    Class to read KPI

    :type stats_reader: PBenchStatsReader
    """

    def __init__(self, filename, parent_logger, stats_filename):
        super(PBenchKPIReader, self).__init__()
        self.log = parent_logger.getChild(self.__class__.__name__)
        self.file = FileReader(filename=filename, parent_logger=self.log)
        self.stats_reader = PBenchStatsReader(stats_filename, parent_logger)

    def _read(self, last_pass=False):
        """
        Generator method that returns next portion of data

        :type last_pass: bool
        """

        def mcs2sec(val):
            return int(val) / 1000000.0

        self.stats_reader.read_file()

        lines = self.file.get_lines(size=1024 * 1024, last_pass=last_pass)

        fields = ("timeStamp", "label", "elapsed",
                  "Connect", "Send", "Latency", "Receive",
                  "internal",
                  "bsent", "brecv",
                  "opretcode", "responseCode")
        dialect = csv.excel_tab()

        rows = csv.DictReader(lines, fields, dialect=dialect)

        for row in rows:
            label = row["label"]

            try:
                rtm = mcs2sec(row["elapsed"])
                ltc = mcs2sec(row["Latency"])
                cnn = mcs2sec(row["Connect"])
                # NOTE: actually we have precise send and receive time here...
            except BaseException:
                raise ToolError("PBench reader: failed record: %s" % row)

            if row["opretcode"] != "0":
                error = strerror(int(row["opretcode"]))
                rcd = error
            else:
                error = None
                rcd = row["responseCode"]

            tstmp = int(float(row["timeStamp"]) + rtm)
            byte_count = int(row["brecv"])
            concur = 0
            yield tstmp, label, concur, rtm, cnn, ltc, rcd, error, '', byte_count

    def _calculate_datapoints(self, final_pass=False):
        for point in super(PBenchKPIReader, self)._calculate_datapoints(final_pass):
            concurrency = self.stats_reader.get_data(point[DataPoint.TIMESTAMP])

            for label_data in viewvalues(point[DataPoint.CURRENT]):
                label_data[KPISet.CONCURRENCY] = concurrency

            yield point