This page collects typical usage examples of the Python method pants.goal.aggregated_timings.AggregatedTimings.add_timing. If you are wondering what AggregatedTimings.add_timing does, how to call it, or where to find working examples, the curated samples below may help. You can also read more about the containing class, pants.goal.aggregated_timings.AggregatedTimings.
The following shows 5 code examples of the AggregatedTimings.add_timing method, sorted by popularity by default.
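Before the full examples, here is a minimal sketch of the method's basic contract as the examples below exercise it: repeated add_timing calls with the same label accumulate into a single aggregated entry, and get_all() returns one dict per label. This assumes a pants checkout on the PYTHONPATH; the labels and durations are hypothetical, and the shape of the get_all() entries is inferred from Example 1 rather than from library documentation.
# Minimal sketch of the add_timing contract; labels and durations are
# hypothetical, and the get_all() entry keys are inferred from Example 1.
from pants.goal.aggregated_timings import AggregatedTimings

timings = AggregatedTimings()
timings.add_timing('main:compile', 1.5)
timings.add_timing('main:compile', 0.5)              # same label: timings accumulate
timings.add_timing('main:junit', 2.0, is_tool=True)  # optional is_tool flag, as in Example 2

for entry in timings.get_all():
  print(entry['label'], entry['timing'])             # e.g. main:compile 2.0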
Example 1: get_critical_path_timings
# Required import: from pants.goal.aggregated_timings import AggregatedTimings [as alias]
# Or: from pants.goal.aggregated_timings.AggregatedTimings import add_timing [as alias]
def get_critical_path_timings(self):
  """
  Get the cumulative timings of each goal and all of the goals it (transitively) depended on.
  """
  transitive_dependencies = dict()
  for goal_info in self._sorted_goal_infos:
    deps = transitive_dependencies.setdefault(goal_info.goal.name, set())
    for dep in goal_info.goal_dependencies:
      deps.add(dep.name)
      # Relies on self._sorted_goal_infos being topologically sorted, so the
      # dependency's own transitive set is already complete here.
      deps.update(transitive_dependencies.get(dep.name))

  raw_timings = dict()
  for entry in self.cumulative_timings.get_all():
    raw_timings[entry["label"]] = entry["timing"]

  timings = AggregatedTimings()
  for goal, deps in transitive_dependencies.items():
    label = "{}:{}".format(RunTracker.DEFAULT_ROOT_NAME, goal)
    timings.add_timing(label, raw_timings.get(label, 0.0))
    for dep in deps:
      dep_label = "{}:{}".format(RunTracker.DEFAULT_ROOT_NAME, dep)
      # Each dependency's timing accumulates onto the goal's own label.
      timings.add_timing(label, raw_timings.get(dep_label, 0.0))
  return timings
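To see what Example 1's roll-up produces, here is a standalone sketch of the same accumulation, with hypothetical goal names and timings; 'main' stands in for RunTracker.DEFAULT_ROOT_NAME.
# Standalone sketch of Example 1's critical-path roll-up; the goals and
# timings are hypothetical.
from pants.goal.aggregated_timings import AggregatedTimings

raw_timings = {'main:gen': 1.0, 'main:compile': 4.0, 'main:test': 2.0}
# Topologically ordered, as self._sorted_goal_infos guarantees: each
# dependency's transitive set is complete before its dependents read it.
transitive_dependencies = {
  'gen': set(),
  'compile': {'gen'},
  'test': {'compile', 'gen'},
}

timings = AggregatedTimings()
for goal, deps in transitive_dependencies.items():
  label = 'main:{}'.format(goal)
  timings.add_timing(label, raw_timings.get(label, 0.0))
  for dep in deps:
    timings.add_timing(label, raw_timings.get('main:{}'.format(dep), 0.0))
# 'main:test' now holds test + compile + gen = 7.0 seconds of critical-path time.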
Example 2: RunTracker
# Required import: from pants.goal.aggregated_timings import AggregatedTimings [as alias]
# Or: from pants.goal.aggregated_timings.AggregatedTimings import add_timing [as alias]
#......... part of the code omitted here .........
  """
  def error(msg):
    # Report already closed, so just print the error.
    print('WARNING: Failed to upload stats to {} due to {}'.format(url, msg),
          file=sys.stderr)
    return False

  # TODO(benjy): The upload protocol currently requires separate top-level params, with JSON
  # values. Probably better for there to be one top-level JSON value, namely json.dumps(stats).
  # But this will first require changing the upload receiver at every shop that uses this
  # (probably only Foursquare at present).
  params = {k: json.dumps(v) for (k, v) in stats.items()}
  try:
    r = requests.post(url, data=params, timeout=timeout)
    if r.status_code != requests.codes.ok:
      return error("HTTP error code: {}".format(r.status_code))
  except Exception as e:  # Broad catch - we don't want to fail the build over upload errors.
    return error("Error: {}".format(e))
  return True

def store_stats(self):
  """Store stats about this run in local and optionally remote stats dbs."""
  stats = {
    'run_info': self.run_info.get_as_dict(),
    'cumulative_timings': self.cumulative_timings.get_all(),
    'self_timings': self.self_timings.get_all(),
    'artifact_cache_stats': self.artifact_cache_stats.get_all()
  }

  # Dump individual stat file.
  # TODO(benjy): Do we really need these, once the statsdb is mature?
  stats_file = os.path.join(get_pants_cachedir(), 'stats',
                            '{}.json'.format(self.run_info.get_info('id')))
  safe_file_dump(stats_file, json.dumps(stats))

  # Add to local stats db.
  StatsDBFactory.global_instance().get_db().insert_stats(stats)

  # Upload to remote stats db.
  stats_url = self.get_options().stats_upload_url
  if stats_url:
    self.post_stats(stats_url, stats, timeout=self.get_options().stats_upload_timeout)

_log_levels = [Report.ERROR, Report.ERROR, Report.WARN, Report.INFO, Report.INFO]

def end(self):
  """This pants run is over, so stop tracking it.

  Note: If end() has been called once, subsequent calls are no-ops.
  """
  if self._background_worker_pool:
    if self._aborted:
      self.log(Report.INFO, "Aborting background workers.")
      self._background_worker_pool.abort()
    else:
      self.log(Report.INFO, "Waiting for background workers to finish.")
      self._background_worker_pool.shutdown()
    self.end_workunit(self._background_root_workunit)

  SubprocPool.shutdown(self._aborted)

  # Run a dummy work unit to write out one last timestamp.
  with self.new_workunit("complete"):
    pass

  self.end_workunit(self._main_root_workunit)

  outcome = self._main_root_workunit.outcome()
  if self._background_root_workunit:
    outcome = min(outcome, self._background_root_workunit.outcome())
  outcome_str = WorkUnit.outcome_string(outcome)
  log_level = RunTracker._log_levels[outcome]
  self.log(log_level, outcome_str)

  if self.run_info.get_info('outcome') is None:
    # If the goal is clean-all then the run info dir no longer exists, so ignore that error.
    self.run_info.add_info('outcome', outcome_str, ignore_errors=True)

  self.report.close()
  self.store_stats()

def end_workunit(self, workunit):
  self.report.end_workunit(workunit)
  path, duration, self_time, is_tool = workunit.end()
  self.cumulative_timings.add_timing(path, duration, is_tool)
  self.self_timings.add_timing(path, self_time, is_tool)

def get_background_root_workunit(self):
  if self._background_root_workunit is None:
    self._background_root_workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=None,
                                              name='background', cmd=None)
    self._background_root_workunit.start()
    self.report.start_workunit(self._background_root_workunit)
  return self._background_root_workunit

def background_worker_pool(self):
  if self._background_worker_pool is None:  # Initialize lazily.
    self._background_worker_pool = WorkerPool(parent_workunit=self.get_background_root_workunit(),
                                              run_tracker=self,
                                              num_workers=self._num_background_workers)
  return self._background_worker_pool
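The TODO inside post_stats above points out that each top-level stats key is uploaded as a separate form parameter whose value is a JSON string. Here is a sketch of what that encoding produces for a toy stats dict; the endpoint URL and the stats values are hypothetical.
# Sketch of the upload encoding used by post_stats in Example 2; the URL
# and stats values are hypothetical.
import json
import requests

stats = {
  'run_info': {'id': 'pants_run_2016_01_01'},
  'self_timings': [{'label': 'main:test', 'timing': 2.0, 'is_tool': False}],
}
# One form field per top-level key, each holding a JSON-encoded value.
params = {k: json.dumps(v) for (k, v) in stats.items()}
r = requests.post('https://stats.example.com/upload', data=params, timeout=5)
print(r.status_code == requests.codes.ok)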
Example 3: RunTracker
# Required import: from pants.goal.aggregated_timings import AggregatedTimings [as alias]
# Or: from pants.goal.aggregated_timings.AggregatedTimings import add_timing [as alias]
#......... part of the code omitted here .........
  self.end_workunit(self._main_root_workunit)

  outcome = self._main_root_workunit.outcome()
  if self._background_root_workunit:
    outcome = min(outcome, self._background_root_workunit.outcome())
  outcome_str = WorkUnit.outcome_string(outcome)
  log_level = RunTracker._log_levels[outcome]
  self.log(log_level, outcome_str)

  if self.run_info.get_info('outcome') is None:
    # If the goal is clean-all then the run info dir no longer exists, so ignore that error.
    self.run_info.add_info('outcome', outcome_str, ignore_errors=True)
  if self._target_to_data:
    self.run_info.add_info('target_data', self._target_to_data)

  self.report.close()
  self.store_stats()

  run_failed = outcome in [WorkUnit.FAILURE, WorkUnit.ABORTED]
  result = PANTS_FAILED_EXIT_CODE if run_failed else PANTS_SUCCEEDED_EXIT_CODE
  self._end_memoized_result = result
  return self._end_memoized_result

def end_workunit(self, workunit):
  path, duration, self_time, is_tool = workunit.end()
  self.report.end_workunit(workunit)
  workunit.cleanup()

  # These three operations may not be thread-safe, and workunits may run in separate threads
  # and thus end concurrently, so we want to lock these operations.
  with self._stats_lock:
    self.cumulative_timings.add_timing(path, duration, is_tool)
    self.self_timings.add_timing(path, self_time, is_tool)
    self.outcomes[path] = workunit.outcome_string(workunit.outcome())

def get_critical_path_timings(self):
  """
  Get the cumulative timings of each goal and all of the goals it (transitively) depended on.
  """
  setup_workunit = WorkUnitLabel.SETUP.lower()
  transitive_dependencies = dict()
  for goal_info in self._sorted_goal_infos:
    deps = transitive_dependencies.setdefault(goal_info.goal.name, set())
    for dep in goal_info.goal_dependencies:
      deps.add(dep.name)
      deps.update(transitive_dependencies.get(dep.name))
    # Add the setup workunit as a dep manually, as it's unaccounted for otherwise.
    deps.add(setup_workunit)

  raw_timings = dict()
  for entry in self.cumulative_timings.get_all():
    raw_timings[entry["label"]] = entry["timing"]

  critical_path_timings = AggregatedTimings()

  def add_to_timings(goal, dep):
    tracking_label = get_label(goal)
    timing_label = get_label(dep)
    critical_path_timings.add_timing(tracking_label, raw_timings.get(timing_label, 0.0))

  def get_label(dep):
    return "{}:{}".format(RunTracker.DEFAULT_ROOT_NAME, dep)

  # Add the setup workunit to critical_path_timings manually, as it's unaccounted for otherwise.
  add_to_timings(setup_workunit, setup_workunit)
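Example 3's end_workunit wraps the timing updates in self._stats_lock because workunits may end concurrently on separate threads. Below is a minimal standalone sketch of that locking pattern; the labels and durations are hypothetical, and AggregatedTimings() is constructed without a path, as in Example 1.
# Standalone sketch of the locking pattern around add_timing from Example 3;
# labels and durations are hypothetical.
import threading
from pants.goal.aggregated_timings import AggregatedTimings

stats_lock = threading.Lock()
cumulative_timings = AggregatedTimings()

def record_end(path, duration, is_tool=False):
  # add_timing mutates shared state, so serialize concurrent workunit ends.
  with stats_lock:
    cumulative_timings.add_timing(path, duration, is_tool)

threads = [threading.Thread(target=record_end, args=('main:compile', 0.1))
           for _ in range(4)]
for t in threads:
  t.start()
for t in threads:
  t.join()
# 'main:compile' now holds the sum of all four samples (0.4s).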
Example 4: RunTracker
# Required import: from pants.goal.aggregated_timings import AggregatedTimings [as alias]
# Or: from pants.goal.aggregated_timings.AggregatedTimings import add_timing [as alias]
#......... part of the code omitted here .........
  # Run a dummy work unit to write out one last timestamp.
  with self.new_workunit("complete"):
    pass

  self.end_workunit(self._main_root_workunit)

  outcome = self._main_root_workunit.outcome()
  if self._background_root_workunit:
    outcome = min(outcome, self._background_root_workunit.outcome())
  outcome_str = WorkUnit.outcome_string(outcome)
  log_level = RunTracker._log_levels[outcome]
  self.log(log_level, outcome_str)

  if self.run_info.get_info('outcome') is None:
    # If the goal is clean-all then the run info dir no longer exists, so ignore that error.
    self.run_info.add_info('outcome', outcome_str, ignore_errors=True)
  if self._target_to_data:
    self.run_info.add_info('target_data', self._target_to_data)

  self.report.close()
  self.store_stats()

  return 1 if outcome in [WorkUnit.FAILURE, WorkUnit.ABORTED] else 0

def end_workunit(self, workunit):
  self.report.end_workunit(workunit)
  path, duration, self_time, is_tool = workunit.end()

  # These three operations may not be thread-safe, and workunits may run in separate threads
  # and thus end concurrently, so we want to lock these operations.
  with self._stats_lock:
    self.cumulative_timings.add_timing(path, duration, is_tool)
    self.self_timings.add_timing(path, self_time, is_tool)
    self.outcomes[path] = workunit.outcome_string(workunit.outcome())

def get_critical_path_timings(self):
  """
  Get the cumulative timings of each goal and all of the goals it (transitively) depended on.
  """
  transitive_dependencies = dict()
  for goal_info in self._sorted_goal_infos:
    deps = transitive_dependencies.setdefault(goal_info.goal.name, set())
    for dep in goal_info.goal_dependencies:
      deps.add(dep.name)
      deps.update(transitive_dependencies.get(dep.name))

  raw_timings = dict()
  for entry in self.cumulative_timings.get_all():
    raw_timings[entry["label"]] = entry["timing"]

  timings = AggregatedTimings()
  for goal, deps in transitive_dependencies.items():
    label = "{}:{}".format(RunTracker.DEFAULT_ROOT_NAME, goal)
    timings.add_timing(label, raw_timings.get(label, 0.0))
    for dep in deps:
      dep_label = "{}:{}".format(RunTracker.DEFAULT_ROOT_NAME, dep)
      timings.add_timing(label, raw_timings.get(dep_label, 0.0))
  return timings

def get_background_root_workunit(self):
  if self._background_root_workunit is None:
    self._background_root_workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=None,
                                              name='background', cmd=None)
    self._background_root_workunit.start()
Example 5: RunTracker
# Required import: from pants.goal.aggregated_timings import AggregatedTimings [as alias]
# Or: from pants.goal.aggregated_timings.AggregatedTimings import add_timing [as alias]
#......... part of the code omitted here .........
    raise
  else:
    workunit.set_outcome(WorkUnit.SUCCESS)
  finally:
    self.end_workunit(workunit)

def log(self, level, *msg_elements):
  """Log a message against the current workunit."""
  self.report.log(self._threadlocal.current_workunit, level, *msg_elements)

def upload_stats(self):
  """Send timing results to the URL specified in pants.ini."""
  def error(msg):
    # Report already closed, so just print the error.
    print("WARNING: Failed to upload stats to {} due to {}".format(self.stats_url, msg),
          file=sys.stderr)

  if self.stats_url:
    params = {
      'run_info': json.dumps(self.run_info.get_as_dict()),
      'cumulative_timings': json.dumps(self.cumulative_timings.get_all()),
      'self_timings': json.dumps(self.self_timings.get_all()),
      'artifact_cache_stats': json.dumps(self.artifact_cache_stats.get_all())
    }
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    url = urlparse(self.stats_url)
    try:
      if url.scheme == 'https':
        http_conn = httplib.HTTPSConnection(url.netloc, timeout=self.stats_timeout)
      else:
        http_conn = httplib.HTTPConnection(url.netloc, timeout=self.stats_timeout)
      http_conn.request('POST', url.path, urllib.urlencode(params), headers)
      resp = http_conn.getresponse()
      if resp.status != 200:
        error("HTTP error code: {}".format(resp.status))
    except Exception as e:
      error("Error: {}".format(e))

_log_levels = [Report.ERROR, Report.ERROR, Report.WARN, Report.INFO, Report.INFO]

def end(self):
  """This pants run is over, so stop tracking it.

  Note: If end() has been called once, subsequent calls are no-ops.
  """
  if self._background_worker_pool:
    if self._aborted:
      self.log(Report.INFO, "Aborting background workers.")
      self._background_worker_pool.abort()
    else:
      self.log(Report.INFO, "Waiting for background workers to finish.")
      self._background_worker_pool.shutdown()
    self.end_workunit(self._background_root_workunit)

  SubprocPool.shutdown(self._aborted)

  # Run a dummy work unit to write out one last timestamp.
  with self.new_workunit("complete"):
    pass

  self.end_workunit(self._main_root_workunit)

  outcome = self._main_root_workunit.outcome()
  if self._background_root_workunit:
    outcome = min(outcome, self._background_root_workunit.outcome())
  outcome_str = WorkUnit.outcome_string(outcome)
  log_level = RunTracker._log_levels[outcome]
  self.log(log_level, outcome_str)

  if self.run_info.get_info('outcome') is None:
    try:
      self.run_info.add_info('outcome', outcome_str)
    except IOError:
      pass  # If the goal is clean-all then the run info dir no longer exists...

  self.report.close()
  self.upload_stats()

def end_workunit(self, workunit):
  self.report.end_workunit(workunit)
  path, duration, self_time, is_tool = workunit.end()
  self.cumulative_timings.add_timing(path, duration, is_tool)
  self.self_timings.add_timing(path, self_time, is_tool)

def get_background_root_workunit(self):
  if self._background_root_workunit is None:
    self._background_root_workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=None,
                                              name='background', cmd=None)
    self._background_root_workunit.start()
    self.report.start_workunit(self._background_root_workunit)
  return self._background_root_workunit

def background_worker_pool(self):
  if self._background_worker_pool is None:  # Initialize lazily.
    self._background_worker_pool = WorkerPool(parent_workunit=self.get_background_root_workunit(),
                                              run_tracker=self,
                                              num_workers=self._num_background_workers)
  return self._background_worker_pool
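A recurring detail across all five examples: end_workunit feeds two separate aggregations, cumulative_timings (the workunit's total duration) and self_timings (the portion not attributed to child workunits). Here is a toy illustration of the difference, with hypothetical workunit paths and durations.
# Toy illustration of cumulative vs. self timings as end_workunit records
# them; the paths and durations are hypothetical.
from pants.goal.aggregated_timings import AggregatedTimings

cumulative_timings = AggregatedTimings()
self_timings = AggregatedTimings()

# Pretend 'main:test' ran 5.0s total, 4.0s of which was spent in its child
# workunit 'main:test:junit'.
for path, duration, self_time in [('main:test:junit', 4.0, 4.0),
                                  ('main:test', 5.0, 1.0)]:
  cumulative_timings.add_timing(path, duration)
  self_timings.add_timing(path, self_time)

# cumulative_timings reports main:test at 5.0s; self_timings reports 1.0s.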