本文整理汇总了Python中pants.base.run_info.RunInfo.get_info方法的典型用法代码示例。如果您正苦于以下问题:Python RunInfo.get_info方法的具体用法?Python RunInfo.get_info怎么用?Python RunInfo.get_info使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类pants.base.run_info.RunInfo
的用法示例。
在下文中一共展示了RunInfo.get_info方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: test_run_info_read
# 需要导入模块: from pants.base.run_info import RunInfo [as 别名]
# 或者: from pants.base.run_info.RunInfo import get_info [as 别名]
def test_run_info_read(self):
    """RunInfo parses 'key:value' lines, trimming whitespace around keys and values.

    Also checks that missing keys return None from get_info and that dict-style
    indexing mirrors get_info for present keys.
    """
    with temporary_file_path() as tmppath:
        with open(tmppath, 'w') as tmpfile:
            # ' baz :qux quux' exercises whitespace-trimming around the key.
            tmpfile.write('foo:bar\n baz :qux quux')
        ri = RunInfo(tmppath)
        # assertEqual (not the deprecated assertEquals alias, removed in Python 3.12).
        self.assertEqual(ri.path(), tmppath)
        # Test get_info access.
        self.assertEqual(ri.get_info('foo'), 'bar')
        self.assertEqual(ri.get_info('baz'), 'qux quux')
        self.assertIsNone(ri.get_info('nonexistent'))
        # Test dict-like access.
        self.assertEqual(ri['foo'], 'bar')
        self.assertEqual(ri['baz'], 'qux quux')
示例2: RunTracker
# 需要导入模块: from pants.base.run_info import RunInfo [as 别名]
# 或者: from pants.base.run_info.RunInfo import get_info [as 别名]
#.........这里部分代码省略.........
:return: True if upload was successful, False otherwise.
"""
def error(msg):
# Report aleady closed, so just print error.
print('WARNING: Failed to upload stats to {} due to {}'.format(url, msg),
file=sys.stderr)
return False
# TODO(benjy): The upload protocol currently requires separate top-level params, with JSON
# values. Probably better for there to be one top-level JSON value, namely json.dumps(stats).
# But this will first require changing the upload receiver at every shop that uses this
# (probably only Foursquare at present).
params = {k: json.dumps(v) for (k, v) in stats.items()}
try:
r = requests.post(url, data=params, timeout=timeout)
if r.status_code != requests.codes.ok:
return error("HTTP error code: {}".format(r.status_code))
except Exception as e: # Broad catch - we don't want to fail the build over upload errors.
return error("Error: {}".format(e))
return True
def store_stats(self):
    """Store stats about this run in local and optionally remote stats dbs."""
    # Snapshot everything tracked for this run; all values are plain dicts/lists.
    stats = {
        'run_info': self.run_info.get_as_dict(),
        'cumulative_timings': self.cumulative_timings.get_all(),
        'self_timings': self.self_timings.get_all(),
        'artifact_cache_stats': self.artifact_cache_stats.get_all()
    }
    # Dump individual stat file, named after this run's id.
    # TODO(benjy): Do we really need these, once the statsdb is mature?
    stats_file = os.path.join(get_pants_cachedir(), 'stats',
                              '{}.json'.format(self.run_info.get_info('id')))
    safe_file_dump(stats_file, json.dumps(stats))
    # Add to local stats db.
    StatsDBFactory.global_instance().get_db().insert_stats(stats)
    # Upload to remote stats db, but only if an upload URL is configured.
    stats_url = self.get_options().stats_upload_url
    if stats_url:
        self.post_stats(stats_url, stats, timeout=self.get_options().stats_upload_timeout)
# Maps a WorkUnit outcome ordinal to the report level used when logging it
# (the two lowest ordinals log at ERROR — see the end() logic below).
_log_levels = [Report.ERROR, Report.ERROR, Report.WARN, Report.INFO, Report.INFO]

def end(self):
    """This pants run is over, so stop tracking it.

    Note: If end() has been called once, subsequent calls are no-ops.
    """
    if self._background_worker_pool:
        # Abort or drain the background pool depending on how the run ended.
        if self._aborted:
            self.log(Report.INFO, "Aborting background workers.")
            self._background_worker_pool.abort()
        else:
            self.log(Report.INFO, "Waiting for background workers to finish.")
            self._background_worker_pool.shutdown()
        self.end_workunit(self._background_root_workunit)
    SubprocPool.shutdown(self._aborted)
    # Run a dummy work unit to write out one last timestamp.
    with self.new_workunit("complete"):
        pass
示例3: RunTracker
# 需要导入模块: from pants.base.run_info import RunInfo [as 别名]
# 或者: from pants.base.run_info.RunInfo import get_info [as 别名]
#.........这里部分代码省略.........
self.report.end_workunit(workunit)
workunit.end()
def log(self, level, *msg_elements):
    """Log a message against the current workunit.

    The current workunit is tracked per-thread, so the message is attached to
    whatever workunit the calling thread is executing in.
    """
    self.report.log(self._threadlocal.current_workunit, level, *msg_elements)
def upload_stats(self):
    """Send timing results to URL specified in pants.ini"""
    def error(msg):
        # The report is already closed at this point, so just print the error.
        print("WARNING: Failed to upload stats. %s" % msg)

    # Nothing to do unless an upload URL is configured.
    if not self.stats_url:
        return
    # The upload protocol takes each stat category as a separate JSON-valued form field.
    params = {
        'run_info': json.dumps(self.run_info.get_as_dict()),
        'cumulative_timings': json.dumps(self.cumulative_timings.get_all()),
        'self_timings': json.dumps(self.self_timings.get_all()),
        'artifact_cache_stats': json.dumps(self.artifact_cache_stats.get_all())
    }
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    parsed = urlparse(self.stats_url)
    conn_class = httplib.HTTPSConnection if parsed.scheme == 'https' else httplib.HTTPConnection
    try:
        conn = conn_class(parsed.netloc)
        conn.request('POST', parsed.path, urllib.urlencode(params), headers)
        response = conn.getresponse()
        if response.status != 200:
            error("HTTP error code: %d" % response.status)
    except Exception as e:
        # Broad catch: an upload failure must never fail the build.
        error("Error: %s" % e)
def end(self):
    """This pants run is over, so stop tracking it.

    Note: If end() has been called once, subsequent calls are no-ops.
    """
    if self._background_worker_pool:
        # Abort or drain the background pool depending on how the run ended.
        if self._aborted:
            self.log(Report.INFO, "Aborting background workers.")
            self._background_worker_pool.abort()
        else:
            self.log(Report.INFO, "Waiting for background workers to finish.")
            self._background_worker_pool.shutdown()
        self.report.end_workunit(self._background_root_workunit)
        self._background_root_workunit.end()
    if self._foreground_worker_pool:
        if self._aborted:
            self.log(Report.INFO, "Aborting foreground workers.")
            self._foreground_worker_pool.abort()
        else:
            self.log(Report.INFO, "Waiting for foreground workers to finish.")
            self._foreground_worker_pool.shutdown()
    # The main root workunit always ends, whether or not a foreground pool existed.
    self.report.end_workunit(self._main_root_workunit)
    self._main_root_workunit.end()
    # Combine main and background outcomes with min(); lower ordinals are more
    # severe, as the ERROR/ERROR/WARN/INFO/INFO level ordering below shows.
    outcome = self._main_root_workunit.outcome()
    if self._background_root_workunit:
        outcome = min(outcome, self._background_root_workunit.outcome())
    outcome_str = WorkUnit.outcome_string(outcome)
    log_level = WorkUnit.choose_for_outcome(outcome, Report.ERROR, Report.ERROR,
                                            Report.WARN, Report.INFO, Report.INFO)
    self.log(log_level, outcome_str)
    # Record the outcome in run_info, unless something recorded one already.
    if self.run_info.get_info('outcome') is None:
        try:
            self.run_info.add_info('outcome', outcome_str)
        except IOError:
            pass  # If the goal is clean-all then the run info dir no longer exists...
    self.report.close()
    self.upload_stats()
def foreground_worker_pool(self):
    """Return the pool that runs foreground work, creating it lazily on first use."""
    pool = self._foreground_worker_pool
    if pool is None:
        pool = WorkerPool(
            parent_workunit=self._main_root_workunit,
            run_tracker=self,
            num_workers=self._num_foreground_workers)
        self._foreground_worker_pool = pool
    return pool
def get_background_root_workunit(self):
    """Return the root workunit for background work, creating and starting it on first use."""
    if self._background_root_workunit is None:
        workunit = WorkUnit(run_tracker=self, parent=None, labels=[],
                            name='background', cmd=None)
        workunit.start()
        self.report.start_workunit(workunit)
        self._background_root_workunit = workunit
    return self._background_root_workunit
def background_worker_pool(self):
    """Return the pool that runs background work, creating it lazily on first use."""
    pool = self._background_worker_pool
    if pool is None:
        # Creating the pool also creates/starts the background root workunit.
        pool = WorkerPool(
            parent_workunit=self.get_background_root_workunit(),
            run_tracker=self,
            num_workers=self._num_background_workers)
        self._background_worker_pool = pool
    return pool
示例4: RunTracker
# 需要导入模块: from pants.base.run_info import RunInfo [as 别名]
# 或者: from pants.base.run_info.RunInfo import get_info [as 别名]
#.........这里部分代码省略.........
# Hit/miss stats for the artifact cache.
self.artifact_cache_stats = ArtifactCacheStats(os.path.join(self.run_info_dir,
'artifact_cache_stats'))
# Daemon stats.
self.pantsd_stats = PantsDaemonStats()
return run_id
def start(self, report, run_start_time=None):
    """Start tracking this pants run using the given Report.

    `RunTracker.initialize` must have been called first to create the run_info_dir and
    run_info. TODO: This lifecycle represents a delicate dance with the `Reporting.initialize`
    method, and portions of the `RunTracker` should likely move to `Reporting` instead.

    report: an instance of pants.reporting.Report.
    """
    if not self.run_info:
        raise AssertionError('RunTracker.initialize must be called before RunTracker.start.')
    self.report = report
    self.report.open()
    # And create the workunit.
    self._main_root_workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=None,
                                        name=RunTracker.DEFAULT_ROOT_NAME, cmd=None)
    self.register_thread(self._main_root_workunit)
    # Set the true start time in the case of e.g. the daemon.
    self._main_root_workunit.start(run_start_time)
    self.report.start_workunit(self._main_root_workunit)
    # Log reporting details: either a report URL recorded in run_info, or a hint.
    url = self.run_info.get_info('report_url')
    if url:
        self.log(Report.INFO, 'See a report at: {}'.format(url))
    else:
        self.log(Report.INFO, '(To run a reporting server: ./pants server)')
def set_root_outcome(self, outcome):
    """Set the outcome directly on the main root workunit.

    Useful for setup code that doesn't have a reference to a workunit.
    """
    self._main_root_workunit.set_outcome(outcome)
@contextmanager
def new_workunit(self, name, labels=None, cmd='', log_config=None):
"""Creates a (hierarchical) subunit of work for the purpose of timing and reporting.
- name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.
- labels: An optional iterable of labels. The reporters can use this to decide how to
display information about this work.
- cmd: An optional longer string representing this work.
E.g., the cmd line of a compiler invocation.
- log_config: An optional tuple WorkUnit.LogConfig of task-level options affecting reporting.
Use like this:
with run_tracker.new_workunit(name='compile', labels=[WorkUnitLabel.TASK]) as workunit:
<do scoped work here>
<set the outcome on workunit if necessary>
Note that the outcome will automatically be set to failure if an exception is raised
in a workunit, and to success otherwise, so usually you only need to set the
outcome explicitly if you want to set it to warning.
:API: public
"""
示例5: RunTracker
# 需要导入模块: from pants.base.run_info import RunInfo [as 别名]
# 或者: from pants.base.run_info.RunInfo import get_info [as 别名]
#.........这里部分代码省略.........
return (run_id, run_uuid)
def start(self, report, run_start_time=None):
    """Start tracking this pants run using the given Report.

    `RunTracker.initialize` must have been called first to create the run_info_dir and
    run_info. TODO: This lifecycle represents a delicate dance with the `Reporting.initialize`
    method, and portions of the `RunTracker` should likely move to `Reporting` instead.

    report: an instance of pants.reporting.Report.
    """
    if not self.run_info:
        raise AssertionError('RunTracker.initialize must be called before RunTracker.start.')
    self.report = report
    # Set up the JsonReporter for V2 stats.
    if self.get_options().stats_version == 2:
        json_reporter_settings = JsonReporter.Settings(log_level=Report.INFO)
        self.json_reporter = JsonReporter(self, json_reporter_settings)
        report.add_reporter('json', self.json_reporter)
    self.report.open()
    # And create the workunit.
    self._main_root_workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=None,
                                        name=RunTracker.DEFAULT_ROOT_NAME, cmd=None)
    self.register_thread(self._main_root_workunit)
    # Set the true start time in the case of e.g. the daemon.
    self._main_root_workunit.start(run_start_time)
    self.report.start_workunit(self._main_root_workunit)
    # Log reporting details: either a report URL recorded in run_info, or a hint.
    url = self.run_info.get_info('report_url')
    if url:
        self.log(Report.INFO, 'See a report at: {}'.format(url))
    else:
        self.log(Report.INFO, '(To run a reporting server: ./pants server)')
def set_root_outcome(self, outcome):
    """Set the outcome directly on the main root workunit.

    Useful for setup code that doesn't have a reference to a workunit.
    """
    self._main_root_workunit.set_outcome(outcome)
@property
def logger(self):
    """The logger instance backing this tracker (read-only)."""
    return self._logger
@contextmanager
def new_workunit(self, name, labels=None, cmd='', log_config=None):
"""Creates a (hierarchical) subunit of work for the purpose of timing and reporting.
- name: A short name for this work. E.g., 'resolve', 'compile', 'scala', 'zinc'.
- labels: An optional iterable of labels. The reporters can use this to decide how to
display information about this work.
- cmd: An optional longer string representing this work.
E.g., the cmd line of a compiler invocation.
- log_config: An optional tuple WorkUnit.LogConfig of task-level options affecting reporting.
Use like this:
with run_tracker.new_workunit(name='compile', labels=[WorkUnitLabel.TASK]) as workunit:
<do scoped work here>
<set the outcome on workunit if necessary>
Note that the outcome will automatically be set to failure if an exception is raised
in a workunit, and to success otherwise, so usually you only need to set the
示例6: RunTracker
# 需要导入模块: from pants.base.run_info import RunInfo [as 别名]
# 或者: from pants.base.run_info.RunInfo import get_info [as 别名]
#.........这里部分代码省略.........
raise
else:
workunit.set_outcome(WorkUnit.SUCCESS)
finally:
self.end_workunit(workunit)
def log(self, level, *msg_elements):
    """Log a message against the current workunit.

    The current workunit is tracked per-thread, so the message is attached to
    whatever workunit the calling thread is executing in.
    """
    self.report.log(self._threadlocal.current_workunit, level, *msg_elements)
def upload_stats(self):
    """Send timing results to URL specified in pants.ini"""
    def error(msg):
        # The report is already closed at this point, so just print the error.
        print("WARNING: Failed to upload stats to {} due to {}".format(self.stats_url, msg), file=sys.stderr)

    # Nothing to do unless an upload URL is configured.
    if not self.stats_url:
        return
    # The upload protocol takes each stat category as a separate JSON-valued form field.
    params = {
        'run_info': json.dumps(self.run_info.get_as_dict()),
        'cumulative_timings': json.dumps(self.cumulative_timings.get_all()),
        'self_timings': json.dumps(self.self_timings.get_all()),
        'artifact_cache_stats': json.dumps(self.artifact_cache_stats.get_all())
    }
    headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
    parsed = urlparse(self.stats_url)
    conn_class = httplib.HTTPSConnection if parsed.scheme == 'https' else httplib.HTTPConnection
    try:
        conn = conn_class(parsed.netloc, timeout=self.stats_timeout)
        conn.request('POST', parsed.path, urllib.urlencode(params), headers)
        response = conn.getresponse()
        if response.status != 200:
            error("HTTP error code: {}".format(response.status))
    except Exception as e:
        # Broad catch: an upload failure must never fail the build.
        error("Error: {}".format(e))
# Maps a WorkUnit outcome ordinal to the report level used when logging it
# (the two lowest ordinals log at ERROR — used in end() below).
_log_levels = [Report.ERROR, Report.ERROR, Report.WARN, Report.INFO, Report.INFO]

def end(self):
    """This pants run is over, so stop tracking it.

    Note: If end() has been called once, subsequent calls are no-ops.
    """
    if self._background_worker_pool:
        # Abort or drain the background pool depending on how the run ended.
        if self._aborted:
            self.log(Report.INFO, "Aborting background workers.")
            self._background_worker_pool.abort()
        else:
            self.log(Report.INFO, "Waiting for background workers to finish.")
            self._background_worker_pool.shutdown()
        self.end_workunit(self._background_root_workunit)
    SubprocPool.shutdown(self._aborted)
    # Run a dummy work unit to write out one last timestamp
    with self.new_workunit("complete"):
        pass
    self.end_workunit(self._main_root_workunit)
    # Combine main and background outcomes with min(); lower ordinals are more
    # severe, as the _log_levels ERROR/ERROR/WARN/INFO/INFO ordering shows.
    outcome = self._main_root_workunit.outcome()
    if self._background_root_workunit:
        outcome = min(outcome, self._background_root_workunit.outcome())
    outcome_str = WorkUnit.outcome_string(outcome)
    log_level = RunTracker._log_levels[outcome]
    self.log(log_level, outcome_str)
    # Record the outcome in run_info, unless something recorded one already.
    if self.run_info.get_info('outcome') is None:
        try:
            self.run_info.add_info('outcome', outcome_str)
        except IOError:
            pass  # If the goal is clean-all then the run info dir no longer exists...
    self.report.close()
    self.upload_stats()
def end_workunit(self, workunit):
    """Finish a workunit: notify the report, then record its timings."""
    self.report.end_workunit(workunit)
    # workunit.end() yields its path, total duration, self time, and a tool flag.
    path, duration, self_time, is_tool = workunit.end()
    for timings, elapsed in ((self.cumulative_timings, duration),
                             (self.self_timings, self_time)):
        timings.add_timing(path, elapsed, is_tool)
def get_background_root_workunit(self):
    """Return the root workunit for background work, creating and starting it on first use."""
    if self._background_root_workunit is None:
        workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=None,
                            name='background', cmd=None)
        workunit.start()
        self.report.start_workunit(workunit)
        self._background_root_workunit = workunit
    return self._background_root_workunit
def background_worker_pool(self):
    """Return the pool that runs background work, creating it lazily on first use."""
    pool = self._background_worker_pool
    if pool is None:
        # Creating the pool also creates/starts the background root workunit.
        pool = WorkerPool(
            parent_workunit=self.get_background_root_workunit(),
            run_tracker=self,
            num_workers=self._num_background_workers)
        self._background_worker_pool = pool
    return pool