This article collects typical usage examples of the Python method pants.goal.artifact_cache_stats.ArtifactCacheStats.get_all. If you are wondering how ArtifactCacheStats.get_all is used, what it does, or want concrete examples of it in action, the curated code samples below should help. You can also explore the containing class, pants.goal.artifact_cache_stats.ArtifactCacheStats, for further usage examples.
Six code examples of the ArtifactCacheStats.get_all method are shown below, sorted by popularity by default.
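Before the examples, a minimal orientation sketch. It is not taken from any of the examples below and only assumes the two things Example 1 demonstrates: ArtifactCacheStats is constructed with a directory where it keeps its per-cache hit/miss files, and get_all() returns the statistics accumulated so far. The scratch directory is a hypothetical placeholder.

import tempfile

from pants.goal.artifact_cache_stats import ArtifactCacheStats

stats_dir = tempfile.mkdtemp()              # hypothetical scratch location
cache_stats = ArtifactCacheStats(stats_dir)

# ... during a pants run, cache hits and misses would be recorded here ...

print(cache_stats.get_all())                # aggregated per-cache hit/miss statistics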
Example 1: mock_artifact_cache_stats
# Required import: from pants.goal.artifact_cache_stats import ArtifactCacheStats [as alias]
# Or: from pants.goal.artifact_cache_stats.ArtifactCacheStats import get_all [as alias]
def mock_artifact_cache_stats(self,
                              expected_stats,
                              expected_hit_or_miss_files=None):
  with temporary_dir() as tmp_dir:
    artifact_cache_stats = ArtifactCacheStats(tmp_dir)
    yield artifact_cache_stats
    self.assertEquals(expected_stats, artifact_cache_stats.get_all())
    self.assertEquals(sorted(list(expected_hit_or_miss_files.keys())),
                      sorted(os.listdir(tmp_dir)))
    for hit_or_miss_file in expected_hit_or_miss_files.keys():
      with open(os.path.join(tmp_dir, hit_or_miss_file)) as hit_or_miss_saved:
        self.assertEquals(expected_hit_or_miss_files[hit_or_miss_file], hit_or_miss_saved.read())
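A hypothetical usage sketch of the helper above, written as it would appear inside a test method of the same class. It assumes the helper is decorated with @contextlib.contextmanager in the full test class (the decorator is not visible in this excerpt) and that an empty run produces no stats and no hit/miss files; the assertions run once the with-block exits.

with self.mock_artifact_cache_stats(expected_stats=[],
                                    expected_hit_or_miss_files={}) as artifact_cache_stats:
  pass  # exercise the artifact cache here; verification happens when the block exits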
Example 2: RunTracker
# Required import: from pants.goal.artifact_cache_stats import ArtifactCacheStats [as alias]
# Or: from pants.goal.artifact_cache_stats.ArtifactCacheStats import get_all [as alias]
# ... part of the code omitted here ...
  def log(self, level, *msg_elements):
    """Log a message against the current workunit."""
    self.report.log(self._threadlocal.current_workunit, level, *msg_elements)

  @classmethod
  def post_stats(cls, url, stats, timeout=2):
    """POST stats to the given url.

    :return: True if upload was successful, False otherwise.
    """
    def error(msg):
      # Report already closed, so just print error.
      print('WARNING: Failed to upload stats to {} due to {}'.format(url, msg),
            file=sys.stderr)
      return False

    # TODO(benjy): The upload protocol currently requires separate top-level params, with JSON
    # values. Probably better for there to be one top-level JSON value, namely json.dumps(stats).
    # But this will first require changing the upload receiver at every shop that uses this
    # (probably only Foursquare at present).
    params = {k: json.dumps(v) for (k, v) in stats.items()}
    try:
      r = requests.post(url, data=params, timeout=timeout)
      if r.status_code != requests.codes.ok:
        return error("HTTP error code: {}".format(r.status_code))
    except Exception as e:  # Broad catch - we don't want to fail the build over upload errors.
      return error("Error: {}".format(e))
    return True

  def store_stats(self):
    """Store stats about this run in local and optionally remote stats dbs."""
    stats = {
      'run_info': self.run_info.get_as_dict(),
      'cumulative_timings': self.cumulative_timings.get_all(),
      'self_timings': self.self_timings.get_all(),
      'artifact_cache_stats': self.artifact_cache_stats.get_all()
    }
    # Dump individual stat file.
    # TODO(benjy): Do we really need these, once the statsdb is mature?
    stats_file = os.path.join(get_pants_cachedir(), 'stats',
                              '{}.json'.format(self.run_info.get_info('id')))
    safe_file_dump(stats_file, json.dumps(stats))

    # Add to local stats db.
    StatsDBFactory.global_instance().get_db().insert_stats(stats)

    # Upload to remote stats db.
    stats_url = self.get_options().stats_upload_url
    if stats_url:
      self.post_stats(stats_url, stats, timeout=self.get_options().stats_upload_timeout)

  _log_levels = [Report.ERROR, Report.ERROR, Report.WARN, Report.INFO, Report.INFO]

  def end(self):
    """This pants run is over, so stop tracking it.

    Note: If end() has been called once, subsequent calls are no-ops.
    """
    if self._background_worker_pool:
      if self._aborted:
        self.log(Report.INFO, "Aborting background workers.")
        self._background_worker_pool.abort()
      else:
        self.log(Report.INFO, "Waiting for background workers to finish.")
        self._background_worker_pool.shutdown()
      self.end_workunit(self._background_root_workunit)
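The post_stats classmethod above uploads the same stats dictionary that store_stats assembles. Below is a hedged sketch of invoking it directly; the endpoint URL and the field values are hypothetical placeholders, and the dictionary only mirrors the shape built in store_stats.

from pants.goal.run_tracker import RunTracker

stats = {
  'run_info': {'id': 'example_run_id'},   # hypothetical values
  'cumulative_timings': [],
  'self_timings': [],
  'artifact_cache_stats': [],
}
ok = RunTracker.post_stats('https://stats.example.com/upload', stats, timeout=2)
if not ok:
  print('stats upload failed; the build itself is unaffected')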
Example 3: RunTracker
# Required import: from pants.goal.artifact_cache_stats import ArtifactCacheStats [as alias]
# Or: from pants.goal.artifact_cache_stats.ArtifactCacheStats import get_all [as alias]
# ... part of the code omitted here ...
    Task code should not typically call this directly.
    """
    workunit = WorkUnit(run_tracker=self, parent=parent, name=name, labels=labels, cmd=cmd)
    workunit.start()
    try:
      self.report.start_workunit(workunit)
      yield workunit
    except KeyboardInterrupt:
      workunit.set_outcome(WorkUnit.ABORTED)
      self._aborted = True
      raise
    except:
      workunit.set_outcome(WorkUnit.FAILURE)
      raise
    else:
      workunit.set_outcome(WorkUnit.SUCCESS)
    finally:
      self.report.end_workunit(workunit)
      workunit.end()

  def log(self, level, *msg_elements):
    """Log a message against the current workunit."""
    self.report.log(self._threadlocal.current_workunit, level, *msg_elements)

  def upload_stats(self):
    """Send timing results to URL specified in pants.ini"""
    def error(msg):
      # Report already closed, so just print error.
      print("WARNING: Failed to upload stats. %s" % msg)

    if self.stats_url:
      params = {
        'run_info': json.dumps(self.run_info.get_as_dict()),
        'cumulative_timings': json.dumps(self.cumulative_timings.get_all()),
        'self_timings': json.dumps(self.self_timings.get_all()),
        'artifact_cache_stats': json.dumps(self.artifact_cache_stats.get_all())
      }
      headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
      url = urlparse(self.stats_url)
      try:
        if url.scheme == 'https':
          http_conn = httplib.HTTPSConnection(url.netloc)
        else:
          http_conn = httplib.HTTPConnection(url.netloc)
        http_conn.request('POST', url.path, urllib.urlencode(params), headers)
        resp = http_conn.getresponse()
        if resp.status != 200:
          error("HTTP error code: %d" % resp.status)
      except Exception as e:
        error("Error: %s" % e)

  def end(self):
    """This pants run is over, so stop tracking it.

    Note: If end() has been called once, subsequent calls are no-ops.
    """
    if self._background_worker_pool:
      if self._aborted:
        self.log(Report.INFO, "Aborting background workers.")
        self._background_worker_pool.abort()
      else:
        self.log(Report.INFO, "Waiting for background workers to finish.")
        self._background_worker_pool.shutdown()
      self.report.end_workunit(self._background_root_workunit)
      self._background_root_workunit.end()
Example 4: RunTracker
# Required import: from pants.goal.artifact_cache_stats import ArtifactCacheStats [as alias]
# Or: from pants.goal.artifact_cache_stats.ArtifactCacheStats import get_all [as alias]
# ... part of the code omitted here ...
    try:
      r = requests.post(url, data=params, timeout=timeout)
      if r.status_code != requests.codes.ok:
        return error("HTTP error code: {}".format(r.status_code))
    except Exception as e:  # Broad catch - we don't want to fail the build over upload errors.
      return error("Error: {}".format(e))
    return True

  @classmethod
  def write_stats_to_json(cls, file_name, stats):
    """Write stats to a local json file.

    :return: True if successfully written, False otherwise.
    """
    params = json.dumps(stats)
    try:
      with open(file_name, 'w') as f:
        f.write(params)
    except Exception as e:  # Broad catch - we don't want to fail in stats related failure.
      print('WARNING: Failed to write stats to {} due to Error: {}'.format(file_name, e),
            file=sys.stderr)
      return False
    return True

  def store_stats(self):
    """Store stats about this run in local and optionally remote stats dbs."""
    run_information = self.run_info.get_as_dict()
    target_data = run_information.get('target_data', None)
    if target_data:
      run_information['target_data'] = ast.literal_eval(target_data)

    stats = {
      'run_info': run_information,
      'cumulative_timings': self.cumulative_timings.get_all(),
      'self_timings': self.self_timings.get_all(),
      'critical_path_timings': self.get_critical_path_timings().get_all(),
      'artifact_cache_stats': self.artifact_cache_stats.get_all(),
      'pantsd_stats': self.pantsd_stats.get_all(),
      'outcomes': self.outcomes
    }
    # Dump individual stat file.
    # TODO(benjy): Do we really need these, once the statsdb is mature?
    stats_file = os.path.join(get_pants_cachedir(), 'stats',
                              '{}.json'.format(self.run_info.get_info('id')))
    safe_file_dump(stats_file, json.dumps(stats))

    # Add to local stats db.
    StatsDBFactory.global_instance().get_db().insert_stats(stats)

    # Upload to remote stats db.
    stats_url = self.get_options().stats_upload_url
    if stats_url:
      self.post_stats(stats_url, stats, timeout=self.get_options().stats_upload_timeout)

    # Write stats to local json file.
    stats_json_file_name = self.get_options().stats_local_json_file
    if stats_json_file_name:
      self.write_stats_to_json(stats_json_file_name, stats)

  _log_levels = [Report.ERROR, Report.ERROR, Report.WARN, Report.INFO, Report.INFO]

  def end(self):
    """This pants run is over, so stop tracking it.

    Note: If end() has been called once, subsequent calls are no-ops.
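The write_stats_to_json classmethod in this example can also be invoked directly. A hedged sketch follows; the output path and the minimal stats dictionary are hypothetical placeholders, not values from the example.

from pants.goal.run_tracker import RunTracker

stats = {'run_info': {'id': 'example_run_id'}}  # hypothetical minimal payload
if not RunTracker.write_stats_to_json('/tmp/pants_stats.json', stats):
  print('WARNING: stats were not written')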
Example 5: RunTracker
# Required import: from pants.goal.artifact_cache_stats import ArtifactCacheStats [as alias]
# Or: from pants.goal.artifact_cache_stats.ArtifactCacheStats import get_all [as alias]
# ... part of the code omitted here ...
    try:
      return do_post(stats_url, num_redirects_allowed=6)
    except Exception as e:  # Broad catch - we don't want to fail the build over upload errors.
      return error('Error: {}'.format(e))

  @classmethod
  def _json_dump_options(cls, stats):
    return json.dumps(stats, cls=RunTrackerOptionEncoder)

  @classmethod
  def write_stats_to_json(cls, file_name, stats):
    """Write stats to a local json file."""
    params = cls._json_dump_options(stats)
    mode = 'w' if PY3 else 'wb'
    try:
      safe_file_dump(file_name, params, mode=mode)
    except Exception as e:  # Broad catch - we don't want to fail in stats related failure.
      print('WARNING: Failed to write stats to {} due to Error: {}'.format(file_name, e),
            file=sys.stderr)

  def run_information(self):
    """Basic information about this run."""
    run_information = self.run_info.get_as_dict()
    target_data = run_information.get('target_data', None)
    if target_data:
      run_information['target_data'] = ast.literal_eval(target_data)
    return run_information

  def _stats(self):
    if self.get_options().stats_version == 2:
      return {
        'run_info': self.run_information(),
        'artifact_cache_stats': self.artifact_cache_stats.get_all(),
        'pantsd_stats': self.pantsd_stats.get_all(),
        'workunits': self.json_reporter.results,
      }
    else:
      return {
        'run_info': self.run_information(),
        'cumulative_timings': self.cumulative_timings.get_all(),
        'self_timings': self.self_timings.get_all(),
        'critical_path_timings': self.get_critical_path_timings().get_all(),
        'artifact_cache_stats': self.artifact_cache_stats.get_all(),
        'pantsd_stats': self.pantsd_stats.get_all(),
        'outcomes': self.outcomes,
        'recorded_options': self._get_options_to_record(),
      }

  def store_stats(self):
    """Store stats about this run in local and optionally remote stats dbs."""
    stats = self._stats()

    # Write stats to user-defined json file.
    stats_json_file_name = self.get_options().stats_local_json_file
    if stats_json_file_name:
      self.write_stats_to_json(stats_json_file_name, stats)

    # Upload to remote stats db.
    stats_upload_urls = copy.copy(self.get_options().stats_upload_urls)
    timeout = self.get_options().stats_upload_timeout
    for stats_url, auth_provider in stats_upload_urls.items():
      self.post_stats(stats_url, stats, timeout=timeout, auth_provider=auth_provider)

  _log_levels = [Report.ERROR, Report.ERROR, Report.WARN, Report.INFO, Report.INFO]
Example 6: RunTracker
# Required import: from pants.goal.artifact_cache_stats import ArtifactCacheStats [as alias]
# Or: from pants.goal.artifact_cache_stats.ArtifactCacheStats import get_all [as alias]
# ... part of the code omitted here ...
    Task code should not typically call this directly.
    """
    workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=parent, name=name, labels=labels, cmd=cmd)
    workunit.start()
    try:
      self.report.start_workunit(workunit)
      yield workunit
    except KeyboardInterrupt:
      workunit.set_outcome(WorkUnit.ABORTED)
      self._aborted = True
      raise
    except:
      workunit.set_outcome(WorkUnit.FAILURE)
      raise
    else:
      workunit.set_outcome(WorkUnit.SUCCESS)
    finally:
      self.end_workunit(workunit)

  def log(self, level, *msg_elements):
    """Log a message against the current workunit."""
    self.report.log(self._threadlocal.current_workunit, level, *msg_elements)

  def upload_stats(self):
    """Send timing results to URL specified in pants.ini"""
    def error(msg):
      # Report already closed, so just print error.
      print("WARNING: Failed to upload stats to {} due to {}".format(self.stats_url, msg), file=sys.stderr)

    if self.stats_url:
      params = {
        'run_info': json.dumps(self.run_info.get_as_dict()),
        'cumulative_timings': json.dumps(self.cumulative_timings.get_all()),
        'self_timings': json.dumps(self.self_timings.get_all()),
        'artifact_cache_stats': json.dumps(self.artifact_cache_stats.get_all())
      }
      headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
      url = urlparse(self.stats_url)
      try:
        if url.scheme == 'https':
          http_conn = httplib.HTTPSConnection(url.netloc, timeout=self.stats_timeout)
        else:
          http_conn = httplib.HTTPConnection(url.netloc, timeout=self.stats_timeout)
        http_conn.request('POST', url.path, urllib.urlencode(params), headers)
        resp = http_conn.getresponse()
        if resp.status != 200:
          error("HTTP error code: {}".format(resp.status))
      except Exception as e:
        error("Error: {}".format(e))

  _log_levels = [Report.ERROR, Report.ERROR, Report.WARN, Report.INFO, Report.INFO]

  def end(self):
    """This pants run is over, so stop tracking it.

    Note: If end() has been called once, subsequent calls are no-ops.
    """
    if self._background_worker_pool:
      if self._aborted:
        self.log(Report.INFO, "Aborting background workers.")
        self._background_worker_pool.abort()
      else:
        self.log(Report.INFO, "Waiting for background workers to finish.")