

Python artifact_cache_stats.ArtifactCacheStats class: code examples

This article collects typical usage examples of the Python class pants.goal.artifact_cache_stats.ArtifactCacheStats. If you are wondering what ArtifactCacheStats is for, or how it is used in practice, the curated class examples below should help.


The following presents 10 code examples of the ArtifactCacheStats class, sorted by popularity by default.
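Before the examples, here is a minimal usage sketch of the class surface as it appears in the snippets below: ArtifactCacheStats is constructed with a directory under which it keeps per-cache hit/miss files (see Example 1), and get_all() returns the aggregated stats. Everything else in this sketch (the temporary directory, the print call) is illustrative only and not taken from the original examples.

import tempfile

from pants.goal.artifact_cache_stats import ArtifactCacheStats

# Back the stats object with a scratch directory; the per-cache hit/miss
# files end up under this directory (see Example 1 below).
stats_dir = tempfile.mkdtemp()
artifact_cache_stats = ArtifactCacheStats(stats_dir)

# get_all() returns the hit/miss stats accumulated so far (presumably an
# empty collection for a fresh instance).
print(artifact_cache_stats.get_all())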

Example 1: mock_artifact_cache_stats

  # Test helper: yields a fresh ArtifactCacheStats backed by a temporary dir,
  # then, once the caller's block finishes, asserts the expected aggregate
  # stats and the expected per-cache hit/miss files on disk. (The yield
  # implies the original test wraps this method with @contextmanager.)
  def mock_artifact_cache_stats(self,
                                expected_stats,
                                expected_hit_or_miss_files=None):
    with temporary_dir() as tmp_dir:
      artifact_cache_stats = ArtifactCacheStats(tmp_dir)
      yield artifact_cache_stats
      self.assertEquals(expected_stats, artifact_cache_stats.get_all())

      self.assertEquals(sorted(list(expected_hit_or_miss_files.keys())),
                        sorted(os.listdir(tmp_dir)))
      for hit_or_miss_file in expected_hit_or_miss_files.keys():
        with open(os.path.join(tmp_dir, hit_or_miss_file)) as hit_or_miss_saved:
          self.assertEquals(expected_hit_or_miss_files[hit_or_miss_file], hit_or_miss_saved.read())
Developer: foursquare, Project: pants, Lines: 13, Source: test_artifact_cache_stats.py
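Since the helper above is a context manager (see the note in the snippet), a hypothetical call site might look like the following; the argument values and the block body are placeholders, not taken from the original test:

with self.mock_artifact_cache_stats(expected_stats,
                                    expected_hit_or_miss_files) as stats:
  ...  # exercise code that records hits/misses against `stats`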

Example 2: __init__

  def __init__(self,
               info_dir,
               stats_upload_url=None,
               num_foreground_workers=8,
               num_background_workers=8):
    self.run_timestamp = time.time()  # A double, so we get subsecond precision for ids.
    cmd_line = ' '.join(['./pants'] + sys.argv[1:])

    # run_id is safe for use in paths.
    millis = (self.run_timestamp * 1000) % 1000
    run_id = 'pants_run_%s_%d' % \
             (time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(self.run_timestamp)), millis)

    self.info_dir = os.path.join(info_dir, run_id)
    self.run_info = RunInfo(os.path.join(self.info_dir, 'info'))
    self.run_info.add_basic_info(run_id, self.run_timestamp)
    self.run_info.add_info('cmd_line', cmd_line)
    self.stats_url = stats_upload_url

    # Create a 'latest' symlink, after we add_infos, so we're guaranteed that the file exists.
    link_to_latest = os.path.join(os.path.dirname(self.info_dir), 'latest')
    if os.path.exists(link_to_latest):
      os.unlink(link_to_latest)
    os.symlink(self.info_dir, link_to_latest)

    # Time spent in a workunit, including its children.
    self.cumulative_timings = AggregatedTimings(os.path.join(self.info_dir, 'cumulative_timings'))

    # Time spent in a workunit, not including its children.
    self.self_timings = AggregatedTimings(os.path.join(self.info_dir, 'self_timings'))

    # Hit/miss stats for the artifact cache.
    self.artifact_cache_stats = \
      ArtifactCacheStats(os.path.join(self.info_dir, 'artifact_cache_stats'))

    # Number of threads for foreground work.
    self._num_foreground_workers = num_foreground_workers

    # Number of threads for background work.
    self._num_background_workers = num_background_workers

    # We report to this Report.
    self.report = None

    # self._threadlocal.current_workunit contains the current workunit for the calling thread.
    # Note that multiple threads may share a name (e.g., all the threads in a pool).
    self._threadlocal = threading.local()

    # For main thread work. Created on start().
    self._main_root_workunit = None

    # For concurrent foreground work.  Created lazily if needed.
    # Associated with the main thread's root workunit.
    self._foreground_worker_pool = None

    # For background work.  Created lazily if needed.
    self._background_worker_pool = None
    self._background_root_workunit = None

    self._aborted = False
Developer: govindkabra, Project: pants, Lines: 60, Source: run_tracker.py

Example 3: __init__

  def __init__(self, *args, **kwargs):
    super(RunTracker, self).__init__(*args, **kwargs)
    run_timestamp = time.time()
    cmd_line = ' '.join(['pants'] + sys.argv[1:])

    # run_id is safe for use in paths.
    millis = int((run_timestamp * 1000) % 1000)
    run_id = 'pants_run_{}_{}_{}'.format(
               time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(run_timestamp)), millis,
               uuid.uuid4().hex)

    info_dir = os.path.join(self.get_options().pants_workdir, self.options_scope)
    self.run_info_dir = os.path.join(info_dir, run_id)
    self.run_info = RunInfo(os.path.join(self.run_info_dir, 'info'))
    self.run_info.add_basic_info(run_id, run_timestamp)
    self.run_info.add_info('cmd_line', cmd_line)

    # Create a 'latest' symlink, after we add_infos, so we're guaranteed that the file exists.
    link_to_latest = os.path.join(os.path.dirname(self.run_info_dir), 'latest')

    relative_symlink(self.run_info_dir, link_to_latest)

    # Time spent in a workunit, including its children.
    self.cumulative_timings = AggregatedTimings(os.path.join(self.run_info_dir,
                                                             'cumulative_timings'))

    # Time spent in a workunit, not including its children.
    self.self_timings = AggregatedTimings(os.path.join(self.run_info_dir, 'self_timings'))

    # Hit/miss stats for the artifact cache.
    self.artifact_cache_stats = \
      ArtifactCacheStats(os.path.join(self.run_info_dir, 'artifact_cache_stats'))

    # Number of threads for foreground work.
    self._num_foreground_workers = self.get_options().num_foreground_workers

    # Number of threads for background work.
    self._num_background_workers = self.get_options().num_background_workers

    # We report to this Report.
    self.report = None

    # self._threadlocal.current_workunit contains the current workunit for the calling thread.
    # Note that multiple threads may share a name (e.g., all the threads in a pool).
    self._threadlocal = threading.local()

    # For main thread work. Created on start().
    self._main_root_workunit = None

    # For background work.  Created lazily if needed.
    self._background_worker_pool = None
    self._background_root_workunit = None

    # Trigger subproc pool init while our memory image is still clean (see SubprocPool docstring).
    SubprocPool.set_num_processes(self._num_foreground_workers)
    SubprocPool.foreground()

    self._aborted = False
Developer: cburroughs, Project: pants, Lines: 58, Source: run_tracker.py

Example 4: initialize

  def initialize(self, all_options):
    """Create run_info and relevant directories, and return the run id.

    Must be called before `start`.
    """
    if self.run_info:
      raise AssertionError('RunTracker.initialize must not be called multiple times.')

    # Initialize the run.

    # Select a globally unique ID for the run, that sorts by time.
    millis = int((self._run_timestamp * 1000) % 1000)
    # run_uuid is used as a part of run_id and also as a trace_id for Zipkin tracing
    run_uuid = uuid.uuid4().hex
    run_id = 'pants_run_{}_{}_{}'.format(
      time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(self._run_timestamp)),
      millis,
      run_uuid
    )

    info_dir = os.path.join(self.get_options().pants_workdir, self.options_scope)
    self.run_info_dir = os.path.join(info_dir, run_id)
    self.run_info = RunInfo(os.path.join(self.run_info_dir, 'info'))
    self.run_info.add_basic_info(run_id, self._run_timestamp)
    self.run_info.add_info('cmd_line', self._cmd_line)

    # Create a 'latest' symlink, after we add_infos, so we're guaranteed that the file exists.
    link_to_latest = os.path.join(os.path.dirname(self.run_info_dir), 'latest')

    relative_symlink(self.run_info_dir, link_to_latest)

    # Time spent in a workunit, including its children.
    self.cumulative_timings = AggregatedTimings(os.path.join(self.run_info_dir,
                                                             'cumulative_timings'))

    # Time spent in a workunit, not including its children.
    self.self_timings = AggregatedTimings(os.path.join(self.run_info_dir, 'self_timings'))

    # Hit/miss stats for the artifact cache.
    self.artifact_cache_stats = ArtifactCacheStats(os.path.join(self.run_info_dir,
                                                                'artifact_cache_stats'))

    # Daemon stats.
    self.pantsd_stats = PantsDaemonStats()

    self._all_options = all_options

    return (run_id, run_uuid)
Developer: cosmicexplorer, Project: pants, Lines: 48, Source: run_tracker.py

Example 5: RunTracker

class RunTracker(Subsystem):
  """Tracks and times the execution of a pants run.

  Also manages background work.

  Use like this:

  run_tracker.start()
  with run_tracker.new_workunit('compile'):
    with run_tracker.new_workunit('java'):
      ...
    with run_tracker.new_workunit('scala'):
      ...
  run_tracker.close()

  Can track execution against multiple 'roots', e.g., one for the main thread and another for
  background threads.
  """
  options_scope = 'run-tracker'

  # The name of the tracking root for the main thread (and the foreground worker threads).
  DEFAULT_ROOT_NAME = 'main'

  # The name of the tracking root for the background worker threads.
  BACKGROUND_ROOT_NAME = 'background'

  @classmethod
  def subsystem_dependencies(cls):
    return (StatsDBFactory,)

  @classmethod
  def register_options(cls, register):
    register('--stats-upload-url', advanced=True, default=None,
             help='Upload stats to this URL on run completion.')
    register('--stats-upload-timeout', advanced=True, type=int, default=2,
             help='Wait at most this many seconds for the stats upload to complete.')
    register('--num-foreground-workers', advanced=True, type=int, default=8,
             help='Number of threads for foreground work.')
    register('--num-background-workers', advanced=True, type=int, default=8,
             help='Number of threads for background work.')

  def __init__(self, *args, **kwargs):
    super(RunTracker, self).__init__(*args, **kwargs)
    run_timestamp = time.time()
    cmd_line = ' '.join(['pants'] + sys.argv[1:])

    # run_id is safe for use in paths.
    millis = int((run_timestamp * 1000) % 1000)
    run_id = 'pants_run_{}_{}_{}'.format(
               time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(run_timestamp)), millis,
               uuid.uuid4().hex)

    info_dir = os.path.join(self.get_options().pants_workdir, self.options_scope)
    self.run_info_dir = os.path.join(info_dir, run_id)
    self.run_info = RunInfo(os.path.join(self.run_info_dir, 'info'))
    self.run_info.add_basic_info(run_id, run_timestamp)
    self.run_info.add_info('cmd_line', cmd_line)

    # Create a 'latest' symlink, after we add_infos, so we're guaranteed that the file exists.
    link_to_latest = os.path.join(os.path.dirname(self.run_info_dir), 'latest')

    relative_symlink(self.run_info_dir, link_to_latest)

    # Time spent in a workunit, including its children.
    self.cumulative_timings = AggregatedTimings(os.path.join(self.run_info_dir,
                                                             'cumulative_timings'))

    # Time spent in a workunit, not including its children.
    self.self_timings = AggregatedTimings(os.path.join(self.run_info_dir, 'self_timings'))

    # Hit/miss stats for the artifact cache.
    self.artifact_cache_stats = \
      ArtifactCacheStats(os.path.join(self.run_info_dir, 'artifact_cache_stats'))

    # Number of threads for foreground work.
    self._num_foreground_workers = self.get_options().num_foreground_workers

    # Number of threads for background work.
    self._num_background_workers = self.get_options().num_background_workers

    # We report to this Report.
    self.report = None

    # self._threadlocal.current_workunit contains the current workunit for the calling thread.
    # Note that multiple threads may share a name (e.g., all the threads in a pool).
    self._threadlocal = threading.local()

    # For main thread work. Created on start().
    self._main_root_workunit = None

    # For background work.  Created lazily if needed.
    self._background_worker_pool = None
    self._background_root_workunit = None

    # Trigger subproc pool init while our memory image is still clean (see SubprocPool docstring).
    SubprocPool.foreground()

    self._aborted = False

  def register_thread(self, parent_workunit):
#......... some code omitted here .........
Developer: megaserg, Project: pants, Lines: 101, Source: run_tracker.py

Example 6: RunTracker

class RunTracker(object):
  """Tracks and times the execution of a pants run.

  Also manages background work.

  Use like this:

  run_tracker.start()
  with run_tracker.new_workunit('compile'):
    with run_tracker.new_workunit('java'):
      ...
    with run_tracker.new_workunit('scala'):
      ...
  run_tracker.close()

  Can track execution against multiple 'roots', e.g., one for the main thread and another for
  background threads.
  """

  # The name of the tracking root for the main thread (and the foreground worker threads).
  DEFAULT_ROOT_NAME = 'main'

  # The name of the tracking root for the background worker threads.
  BACKGROUND_ROOT_NAME = 'background'

  @classmethod
  def from_config(cls, config):
    if not isinstance(config, Config):
      raise ValueError('Expected a Config object, given %s of type %s' % (config, type(config)))
    info_dir = RunInfo.dir(config)
    stats_upload_url = config.getdefault('stats_upload_url', default=None)
    num_foreground_workers = config.getdefault('num_foreground_workers', default=8)
    num_background_workers = config.getdefault('num_background_workers', default=8)
    return cls(info_dir,
               stats_upload_url=stats_upload_url,
               num_foreground_workers=num_foreground_workers,
               num_background_workers=num_background_workers)

  def __init__(self,
               info_dir,
               stats_upload_url=None,
               num_foreground_workers=8,
               num_background_workers=8):
    self.run_timestamp = time.time()  # A double, so we get subsecond precision for ids.
    cmd_line = ' '.join(['./pants'] + sys.argv[1:])

    # run_id is safe for use in paths.
    millis = (self.run_timestamp * 1000) % 1000
    run_id = 'pants_run_%s_%d' % \
             (time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(self.run_timestamp)), millis)

    self.info_dir = os.path.join(info_dir, run_id)
    self.run_info = RunInfo(os.path.join(self.info_dir, 'info'))
    self.run_info.add_basic_info(run_id, self.run_timestamp)
    self.run_info.add_info('cmd_line', cmd_line)
    self.stats_url = stats_upload_url

    # Create a 'latest' symlink, after we add_infos, so we're guaranteed that the file exists.
    link_to_latest = os.path.join(os.path.dirname(self.info_dir), 'latest')
    if os.path.exists(link_to_latest):
      os.unlink(link_to_latest)
    os.symlink(self.info_dir, link_to_latest)

    # Time spent in a workunit, including its children.
    self.cumulative_timings = AggregatedTimings(os.path.join(self.info_dir, 'cumulative_timings'))

    # Time spent in a workunit, not including its children.
    self.self_timings = AggregatedTimings(os.path.join(self.info_dir, 'self_timings'))

    # Hit/miss stats for the artifact cache.
    self.artifact_cache_stats = \
      ArtifactCacheStats(os.path.join(self.info_dir, 'artifact_cache_stats'))

    # Number of threads for foreground work.
    self._num_foreground_workers = num_foreground_workers

    # Number of threads for background work.
    self._num_background_workers = num_background_workers

    # We report to this Report.
    self.report = None

    # self._threadlocal.current_workunit contains the current workunit for the calling thread.
    # Note that multiple threads may share a name (e.g., all the threads in a pool).
    self._threadlocal = threading.local()

    # For main thread work. Created on start().
    self._main_root_workunit = None

    # For concurrent foreground work.  Created lazily if needed.
    # Associated with the main thread's root workunit.
    self._foreground_worker_pool = None

    # For background work.  Created lazily if needed.
    self._background_worker_pool = None
    self._background_root_workunit = None

    self._aborted = False

  def register_thread(self, parent_workunit):
#......... some code omitted here .........
Developer: govindkabra, Project: pants, Lines: 101, Source: run_tracker.py

Example 7: RunTracker


#......... some code omitted here .........

    Must be called before `start`.
    """
    if self.run_info:
      raise AssertionError('RunTracker.initialize must not be called multiple times.')

    # Initialize the run.
    millis = int((self._run_timestamp * 1000) % 1000)
    run_id = 'pants_run_{}_{}_{}'.format(
      time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(self._run_timestamp)),
      millis,
      uuid.uuid4().hex
    )

    info_dir = os.path.join(self.get_options().pants_workdir, self.options_scope)
    self.run_info_dir = os.path.join(info_dir, run_id)
    self.run_info = RunInfo(os.path.join(self.run_info_dir, 'info'))
    self.run_info.add_basic_info(run_id, self._run_timestamp)
    self.run_info.add_info('cmd_line', self._cmd_line)

    # Create a 'latest' symlink, after we add_infos, so we're guaranteed that the file exists.
    link_to_latest = os.path.join(os.path.dirname(self.run_info_dir), 'latest')

    relative_symlink(self.run_info_dir, link_to_latest)

    # Time spent in a workunit, including its children.
    self.cumulative_timings = AggregatedTimings(os.path.join(self.run_info_dir,
                                                             'cumulative_timings'))

    # Time spent in a workunit, not including its children.
    self.self_timings = AggregatedTimings(os.path.join(self.run_info_dir, 'self_timings'))

    # Hit/miss stats for the artifact cache.
    self.artifact_cache_stats = ArtifactCacheStats(os.path.join(self.run_info_dir,
                                                                'artifact_cache_stats'))

    # Daemon stats.
    self.pantsd_stats = PantsDaemonStats()

    return run_id

  def start(self, report, run_start_time=None):
    """Start tracking this pants run using the given Report.

    `RunTracker.initialize` must have been called first to create the run_info_dir and
    run_info. TODO: This lifecycle represents a delicate dance with the `Reporting.initialize`
    method, and portions of the `RunTracker` should likely move to `Reporting` instead.

    report: an instance of pants.reporting.Report.
    """
    if not self.run_info:
      raise AssertionError('RunTracker.initialize must be called before RunTracker.start.')

    self.report = report
    self.report.open()

    # And create the workunit.
    self._main_root_workunit = WorkUnit(run_info_dir=self.run_info_dir, parent=None,
                                        name=RunTracker.DEFAULT_ROOT_NAME, cmd=None)
    self.register_thread(self._main_root_workunit)
    # Set the true start time in the case of e.g. the daemon.
    self._main_root_workunit.start(run_start_time)
    self.report.start_workunit(self._main_root_workunit)

    # Log reporting details.
    url = self.run_info.get_info('report_url')
Developer: baroquebobcat, Project: pants, Lines: 67, Source: run_tracker.py

Example 8: RunTracker


#......... some code omitted here .........
      raise AssertionError('RunTracker.initialize must not be called multiple times.')

    # Initialize the run.

    # Select a globally unique ID for the run, that sorts by time.
    millis = int((self._run_timestamp * 1000) % 1000)
    # run_uuid is used as a part of run_id and also as a trace_id for Zipkin tracing
    run_uuid = uuid.uuid4().hex
    run_id = 'pants_run_{}_{}_{}'.format(
      time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(self._run_timestamp)),
      millis,
      run_uuid
    )

    info_dir = os.path.join(self.get_options().pants_workdir, self.options_scope)
    self.run_info_dir = os.path.join(info_dir, run_id)
    self.run_info = RunInfo(os.path.join(self.run_info_dir, 'info'))
    self.run_info.add_basic_info(run_id, self._run_timestamp)
    self.run_info.add_info('cmd_line', self._cmd_line)

    # Create a 'latest' symlink, after we add_infos, so we're guaranteed that the file exists.
    link_to_latest = os.path.join(os.path.dirname(self.run_info_dir), 'latest')

    relative_symlink(self.run_info_dir, link_to_latest)

    # Time spent in a workunit, including its children.
    self.cumulative_timings = AggregatedTimings(os.path.join(self.run_info_dir,
                                                             'cumulative_timings'))

    # Time spent in a workunit, not including its children.
    self.self_timings = AggregatedTimings(os.path.join(self.run_info_dir, 'self_timings'))

    # Hit/miss stats for the artifact cache.
    self.artifact_cache_stats = ArtifactCacheStats(os.path.join(self.run_info_dir,
                                                                'artifact_cache_stats'))

    # Daemon stats.
    self.pantsd_stats = PantsDaemonStats()

    self._all_options = all_options

    return (run_id, run_uuid)

  def start(self, report, run_start_time=None):
    """Start tracking this pants run using the given Report.

    `RunTracker.initialize` must have been called first to create the run_info_dir and
    run_info. TODO: This lifecycle represents a delicate dance with the `Reporting.initialize`
    method, and portions of the `RunTracker` should likely move to `Reporting` instead.

    report: an instance of pants.reporting.Report.
    """
    if not self.run_info:
      raise AssertionError('RunTracker.initialize must be called before RunTracker.start.')

    self.report = report

    # Set up the JsonReporter for V2 stats.
    if self.get_options().stats_version == 2:
      json_reporter_settings = JsonReporter.Settings(log_level=Report.INFO)
      self.json_reporter = JsonReporter(self, json_reporter_settings)
      report.add_reporter('json', self.json_reporter)

    self.report.open()

    # And create the workunit.
Developer: cosmicexplorer, Project: pants, Lines: 67, Source: run_tracker.py

Example 9: __init__

  def __init__(self, *args, **kwargs):
    super(RunTracker, self).__init__(*args, **kwargs)
    self.run_timestamp = time.time()  # A double, so we get subsecond precision for ids.
    cmd_line = ' '.join(['./pants'] + sys.argv[1:])

    # run_id is safe for use in paths.
    millis = int((self.run_timestamp * 1000) % 1000)
    run_id = 'pants_run_{}_{}'.format(
               time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(self.run_timestamp)), millis)

    info_dir = os.path.join(self.get_options().pants_workdir, self.options_scope)
    self.run_info_dir = os.path.join(info_dir, run_id)
    self.run_info = RunInfo(os.path.join(self.run_info_dir, 'info'))
    self.run_info.add_basic_info(run_id, self.run_timestamp)
    self.run_info.add_info('cmd_line', cmd_line)
    self.stats_url = self.get_options().stats_upload_url
    self.stats_timeout = self.get_options().stats_upload_timeout

    # Create a 'latest' symlink, after we add_infos, so we're guaranteed that the file exists.
    link_to_latest = os.path.join(os.path.dirname(self.run_info_dir), 'latest')

    try:
      if os.path.lexists(link_to_latest):
        os.unlink(link_to_latest)
      os.symlink(self.run_info_dir, link_to_latest)
    except OSError as e:
      # Another run may beat us to deletion or creation.
      if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):
        raise

    # Time spent in a workunit, including its children.
    self.cumulative_timings = AggregatedTimings(os.path.join(self.run_info_dir,
                                                             'cumulative_timings'))

    # Time spent in a workunit, not including its children.
    self.self_timings = AggregatedTimings(os.path.join(self.run_info_dir, 'self_timings'))

    # Hit/miss stats for the artifact cache.
    self.artifact_cache_stats = \
      ArtifactCacheStats(os.path.join(self.run_info_dir, 'artifact_cache_stats'))

    # Number of threads for foreground work.
    self._num_foreground_workers = self.get_options().num_foreground_workers

    # Number of threads for background work.
    self._num_background_workers = self.get_options().num_background_workers

    # We report to this Report.
    self.report = None

    # self._threadlocal.current_workunit contains the current workunit for the calling thread.
    # Note that multiple threads may share a name (e.g., all the threads in a pool).
    self._threadlocal = threading.local()

    # For main thread work. Created on start().
    self._main_root_workunit = None

    # For background work.  Created lazily if needed.
    self._background_worker_pool = None
    self._background_root_workunit = None

    # Trigger subproc pool init while our memory image is still clean (see SubprocPool docstring)
    SubprocPool.foreground()

    self._aborted = False
Developer: MathewJennings, Project: pants, Lines: 65, Source: run_tracker.py

Example 10: RunTracker

class RunTracker(Subsystem):
  """Tracks and times the execution of a pants run.

  Also manages background work.

  Use like this:

  run_tracker.start()
  with run_tracker.new_workunit('compile'):
    with run_tracker.new_workunit('java'):
      ...
    with run_tracker.new_workunit('scala'):
      ...
  run_tracker.close()

  Can track execution against multiple 'roots', e.g., one for the main thread and another for
  background threads.
  """
  @classmethod
  def scope_qualifier(cls):
    return 'run-tracker'

  # The name of the tracking root for the main thread (and the foreground worker threads).
  DEFAULT_ROOT_NAME = 'main'

  # The name of the tracking root for the background worker threads.
  BACKGROUND_ROOT_NAME = 'background'

  @classmethod
  def register_options(cls, register):
    register('--stats-upload-url', advanced=True, default=None,
             help='Upload stats to this URL on run completion.')
    register('--stats-upload-timeout', advanced=True, type=int, default=2,
             help='Wait at most this many seconds for the stats upload to complete.')
    register('--num-foreground-workers', advanced=True, type=int, default=8,
             help='Number of threads for foreground work.')
    register('--num-background-workers', advanced=True, type=int, default=8,
             help='Number of threads for background work.')

  def __init__(self, *args, **kwargs):
    super(RunTracker, self).__init__(*args, **kwargs)
    self.run_timestamp = time.time()  # A double, so we get subsecond precision for ids.
    cmd_line = ' '.join(['./pants'] + sys.argv[1:])

    # run_id is safe for use in paths.
    millis = int((self.run_timestamp * 1000) % 1000)
    run_id = 'pants_run_{}_{}'.format(
               time.strftime('%Y_%m_%d_%H_%M_%S', time.localtime(self.run_timestamp)), millis)

    info_dir = os.path.join(self.get_options().pants_workdir, self.options_scope)
    self.run_info_dir = os.path.join(info_dir, run_id)
    self.run_info = RunInfo(os.path.join(self.run_info_dir, 'info'))
    self.run_info.add_basic_info(run_id, self.run_timestamp)
    self.run_info.add_info('cmd_line', cmd_line)
    self.stats_url = self.get_options().stats_upload_url
    self.stats_timeout = self.get_options().stats_upload_timeout

    # Create a 'latest' symlink, after we add_infos, so we're guaranteed that the file exists.
    link_to_latest = os.path.join(os.path.dirname(self.run_info_dir), 'latest')

    try:
      if os.path.lexists(link_to_latest):
        os.unlink(link_to_latest)
      os.symlink(self.run_info_dir, link_to_latest)
    except OSError as e:
      # Another run may beat us to deletion or creation.
      if not (e.errno == errno.EEXIST or e.errno == errno.ENOENT):
        raise

    # Time spent in a workunit, including its children.
    self.cumulative_timings = AggregatedTimings(os.path.join(self.run_info_dir,
                                                             'cumulative_timings'))

    # Time spent in a workunit, not including its children.
    self.self_timings = AggregatedTimings(os.path.join(self.run_info_dir, 'self_timings'))

    # Hit/miss stats for the artifact cache.
    self.artifact_cache_stats = \
      ArtifactCacheStats(os.path.join(self.run_info_dir, 'artifact_cache_stats'))

    # Number of threads for foreground work.
    self._num_foreground_workers = self.get_options().num_foreground_workers

    # Number of threads for background work.
    self._num_background_workers = self.get_options().num_background_workers

    # We report to this Report.
    self.report = None

    # self._threadlocal.current_workunit contains the current workunit for the calling thread.
    # Note that multiple threads may share a name (e.g., all the threads in a pool).
    self._threadlocal = threading.local()

    # For main thread work. Created on start().
    self._main_root_workunit = None

    # For background work.  Created lazily if needed.
    self._background_worker_pool = None
    self._background_root_workunit = None

#......... some code omitted here .........
Developer: MathewJennings, Project: pants, Lines: 101, Source: run_tracker.py


Note: The pants.goal.artifact_cache_stats.ArtifactCacheStats class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets come from open-source projects contributed by their authors, and copyright remains with the original authors; consult each project's license before redistributing or using the code, and do not republish without permission.