当前位置: 首页>>代码示例>>Python>>正文


Python tensorflow.timestamp方法代码示例

本文整理汇总了Python中tensorflow.timestamp方法的典型用法代码示例。如果您正苦于以下问题:Python tensorflow.timestamp方法的具体用法?Python tensorflow.timestamp怎么用?Python tensorflow.timestamp使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在tensorflow的用法示例。


在下文中一共展示了tensorflow.timestamp方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。

示例1: tf_times

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import timestamp [as 别名]
def tf_times():
    """Returns (time since start, time since last) as a tensorflow op."""
    # Capture one initial timestamp in the outermost scope so that both
    # timing variables are created with the same starting value.
    with tf.init_scope():
        initial_time = tf.timestamp()

    def _time_var(name):
        # Resource variable holding a float64 timestamp; excluded from training.
        return tf.Variable(
            initial_time, name=name, trainable=False, use_resource=True)

    start_time = _time_var('start_time')
    last_time = _time_var('last_time')

    # Take the current time, and read the previous value BEFORE overwriting
    # it, so "time since last" measures the gap between consecutive calls.
    now = tf.timestamp()
    previous = last_time.read_value()
    with tf.control_dependencies([previous]):
        with tf.control_dependencies([last_time.assign(now)]):
            since_start = tf.cast(now - start_time.read_value(), tf.float32)
            since_last = tf.cast(now - previous, tf.float32)
            return since_start, since_last
开发者ID:openai,项目名称:lm-human-preferences,代码行数:20,代码来源:train_policy.py

示例2: begin

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import timestamp [as 别名]
def begin(self):
    """Builds the timing ops (end, start, step, total) under the hook scope."""
    with tf.name_scope(_SCOPE_NAME):
      # See _get_or_create_timing_vars for the definitions of these variables.
      timing_vars = _get_or_create_timing_vars()

      # Latest timestamp, converted to the internal integer time units.
      self._end_op = _seconds_to_internal_time(tf.timestamp(name='end'))

      def _record_start():
        # First step: remember the current timestamp as the start time.
        return timing_vars.start_timestamp.assign(self._end_op)

      def _keep_start():
        # Later steps: leave the recorded start timestamp untouched.
        return timing_vars.start_timestamp

      is_first_step = tf.equal(timing_vars.steps, 0)
      self._start_op = tf.cond(
          pred=is_first_step, true_fn=_record_start, false_fn=_keep_start)

      # Only advance the step counter after the start timestamp is resolved.
      with tf.control_dependencies([self._start_op]):
        self._step_op = timing_vars.steps.assign_add(1)

      # Total time = time carried over from earlier runs plus the elapsed
      # time of this run (end - start, converted back to seconds).
      elapsed = _internal_time_to_seconds(self._end_op - self._start_op)
      self._total_op = timing_vars.total_time.assign(
          timing_vars.previous_time + elapsed)
开发者ID:google-research,项目名称:nasbench,代码行数:24,代码来源:training_time.py

示例3: log_deferred

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import timestamp [as 别名]
def log_deferred(op, log_id, every_n=1, first_n=None):
  """Helper method inserting compliance logging ops.

  Note: This helper is not guaranteed to be efficient, as it will insert ops
        and control dependencies. If this proves to be a bottleneck, submitters
        may wish to consider other methods such as extracting values from an
        .events file.

  Args:
    op: A tf op to be printed.
    log_id: a uuid provided by the logger in mlperf_log.py
    every_n: Log the op once every `every_n` executions.
    first_n: Only log this many values. This arg does not interact with every_n.
             The first_n refers to the first n that would have been logged.

  Returns:
    A tf op that prints `op` (prefixed with the compliance tag) at the
    requested cadence, and otherwise passes `op` through unchanged.
  """

  prefix = ":::MLPv0.5.0 [{}]".format(log_id)
  # Bug fix: the original condition was `not first_n is not None and ...`,
  # which parses as `(first_n is None) and (first_n == 1)` — always False —
  # so this single-print fast path was unreachable.
  if first_n is not None and first_n == 1:
    return tf.compat.v1.Print(op, [tf.timestamp(), op], message=prefix,
                              first_n=1)

  # Start at -1 so the very first execution (increment -> 0) is logged.
  counter = tf.Variable(tf.zeros(shape=(), dtype=tf.int32) - 1,
                        aggregation=tf.VariableAggregation.MEAN)
  increment = tf.compat.v1.assign_add(counter, 1, use_locking=True)
  return tf.cond(
      pred=tf.equal(tf.math.mod(increment, every_n), 0),
      true_fn=lambda: tf.compat.v1.Print(op, [tf.timestamp(), op],
                                         message=prefix, first_n=first_n),
      false_fn=lambda: op
  )
开发者ID:IntelAI,项目名称:models,代码行数:32,代码来源:tf_mlperf_log.py

示例4: log_deferred

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import timestamp [as 别名]
def log_deferred(op, log_id, every_n=1, first_n=None):
  """Helper method inserting compliance logging ops.

  Note: This helper is not guaranteed to be efficient, as it will insert ops
        and control dependencies. If this proves to be a bottleneck, submitters
        may wish to consider other methods such as extracting values from an
        .events file.

  Args:
    op: A tf op to be printed.
    log_id: a uuid provided by the logger in mlperf_log.py
    every_n: Log the op once every `every_n` executions.
    first_n: Only log this many values. This arg does not interact with every_n.
             The first_n refers to the first n that would have been logged.

  Returns:
    A tf op that prints `op` (prefixed with the compliance tag) at the
    requested cadence, and otherwise passes `op` through unchanged.
  """

  prefix = ":::MLPv0.5.0 [{}]".format(log_id)
  # Bug fix: the original condition was `not first_n is not None and ...`,
  # which parses as `(first_n is None) and (first_n == 1)` — always False —
  # so this single-print fast path was unreachable.
  if first_n is not None and first_n == 1:
    return tf.Print(op, [tf.timestamp(), op], message=prefix, first_n=1)

  # Start at -1 so the very first execution (increment -> 0) is logged.
  counter = tf.Variable(tf.zeros(shape=(), dtype=tf.int32) - 1,
                        aggregation=tf.VariableAggregation.MEAN)
  increment = tf.assign_add(counter, 1, use_locking=True)
  return tf.cond(
      tf.equal(tf.mod(increment, every_n), 0),
      lambda: tf.Print(op, [tf.timestamp(), op], message=prefix,
                       first_n=first_n),
      lambda: op
  )
开发者ID:mlperf,项目名称:training,代码行数:32,代码来源:tf_mlperf_log.py

示例5: get_iterative_process_for_example_with_unused_tf_computation_arg

# 需要导入模块: import tensorflow [as 别名]
# 或者: from tensorflow import timestamp [as 别名]
def get_iterative_process_for_example_with_unused_tf_computation_arg():
  """Returns an iterative process with a @tf.function with an unused arg.

  The process's `next` maps a tf.function whose single argument is ignored
  (see `_bind_tf_function`) over federated values, producing computations
  with an unused parameter — presumably for testing how TFF transformations
  handle such computations (TODO confirm against callers).
  """
  # Server state is a single-field named tuple: ('num_clients', int32).
  server_state_type = computation_types.NamedTupleType([('num_clients',
                                                         tf.int32)])

  def _bind_tf_function(unused_input, tf_func):
    # Wrap `tf_func` so it formally accepts (and ignores) one argument; this
    # is what creates the tf_computation with an unused parameter.
    tf_wrapper = tf.function(lambda _: tf_func())
    input_federated_type = unused_input.type_signature
    # Build the tf_computation against the member (per-placement) type of
    # the federated input, then map it over that input.
    wrapper = computations.tf_computation(tf_wrapper,
                                          input_federated_type.member)
    return intrinsics.federated_map(wrapper, unused_input)

  def count_clients_federated(client_data):
    # Each client contributes a scalar 1; summing yields the client count.

    @tf.function
    def client_ones_fn():
      return tf.ones(shape=[], dtype=tf.int32)

    client_ones = _bind_tf_function(client_data, client_ones_fn)
    return intrinsics.federated_sum(client_ones)

  @computations.federated_computation
  def init_fn():
    # Initial server state: zero clients seen.
    return intrinsics.federated_value(
        collections.OrderedDict(num_clients=0), placements.SERVER)

  @computations.federated_computation([
      computation_types.FederatedType(server_state_type, placements.SERVER),
      computation_types.FederatedType(
          computation_types.SequenceType(tf.string), placements.CLIENTS)
  ])
  def next_fn(server_state, client_val):
    """`next` function for `tff.templates.IterativeProcess`."""
    server_update = intrinsics.federated_zip(
        collections.OrderedDict(
            num_clients=count_clients_federated(client_val)))

    # The first assignment is immediately overwritten; the output is the sum
    # of per-server `tf.timestamp` values produced via the unused-arg binding.
    server_output = intrinsics.federated_value((), placements.SERVER)
    server_output = intrinsics.federated_sum(
        _bind_tf_function(
            intrinsics.federated_broadcast(server_state), tf.timestamp))

    return server_update, server_output

  return iterative_process.IterativeProcess(init_fn, next_fn)
开发者ID:tensorflow,项目名称:federated,代码行数:47,代码来源:test_utils.py


注:本文中的tensorflow.timestamp方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。