

Python tensorflow.init_scope Method Code Examples

This article collects and summarizes typical usage examples of the tensorflow.init_scope method in Python. If you are wondering what exactly tensorflow.init_scope does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the tensorflow module that this method belongs to.


A total of 13 code examples of the tensorflow.init_scope method are shown below, sorted by popularity by default.
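Before turning to the examples, a quick orientation: tf.init_scope is a context manager that lifts operations out of whatever graph or tf.function trace is currently being built, so that one-time setup such as variable creation runs eagerly (or in the outermost graph) instead of being re-created on every trace. The following minimal sketch illustrates the pattern; it is not taken from any of the projects below, and the counter variable and increment function are purely illustrative:

import tensorflow as tf

counter = None

@tf.function
def increment():
    global counter
    if counter is None:
        # Lift variable creation out of the traced graph so the variable
        # is created once, eagerly, rather than inside the function graph.
        with tf.init_scope():
            counter = tf.Variable(0, dtype=tf.int32)
    return counter.assign_add(1)

print(increment().numpy())  # 1
print(increment().numpy())  # 2

The real-world examples that follow apply the same idea to variables, Keras metrics, summary writers, checkpoint initializers, and constants that must outlive the function graph under construction.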

Example 1: tf_times

# Required import: import tensorflow [as alias]
# Or: from tensorflow import init_scope [as alias]
def tf_times():
    """Returns (time since start, time since last) as a tensorflow op."""
    # Keep track of start and last times
    with tf.init_scope():
        init = tf.timestamp()

    def make(name):
        return tf.Variable(init, name=name, trainable=False, use_resource=True)

    start = make('start_time')
    last = make('last_time')

    # Get new time and update last
    now = tf.timestamp()
    prev = last.read_value()
    with tf.control_dependencies([prev]):
        with tf.control_dependencies([last.assign(now)]):
            return tf.cast(now - start.read_value(), tf.float32), tf.cast(now - prev, tf.float32) 
Developer: openai, Project: lm-human-preferences, Lines: 20, Source: train_policy.py

Example 2: update_state

# Required import: import tensorflow [as alias]
# Or: from tensorflow import init_scope [as alias]
def update_state(self, values, sample_weight=None):
        values = tf.cast(values, self.values_dtype)

        if not self.built:
            with tf.name_scope(self.name), tf.init_scope():
                self.build(values.shape)

        unchanged_values = tf.math.count_nonzero(
            tf.equal(self._previous_values, values)
        )
        flip_ratio = 1 - (
            tf.cast(unchanged_values, self.dtype) / tf.cast(self._size, self.dtype)
        )

        update_total_op = self.total.assign_add(flip_ratio * tf.sign(self.count))
        with tf.control_dependencies([update_total_op]):
            update_count_op = self.count.assign_add(1)
            with tf.control_dependencies([update_count_op]):
                return self._previous_values.assign(values) 
Developer: larq, Project: larq, Lines: 21, Source: metrics.py

Example 3: _clone_metrics

# Required import: import tensorflow [as alias]
# Or: from tensorflow import init_scope [as alias]
def _clone_metrics(metrics):
  """Creates a copy of the maybe-nested metric specification.

  Args:
    metrics: A collection of metric specifications. Supports the same set of
      formats as the `metrics` argument in `tf.keras.Model.compile`.

  Returns:
    The same format as the `metrics` argument, with all `tf.keras.metric.Metric`
    objects replaced by their copies.
  """

  def clone(metric):
    # A `Metric` object is stateful and can only be used in 1 model on 1 output.
    # Cloning the object allows the same metric to be applied in both base and
    # adversarial-regularized models, and also on multiple outputs in one model.
    # The cloning logic is the same as the `clone_metric` function in
    # https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/keras/metrics.py
    if not isinstance(metric, tf.keras.metrics.Metric):
      return metric
    with tf.init_scope():
      return metric.__class__.from_config(metric.get_config())

  return tf.nest.map_structure(clone, metrics) 
Developer: tensorflow, Project: neural-structured-learning, Lines: 26, Source: adversarial_regularization.py

Example 4: __call__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import init_scope [as alias]
def __call__(self):
        with tf.init_scope():
            if self.mode == 'interleave':
                return next(self.cycled_masks)
            elif self.mode == 'merged_head':
                # avoid re-computation
                if self.merged_head is None:
                    nL = self.masks[0].shape[0]
                    self.merged_head = tf.ones((nL, nL), dtype=tf.int32)
                    for mask in self.masks:
                        self.merged_head = self.merged_head * mask
                return self.merged_head
            elif self.mode == 'heads':
                return np.array(self.masks)
            else:
                raise ValueError('Not supported attention mode') 
Developer: giannisdaras, Project: ylg, Lines: 18, Source: masks.py

Example 5: get_global_variables_safely

# Required import: import tensorflow [as alias]
# Or: from tensorflow import init_scope [as alias]
def get_global_variables_safely():
  """If not executing eagerly, returns tf.global_variables().

  Raises a ValueError if eager execution is enabled,
  because the variables are not tracked when executing eagerly.

  If executing eagerly, use a Keras model's .variables property instead.

  Returns:
    The result of tf.global_variables()
  """
  with tf.init_scope():
    if tf.executing_eagerly():
      raise ValueError("Global variables collection is not tracked when "
                       "executing eagerly. Use a Keras model's `.variables` "
                       "attribute instead.")
  return tf.global_variables() 
Developer: ShivangShekhar, Project: Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, Lines: 19, Source: variables_helper.py

Example 6: get_summary_writer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import init_scope [as alias]
def get_summary_writer(save_dir, subdir='', comm=MPI.COMM_WORLD):
    if comm.Get_rank() != 0:
        return None
    if save_dir is None:
        return None
    with tf.init_scope():
        return summary.create_file_writer(os.path.join(save_dir, 'tb', subdir)) 
Developer: openai, Project: lm-human-preferences, Lines: 9, Source: core.py

Example 7: _set_initializers

# Required import: import tensorflow [as alias]
# Or: from tensorflow import init_scope [as alias]
def _set_initializers(self):
        """Change initializers to load a language model from a tensorflow checkpoint."""
        # Skip if
        # 1. We're not rank 0.  Values will be copied from there.
        # 2. We want random initialization.  Normal initialization will do the work.
        if not self.is_root or self.trained_model.name == 'test':
            return

        with tf.init_scope():
            scope = self.scope.name

            # Initialize!
            params = {v.op.name: v for v in utils.find_trainable_variables(scope)}
            self.trained_model.init_op(params, new_scope=scope) 
Developer: openai, Project: lm-human-preferences, Lines: 16, Source: policy.py

Example 8: _set_initializers

# Required import: import tensorflow [as alias]
# Or: from tensorflow import init_scope [as alias]
def _set_initializers(self):
        """Change initializers to load a language model from a tensorflow checkpoint."""
        # Skip if
        # 1. We're not rank 0.  Values will be copied from there.
        # 2. We want random initialization.  Normal initialization will do the work.
        if not self.is_root or self.trained_model.name == 'test':
            return

        with tf.init_scope():
            # Initialize!
            params = {v.op.name: v for v in utils.find_trainable_variables(self.scope)}
            assert params
            self.trained_model.init_op(params, new_scope=self.scope) 
Developer: openai, Project: lm-human-preferences, Lines: 15, Source: rewards.py

Example 9: apply_gradients

# Required import: import tensorflow [as alias]
# Or: from tensorflow import init_scope [as alias]
def apply_gradients(self, grads_and_vars, name: Optional[str] = None, **kwargs):
        """Apply gradients to variables for each optimizer.

        On the first call to `apply_gradients()`, compute the mapping from variables to
        optimizers and cache it in the `self.var_opt_mapping` dict for serialization and
        faster access.
        """

        if self.var_opt_mapping is None:
            # Convert `grads_and_vars` to list so we can iterate multiple times over it
            grads_and_vars = list(grads_and_vars)
            self._compute_var_opt_mapping(grads_and_vars)

        # Split gradients and variables into a separate list for each optimizer
        grad_var_lists = [[] for _ in range(len(self.pred_opt_pairs) + 1)]
        for grad, var in grads_and_vars:
            if var.name in self.var_opt_mapping:
                grad_var_lists[self.var_opt_mapping[var.name]].append((grad, var))

        with tf.init_scope():
            for optimizer, opt_grads_and_vars in zip(self.optimizers, grad_var_lists):
                optimizer._create_slots([v for (_, v) in grads_and_vars])

        return tf.distribute.get_replica_context().merge_call(
            self._apply_gradients, args=(grad_var_lists, name), kwargs=kwargs
        ) 
Developer: larq, Project: larq, Lines: 28, Source: optimizers.py

Example 10: test_load_save_eager

# Required import: import tensorflow [as alias]
# Or: from tensorflow import init_scope [as alias]
def test_load_save_eager(self):
        import tensorflow as tf
        tf.enable_eager_execution()
        from delira.io.tf import load_checkpoint_eager, save_checkpoint_eager
        from delira.models import AbstractTfEagerNetwork

        import numpy as np

        class DummyNetwork(AbstractTfEagerNetwork):
            def __init__(self, in_channels, n_outputs):
                super().__init__(in_channels=in_channels, n_outputs=n_outputs)
                with tf.init_scope():
                    self.net = self._build_model(in_channels, n_outputs)

            @staticmethod
            def _build_model(in_channels, n_outputs):
                return tf.keras.models.Sequential(
                    layers=[
                        tf.keras.layers.Dense(
                            64,
                            input_shape=in_channels,
                            bias_initializer='glorot_uniform'),
                        tf.keras.layers.ReLU(),
                        tf.keras.layers.Dense(
                            n_outputs,
                            bias_initializer='glorot_uniform')])

            def call(self, inputs):
                return self.net(inputs)

        net = DummyNetwork((32,), 1)
        input_tensor = tf.constant(np.random.rand(1, 32).astype(np.float32))
        result_pre_save = net(input_tensor)
        save_checkpoint_eager("./model_eager", model=net)

        loaded_state = load_checkpoint_eager("./model_eager", model=net)
        loaded_net = loaded_state["model"]

        result_post_save = loaded_net(input_tensor)

        self.assertTrue(np.array_equal(result_post_save, result_pre_save)) 
Developer: delira-dev, Project: delira, Lines: 43, Source: test_tf.py

Example 11: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import init_scope [as alias]
def __init__(self, spec, meta_graph, trainable, checkpoint_path, name):
    """Private constructor.

    Args:
      spec: _ModuleSpec instance.
      meta_graph: MetaGraphDef to use
      trainable: whether module is trainable.
      checkpoint_path: None or a string to the variables checkpoints.
      name: variable and scope name where to instantiate the Module. Must be an
        unused name scope.
    """
    self._spec = spec
    self._meta_graph = meta_graph
    self._trainable = trainable
    self._checkpoint_path = checkpoint_path

    register_ops_if_needed({
        op.name for op in self._meta_graph.meta_info_def.stripped_op_list.op})

    if _is_tpu_graph_function():
      # TODO(b/129142908): Hub should not use `tf.init_scope` since that makes
      # it incompatible with tf.compat.v1.wrap_function. For now the only use
      # case where hub used it was for tpu compatibility. This should be cleaned
      # up at an early convenience.
      scope_func = tf.init_scope
    else:
      scope_func = lambda: tf.control_dependencies(None)

    # Clear dependencies so modules can be constructed from deep inside
    # functions that have dependencies active. Note that the dependencies
    # would be active when applying the Module signature, just not active
    # when creating the Module state. This use case has showed up in some
    # TPU training code.
    with scope_func():
      self._init_state(name) 
Developer: tensorflow, Project: hub, Lines: 37, Source: native_module.py

Example 12: _create_optimizer

# Required import: import tensorflow [as alias]
# Or: from tensorflow import init_scope [as alias]
def _create_optimizer(self):
    """Initializes the hyperparameters and sets the self._optimizer property."""
    if self._optimizer:
      return
    if not self._layer_collection:
      self.register_layers(self._model, self._loss)

    if self._config['adapt_damping']:
      if 'train_batch' not in self._kfac_kwargs:
        raise ValueError('Must provide a train_batch tuple to use adaptive '
                         'damping. Use register_train_batch or pass it in '
                         'during optimizer construction.')
      if 'loss_fn' not in self._kfac_kwargs:
        self._kfac_kwargs['loss_fn'] = utils.get_loss_fn(
            self._model, self._loss, loss_weights=self._config['loss_weights'])

    with tf.name_scope(self._name):
      with tf.init_scope():
        # "iterations" property will create iterations if necessary.
        _ = self.iterations
        self._create_hypers()

    self._kfac_kwargs.update(self._hyper)
    try:
      # We use the TF 1 variable_scope instead of the TF 2 recommended
      # name_scope because we need to recover the variables created in this
      # scope, which is not possible with name_scope.
      with tf.variable_scope(self._tf_var_scope):
        self._optimizer = _KFAC_OPT_CLASS(
            layer_collection=self._layer_collection, **self._kfac_kwargs)
    except ValueError as e:
      msg = str(e)
      if re.search('Variable .* already exists', msg):
        raise ValueError(
            'You may have instantiated a KFAC Optimizer with the same name as '
            'an existing one. Try resetting the default graph, instantiating '
            'the optimizer with a different name, or changing the optimizer\'s '
            'name.\nHere is the original ValueError:\n ' + msg)
      elif re.search('Found the following errors with variable registration'
                     '.*gamma.*registered with wrong number of uses.*', msg):
        # We don't regex the name batch_normalization because the user could
        # have renamed the layer. We don't regex beta because they could have
        # used BatchNorm without the shift.
        raise ValueError(
            'There may have been an issue registering BatchNormalization. Try '
            'using tf.keras.backend.set_learning_phase before model '
            'construction. An alternative solution is to use the unfused '
            'batchnorm implementation (pass the argument fused=False to '
            'BatchNormalization).\nHere is the original ValueError:\n ' + msg)
      else:
        raise e 
Developer: tensorflow, Project: kfac, Lines: 53, Source: optimizers.py

Example 13: _generate

# Required import: import tensorflow [as alias]
# Or: from tensorflow import init_scope [as alias]
def _generate(self, feature_map_shape_list):
    """Generates a collection of bounding boxes to be used as anchors.

    Args:
      feature_map_shape_list: list of pairs of convnet layer resolutions in the
        format [(height_0, width_0)].  For example, setting
        feature_map_shape_list=[(8, 8)] asks for anchors that correspond
        to an 8x8 layer.  For this anchor generator, only lists of length 1 are
        allowed.

    Returns:
      boxes_list: a list of BoxLists each holding anchor boxes corresponding to
        the input feature map shapes.

    Raises:
      ValueError: if feature_map_shape_list, box_specs_list do not have the same
        length.
      ValueError: if feature_map_shape_list does not consist of pairs of
        integers
    """
    if not (isinstance(feature_map_shape_list, list)
            and len(feature_map_shape_list) == 1):
      raise ValueError('feature_map_shape_list must be a list of length 1.')
    if not all([isinstance(list_item, tuple) and len(list_item) == 2
                for list_item in feature_map_shape_list]):
      raise ValueError('feature_map_shape_list must be a list of pairs.')

    # Create constants in init_scope so they can be created in tf.functions
    # and accessed from outside of the function.
    with tf.init_scope():
      self._base_anchor_size = tf.cast(tf.convert_to_tensor(
          self._base_anchor_size), dtype=tf.float32)
      self._anchor_stride = tf.cast(tf.convert_to_tensor(
          self._anchor_stride), dtype=tf.float32)
      self._anchor_offset = tf.cast(tf.convert_to_tensor(
          self._anchor_offset), dtype=tf.float32)

    grid_height, grid_width = feature_map_shape_list[0]
    scales_grid, aspect_ratios_grid = ops.meshgrid(self._scales,
                                                   self._aspect_ratios)
    scales_grid = tf.reshape(scales_grid, [-1])
    aspect_ratios_grid = tf.reshape(aspect_ratios_grid, [-1])
    anchors = tile_anchors(grid_height,
                           grid_width,
                           scales_grid,
                           aspect_ratios_grid,
                           self._base_anchor_size,
                           self._anchor_stride,
                           self._anchor_offset)

    num_anchors = anchors.num_boxes_static()
    if num_anchors is None:
      num_anchors = anchors.num_boxes()
    anchor_indices = tf.zeros([num_anchors])
    anchors.add_field('feature_map_index', anchor_indices)
    return [anchors] 
Developer: ShivangShekhar, Project: Live-feed-object-device-identification-using-Tensorflow-and-OpenCV, Lines: 58, Source: grid_anchor_generator.py


Note: The tensorflow.init_scope examples in this article were compiled by 纯净天空 from GitHub/MSDocs and other open-source code and documentation platforms. The code snippets are selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors, and its distribution and use are governed by the corresponding projects' licenses. Do not reproduce without permission.