

Python v1.Variable Method Code Examples

This article collects typical usage examples of the tensorflow.compat.v1.Variable method in Python. If you are wondering how to use v1.Variable, what it does, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from its containing module, tensorflow.compat.v1.


The following presents 15 code examples of the v1.Variable method, ordered by popularity by default.
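Before the examples collected from open-source projects, here is a minimal, self-contained sketch (not taken from any of the projects below) of the basic graph-mode tf.compat.v1.Variable workflow they all rely on: create a variable, initialize it in a session, and update it with tf.assign. It assumes TensorFlow 2.x with v1 compatibility behavior enabled.

# Minimal sketch of the tf.compat.v1.Variable workflow (assumes TensorFlow 2.x
# with v2 behavior disabled, so the examples below run in graph mode).
import tensorflow.compat.v1 as tf
tf.disable_v2_behavior()

counter = tf.Variable(0, name='counter')       # graph-mode variable
increment = tf.assign(counter, counter + 1)    # op that updates it in place

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())  # variables must be initialized
  sess.run(increment)
  print(sess.run(counter))                     # -> 1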

Example 1: testAppendGradientsWithLossScaleForNonChiefWorker

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def testAppendGradientsWithLossScaleForNonChiefWorker(self):
    v = tf.Variable(0)
    training_ops = []
    get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
    loss_scale_params = variable_mgr_util.AutoLossScaleParams(
        enable_auto_loss_scale=True,
        loss_scale=tf.Variable(4),
        loss_scale_normal_steps=tf.Variable(10),
        inc_loss_scale_every_n=10,
        is_chief=False)  # Non-chief
    variable_mgr_util.append_gradients_with_loss_scale(
        training_ops,
        get_apply_gradients_ops_func,
        loss_scale_params,
        grad_has_inf_nan=False)

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(training_ops)
      self.assertEqual(sess.run(v), 1)
      self.assertEqual(sess.run(loss_scale_params.loss_scale), 4)
      self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 10) 
Developer: tensorflow, Project: benchmarks, Lines of code: 24, Source: variable_mgr_util_test.py

Example 2: testAppendGradientsWithLossScaleWithoutNan

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def testAppendGradientsWithLossScaleWithoutNan(self):
    v = tf.Variable(0)
    training_ops = []
    get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
    loss_scale_params = variable_mgr_util.AutoLossScaleParams(
        enable_auto_loss_scale=True,
        loss_scale=tf.Variable(4, dtype=tf.float32),
        loss_scale_normal_steps=tf.Variable(10),
        inc_loss_scale_every_n=10,
        is_chief=True)
    variable_mgr_util.append_gradients_with_loss_scale(
        training_ops,
        get_apply_gradients_ops_func,
        loss_scale_params,
        grad_has_inf_nan=tf.constant(False))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(training_ops)
      self.assertEqual(sess.run(v), 1)
      self.assertEqual(sess.run(loss_scale_params.loss_scale), 8)
      self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 0) 
Developer: tensorflow, Project: benchmarks, Lines of code: 24, Source: variable_mgr_util_test.py

Example 3: testAppendGradientsWithLossScaleWithtNan

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def testAppendGradientsWithLossScaleWithtNan(self):
    v = tf.Variable(0)
    training_ops = []
    get_apply_gradients_ops_func = lambda: [tf.assign(v, v + 1)]
    loss_scale_params = variable_mgr_util.AutoLossScaleParams(
        enable_auto_loss_scale=True,
        loss_scale=tf.Variable(4, dtype=tf.float32),
        loss_scale_normal_steps=tf.Variable(10),
        inc_loss_scale_every_n=10,
        is_chief=True)
    variable_mgr_util.append_gradients_with_loss_scale(
        training_ops,
        get_apply_gradients_ops_func,
        loss_scale_params,
        grad_has_inf_nan=tf.constant(True))

    with self.test_session() as sess:
      sess.run(tf.global_variables_initializer())
      sess.run(training_ops)
      self.assertEqual(sess.run(v), 0)  # Skip updating for v.
      # Halve loss_scale and reset loss_scale_normal_steps.
      self.assertEqual(sess.run(loss_scale_params.loss_scale), 2)
      self.assertEqual(sess.run(loss_scale_params.loss_scale_normal_steps), 0) 
Developer: tensorflow, Project: benchmarks, Lines of code: 25, Source: variable_mgr_util_test.py

Example 4: get_synthetic_inputs

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def get_synthetic_inputs(self, input_name, nclass):
    """Returns the ops to generate synthetic inputs and labels."""
    def users_init_val():
      return tf.random_uniform((self.batch_size, 1), minval=0,
                               maxval=_NUM_USERS_20M, dtype=tf.int32)
    users = tf.Variable(users_init_val, dtype=tf.int32, trainable=False,
                        collections=[tf.GraphKeys.LOCAL_VARIABLES],
                        name='synthetic_users')
    def items_init_val():
      return tf.random_uniform((self.batch_size, 1), minval=0,
                               maxval=_NUM_ITEMS_20M, dtype=tf.int32)
    items = tf.Variable(items_init_val, dtype=tf.int32, trainable=False,
                        collections=[tf.GraphKeys.LOCAL_VARIABLES],
                        name='synthetic_items')

    def labels_init_val():
      return tf.random_uniform((self.batch_size,), minval=0, maxval=2,
                               dtype=tf.int32)
    labels = tf.Variable(labels_init_val, dtype=tf.int32, trainable=False,
                         collections=[tf.GraphKeys.LOCAL_VARIABLES],
                         name='synthetic_labels')

    return [users, items, labels] 
Developer: tensorflow, Project: benchmarks, Lines of code: 25, Source: official_ncf_model.py

Example 5: _fp16_variable_creator

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def _fp16_variable_creator(next_creator, **kwargs):
  """Variable creator to create variables in fp32 and cast them to fp16."""
  dtype = kwargs.get('dtype', None)
  initial_value = kwargs.get('initial_value', None)
  if dtype is None:
    if initial_value is not None and not callable(initial_value):
      dtype = initial_value.dtype
  if dtype == tf.float16:
    if callable(initial_value):
      new_initial_value = lambda: tf.cast(initial_value(), tf.float32)
    else:
      new_initial_value = tf.cast(initial_value, tf.float32)
    kwargs['dtype'] = tf.float32
    kwargs['initial_value'] = new_initial_value
    var = next_creator(**kwargs)
    return tf.cast(var, dtype=tf.float16)
  else:
    return next_creator(**kwargs) 
Developer: tensorflow, Project: benchmarks, Lines of code: 20, Source: official_ncf_model.py
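A creator function like _fp16_variable_creator is normally installed with tf.variable_creator_scope, so that every tf.Variable created inside the scope passes through it. The following is an illustrative usage sketch, not code from official_ncf_model.py; the variable name 'w' is hypothetical.

# Illustrative usage sketch (not from the source project): inside the scope,
# an fp16 variable request is stored as fp32 and returned as a cast fp16 read.
with tf.variable_creator_scope(_fp16_variable_creator):
  w = tf.Variable(tf.zeros([4, 4], dtype=tf.float16), name='w')
print(w.dtype)  # -> float16, backed by an fp32 variable under the hood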

Example 6: __init__

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def __init__(self, tensors):
    tensors = list(tensors)
    with tf.variable_scope('averaged'):
      self._num_samples = tf.Variable(0, name='num_samples', trainable=False)
      with tf.variable_scope('avg'):
        self._averages = [
            tf.get_variable(
                tensor.name.replace('/', '-').replace(':', '-'),
                tensor.get_shape(), initializer=tf.zeros_initializer(),
                trainable=False)
            for tensor in tensors]
      with tf.variable_scope('save'):
        self._saves = [
            tf.get_variable(
                tensor.name.replace('/', '-').replace(':', '-'),
                tensor.get_shape(), initializer=tf.zeros_initializer(),
                trainable=False)
            for tensor in tensors]
    self._tensors = tensors
    self._take_sample = self._make_take_sample()
    self._switch = self._make_swith_to_average()
    self._restore = self._make_restore()
    self._reset = self._make_reset() 
Developer: deepmind, Project: lamb, Lines of code: 25, Source: averaged.py

Example 7: underlying_variable_ref

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def underlying_variable_ref(t):
  """Find the underlying variable ref.

  Traverses through Identity, ReadVariableOp, and Enter ops.
  Stops when op type has Variable or VarHandle in name.

  Args:
    t: a Tensor

  Returns:
    a Tensor that is a variable ref, or None on error.
  """
  while t.op.type in ["Identity", "ReadVariableOp", "Enter"]:
    t = t.op.inputs[0]

  op_type = t.op.type
  if "Variable" in op_type or "VarHandle" in op_type:
    return t
  else:
    return None 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 22, Source: common_layers.py
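As a hedged illustration of the behaviour described in the docstring (this usage is not part of common_layers.py): reading a variable in graph mode yields Identity/ReadVariableOp tensors, and underlying_variable_ref walks back through them to the variable's own op.

# Illustrative sketch: trace a read tensor back to its variable ref.
# Assumes graph mode, as in the intro example at the top of this page.
g = tf.Graph()
with g.as_default():
  v = tf.Variable(1.0, name='v')
  read = tf.identity(v)                # Identity op over the variable read
  ref = underlying_variable_ref(read)
  print(ref.op.type)                   # 'VariableV2' or 'VarHandleOp'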

Example 8: underlying_variable

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def underlying_variable(t):
  """Find the underlying tf.Variable object.

  Args:
    t: a Tensor

  Returns:
    tf.Variable.
  """
  t = underlying_variable_ref(t)
  assert t is not None
  # make sure that the graph has a variable index and that it is up-to-date
  if not hasattr(tf.get_default_graph(), "var_index"):
    tf.get_default_graph().var_index = {}
  var_index = tf.get_default_graph().var_index
  for v in tf.global_variables()[len(var_index):]:
    var_index[v.name] = v
  return var_index[t.name] 
Developer: tensorflow, Project: tensor2tensor, Lines of code: 20, Source: common_layers.py

Example 9: testFlopRegularizerDontConvertToVariable

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def testFlopRegularizerDontConvertToVariable(self):
    tf.reset_default_graph()
    tf.set_random_seed(1234)

    x = tf.constant(1.0, shape=[2, 6], name='x', dtype=tf.float32)
    w = tf.Variable(tf.truncated_normal([6, 4], stddev=1.0), use_resource=True)
    net = tf.matmul(x, w)

    # Create FLOPs network regularizer.
    threshold = 0.9
    flop_reg = flop_regularizer.GroupLassoFlopsRegularizer([net.op], threshold,
                                                           0)

    with self.cached_session():
      tf.global_variables_initializer().run()
      flop_reg.get_regularization_term().eval() 
Developer: google-research, Project: morph-net, Lines of code: 18, Source: flop_regularizer_test.py

Example 10: local_variable

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def local_variable(initial_value,
                   validate_shape=True,
                   name=None,
                   use_resource=None):
  """Create a variable with a value and add it to `GraphKeys.LOCAL_VARIABLES`.

  Args:
    initial_value: See variables.Variable.__init__.
    validate_shape: See variables.Variable.__init__.
    name: See variables.Variable.__init__.
    use_resource: If `True` use a ResourceVariable instead of a Variable.

  Returns:
    New variable.
  """
  return variable_scope.variable(
      initial_value,
      trainable=False,
      collections=[ops.GraphKeys.LOCAL_VARIABLES],
      validate_shape=validate_shape,
      use_resource=use_resource,
      name=name) 
Developer: google-research, Project: tf-slim, Lines of code: 24, Source: variables.py
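A hedged usage sketch (not from tf-slim itself): a variable created through local_variable is reported by tf.local_variables() rather than tf.global_variables(), so it is initialized with tf.local_variables_initializer(). The variable name 'examples_seen' is hypothetical.

# Illustrative sketch: the new variable lands in GraphKeys.LOCAL_VARIABLES.
counter = local_variable(0, name='examples_seen')
print([v.op.name for v in tf.local_variables()])   # includes 'examples_seen'
print([v.op.name for v in tf.global_variables()])  # does not include it

with tf.Session() as sess:
  sess.run(tf.local_variables_initializer())
  print(sess.run(counter))  # -> 0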

Example 11: global_variable

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def global_variable(initial_value,
                    validate_shape=True,
                    name=None,
                    use_resource=None):
  """Create a variable with a value and add it to `GraphKeys.GLOBAL_VARIABLES`.

  Args:
    initial_value: See variables.Variable.__init__.
    validate_shape: See variables.Variable.__init__.
    name: See variables.Variable.__init__.
    use_resource: If `True` use a ResourceVariable instead of a Variable.

  Returns:
    New variable.
  """
  return variable_scope.variable(
      initial_value,
      trainable=False,
      collections=[ops.GraphKeys.GLOBAL_VARIABLES],
      validate_shape=validate_shape,
      use_resource=use_resource,
      name=name) 
Developer: google-research, Project: tf-slim, Lines of code: 24, Source: variables.py

Example 12: get_unique_variable

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def get_unique_variable(var_op_name):
  """Gets the variable uniquely identified by that var_op_name.

  Args:
    var_op_name: the full name of the variable op, including the scope.

  Returns:
    a tensorflow variable.

  Raises:
    ValueError: if no variable uniquely identified by the name exists.
  """
  candidates = get_variables(scope=var_op_name)
  if not candidates:
    raise ValueError('Couldn\'t find variable %s' % var_op_name)

  for candidate in candidates:
    if candidate.op.name == var_op_name:
      return candidate
  raise ValueError('Variable %s does not uniquely identify a variable' %
                   var_op_name) 
Developer: google-research, Project: tf-slim, Lines of code: 23, Source: variables.py

Example 13: get_variable_full_name

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def get_variable_full_name(var):
  """Returns the full name of a variable.

  For normal Variables, this is the same as the var.op.name.  For
  sliced or PartitionedVariables, this name is the same for all the
  slices/partitions. In both cases, this is normally the name used in
  a checkpoint file.

  Args:
    var: A `Variable` object.

  Returns:
    A string that is the full name.
  """
  if var._save_slice_info:
    return var._save_slice_info.full_name
  else:
    return var.op.name 
Developer: google-research, Project: tf-slim, Lines of code: 20, Source: variables.py

Example 14: setup_optimizer

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def setup_optimizer(self):
    """Instantiates learning rate, decay op and train_op among others."""
    # If not training, don't need to add optimizer to the graph.
    if not self.is_training:
      self.train_op = tf.no_op()
      self.learning_rate = tf.no_op()
      return

    self.learning_rate = tf.Variable(
        self.hparams.learning_rate,
        name='learning_rate',
        trainable=False,
        dtype=tf.float32)

    # FIXME 0.5 -> hparams.decay_rate
    self.decay_op = tf.assign(self.learning_rate, 0.5 * self.learning_rate)
    self.optimizer = tf.train.AdamOptimizer(learning_rate=self.learning_rate)
    self.train_op = self.optimizer.minimize(self.loss) 
Developer: magenta, Project: magenta, Lines of code: 20, Source: lib_graph.py

Example 15: _variable_with_weight_decay

# Required module import: from tensorflow.compat import v1 [as alias]
# Or: from tensorflow.compat.v1 import Variable [as alias]
def _variable_with_weight_decay(name, shape, stddev, wd):
  """Helper to create an initialized Variable with weight decay.

  Note that the Variable is initialized with a truncated normal distribution.
  A weight decay is added only if one is specified.

  Args:
    name: name of the variable
    shape: list of ints
    stddev: standard deviation of a truncated Gaussian
    wd: add L2Loss weight decay multiplied by this float. If None, weight
        decay is not added for this Variable.

  Returns:
    Variable Tensor
  """
  var = _variable_on_cpu(name, shape,
                         tf.truncated_normal_initializer(stddev=stddev))
  if wd is not None:
    weight_decay = tf.multiply(tf.nn.l2_loss(var), wd, name='weight_loss')
    tf.add_to_collection('losses', weight_decay)
  return var 
Developer: tensorflow, Project: privacy, Lines of code: 24, Source: deep_cnn.py
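The _variable_on_cpu helper is referenced above but not shown in this snippet. A minimal hypothetical stand-in consistent with how it is called (an assumption, not the project's actual implementation) would pin variable creation to the CPU with tf.get_variable, after which _variable_with_weight_decay can build, for example, a convolution kernel with L2 regularization:

# Hypothetical stand-in for the _variable_on_cpu helper referenced above
# (the real implementation lives elsewhere in deep_cnn.py).
def _variable_on_cpu(name, shape, initializer):
  """Creates a variable placed on the CPU with the given initializer."""
  with tf.device('/cpu:0'):
    return tf.get_variable(name, shape, initializer=initializer,
                           dtype=tf.float32)

# Example usage: a 5x5, 3-channel, 64-filter conv kernel whose L2 penalty is
# added to the 'losses' collection via tf.add_to_collection above.
kernel = _variable_with_weight_decay('weights', shape=[5, 5, 3, 64],
                                     stddev=5e-2, wd=0.004)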


Note: The tensorflow.compat.v1.Variable method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors. Please refer to the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.