

Python variable_scope.get_variable method code examples

This article collects typical usage examples of the Python method tensorflow.python.ops.variable_scope.get_variable. If you are unsure what variable_scope.get_variable does or how to call it, the curated examples below should help. You can also browse further usage examples from its module, tensorflow.python.ops.variable_scope.


Fifteen code examples of the variable_scope.get_variable method are shown below, sorted by popularity by default.
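Before the examples, here is a minimal sketch of the basic pattern (assuming TensorFlow 1.x graph mode; the scope and variable names are illustrative, not taken from the examples below): get_variable creates a variable on its first use inside a scope and, with reuse=True, returns the same variable on a later call.

from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope

# First call creates the variable "demo/w".
with variable_scope.variable_scope("demo"):
  w = variable_scope.get_variable(
      "w", shape=[4, 2], initializer=init_ops.zeros_initializer())

# Re-entering the scope with reuse=True returns the same variable object.
with variable_scope.variable_scope("demo", reuse=True):
  w_again = variable_scope.get_variable("w", shape=[4, 2])

assert w is w_again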

Example 1: _create_dense_column_weighted_sum

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def _create_dense_column_weighted_sum(
    column, builder, units, weight_collections, trainable):
  """Create a weighted sum of a dense column for linear_model."""
  tensor = column._get_dense_tensor(  # pylint: disable=protected-access
      builder,
      weight_collections=weight_collections,
      trainable=trainable)
  num_elements = column._variable_shape.num_elements()  # pylint: disable=protected-access
  batch_size = array_ops.shape(tensor)[0]
  tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
  weight = variable_scope.get_variable(
      name='weights',
      shape=[num_elements, units],
      initializer=init_ops.zeros_initializer(),
      trainable=trainable,
      collections=weight_collections)
  return math_ops.matmul(tensor, weight, name='weighted_sum') 
Developer: ryfeus, Project: lambda-packs, Lines: 19, Source: feature_column.py

Example 2: _get_sparse_tensors

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def _get_sparse_tensors(self,
                          inputs,
                          weight_collections=None,
                          trainable=None):
    """Returns an IdWeightPair.

    `IdWeightPair` is a pair of `SparseTensor`s which represents ids and
    weights.

    `IdWeightPair.id_tensor` is typically a `batch_size` x `num_buckets`
    `SparseTensor` of `int64`. `IdWeightPair.weight_tensor` is either a
    `SparseTensor` of `float` or `None` to indicate all weights should be
    taken to be 1. If specified, `weight_tensor` must have exactly the same
    shape and indices as `sp_ids`. Expected `SparseTensor` is same as parsing
    output of a `VarLenFeature` which is a ragged matrix.

    Args:
      inputs: A `LazyBuilder` as a cache to get input tensors required to
        create `IdWeightPair`.
      weight_collections: List of graph collections to which variables (if any
        will be created) are added.
      trainable: If `True` also add variables to the graph collection
        `GraphKeys.TRAINABLE_VARIABLES` (see ${tf.get_variable}).
    """
    pass 
Developer: ryfeus, Project: lambda-packs, Lines: 27, Source: feature_column.py

Example 3: _get_or_create_eval_step

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def _get_or_create_eval_step():
  """Gets or creates the eval step `Tensor`.

  Returns:
    A `Tensor` representing a counter for the evaluation step.

  Raises:
    ValueError: If multiple `Tensors` have been added to the
      `tf.GraphKeys.EVAL_STEP` collection.
  """
  graph = ops.get_default_graph()
  eval_steps = graph.get_collection(ops.GraphKeys.EVAL_STEP)
  if len(eval_steps) == 1:
    return eval_steps[0]
  elif len(eval_steps) > 1:
    raise ValueError('Multiple tensors added to tf.GraphKeys.EVAL_STEP')
  else:
    counter = variable_scope.get_variable(
        'eval_step',
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=[ops.GraphKeys.LOCAL_VARIABLES, ops.GraphKeys.EVAL_STEP])
    return counter 
Developer: ryfeus, Project: lambda-packs, Lines: 27, Source: evaluation.py
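A short usage sketch of the get-or-create pattern above (assuming TensorFlow 1.x graph mode and the evaluation.py imports): because the variable is also added to the tf.GraphKeys.EVAL_STEP collection, a second call finds it there and returns the same object.

step_a = _get_or_create_eval_step()  # creates 'eval_step'
step_b = _get_or_create_eval_step()  # finds it in the EVAL_STEP collection
assert step_a is step_b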

Example 4: create_global_step

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def create_global_step(graph=None):
  """Create global step tensor in graph.

  Args:
    graph: The graph in which to create the global step tensor. If missing,
      use default graph.

  Returns:
    Global step tensor.

  Raises:
    ValueError: if global step tensor is already defined.
  """
  graph = graph or ops.get_default_graph()
  if get_global_step(graph) is not None:
    raise ValueError('"global_step" already exists.')
  # Create in proper graph and base name_scope.
  with graph.as_default() as g, g.name_scope(None):
    return variable_scope.get_variable(
        ops.GraphKeys.GLOBAL_STEP,
        shape=[],
        dtype=dtypes.int64,
        initializer=init_ops.zeros_initializer(),
        trainable=False,
        collections=[ops.GraphKeys.GLOBAL_VARIABLES, ops.GraphKeys.GLOBAL_STEP]) 
Developer: ryfeus, Project: lambda-packs, Lines: 27, Source: training_util.py
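A brief usage sketch for the helper above (assuming TensorFlow 1.x graph mode, a fresh default graph, and the other training_util.py imports such as get_global_step; the increment op is illustrative):

import tensorflow as tf  # TF 1.x

global_step = create_global_step()
increment = tf.assign_add(global_step, 1)

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(increment))  # 1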

Example 5: call

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def call(self, inputs, state):
    """Run the cell on embedded inputs."""
    with ops.device("/cpu:0"):
      if self._initializer:
        initializer = self._initializer
      elif vs.get_variable_scope().initializer:
        initializer = vs.get_variable_scope().initializer
      else:
        # Default initializer for embeddings should have variance=1.
        sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
        initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

      if isinstance(state, tuple):
        data_type = state[0].dtype
      else:
        data_type = state.dtype

      embedding = vs.get_variable(
          "embedding", [self._embedding_classes, self._embedding_size],
          initializer=initializer,
          dtype=data_type)
      embedded = embedding_ops.embedding_lookup(embedding,
                                                array_ops.reshape(inputs, [-1]))

      return self._cell(embedded, state) 
Developer: ryfeus, Project: lambda-packs, Lines: 27, Source: core_rnn_cell.py
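For reference, a standalone sketch of the embedding pattern used above, outside any RNN cell (assuming TensorFlow 1.x graph mode; the function name and sizes are illustrative):

import math

from tensorflow.python.ops import array_ops
from tensorflow.python.ops import embedding_ops
from tensorflow.python.ops import init_ops
from tensorflow.python.ops import variable_scope


def embed_ids(ids, num_classes=1000, embedding_size=64):
  # Uniform(-sqrt(3), sqrt(3)) has variance=1, matching the default above.
  sqrt3 = math.sqrt(3)
  embedding = variable_scope.get_variable(
      "embedding", [num_classes, embedding_size],
      initializer=init_ops.random_uniform_initializer(-sqrt3, sqrt3))
  return embedding_ops.embedding_lookup(embedding,
                                        array_ops.reshape(ids, [-1]))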

Example 6: _attention

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def _attention(self, query, attn_states):
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh

    with vs.variable_scope("attention"):
      k = vs.get_variable(
          "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("attn_v", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      y = _linear(query, self._attn_vec_size, True)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states 
Developer: ryfeus, Project: lambda-packs, Lines: 24, Source: rnn_cell.py

Example 7: _highway

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def _highway(self, inp, out):
    input_size = inp.get_shape().with_rank(2)[1].value
    carry_weight = vs.get_variable("carry_w", [input_size, input_size])
    carry_bias = vs.get_variable(
        "carry_b", [input_size],
        initializer=init_ops.constant_initializer(
            self._carry_bias_init))
    carry = math_ops.sigmoid(nn_ops.xw_plus_b(inp, carry_weight, carry_bias))
    if self._couple_carry_transform_gates:
      transform = 1 - carry
    else:
      transform_weight = vs.get_variable("transform_w",
                                         [input_size, input_size])
      transform_bias = vs.get_variable(
          "transform_b", [input_size],
          initializer=init_ops.constant_initializer(
              -self._carry_bias_init))
      transform = math_ops.sigmoid(nn_ops.xw_plus_b(inp,
                                                    transform_weight,
                                                    transform_bias))
    return inp * carry + out * transform 
Developer: ryfeus, Project: lambda-packs, Lines: 23, Source: rnn_cell.py

Example 8: categorical_variable

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def categorical_variable(tensor_in, n_classes, embedding_size, name):
  """Creates an embedding for categorical variable with given number of classes.

  Args:
    tensor_in: Input tensor with class identifier (can be batch or
      N-dimensional).
    n_classes: Number of classes.
    embedding_size: Size of embedding vector to represent each class.
    name: Name of this categorical variable.
  Returns:
    Tensor of input shape, with additional dimension for embedding.

  Example:
    Calling categorical_variable([1, 2], 5, 10, "my_cat"), will return 2 x 10
    tensor, where each row is representation of the class.
  """
  with vs.variable_scope(name):
    embeddings = vs.get_variable(name + '_embeddings',
                                 [n_classes, embedding_size])
    return embedding_lookup(embeddings, tensor_in) 
Developer: ryfeus, Project: lambda-packs, Lines: 22, Source: embeddings_ops.py
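The call from the docstring, written out as a runnable usage sketch (assuming TensorFlow 1.x graph mode and the embeddings_ops.py imports used by the helper above):

import tensorflow as tf  # TF 1.x

ids = tf.constant([1, 2])
emb = categorical_variable(ids, n_classes=5, embedding_size=10, name="my_cat")

with tf.Session() as sess:
  sess.run(tf.global_variables_initializer())
  print(sess.run(emb).shape)  # (2, 10): one 10-dim embedding per input id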

Example 9: Var

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def Var(name, *args, **kw):
  """Implements an operator that generates a variable.

  This function is still experimental. Use it only
  for generating a single variable instance for
  each name.

  Args:
      name: Name of the variable.
      *args: Other arguments to get_variable.
      **kw: Other keywords for get_variable.

  Returns:
      A specs object for generating a variable.
  """

  def var(_):
    return variable_scope.get_variable(name, *args, **kw)

  return specs_lib.Callable(var) 
Developer: ryfeus, Project: lambda-packs, Lines: 22, Source: specs_ops.py

Example 10: _get_sharded_variable

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def _get_sharded_variable(name, shape, dtype, num_shards):
  """Get a list of sharded variables with the given dtype."""
  if num_shards > shape[0]:
    raise ValueError("Too many shards: shape=%s, num_shards=%d" %
                     (shape, num_shards))
  unit_shard_size = int(math.floor(shape[0] / num_shards))
  remaining_rows = shape[0] - unit_shard_size * num_shards

  shards = []
  for i in range(num_shards):
    current_size = unit_shard_size
    if i < remaining_rows:
      current_size += 1
    shards.append(vs.get_variable(name + "_%d" % i, [current_size] + shape[1:],
                                  dtype=dtype))
  return shards 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 18, Source: rnn_cell.py
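A quick sketch of how the rows are split across shards (assuming TensorFlow 1.x graph mode and the rnn_cell.py imports; the name and shape are illustrative): 10 rows over 3 shards leaves one remaining row, so the first shard gets an extra row.

from tensorflow.python.framework import dtypes

shards = _get_sharded_variable("kernel", [10, 8], dtypes.float32, num_shards=3)
print([s.get_shape().as_list() for s in shards])  # [[4, 8], [3, 8], [3, 8]]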

Example 11: __call__

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def __call__(self, inputs, state, scope=None):
    """Run the cell on embedded inputs."""
    with vs.variable_scope(scope or "embedding_wrapper"):  # "EmbeddingWrapper"
      with ops.device("/cpu:0"):
        if self._initializer:
          initializer = self._initializer
        elif vs.get_variable_scope().initializer:
          initializer = vs.get_variable_scope().initializer
        else:
          # Default initializer for embeddings should have variance=1.
          sqrt3 = math.sqrt(3)  # Uniform(-sqrt(3), sqrt(3)) has variance=1.
          initializer = init_ops.random_uniform_initializer(-sqrt3, sqrt3)

        if type(state) is tuple:
          data_type = state[0].dtype
        else:
          data_type = state.dtype

        embedding = vs.get_variable(
            "embedding", [self._embedding_classes, self._embedding_size],
            initializer=initializer,
            dtype=data_type)
        embedded = embedding_ops.embedding_lookup(
            embedding, array_ops.reshape(inputs, [-1]))
    return self._cell(embedded, state) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines: 27, Source: core_rnn_cell_impl.py

Example 12: _adaptive_max_norm

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def _adaptive_max_norm(norm, std_factor, decay, global_step, epsilon, name):
  """Find max_norm given norm and previous average."""
  with vs.variable_scope(name, "AdaptiveMaxNorm", [norm]):
    log_norm = math_ops.log(norm + epsilon)

    def moving_average(name, value, decay):
      moving_average_variable = vs.get_variable(
          name,
          shape=value.get_shape(),
          dtype=value.dtype,
          initializer=init_ops.zeros_initializer(),
          trainable=False)
      return moving_averages.assign_moving_average(
          moving_average_variable, value, decay, zero_debias=False)

    # quicker adaptation at the beginning
    if global_step is not None:
      n = math_ops.cast(global_step, dtypes.float32)
      decay = math_ops.minimum(decay, n / (n + 1.))

    # update averages
    mean = moving_average("mean", log_norm, decay)
    sq_mean = moving_average("sq_mean", math_ops.square(log_norm), decay)

    variance = sq_mean - math_ops.square(mean)
    std = math_ops.sqrt(math_ops.maximum(epsilon, variance))
    max_norms = math_ops.exp(mean + std_factor * std)
    return max_norms, mean 
Developer: taehoonlee, Project: tensornets, Lines: 30, Source: optimizers.py
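The threshold computed above is exp(mean + std_factor * std) of the running log-norm statistics. A tiny NumPy illustration of that formula on a batch of norms (the values are made up, and plain batch statistics stand in for the moving averages used in the real code):

import numpy as np

norms = np.array([2.0, 2.5, 3.0, 10.0])  # one outlier
log_norm = np.log(norms)
mean, std = log_norm.mean(), log_norm.std()
max_norm = np.exp(mean + 1.0 * std)      # std_factor = 1.0
print(norms > max_norm)  # [False False False  True]: only the outlier would be clipped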

Example 13: _create_categorical_column_weighted_sum

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def _create_categorical_column_weighted_sum(
    column, builder, units, sparse_combiner, weight_collections, trainable):
  """Create a weighted sum of a categorical column for linear_model."""
  sparse_tensors = column._get_sparse_tensors(  # pylint: disable=protected-access
      builder,
      weight_collections=weight_collections,
      trainable=trainable)
  id_tensor = sparse_ops.sparse_reshape(sparse_tensors.id_tensor, [
      array_ops.shape(sparse_tensors.id_tensor)[0], -1
  ])
  weight_tensor = sparse_tensors.weight_tensor
  if weight_tensor is not None:
    weight_tensor = sparse_ops.sparse_reshape(
        weight_tensor, [array_ops.shape(weight_tensor)[0], -1])

  weight = variable_scope.get_variable(
      name='weights',
      shape=(column._num_buckets, units),  # pylint: disable=protected-access
      initializer=init_ops.zeros_initializer(),
      trainable=trainable,
      collections=weight_collections)
  return _safe_embedding_lookup_sparse(
      weight,
      id_tensor,
      sparse_weights=weight_tensor,
      combiner=sparse_combiner,
      name='weighted_sum') 
Developer: ryfeus, Project: lambda-packs, Lines: 29, Source: feature_column.py

Example 14: _get_dense_tensor

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def _get_dense_tensor(self, inputs, weight_collections=None, trainable=None):
    # Get sparse IDs and weights.
    sparse_tensors = self.categorical_column._get_sparse_tensors(  # pylint: disable=protected-access
        inputs, weight_collections=weight_collections, trainable=trainable)
    sparse_ids = sparse_tensors.id_tensor
    sparse_weights = sparse_tensors.weight_tensor

    # Create embedding weight, and restore from checkpoint if necessary.
    embedding_weights = variable_scope.get_variable(
        name='embedding_weights',
        shape=(self.categorical_column._num_buckets, self.dimension),  # pylint: disable=protected-access
        dtype=dtypes.float32,
        initializer=self.initializer,
        trainable=self.trainable and trainable,
        collections=weight_collections)
    if self.ckpt_to_load_from is not None:
      to_restore = embedding_weights
      if isinstance(to_restore, variables.PartitionedVariable):
        to_restore = to_restore._get_variable_list()  # pylint: disable=protected-access
      checkpoint_utils.init_from_checkpoint(self.ckpt_to_load_from, {
          self.tensor_name_in_ckpt: to_restore
      })

    # Return embedding lookup result.
    return _safe_embedding_lookup_sparse(
        embedding_weights=embedding_weights,
        sparse_ids=sparse_ids,
        sparse_weights=sparse_weights,
        combiner=self.combiner,
        name='%s_weights' % self.name,
        max_norm=self.max_norm) 
Developer: ryfeus, Project: lambda-packs, Lines: 33, Source: feature_column.py

Example 15: __call__

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import get_variable [as alias]
def __call__(self, x, states_prev, scope=None):
    """Long short-term memory cell (LSTM)."""
    with vs.variable_scope(scope or self._names["scope"]):
      x_shape = x.get_shape().with_rank(2)
      if not x_shape[1].value:
        raise ValueError("Expecting x_shape[1] to be set: %s" % str(x_shape))
      if len(states_prev) != 2:
        raise ValueError("Expecting states_prev to be a tuple with length 2.")
      input_size = x_shape[1].value
      w = vs.get_variable(self._names["W"], [input_size + self._num_units,
                                             self._num_units * 4])
      b = vs.get_variable(
          self._names["b"], [w.get_shape().with_rank(2)[1].value],
          initializer=init_ops.constant_initializer(0.0))
      if self._use_peephole:
        wci = vs.get_variable(self._names["wci"], [self._num_units])
        wco = vs.get_variable(self._names["wco"], [self._num_units])
        wcf = vs.get_variable(self._names["wcf"], [self._num_units])
      else:
        wci = wco = wcf = array_ops.zeros([self._num_units])
      (cs_prev, h_prev) = states_prev
      (_, cs, _, _, _, _, h) = _lstm_block_cell(
          x,
          cs_prev,
          h_prev,
          w,
          b,
          wci=wci,
          wco=wco,
          wcf=wcf,
          forget_bias=self._forget_bias,
          use_peephole=self._use_peephole)

      new_state = rnn_cell_impl.LSTMStateTuple(cs, h)
      return h, new_state 
Developer: ryfeus, Project: lambda-packs, Lines: 37, Source: lstm_ops.py


Note: The tensorflow.python.ops.variable_scope.get_variable examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors; copyright remains with the original authors, and distribution or use should follow each project's license. Do not reproduce without permission.