

Python variable_scope.variable_scope Method Code Examples

This article collects typical usage examples of the Python method variable_scope.variable_scope from tensorflow.python.ops. If you are wondering how variable_scope.variable_scope is used in practice, the curated examples below should help; you can also explore further usage of the containing module, tensorflow.python.ops.variable_scope.


The following 15 code examples of the variable_scope.variable_scope method are shown below, ordered by popularity.
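
Before diving into the examples, here is a minimal sketch of what variable_scope.variable_scope itself does in TF 1.x graph mode: it namespaces variables created via get_variable and lets them be reused by name. The scope name "model" and variable name "w" below are purely illustrative.

import tensorflow as tf
from tensorflow.python.ops import variable_scope

with variable_scope.variable_scope("model"):
    w = variable_scope.get_variable("w", shape=[3, 3])

with variable_scope.variable_scope("model", reuse=True):
    w_again = variable_scope.get_variable("w", shape=[3, 3])

assert w is w_again  # reuse=True fetches the same underlying variable by name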

Example 1: scheduled_sampling

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def scheduled_sampling(self, batch_size, sampling_probability, true, estimate):
    with variable_scope.variable_scope("ScheduledEmbedding"):
      # Return -1s where we do not sample, and sample_ids elsewhere
      select_sampler = bernoulli.Bernoulli(probs=sampling_probability, dtype=tf.bool)
      select_sample = select_sampler.sample(sample_shape=batch_size)
      sample_ids = array_ops.where(
                  select_sample,
                  tf.range(batch_size),
                  gen_array_ops.fill([batch_size], -1))
      where_sampling = math_ops.cast(
          array_ops.where(sample_ids > -1), tf.int32)
      where_not_sampling = math_ops.cast(
          array_ops.where(sample_ids <= -1), tf.int32)
      _estimate = array_ops.gather_nd(estimate, where_sampling)
      _true = array_ops.gather_nd(true, where_not_sampling)

      base_shape = array_ops.shape(true)
      result1 = array_ops.scatter_nd(indices=where_sampling, updates=_estimate, shape=base_shape)
      result2 = array_ops.scatter_nd(indices=where_not_sampling, updates=_true, shape=base_shape)
      return result1 + result2
Developer: yaserkl, Project: TransferRL, Lines of code: 23, Source file: run_summarization.py
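
The scatter_nd pattern above merges two sources into one tensor: positions chosen for sampling take values from estimate, the rest from true. A hedged standalone sketch of the same trick (the values and mask here are illustrative):

import tensorflow as tf

a = tf.constant([10, 20, 30, 40])
b = tf.constant([1, 2, 3, 4])
mask = tf.constant([True, False, True, False])
idx_a = tf.where(mask)                   # positions taken from `a`
idx_b = tf.where(tf.logical_not(mask))   # positions taken from `b`
shape = tf.shape(a, out_type=tf.int64)   # scatter_nd needs shape dtype == indices dtype
mixed = (tf.scatter_nd(idx_a, tf.gather_nd(a, idx_a), shape)
         + tf.scatter_nd(idx_b, tf.gather_nd(b, idx_b), shape))
# mixed evaluates to [10, 2, 30, 4]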

Example 2: add_embedding_layer

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def add_embedding_layer(self, emb_matrix):
        """
        Adds word embedding layer to the graph.

        Inputs:
          emb_matrix: shape (400002, embedding_size).
            The GloVe vectors, plus vectors for PAD and UNK.
        """
        with vs.variable_scope("embeddings"):

            # Note: the embedding matrix is a tf.constant which means it's not a trainable parameter
            embedding_matrix = tf.constant(emb_matrix, dtype=tf.float32, name="emb_matrix") # shape (400002, embedding_size)

            # Get the word embeddings for the context and question,
            # using the placeholders self.context_ids and self.qn_ids
            self.context_embs = embedding_ops.embedding_lookup(embedding_matrix, self.context_ids) # shape (batch_size, context_len, embedding_size)
            self.qn_embs = embedding_ops.embedding_lookup(embedding_matrix, self.qn_ids) # shape (batch_size, question_len, embedding_size) 
Developer: abisee, Project: cs224n-win18-squad, Lines of code: 19, Source file: qa_model.py
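
A standalone sketch of the same idea, assuming a NumPy embedding matrix of the shape documented above (the random values stand in for the real GloVe vectors):

import numpy as np
import tensorflow as tf

emb_matrix = np.random.rand(400002, 100).astype(np.float32)  # illustrative
ids = tf.constant([[1, 5, 2]])  # (batch_size, seq_len)
with tf.variable_scope("embeddings"):
    table = tf.constant(emb_matrix, name="emb_matrix")  # non-trainable table
    embs = tf.nn.embedding_lookup(table, ids)           # shape (1, 3, 100)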

Example 3: dense_to_sparse

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def dense_to_sparse(tensor, eos_token=0, outputs_collections=None, scope=None):
  """Converts a dense tensor into a sparse tensor.

  An example use would be to convert dense labels to sparse ones
  so that they can be fed to the ctc_loss.

  Args:
     tensor: An `int` `Tensor` to be converted to a `Sparse`.
     eos_token: An integer. It is part of the target label that signifies the
       end of a sentence.
     outputs_collections: Collection to add the outputs.
     scope: Optional scope for name_scope.
  """
  with variable_scope.variable_scope(scope, 'dense_to_sparse', [tensor]) as sc:
    tensor = ops.convert_to_tensor(tensor)
    indices = array_ops.where(
        math_ops.not_equal(tensor, constant_op.constant(eos_token,
                                                        tensor.dtype)))
    values = array_ops.gather_nd(tensor, indices)
    shape = array_ops.shape(tensor, out_type=dtypes.int64)
    outputs = sparse_tensor.SparseTensor(indices, values, shape)
    return utils.collect_named_outputs(outputs_collections, sc.name, outputs) 
Developer: taehoonlee, Project: tensornets, Lines of code: 24, Source file: layers.py
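
A possible use, assuming the dense_to_sparse function above and a 0-padded label batch; the resulting SparseTensor is the form expected by tf.nn.ctc_loss:

import tensorflow as tf

dense_labels = tf.constant([[3, 1, 4, 0, 0],
                            [2, 7, 0, 0, 0]])
sparse_labels = dense_to_sparse(dense_labels, eos_token=0)
# sparse_labels.indices lists the non-padding positions and
# sparse_labels.values their label ids; it can be fed as the
# `labels` argument of tf.nn.ctc_loss.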

Example 4: softmax

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def softmax(logits, scope=None):
  """Performs softmax on Nth dimension of N-dimensional logit tensor.

  For two-dimensional logits this reduces to tf.nn.softmax. The N-th dimension
  needs to have a specified number of elements (number of classes).

  Args:
    logits: N-dimensional `Tensor` with logits, where N > 1.
    scope: Optional scope for variable_scope.

  Returns:
    A `Tensor` with same shape and type as logits.
  """
  # TODO(jrru): Add axis argument which defaults to last dimension.
  with variable_scope.variable_scope(scope, 'softmax', [logits]):
    num_logits = utils.last_dimension(logits.get_shape(), min_rank=2)
    logits_2d = array_ops.reshape(logits, [-1, num_logits])
    predictions = nn.softmax(logits_2d)
    predictions = array_ops.reshape(predictions, array_ops.shape(logits))
    if not context.executing_eagerly():
      predictions.set_shape(logits.get_shape())
    return predictions 
Developer: taehoonlee, Project: tensornets, Lines of code: 24, Source file: layers.py
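
An illustrative call on rank-3 logits (the shapes are made up); softmax is applied over the last dimension, as the docstring describes:

import tensorflow as tf

logits = tf.random_normal([2, 5, 4])        # (batch, time, num_classes)
probs = softmax(logits, scope="softmax3d")  # same shape; sums to 1 on the last axis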

Example 5: call

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def call(self, inputs, forward=True):
    vs = variable_scope.get_variable_scope()
    vars_before = vs.global_variables()

    if forward:
      x1, x2 = inputs
      out = self._forward(x1, x2)
    else:
      y1, y2 = inputs
      out = self._backward(y1, y2)

    # Add any created variables to the Layer's variable stores
    new_vars = vs.global_variables()[len(vars_before):]
    train_vars = vs.trainable_variables()
    for new_var in new_vars:
      if new_var in train_vars:
        self._trainable_weights.append(new_var)
      else:
        self._non_trainable_weights.append(new_var)

    return out 
Developer: taehoonlee, Project: tensornets, Lines of code: 23, Source file: rev_block_lib.py

Example 6: call

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def call(self, inputs, state):
    """Gated recurrent unit (GRU) with nunits cells."""
    with vs.variable_scope("gates"):  # Reset gate and update gate.
      # We start with bias of 1.0 to not reset and not update.
      bias_ones = self._bias_initializer
      if self._bias_initializer is None:
        dtype = [a.dtype for a in [inputs, state]][0]
        bias_ones = init_ops.constant_initializer(1.0, dtype=dtype)
      value = math_ops.sigmoid(
          _linear([inputs, state], 2 * self._num_units, True, bias_ones,
                  self._kernel_initializer))
      r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
    with vs.variable_scope("candidate"):
      c = self._activation(
          _linear([inputs, r * state], self._num_units, True,
                  self._bias_initializer, self._kernel_initializer))
    new_h = u * state + (1 - u) * c
    return new_h, new_h 
Developer: ryfeus, Project: lambda-packs, Lines of code: 20, Source file: rnn_cell_impl.py
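
For context, a hedged sketch of driving an equivalent GRU cell with dynamic_rnn in TF 1.x; tf.nn.rnn_cell.GRUCell implements the same gates/candidate split shown above (shapes are illustrative):

import tensorflow as tf

cell = tf.nn.rnn_cell.GRUCell(num_units=128)
inputs = tf.random_normal([4, 10, 32])  # (batch, time, features)
outputs, final_state = tf.nn.dynamic_rnn(cell, inputs, dtype=tf.float32)
# outputs: (4, 10, 128); final_state: (4, 128)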

Example 7: fixed_size_partitioner

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def fixed_size_partitioner(num_shards, axis=0):
  """Partitioner to specify a fixed number of shards along given axis.

  Args:
    num_shards: `int`, number of shards to partition variable.
    axis: `int`, axis to partition on.

  Returns:
    A partition function usable as the `partitioner` argument to
    `variable_scope`, `get_variable`, and `get_partitioned_variable_list`.
  """
  def _partitioner(shape, **unused_args):
    partitions_list = [1] * len(shape)
    partitions_list[axis] = min(num_shards, shape[axis].value)
    return partitions_list
  return _partitioner 
Developer: ryfeus, Project: lambda-packs, Lines of code: 18, Source file: partitioned_variables.py
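
A hedged usage sketch: passing the partitioner to a variable scope shards a large variable along axis 0 (the sizes here are illustrative):

import tensorflow as tf

with tf.variable_scope("emb", partitioner=fixed_size_partitioner(4)):
    table = tf.get_variable("table", shape=[1000, 64])
# `table` is a PartitionedVariable backed by 4 shards of shape [250, 64].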

Example 8: _create_local

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def _create_local(name, shape, collections=None, validate_shape=True,
                  dtype=dtypes.float32):
  """Creates a new local variable.

  Args:
    name: The name of the new or existing variable.
    shape: Shape of the new or existing variable.
    collections: A list of collection names to which the Variable will be added.
    validate_shape: Whether to validate the shape of the variable.
    dtype: Data type of the variables.

  Returns:
    The created variable.
  """
  # Make sure local variables are added to tf.GraphKeys.LOCAL_VARIABLES
  collections = list(collections or [])
  collections += [ops.GraphKeys.LOCAL_VARIABLES]
  return variable_scope.variable(
      array_ops.zeros(shape, dtype=dtype),
      name=name,
      trainable=False,
      collections=collections,
      validate_shape=validate_shape) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 25, Source file: metrics_impl.py
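
A hedged sketch of how a streaming metric might build on _create_local: two local accumulators plus their update ops (the names `total`/`count` and the input `values` are illustrative):

import tensorflow as tf

values = tf.constant([1.0, 2.0, 3.0])
total = _create_local("total", shape=[])
count = _create_local("count", shape=[])
update_total = tf.assign_add(total, tf.reduce_sum(values))
update_count = tf.assign_add(count, tf.cast(tf.size(values), tf.float32))
mean = total / tf.maximum(count, 1.0)  # guard against division by zero
# Run update_total/update_count once per batch, then fetch `mean`.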

Example 9: _create_dense_column_weighted_sum

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def _create_dense_column_weighted_sum(
    column, builder, units, weight_collections, trainable):
  """Create a weighted sum of a dense column for linear_model."""
  tensor = column._get_dense_tensor(  # pylint: disable=protected-access
      builder,
      weight_collections=weight_collections,
      trainable=trainable)
  num_elements = column._variable_shape.num_elements()  # pylint: disable=protected-access
  batch_size = array_ops.shape(tensor)[0]
  tensor = array_ops.reshape(tensor, shape=(batch_size, num_elements))
  weight = variable_scope.get_variable(
      name='weights',
      shape=[num_elements, units],
      initializer=init_ops.zeros_initializer(),
      trainable=trainable,
      collections=weight_collections)
  return math_ops.matmul(tensor, weight, name='weighted_sum') 
Developer: ryfeus, Project: lambda-packs, Lines of code: 19, Source file: feature_column.py

Example 10: _attention

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def _attention(self, query, attn_states):
    conv2d = nn_ops.conv2d
    reduce_sum = math_ops.reduce_sum
    softmax = nn_ops.softmax
    tanh = math_ops.tanh

    with vs.variable_scope("attention"):
      k = vs.get_variable(
          "attn_w", [1, 1, self._attn_size, self._attn_vec_size])
      v = vs.get_variable("attn_v", [self._attn_vec_size])
      hidden = array_ops.reshape(attn_states,
                                 [-1, self._attn_length, 1, self._attn_size])
      hidden_features = conv2d(hidden, k, [1, 1, 1, 1], "SAME")
      y = _linear(query, self._attn_vec_size, True)
      y = array_ops.reshape(y, [-1, 1, 1, self._attn_vec_size])
      s = reduce_sum(v * tanh(hidden_features + y), [2, 3])
      a = softmax(s)
      d = reduce_sum(
          array_ops.reshape(a, [-1, self._attn_length, 1, 1]) * hidden, [1, 2])
      new_attns = array_ops.reshape(d, [-1, self._attn_size])
      new_attn_states = array_ops.slice(attn_states, [0, 1, 0], [-1, -1, -1])
      return new_attns, new_attn_states 
Developer: ryfeus, Project: lambda-packs, Lines of code: 24, Source file: rnn_cell.py

Example 11: subsample

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def subsample(inputs, factor, scope=None):
  """Subsamples the input along the spatial dimensions.

  Args:
    inputs: A `Tensor` of size [batch, height_in, width_in, channels].
    factor: The subsampling factor.
    scope: Optional variable_scope.

  Returns:
    output: A `Tensor` of size [batch, height_out, width_out, channels] with the
      input, either intact (if factor == 1) or subsampled (if factor > 1).
  """
  if factor == 1:
    return inputs
  else:
    return layers.max_pool2d(inputs, [1, 1], stride=factor, scope=scope) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 18, Source file: resnet_utils.py
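
Illustrative use: halving the spatial resolution of a feature map (this assumes the `layers` alias from tf.contrib used in the snippet above, and made-up shapes):

import tensorflow as tf

feats = tf.random_normal([8, 32, 32, 64])       # NHWC feature map
down = subsample(feats, factor=2, scope="sub")  # -> (8, 16, 16, 64)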

Example 12: prepare_attention

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def prepare_attention(attention_states,
                      attention_option,
                      num_units,
                      reuse=False):
  """Prepare keys/values/functions for attention.

  Args:
    attention_states: hidden states to attend over.
    attention_option: how to compute attention, either "luong" or "bahdanau".
    num_units: hidden state dimension.
    reuse: whether to reuse variable scope.

  Returns:
    attention_keys: to be compared with target states.
    attention_values: to be used to construct context vectors.
    attention_score_fn: to compute similarity between key and target states.
    attention_construct_fn: to build attention states.
  """

  # Prepare attention keys / values from attention_states
  with variable_scope.variable_scope("attention_keys", reuse=reuse) as scope:
    attention_keys = layers.linear(
        attention_states, num_units, biases_initializer=None, scope=scope)
  attention_values = attention_states

  # Attention score function
  attention_score_fn = _create_attention_score_fn("attention_score", num_units,
                                                  attention_option, reuse)

  # Attention construction function
  attention_construct_fn = _create_attention_construct_fn("attention_construct",
                                                          num_units,
                                                          attention_score_fn,
                                                          reuse)

  return (attention_keys, attention_values, attention_score_fn,
          attention_construct_fn) 
Developer: kepei1106, Project: SentenceFunction, Lines of code: 39, Source file: my_attention_decoder_fn.py
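
A hedged call sketch (the encoder output shape is made up; the four returned values are consumed by the attention decoder elsewhere in the same file):

import tensorflow as tf

encoder_outputs = tf.random_normal([4, 20, 256])  # (batch, time, num_units)
(attention_keys, attention_values, attention_score_fn,
 attention_construct_fn) = prepare_attention(
     encoder_outputs, attention_option="bahdanau", num_units=256)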

Example 13: _create_attention_construct_fn

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def _create_attention_construct_fn(name, num_units, attention_score_fn, reuse):
  """Function to compute attention vectors.

  Args:
    name: to label variables.
    num_units: hidden state dimension.
    attention_score_fn: to compute similarity between key and target states.
    reuse: whether to reuse variable scope.

  Returns:
    attention_construct_fn: to build attention states.
  """
  with variable_scope.variable_scope(name, reuse=reuse) as scope:

    def construct_fn(attention_query, attention_keys, attention_values):
      context = attention_score_fn(attention_query, attention_keys,
                                   attention_values)
      concat_input = array_ops.concat([attention_query, context], 1)
      attention = layers.linear(
          concat_input, num_units, biases_initializer=None, scope=scope)
      return attention

    return construct_fn


# keys: [batch_size, attention_length, attn_size]
# query: [batch_size, 1, attn_size]
# return weights [batch_size, attention_length] 
Developer: kepei1106, Project: SentenceFunction, Lines of code: 30, Source file: my_attention_decoder_fn.py

Example 14: build_graph

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def build_graph(self, inputs, masks):
        """
        Inputs:
          inputs: Tensor shape (batch_size, seq_len, input_size)
          masks: Tensor shape (batch_size, seq_len).
            Has 1s where there is real input, 0s where there's padding.
            This is used to make sure tf.nn.bidirectional_dynamic_rnn doesn't iterate through masked steps.

        Returns:
          out: Tensor shape (batch_size, seq_len, hidden_size*2).
            This is all hidden states (fw and bw hidden states are concatenated).
        """
        with vs.variable_scope("RNNEncoder"):
            input_lens = tf.reduce_sum(masks, reduction_indices=1) # shape (batch_size)

            # Note: fw_out and bw_out are the hidden states for every timestep.
            # Each is shape (batch_size, seq_len, hidden_size).
            (fw_out, bw_out), _ = tf.nn.bidirectional_dynamic_rnn(self.rnn_cell_fw, self.rnn_cell_bw, inputs, input_lens, dtype=tf.float32)

            # Concatenate the forward and backward hidden states
            out = tf.concat([fw_out, bw_out], 2)

            # Apply dropout
            out = tf.nn.dropout(out, self.keep_prob)

            return out 
Developer: abisee, Project: cs224n-win18-squad, Lines of code: 28, Source file: modules.py

Example 15: __init__

# Required import: from tensorflow.python.ops import variable_scope [as alias]
# Or: from tensorflow.python.ops.variable_scope import variable_scope [as alias]
def __init__(self, FLAGS, id2word, word2id, emb_matrix):
        """
        Initializes the QA model.

        Inputs:
          FLAGS: the flags passed in from main.py
          id2word: dictionary mapping word idx (int) to word (string)
          word2id: dictionary mapping word (string) to word idx (int)
          emb_matrix: numpy array shape (400002, embedding_size) containing pre-trained GloVe embeddings
        """
        print "Initializing the QAModel..."
        self.FLAGS = FLAGS
        self.id2word = id2word
        self.word2id = word2id

        # Add all parts of the graph
        with tf.variable_scope("QAModel", initializer=tf.contrib.layers.variance_scaling_initializer(factor=1.0, uniform=True)):
            self.add_placeholders()
            self.add_embedding_layer(emb_matrix)
            self.build_graph()
            self.add_loss()

        # Define trainable parameters, gradient, gradient norm, and clip by gradient norm
        params = tf.trainable_variables()
        gradients = tf.gradients(self.loss, params)
        self.gradient_norm = tf.global_norm(gradients)
        clipped_gradients, _ = tf.clip_by_global_norm(gradients, FLAGS.max_gradient_norm)
        self.param_norm = tf.global_norm(params)

        # Define optimizer and updates
        # (updates is what you need to fetch in session.run to do a gradient update)
        self.global_step = tf.Variable(0, name="global_step", trainable=False)
        opt = tf.train.AdamOptimizer(learning_rate=FLAGS.learning_rate) # you can try other optimizers
        self.updates = opt.apply_gradients(zip(clipped_gradients, params), global_step=self.global_step)

        # Define savers (for checkpointing) and summaries (for tensorboard)
        self.saver = tf.train.Saver(tf.global_variables(), max_to_keep=FLAGS.keep)
        self.bestmodel_saver = tf.train.Saver(tf.global_variables(), max_to_keep=1)
        self.summaries = tf.summary.merge_all() 
Developer: abisee, Project: cs224n-win18-squad, Lines of code: 41, Source file: qa_model.py


Note: The tensorflow.python.ops.variable_scope.variable_scope examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors; refer to each project's License before distributing or using the code. Do not reproduce this article without permission.