Python tensorflow.fill Function Code Examples

This page collects typical usage examples of the Python function tensorflow.fill. If you are wondering how exactly fill works, how to call it, or what it looks like in practice, the curated code examples below should help.


Fifteen code examples of the fill function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
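Before the examples, here is a minimal sketch of tf.fill itself, written against the TF 1.x graph-mode API that all of the examples below use. tf.fill(dims, value) creates a tensor of shape dims with every element set to value; unlike tf.constant, the shape may be a tensor computed at run time.

import tensorflow as tf

# Static shape: a 2x3 tensor of nines.
a = tf.fill([2, 3], 9.0)

# Dynamic shape: tf.fill also accepts a shape that is only known at run
# time, which tf.constant cannot do.
dims = tf.placeholder(tf.int32, shape=(2,))
b = tf.fill(dims, 0.5)

with tf.Session() as sess:
    print(sess.run(a))                            # [[9. 9. 9.] [9. 9. 9.]]
    print(sess.run(b, feed_dict={dims: [4, 2]}))  # a 4x2 tensor of 0.5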

Example 1: thresholding

def thresholding(inputs):
    # find the mean for each example in the batch
    mean_output = tf.reduce_mean(inputs, axis=1)

    # scale each mean based on a factor
    threshold_scalar = tf.Variable(utils.threshold_scalar, dtype=tf.float32)  # dtype must be a keyword; positionally it would land in `trainable`
    scaled_mean = tf.scalar_mul(threshold_scalar, mean_output)
    scaled_mean = tf.reshape(scaled_mean, [utils.batch_size])

    # set up per-example lower and upper bounds for the threshold
    min_thresh_for_max = tf.fill([utils.batch_size], 0.05)
    max_thresh_for_min = tf.fill([utils.batch_size], 0.15)   #0.4
    thresholds = tf.maximum(min_thresh_for_max, scaled_mean)
    thresholds = tf.minimum(max_thresh_for_min, thresholds)

    # zero values under the thresholds using bitmask
    thresholds = tf.reshape(thresholds, [utils.batch_size, 1, 1])  # was hard-coded to 128

    threshold_mask = tf.cast(tf.greater(inputs, thresholds), tf.float32)
    thresholded_input = tf.multiply(inputs, threshold_mask)

    # peak picking
    # select beats by x[i-1] < x[i] > x[i+1] (local maximum)
    x_minus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=-1, axis=1)), tf.float32)
    x_plus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=1, axis=1)), tf.float32)
    output = tf.multiply(x_minus_1, x_plus_1)


    return output
Author: nearlyeveryone, Project: bpm, Lines: 29, Source: bpm_estimator.py

Example 2: _create_state

    def _create_state(self, batch_size, dtype, cell_state=None):
        cand_symbols = tf.fill([batch_size, self.max_len],
                               tf.constant(self.start_token, dtype=tf.int32))
        cand_logprobs = tf.ones((batch_size,), dtype=tf.float32) * -float('inf')
        cand_symbols.set_shape([batch_size, self.max_len])

        if cell_state is None:
            cell_state = self.cell.zero_state(batch_size*self.beam_size, dtype=dtype)
        else:
            cell_state = BeamDecoder._tile_along_beam(self.beam_size, cell_state)
        full_size = batch_size * self.beam_size
        first_in_beam_mask = tf.equal(tf.range(full_size) % self.beam_size, 0)

        beam_symbols = tf.fill([full_size, self.max_len],
                               tf.constant(self.start_token, dtype=tf.int32))
        beam_logprobs = tf.where(  # tf.select was renamed tf.where in TF 1.0
            first_in_beam_mask,
            tf.fill([full_size], 0.0),
            tf.fill([full_size], -1e18), # top_k does not play well with -inf
                                         # TODO: dtype-dependent value here
        )

        return (
            cand_symbols,
            cand_logprobs,
            beam_symbols,
            beam_logprobs,
            cell_state
        )
Author: Calvin-L, Project: commandline-helper, Lines: 29, Source: beam_search.py

Example 3: getLoss

 def getLoss(trueCosSim, falseCosSim, margin):
     zero = tf.fill(tf.shape(trueCosSim), 0.0)
     tfMargin = tf.fill(tf.shape(trueCosSim), margin)
     with tf.name_scope("loss"):
         losses = tf.maximum(zero, tf.subtract(tfMargin, tf.subtract(trueCosSim, falseCosSim)))
         loss = tf.reduce_sum(losses)
     return loss
Author: sjqzhang, Project: QA, Lines: 7, Source: qaLSTMNet.py
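A hypothetical usage sketch (the similarity values are invented, and getLoss is assumed to be callable as a plain function, e.g. a @staticmethod on the network class):

true_sim = tf.constant([0.9, 0.7, 0.2])   # cosine similarity of correct answers
false_sim = tf.constant([0.1, 0.6, 0.4])  # cosine similarity of wrong answers
loss = getLoss(true_sim, false_sim, margin=0.3)
# per-pair hinge losses: max(0, 0.3 - 0.8) = 0.0
#                        max(0, 0.3 - 0.1) = 0.2
#                        max(0, 0.3 - (-0.2)) = 0.5   -> loss = 0.7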

Example 4: compute_ans

 def compute_ans(op_embedding, comparison):
   op_embedding = tf.expand_dims(op_embedding, 0)
   # dot product of operation embedding with the hidden state to the left of the number occurrence
   first = tf.transpose(
       tf.matmul(op_embedding,
                 tf.transpose(
                     tf.reduce_sum(hidden_vectors * tf.tile(
                         tf.expand_dims(
                             tf.transpose(self.batch_ordinal_question), 2),
                         [1, 1, self.utility.FLAGS.embedding_dims]), 0))))
   second = self.batch_question_number_one_mask + tf.transpose(
       tf.matmul(op_embedding,
                 tf.transpose(
                     tf.reduce_sum(hidden_vectors * tf.tile(
                         tf.expand_dims(
                             tf.transpose(self.batch_ordinal_question_one), 2
                         ), [1, 1, self.utility.FLAGS.embedding_dims]), 0))))
   question_number_softmax = tf.nn.softmax(tf.concat(axis=1, values=[first, second]))
   if (self.mode == "test"):
     cond = tf.equal(question_number_softmax,
                     tf.reshape(
                         tf.reduce_max(question_number_softmax, 1),
                         [self.batch_size, 1]))
     question_number_softmax = tf.where(
         cond,
         tf.fill(tf.shape(question_number_softmax), 1.0),
         tf.fill(tf.shape(question_number_softmax), 0.0))
     question_number_softmax = tf.cast(question_number_softmax,
                                       self.data_type)
   ans = tf.reshape(
       tf.reduce_sum(question_number_softmax * tf.concat(
           axis=1, values=[self.batch_question_number, self.batch_question_number_one]),
                     1), [self.batch_size, 1])
   return ans
Author: Hukongtao, Project: models, Lines: 34, Source: model.py

Example 5: language_model

def language_model(input, vocab_size):
  """Form p(x[0], ..., x[timesteps - 1]),

  \prod_{t=0}^{timesteps - 1} p(x[t] | x[:t]).

  To calculate the probability, we call log_prob on
  x = [x[0], ..., x[timesteps - 1]] given
  `input` = [0, x[0], ..., x[timesteps - 2]].

  We implement this separately from the generative model so the
  forward pass, e.g., embedding/dense layers, can be parallelized.

  [batch_size, timesteps] -> [batch_size, timesteps]
  """
  x = tf.one_hot(input, depth=vocab_size, dtype=tf.float32)
  h = tf.fill(tf.stack([tf.shape(x)[0], FLAGS.hidden_size]), 0.0)
  c = tf.fill(tf.stack([tf.shape(x)[0], FLAGS.hidden_size]), 0.0)
  hs = []
  reuse = None
  for t in range(FLAGS.timesteps):
    if t > 0:
      reuse = True
    xt = x[:, t, :]
    h, c = lstm_cell(xt, h, c, name="lstm", reuse=reuse)
    hs.append(h)

  h = tf.stack(hs, 1)
  logits = tf.layers.dense(h, vocab_size, name="dense")
  output = Categorical(logits=logits)
  return output
Author: JoyceYa, Project: edward, Lines: 30, Source: lstm.py

Example 6: testInitRequiredAssignAdd

 def testInitRequiredAssignAdd(self):
   with self.test_session():
     p = tf.Variable(tf.fill([1024, 1024], 1), dtype=tf.int32)  # dtype must be a keyword argument here
     a = tf.assign_add(p, tf.fill([1024, 1024], 0))
     with self.assertRaisesOpError("use uninitialized"):
       a.op.run()
Author: CdricGmd, Project: tensorflow, Lines: 7, Source: dense_update_ops_test.py

Example 7: testParallelAssignWithLocking

  def testParallelAssignWithLocking(self):
    with self.test_session() as sess:
      zeros_t = tf.fill([1024, 1024], 0.0)
      ones_t = tf.fill([1024, 1024], 1.0)
      p = tf.Variable(zeros_t)
      assigns = [tf.assign(p, tf.multiply(ones_t, float(i)),  # tf.mul was renamed tf.multiply
                           use_locking=True)
                 for i in range(1, 21)]
      p.initializer.run()

      def run_assign(assign_op):
        sess.run(assign_op)
      threads = [self.checkedThread(target=run_assign, args=(assign_op,))
                 for assign_op in assigns]
      for t in threads:
        t.start()
      for t in threads:
        t.join()

      vals = p.eval()

      # Assert every element is the same, and taken from one of the assignments.
      self.assertTrue(vals[0, 0] > 0)
      self.assertTrue(vals[0, 0] <= 20)
      self.assertAllEqual(vals, np.ones([1024, 1024]) * vals[0, 0])
Author: CdricGmd, Project: tensorflow, Lines: 25, Source: dense_update_ops_test.py

Example 8: _variance

  def _variance(self):
    # We need to put the tf.where inside the outer tf.where to ensure we never
    # hit a NaN in the gradient.
    denom = tf.where(tf.greater(self.df, 2.),
                     self.df - 2.,
                     tf.ones_like(self.df))
    # Abs(scale) superfluous.
    var = (tf.ones(self.batch_shape_tensor(), dtype=self.dtype) *
           tf.square(self.scale) * self.df / denom)
    # When 1 < df <= 2, variance is infinite.
    inf = np.array(np.inf, dtype=self.dtype.as_numpy_dtype())
    result_where_defined = tf.where(
        self.df > tf.fill(self.batch_shape_tensor(), 2.),
        var,
        tf.fill(self.batch_shape_tensor(), inf, name="inf"))

    if self.allow_nan_stats:
      nan = np.array(np.nan, dtype=self.dtype.as_numpy_dtype())
      return tf.where(
          tf.greater(
              self.df,
              tf.ones(self.batch_shape_tensor(), dtype=self.dtype)),
          result_where_defined,
          tf.fill(self.batch_shape_tensor(), nan, name="nan"))
    else:
      return control_flow_ops.with_dependencies(
          [
              tf.assert_less(
                  tf.ones([], dtype=self.dtype),
                  self.df,
                  message="variance not defined for components of df <= 1"),
          ],
          result_where_defined)
Author: asudomoeva, Project: probability, Lines: 33, Source: student_t.py

Example 9: get_online_sequences

def get_online_sequences(sequence_length, batch_size,
                         pattern_length=10):
    """Gets tensors which produce new random examples every time
    they are evaluated.

    Args:
        sequence_length: the length of the time-lag the model has to
            remember the sequence for.
        batch_size: how many at once.
        pattern_length: the length of the pattern that has to be
            remembered and regurgitated.

    Returns:
        (data, targets): data is
            `[sequence_length + 2*pattern_length, batch_size, 1]`, targets
            are also `[sequence_length + 2*pattern_length, batch_size, 1]`.
    """
    # first we need a pattern to remember
    pattern = tf.random_uniform([pattern_length, batch_size, 1], maxval=8,
                                dtype=tf.int32)
    central_fillers = tf.fill([sequence_length-1, batch_size, 1], 8)
    go = tf.fill([1, batch_size, 1], 9)
    final_fillers = tf.fill([pattern_length, batch_size, 1], 8)
    inputs = tf.concat(axis=0, values=[pattern, central_fillers, go, final_fillers])

    fillers = tf.fill([sequence_length+pattern_length, batch_size, 1], 8)
    targets = tf.concat(axis=0, values=[fillers, pattern])

    return inputs, targets
Author: PFCM, Project: datasets, Lines: 29, Source: copy_pattern.py
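A sketch of drawing one batch from these tensors (sequence_length=5 and batch_size=2 are arbitrary illustrative values); every sess.run evaluates the random ops again, so each call yields a fresh pattern:

inputs, targets = get_online_sequences(sequence_length=5, batch_size=2)
with tf.Session() as sess:
    x, y = sess.run([inputs, targets])
# x and y both have shape [5 + 2*10, 2, 1]:
# x is [pattern, filler 8s, go symbol 9, filler 8s], y is [filler 8s, pattern].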

Example 10: add_model

  def add_model(self, input_data):
    """Adds a linear-layer plus a softmax transformation

    The core transformation for this model which transforms a batch of input
    data into a batch of predictions. In this case, the mathematical
    transformation effected is

    y = softmax(xW + b)

    Hint: Make sure to create tf.Variables as needed. Also, make sure to use
          tf.name_scope to ensure that your name spaces are clean.
    Hint: For this simple use-case, it's sufficient to initialize both weights W
          and biases b with zeros.

    Args:
      input_data: A tensor of shape (batch_size, n_features).
    Returns:
      out: A tensor of shape (batch_size, n_classes)
    """
    ### YOUR CODE HERE
    with tf.variable_scope("linear-transform"):
        weight = tf.Variable(tf.fill([self.config.n_features, self.config.n_classes], 0.0))
        bias = tf.Variable(tf.fill([self.config.n_classes], 0.0))
        z = tf.matmul(input_data, weight) + bias
        out = softmax(z)
    ### END YOUR CODE
    return out
Author: kvfrans, Project: cs224-solutions, Lines: 27, Source: q1_classifier.py

Example 11: _chain_backprop

def _chain_backprop(n):
  """Creates forward backward graph using tf.gradients.

  A0->A1->A2->..->An
    /    /       /
  B0<-B1<-B2<-..<-Bn
  """

  def forward(A0, n):
    """Takes A0, applies n operations to it, returns An."""

    A = A0
    for L in range(1, n+1): # op_i produces A_i
      A = tf.tanh(A, name="A"+str(L))
    return A

  def backward(A0, An, Bn, n):
    B0 = tf.gradients([An], [A0], grad_ys=[Bn])[0]
    return B0

  A0 = tf.fill((size,), 1.0, name="A0")  # `size` is a module-level constant in the original test file
  An = forward(A0, n)
  Bn = tf.fill((size,), 1.0, name="Bn")
  B0 = tf.gradients([An], [A0], grad_ys=[Bn])[0]
  return B0
Author: BhaskarNallani, Project: gradient-checkpointing, Lines: 25, Source: mem_util_test.py

Example 12: make_hard_softmax

 def make_hard_softmax(self, softmax):
   # Converts soft selection to hard selection; used at test time.
   cond = tf.equal(
       softmax, tf.reshape(tf.reduce_max(softmax, 1), [self.batch_size, 1]))
   softmax = tf.where(
       cond, tf.fill(tf.shape(softmax), 1.0), tf.fill(tf.shape(softmax), 0.0))
   softmax = tf.cast(softmax, self.data_type)
   return softmax
Author: Hukongtao, Project: models, Lines: 8, Source: model.py

Example 13: testFillNegative

  def testFillNegative(self):
    with self.test_session():
      for shape in (-1,), (2, -1), (-1, 2):
        with self.assertRaises(ValueError):
          tf.fill(shape, 7)

      # Using a placeholder so this won't be caught in Python.
      dims = tf.placeholder(tf.int32)
      fill_t = tf.fill(dims, 3.0)
      for shape in (-1,), (2, -1), (-1, 2):
        with self.assertRaises(tf.errors.InvalidArgumentError):
          fill_t.eval({dims: shape})
Author: 4chin, Project: tensorflow, Lines: 12, Source: constant_op_test.py

Example 14: LSTMBiasInit

def LSTMBiasInit(shape, dtype):
  """Returns ones for forget-gate, and zeros for the others."""
  shape = np.array(shape)

  # Check internal consistencies.
  assert shape.shape == (1,), shape
  assert shape[0] % 4 == 0, shape

  n = shape[0] // 4
  ones = tf.fill([n], tf.constant(1, dtype=dtype))
  zeros = tf.fill([3 * n], tf.constant(0, dtype=dtype))
  return tf.concat([ones, zeros], 0)
Author: 812864539, Project: models, Lines: 12, Source: blocks_lstm.py
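A quick sketch of what LSTMBiasInit produces when evaluated directly (12 = 4 gates x 3 units is an arbitrary illustrative size):

bias = LSTMBiasInit([12], tf.float32)
with tf.Session() as sess:
    print(sess.run(bias))
# -> [1. 1. 1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]  (ones for the forget gate, zeros elsewhere)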

Example 15: testShapeFunctionEdgeCases

    def testShapeFunctionEdgeCases(self):
        # Non-vector dimensions.
        with self.assertRaises(ValueError):
            tf.fill([[0, 1], [2, 3]], 1.0)

        # Non-scalar value.
        with self.assertRaises(ValueError):
            tf.fill([3, 2], [1.0, 2.0])

        # Partial dimension information.
        f = tf.fill(tf.placeholder(tf.int32, shape=(4,)), 3.0)
        self.assertEqual([None, None, None, None], f.get_shape().as_list())
Author: khellan, Project: tensorflow, Lines: 12, Source: constant_op_test.py


Note: The tensorflow.fill examples on this page were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright remains with the original authors; consult each project's license before distributing or using the code. Do not reproduce without permission.