

Python tensorflow.convert_to_tensor Function Code Examples

This article collects typical usage examples of Python's tensorflow.convert_to_tensor function. If you are wondering what convert_to_tensor does, how to call it, or what it looks like in real code, the curated examples below should help.


A total of 15 code examples of the convert_to_tensor function are shown below, sorted by popularity by default.
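Before walking through the examples, here is a minimal sketch of the most common calling patterns (a hypothetical illustration, assuming TensorFlow is imported as tf and NumPy as np):

import numpy as np
import tensorflow as tf

# A Python list is converted to a tensor; the dtype is inferred from the values.
t_list = tf.convert_to_tensor([1, 2, 3])                          # int32 tensor of shape [3]

# A NumPy array keeps its values; an explicit dtype can be requested.
t_arr = tf.convert_to_tensor(np.zeros((2, 3)), dtype=tf.float32)

# Passing something that is already a tensor is essentially a no-op, which is why
# library code routinely calls convert_to_tensor on its inputs so that lists,
# scalars, NumPy arrays, and tensors are all accepted interchangeably.
t_same = tf.convert_to_tensor(t_arr)

Most of the examples below follow exactly this pattern: accept arbitrary array-like input, normalize it with convert_to_tensor, and then build further operations on the result.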

Example 1: forward_prop_nodes

def forward_prop_nodes(i_start, size, acts, offset):
  # Note: In the corpus that we've seen, parse trees are always ordered such that
  # iteration forward through the list will be in bottom-up order.
  # Conversely, iteration in reverse is always top-down.
  # This enables a simple iterative algorithm. If this were not the case,
  # putting the nodes in order by a postorder traversal would fix it.
  def fwd_continue(*parms):
    (_, sz, cur, _) = parms
    return tf.less(cur, sz, name='cur_le_size')

  def forward_prop(*parms):
    (i0, sz, cur, act) = parms
    with tf.device('/gpu:0'):
      gact = act
      gcur = cur
      next_idx = i0 + gcur
    node_out = tf.reshape(forward_node(next_idx, act, offset), [1, FLAGS.wvs, 1], name='node_out')
    tf.scatter_add(gact, tf.pack([gcur]), node_out, name='act_update')
    act = gact
    return [i0, sz, cur + iONE, act]

  with tf.device('/cpu:0'):
    i_start = tf.convert_to_tensor(i_start, dtype=tf.int32, name='i_start')
    size = tf.convert_to_tensor(size, dtype=tf.int32, name='size')
    iZ = tf.convert_to_tensor(0, dtype=tf.int32, name='ZERO')

  while_parms = [i_start, size, iZ, acts]
  wresult = tf.while_loop(fwd_continue, forward_prop, while_parms, parallel_iterations=1,
                          name='forward_prop_while')
  (_, _, _, result) = wresult
  return tf.slice(result, [0, 0, 0], tf.pack([size, -1, -1]), name='fwd_prop_nodes')
Author: rgobbel, Project: rntn, Lines: 31, Source: tf_rntn.py
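Example 1 combines several ideas (device placement, scatter updates, tf.while_loop). A much smaller, hypothetical sketch of its core pattern, converting the loop bounds with convert_to_tensor and feeding them to tf.while_loop as loop variables, could look like this (toy code, not taken from the rntn project):

import tensorflow as tf

i_start = tf.convert_to_tensor(0, dtype=tf.int32, name='i_start')
size = tf.convert_to_tensor(5, dtype=tf.int32, name='size')
acc = tf.convert_to_tensor(0, dtype=tf.int32, name='acc')

def keep_going(i, sz, total):
    return tf.less(i, sz)

def body(i, sz, total):
    return [i + 1, sz, total + i]

_, _, total = tf.while_loop(keep_going, body, [i_start, size, acc])
# total evaluates to 0 + 1 + 2 + 3 + 4 = 10 (via sess.run(total) in graph mode)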

Example 2: testLSTMBasicToBlockPeeping

  def testLSTMBasicToBlockPeeping(self):
    with self.test_session(use_gpu=self._use_gpu) as sess:
      batch_size = 2
      input_size = 3
      cell_size = 4
      sequence_length = 5

      inputs = []
      for _ in range(sequence_length):
        inp = tf.convert_to_tensor(
            np.random.randn(batch_size, input_size),
            dtype=tf.float32)
        inputs.append(inp)

      initializer = tf.random_uniform_initializer(-0.01, 0.01, seed=19890212)
      with tf.variable_scope("basic", initializer=initializer):
        cell = tf.nn.rnn_cell.LSTMCell(cell_size,
                                       use_peepholes=True,
                                       state_is_tuple=True)
        outputs, _ = tf.nn.rnn(cell, inputs, dtype=tf.float32)

        sess.run([tf.initialize_all_variables()])
        basic_outputs = sess.run(outputs)
        basic_grads = sess.run(tf.gradients(outputs, inputs))
        basic_wgrads = sess.run(tf.gradients(outputs, tf.trainable_variables()))

      with tf.variable_scope("block", initializer=initializer):
        w = tf.get_variable("w",
                            shape=[input_size + cell_size, cell_size * 4],
                            dtype=tf.float32)
        b = tf.get_variable("b",
                            shape=[cell_size * 4],
                            dtype=tf.float32,
                            initializer=tf.zeros_initializer)

        wci = tf.get_variable("wci", shape=[cell_size], dtype=tf.float32)
        wcf = tf.get_variable("wcf", shape=[cell_size], dtype=tf.float32)
        wco = tf.get_variable("wco", shape=[cell_size], dtype=tf.float32)

        _, _, _, _, _, _, outputs = fused_lstm(
            tf.convert_to_tensor(sequence_length,
                                 dtype=tf.int64),
            inputs,
            w,
            b,
            wci=wci,
            wcf=wcf,
            wco=wco,
            cell_clip=0,
            use_peephole=True)

        sess.run([tf.initialize_all_variables()])
        block_outputs = sess.run(outputs)
        block_grads = sess.run(tf.gradients(outputs, inputs))
        block_wgrads = sess.run(tf.gradients(outputs, [w, b, wci, wcf, wco]))

      self.assertAllClose(basic_outputs, block_outputs)
      self.assertAllClose(basic_grads, block_grads)
      for basic, block in zip(basic_wgrads, block_wgrads):
        self.assertAllClose(basic, block, rtol=1e-2, atol=1e-2)
Author: 10imaging, Project: tensorflow, Lines: 60, Source: lstm_ops_test.py

Example 3: one_minus_pseudo_unitcell_transfer_op

def one_minus_pseudo_unitcell_transfer_op(direction, mps, left_dominant,
                                          right_dominant, vector):
    """
    Calculates the action of (1 - Transfer-Operator + |r)(l|) on a vector.
    Parameters:
    ---------------------------
    direction:  int or str 
                if (1,'l','left'): do left multiplication
                if (-1,'r','right'): do right multiplication
    mps:        InfiniteMPSCentralGauge object
                an infinite mps
    left_dominant:  tf.tensor of shape (mps.D[0],mps.D[0])
                    left dominant eigenvector of the unit-cell transfer operator of mps
    right_dominant: tf.tensor of shape (mps.D[-1],mps.D[-1])
                    right dominant eigenvector of the unit-cell transfer operator of mps
    vector:         tf.tensor of shape (mps.D[0]*mps.D[0]) or (mps.D[-1]*mps.D[-1])
                    the input vector
    Returns
    ---------------------------
    np.ndarray of shape (mps.D[0]*mps.D[0]) or (mps.D[-1]*mps.D[-1])

    """

    if direction in (1, 'l', 'left'):
        x = tf.reshape(tf.convert_to_tensor(vector), (mps.D[0], mps.D[0]))
        temp = x - mps.unitcell_transfer_op('left', x) + ncon(
            [x, right_dominant], [[1, 2], [1, 2]]) * left_dominant
        return tf.reshape(temp, [mps.D[-1] * mps.D[-1]]).numpy()

    if direction in (-1, 'r', 'right'):
        x = tf.reshape(tf.convert_to_tensor(vector), [mps.D[-1], mps.D[-1]])
        temp = x - mps.unitcell_transfer_op('right', x) + ncon(
            [left_dominant, x], [[1, 2], [1, 2]]) * right_dominant
        return tf.reshape(temp, [mps.D[0] * mps.D[0]]).numpy()
Author: zoltanegyed, Project: TensorNetwork, Lines: 34, Source: misc_mps.py
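The snippet above runs eagerly (note the final .numpy() call). A stripped-down, hypothetical sketch of the flatten/reshape pattern it relies on, converting a flat input vector into a (D, D) matrix, applying an operation, and flattening the result back to a NumPy vector (assuming TensorFlow 2.x eager execution; the operation here is a placeholder, not the real transfer operator):

import numpy as np
import tensorflow as tf

D = 4
vector = np.random.randn(D * D)                       # flat input, as an iterative solver would supply it

x = tf.reshape(tf.convert_to_tensor(vector), (D, D))  # view it as a D x D matrix
temp = x - 0.5 * x                                    # stand-in for the real operator action
result = tf.reshape(temp, [D * D]).numpy()            # back to a flat np.ndarray, as documented above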

Example 4: testOneOpCond

  def testOneOpCond(self):
    with self.test_session():
      v = tf.Variable(0)
      c = tf.convert_to_tensor(0)
      one = tf.convert_to_tensor(1)
      two = tf.convert_to_tensor(2)
      p = tf.greater_equal(c, 1)

      def a():
        return tf.assign(v, one)

      def b():
        return tf.assign(v, two)

      i = tf.cond(p, a, b)
      self.assertTrue(isinstance(i, tf.Tensor))
      tf.initialize_all_variables().run()

      self.assertEqual(0, v.eval())

      # True case: c = 2 is >= 1, v is set to 1.
      self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
      self.assertEqual(1, v.eval())

      # False case: c = 0 is not >= 1, v is set to 2.
      self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
      self.assertEqual(2, v.eval())
Author: hypatiad, Project: tensorflow, Lines: 27, Source: control_flow_ops_py_test.py

Example 5: _prepare_args_with_initial_simplex

def _prepare_args_with_initial_simplex(objective_function,
                                       initial_simplex,
                                       objective_at_initial_simplex,
                                       batch_evaluate_objective):
  """Evaluates the objective function at the specified initial simplex."""
  initial_simplex = tf.convert_to_tensor(initial_simplex)

  # If d is the dimension of the problem, the number of vertices in the
  # simplex should be d+1. From this, we can infer the number of dimensions
  # as n - 1 where n is the number of vertices specified.
  num_vertices = tf.shape(initial_simplex)[0]
  dim = num_vertices - 1
  num_evaluations = 0

  if objective_at_initial_simplex is None:
    objective_at_initial_simplex, n_evals = _evaluate_objective_multiple(
        objective_function, initial_simplex, batch_evaluate_objective)
    num_evaluations += n_evals
  objective_at_initial_simplex = tf.convert_to_tensor(
      objective_at_initial_simplex)
  return (dim,
          num_vertices,
          initial_simplex,
          objective_at_initial_simplex,
          num_evaluations)
Author: lewisKit, Project: probability, Lines: 25, Source: nelder_mead.py

Example 6: test_CE_loss

def test_CE_loss(sess, CE_arrays):
    y, y_hat = CE_arrays
    y = tf.convert_to_tensor(y, dtype=tf.float64)
    y_hat = tf.convert_to_tensor(y_hat, dtype=tf.float64)
    sess.run(cross_entropy_loss(y,y_hat))
    assert 1
    print("CE_loss ran to completion")
Author: kingtaurus, Project: cs224d, Lines: 7, Source: test_softmax.py

Example 7: testSumGradArgs

 def testSumGradArgs(self):
   with self.test_session(use_gpu=False):
     indices = [tf.convert_to_tensor([0, 1, 2, 3]),
                tf.convert_to_tensor([2, 3])]
     values = [tf.convert_to_tensor([2, 3, 5, 7]), tf.convert_to_tensor([1, 1])]
     self.assertAllEqual(
         tf.dynamic_stitch(indices, values).eval(), [2, 3, 1, 1])
Author: CdricGmd, Project: tensorflow, Lines: 7, Source: embedding_ops_test.py

Example 8: file_to_dataset

def file_to_dataset(filepath, N, vectorizer):
    # Convert a file to a dataset: returns X and Y tensors of integer indexes, where each
    # row of X holds the N context words leading up to the corresponding target word in Y.
    f = open(filepath)
    lines = f.readlines()
    X = []
    Y = []
    line_index = 0
    for line in lines:
        if line_index % 1000 == 0:
            print(str(line_index) + '/' + str(len(lines)) + ' lines read...')
        line_index +=1
        words =  line.split()
        padding = (N-1)*['<pre>']

        # remove the trailing '\n' at the end, add padding to the front
        words = padding + words[:-1]
        for i in range(0, len(words) - N):
            x = []
            y = vectorizer.transform(words[i+N]).toarray()
            x_words = words[i:i+N]
            for word in x_words:
                x.append(vectorizer.transform(word).toarray())
            X.append(x)
            Y.append(y)

    return tf.convert_to_tensor(X), tf.convert_to_tensor(Y)
Author: skaasj, Project: QMemNet, Lines: 26, Source: ptb_parser.py

Example 9: _compare

 def _compare(self, func, x, y, dtype):
     with self.test_session(use_gpu=False):
         out = func(
             tf.convert_to_tensor(np.array([x]).astype(dtype)), tf.convert_to_tensor(np.array([y]).astype(dtype))
         )
         ret = out.eval()
     return ret[0]
Author: peace195, Project: tensorflow, Lines: 7, Source: cwise_ops_test.py

Example 10: log_prob

    def log_prob(self, xs, zs):
        """
        Parameters
        ----------
        xs : dict of str to tf.Tensor
            Data dictionary. Each key is a data structure used in the
            model (Theano shared variable), and its value is the
            corresponding realization (tf.Tensor).
        zs : dict of str to tf.Tensor
            Latent variable dictionary. Each key names a latent variable
            used in the model (str), and its value is the corresponding
            realization (tf.Tensor).

        Returns
        -------
        tf.Tensor
            Scalar, the log joint density log p(xs, zs).

        Notes
        -----
        It wraps around a Python function. The Python function takes
        inputs of type np.ndarray and outputs a np.ndarray.
        """
        # Store keys so that ``_py_log_prob_args`` knows how each
        # value corresponds to a key.
        self.xs_keys = list(six.iterkeys(xs))
        self.zs_keys = list(six.iterkeys(zs))

        # Pass in all tensors as a flattened list for tf.py_func().
        inputs = [tf.convert_to_tensor(x) for x in six.itervalues(xs)]
        inputs += [tf.convert_to_tensor(z) for z in six.itervalues(zs)]

        return tf.py_func(self._py_log_prob_args, inputs, [tf.float32])[0]
Author: taku-y, Project: pymc3, Lines: 33, Source: edward.py
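For context, a minimal, hypothetical sketch of the tf.py_func pattern used above (TF 1.x API), assuming a plain Python function that consumes and returns NumPy arrays:

import numpy as np
import tensorflow as tf

def np_log_density(x):
    # Operates purely on np.ndarray inputs, as tf.py_func requires.
    return np.float32(-0.5 * np.sum(x * x))

x = tf.convert_to_tensor(np.random.randn(3).astype(np.float32))
log_p = tf.py_func(np_log_density, [x], tf.float32)   # a float32 tensor wrapping the Python call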

Example 11: __init__

 def __init__(self, data_dir, data_list, input_size, 
              random_scale, random_mirror, ignore_label, img_mean, coord):
     '''Initialise an ImageReader.
     
     Args:
       data_dir: path to the directory with images and masks.
       data_list: path to the file with lines of the form '/path/to/image /path/to/mask'.
       input_size: a tuple with (height, width) values, to which all the images will be resized.
       random_scale: whether to randomly scale the images prior to random crop.
       random_mirror: whether to randomly mirror the images prior to random crop.
       ignore_label: index of label to ignore during the training.
       img_mean: vector of mean colour values.
       coord: TensorFlow queue coordinator.
     '''
     self.data_dir = data_dir
     self.data_list = data_list
     self.input_size = input_size
     self.coord = coord
     
     self.image_list, self.label_list = read_labeled_image_list(self.data_dir, self.data_list)
     self.images = tf.convert_to_tensor(self.image_list, dtype=tf.string)
     self.labels = tf.convert_to_tensor(self.label_list, dtype=tf.string)
     self.queue = tf.train.slice_input_producer([self.images, self.labels],
                                                shuffle=input_size is not None) # not shuffling if it is val
     self.image, self.label = read_images_from_disk(self.queue, self.input_size, random_scale, random_mirror, ignore_label, img_mean) 
Author: YCYchunyan, Project: Deeplab-v2--ResNet-101--Tensorflow, Lines: 25, Source: image_reader.py

Example 12: testPlaceholder

  def testPlaceholder(self):
    with self.test_session(use_gpu=True):
      # Test using placeholder with a defined shape.
      ph_0 = tf.placeholder(tf.int32, shape=[])
      result_0 = tf.convert_to_tensor([[0, 0, 0],
                                       [0, ph_0, 0],
                                       [0, 0, 0]])
      self.assertAllEqual([[0, 0, 0],
                           [0, 1, 0],
                           [0, 0, 0]],
                          result_0.eval(feed_dict={ph_0: 1}))
      self.assertAllEqual([[0, 0, 0],
                           [0, 2, 0],
                           [0, 0, 0]],
                          result_0.eval(feed_dict={ph_0: 2}))

      # Test using placeholder with an undefined shape.
      ph_1 = tf.placeholder(tf.int32)
      result_1 = tf.convert_to_tensor([[0, 0, 0],
                                       [0, ph_1, 0],
                                       [0, 0, 0]])
      self.assertAllEqual([[0, 0, 0], [0, 1, 0], [0, 0, 0]],
                          result_1.eval(feed_dict={ph_1: 1}))
      self.assertAllEqual([[0, 0, 0], [0, 2, 0], [0, 0, 0]],
                          result_1.eval(feed_dict={ph_1: 2}))
Author: ComeOnGetMe, Project: tensorflow, Lines: 25, Source: pack_op_test.py

Example 13: test_regression_metrics

 def test_regression_metrics(self):
     with self.test_session():
         y_a = tf.convert_to_tensor(np.random.random((6, 7)))
         y_b = tf.convert_to_tensor(np.random.random((6, 7)))
         for metric in all_regression_metrics:
             output = metric(y_a, y_b)
             assert output.eval().shape == ()
Author: TalkingData, Project: edward, Lines: 7, Source: test_metrics.py

Example 14: testLoop_1

    def testLoop_1(self):
        with self.test_session():
            zero = tf.convert_to_tensor(0)
            one = tf.convert_to_tensor(1)
            n = tf.constant(10)

            enter_zero = control_flow_ops.enter(zero, "foo_1", False)
            enter_one = control_flow_ops.enter(one, "foo_1", False)
            enter_n = control_flow_ops.enter(n, "foo_1", False)
            merge_zero = control_flow_ops.merge([enter_zero, enter_zero], name="merge_zero")[0]
            merge_one = control_flow_ops.merge([enter_one, enter_one], name="merge_one")[0]
            merge_n = control_flow_ops.merge([enter_n, enter_n], name="merge_n")[0]
            less_op = tf.less(merge_n, merge_n)
            cond_op = control_flow_ops.loop_cond(less_op)
            switch_zero = control_flow_ops.switch(merge_zero, cond_op)
            switch_one = control_flow_ops.switch(merge_one, cond_op)
            switch_n = control_flow_ops.switch(merge_n, cond_op)
            next_zero = control_flow_ops.next_iteration(switch_zero[1])
            next_one = control_flow_ops.next_iteration(switch_one[1])
            next_n = control_flow_ops.next_iteration(switch_n[1])
            merge_zero.op._update_input(1, next_zero)
            merge_one.op._update_input(1, next_one)
            merge_n.op._update_input(1, next_n)
            exit_n = control_flow_ops.exit(switch_n[0])

            result = exit_n.eval()
        self.assertAllEqual(10, result)
Author: peace195, Project: tensorflow, Lines: 27, Source: control_flow_ops_py_test.py

Example 15: batched_index

def batched_index(values, indices):
  """Equivalent to `values[:, indices]`.

  Performs indexing on batches and sequence-batches by reducing over
  zero-masked values. Compared to indexing with `tf.gather` this approach is
  more general and TPU-friendly, but may be less efficient if `num_values`
  is large. It works with tensors whose shapes are unspecified or
  partially-specified, but this op will only do shape checking on shape
  information available at graph construction time. When complete shape
  information is absent, certain shape incompatibilities may not be detected at
  runtime! See `indexing_ops_test` for detailed examples.

  Args:
    values: tensor of shape `[B, num_values]` or `[T, B, num_values]`
    indices: tensor of shape `[B]` or `[T, B]` containing indices.

  Returns:
    Tensor of shape `[B]` or `[T, B]` containing values for the given indices.

  Raises: ValueError if values and indices have sizes that are known
    statically (i.e. during graph construction), and those sizes are not
    compatible (see shape descriptions in Args list above).
  """
  with tf.name_scope("batch_indexing", values=[values, indices]):
    values = tf.convert_to_tensor(values)
    indices = tf.convert_to_tensor(indices)
    assert_compatible_shapes(values.shape, indices.shape)

    one_hot_indices = tf.one_hot(
        indices, tf.shape(values)[-1], dtype=values.dtype)
    return tf.reduce_sum(values * one_hot_indices, axis=-1)
Author: wmiao1769, Project: trfl, Lines: 31, Source: indexing_ops.py
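The docstring above describes indexing through a one-hot mask instead of tf.gather. A self-contained illustration of that trick, written directly with TF ops (the values and indices below are made up for demonstration):

import tensorflow as tf

values = tf.convert_to_tensor([[0.1, 0.2, 0.3],
                               [0.4, 0.5, 0.6]])      # shape [B=2, num_values=3]
indices = tf.convert_to_tensor([2, 0])                # shape [B=2]

one_hot = tf.one_hot(indices, tf.shape(values)[-1], dtype=values.dtype)
picked = tf.reduce_sum(values * one_hot, axis=-1)     # -> [0.3, 0.4], i.e. values[b, indices[b]]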


Note: The tensorflow.convert_to_tensor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; the copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.