

Python tensorflow.scan Function Code Examples

This article collects typical usage examples of the tensorflow.scan function in Python. If you are unsure what tf.scan does, how to call it, or what it looks like in real code, the curated examples below should help.


The following presents 15 code examples of the scan function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site surface better Python code examples.
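Before diving into the collected examples, here is a minimal sketch of how tf.scan is typically called. It is written against the TF 1.x graph API that the examples below use, and the values in the comments follow from tf.scan's cumulative semantics rather than from any of the projects cited here:

import tensorflow as tf

elems = tf.constant([1.0, 2.0, 3.0, 4.0], name="elems")

# Without an initializer, the accumulator starts at elems[0]:
# running_sum -> [1.0, 3.0, 6.0, 10.0]
running_sum = tf.scan(lambda acc, x: acc + x, elems)

# With an explicit initializer (the initializer itself is not part of the output):
# running_prod -> [2.0, 4.0, 12.0, 48.0]
running_prod = tf.scan(lambda acc, x: acc * x, elems, initializer=tf.constant(2.0))

with tf.Session() as sess:
    print(sess.run([running_sum, running_prod]))

tf.scan threads the accumulator through fn(accumulator, element) over the first dimension of elems and stacks every intermediate result along a new leading dimension, which is why running sums, running products, and recurrences fall out of it directly.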

Example 1: testScan_MultiOutputMismatchedInitializer

 def testScan_MultiOutputMismatchedInitializer(self):
     with self.test_session():
         elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
         initializer = np.array(1.0)
         # fn returns a 2-tuple but the initializer is a single value, so the structures mismatch
         with self.assertRaisesRegexp(ValueError, "two structures don't have the same number of elements"):
             tf.scan(lambda a, x: (a, -a), elems, initializer)
Author: botonchou, Project: tensorflow, Lines: 7, Source: functional_ops_test.py

Example 2: train

 def train(x=x, size_bt=size_bt, BV_t=BV_t, BH_t=BH_t):
     bv_init = tf.zeros([1, n_visible], tf.float32)
     bh_init = tf.zeros([1, n_hidden], tf.float32)
     u_t  = tf.scan(rnn_recurrence, x, initializer=u0)
     BV_t = tf.reshape(tf.scan(visible_bias_recurrence, u_t, bv_init), [size_bt, n_visible])
     BH_t = tf.reshape(tf.scan(hidden_bias_recurrence, u_t, bh_init), [size_bt, n_hidden])
     sample, cost = RBM.build_rbm(x, W, BV_t, BH_t, k=15)
     return x, sample, cost, params, size_bt            
Author: atriedman, Project: Musical_Matrices, Lines: 8, Source: rnn_rbm.py

Example 3: testScan_Simple

    def testScan_Simple(self):
        with self.test_session():
            elems = tf.constant([1.0, 2.0, 3.0, 4.0, 5.0, 6.0], name="data")
            v = tf.constant(2.0, name="v")

            r = tf.scan(lambda a, x: tf.mul(a, x), elems)
            self.assertAllEqual([1.0, 2.0, 6.0, 24.0, 120.0, 720.0], r.eval())

            r = tf.scan(lambda a, x: tf.mul(a, x), elems, initializer=v)
            self.assertAllEqual([2.0, 4.0, 12.0, 48.0, 240.0, 1440.0], r.eval())
Author: gavinsherry, Project: tensorflow, Lines: 10, Source: functional_ops_test.py

Example 4: testScanVaryingShape

    def testScanVaryingShape(self):
        with self.test_session() as sess:
            x = tf.placeholder(dtype=tf.float32, shape=[None, 2])
            x_t = tf.transpose(x)
            # scan over dimension 0 (with shape None)
            result = tf.scan(lambda a, x: a + x, x)
            # scanned over transposed dimension 0 (with shape 2)
            result_t = tf.scan(lambda a, x: a + x, x_t, infer_shape=False)
            # ensure gradients can be calculated
            result_grad = tf.gradients(result, [x])[0]
            result_t_grad = tf.gradients(result_t, [x_t])[0]

            # smoke test to ensure they all evaluate
            sess.run([result, result_t, result_grad, result_t_grad], feed_dict={x: [[1.0, 2.0]]})
Author: botonchou, Project: tensorflow, Lines: 14, Source: functional_ops_test.py

Example 5: feature

    def feature(self, input_x, name = ''):
        if len(input_x.get_shape()) == 2:
            # in case input_x is batch_size x seq_length [token ids]
            input_x = tf.nn.embedding_lookup(self.embbeding_mat, input_x)
        # input_x:  batch_size x seq_length x g_emb_dim
        pooled_outputs = []
        index = -1
        embedded_chars = tf.scan(lambda a, x: tf.matmul(x, self.W), input_x)
        embedded_chars_expanded = tf.expand_dims(embedded_chars, -1)
        for filter_size, num_filter in zip(self.filter_sizes, self.num_filters):
            index += 1
            with tf.name_scope("conv-maxpool-%s-midterm" % filter_size):
                # Convolution Layer
                conv = tf.nn.conv2d(
                    embedded_chars_expanded,
                    self.W_conv[index],
                    strides=[1, 1, 1, 1],
                    padding="VALID",
                    name="conv")
                # Apply nonlinearity
                h = tf.nn.relu(tf.nn.bias_add(conv, self.b_conv[index]), name="relu")
                # Maxpooling over the outputs
                pooled = tf.nn.max_pool(
                    h,
                    ksize=[1, self.sequence_length - filter_size + 1, 1, 1],
                    strides=[1, 1, 1, 1],
                    padding='VALID',
                    name="pool")
                pooled_outputs.append(pooled)

        # Combine all the pooled features
        num_filters_total = sum(self.num_filters)
        h_pool = tf.concat(pooled_outputs, 3)
        h_pool_flat = tf.reshape(h_pool, [-1, num_filters_total])
        return h_pool_flat
Author: IshJ, Project: Texygen, Lines: 35, Source: TextganDiscriminator.py

Example 6: testScan_MultiInputSingleOutput

 def testScan_MultiInputSingleOutput(self):
     with self.test_session():
         elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
         initializer = np.array(1.0)
         # Multiply a * 1 each time
         r = tf.scan(lambda a, x: a * (x[0] + x[1]), (elems + 1, -elems), initializer)
         self.assertAllEqual([1.0, 1.0, 1.0, 1.0, 1.0, 1.0], r.eval())
Author: botonchou, Project: tensorflow, Lines: 7, Source: functional_ops_test.py

Example 7: MiniminibatchLayer

def MiniminibatchLayer(name, n_in, dim_b, dim_c, group_size, inputs):
    inputs = tf.random_shuffle(inputs)
    inputs = tf.reshape(inputs, [-1, group_size, n_in])
    def f(a,x):
        return MinibatchLayer(name, n_in, dim_b, dim_c, x)
    outputs = tf.scan(f, inputs)
    return tf.reshape(outputs, [-1, n_in+dim_b])
Author: igul222, Project: nn, Lines: 7, Source: speech_rnn_gan.py

Example 8: _marginal_hidden_probs

  def _marginal_hidden_probs(self):
    """Compute marginal pdf for each individual observable."""

    initial_log_probs = tf.broadcast_to(self._log_init,
                                        tf.concat([self.batch_shape_tensor(),
                                                   [self._num_states]],
                                                  axis=0))
    # initial_log_probs :: batch_shape num_states

    if self._num_steps > 1:
      transition_log_probs = self._log_trans

      def forward_step(log_probs, _):
        return _log_vector_matrix(log_probs, transition_log_probs)

      dummy_index = tf.zeros(self._num_steps - 1, dtype=tf.float32)

      forward_log_probs = tf.scan(forward_step, dummy_index,
                                  initializer=initial_log_probs,
                                  name="forward_log_probs")

      forward_log_probs = tf.concat([[initial_log_probs], forward_log_probs],
                                    axis=0)
    else:
      forward_log_probs = initial_log_probs[tf.newaxis, ...]

    # returns :: num_steps batch_shape num_states

    return tf.exp(forward_log_probs)
Author: asudomoeva, Project: probability, Lines: 29, Source: hidden_markov_model.py

Example 9: tensorflow_test

def tensorflow_test():
    import tensorflow as tf
    nested_input = tf.placeholder(tf.float32, shape=[outer_len, inner_len, input_dim])

    variable = tf.Variable(np.float32(1.0))

    def inner_func(curr, prev):
        return curr + prev  # + variable

    def outer_func(curr, prev):
        inner_res = tf.scan(
                fn=inner_func,
                elems=curr,
                initializer=tf.zeros([input_dim])
            )
        return prev + inner_res

    # nested_input.set_shape
    outputs = tf.scan(
            fn=outer_func,
            elems=nested_input,
            initializer=tf.zeros([inner_len, input_dim])
        )

    loss = tf.reduce_sum(outputs)
    # optimizer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
    # train_op = optimizer.minimize(loss)
    grad = tf.gradients(loss, [variable])

    init_op = tf.initialize_all_variables()

    with tf.Session() as sess:
        sess.run(init_op)
Author: zihangdai, Project: tensorflow_feature_test, Lines: 33, Source: nested_while_loop.py

Example 10: outer_func

 def outer_func(curr, prev):
     inner_res = tf.scan(
             fn=inner_func,
             elems=curr,
             initializer=tf.zeros([input_dim])
         )
     return prev + inner_res
Author: zihangdai, Project: tensorflow_feature_test, Lines: 7, Source: nested_while_loop.py

Example 11: omniglot

def omniglot():

    sess = tf.InteractiveSession()

    """    def wrapper(v):
        return tf.Print(v, [v], message="Printing v")

    v = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='Matrix')

    sess.run(tf.global_variables_initializer())
    sess.run(tf.local_variables_initializer())

    temp = tf.Variable(initial_value=np.arange(0, 36).reshape((6, 6)), dtype=tf.float32, name='temp')
    temp = wrapper(v)
    #with tf.control_dependencies([temp]):
    temp.eval()
    print 'Hello'"""

    def update_tensor(V, dim2, val):  # Update tensor V: for each row i, set V[i, dim2[i]] = val[i]
        val = tf.cast(val, V.dtype)
        def body(_, elems):
            # Unpack here: tuple parameters in a function signature are Python 2-only syntax.
            v, d2, chg = elems
            d2_int = tf.cast(d2, tf.int32)
            # tf.concat_v2 was renamed tf.concat in TF 1.0.
            return tf.slice(tf.concat([v[:d2_int], [chg], v[d2_int + 1:]], axis=0),
                            [0], [v.get_shape().as_list()[0]])
        Z = tf.scan(body, elems=(V, dim2, val),
                    initializer=tf.constant(1, shape=V.get_shape().as_list()[1:], dtype=tf.float32),
                    name="Scan_Update")
        return Z
Author: jayvischeng, Project: NTM-One-Shot-TF, Lines: 25, Source: TestUpd.py

Example 12: build

    def build(self, preSoftmaxPi, preSoftmaxA, preSoftmaxB):
        M, V = preSoftmaxB.shape

        self.preSoftmaxPi = tf.Variable(preSoftmaxPi)
        self.preSoftmaxA = tf.Variable(preSoftmaxA)
        self.preSoftmaxB = tf.Variable(preSoftmaxB)

        pi = tf.nn.softmax(self.preSoftmaxPi)
        A = tf.nn.softmax(self.preSoftmaxA)
        B = tf.nn.softmax(self.preSoftmaxB)

        # define cost
        self.tfx = tf.placeholder(tf.int32, shape=(None,), name='x')
        def recurrence(old_a_old_s, x_t):
            old_a = tf.reshape(old_a_old_s[0], (1, M))
            a = tf.matmul(old_a, A) * B[:, x_t]
            a = tf.reshape(a, (M,))
            s = tf.reduce_sum(a)
            return (a / s), s

        # remember, tensorflow scan is going to loop through
        # all the values!
        # we treat the first value differently than the rest
        # so we only want to loop through tfx[1:]
        # the first scale being 1 doesn't affect the log-likelihood
        # because log(1) = 0
        alpha, scale = tf.scan(
            fn=recurrence,
            elems=self.tfx[1:],
            initializer=(pi*B[:,self.tfx[0]], np.float32(1.0)),
        )

        self.cost = -tf.reduce_sum(tf.log(scale))
        self.train_op = tf.train.AdamOptimizer(1e-2).minimize(self.cost)
Author: cmagnusb, Project: machine_learning_examples, Lines: 34, Source: hmmd_tf.py

Example 13: testScan_MultiInputSameTypeOutput

 def testScan_MultiInputSameTypeOutput(self):
     with self.test_session() as sess:
         elems = np.array([1.0, 2.0, 3.0, 4.0, 5.0, 6.0])
         r = tf.scan(lambda a, x: (a[0] + x[0], a[1] + x[1]), (elems, -elems))
         r_value = sess.run(r)
         self.assertAllEqual(np.cumsum(elems), r_value[0])
         self.assertAllEqual(np.cumsum(-elems), r_value[1])
Author: botonchou, Project: tensorflow, Lines: 7, Source: functional_ops_test.py

Example 14: cummax

def cummax(x, reverse=False, name=None):
    """Compute the cumulative maximum of the tensor `x` along `axis`. This
    operation is similar to the more classic `cumsum`. Only support 1D Tensor
    for now.

    Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
       `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
       `complex128`, `qint8`, `quint8`, `qint32`, `half`.
       axis: A `Tensor` of type `int32` (default: 0).
       reverse: A `bool` (default: False).
       name: A name for the operation (optional).
    Returns:
    A `Tensor`. Has the same type as `x`.
    """
    with ops.name_scope(name, "Cummax", [x]) as name:
        x = ops.convert_to_tensor(x, name="x")
        # Not very optimal: should directly integrate reverse into tf.scan.
        if reverse:
            x = tf.reverse(x, axis=[0])
        # 'Accumulating' maximum: ensure it is always increasing.
        cmax = tf.scan(lambda a, y: tf.maximum(a, y), x,
                       initializer=None, parallel_iterations=1,
                       back_prop=False, swap_memory=False)
        if reverse:
            cmax = tf.reverse(cmax, axis=[0])
        return cmax
Author: bowrian, Project: SSD-Tensorflow, Lines: 27, Source: math.py

Example 15: testScanUnknownShape

 def testScanUnknownShape(self):
   x = tf.placeholder(tf.float32)
   initializer = tf.placeholder(tf.float32)
   def fn(_, current_input):
     return current_input
   y = tf.scan(fn, x, initializer=initializer)
   self.assertIs(None, y.get_shape().dims)
Author: 285219011, Project: hello-world, Lines: 7, Source: functional_ops_test.py


Note: The tensorflow.scan examples in this article were compiled from open-source code and documentation hosted on GitHub, MSDocs, and similar platforms. The snippets are selected from open-source projects; copyright of the source code remains with the original authors, and any use or redistribution should follow each project's license. Please do not reproduce without permission.