

Python tensorflow.unpack Method Code Examples

This article collects typical usage examples of the tensorflow.unpack method in Python. If you are wondering what tensorflow.unpack does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from the tensorflow module.


The following presents 15 code examples of the tensorflow.unpack method, sorted by popularity by default.
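Before the examples, here is a minimal sketch of what tf.unpack does, assuming a pre-1.0 TensorFlow release where the op still exists: it splits a tensor along a given axis into a Python list of lower-rank tensors. In TensorFlow 1.0 and later the same operation is exposed as tf.unstack.

import tensorflow as tf

# Minimal sketch, assuming a pre-1.0 TensorFlow where tf.unpack is available.
x = tf.constant([[1, 2, 3],
                 [4, 5, 6]])        # shape [2, 3]

rows = tf.unpack(x, axis=0)         # list of 2 tensors, each of shape [3]
cols = tf.unpack(x, axis=1)         # list of 3 tensors, each of shape [2]

with tf.Session() as sess:
    print(sess.run(rows))           # [array([1, 2, 3]), array([4, 5, 6])]
    print(sess.run(cols))           # [array([1, 4]), array([2, 5]), array([3, 6])]

# In TensorFlow 1.0+ the equivalent call is tf.unstack(x, axis=...).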

Example 1: sequence_loss

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def sequence_loss(self, y_pred, y_true):
        '''
        Loss function for the seq2seq RNN.  Reshape predicted and true (label) tensors, generate dummy weights,
        then use seq2seq.sequence_loss to actually compute the loss function.
        '''
        if self.verbose > 2: print ("my_sequence_loss y_pred=%s, y_true=%s" % (y_pred, y_true))
        logits = tf.unpack(y_pred, axis=1)		# list of [-1, num_decoder_symbols] elements
        targets = tf.unpack(y_true, axis=1)		# y_true has shape [-1, self.out_seq_len]; unpack to list of self.out_seq_len [-1] elements
        if self.verbose > 2:
            print ("my_sequence_loss logits=%s" % (logits,))
            print ("my_sequence_loss targets=%s" % (targets,))
        weights = [tf.ones_like(yp, dtype=tf.float32) for yp in targets]
        if self.verbose > 4: print ("my_sequence_loss weights=%s" % (weights,))
        sl = seq2seq.sequence_loss(logits, targets, weights)
        if self.verbose > 2: print ("my_sequence_loss return = %s" % sl)
        return sl 
Author: ichuang, Project: tflearn_seq2seq, Lines: 18, Source: tflearn_seq2seq.py

Example 2: _build_global_context

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def _build_global_context(
        net,
        is_training=False,
        bayesian=False,
        dropout_keep_prob=0.8):

    with tf.variable_scope('GlobalContext'):
        # Reduce feature dimension before LSTM to reduce param count
        net = slim.conv2d(net, 1024, 1, padding='VALID', scope='conv_reduce_1x1')

        #net = slim.dropout(net, dropout_keep_prob, is_training=bayesian or is_training, scope='Dropout')

        rows = tf.unpack(net, axis=1)
        net = tf.pack(
            [lstm.bidir_lstm(r, 512, scope='row%d' % i) for i, r in enumerate(rows)],
            axis=1)
        print('Horizontal LSTM', net.get_shape())

        cols = tf.unpack(net, axis=2)
        net = tf.pack(
            [lstm.bidir_lstm(r, 512, scope='col%d' % i) for i, r in enumerate(cols)],
            axis=2)
        print('Vertical LSTM', net.get_shape())

    return net 
Author: rwightman, Project: tensorflow-litterbox, Lines: 27, Source: build_resnet_sdc.py

Example 3: _kernel_summary

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def _kernel_summary(conv_weights, sess):
  """Helper to create image summaries for convolutional kernels

  Creates an image summary of a convolutional kernel

  Args:
    conv_weights: convolutional kernel Variable
  Returns:
    nothing
  """
  #[6,6,12,12] -> [12,12,6,6] -> 12 x [12,6,6]
  kernels = tf.unpack(tf.transpose(conv_weights, perm=[3,2,0,1]))
  for i,kernel in enumerate(kernels):
    #[12, 6, 6] -> 12 x [8, 8]
    padding = [[1,1], [1,1]]
    padded_kernels = [tf.pad(single_kernel, padding) for single_kernel in tf.unpack(kernel)]

    #12 x [8, 8] -> [8, 12 * 8]
    horizontally_concatenated = tf.concat(1, padded_kernels) 
Author: twerkmeister, Project: iLID, Lines: 21, Source: deepaudio.py

Example 4: kernel_summary

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def kernel_summary(sess):
  with sess.as_default():
    for layer in ["conv1", "conv2", "conv3"]:
      with tf.variable_scope(layer, reuse=True):
        weights = tf.get_variable('weights')
      kernels = tf.unpack(tf.transpose(weights, perm=[3,2,0,1]))
      for i,kernel in enumerate(kernels):
        #[12, 6, 6] -> 12 x [8, 8]
        padding = [[1,1], [1,1]]
        padded_kernels = [tf.pad(single_kernel, padding) for single_kernel in tf.unpack(kernel)]

        #12 x [8, 8] -> [8, 12 * 8]
        horizontally_concatenated = tf.concat(1, padded_kernels)

        image = horizontally_concatenated.eval()

        misc.imsave(layer + "_" + str(i) + ".png", image) 
Author: twerkmeister, Project: iLID, Lines: 19, Source: evaluate.py

Example 5: distort_image

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def distort_image(image):
    """Perform random distortions to the given 4D image and return result"""

    # Switch to 3D as that's what these operations require
    slices = tf.unpack(image)
    output = []

    # Perform pixel-wise distortions
    for image in slices:
        image  = tf.image.random_flip_left_right(image)
        image  = tf.image.random_saturation(image, .2, 2.)
        image += tf.truncated_normal(image.get_shape(), stddev=.05)
        image  = tf.image.random_contrast(image, .85, 1.15)
        image  = tf.image.random_brightness(image, .3)
        
        output.append(image)

    # Go back to 4D
    image   = tf.pack(output)
    
    return image 
Author: david-gpu, Project: deep-makeover, Lines: 23, Source: dm_utils.py

Example 6: _logp

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def _logp(self, result, weights, centers, std):
        total_logps = None
        
        # loop over clusters
        for i, center in enumerate(tf.unpack(centers)):
            # compute vector of likelihoods that each point could be generated from *this* cluster
            cluster_lls = tf.reduce_sum(util.dists.gaussian_log_density(result, center, std), 1)

            # sum these likelihoods, weighted by cluster probabilities
            cluster_logps = tf.log(weights[i]) + cluster_lls
            if total_logps is not None:
                total_logps = util.logsumexp(total_logps, cluster_logps)
            else:
                total_logps = cluster_logps
            
        # finally sum the log probabilities of all points to get a likelihood for the dataset
        obs_lp = tf.reduce_sum(total_logps)
        
        return obs_lp 
Author: davmre, Project: elbow, Lines: 21, Source: factorizations.py

Example 7: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def __init__(self, N, n_thetas=1):

        self.N = N

        self.theta_q_alpha = tf.Variable(1.0, name="theta_q_alpha")
        self.theta_q_beta = tf.Variable(2.0, name="theta_q_beta")

        self.data = tf.placeholder(dtype=tf.float32, shape=(N,), name="data")

        self.thetas = tf.placeholder(shape=(n_thetas,), dtype=tf.float32, name="thetas")
        
        self.thetas_q_log_density = tf.reduce_sum(dists.beta_log_density(self.thetas, alpha=self.theta_q_alpha, beta=self.theta_q_beta))
        self.thetas_prior = tf.reduce_sum(dists.beta_log_density(self.thetas, alpha=1., beta=1.) )

        self.data_liks = tf.pack([tf.reduce_sum(dists.bernoulli_log_density(self.data, theta)) for theta in tf.unpack(self.thetas)])
        self.joint_density = self.data_liks + self.thetas_prior
        
        self.stochastic_elbo = self.joint_density - self.thetas_q_log_density

        # TODO: add control variates
        self.surrogate = tf.reduce_mean(self.thetas_q_log_density * tf.stop_gradient(self.stochastic_elbo) + self.stochastic_elbo) 
Author: davmre, Project: elbow, Lines: 23, Source: bernoulli_raw_beta_scorefn.py

Example 8: __call__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def __call__(self, memory, question, question_lengths):
        # split memory and get corresponding embeddings
        e1, r, e2 = tf.unpack(memory, axis=2)
        C = tf.ones_like(e1, dtype='float32') * -1000
        mask = tf.not_equal(e1, self.entity_vocab_size - 1)
        key = self.get_key_embedding(e1, r)
        value = self.get_value_embedding(e2)
        ques = self.get_question_embedding(question, question_lengths)

        # get attention over retrieved information based on the question
        attn_ques = self.seek_attention(ques, key, value, C, mask)

        # output embeddings - share with entity lookup table
        # B = tf.slice(self.entity_lookup_table, [0, 0], [1789936, -1])
        B = self.entity_lookup_table_extended
        # project down
        model_answer = tf.add(tf.matmul(attn_ques, self.W1), self.b1)  # model_answer: [B, D]
        logits = tf.matmul(model_answer, B, transpose_b=True, name='ent_mul_manzil')  # scores: [B, num_entities]
        return logits 
Author: rajarshd, Project: TextKBQA, Lines: 21, Source: KBQA.py

Example 9: process_example

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def process_example(self, tensors, mode='eval', thread_id=0):
        train = (mode == 'train')
        image, image_timestamp, camera_id = tensors[:3]

        #FIXME push single/multi image handling into image_process_sdc if we want to share random augmentations
        if self.num_input_images > 1:
            assert(len(image.get_shape()) > 0)
            print('Multi image', image.get_shape())
            split_image = tf.unpack(image)
            split_processed = []
            for i, x in enumerate(split_image):
                suffix = '%d' % i
                xp, _ = image_preprocess_sdc(
                    x, camera_id,
                    height=self.height, width=self.width, image_fmt=self.image_fmt,
                    normalize=self.standardize_input, train=train, summary_suffix=suffix, thread_id=thread_id)
                split_processed.append(xp)
            processed_image = tf.pack(split_processed)
            #FIXME need to sort out flip across multi-images
            flip_coeff = tf.constant(1.0, dtype=tf.float32)
        else:
            print('Single image')
            processed_image, flip_coeff = image_preprocess_sdc(
                image, camera_id,
                height=self.height, width=self.width, image_fmt=self.image_fmt,
                normalize=self.standardize_input, train=train, thread_id=thread_id)

        if mode != 'pred':
            steering_angle, gps_coord = tensors[-2:]
            if steering_angle is not None:
                steering_angle = tf.mul(steering_angle, flip_coeff)
                if self.standardize_labels:
                    steering_angle /= STEERING_STD
                elif self.mu_law_steering:
                    print("Encode mu-law angles")
                    steering_angle = mu_law_steering_enc(steering_angle)
            if gps_coord is not None and self.standardize_labels:
                gps_coord = (gps_coord - GPS_MEAN) / GPS_STD
            return processed_image, image_timestamp, steering_angle, gps_coord
        else:
            return processed_image, image_timestamp, tf.zeros((1,)), tf.zeros((2,)) 
Author: rwightman, Project: tensorflow-litterbox, Lines: 43, Source: processor_sdc.py

Example 10: lstm

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def lstm(
        inputs,
        num_units,
        num_layers=1,
        initializer_fn=tf.truncated_normal,
        initializer_params=_default_initializer_params,
        dtype=tf.float32,
        scope=None
):
    print('input shape', inputs.get_shape())
    shape = inputs.get_shape().as_list()
    batch_size = shape[0]
    inputs_unpacked = tf.unpack(inputs, axis=1)

    cell = tf.contrib.rnn.python.ops.lstm_ops.LSTMBlockCell(num_units=num_units)
    print('cell state size', cell.state_size)

    if num_layers > 1:
        cell = tf.nn.rnn_cell.MultiRNNCell([cell] * num_layers)

    initializer_params = initializer_params or {}
    initializer_params['dtype'] = dtype
    if isinstance(cell.state_size, tuple):
        initial_state = tuple(initializer_fn([batch_size, s]) for s in cell.state_size)
    else:
        initial_state = initializer_fn(shape=[batch_size, cell.state_size], **initializer_params)

    outputs, _ = tf.nn.rnn(
        cell,
        inputs_unpacked,
        initial_state=initial_state,
        dtype=dtype,
        scope=scope)

    outputs = tf.pack(outputs, axis=1)
    print('output shape', outputs.get_shape())

    return outputs 
Author: rwightman, Project: tensorflow-litterbox, Lines: 40, Source: lstm.py

Example 11: testSimple

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def testSimple(self):
    np.random.seed(7)
    with self.test_session(use_gpu=True):
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
        data = np.random.randn(*shape)
        # Convert data to a single tensorflow tensor
        x = tf.constant(data)
        # Unpack into a list of tensors
        cs_unpacked = tf.unpack(x, num=shape[0])
        cs_unstacked = tf.unstack(x, num=shape[0])
        for cs in (cs_unpacked, cs_unstacked):
          self.assertEqual(type(cs), list)
          self.assertEqual(len(cs), shape[0])
          cs = [c.eval() for c in cs]
          self.assertAllEqual(cs, data) 
Author: tobegit3hub, Project: deep_image_model, Lines: 17, Source: unpack_op_test.py

Example 12: testGradientsAxis0

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def testGradientsAxis0(self):
    for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
      data = np.random.randn(*shape)
      shapes = [shape[1:]] * shape[0]
      for i in xrange(shape[0]):
        with self.test_session(use_gpu=True):
          x = tf.constant(data)
          cs = tf.unpack(x, num=shape[0])
          err = tf.test.compute_gradient_error(x, shape, cs[i], shapes[i])
          self.assertLess(err, 1e-6)

          cs = tf.unstack(x, num=shape[0])
          err = tf.test.compute_gradient_error(x, shape, cs[i], shapes[i])
          self.assertLess(err, 1e-6) 
Author: tobegit3hub, Project: deep_image_model, Lines: 16, Source: unpack_op_test.py

Example 13: testGradientsAxis1

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def testGradientsAxis1(self):
    for shape in (2, 3), (3, 2), (4, 3, 2):
      data = np.random.randn(*shape)
      out_shape = list(shape)
      del out_shape[1]
      for i in xrange(shape[1]):
        with self.test_session(use_gpu=True):
          x = tf.constant(data)
          cs = tf.unpack(x, num=shape[1], axis=1)
          err = tf.test.compute_gradient_error(x, shape, cs[i], out_shape)
          self.assertLess(err, 1e-6)

          cs = tf.unstack(x, num=shape[1], axis=1)
          err = tf.test.compute_gradient_error(x, shape, cs[i], out_shape)
          self.assertLess(err, 1e-6) 
Author: tobegit3hub, Project: deep_image_model, Lines: 17, Source: unpack_op_test.py

Example 14: testInferNum

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def testInferNum(self):
    with self.test_session():
      for shape in (2,), (3,), (2, 3), (3, 2), (4, 3, 2):
        x = tf.placeholder(np.float32, shape=shape)
        cs = tf.unpack(x)
        self.assertEqual(type(cs), list)
        self.assertEqual(len(cs), shape[0])

        cs = tf.unstack(x)
        self.assertEqual(type(cs), list)
        self.assertEqual(len(cs), shape[0]) 
Author: tobegit3hub, Project: deep_image_model, Lines: 13, Source: unpack_op_test.py

Example 15: testCannotInferNumFromUnknownShape

# Required import: import tensorflow [as alias]
# Or: from tensorflow import unpack [as alias]
def testCannotInferNumFromUnknownShape(self):
    x = tf.placeholder(np.float32)
    with self.assertRaisesRegexp(
        ValueError, r'Cannot infer num from shape <unknown>'):
      tf.unpack(x)
    with self.assertRaisesRegexp(
        ValueError, r'Cannot infer num from shape <unknown>'):
      tf.unstack(x) 
Author: tobegit3hub, Project: deep_image_model, Lines: 10, Source: unpack_op_test.py


Note: The tensorflow.unpack examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use should follow each project's License. Do not reproduce without permission.