

Python tensorflow.expand_dims Function Code Examples

This article collects typical usage examples of the tensorflow.expand_dims function in Python. If you have been wondering what exactly expand_dims does, how to call it, or what it looks like in real code, the curated examples below should help.


Fifteen code examples of the expand_dims function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
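Before the individual examples, here is a minimal sketch of the basic behaviour (assuming only that TensorFlow is importable; the shapes in the comments are the statically inferred ones). tf.expand_dims inserts a new axis of length 1 at the requested position:

import tensorflow as tf

x = tf.constant([[1.0, 2.0, 3.0],
                 [4.0, 5.0, 6.0]])   # shape (2, 3)
a = tf.expand_dims(x, 0)             # shape (1, 2, 3): add a leading batch axis
b = tf.expand_dims(x, 1)             # shape (2, 1, 3): insert an axis in the middle
c = tf.expand_dims(x, -1)            # shape (2, 3, 1): add a trailing channel axis
print(a.shape, b.shape, c.shape)     # (1, 2, 3) (2, 1, 3) (2, 3, 1)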

Example 1: copy_net_logit_function

                def copy_net_logit_function(state):
                    state = tf.nn.dropout(state, self.dropout_placeholder)

                    # the logits for generating the next word are computed in
                    # the standard way
                    generate_logits = tf.matmul(state, decoding_w) + decoding_b

                    # Equation 8 in the paper ... in shape of source sentence
                    # (batch x time)
                    copy_logits_in_time = tf.reduce_sum(
                        projected_inputs * tf.expand_dims(state, 1), [2])

                    # mask out the padding in exponential domain
                    copy_logits_in_time_exp_masked = tf.exp(
                        tf.minimum([[80.0]], copy_logits_in_time)) * copy_mask

                    #  ... in shape of vocabulary (batch x time x vocabulary)
                    copy_logits_in_vocabulary = tf.expand_dims(
                        copy_logits_in_time_exp_masked,
                        2) * vocabulary_shaped_indices

                    # Equation 6 without normalization
                    copy_logits_exp = tf.reduce_sum(copy_logits_in_vocabulary,
                                                    [1])

                    logits_exp = copy_logits_exp \
                                 + tf.exp(tf.minimum([[80.0]], generate_logits))

                    return (tf.log(tf.maximum([[1e-40]], logits_exp)),
                            copy_logits_in_time)
Author: alvaz16, Project: neuralmonkey, Lines: 30, Source: decoder.py

Example 2: bond_conv_layer

def bond_conv_layer(activated_atoms, bv_params, layer):
    flow_depth = flow_layer_depths[layer]
    
    next_activated_atoms = tf.zeros(tf.pack([N_atoms_ph, flow_depth]))

    for deg in range(1, 6):
        indices = tf.sub(deg_list_ph, tf.constant(1,dtype=tf.int32))
        flow_param = bv_params['A_flow'+str(layer)+'_'+str(deg)]
        flow_map = tf.gather(flow_param, type_adj_ph)

        multiples = tf.pack([N_atoms_ph, 1, 1])
        activated_atoms_dim = tf.expand_dims(tf.tile(tf.expand_dims(activated_atoms, 0), multiples), 2)

        adj_mul = tf.batch_matmul(activated_atoms_dim, flow_map)
        adj_mul = tf.squeeze(adj_mul, [2])

        deg_mask = tf.to_float(tf.equal(deg_list_ph, deg))

        multiples = tf.pack([1, N_atoms_ph, flow_depth])
        deg_list_dim = tf.tile(tf.expand_dims(tf.expand_dims(deg_mask, 1), 1), multiples)

        multiples = tf.pack([N_atoms_ph, N_atoms_ph, 1])
        biases = tf.tile(bv_params['b_flow'+str(layer)+'_'+str(deg)], multiples)
        filtered_atoms = tf.add(tf.mul(adj_mul, deg_list_dim), biases)

        next_activated_atoms = next_activated_atoms + tf.reduce_sum(filtered_atoms, 1)
        
    next_activated_atoms = tf.nn.relu(next_activated_atoms)
    return next_activated_atoms
Author: rbharath, Project: deepchem, Lines: 29, Source: bondvolution.py

Example 3: encode_coordinates_alt

  def encode_coordinates_alt(self, net):
    """An alternative implemenation for the encoding coordinates.

    Args:
      net: a tensor of shape=[batch_size, height, width, num_features]

    Returns:
      a list of tensors with encoded image coordinates in them.
    """
    batch_size, h, w, _ = net.shape.as_list()
    h_loc = [
      tf.tile(
          tf.reshape(
              tf.contrib.layers.one_hot_encoding(
                  tf.constant([i]), num_classes=h), [h, 1]), [1, w])
      for i in xrange(h)
    ]
    h_loc = tf.concat([tf.expand_dims(t, 2) for t in h_loc], 2)
    w_loc = [
      tf.tile(
          tf.contrib.layers.one_hot_encoding(tf.constant([i]), num_classes=w),
          [h, 1]) for i in xrange(w)
    ]
    w_loc = tf.concat([tf.expand_dims(t, 2) for t in w_loc], 2)
    loc = tf.concat([h_loc, w_loc], 2)
    loc = tf.tile(tf.expand_dims(loc, 0), [batch_size, 1, 1, 1])
    return tf.concat([net, loc], 3)
Author: banjocui, Project: models, Lines: 27, Source: model_test.py

Example 4: dot

def dot(x, y):
    """Compute dot product between a Tensor matrix and a Tensor vector.

    If x is a ``[M x N]`` matrix, then y is a ``M``-vector.

    If x is a ``M``-vector, then y is a ``[M x N]`` matrix.

    Parameters
    ----------
    x : tf.Tensor
        ``M x N`` matrix or ``M`` vector (see above)
    y : tf.Tensor
        ``M`` vector or ``M x N`` matrix (see above)

    Returns
    -------
    tf.Tensor
        ``N``-vector
    """
    if len(x.get_shape()) == 1:
        vec = x
        mat = y
        return tf.matmul(tf.expand_dims(vec, 0), mat)
    else:
        mat = x
        vec = y
        return tf.matmul(mat, tf.expand_dims(vec, 1))
Author: jf0510310315, Project: edward, Lines: 27, Source: util.py
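A hedged usage sketch for the dot helper above (the tensors are made up for illustration); passing the vector as the first argument exercises the tf.expand_dims(vec, 0) branch:

import tensorflow as tf

mat = tf.constant([[1.0, 2.0],
                   [3.0, 4.0],
                   [5.0, 6.0]])       # M x N = 3 x 2
vec = tf.constant([1.0, 0.0, -1.0])   # M = 3

# expand_dims lifts vec to shape (1, 3) so the matmul is defined; the result has shape (1, 2)
row = dot(vec, mat)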

Example 5: call

  def call(self, x):
    """Execute this layer on input tensors.

    Parameters
    ----------
    x: list of Tensor 
      should be [atom_features(batch_size*max_n_atoms*n_embedding), 
                 distance_matrix(batch_size*max_n_atoms*max_n_atoms*n_distance), 
                 distance_matrix_mask(batch_size*max_n_atoms*max_n_atoms)]

    Returns
    -------
    tf.Tensor
      new embeddings for atoms, same shape as x[0]
    """
    self.build()
    atom_features = x[0]
    distance_matrix = x[1]
    distance_matrix_mask = x[2]
    outputs = tf.multiply(
        (tf.tensordot(distance_matrix, self.W_df, [[3], [0]]) + self.b_df),
        tf.expand_dims(
            tf.tensordot(atom_features, self.W_cf, [[2], [0]]) + self.b_cf,
            axis=1))
    # for atom i in a molecule m, this step multiplies together distance info of atom pair(i,j)
    # and embeddings of atom j(both gone through a hidden layer)
    outputs = tf.tensordot(outputs, self.W_fc, [[3], [0]])
    outputs = tf.multiply(outputs, tf.expand_dims(distance_matrix_mask, axis=3))
    # masking the outputs tensor for pair(i,i) and all paddings
    outputs = self.activation(outputs)
    outputs = tf.reduce_sum(outputs, axis=2) + atom_features
    # for atom i, sum the influence from all other atom j in the molecule

    return outputs
Author: joegomes, Project: deepchem, Lines: 34, Source: layers.py

Example 6: _mean_image_subtraction

def _mean_image_subtraction(image, means):
  """Subtracts the given means from each image channel.

  For example:
    means = [123.68, 116.779, 103.939]
    image = _mean_image_subtraction(image, means)

  Note that the rank of `image` must be known.

  Args:
    image: a tensor of size [height, width, C].
    means: a C-vector of values to subtract from each channel.

  Returns:
    the centered image.

  Raises:
    ValueError: If the rank of `image` is unknown, if `image` has a rank other
      than three or if the number of channels in `image` doesn't match the
      number of values in `means`.
  """
  if image.get_shape().ndims != 3:
    raise ValueError('Input must be of size [height, width, C>0]')
  num_channels = image.get_shape().as_list()[-1]
  if len(means) != num_channels:
    raise ValueError('len(means) must match the number of channels')

  # We have a 1-D tensor of means; convert to 3-D.
  means = tf.expand_dims(tf.expand_dims(means, 0), 0)

  return image - means
Author: forging2012, Project: models, Lines: 31, Source: vgg_preprocessing.py
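A hedged usage sketch (the random image only stands in for a real input): the two stacked tf.expand_dims calls reshape means from [C] to [1, 1, C], which then broadcasts against the [height, width, C] image.

import tensorflow as tf

image = tf.random_uniform([224, 224, 3], maxval=255.0)   # [height, width, C]
means = [123.68, 116.779, 103.939]                        # one value per channel (RGB)

centered = _mean_image_subtraction(image, means)          # same shape as image, per-channel mean removed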

Example 7: softmax

def softmax(x):
  """
  Compute the softmax function in tensorflow.

  You might find the tensorflow functions tf.exp, tf.reduce_max,
  tf.reduce_sum, tf.expand_dims useful. (Many solutions are possible, so you may
  not need to use all of these functions). Recall also that many common
  tensorflow operations are sugared (e.g. x * y does a tensor multiplication
  if x and y are both tensors). Make sure to implement the numerical stability
  fixes as in the previous homework!

  Args:
    x:   tf.Tensor with shape (n_samples, n_features). Note feature vectors are
         represented by row-vectors. (For simplicity, no need to handle 1-d
         input as in the previous homework)
  Returns:
    out: tf.Tensor with shape (n_samples, n_features). You need to construct this
         tensor in this problem.
  """

  ### YOUR CODE HERE
  maxes = tf.expand_dims(tf.reduce_max(x, reduction_indices=[1]), 1)
  stable = x - maxes
  e = tf.exp(stable)
  sums = tf.expand_dims(tf.reduce_sum(e, reduction_indices=[1]), 1)
  out = tf.div(e, sums)
  ### END YOUR CODE
  
  return out 
Author: Sundrique, Project: cs224d, Lines: 29, Source: q1_softmax.py
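A hedged sanity check (it assumes a TF 1.x session, consistent with the reduction_indices argument used above): the stabilized result should agree with tf.nn.softmax even for rows that would overflow a naive exp.

import numpy as np
import tensorflow as tf

x = tf.constant([[1.0, 2.0, 3.0],
                 [1001.0, 1002.0, 1003.0]])   # second row would overflow exp() without the max shift

with tf.Session() as sess:
    ours, ref = sess.run([softmax(x), tf.nn.softmax(x)])
    print(np.allclose(ours, ref))             # expected: True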

Example 8: reward_prediction_big

  def reward_prediction_big(
      self, input_images, input_reward, action, latent, mid_outputs):
    """Builds a reward prediction network."""
    del mid_outputs
    conv_size = self.tinyify([32, 32, 16, 8])

    with tf.variable_scope("reward_pred", reuse=tf.AUTO_REUSE):
      x = tf.concat(input_images, axis=3)
      x = tfcl.layer_norm(x)

      if not self.hparams.small_mode:
        x = tfl.conv2d(x, conv_size[1], [3, 3], strides=(2, 2),
                       activation=tf.nn.relu, name="reward_conv1")
        x = tfcl.layer_norm(x)

      # Inject additional inputs
      if action is not None:
        x = common_video.inject_additional_input(
            x, action, "action_enc", self.hparams.action_injection)
      if input_reward is not None:
        x = common_video.inject_additional_input(x, input_reward, "reward_enc")
      if latent is not None:
        latent = tfl.flatten(latent)
        latent = tf.expand_dims(latent, axis=1)
        latent = tf.expand_dims(latent, axis=1)
        x = common_video.inject_additional_input(x, latent, "latent_enc")

      x = tfl.conv2d(x, conv_size[2], [3, 3], strides=(2, 2),
                     activation=tf.nn.relu, name="reward_conv2")
      x = tfcl.layer_norm(x)
      x = tfl.conv2d(x, conv_size[3], [3, 3], strides=(2, 2),
                     activation=tf.nn.relu, name="reward_conv3")
Author: qixiuai, Project: tensor2tensor, Lines: 32, Source: sv2p.py

Example 9: build

 def build(self):
   """ tensorflow computation graph for transform """
   graph = tf.Graph()
   with graph.as_default():
     self.inputs = tf.placeholder(tf.float32, shape=(None, self.max_atoms, 4))
     atom_numbers = tf.cast(self.inputs[:, :, 0], tf.int32)
     flags = tf.sign(atom_numbers)
     flags = tf.cast(
         tf.expand_dims(flags, 1) * tf.expand_dims(flags, 2), tf.float32)
     coordinates = self.inputs[:, :, 1:]
     if self.coordinates_in_bohr:
       coordinates = coordinates * 0.52917721092
     d = self.distance_matrix(coordinates, flags)
     d_radial_cutoff = self.distance_cutoff(d, self.radial_cutoff, flags)
     d_angular_cutoff = self.distance_cutoff(d, self.angular_cutoff, flags)
     radial_sym = self.radial_symmetry(d_radial_cutoff, d, atom_numbers)
     angular_sym = self.angular_symmetry(d_angular_cutoff, d, atom_numbers,
                                         coordinates)
     self.outputs = tf.concat(
         [
             tf.cast(tf.expand_dims(atom_numbers, 2), tf.float32), radial_sym,
             angular_sym
         ],
         axis=2)
   return graph
Author: ktaneishi, Project: deepchem, Lines: 25, Source: transformers.py

Example 10: testExpandAndSqueeze

  def testExpandAndSqueeze(self):
    with self.cached_session():

      # TODO(aselle): sparse_split, sparse_reduce_sum,
      #  sparse_reduce_sum_sparse, reduce_join
      a = [[1, 2, 3]]
      self.assertAllEqual(tf.expand_dims(tf.squeeze(a, [0]), 0).eval(),
                          a)
      self.assertAllEqual(tf.squeeze(tf.expand_dims(a, 1), [1]).eval(),
                          a)
      self.assertAllEqual(
          tf.expand_dims(
              tf.squeeze(
                  [[1, 2, 3]], squeeze_dims=[0]), dim=0).eval(),
          a)
      self.assertAllEqual(
          tf.squeeze(
              tf.expand_dims(
                  [[1, 2, 3]], dim=1), squeeze_dims=[1]).eval(),
          a)

      self.assertAllEqual(
          tf.squeeze(
              tf.expand_dims(
                  [[1, 2, 3]], dim=1), squeeze_dims=[1]).eval(),
          a)
Author: JonathanRaiman, Project: tensorflow, Lines: 26, Source: test_file_v0_11.py

Example 11: roc_auc_score

def roc_auc_score(y_pred, y_true):
    """ ROC AUC Score.

    Approximates the Area Under Curve score, using approximation based on
    the Wilcoxon-Mann-Whitney U statistic.

    Yan, L., Dodier, R., Mozer, M. C., & Wolniewicz, R. (2003).
    Optimizing Classifier Performance via an Approximation to the Wilcoxon-Mann-Whitney Statistic.

    Measures overall performance for a full range of threshold levels.

    Arguments:
        y_pred: `Tensor`. Predicted values.
        y_true: `Tensor` . Targets (labels), a probability distribution.

    """
    with tf.name_scope("RocAucScore"):

        pos = tf.boolean_mask(y_pred, tf.cast(y_true, tf.bool))
        neg = tf.boolean_mask(y_pred, ~tf.cast(y_true, tf.bool))

        pos = tf.expand_dims(pos, 0)
        neg = tf.expand_dims(neg, 1)

        # original paper suggests performance is robust to exact parameter choice
        gamma = 0.2
        p     = 3

        difference = tf.zeros_like(pos * neg) + pos - neg - gamma

        masked = tf.boolean_mask(difference, difference < 0.0)

        return tf.reduce_sum(tf.pow(-masked, p))
Author: 21hub, Project: tflearn, Lines: 33, Source: objectives.py
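A hedged usage sketch (the scores and labels are made up): the two expand_dims calls broadcast the positive and negative scores into a pairwise difference matrix, and the result behaves as a differentiable surrogate loss that can be minimized during training.

import tensorflow as tf

y_pred = tf.constant([0.9, 0.1, 0.8, 0.3])   # predicted scores
y_true = tf.constant([1.0, 0.0, 1.0, 0.0])   # binary labels

# with this perfectly separated toy input every pairwise margin exceeds gamma, so the loss is 0
loss = roc_auc_score(y_pred, y_true)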

Example 12: dna_transformation

def dna_transformation(prev_image, dna_input, dna_kernel_size, relu_shift):
  """Apply dynamic neural advection to previous image.

  Args:
    prev_image: previous image to be transformed.
    dna_input: hidden layer to be used for computing DNA transformation.
    dna_kernel_size: dna kernel size.
    relu_shift: shift for ReLU function.
  Returns:
    List of images transformed by the predicted CDNA kernels.
  """
  # Construct translated images.
  prev_image_pad = tf.pad(prev_image, [[0, 0], [2, 2], [2, 2], [0, 0]])
  image_height = int(prev_image.get_shape()[1])
  image_width = int(prev_image.get_shape()[2])

  inputs = []
  for xkern in range(dna_kernel_size):
    for ykern in range(dna_kernel_size):
      inputs.append(
          tf.expand_dims(
              tf.slice(prev_image_pad, [0, xkern, ykern, 0],
                       [-1, image_height, image_width, -1]), [3]))
  inputs = tf.concat(axis=3, values=inputs)

  # Normalize channels to 1.
  kernel = tf.nn.relu(dna_input - relu_shift) + relu_shift
  kernel = tf.expand_dims(
      kernel / tf.reduce_sum(kernel, [3], keep_dims=True), [4])
  return tf.reduce_sum(kernel * inputs, [3], keep_dims=False)
Author: qixiuai, Project: tensor2tensor, Lines: 30, Source: common_video.py

Example 13: train

def train():
	image_name = tf.constant("lily.jpg", tf.string)
	image1 = uf.read_image(image_name, IMG_ROW, IMG_COL)
	image1 = tf.expand_dims(image1, 0)
	image2 = uf.read_image(image_name, IMG_ROW, IMG_COL)
	image2 = tf.expand_dims(image2, 0)
	image = tf.concat(0, (image1, image2))

	clstm = crnn.con_lstm_cell(BATCH_SIZE, IMG_ROW, IMG_COL, 3, 3, CELL_C)
	input_ = tf.placeholder(tf.float32, (BATCH_SIZE, IMG_ROW, IMG_COL, 3))
	inputs = []
	inputs.append(input_)
	inputs.append(input_)
	
	outputs, state = crnn.clstm_encode(clstm, inputs)

	sess = tf.Session()

	init_op = tf.initialize_all_variables()
	sess.run(init_op)

	for i in xrange(100):
		image_v = sess.run(image)
		feed_data = dict()
		feed_data[inputs[0]] = image_v
		feed_data[inputs[1]] = image_v
		outputs_v = sess.run(outputs, feed_dict = feed_data)
		print(outputs_v)
Author: polltooh, Project: CNN_LSTM, Lines: 28, Source: test_clstm.py

Example 14: _att

    def _att(self, context, context_encode, h):
        with tf.variable_scope('att') as scope:
            
            hidden_att_W = self._variable_trunc_normal('hidden_att_W',
                    [self.dim_hidden, self.dim_ctx])
            pre_att_b = self._variable_constant('pre_att_b',
                    [self.dim_ctx])
            att_W = self._variable_trunc_normal('att_W',
                    [self.dim_ctx, 1])
            att_b = self._variable_constant('att_b', [1])

            # evaluate context_encode (e_ti)
            context_encode = context_encode + \
                    tf.expand_dims(tf.matmul(h, hidden_att_W), 1) + \
                    pre_att_b
            context_encode = tf.nn.tanh(context_encode)
            context_encode_flat = tf.reshape(context_encode,
                    [self.batch_size*self.ctx_shape[0], self.dim_ctx])
            alpha = tf.reshape(
                    tf.matmul(context_encode_flat, att_W) + att_b,
                    [self.batch_size, self.ctx_shape[0]])
            alpha = tf.nn.softmax(alpha)
            weighted_context = tf.reduce_sum(context * \
                    tf.expand_dims(alpha, 2), 1)
        return weighted_context
Author: guduxingzou, Project: show-attend-and-tell, Lines: 25, Source: model.py

Example 15: build_psi_stats_rbf_plus_linear

def build_psi_stats_rbf_plus_linear(Z, kern, mu, S):
    # TODO: make sure the active dimensions are overlapping completely

    # use only active dimensions
    mu, S = kern._slice(mu, S)  # only use the active dimensions.
    Z, _ = kern._slice(Z, None)

    psi0_lin, psi1_lin, psi2_lin = build_psi_stats_linear(Z, kern.linear, mu, S)
    psi0_rbf, psi1_rbf, psi2_rbf = build_psi_stats_rbf(Z, kern.rbf, mu, S)
    psi0, psi1, psi2 = psi0_lin + psi0_rbf, psi1_lin + psi1_rbf, psi2_lin + psi2_rbf

    # extra terms for the 'interaction' of linear and rbf
    l2 = tf.square(kern.rbf.lengthscales)
    A = tf.expand_dims(1./S + 1./l2, 1)  # N x 1 x Q
    m = (tf.expand_dims(mu/S, 1) + tf.expand_dims(Z/l2, 0)) / A  # N x M x Q
    mTAZ = tf.reduce_sum(tf.expand_dims(m * kern.linear.variance, 1) *
                         tf.expand_dims(tf.expand_dims(Z, 0), 0), 3)  # N x M x M
    Z2 = tf.reduce_sum(tf.square(Z) / l2, 1)  # M,
    mu2 = tf.reduce_sum(tf.square(mu) / S, 1)  # N
    mAm = tf.reduce_sum(tf.square(m) * A, 2)  # N x M
    exp_term = tf.exp(-(tf.reshape(Z2, (1, -1)) + tf.reshape(mu2, (-1, 1))-mAm) / 2.)  # N x M
    psi2_extra = tf.reduce_sum(kern.rbf.variance *
                               tf.expand_dims(exp_term, 2) *
                               tf.expand_dims(tf.expand_dims(tf.reduce_prod(S, 1), 1), 2) *
                               tf.expand_dims(tf.reduce_prod(A, 2), 1) *
                               mTAZ, 0)

    psi2 = psi2 + psi2_extra + tf.transpose(psi2_extra)
    return psi0, psi1, psi2
Author: blutooth, Project: dgp, Lines: 29, Source: kernel_expectations.py


Note: The tensorflow.expand_dims examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license; do not reproduce without permission.