

Python tensorflow.stack Function Code Examples

This article collects typical usage examples of the tensorflow.stack function in Python. If you are wondering what tf.stack does, how to call it, or what it looks like in real code, the curated snippets below should help.


The following 15 code examples of the stack function are drawn from open-source projects and are sorted by popularity by default.
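
Before the project excerpts, here is a minimal, self-contained sketch of what tf.stack does (TensorFlow 2.x eager execution and illustrative tensor names are assumed): it packs a list of same-shaped tensors into one tensor along a new axis, and tf.unstack reverses the operation.

import tensorflow as tf

a = tf.constant([1, 2, 3])
b = tf.constant([4, 5, 6])

# New leading axis: shape (2, 3)
print(tf.stack([a, b], axis=0))   # [[1 2 3] [4 5 6]]

# New trailing axis: shape (3, 2)
print(tf.stack([a, b], axis=1))   # [[1 4] [2 5] [3 6]]

# tf.unstack undoes the stacking along the chosen axis
a_back, b_back = tf.unstack(tf.stack([a, b]), axis=0)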

Example 1: create_tensor

  def create_tensor(self, in_layers=None, set_tensors=True, **kwargs):
    """ Generate Radial Symmetry Function """
    if in_layers is None:
      in_layers = self.in_layers
    in_layers = convert_to_layers(in_layers)

    self.build()
    d_cutoff = in_layers[0].out_tensor
    d = in_layers[1].out_tensor
    if self.atomic_number_differentiated:
      atom_numbers = in_layers[2].out_tensor
      atom_number_embedded = tf.nn.embedding_lookup(self.atom_number_embedding,
                                                    atom_numbers)
    d_cutoff = tf.stack([d_cutoff] * self.length, axis=3)
    d = tf.stack([d] * self.length, axis=3)
    Rs = tf.reshape(self.Rs, (1, 1, 1, -1))
    ita = tf.reshape(self.ita, (1, 1, 1, -1))
    out_tensor = tf.exp(-ita * tf.square(d - Rs)) * d_cutoff
    if self.atomic_number_differentiated:
      out_tensors = []
      for atom_type in self.atom_number_cases:
        selected_atoms = tf.expand_dims(
            tf.expand_dims(atom_number_embedded[:, :, atom_type], axis=1),
            axis=3)
        out_tensors.append(tf.reduce_sum(out_tensor * selected_atoms, axis=2))
      self.out_tensor = tf.concat(out_tensors, axis=2)
    else:
      self.out_tensor = tf.reduce_sum(out_tensor, axis=2)
Developer: AhlamMD | Project: deepchem | Lines: 28 | Source: symmetry_functions.py
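
A side note on the replication pattern above (an illustrative sketch, not taken from the deepchem source): tf.stack([d] * self.length, axis=3) copies d along a new fourth axis. Assuming d has rank 3, tf.expand_dims followed by tf.tile yields the same tensor without materializing a Python list of copies.

import tensorflow as tf

d = tf.random.normal([2, 5, 5])   # illustrative rank-3 tensor
length = 4

stacked = tf.stack([d] * length, axis=3)                         # shape (2, 5, 5, 4)
tiled = tf.tile(tf.expand_dims(d, axis=3), [1, 1, 1, length])    # same values, same shape

print(bool(tf.reduce_all(tf.equal(stacked, tiled))))             # True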

Example 2: _summarize_input

  def _summarize_input(self, groundtruth_boxes_list, match_list):
    """Creates tensorflow summaries for the input boxes and anchors.

    This function creates four summaries corresponding to the average
    number (over images in a batch) of (1) groundtruth boxes, (2) anchors
    marked as positive, (3) anchors marked as negative, and (4) anchors marked
    as ignored.

    Args:
      groundtruth_boxes_list: a list of 2-D tensors of shape [num_boxes, 4]
        containing corners of the groundtruth boxes.
      match_list: a list of matcher.Match objects encoding the match between
        anchors and groundtruth boxes for each image of the batch,
        with rows of the Match objects corresponding to groundtruth boxes
        and columns corresponding to anchors.
    """
    num_boxes_per_image = tf.stack(
        [tf.shape(x)[0] for x in groundtruth_boxes_list])
    pos_anchors_per_image = tf.stack(
        [match.num_matched_columns() for match in match_list])
    neg_anchors_per_image = tf.stack(
        [match.num_unmatched_columns() for match in match_list])
    ignored_anchors_per_image = tf.stack(
        [match.num_ignored_columns() for match in match_list])
    tf.summary.scalar('Input/AvgNumGroundtruthBoxesPerImage',
                      tf.reduce_mean(tf.to_float(num_boxes_per_image)))
    tf.summary.scalar('Input/AvgNumPositiveAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(pos_anchors_per_image)))
    tf.summary.scalar('Input/AvgNumNegativeAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(neg_anchors_per_image)))
    tf.summary.scalar('Input/AvgNumIgnoredAnchorsPerImage',
                      tf.reduce_mean(tf.to_float(ignored_anchors_per_image)))
Developer: Peterwangcn | Project: object_detector_app | Lines: 32 | Source: ssd_meta_arch.py

Example 3: iou

  def iou(self, boxes1, boxes2):
    """calculate ious
    Args:
      boxes1: 4-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL, 4]  ====> (x_center, y_center, w, h)
      boxes2: 1-D tensor [4] ===> (x_center, y_center, w, h)
    Return:
      iou: 3-D tensor [CELL_SIZE, CELL_SIZE, BOXES_PER_CELL]
    """
    boxes1 = tf.stack([boxes1[:, :, :, 0] - boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] - boxes1[:, :, :, 3] / 2,
                      boxes1[:, :, :, 0] + boxes1[:, :, :, 2] / 2, boxes1[:, :, :, 1] + boxes1[:, :, :, 3] / 2])
    boxes1 = tf.transpose(boxes1, [1, 2, 3, 0])
    boxes2 = tf.stack([boxes2[0] - boxes2[2] / 2, boxes2[1] - boxes2[3] / 2,
                       boxes2[0] + boxes2[2] / 2, boxes2[1] + boxes2[3] / 2])

    # calculate the upper-left point of the intersection
    lu = tf.maximum(boxes1[:, :, :, 0:2], boxes2[0:2])
    rd = tf.minimum(boxes1[:, :, :, 2:], boxes2[2:])

    #intersection
    intersection = rd - lu 

    inter_square = intersection[:, :, :, 0] * intersection[:, :, :, 1]

    mask = tf.cast(intersection[:, :, :, 0] > 0, tf.float32) * tf.cast(intersection[:, :, :, 1] > 0, tf.float32)
    
    inter_square = mask * inter_square
    
    # calculate the areas of boxes1 and boxes2
    square1 = (boxes1[:, :, :, 2] - boxes1[:, :, :, 0]) * (boxes1[:, :, :, 3] - boxes1[:, :, :, 1])
    square2 = (boxes2[2] - boxes2[0]) * (boxes2[3] - boxes2[1])
    
    return inter_square/(square1 + square2 - inter_square + 1e-6)
Developer: TrendonixNetwork | Project: ProjectCybonix | Lines: 32 | Source: yolo_tiny_net.py
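
To check the arithmetic, the sketch below reimplements the same IoU formula for a single pair of boxes (standalone and illustrative, TensorFlow 2.x eager assumed). Two unit squares whose centers differ by half a box width overlap with IoU ≈ 1/3.

import tensorflow as tf

def iou_single(box1, box2):
    # boxes given as (x_center, y_center, w, h), mirroring the method above
    b1 = tf.stack([box1[0] - box1[2] / 2, box1[1] - box1[3] / 2,
                   box1[0] + box1[2] / 2, box1[1] + box1[3] / 2])
    b2 = tf.stack([box2[0] - box2[2] / 2, box2[1] - box2[3] / 2,
                   box2[0] + box2[2] / 2, box2[1] + box2[3] / 2])
    lu = tf.maximum(b1[0:2], b2[0:2])   # upper-left corner of the intersection
    rd = tf.minimum(b1[2:], b2[2:])     # lower-right corner of the intersection
    wh = tf.maximum(rd - lu, 0.0)       # clamp to zero when the boxes do not overlap
    inter = wh[0] * wh[1]
    area1 = (b1[2] - b1[0]) * (b1[3] - b1[1])
    area2 = (b2[2] - b2[0]) * (b2[3] - b2[1])
    return inter / (area1 + area2 - inter + 1e-6)

print(float(iou_single(tf.constant([0.5, 0.5, 1.0, 1.0]),
                       tf.constant([1.0, 0.5, 1.0, 1.0]))))   # ~0.333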

Example 4: radial_symmetry

  def radial_symmetry(self, d_cutoff, d, atom_numbers):
    """ Radial Symmetry Function """
    embedding = tf.eye(np.max(self.atom_cases) + 1)
    atom_numbers_embedded = tf.nn.embedding_lookup(embedding, atom_numbers)

    Rs = np.linspace(0., self.radial_cutoff, self.radial_length)
    ita = np.ones_like(Rs) * 3 / (Rs[1] - Rs[0])**2
    Rs = tf.cast(np.reshape(Rs, (1, 1, 1, -1)), tf.float32)
    ita = tf.cast(np.reshape(ita, (1, 1, 1, -1)), tf.float32)
    length = ita.get_shape().as_list()[-1]

    d_cutoff = tf.stack([d_cutoff] * length, axis=3)
    d = tf.stack([d] * length, axis=3)

    out = tf.exp(-ita * tf.square(d - Rs)) * d_cutoff
    if self.atomic_number_differentiated:
      out_tensors = []
      for atom_type in self.atom_cases:
        selected_atoms = tf.expand_dims(
            tf.expand_dims(atom_numbers_embedded[:, :, atom_type], axis=1),
            axis=3)
        out_tensors.append(tf.reduce_sum(out * selected_atoms, axis=2))
      return tf.concat(out_tensors, axis=2)
    else:
      return tf.reduce_sum(out, axis=2)
Developer: ktaneishi | Project: deepchem | Lines: 25 | Source: transformers.py

Example 5: _build_predict

 def _build_predict(self, Xnew, full_cov=False):
     """
     Compute the mean and variance of the latent function at some new points
     Xnew. For a derivation of the terms in here, see the associated SGPR
     notebook.
     """
     num_inducing = len(self.feature)
     err = self.Y - self.mean_function(self.X)
     Kuf = self.feature.Kuf(self.kern, self.X)
     Kuu = self.feature.Kuu(self.kern, jitter=settings.numerics.jitter_level)
     Kus = self.feature.Kuf(self.kern, Xnew)
     sigma = tf.sqrt(self.likelihood.variance)
     L = tf.cholesky(Kuu)
     A = tf.matrix_triangular_solve(L, Kuf, lower=True) / sigma
     B = tf.matmul(A, A, transpose_b=True) + tf.eye(num_inducing, dtype=settings.float_type)
     LB = tf.cholesky(B)
     Aerr = tf.matmul(A, err)
     c = tf.matrix_triangular_solve(LB, Aerr, lower=True) / sigma
     tmp1 = tf.matrix_triangular_solve(L, Kus, lower=True)
     tmp2 = tf.matrix_triangular_solve(LB, tmp1, lower=True)
     mean = tf.matmul(tmp2, c, transpose_a=True)
     if full_cov:
         var = self.kern.K(Xnew) + tf.matmul(tmp2, tmp2, transpose_a=True) \
               - tf.matmul(tmp1, tmp1, transpose_a=True)
         shape = tf.stack([1, 1, tf.shape(self.Y)[1]])
         var = tf.tile(tf.expand_dims(var, 2), shape)
     else:
         var = self.kern.Kdiag(Xnew) + tf.reduce_sum(tf.square(tmp2), 0) \
               - tf.reduce_sum(tf.square(tmp1), 0)
         shape = tf.stack([1, tf.shape(self.Y)[1]])
         var = tf.tile(tf.expand_dims(var, 1), shape)
     return mean + self.mean_function(Xnew), var
Developer: vincentadam87 | Project: GPflow | Lines: 32 | Source: sgpr.py

Example 6: bboxes_crop_or_pad

def bboxes_crop_or_pad(bboxes,
                       height, width,
                       offset_y, offset_x,
                       target_height, target_width):
    """Adapt bounding boxes to crop or pad operations.
    Coordinates are always supposed to be relative to the image.

    Arguments:
      bboxes: Tensor Nx4 with bboxes coordinates [y_min, x_min, y_max, x_max];
      height, width: Original image dimension;
      offset_y, offset_x: Offset to apply,
        negative if cropping, positive if padding;
      target_height, target_width: Target dimension after cropping / padding.
    """
    with tf.name_scope('bboxes_crop_or_pad'):
        # Rescale bounding boxes in pixels.
        scale = tf.cast(tf.stack([height, width, height, width]), bboxes.dtype)
        bboxes = bboxes * scale
        # Add offset.
        offset = tf.cast(tf.stack([offset_y, offset_x, offset_y, offset_x]), bboxes.dtype)
        bboxes = bboxes + offset
        # Rescale to target dimension.
        scale = tf.cast(tf.stack([target_height, target_width,
                                  target_height, target_width]), bboxes.dtype)
        bboxes = bboxes / scale
        return bboxes
Developer: angelocarbone | Project: OSSDC-VisionBasedACC | Lines: 26 | Source: tf_image.py

Example 7: get_filters

def get_filters(R, filter_size, P=None, n_rings=None):
    """Perform single-frequency DFT on each ring of a polar-resampled patch"""
    k = filter_size
    filters = {}
    N = n_samples(k)
    from scipy.linalg import dft
    for m, r in R.iteritems():
        rsh = r.get_shape().as_list()
        # Get the basis matrices
        weights = get_interpolation_weights(k, m, n_rings=n_rings)
        DFT = dft(N)[m,:]
        LPF = np.dot(DFT, weights).T

        cosine = np.real(LPF).astype(np.float32)
        sine = np.imag(LPF).astype(np.float32)
        # Reshape for multiplication with radial profile
        cosine = tf.constant(cosine)
        sine = tf.constant(sine)
        # Project taps on to rotational basis
        r = tf.reshape(r, tf.stack([rsh[0],rsh[1]*rsh[2]]))
        ucos = tf.reshape(tf.matmul(cosine, r), tf.stack([k, k, rsh[1], rsh[2]]))
        usin = tf.reshape(tf.matmul(sine, r), tf.stack([k, k, rsh[1], rsh[2]]))
        if P is not None:
            # Rotate basis matrices
            ucos_ = tf.cos(P[m])*ucos + tf.sin(P[m])*usin
            usin = -tf.sin(P[m])*ucos + tf.cos(P[m])*usin
            ucos = ucos_
        filters[m] = (ucos, usin)
    return filters
Developer: deworrall92 | Project: groupConvolutions | Lines: 29 | Source: harmonic_network_ops.py

Example 8: getImage

def getImage(filenames):
	# convert filenames to a queue for an input pipeline.
	filenameQ = tf.train.string_input_producer(filenames,num_epochs=None)

	# object to read records
	recordReader = tf.TFRecordReader()

	# read the full set of features for a single example
	key, fullExample = recordReader.read(filenameQ)

	# parse the full example into its component features.
	features = tf.parse_single_example(
        fullExample,
        features={
            'image/height': tf.FixedLenFeature([], tf.int64),
            'image/width': tf.FixedLenFeature([], tf.int64),
            'image/depth': tf.FixedLenFeature([], tf.int64),
            'image/class/label': tf.FixedLenFeature([],tf.int64),
            'image/class/text': tf.FixedLenFeature([], dtype=tf.string,default_value=''),
            'image/filename': tf.FixedLenFeature([], dtype=tf.string,default_value=''),
            'image/encoded': tf.FixedLenFeature([], dtype=tf.string, default_value='')
        })

	label = features['image/class/label']
	image_buffer = features['image/encoded']

	image = tf.decode_raw(image_buffer, tf.float32)
	image = tf.reshape(image, tf.stack([FLAGS.width*FLAGS.height*FLAGS.depth]))

	label=tf.stack(tf.one_hot(label-1, nLabel))
	return label, image
Developer: chopin111 | Project: Magisterka | Lines: 31 | Source: simpleCNN_MRI.py

Example 9: collapse_mixture_of_tastes

def collapse_mixture_of_tastes(tastes_predictions, tastes_attentions):
    """
    Collapses a list of prediction nodes in to a single prediction node.
    :param tastes_predictions:
    :param tastes_attentions:
    :return:
    """
    stacked_predictions = tf.stack(tastes_predictions)

    # If there is attention, the attentions are used to weight each prediction
    if tastes_attentions is not None:

        # Stack the attentions and perform softmax across the tastes
        stacked_attentions = tf.stack(tastes_attentions)
        softmax_attentions = tf.nn.softmax(stacked_attentions, axis=0)

        # The softmax'd attentions serve as weights for the taste predictions
        weighted_predictions = tf.multiply(stacked_predictions, softmax_attentions)
        result_prediction = tf.reduce_sum(weighted_predictions, axis=0)

    # If there is no attention, the max prediction is returned
    else:
        result_prediction = tf.reduce_max(stacked_predictions, axis=0)

    return result_prediction
Developer: CloudBreadPaPa | Project: tensorrec | Lines: 25 | Source: recommendation_graphs.py

Example 10: when_singular

 def when_singular():
   center = min_
   bucket_starts = tf.stack([center - 0.5])
   bucket_ends = tf.stack([center + 0.5])
   bucket_counts = tf.stack([tf.cast(tf.size(data), tf.float64)])
   return tf.transpose(
       tf.stack([bucket_starts, bucket_ends, bucket_counts]))
Developer: jlewi | Project: tensorboard | Lines: 7 | Source: summary.py

Example 11: objective

    def objective(self, x):
        '''
        Returns scalar to maximize
        '''


        encoder = NN(self.encoder_net, self.encoder_act_func, self.batch_size)
        decoder = BNN(self.decoder_net, self.decoder_act_func, self.batch_size)

        log_px_list = []
        log_pz_list = []
        log_qz_list = []
        log_pW_list = []
        log_qW_list = []

        for W_i in range(self.n_W_particles):

            # Sample decoder weights  __, [1], [1]
            W, log_pW, log_qW = decoder.sample_weights()

            # Sample z   [P,B,Z], [P,B], [P,B]
            z, log_pz, log_qz = self.sample_z(x, encoder, decoder, W)
            # z: [PB,Z]
            z = tf.reshape(z, [self.n_z_particles*self.batch_size, self.z_size])

            # Decode [PB,X]
            y = decoder.feedforward(W, z)
            # y: [P,B,X]
            y = tf.reshape(y, [self.n_z_particles, self.batch_size, self.x_size])

            # Likelihood p(x|z)  [P,B]
            log_px = log_bern(x,y)

            #Store for later
            log_px_list.append(log_px)
            log_pz_list.append(log_pz)
            log_qz_list.append(log_qz)
            log_pW_list.append(log_pW)
            log_qW_list.append(log_qW)


        log_px = tf.stack(log_px_list) #[S,P,B]
        log_pz = tf.stack(log_pz_list) #[S,P,B]
        log_qz = tf.stack(log_qz_list) #[S,P,B]
        log_pW = tf.stack(log_pW_list) #[S]
        log_qW = tf.stack(log_qW_list) #[S]

        # Calculate log probs for printing
        self.log_px = tf.reduce_mean(log_px)
        self.log_pz = tf.reduce_mean(log_pz)
        self.log_qz = tf.reduce_mean(log_qz)
        self.log_pW = tf.reduce_mean(log_pW)
        self.log_qW = tf.reduce_mean(log_qW)
        self.z_elbo = self.log_px + self.log_pz - self.log_qz 


        # Calculate the ELBO
        elbo = self.log_px + self.log_pz - self.log_qz + self.batch_frac*(self.log_pW - self.log_qW)

        return elbo
Developer: chriscremer | Project: Other_Code | Lines: 60 | Source: BIWAE.py

Example 12: tile_anchors

def tile_anchors(grid_height,
                 grid_width,
                 scales,
                 aspect_ratios,
                 base_anchor_size,
                 anchor_stride,
                 anchor_offset):
  """Create a tiled set of anchors strided along a grid in image space.

  This op creates a set of anchor boxes by placing a "basis" collection of
  boxes with user-specified scales and aspect ratios centered at evenly
  distributed points along a grid.  The basis collection is specified via the
  scale and aspect_ratios arguments.  For example, setting scales=[.1, .2, .2]
  and aspect ratios = [2,2,1/2] means that we create three boxes: one with scale
  .1, aspect ratio 2, one with scale .2, aspect ratio 2, and one with scale .2
  and aspect ratio 1/2.  Each box is multiplied by "base_anchor_size" before
  placing it over its respective center.

  Grid points are specified via grid_height, grid_width parameters as well as
  the anchor_stride and anchor_offset parameters.

  Args:
    grid_height: size of the grid in the y direction (int or int scalar tensor)
    grid_width: size of the grid in the x direction (int or int scalar tensor)
    scales: a 1-d  (float) tensor representing the scale of each box in the
      basis set.
    aspect_ratios: a 1-d (float) tensor representing the aspect ratio of each
      box in the basis set.  The length of the scales and aspect_ratios tensors
      must be equal.
    base_anchor_size: base anchor size as [height, width]
      (float tensor of shape [2])
    anchor_stride: difference in centers between base anchors for adjacent grid
                   positions (float tensor of shape [2])
    anchor_offset: center of the anchor with scale and aspect ratio 1 for the
                   upper left element of the grid, this should be zero for
                   feature networks with only VALID padding and even receptive
                   field size, but may need some additional calculation if other
                   padding is used (float tensor of shape [2])
  Returns:
    a BoxList holding a collection of N anchor boxes
  """
  ratio_sqrts = tf.sqrt(aspect_ratios)
  heights = scales / ratio_sqrts * base_anchor_size[0]
  widths = scales * ratio_sqrts * base_anchor_size[1]

  # Get a grid of box centers
  y_centers = tf.to_float(tf.range(grid_height))
  y_centers = y_centers * anchor_stride[0] + anchor_offset[0]
  x_centers = tf.to_float(tf.range(grid_width))
  x_centers = x_centers * anchor_stride[1] + anchor_offset[1]
  x_centers, y_centers = ops.meshgrid(x_centers, y_centers)

  widths_grid, x_centers_grid = ops.meshgrid(widths, x_centers)
  heights_grid, y_centers_grid = ops.meshgrid(heights, y_centers)
  bbox_centers = tf.stack([y_centers_grid, x_centers_grid], axis=3)
  bbox_sizes = tf.stack([heights_grid, widths_grid], axis=3)
  bbox_centers = tf.reshape(bbox_centers, [-1, 2])
  bbox_sizes = tf.reshape(bbox_sizes, [-1, 2])
  bbox_corners = _center_size_bbox_to_corners_bbox(bbox_centers, bbox_sizes)
  return box_list.BoxList(bbox_corners)
Developer: NoPointExc | Project: models | Lines: 60 | Source: grid_anchor_generator.py
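
A quick numeric illustration of the height/width computation at the top of tile_anchors, using the scales and aspect ratios from the docstring (a sketch; base_anchor_size = [256., 256.] is an assumed value, not taken from the project):

import tensorflow as tf

scales = tf.constant([0.1, 0.2, 0.2])
aspect_ratios = tf.constant([2.0, 2.0, 0.5])
base_anchor_size = tf.constant([256.0, 256.0])

ratio_sqrts = tf.sqrt(aspect_ratios)
heights = scales / ratio_sqrts * base_anchor_size[0]   # ~[18.1, 36.2, 72.4]
widths = scales * ratio_sqrts * base_anchor_size[1]    # ~[36.2, 72.4, 36.2]

# width / height recovers the requested aspect ratios: [2., 2., 0.5]
print((widths / heights).numpy())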

Example 13: mtrx2vecBatch

def mtrx2vecBatch(pMtrxBatch,opt):
	with tf.name_scope("mtrx2vec"):
		if opt.warpType=="translation":
			[row0,row1,row2] = tf.unstack(pMtrxBatch,axis=1)
			[e00,e01,e02] = tf.unstack(row0,axis=1)
			[e10,e11,e12] = tf.unstack(row1,axis=1)
			[e20,e21,e22] = tf.unstack(row2,axis=1)
			pBatch = tf.stack([e02,e12],axis=1)
		elif opt.warpType=="similarity":
			[row0,row1,row2] = tf.unstack(pMtrxBatch,axis=1)
			[e00,e01,e02] = tf.unstack(row0,axis=1)
			[e10,e11,e12] = tf.unstack(row1,axis=1)
			[e20,e21,e22] = tf.unstack(row2,axis=1)
			pBatch = tf.stack([e00-1,e10,e02,e12],axis=1)
		elif opt.warpType=="affine":
			[row0,row1,row2] = tf.unstack(pMtrxBatch,axis=1)
			[e00,e01,e02] = tf.unstack(row0,axis=1)
			[e10,e11,e12] = tf.unstack(row1,axis=1)
			[e20,e21,e22] = tf.unstack(row2,axis=1)
			pBatch = tf.stack([e00-1,e01,e02,e10,e11-1,e12],axis=1)
		elif opt.warpType=="homography":
			pMtrxBatch = pMtrxBatch/pMtrxBatch[:,2:3,2:3]
			[row0,row1,row2] = tf.unstack(pMtrxBatch,axis=1)
			[e00,e01,e02] = tf.unstack(row0,axis=1)
			[e10,e11,e12] = tf.unstack(row1,axis=1)
			[e20,e21,e22] = tf.unstack(row2,axis=1)
			pBatch = tf.stack([e00-1,e01,e02,e10,e11-1,e12,e20,e21],axis=1)
	return pBatch
Developer: sunshinezhe | Project: IC-STN | Lines: 28 | Source: warp.py

Example 14: vec2mtrxBatch

def vec2mtrxBatch(pBatch,opt):
	with tf.name_scope("vec2mtrx"):
		batchSize = tf.shape(pBatch)[0]
		O = tf.zeros([batchSize])
		I = tf.ones([batchSize])
		if opt.warpType=="translation":
			tx,ty = tf.unstack(pBatch,axis=1)
			pMtrxBatch = tf.transpose(tf.stack([[I,O,tx],
												[O,I,ty],
												[O,O,I]]),perm=[2,0,1])
		elif opt.warpType=="similarity":
			pc,ps,tx,ty = tf.unstack(pBatch,axis=1)
			pMtrxBatch = tf.transpose(tf.stack([[I+pc,-ps,tx],
												[ps,I+pc,ty],
												[O,O,I]]),perm=[2,0,1])
		elif opt.warpType=="affine":
			p1,p2,p3,p4,p5,p6 = tf.unstack(pBatch,axis=1)
			pMtrxBatch = tf.transpose(tf.stack([[I+p1,p2,p3],
												[p4,I+p5,p6],
												[O,O,I]]),perm=[2,0,1])
		elif opt.warpType=="homography":
			p1,p2,p3,p4,p5,p6,p7,p8 = tf.unstack(pBatch,axis=1)
			pMtrxBatch = tf.transpose(tf.stack([[I+p1,p2,p3],
												[p4,I+p5,p6],
												[p7,p8,I]]),perm=[2,0,1])
	return pMtrxBatch
Developer: sunshinezhe | Project: IC-STN | Lines: 26 | Source: warp.py
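
The stack-then-transpose idiom shared by Examples 13 and 14 can be checked in isolation with the sketch below (TensorFlow 2.x eager and illustrative values assumed): stacking a 3x3 nested list of length-B vectors yields a tensor of shape [3, 3, B], and tf.transpose(..., perm=[2, 0, 1]) turns it into a batch of B homogeneous 3x3 matrices.

import tensorflow as tf

batch_size = 4
O = tf.zeros([batch_size])
I = tf.ones([batch_size])
tx = tf.fill([batch_size], 2.0)
ty = tf.fill([batch_size], 3.0)

# Nested list -> shape [3, 3, batch_size]; the transpose moves the batch axis to the front.
pMtrxBatch = tf.transpose(tf.stack([[I, O, tx],
                                    [O, I, ty],
                                    [O, O, I]]), perm=[2, 0, 1])
print(pMtrxBatch.shape)   # (4, 3, 3): one translation matrix per batch element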

Example 15: __init__

    def __init__(self, config):

        self.inputs = [ev.placeholder(config) for ev in config.evidence]
        exists = [ev.exists(i) for ev, i in zip(config.evidence, self.inputs)]
        zeros = tf.zeros([config.batch_size, config.latent_size], dtype=tf.float32)

        # Compute the denominator used for mean and covariance
        for ev in config.evidence:
            ev.init_sigma(config)
        d = [tf.where(exist, tf.tile([1. / tf.square(ev.sigma)], [config.batch_size]),
                      tf.zeros(config.batch_size)) for ev, exist in zip(config.evidence, exists)]
        d = 1. + tf.reduce_sum(tf.stack(d), axis=0)
        denom = tf.tile(tf.reshape(d, [-1, 1]), [1, config.latent_size])

        # Compute the mean of Psi
        with tf.variable_scope('mean'):
            # 1. compute encoding
            self.encodings = [ev.encode(i, config) for ev, i in zip(config.evidence, self.inputs)]
            encodings = [encoding / tf.square(ev.sigma) for ev, encoding in
                         zip(config.evidence, self.encodings)]

            # 2. pick only encodings from valid inputs that exist, otherwise pick zero encoding
            encodings = [tf.where(exist, enc, zeros) for exist, enc in zip(exists, encodings)]

            # 3. tile the encodings according to each evidence type
            encodings = [[enc] * ev.tile for ev, enc in zip(config.evidence, encodings)]
            encodings = tf.stack(list(chain.from_iterable(encodings)))

            # 4. compute the mean of non-zero encodings
            self.psi_mean = tf.reduce_sum(encodings, axis=0) / denom

        # Compute the covariance of Psi
        with tf.variable_scope('covariance'):
            I = tf.ones([config.batch_size, config.latent_size], dtype=tf.float32)
            self.psi_covariance = I / denom
Developer: FoxxyMoxxy | Project: Vision | Lines: 35 | Source: architecture.py


Note: The tensorflow.stack examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright of the source code remains with the original authors. For redistribution and use, please follow the corresponding project's license. Do not reproduce this article without permission.