Python tensorflow.ones_like Function Code Examples

This article collects typical usage examples of the Python function tensorflow.ones_like. If you have been wondering what exactly ones_like does, how to call it, or what it looks like in real code, the curated examples below should help.


The following shows 15 code examples of the ones_like function, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.
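Before the examples, a quick sketch of the function itself: tf.ones_like(t) returns a new tensor with the same shape and dtype as t, with every element set to 1 (tf.zeros_like is the zero-filled counterpart). A minimal demonstration, assuming the TensorFlow 1.x graph-and-session style that the examples below use:

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
ones = tf.ones_like(x)    # shape (2, 2), dtype float32, all elements 1.0
zeros = tf.zeros_like(x)  # the complementary helper seen in several examples

with tf.Session() as sess:
    print(sess.run(ones))  # [[1. 1.] [1. 1.]]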

Example 1: grad_fn

 def grad_fn(inputs, variables, unused_outputs, unused_grad_outputs):
   grad_inputs = [tf.ones_like(t) * (i + 1.) for i, t in enumerate(inputs)]
   grad_vars = [
       tf.ones_like(t) * (i + len(inputs) + 1.)
       for i, t in enumerate(variables)
   ]
   return grad_inputs, grad_vars
Author: qixiuai | Project: tensor2tensor | Lines: 7 | Source: common_layers_test.py

Example 2: generalised_dice_loss

def generalised_dice_loss(prediction,
                          ground_truth,
                          weight_map=None,
                          type_weight='Square'):
    """
    Function to calculate the Generalised Dice Loss defined in
        Sudre, C. et al. (2017) Generalised Dice overlap as a deep learning
        loss function for highly unbalanced segmentations. DLMIA 2017

    :param prediction: the logits
    :param ground_truth: the segmentation ground truth
    :param weight_map: optional per-voxel weight map applied to the volumes
    :param type_weight: type of weighting allowed between labels (choice
        between Square (square of inverse of volume),
        Simple (inverse of volume) and Uniform (no weighting))
    :return: the loss
    """
    ground_truth = tf.to_int64(ground_truth)
    n_voxels = ground_truth.shape[0].value
    n_classes = prediction.shape[1].value
    ids = tf.constant(np.arange(n_voxels), dtype=tf.int64)
    ids = tf.stack([ids, ground_truth], axis=1)
    one_hot = tf.SparseTensor(indices=ids,
                              values=tf.ones([n_voxels], dtype=tf.float32),
                              dense_shape=[n_voxels, n_classes])

    if weight_map is not None:
        weight_map_nclasses = tf.reshape(
            tf.tile(weight_map, [n_classes]), prediction.get_shape())
        ref_vol = tf.sparse_reduce_sum(
            weight_map_nclasses * one_hot, reduction_axes=[0])

        intersect = tf.sparse_reduce_sum(
            weight_map_nclasses * one_hot * prediction, reduction_axes=[0])
        seg_vol = tf.reduce_sum(
            tf.multiply(weight_map_nclasses, prediction), 0)
    else:
        ref_vol = tf.sparse_reduce_sum(one_hot, reduction_axes=[0])
        intersect = tf.sparse_reduce_sum(one_hot * prediction,
                                         reduction_axes=[0])
        seg_vol = tf.reduce_sum(prediction, 0)
    if type_weight == 'Square':
        weights = tf.reciprocal(tf.square(ref_vol))
    elif type_weight == 'Simple':
        weights = tf.reciprocal(ref_vol)
    elif type_weight == 'Uniform':
        weights = tf.ones_like(ref_vol)
    else:
        raise ValueError("The variable type_weight \"{}\" "
                         "is not defined.".format(type_weight))
    new_weights = tf.where(tf.is_inf(weights), tf.zeros_like(weights), weights)
    weights = tf.where(tf.is_inf(weights), tf.ones_like(weights) *
                       tf.reduce_max(new_weights), weights)
    generalised_dice_numerator = \
        2 * tf.reduce_sum(tf.multiply(weights, intersect))
    generalised_dice_denominator = \
        tf.reduce_sum(tf.multiply(weights, seg_vol + ref_vol))
    generalised_dice_score = \
        generalised_dice_numerator / generalised_dice_denominator
    return 1 - generalised_dice_score
Author: nhu2000 | Project: NiftyNet | Lines: 60 | Source: loss_segmentation.py
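A hypothetical call to the function above, for orientation only: the shapes and the softmax step are assumptions, since NiftyNet flattens predictions to [n_voxels, n_classes] through its own pipeline before the loss is applied.

import numpy as np
import tensorflow as tf

n_voxels, n_classes = 1000, 4
prediction = tf.nn.softmax(tf.random_normal([n_voxels, n_classes]))
ground_truth = tf.constant(np.random.randint(0, n_classes, size=n_voxels))
loss = generalised_dice_loss(prediction, ground_truth, type_weight='Square')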

Example 3: prune_outside_window

def prune_outside_window(keypoints, window, scope=None):
  """Prunes keypoints that fall outside a given window.

  This function replaces keypoints that fall outside the given window with nan.
  See also clip_to_window which clips any keypoints that fall outside the given
  window.

  Args:
    keypoints: a tensor of shape [num_instances, num_keypoints, 2]
    window: a tensor of shape [4] representing the [y_min, x_min, y_max, x_max]
      window outside of which the op should prune the keypoints.
    scope: name scope.

  Returns:
    new_keypoints: a tensor of shape [num_instances, num_keypoints, 2]
  """
  with tf.name_scope(scope, 'PruneOutsideWindow'):
    y, x = tf.split(value=keypoints, num_or_size_splits=2, axis=2)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)

    valid_indices = tf.logical_and(
        tf.logical_and(y >= win_y_min, y <= win_y_max),
        tf.logical_and(x >= win_x_min, x <= win_x_max))

    new_y = tf.where(valid_indices, y, np.nan * tf.ones_like(y))
    new_x = tf.where(valid_indices, x, np.nan * tf.ones_like(x))
    new_keypoints = tf.concat([new_y, new_x], 2)

    return new_keypoints
Author: ALISCIFP | Project: models | Lines: 29 | Source: keypoint_ops.py
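A small usage sketch under assumed inputs (two instances with three keypoints each, coordinates normalized to [0, 1]):

import numpy as np
import tensorflow as tf

keypoints = tf.constant(np.random.rand(2, 3, 2), dtype=tf.float32)
window = tf.constant([0.25, 0.25, 0.75, 0.75])  # [y_min, x_min, y_max, x_max]
pruned = prune_outside_window(keypoints, window)
# keypoints outside the window come back as [nan, nan] in the result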

Example 4: __init__

    def __init__(self,
                 sess,
                 dataset_name='facades',
                 checkpoint_dir=None):
        self.sess = sess
        self.dataset_name = dataset_name
        self.checkpoint_dir = checkpoint_dir

        self.real_data = tf.placeholder(tf.float32,
                                        [BATCH_SIZE, IMAGE_SIZE, IMAGE_SIZE, 3 + 3],
                                        name='input_images')
        self.real_A = self.real_data[:, :, :, :3]
        self.real_B = self.real_data[:, :, :, 3:6]

        self.fake_B = generator(self.real_A, name="generatorA2B")
        self.fake_A = generator(self.real_B, name="generatorB2A")
        self.fake_B_fake_A = generator(self.fake_B, reuse=True, name="generatorB2A")
        self.fake_A_fake_B = generator(self.fake_A, reuse=True, name="generatorA2B")

        self.DA_real = discriminator(self.real_A, reuse=False, name="discriminatorA")
        self.DB_real = discriminator(self.real_B, reuse=False, name="discriminatorB")
        self.DA_fake = discriminator(self.fake_A, reuse=True, name="discriminatorA")
        self.DB_fake = discriminator(self.fake_B, reuse=True, name="discriminatorB")

        self.g_loss_a2b = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DB_fake, labels=tf.ones_like(self.DB_fake))) + 100 * tf.reduce_mean(
            tf.abs(self.real_A - self.fake_B_fake_A)) + 100 * tf.reduce_mean(
            tf.abs(self.real_B - self.fake_B))
        self.g_loss_b2a = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DA_fake, labels=tf.ones_like(self.DA_fake))) + 100 * tf.reduce_mean(
            tf.abs(self.real_B - self.fake_A_fake_B)) + 100 * tf.reduce_mean(
            tf.abs(self.real_A - self.fake_A))
        self.g_loss = self.g_loss_a2b + self.g_loss_b2a

        self.d_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DB_fake, labels=tf.zeros_like(self.DB_fake))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DB_real, labels=tf.ones_like(self.DB_real))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DA_fake, labels=tf.zeros_like(self.DA_fake))) + tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.DA_real, labels=tf.ones_like(self.DA_real)))

        self.d_loss_sum = tf.summary.scalar("d_loss", self.d_loss)
        self.g_loss_sum = tf.summary.scalar("g_loss", self.g_loss)
        self.g_loss_a2b_sum = tf.summary.scalar("g_loss_a2b", self.g_loss_a2b)
        self.g_loss_b2a_sum = tf.summary.scalar("g_loss_b2a", self.g_loss_b2a)
        self.real_A_sum = tf.summary.image("real_A", self.real_A)
        self.real_B_sum = tf.summary.image("real_B", self.real_B)
        self.fake_A_sum = tf.summary.image("fake_A", self.fake_A)
        self.fake_B_sum = tf.summary.image("fake_B", self.fake_B)
        self.fake_AB_sum = tf.summary.image("fake_AB", self.fake_A_fake_B)
        self.fake_BA_sum = tf.summary.image("fake_BA", self.fake_B_fake_A)

        self.d_sum = tf.summary.merge([self.d_loss_sum])
        self.g_sum = tf.summary.merge([self.g_loss_sum, self.g_loss_a2b_sum, self.g_loss_b2a_sum,
                                       self.real_A_sum, self.real_B_sum, self.fake_A_sum,
                                       self.fake_B_sum, self.fake_AB_sum, self.fake_BA_sum])

        training_vars = tf.trainable_variables()
        self.d_vars = [var for var in training_vars if 'd_' in var.name]
        self.g_vars = [var for var in training_vars if 'g_' in var.name]
        self.saver = tf.train.Saver(max_to_keep=5)
Author: yaoyaowd | Project: tensorflow_demo | Lines: 60 | Source: cycle_model.py

Example 5: __init__

    def __init__(self,
                 sess,
                 batch_size=32,
                 image_size=256,
                 lam=0.8,
                 checkpoint_dir=None):
        self.sess = sess
        self.batch_size = batch_size
        self.image_size = image_size
        self.image_shape = [image_size, image_size, 3]
        self.lam = lam
        self.checkpoint_dir = checkpoint_dir
        self.global_step = tf.Variable(0, trainable=False)

        self.images = tf.placeholder(tf.float32, [batch_size] + self.image_shape, name='images')
        self.images_summary = tf.summary.image("image", self.images)

        self.d_bns = [batch_norm(name='d_bn{}'.format(i)) for i in range(5)]
        self.local_d_bns = [batch_norm(name='d_local_bn{}'.format(i)) for i in range(4)]
        self.g_bns = [batch_norm(name='g_bn{}'.format(i)) for i in range(15)]

        self.D, self.D_logits = self.discriminator(self.images, self.image_size)
        self.d_loss_real = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.D_logits, labels=tf.ones_like(self.D)))
        self.D_summary = tf.summary.histogram("d", self.D)
        self.d_loss_real_summary = tf.summary.scalar("d_loss_real", self.d_loss_real)

        self.masks = tf.placeholder(tf.float32, [batch_size] + self.image_shape, name='masks')
        self.MG = tf.multiply(self.images, self.masks)
        self.G = self.generator(self.MG)
        self.MG_summary = tf.summary.image("mg", self.MG)
        self.G_summary = tf.summary.image("g", self.G)

        self.D_fake, self.D_fake_logits = self.discriminator(self.G, self.image_size, reuse=True)
        self.d_loss_fake = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.D_fake_logits, labels=tf.zeros_like(self.D_fake)))
        self.D_fake_summary = tf.summary.histogram("d_fake", self.D_fake)
        self.d_loss_fake_summary = tf.summary.scalar("d_loss_fake", self.d_loss_fake)
        self.d_loss = self.d_loss_real + self.d_loss_fake

        self.g_loss_d = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
            logits=self.D_fake_logits, labels=tf.ones_like(self.D_fake)))
        self.g_loss_l = tf.reduce_mean(tf.contrib.layers.flatten(
            tf.multiply(self.G - self.images, self.G - self.images)))
        self.g_loss = (1 - self.lam) * self.g_loss_d + self.lam * self.g_loss_l
        self.g_loss_d_summary = tf.summary.scalar("g_loss_d", self.g_loss_d)
        self.g_loss_l_summary = tf.summary.scalar("g_loss_l", self.g_loss_l)
        self.g_loss_summary = tf.summary.scalar("g_loss", self.g_loss)

        t_vars = tf.trainable_variables()
        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]
        self.saver = tf.train.Saver(max_to_keep=10)

        self.g_summary = tf.summary.merge([
            self.G_summary, self.MG_summary, self.D_fake_summary, self.d_loss_fake_summary,
            self.g_loss_summary, self.g_loss_d_summary, self.g_loss_l_summary])
        self.d_summary = tf.summary.merge([
            self.images_summary, self.D_summary, self.d_loss_real_summary])
        self.writer = tf.summary.FileWriter(os.path.join(self.checkpoint_dir, "logs"), self.sess.graph)
Author: yaoyaowd | Project: tensorflow_demo | Lines: 60 | Source: model.py

Example 6: p_zt

 def p_zt(self, prev_state, t):
   """Computes the model p(z_t| z_{t-1})."""
   batch_size = tf.shape(prev_state)[0]
   if t > 0:
     z_mu_p = prev_state + self.bs[t - 1]
     p_zt = tf.contrib.distributions.Normal(
         loc=z_mu_p, scale=tf.sqrt(tf.ones_like(z_mu_p) * self.variance))
     return p_zt
   else:  # p(z_0) is mixture of two Normals
     mu_pos = tf.ones([batch_size, self.state_size], dtype=self.dtype) * self.prior_mode_mean
     mu_neg = tf.ones([batch_size, self.state_size], dtype=self.dtype) * -self.prior_mode_mean
     z0_pos = tf.contrib.distributions.Normal(
         loc=mu_pos,
         scale=tf.sqrt(tf.ones_like(mu_pos) * self.variance))
     z0_neg = tf.contrib.distributions.Normal(
         loc=mu_neg,
         scale=tf.sqrt(tf.ones_like(mu_neg) * self.variance))
     mode_probs = tf.convert_to_tensor([self.mixing_coeff, 1-self.mixing_coeff], dtype=tf.float64)
     mode_probs = tf.tile(mode_probs[tf.newaxis, tf.newaxis, :], [batch_size, 1, 1])
     mode_selection_dist = tf.contrib.distributions.Categorical(probs=mode_probs)
     z0_dist = tf.contrib.distributions.Mixture(
         cat=mode_selection_dist,
         components=[z0_pos, z0_neg],
         validate_args=False)
     return z0_dist
Author: 812864539 | Project: models | Lines: 25 | Source: models.py

Example 7: __init__

    def __init__(self, q_values, observations, num_actions, stochastic, eps,
                 softmax, softmax_temp):
        if softmax:
            action_dist = Categorical(q_values / softmax_temp)
            self.action = action_dist.sample()
            self.action_prob = action_dist.sampled_action_prob()
            return

        deterministic_actions = tf.argmax(q_values, axis=1)
        batch_size = tf.shape(observations)[0]

        # Special case masked out actions (q_value ~= -inf) so that we don't
        # even consider them for exploration.
        random_valid_action_logits = tf.where(
            tf.equal(q_values, tf.float32.min),
            tf.ones_like(q_values) * tf.float32.min, tf.ones_like(q_values))
        random_actions = tf.squeeze(
            tf.multinomial(random_valid_action_logits, 1), axis=1)

        chose_random = tf.random_uniform(
            tf.stack([batch_size]), minval=0, maxval=1, dtype=tf.float32) < eps
        stochastic_actions = tf.where(chose_random, random_actions,
                                      deterministic_actions)
        self.action = tf.cond(stochastic, lambda: stochastic_actions,
                              lambda: deterministic_actions)
        self.action_prob = None
Author: robertnishihara | Project: ray | Lines: 26 | Source: dqn_policy_graph.py
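The tf.where / tf.ones_like pair above is a masking idiom: actions whose q-value sits at tf.float32.min keep that effectively minus-infinite logit, while every valid action gets the same logit of 1, so random exploration only ever samples valid actions. A standalone sketch with made-up q-values:

import tensorflow as tf

q_values = tf.constant([[1.0, tf.float32.min, 0.5]])  # action 1 is masked out
random_valid_action_logits = tf.where(
    tf.equal(q_values, tf.float32.min),
    tf.ones_like(q_values) * tf.float32.min,  # keep masked actions at ~-inf
    tf.ones_like(q_values))                   # uniform logit for valid actions
random_action = tf.multinomial(random_valid_action_logits, 1)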

Example 8: build_losses

    def build_losses(self, logits_real, logits_fake):
        """D and G play two-player minimax game with value function V(G,D)

          min_G max_D V(D, G) = IE_{x ~ p_data} [log D(x)] + IE_{z ~ p_fake} [log (1 - D(G(z)))]

        Args:
            logits_real (tf.Tensor): discrim logits from real samples
            logits_fake (tf.Tensor): discrim logits from fake samples produced by generator
        """
        with tf.name_scope("GAN_loss"):
            score_real = tf.sigmoid(logits_real)
            score_fake = tf.sigmoid(logits_fake)
            tf.summary.histogram('score-real', score_real)
            tf.summary.histogram('score-fake', score_fake)

            with tf.name_scope("discrim"):
                d_loss_pos = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_real, labels=tf.ones_like(logits_real)), name='loss_real')
                d_loss_neg = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_fake, labels=tf.zeros_like(logits_fake)), name='loss_fake')

                d_pos_acc = tf.reduce_mean(tf.cast(score_real > 0.5, tf.float32), name='accuracy_real')
                d_neg_acc = tf.reduce_mean(tf.cast(score_fake < 0.5, tf.float32), name='accuracy_fake')

                d_accuracy = tf.add(.5 * d_pos_acc, .5 * d_neg_acc, name='accuracy')
                self.d_loss = tf.add(.5 * d_loss_pos, .5 * d_loss_neg, name='loss')

            with tf.name_scope("gen"):
                self.g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
                    logits=logits_fake, labels=tf.ones_like(logits_fake)), name='loss')
                g_accuracy = tf.reduce_mean(tf.cast(score_fake > 0.5, tf.float32), name='accuracy')

            add_moving_summary(self.g_loss, self.d_loss, d_accuracy, g_accuracy)
Author: ahuirecome | Project: tensorpack | Lines: 33 | Source: GAN.py
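Most of the GAN examples on this page (Examples 4, 5, 8, and 12 through 15) rely on the same idiom: tf.ones_like and tf.zeros_like manufacture the "real" and "fake" target tensors for sigmoid cross-entropy. A condensed standalone sketch with illustrative shapes:

import tensorflow as tf

logits_real = tf.random_normal([8, 1])  # stand-ins for discriminator outputs
logits_fake = tf.random_normal([8, 1])
# discriminator: push real scores toward 1 and fake scores toward 0
d_loss = tf.reduce_mean(
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=logits_real, labels=tf.ones_like(logits_real)) +
    tf.nn.sigmoid_cross_entropy_with_logits(
        logits=logits_fake, labels=tf.zeros_like(logits_fake)))
# generator: push fake scores toward 1 (the non-saturating variant)
g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
    logits=logits_fake, labels=tf.ones_like(logits_fake)))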

Example 9: _sample_n

  def _sample_n(self, n, seed=None):
    n_draws = tf.cast(self.total_count, dtype=tf.int32)
    k = self.event_shape_tensor()[0]

    # broadcast the total_count and logits to same shape
    n_draws = tf.ones_like(
        self.logits[..., 0], dtype=n_draws.dtype) * n_draws
    logits = tf.ones_like(
        n_draws[..., tf.newaxis], dtype=self.logits.dtype) * self.logits

    # flatten the total_count and logits
    flat_logits = tf.reshape(logits, [-1, k])  # [B1B2...Bm, k]
    flat_ndraws = n * tf.reshape(n_draws, [-1])  # [B1B2...Bm]

    # computes each total_count and logits situation by map_fn
    def _sample_single(args):
      logits, n_draw = args[0], args[1]  # [K], []
      x = tf.multinomial(logits[tf.newaxis, ...], n_draw,
                         seed)  # [1, n*n_draw]
      x = tf.reshape(x, shape=[n, -1])  # [n, n_draw]
      x = tf.reduce_sum(tf.one_hot(x, depth=k), axis=-2)  # [n, k]
      return x

    x = tf.map_fn(
        _sample_single, [flat_logits, flat_ndraws],
        dtype=self.dtype)  # [B1B2...Bm, n, k]

    # reshape the results to proper shape
    x = tf.transpose(x, perm=[1, 0, 2])
    final_shape = tf.concat([[n], self.batch_shape_tensor(), [k]], 0)
    x = tf.reshape(x, final_shape)  # [n, B1, B2,..., Bm, k]
    return x
Author: asudomoeva | Project: probability | Lines: 32 | Source: multinomial.py
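In this example tf.ones_like serves as a broadcasting device rather than a loss target: multiplying a ones tensor shaped like the batch by a scalar tiles that scalar across the batch. A minimal sketch with made-up shapes:

import tensorflow as tf

total_count = tf.constant(5)   # scalar number of draws
logits = tf.zeros([3, 4])      # batch of 3 distributions over k=4 categories
n_draws = tf.ones_like(logits[..., 0], dtype=total_count.dtype) * total_count
# n_draws now has shape [3]: the scalar count broadcast to the batch shape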

Example 10: add_dyprune

def add_dyprune(weights):
    crate = config.crate[weights.name[:-2]]  # hyperparameter C (pruning rate)
    prune_mask = tf.Variable(tf.ones_like(weights),name=weights.name[:-2]+'mask', trainable=False)

    # calculate mask: mean and std of the currently unpruned weights
    mean = tf.divide(tf.reduce_sum(tf.multiply(tf.abs(weights), prune_mask)),
                     tf.reduce_sum(prune_mask))
    var = tf.multiply(weights,prune_mask)
    var = tf.square(var)
    mean_q = tf.square(mean)*tf.reduce_sum(prune_mask)
    var = tf.reduce_sum(var) - mean_q
    var = tf.divide(var,tf.reduce_sum(prune_mask))
    var = tf.sqrt(var)
    t1_lower = (mean + var * crate) * 0.25  # hyperparameter a
    t1_upper = (mean + var * crate) * 0.45  # hyperparameter b
    
    indicator_lower1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_lower)
    indicator_upper1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_upper)
    indicator_matrix1 = tf.greater_equal(prune_mask, tf.zeros_like(weights))
    indicator_matrix1 = tf.logical_and(indicator_matrix1, indicator_lower1)
    indicator_matrix1 = tf.logical_or(indicator_matrix1, indicator_upper1)
    indicator_matrix1 = tf.to_float(indicator_matrix1)
    update = prune_mask.assign(indicator_matrix1)

    prune_fc = tf.multiply(weights, prune_mask)
    return prune_fc
Author: Ewenwan | Project: Project | Lines: 25 | Source: densenetfinalDNS.py
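A hedged usage sketch for add_dyprune; the variable name and shape are hypothetical, and config.crate would need an entry for it:

import tensorflow as tf

weights = tf.get_variable('fc1w', shape=[784, 256])  # config.crate['fc1w'] assumed
pruned = add_dyprune(weights)  # use `pruned` in place of `weights` in the forward pass
# note: the mask-update op (`update`) created inside is not returned, so a caller
# must fetch it or add a control dependency for the mask to actually refresh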

Example 11: create_model

    def create_model(self):
        """Create tensorflow variables and graph."""
        self.enc_inp = [tf.placeholder(tf.int32, shape=(None,),
                                       name="inp%i" % t)
                        for t in range(self.bucket[0])]

        self.labels = [tf.placeholder(tf.int32, shape=(None,),
                                      name="labels%i" % t)
                       for t in range(self.bucket[1])]

        self.weights = [tf.ones_like(labels_t, dtype=tf.float32)
                        for labels_t in self.labels]

        # Decoder input: prepend some "GO" token and drop the final
        # token of the encoder input
        self.dec_inp = ([GO_ID * tf.ones_like(self.labels[0], dtype=np.int32,
                                              name="GO")] + self.labels[:-1])

        single_cell = tf.nn.rnn_cell.LSTMCell(self.memory_dim)
        if self.num_layers > 1:
            self.cell = tf.nn.rnn_cell.MultiRNNCell(
                [single_cell] * self.num_layers)
        else:
            self.cell = single_cell

        # Sequence to sequence model
        self.dec_outputs, self.dec_memory = tf.nn.seq2seq.embedding_rnn_seq2seq(
            self.enc_inp, self.dec_inp, self.cell, len(self.en_chars),
            len(self.hi_chars), self.embedding_dim)
Author: chsasank | Project: indic-transliteration | Lines: 29 | Source: train_transliterator.py

Example 12: build_model

    def build_model(self):
        if self.y_dim:
            self.y= tf.placeholder(tf.float32, [None, self.y_dim], name='y')

        self.images = tf.placeholder(tf.float32, [self.batch_size] + self.image_shape,
                                    name='real_images')
        self.sample_images= tf.placeholder(tf.float32, [self.sample_size] + self.image_shape,
                                        name='sample_images')
        self.z = tf.placeholder(tf.float32, [None, self.z_dim],
                                name='z')

        self.G = self.generator(self.z)
        self.D = self.discriminator(self.images)

        self.sampler = self.sampler(self.z)
        self.D_ = self.discriminator(self.G, reuse=True)

        self.d_loss_real = binary_cross_entropy_with_logits(tf.ones_like(self.D), self.D)
        self.d_loss_fake = binary_cross_entropy_with_logits(tf.zeros_like(self.D_), self.D_)
        self.g_loss = binary_cross_entropy_with_logits(tf.ones_like(self.D_), self.D_)
                                                    
        self.d_loss = self.d_loss_real + self.d_loss_fake

        t_vars = tf.trainable_variables()

        self.d_vars = [var for var in t_vars if 'd_' in var.name]
        self.g_vars = [var for var in t_vars if 'g_' in var.name]

        self.saver = tf.train.Saver()
Author: ixtel | Project: DCGAN-tensorflow | Lines: 29 | Source: model.py

Example 13: compute_losses

    def compute_losses(self, images, wrong_images, fake_images, embeddings):
        real_logit = self.model.get_discriminator(images, embeddings)
        wrong_logit = self.model.get_discriminator(wrong_images, embeddings)
        fake_logit = self.model.get_discriminator(fake_images, embeddings)

        real_d_loss = \
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=real_logit, labels=tf.ones_like(real_logit))
        real_d_loss = tf.reduce_mean(real_d_loss)
        wrong_d_loss = \
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=wrong_logit, labels=tf.zeros_like(wrong_logit))
        wrong_d_loss = tf.reduce_mean(wrong_d_loss)
        fake_d_loss = \
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=fake_logit, labels=tf.zeros_like(fake_logit))
        fake_d_loss = tf.reduce_mean(fake_d_loss)
        if cfg.TRAIN.B_WRONG:
            discriminator_loss =\
                real_d_loss + (wrong_d_loss + fake_d_loss) / 2.
            self.log_vars.append(("d_loss_wrong", wrong_d_loss))
        else:
            discriminator_loss = real_d_loss + fake_d_loss
        self.log_vars.append(("d_loss_real", real_d_loss))
        self.log_vars.append(("d_loss_fake", fake_d_loss))

        generator_loss = \
            tf.nn.sigmoid_cross_entropy_with_logits(
                logits=fake_logit, labels=tf.ones_like(fake_logit))
        generator_loss = tf.reduce_mean(generator_loss)

        return discriminator_loss, generator_loss
Author: Soledad89 | Project: StackGAN | Lines: 32 | Source: trainer.py

Example 14: add_optimization

def add_optimization(learning_rate, beta1, beta2, disc_gen, disc_true,
                     gen_label, disc_label):
    gen_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=disc_gen, labels=tf.ones_like(disc_gen)), name='gen_loss')

    disc_g_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=disc_gen, labels=tf.zeros_like(disc_gen)))
    disc_x_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(
        logits=disc_true, labels=tf.ones_like(disc_true)))
    disc_loss = tf.add(disc_g_loss, disc_x_loss, name='disc_loss')

    gen_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                 scope=gen_label)
    disc_vars = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES,
                                  scope=disc_label)
    # print 'gen vars---------------------'
    # for v in gen_vars:
    #     print v.name
    # print 'disc vars----------------'
    # for v in disc_vars:
    #     print v.name

    gen_opt = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                     beta1=beta1,
                                     beta2=beta2).minimize(gen_loss,
                                                           var_list=gen_vars)
    disc_opt = tf.train.AdamOptimizer(learning_rate=learning_rate,
                                      beta1=beta1,
                                      beta2=beta2).minimize(disc_loss,
                                                            var_list=disc_vars)
    return gen_loss, disc_loss, gen_opt, disc_opt
Author: kkihara | Project: GAN | Lines: 31 | Source: model.py

Example 15: build_loss_and_gradients

  def build_loss_and_gradients(self, var_list):
    x_true = list(six.itervalues(self.data))[0]
    x_fake = list(six.iterkeys(self.data))[0]
    with tf.variable_scope("Disc"):
      d_true = self.discriminator(x_true)

    with tf.variable_scope("Disc", reuse=True):
      d_fake = self.discriminator(x_fake)

    if self.logging:
      tf.summary.histogram("discriminator_outputs",
                           tf.concat([d_true, d_fake], axis=0),
                           collections=[self._summary_key])

    loss_d = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d_true), logits=d_true) + \
        tf.nn.sigmoid_cross_entropy_with_logits(
            labels=tf.zeros_like(d_fake), logits=d_fake)
    loss = tf.nn.sigmoid_cross_entropy_with_logits(
        labels=tf.ones_like(d_fake), logits=d_fake)
    loss_d = tf.reduce_mean(loss_d)
    loss = tf.reduce_mean(loss)

    var_list_d = tf.get_collection(
        tf.GraphKeys.TRAINABLE_VARIABLES, scope="Disc")
    if var_list is None:
      var_list = [v for v in tf.trainable_variables() if v not in var_list_d]

    grads_d = tf.gradients(loss_d, var_list_d)
    grads = tf.gradients(loss, var_list)
    grads_and_vars_d = list(zip(grads_d, var_list_d))
    grads_and_vars = list(zip(grads, var_list))
    return loss, grads_and_vars, loss_d, grads_and_vars_d
Author: wujsAct | Project: edward | Lines: 33 | Source: gan_inference.py


Note: The tensorflow.ones_like examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Do not reproduce without permission.