

Python tensorflow.divide Function Code Examples

This article collects typical usage examples of Python's tensorflow.divide function. If you are wondering exactly what divide does, how to call it, or what real-world uses look like, the curated code examples below should help.


The sections below present 15 code examples of the divide function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
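
As a quick orientation before the examples, here is a minimal sketch of tf.divide itself (assuming TensorFlow 2.x eager execution; under the TF1-style graphs used in most examples below you would evaluate the result inside a Session). tf.divide performs element-wise true division with standard broadcasting:

import tensorflow as tf

x = tf.constant([6.0, 9.0, 12.0])
y = tf.constant(3.0)                 # a scalar broadcasts across x
print(tf.divide(x, y))               # tf.Tensor([2. 3. 4.], shape=(3,), dtype=float32)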

Example 1: add_dyprune

def add_dyprune(weights):
    crate = config.crate[weights.name[:-2]]  # per-layer hyperparameter c (crate)
    prune_mask = tf.Variable(tf.ones_like(weights),name=weights.name[:-2]+'mask', trainable=False)

    # Compute the masked mean and standard deviation of the weights to derive thresholds
    mean = tf.divide(tf.reduce_sum(tf.multiply(tf.abs(weights),prune_mask)),tf.reduce_sum(prune_mask))
    var = tf.multiply(weights,prune_mask)
    var = tf.square(var)
    mean_q = tf.square(mean)*tf.reduce_sum(prune_mask)
    var = tf.reduce_sum(var) - mean_q
    var = tf.divide(var,tf.reduce_sum(prune_mask))
    var = tf.sqrt(var)
    t1_lower = (mean+var*crate)*0.25  # lower threshold (hyperparameter a)
    t1_upper = (mean+var*crate)*0.45  # upper threshold (hyperparameter b)
    
    indicator_lower1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_lower)    
    indicator_upper1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_upper)
    indicator_matrix1 = tf.greater_equal(prune_mask, tf.zeros_like(weights))
    indicator_matrix1 = tf.logical_and(indicator_matrix1,indicator_lower1)
    indicator_matrix1 = tf.logical_or(indicator_matrix1,indicator_upper1)
    indicator_matrix1 = tf.to_float(indicator_matrix1)
    update = prune_mask.assign(indicator_matrix1)

    prune_fc = tf.multiply(weights, prune_mask)
    return prune_fc
Author: Ewenwan | Project: Project | Lines: 25 | Source: densenetfinalDNS.py
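
The thresholds above come from the mean and standard deviation of |weights| under the current mask. A NumPy sketch with hypothetical values (not from the original project) makes the mask update concrete; note that the prune_mask >= 0 indicator is always true for a 0/1 mask, so the update reduces to a magnitude test:

import numpy as np

weights = np.array([0.05, -0.3, 0.8, -0.02])
mask = np.ones_like(weights)
crate = 1.0                                    # hypothetical hyperparameter c

mean = np.sum(np.abs(weights) * mask) / np.sum(mask)
var = np.sum((weights * mask) ** 2) - mean ** 2 * np.sum(mask)
std = np.sqrt(var / np.sum(mask))

t_lower = (mean + std * crate) * 0.25          # prune below this magnitude
t_upper = (mean + std * crate) * 0.45          # always keep above this magnitude
new_mask = (np.abs(weights) >= t_lower) | (np.abs(weights) >= t_upper)
print(new_mask.astype(float))                  # [0. 1. 1. 0.]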

Example 2: compute_nats_and_bits_per_dim

def compute_nats_and_bits_per_dim(data_dim,
                                  latent_dim,
                                  average_reconstruction,
                                  average_prior):
  """Computes negative ELBO, which is an upper bound on the negative likelihood.

  Args:
    data_dim: int-like indicating data dimensionality.
    latent_dim: int-like indicating latent dimensionality.
    average_reconstruction: Scalar Tensor indicating the reconstruction cost
      averaged over all data dimensions and any data batches.
    average_prior: Scalar Tensor indicating the negative log-prior probability
      averaged over all latent dimensions and any data batches.

  Returns:
    Tuple of scalar Tensors, representing the nats and bits per data dimension
    (e.g., subpixels) respectively.
  """
  with tf.name_scope(None, default_name="compute_nats_per_dim"):
    data_dim = tf.cast(data_dim, average_reconstruction.dtype)
    latent_dim = tf.cast(latent_dim, average_prior.dtype)
    negative_log_likelihood = data_dim * average_reconstruction
    negative_log_prior = latent_dim * average_prior
    negative_elbo = negative_log_likelihood + negative_log_prior
    nats_per_dim = tf.divide(negative_elbo, data_dim, name="nats_per_dim")
    bits_per_dim = tf.divide(nats_per_dim, tf.log(2.), name="bits_per_dim")
    return nats_per_dim, bits_per_dim
Author: qixiuai | Project: tensor2tensor | Lines: 27 | Source: latent_layers.py
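
The only subtlety here is the final division: bits = nats / ln 2, a simple change of logarithm base. A quick NumPy check of the arithmetic:

import numpy as np

nats_per_dim = np.log(4.0)                 # hypothetical value: ln(4) nats
bits_per_dim = nats_per_dim / np.log(2.0)
print(bits_per_dim)                        # 2.0 -- ln(4) nats is exactly 2 bits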

Example 3: init_training_graph

    def init_training_graph(self):

        with tf.name_scope('Evaluation'):
            logits = self.last
            prob_b = tf.squeeze(logits, axis=[1, 2])
            self.predictions = tf.argmax(prob_b, axis=1)
            
            with tf.name_scope('Loss'):
                
                self.loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prob_b,
                                                                          labels=tf.cast(self.train_labels_node, tf.int32),
                                                                          name="entropy")))
                tf.summary.scalar("entropy", self.loss)

            with tf.name_scope('Accuracy'):

                LabelInt = tf.cast(self.train_labels_node, tf.int64)
                CorrectPrediction = tf.equal(self.predictions, LabelInt)
                self.accuracy = tf.reduce_mean(tf.cast(CorrectPrediction, tf.float32))
                tf.summary.scalar("accuracy", self.accuracy)

            with tf.name_scope('Prediction'):

                self.TP = tf.count_nonzero(self.predictions * LabelInt)
                self.TN = tf.count_nonzero((self.predictions - 1) * (LabelInt - 1))
                self.FP = tf.count_nonzero(self.predictions * (LabelInt - 1))
                self.FN = tf.count_nonzero((self.predictions - 1) * LabelInt)

            with tf.name_scope('Precision'):

                self.precision = tf.divide(self.TP, tf.add(self.TP, self.FP))
                tf.summary.scalar('Precision', self.precision)

            with tf.name_scope('Recall'):

                self.recall = tf.divide(self.TP, tf.add(self.TP, self.FN))
                tf.summary.scalar('Recall', self.recall)

            with tf.name_scope('F1'):

                num = tf.multiply(self.precision, self.recall)
                dem = tf.add(self.precision, self.recall)
                self.F1 = tf.scalar_mul(2, tf.divide(num, dem))
                tf.summary.scalar('F1', self.F1)

            with tf.name_scope('MeanAccuracy'):
                
                Nprecision = tf.divide(self.TN, tf.add(self.TN, self.FN))
                self.MeanAcc = tf.divide(tf.add(self.precision, Nprecision), 2)

            #self.batch = tf.Variable(0, name = "batch_iterator")

            self.train_prediction = tf.nn.softmax(logits)

            self.test_prediction = tf.nn.softmax(logits)

        tf.global_variables_initializer().run()

        print('Computational graph initialised')
Author: PeterJackNaylor | Project: PhD_Fabien | Lines: 59 | Source: vgg16.py
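
The metric blocks are the textbook definitions built from the TP/TN/FP/FN counts. A plain-Python sketch with made-up counts verifies the same formulas, including F1 = 2PR / (P + R):

TP, TN, FP, FN = 40, 45, 10, 5                       # hypothetical counts
precision = TP / (TP + FP)                           # 0.8
recall = TP / (TP + FN)                              # ~0.889
F1 = 2 * precision * recall / (precision + recall)   # ~0.842
Nprecision = TN / (TN + FN)                          # 0.9, precision of the negative class
MeanAcc = (precision + Nprecision) / 2               # 0.85
print(precision, recall, F1, MeanAcc)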

Example 4: add_tensorboard

    def add_tensorboard(self, session, tensorboard_dir, tb_run_name=None, timeline_enabled=False):
        """
        Add the tensorboard operations to the acoustic RNN
        This method will add ops to feed tensorboard
          self.train_summaries_op : will produce the summary for a training step
          self.test_summaries_op : will produce the summary for a test step
          self.summary_writer_op : will write the summary to disk

        Parameters
        ----------
        :param session: the tensorflow session
        :param tensorboard_dir: path to tensorboard directory
        :param tb_run_name: directory name for the tensorboard files inside tensorboard_dir, if None a default dir
                            will be created
        :param timeline_enabled: enable the output of a trace file for timeline visualization
        """
        self.tensorboard_dir = tensorboard_dir
        self.timeline_enabled = timeline_enabled

        # Define GraphKeys for TensorBoard
        graphkey_training = tf.GraphKeys()
        graphkey_test = tf.GraphKeys()

        # Learning rate
        tf.summary.scalar('Learning_rate', self.learning_rate_var, collections=[graphkey_training, graphkey_test])

        # Loss
        with tf.name_scope('Mean_loss'):
            mean_loss = tf.divide(self.accumulated_mean_loss, self.mini_batch)
            tf.summary.scalar('Training', mean_loss, collections=[graphkey_training])
            tf.summary.scalar('Test', mean_loss, collections=[graphkey_test])

        # Accuracy
        with tf.name_scope('Accuracy_-_Error_Rate'):
            mean_error_rate = tf.divide(self.accumulated_error_rate, self.mini_batch)
            tf.summary.scalar('Training', mean_error_rate, collections=[graphkey_training])
            tf.summary.scalar('Test', mean_error_rate, collections=[graphkey_test])

        # Hidden state
        with tf.name_scope('RNN_internal_state'):
            for idx, state_variable in enumerate(self.rnn_tuple_state):
                tf.summary.histogram('Training_layer-{0}_cell_state'.format(idx), state_variable[0],
                                     collections=[graphkey_training])
                tf.summary.histogram('Test_layer-{0}_cell_state'.format(idx), state_variable[0],
                                     collections=[graphkey_test])
                tf.summary.histogram('Training_layer-{0}_hidden_state'.format(idx), state_variable[1],
                                     collections=[graphkey_training])
                tf.summary.histogram('Test_layer-{0}_hidden_state'.format(idx), state_variable[1],
                                     collections=[graphkey_test])

        self.train_summaries_op = tf.summary.merge_all(key=graphkey_training)
        self.test_summaries_op = tf.summary.merge_all(key=graphkey_test)
        if tb_run_name is None:
            run_name = datetime.now().strftime('%Y-%m-%d--%H-%M-%S')
        else:
            run_name = tb_run_name
        self.summary_writer_op = tf.summary.FileWriter(tensorboard_dir + '/' + run_name + '/', graph=session.graph)
Author: inikdom | Project: rnn-speech | Lines: 57 | Source: AcousticModel.py

Example 5: tf_fastfood_transform

def tf_fastfood_transform(in_x, dd, DD, use_get=False, use_C=False):
    '''Transform from d to D. Pads as necessary.

    For now: assume dd and DD are known in python.'''

    # Tensor d and D
    #assert_D_big = tf.assert_greater_equal(DD, dd, message='d cannot be larger than D')
    #with tf.control_dependencies([assert_D_big]):
    #    ll = tf.cast(tf.round(tf.log(tf.to_float(DD)) / np.log(2)), 'int32')
    #    LL = tf.pow(2, ll)

    # Python d and D
    assert isinstance(dd, int), 'd should be int'
    assert isinstance(DD, int), 'D should be int'
    assert DD >= dd, 'd cannot be larger than D'
    assert dd > 0, 'd and D must be positive'

    ll = int(np.ceil(np.log(DD) / np.log(2)))
    LL = 2 ** ll

    # Make vars
    init_BB = tf.to_float(tf.random_uniform((LL,), 0, 2, dtype='int32')) * 2 - 1
    init_Pi = tf.random_shuffle(tf.range(LL))
    init_GG = tf.random_normal((LL,))
    init_divisor = lambda GG: tf.sqrt(LL * tf.reduce_sum(tf.pow(GG.initialized_value(), 2)))
    if use_get:
        BB = tf.get_variable('B', initializer=init_BB, trainable=False)
        Pi = tf.get_variable('Pi', initializer=init_Pi, trainable=False)
        GG = tf.get_variable('G', initializer=init_GG, trainable=False)
        divisor = tf.get_variable('divisor', initializer=init_divisor(GG), trainable=False)
    else:
        BB = tf.Variable(init_BB, name='B', trainable=False)
        Pi = tf.Variable(init_Pi, name='Pi', trainable=False)
        GG = tf.Variable(init_GG, name='G', trainable=False)
        divisor = tf.Variable(init_divisor(GG), name='divisor', trainable=False)

    fastfood_vars = [BB, Pi, GG, divisor]

    # Implement transform
    dd_pad = tf.pad(in_x, [[0, LL - dd]])
    mul_1 = tf.multiply(BB, dd_pad)
    if use_C:
        mul_2 = tf_fast_walsh_hadamard(mul_1, 0, method='c', normalize=True)
    else:
        mul_2 = tf_fast_walsh_hadamard(mul_1, 0, method='two', normalize=False)
    mul_3 = tf.gather(mul_2, Pi)
    mul_4 = tf.multiply(mul_3, GG)
    if use_C:
        mul_5 = tf_fast_walsh_hadamard(mul_4, 0, method='c', normalize=True)
        print('\nWARNING: check normalization on this next line more carefully\n')
        ret = tf.divide(tf.slice(mul_5, [0], [DD]), divisor * np.sqrt(float(DD) / LL / ll))
    else:
        mul_5 = tf_fast_walsh_hadamard(mul_4, 0, method='two', normalize=False)
        ret = tf.divide(tf.slice(mul_5, [0], [DD]), divisor * np.sqrt(float(DD) / LL))

    return fastfood_vars, ret
Author: niurouli | Project: SWEM | Lines: 56 | Source: rproj_layers_util.py

Example 6: logG

def logG(x, y, theta):
    fv = tff(theta,y)
    gv = tfg(theta,y)
    mu = tf.add(y,tf.multiply(fv,gl.h))
    pr = tf.subtract(x,mu)
    pr2 = tf.square(pr)
    gv2 = tf.square(gv)
    my2 = tf.constant(2.0,dtype=gl.myftype)
    mypi = tf.constant(np.pi,dtype=gl.myftype)
    lgp1 = tf.negative(tf.divide(tf.log(tf.multiply(my2*mypi*gl.h,gv2)),my2))
    lgp2 = tf.negative(tf.divide(pr2,tf.multiply(my2*gl.h,gv2)))
    lg = tf.add(lgp1,lgp2)        
    return lg
Author: hbhat4000 | Project: sdeinference | Lines: 13 | Source: tfdtqem2.py
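
logG is the log-density of the Euler-Maruyama transition x ~ N(y + f·h, h·g²), split into its normalization term and quadratic term. A NumPy/SciPy check with hypothetical drift and diffusion values (tff, tfg, and gl.h stand in here as plain floats) shows the two terms sum to the usual Gaussian logpdf:

import numpy as np
from scipy.stats import norm

x, y, h = 1.2, 1.0, 0.1
fv, gv = 0.5, 0.8                         # hypothetical f(theta, y) and g(theta, y)

mu = y + fv * h
lg = -0.5 * np.log(2 * np.pi * h * gv**2) - (x - mu)**2 / (2 * h * gv**2)
print(lg, norm.logpdf(x, loc=mu, scale=gv * np.sqrt(h)))   # identical values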

Example 7: init_training_graph

    def init_training_graph(self):
        with tf.name_scope('Evaluation'):
            self.logits = self.conv_layer_f(self.last, self.logits_weight, strides=[1,1,1,1], scope_name="logits/")
            self.predictions = tf.argmax(self.logits, axis=3)
            
            with tf.name_scope('Loss'):
                self.loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits,
                                                                          labels=tf.squeeze(tf.cast(self.train_labels_node, tf.int32), axis=[3]),
                                                                          name="entropy")))
                tf.summary.scalar("entropy", self.loss)

            with tf.name_scope('Accuracy'):

                LabelInt = tf.squeeze(tf.cast(self.train_labels_node, tf.int64), axis=[3])
                CorrectPrediction = tf.equal(self.predictions, LabelInt)
                self.accuracy = tf.reduce_mean(tf.cast(CorrectPrediction, tf.float32))
                tf.summary.scalar("accuracy", self.accuracy)

            with tf.name_scope('ClassPrediction'):
                flat_LabelInt = tf.reshape(LabelInt, [-1])
                flat_predictions = tf.reshape(self.predictions, [-1])
                self.cm = tf.confusion_matrix(flat_LabelInt, flat_predictions, self.NUM_LABELS)
                flatten_confusion_matrix = tf.reshape(self.cm, [-1])
                total = tf.reduce_sum(self.cm)
                for i in range(self.NUM_LABELS):
                    name = "Label_{}".format(i)
                    TP, TN, FP, FN = GetCMInfo_TF(self.cm, i, self.NUM_LABELS)

                    precision =  tf.divide(TP, tf.add(TP, FP))
                    recall = tf.divide(TP, tf.add(TP, FN))
                    num = tf.multiply(precision, recall)
                    dem = tf.add(precision, recall)
                    F1 = tf.scalar_mul(2, tf.divide(num, dem))
                    Nprecision = tf.divide(TN, tf.add(TN, FN))
                    MeanAcc = tf.divide(tf.add(precision, Nprecision), 2)

                    tf.summary.scalar(name + '_Precision', precision)
                    tf.summary.scalar(name + '_Recall', recall)
                    tf.summary.scalar(name + '_F1', F1)
                    tf.summary.scalar(name + '_Performance', MeanAcc)
                confusion_image = tf.reshape( tf.cast( self.cm, tf.float32),
                                            [1, self.NUM_LABELS, self.NUM_LABELS, 1])
                tf.summary.image('confusion', confusion_image)

            self.train_prediction = tf.nn.softmax(self.logits)

            self.test_prediction = self.train_prediction

        tf.global_variables_initializer().run()

        print('Computational graph initialised')
Author: PeterJackNaylor | Project: PhD_Fabien | Lines: 51 | Source: UNetMultiClass_v2.py
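
GetCMInfo_TF is project-specific, but the counts it must return follow directly from the confusion matrix: with rows as true labels and columns as predictions (tf.confusion_matrix's convention), class i has its TP on the diagonal, FP in the rest of its column, and FN in the rest of its row. A NumPy sketch:

import numpy as np

cm = np.array([[50,  2,  3],
               [ 4, 40,  6],
               [ 1,  2, 60]])    # rows: true label, columns: prediction
i = 1
TP = cm[i, i]
FP = cm[:, i].sum() - TP
FN = cm[i, :].sum() - TP
TN = cm.sum() - TP - FP - FN
print(TP, FP, FN, TN)            # 40 4 10 114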

Example 8: adloss

    def adloss(self, x, xt, y, global_step):
        with tf.variable_scope('reuse_inference') as scope:
            scope.reuse_variables()
            self.inference(x, training=True)
            source_feature = self.feature
            scope.reuse_variables()
            self.inference(xt, training=True)
            target_feature = self.feature
            target_pred = self.output
        with tf.variable_scope('reuse') as scope:
            source_logits, _ = D(source_feature)
            scope.reuse_variables()
            target_logits, _ = D(target_feature)

        self.source_feature = source_feature
        self.target_feature = target_feature
        self.concat_feature = tf.concat([source_feature, target_feature], 0)
        source_result = tf.argmax(y, 1)
        target_result = tf.argmax(target_pred, 1)
        ones = tf.ones_like(source_feature)
        current_source_count = tf.unsorted_segment_sum(ones, source_result, self.num_classes)
        current_target_count = tf.unsorted_segment_sum(ones, target_result, self.num_classes)

        current_positive_source_count = tf.maximum(current_source_count, tf.ones_like(current_source_count))
        current_positive_target_count = tf.maximum(current_target_count, tf.ones_like(current_target_count))

        current_source_centroid = tf.divide(tf.unsorted_segment_sum(data=source_feature, segment_ids=source_result, num_segments=self.num_classes), current_positive_source_count)
        current_target_centroid = tf.divide(tf.unsorted_segment_sum(data=target_feature, segment_ids=target_result, num_segments=self.num_classes), current_positive_target_count)

        decay = tf.constant(0.3)
        self.decay = decay

        target_centroid = decay * current_target_centroid + (1. - decay) * self.target_moving_centroid
        source_centroid = decay * current_source_centroid + (1. - decay) * self.source_moving_centroid

        self.Semanticloss = protoloss(source_centroid, target_centroid)
        tf.summary.scalar('semanticloss', self.Semanticloss)

        D_real_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=target_logits, labels=tf.ones_like(target_logits)))
        D_fake_loss = tf.reduce_mean(tf.nn.sigmoid_cross_entropy_with_logits(logits=source_logits, labels=tf.zeros_like(source_logits)))
        self.D_loss = D_real_loss + D_fake_loss
        self.G_loss = -self.D_loss
        tf.summary.scalar('G_loss', self.G_loss)
        tf.summary.scalar('JSD', self.G_loss / 2 + math.log(2))

        self.G_loss = 0.1 * self.G_loss
        self.D_loss = 0.1 * self.D_loss
        return self.G_loss, self.D_loss, source_centroid, target_centroid
Author: slowbull | Project: Moving-Semantic-Transfer-Network | Lines: 48 | Source: mstnmodel.py
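
The centroid computation divides per-class feature sums by per-class counts, with the counts clamped to at least one so that classes absent from the batch do not divide by zero. A NumPy equivalent of the tf.unsorted_segment_sum / tf.divide pair:

import numpy as np

features = np.array([[1., 2.], [3., 4.], [5., 6.]])
labels = np.array([0, 1, 0])
num_classes = 2

sums = np.zeros((num_classes, features.shape[1]))
np.add.at(sums, labels, features)                     # segment sum per class
counts = np.bincount(labels, minlength=num_classes).reshape(-1, 1)
counts = np.maximum(counts, 1)                        # avoid division by zero
print(sums / counts)                                  # [[3. 4.], [3. 4.]]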

Example 9: read_tensor_from_image_file

def read_tensor_from_image_file(file_name):
  input_name = "file_reader"
  output_name = "normalized"
  width = input_size
  height = input_size
  num_channels = 3
  file_reader = tf.read_file(file_name, input_name)
  if file_name.endswith(".png"):
    image_reader = tf.image.decode_png(file_reader, channels = 3,
                                       name='png_reader')
  elif file_name.endswith(".gif"):
    image_reader = tf.squeeze(tf.image.decode_gif(file_reader,
                                                  name='gif_reader'))
  elif file_name.endswith(".bmp"):
    image_reader = tf.image.decode_bmp(file_reader, name='bmp_reader')
  else:
    image_reader = tf.image.decode_jpeg(file_reader, channels = 3,
                                        name='jpeg_reader')
  float_caster = tf.cast(image_reader, tf.float32)
  dims_expander = tf.expand_dims(float_caster, 0)
  # resized = tf.image.resize_bilinear(dims_expander, [input_size, input_size])
  normalized = tf.divide(tf.subtract(dims_expander, [input_mean]), [input_std])
  patches = tf.extract_image_patches(normalized,
       ksizes=[1, patch_height, patch_width, 1],
       strides=[1, patch_height // 4, patch_width // 4, 1],  # strides must be ints
       rates=[1, 1, 1, 1],
       padding="VALID")
  patches_shape = tf.shape(patches)
  patches = tf.reshape(patches, [-1, patch_height, patch_width, num_channels])
  patches = tf.image.resize_images(patches, [height, width])
  patches = tf.reshape(patches, [-1, height, width, num_channels])
  sess = tf.Session()
  return sess.run([patches, patches_shape])
Author: jembezmamy | Project: away-pigeons | Lines: 33 | Source: classifier.py
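
The tf.subtract / tf.divide pair is the usual (pixel - mean) / std input normalization. In NumPy terms, with mean/std values assumed here for illustration (they are set elsewhere in the original script):

import numpy as np

input_mean, input_std = 128.0, 128.0        # assumed values
pixels = np.array([0.0, 128.0, 255.0])
print((pixels - input_mean) / input_std)    # [-1.  0.  0.9921875]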

Example 10: __init__

    def __init__(self, n_inputs, n_rules, learning_rate=1e-2):
        self.n = n_inputs
        self.m = n_rules
        self.inputs = tf.placeholder(tf.float32, shape=(None, n_inputs))  # Input
        self.targets = tf.placeholder(tf.float32, shape=None)  # Desired output
        mu = tf.get_variable("mu", [n_rules * n_inputs],
                             initializer=tf.random_normal_initializer(0, 1))  # Means of Gaussian MFS
        sigma = tf.get_variable("sigma", [n_rules * n_inputs],
                                initializer=tf.random_normal_initializer(0, 1))  # Standard deviations of Gaussian MFS
        y = tf.get_variable("y", [1, n_rules], initializer=tf.random_normal_initializer(0, 1))  # Consequent centers

        self.params = tf.trainable_variables()

        self.rul = tf.reduce_prod(
            tf.reshape(tf.exp(-0.5 * tf.square(tf.subtract(tf.tile(self.inputs, (1, n_rules)), mu)) / tf.square(sigma)),
                       (-1, n_rules, n_inputs)), axis=2)  # Rule activations
        # Fuzzy base expansion function:
        num = tf.reduce_sum(tf.multiply(self.rul, y), axis=1)
        den = tf.clip_by_value(tf.reduce_sum(self.rul, axis=1), 1e-12, 1e12)
        self.out = tf.divide(num, den)

        self.loss = tf.losses.huber_loss(self.targets, self.out)  # Loss function computation
        # Other loss functions for regression, uncomment to try them:
        # loss = tf.sqrt(tf.losses.mean_squared_error(target, out))
        # loss = tf.losses.absolute_difference(target, out)
        self.optimize = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(self.loss)  # Optimization step
        # Other optimizers, uncomment to try them:
        # self.optimize = tf.train.RMSPropOptimizer(learning_rate=learning_rate).minimize(self.loss)
        # self.optimize = tf.train.GradientDescentOptimizer(learning_rate=learning_rate).minimize(self.loss)
        self.init_variables = tf.global_variables_initializer()  # Variable initializer
Author: tiagoCuervo | Project: TensorANFIS | Lines: 30 | Source: anfis.py
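
The fuzzy base expansion is an activation-weighted average: each rule's consequent center is weighted by how strongly the rule fires, and tf.divide normalizes by the total activation (clipped away from zero). A NumPy sketch for one sample with hypothetical values:

import numpy as np

rule_activations = np.array([0.1, 0.6, 0.3])   # hypothetical firing strengths
rule_centers = np.array([2.0, 5.0, 8.0])       # hypothetical consequent centers

num = np.sum(rule_activations * rule_centers)
den = np.clip(np.sum(rule_activations), 1e-12, 1e12)
print(num / den)                               # 5.6, pulled toward the strongest rule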

Example 11: _make_activity_op

    def _make_activity_op(self, input_tensor):
        """ Creates the op for calculating the activity of a SOM
        :param input_tensor: A tensor to calculate the activity of. Must be of shape `[batch_size, dim]` where `dim` is
        the dimensionality of the SOM's weights.
        :return A handle to the newly created activity op:
        """
        with self._graph.as_default():
            with tf.name_scope("Activity"):
                # This constant controls the width of the gaussian.
                # The closer to 0 it is, the wider it is.
                c = tf.constant(self._c, dtype="float32")
                # Get the euclidean distance between each neuron and the input vectors
                dist = tf.norm(tf.subtract(
                        tf.expand_dims(self._weights, axis=0),
                        tf.expand_dims(input_tensor, axis=1)),
                    name="Distance")  # [batch_size, neurons]

                # Calculate the Gaussian of the activity. Units with distances closer to 0 will have activities
                # closer to 1.
                activity = tf.exp(tf.multiply(tf.pow(dist, 2), c), name="Gaussian")

                # Convert the activity into a softmax probability distribution
                if self._softmax_activity:
                    activity = tf.divide(tf.exp(activity),
                                         tf.expand_dims(tf.reduce_sum(tf.exp(activity), axis=1), axis=-1),
                                         name="Softmax")

                return tf.identity(activity, name="Output")
Author: alexander-gabriel | Project: tensorflow-som | Lines: 28 | Source: tf_som.py
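
The softmax branch is the standard exp / sum-of-exp normalization spelled out with tf.divide (note it is applied on top of an activity that is itself already an exponential). A NumPy sketch of the normalization step alone:

import numpy as np

activity = np.array([[0.9, 0.1, 0.5]])                     # one batch row
softmax = np.exp(activity) / np.exp(activity).sum(axis=1, keepdims=True)
print(softmax, softmax.sum())                              # each row sums to 1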

Example 12: dia

def dia(model, config, scope, connectsegment, connectfeature):
	with tf.variable_scope(scope), tf.name_scope(scope):
		with tf.variable_scope('inputs'), tf.name_scope('inputs'):
			model['%s_in0length_segment' %scope] = model['%s_out0length' %connectsegment]
			model['%s_in1length_segment' %scope] = model['%s_out1length' %connectsegment]
			model['%s_in2length_segment' %scope] = model['%s_out2length' %connectsegment]
			model['%s_maxin2length_segment' %scope] = model['%s_maxout2length' %connectsegment]
			model['%s_in0length_feature' %scope] = model['%s_out0length' %connectfeature]
			model['%s_in1length_feature' %scope] = model['%s_out1length' %connectfeature]
			model['%s_in2length_feature' %scope] = model['%s_out2length' %connectfeature]
			model['%s_maxin2length_feature' %scope] = model['%s_maxout2length' %connectfeature]
			model['%s_inputs_segment' %scope] = tf.squeeze(model['%s_outputs' %connectsegment], 2, '%s_inputs_segment' %scope)
			model['%s_inputs_feature' %scope] = tf.unstack(tf.transpose(model['%s_outputs' %connectfeature], [1, 0, 2]), name = '%s_inputs_feature' %scope)
			model['%s_out0length' %scope] = model['%s_in0length_feature' %scope]
			model['%s_out1length' %scope] = config.getint('global', 'speaker_size')
			model['%s_out2length' %scope] = tf.stack([config.getint('global', 'speaker_size') for _ in range(model['%s_out0length' %scope])])
			model['%s_maxout2length' %scope] = config.getint('global', 'speaker_size')

		with tf.variable_scope('outputs'), tf.name_scope('outputs'):
			model['%s_topsegmentvalues' %scope], model['%s_topsegmentindices' %scope] = tf.nn.top_k(tf.transpose(model['%s_inputs_segment' %scope], [1, 0]), config.getint('global', 'speaker_size'))
			model['%s_scores' %scope] = [tf.gather(feature, index) for feature, index in zip(model['%s_inputs_feature' %scope], tf.unstack(model['%s_topsegmentindices' %scope]))]
			model['%s_normalizedscores' %scope]  = [tf.divide(score, tf.norm(score, 2, 1, True)) for score in model['%s_scores' %scope]]
			model['%s_outputs' %scope] = tf.add(0.5, tf.multiply(0.5, tf.stack([tf.matmul(score, score, transpose_b = True) for score in model['%s_normalizedscores' %scope]], name = '%s_outputs' %scope)))

	return model
Author: aaiijmrtt | Project: DEEPSPEECH | Lines: 25 | Source: dia.py
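
The last two lines compute cosine similarity between L2-normalized rows (tf.divide by the row norm, then a matmul against the transpose) and map it affinely from [-1, 1] to [0, 1] via 0.5 + 0.5·cos. A NumPy sketch:

import numpy as np

scores = np.array([[3., 4.], [4., 3.], [-3., -4.]])
normalized = scores / np.linalg.norm(scores, axis=1, keepdims=True)
similarity = 0.5 + 0.5 * normalized @ normalized.T
print(np.round(similarity, 3))     # 1.0 on the diagonal, 0.0 for opposed rows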

Example 13: compute_loss

    def compute_loss(self, input_tensor, label, name):
        """
        Compute the loss function.
        :param input_tensor:
        :param label:
        :param name:
        :return:
        """
        with tf.variable_scope(name):
            # Forward pass to obtain the logits
            inference_ret = self.build_model(input_tensor=input_tensor, name='inference')
            # Compute the loss
            decode_logits = inference_ret['logits']
            # Apply bounded inverse class weights
            inverse_class_weights = tf.divide(1.0,
                                              tf.log(tf.add(tf.constant(1.02, tf.float32),
                                                            tf.nn.softmax(decode_logits))))
            decode_logits_weighted = tf.multiply(decode_logits, inverse_class_weights)

            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=decode_logits_weighted, labels=tf.squeeze(label, axis=[3]),
                name='entropy_loss')

            ret = dict()
            ret['entropy_loss'] = loss
            ret['inference_logits'] = inference_ret['logits']

            return ret
Author: dandancat123 | Project: bilibli_notes2 | Lines: 28 | Source: lanenet_binary_segmentation.py
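
The weighting term is the bounded inverse weight w = 1 / log(1.02 + p): as a probability p approaches 0 the weight is capped near 1/log(1.02) ≈ 50, while confident predictions are down-weighted toward 1/log(2.02) ≈ 1.4. A NumPy sketch:

import numpy as np

p = np.array([0.001, 0.5, 0.999])          # per-class softmax probabilities
w = 1.0 / np.log(1.02 + p)
print(np.round(w, 2))                      # [48.12  2.39  1.42] -- rare classes weigh more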

Example 14: weighted_r2_op

def weighted_r2_op(predictions, targets, inputs):
    """ weighted_r2_op.

    An op that calculates the standard error.

    Examples:
        ```python
        input_data = placeholder(shape=[None, 784])
        y_pred = my_network(input_data) # Apply some ops
        y_true = placeholder(shape=[None, 10]) # Labels
        stderr_op = weighted_r2_op(y_pred, y_true, input_data)

        # Calculate standard error by feeding data X and labels Y
        std_error = sess.run(stderr_op, feed_dict={input_data: X, y_true: Y})
        ```

    Arguments:
        predictions: `Tensor`.
        targets: `Tensor`.
        inputs: `Tensor`.

    Returns:
        `Float`. The standard error.

    """
    with tf.name_scope('WeightedStandardError'):
        if hasattr(inputs, '__len__'):
            inputs = tf.add_n(inputs)
        if inputs.get_shape().as_list() != targets.get_shape().as_list():
            raise Exception("Weighted R2 metric requires Inputs and Targets to "
                            "have same shape.")
        a = tf.reduce_sum(tf.square(predictions - inputs))
        b = tf.reduce_sum(tf.square(targets - inputs))
        return tf.divide(a, b)
Author: tflearn | Project: tflearn | Lines: 34 | Source: metrics.py
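
The returned ratio is Σ(pred − input)² / Σ(target − input)², i.e. how much of the targets' input-relative variation the predictions account for. In NumPy, with hypothetical values:

import numpy as np

inputs = np.array([1.0, 2.0, 3.0])
targets = np.array([2.0, 4.0, 6.0])
predictions = np.array([1.5, 3.0, 4.5])    # halfway between inputs and targets

a = np.sum((predictions - inputs) ** 2)
b = np.sum((targets - inputs) ** 2)
print(a / b)                               # 0.25 -- half the offset, squared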

Example 15: __init__

    def __init__(self, state_values, cumulative_rewards, logits, actions,
                 action_space, beta):
        ma_adv_norm = tf.get_variable(
            name="moving_average_of_advantage_norm",
            dtype=tf.float32,
            initializer=100.0,
            trainable=False)
        # advantage estimation
        adv = cumulative_rewards - state_values
        # update averaged advantage norm
        update_adv_norm = tf.assign_add(
            ref=ma_adv_norm,
            value=1e-6 * (tf.reduce_mean(tf.square(adv)) - ma_adv_norm))

        # exponentially weighted advantages
        with tf.control_dependencies([update_adv_norm]):
            exp_advs = tf.exp(
                beta * tf.divide(adv, 1e-8 + tf.sqrt(ma_adv_norm)))

        # log\pi_\theta(a|s)
        dist_cls, _ = ModelCatalog.get_action_dist(action_space, {})
        action_dist = dist_cls(logits)
        logprobs = action_dist.logp(actions)

        self.loss = -1.0 * tf.reduce_mean(
            tf.stop_gradient(exp_advs) * logprobs)
Author: robertnishihara | Project: ray | Lines: 26 | Source: marwil_policy_graph.py
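
The exponentiation divides each advantage by the square root of the moving average of squared advantages (plus a small epsilon), keeping exp_advs in a numerically safe range. A NumPy sketch of that step, with hypothetical values:

import numpy as np

adv = np.array([2.0, -1.0, 0.5])
ma_adv_norm = 4.0                  # hypothetical moving average of squared advantages
beta = 1.0

exp_advs = np.exp(beta * adv / (1e-8 + np.sqrt(ma_adv_norm)))
print(np.round(exp_advs, 3))       # [2.718 0.607 1.284]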


Note: The tensorflow.divide examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright remains with the original authors. Consult each project's license before distributing or reusing the code, and do not republish this compilation without permission.