

Python tensorflow.div Function Code Examples

This article collects and organizes typical usage examples of the tensorflow.div function in Python. If you have been struggling with questions such as: What exactly does the div function do? How is div called? What do real examples of div look like? — then the hand-picked code examples below may help.


A total of 15 code examples of the div function are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
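
Before the project examples, here is a minimal, self-contained sketch of tf.div itself. It assumes the TensorFlow 1.x API that the examples below are written against (tf.div is deprecated in newer releases, where tf.math.divide or the / operator plays the same role), and the tensor values are made up purely for illustration:

import tensorflow as tf

# tf.div performs element-wise division, with the usual broadcasting rules.
numerator = tf.constant([6.0, 9.0, 12.0])
denominator = tf.constant([2.0, 3.0, 4.0])
quotient = tf.div(numerator, denominator)          # -> [3.0, 3.0, 3.0]

# Scalars broadcast against tensors, which is how many examples below use it.
normalized = tf.div(numerator, tf.reduce_sum(numerator))

with tf.Session() as sess:
    print(sess.run(quotient))      # [3. 3. 3.]
    print(sess.run(normalized))    # [0.2222 0.3333 0.4444] (approximately)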

Example 1: __init__

 def __init__(self, label, clauses, save_path=""):
     print "defining the knowledge base", label
     self.label = label
     self.clauses = clauses
     self.parameters = [par for cl in self.clauses for par in cl.parameters]
     if not self.clauses:
         self.tensor = tf.constant(1.0)
     else:
         clauses_value_tensor = tf.concat(0, [cl.tensor for cl in clauses])
         if default_clauses_aggregator == "min":
             print "clauses aggregator is min"
             self.tensor = tf.reduce_min(clauses_value_tensor)
         if default_clauses_aggregator == "mean":
             print "clauses aggregator is mean"
             self.tensor = tf.reduce_mean(clauses_value_tensor)
         if default_clauses_aggregator == "hmean":
             print "clauses aggregator is hmean"
             self.tensor = tf.div(tf.to_float(tf.size(clauses_value_tensor)), tf.reduce_sum(tf.inv(clauses_value_tensor), keep_dims=True))
         if default_clauses_aggregator == "wmean":
             print "clauses aggregator is weighted mean"
             weights_tensor = tf.constant([cl.weight for cl in clauses])
             self.tensor = tf.div(tf.reduce_sum(tf.mul(weights_tensor, clauses_value_tensor)), tf.reduce_sum(weights_tensor))
     if default_positive_fact_penality != 0:
         self.loss = smooth(self.parameters) + \
                     tf.mul(default_positive_fact_penality, self.penalize_positive_facts()) - \
                     PR(self.tensor)
     else:
         self.loss = smooth(self.parameters) - PR(self.tensor)
     self.save_path = save_path
     self.train_op = train_op(self.loss, default_optimizer)
     self.saver = tf.train.Saver(max_to_keep=20)
     print "knowledge base", label, "is defined"
Author: ivanDonadello, Project: knowPic, Lines of code: 32, Source file: logictensornetworks.py
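
For readers skimming the aggregator branches above, the hmean and wmean cases compute, for clause truth values $v_1, \dots, v_n$ and weights $w_i$ (this is a restatement of the code, not text from the original project):

$$\text{hmean} = \frac{n}{\sum_{i=1}^{n} 1/v_i}, \qquad \text{wmean} = \frac{\sum_{i=1}^{n} w_i v_i}{\sum_{i=1}^{n} w_i}$$

Both are built from tf.div; the min and mean branches are the corresponding tf.reduce_min and tf.reduce_mean calls.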

Example 2: loglik_discrete

def loglik_discrete(a, b, y_, u_, output_collection=(), name=None):
    """Returns element-wise Weibull censored discrete log-likelihood.

    Unit-discretized Weibull log-likelihood; the loss is the negative log-likelihood.

    .. note::
        All input values must be of same type and shape.

    :param a: alpha. Positive nonzero `Tensor`.
    :type a: `float32` or `float64`.
    :param b: beta. Positive nonzero `Tensor`.
    :type b: `float32` or `float64`.
    :param y_: time to event. Positive nonzero `Tensor`.
    :type y_: `float32` or `float64`.
    :param u_: indicator. 0.0 if right censored, 1.0 if uncensored `Tensor`.
    :type u_: `float32` or `float64`.
    :param output_collection: name of the collection to collect the result of this op.
    :type output_collection: Tuple of Strings.
    :param String name: name of the operation.
    :return: A `Tensor` of log-likelihoods of the same shape as a, b, y_, u_.
    """

    with tf.name_scope(name, "weibull_loglik_discrete", [a, b, y_, u_]):
        hazard0 = tf.pow(tf.div(y_ + 1e-35, a), b)  # 1e-9 safe, really
        hazard1 = tf.pow(tf.div(y_ + 1.0, a), b)
        loglik = tf.multiply(u_, tf.log(
            tf.exp(hazard1 - hazard0) - 1.0)) - hazard1

        tf.add_to_collection(output_collection, loglik)
    return(loglik)
Author: g6t, Project: wtte-rnn, Lines of code: 30, Source file: tensorflow.py
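
Written out as a formula (my reading of the code above, not documentation from the original repository): with cumulative hazard $\Lambda(t) = (t/\alpha)^{\beta}$, the returned element-wise log-likelihood is

$$\log L = u \,\log\!\left(e^{\Lambda(y+1) - \Lambda(y)} - 1\right) - \Lambda(y+1)$$

where u = 1 for uncensored and u = 0 for right-censored observations; hazard0 and hazard1 in the code are $\Lambda(y)$ and $\Lambda(y+1)$, with the tiny constant added to y only for numerical safety.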

Example 3: scaled_squared_distance

    def scaled_squared_distance(self, X, Y):
        """ Computes the squared distance.

        Parameters
        ----------
        X : np or tf nd.array. shape = (x_samples, n_dim)
            One of the design matrices
        Y : np or tf nd.array. shape = (y_samples, n_dim)
            One of the design matrices
        
        Returns
        -------
        NA : tf nd.array. shape = (x_samples, y_samples)
            Scaled squared distance matrix M where M[i, j] is the sq distance
            between X[i] and Y[j]
        """
        # Scale X and Y accordingly
        Xs, Ys = (tf.div(X, self.length_scales), tf.div(Y, self.length_scales))
        # Create matrix of ones
        Xo = tf.ones(tf.pack([tf.shape(X)[0], 1]))
        Yo = tf.ones(tf.pack([1, tf.shape(Y)[0]]))
        # Precompute squared norms for rows of each matrix
        Xsqn = tf.reshape(tf.reduce_sum(tf.square(Xs), 1), tf.shape(Xo))
        Ysqn = tf.reshape(tf.reduce_sum(tf.square(Ys), 1), tf.shape(Yo))
        # Precompute "interaction" norm
        XYn = tf.matmul(Xs, tf.transpose(Ys))
        # Return the matrix of squared distances
        return tf.matmul(Xsqn, Yo) + tf.matmul(Xo, Ysqn) - 2*XYn
Author: kashizui, Project: automated-statistician, Lines of code: 28, Source file: kernels.py
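
The return expression is the usual expansion of pairwise squared distances, stated here as a reading aid (with $\tilde{x} = x/\ell$ denoting the length-scale division performed by tf.div):

$$\|\tilde{x}_i - \tilde{y}_j\|^2 = \|\tilde{x}_i\|^2 + \|\tilde{y}_j\|^2 - 2\,\tilde{x}_i^{\top}\tilde{y}_j$$

The matmuls with the all-ones matrices Xo and Yo merely broadcast the row norms Xsqn and Ysqn across the (x_samples, y_samples) output.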

Example 4: cell_locate

def cell_locate(size, bbox, S):

    """ 
    locate the center of ground truth in which grid cell

    """
    x = tf.cast(tf.slice(bbox, [0,0], [-1,1]), tf.float32)
    y = tf.cast(tf.slice(bbox, [0,1], [-1,1]), tf.float32)
    w = tf.cast(tf.slice(bbox, [0,2], [-1,1]), tf.float32)
    h = tf.cast(tf.slice(bbox, [0,3], [-1,1]), tf.float32)


    height, width = size

    cell_w = width / S
    cell_h = height / S

    center_y = tf.add(y, tf.mul(h, 0.5))
    center_x = tf.add(x, tf.mul(w, 0.5))

    cell_coord_x = tf.cast(tf.div(center_x, cell_w), tf.int32)
    cell_coord_y = tf.cast(tf.div(center_y, cell_h), tf.int32)

    cell_num = tf.add(tf.mul(cell_coord_y, S), cell_coord_x)

    return cell_num
Author: Johannes-brahms, Project: Yolo, Lines of code: 26, Source file: yolo_utils.py
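
As a quick numeric check of the logic (all values here are made up for illustration): with size = (448, 448) and S = 7, each cell is 64 x 64; a box with x = 96, y = 160, w = 64, h = 64 has its center at (128, 192), so cell_coord_x = 128 / 64 = 2, cell_coord_y = 192 / 64 = 3, and cell_num = 3 * 7 + 2 = 23.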

Example 5: sample

    def sample(self, projected_output):
        """Return integer ID tensor representing the sampled word.
        
        Args:
            projected_output: Tensor [1, 1, state_size], representing a single
                decoder timestep output. 
        """
        # TODO: We really need a tf.control_dependencies check here (for rank).
        with tf.name_scope('decoder_sampler', values=[projected_output]):

            # Protect against extra size-1 dimensions; grab the 1D tensor
            # of size state_size.
            logits = tf.squeeze(projected_output)
            if self.temperature < 0.02:
                return tf.argmax(logits, axis=0)

            # Convert logits to probability distribution.
            probabilities = tf.div(logits, self.temperature)
            projected_output = tf.div(
                tf.exp(probabilities),
                tf.reduce_sum(tf.exp(probabilities), axis=-1))

            # Sample 1 time from the probability distribution.
            sample_ID = tf.squeeze(
                tf.multinomial(tf.expand_dims(probabilities, 0), 1))
        return sample_ID
Author: laurii, Project: DeepChatModels, Lines of code: 26, Source file: decoders.py

Example 6: __init__

    def __init__(self, action1_bounds, action2_bounds, session):
        self.graph = session.graph
        with self.graph.as_default():
            self.sess = session

            self.action_bounds = [[action1_bounds[1], action2_bounds[1]],
                                  [action1_bounds[0], action2_bounds[0]]]

            self.action_size = len(self.action_bounds[0])
            self.action_input = tf.placeholder(tf.float32, [None, self.action_size])

            self.p_max = tf.constant(self.action_bounds[0], dtype=tf.float32)
            self.p_min = tf.constant(self.action_bounds[1], dtype=tf.float32)

            self.p_range = tf.constant([x - y for x, y in zip(self.action_bounds[0], self.action_bounds[1])],
                                       dtype=tf.float32)

            self.p_diff_max = tf.div(-self.action_input + self.p_max, self.p_range)
            self.p_diff_min = tf.div(self.action_input - self.p_min, self.p_range)

            self.zeros_act_grad_filter = tf.zeros([self.action_size])
            self.act_grad = tf.placeholder(tf.float32, [None, self.action_size])

            self.grad_inverter = tf.select(tf.greater(self.act_grad, self.zeros_act_grad_filter),
                                           tf.mul(self.act_grad, self.p_diff_max),
                                           tf.mul(self.act_grad, self.p_diff_min))
Author: JakobBreuninger, Project: neurobotics, Lines of code: 26, Source file: grad_inverter.py
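
In words (again a restatement of the code, not the project's own documentation): for each action dimension, an incoming gradient g is rescaled by how much room the current action p has left before its bound,

$$g' = \begin{cases} g \cdot \dfrac{p_{\max} - p}{p_{\max} - p_{\min}} & g > 0 \\[1ex] g \cdot \dfrac{p - p_{\min}}{p_{\max} - p_{\min}} & g \le 0 \end{cases}$$

which is exactly the tf.select between the two tf.div ratios p_diff_max and p_diff_min.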

Example 7: compute_auc

 def compute_auc(tp, fn, tn, fp, name):
   """Computes the roc-auc or pr-auc based on confusion counts."""
   rec = tf.div(tp + epsilon, tp + fn + epsilon)
   if curve == 'ROC':
     fp_rate = tf.div(fp, fp + tn + epsilon)
     x = fp_rate
     y = rec
   elif curve == 'R':  # recall auc
     x = tf.linspace(1., 0., num_thresholds)
     y = rec
   else:  # curve == 'PR'.
     prec = tf.div(tp + epsilon, tp + fp + epsilon)
     x = rec
     y = prec
   if summation_method == 'trapezoidal':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   (y[:num_thresholds - 1] + y[1:]) / 2.),
       name=name)
   elif summation_method == 'minoring':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   tf.minimum(y[:num_thresholds - 1], y[1:])),
       name=name)
   elif summation_method == 'majoring':
     return tf.reduce_sum(
       tf.multiply(x[:num_thresholds - 1] - x[1:],
                   tf.maximum(y[:num_thresholds - 1], y[1:])),
       name=name)
   else:
     raise ValueError('Invalid summation_method: %s' % summation_method)
Author: fossabot, Project: SiamFC-TensorFlow, Lines of code: 31, Source file: track_metrics.py
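
The three summation branches are standard numerical-integration rules over the (x, y) curve points indexed by threshold; restated here (not from the original file), with K = num_thresholds:

$$\text{AUC} \approx \sum_{k=1}^{K-1} (x_{k-1} - x_k)\,\frac{y_{k-1} + y_k}{2}$$

for 'trapezoidal', while 'minoring' and 'majoring' replace the average $(y_{k-1}+y_k)/2$ with $\min(y_{k-1}, y_k)$ and $\max(y_{k-1}, y_k)$, giving lower and upper step-function estimates.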

Example 8: cosineface_losses

def cosineface_losses(embedding, labels, out_num, w_init=None, s=30., m=0.4):
    '''
    :param embedding: the input embedding vectors
    :param labels:  the input labels, the shape should be eg: (batch_size, 1)
    :param s: scalar value, default is 30
    :param out_num: output class num
    :param m: the margin value, default is 0.4
    :return: the final calculated output; this output is fed into tf.nn.softmax directly
    '''
    with tf.variable_scope('cosineface_loss'):
        # inputs and weights norm
        embedding_norm = tf.norm(embedding, axis=1, keep_dims=True)
        embedding = tf.div(embedding, embedding_norm, name='norm_embedding')
        weights = tf.get_variable(name='embedding_weights', shape=(embedding.get_shape().as_list()[-1], out_num),
                                  initializer=w_init, dtype=tf.float32)
        weights_norm = tf.norm(weights, axis=0, keep_dims=True)
        weights = tf.div(weights, weights_norm, name='norm_weights')
        # cos_theta - m
        cos_t = tf.matmul(embedding, weights, name='cos_t')
        cos_t_m = tf.subtract(cos_t, m, name='cos_t_m')

        mask = tf.one_hot(labels, depth=out_num, name='one_hot_mask')
        inv_mask = tf.subtract(1., mask, name='inverse_mask')

        output = tf.add(s * tf.multiply(cos_t, inv_mask), s * tf.multiply(cos_t_m, mask), name='cosineface_loss_output')
    return output
Author: xy694942097, Project: InsightFace_TF, Lines of code: 26, Source file: face_losses.py
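
What the block returns, before the caller applies tf.nn.softmax and a cross-entropy loss, can be summarized as additive-cosine-margin ("CosFace"-style) logits; in this restatement of the code, $\theta_j$ is the angle between the L2-normalized embedding and the L2-normalized weight column j, and y is the ground-truth class:

$$\text{output}_j = \begin{cases} s\,(\cos\theta_j - m) & j = y \\ s\,\cos\theta_j & j \neq y \end{cases}$$

with scale s = 30 and margin m = 0.4 by default.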

Example 9: cross_entropy

    def cross_entropy(u, label_u, alpha=0.5, normed=False):

        label_ip = tf.cast(
            tf.matmul(label_u, tf.transpose(label_u)), tf.float32)
        s = tf.clip_by_value(label_ip, 0.0, 1.0)

        # compute balance param
        # s_t \in {-1, 1}
        s_t = tf.multiply(tf.add(s, tf.constant(-0.5)), tf.constant(2.0))
        sum_1 = tf.reduce_sum(s)
        sum_all = tf.reduce_sum(tf.abs(s_t))
        balance_param = tf.add(tf.abs(tf.add(s, tf.constant(-1.0))),
                               tf.multiply(tf.div(sum_all, sum_1), s))

        if normed:
            # ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
            ip_1 = tf.matmul(u, tf.transpose(u))

            def reduce_shaper(t):
                return tf.reshape(tf.reduce_sum(t, 1), [tf.shape(t)[0], 1])
            mod_1 = tf.sqrt(tf.matmul(reduce_shaper(tf.square(u)),
                                      reduce_shaper(tf.square(u)), transpose_b=True))
            ip = tf.div(ip_1, mod_1)
        else:
            ip = tf.clip_by_value(tf.matmul(u, tf.transpose(u)), -1.5e1, 1.5e1)
        ones = tf.ones([tf.shape(u)[0], tf.shape(u)[0]])
        return tf.reduce_mean(tf.multiply(tf.log(ones + tf.exp(alpha * ip)) - s * alpha * ip, balance_param))
Author: AllenMao, Project: DeepHash, Lines of code: 27, Source file: dhn.py
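
Condensed into one expression (my summary of the code, not the project's documentation): with pairwise similarity labels $s_{ij} \in \{0, 1\}$, inner products $\langle u_i, u_j \rangle$ (cosine similarities when normed=True), and the balance weights $w_{ij}$ computed from the label counts, the returned loss is

$$\mathcal{L} = \operatorname{mean}_{i,j}\; w_{ij}\left(\log\!\left(1 + e^{\alpha \langle u_i, u_j \rangle}\right) - s_{ij}\,\alpha\,\langle u_i, u_j \rangle\right)$$

i.e. the negative log-likelihood of a pairwise sigmoid model over the hash codes.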

Example 10: getBandWidth

    def getBandWidth(self,input_x,input_y,n_source,n_target,dim):
        ''' calculate bandwidth
        gamma = 1/E(||x-y||) 
        :param input_x:
        :param input_y:
        :param sigma:
        :param n_source:
        :param n_target:
        :param dim:
        :return: gamma
        '''
        x = tf.cast(input_x, tf.float32)
        y = tf.cast(input_y, tf.float32)
        counter = tf.constant(float(n_source))
        sum_up = tf.constant(.0)
        shape = [1, dim]
        for s in range(n_source):
            list1 = tf.slice(x, [s, 0], shape)
            list2 = tf.slice(y, [s, 0], shape)

            # get ||x-y||
            squared = tf.square(tf.sub(list1, list2))
            norm = tf.reduce_sum(tf.sqrt(squared))
            norm = tf.div(norm,tf.constant(float(dim)))

            sum_up  = tf.add(sum_up,tf.to_float(norm))


        gamma = tf.div(counter,sum_up)

        return gamma
Author: IreneZihuiLi, Project: deeplearning, Lines of code: 31, Source file: Maximum_Mean_Discrepancy.py
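
Concretely (a restatement of the code above): the "||x-y||" of the docstring is computed as the per-dimension mean absolute difference of each paired row, since tf.sqrt(tf.square(.)) reduces to an element-wise absolute value, and gamma is the reciprocal of its average over the n_source pairs:

$$\gamma = \left(\frac{1}{n}\sum_{s=1}^{n}\frac{1}{d}\sum_{k=1}^{d}\left|x_{s,k} - y_{s,k}\right|\right)^{-1}$$

with n = n_source and d = dim.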

Example 11: filters_bank

def filters_bank(M, N, J, L=8):
    filters = {}
    filters['psi'] = []

    offset_unpad = 0
    for j in range(J):
        for theta in range(L):
            psi = {}
            psi['j'] = j
            psi['theta'] = theta
            psi_signal = morlet_2d(M, N, 0.8 * 2**j, (int(L - L / 2 - 1) - theta) * np.pi / L, 3.0 / 4.0 * np.pi / 2**j,offset=offset_unpad)  # The 5 is here just to match the LUA implementation :)
            psi_signal_fourier = fft.fft2(psi_signal)
            for res in range(j + 1):
                psi_signal_fourier_res = crop_freq(psi_signal_fourier, res)
                psi[res] = tf.constant(np.stack((np.real(psi_signal_fourier_res), np.imag(psi_signal_fourier_res)), axis=2))
                psi[res] = tf.div(psi[res], (M * N // 2**(2 * j)), name="psi_theta%s_j%s" % (theta, j))
            filters['psi'].append(psi)

    filters['phi'] = {}
    phi_signal = gabor_2d(M, N, 0.8 * 2**(J - 1), 0, 0, offset=offset_unpad)
    phi_signal_fourier = fft.fft2(phi_signal)
    filters['phi']['j'] = J
    for res in range(J):
        phi_signal_fourier_res = crop_freq(phi_signal_fourier, res)
        filters['phi'][res] = tf.constant(np.stack((np.real(phi_signal_fourier_res), np.imag(phi_signal_fourier_res)), axis=2))
        filters['phi'][res] = tf.div(filters['phi'][res], (M * N // 2 ** (2 * J)), name="phi_res%s" % res)

    return filters
Author: MiG-Kharkov, Project: DeepLearningImplementations, Lines of code: 28, Source file: filters_bank.py

Example 12: allreduce

def allreduce(tensor, average=True):
  """Perform an MPI allreduce on a tf.Tensor or tf.IndexedSlices.

  Arguments:
  tensor: tf.Tensor, tf.Variable, or tf.IndexedSlices to reduce.
          The shape of the input must be identical across all ranks.
  average: If True, computes the average over all ranks.
           Otherwise, computes the sum over all ranks.

  This function performs a bandwidth-optimal ring allreduce on the input
  tensor. If the input is an tf.IndexedSlices, the function instead does an
  allgather on the values and the indices, effectively doing an allreduce on
  the represented tensor.
  """
  if isinstance(tensor, tf.IndexedSlices):
    # For IndexedSlices, do two allgathers instead of an allreduce.
    mpi_size = tf.cast(size(), tensor.values.dtype)
    values = allgather(tensor.values)
    indices = allgather(tensor.indices)

    # To make this operation into an average, divide all gathered values by
    # the MPI size.
    new_values = tf.div(values, mpi_size) if average else values
    return tf.IndexedSlices(new_values, indices,
                            dense_shape=tensor.dense_shape)
  else:
    mpi_size = tf.cast(size(), tensor.dtype)
    summed_tensor = _allreduce(tensor)
    new_tensor = (tf.div(summed_tensor, mpi_size)
                  if average else summed_tensor)
    return new_tensor
Author: Crazyonxh, Project: tensorflow, Lines of code: 31, Source file: __init__.py

Example 13: tf_bivariate_normal

def tf_bivariate_normal(y, mu, sigma, rho, n_mixtures, batch_size):
    mu = tf.verify_tensor_all_finite(mu, "Mu not finite!")
    y = tf.verify_tensor_all_finite(y, "Y not finite!")
    delta = tf.sub(tf.tile(tf.expand_dims(y, 1), [1, n_mixtures, 1]), mu)
    delta = tf.verify_tensor_all_finite(delta, "Delta not finite!")
    sigma = tf.verify_tensor_all_finite(sigma, "Sigma not finite!")
    s = tf.reduce_prod(sigma, 2)
    s = tf.verify_tensor_all_finite(s, "S not finite!")
    # -1 <= rho <= 1
    z = tf.reduce_sum(tf.square(tf.div(delta, sigma + epsilon) + epsilon), 2) - \
        2 * tf.div(tf.mul(rho, tf.reduce_prod(delta, 2)), s + epsilon)
    
    z = tf.verify_tensor_all_finite(z, "Z not finite!")
    # 0 < negRho <= 1
    rho = tf.verify_tensor_all_finite(rho, "rho in bivariate normal not finite!")
    negRho = tf.clip_by_value(1 - tf.square(rho), epsilon, 1.0)
    negRho = tf.verify_tensor_all_finite(negRho, "negRho not finite!")
    # Note that if negRho goes near zero, or z goes really large, this explodes.
    negRho = tf.verify_tensor_all_finite(negRho, "negRho in bivariate normal not finite!")
    
    result = tf.clip_by_value(tf.exp(tf.div(-z, 2 * negRho)), 1.0e-8, 1.0e8)
    result = tf.verify_tensor_all_finite(result, "Result in bivariate normal not finite!")
    denom = 2 * np.pi * tf.mul(s, tf.sqrt(negRho))
    denom = tf.verify_tensor_all_finite(denom, "Denom in bivariate normal not finite!")
    result = tf.clip_by_value(tf.div(result, denom + epsilon), epsilon, 1.0)
    result = tf.verify_tensor_all_finite(result, "Result2 in bivariate normal not finite!")
    return result, delta
Author: cybercom-finland, Project: location_tracking_ml, Lines of code: 27, Source file: model.py
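
Stripped of the verify_tensor_all_finite and clip_by_value guards, each mixture component evaluates the density of a correlated bivariate Gaussian (a reading of the code; here $\Delta = y - \mu$ and $\epsilon$ is the small constant added for numerical safety):

$$z = \left(\frac{\Delta_1}{\sigma_1}\right)^{2} + \left(\frac{\Delta_2}{\sigma_2}\right)^{2} - \frac{2\rho\,\Delta_1\Delta_2}{\sigma_1\sigma_2}, \qquad f = \frac{\exp\!\left(-z/(2(1-\rho^2))\right)}{2\pi\,\sigma_1\sigma_2\sqrt{1-\rho^2}}$$

The clipping keeps f inside [epsilon, 1.0] so that the downstream mixture loss stays finite.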

Example 14: batch_sample_with_temperature

def batch_sample_with_temperature(a, temperature=1.0):
    '''This function is like sample_with_temperature, except that it can handle a batch input a of [batch_size x logits].
        It takes logits as input and produces one sampled index per row. This is all done on the GPU
        because this function uses TensorFlow.
        As you increase the temperature, you will get more diversified output but with more errors (usually grammatical if you're
            doing text)
    args:
        Logits -- this must be a 2d array [batch_size x logits]
        Temperature -- how much variance you want in output
    returns:
        Selected number from distribution
    '''

    '''
    Equation can be found here: https://en.wikipedia.org/wiki/Softmax_function (under reinforcement learning)
        Karpathy did it here as well: https://github.com/karpathy/char-rnn/blob/4297a9bf69726823d944ad971555e91204f12ca8/sample.lua'''
    '''a is [batch_size x logits]'''
    with tf.op_scope([a,temperature], "batch_sample_with_temperature"):

        exponent_raised = tf.exp(tf.div(a, temperature)) #start by reduction of temperature, and get rid of negative numbers with exponent
        matrix_X = tf.div(exponent_raised, tf.reduce_sum(exponent_raised, reduction_indices = 1, keep_dims = True)) #this will yield probabilities!
        matrix_U = tf.random_uniform(tf.shape(a), minval = 0, maxval = 1)
        final_number = tf.argmax(tf.sub(matrix_X, matrix_U), dimension = 1) #you want dimension = 1 because you are argmaxing across rows.

    return final_number
Author: viswajithiii, Project: cs224d-project, Lines of code: 25, Source file: seq2seq_custom.py
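
The first two lines inside the op scope implement the temperature-scaled softmax referenced in the Wikipedia link; restated here with a the logit row and T the temperature:

$$p_i = \frac{e^{a_i/T}}{\sum_j e^{a_j/T}}$$

Lower temperatures sharpen the distribution toward the argmax, higher temperatures flatten it; subtracting the uniform matrix_U and taking a row-wise argmax is this snippet's heuristic for drawing one index per row from that distribution.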

Example 15: getkernel

    def getkernel(self,input_x,input_y,n_source,n_target,dim,sigma):
        '''

        :param x: sourceMatrix
        :param y: targetMatrix
        :param n_source: # of source samples
        :param n_target: # of target samples
        :param dim: # of input dimension(features)
        :return: a scalar showing the MMD
        '''
        # ---------------------------------------
        # x = tf.convert_to_tensor(input_x,dtype=tf.float32)
        # y = tf.convert_to_tensor(input_y, dtype=tf.float32)


        x = tf.cast(input_x,tf.float32)
        y = tf.cast(input_y, tf.float32)


        k_ss = k_st = k_tt = tf.constant(0.)
        n_ss = n_st = n_tt = tf.constant(0.)
        flag = tf.constant(1.)
        signal = tf.constant(-2.0)
        shape = [1,dim]
        for s in range(n_source):
            for s_ in range(n_source):
                list1 = tf.slice(x, [s, 0], shape)
                list2 = tf.slice(x, [s_, 0], shape)
                k_ss = tf.add(self.gaussiankernel(list1,list2,sigma),k_ss)
                n_ss = tf.add(n_ss,flag)


        for t in range(n_target):
            for t_ in range(n_target):
                list1 = tf.slice(y, [t, 0], shape)
                list2 = tf.slice(y, [t_, 0], shape)
                k_tt = tf.add(self.gaussiankernel(list1, list2, sigma), k_tt)
                n_tt = tf.add(n_tt, flag)  # count of target-target pairs


        for s in range(n_source):
            for t in range(n_target):
                list1 = tf.slice(x, [s, 0], shape)
                list2 = tf.slice(y, [t, 0], shape)
                k_st = tf.add(self.gaussiankernel(list1, list2, sigma), k_st)
                n_st = tf.add(n_st, flag)  # count of source-target pairs




        term1 = tf.div(k_ss,n_ss )
        term2 = tf.div( k_tt, n_tt)
        term3 = tf.mul(signal, tf.div(k_st,n_st))
        term4 = tf.add(term1,term2)

        kernel = tf.add(term3, term4)


        return kernel
Author: IreneZihuiLi, Project: deeplearning, Lines of code: 59, Source file: Maximum_Mean_Discrepancy.py
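
The three terms assembled at the end correspond to the standard (biased) estimate of the squared maximum mean discrepancy; written out (my summary, not part of the original file):

$$\widehat{\mathrm{MMD}}^2 = \frac{1}{n_s^2}\sum_{i,j} k(x_i, x_j) + \frac{1}{n_t^2}\sum_{i,j} k(y_i, y_j) - \frac{2}{n_s n_t}\sum_{i,j} k(x_i, y_j)$$

where k is the Gaussian kernel of width sigma; k_ss, k_tt and k_st accumulate the three double sums and the n_* counters their pair counts.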


Note: The tensorflow.div function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright of the source code belongs to the original authors, and redistribution and use are subject to the license of the corresponding project. Please do not reproduce this page without permission.