

Python tensorflow.scalar_mul Function Code Examples

This article collects typical usage examples of the tensorflow.scalar_mul function in Python, gathered from open-source projects. If you are unsure what scalar_mul does, how to call it, or what it looks like in real code, the selected examples below should help.


The 15 code examples of scalar_mul shown below are sorted by popularity by default; a minimal standalone usage sketch of the function is given first.
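
Before the project examples, here is a minimal standalone sketch (not taken from any of the repositories below): tf.scalar_mul(scalar, x) multiplies every element of x by a scalar and is equivalent to writing scalar * x, with the extra property that it also accepts tf.IndexedSlices (sparse gradients), which is why it shows up in several gradient-manipulation examples further down.

import tensorflow as tf

x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
y = tf.scalar_mul(0.5, x)        # elementwise, same result as 0.5 * x

with tf.Session() as sess:       # TF 1.x graph-mode style, matching the examples below
    print(sess.run(y))           # [[0.5 1. ]
                                 #  [1.5 2. ]]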

Example 1: create_cost

def create_cost(c, w, b, nb, length, weight_spacing = 1.0, weight_bending = 1.0, gamma = 1.0, kappa = 2.0):
  #tangents
  t  = create_tangent(c);
  tn = create_normalize_tangent(t);
  nl = create_normal(tn);
  nr = tf.scalar_mul(-1.0, nl);
  
  l,r = create_left_right(c,w,nl);

  cost_left = create_cost_soft_min_aligned_distance(l, b, nl, nb, k = kappa, gamma = gamma);
  cost_right= create_cost_soft_min_aligned_distance(r, b, nr, nb, k = kappa, gamma = gamma);
  cost = tf.add(cost_left, cost_right);
  
  #spacing and bending
  if weight_spacing != 0:
    cost_spacing = tf.scalar_mul(weight_spacing, create_cost_spacing(t, length));
    cost = tf.add(cost, cost_spacing);
  else:
    cost_spacing = tf.constant(0);
  
  if weight_bending != 0:
    cost_bending = tf.scalar_mul(weight_bending, create_cost_bending(tn));
    cost = tf.add(cost, cost_bending);
  else:
    cost_bending = tf.constant(0);
  
  return (cost, cost_left, cost_right, cost_spacing, cost_bending, nl, l, r);
Author: ChristophKirst | Project: CElegansBehaviour | Lines: 27 | Source: machine_vision_c.py

Example 2: build_graph

    def build_graph(self,test_decoder_logits):
        print('starting building graph [sentiment-discriminator]')
        with tf.variable_scope("sentiment") as scope:
            self.inputs = tf.slice(test_decoder_logits,[0,0,0],[self.batch_size,self.max_length,self.vocab_size])
            # variable
            weights = {
                'w2v' : tf.get_variable(initializer = tf.random_uniform_initializer(-0.1, 0.1, dtype=tf.float32),shape = [self.vocab_size, self.embedding_dim], name='w2v'),
                'out_1' : tf.get_variable(initializer = tf.random_normal_initializer(), shape = [self.unit_size*2, 1], name='w_out_1'),
            }
            biases = {
            'out_1' : tf.get_variable(initializer = tf.random_normal_initializer(), shape=[1], name='b_out_1'),
            }
            # structure
            def BiRNN(x):
                x = tf.unstack(x, self.max_length, 1)
                lstm_fw_cell = tf.contrib.rnn.BasicLSTMCell(self.unit_size, forget_bias=1.0)
                lstm_bw_cell = tf.contrib.rnn.BasicLSTMCell(self.unit_size,forget_bias=1.0)
                outputs, _, _ = tf.contrib.rnn.static_bidirectional_rnn(lstm_fw_cell, lstm_bw_cell, x, dtype = tf.float32 )
                return outputs[-1]

            self.inputs_softmax = tf.nn.softmax(tf.scalar_mul(tf.constant(5.0, shape=[]),self.inputs))
            y_list=[]
            for i in range(self.inputs.get_shape().as_list()[0]):
                y = tf.matmul(self.inputs_softmax[i], weights['w2v'])
                y = tf.reshape(y, [1, self.max_length, self.embedding_dim])
                y_list.append(y)
            embbed_layer = tf.concat(y_list,0)
            layer_1 = BiRNN(embbed_layer)
            pred = tf.matmul(layer_1, weights['out_1']) + biases['out_1'] 
            # get score
            self.score = tf.sigmoid(pred)
Author: HTY886 | Project: PPGN | Lines: 31 | Source: discrim.py

Example 3: predict_slim

def predict_slim(sample_images, print_func=print):
    """
    Code modified from here: [https://github.com/tensorflow/models/issues/429]
    """
    # Setup preprocessing
    input_tensor = tf.placeholder(tf.float32, shape=(None, 299, 299, 3), name='input_image')
    scaled_input_tensor = tf.scalar_mul((1.0 / 255), input_tensor)
    scaled_input_tensor = tf.subtract(scaled_input_tensor, 0.5)
    scaled_input_tensor = tf.multiply(scaled_input_tensor, 2.0)

    # Setup session
    sess = tf.Session()
    arg_scope = slim_irv2.inception_resnet_v2_arg_scope()
    with slim.arg_scope(arg_scope):
        _, end_points = slim_irv2.inception_resnet_v2(scaled_input_tensor, is_training=False)

    # Load the model
    print_func("Loading TF-slim checkpoint...")
    saver = tf.train.Saver()
    saver.restore(sess, SLIM_CKPT)

    # Make prediction
    predict_values = []
    for image in sample_images:
        im = Image.open(image).resize((299, 299))
        arr = np.expand_dims(np.array(im), axis=0)
        y_pred = sess.run([end_points['Predictions']], feed_dict={input_tensor: arr})
        y_pred = y_pred[0].ravel()

        y_pred = y_pred[1:] / y_pred[1:].sum()  # remove background class and renormalize
        print_func("{} class={} prob={}".format(image, np.argmax(y_pred), np.max(y_pred)))
        predict_values.append(y_pred)

    return predict_values
Author: StevenLOL | Project: keras-inception-resnet-v2 | Lines: 34 | Source: test_inception_resnet_v2.py
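
The three ops applied to input_tensor above are the standard TF-slim Inception preprocessing: they map pixel values from [0, 255] to [-1, 1]. An equivalent single-step sketch (reusing the same input_tensor) would be:

scaled_input_tensor = tf.scalar_mul(2.0 / 255.0, input_tensor) - 1.0   # (x/255 - 0.5) * 2  ==  2x/255 - 1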

Example 4: thresholding

def thresholding(inputs):
    # find the mean for each example in the batch
    mean_output = tf.reduce_mean(inputs, axis=1)

    # scale each mean based on a factor
    threshold_scalar = tf.Variable(utils.threshold_scalar, tf.float32)
    scaled_mean = tf.scalar_mul(threshold_scalar, mean_output)
    scaled_mean = tf.reshape(scaled_mean, [utils.batch_size])

    # clamp the per-example threshold to the range [0.05, 0.15]
    min_thresh_for_max = tf.fill([utils.batch_size], 0.05)
    max_thresh_for_min = tf.fill([utils.batch_size], 0.15)   #0.4
    thresholds = tf.maximum(min_thresh_for_max, scaled_mean)
    thresholds = tf.minimum(max_thresh_for_min, thresholds)

    # zero values under the thresholds using bitmask
    thresholds = tf.reshape(thresholds, [128, 1, 1])

    threshold_mask = tf.cast(tf.greater(inputs, thresholds), tf.float32)
    thresholded_input = tf.multiply(inputs, threshold_mask)

    # peak picking
    # select beats by x[i-1] < x[i] > x[i+1] (local maximum)
    x_minus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=-1, axis=1)), tf.float32)
    x_plus_1 = tf.cast(tf.greater(thresholded_input, tf.manip.roll(thresholded_input, shift=1, axis=1)), tf.float32)
    output = tf.multiply(x_minus_1, x_plus_1)


    return output
Author: nearlyeveryone | Project: bpm | Lines: 29 | Source: bpm_estimator.py

Example 5: focal_loss

  def focal_loss(onehot_labels, cls_preds,
                 alpha=0.25, gamma=2.0, name=None, scope=None):
    """Compute softmax focal loss between logits and onehot labels
    logits and onehot_labels must have same shape [batchsize, num_classes] and
    the same data type (float16, 32, 64)
    Args:
      onehot_labels: Each row labels[i] must be a valid probability distribution
      cls_preds: Unscaled log probabilities
      alpha: The hyperparameter for adjusting biased samples, default is 0.25
      gamma: The hyperparameter for penalizing the easy labeled samples
      name: A name for the operation (optional)
    Returns:
      A 1-D tensor of length batch_size of same type as logits with sigmoid focal loss
    """
    with tf.name_scope(scope, 'focal_loss', [cls_preds, onehot_labels]) as sc:
      logits = tf.convert_to_tensor(cls_preds)
      onehot_labels = tf.convert_to_tensor(onehot_labels)

      precise_logits = tf.cast(logits, tf.float32) if (
        logits.dtype == tf.float16) else logits
      onehot_labels = tf.cast(onehot_labels, precise_logits.dtype)
      predictions = tf.nn.sigmoid(logits)
      predictions_pt = tf.where(tf.equal(onehot_labels, 1), predictions, 1. - predictions)
      # add small value to avoid 0
      epsilon = 1e-8
      alpha_t = tf.scalar_mul(alpha, tf.ones_like(onehot_labels, dtype=tf.float32))
      alpha_t = tf.where(tf.equal(onehot_labels, 1.0), alpha_t, 1 - alpha_t)
      losses = tf.reduce_sum(
        -alpha_t * tf.pow(1. - predictions_pt, gamma) * onehot_labels * tf.log(predictions_pt + epsilon),
        name=name, axis=1)
      return losses
Author: jacke121 | Project: tf_rfcn | Lines: 31 | Source: network.py
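
What the code above computes for each example is the sigmoid focal loss summed over classes, where p is the sigmoid probability, y the one-hot label, γ the focusing parameter and α the class-balance weight (ε = 1e-8 for numerical stability):

\[
\mathcal{L}_i = -\sum_j \alpha_{t,ij}\,(1 - p_{t,ij})^{\gamma}\, y_{ij}\,\log\bigl(p_{t,ij} + \epsilon\bigr),
\qquad
p_{t,ij} = \begin{cases} p_{ij} & y_{ij}=1 \\ 1-p_{ij} & y_{ij}=0 \end{cases},
\qquad
\alpha_{t,ij} = \begin{cases} \alpha & y_{ij}=1 \\ 1-\alpha & y_{ij}=0 \end{cases}
\]

Because each term is multiplied by y_ij, only the true-class entries contribute to the sum; the tf.scalar_mul(alpha, tf.ones_like(...)) call simply builds the constant α tensor that tf.where then flips for the negative entries.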

Example 6: clip_norm

def clip_norm(g, c, n):
    if c <= 0:  # if clipnorm == 0 no need to add ops to the graph
        return g

    # tf require using a special op to multiply IndexedSliced by scalar
    if K.backend() == 'tensorflow':
        condition = n >= c
        then_expression = tf.scalar_mul(c / n, g)
        else_expression = g

        # saving the shape to avoid converting sparse tensor to dense
        if isinstance(then_expression, tf.Tensor):
            g_shape = copy.copy(then_expression.get_shape())
        elif isinstance(then_expression, tf.IndexedSlices):
            g_shape = copy.copy(then_expression.dense_shape)
        if condition.dtype != tf.bool:
            condition = tf.cast(condition, 'bool')
        g = tf.cond(condition,
                    lambda: then_expression,
                    lambda: else_expression)
        if isinstance(then_expression, tf.Tensor):
            g.set_shape(g_shape)
        elif isinstance(then_expression, tf.IndexedSlices):
            g._dense_shape = g_shape
    else:
        g = K.switch(K.greater_equal(n, c), g * c / n, g)
    return g
Author: cbentes | Project: keras | Lines: 27 | Source: optimizers.py
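
The rule implemented above is standard gradient-norm clipping, g ← g · c / n whenever the norm n exceeds the clip value c. tf.scalar_mul is used because g may be a tf.IndexedSlices (the sparse gradient of an embedding lookup), which older TensorFlow versions could not scale with a plain `*`. A minimal sketch of calling it on a dense gradient (assuming the clip_norm above, with the Keras TensorFlow backend imported as K, is in scope):

g = tf.constant([3.0, 4.0])                      # ||g|| = 5
g_clipped = clip_norm(g, c=2.0, n=tf.norm(g))    # 5 >= 2, so the result is g * 2/5 = [1.2, 1.6]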

Example 7: test_decoder_loop

 def test_decoder_loop(prev,i):
     factor = tf.constant(5,shape=(),dtype=tf.float32)
     prev = tf.scalar_mul(factor,tf.add(tf.matmul(prev,weight_output),bias_output))
     prev_index = tf.nn.softmax(prev) 
     pred_prev = tf.matmul(prev_index,word_embedding_matrix)
     next_input = pred_prev
     return next_input
Author: HTY886 | Project: PPGN | Lines: 7 | Source: decoder.py

Example 8: init_training_graph

    def init_training_graph(self):

        with tf.name_scope('Evaluation'):
            logits = self.last
            prob_b = tf.squeeze(logits, squeeze_dims=[1,2])
            self.predictions = tf.argmax(prob_b, axis=1)
            
            with tf.name_scope('Loss'):
                
                self.loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=prob_b,
                                                                          labels=tf.cast(self.train_labels_node, tf.int32),
                                                                          name="entropy")))
                tf.summary.scalar("entropy", self.loss)

            with tf.name_scope('Accuracy'):

                LabelInt = tf.cast(self.train_labels_node, tf.int64)
                CorrectPrediction = tf.equal(self.predictions, LabelInt)
                self.accuracy = tf.reduce_mean(tf.cast(CorrectPrediction, tf.float32))
                tf.summary.scalar("accuracy", self.accuracy)

            with tf.name_scope('Prediction'):

                self.TP = tf.count_nonzero(self.predictions * LabelInt)
                self.TN = tf.count_nonzero((self.predictions - 1) * (LabelInt - 1))
                self.FP = tf.count_nonzero(self.predictions * (LabelInt - 1))
                self.FN = tf.count_nonzero((self.predictions - 1) * LabelInt)

            with tf.name_scope('Precision'):

                self.precision = tf.divide(self.TP, tf.add(self.TP, self.FP))
                tf.summary.scalar('Precision', self.precision)

            with tf.name_scope('Recall'):

                self.recall = tf.divide(self.TP, tf.add(self.TP, self.FN))
                tf.summary.scalar('Recall', self.recall)

            with tf.name_scope('F1'):

                num = tf.multiply(self.precision, self.recall)
                dem = tf.add(self.precision, self.recall)
                self.F1 = tf.scalar_mul(2, tf.divide(num, dem))
                tf.summary.scalar('F1', self.F1)

            with tf.name_scope('MeanAccuracy'):
                
                Nprecision = tf.divide(self.TN, tf.add(self.TN, self.FN))
                self.MeanAcc = tf.divide(tf.add(self.precision, Nprecision) ,2)

            #self.batch = tf.Variable(0, name = "batch_iterator")

            self.train_prediction = tf.nn.softmax(logits)

            self.test_prediction = tf.nn.softmax(logits)

        tf.global_variables_initializer().run()

        print('Computational graph initialised')
Author: PeterJackNaylor | Project: PhD_Fabien | Lines: 59 | Source: vgg16.py
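
For reference, the metric ops assembled above compute the usual binary-classification quantities, with tf.scalar_mul(2, ...) supplying the factor of 2 in F1 and MeanAccuracy averaging the positive- and negative-class precisions:

\[
\text{precision} = \frac{TP}{TP + FP},\qquad
\text{recall} = \frac{TP}{TP + FN},\qquad
F_1 = \frac{2 \cdot \text{precision} \cdot \text{recall}}{\text{precision} + \text{recall}}
\]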

Example 9: create_left_right

def create_left_right(c, w, nrm): 
  left = tf.transpose(tf.mul(tf.transpose(nrm), w));  # tf.mul is the pre-TF-1.0 name of tf.multiply
  right= tf.scalar_mul(-1.0, left);
  
  left = tf.add(left, c);
  right= tf.add(right,c);
  
  return left,right
Author: ChristophKirst | Project: CElegansBehaviour | Lines: 8 | Source: machine_vision_d.py

Example 10: main

def main(_):
  print('loading word embeddings from %s' % FLAGS.embedding_file)
  weight_matrix, word_idx = sentiment.load_embeddings(FLAGS.embedding_file)

  train_file = os.path.join(FLAGS.tree_dir, 'train.txt')
  print('loading training trees from %s' % train_file)
  train_trees = sentiment.load_trees(train_file)

  dev_file = os.path.join(FLAGS.tree_dir, 'dev.txt')
  print('loading dev trees from %s' % dev_file)
  dev_trees = sentiment.load_trees(dev_file)

  with tf.Session() as sess:
    print('creating the model')
    keep_prob = tf.placeholder_with_default(1.0, [])
    train_feed_dict = {keep_prob: FLAGS.keep_prob}
    word_embedding = sentiment.create_embedding(weight_matrix)
    compiler, metrics = sentiment.create_model(
        word_embedding, word_idx, FLAGS.lstm_num_units, keep_prob)
    loss = tf.reduce_sum(compiler.metric_tensors['all_loss'])
    opt = tf.train.AdagradOptimizer(FLAGS.learning_rate)
    grads_and_vars = opt.compute_gradients(loss)
    found = 0
    for i, (grad, var) in enumerate(grads_and_vars):
      if var == word_embedding.weights:
        found += 1
        grad = tf.scalar_mul(FLAGS.embedding_learning_rate_factor, grad)
        grads_and_vars[i] = (grad, var)
    assert found == 1  # internal consistency check
    train = opt.apply_gradients(grads_and_vars)
    saver = tf.train.Saver()

    print('initializing tensorflow')
    sess.run(tf.global_variables_initializer())

    with compiler.multiprocessing_pool():
      print('training the model')
      train_set = compiler.build_loom_inputs(train_trees)
      dev_feed_dict = compiler.build_feed_dict(dev_trees)
      dev_hits_best = 0.0
      for epoch, shuffled in enumerate(td.epochs(train_set, FLAGS.epochs), 1):
        train_loss = 0.0
        for batch in td.group_by_batches(shuffled, FLAGS.batch_size):
          train_feed_dict[compiler.loom_input_tensor] = batch
          _, batch_loss = sess.run([train, loss], train_feed_dict)
          train_loss += batch_loss
        dev_metrics = sess.run(metrics, dev_feed_dict)
        dev_loss = dev_metrics['all_loss']
        dev_accuracy = ['%s: %.2f' % (k, v * 100) for k, v in
                        sorted(dev_metrics.items()) if k.endswith('hits')]
        print('epoch:%4d, train_loss: %.3e, dev_loss: %.3e, dev_accuracy: [%s]'
              % (epoch, train_loss, dev_loss, ' '.join(dev_accuracy)))
        dev_hits = dev_metrics['root_hits']
        if dev_hits > dev_hits_best:
          dev_hits_best = dev_hits
          save_path = saver.save(sess, FLAGS.checkpoint_base, global_step=epoch)
          print('model saved in file: %s' % save_path)
Author: wangbosdqd | Project: fold | Lines: 57 | Source: train.py

Example 11: _add_focal_losses

  def _add_focal_losses(self, sigma_rpn=3.0):
    with tf.variable_scope('loss_' + self._tag) as scope:
      # RPN, class loss
      rpn_cls_score = tf.reshape(self._predictions['rpn_cls_score_reshape'], [-1, 2])
      rpn_label = tf.reshape(self._anchor_targets['rpn_labels'], [-1])
      rpn_select = tf.where(tf.not_equal(rpn_label, -1))
      rpn_cls_score = tf.reshape(tf.gather(rpn_cls_score, rpn_select), [-1, 2])
      rpn_label = tf.reshape(tf.gather(rpn_label, rpn_select), [-1])
      rpn_cross_entropy = tf.reduce_mean(
        tf.nn.sparse_softmax_cross_entropy_with_logits(logits=rpn_cls_score, labels=rpn_label))

      # RPN, bbox loss
      rpn_bbox_pred = self._predictions['rpn_bbox_pred']
      rpn_bbox_targets = self._anchor_targets['rpn_bbox_targets']
      rpn_bbox_inside_weights = self._anchor_targets['rpn_bbox_inside_weights']
      rpn_bbox_outside_weights = self._anchor_targets['rpn_bbox_outside_weights']

      rpn_loss_box = self._smooth_l1_loss(rpn_bbox_pred, rpn_bbox_targets, rpn_bbox_inside_weights,
                                          rpn_bbox_outside_weights, sigma=sigma_rpn, dim=[1, 2, 3])

      # RCNN, class loss
      alpha_scale = 0.25
      gamma = 2
      epsilon = 1e-8
      cls_score = self._predictions["cls_score"]
      label = tf.reshape(self._proposal_targets["labels"], [-1])
      label = tf.one_hot(label, depth=self._num_classes)
      cls_pred = tf.nn.sigmoid(cls_score)
      predictions_pt = tf.where(tf.equal(label, 1), cls_pred, 1-cls_pred)
      alpha_t = tf.ones_like(label, dtype=tf.float32)
      alpha_t = tf.scalar_mul(alpha_scale, alpha_t)
      alpha_t = tf.where(tf.equal(label, 1.0), alpha_t, 1. - alpha_t)
      cross_entropy = tf.reduce_mean(-alpha_t*tf.pow(1 - predictions_pt, gamma)*tf.log(predictions_pt + epsilon))

      # RCNN, bbox loss
      bbox_pred = self._predictions['bbox_pred']
      bbox_targets = self._proposal_targets['bbox_targets']
      bbox_inside_weights = self._proposal_targets['bbox_inside_weights']
      bbox_outside_weights = self._proposal_targets['bbox_outside_weights']

      loss_box = self._smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights)

      self._losses['cross_entropy'] = cross_entropy
      self._losses['loss_box'] = loss_box
      self._losses['rpn_cross_entropy'] = rpn_cross_entropy
      self._losses['rpn_loss_box'] = rpn_loss_box

      self._losses['rpn_loss'] = rpn_loss_box + rpn_cross_entropy
      self._losses['class_loss'] = cross_entropy + loss_box
      loss = cross_entropy + loss_box + rpn_cross_entropy + rpn_loss_box
      self._losses['total_loss'] = loss

      self._event_summaries.update(self._losses)

    return loss
Author: jacke121 | Project: tf_rfcn | Lines: 55 | Source: network.py

Example 12: create_cost

def create_cost(center, xy, width, persitaltic, bend, bend_profiles, contour, contour_normals, length, weight_spacing = 1.0, weight_bending = 1.0, gamma = 1.0, kappa = 2.0):

  c = tf.add(center, xy);
  
  #tangent and normals  
  t  = create_tangent(c);
  tn = create_normalize_tangent(t);
  ta = create_average_tangent(tn);
  nl = create_normal(ta);

  #bend the center
  c = create_bend(center, nl, bend, bend_profiles);

  #peristaltically move the center
  c = create_peristaltic(c, ta, persitaltic);

  #tangents
  t  = create_tangent(c);
  tn = create_normalize_tangent(t);
  ta = create_average_tangent(tn);
  nl = create_normal(ta);
  nr = tf.scalar_mul(-1.0, nl);  
  l,r = create_left_right(c,width,nl);

  cost_left = create_cost_soft_min_aligned_distance(l, contour, nl, contour_normals, k = kappa, gamma = gamma);
  cost_right= create_cost_soft_min_aligned_distance(r, contour, nr, contour_normals, k = kappa, gamma = gamma);
  cost = tf.add(cost_left, cost_right);
  
  #spacing and bending
  if weight_spacing != 0:
    cost_spacing = tf.scalar_mul(weight_spacing, create_cost_spacing(t, length));
    cost = tf.add(cost, cost_spacing);
  else:
    cost_spacing = tf.constant(0);
  
  if weight_bending != 0:
    cost_bending = tf.scalar_mul(weight_bending, create_cost_bending(tn));
    cost = tf.add(cost, cost_bending);
  else:
    cost_bending = tf.constant(0);
  
  return (cost, cost_left, cost_right, cost_spacing, cost_bending, c, l, r, nl);  
Author: ChristophKirst | Project: CElegansBehaviour | Lines: 42 | Source: machine_vision_d.py

Example 13: focal_loss3

def focal_loss3(cls_score, label, num_classes):
    alpha_scale = 0.25
    gamma = 2
    epsilon = 1e-8
    label = tf.one_hot(label, depth=num_classes)
    cls_pred = tf.nn.sigmoid(cls_score)
    predictions_pt = tf.where(tf.equal(label, 1), cls_pred, 1 - cls_pred)
    alpha_t = tf.ones_like(label, dtype=tf.float32)
    alpha_t = tf.scalar_mul(alpha_scale, alpha_t)
    alpha_t = tf.where(tf.equal(label, 1.0), alpha_t, 1. - alpha_t)
    losses = tf.reduce_mean(-alpha_t * tf.pow(1 - predictions_pt, gamma) * tf.log(predictions_pt + epsilon), axis=1)
    return losses
Author: jacke121 | Project: tf_rfcn | Lines: 12 | Source: focal_loss_test.py

Example 14: _compute_update

  def _compute_update(self, param, grad, state):
    """Compute updates of parameters."""

    # get the learning rate at the current index, if the index
    # is greater than the number of available learning rates,
    # use the last one
    index = tf.minimum(state["itr"], self.max_index)
    learning_rate = tf.gather(self.learning_rates, index)

    # update the parameters: parameter - learning_rate * gradient
    updated_param = param - tf.scalar_mul(learning_rate, grad)

    return updated_param, {"itr": state["itr"] + 1}
Author: ALISCIFP | Project: models | Lines: 13 | Source: learning_rate_schedule.py
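
The update above is plain gradient descent with a scheduled step size: at iteration t the parameter moves against the gradient by the t-th learning rate, clamped to the last available entry once t exceeds max_index:

\[
\theta_{t+1} = \theta_t - \eta_{\min(t,\,T)}\,\nabla_{\theta} L,
\]

where η_0, ..., η_T are self.learning_rates and T = self.max_index.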

Example 15: entropy_matrix

def entropy_matrix(graph, P):
    """

    :param graph:
    :param P:
    :return:
    """
    with graph.as_default():
        shape = P.get_shape().as_list()
        one_diagonal = tf.diag(diagonal=[1.0 for _ in range(shape[0])])
        P_mod = tf.add(P, one_diagonal)
        H = tf.reduce_sum(tf.scalar_mul(-1.0, tf.mul(P, tf.log(P_mod))), 1)  # tf.mul is the pre-TF-1.0 name of tf.multiply
    return H
Author: shkr | Project: tensorflow_examples | Lines: 13 | Source: tSNE.py
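
Row-wise, the graph above computes the Shannon entropy of the affinity matrix P; the identity matrix added inside the log keeps the diagonal terms finite (for a t-SNE-style P with zero diagonal they contribute 0 · log 1 = 0):

\[
H_i = -\sum_j P_{ij}\,\log\bigl(P_{ij} + \delta_{ij}\bigr)
\]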


Note: The tensorflow.scalar_mul examples in this article were compiled by 纯净天空 from GitHub, MSDocs and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. When redistributing or using the code, please refer to the license of the corresponding project. Do not reproduce this article without permission.