

Python tensorflow.greater_equal Function Code Examples

This article collects typical usage examples of the tensorflow.greater_equal function in Python. If you have been wondering what exactly greater_equal does, how to call it, and what it looks like in real code, the curated examples below should help.


The 15 code examples of greater_equal below are drawn from open-source projects and are sorted by popularity by default.
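
Before the project examples, here is a minimal self-contained sketch of what tf.greater_equal itself does, written in the same TensorFlow 1.x style as the examples below: it compares two tensors element-wise, broadcasts like other binary ops, and returns a bool tensor.

import tensorflow as tf

# Element-wise comparison x >= y; broadcasting applies, result dtype is bool.
x = tf.constant([1, 2, 3, 4])
y = tf.constant(3)
result = tf.greater_equal(x, y)

with tf.Session() as sess:
    print(sess.run(result))  # [False False  True  True]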

Example 1: add_volume_iou_metrics

def add_volume_iou_metrics(inputs, outputs):
  """Computes the per-instance volume IOU.

  Args:
    inputs: Input dictionary of the voxel generation model.
    outputs: Output dictionary returned by the voxel generation model.

  Returns:
    names_to_values: metrics->values (dict).
    names_to_updates: metrics->ops (dict).

  """
  names_to_values = dict()
  names_to_updates = dict()
  labels = tf.greater_equal(inputs['voxels'], 0.5)
  predictions = tf.greater_equal(outputs['voxels_1'], 0.5)
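  # Re-encode the booleans as integer class ids (labels: True -> 1, False -> 2;
  # predictions: True -> 1, False -> 3) so that only genuinely matching
  # foreground voxels share a class id in the mean-IOU computation below.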
  labels = 2 - tf.to_int32(labels)
  predictions = 3 - tf.to_int32(predictions) * 2
  tmp_values, tmp_updates = tf.metrics.mean_iou(
      labels=labels,
      predictions=predictions,
      num_classes=3)
  names_to_values['volume_iou'] = tmp_values * 3.0
  names_to_updates['volume_iou'] = tmp_updates
  return names_to_values, names_to_updates
Developer: ameerellaboudy, Project: models, Lines: 25, Source: metrics.py

Example 2: _has_foreground_and_background_in_first_frame_2

def _has_foreground_and_background_in_first_frame_2(label,
                                                    decoder_output_stride):
  """Checks if the labels have foreground and background in the first frame.

  Second attempt, this time we use the actual output dimension for resizing.

  Args:
    label: Label tensor of shape [num_frames, height, width, 1].
    decoder_output_stride: Integer, the stride of the decoder output.

  Returns:
    Boolean, whether the labels have foreground and background in the first
      frame.
  """
  h, w = train_utils.resolve_shape(label)[1:3]
  h_sub = model.scale_dimension(h, 1.0 / decoder_output_stride)
  w_sub = model.scale_dimension(w, 1.0 / decoder_output_stride)
  label_downscaled = tf.squeeze(
      tf.image.resize_nearest_neighbor(label[0, tf.newaxis], [h_sub, w_sub],
                                       align_corners=True), axis=0)
  is_bg = tf.equal(label_downscaled, 0)
  is_fg = tf.logical_not(is_bg)
  # Just using reduce_any was not robust enough, so let's make sure the count
  # is above MIN_LABEL_COUNT.
  fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32))
  bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32))
  has_fg = tf.greater_equal(fg_count, MIN_LABEL_COUNT)
  has_bg = tf.greater_equal(bg_count, MIN_LABEL_COUNT)
  return tf.logical_and(has_bg, has_fg)
Developer: Exscotticus, Project: models, Lines: 29, Source: video_input_generator.py
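
For intuition, here is a small self-contained sketch of the same count-based check on a toy downscaled label; MIN_LABEL_COUNT is assumed to be 1 for the demo.

import tensorflow as tf

MIN_LABEL_COUNT = 1  # assumed value for this sketch
label_downscaled = tf.constant([[0, 0], [1, 0]])  # toy first-frame labels
is_bg = tf.equal(label_downscaled, 0)
is_fg = tf.logical_not(is_bg)
fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32))
bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32))
ok = tf.logical_and(tf.greater_equal(fg_count, MIN_LABEL_COUNT),
                    tf.greater_equal(bg_count, MIN_LABEL_COUNT))

with tf.Session() as sess:
    print(sess.run(ok))  # True: at least one foreground and one background pixel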

Example 3: getReward_touch

def getReward_touch(objCoordinates, sampled_locs, numObjsPresented, objSize, batch_size):
    # the (0, 0) image corner in MNIST coordinates; glimpses landing within
    # objSize of it are not counted as touches below
    corner = tf.zeros((2,), dtype=tf.float32, name=None)
    # preallocate the reward and loop over all examples in the batch:
    # reward = np.zeros(batch_size)
    # for b in xrange(batch_size):
    b = 0
    objCoords_b = objCoordinates[b,:,:]
    sampled_locs_b = sampled_locs[b,:,:]
    numObjsPres_b = numObjsPresented[b]

    nObjTouched = 0
    # for the b-th example in the batch, loop over all objects
    for j in xrange(maxNumObj):
        objCoords_cur = objCoords_b[j,:]

        nTimesObjTouched = 0
        # for the j-th object, loop over all glimpses to determine whether it is fixated
        for i in xrange(nGlimpses):
            sampledCoord_cur = toMnistCoordinates_tf(sampled_locs_b[i,:], img_size)
            l2Diff_obj = l2distance(objCoords_cur, sampledCoord_cur)
            l2Diff_corner = l2distance(corner, sampledCoord_cur)
            isTouchingObj = tf.less_equal(l2Diff_obj, objSize)
            isNotTouchingCorner = tf.greater_equal(l2Diff_corner, objSize)
            # true if the current glimpse is fixated on an object
            tempTouchFlag = tf.cast(tf.logical_and(isTouchingObj, isNotTouchingCorner), tf.int32)

            nTimesObjTouched = nTimesObjTouched + tempTouchFlag

        # for the b-th example in the batch, if all objects are touched, then reward = 1, else reward = 0
        nObjTouched = nObjTouched + tf.cast(tf.greater_equal(nTimesObjTouched,1), tf.int32)

    R_bth = tf.equal(nObjTouched, tf.cast(numObjsPres_b, tf.int32))

    return R_bth
Developer: jlindsey15, Project: mathCognition_RAM, Lines: 35, Source: ram_touch.py

Example 4: add_dyprune

def add_dyprune(weights):
    crate = config.crate[weights.name[:-2]]  # hyperparameter c: per-layer pruning rate
    prune_mask = tf.Variable(tf.ones_like(weights), name=weights.name[:-2] + 'mask', trainable=False)

    # calculate the mask from the masked mean and standard deviation of the weights
    mean = tf.divide(tf.reduce_sum(tf.multiply(tf.abs(weights), prune_mask)), tf.reduce_sum(prune_mask))
    var = tf.multiply(weights, prune_mask)
    var = tf.square(var)
    mean_q = tf.square(mean) * tf.reduce_sum(prune_mask)
    var = tf.reduce_sum(var) - mean_q
    var = tf.divide(var, tf.reduce_sum(prune_mask))
    var = tf.sqrt(var)
    t1_lower = (mean + var * crate) * 0.25  # hyperparameter a
    t1_upper = (mean + var * crate) * 0.45  # hyperparameter b

    indicator_lower1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_lower)
    indicator_upper1 = tf.greater_equal(tf.abs(weights), tf.ones_like(weights) * t1_upper)
    indicator_matrix1 = tf.greater_equal(prune_mask, tf.zeros_like(weights))
    indicator_matrix1 = tf.logical_and(indicator_matrix1, indicator_lower1)
    indicator_matrix1 = tf.logical_or(indicator_matrix1, indicator_upper1)
    indicator_matrix1 = tf.to_float(indicator_matrix1)
    update = prune_mask.assign(indicator_matrix1)

    prune_fc = tf.multiply(weights, prune_mask)
    return prune_fc
Developer: Ewenwan, Project: Project, Lines: 25, Source: densenetfinalDNS.py
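
For intuition, here is a toy self-contained sketch of the same mask update with made-up thresholds: t_lower and t_upper stand in for the t1_lower/t1_upper computed above, so no config dict is needed.

import tensorflow as tf

weights = tf.constant([0.05, -0.2, 0.6, -0.9])
mask = tf.Variable([1.0, 1.0, 1.0, 1.0])
t_lower, t_upper = 0.1, 0.5  # made-up thresholds for the sketch

# mirror the indicator logic above: (mask >= 0 and |w| >= t_lower) or |w| >= t_upper
keep = tf.logical_and(tf.greater_equal(mask, 0.0),
                      tf.greater_equal(tf.abs(weights), t_lower))
revive = tf.greater_equal(tf.abs(weights), t_upper)
new_mask = tf.to_float(tf.logical_or(keep, revive))

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(mask.assign(new_mask)))  # [0. 1. 1. 1.]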

Example 5: prune_completely_outside_window

def prune_completely_outside_window(boxlist, window, scope=None):
  """Prunes bounding boxes that fall completely outside of the given window.

  The function clip_to_window prunes bounding boxes that fall
  completely outside the window, but also clips any bounding boxes that
  partially overflow. This function does not clip partially overflowing boxes.

  Args:
    boxlist: a BoxList holding M_in boxes.
    window: a float tensor of shape [4] representing [ymin, xmin, ymax, xmax]
      of the window
    scope: name scope.

  Returns:
    pruned_boxlist: a new BoxList with all bounding boxes partially or fully in
      the window.
    valid_indices: a tensor with shape [M_out] indexing the valid bounding boxes
     in the input tensor.
  """
  with tf.name_scope(scope, 'PruneCompletelyOutsideWindow'):
    y_min, x_min, y_max, x_max = tf.split(
        value=boxlist.get(), num_or_size_splits=4, axis=1)
    win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
    coordinate_violations = tf.concat([
        tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
        tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
    ], 1)
    valid_indices = tf.reshape(
        tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
    return gather(boxlist, valid_indices), valid_indices
Developer: NoPointExc, Project: models, Lines: 30, Source: box_list_ops.py
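
The same pruning logic can be exercised without the BoxList wrapper; this self-contained toy (three boxes, a unit window) shows which indices survive.

import tensorflow as tf

boxes = tf.constant([[0.1, 0.1, 0.4, 0.4],    # fully inside
                     [0.9, 0.9, 1.2, 1.2],    # partially overflowing: kept
                     [1.1, 1.1, 1.5, 1.5]])   # completely outside: pruned
window = tf.constant([0.0, 0.0, 1.0, 1.0])    # [ymin, xmin, ymax, xmax]

y_min, x_min, y_max, x_max = tf.split(value=boxes, num_or_size_splits=4, axis=1)
win_y_min, win_x_min, win_y_max, win_x_max = tf.unstack(window)
coordinate_violations = tf.concat([
    tf.greater_equal(y_min, win_y_max), tf.greater_equal(x_min, win_x_max),
    tf.less_equal(y_max, win_y_min), tf.less_equal(x_max, win_x_min)
], 1)
valid_indices = tf.reshape(
    tf.where(tf.logical_not(tf.reduce_any(coordinate_violations, 1))), [-1])
pruned = tf.gather(boxes, valid_indices)

with tf.Session() as sess:
    print(sess.run(valid_indices))  # [0 1]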

Example 6: _has_foreground_and_background_in_first_frame

def _has_foreground_and_background_in_first_frame(label, subsampling_factor):
  """Checks if the labels have foreground and background in the first frame.

  Args:
    label: Label tensor of shape [num_frames, height, width, 1].
    subsampling_factor: Integer, the subsampling factor.

  Returns:
    Boolean, whether the labels have foreground and background in the first
      frame.
  """
  h, w = train_utils.resolve_shape(label)[1:3]
  label_downscaled = tf.squeeze(
      tf.image.resize_nearest_neighbor(label[0, tf.newaxis],
                                       [h // subsampling_factor,
                                        w // subsampling_factor],
                                       align_corners=True),
      axis=0)
  is_bg = tf.equal(label_downscaled, 0)
  is_fg = tf.logical_not(is_bg)
  # Just using reduce_any was not robust enough, so let's make sure the count
  # is above MIN_LABEL_COUNT.
  fg_count = tf.reduce_sum(tf.cast(is_fg, tf.int32))
  bg_count = tf.reduce_sum(tf.cast(is_bg, tf.int32))
  has_fg = tf.greater_equal(fg_count, MIN_LABEL_COUNT)
  has_bg = tf.greater_equal(bg_count, MIN_LABEL_COUNT)
  return tf.logical_and(has_bg, has_fg)
Developer: Exscotticus, Project: models, Lines: 27, Source: video_input_generator.py

Example 7: build_graph

    def build_graph(self, nn_im_w, nn_im_h, num_colour_channels=3, weights=None, biases=None):
        num_outputs = 1  # a single scalar detection score
        self.nn_im_w = nn_im_w
        self.nn_im_h = nn_im_h

        if weights is None:
            weights = [None, None, None, None, None]
        if biases is None:
            biases = [None, None, None, None, None]

        with tf.device('/cpu:0'):
            # Placeholder variables for the input image and output images
            self.x = tf.placeholder(tf.float32, shape=[None, nn_im_w*nn_im_h*3])
            self.y_ = tf.placeholder(tf.float32, shape=[None, num_outputs])
            self.threshold = tf.placeholder(tf.float32)

            # Build the convolutional and pooling layers
            conv1_output_channels = 32
            conv2_output_channels = 16
            conv3_output_channels = 8

            conv_layer_1_input = tf.reshape(self.x, [-1, nn_im_h, nn_im_w, num_colour_channels]) #The resized input image
            self.build_conv_layer(conv_layer_1_input, num_colour_channels, conv1_output_channels, initial_weights=weights[0], initial_biases=biases[0]) # layer 1
            self.build_conv_layer(self.layers[0][0], conv1_output_channels, conv2_output_channels, initial_weights=weights[1], initial_biases=biases[1])# layer 2
            self.build_conv_layer(self.layers[1][0], conv2_output_channels, conv3_output_channels, initial_weights=weights[2], initial_biases=biases[2])# layer 3

            # Build the fully connected layer
            convnet_output_w = nn_im_w//8
            convnet_output_h = nn_im_h//8

            fully_connected_layer_input = tf.reshape(self.layers[2][0], [-1, convnet_output_w * convnet_output_h * conv3_output_channels])
            self.build_fully_connected_layer(fully_connected_layer_input, convnet_output_w, convnet_output_h, conv3_output_channels, initial_weights=weights[3], initial_biases=biases[3])

            # The dropout stage and readout layer
            self.keep_prob, self.h_drop = self.dropout(self.layers[3][0])
            self.y_conv,_,_ = self.build_readout_layer(self.h_drop, num_outputs, initial_weights=weights[4], initial_biases=biases[4])

            self.mean_error =  tf.sqrt(tf.reduce_mean(tf.square(self.y_ - self.y_conv)))
            self.train_step = tf.train.AdamOptimizer(1e-4).minimize(self.mean_error)

            self.accuracy = (1.0 - tf.reduce_mean(tf.abs(self.y_ - tf.round(self.y_conv))))


            positive_examples = tf.greater_equal(self.y_, 0.5)
            negative_examples = tf.logical_not(positive_examples)
            positive_classifications = tf.greater_equal(self.y_conv, self.threshold)
            negative_classifications = tf.logical_not(positive_classifications)

            self.true_positive = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, positive_classifications),tf.int32)) # count the examples that are positive and classified as positive
            self.false_positive = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, positive_classifications),tf.int32)) # count the examples that are negative but classified as positive

            self.true_negative = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, negative_classifications),tf.int32)) # count the examples that are negative and classified as negative
            self.false_negative = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, negative_classifications),tf.int32)) # count the examples that are positive but classified as negative

            self.positive_count = tf.reduce_sum(tf.cast(positive_examples, tf.int32)) # count the examples that are positive
            self.negative_count = tf.reduce_sum(tf.cast(negative_examples, tf.int32)) # count the examples that are negative

            self.confusion_matrix = tf.reshape(tf.stack([self.true_positive, self.false_positive, self.false_negative, self.true_negative]), [2, 2])

        self.sess.run(tf.initialize_all_variables())
Developer: JTKBowers, Project: CNN-people-detect, Lines: 60, Source: Model.py
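
The thresholded counting at the end of build_graph can be tried on toy data; this self-contained sketch (made-up labels and scores) reproduces the same tf.greater_equal / tf.logical_and bookkeeping.

import tensorflow as tf

y_true = tf.constant([[1.0], [0.0], [1.0], [0.0]])  # made-up labels
y_pred = tf.constant([[0.9], [0.4], [0.2], [0.8]])  # made-up scores
threshold = 0.5

positive_examples = tf.greater_equal(y_true, 0.5)
negative_examples = tf.logical_not(positive_examples)
positive_classifications = tf.greater_equal(y_pred, threshold)
negative_classifications = tf.logical_not(positive_classifications)

tp = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, positive_classifications), tf.int32))
fp = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, positive_classifications), tf.int32))
fn = tf.reduce_sum(tf.cast(tf.logical_and(positive_examples, negative_classifications), tf.int32))
tn = tf.reduce_sum(tf.cast(tf.logical_and(negative_examples, negative_classifications), tf.int32))
confusion = tf.reshape(tf.stack([tp, fp, fn, tn]), [2, 2])

with tf.Session() as sess:
    print(sess.run(confusion))  # [[1 1]
                                #  [1 1]]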

Example 8: pad_to_bounding_box

def pad_to_bounding_box(image, offset_height, offset_width, target_height,
                        target_width, pad_value):
  """Pads the given image with the given pad_value.

  Works like tf.image.pad_to_bounding_box, except it can pad the image
  with any given arbitrary pad value and also handle images whose sizes are not
  known during graph construction.

  Args:
    image: 3-D tensor with shape [height, width, channels]
    offset_height: Number of rows of padding to add on top.
    offset_width: Number of columns of padding to add on the left.
    target_height: Height of output image.
    target_width: Width of output image.
    pad_value: Value to pad the image tensor with.

  Returns:
    3-D tensor of shape [target_height, target_width, channels].

  Raises:
    ValueError: If the shape of image is incompatible with the offset_* or
    target_* arguments.
  """
  image_rank = tf.rank(image)
  image_rank_assert = tf.Assert(
      tf.equal(image_rank, 3),
      ['Wrong image tensor rank [Expected] [Actual]',
       3, image_rank])
  with tf.control_dependencies([image_rank_assert]):
    # tf.pad fills with zeros, so the image is shifted down by pad_value here
    # and shifted back after padding; the padded border then equals pad_value.
    image -= pad_value
  image_shape = tf.shape(image)
  height, width = image_shape[0], image_shape[1]
  target_width_assert = tf.Assert(
      tf.greater_equal(
          target_width, width),
      ['target_width must be >= width'])
  target_height_assert = tf.Assert(
      tf.greater_equal(target_height, height),
      ['target_height must be >= height'])
  with tf.control_dependencies([target_width_assert]):
    after_padding_width = target_width - offset_width - width
  with tf.control_dependencies([target_height_assert]):
    after_padding_height = target_height - offset_height - height
  offset_assert = tf.Assert(
      tf.logical_and(
          tf.greater_equal(after_padding_width, 0),
          tf.greater_equal(after_padding_height, 0)),
      ['target size not possible with the given target offsets'])

  height_params = tf.stack([offset_height, after_padding_height])
  width_params = tf.stack([offset_width, after_padding_width])
  channel_params = tf.stack([0, 0])
  with tf.control_dependencies([offset_assert]):
    paddings = tf.stack([height_params, width_params, channel_params])
  padded = tf.pad(image, paddings)
  return padded + pad_value
Developer: 812864539, Project: models, Lines: 56, Source: preprocess_utils.py
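
A hypothetical usage of the helper above: pad a toy 2x2 image to 4x4 with pad_value 128. The shape is known here, but the function also works when it is not.

import tensorflow as tf

image = tf.fill([2, 2, 3], 10.0)  # toy image
padded = pad_to_bounding_box(image, offset_height=1, offset_width=1,
                             target_height=4, target_width=4, pad_value=128.0)

with tf.Session() as sess:
    out = sess.run(padded)
    print(out.shape)     # (4, 4, 3)
    print(out[0, 0, 0])  # 128.0 on the border, while out[1, 1, 0] == 10.0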

Example 9: distort_image

def distort_image(image, input_width, input_height, output_side):
    """Applies random distortion to the image.
    The output image is output_side x output_side x 3
    """

    def random_crop_it():
        """Random crops image, after resizing it to output_side +10 x output_side+10"""
        resized_img = resize_bl(image, output_side + 10)
        return tf.random_crop(resized_img, [output_side, output_side, 3])

    def resize_it():
        """Resize the image using resize_bl"""
        return resize_bl(image, output_side)

    # If input_width >= output_side + 10 and input_height >= output_side + 10,
    # resize the image to (output_side + 10) x (output_side + 10) and randomly
    # crop it; otherwise just resize it.
    increased_output_side = tf.constant(output_side + 10, dtype=tf.int64)
    image = tf.cond(
        tf.logical_and(
            tf.greater_equal(input_width, increased_output_side),
            tf.greater_equal(input_height, increased_output_side)),
        random_crop_it, resize_it)

    # Apply random distortions to the image
    flipped_image = tf.image.random_flip_left_right(image)

    # randomize the order of the random distortions
    def fn1():
        """Applies random brightness, saturation, hue, contrast"""
        distorted_image = tf.image.random_brightness(
            flipped_image, max_delta=32. / 255.)
        distorted_image = tf.image.random_saturation(
            distorted_image, lower=0.5, upper=1.5)
        distorted_image = tf.image.random_hue(distorted_image, max_delta=0.2)
        distorted_image = tf.image.random_contrast(
            distorted_image, lower=0.5, upper=1.5)
        return distorted_image

    def fn2():
        """Applies random brightness, contrast, saturation, hue"""
        distorted_image = tf.image.random_brightness(
            flipped_image, max_delta=32. / 255.)
        distorted_image = tf.image.random_contrast(
            distorted_image, lower=0.5, upper=1.5)
        distorted_image = tf.image.random_saturation(
            distorted_image, lower=0.5, upper=1.5)
        distorted_image = tf.image.random_hue(distorted_image, max_delta=0.2)

        return distorted_image

    p_order = tf.random_uniform(
        shape=[], minval=0.0, maxval=1.0, dtype=tf.float32)
    distorted_image = tf.cond(tf.less(p_order, 0.5), fn1, fn2)
    distorted_image = tf.clip_by_value(distorted_image, 0.0, 1.0)
    return distorted_image
Developer: galeone, Project: pgnet, Lines: 56, Source: image_processing.py

Example 10: optimOp

def optimOp():
    def updateOptimOp():
        if self._full_stats_init:
            return tf.cond(tf.greater(self.factor_step, tf.convert_to_tensor(0)),
                           lambda: optim.apply_gradients(list(zip(u, varlist))),
                           tf.no_op)
        else:
            return optim.apply_gradients(list(zip(u, varlist)))
    if self._full_stats_init:
        return tf.cond(tf.greater_equal(self.stats_step, self._stats_accum_iter),
                       updateOptimOp, tf.no_op)
    else:
        return tf.cond(tf.greater_equal(self.sgd_step, self._cold_iter),
                       updateOptimOp, tf.no_op)
Developer: IcarusTan, Project: baselines, Lines: 10, Source: kfac.py

Example 11: _extract_proposal_features

  def _extract_proposal_features(self, preprocessed_inputs, scope):
    """Extracts first stage RPN features.

    Args:
      preprocessed_inputs: A [batch, height, width, channels] float32 tensor
        representing a batch of images.
      scope: A scope name.

    Returns:
      rpn_feature_map: A tensor with shape [batch, height, width, depth]
      activations: A dictionary mapping feature extractor tensor names to
        tensors

    Raises:
      InvalidArgumentError: If the spatial size of `preprocessed_inputs`
        (height or width) is less than 33.
      ValueError: If the created network is missing the required activation.
    """
    if len(preprocessed_inputs.get_shape().as_list()) != 4:
      raise ValueError('`preprocessed_inputs` must be 4 dimensional, got a '
                       'tensor of shape %s' % preprocessed_inputs.get_shape())
    shape_assert = tf.Assert(
        tf.logical_and(
            tf.greater_equal(tf.shape(preprocessed_inputs)[1], 33),
            tf.greater_equal(tf.shape(preprocessed_inputs)[2], 33)),
        ['image size must be at least 33 in both height and width.'])

    with tf.control_dependencies([shape_assert]):
      # Disables batchnorm for fine-tuning with smaller batch sizes.
      # TODO(chensun): Figure out if it is needed when image
      # batch size is bigger.
      with slim.arg_scope(
          resnet_utils.resnet_arg_scope(
              batch_norm_epsilon=1e-5,
              batch_norm_scale=True,
              weight_decay=self._weight_decay)):
        with tf.variable_scope(
            self._architecture, reuse=self._reuse_weights) as var_scope:
          _, activations = self._resnet_model(
              preprocessed_inputs,
              num_classes=None,
              is_training=self._train_batch_norm,
              global_pool=False,
              output_stride=self._first_stage_features_stride,
              spatial_squeeze=False,
              scope=var_scope)

    handle = scope + '/%s/block3' % self._architecture
    return activations[handle], activations
Developer: ALISCIFP, Project: models, Lines: 49, Source: faster_rcnn_resnet_v1_feature_extractor.py

Example 12: _crop

def _crop(image, offset_height, offset_width, crop_height, crop_width):
  """Crops the given image using the provided offsets and sizes.

  Note that the method doesn't assume we know the input image size but it does
  assume we know the input image rank.

  Args:
    image: an image of shape [height, width, channels].
    offset_height: a scalar tensor indicating the height offset.
    offset_width: a scalar tensor indicating the width offset.
    crop_height: the height of the cropped image.
    crop_width: the width of the cropped image.

  Returns:
    The cropped image.

  Raises:
    ValueError: if `image` doesn't have rank of 3.
    InvalidArgumentError: if the rank is not 3 or if the image dimensions are
      less than the crop size.
  """
  original_shape = tf.shape(image)

  if len(image.get_shape().as_list()) != 3:
    raise ValueError('input must have rank of 3')
  original_channels = image.get_shape().as_list()[2]

  rank_assertion = tf.Assert(
      tf.equal(tf.rank(image), 3),
      ['Rank of image must be equal to 3.'])
  with tf.control_dependencies([rank_assertion]):
    cropped_shape = tf.stack([crop_height, crop_width, original_shape[2]])

  size_assertion = tf.Assert(
      tf.logical_and(
          tf.greater_equal(original_shape[0], crop_height),
          tf.greater_equal(original_shape[1], crop_width)),
      ['Crop size greater than the image size.'])

  offsets = tf.to_int32(tf.stack([offset_height, offset_width, 0]))

  # Use tf.slice instead of crop_to_bounding_box as it accepts tensors to
  # define the crop size.
  with tf.control_dependencies([size_assertion]):
    image = tf.slice(image, offsets, cropped_shape)
  image = tf.reshape(image, cropped_shape)
  image.set_shape([crop_height, crop_width, original_channels])
  return image
Developer: Exscotticus, Project: models, Lines: 48, Source: preprocess_utils.py
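
A hypothetical usage of _crop above: take the 2x2 patch at offset (1, 1) from a toy 4x4 single-channel image.

import tensorflow as tf

image = tf.reshape(tf.range(16.0), [4, 4, 1])  # values 0..15, row-major
patch = _crop(image, offset_height=1, offset_width=1, crop_height=2, crop_width=2)

with tf.Session() as sess:
    print(sess.run(patch[:, :, 0]))  # [[ 5.  6.]
                                     #  [ 9. 10.]]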

Example 13: testOneOpCond

  def testOneOpCond(self):
    with self.test_session():
      v = tf.Variable(0)
      c = tf.convert_to_tensor(0)
      one = tf.convert_to_tensor(1)
      two = tf.convert_to_tensor(2)
      p = tf.greater_equal(c, 1)

      def a():
        return tf.assign(v, one)

      def b():
        return tf.assign(v, two)

      i = tf.cond(p, a, b)
      self.assertTrue(isinstance(i, tf.Tensor))
      tf.initialize_all_variables().run()

      self.assertEqual(0, v.eval())

      # True case: c = 2 is >= 1, v is set to 1.
      self.assertEqual(1, i.eval(feed_dict={c.name: 2}))
      self.assertEqual(1, v.eval())

      # False case: c = 0 is not >= 1, v is set to 2.
      self.assertEqual(2, i.eval(feed_dict={c.name: 0}))
      self.assertEqual(2, v.eval())
Developer: hypatiad, Project: tensorflow, Lines: 27, Source: control_flow_ops_py_test.py

Example 14: testImbalancedWithExampleWeights

 def testImbalancedWithExampleWeights(self):
   # Set up test data: 1 positive and 3 negative examples, expressed as two
   # protos with example weights [3.0, 1.0].
   example_protos = [
       make_example_proto(
           {'age': [0],
            'gender': [0]}, 0),
       make_example_proto(
           {'age': [1],
            'gender': [1]}, 1),
   ]
   example_weights = [3.0, 1.0]
   with self._single_threaded_test_session():
     examples = make_example_dict(example_protos, example_weights)
     variables = make_variable_dict(1, 1)
     options = dict(symmetric_l2_regularization=0.25,
                    symmetric_l1_regularization=0,
                    loss_type='logistic_loss')
     tf.initialize_all_variables().run()
     lr = SdcaModel(CONTAINER, examples, variables, options)
     unregularized_loss = lr.unregularized_loss(examples)
     loss = lr.regularized_loss(examples)
     prediction = lr.predictions(examples)
     lr.minimize().run()
     self.assertAllClose(0.266189, unregularized_loss.eval(),
                         rtol=3e-2, atol=3e-2)
     self.assertAllClose(0.571912, loss.eval(), rtol=3e-2, atol=3e-2)
     predicted_labels = tf.cast(
         tf.greater_equal(prediction,
                          tf.ones_like(prediction) * 0.5), tf.float32)
     self.assertAllEqual([0, 1], predicted_labels.eval())
Developer: 4colors, Project: tensorflow, Lines: 30, Source: sdca_ops_test.py

Example 15: testSimpleLogistic

 def testSimpleLogistic(self):
   # Setup test data
   example_protos = [
       make_example_proto(
           {'age': [0],
            'gender': [0]}, 0),
       make_example_proto(
           {'age': [1],
            'gender': [1]}, 1),
   ]
   example_weights = [1.0, 1.0]
   with self._single_threaded_test_session():
     examples = make_example_dict(example_protos, example_weights)
     variables = make_variable_dict(1, 1)
     options = dict(symmetric_l2_regularization=0.5,
                    symmetric_l1_regularization=0,
                    loss_type='logistic_loss',
                    prior=0.0)
     tf.initialize_all_variables().run()
     lr = SdcaModel(CONTAINER, examples, variables, options)
     unregularized_loss = lr.unregularized_loss(examples)
     loss = lr.regularized_loss(examples)
     prediction = lr.predictions(examples)
     self.assertAllClose(0.693147, unregularized_loss.eval())
     self.assertAllClose(0.693147, loss.eval())
     lr.minimize().run()
     self.assertAllClose(0.395226, unregularized_loss.eval(),
                         rtol=3e-2, atol=3e-2)
     self.assertAllClose(0.657446, loss.eval(),
                         rtol=3e-2, atol=3e-2)
     predicted_labels = tf.cast(
         tf.greater_equal(prediction,
                          tf.ones_like(prediction) * 0.5), tf.float32)
     self.assertAllEqual([0, 1], predicted_labels.eval())
Developer: 4colors, Project: tensorflow, Lines: 34, Source: sdca_ops_test.py


Note: The tensorflow.greater_equal examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and any use or redistribution should follow the corresponding project's license. Please do not reproduce without permission.