

Python common.OUTPUT_TYPE attribute: code examples

This article collects typical usage examples of the deeplab.common.OUTPUT_TYPE attribute in Python. If you are wondering what common.OUTPUT_TYPE is for or how to use it, the examples selected here should help. You can also explore further usage examples from the deeplab.common module where the attribute is defined.


The following presents 12 code examples of the common.OUTPUT_TYPE attribute, ordered by popularity.
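Before diving into the examples: in the DeepLab code base, common.OUTPUT_TYPE is a string constant (nominally 'semantic') used as the key for per-task dictionaries such as outputs_to_num_classes and the prediction/multi-scale-logits maps. Below is a minimal plain-Python sketch of that pattern; the value 'semantic' is taken from deeplab/common.py and is assumed here rather than imported.

# Minimal sketch: common.OUTPUT_TYPE is assumed to be the string 'semantic',
# as defined in deeplab/common.py; deeplab itself is not imported here.
OUTPUT_TYPE = 'semantic'

# Map each output head to its number of classes (21 for PASCAL VOC).
outputs_to_num_classes = {OUTPUT_TYPE: 21}

# Prediction and multi-scale-logits dictionaries are indexed by the same key.
predictions = {OUTPUT_TYPE: 'per-pixel class ids would go here'}
print(outputs_to_num_classes[OUTPUT_TYPE])  # 21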

Example 1: get_params

# Required import: from deeplab import common [as alias]
# Or: from deeplab.common import OUTPUT_TYPE [as alias]
def get_params(ignore_label, num_classes, num_batches_per_epoch):
  """Build a dict of parameters from command line args."""
  params = {k: FLAGS[k].value for k in FLAGS}

  outputs_to_num_classes = {common.OUTPUT_TYPE: num_classes}
  model_options = common.ModelOptions(
      outputs_to_num_classes, FLAGS.crop_size, FLAGS.atrous_rates,
      FLAGS.output_stride,
      preprocessed_images_dtype=(
          tf.bfloat16 if params['use_bfloat16'] else tf.float32))
  params.update({'ignore_label': ignore_label,
                 'model_options': model_options,
                 'num_batches_per_epoch': num_batches_per_epoch,
                 'num_classes': num_classes,
                 'outputs_to_num_classes': outputs_to_num_classes})

  tf.logging.debug('Params: ')
  for k, v in sorted(params.items()):
    tf.logging.debug('%s: %s', k, v)
  return params 
Author: mlperf, Project: training_results_v0.5, Lines: 22, Source file: main.py

Example 2: testDeepcopy

# Required import: from deeplab import common [as alias]
# Or: from deeplab.common import OUTPUT_TYPE [as alias]
def testDeepcopy(self):
    num_classes = 21
    model_options = common.ModelOptions(
        outputs_to_num_classes={common.OUTPUT_TYPE: num_classes})
    model_options_new = copy.deepcopy(model_options)
    self.assertEqual((model_options_new.
                      outputs_to_num_classes[common.OUTPUT_TYPE]),
                     num_classes)

    num_classes_new = 22
    model_options_new.outputs_to_num_classes[common.OUTPUT_TYPE] = (
        num_classes_new)
    self.assertEqual(model_options.outputs_to_num_classes[common.OUTPUT_TYPE],
                     num_classes)
    self.assertEqual((model_options_new.
                      outputs_to_num_classes[common.OUTPUT_TYPE]),
                     num_classes_new) 
Author: IBM, Project: MAX-Image-Segmenter, Lines: 19, Source file: common_test.py
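The test above verifies that copy.deepcopy of a ModelOptions instance yields an independent outputs_to_num_classes mapping. For reference, here is a minimal plain-Python sketch of the shallow-versus-deep copy distinction it relies on (no deeplab dependency):

import copy

options = {'outputs_to_num_classes': {'semantic': 21}}
shallow = copy.copy(options)      # nested dict is shared
deep = copy.deepcopy(options)     # nested dict is copied

options['outputs_to_num_classes']['semantic'] = 22
print(shallow['outputs_to_num_classes']['semantic'])  # 22 (shared)
print(deep['outputs_to_num_classes']['semantic'])     # 21 (independent)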

Example 3: _create_eval_metric

# Required import: from deeplab import common [as alias]
# Or: from deeplab.common import OUTPUT_TYPE [as alias]
def _create_eval_metric(features, labels, params):
  """Creates eval_metric for model_fn."""
  outputs_to_scales_to_logits = _build_network(
      features, tf.estimator.ModeKeys.EVAL, params)

  semantic_merged_logits = (
      outputs_to_scales_to_logits[common.OUTPUT_TYPE][_MERGED_LOGITS_SCOPE])

  def metric_fn(semantic_merged_logits, labels):
    """Creates metric_fn for TPUEstimatorSpec."""
    logits = tf.image.resize_bilinear(
        semantic_merged_logits, params['crop_size'], align_corners=True)
    predictions_with_shape = tf.argmax(logits, 3, output_type=tf.int32)
    predictions = tf.reshape(predictions_with_shape, shape=[-1])

    labels = tf.reshape(labels, shape=[-1])
    weights = tf.to_float(tf.not_equal(labels, params['ignore_label']))

    # Set ignore_label regions to label 0, because metrics.mean_iou requires
    # range of labels = [0, dataset.num_classes). Note the ignore_label regions
    # are not evaluated since the corresponding regions contain weights = 0.
    labels = tf.where(
        tf.equal(labels, params['ignore_label']), tf.zeros_like(labels), labels)

    return {
        'miou':
            tf.metrics.mean_iou(
                predictions, labels, params['num_classes'], weights=weights),
    }

  return metric_fn, [semantic_merged_logits, labels] 
Author: mlperf, Project: training_results_v0.5, Lines: 33, Source file: model.py
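Note that the function returns a (metric_fn, tensors) pair rather than calling metric_fn directly; in TF 1.x TPU code such a pair is typically passed to TPUEstimatorSpec's eval_metrics argument, and the framework later invokes metric_fn(*tensors) on the host. A plain-Python sketch of that calling convention (values are stand-ins; no TensorFlow involved):

def metric_fn(logits, labels):
    # A real metric_fn would compute mIoU from the tensors it receives.
    return {'miou': (logits, labels)}

# The estimator stores the pair and calls the function with the tensors later.
eval_metrics = (metric_fn, ['fake_logits', 'fake_labels'])
fn, tensors = eval_metrics
print(fn(*tensors))  # {'miou': ('fake_logits', 'fake_labels')}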

Example 4: testOutputsToNumClasses

# Required import: from deeplab import common [as alias]
# Or: from deeplab.common import OUTPUT_TYPE [as alias]
def testOutputsToNumClasses(self):
    num_classes = 21
    model_options = common.ModelOptions(
        outputs_to_num_classes={common.OUTPUT_TYPE: num_classes})
    self.assertEqual(model_options.outputs_to_num_classes[common.OUTPUT_TYPE],
                     num_classes) 
Author: IBM, Project: MAX-Image-Segmenter, Lines: 8, Source file: common_test.py

Example 5: _tower_loss

# Required import: from deeplab import common [as alias]
# Or: from deeplab.common import OUTPUT_TYPE [as alias]
def _tower_loss(iterator, num_of_classes, ignore_label, scope, reuse_variable):
  """Calculates the total loss on a single tower running the deeplab model.

  Args:
    iterator: An iterator of type tf.data.Iterator for images and labels.
    num_of_classes: Number of classes for the dataset.
    ignore_label: Ignore label for the dataset.
    scope: Unique prefix string identifying the deeplab tower.
    reuse_variable: If the variable should be reused.

  Returns:
     The total loss for a batch of data.
  """
  with tf.variable_scope(
      tf.get_variable_scope(), reuse=True if reuse_variable else None):
    _build_deeplab(iterator, {common.OUTPUT_TYPE: num_of_classes}, ignore_label)

  losses = tf.losses.get_losses(scope=scope)
  for loss in losses:
    tf.summary.scalar('Losses/%s' % loss.op.name, loss)

  regularization_loss = tf.losses.get_regularization_loss(scope=scope)
  tf.summary.scalar('Losses/%s' % regularization_loss.op.name,
                    regularization_loss)

  total_loss = tf.add_n([tf.add_n(losses), regularization_loss])
  return total_loss 
Author: IBM, Project: MAX-Image-Segmenter, Lines: 29, Source file: train.py

Example 6: _log_summaries

# Required import: from deeplab import common [as alias]
# Or: from deeplab.common import OUTPUT_TYPE [as alias]
def _log_summaries(input_image, label, num_of_classes, output):
  """Logs the summaries for the model.

  Args:
    input_image: Input image of the model. Its shape is [batch_size, height,
      width, channel].
    label: Label of the image. Its shape is [batch_size, height, width].
    num_of_classes: The number of classes of the dataset.
    output: Output of the model. Its shape is [batch_size, height, width].
  """
  # Add summaries for model variables.
  for model_var in tf.model_variables():
    tf.summary.histogram(model_var.op.name, model_var)

  # Add summaries for images, labels, semantic predictions.
  if FLAGS.save_summaries_images:
    tf.summary.image('samples/%s' % common.IMAGE, input_image)

    # Scale up summary image pixel values for better visualization.
    pixel_scaling = max(1, 255 // num_of_classes)
    summary_label = tf.cast(label * pixel_scaling, tf.uint8)
    tf.summary.image('samples/%s' % common.LABEL, summary_label)

    predictions = tf.expand_dims(tf.argmax(output, 3), -1)
    summary_predictions = tf.cast(predictions * pixel_scaling, tf.uint8)
    tf.summary.image('samples/%s' % common.OUTPUT_TYPE, summary_predictions) 
Author: IBM, Project: MAX-Image-Segmenter, Lines: 28, Source file: train.py
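A quick arithmetic sketch of the pixel_scaling trick used above: raw class ids (0..20 for 21 classes) are nearly indistinguishable from black in an 8-bit image, so they are spread across the 0..255 range before being written to the image summary (plain Python, no TensorFlow):

num_classes = 21
pixel_scaling = max(1, 255 // num_classes)  # 12
print([class_id * pixel_scaling for class_id in (0, 1, 10, 20)])  # [0, 12, 120, 240]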

Example 7: main

# Required import: from deeplab import common [as alias]
# Or: from deeplab.common import OUTPUT_TYPE [as alias]
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.logging.info('Prepare to export model to: %s', FLAGS.export_path)

  with tf.Graph().as_default():
    image, image_size, resized_image_size = _create_input_tensors()

    model_options = common.ModelOptions(
        outputs_to_num_classes={common.OUTPUT_TYPE: FLAGS.num_classes},
        crop_size=FLAGS.crop_size,
        atrous_rates=FLAGS.atrous_rates,
        output_stride=FLAGS.output_stride)

    if tuple(FLAGS.inference_scales) == (1.0,):
      tf.logging.info('Exported model performs single-scale inference.')
      predictions = model.predict_labels(
          image,
          model_options=model_options,
          image_pyramid=FLAGS.image_pyramid)
    else:
      tf.logging.info('Exported model performs multi-scale inference.')
      predictions = model.predict_labels_multi_scale(
          image,
          model_options=model_options,
          eval_scales=FLAGS.inference_scales,
          add_flipped_images=FLAGS.add_flipped_images)

    # Crop the valid regions from the predictions.
    semantic_predictions = tf.slice(
        predictions[common.OUTPUT_TYPE],
        [0, 0, 0],
        [1, resized_image_size[0], resized_image_size[1]])
    # Resize back the prediction to the original image size.
    def _resize_label(label, label_size):
      # Expand dimension of label to [1, height, width, 1] for resize operation.
      label = tf.expand_dims(label, 3)
      resized_label = tf.image.resize_images(
          label,
          label_size,
          method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
          align_corners=True)
      return tf.squeeze(resized_label, 3)
    semantic_predictions = _resize_label(semantic_predictions, image_size)
    semantic_predictions = tf.identity(semantic_predictions, name=_OUTPUT_NAME)

    saver = tf.train.Saver(tf.model_variables())

    tf.gfile.MakeDirs(os.path.dirname(FLAGS.export_path))
    freeze_graph.freeze_graph_with_def_protos(
        tf.get_default_graph().as_graph_def(add_shapes=True),
        saver.as_saver_def(),
        FLAGS.checkpoint_path,
        _OUTPUT_NAME,
        restore_op_name=None,
        filename_tensor_name=None,
        output_graph=FLAGS.export_path,
        clear_devices=True,
        initializer_nodes=None) 
Author: itsamitgoel, Project: Gun-Detector, Lines: 60, Source file: export_model.py

Example 8: _build_deeplab

# Required import: from deeplab import common [as alias]
# Or: from deeplab.common import OUTPUT_TYPE [as alias]
def _build_deeplab(inputs_queue, outputs_to_num_classes, ignore_label):
  """Builds a clone of DeepLab.

  Args:
    inputs_queue: A prefetch queue for images and labels.
    outputs_to_num_classes: A map from output type to the number of classes.
      For example, for the task of semantic segmentation with 21 semantic
      classes, we would have outputs_to_num_classes['semantic'] = 21.
    ignore_label: Ignore label.

  Returns:
    A map of maps from output_type (e.g., semantic prediction) to a
      dictionary of multi-scale logits names to logits. For each output_type,
      the dictionary has keys which correspond to the scales and values which
      correspond to the logits. For example, if `scales` equals [1.0, 1.5],
      then the keys would include 'merged_logits', 'logits_1.00' and
      'logits_1.50'.
  """
  samples = inputs_queue.dequeue()

  # Add name to input and label nodes so we can add to summary.
  samples[common.IMAGE] = tf.identity(samples[common.IMAGE], name=common.IMAGE)
  samples[common.LABEL] = tf.identity(samples[common.LABEL], name=common.LABEL)

  model_options = common.ModelOptions(
      outputs_to_num_classes=outputs_to_num_classes,
      crop_size=FLAGS.train_crop_size,
      atrous_rates=FLAGS.atrous_rates,
      output_stride=FLAGS.output_stride)
  outputs_to_scales_to_logits = model.multi_scale_logits(
      samples[common.IMAGE],
      model_options=model_options,
      image_pyramid=FLAGS.image_pyramid,
      weight_decay=FLAGS.weight_decay,
      is_training=True,
      fine_tune_batch_norm=FLAGS.fine_tune_batch_norm)

  # Add name to graph node so we can add to summary.
  outputs_to_scales_to_logits[common.OUTPUT_TYPE][model._MERGED_LOGITS_SCOPE] = (
      tf.identity(
          outputs_to_scales_to_logits[common.OUTPUT_TYPE][model._MERGED_LOGITS_SCOPE],
          name=common.OUTPUT_TYPE))

  for output, num_classes in six.iteritems(outputs_to_num_classes):
    train_utils.add_softmax_cross_entropy_loss_for_each_scale(
        outputs_to_scales_to_logits[output],
        samples[common.LABEL],
        num_classes,
        ignore_label,
        loss_weight=1.0,
        upsample_logits=FLAGS.upsample_logits,
        scope=output)

  return outputs_to_scales_to_logits 
Author: itsamitgoel, Project: Gun-Detector, Lines: 56, Source file: train.py

Example 9: _build_deeplab

# Required import: from deeplab import common [as alias]
# Or: from deeplab.common import OUTPUT_TYPE [as alias]
def _build_deeplab(iterator, outputs_to_num_classes, ignore_label):
  """Builds a clone of DeepLab.

  Args:
    iterator: An iterator of type tf.data.Iterator for images and labels.
    outputs_to_num_classes: A map from output type to the number of classes. For
      example, for the task of semantic segmentation with 21 semantic classes,
      we would have outputs_to_num_classes['semantic'] = 21.
    ignore_label: Ignore label.
  """
  samples = iterator.get_next()

  # Add name to input and label nodes so we can add to summary.
  samples[common.IMAGE] = tf.identity(samples[common.IMAGE], name=common.IMAGE)
  samples[common.LABEL] = tf.identity(samples[common.LABEL], name=common.LABEL)

  model_options = common.ModelOptions(
      outputs_to_num_classes=outputs_to_num_classes,
      crop_size=FLAGS.train_crop_size,
      atrous_rates=FLAGS.atrous_rates,
      output_stride=FLAGS.output_stride)

  outputs_to_scales_to_logits = model.multi_scale_logits(
      samples[common.IMAGE],
      model_options=model_options,
      image_pyramid=FLAGS.image_pyramid,
      weight_decay=FLAGS.weight_decay,
      is_training=True,
      fine_tune_batch_norm=FLAGS.fine_tune_batch_norm,
      nas_training_hyper_parameters={
          'drop_path_keep_prob': FLAGS.drop_path_keep_prob,
          'total_training_steps': FLAGS.training_number_of_steps,
      })

  # Add name to graph node so we can add to summary.
  output_type_dict = outputs_to_scales_to_logits[common.OUTPUT_TYPE]
  output_type_dict[model.MERGED_LOGITS_SCOPE] = tf.identity(
      output_type_dict[model.MERGED_LOGITS_SCOPE], name=common.OUTPUT_TYPE)

  for output, num_classes in six.iteritems(outputs_to_num_classes):
    train_utils.add_softmax_cross_entropy_loss_for_each_scale(
        outputs_to_scales_to_logits[output],
        samples[common.LABEL],
        num_classes,
        ignore_label,
        loss_weight=1.0,
        upsample_logits=FLAGS.upsample_logits,
        hard_example_mining_step=FLAGS.hard_example_mining_step,
        top_k_percent_pixels=FLAGS.top_k_percent_pixels,
        scope=output)

    # Log the summary
    _log_summaries(samples[common.IMAGE], samples[common.LABEL], num_classes,
                   output_type_dict[model.MERGED_LOGITS_SCOPE]) 
Author: IBM, Project: MAX-Image-Segmenter, Lines: 56, Source file: train.py

Example 10: main

# Required import: from deeplab import common [as alias]
# Or: from deeplab.common import OUTPUT_TYPE [as alias]
def main(unused_argv):
  tf.logging.set_verbosity(tf.logging.INFO)
  tf.logging.info('Prepare to export model to: %s', FLAGS.export_path)

  with tf.Graph().as_default():
    image, image_size, resized_image_size = _create_input_tensors()

    model_options = common.ModelOptions(
        outputs_to_num_classes={common.OUTPUT_TYPE: FLAGS.num_classes},
        crop_size=FLAGS.crop_size,
        atrous_rates=FLAGS.atrous_rates,
        output_stride=FLAGS.output_stride)

    if tuple(FLAGS.inference_scales) == (1.0,):
      tf.logging.info('Exported model performs single-scale inference.')
      predictions = model.predict_labels(
          image,
          model_options=model_options,
          image_pyramid=FLAGS.image_pyramid)
    else:
      tf.logging.info('Exported model performs multi-scale inference.')
      predictions = model.predict_labels_multi_scale(
          image,
          model_options=model_options,
          eval_scales=FLAGS.inference_scales,
          add_flipped_images=FLAGS.add_flipped_images)

    predictions = tf.cast(predictions[common.OUTPUT_TYPE], tf.float32)
    # Crop the valid regions from the predictions.
    semantic_predictions = tf.slice(
        predictions,
        [0, 0, 0],
        [1, resized_image_size[0], resized_image_size[1]])
    # Resize back the prediction to the original image size.
    def _resize_label(label, label_size):
      # Expand dimension of label to [1, height, width, 1] for resize operation.
      label = tf.expand_dims(label, 3)
      resized_label = tf.image.resize_images(
          label,
          label_size,
          method=tf.image.ResizeMethod.NEAREST_NEIGHBOR,
          align_corners=True)
      return tf.cast(tf.squeeze(resized_label, 3), tf.int32)
    semantic_predictions = _resize_label(semantic_predictions, image_size)
    semantic_predictions = tf.identity(semantic_predictions, name=_OUTPUT_NAME)

    saver = tf.train.Saver(tf.model_variables())

    tf.gfile.MakeDirs(os.path.dirname(FLAGS.export_path))
    freeze_graph.freeze_graph_with_def_protos(
        tf.get_default_graph().as_graph_def(add_shapes=True),
        saver.as_saver_def(),
        FLAGS.checkpoint_path,
        _OUTPUT_NAME,
        restore_op_name=None,
        filename_tensor_name=None,
        output_graph=FLAGS.export_path,
        clear_devices=True,
        initializer_nodes=None) 
Author: generalized-iou, Project: g-tensorflow-models, Lines: 61, Source file: export_model.py

Example 11: _build_deeplab

# Required import: from deeplab import common [as alias]
# Or: from deeplab.common import OUTPUT_TYPE [as alias]
def _build_deeplab(inputs_queue, outputs_to_num_classes, ignore_label):
  """Builds a clone of DeepLab.

  Args:
    inputs_queue: A prefetch queue for images and labels.
    outputs_to_num_classes: A map from output type to the number of classes.
      For example, for the task of semantic segmentation with 21 semantic
      classes, we would have outputs_to_num_classes['semantic'] = 21.
    ignore_label: Ignore label.

  Returns:
    A map of maps from output_type (e.g., semantic prediction) to a
      dictionary of multi-scale logits names to logits. For each output_type,
      the dictionary has keys which correspond to the scales and values which
      correspond to the logits. For example, if `scales` equals [1.0, 1.5],
      then the keys would include 'merged_logits', 'logits_1.00' and
      'logits_1.50'.
  """
  samples = inputs_queue.dequeue()

  # Add name to input and label nodes so we can add to summary.
  samples[common.IMAGE] = tf.identity(
      samples[common.IMAGE], name=common.IMAGE)
  samples[common.LABEL] = tf.identity(
      samples[common.LABEL], name=common.LABEL)

  model_options = common.ModelOptions(
      outputs_to_num_classes=outputs_to_num_classes,
      crop_size=FLAGS.train_crop_size,
      atrous_rates=FLAGS.atrous_rates,
      output_stride=FLAGS.output_stride)
  outputs_to_scales_to_logits = model.multi_scale_logits(
      samples[common.IMAGE],
      model_options=model_options,
      image_pyramid=FLAGS.image_pyramid,
      weight_decay=FLAGS.weight_decay,
      is_training=True,
      fine_tune_batch_norm=FLAGS.fine_tune_batch_norm)

  # Add name to graph node so we can add to summary.
  output_type_dict = outputs_to_scales_to_logits[common.OUTPUT_TYPE]
  output_type_dict[model.MERGED_LOGITS_SCOPE] = tf.identity(
      output_type_dict[model.MERGED_LOGITS_SCOPE],
      name=common.OUTPUT_TYPE)

  for output, num_classes in six.iteritems(outputs_to_num_classes):
    train_utils.add_softmax_cross_entropy_loss_for_each_scale(
        outputs_to_scales_to_logits[output],
        samples[common.LABEL],
        num_classes,
        ignore_label,
        loss_weight=1.0,
        upsample_logits=FLAGS.upsample_logits,
        scope=output)

  return outputs_to_scales_to_logits 
Author: generalized-iou, Project: g-tensorflow-models, Lines: 58, Source file: train.py

Example 12: _build_deeplab

# Required import: from deeplab import common [as alias]
# Or: from deeplab.common import OUTPUT_TYPE [as alias]
def _build_deeplab(iterator, outputs_to_num_classes, ignore_label):
  """Builds a clone of DeepLab.

  Args:
    iterator: An iterator of type tf.data.Iterator for images and labels.
    outputs_to_num_classes: A map from output type to the number of classes. For
      example, for the task of semantic segmentation with 21 semantic classes,
      we would have outputs_to_num_classes['semantic'] = 21.
    ignore_label: Ignore label.
  """
  samples = iterator.get_next()

  # Add name to input and label nodes so we can add to summary.
  samples[common.IMAGE] = tf.identity(samples[common.IMAGE], name=common.IMAGE)
  samples[common.LABEL] = tf.identity(samples[common.LABEL], name=common.LABEL)

  model_options = common.ModelOptions(
      outputs_to_num_classes=outputs_to_num_classes,
      crop_size=[int(sz) for sz in FLAGS.train_crop_size],
      atrous_rates=FLAGS.atrous_rates,
      output_stride=FLAGS.output_stride)

  outputs_to_scales_to_logits = model.multi_scale_logits(
      samples[common.IMAGE],
      model_options=model_options,
      image_pyramid=FLAGS.image_pyramid,
      weight_decay=FLAGS.weight_decay,
      is_training=True,
      fine_tune_batch_norm=FLAGS.fine_tune_batch_norm,
      nas_training_hyper_parameters={
          'drop_path_keep_prob': FLAGS.drop_path_keep_prob,
          'total_training_steps': FLAGS.training_number_of_steps,
      })

  # Add name to graph node so we can add to summary.
  output_type_dict = outputs_to_scales_to_logits[common.OUTPUT_TYPE]
  output_type_dict[model.MERGED_LOGITS_SCOPE] = tf.identity(
      output_type_dict[model.MERGED_LOGITS_SCOPE], name=common.OUTPUT_TYPE)

  for output, num_classes in six.iteritems(outputs_to_num_classes):
    train_utils.add_softmax_cross_entropy_loss_for_each_scale(
        outputs_to_scales_to_logits[output],
        samples[common.LABEL],
        num_classes,
        ignore_label,
        loss_weight=model_options.label_weights,
        upsample_logits=FLAGS.upsample_logits,
        hard_example_mining_step=FLAGS.hard_example_mining_step,
        top_k_percent_pixels=FLAGS.top_k_percent_pixels,
        scope=output) 
Author: tensorflow, Project: models, Lines: 52, Source file: train.py


Note: The deeplab.common.OUTPUT_TYPE attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.