

Python contrib.slim Code Examples

This article collects typical usage examples of tensorflow.contrib.slim in Python. If you are wondering what contrib.slim is for, how to use it, or are looking for working examples, the curated code samples below may help. You can also look further into usage examples of tensorflow.contrib, the module it belongs to.


The following shows four code examples of contrib.slim, ordered by popularity.
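
In TF 1.x code bases, tensorflow.contrib.slim is usually imported once and bound to the short name slim, which is how the snippets below refer to it. A minimal import sketch (TF 1.x only; the contrib namespace was removed in TF 2.x):

# TF 1.x only: the contrib namespace (including slim) no longer exists in TF 2.x.
import tensorflow as tf
from tensorflow.contrib import slim
# Equivalent alternative: slim = tf.contrib.slim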

Example 1: _define_eval_metrics

# Required module: from tensorflow import contrib [as alias]
# Or: from tensorflow.contrib import slim [as alias]
def _define_eval_metrics(end_points, data_batched):
    metric_map = super(ClassifierModel, ClassifierModel)._define_eval_metrics(end_points, data_batched)
    # Define the metrics. AUC (like slim's streaming_auc) requires
    # predictions to be within [0, 1], so clip them first.
    targets = data_batched.get('target')
    clipped_predictions = tf.clip_by_value(end_points['Predictions'], 0, 1)
    metric_map['AUC'] = tf.metrics.auc(targets, clipped_predictions)
    metric_map['mean_squared_error'] = slim.metrics.streaming_mean_squared_error(end_points['Predictions'], targets)
    metric_map['precision_at_thresholds'] = tf.metrics.precision_at_thresholds(targets, clipped_predictions,
                                                                               [i / 10.0 for i in range(0, 11)])
    metric_map['recall_at_thresholds'] = tf.metrics.recall_at_thresholds(targets, clipped_predictions,
                                                                         [i / 10.0 for i in range(0, 11)])
    return metric_map 
Developer: jerryli27, Project: TwinGAN, Lines: 15, Source file: train_image_classifier.py
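
Each entry that _define_eval_metrics adds to metric_map is a (value_tensor, update_op) pair, as returned by tf.metrics.* and slim.metrics.streaming_*. Below is a hedged sketch of how such a map is typically aggregated and evaluated; the labels and predictions tensors are illustrative placeholders, not taken from the TwinGAN project.

# Hedged sketch: aggregating (value, update_op) metric pairs with slim.
# `labels` and `predictions` are made-up example tensors, not project data.
import tensorflow as tf
from tensorflow.contrib import slim

labels = tf.constant([[1.0], [0.0], [1.0], [0.0]])
predictions = tf.constant([[0.9], [0.2], [0.7], [0.4]])

names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
    'AUC': tf.metrics.auc(labels, predictions),
    'mean_squared_error': slim.metrics.streaming_mean_squared_error(
        predictions, labels),
})

with tf.Session() as sess:
    # Streaming metrics keep their running counters in local variables.
    sess.run(tf.local_variables_initializer())
    sess.run(list(names_to_updates.values()))
    print(sess.run(names_to_values))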

Example 2: setup_metrics

# Required module: from tensorflow import contrib [as alias]
# Or: from tensorflow.contrib import slim [as alias]
def setup_metrics( inputs, model, cfg ):
    # Metrics are not computed in this configuration; return empty
    # name->value and name->update maps.
    # predictions = model[ 'model' ].
    # Choose the metrics to compute:
    # names_to_values, names_to_updates = slim.metrics.aggregate_metric_map( {} )
    return {}, {}
Developer: StanfordVL, Project: taskonomy, Lines: 7, Source file: train_imagenet.py
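
The stub above returns empty name-to-value and name-to-update maps. In a typical slim evaluation setup those maps are built with slim.metrics.aggregate_metric_map and the update ops are fed to slim.evaluation. A hedged sketch, assuming hypothetical labels/predictions tensors and checkpoint/log directories:

# Hedged sketch of how the setup_metrics stub could be filled in and consumed.
# `labels`, `predictions`, and the directories below are hypothetical placeholders.
import tensorflow as tf
from tensorflow.contrib import slim

def setup_metrics_example(labels, predictions):
    # Returns (names_to_values, names_to_updates), matching the stub's contract.
    return slim.metrics.aggregate_metric_map({
        'accuracy': slim.metrics.streaming_accuracy(predictions, labels),
    })

# Typical consumption in a slim evaluation loop (left commented because it
# needs a real checkpoint directory):
# names_to_values, names_to_updates = setup_metrics_example(labels, predictions)
# slim.evaluation.evaluation_loop(
#     master='',
#     checkpoint_dir='/tmp/model',   # hypothetical
#     logdir='/tmp/eval',            # hypothetical
#     num_evals=100,
#     eval_op=list(names_to_updates.values()),
#     eval_interval_secs=60)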

Example 3: summarize_data_utilization

# Required module: from tensorflow import contrib [as alias]
# Or: from tensorflow.contrib import slim [as alias]
def summarize_data_utilization(v, tf_global_step, batch_size, epsilon=0.001):
  """Summarizes the samples of non-zero weights during training.

  Args:
    v: a tensor [batch_size, 1] represents the sample weights.
      0: loss, 1: loss difference to the moving average, 2: label and 3: epoch,
      where epoch is an integer between 0 and 99 (the first and the last epoch).
    tf_global_step: the tensor of the current global step.
    batch_size: an integer batch_size
    epsilon: the rounding error. If the weight is smaller than epsilon then set
      it to zero.
  Returns:
    data_util: a tensor of data utilization.
  """
  nonzero_v = tf.get_variable('data_util/nonzero_v', [],
                              initializer=tf.zeros_initializer(),
                              trainable=False,
                              dtype=tf.float32)

  rounded_v = tf.maximum(v - epsilon, tf.to_float(0))

  # Log data utilization
  nonzero_v = tf.assign_add(nonzero_v, tf.count_nonzero(
      rounded_v, dtype=tf.float32))

  # slim runs extra sessions for logging, which can push the value above 1
  # (data are fed but the global step does not advance), so we use
  # tf_global_step + 2.
  data_util = (nonzero_v) / tf.to_float(batch_size) / (
      tf.to_float(tf_global_step) + 2)
  data_util = tf.minimum(data_util, 1)
  tf.stop_gradient(data_util)

  slim.summaries.add_scalar_summary(data_util, 'data_util/data_util')
  slim.summaries.add_scalar_summary(tf.reduce_sum(v), 'data_util/batch_sum_v')
  return data_util 
Developer: google, Project: mentornet, Lines: 38, Source file: utils.py
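
A hedged usage sketch for summarize_data_utilization: given a made-up [batch_size, 1] weight tensor and the global step, the returned data_util tensor tracks the fraction of samples whose weight exceeded epsilon. The weight values below are illustrative only.

# Hedged usage sketch; assumes summarize_data_utilization from Example 3 is in
# scope, and `v` holds made-up per-sample weights.
import tensorflow as tf

global_step = tf.train.get_or_create_global_step()
v = tf.constant([[0.0], [0.4], [0.9], [1.0]])   # [batch_size, 1] weights

# Adds the 'data_util/...' scalar summaries and returns the utilization tensor.
data_util = summarize_data_utilization(v, global_step, batch_size=4)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    print(sess.run(data_util))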

Example 4: run_training

# Required module: from tensorflow import contrib [as alias]
# Or: from tensorflow.contrib import slim [as alias]
def run_training( cfg ):
    # set up logging
    tf.logging.set_verbosity( tf.logging.INFO )

    with tf.Graph().as_default() as g:
        # create ops and placeholders
        inputs = utils.setup_input( cfg, is_training=False, use_filename_queue=True )
        RuntimeDeterminedEnviromentVars.load_dynamic_variables( inputs, cfg )
        RuntimeDeterminedEnviromentVars.populate_registered_variables()

        # build model (and losses and train_op)
        model = setup_model( inputs, cfg, is_training=False )

        # set up metrics to evaluate
        names_to_values, names_to_updates = setup_metrics( inputs, model, cfg )

        # execute training 
        start_time = time.time()
        utils.print_start_info( cfg, inputs[ 'max_steps' ], is_training=False )

        training_runners = { 'sess': tf.Session(), 'coord': tf.train.Coordinator() }
        data_prefetch_init_fn = utils.get_data_prefetch_threads_init_fn( inputs, cfg, is_training=False, use_filename_queue=True )
        training_runners[ 'threads' ] = data_prefetch_init_fn( training_runners[ 'sess' ], training_runners[ 'coord' ] )
        try:
            # This just returns the input as output. It is for testing the
            # data input pipeline only.
            for step in xrange( inputs[ 'max_steps' ] ):
                input_batch, target_batch, data_idx = training_runners['sess'].run( [ 
                        model['input_batch'],  model['target_batch'], model[ 'data_idxs' ] ] )

                if training_runners['coord'].should_stop():
                    break
        finally:
            utils.request_data_loading_end( training_runners )
            utils.end_data_loading_and_sess( training_runners )
        # else: # Use tf.slim
        #     train_log_dir = os.path.join( cfg['log_dir'], 'slim-train' )

        #     # When ready to use a model, use the code below
        #     train(  model[ 'train_op' ],
        #             train_log_dir,
        #             get_data_prefetch_threads_init_fn( inputs, cfg ), 
        #             global_step=model[ 'global_step' ],
        #             number_of_steps=inputs[ 'max_steps' ],
        #             init_fn=model[ 'init_fn' ],
        #             save_summaries_secs=300,
        #             save_interval_secs=600,
        #             saver=model[ 'saver_op' ] ) 

        end_train_time = time.time() - start_time
        print('time to train %d epochs: %.3f hrs' % (cfg['num_epochs'], end_train_time/(60*60)))
        print('avg time per epoch: %.3f hrs' % ( (end_train_time/(60*60)) / cfg['num_epochs']) ) 
Developer: StanfordVL, Project: taskonomy, Lines: 54, Source file: test.py
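
The commented-out block above corresponds to slim's high-level training loop. Below is a hedged, minimal sketch of slim.learning.train (TF 1.x) using a toy model and a hypothetical log directory, not the project's actual setup_model/setup_input pipeline.

# Hedged, minimal sketch of slim.learning.train (TF 1.x) with a toy model.
# The model, data, and '/tmp/slim-train' log directory are illustrative only.
import tensorflow as tf
from tensorflow.contrib import slim

images = tf.random_normal([32, 64])                              # fake inputs
labels = tf.random_uniform([32], maxval=10, dtype=tf.int32)      # fake labels
logits = slim.fully_connected(images, 10, activation_fn=None)
loss = tf.losses.sparse_softmax_cross_entropy(labels=labels, logits=logits)

optimizer = tf.train.GradientDescentOptimizer(0.01)
train_op = slim.learning.create_train_op(loss, optimizer)

# slim.learning.train runs the session loop and handles summaries/checkpoints.
slim.learning.train(
    train_op,
    logdir='/tmp/slim-train',
    number_of_steps=100,
    save_summaries_secs=300,
    save_interval_secs=600)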


Note: The tensorflow.contrib.slim examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and redistribution and use should follow the corresponding project's License. Do not reproduce without permission.