

Python inception.inception_v4_arg_scope Method Code Examples

This article collects typical usage examples of the Python method nets.inception.inception_v4_arg_scope. If you are wondering how inception.inception_v4_arg_scope is used in practice, how to call it, or what real-world code looks like, the curated examples below may help. You can also explore further usage examples from the containing module, nets.inception.


Seven code examples of inception.inception_v4_arg_scope are shown below, sorted by popularity by default. You can upvote examples you like or find useful; your feedback helps the site recommend better Python code examples.
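Before the individual examples, here is a minimal orientation sketch of the pattern they all share: the network is built inside slim.arg_scope(inception.inception_v4_arg_scope()) so that batch norm, weight decay, and activation defaults are applied consistently. It assumes TensorFlow 1.x with the nets package from the tensorflow/models slim directory on the Python path; the placeholder shape and class count are illustrative.

import tensorflow as tf
from nets import inception

slim = tf.contrib.slim

# Build Inception V4 under its default arg scope so that batch norm,
# weight decay and activation defaults are configured consistently.
inputs = tf.placeholder(tf.float32, (1, 299, 299, 3))
with slim.arg_scope(inception.inception_v4_arg_scope()):
    logits, end_points = inception.inception_v4(
        inputs, num_classes=1001, is_training=False)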

Example 1: testNoBatchNormScaleByDefault

# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
# Also used by this snippet: import tensorflow as tf
def testNoBatchNormScaleByDefault(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with tf.contrib.slim.arg_scope(inception.inception_v4_arg_scope()):
      inception.inception_v4(inputs, num_classes, is_training=False)

    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), []) 
Developer: IBM, Project: MAX-Image-Segmenter, Lines: 10, Source file: inception_v4_test.py

Example 2: testBatchNormScale

# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
# Also used by this snippet: import tensorflow as tf
def testBatchNormScale(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with tf.contrib.slim.arg_scope(
        inception.inception_v4_arg_scope(batch_norm_scale=True)):
      inception.inception_v4(inputs, num_classes, is_training=False)

    gamma_names = set(
        v.op.name for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
    self.assertGreater(len(gamma_names), 0)
    for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
      self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names) 
Developer: IBM, Project: MAX-Image-Segmenter, Lines: 15, Source file: inception_v4_test.py

Example 3: use_inceptionv4

# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
# Also used by this snippet: tensorflow as tf, tf.contrib.slim as slim, numpy as np,
# matplotlib.pyplot as plt, and the slim inception_preprocessing module
def use_inceptionv4(self):
        image_size = inception.inception_v4.default_image_size
        img_path = "../../data/misec_images/EnglishCockerSpaniel_simon.jpg"
        checkpoint_path = "../../data/trained_models/inception_v4/inception_v4.ckpt"

        with tf.Graph().as_default():
           
            image_string = tf.read_file(img_path)
            image = tf.image.decode_jpeg(image_string, channels=3)
            processed_image = inception_preprocessing.preprocess_image(image, image_size, image_size, is_training=False)
            processed_images  = tf.expand_dims(processed_image, 0)
            
            # Create the model, use the default arg scope to configure the batch norm parameters.
            with slim.arg_scope(inception.inception_v4_arg_scope()):
                logits, _ = inception.inception_v4(processed_images, num_classes=1001, is_training=False)
            probabilities = tf.nn.softmax(logits)
            
            init_fn = slim.assign_from_checkpoint_fn(
                checkpoint_path,
                slim.get_model_variables('InceptionV4'))
            
            with tf.Session() as sess:
                init_fn(sess)
                np_image, probabilities = sess.run([image, probabilities])
                probabilities = probabilities[0, 0:]
                sorted_inds = [i[0] for i in sorted(enumerate(-probabilities), key=lambda x:x[1])]
                self.disp_names(sorted_inds,probabilities)
                
            plt.figure()
            plt.imshow(np_image.astype(np.uint8))
            plt.axis('off')
            plt.title(img_path)
            plt.show()
            
            
        
        return 
Developer: LevinJ, Project: SSD_tensorflow_VOC, Lines: 39, Source file: pretrained.py
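The self.disp_names helper called in Example 3 is not included in the snippet. A plausible sketch, modeled on the label-name utility shipped with the slim image models (datasets/imagenet.py in tensorflow/models), is shown below; the method signature and the top_k parameter are assumptions for illustration.

from datasets import imagenet

def disp_names(self, sorted_inds, probabilities, top_k=5):
    # Assumed helper: map the top-k class indices to readable ImageNet names.
    # With a 1001-class checkpoint, index 0 is the background class.
    names = imagenet.create_readable_names_for_imagenet_labels()
    for i in range(top_k):
        index = sorted_inds[i]
        print('Probability %0.2f%% => [%s]' % (probabilities[index] * 100, names[index]))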

Example 4: fine_tune_inception

# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
# Also used by this snippet: tensorflow as tf, tf.contrib.slim as slim, math,
# and the slim flowers dataset module (datasets.flowers)
def fine_tune_inception(self):
        train_dir = '/tmp/inception_finetuned/'
        image_size = inception.inception_v4.default_image_size
        checkpoint_path = "../../data/trained_models/inception_v4/inception_v4.ckpt"
        flowers_data_dir = "../../data/flower"
        
        
        with tf.Graph().as_default():
            tf.logging.set_verbosity(tf.logging.INFO)
            
            dataset = flowers.get_split('train', flowers_data_dir)
            images, _, labels = self.load_batch(dataset, height=image_size, width=image_size)
            
            # Create the model, use the default arg scope to configure the batch norm parameters.
            with slim.arg_scope(inception.inception_v4_arg_scope()):
                logits, _ = inception.inception_v4(images, num_classes=dataset.num_classes, is_training=True)
                
            # Specify the loss function:
            one_hot_labels = slim.one_hot_encoding(labels, dataset.num_classes)
            total_loss = slim.losses.softmax_cross_entropy(logits, one_hot_labels)
#             total_loss = slim.losses.get_total_loss(add_regularization_losses=False)
#             total_loss = slim.losses.get_total_loss()
        
            # Create some summaries to visualize the training process:
            tf.summary.scalar('losses/Total_Loss', total_loss)
          
            # Specify the optimizer and create the train op:
            optimizer = tf.train.AdamOptimizer(learning_rate=0.001)
            train_op = slim.learning.create_train_op(total_loss, optimizer)
            
            # Run the training:
            number_of_steps = math.ceil(dataset.num_samples/32) * 1
            final_loss = slim.learning.train(
                train_op,
                logdir=train_dir,
                init_fn=self.get_init_fn(checkpoint_path),
                number_of_steps=number_of_steps)
        
  
            print('Finished training. Last batch loss %f' % final_loss)
        return 
Developer: LevinJ, Project: SSD_tensorflow_VOC, Lines: 43, Source file: pretrained.py
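Example 4 relies on a self.get_init_fn(checkpoint_path) helper that is not part of the snippet. A reasonable sketch, following the TF-Slim fine-tuning walkthrough, restores all pretrained model variables except the classification layers, whose shapes change with the new number of classes; the exclusion scopes below are assumptions for illustration.

import tensorflow as tf

slim = tf.contrib.slim

def get_init_fn(self, checkpoint_path):
    # Assumed helper: restore pretrained ImageNet weights while skipping the
    # logits layers, which are re-initialized for the flowers classes.
    checkpoint_exclude_scopes = ['InceptionV4/Logits', 'InceptionV4/AuxLogits']
    variables_to_restore = [
        var for var in slim.get_model_variables()
        if not any(var.op.name.startswith(scope)
                   for scope in checkpoint_exclude_scopes)
    ]
    return slim.assign_from_checkpoint_fn(checkpoint_path, variables_to_restore)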

Example 5: testNoBatchNormScaleByDefault

# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
# Also used by this snippet: import tensorflow as tf (with slim = tf.contrib.slim)
def testNoBatchNormScaleByDefault(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(inception.inception_v4_arg_scope()):
      inception.inception_v4(inputs, num_classes, is_training=False)

    self.assertEqual(tf.global_variables('.*/BatchNorm/gamma:0$'), []) 
Developer: tensorflow, Project: models, Lines: 10, Source file: inception_v4_test.py

Example 6: testBatchNormScale

# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
# Also used by this snippet: import tensorflow as tf (with slim = tf.contrib.slim)
def testBatchNormScale(self):
    height, width = 299, 299
    num_classes = 1000
    inputs = tf.placeholder(tf.float32, (1, height, width, 3))
    with slim.arg_scope(
        inception.inception_v4_arg_scope(batch_norm_scale=True)):
      inception.inception_v4(inputs, num_classes, is_training=False)

    gamma_names = set(
        v.op.name
        for v in tf.global_variables('.*/BatchNorm/gamma:0$'))
    self.assertGreater(len(gamma_names), 0)
    for v in tf.global_variables('.*/BatchNorm/moving_mean:0$'):
      self.assertIn(v.op.name[:-len('moving_mean')] + 'gamma', gamma_names) 
Developer: tensorflow, Project: models, Lines: 16, Source file: inception_v4_test.py

Example 7: use_fined_model

# Required import: from nets import inception [as alias]
# Or: from nets.inception import inception_v4_arg_scope [as alias]
# Also used by this snippet: tensorflow as tf, tf.contrib.slim as slim, numpy as np,
# matplotlib.pyplot as plt, and the slim flowers dataset module (datasets.flowers)
def use_fined_model(self):
        image_size = inception.inception_v4.default_image_size
        batch_size = 3
        flowers_data_dir = "../../data/flower"
        train_dir = '/tmp/inception_finetuned/'
        
        with tf.Graph().as_default():
            tf.logging.set_verbosity(tf.logging.INFO)
            
            dataset = flowers.get_split('train', flowers_data_dir)
            images, images_raw, labels = self.load_batch(dataset, height=image_size, width=image_size)
            
            # Create the model, use the default arg scope to configure the batch norm parameters.
            with slim.arg_scope(inception.inception_v4_arg_scope()):
                logits, _ = inception.inception_v4(images, num_classes=dataset.num_classes, is_training=True)
        
            probabilities = tf.nn.softmax(logits)
            
            checkpoint_path = tf.train.latest_checkpoint(train_dir)
            init_fn = slim.assign_from_checkpoint_fn(
              checkpoint_path,
              slim.get_variables_to_restore())
            
            with tf.Session() as sess:
                with slim.queues.QueueRunners(sess):
                    sess.run(tf.initialize_local_variables())
                    init_fn(sess)
                    np_probabilities, np_images_raw, np_labels = sess.run([probabilities, images_raw, labels])
            
                    for i in range(batch_size): 
                        image = np_images_raw[i, :, :, :]
                        true_label = np_labels[i]
                        predicted_label = np.argmax(np_probabilities[i, :])
                        predicted_name = dataset.labels_to_names[predicted_label]
                        true_name = dataset.labels_to_names[true_label]
                        
                        plt.figure()
                        plt.imshow(image.astype(np.uint8))
                        plt.title('Ground Truth: [%s], Prediction [%s]' % (true_name, predicted_name))
                        plt.axis('off')
                        plt.show()
                return 
Developer: LevinJ, Project: SSD_tensorflow_VOC, Lines: 44, Source file: pretrained.py
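Examples 4 and 7 both call a self.load_batch(dataset, height, width) helper that is not shown above. A minimal sketch, following the TF-Slim flowers tutorial and assuming the inception_preprocessing module from the slim preprocessing package is available, could look like this; the batch size and queue parameters are illustrative.

import tensorflow as tf
from preprocessing import inception_preprocessing

slim = tf.contrib.slim

def load_batch(self, dataset, batch_size=32, height=299, width=299, is_training=False):
    # Assumed helper: read (image, label) pairs from the slim dataset, preprocess
    # the image for Inception, keep a resized raw copy for display, and batch them.
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset, common_queue_capacity=32, common_queue_min=8)
    image_raw, label = provider.get(['image', 'label'])

    image = inception_preprocessing.preprocess_image(
        image_raw, height, width, is_training=is_training)

    # Resize the raw image for display purposes only.
    image_raw = tf.expand_dims(image_raw, 0)
    image_raw = tf.image.resize_images(image_raw, [height, width])
    image_raw = tf.squeeze(image_raw)

    images, images_raw, labels = tf.train.batch(
        [image, image_raw, label],
        batch_size=batch_size,
        num_threads=1,
        capacity=2 * batch_size)
    return images, images_raw, labels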


Note: The nets.inception.inception_v4_arg_scope examples in this article were compiled by 純淨天空 from open source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open source projects contributed by the community; copyright of the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.