

Python inception_resnet_v2.inception_resnet_v2 method code examples

This article collects typical usage examples of the Python method inception_resnet_v2.inception_resnet_v2. If you are unsure what this method does or how to call it, the curated examples below should help. You can also browse further usage examples from the inception_resnet_v2 module.


The following presents 5 code examples of the inception_resnet_v2.inception_resnet_v2 method, ordered by popularity.
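All of the examples below follow the same basic pattern: build the network inside inception_resnet_v2_arg_scope, call inception_resnet_v2 in inference mode, and read the softmax output from the 'Predictions' endpoint. A minimal sketch of that shared pattern (the 299x299 input size and 1001 classes are the usual ImageNet settings, assumed here):

import tensorflow as tf
import tensorflow.contrib.slim as slim
import inception_resnet_v2

# Minimal sketch of the shared call pattern (input size and class count are assumptions).
images = tf.placeholder(tf.float32, [None, 299, 299, 3])
with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
    logits, end_points = inception_resnet_v2.inception_resnet_v2(
        images, num_classes=1001, is_training=False)
predicted_labels = tf.argmax(end_points['Predictions'], 1)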

Example 1: create_model

# Required import: import inception_resnet_v2 [as alias]
# Or: from inception_resnet_v2 import inception_resnet_v2 [as alias]
def create_model(x, reuse=None):
  """Create model graph.

  Args:
    x: input images
    reuse: reuse parameter which will be passed to underlying variable scopes.
      Should be None on the first call and True on every subsequent call.

  Returns:
    (logits, end_points) - tuple of model logits and endpoints

  Raises:
    ValueError: if model type specified by --model_name flag is invalid.
  """
  if FLAGS.model_name == 'inception_v3':
    with slim.arg_scope(inception.inception_v3_arg_scope()):
      return inception.inception_v3(
          x, num_classes=NUM_CLASSES, is_training=False, reuse=reuse)
  elif FLAGS.model_name == 'inception_resnet_v2':
    with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
      return inception_resnet_v2.inception_resnet_v2(
          x, num_classes=NUM_CLASSES, is_training=False, reuse=reuse)
  else:
    raise ValueError('Invalid model name: %s' % (FLAGS.model_name)) 
Author: rky0930 | Project: yolo_v2 | Lines: 26 | Source: eval_on_adversarial.py
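As the docstring notes, reuse must be None on the first call and True afterwards so that all calls share a single set of model variables. A hypothetical usage sketch (the x_clean and x_adv placeholders are illustrative, not taken from the original project):

import tensorflow as tf

x_clean = tf.placeholder(tf.float32, [None, 299, 299, 3])
x_adv = tf.placeholder(tf.float32, [None, 299, 299, 3])

logits_clean, _ = create_model(x_clean, reuse=None)  # first call creates the variables
logits_adv, _ = create_model(x_adv, reuse=True)      # later calls reuse the same variables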

Example 2: main

# Required import: import inception_resnet_v2 [as alias]
# Or: from inception_resnet_v2 import inception_resnet_v2 [as alias]
def main(_):
  batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
  num_classes = 1001

  tf.logging.set_verbosity(tf.logging.INFO)

  with tf.Graph().as_default():
    # Prepare graph
    x_input = tf.placeholder(tf.float32, shape=batch_shape)

    with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
      _, end_points = inception_resnet_v2.inception_resnet_v2(
          x_input, num_classes=num_classes, is_training=False)

    predicted_labels = tf.argmax(end_points['Predictions'], 1)

    # Run computation
    saver = tf.train.Saver(slim.get_model_variables())
    session_creator = tf.train.ChiefSessionCreator(
        scaffold=tf.train.Scaffold(saver=saver),
        checkpoint_filename_with_path=FLAGS.checkpoint_path,
        master=FLAGS.master)

    with tf.train.MonitoredSession(session_creator=session_creator) as sess:
      with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
        for filenames, images in load_images(FLAGS.input_dir, batch_shape):
          labels = sess.run(predicted_labels, feed_dict={x_input: images})
          for filename, label in zip(filenames, labels):
            out_file.write('{0},{1}\n'.format(filename, label)) 
Author: StephanZheng | Project: neural-fingerprinting | Lines: 31 | Source: defense.py
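The load_images helper referenced above is not included in this snippet. A plausible sketch of what it does, assuming PNG inputs and the [-1, 1] pixel range expected by Inception-style models (the file filtering and resizing details are assumptions):

import os
import numpy as np
from PIL import Image

def load_images(input_dir, batch_shape):
    # Hypothetical sketch: yield (filenames, images) batches with pixel values
    # scaled from [0, 255] to the [-1, 1] range used by Inception models.
    images = np.zeros(batch_shape, dtype=np.float32)
    filenames = []
    batch_size = batch_shape[0]
    idx = 0
    for name in sorted(os.listdir(input_dir)):
        if not name.lower().endswith('.png'):
            continue
        img = Image.open(os.path.join(input_dir, name)).convert('RGB')
        img = img.resize((batch_shape[2], batch_shape[1]))  # (width, height)
        images[idx] = np.asarray(img, dtype=np.float32) / 255.0 * 2.0 - 1.0
        filenames.append(name)
        idx += 1
        if idx == batch_size:
            yield filenames, images
            images = np.zeros(batch_shape, dtype=np.float32)
            filenames = []
            idx = 0
    if idx > 0:
        yield filenames, images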

Example 3: get_network

# Required import: import inception_resnet_v2 [as alias]
# Or: from inception_resnet_v2 import inception_resnet_v2 [as alias]
def get_network(self, input_tensor, is_training):
        # Load pre-trained inception-resnet model
        with slim.arg_scope(inception_resnet_v2_arg_scope(batch_norm_decay = 0.999, weight_decay = 0.0001)):
            net, end_points = inception_resnet_v2(input_tensor, is_training = is_training)

        # Adding some modification to original InceptionResnetV2 - changing scoring of AUXILIARY TOWER
        weight_decay = 0.0005
        with tf.variable_scope('NewInceptionResnetV2'):
            with tf.variable_scope('AuxiliaryScoring'):
                with slim.arg_scope([layers.convolution2d, layers.convolution2d_transpose],
                                    weights_regularizer = slim.l2_regularizer(weight_decay),
                                    biases_regularizer = slim.l2_regularizer(weight_decay),
                                    activation_fn = None):
                    tf.summary.histogram('Last_layer/activations', net, [KEY_SUMMARIES])

                    # Scoring
                    net = slim.dropout(net, 0.7, is_training = is_training, scope = 'Dropout')
                    net = layers.convolution2d(net, num_outputs = self.FEATURES, kernel_size = 1, stride = 1,
                                               scope = 'Scoring_layer')
                    feature = net
                    tf.summary.histogram('Scoring_layer/activations', net, [KEY_SUMMARIES])

                    # Upsampling
                    net = layers.convolution2d_transpose(net, num_outputs = 16, kernel_size = 17, stride = 17,
                                                         padding = 'VALID', scope = 'Upsampling_layer')

                    tf.summary.histogram('Upsampling_layer/activations', net, [KEY_SUMMARIES])

            # Smoothing layer - separable gaussian filters
            net = super()._get_gauss_smoothing_net(net, size = self.SMOOTH_SIZE, std = 1.0, kernel_sum = 0.2)

            return net, feature 
Author: marian-margeta | Project: gait-recognition | Lines: 34 | Source: human_pose_nn.py
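The final smoothing step calls a _get_gauss_smoothing_net helper from the parent class, which is not shown here. A hypothetical sketch of a separable Gaussian smoothing layer of that kind, implemented as two depthwise convolutions (the exact kernel normalization in the original project may differ):

import numpy as np
import tensorflow as tf

def gauss_smoothing_net(net, size, std=1.0, kernel_sum=0.2):
    # Hypothetical sketch: channel-wise separable Gaussian smoothing via two
    # depthwise convolutions (horizontal pass, then vertical pass).
    x = np.arange(size, dtype=np.float32) - (size - 1) / 2.0
    g = np.exp(-(x ** 2) / (2.0 * std ** 2))
    g = g / g.sum() * np.sqrt(kernel_sum)  # the resulting 2-D kernel sums to kernel_sum
    channels = net.get_shape().as_list()[-1]
    kernel_h = tf.constant(np.tile(g.reshape(1, size, 1, 1), [1, 1, channels, 1]), tf.float32)
    kernel_v = tf.constant(np.tile(g.reshape(size, 1, 1, 1), [1, 1, channels, 1]), tf.float32)
    net = tf.nn.depthwise_conv2d(net, kernel_h, strides=[1, 1, 1, 1], padding='SAME')
    net = tf.nn.depthwise_conv2d(net, kernel_v, strides=[1, 1, 1, 1], padding='SAME')
    return net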

Example 4: main

# Required import: import inception_resnet_v2 [as alias]
# Or: from inception_resnet_v2 import inception_resnet_v2 [as alias]
def main(_):
  """Classify all images using the sample defense."""
  batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
  nb_classes = 1001

  tf.logging.set_verbosity(tf.logging.INFO)

  with tf.Graph().as_default():
    # Prepare graph
    x_input = tf.placeholder(tf.float32, shape=batch_shape)

    with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
      _, end_points = inception_resnet_v2.inception_resnet_v2(
          x_input, num_classes=nb_classes, is_training=False)

    predicted_labels = tf.argmax(end_points['Predictions'], 1)

    # Run computation
    saver = tf.train.Saver(slim.get_model_variables())
    session_creator = tf.train.ChiefSessionCreator(
        scaffold=tf.train.Scaffold(saver=saver),
        checkpoint_filename_with_path=FLAGS.checkpoint_path,
        master=FLAGS.master)

    with tf.train.MonitoredSession(session_creator=session_creator) as sess:
      with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
        for filenames, images in load_images(FLAGS.input_dir, batch_shape):
          labels = sess.run(predicted_labels, feed_dict={x_input: images})
          for filename, label in zip(filenames, labels):
            out_file.write('{0},{1}\n'.format(filename, label)) 
Author: tensorflow | Project: cleverhans | Lines: 32 | Source: defense.py
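These defense scripts rely on a set of command-line flags (batch_size, image_height, checkpoint_path, ...) defined elsewhere in the file. A plausible set of definitions using tf.flags; the default values here are assumptions, not taken from the original project:

import tensorflow as tf

tf.flags.DEFINE_string('master', '', 'Address of the TensorFlow master to use.')
tf.flags.DEFINE_string('checkpoint_path', '', 'Path to the Inception-ResNet-v2 checkpoint.')
tf.flags.DEFINE_string('input_dir', '', 'Input directory with images.')
tf.flags.DEFINE_string('output_file', '', 'Output file to save predicted labels.')
tf.flags.DEFINE_integer('image_width', 299, 'Width of each input image.')    # assumed default
tf.flags.DEFINE_integer('image_height', 299, 'Height of each input image.')  # assumed default
tf.flags.DEFINE_integer('batch_size', 16, 'Number of images processed at a time.')  # assumed default

FLAGS = tf.flags.FLAGS

if __name__ == '__main__':
    tf.app.run()  # dispatches to main(_)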

Example 5: main

# Required import: import inception_resnet_v2 [as alias]
# Or: from inception_resnet_v2 import inception_resnet_v2 [as alias]
def main(_):
    batch_shape = [FLAGS.batch_size, FLAGS.image_height, FLAGS.image_width, 3]
    num_classes = 1001
    itr = 30

    tf.logging.set_verbosity(tf.logging.INFO)

    with tf.Graph().as_default():
        # Prepare graph
        x_input = tf.placeholder(tf.float32, shape=batch_shape)
        img_resize_tensor = tf.placeholder(tf.int32, [2])
        x_input_resize = tf.image.resize_images(x_input, img_resize_tensor, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)

        shape_tensor = tf.placeholder(tf.int32, [3])
        padded_input = padding_layer_iyswim(x_input_resize, shape_tensor)
        # 330 is the last value to keep 8*8 output, 362 is the last value to keep 9*9 output, stride = 32
        padded_input.set_shape(
            (FLAGS.batch_size, FLAGS.image_resize, FLAGS.image_resize, 3))

        with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):
            _, end_points = inception_resnet_v2.inception_resnet_v2(
                padded_input, num_classes=num_classes, is_training=False, create_aux_logits=True)

        predicted_labels = tf.argmax(end_points['Predictions'], 1)

        # Run computation
        saver = tf.train.Saver(slim.get_model_variables())
        session_creator = tf.train.ChiefSessionCreator(
            scaffold=tf.train.Scaffold(saver=saver),
            checkpoint_filename_with_path=FLAGS.checkpoint_path,
            master=FLAGS.master)

        with tf.train.MonitoredSession(session_creator=session_creator) as sess:
            with tf.gfile.Open(FLAGS.output_file, 'w') as out_file:
                for filenames, images in load_images(FLAGS.input_dir, batch_shape):
                    final_preds = np.zeros(
                        [FLAGS.batch_size, num_classes, itr])
                    for j in range(itr):
                        if np.random.randint(0, 2, size=1) == 1:
                            images = images[:, :, ::-1, :]
                        resize_shape_ = np.random.randint(310, 331)
                        pred, aux_pred = sess.run(
                            [end_points['Predictions'], end_points['AuxPredictions']],
                            feed_dict={
                                x_input: images,
                                img_resize_tensor: [resize_shape_] * 2,
                                shape_tensor: np.array([
                                    random.randint(0, FLAGS.image_resize - resize_shape_),
                                    random.randint(0, FLAGS.image_resize - resize_shape_),
                                    FLAGS.image_resize])})
                        final_preds[..., j] = pred + 0.4 * aux_pred
                    final_probs = np.sum(final_preds, axis=-1)
                    labels = np.argmax(final_probs, 1)
                    for filename, label in zip(filenames, labels):
                        out_file.write('{0},{1}\n'.format(filename, label)) 
Author: cihangxie | Project: NIPS2017_adv_challenge_defense | Lines: 51 | Source: defense.py
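The padding_layer_iyswim function used above is not included in this snippet. A simplified sketch of its assumed behavior, under the assumption of a square output canvas: it places the already-resized image at the random offset (shape[0], shape[1]) inside a shape[2] x shape[2] canvas and zero-pads the remainder:

import tensorflow as tf

def padding_layer_iyswim(inputs, shape):
    # Simplified sketch (assumed behavior): zero-pad `inputs` into a square
    # canvas of side shape[2], with the image placed at offset (shape[0], shape[1]).
    input_shape = tf.shape(inputs)
    pad_top = shape[0]
    pad_left = shape[1]
    pad_bottom = shape[2] - pad_top - input_shape[1]
    pad_right = shape[2] - pad_left - input_shape[2]
    paddings = tf.stack([[0, 0],
                         [pad_top, pad_bottom],
                         [pad_left, pad_right],
                         [0, 0]])
    return tf.pad(inputs, paddings)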


Note: The inception_resnet_v2.inception_resnet_v2 examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects contributed by their respective developers, and copyright remains with the original authors; please consult each project's license before distributing or using the code. Do not reproduce this article without permission.