

Python tensorflow_hub.get_expected_image_size method code examples

This article collects typical usage examples of the tensorflow_hub.get_expected_image_size method in Python. If you are trying to figure out what tensorflow_hub.get_expected_image_size does or how to use it, the selected code examples below may help. You can also explore other usage examples from the tensorflow_hub package.


Seven code examples of the tensorflow_hub.get_expected_image_size method are shown below, sorted by popularity by default.
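As a quick orientation before the examples, here is a minimal sketch of the method's basic usage (TF1-style hub API; the module URL is only an assumption, any TF Hub image module works the same way):

import tensorflow_hub as hub

# Load the module spec and query the input image size it expects.
module_spec = hub.load_module_spec(
    "https://tfhub.dev/google/imagenet/inception_v3/feature_vector/1")
height, width = hub.get_expected_image_size(module_spec)
print(height, width)  # 299 299 for Inception V3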

Example 1: create_module_graph

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import get_expected_image_size [as alias]
def create_module_graph(module_spec):
  """Creates a graph and loads Hub Module into it.

  Args:
    module_spec: the hub.ModuleSpec for the image module being used.

  Returns:
    graph: the tf.Graph that was created.
    bottleneck_tensor: the bottleneck values output by the module.
    resized_input_tensor: the input images, resized as expected by the module.
    wants_quantization: a boolean, whether the module has been instrumented
      with fake quantization ops.
  """
  height, width = hub.get_expected_image_size(module_spec)
  with tf.Graph().as_default() as graph:
    resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3])
    m = hub.Module(module_spec)
    bottleneck_tensor = m(resized_input_tensor)
    wants_quantization = any(node.op in FAKE_QUANT_OPS
                             for node in graph.as_graph_def().node)
  return graph, bottleneck_tensor, resized_input_tensor, wants_quantization 
Author: hthuwal | Project: sign-language-gesture-recognition | Lines: 23 | Source: retrain.py
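For context, a minimal sketch of how create_module_graph might be called in retrain.py-style code (the module URL is an assumption; FAKE_QUANT_OPS is defined elsewhere in that script):

# Hypothetical usage; the module URL below is only an example.
module_spec = hub.load_module_spec(
    "https://tfhub.dev/google/imagenet/mobilenet_v2_100_224/feature_vector/2")
graph, bottleneck_tensor, resized_input_tensor, wants_quantization = (
    create_module_graph(module_spec))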

Example 2: add_jpeg_decoding

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import get_expected_image_size [as alias]
def add_jpeg_decoding(module_spec):
  """Adds operations that perform JPEG decoding and resizing to the graph..

  Args:
    module_spec: The hub.ModuleSpec for the image module being used.

  Returns:
    Tensors for the node to feed JPEG data into, and the output of the
      preprocessing steps.
  """
  input_height, input_width = hub.get_expected_image_size(module_spec)
  input_depth = hub.get_num_image_channels(module_spec)
  jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
  decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
  # Convert from full range of uint8 to range [0,1] of float32.
  decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,
                                                        tf.float32)
  decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
  resize_shape = tf.stack([input_height, input_width])
  resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
  resized_image = tf.image.resize_bilinear(decoded_image_4d,
                                           resize_shape_as_int)
  return jpeg_data, resized_image 
Author: hthuwal | Project: sign-language-gesture-recognition | Lines: 25 | Source: retrain.py
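A minimal usage sketch of the returned tensors, assuming a TF1 session and a local JPEG file ('cat.jpg' is a placeholder name):

# Hypothetical usage of add_jpeg_decoding's outputs.
with tf.Graph().as_default():
    jpeg_data, resized_image = add_jpeg_decoding(module_spec)
    with tf.Session() as sess:
        raw_jpeg = tf.gfile.GFile('cat.jpg', 'rb').read()
        image_batch = sess.run(resized_image, feed_dict={jpeg_data: raw_jpeg})
        # image_batch now has shape [1, input_height, input_width, input_depth]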

Example 3: create_module_graph

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import get_expected_image_size [as alias]
def create_module_graph(module_spec):
    """Creates a graph and loads Hub Module into it.

    Args:
      module_spec: the hub.ModuleSpec for the image module being used.

    Returns:
      graph: the tf.Graph that was created.
      bottleneck_tensor: the bottleneck values output by the module.
      resized_input_tensor: the input images, resized as expected by the module.
      wants_quantization: a boolean, whether the module has been instrumented
        with fake quantization ops.
    """
    height, width = hub.get_expected_image_size(module_spec)
    with tf.Graph().as_default() as graph:
        resized_input_tensor = tf.placeholder(tf.float32, [None, height, width, 3])
        m = hub.Module(module_spec)
        bottleneck_tensor = m(resized_input_tensor)
        wants_quantization = any(node.op in FAKE_QUANT_OPS
                                 for node in graph.as_graph_def().node)
    return graph, bottleneck_tensor, resized_input_tensor, wants_quantization 
Author: joelbarmettlerUZH | Project: FaceClassification_Tensorflow | Lines: 23 | Source: retrain.py

Example 4: add_jpeg_decoding

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import get_expected_image_size [as alias]
def add_jpeg_decoding(module_spec):
    """Adds operations that perform JPEG decoding and resizing to the graph..

    Args:
      module_spec: The hub.ModuleSpec for the image module being used.

    Returns:
      Tensors for the node to feed JPEG data into, and the output of the
        preprocessing steps.
    """
    input_height, input_width = hub.get_expected_image_size(module_spec)
    input_depth = hub.get_num_image_channels(module_spec)
    jpeg_data = tf.placeholder(tf.string, name='DecodeJPGInput')
    decoded_image = tf.image.decode_jpeg(jpeg_data, channels=input_depth)
    # Convert from full range of uint8 to range [0,1] of float32.
    decoded_image_as_float = tf.image.convert_image_dtype(decoded_image,
                                                          tf.float32)
    decoded_image_4d = tf.expand_dims(decoded_image_as_float, 0)
    resize_shape = tf.stack([input_height, input_width])
    resize_shape_as_int = tf.cast(resize_shape, dtype=tf.int32)
    resized_image = tf.image.resize_bilinear(decoded_image_4d,
                                             resize_shape_as_int)
    return jpeg_data, resized_image 
Author: joelbarmettlerUZH | Project: FaceClassification_Tensorflow | Lines: 25 | Source: retrain.py

Example 5: model_fn

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import get_expected_image_size [as alias]
def model_fn(features, labels, mode, params):

	module = hub.Module("https://tfhub.dev/google/imagenet/inception_v3/classification/1")
	height, width = hub.get_expected_image_size(module)
	
	# Done here to get the summaries in the model_fn execution
	images = tf.map_fn(
		lambda i: parse_tfrecord_inception(params, i, width, height, is_training=False, use_summary=True)[0],
		features,
		dtype=tf.float32
		)

	tf.summary.image("final_image", images)
	
	logits = module(images) # [batch_size, height, width, 3] => [batch_size, num_classes]

	# Does nothing useful, just to run tensors through the graph
	loss = tf.reduce_mean(tf.layers.dense(images, 1))
	train_op = tf.train.AdamOptimizer().minimize(loss, tf.train.get_global_step())	

	predictions = logits

	return tf.estimator.EstimatorSpec(
		loss=loss, 
		mode=mode, 
		train_op=train_op,
		predictions=predictions,
		) 
Author: Octavian-ai | Project: BigGAN-TPU-TensorFlow | Lines: 30 | Source: debug_input.py

Example 6: testExpectedImageSize

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import get_expected_image_size [as alias]
def testExpectedImageSize(self):
    image_column = hub.image_embedding_column("image", self.spec)
    # The usage comment recommends this code pattern, so we test it here.
    self.assertSequenceEqual(
        hub.get_expected_image_size(image_column.module_spec), [1, 2]) 
Author: tensorflow | Project: hub | Lines: 7 | Source: feature_column_test.py

Example 7: load_hub_weights

# Required import: import tensorflow_hub [as alias]
# Or: from tensorflow_hub import get_expected_image_size [as alias]
def load_hub_weights(models):
    for alpha, rows in models:

        tf.reset_default_graph()
        print('alpha: ', alpha, 'rows: ', rows)

        WEIGHTS_SAVE_PATH_INCLUDE_TOP = '/home/jon/Documents/keras_mobilenetV2/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' + str(alpha) + '_' + str(rows) + '.h5'

        WEIGHTS_SAVE_PATH_NO_TOP = '/home/jon/Documents/keras_mobilenetV2/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' + \
            str(alpha) + '_' + str(rows) + '_no_top' + '.h5'

        # Load tf stuff
        img = nets.utils.load_img('cat.png', target_size=256, crop_size=rows)
        img = (img / 128.0) - 1.0
        inputs = tf.placeholder(tf.float32, [None, rows, rows, 3])

        model = hub.Module(
            "https://tfhub.dev/google/imagenet/mobilenet_v2_"
            + map_alpha_to_slim(alpha) + "_"
            + str(rows) + "/classification/1")

        h, w = hub.get_expected_image_size(model)

        features = model(inputs, signature="image_classification", as_dict=True)
        probs = tf.nn.softmax(features['default'])

        # Load local model
        with tf.variable_scope('keras'):
            model2 = MobileNetV2(weights=None,
                                 alpha=alpha,
                                 input_shape=(rows, rows, 3))
            model2.load_weights('./old_weights_nonhub/mobilenet_v2_weights_tf_dim_ordering_tf_kernels_' + str(alpha) + '_' + str(rows) + '.h5')
        
        preds1 = model2.predict(img)
        print('preds1: (remote weights) new BN no set w:: ',
              nets.utils.decode_predictions(preds1))

        with tf.Session() as sess:
            sess.run(tf.global_variables_initializer())
            weights = tf.get_collection(
                tf.GraphKeys.GLOBAL_VARIABLES, scope='module/MobilenetV2')
            values = sess.run(weights)
            values[-2] = np.delete(np.squeeze(values[-2]), 0, axis=-1)
            values[-1] = np.delete(values[-1], 0, axis=-1)
            sess.close()

        # Save weights no top and model
        model2.set_weights(values)
        model2.save_weights(WEIGHTS_SAVE_PATH_INCLUDE_TOP)
        model2_no_top = Model(input=model2.input, output=model2.get_layer('out_relu').output)
        model2_no_top.save_weights(WEIGHTS_SAVE_PATH_NO_TOP)

        # Predictions with new BN, new weights
        preds2 = model2.predict(img)

        print('preds2: (after set weights) ',
              nets.utils.decode_predictions(preds2)) 
Author: JonathanCMitchell | Project: mobilenet_v2_keras | Lines: 59 | Source: load_hub_weights.py
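A hypothetical invocation sketch: each entry is an (alpha, rows) pair, i.e. a MobileNet V2 width multiplier and input resolution for which a matching TF Hub module exists (the specific pairs below are assumptions):

# Hypothetical call; the (alpha, rows) configurations are only examples.
models_to_convert = [(1.0, 224), (0.75, 192)]
load_hub_weights(models_to_convert)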


Note: the tensorflow_hub.get_expected_image_size examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Do not reproduce without permission.