本文整理汇总了Python中preprocessing.preprocessing_factory.get_preprocessing方法的典型用法代码示例。如果您正苦于以下问题：Python preprocessing_factory.get_preprocessing方法的具体用法？Python preprocessing_factory.get_preprocessing怎么用？Python preprocessing_factory.get_preprocessing使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块preprocessing.preprocessing_factory
的用法示例。
在下文中一共展示了preprocessing_factory.get_preprocessing方法的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: _build_train_model
# 需要导入模块: from preprocessing import preprocessing_factory [as 别名]
# 或者: from preprocessing.preprocessing_factory import get_preprocessing [as 别名]
def _build_train_model(self):
    """Build the training graph for the style-swap model.

    Preprocesses a content batch and a style batch, swaps their features,
    reconstructs an image with the inverse net, and wires up the
    reconstruction training op.

    Side effects: sets self.swaped_tensor, self.generated and self.init_op,
    and registers image summaries.
    """
    size = self.PREPROCESS_SIZE
    # NOTE(review): is_training=False even though this is the train graph —
    # presumably to get deterministic, eval-style preprocessing; confirm intent.
    preprocess = preprocessing_factory.get_preprocessing(
        self.config.net_name, is_training=False)

    [content] = self.records_loader.get_data()
    content_batch = self.records_loader.batch_data(
        preprocess(content, size, size))

    style = self.style_loader.get_data()
    style_batch = self.style_loader.batch_data(
        preprocess(style, size, size))

    self.swaped_tensor = self._swap_net(content_batch, style_batch)
    self.generated = self._inverse_net(self.swaped_tensor)

    slim.summary.image("generated", self.generated)
    slim.summary.image("origin", content_batch)
    slim.summary.image("style", style_batch)

    self._train_inverse(self.generated, self.swaped_tensor)
    self.init_op = self._get_network_init_fn()
示例2: _build_evaluate_model
# 需要导入模块: from preprocessing import preprocessing_factory [as 别名]
# 或者: from preprocessing.preprocessing_factory import get_preprocessing [as 别名]
def _build_evaluate_model(self):
    """Build the inference graph for one content/style image pair.

    Creates float32 placeholders of shape (H, W, 3), preprocesses both
    inputs, swaps features and inverts them back to an image.

    Side effects: sets self.input_image, self.style_image,
    self.swaped_tensor, self.generated, self.evaluate_op (generated image
    with the batch dimension squeezed), self.init_op and
    self.save_variables.
    """
    self.input_image = tf.placeholder(tf.float32, shape=[None, None, 3])
    self.style_image = tf.placeholder(tf.float32, shape=[None, None, 3])

    preprocess = preprocessing_factory.get_preprocessing(
        self.config.net_name, is_training=False)

    # Fall back to the default preprocessing size when no explicit
    # evaluation resolution was configured.
    out_h = self.evaluate_height or self.PREPROCESS_SIZE
    out_w = self.evaluate_width or self.PREPROCESS_SIZE

    content = preprocess(self.input_image, out_h, out_w,
                         resize_side_min=min(out_h, out_w))
    images = tf.expand_dims(content, axis=0)

    style = preprocess(self.style_image,
                       self.PREPROCESS_SIZE, self.PREPROCESS_SIZE)
    style_images = tf.expand_dims(style, axis=0)

    self.swaped_tensor = self._swap_net(images, style_images)
    self.generated = self._inverse_net(self.swaped_tensor)
    self.evaluate_op = tf.squeeze(self.generated, axis=0)
    self.init_op = self._get_network_init_fn()
    self.save_variables = [v for v in tf.trainable_variables()
                           if v.name.startswith("inverse_net")]
示例3: _train_inverse
# 需要导入模块: from preprocessing import preprocessing_factory [as 别名]
# 或者: from preprocessing.preprocessing_factory import get_preprocessing [as 别名]
def _train_inverse(self, generated, swaped_tensor):
    """Attach the inverse-net reconstruction loss and its training op.

    Feeds the generated images back through the feature network (with
    variable reuse) and penalizes the L2 distance between the style-layer
    activations and the swapped target tensor.

    Args:
      generated: batched images produced by the inverse net.
      swaped_tensor: target activations from the swap net.

    Side effects: sets self.loss_op, self.save_variables and self.train_op.
    """
    preprocess = preprocessing_factory.get_preprocessing(
        self.config.net_name, is_training=False)
    network_fn = nets_factory.get_network_fn(
        self.config.net_name, num_classes=1, is_training=False)
    size = self.PREPROCESS_SIZE

    with tf.variable_scope("", reuse=True):
        # Preprocess each generated image individually, then re-batch.
        reprocessed = tf.stack([preprocess(img, size, size)
                                for img in tf.unstack(generated, axis=0)])
        _, endpoints = network_fn(reprocessed, spatial_squeeze=False)

    # Exactly one endpoint is expected to match the configured style layer;
    # the single-element unpack raises if that assumption breaks.
    [style_key] = [name for name in list(endpoints.keys())
                   if self.style_layer in name]
    inversed_style_layer = endpoints[style_key]

    tf.losses.add_loss(tf.nn.l2_loss(swaped_tensor - inversed_style_layer))
    self.loss_op = tf.losses.get_total_loss()

    inverse_vars = [v for v in tf.trainable_variables()
                    if v.name.startswith("inverse_net")]
    slim.summarize_tensor(self.loss_op, "loss")
    slim.summarize_tensors(inverse_vars)
    self.save_variables = inverse_vars

    learning_rate = tf.train.exponential_decay(
        self.config.learning_rate, self.global_step, 1000, 0.66,
        name="learning_rate")
    self.train_op = tf.train.AdamOptimizer(learning_rate).minimize(
        self.loss_op, self.global_step, inverse_vars)
示例4: _representative_dataset_gen
# 需要导入模块: from preprocessing import preprocessing_factory [as 别名]
# 或者: from preprocessing.preprocessing_factory import get_preprocessing [as 别名]
def _representative_dataset_gen():
    """Gets a python generator of numpy arrays for the given dataset.

    Builds a one-shot iterator over the configured TFDS split, applies
    either the model-specific preprocessing or the generic quantization
    preprocessing, and yields FLAGS.num_steps batches of shape
    [1, image_size, image_size, 3].
    """
    side = FLAGS.image_size

    builder = tfds.builder(FLAGS.dataset_name, data_dir=FLAGS.dataset_dir)
    builder.download_and_prepare()
    split_data = builder.as_dataset()[FLAGS.dataset_split]
    iterator = tf.data.make_one_shot_iterator(split_data)

    if FLAGS.use_model_specific_preprocessing:
        preprocess = functools.partial(
            preprocessing_factory.get_preprocessing(name=FLAGS.model_name),
            output_height=side,
            output_width=side)
    else:
        preprocess = functools.partial(
            _preprocess_for_quantization, image_size=side)

    features = iterator.get_next()
    image = preprocess(features["image"])
    image = tf.reshape(image, [1, side, side, 3])

    # Each eval() re-runs the graph, pulling the next element from the
    # one-shot iterator (requires a default session).
    for _ in range(FLAGS.num_steps):
        yield [image.eval()]
示例5: imagenet_input
# 需要导入模块: from preprocessing import preprocessing_factory [as 别名]
# 或者: from preprocessing.preprocessing_factory import get_preprocessing [as 别名]
def imagenet_input(is_training):
    """Data reader for imagenet.

    Reads in imagenet data and performs pre-processing on the images.

    Args:
      is_training: bool specifying if train or validation dataset is needed.
    Returns:
      A batch of images and labels.
    """
    split = 'train' if is_training else 'validation'
    dataset = dataset_factory.get_dataset('imagenet', split,
                                          FLAGS.dataset_dir)
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        shuffle=is_training,  # only shuffle the training split
        common_queue_capacity=2 * FLAGS.batch_size,
        common_queue_min=FLAGS.batch_size)
    image, label = provider.get(['image', 'label'])

    preprocess = preprocessing_factory.get_preprocessing(
        'mobilenet_v1', is_training=is_training)
    image = preprocess(image, FLAGS.image_size, FLAGS.image_size)

    images, labels = tf.train.batch(
        tensors=[image, label],
        batch_size=FLAGS.batch_size,
        num_threads=4,
        capacity=5 * FLAGS.batch_size)
    return images, labels
示例6: imagenet_input
# 需要导入模块: from preprocessing import preprocessing_factory [as 别名]
# 或者: from preprocessing.preprocessing_factory import get_preprocessing [as 别名]
def imagenet_input(is_training):
    """Data reader for imagenet.

    Reads in imagenet data and performs pre-processing on the images.

    Args:
      is_training: bool specifying if train or validation dataset is needed.
    Returns:
      A batch of images and one-hot encoded labels.
    """
    split = 'train' if is_training else 'validation'
    dataset = dataset_factory.get_dataset('imagenet', split,
                                          FLAGS.dataset_dir)
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        shuffle=is_training,  # only shuffle the training split
        common_queue_capacity=2 * FLAGS.batch_size,
        common_queue_min=FLAGS.batch_size)
    image, label = provider.get(['image', 'label'])

    preprocess = preprocessing_factory.get_preprocessing(
        'mobilenet_v1', is_training=is_training)
    image = preprocess(image, FLAGS.image_size, FLAGS.image_size)

    images, labels = tf.train.batch(
        [image, label],
        batch_size=FLAGS.batch_size,
        num_threads=4,
        capacity=5 * FLAGS.batch_size)
    labels = slim.one_hot_encoding(labels, FLAGS.num_classes)
    return images, labels
示例7: _select_image_preprocessing_fn
# 需要导入模块: from preprocessing import preprocessing_factory [as 别名]
# 或者: from preprocessing.preprocessing_factory import get_preprocessing [as 别名]
def _select_image_preprocessing_fn():
    """A wrapper around preprocessing_factory.get_preprocessing().

    Returns the factory's preprocessing callable, partially applied with
    the dtype / color-space / mean-subtraction / resize flags, or None if
    the factory returned no function.
    """
    name = FLAGS.preprocessing_name or FLAGS.generator_network
    base_fn = preprocessing_factory.get_preprocessing(
        name,
        is_training=FLAGS.is_training, )
    if base_fn is None:
        return base_fn
    # TODO: this is convoluted. Perhaps combine this into the preprocessing factory.
    return functools.partial(
        base_fn,
        dtype=GeneralModel._dtype_string_to_dtype(FLAGS.dataset_dtype),
        color_space=FLAGS.color_space,
        subtract_mean=FLAGS.subtract_mean,
        resize_mode=FLAGS.resize_mode,
    )
示例8: __get_images_labels
# 需要导入模块: from preprocessing import preprocessing_factory [as 别名]
# 或者: from preprocessing.preprocessing_factory import get_preprocessing [as 别名]
def __get_images_labels(self):
    """Build the evaluation input pipeline.

    Returns:
      A (images, labels) pair of batched tensors read sequentially
      (no shuffling) from the configured dataset split.
    """
    dataset = dataset_factory.get_dataset(
        self.dataset_name, self.dataset_split_name, self.dataset_dir)
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        shuffle=False,  # deterministic order for evaluation
        common_queue_capacity=2 * self.batch_size,
        common_queue_min=self.batch_size)
    image, label = provider.get(['image', 'label'])
    # Apply the configured label offset (e.g. to drop a background class).
    label -= self.labels_offset

    network_fn = nets_factory.get_network_fn(
        self.model_name,
        num_classes=(dataset.num_classes - self.labels_offset),
        is_training=False)

    preprocessing_name = self.preprocessing_name or self.model_name
    preprocess = preprocessing_factory.get_preprocessing(
        preprocessing_name,
        is_training=False)
    side = self.eval_image_size or network_fn.default_image_size
    image = preprocess(image, side, side)

    images, labels = tf.train.batch(
        [image, label],
        batch_size=self.batch_size,
        num_threads=self.num_preprocessing_threads,
        capacity=5 * self.batch_size)
    return images, labels
示例9: __get_images_labels
# 需要导入模块: from preprocessing import preprocessing_factory [as 别名]
# 或者: from preprocessing.preprocessing_factory import get_preprocessing [as 别名]
def __get_images_labels(self):
    """Build the training input pipeline.

    Returns:
      A (images, labels) pair dequeued from a prefetch queue; labels are
      one-hot encoded over the offset-adjusted class count.
    """
    dataset = dataset_factory.get_dataset(
        self.dataset_name, self.dataset_split_name, self.dataset_dir)
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        num_readers=self.num_readers,
        common_queue_capacity=20 * self.batch_size,
        common_queue_min=10 * self.batch_size)
    image, label = provider.get(['image', 'label'])
    # Apply the configured label offset (e.g. to drop a background class).
    label -= self.labels_offset

    network_fn = nets_factory.get_network_fn(
        self.model_name,
        num_classes=(dataset.num_classes - self.labels_offset),
        weight_decay=self.weight_decay,
        is_training=True)
    side = self.train_image_size or network_fn.default_image_size

    preprocessing_name = self.preprocessing_name or self.model_name
    preprocess = preprocessing_factory.get_preprocessing(
        preprocessing_name,
        is_training=True)
    image = preprocess(image, side, side)

    images, labels = tf.train.batch(
        [image, label],
        batch_size=self.batch_size,
        num_threads=self.num_preprocessing_threads,
        capacity=5 * self.batch_size)
    labels = slim.one_hot_encoding(
        labels, dataset.num_classes - self.labels_offset)

    # Prefetch so the training step never waits on the input pipeline.
    batch_queue = slim.prefetch_queue.prefetch_queue(
        [images, labels], capacity=2)
    images, labels = batch_queue.dequeue()
    return images, labels
示例10: __get_images_labels
# 需要导入模块: from preprocessing import preprocessing_factory [as 别名]
# 或者: from preprocessing.preprocessing_factory import get_preprocessing [as 别名]
def __get_images_labels(self):
    """Build the evaluation input pipeline, also keeping raw images.

    Returns:
      A (images, labels, image_raws) triple: preprocessed images for the
      network, offset-adjusted labels, and resized-but-unnormalized raw
      images for display.

    Side effects: stores the network fn as self.network_fn and the dataset
    as self.dataset.
    """
    dataset = dataset_factory.get_dataset(
        self.dataset_name, self.dataset_split_name, self.dataset_dir)
    provider = slim.dataset_data_provider.DatasetDataProvider(
        dataset,
        shuffle=False,  # deterministic order for evaluation
        common_queue_capacity=2 * self.batch_size,
        common_queue_min=self.batch_size)
    image_raw, label = provider.get(['image', 'label'])
    # Apply the configured label offset (e.g. to drop a background class).
    label -= self.labels_offset

    network_fn = nets_factory.get_network_fn(
        self.model_name,
        num_classes=(dataset.num_classes - self.labels_offset),
        is_training=False)

    preprocessing_name = self.preprocessing_name or self.model_name
    preprocess = preprocessing_factory.get_preprocessing(
        preprocessing_name,
        is_training=False)
    side = self.eval_image_size or network_fn.default_image_size
    image = preprocess(image_raw, side, side)

    # Preprocess the image for display purposes: resize only, keeping the
    # original pixel values (resize_images needs a batch dim).
    image_raw = tf.expand_dims(image_raw, 0)
    image_raw = tf.image.resize_images(image_raw, [side, side])
    image_raw = tf.squeeze(image_raw)

    images, labels, image_raws = tf.train.batch(
        [image, label, image_raw],
        batch_size=self.batch_size,
        num_threads=self.num_preprocessing_threads,
        capacity=5 * self.batch_size)

    self.network_fn = network_fn
    self.dataset = dataset
    return images, labels, image_raws