This article collects and summarizes typical usage examples of the Python method tensorflow.python.ops.functional_ops.map_fn. If you are wondering what functional_ops.map_fn does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the module this method lives in, tensorflow.python.ops.functional_ops.
The following shows 11 code examples of the functional_ops.map_fn method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
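Before diving into the examples, here is a minimal sketch of what functional_ops.map_fn does: it unstacks `elems` along its first dimension, applies `fn` to each slice, and restacks the results (the public alias is tf.map_fn). The snippet below is illustrative only and assumes a TF 1.x graph-mode environment, which is what all of the examples on this page target.

# Minimal sketch (assumes TensorFlow 1.x graph mode).
import tensorflow as tf
from tensorflow.python.ops import functional_ops

x = tf.constant([1., 2., 3.])
# Square each element; map_fn unstacks x along axis 0, applies the lambda, and restacks.
y = functional_ops.map_fn(lambda t: t * t, x)

with tf.Session() as sess:
  print(sess.run(y))  # [1. 4. 9.]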
Example 1: inception_logits
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import map_fn [as alias]
def inception_logits(images=inception_images, num_splits=1):
  # images = tf.transpose(images, [0, 2, 3, 1])
  size = 299
  images = tf.image.resize_bilinear(images, [size, size])
  generated_images_list = array_ops.split(
      images, num_or_size_splits=num_splits)
  logits_ = functional_ops.map_fn(
      fn=functools.partial(tfgan.eval.run_inception, output_tensor='logits:0'),
      elems=array_ops.stack(generated_images_list),
      parallel_iterations=1,
      back_prop=False,
      swap_memory=True,
      name='RunClassifier')
  logits_ = array_ops.concat(array_ops.unstack(logits_), 0)
  return logits_
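Note that this example depends on module-level names that are not shown here (functools, tf, tfgan, array_ops, functional_ops, and the inception_images placeholder). The setup below is a reconstruction offered as an assumption, not code from the original file:

# Assumed surrounding setup for Example 1 (TF 1.x); reconstructed for illustration only.
import functools
import tensorflow as tf
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import functional_ops

tfgan = tf.contrib.gan  # tf.contrib.gan provides eval.run_inception in TF 1.x

# NHWC batch of images; run_inception expects values scaled to [-1, 1].
inception_images = tf.placeholder(tf.float32, [None, None, None, 3])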
Example 2: tensors_to_item
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import map_fn [as alias]
def tensors_to_item(self, keys_to_tensors):
  """See base class."""
  image_buffer = keys_to_tensors[self._image_key]
  image_format = keys_to_tensors[self._format_key]
  if self._repeated:
    return functional_ops.map_fn(lambda x: self._decode(x, image_format),
                                 image_buffer, dtype=self._dtype)
  else:
    return self._decode(image_buffer, image_format)
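The interesting branch here is `self._repeated`: when the key holds a whole batch of encoded image strings, each one is decoded with functional_ops.map_fn. Outside the item-handler class, the same pattern looks roughly like this (illustrative sketch, TF 1.x):

# Decode a 1-D tensor of JPEG-encoded strings into a stacked batch of uint8 images.
# Note: all images must decode to the same shape, or the results cannot be stacked.
import tensorflow as tf
from tensorflow.python.ops import functional_ops

encoded = tf.placeholder(tf.string, [None])  # batch of encoded image buffers
decoded = functional_ops.map_fn(
    lambda buf: tf.image.decode_jpeg(buf, channels=3),
    encoded, dtype=tf.uint8)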
Example 3: map_fn
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import map_fn [as alias]
def map_fn(fn, elems, name=None, dtype=None):
  """Map the function fn over the elements elems and return the outputs.

  Arguments:
      fn: Callable that will be called upon each element in elems
      elems: tensor
      name: A string name for the map node in the graph
      dtype: Output data type.

  Returns:
      Tensor with dtype `dtype`.
  """
  return functional_ops.map_fn(fn, elems, name=name, dtype=dtype)
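A quick usage sketch of this thin wrapper (it simply forwards to functional_ops.map_fn, so any per-element function works). The example below is illustrative and assumes TF 1.x graph mode:

# Compute the L2 norm of each row with the wrapper defined above (illustrative sketch).
import tensorflow as tf

rows = tf.constant([[3., 4.], [6., 8.]])
norms = map_fn(lambda r: tf.sqrt(tf.reduce_sum(r * r)), rows)

with tf.Session() as sess:
  print(sess.run(norms))  # [ 5. 10.]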
Example 4: sparse_boolean_mask
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import map_fn [as alias]
def sparse_boolean_mask(sparse_tensor, mask, name="sparse_boolean_mask"):
  """Boolean mask for `SparseTensor`s.

  Args:
    sparse_tensor: a `SparseTensor`.
    mask: a 1D boolean dense `Tensor` whose length is equal to the 0th dimension
      of `sparse_tensor`.
    name: optional name for this operation.

  Returns:
    A `SparseTensor` that contains row `k` of `sparse_tensor` iff `mask[k]` is
    `True`.
  """
  # TODO(jamieas): consider mask dimension > 1 for symmetry with `boolean_mask`.
  with ops.name_scope(name, values=[sparse_tensor, mask]):
    mask = ops.convert_to_tensor(mask)
    mask_rows = array_ops.where(mask)
    first_indices = array_ops.squeeze(array_ops.slice(sparse_tensor.indices,
                                                      [0, 0], [-1, 1]))

    # Identify indices corresponding to the rows identified by mask_rows.
    sparse_entry_matches = functional_ops.map_fn(
        lambda x: math_ops.equal(first_indices, x),
        mask_rows,
        dtype=dtypes.bool)

    # Combine the rows of index_matches to form a mask for the sparse indices
    # and values.
    to_retain = array_ops.reshape(
        functional_ops.foldl(math_ops.logical_or, sparse_entry_matches), [-1])

    return sparse_ops.sparse_retain(sparse_tensor, to_retain)
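A hedged usage sketch for this helper; it assumes the low-level modules used inside the function (ops, array_ops, math_ops, functional_ops, sparse_ops, dtypes) have already been imported from tensorflow.python, as in the original source:

# Keep only the entries of a 3x4 SparseTensor whose rows are selected by the mask.
import tensorflow as tf

sp = tf.SparseTensor(indices=[[0, 0], [1, 2], [2, 3]],
                     values=[10, 20, 30],
                     dense_shape=[3, 4])
mask = tf.constant([True, False, True])
masked = sparse_boolean_mask(sp, mask)  # entries in rows 0 and 2 survive; row 1 is emptied

with tf.Session() as sess:
  print(sess.run(masked))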
Example 5: inception_logits
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import map_fn [as alias]
def inception_logits(images, num_splits=1):
  images = tf.transpose(images, [0, 2, 3, 1])
  size = 299
  images = tf.image.resize_bilinear(images, [size, size])
  generated_images_list = array_ops.split(images, num_or_size_splits=num_splits)
  logits = functional_ops.map_fn(
      fn=functools.partial(tfgan.eval.run_inception, output_tensor='logits:0'),
      elems=array_ops.stack(generated_images_list),
      parallel_iterations=1,
      back_prop=False,
      swap_memory=True,
      name='RunClassifier')
  logits = array_ops.concat(array_ops.unstack(logits), 0)
  return logits
Example 6: inception_activations
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import map_fn [as alias]
def inception_activations(images, num_splits=1):
  images = tf.transpose(images, [0, 2, 3, 1])
  size = 299
  images = tf.image.resize_bilinear(images, [size, size])
  generated_images_list = array_ops.split(images, num_or_size_splits=num_splits)
  activations = functional_ops.map_fn(
      fn=functools.partial(tfgan.eval.run_inception, output_tensor='pool_3:0'),
      elems=array_ops.stack(generated_images_list),
      parallel_iterations=1,
      back_prop=False,
      swap_memory=True,
      name='RunClassifier')
  activations = array_ops.concat(array_ops.unstack(activations), 0)
  return activations
Example 7: inception_logits
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import map_fn [as alias]
def inception_logits(images=inception_images, num_splits=1):
  images = tf.transpose(images, [0, 2, 3, 1])
  size = 299
  images = tf.image.resize_bilinear(images, [size, size])
  generated_images_list = array_ops.split(
      images, num_or_size_splits=num_splits)
  logits = functional_ops.map_fn(
      fn=functools.partial(tfgan.eval.run_inception, output_tensor='logits:0'),
      elems=array_ops.stack(generated_images_list),
      parallel_iterations=1,
      back_prop=False,
      swap_memory=True,
      name='RunClassifier')
  logits = array_ops.concat(array_ops.unstack(logits), 0)
  return logits
Example 8: map_fn
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import map_fn [as alias]
def map_fn(fn, labeled_tensor, name=None):
  """Map on the list of tensors unpacked from labeled_tensor.

  See tf.map_fn.

  Args:
    fn: The function to apply to each unpacked LabeledTensor.
      It should have type LabeledTensor -> LabeledTensor.
    labeled_tensor: The input tensor.
    name: Optional op name.

  Returns:
    A tensor that packs the results of applying fn to the list of tensors
    unpacked from labeled_tensor.
  """
  with ops.name_scope(name, 'lt_map_fn', [labeled_tensor]) as scope:
    labeled_tensor = core.convert_to_labeled_tensor(labeled_tensor)
    unpack_lts = unpack(labeled_tensor)

    # TODO(ericmc): Fix this upstream.
    if labeled_tensor.dtype == dtypes.string:
      # We must construct the full graph here, because functional_ops.map_fn
      # doesn't work for string-valued tensors.
      # Constructing the full graph may be slow.
      map_lts = [fn(t) for t in unpack_lts]
      return pack(map_lts, list(labeled_tensor.axes.values())[0], name=scope)
    else:
      # Figure out what the axis labels should be, but use tf.map_fn to
      # construct the graph because it's efficient.
      # It may be slow to construct the full graph, so we infer the labels from
      # the first element.
      # TODO(ericmc): This builds a subgraph which then gets thrown away.
      # Find a more elegant solution.
      first_map_lt = fn(unpack_lts[0])
      final_axes = list(labeled_tensor.axes.values())[:1] + list(
          first_map_lt.axes.values())

      @tc.returns(ops.Tensor)
      @tc.accepts(ops.Tensor)
      def tf_fn(tensor):
        original_axes = list(labeled_tensor.axes.values())[1:]
        tensor_lt = core.LabeledTensor(tensor, original_axes)
        return fn(tensor_lt).tensor

      map_op = functional_ops.map_fn(tf_fn, labeled_tensor.tensor)
      map_lt = core.LabeledTensor(map_op, final_axes)

      return core.identity(map_lt, name=scope)
Example 9: compress
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import map_fn [as alias]
def compress(self, inputs):
  """Compress inputs and store their binary representations into strings.

  Args:
    inputs: `Tensor` with values to be compressed.

  Returns:
    String `Tensor` vector containing the compressed representation of each
    batch element of `inputs`.
  """
  with ops.name_scope(self._name_scope()):
    inputs = ops.convert_to_tensor(inputs)
    if not self.built:
      # Check input assumptions set before layer building, e.g. input rank.
      input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)
      if self.dtype is None:
        self._dtype = inputs.dtype.base_dtype.name
      self.build(inputs.shape)

    # Check input assumptions set after layer building, e.g. input shape.
    if not context.executing_eagerly():
      input_spec.assert_input_compatibility(self.input_spec, inputs, self.name)

    ndim = self.input_spec.ndim
    channel_axis = self._channel_axis(ndim)
    # Tuple of slices for expanding dimensions of tensors below.
    slices = ndim * [None] + [slice(None)]
    slices[channel_axis] = slice(None)
    slices = tuple(slices)

    # Expand dimensions of CDF to input dimensions, keeping the channels along
    # the right dimension.
    cdf = self._quantized_cdf[slices[1:]]
    num_levels = array_ops.shape(cdf)[-1] - 1

    # Bring inputs to the right range by centering the range on the medians.
    half = constant_op.constant(.5, dtype=self.dtype)
    medians = array_ops.squeeze(self._medians, [1, 2])
    offsets = (math_ops.cast(num_levels // 2, self.dtype) + half) - medians
    # Expand offsets to input dimensions and add to inputs.
    values = inputs + offsets[slices[:-1]]

    # Clip to range and cast to integers. Because we have added .5 above, and
    # all values are positive, the cast effectively implements rounding.
    values = math_ops.maximum(values, half)
    values = math_ops.minimum(
        values, math_ops.cast(num_levels, self.dtype) - half)
    values = math_ops.cast(values, dtypes.int16)

    def loop_body(tensor):
      return coder_ops.range_encode(
          tensor, cdf, precision=self.range_coder_precision)

    strings = functional_ops.map_fn(
        loop_body, values, dtype=dtypes.string, back_prop=False)

    if not context.executing_eagerly():
      strings.set_shape(inputs.shape[:1])

    return strings
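This method appears to come from an entropy-coding layer in the tensorflow/compression project; the key point is that compress() range-encodes each batch element independently via functional_ops.map_fn. The sketch below is an assumption about how such a layer is typically driven (the package name tensorflow_compression and the EntropyBottleneck class are assumed, not confirmed by the snippet itself):

# Hedged sketch, assuming tensorflow_compression's EntropyBottleneck layer (TF 1.x).
import tensorflow as tf
import tensorflow_compression as tfc  # assumed package

y = tf.random_normal([8, 16, 16, 192])                        # latents from some encoder
entropy_bottleneck = tfc.EntropyBottleneck()
y_tilde, likelihoods = entropy_bottleneck(y, training=False)  # builds the layer and its CDF tables
strings = entropy_bottleneck.compress(y)                      # one encoded string per batch element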
Example 10: inception_score
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import map_fn [as alias]
def inception_score(images, img_size=(299, 299), n_splits=10):
  """ referenced from https://github.com/tsc2017/Inception-Score/blob/master/inception_score.py """
  assert type(images) == np.ndarray
  assert len(images.shape) == 4
  assert images.shape[-1] == 3

  images = np.clip(images, 0., 255.)  # clipped into [0, 255]

  def inception_feat(img, n_splits=1):
    # img = tf.transpose(img, [0, 2, 3, 1])
    img = tf.image.resize_bilinear(img, img_size)
    generated_images_list = array_ops.split(img, num_or_size_splits=n_splits)
    logits = functional_ops.map_fn(
        fn=functools.partial(tf.contrib.gan.eval.run_inception, output_tensor="logits:0"),
        elems=array_ops.stack(generated_images_list),
        parallel_iterations=1,
        back_prop=False,
        swap_memory=True,
        name="RunClassifier"
    )
    logits = array_ops.concat(array_ops.unstack(logits), axis=0)
    return logits

  inception_images = tf.placeholder(tf.float32, [None, None, None, 3], name="inception-images")
  logits = inception_feat(inception_images)

  def get_inception_probs(x, n_classes=1000):
    n_batches = len(x) // batch_size
    preds = np.zeros([len(x), n_classes], dtype=np.float32)
    for i in range(n_batches):
      inp = x[i * batch_size:(i + 1) * batch_size] / 255. * 2 - 1.  # scaled into [-1, 1]
      preds[i * batch_size:(i + 1) * batch_size] = logits.eval({inception_images: inp})[:, :n_classes]
    preds = np.exp(preds) / np.sum(np.exp(preds), 1, keepdims=True)
    return preds

  def preds2score(preds, splits=10):
    scores = []
    for i in range(splits):
      part = preds[(i * preds.shape[0] // splits):((i + 1) * preds.shape[0] // splits), :]
      kl = part * (np.log(part) - np.log(np.expand_dims(np.mean(part, axis=0), axis=0)))
      kl = np.mean(np.sum(kl, axis=1))
      scores.append(np.exp(kl))
    return np.mean(scores), np.std(scores)

  preds = get_inception_probs(images)
  mean, std = preds2score(preds, splits=n_splits)
  return mean, std
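Two caveats before using this snippet: `batch_size` is a module-level global in the original source that is not shown here, and `logits.eval(...)` requires a default session. A hedged driver, assuming everything lives in one script, might look like this:

# Hedged driver for Example 10 (TF 1.x). batch_size is defined here only for illustration.
import numpy as np
import tensorflow as tf

batch_size = 50  # assumed value, not taken from the original source

fake_images = np.random.uniform(0., 255., size=(100, 64, 64, 3)).astype(np.float32)

with tf.Session().as_default():
  mean, std = inception_score(fake_images, n_splits=10)
  print("IS: %.3f +/- %.3f" % (mean, std))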
Example 11: fid_score
# Required import: from tensorflow.python.ops import functional_ops [as alias]
# Or: from tensorflow.python.ops.functional_ops import map_fn [as alias]
def fid_score(real_img, fake_img, img_size=(299, 299), n_splits=10):
  assert type(real_img) == np.ndarray and type(fake_img) == np.ndarray
  assert len(real_img.shape) == 4 and len(fake_img.shape) == 4
  assert real_img.shape[-1] == 3 and fake_img.shape[-1] == 3
  assert real_img.shape == fake_img.shape

  real_img = np.clip(real_img, 0., 255.)  # clipped into [0, 255]
  fake_img = np.clip(fake_img, 0., 255.)  # clipped into [0, 255]

  inception_images = tf.placeholder(tf.float32, [None, None, None, 3], name="inception-images")
  real_acts = tf.placeholder(tf.float32, [None, None], name="real_activations")
  fake_acts = tf.placeholder(tf.float32, [None, None], name="fake_activations")

  def inception_activation(images, n_splits=1):
    # images = tf.transpose(images, [0, 2, 3, 1])
    images = tf.image.resize_bilinear(images, img_size)
    generated_images_list = array_ops.split(images, num_or_size_splits=n_splits)
    acts = functional_ops.map_fn(
        fn=functools.partial(tf.contrib.gan.eval.run_inception, output_tensor="pool_3:0"),
        elems=array_ops.stack(generated_images_list),
        parallel_iterations=1,
        back_prop=False,
        swap_memory=True,
        name="RunClassifier"
    )
    acts = array_ops.concat(array_ops.unstack(acts), axis=0)
    return acts

  activations = inception_activation(inception_images)

  def get_inception_activations(x, feats=2048):
    n_batches = len(x) // batch_size
    acts = np.zeros([len(x), feats], dtype=np.float32)
    for i in range(n_batches):
      inp = x[i * batch_size:(i + 1) * batch_size] / 255. * 2 - 1.  # scaled into [-1, 1]
      acts[i * batch_size:(i + 1) * batch_size] = activations.eval({inception_images: inp})
    acts = np.exp(acts) / np.sum(np.exp(acts), 1, keepdims=True)
    return acts

  def get_fid(real, fake):
    return tf.contrib.gan.eval.frechet_classifier_distance_from_activations(real_acts, fake_acts).eval(
        feed_dict={
            real_acts: real,
            fake_acts: fake,
        }
    )

  real_img_acts = get_inception_activations(real_img)
  fake_img_acts = get_inception_activations(fake_img)
  fid = get_fid(real_img_acts, fake_img_acts)
  return fid