This article collects typical code examples of the tensorflow.squeeze method in Python. If you are wondering how exactly to use tensorflow.squeeze, what it is for, or what calling it looks like in practice, the curated examples below may help. You can also explore further usage examples of the tensorflow module in which the method lives.
The following shows 15 code examples of the tensorflow.squeeze method, sorted by popularity by default.
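Before the examples, a minimal sketch of what tf.squeeze does (this illustration is mine, not taken from the examples below): it removes dimensions of size 1, either all of them or only the axes you list.

import tensorflow as tf

x = tf.zeros([1, 28, 28, 1])
print(tf.squeeze(x).shape)            # (28, 28) -- every size-1 dimension removed
print(tf.squeeze(x, axis=[0]).shape)  # (28, 28, 1) -- only the leading batch dimension removed
# Asking to squeeze a dimension whose size is not 1 (e.g. axis=[1] here) raises an error.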
Example 1: _aspect_preserving_resize
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def _aspect_preserving_resize(image, smallest_side):
  """Resize images preserving the original aspect ratio.

  Args:
    image: A 3-D image `Tensor`.
    smallest_side: A python integer or scalar `Tensor` indicating the size of
      the smallest side after resize.

  Returns:
    resized_image: A 3-D tensor containing the resized image.
  """
  smallest_side = tf.convert_to_tensor(smallest_side, dtype=tf.int32)

  shape = tf.shape(image)
  height = shape[0]
  width = shape[1]
  new_height, new_width = _smallest_size_at_least(height, width, smallest_side)
  image = tf.expand_dims(image, 0)
  resized_image = tf.image.resize_bilinear(image, [new_height, new_width],
                                           align_corners=False)
  resized_image = tf.squeeze(resized_image)
  resized_image.set_shape([None, None, 3])
  return resized_image
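A shape-only sketch of the expand_dims / squeeze round trip used above; the numbers are invented and the resize itself is elided.

import tensorflow as tf

image = tf.zeros([240, 320, 3])        # a single H x W x C image
batched = tf.expand_dims(image, 0)     # (1, 240, 320, 3) -- resize ops expect a batch
# ... resize the batched tensor here ...
restored = tf.squeeze(batched)         # (240, 320, 3) again
print(batched.shape, restored.shape)
# Note: without an explicit axis, tf.squeeze would also drop a height or width of 1;
# that is why the next example passes axis [0] instead.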
Example 2: eval_image
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def eval_image(image, height, width, scope=None):
  """Prepare one image for evaluation.

  Args:
    image: 3-D float Tensor
    height: integer
    width: integer
    scope: Optional scope for name_scope.

  Returns:
    3-D float Tensor of prepared image.
  """
  with tf.name_scope(values=[image, height, width], name=scope,
                     default_name='eval_image'):
    # Crop the central region of the image with an area containing 87.5% of
    # the original image.
    image = tf.image.central_crop(image, central_fraction=0.875)

    # Resize the image to the original height and width.
    image = tf.expand_dims(image, 0)
    image = tf.image.resize_bilinear(image, [height, width],
                                     align_corners=False)
    image = tf.squeeze(image, [0])
    return image
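A rough standalone version of the same crop / resize / squeeze pattern; I am assuming a TF2-style environment and using tf.image.resize in place of the TF1 resize_bilinear, so treat it as a sketch rather than a drop-in equivalent.

import tensorflow as tf

image = tf.random.uniform([480, 640, 3])                        # dummy float image
cropped = tf.image.central_crop(image, central_fraction=0.875)
batched = tf.expand_dims(cropped, 0)                            # add the batch dim resize expects
resized = tf.image.resize(batched, [224, 224], method='bilinear')
result = tf.squeeze(resized, [0])                               # drop only the batch dim
print(result.shape)                                             # (224, 224, 3)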
Example 3: __call__
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def __call__(self, input):
    with tf.variable_scope(self.name, reuse=self._reuse):
        if not self._reuse:
            print('\033[93m'+self.name+'\033[0m')
        _ = input
        num_channel = [32, 64, 128, 256, 256, 512]
        num_layer = np.ceil(np.log2(min(_.shape.as_list()[1:3]))).astype(np.int)
        for i in range(num_layer):
            ch = num_channel[i] if i < len(num_channel) else 512
            _ = conv2d(_, ch, self._is_train, info=not self._reuse,
                       norm=self._norm_type, name='conv{}'.format(i+1))
        _ = conv2d(_, int(num_channel[i]/4), self._is_train, k=1, s=1,
                   info=not self._reuse, norm='None', name='conv{}'.format(i+2))
        _ = conv2d(_, self._num_class+1, self._is_train, k=1, s=1, info=not self._reuse,
                   activation_fn=None, norm='None',
                   name='conv{}'.format(i+3))
        _ = tf.squeeze(_)
        if not self._reuse:
            log.info('discriminator output {}'.format(_.shape.as_list()))
        self._reuse = True
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, self.name)
        return tf.nn.sigmoid(_), _
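The final tf.squeeze(_) above assumes the conv stack has reduced the spatial map to 1x1, so only the two spatial singleton dims disappear. A toy shape check of my own, with invented sizes:

import tensorflow as tf

logits = tf.zeros([8, 1, 1, 11])     # (batch, 1, 1, num_class + 1) after the 1x1 convs
print(tf.squeeze(logits).shape)      # (8, 11)
# Caveat: with a batch of one, the batch dim is size 1 as well and gets squeezed away too:
print(tf.squeeze(tf.zeros([1, 1, 1, 11])).shape)   # (11,)
# Passing axis=[1, 2] would keep the batch dimension in that case.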
Example 4: get_prob
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def get_prob(self, state):
    """
    ### PROBLEM 3
    ### YOUR CODE HERE

    args:
        state: np array (batch_size, ob_dim)

    TODO:
        likelihood:
            evaluate the discriminator D(x,x) on the same input
        prob:
            compute the probability density of x from the discriminator
            likelihood (see homework doc)
    """
    likelihood = self.get_likelihood(state, state)
    # avoid divide by 0 and log(0)
    likelihood = np.clip(np.squeeze(likelihood), 1e-5, 1-1e-5)
    prob = (1 - likelihood) / likelihood
    return prob
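The same likelihood-to-density step in plain NumPy, with made-up numbers standing in for the discriminator output:

import numpy as np

likelihood = np.array([[0.2], [0.99], [1e-9]])                # e.g. shape (batch_size, 1)
likelihood = np.clip(np.squeeze(likelihood), 1e-5, 1 - 1e-5)  # (batch_size,), clipped away from 0 and 1
prob = (1 - likelihood) / likelihood                          # large where the likelihood is small
print(likelihood.shape, prob)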
Example 5: simulate
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def simulate(self, action):
  with tf.name_scope("environment/simulate"):  # Do we need this?
    initializer = (tf.zeros(self.old_shape, dtype=tf.float32),
                   tf.fill((len(self),), 0.0), tf.fill((len(self),), False))

    def not_done_step(a, _):
      reward, done = self._batch_env.simulate(action)
      with tf.control_dependencies([reward, done]):
        r0 = self._batch_env.observ + 0
        r1 = tf.add(a[1], reward)
        r2 = tf.logical_or(a[2], done)
        return (r0, r1, r2)

    simulate_ret = tf.scan(not_done_step, tf.range(self.skip),
                           initializer=initializer, parallel_iterations=1,
                           infer_shape=False)
    observations, rewards, dones = simulate_ret
    split_observations = tf.split(observations, self.skip, axis=0)
    split_observations = [tf.squeeze(o, axis=0) for o in split_observations]
    observation = tf.concat(split_observations, axis=-1)
    with tf.control_dependencies([self._observ.assign(observation)]):
      return tf.identity(rewards[-1, ...]), tf.identity(dones[-1, ...])
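A standalone sketch of the split / squeeze / concat step at the end of simulate; the batch size, frame size, and skip count here are invented:

import tensorflow as tf

skip = 4
observations = tf.zeros([skip, 2, 84, 84, 3])       # as if tf.scan stacked `skip` frames
split_observations = tf.split(observations, skip, axis=0)                  # skip tensors of shape (1, 2, 84, 84, 3)
split_observations = [tf.squeeze(o, axis=0) for o in split_observations]   # each (2, 84, 84, 3)
observation = tf.concat(split_observations, axis=-1)                       # (2, 84, 84, 3 * skip)
print(observation.shape)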
Example 6: set_precision
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def set_precision(predictions, labels,
                  weights_fn=common_layers.weights_nonzero):
  """Precision of set predictions.

  Args:
    predictions : A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_precision", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
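What the label reshaping above does, on toy shapes (my own illustration; in tensor2tensor the labels arrive padded out to rank 4):

import tensorflow as tf

nlabels = 5
labels = tf.constant([[[[1]], [[3]], [[3]]]])   # (batch=1, seq_length=3, 1, 1)
labels = tf.squeeze(labels, [2, 3])             # (1, 3)
labels = tf.one_hot(labels, nlabels)            # (1, 3, 5)
labels = tf.reduce_max(labels, axis=1)          # (1, 5) -- multi-hot set membership
print(tf.cast(labels, tf.bool))                 # True at indices 1 and 3 only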
Example 7: set_recall
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def set_recall(predictions, labels, weights_fn=common_layers.weights_nonzero):
  """Recall of set predictions.

  Args:
    predictions : A Tensor of scores of shape [batch, nlabels].
    labels: A Tensor of int32s giving true set elements,
      of shape [batch, seq_length].
    weights_fn: A function to weight the elements.

  Returns:
    hits: A Tensor of shape [batch, nlabels].
    weights: A Tensor of shape [batch, nlabels].
  """
  with tf.variable_scope("set_recall", values=[predictions, labels]):
    labels = tf.squeeze(labels, [2, 3])
    weights = weights_fn(labels)
    labels = tf.one_hot(labels, predictions.shape[-1])
    labels = tf.reduce_max(labels, axis=1)
    labels = tf.cast(labels, tf.bool)
    return tf.to_float(tf.equal(labels, predictions)), weights
Example 8: rouge_l_fscore
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def rouge_l_fscore(predictions, labels, **unused_kwargs):
  """ROUGE scores computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    rouge_l_fscore: approx rouge-l f1 score.
  """
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  # Convert the outputs and labels to a [batch_size, input_length] tensor.
  outputs = tf.squeeze(outputs, axis=[-1, -2])
  labels = tf.squeeze(labels, axis=[-1, -2])
  rouge_l_f_score = tf.py_func(rouge_l_sentence_level, (outputs, labels),
                               tf.float32)
  return rouge_l_f_score, tf.constant(1.0)
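The negative axes in the squeeze above strip the two trailing singleton dims that tensor2tensor appends to id tensors; a minimal shape check of my own:

import tensorflow as tf

labels = tf.zeros([16, 50, 1, 1], dtype=tf.int32)   # (batch, length, 1, 1)
print(tf.squeeze(labels, axis=[-1, -2]).shape)      # (16, 50)
# tf.squeeze accepts negative axis values, counted from the last dimension.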
Example 9: rouge_2_fscore
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def rouge_2_fscore(predictions, labels, **unused_kwargs):
  """ROUGE-2 F1 score computation between labels and predictions.

  This is an approximate ROUGE scoring method since we do not glue word pieces
  or decode the ids and tokenize the output.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    rouge2_fscore: approx rouge-2 f1 score.
  """
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  # Convert the outputs and labels to a [batch_size, input_length] tensor.
  outputs = tf.squeeze(outputs, axis=[-1, -2])
  labels = tf.squeeze(labels, axis=[-1, -2])
  rouge_2_f_score = tf.py_func(rouge_n, (outputs, labels), tf.float32)
  return rouge_2_f_score, tf.constant(1.0)
Example 10: bleu_score
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def bleu_score(predictions, labels, **unused_kwargs):
  """BLEU score computation between labels and predictions.

  An approximate BLEU scoring method since we do not glue word pieces or
  decode the ids and tokenize the output. By default, we use ngram order of 4
  and use brevity penalty. Also, this does not have beam search.

  Args:
    predictions: tensor, model predictions
    labels: tensor, gold output.

  Returns:
    bleu: int, approx bleu score
  """
  outputs = tf.to_int32(tf.argmax(predictions, axis=-1))
  # Convert the outputs and labels to a [batch_size, input_length] tensor.
  outputs = tf.squeeze(outputs, axis=[-1, -2])
  labels = tf.squeeze(labels, axis=[-1, -2])
  bleu = tf.py_func(compute_bleu, (labels, outputs), tf.float32)
  return bleu, tf.constant(1.0)
Example 11: decode
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def decode(self, bottleneck):
  """Auto-decode from the bottleneck and return the result."""
  # Get the shape from bottleneck and num channels.
  shape = common_layers.shape_list(bottleneck)
  try:
    num_channels = self.hparams.problem.num_channels
  except AttributeError:
    num_channels = 1
  dummy_targets = tf.zeros(shape[:-1] + [num_channels])
  # Set the bottleneck to decode.
  if len(shape) > 4:
    bottleneck = tf.squeeze(bottleneck, axis=[1])
  bottleneck = 2 * bottleneck - 1  # Be -1/1 instead of 0/1.
  self._cur_bottleneck_tensor = bottleneck
  # Run decoding.
  res = self.infer({"targets": dummy_targets})
  self._cur_bottleneck_tensor = None
  return res
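Shape-wise, the conditional squeeze above turns a rank-5 (video-style) bottleneck back into the rank-4 layout the decoder expects; the sizes below are invented for illustration:

import tensorflow as tf

bottleneck = tf.zeros([8, 1, 4, 4, 256])        # (batch, 1 frame, h, w, bottleneck_bits)
if len(bottleneck.shape) > 4:
  bottleneck = tf.squeeze(bottleneck, axis=[1])
bottleneck = 2 * bottleneck - 1                 # rescale 0/1 codes to -1/1, as above
print(bottleneck.shape)                         # (8, 4, 4, 256)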
Example 12: body
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def body(self, features):
  # Remove dropout if not training
  hparams = self._hparams
  targets = features["targets"]
  targets = tf.squeeze(targets, 2)

  (decoder_input, decoder_self_attention_bias) = attention_lm_prepare_decoder(
      targets, hparams)

  decoder_input = tf.nn.dropout(decoder_input,
                                1.0 - hparams.layer_prepostprocess_dropout)
  decoder_output = attention_lm_decoder(decoder_input,
                                        decoder_self_attention_bias, hparams)
  decoder_output = tf.expand_dims(decoder_output, 2)

  return decoder_output
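The body squeezes axis 2 on the way in and expands it again on the way out because tensor2tensor passes targets as rank-4 tensors; a toy round trip with assumed sizes:

import tensorflow as tf

targets = tf.zeros([4, 20, 1, 512])      # (batch, length, 1, hidden_size)
squeezed = tf.squeeze(targets, 2)        # (4, 20, 512) -- what the decoder consumes
restored = tf.expand_dims(squeezed, 2)   # (4, 20, 1, 512) -- back to the rank-4 layout
print(squeezed.shape, restored.shape)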
Example 13: infer
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def infer(self,
          features=None,
          decode_length=50,
          beam_size=1,
          top_beams=1,
          alpha=0.0,
          use_tpu=False):
  """Predict."""
  del decode_length, beam_size, top_beams, alpha, use_tpu
  assert features is not None
  logits, _ = self(features)  # pylint: disable=not-callable
  assert len(logits.get_shape()) == 5
  logits = tf.squeeze(logits, [1, 2, 3])
  log_probs = common_layers.log_prob_from_logits(logits)
  predictions, scores = common_layers.argmax_with_score(log_probs)
  return {
      "outputs": predictions,
      "scores": scores,
  }
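For a classification-style problem the logits come back as rank 5 with three singleton middle dims, so the squeeze collapses them to [batch, num_classes]. A shape-only sketch, with tf.nn.log_softmax standing in for common_layers.log_prob_from_logits (an assumption on my part):

import tensorflow as tf

logits = tf.zeros([32, 1, 1, 1, 10])          # (batch, 1, 1, 1, num_classes)
logits = tf.squeeze(logits, [1, 2, 3])        # (32, 10)
log_probs = tf.nn.log_softmax(logits)
predictions = tf.argmax(log_probs, axis=-1)   # (32,)
print(predictions.shape)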
Example 14: _create_greedy_infer_model
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def _create_greedy_infer_model(self):
  """Creates model for greedy inference testing.

  Returns:
    model: A t2t model.
    features: A map of string to tensor.
  """
  model, features = get_model(transformer.transformer_small())
  out_logits, _ = model(features)
  out_logits = tf.squeeze(out_logits, axis=[2, 3])
  loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
      logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
      labels=tf.reshape(features["targets"], [-1]))
  loss = tf.reduce_mean(loss)
  apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)

  with self.test_session():
    tf.global_variables_initializer().run()
    for _ in range(100):
      apply_grad.run()

  model.set_mode(tf.estimator.ModeKeys.PREDICT)

  return model, features
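The logits squeeze in the helper above goes from tensor2tensor's rank-5 layout to rank 3 before the reshape that the sparse softmax loss needs; roughly:

import tensorflow as tf

VOCAB_SIZE = 9
out_logits = tf.zeros([3, 8, 1, 1, VOCAB_SIZE])          # (batch, length, 1, 1, vocab)
out_logits = tf.squeeze(out_logits, axis=[2, 3])         # (3, 8, vocab)
flat_logits = tf.reshape(out_logits, [-1, VOCAB_SIZE])   # (24, vocab), ready for the loss
print(flat_logits.shape)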
Example 15: testGreedyTPUSlowVsFast
# Required module: import tensorflow [as alias]
# Or: from tensorflow import squeeze [as alias]
def testGreedyTPUSlowVsFast(self):
  if not tf_version_has_inplace_ops():
    return

  decode_length = 3

  model, features = self._create_greedy_infer_model()
  with tf.variable_scope(tf.get_variable_scope(), reuse=True):
    slow_result = model._slow_greedy_infer_tpu(
        features, decode_length)["outputs"]
    slow_result = tf.squeeze(slow_result, axis=[2, 3])

    fast_result = model._greedy_infer(
        features, decode_length, use_tpu=True)["outputs"]

  with self.test_session():
    slow_res = slow_result.eval()
    fast_res = fast_result.eval()

  self.assertEqual(fast_res.shape,
                   (BATCH_SIZE, INPUT_LENGTH + decode_length))
  self.assertAllClose(fast_res, slow_res)