This article collects typical usage examples of the tensorflow.squeeze function in Python. If you have been wondering what exactly tf.squeeze does, how to call it, or what real code that uses it looks like, the curated examples below should help.
The following presents 15 code examples of the squeeze function, ordered by popularity.
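Before diving into the examples, here is a minimal sketch (TF 2.x, with made-up shapes) of what tf.squeeze does: it removes dimensions of size 1, all of them by default, or only the ones named in axis.
import tensorflow as tf

x = tf.zeros([1, 3, 1, 4])
print(tf.squeeze(x).shape)            # (3, 4): every size-1 axis removed
print(tf.squeeze(x, axis=[0]).shape)  # (3, 1, 4): only axis 0 removed
print(tf.squeeze(x, axis=[2]).shape)  # (1, 3, 4): only axis 2 removed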
Example 1: step_loss
def step_loss(self, state, action, time):
# cost:
x_h = tf.slice(state, [0, self.x_h_field[0]], [-1, 1])
x_t = tf.slice(state, [0, self.x_t_field[0]], [-1, self.n_t])
# 0. smooth acceleration policy
cost_accel = tf.square(action)
cost_accel_d = tf.mul(tf.pow(self.gamma, time), cost_accel)
# 1. forcing the host to move forward (until the right point of the roundabout)
cost_prog = tf.square(self.x_goal - x_h)
cost_prog_d = tf.mul(tf.pow(self.gamma, time), cost_prog)
cost_prog_d = tf.squeeze(cost_prog_d, squeeze_dims=[1])
# 2. keeping distance from vehicles ahead
# distance to other vehicles
x_abs_diffs = tf.abs(x_h - x_t)
# punish only vehicles closer than "require distance"
cost_acci = tf.nn.relu(self.require_distance - x_abs_diffs)
# punish only w.r.t. vehicles ahead
cost_acci = tf.mul(cost_acci, tf.to_float(x_h < x_t))
# sum over all vehicles
cost_acci = tf.reduce_sum(cost_acci)
# punish only when host is inside the roundabout (or very close to enter)
cost_acci = tf.mul(cost_acci, tf.to_float(x_h > -0.5 * self.host_length))
cost_acci_d = tf.mul(tf.pow(self.gamma, time), cost_acci)
cost_acci_d = tf.squeeze(cost_acci_d, squeeze_dims=[1])
return tf.transpose(tf.pack(values=[cost_accel_d, cost_prog_d, cost_acci_d], name='scan_return'))
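Note that squeeze_dims is the pre-1.0 spelling of today's axis argument, just as tf.mul and tf.pack later became tf.multiply and tf.stack. A minimal modern sketch of the discounted-cost squeeze above, with hypothetical shapes:
import tensorflow as tf

gamma = 0.99
time = tf.constant(3.0)
cost_prog = tf.square(tf.constant([[1.5], [0.5]]))  # shape (batch, 1)
cost_prog_d = tf.pow(gamma, time) * cost_prog       # still (batch, 1)
cost_prog_d = tf.squeeze(cost_prog_d, axis=[1])     # shape (batch,)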
Example 2: entropy
def entropy(self, n, p):
# Note that given n and p where p is a probability vector of
# length k, the entropy requires a sum over all
# possible configurations of a k-vector which sums to n. It's
# expensive.
# http://stackoverflow.com/questions/36435754/generating-a-numpy-array-with-all-combinations-of-numbers-that-sum-to-less-than
sess = tf.Session()
n = sess.run(tf.cast(tf.squeeze(n), dtype=tf.int32))
sess.close()
p = tf.cast(tf.squeeze(p), dtype=tf.float32)
if isinstance(n, np.int32):
k = get_dims(p)[0]
max_range = np.zeros(k, dtype=np.int32) + n
x = np.array([i for i in product(*(range(i+1) for i in max_range))
if sum(i)==n])
logpmf = self.logpmf(x, n, p)
return tf.reduce_sum(tf.mul(tf.exp(logpmf), logpmf))
else:
out = []
for j in range(n.shape[0]):
k = get_dims(p)[0]
max_range = np.zeros(k, dtype=np.int32) + n[j]
x = np.array([i for i in product(*(range(i+1) for i in max_range))
if sum(i)==n[j]])
logpmf = self.logpmf(x, n[j], p[j, :])
out += [tf.reduce_sum(tf.mul(tf.exp(logpmf), logpmf))]
return tf.pack(out)
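The expensive part is enumerating every k-vector of counts that sums to n. A self-contained sketch of that enumeration (plain NumPy; the function name is mine):
from itertools import product
import numpy as np

def compositions(n, k):
    """All nonnegative integer k-vectors whose entries sum to n."""
    return np.array([c for c in product(range(n + 1), repeat=k)
                     if sum(c) == n])

print(compositions(2, 3))  # 6 rows: (0, 0, 2), (0, 1, 1), ..., (2, 0, 0)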
Example 3: construct_embedding
def construct_embedding(self):
"""Builds an embedding function on top of images.
Method to be overridden by implementations.
Returns:
embeddings: A 2-d float32 `Tensor` of shape [batch_size, embedding_size]
holding the embedded images.
"""
with tf.variable_scope('tcn_net', reuse=self._reuse) as vs:
self._adaptation_scope = vs.name
net = self._pretrained_output
# Define some adaptation blocks on top of the pre-trained resnet output.
adaptation_blocks = []
adaptation_block_params = [map(
int, i.split('_')) for i in self._config.adaptation_blocks.split('-')]
for i, (depth, num_units) in enumerate(adaptation_block_params):
block = resnet_v2.resnet_v2_block(
'adaptation_block_%d' % i, base_depth=depth, num_units=num_units,
stride=1)
adaptation_blocks.append(block)
# Stack them on top of the resnet output.
net = resnet_utils.stack_blocks_dense(
net, adaptation_blocks, output_stride=None)
# Average pool the output.
net = tf.reduce_mean(net, [1, 2], name='adaptation_pool', keep_dims=True)
if self._config.emb_connection == 'fc':
# Use fully connected layer to project to embedding layer.
fc_hidden_sizes = self._config.fc_hidden_sizes
if fc_hidden_sizes == 'None':
fc_hidden_sizes = []
else:
fc_hidden_sizes = map(int, fc_hidden_sizes.split('_'))
fc_hidden_keep_prob = self._config.dropout.keep_fc
net = tf.squeeze(net)
for fc_hidden_size in fc_hidden_sizes:
net = slim.layers.fully_connected(net, fc_hidden_size)
if fc_hidden_keep_prob < 1.0:
net = slim.dropout(net, keep_prob=fc_hidden_keep_prob,
is_training=self._is_training)
# Connect last FC layer to embedding.
embedding = slim.layers.fully_connected(net, self._embedding_size,
activation_fn=None)
else:
# Use 1x1 conv layer to project to embedding layer.
embedding = slim.conv2d(
net, self._embedding_size, [1, 1], activation_fn=None,
normalizer_fn=None, scope='embedding')
embedding = tf.squeeze(embedding)
# Optionally L2 normalize the embedding.
if self._embedding_l2:
embedding = tf.nn.l2_normalize(embedding, dim=1)
return embedding
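One caveat worth flagging: tf.squeeze(net) with no axis, as used twice above, drops every size-1 dimension, including the batch dimension whenever batch_size == 1. A small sketch of the failure mode and the safer explicit-axis variant:
import tensorflow as tf

net = tf.zeros([1, 1, 1, 128])             # [batch, h, w, depth] after pooling, batch_size == 1
print(tf.squeeze(net).shape)               # (128,): the batch axis is gone too
print(tf.squeeze(net, axis=[1, 2]).shape)  # (1, 128): batch axis preserved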
Example 4: testSlowVsFast
def testSlowVsFast(self):
model, features = get_model(transformer.transformer_small())
decode_length = 3
out_logits, _ = model(features)
out_logits = tf.squeeze(out_logits, axis=[2, 3])
loss = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=tf.reshape(out_logits, [-1, VOCAB_SIZE]),
labels=tf.reshape(features["targets"], [-1]))
loss = tf.reduce_mean(loss)
apply_grad = tf.train.AdamOptimizer(0.001).minimize(loss)
with self.test_session():
tf.global_variables_initializer().run()
for _ in range(100):
apply_grad.run()
model.set_mode(tf.estimator.ModeKeys.PREDICT)
with tf.variable_scope(tf.get_variable_scope(), reuse=True):
greedy_result = model._slow_greedy_infer(
features, decode_length)["outputs"]
greedy_result = tf.squeeze(greedy_result, axis=[2, 3])
fast_result = model._greedy_infer(features, decode_length)["outputs"]
with self.test_session():
greedy_res = greedy_result.eval()
fast_res = fast_result.eval()
self.assertEqual(fast_res.shape, (BATCH_SIZE, INPUT_LENGTH + decode_length))
self.assertAllClose(greedy_res, fast_res)
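The decoder output above carries two degenerate axes, and both squeeze calls drop them in one shot by listing them in axis. A toy reproduction with hypothetical sizes:
import tensorflow as tf

out_logits = tf.zeros([4, 7, 1, 1, 32])           # [batch, length, 1, 1, vocab]
print(tf.squeeze(out_logits, axis=[2, 3]).shape)  # (4, 7, 32)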
Example 5: _inference
def _inference(self, x, site, dropout):
# Get each image from the pair
print(x.get_shape())
x_0 = tf.squeeze(x[:, :, :, 0])
x_1 = tf.squeeze(x[:, :, :, 1])
# Share weights between the two models of the pair
with tf.variable_scope("siamese") as scope:
model0 = self.build_model(x_0)
scope.reuse_variables()
model1 = self.build_model(x_1)
# Dot product layer
x = self.corr_layer(model0, model1)
N, M, F = x.get_shape()
x = tf.reshape(x, [int(N), int(M*F)])
site = tf.expand_dims(site, axis=1)
x = tf.concat(1, [x, site])
for i, M in enumerate(self.M[:-1]):
with tf.variable_scope('fc{}'.format(i + 1)):
x = tf.nn.dropout(x, dropout)
x = self.fc(x, M)
# Logits linear layer
with tf.variable_scope('logits'):
x = tf.nn.dropout(x, dropout)
x = self.fc(x, self.M[-1], relu=False)
return tf.squeeze(x) # tf.sigmoid(x)
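As an aside, integer indexing in TensorFlow already removes the indexed axis, so the squeezes on x_0 and x_1 only matter if some other dimension can be size 1; tf.unstack is an equivalent one-step way to split the pair. A sketch with assumed shapes:
import tensorflow as tf

x = tf.zeros([8, 32, 32, 2])           # a batch of image pairs
print(x[:, :, :, 0].shape)             # (8, 32, 32): the pair axis is already gone
print(tf.unstack(x, axis=3)[0].shape)  # (8, 32, 32): same result, both halves at once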
Example 6: preprocess_for_test
def preprocess_for_test(image, gt_boxes, gt_masks):
ih, iw = tf.shape(image)[0], tf.shape(image)[1]
## min size resizing
new_ih, new_iw = preprocess_utils._smallest_size_at_least(ih, iw, cfg.FLAGS.image_min_size)
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [new_ih, new_iw], align_corners=False)
image = tf.squeeze(image, axis=[0])
gt_masks = tf.expand_dims(gt_masks, -1)
gt_masks = tf.cast(gt_masks, tf.float32)
gt_masks = tf.image.resize_nearest_neighbor(gt_masks, [new_ih, new_iw], align_corners=False)
gt_masks = tf.cast(gt_masks, tf.int32)
gt_masks = tf.squeeze(gt_masks, axis=[-1])
scale_ratio = tf.to_float(new_ih) / tf.to_float(ih)
gt_boxes = preprocess_utils.resize_gt_boxes(gt_boxes, scale_ratio)
## zero mean image
image = tf.cast(image, tf.float32)
image = image / 256.0
image = (image - 0.5) * 2.0
image = tf.expand_dims(image, axis=0)
## rgb to bgr
image = tf.reverse(image, axis=[-1])
return image, gt_boxes, gt_masks
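The expand_dims/resize/squeeze sandwich appears because the resize ops only accept 4-D [batch, height, width, channels] input. The same round-trip in TF 2.x, with made-up sizes:
import tensorflow as tf

image = tf.zeros([480, 640, 3])
resized = tf.image.resize(image[tf.newaxis, ...], [600, 800])  # (1, 600, 800, 3)
image = tf.squeeze(resized, axis=[0])                          # (600, 800, 3)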
Example 7: inference_input
def inference_input():
"""Returns ops that convert raw image data to a 4D tensor representing a single image.
Taken from:
https://github.com/tensorflow/serving/blob/master/tensorflow_serving/example/inception_export.py
The input to the first op can be read using:
tf.gfile.FastGFile(image_filename, 'r').read()
"""
# Decode image into float range [0,1]
jpegs = tf.placeholder(tf.string, shape=(1), name='input')
image_buffer = tf.squeeze(jpegs, [0])
image = tf.image.decode_jpeg(image_buffer, channels=3)
image = tf.image.convert_image_dtype(image, dtype=tf.float32)
image = tf.image.central_crop(image, central_fraction=0.875)
image = tf.expand_dims(image, 0)
image = tf.image.resize_bilinear(image, [FLAGS.image_size, FLAGS.image_size], align_corners=False)
image = tf.squeeze(image, [0])
# Rescale the image to [-1, 1]
image = tf.sub(image, 0.5)
image = tf.mul(image, 2.0)
images = tf.expand_dims(image, 0)
return images, jpegs
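tf.squeeze is not limited to numeric tensors; here it converts the shape-(1,) string placeholder into the scalar string that decode_jpeg expects. A tiny sketch (the byte string is a stand-in, not a real JPEG):
import tensorflow as tf

jpegs = tf.constant(["<jpeg bytes>"])       # shape (1,), dtype string
image_buffer = tf.squeeze(jpegs, axis=[0])  # shape (), a scalar string
print(image_buffer.shape)                   # ()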
Example 8: randomly_scale_image_and_label
def randomly_scale_image_and_label(image, label=None, scale=1.0):
"""Randomly scales image and label.
Args:
image: Image with shape [height, width, 3].
label: Label with shape [height, width, 1].
scale: The value to scale image and label.
Returns:
Scaled image and label.
"""
# No random scaling if scale == 1.
if scale == 1.0:
return image, label
image_shape = tf.shape(image)
new_dim = tf.cast(
tf.cast([image_shape[0], image_shape[1]], tf.float32) * scale,
tf.int32)
# Need squeeze and expand_dims because image interpolation takes
# 4D tensors as input.
image = tf.squeeze(tf.image.resize_bilinear(
tf.expand_dims(image, 0),
new_dim,
align_corners=True), [0])
if label is not None:
label = tf.squeeze(tf.image.resize_nearest_neighbor(
tf.expand_dims(label, 0),
new_dim,
align_corners=True), [0])
return image, label
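A TF 2.x rendering of the same squeeze/expand_dims pattern might look like the sketch below, assuming a float image of shape [h, w, 3] and an integer label of shape [h, w, 1]; nearest-neighbor resizing keeps the label dtype intact:
import tensorflow as tf

def rescale(image, label, scale):
    new_dim = tf.cast(
        tf.cast(tf.shape(image)[:2], tf.float32) * scale, tf.int32)
    image = tf.squeeze(tf.image.resize(image[tf.newaxis], new_dim), [0])
    label = tf.squeeze(
        tf.image.resize(label[tf.newaxis], new_dim, method='nearest'), [0])
    return image, label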
Example 9: build_network
def build_network(self):
net_tensors = self.net_tensors
with self.net_graph.as_default(), tf.device(self.net_device):
logits = tf.placeholder(dtype=tf.float32, shape=(self.batch_size, self.image_classes))
labels = tf.placeholder(dtype=tf.int32, shape=(self.batch_size,))
lambs = tf.placeholder(dtype=tf.float32, shape=(self.image_classes,))
# apply a sigmoid function to the logits, then transpose
logits = tf.transpose(framwork.sig_func(logits))
# according to the labels, erase rows that are not in labels
labels_unique = tf.constant(range(self.image_classes), dtype=tf.int32)
labels_num = self.image_classes
logits = tf.gather(logits, indices=labels_unique)
lambs = tf.gather(lambs, indices=labels_unique)
# set the value of each row to True when it occurs in labels
templete = tf.tile(tf.expand_dims(labels_unique, dim=1), [1, self.batch_size])
labels_expand = tf.tile(tf.expand_dims(labels, dim=0), [labels_num, 1])
indict_logic = tf.equal(labels_expand, templete)
# split the tensor along rows
logit_list = tf.split(0, labels_num, logits)
indict_logic_list = tf.split(0, labels_num, indict_logic)
lamb_list = tf.split(0, self.image_classes, lambs)
logit_list = [tf.squeeze(item) for item in logit_list]
indict_logic_list = [tf.squeeze(item) for item in indict_logic_list]
left_right_tuples = list()
for i in range(self.image_classes):
left_right_tuples.append(framwork.lamb_func(logit_list[i], indict_logic_list[i], lamb=lamb_list[i]))
# func = framwork.lamb_func()
# left_right_tuples = map(func, logit_list, indict_logic_list, lamb_list)
net_tensors.update({'left_right_tuples': left_right_tuples, 'logits': logits, 'labels': labels,
'lambs': lambs})
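The per-item squeeze is needed because tf.split keeps the split axis around with size 1; tf.unstack removes it in a single step. A sketch with assumed sizes:
import tensorflow as tf

logits = tf.zeros([10, 64])                                   # [classes, batch]
rows = [tf.squeeze(r) for r in tf.split(logits, 10, axis=0)]  # each row: (64,)
rows_alt = tf.unstack(logits, axis=0)                         # same shapes, one call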
Example 10: build_detector
def build_detector(self):
img_size = self.config['image_size']
self.image_ph = tf.placeholder(shape=[None, None, 3],
dtype=tf.float32, name='img_ph')
self.seg_ph = tf.placeholder(shape=[None, None], dtype=tf.int32, name='seg_ph')
img = tf.image.resize_bilinear(tf.expand_dims(self.image_ph, 0),
(img_size, img_size))
self.net.create_trunk(img)
if args.detect:
self.net.create_multibox_head(self.loader.num_classes)
confidence = tf.nn.softmax(tf.squeeze(self.net.outputs['confidence']))
location = tf.squeeze(self.net.outputs['location'])
self.nms(location, confidence, self.bboxer.tiling)
if args.segment:
self.net.create_segmentation_head(self.loader.num_classes)
self.segmentation = self.net.outputs['segmentation']
seg_shape = tf.shape(self.image_ph)[:2]
self.segmentation = tf.image.resize_bilinear(self.segmentation, seg_shape)
self.segmentation = tf.cast(tf.argmax(tf.squeeze(self.segmentation), axis=-1), tf.int32)
self.segmentation = tf.reshape(self.segmentation, seg_shape)
self.segmentation.set_shape([None, None])
if not self.no_gt:
easy_mask = self.seg_ph <= self.loader.num_classes
predictions = tf.boolean_mask(self.segmentation, easy_mask)
labels = tf.boolean_mask(self.seg_ph, easy_mask)
self.mean_iou, self.iou_update = mean_iou(predictions, labels, self.loader.num_classes)
else:
self.mean_iou = tf.constant(0)
self.iou_update = tf.constant(0)
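The squeeze on the confidence head collapses the singleton batch axis before softmax. A minimal reproduction; pinning the axis explicitly is slightly safer than the bare squeeze used above:
import tensorflow as tf

confidence = tf.zeros([1, 100, 21])                      # [1, num_anchors, num_classes]
probs = tf.nn.softmax(tf.squeeze(confidence, axis=[0]))  # (100, 21)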
Example 11: buildConvolution
def buildConvolution(self):
q_embedding = self.tensors['q_embedding']
a_embedding = self.tensors['a_embedding']
with tf.name_scope('convolution'):
filter_shape = (self.params['filters'][0], self.wdim, 1, self.params['nb_filter'])
W = glorot_normal(filter_shape, name="W")
b = tf.Variable(tf.constant(0.0, shape=(self.params['nb_filter'],)), name="b")
q_conv = tf.nn.conv2d(
tf.expand_dims(q_embedding, -1),
W,
strides=[1,1,1,1],
padding="VALID",
name="q_conv"
)
a_conv = tf.nn.conv2d(
tf.expand_dims(a_embedding, -1),
W,
strides=[1,1,1,1],
padding="VALID",
name = "a_conv"
)
q_conv = tf.squeeze(q_conv, [2])
a_conv = tf.squeeze(a_conv, [2])
# shape = (batch, q_length, NUM_FILTERS)
q_relu = tf.nn.relu(tf.nn.bias_add(q_conv, b), name="q_relu")
# shape = (batch, a_length, NUM_FILTERS)
a_relu = tf.nn.relu(tf.nn.bias_add(a_conv, b), name="a_relu")
self.tensors['q_conv'] = q_conv
self.tensors['a_conv'] = a_conv
self.tensors['q_relu'] = q_relu
self.tensors['a_relu'] = a_relu
self.tensors.setdefault('weights', []).append(b)
self.tensors.setdefault('summary', []).append(tf.nn.zero_fraction(a_relu))
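The squeeze on axis 2 works because a "VALID" convolution whose filter spans the full embedding width collapses that axis to size 1. A self-contained sketch with hypothetical dimensions:
import tensorflow as tf

emb = tf.zeros([32, 40, 300, 1])    # [batch, length, wdim, 1]
w = tf.zeros([3, 300, 1, 64])       # filter height 3, covering all of wdim
conv = tf.nn.conv2d(emb, w, strides=[1, 1, 1, 1], padding='VALID')
print(conv.shape)                   # (32, 38, 1, 64): width collapsed to 1
print(tf.squeeze(conv, [2]).shape)  # (32, 38, 64)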
Example 12: _add_box_predictions_to_feature_maps
def _add_box_predictions_to_feature_maps(self, feature_maps):
"""Adds box predictors to each feature map and returns concatenated results.
Args:
feature_maps: a list of tensors where the ith tensor has shape
[batch, height_i, width_i, depth_i]
Returns:
box_encodings: 4-D float tensor of shape [batch_size, num_anchors,
box_code_dimension] containing predicted boxes.
class_predictions_with_background: 2-D float tensor of shape
[batch_size, num_anchors, num_classes+1] containing class predictions
(logits) for each of the anchors. Note that this tensor *includes*
background class predictions (at class index 0).
Raises:
RuntimeError: if the number of feature maps extracted via the
extract_features method does not match the length of the
num_anchors_per_locations list that was passed to the constructor.
RuntimeError: if box_encodings from the box_predictor does not have
shape of the form [batch_size, num_anchors, 1, code_size].
"""
num_anchors_per_location_list = [1]
if len(feature_maps) != len(num_anchors_per_location_list):
raise RuntimeError('the number of feature maps must match the '
'length of self.anchors.NumAnchorsPerLocation().')
box_encodings_list = []
mask_encodings_list = []
for idx, (feature_map, num_anchors_per_location
) in enumerate(zip(feature_maps, num_anchors_per_location_list)):
box_predictor_scope = 'BoxPredictor_{}'.format(idx)
box_predictions = self._box_predictor.predict(feature_map,
num_anchors_per_location,
box_predictor_scope)
box_encodings = box_predictions[bpredictor.BOX_ENCODINGS]
mask_encodings = box_predictions[bpredictor.MASK_PREDICTIONS]
box_encodings_shape = box_encodings.get_shape().as_list()
if len(box_encodings_shape) != 5 or box_encodings_shape[2] != 1:
raise RuntimeError('box_encodings from the box_predictor must be of '
'shape `[batch_size, num_anchors, 1, code_size]`; '
'actual shape', box_encodings_shape)
box_encodings = tf.squeeze(box_encodings, axis=2)
mask_encodings = tf.squeeze(mask_encodings, axis=2)
box_encodings_list.append(box_encodings)
mask_encodings_list.append(mask_encodings)
"""
num_predictions = sum(
[tf.shape(box_encodings)[1] for box_encodings in box_encodings_list])
num_anchors = self.anchors.num_boxes()
anchors_assert = tf.assert_equal(num_anchors, num_predictions, [
'Mismatch: number of anchors vs number of predictions', num_anchors,
num_predictions
])
"""
box_encodings = tf.concat(box_encodings_list, 1)
mask_encodings = tf.concat(mask_encodings_list, 1)
return box_encodings, mask_encodings
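The shape check above guarantees that axis 2 is the fixed size-1 axis, so the squeeze is safe regardless of batch size or anchor count. In isolation:
import tensorflow as tf

box_encodings = tf.zeros([8, 1917, 1, 4])       # [batch, num_anchors, 1, code_size]
print(tf.squeeze(box_encodings, axis=2).shape)  # (8, 1917, 4)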
Example 13: _match
def _match(self, similarity_matrix, valid_rows):
"""Bipartite matches a collection rows and columns. A greedy bi-partite.
TODO(rathodv): Add num_valid_columns options to match only that many columns
with all the rows.
Args:
similarity_matrix: Float tensor of shape [N, M] with pairwise similarity
where higher values mean more similar.
valid_rows: A boolean tensor of shape [N] indicating the rows that are
valid.
Returns:
match_results: int32 tensor of shape [M] with match_results[i]=-1
meaning that column i is not matched and otherwise that it is matched to
row match_results[i].
"""
valid_row_sim_matrix = tf.gather(similarity_matrix,
tf.squeeze(tf.where(valid_rows), axis=-1))
invalid_row_sim_matrix = tf.gather(
similarity_matrix,
tf.squeeze(tf.where(tf.logical_not(valid_rows)), axis=-1))
similarity_matrix = tf.concat(
[valid_row_sim_matrix, invalid_row_sim_matrix], axis=0)
# Convert similarity matrix to distance matrix as tf.image.bipartite tries
# to find minimum distance matches.
distance_matrix = -1 * similarity_matrix
num_valid_rows = tf.reduce_sum(tf.to_float(valid_rows))
_, match_results = image_ops.bipartite_match(
distance_matrix, num_valid_rows=num_valid_rows)
match_results = tf.reshape(match_results, [-1])
match_results = tf.cast(match_results, tf.int32)
return match_results
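tf.where on a 1-D boolean mask returns indices of shape [num_true, 1], which is why both gathers squeeze the last axis first. A small sketch:
import tensorflow as tf

valid_rows = tf.constant([True, False, True])
idx = tf.squeeze(tf.where(valid_rows), axis=-1)  # [0, 2], shape (2,)
sim = tf.zeros([3, 5])
print(tf.gather(sim, idx).shape)                 # (2, 5): only the valid rows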
Example 14: conpute_loss
def conpute_loss(scores, target):
""" Compute the perplexity of the batch
Args:
scores: 4D tensor, shape=(BATCH_SIZE, 1, S_FRENCH, T_FRENCH)
target: 4D tensor, shape=(BATCH_SIZE, 1, S_FRENCH, T_FRENCH)
Returns:
tf.float32 tensor
"""
with tf.name_scope('Loss_computation'):
sortie_loss = tf.squeeze(target)
scores = tf.squeeze(scores)
loss = tf.reduce_sum(tf.mul(scores, sortie_loss), reduction_indices=2) # Get the activation of the target token
#loss = tf.reduce_sum(loss,reduction_indices=2)
loss = tf.clip_by_value(loss, clip_value_min=1e-10, clip_value_max=1.0)
#loss =
loss = tf.reduce_sum(tf.log(loss), reduction_indices=1)
loss = -tf.reduce_mean(loss)
l2_weights = 0.00
with tf.variable_scope('Embeddings', reuse=True):
w = tf.get_variable('weights')
b = tf.get_variable('biases')
loss = loss + l2_weights*tf.nn.l2_loss(w) + l2_weights*tf.nn.l2_loss(b)
with tf.variable_scope('Decoding', reuse=True):
w = tf.get_variable('weights')
b = tf.get_variable('biases')
loss = loss + l2_weights*tf.nn.l2_loss(w) + l2_weights*tf.nn.l2_loss(b)
return loss
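Both squeezes above strip the size-1 axis so that scores and target line up element-wise; with the hypothetical shapes from the docstring this looks like:
import tensorflow as tf

scores = tf.zeros([16, 1, 30, 4000])       # (BATCH_SIZE, 1, S_FRENCH, T_FRENCH)
print(tf.squeeze(scores, axis=[1]).shape)  # (16, 30, 4000)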
Example 15: convolution
def convolution(self, inputs, num_units):
x = tf.expand_dims(inputs, 3)
chan_in = 1
#Bigram
w_bigram = tf.get_variable("w_bigram", shape= [2,50,chan_in,num_units],
initializer= tf.contrib.layers.xavier_initializer_conv2d())
b_bigram = tf.get_variable("b_bigram", shape= [num_units])
y_bigram = self.nonlin(tf.nn.conv2d(x, w_bigram, strides= [1,1,1,1], padding='VALID') + b_bigram)
h_bigram = tf.reduce_max(tf.squeeze(y_bigram) , 1)
#Trigram
w_trigram = tf.get_variable("w_trigram", shape= [3,50,chan_in,num_units],
initializer= tf.contrib.layers.xavier_initializer_conv2d())
b_trigram = tf.get_variable("b_trigram", shape= [num_units])
y_trigram = self.nonlin(tf.nn.conv2d(x, w_trigram, strides= [1,1,1,1], padding='VALID') + b_trigram)
h_trigram = tf.reduce_max(tf.squeeze(y_trigram) , 1)
#Quin-gram
w_quingram = tf.get_variable("w_quingram", shape= [5,50,chan_in,num_units],
initializer= tf.contrib.layers.xavier_initializer_conv2d())
b_quingram = tf.get_variable("b_quingram", shape= [num_units])
y_quingram = self.nonlin(tf.nn.conv2d(x, w_quingram, strides= [1,1,1,1], padding='VALID') + b_quingram)
h_quingram = tf.reduce_max(tf.squeeze(y_quingram) , 1)
if self.hyperparams['conv_type'] == 'bigram':
h = h_bigram
elif self.hyperparams['conv_type'] == 'trigram':
h = h_trigram
elif self.hyperparams['conv_type'] == 'quingram':
h = h_quingram
elif self.hyperparams['conv_type'] == 'inception':
h = tf.concat(1, [h_bigram, h_trigram, h_quingram])
return h
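Each h_* above is max-over-time pooling: squeeze the collapsed width axis, then take the max over the time axis. A sketch with explicit axes, which also survives batch_size == 1:
import tensorflow as tf

y = tf.zeros([32, 49, 1, 100])                      # [batch, time, 1, num_units]
h = tf.reduce_max(tf.squeeze(y, axis=[2]), axis=1)  # (32, 100)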