This article collects typical usage examples of the keras.backend.squeeze method in Python. If you have been wondering what exactly backend.squeeze does, how to call it, or what real-world uses of it look like, the curated code examples below may help. You can also explore other usage examples from the module this method belongs to, keras.backend.
The following presents 14 code examples of backend.squeeze, drawn from open-source projects and sorted by popularity by default.
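Before the project examples, here is a minimal sketch of what K.squeeze itself does, assuming a TensorFlow-backed Keras install: it removes one axis of size 1 from a tensor.

import numpy as np
from keras import backend as K

x = K.constant(np.zeros((2, 5, 1)))  # shape (2, 5, 1)
y = K.squeeze(x, axis=-1)            # drop the trailing size-1 axis
print(K.int_shape(y))                # (2, 5)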
Example 1: step
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def step(self, x, states):
    h = states[0]
    # states[1] necessary?
    # comes from the constants
    X_static = states[-2]
    # equals K.dot(static_x, self._W1) + self._b2 with X.shape=[bs, L, static_input_dim]
    total_x_static_prod = states[-1]
    # expand dims to add the vector which is only valid for this time step
    # to total_x_prod which is valid for all time steps
    hw = K.expand_dims(K.dot(h, self._W2), 1)
    additive_atn = total_x_static_prod + hw
    attention = K.softmax(K.dot(additive_atn, self._V), axis=1)
    static_x_weighted = K.sum(attention * X_static, [1])
    x = K.dot(K.concatenate([x, static_x_weighted], 1), self._W3) + self._b3
    h, new_states = self.layer.cell.call(x, states[:-2])
    # append attention to the states to "smuggle" it out of the RNN wrapper
    attention = K.squeeze(attention, -1)
    h = K.concatenate([h, attention])
    return h, new_states
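Because the attention weights are concatenated onto the hidden state, a caller has to split them apart again outside the RNN wrapper. A hypothetical sketch of that split; the toy sizes and the outputs name are assumptions, not part of the project above:

import numpy as np
from keras import backend as K

units, timesteps = 4, 7                      # assumed toy sizes
outputs = K.constant(np.random.rand(2, units + timesteps))
h_out = outputs[:, :units]                   # the actual hidden state
attention_out = outputs[:, units:]           # the smuggled attention weights
print(K.int_shape(h_out), K.int_shape(attention_out))  # (2, 4) (2, 7)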
Example 2: rpn_class_loss_graph
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
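A hedged usage sketch of rpn_class_loss_graph as defined above, with toy tensors whose shapes follow the docstring (assuming a TensorFlow-backed Keras, and that tensorflow is imported as tf at module scope, as the snippet requires):

import numpy as np
from keras import backend as K

rpn_match = K.constant([[[1.], [-1.], [0.]]])  # [batch=1, anchors=3, 1]
rpn_class_logits = K.constant(np.random.randn(1, 3, 2).astype('float32'))
loss = rpn_class_loss_graph(rpn_match, rpn_class_logits)
print(K.eval(loss))  # scalar loss over the two non-neutral anchors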
Example 3: step
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def step(self, x_input, states):
    # print("x_input:", x_input, x_input.shape)
    # <TensorType(float32, matrix)>
    input_shape = self.input_spec[0].shape
    en_seq = states[-1]
    _, [h, c] = super(PointerLSTM, self).step(x_input, states[:-1])
    # vt*tanh(W1*e + W2*d)
    dec_seq = K.repeat(h, input_shape[1])
    Eij = time_distributed_dense(en_seq, self.W1, output_dim=1)
    Dij = time_distributed_dense(dec_seq, self.W2, output_dim=1)
    U = self.vt * tanh(Eij + Dij)
    U = K.squeeze(U, 2)
    # make probability tensor
    pointer = softmax(U)
    return pointer, [h, c]
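The additive pointer score vt*tanh(W1*e + W2*d) can be reproduced with backend ops alone. A minimal sketch with assumed toy dimensions (batch 2, encoder length 5, hidden size 8), standing in for the time_distributed_dense helper:

import numpy as np
from keras import backend as K

en_seq = K.constant(np.random.rand(2, 5, 8))  # encoder states
dec_h = K.constant(np.random.rand(2, 8))      # current decoder state
W1 = K.constant(np.random.rand(8, 1))
W2 = K.constant(np.random.rand(8, 1))

Eij = K.dot(en_seq, W1)                       # [2, 5, 1]
Dij = K.dot(K.repeat(dec_h, 5), W2)           # [2, 5, 1]
U = K.squeeze(K.tanh(Eij + Dij), 2)           # [2, 5]
pointer = K.softmax(U)                        # distribution over input positions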
Example 4: rpn_class_loss_graph
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Crossentropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
Example 5: _backward
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def _backward(gamma, mask):
    '''Backward recurrence of the linear chain crf.'''
    gamma = K.cast(gamma, 'int32')

    def _backward_step(gamma_t, states):
        y_tm1 = K.squeeze(states[0], 0)
        y_t = batch_gather(gamma_t, y_tm1)
        return y_t, [K.expand_dims(y_t, 0)]

    initial_states = [K.expand_dims(K.zeros_like(gamma[:, 0, 0]), 0)]
    _, y_rev, _ = K.rnn(_backward_step,
                        gamma,
                        initial_states,
                        go_backwards=True)
    y = K.reverse(y_rev, 1)

    if mask is not None:
        mask = K.cast(mask, dtype='int32')
        # mask output
        y *= mask
        # set masked values to -1
        y += -(1 - mask)
    return y
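batch_gather is a project helper that is not shown here. Assuming it picks, for each batch row, the entry selected by that row's index, a minimal stand-in could look like this:

import tensorflow as tf
from keras import backend as K

def batch_gather(reference, indices):
    # reference: [batch, n], indices: [batch] -> output: [batch]
    batch_idx = K.arange(0, K.shape(reference)[0])
    return tf.gather_nd(reference, K.stack([batch_idx, indices], axis=1))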
Example 6: _target_class_loss
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def _target_class_loss(
        self,
        target_class,
        box_scores,
        box_class_probs_logits):
    """Evaluate target_class_loss w.r.t. the input."""
    box_scores = K.squeeze(box_scores, axis=0)
    box_class_probs_logits = K.squeeze(box_class_probs_logits, axis=0)
    import tensorflow as tf
    boi_idx = tf.where(box_scores[:, target_class] > self._score)
    loss_box_class_conf = tf.reduce_mean(
        tf.gather(box_class_probs_logits[:, target_class], boi_idx))
    # Avoid the propagation of nan
    return tf.cond(
        tf.is_nan(loss_box_class_conf),
        lambda: tf.constant(0.),
        lambda: loss_box_class_conf)
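The tf.cond/tf.is_nan guard matters because reduce_mean over an empty gather yields NaN when no box clears the score threshold. A toy sketch of just that pattern, using the same TF1-style API as the snippet:

import tensorflow as tf

scores = tf.constant([0.5, 0.7])
empty = tf.gather(scores, tf.constant([], dtype=tf.int64))  # no boxes selected
loss = tf.reduce_mean(empty)                                # evaluates to NaN
safe_loss = tf.cond(tf.is_nan(loss), lambda: tf.constant(0.), lambda: loss)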
Example 7: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def call(self, x, mask=None):
    # size of x: [batch_size, seq_len, attention_dim]
    # size of u: [batch_size, attention_dim]
    # uit = tanh(xW + b)
    uit = K.tanh(K.bias_add(K.dot(x, self.W), self.b))
    ait = K.dot(uit, self.u)
    ait = K.squeeze(ait, -1)
    ait = K.exp(ait)

    if mask is not None:
        # Cast the mask to floatX to avoid float64 upcasting in theano
        ait *= K.cast(mask, K.floatx())
    ait /= K.cast(K.sum(ait, axis=1, keepdims=True) + K.epsilon(), K.floatx())
    ait = K.expand_dims(ait)
    weighted_input = x * ait
    output = K.sum(weighted_input, axis=1)
    return output
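The exp-then-normalize lines implement a masked softmax by hand. A self-contained toy sketch of just that trick (the shapes are assumptions):

import numpy as np
from keras import backend as K

scores = K.constant(np.random.rand(2, 4))  # [batch, seq_len]
mask = K.constant([[1., 1., 0., 0.],
                   [1., 1., 1., 0.]])

ait = K.exp(scores) * mask                 # zero out padded positions
ait /= K.sum(ait, axis=1, keepdims=True) + K.epsilon()
print(K.eval(K.sum(ait, axis=1)))          # each row sums to ~1.0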
Example 8: preprocess_input
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def preprocess_input(self, inputs, training=None):
    if self.window_size > 1:
        inputs = K.temporal_padding(inputs, (self.window_size - 1, 0))
    inputs = K.expand_dims(inputs, 2)  # add a dummy dimension

    output = K.conv2d(inputs, self.kernel, strides=self.strides,
                      padding='valid',
                      data_format='channels_last')
    output = K.squeeze(output, 2)  # remove the dummy dimension
    if self.use_bias:
        output = K.bias_add(output, self.bias, data_format='channels_last')

    if self.dropout is not None and 0. < self.dropout < 1.:
        z = output[:, :, :self.units]
        f = output[:, :, self.units:2 * self.units]
        o = output[:, :, 2 * self.units:]
        f = K.in_train_phase(1 - _dropout(1 - f, self.dropout), f, training=training)
        return K.concatenate([z, f, o], -1)
    else:
        return output
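The expand_dims/conv2d/squeeze sequence runs a causal 1D convolution through the 2D convolution API. A stand-alone sketch of the same trick with assumed toy shapes:

import numpy as np
from keras import backend as K

x = K.constant(np.random.rand(2, 10, 3))         # [batch, time, channels]
kernel = K.constant(np.random.rand(2, 1, 3, 6))  # [window, 1, in, out]

x = K.temporal_padding(x, (1, 0))                # left-pad by window_size - 1
x = K.expand_dims(x, 2)                          # [batch, time, 1, channels]
y = K.conv2d(x, kernel, strides=(1, 1), padding='valid',
             data_format='channels_last')
y = K.squeeze(y, 2)                              # back to [batch, time, out]
print(K.int_shape(y))                            # (2, 10, 6)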
Example 9: _compute_probabilities
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def _compute_probabilities(self, energy, previous_attention=None):
    if self.is_monotonic:
        # add pre-sigmoid noise to encourage discreteness
        sigmoid_noise = K.in_train_phase(1., 0.)
        noise = K.random_normal(K.shape(energy), mean=0.0, stddev=sigmoid_noise)
        # encourage discreteness in train
        energy = K.in_train_phase(energy + noise, energy)
        p = K.in_train_phase(K.sigmoid(energy),
                             K.cast(energy > 0, energy.dtype))
        p = K.squeeze(p, -1)
        p_prev = K.squeeze(previous_attention, -1)
        # monotonic attention function from tensorflow
        at = K.in_train_phase(
            tf.contrib.seq2seq.monotonic_attention(p, p_prev, 'parallel'),
            tf.contrib.seq2seq.monotonic_attention(p, p_prev, 'hard'))
        at = K.expand_dims(at, -1)
    else:
        # softmax
        at = keras.activations.softmax(energy, axis=1)
    return at
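K.in_train_phase does the heavy lifting here: it selects one symbolic branch during training and another at inference. A minimal sketch of the soft/hard switch in isolation (toy shape assumed):

import numpy as np
from keras import backend as K

energy = K.constant(np.random.randn(2, 5))
at_train = K.sigmoid(energy)                # soft branch, used in training
at_test = K.cast(energy > 0, energy.dtype)  # hard branch, used at inference
at = K.in_train_phase(at_train, at_test)    # picked by the learning phase flag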
Example 10: rpn_class_loss_graph
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    '''RPN anchor classifier loss.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for FG/BG.
    '''
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
Example 11: rpn_class_loss_graph
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def rpn_class_loss_graph(rpn_match, rpn_class_logits):
    """RPN anchor classifier loss.
    rpn_match: [batch, anchors, 1]. Anchor match type. 1=positive,
               -1=negative, 0=neutral anchor.
    rpn_class_logits: [batch, anchors, 2]. RPN classifier logits for BG/FG.
    """
    # Squeeze last dim to simplify
    rpn_match = tf.squeeze(rpn_match, -1)
    # Get anchor classes. Convert the -1/+1 match to 0/1 values.
    anchor_class = K.cast(K.equal(rpn_match, 1), tf.int32)
    # Positive and negative anchors contribute to the loss,
    # but neutral anchors (match value = 0) don't.
    indices = tf.where(K.not_equal(rpn_match, 0))
    # Pick rows that contribute to the loss and filter out the rest.
    rpn_class_logits = tf.gather_nd(rpn_class_logits, indices)
    anchor_class = tf.gather_nd(anchor_class, indices)
    # Cross entropy loss
    loss = K.sparse_categorical_crossentropy(target=anchor_class,
                                             output=rpn_class_logits,
                                             from_logits=True)
    loss = K.switch(tf.size(loss) > 0, K.mean(loss), tf.constant(0.0))
    return loss
Example 12: call
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def call(self, inputs, **kwargs):
    """Evaluate YOLO model on given input and return filtered boxes."""
    yolo_outputs = inputs[0:-1]
    input_image_shape = K.squeeze(inputs[-1], axis=0)
    num_layers = len(yolo_outputs)
    # default anchor grouping
    anchor_mask = [[6, 7, 8], [3, 4, 5], [0, 1, 2]] if num_layers == 3 \
        else [[3, 4, 5], [1, 2, 3]]
    input_shape = K.shape(yolo_outputs[0])[1:3] * 32
    boxes = []
    box_scores = []
    for l in range(num_layers):
        _boxes, _box_scores = yolo_boxes_and_scores(
            yolo_outputs[l], self.anchors[anchor_mask[l]],
            self.num_classes, input_shape, input_image_shape)
        boxes.append(_boxes)
        box_scores.append(_box_scores)
    boxes = K.concatenate(boxes, axis=0)
    box_scores = K.concatenate(box_scores, axis=0)
    return [boxes, box_scores]
Example 13: gripper_coordinate_y_pred
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def gripper_coordinate_y_pred(y_true, y_pred):
    """Get the predicted value at the coordinate found in y_true.
    # Arguments
        y_true: [ground_truth_label, y_height_coordinate, x_width_coordinate]
            Shape of y_true is [batch_size, 3].
        y_pred: Predicted values with shape [batch_size, img_height, img_width, 1].
    """
    with K.name_scope(name="gripper_coordinate_y_pred") as scope:
        if keras.backend.ndim(y_true) == 4:
            # Sometimes the dimensions are expanded from 2 to 4
            # to meet Keras' expectations; reduce them back to 2.
            y_true = K.squeeze(y_true, axis=-1)
            y_true = K.squeeze(y_true, axis=-1)
        yx_coordinate = K.cast(y_true[:, 1:], 'int32')
        yx_shape = K.shape(yx_coordinate)
        sample_index = K.expand_dims(K.arange(yx_shape[0]), axis=-1)
        byx_coordinate = K.concatenate([sample_index, yx_coordinate], axis=-1)
        # maybe need to transpose yx_coordinate?
        gripper_coordinate_y_predicted = tf.gather_nd(y_pred, byx_coordinate)
        return gripper_coordinate_y_predicted
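The sample_index/concatenate/gather_nd sequence builds per-sample [batch, y, x] coordinates for a batched lookup. A toy sketch with assumed shapes (tensorflow imported as tf, as the snippet requires):

import numpy as np
import tensorflow as tf
from keras import backend as K

y_pred = K.constant(np.random.rand(2, 4, 4, 1))  # [batch, h, w, 1]
y_true = K.constant([[1., 0., 3.],               # [label, y, x] per sample
                     [0., 2., 1.]])

yx = K.cast(y_true[:, 1:], 'int32')              # [batch, 2]
sample_index = K.expand_dims(K.arange(K.shape(yx)[0]), axis=-1)
byx = K.concatenate([sample_index, yx], axis=-1) # [[0, 0, 3], [1, 2, 1]]
picked = tf.gather_nd(y_pred, byx)               # [batch, 1]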
Example 14: gripper_coordinate_y_true
# Required import: from keras import backend [as alias]
# Or: from keras.backend import squeeze [as alias]
def gripper_coordinate_y_true(y_true, y_pred=None):
    """Get the label found in y_true, which also contains coordinates.
    # Arguments
        y_true: [ground_truth_label, y_height_coordinate, x_width_coordinate]
            Shape of y_true is [batch_size, 3].
        y_pred: Predicted values with shape [batch_size, img_height, img_width, 1].
    """
    with K.name_scope(name="gripper_coordinate_y_true") as scope:
        if keras.backend.ndim(y_true) == 4:
            # Sometimes the dimensions are expanded from 2 to 4
            # to meet Keras' expectations; reduce them back to 2.
            y_true = K.squeeze(y_true, axis=-1)
            y_true = K.squeeze(y_true, axis=-1)
        label = K.cast(y_true[:, :1], 'float32')
        return label