This page collects typical usage examples of the Python method tensorflow.keras.backend.softmax. If you are wondering what backend.softmax does, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples of the module it belongs to, tensorflow.keras.backend.
The following 14 code examples of backend.softmax are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
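Before the examples, a minimal sketch of the basic call (using only the public Keras backend API) may help orient what follows:

import tensorflow as tf
from tensorflow.keras import backend as K

x = K.constant([[1.0, 2.0, 3.0],
                [1.0, 1.0, 1.0]])
probs = K.softmax(x)                   # normalizes over the last axis by default
print(K.eval(K.sum(probs, axis=-1)))   # each row sums to 1.0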
Example 1: call
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def call(self, inputs):
    if self.data_mode == 'disjoint':
        X, I = inputs
        if K.ndim(I) == 2:
            I = I[:, 0]
    else:
        X = inputs

    # One attention coefficient per node, normalized with softmax.
    attn_coeff = K.dot(X, self.attn_kernel)
    attn_coeff = K.squeeze(attn_coeff, -1)
    attn_coeff = K.softmax(attn_coeff)

    if self.data_mode == 'single':
        output = K.dot(attn_coeff[None, ...], X)
    elif self.data_mode == 'batch':
        output = K.batch_dot(attn_coeff, X)
    else:  # 'disjoint': attention-weighted sum of node features per graph
        output = attn_coeff[:, None] * X
        output = tf.math.segment_sum(output, I)
    return output
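As a rough illustration of what the 'single' branch computes, a hypothetical standalone sketch (toy sizes, not the layer's actual API):

import numpy as np
from tensorflow.keras import backend as K

N, F = 4, 3                                  # 4 nodes with 3 features each
X = K.constant(np.random.rand(N, F))
attn_kernel = K.constant(np.random.rand(F, 1))

coeff = K.softmax(K.squeeze(K.dot(X, attn_kernel), -1))  # one weight per node
pooled = K.dot(coeff[None, ...], X)                      # weighted sum, shape (1, F)
print(pooled.shape)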
Example 2: _softmax
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def _softmax(x, axis=-1, alpha=1):
    """
    Softmax activation function.

    Builds on the Keras implementation, with an additional alpha parameter.

    # Arguments
        x: Tensor.
        axis: Integer, axis along which the softmax normalization is applied.
        alpha: a value by which to multiply all of x (an inverse temperature).

    # Returns
        Tensor, output of softmax transformation.

    # Raises
        ValueError: In case `ndim(x) == 1`.
    """
    x = alpha * x
    ndim = K.ndim(x)
    if ndim == 2:
        return K.softmax(x)
    elif ndim > 2:
        # Subtract the max for numerical stability before exponentiating.
        e = K.exp(x - K.max(x, axis=axis, keepdims=True))
        s = K.sum(e, axis=axis, keepdims=True)
        return e / s
    else:
        raise ValueError('Cannot apply softmax to a tensor that is 1D')
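A quick illustration of alpha acting as an inverse temperature (assuming the _softmax above and the backend imported as K): larger alpha sharpens the distribution.

x = K.constant([[1.0, 2.0, 3.0]])
print(K.eval(_softmax(x, alpha=1)))  # ~[[0.09, 0.24, 0.67]]
print(K.eval(_softmax(x, alpha=5)))  # ~[[0.00, 0.01, 0.99]]: mass shifts to the largest logit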
Example 3: _quilt
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def _quilt(patches, patch_size, grid_size, patch_stride, verbose=False, **kwargs):
    assert len(patches.shape) >= 2, "patches has bad shape %s" % pformat(patches.shape)

    # reshape to be [nb_patches x nb_vox]
    patches = np.reshape(patches, (patches.shape[0], -1, 1))

    # quilt
    quilted_vol = pl.quilt(patches, patch_size, grid_size, patch_stride=patch_stride, **kwargs)
    assert quilted_vol.ndim == len(patch_size), "problem with dimensions after quilt"

    # return
    return quilted_vol

# TO MOVE (numpy softmax)
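For orientation, a hedged sketch of the shape bookkeeping only (the external pl.quilt call is not reproduced here; the grid layout below is a hypothetical example):

import numpy as np

patch_size = (8, 8, 8)
grid_size = (4, 4, 4)                 # hypothetical patch grid
nb_patches = int(np.prod(grid_size))  # 64 patches expected by the grid
patches = np.random.rand(nb_patches, *patch_size)

# Reshape to [nb_patches, nb_vox, 1], exactly as _quilt does before quilting.
flat = np.reshape(patches, (patches.shape[0], -1, 1))
print(flat.shape)  # (64, 512, 1)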
Example 4: softmax
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def softmax(x, axis):
    """
    Softmax of a numpy array along a given axis.
    """
    return np.exp(x) / np.sum(np.exp(x), axis=axis, keepdims=True)
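This direct form can overflow for large inputs. A numerically stable variant (a standard trick, not part of the original snippet) subtracts the per-axis maximum first:

import numpy as np

def softmax_stable(x, axis):
    """Numerically stable numpy softmax (hypothetical helper)."""
    e = np.exp(x - np.max(x, axis=axis, keepdims=True))
    return e / np.sum(e, axis=axis, keepdims=True)

print(softmax_stable(np.array([[1000.0, 1001.0]]), axis=-1))  # no overflow: ~[[0.27, 0.73]]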
Example 5: softmax_ratio
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def softmax_ratio(y_true, y_pred):
    anchor, positive, negative = tf.unstack(y_pred)

    positive_distance = _euclidean_distance(anchor, positive)
    negative_distance = _euclidean_distance(anchor, negative)

    # Softmax over the two distances; ideally all mass lands on the negative one.
    softmax = K.softmax(K.concatenate([positive_distance, negative_distance]))
    ideal_distance = K.variable([0, 1])
    return K.mean(K.maximum(softmax - ideal_distance, 0))
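The _euclidean_distance helper is not shown on this page. A plausible definition, consistent with its use above (an assumption, not the original code), keeps the last axis so the two distances can be concatenated and softmaxed:

from tensorflow.keras import backend as K

def _euclidean_distance(x, y):
    # Hypothetical helper: row-wise Euclidean distance with keepdims=True,
    # so each sample yields a (..., 1) distance that K.concatenate can pair up.
    return K.sqrt(K.maximum(K.sum(K.square(x - y), axis=-1, keepdims=True),
                            K.epsilon()))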
Example 6: softmax_ratio_pn
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def softmax_ratio_pn(y_true, y_pred):
    anchor, positive, negative = tf.unstack(y_pred)

    anchor_positive_distance = _euclidean_distance(anchor, positive)
    anchor_negative_distance = _euclidean_distance(anchor, negative)
    positive_negative_distance = _euclidean_distance(positive, negative)

    # Compare the anchor-positive distance against the smaller of the two
    # distances involving the negative sample.
    minimum_distance = K.min(K.concatenate([anchor_negative_distance,
                                            positive_negative_distance]),
                             axis=-1, keepdims=True)

    softmax = K.softmax(K.concatenate([anchor_positive_distance, minimum_distance]))
    ideal_distance = K.variable([0, 1])
    return K.mean(K.maximum(softmax - ideal_distance, 0))
Example 7: update_neurons
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def update_neurons(self):
    """Update neurons according to activation function."""

    # Update membrane potentials.
    new_mem = self.get_new_mem()

    # Generate spikes.
    if hasattr(self, 'activation_str') and self.activation_str == 'softmax':
        output_spikes = self.softmax_activation(new_mem)
    else:
        output_spikes = self.linear_activation(new_mem)

    # Reset membrane potential after spikes.
    self.set_reset_mem(new_mem, output_spikes)

    # Store refractory period after spikes.
    if hasattr(self, 'activation_str') and self.activation_str == 'softmax':
        # We do not constrain softmax output neurons.
        new_refrac = tf.identity(self.refrac_until)
    else:
        new_refrac = tf.where(k.not_equal(output_spikes, 0),
                              k.ones_like(output_spikes) *
                              (self.time + self.tau_refrac),
                              self.refrac_until)
    self.add_update([(self.refrac_until, new_refrac)])

    if self.spiketrain is not None:
        self.add_update([(self.spiketrain, self.time * k.cast(
            k.not_equal(output_spikes, 0), k.floatx()))])

    # Compute post-synaptic potential.
    psp = self.get_psp(output_spikes)

    return k.cast(psp, k.floatx())
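The refractory bookkeeping above hinges on a single tf.where pattern: wherever a spike occurred, the refractory end time becomes time + tau_refrac; elsewhere the old value is kept. A standalone toy check (made-up values):

import tensorflow as tf
from tensorflow.keras import backend as k

spikes = k.constant([0., 1., 0., 1.])
refrac_until = k.constant([0., 0., 5., 0.])
time, tau_refrac = 2.0, 3.0

new_refrac = tf.where(k.not_equal(spikes, 0),
                      k.ones_like(spikes) * (time + tau_refrac),
                      refrac_until)
print(k.eval(new_refrac))  # [0. 5. 5. 5.]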
Example 8: softmax_activation
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def softmax_activation(mem):
    """Softmax activation."""
    # Spike stochastically: neuron i fires with probability softmax(mem)[i].
    return k.cast(k.less_equal(k.random_uniform(k.shape(mem)),
                               k.softmax(mem)), k.floatx())
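Averaged over many draws, each neuron's spike rate should approach its softmax probability. A hedged standalone check (toy membrane potentials):

import numpy as np
from tensorflow.keras import backend as k

mem = k.constant([[1.0, 2.0, 3.0]])
draws = [k.eval(k.cast(k.less_equal(k.random_uniform(k.shape(mem)),
                                    k.softmax(mem)), k.floatx()))
         for _ in range(1000)]
print(np.mean(draws, axis=0))  # ~[[0.09, 0.24, 0.67]], i.e. softmax([1, 2, 3])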
Example 9: set_reset_mem
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def set_reset_mem(self, mem, spikes):
    """
    Reset membrane potential ``mem`` array where ``spikes`` array is
    nonzero.
    """
    if hasattr(self, 'activation_str') and self.activation_str == 'softmax':
        # Softmax output neurons are not reset after spiking.
        new = tf.identity(mem)
    else:
        new = tf.where(k.not_equal(spikes, 0), k.zeros_like(mem), mem)
    self.add_update([(self.mem, new)])
Example 10: get_psp
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def get_psp(self, output_spikes):
    if hasattr(self, 'activation_str') and self.activation_str == 'softmax':
        psp = tf.identity(output_spikes)
    else:
        new_spiketimes = tf.where(k.not_equal(output_spikes, 0),
                                  k.ones_like(output_spikes) * self.time,
                                  self.last_spiketimes)
        assign_new_spiketimes = self.last_spiketimes.assign(new_spiketimes)
        with tf.control_dependencies([assign_new_spiketimes]):
            last_spiketimes = self.last_spiketimes + 0  # Dummy op
            psp = tf.where(k.greater(last_spiketimes, 0),
                           k.ones_like(output_spikes) * self.dt,
                           k.zeros_like(output_spikes))
    return psp
Example 11: call
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def call(self, inputs, **kwargs):
    assert isinstance(inputs, list) and len(inputs) == 3
    first, second, features = inputs[0], inputs[1], inputs[2]

    if not self.from_logits:
        first = K.clip(first, 1e-10, 1.0)
        second = K.clip(second, 1e-10, 1.0)
        first_, second_ = K.log(first), K.log(second)
    else:
        first_, second_ = first, second

    # embedded_features.shape = (M, T, 1)
    if self.use_intermediate_layer:
        features = K.dot(features, self.first_kernel)
        features = K.bias_add(features, self.first_bias, data_format="channels_last")
        features = self.intermediate_activation(features)
    embedded_features = K.dot(features, self.features_kernel)
    embedded_features = K.bias_add(
        embedded_features, self.features_bias, data_format="channels_last")

    if self.use_dimension_bias:
        tiling_shape = [1] * (K.ndim(first) - 1) + [K.shape(first)[-1]]
        embedded_features = K.tile(embedded_features, tiling_shape)
        embedded_features = K.bias_add(
            embedded_features, self.dimensions_bias, data_format="channels_last")

    sigma = K.sigmoid(embedded_features)
    result = weighted_sum(first_, second_, sigma,
                          self.first_threshold, self.second_threshold)
    probs = K.softmax(result)
    if self.return_logits:
        return [probs, result]
    return probs
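The weighted_sum helper is not shown on this page. One plausible reading, consistent with the sigmoid gate sigma computed above (an assumption, not the original implementation; the threshold arguments are ignored here), is a convex combination of the two log-distributions:

def weighted_sum(first, second, sigma, first_threshold=None, second_threshold=None):
    # Hypothetical sketch: gate elementwise between the two inputs.
    # The original may additionally use the thresholds to clip the gate.
    return sigma * first + (1.0 - sigma) * second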
Example 12: yolo2_head
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def yolo2_head(feats, anchors, num_classes, input_shape, calc_loss=False):
    """Convert final layer features to bounding box parameters."""
    num_anchors = len(anchors)
    # Reshape to batch, height, width, num_anchors, box_params.
    anchors_tensor = K.reshape(K.constant(anchors), [1, 1, 1, num_anchors, 2])

    grid_shape = K.shape(feats)[1:3]  # height, width
    grid_y = K.tile(K.reshape(K.arange(0, stop=grid_shape[0]), [-1, 1, 1, 1]),
                    [1, grid_shape[1], 1, 1])
    grid_x = K.tile(K.reshape(K.arange(0, stop=grid_shape[1]), [1, -1, 1, 1]),
                    [grid_shape[0], 1, 1, 1])
    grid = K.concatenate([grid_x, grid_y])
    grid = K.cast(grid, K.dtype(feats))

    feats = K.reshape(
        feats, [-1, grid_shape[0], grid_shape[1], num_anchors, num_classes + 5])

    # Adjust predictions to each spatial grid point and anchor size.
    box_xy = (K.sigmoid(feats[..., :2]) + grid) / K.cast(grid_shape[..., ::-1], K.dtype(feats))
    #box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(grid_shape[..., ::-1], K.dtype(feats))
    box_wh = K.exp(feats[..., 2:4]) * anchors_tensor / K.cast(input_shape[..., ::-1], K.dtype(feats))
    box_confidence = K.sigmoid(feats[..., 4:5])
    box_class_probs = K.softmax(feats[..., 5:])

    if calc_loss:
        return grid, feats, box_xy, box_wh
    return box_xy, box_wh, box_confidence, box_class_probs
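A hedged smoke test of the decoding (dummy values; the anchor set, class count, and input size below are illustrative assumptions, not from the original code):

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

anchors = np.array([[1., 2.], [3., 4.], [5., 6.]])  # 3 dummy anchors (w, h)
num_classes = 20
feats = K.constant(np.random.rand(1, 13, 13, len(anchors) * (num_classes + 5)))
input_shape = K.constant([416, 416], dtype='int32')

box_xy, box_wh, box_conf, box_cls = yolo2_head(feats, anchors, num_classes, input_shape)
print(box_cls.shape)  # (1, 13, 13, 3, 20); softmax makes each class vector sum to 1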
Example 13: call
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def call(self, inputs, **kwargs):
    if self.axis == 1:
        # If channels first, force it to be channels last for these ops.
        inputs = K.permute_dimensions(inputs, [0, 2, 3, 1])

    q, k, v = tf.split(inputs, [self.depth_k, self.depth_k, self.depth_v], axis=-1)

    q = self.split_heads_2d(q)
    k = self.split_heads_2d(k)
    v = self.split_heads_2d(v)

    # Scale the query.
    depth_k_heads = self.depth_k / self.num_heads
    q *= (depth_k_heads ** -0.5)

    # [batch, num_heads, height * width, depth_k or depth_v] if axis == -1
    qk_shape = [self._batch, self.num_heads, self._height * self._width, self.depth_k // self.num_heads]
    v_shape = [self._batch, self.num_heads, self._height * self._width, self.depth_v // self.num_heads]
    flat_q = K.reshape(q, K.stack(qk_shape))
    flat_k = K.reshape(k, K.stack(qk_shape))
    flat_v = K.reshape(v, K.stack(v_shape))

    # [batch, num_heads, HW, HW]
    logits = tf.matmul(flat_q, flat_k, transpose_b=True)

    # Apply relative position encodings.
    if self.relative:
        h_rel_logits, w_rel_logits = self.relative_logits(q)
        logits += h_rel_logits
        logits += w_rel_logits

    weights = K.softmax(logits, axis=-1)
    attn_out = tf.matmul(weights, flat_v)

    attn_out_shape = [self._batch, self.num_heads, self._height, self._width, self.depth_v // self.num_heads]
    attn_out_shape = K.stack(attn_out_shape)
    attn_out = K.reshape(attn_out, attn_out_shape)
    attn_out = self.combine_heads_2d(attn_out)
    # [batch, height, width, depth_v]

    if self.axis == 1:
        # Return to [batch, depth_v, height, width] for channels first.
        attn_out = K.permute_dimensions(attn_out, [0, 3, 1, 2])

    attn_out.set_shape(self.compute_output_shape(self._shape))
    return attn_out
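Here softmax plays the standard scaled dot-product attention role. A minimal standalone sketch of that core computation, with toy shapes and none of the layer's head-splitting machinery:

import tensorflow as tf
from tensorflow.keras import backend as K

B, H, L, D = 2, 4, 9, 8                           # batch, heads, positions (H*W), depth per head
q = tf.random.normal([B, H, L, D]) * (D ** -0.5)  # pre-scaled queries, as in the layer
k_ = tf.random.normal([B, H, L, D])
v = tf.random.normal([B, H, L, D])

logits = tf.matmul(q, k_, transpose_b=True)  # [B, H, L, L]
weights = K.softmax(logits, axis=-1)         # each row of weights sums to 1
attn_out = tf.matmul(weights, v)             # [B, H, L, D]
print(attn_out.shape)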
Example 14: update_neurons
# Required imports: from tensorflow.keras import backend [as alias]
# or: from tensorflow.keras.backend import softmax [as alias]
def update_neurons(self):
    """Update neurons according to activation function."""

    # Update membrane potentials.
    new_mem = self.get_new_mem()

    # Generate spikes.
    if hasattr(self, 'activation_str') and self.activation_str == 'softmax':
        output_spikes = self.softmax_activation(new_mem)
    else:
        output_spikes = self.linear_activation(new_mem)

    # Reset membrane potential after spikes.
    self.set_reset_mem(new_mem, output_spikes)

    # Store refractory period after spikes.
    if hasattr(self, 'activation_str') and self.activation_str == 'softmax':
        # We do not constrain softmax output neurons.
        new_refrac = tf.identity(self.refrac_until)
    else:
        new_refrac = tf.where(k.not_equal(output_spikes, 0),
                              k.ones_like(output_spikes) *
                              (self.time + self.tau_refrac),
                              self.refrac_until)

    # The state variables hold a duplicated batch; write the same values
    # into both halves.
    c = new_refrac[:self.batch_size]
    cc = k.concatenate([c, c], 0)
    updates = [self.refrac_until.assign(cc)]

    if self.spiketrain is not None:
        c = self.time * k.cast(k.not_equal(output_spikes, 0),
                               k.floatx())[:self.batch_size]
        cc = k.concatenate([c, c], 0)
        updates += [self.spiketrain.assign(cc)]

    with tf.control_dependencies(updates):
        masked_impulse = tf.where(k.greater(self.refrac_until, self.time),
                                  k.zeros_like(self.impulse), self.impulse)

    c = k.greater(masked_impulse, 0)[:self.batch_size]
    cc = k.cast(k.concatenate([c, c], 0), k.floatx())
    updates = [self.prospective_spikes.assign(cc)]
    new_thresh = self._v_thresh * k.ones_like(self.v_thresh) + \
        self.missing_impulse
    updates += [self.v_thresh.assign(new_thresh)]

    with tf.control_dependencies(updates):
        # Compute post-synaptic potential.
        psp = self.get_psp(output_spikes)
        return k.cast(psp, k.floatx())