This page collects typical usage examples of the Python method tensorflow.keras.backend.floatx. If you have been wondering what backend.floatx does, how to call it, and what real code that uses it looks like, the curated examples below should help. You can also browse further usage examples from the containing module, tensorflow.keras.backend.
The following 15 code examples of backend.floatx are sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
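Before the examples, here is a minimal sketch (not taken from any of the projects below) of what backend.floatx returns and how it interacts with set_floatx: it yields the name of the default float type, 'float32' unless changed, and is typically passed as a dtype to NumPy or Keras constructors so that data matches the backend's precision.

# Minimal illustrative sketch of backend.floatx / set_floatx.
import numpy as np
from tensorflow.keras import backend as K

print(K.floatx())                       # 'float32' by default
x = np.zeros((2, 3), dtype=K.floatx())  # NumPy array matching the Keras default dtype

K.set_floatx('float64')                 # change the global default
print(K.floatx())                       # 'float64'
K.set_floatx('float32')                 # restore the default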
Example 1: call
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def call(self, x):
    n = (self.win_length - 1) / 2.0
    denom = n * (n + 1) * (2 * n + 1) / 3

    if self.data_format == 'channels_first':
        x = K.permute_dimensions(x, (0, 2, 3, 1))

    x = tf.pad(x, tf.constant([[0, 0], [0, 0], [int(n), int(n)], [0, 0]]), mode=self.mode)
    kernel = K.arange(-n, n + 1, 1, dtype=K.floatx())
    kernel = K.reshape(kernel, (1, kernel.shape[-1], 1, 1))  # (freq, time)

    x = K.conv2d(x, kernel, 1, data_format='channels_last') / denom

    if self.data_format == 'channels_first':
        x = K.permute_dimensions(x, (0, 3, 1, 2))

    return x
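The call above computes delta (local slope) features: each time frame is convolved with the anti-symmetric kernel [-n, ..., n] and normalized by denom = n*(n+1)*(2n+1)/3, which equals 2 * sum(i**2 for i = 1..n). A rough NumPy-only sketch of the same computation on a single 1-D sequence (illustrative only; it assumes win_length = 5 and 'symmetric' padding, which need not match the layer's actual defaults):

# Hypothetical NumPy equivalent of the delta computation above, for one 1-D sequence.
import numpy as np

def delta_1d(seq, win_length=5, mode='symmetric'):
    n = (win_length - 1) // 2
    denom = 2 * sum(i ** 2 for i in range(1, n + 1))  # same as n * (n + 1) * (2 * n + 1) / 3
    padded = np.pad(seq, n, mode=mode)
    kernel = np.arange(-n, n + 1, dtype=float)
    # np.correlate mirrors the (non-flipping) cross-correlation done by K.conv2d
    return np.correlate(padded, kernel, mode='valid') / denom

print(delta_1d(np.array([0., 1., 2., 3., 4.])))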
Example 2: amplitude_to_decibel
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def amplitude_to_decibel(x, amin=1e-10, dynamic_range=80.0):
    """[K] Convert (linear) amplitude to decibel (log10(x)).

    Parameters
    ----------
    x: Keras *batch* tensor or variable. It has to be batch because of sample-wise `K.max()`.
    amin: minimum amplitude. Amplitude smaller than `amin` is set to `amin`.
    dynamic_range: dynamic range in decibel.
    """
    log_spec = 10 * K.log(K.maximum(x, amin)) / np.log(10).astype(K.floatx())
    if K.ndim(x) > 1:
        axis = tuple(range(K.ndim(x))[1:])
    else:
        axis = None

    log_spec = log_spec - K.max(log_spec, axis=axis, keepdims=True)  # [-?, 0]
    log_spec = K.maximum(log_spec, -1 * dynamic_range)  # [-80, 0]
    return log_spec
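A possible way to call amplitude_to_decibel as defined above (an illustrative sketch; the input values are made up):

# Hypothetical usage of amplitude_to_decibel as defined above.
import numpy as np
from tensorflow.keras import backend as K

amplitudes = K.constant(np.array([[1e-3, 1e-2, 1e-1, 1.0]], dtype=K.floatx()))
db = amplitude_to_decibel(amplitudes, amin=1e-10, dynamic_range=80.0)
print(K.eval(db))  # values shifted so each sample's maximum is 0 dB, floored at -80 dB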
Example 3: test_get_stft_kernels
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def test_get_stft_kernels():
    """test for backend.get_stft_kernels"""
    n_dft = 4
    real_kernels, imag_kernels = KPB.get_stft_kernels(n_dft)

    real_kernels_ref = np.array(
        [[[[0.0, 0.0, 0.0]]], [[[0.5, 0.0, -0.5]]], [[[1.0, -1.0, 1.0]]], [[[0.5, 0.0, -0.5]]]],
        dtype=K.floatx(),
    )
    imag_kernels_ref = np.array(
        [[[[0.0, 0.0, 0.0]]], [[[0.0, -0.5, 0.0]]], [[[0.0, 0.0, 0.0]]], [[[0.0, 0.5, 0.0]]]],
        dtype=K.floatx(),
    )

    assert real_kernels.shape == (n_dft, 1, 1, n_dft // 2 + 1)
    assert imag_kernels.shape == (n_dft, 1, 1, n_dft // 2 + 1)
    assert np.allclose(real_kernels, real_kernels_ref, atol=TOL)
    assert np.allclose(imag_kernels, imag_kernels_ref, atol=TOL)
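The reference arrays above can be reproduced with plain NumPy if one assumes (as the numbers suggest, though this is an assumption about the backend, not taken from it) that get_stft_kernels returns a periodic Hann window multiplied by cosine and negative-sine DFT basis functions:

# Illustrative reconstruction of the n_dft = 4 reference kernels, assuming a
# periodic Hann window times cos / -sin basis functions.
import numpy as np

n_dft = 4
n_bins = n_dft // 2 + 1
window = 0.5 * (1 - np.cos(2 * np.pi * np.arange(n_dft) / n_dft))  # [0, 0.5, 1, 0.5]

n = np.arange(n_dft)[:, None]   # sample index within the frame
f = np.arange(n_bins)[None, :]  # frequency bin
real = window[:, None] * np.cos(2 * np.pi * n * f / n_dft)
imag = window[:, None] * -np.sin(2 * np.pi * n * f / n_dft)

print(real)  # matches real_kernels_ref up to floating-point error and the (n_dft, 1, 1, n_bins) reshape
print(imag)  # matches imag_kernels_ref likewise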
Example 4: test_binary_auto
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def test_binary_auto():
    """Test binary auto scale quantizer."""
    np.random.seed(42)
    N = 1000000
    m_list = [1.0, 0.1, 0.01, 0.001]

    for m in m_list:
        x = np.random.uniform(-m, m, (N, 10)).astype(K.floatx())
        x = K.constant(x)
        quantizer = binary(alpha="auto")
        q = K.eval(quantizer(x))

        result = get_weight_scale(quantizer, q)
        expected = m / 2.0
        logging.info("expect %s", expected)
        logging.info("result %s", result)
        assert_allclose(result, expected, rtol=0.02)
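The expectation expected = m / 2.0 is not arbitrary: the least-squares scale for a binary quantizer of zero-mean data is the mean absolute value, and for x uniform on [-m, m] that mean is m / 2 (assuming, as the test implies, that the "auto" mode estimates exactly this quantity). A quick NumPy sanity check:

# Quick numerical check of the m / 2 expectation used in the test above.
import numpy as np

m = 0.1
x = np.random.uniform(-m, m, 1_000_000)
print(np.mean(np.abs(x)), m / 2.0)  # both approximately 0.05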
Example 5: test_binary_auto_po2
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def test_binary_auto_po2():
    """Test binary auto_po2 scale quantizer."""
    np.random.seed(42)
    N = 1000000
    m_list = [1.0, 0.1, 0.01, 0.001]

    for m in m_list:
        x = np.random.uniform(-m, m, (N, 10)).astype(K.floatx())
        x = K.constant(x)
        quantizer_ref = binary(alpha="auto")
        quantizer = binary(alpha="auto_po2")
        q_ref = K.eval(quantizer_ref(x))
        q = K.eval(quantizer(x))

        ref = get_weight_scale(quantizer_ref, q_ref)
        expected = np.power(2.0, np.round(np.log2(ref)))
        result = get_weight_scale(quantizer, q)
        assert_allclose(result, expected, rtol=0.0001)
Example 6: test_ternary_auto_po2
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def test_ternary_auto_po2():
    """Test ternary auto_po2 scale quantizer."""
    np.random.seed(42)
    N = 1000000
    m_list = [1.0, 0.1, 0.01, 0.001]

    for m in m_list:
        x = np.random.uniform(-m, m, (N, 10)).astype(K.floatx())
        x = K.constant(x)
        quantizer_ref = ternary(alpha="auto")
        quantizer = ternary(alpha="auto_po2")
        q_ref = K.eval(quantizer_ref(x))
        q = K.eval(quantizer(x))

        ref = get_weight_scale(quantizer_ref, q_ref)
        expected = np.power(2.0, np.round(np.log2(ref)))
        result = get_weight_scale(quantizer, q)
        assert_allclose(result, expected, rtol=0.0001)
Example 7: test_smooth_sigmoid
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def test_smooth_sigmoid():
    """Test smooth_sigmoid function."""
    test_values = np.array(
        [[-3.0, -2.0, -1.0, -0.5, 0.005, 0.0, 0.005, 0.5, 1, 4, 10]],
        dtype=K.floatx())

    def ref_smooth_sigmoid(y):
        x = 0.1875 * y + 0.5
        z = 0.0 if x <= 0.0 else (1.0 if x >= 1.0 else x)
        return z

    sigmoid = np.vectorize(ref_smooth_sigmoid)
    x = K.placeholder(ndim=2)
    f = K.function([x], [smooth_sigmoid(x)])
    result = f([test_values])[0]
    expected = sigmoid(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Example 8: test_hard_sigmoid
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def test_hard_sigmoid():
    """Test hard_sigmoid function."""
    test_values = np.array(
        [[-3.0, -2.0, -1.0, -0.5, 0.005, 0.0, 0.005, 0.5, 1, 4, 10]],
        dtype=K.floatx())

    def ref_hard_sigmoid(y):
        x = 0.5 * y + 0.5
        z = 0.0 if x <= 0.0 else (1.0 if x >= 1.0 else x)
        return z

    sigmoid = np.vectorize(ref_hard_sigmoid)
    x = K.placeholder(ndim=2)
    f = K.function([x], [hard_sigmoid(x)])
    result = f([test_values])[0]
    expected = sigmoid(test_values)
    assert_allclose(result, expected, rtol=1e-05)
Example 9: reset_spikevars
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def reset_spikevars(self, sample_idx):
    """
    Reset variables present in spiking layers. Can be turned off for
    instance when a video sequence is tested.
    """
    mod = self.config.getint('simulation', 'reset_between_nth_sample')
    mod = mod if mod else sample_idx + 1
    do_reset = sample_idx % mod == 0
    if do_reset:
        k.set_value(self.mem, self.init_membrane_potential())
    k.set_value(self.time, np.float32(self.dt))
    zeros_output_shape = np.zeros(self.output_shape, k.floatx())
    if self.tau_refrac > 0:
        k.set_value(self.refrac_until, zeros_output_shape)
    if self.spiketrain is not None:
        k.set_value(self.spiketrain, zeros_output_shape)
    k.set_value(self.last_spiketimes, zeros_output_shape - 1)
Example 10: reset_spikevars
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def reset_spikevars(self, sample_idx):
    """
    Reset variables present in spiking layers. Can be turned off for
    instance when a video sequence is tested.
    """
    mod = self.config.getint('simulation', 'reset_between_nth_sample')
    mod = mod if mod else sample_idx + 1
    do_reset = sample_idx % mod == 0
    if do_reset:
        k.set_value(self.mem, self.init_membrane_potential())
    k.set_value(self.time, np.float32(self.dt))
    zeros_output_shape = np.zeros(self.output_shape, k.floatx())
    if self.tau_refrac > 0:
        k.set_value(self.refrac_until, zeros_output_shape)
    if self.spiketrain is not None:
        k.set_value(self.spiketrain, zeros_output_shape)
    k.set_value(self.last_spiketimes, zeros_output_shape - 1)
    k.set_value(self.v_thresh, zeros_output_shape + self._v_thresh)
    k.set_value(self.prospective_spikes, zeros_output_shape)
    k.set_value(self.missing_impulse, zeros_output_shape)
Example 11: call
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def call(self, x, mask=None):
    """Layer functionality."""
    # Skip integration of input spikes in membrane potential. Directly
    # transmit new spikes. The output psp is nonzero wherever there has
    # been an input spike at any time during simulation.
    input_psp = MaxPooling2D.call(self, x)

    if self.spiketrain is not None:
        new_spikes = tf.math.logical_xor(
            k.greater(input_psp, 0), k.greater(self.last_spiketimes, 0))
        self.add_update([(self.spiketrain,
                          self.time * k.cast(new_spikes, k.floatx()))])

    psp = self.get_psp(input_psp)

    return k.cast(psp, k.floatx())
Example 12: reset_spikevars
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def reset_spikevars(self, sample_idx):
    """
    Reset variables present in spiking layers. Can be turned off for
    instance when a video sequence is tested.
    """
    mod = self.config.getint('simulation', 'reset_between_nth_sample')
    mod = mod if mod else sample_idx + 1
    do_reset = sample_idx % mod == 0
    if do_reset:
        k.set_value(self.mem, self.init_membrane_potential())
    k.set_value(self.time, np.float32(self.dt))
    zeros_output_shape = np.zeros(self.output_shape, k.floatx())
    if self.spiketrain is not None:
        k.set_value(self.spiketrain, zeros_output_shape)
    k.set_value(self.last_spiketimes, zeros_output_shape - 1)
Example 13: cat_acc
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def cat_acc(y_true, y_pred):
    """Keras metric implementing sparse_categorical_accuracy.

    :param y_true: tensor of true class labels.
    :param y_pred: class output scores from network.

    :returns: categorical accuracy.
    """
    # sparse_categorical_accuracy is broken in keras 2.2.4
    # https://github.com/keras-team/keras/issues/11348#issuecomment-439969957
    # this is taken from e59570ae
    from tensorflow.keras import backend as K

    # reshape in case it's in shape (num_samples, 1) instead of (num_samples,)
    if K.ndim(y_true) == K.ndim(y_pred):
        y_true = K.squeeze(y_true, -1)
    # convert dense predictions to labels
    y_pred_labels = K.argmax(y_pred, axis=-1)
    y_pred_labels = K.cast(y_pred_labels, K.floatx())
    return K.cast(K.equal(y_true, y_pred_labels), K.floatx())
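One plausible way to plug cat_acc in as a metric (a hypothetical sketch; the model architecture and data are made up for illustration):

# Hypothetical usage of cat_acc as a Keras metric.
import numpy as np
from tensorflow import keras

model = keras.Sequential([
    keras.layers.Dense(16, activation='relu', input_shape=(8,)),
    keras.layers.Dense(5, activation='softmax'),
])
model.compile(optimizer='adam',
              loss='sparse_categorical_crossentropy',
              metrics=[cat_acc])  # cat_acc as defined above

x = np.random.rand(32, 8).astype('float32')
y = np.random.randint(0, 5, size=(32,))
model.fit(x, y, epochs=1, verbose=0)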
Example 14: _build_tf_cosine_similarity
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def _build_tf_cosine_similarity(max_rank=0, offset=1, eps=1e-12):
    # We build the graph (See utils.generic_utils.tf_recall_at_k for original implementation):
    tf_db = K.placeholder(ndim=2, dtype=K.floatx())  # Where to find
    tf_labels = K.placeholder(ndim=1, dtype=K.floatx())  # and their labels

    tf_batch_query = K.placeholder(ndim=2, dtype=K.floatx())  # Used in case of memory issues
    batch_labels = K.placeholder(ndim=2, dtype=K.floatx())  # and their labels

    all_representations_T = K.expand_dims(tf_db, axis=0)  # 1 x D x N
    batch_representations = K.expand_dims(tf_batch_query, axis=0)  # 1 x n x D
    sim = K.batch_dot(batch_representations, all_representations_T)  # 1 x n x N
    sim = K.squeeze(sim, axis=0)  # n x N
    sim /= tf.linalg.norm(tf_batch_query, axis=1, keepdims=True) + eps
    sim /= tf.linalg.norm(tf_db, axis=0, keepdims=True) + eps

    if max_rank > 0:  # computing r@K or mAP@K
        index_ranking = tf.nn.top_k(sim, k=max_rank + offset).indices
    else:
        index_ranking = tf.contrib.framework.argsort(sim, axis=-1, direction='DESCENDING', stable=True)

    top_k = index_ranking[:, offset:]
    tf_ranking = tf.gather(tf_labels, top_k)

    return tf_db, tf_labels, tf_batch_query, batch_labels, tf_ranking
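A possible way to evaluate the returned graph pieces (a sketch only, assuming a TF 1.x-style graph session, since the argsort branch above relies on tf.contrib; array shapes follow the comments above, i.e. tf_db is D x N and queries are n x D):

# Hypothetical usage sketch for _build_tf_cosine_similarity (TF 1.x graph mode assumed).
import numpy as np
from tensorflow.keras import backend as K

tf_db, tf_labels, tf_batch_query, _, tf_ranking = _build_tf_cosine_similarity(max_rank=5)
rank_fn = K.function([tf_db, tf_labels, tf_batch_query], [tf_ranking])

db = np.random.rand(64, 10).astype(K.floatx())      # D x N database representations
labels = np.random.randint(0, 4, 10).astype(K.floatx())
queries = np.random.rand(3, 64).astype(K.floatx())  # n x D batch of queries

ranked_labels = rank_fn([db, labels, queries])[0]   # labels of each query's top-ranked neighbours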
Example 15: _find_maxima
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import floatx [as alias]
def _find_maxima(x, coordinate_scale=1, confidence_scale=255.0):
    x = K.cast(x, K.floatx())

    col_max = K.max(x, axis=1)
    row_max = K.max(x, axis=2)

    maxima = K.max(col_max, 1)
    maxima = K.expand_dims(maxima, -2) / confidence_scale

    cols = K.cast(K.argmax(col_max, -2), K.floatx())
    rows = K.cast(K.argmax(row_max, -2), K.floatx())
    cols = K.expand_dims(cols, -2) * coordinate_scale
    rows = K.expand_dims(rows, -2) * coordinate_scale

    maxima = K.concatenate([cols, rows, maxima], -2)
    return maxima
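A small illustrative check of _find_maxima above, assuming input of shape (batch, rows, cols, channels) as the axis arguments suggest:

# Hypothetical check: one 8x8 single-channel confidence map with its peak at row 2, column 5.
import numpy as np
from tensorflow.keras import backend as K

heatmap = np.zeros((1, 8, 8, 1), dtype=K.floatx())
heatmap[0, 2, 5, 0] = 255.0

out = K.eval(_find_maxima(K.constant(heatmap)))
print(out.shape)     # (1, 3, 1): [column, row, confidence] per channel
print(out[0, :, 0])  # approximately [5., 2., 1.]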