This article collects typical usage examples of the Python method tensorflow.keras.backend.eval. If you have been wondering what backend.eval does and how to use it, the curated examples below may help. You can also explore further
usage examples of the containing module, tensorflow.keras.backend.
The following shows 15 code examples of backend.eval, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python examples.
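As a quick orientation before the examples: K.eval evaluates a backend tensor or variable and returns its value as a NumPy array. A minimal standalone sketch, not taken from any of the projects below:

import numpy as np
from tensorflow.keras import backend as K

v = K.variable(np.array([[1.0, 2.0], [3.0, 4.0]]))
print(K.eval(v))            # -> array([[1., 2.], [3., 4.]], dtype=float32)
print(K.eval(K.square(v)))  # also works on derived tensors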
Example 1: test_amplitude_to_decibel
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def test_amplitude_to_decibel():
    """Test for backend_keras.amplitude_to_decibel."""
    from kapre.backend_keras import amplitude_to_decibel

    x = np.array([[1e-20, 1e-5, 1e-3, 5e-2], [0.3, 1.0, 20.5, 9999]])  # positive numbers spanning a wide dynamic range
    amin = 1e-5
    dynamic_range = 80.0

    # NumPy reference: convert to dB, shift so each row's max is 0 dB, clip to the dynamic range
    x_decibel = 10 * np.log10(np.maximum(x, amin))
    x_decibel = x_decibel - np.max(x_decibel, axis=(1,), keepdims=True)
    x_decibel_ref = np.maximum(x_decibel, -1 * dynamic_range)

    x_var = K.variable(x)
    x_decibel_kapre = amplitude_to_decibel(x_var, amin, dynamic_range)
    assert np.allclose(K.eval(x_decibel_kapre), x_decibel_ref, atol=TOL)  # TOL: module-level tolerance in the original test file
Example 2: on_epoch_end
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def on_epoch_end(self, epochs, logs):
    max_variance = -1
    for layer in self.model.layers:
        if layer.__class__.__name__ in [
                "BatchNormalization",
                "QBatchNormalization"
        ]:
            variance = np.max(layer.get_weights()[-1])
            if variance > max_variance:
                max_variance = variance
    if max_variance > 32 and self.learning_rate_factor < 100:
        learning_rate = K.get_value(self.model.optimizer.learning_rate)
        self.learning_rate_factor /= 2.0
        print("***** max_variance is {} / lr is {} *****".format(
            max_variance, learning_rate))
        # K.eval forces the update op to run, halving the optimizer's learning rate
        K.eval(K.update(
            self.model.optimizer.learning_rate, learning_rate / 2.0
        ))
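This snippet is an on_epoch_end method of a Keras callback; a minimal sketch of an enclosing class (the class name and the initial learning_rate_factor are assumptions, not from the source):

from tensorflow.keras.callbacks import Callback

class BatchNormVarianceMonitor(Callback):  # hypothetical name
    def __init__(self):
        super().__init__()
        self.learning_rate_factor = 1.0  # assumed starting value
    # ... on_epoch_end as shown above ...

# model.fit(x, y, epochs=10, callbacks=[BatchNormVarianceMonitor()])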
Example 3: test_binary_auto
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def test_binary_auto():
    """Test binary auto scale quantizer."""
    np.random.seed(42)
    N = 1000000
    m_list = [1.0, 0.1, 0.01, 0.001]
    for m in m_list:
        x = np.random.uniform(-m, m, (N, 10)).astype(K.floatx())
        x = K.constant(x)
        quantizer = binary(alpha="auto")
        q = K.eval(quantizer(x))
        result = get_weight_scale(quantizer, q)
        # for x ~ Uniform(-m, m), E[|x|] = m / 2, which the auto scale should recover
        expected = m / 2.0
        logging.info("expect %s", expected)
        logging.info("result %s", result)
        assert_allclose(result, expected, rtol=0.02)
Example 4: test_ternary_auto
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def test_ternary_auto():
    """Test ternary auto scale quantizer."""
    np.random.seed(42)
    N = 1000000
    m_list = [1.0, 0.1, 0.01, 0.001]
    for m in m_list:
        x = np.random.uniform(-m, m, (N, 10)).astype(K.floatx())
        x = K.constant(x)
        quantizer = ternary(alpha="auto")
        q = K.eval(quantizer(x))
        d = m / 3.0  # threshold: values with |x| < d quantize to 0
        result = np.mean(get_weight_scale(quantizer, q))
        # expected scale = mean of |x| over the non-zero region [d, m]
        expected = (m + d) / 2.0
        assert_allclose(result, expected, rtol=0.02)
Example 5: test_ternary_auto_po2
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def test_ternary_auto_po2():
    """Test ternary auto_po2 scale quantizer."""
    np.random.seed(42)
    N = 1000000
    m_list = [1.0, 0.1, 0.01, 0.001]
    for m in m_list:
        x = np.random.uniform(-m, m, (N, 10)).astype(K.floatx())
        x = K.constant(x)
        quantizer_ref = ternary(alpha="auto")
        quantizer = ternary(alpha="auto_po2")
        q_ref = K.eval(quantizer_ref(x))
        q = K.eval(quantizer(x))
        ref = get_weight_scale(quantizer_ref, q_ref)
        expected = np.power(2.0, np.round(np.log2(ref)))
        result = get_weight_scale(quantizer, q)
        assert_allclose(result, expected, rtol=0.0001)
Example 6: test_stochastic_ternary
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def test_stochastic_ternary(bound, alpha, temperature, expected_values, expected_scale):
    # the arguments are supplied by a pytest parametrization in the original test file
    np.random.seed(42)
    K.set_learning_phase(1)
    n = 1000
    x = np.random.uniform(-bound, bound, size=(n, 10))
    x = np.sort(x, axis=1)
    s = stochastic_ternary(alpha=alpha, temperature=temperature)
    y = K.eval(s(K.constant(x)))
    scale = K.eval(s.scale).astype(np.float32)[0]
    ty = np.zeros_like(y[0])  # accumulate the de-scaled quantized values
    for i in range(n):
        ty = ty + (y[i] / scale)
    result = (ty / n).astype(np.float32)
    assert_allclose(result, expected_values, atol=0.1)
    assert_allclose(scale, expected_scale, rtol=0.1)
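As noted above, the test takes its inputs as arguments, so it is presumably driven by a pytest parametrization; a sketch of the decorator shape (the values in this row are purely illustrative, not the ones from the original suite):

import pytest
import numpy as np

@pytest.mark.parametrize(
    "bound,alpha,temperature,expected_values,expected_scale",
    [
        # illustrative case only; the real suite defines its own rows
        (0.01, "auto", 8.0, np.zeros(10), 0.008),
    ],
)
def test_stochastic_ternary(bound, alpha, temperature, expected_values, expected_scale):
    ...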
Example 7: test_huber_loss
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def test_huber_loss():
    a = np.array([1., 1.5, 2., 4.])
    b = np.array([1.5, 1., 4., 2.])
    assert_allclose(K.eval(huber_loss(a, b, 1.)), np.array([.125, .125, 1.5, 1.5]))
    assert_allclose(K.eval(huber_loss(a, b, 3.)), np.array([.125, .125, 2., 2.]))
    assert_allclose(K.eval(huber_loss(a, b, np.inf)), np.array([.125, .125, 2., 2.]))
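The expected values follow from the element-wise Huber definition: 0.5*err^2 when |err| <= delta, and delta*|err| - 0.5*delta^2 otherwise. A NumPy sketch reproducing the numbers above (huber_loss_ref is a name introduced here for illustration):

import numpy as np

def huber_loss_ref(y_true, y_pred, delta):
    err = np.abs(y_true - y_pred)
    quadratic = 0.5 * err ** 2
    linear = delta * err - 0.5 * delta ** 2
    return np.where(err <= delta, quadratic, linear)

a = np.array([1., 1.5, 2., 4.])
b = np.array([1.5, 1., 4., 2.])
print(huber_loss_ref(a, b, 1.))  # [0.125 0.125 1.5 1.5]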
Example 8: z_effect
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def z_effect(model, gen, z_layer_name, nb_samples=100, do_plot=False, tqdm=tqdm):
    """
    Compute the effect of each z dimension on the final outcome via derivatives.

    We attempt this by taking gradients as in
    https://stackoverflow.com/questions/39561560/getting-gradient-of-model-output-w-r-t-weights-using-keras

    e.g. layer name: 'img-img-dense-vae_ae_dense_sample'
    """
    outputTensor = model.outputs[0]
    inner = model.get_layer(z_layer_name).get_output_at(1)

    # compute gradients
    gradients = K.gradients(outputTensor, inner)
    assert len(gradients) == 1, "wrong gradients"

    # would be nice to be able to do this with K.eval() as opposed to
    # explicit tensorflow sessions.
    with tf.Session() as sess:
        sess.run(tf.initialize_all_variables())  # TF1-era initializer API
        evaluated_gradients = [None] * nb_samples
        for i in tqdm(range(nb_samples)):
            sample = next(gen)
            fdct = {model.get_input_at(0): sample[0]}
            evaluated_gradients[i] = sess.run(gradients, feed_dict=fdct)[0]
        all_gradients = np.mean(np.abs(np.vstack(evaluated_gradients)), 0)

    if do_plot:
        plt.figure()
        plt.plot(np.sort(all_gradients))
        plt.xlabel('sorted z index')
        plt.ylabel('mean(|grad|)')
        plt.show()
    return all_gradients
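The comment in z_effect wishes for a session-free version; under TF2 eager execution, the same per-dimension sensitivity could be sketched with tf.GradientTape. This is an assumption-laden rewrite, not part of the original package: it assumes the model has been split into an encoder (input -> z) and a decoder (z -> output), which the original single-model code does not require.

import numpy as np
import tensorflow as tf

def z_effect_eager(encoder, decoder, gen, nb_samples=100):
    # hypothetical TF2 variant: mean |d(output)/d(z)| per z dimension
    grads = []
    for _ in range(nb_samples):
        sample = next(gen)
        z = encoder(sample[0])
        with tf.GradientTape() as tape:
            tape.watch(z)
            out = decoder(z)
        g = tape.gradient(out, z)  # gradient of the summed output w.r.t. z
        grads.append(np.abs(g.numpy()))
    return np.mean(np.vstack(grads), 0)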
Example 9: get_weights
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def get_weights(layer):
    weights = layer.get_weights()
    out = copy.deepcopy(weights)  # `copy` is the standard-library module
    for j, weight in enumerate(weights):
        # apply the layer's quantizer (if any) and evaluate to a NumPy array
        if hasattr(layer, "get_quantizers") and layer.get_quantizers()[j]:
            out[j] = K.eval(
                layer.get_quantizers()[j](K.constant(weight)))
    return out
Example 10: get_weight_scale
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def get_weight_scale(quantizer, x=None):
    """Gets the scales of weights for (stochastic_)binary and ternary quantizers.

    Arguments:
        quantizer: A binary or ternary quantizer class.
        x: A weight tensor. We keep it here for now for backward compatibility.

    Returns:
        Weight scale per channel for binary and ternary
        quantizers with auto or auto_po2 alpha/threshold.
    """
    if hasattr(quantizer, "scale") and quantizer.scale is not None:
        return K.eval(quantizer.scale)
    return 1.0
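A quick usage sketch tying this helper to the quantizers from the earlier examples (the qkeras import path is an assumption based on those examples):

import numpy as np
from tensorflow.keras import backend as K
from qkeras import binary

q = binary(alpha="auto")
w = np.random.uniform(-0.5, 0.5, (100, 8)).astype("float32")
wq = K.eval(q(K.constant(w)))   # calling the quantizer populates q.scale
print(get_weight_scale(q, wq))  # per-channel scale along the last axis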
Example 11: test_stochastic_binary
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def test_stochastic_binary():
    np.random.seed(42)
    K.set_learning_phase(1)
    x = np.random.uniform(-0.01, 0.01, size=10)
    x = np.sort(x)
    s = stochastic_binary(alpha="auto_po2")
    ty = np.zeros_like(x)  # accumulate the de-scaled quantized values
    ts = 0.0
    n = 1000
    for _ in range(n):
        y = K.eval(s(K.constant(x)))
        scale = K.eval(s.scale)[0]
        ts = ts + scale
        ty = ty + (y / scale)
    result = (ty / n).astype(np.float32)
    scale = np.array([ts / n])
    expected = np.array(
        [-1., -1., -1., -0.852, 0.782, 0.768, 0.97, 0.978, 1.0, 1.0]
    ).astype(np.float32)
    expected_scale = np.array([0.003906])
    assert_allclose(result, expected, atol=0.1)
    assert_allclose(scale, expected_scale, rtol=0.1)
Example 12: on_epoch_end
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def on_epoch_end(self, epoch: int, logs: Dict = None):
    """
    Check the loss value at the end of an epoch.

    Args:
        epoch (int): epoch id
        logs (dict): log history

    Returns: None
    """
    logs = logs or {}
    loss = logs.get('loss')
    last_saved_epoch, last_metric, last_file = self._get_checkpoints()
    if last_saved_epoch is not None:
        if last_saved_epoch + self.patience <= epoch:
            self.model.stop_training = True
            logger.info('%s does not improve after %d, stopping '
                        'the fitting...' % (self.monitor, self.patience))
    if loss is not None:
        self.losses.append(loss)
        if np.isnan(loss) or np.isinf(loss):
            if self.verbose:
                logger.info("Nan loss found!")
            self._reduce_lr_and_load(last_file)
            if self.verbose:
                logger.info("Now lr is %s." % float(
                    kb.eval(self.model.optimizer.lr)))
        else:
            if len(self.losses) > 1:
                if self.losses[-1] > (self.losses[-2] * 100):
                    self._reduce_lr_and_load(last_file)
                    if self.verbose:
                        logger.info(
                            "Loss shot up from %.3f to %.3f! Reducing lr " % (
                                self.losses[-2], self.losses[-1]))
                        logger.info("Now lr is %s." % float(
                            kb.eval(self.model.optimizer.lr)))
Example 13: _reduce_lr_and_load
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def _reduce_lr_and_load(self, last_file):
    old_value = float(kb.eval(self.model.optimizer.lr))
    self.model.reset_states()
    self.model.optimizer.lr = old_value * self.factor
    opt_dict = self.model.optimizer.get_config()
    self.model.compile(self.model.optimizer.__class__(**opt_dict), self.model.loss)
    if last_file is not None:
        self.model.load_weights(last_file)
        if self.verbose:
            logger.info("Load weights %s" % last_file)
    else:
        logger.info("No weights were loaded")
Example 14: save
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def save(self, fname: str = None) -> None:
    """
    Save the model parameters into <<fname>>_opt.json (or <<ser_file>>_opt.json)
    and model weights into <<fname>>.h5 (or <<ser_file>>.h5).

    Args:
        fname: file path to save the model. If not explicitly given, self.opt["ser_file"] will be used.

    Returns:
        None
    """
    if not fname:
        fname = self.save_path
    else:
        fname = Path(fname).resolve()

    if not fname.parent.is_dir():
        raise ConfigError("Provided save path is incorrect!")
    else:
        opt_path = f"{fname}_opt.json"
        weights_path = f"{fname}.h5"
        log.info(f"[saving model to {opt_path}]")
        self.model.save_weights(weights_path)

    # if the model was loaded from one path and saved to another,
    # change load_path to save_path for the config
    self.opt["epochs_done"] = self.epochs_done
    if isinstance(self.opt.get("learning_rate", None), float):
        # effective lr under Keras' time-based decay: lr / (1 + decay * iterations)
        self.opt["final_learning_rate"] = (K.eval(self.optimizer.lr) /
                                           (1. + K.eval(self.optimizer.decay) * self.batches_seen))
    if self.opt.get("load_path") and self.opt.get("save_path"):
        if self.opt.get("save_path") != self.opt.get("load_path"):
            self.opt["load_path"] = str(self.opt["save_path"])
    save_json(self.opt, opt_path)
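The final_learning_rate computation assumes Keras' time-based decay schedule, lr_t = lr0 / (1 + decay * t). A worked check with illustrative numbers:

lr0, decay, t = 1e-3, 1e-4, 2000  # illustrative values
print(lr0 / (1. + decay * t))     # 0.000833..., the effective lr after 2000 batches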
Example 15: quantized_model_debug
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import eval [as alias]
def quantized_model_debug(model, X_test, plot=False):
    """Debugs and plots model weights and activations."""
    outputs = []
    output_names = []
    for layer in model.layers:
        if layer.__class__.__name__ in REGISTERED_LAYERS:
            output_names.append(layer.name)
            outputs.append(layer.output)
    # build a side model exposing every registered layer's output
    model_debug = Model(inputs=model.inputs, outputs=outputs)
    y_pred = model_debug.predict(X_test)
    print("{:30} {: 8.4f} {: 8.4f}".format(
        "input", np.min(X_test), np.max(X_test)))
    for n, p in zip(output_names, y_pred):
        layer = model.get_layer(n)
        if layer.__class__.__name__ == "QActivation":
            alpha = get_weight_scale(layer.activation, p)
        else:
            alpha = 1.0
        print(
            "{:30} {: 8.4f} {: 8.4f}".format(n, np.min(p / alpha),
                                             np.max(p / alpha)),
            end="")
        if alpha != 1.0:
            print(" a[{: 8.4f} {:8.4f}]".format(np.min(alpha), np.max(alpha)))
        if plot and layer.__class__.__name__ in [
                "QConv2D", "QDense", "QActivation"
        ]:
            plt.hist(p.flatten(), bins=25)
            plt.title(layer.name + "(output)")
            plt.show()
        alpha = None
        for i, weights in enumerate(layer.get_weights()):
            if hasattr(layer, "get_quantizers") and layer.get_quantizers()[i]:
                weights = K.eval(layer.get_quantizers()[i](K.constant(weights)))
                if i == 0 and layer.__class__.__name__ in [
                        "QConv1D", "QConv2D", "QDense"
                ]:
                    alpha = get_weight_scale(layer.get_quantizers()[i], weights)
                    # if alpha is 0, let's remove all weights (and avoid dividing by zero)
                    alpha_mask = (alpha == 0.0)
                    weights = np.where(alpha_mask, weights * alpha, weights / alpha)
            if plot:
                plt.hist(weights.flatten(), bins=25)
                plt.title(layer.name + "(weights)")
                plt.show()
            print(" ({: 8.4f} {: 8.4f})".format(np.min(weights), np.max(weights)),
                  end="")
        if alpha is not None and isinstance(alpha, np.ndarray):
            print(" a({: 10.6f} {: 10.6f})".format(
                np.min(alpha), np.max(alpha)), end="")
        print("")