This article collects typical usage examples of the Python method tensorflow.keras.backend.set_learning_phase. If you are wondering what backend.set_learning_phase does, how it is used, and what it looks like in real code, the curated examples below may help. You can also browse further usage examples of the containing module, tensorflow.keras.backend.
A total of 12 code examples of backend.set_learning_phase are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
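Before the examples, here is a minimal, self-contained sketch of what the method does (a hedged illustration using the TF 1.x-style Keras backend API, which is deprecated in newer TensorFlow releases; the toy model below is purely hypothetical and not taken from the examples): passing 1 puts layers such as Dropout and BatchNormalization into training behavior, while 0 forces inference behavior.

import numpy as np
import tensorflow as tf
from tensorflow.keras import backend as K

# Toy model used only to illustrate the learning-phase switch (hypothetical).
model = tf.keras.Sequential([
    tf.keras.layers.Dense(4, activation='relu', input_shape=(8,)),
    tf.keras.layers.Dropout(0.5),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer='adam', loss='mse')

K.set_learning_phase(1)   # 1 = training mode: Dropout is active
model.fit(np.random.rand(32, 8), np.random.rand(32, 1), epochs=1, verbose=0)

K.set_learning_phase(0)   # 0 = inference mode: Dropout acts as identity
predictions = model.predict(np.random.rand(4, 8))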
Example 1: main
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import set_learning_phase [as alias]
def main(base_model_name, weights_file, export_path):
    # Load model and weights
    nima = Nima(base_model_name, weights=None)
    nima.build()
    nima.nima_model.load_weights(weights_file)

    # Tell Keras that this model will be used for making predictions
    K.set_learning_phase(0)

    # CustomObjectScope is required by MobileNet
    with CustomObjectScope({'relu6': relu6, 'DepthwiseConv2D': DepthwiseConv2D}):
        builder = saved_model_builder.SavedModelBuilder(export_path)
        signature = predict_signature_def(
            inputs={'input_image': nima.nima_model.input},
            outputs={'quality_prediction': nima.nima_model.output}
        )
        builder.add_meta_graph_and_variables(
            sess=K.get_session(),
            tags=[tag_constants.SERVING],
            signature_def_map={'image_quality': signature}
        )
        builder.save()

    print(f'TF model exported to: {export_path}')
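A quick way to sanity-check such an export (a hedged sketch using the TF 1.x SavedModel loader API; the './nima_export' directory is a hypothetical placeholder) is to reload it and look up the signature that was just written:

import tensorflow as tf
from tensorflow.python.saved_model import tag_constants

# Hypothetical verification of the exported model; './nima_export' is a placeholder path.
with tf.Session(graph=tf.Graph()) as sess:
    meta_graph = tf.saved_model.loader.load(sess, [tag_constants.SERVING], './nima_export')
    signature = meta_graph.signature_def['image_quality']
    print(signature.inputs['input_image'].name)
    print(signature.outputs['quality_prediction'].name)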
Example 2: test_stochastic_ternary
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import set_learning_phase [as alias]
def test_stochastic_ternary(bound, alpha, temperature, expected_values, expected_scale):
    np.random.seed(42)
    K.set_learning_phase(1)

    n = 1000

    x = np.random.uniform(-bound, bound, size=(n, 10))
    x = np.sort(x, axis=1)

    s = stochastic_ternary(alpha=alpha, temperature=temperature)

    y = K.eval(s(K.constant(x)))
    scale = K.eval(s.scale).astype(np.float32)[0]

    # Average the scaled ternary outputs over all n samples
    ty = np.zeros_like(s)
    for i in range(n):
        ty = ty + (y[i] / scale)
    result = (ty / n).astype(np.float32)

    assert_allclose(result, expected_values, atol=0.1)
    assert_allclose(scale, expected_scale, rtol=0.1)
Example 3: __init__
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import set_learning_phase [as alias]
def __init__(self, model, shape):
    shape = (None, shape[0], shape[1], shape[2])
    x_name = 'image_tensor_x'
    with K.get_session() as sess:
        x_tensor = tf.placeholder(tf.float32, shape, x_name)
        K.set_learning_phase(0)
        y_tensor = model(x_tensor)
        y_name = [y_tensor[-1].name[:-2], y_tensor[-2].name[:-2]]
        graph = sess.graph.as_graph_def()
        graph0 = tf.graph_util.convert_variables_to_constants(sess, graph, y_name)
        graph1 = tf.graph_util.remove_training_nodes(graph0)

    self.x_name = [x_name]
    self.y_name = y_name
    self.frozen = graph1
    self.model = model
Example 4: test_stochastic_round_quantized_po2
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import set_learning_phase [as alias]
def test_stochastic_round_quantized_po2(test_values, expected_values):
    K.set_learning_phase(1)
    np.random.seed(666)
    x = K.placeholder(ndim=2)
    q = quantized_po2(use_stochastic_rounding=True)
    f = K.function([x], [q(x)])
    res = f([test_values])[0]
    res = np.average(res)
    assert_allclose(res, expected_values, rtol=1e-01, atol=1e-6)
Example 5: test_stochastic_round_quantized_relu_po2
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import set_learning_phase [as alias]
def test_stochastic_round_quantized_relu_po2(test_values, expected_values):
    K.set_learning_phase(1)
    np.random.seed(666)
    x = K.placeholder(ndim=2)
    q = quantized_relu_po2(use_stochastic_rounding=True)
    f = K.function([x], [q(x)])
    res = f([test_values])[0]
    res = np.average(res)
    assert_allclose(res, expected_values, rtol=1e-01, atol=1e-6)
Example 6: test_stochastic_binary
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import set_learning_phase [as alias]
def test_stochastic_binary():
    np.random.seed(42)
    K.set_learning_phase(1)

    x = np.random.uniform(-0.01, 0.01, size=10)
    x = np.sort(x)

    s = stochastic_binary(alpha="auto_po2")
    ty = np.zeros_like(s)
    ts = 0.0

    n = 1000
    # Average the stochastic binary outputs and scales over n evaluations
    for _ in range(n):
        y = K.eval(s(K.constant(x)))
        scale = K.eval(s.scale)[0]
        ts = ts + scale
        ty = ty + (y / scale)

    result = (ty / n).astype(np.float32)
    scale = np.array([ts / n])

    expected = np.array(
        [-1., -1., -1., -0.852, 0.782, 0.768, 0.97, 0.978, 1.0, 1.0]
    ).astype(np.float32)
    expected_scale = np.array([0.003906])

    assert_allclose(result, expected, atol=0.1)
    assert_allclose(scale, expected_scale, rtol=0.1)
Example 7: test_stochastic_binary_inference_mode
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import set_learning_phase [as alias]
def test_stochastic_binary_inference_mode(alpha, test_values, expected_values):
    K.set_learning_phase(0)
    x = K.placeholder(ndim=2)
    q = stochastic_binary(alpha)
    f = K.function([x], [q(x)])
    result = f([test_values])[0]
    assert_allclose(result, expected_values, rtol=1e-05)
Example 8: _store_keras
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import set_learning_phase [as alias]
def _store_keras(self, name):
    K.set_learning_phase(0)  # necessary to prevent the model from modifying its weights
    model_json = self.artifact.to_json()
    with open(os.path.join(self.model_path, name + '.json'), 'w') as json_file:
        json_file.write(model_json)
    self.artifact.save_weights(os.path.join(self.model_path, name + '.h5'))
    _logger.info("Saved Keras model to disk")
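For symmetry, a hedged sketch of how such a JSON-plus-weights dump could be loaded back with the standard Keras API (the directory and model name below are hypothetical, not part of the example above):

import os
from tensorflow.keras.models import model_from_json

# Hypothetical counterpart to _store_keras(); 'model_dir' and 'my_model' are placeholders.
model_dir, name = 'model_dir', 'my_model'
with open(os.path.join(model_dir, name + '.json')) as json_file:
    model = model_from_json(json_file.read())
model.load_weights(os.path.join(model_dir, name + '.h5'))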
Example 9: __init__
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import set_learning_phase [as alias]
def __init__(self, **kwargs):
    super(YOLO_np, self).__init__()
    self.__dict__.update(self._defaults)  # set up default values
    self.__dict__.update(kwargs)          # and update with user overrides
    self.class_names = get_classes(self.classes_path)
    self.anchors = get_anchors(self.anchors_path)
    self.colors = get_colors(self.class_names)
    K.set_learning_phase(0)
    self.yolo_model = self._generate_model()
Example 10: load_eval_model
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import set_learning_phase [as alias]
def load_eval_model(model_path):
    # support of tflite model
    if model_path.endswith('.tflite'):
        from tensorflow.lite.python import interpreter as interpreter_wrapper
        model = interpreter_wrapper.Interpreter(model_path=model_path)
        model.allocate_tensors()
        model_format = 'TFLITE'

    # support of MNN model
    elif model_path.endswith('.mnn'):
        model = MNN.Interpreter(model_path)
        model_format = 'MNN'

    # support of TF 1.x frozen pb model
    elif model_path.endswith('.pb'):
        model = load_graph(model_path)
        model_format = 'PB'

    # support of ONNX model
    elif model_path.endswith('.onnx'):
        model = onnxruntime.InferenceSession(model_path)
        model_format = 'ONNX'

    # normal Keras h5 model
    elif model_path.endswith('.h5'):
        custom_object_dict = get_custom_objects()
        model = load_model(model_path, compile=False, custom_objects=custom_object_dict)
        model_format = 'H5'
        K.set_learning_phase(0)
    else:
        raise ValueError('invalid model file')

    return model, model_format
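As a rough usage sketch only (the file name and dummy input batch below are hypothetical placeholders, and only two of the five returned formats are handled), a caller could dispatch on the returned model_format like this:

import numpy as np

# Hypothetical caller of load_eval_model(); the path and input shape are placeholders.
model, model_format = load_eval_model('eval_model.h5')
dummy_batch = np.random.rand(1, 416, 416, 3).astype(np.float32)

if model_format == 'H5':
    # Keras model: predict directly on the numpy batch
    predictions = model.predict(dummy_batch)
elif model_format == 'TFLITE':
    # TFLite interpreter: feed the input tensor, run, then read the outputs
    input_details = model.get_input_details()
    output_details = model.get_output_details()
    model.set_tensor(input_details[0]['index'], dummy_batch)
    model.invoke()
    predictions = [model.get_tensor(detail['index']) for detail in output_details]
else:
    raise NotImplementedError('dispatch for %s is not shown in this sketch' % model_format)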
Example 11: main
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import set_learning_phase [as alias]
def main(args):
    include_top = True
    if args.dump_headless:
        include_top = False

    # prepare model
    model, input_shape = get_model(args.model_type, include_top=include_top)
    if args.weights_path:
        model.load_weights(args.weights_path, by_name=True)

    # support multi-gpu training
    if args.gpu_num >= 2:
        model = multi_gpu_model(model, gpus=args.gpu_num)
    model.summary()

    if args.evaluate:
        K.set_learning_phase(0)
        evaluate_model(args, model, input_shape)
    elif args.verify_with_image:
        K.set_learning_phase(0)
        verify_with_image(model, input_shape)
    elif args.dump_headless:
        K.set_learning_phase(0)
        model.save(args.output_model_file)
        print('export headless model to %s' % str(args.output_model_file))
    else:
        train(args, model, input_shape)
Example 12: dump_saved_model
# Required import: from tensorflow.keras import backend [as alias]
# Or: from tensorflow.keras.backend import set_learning_phase [as alias]
def dump_saved_model(self, saved_model_path):
    model = self.inference_model
    os.makedirs(saved_model_path, exist_ok=True)
    tf.keras.experimental.export_saved_model(model, saved_model_path)
    print('export inference model to %s' % str(saved_model_path))


#class YOLO_prenms(object):
#    _defaults = default_config
#
#    @classmethod
#    def get_defaults(cls, n):
#        if n in cls._defaults:
#            return cls._defaults[n]
#        else:
#            return "Unrecognized attribute name '" + n + "'"
#
#    def __init__(self, **kwargs):
#        super(YOLO_prenms, self).__init__()
#        self.__dict__.update(self._defaults)  # set up default values
#        self.__dict__.update(kwargs)          # and update with user overrides
#        self.class_names = get_classes(self.classes_path)
#        self.anchors = get_anchors(self.anchors_path)
#        self.colors = get_colors(self.class_names)
#        K.set_learning_phase(0)
#        self.prenms_model = self._generate_model()
#
#    def _generate_model(self):
#        '''to generate the bounding boxes'''
#        weights_path = os.path.expanduser(self.weights_path)
#        assert weights_path.endswith('.h5'), 'Keras model or weights must be a .h5 file.'
#
#        # Load model, or construct model and load weights.
#        num_anchors = len(self.anchors)
#        num_classes = len(self.class_names)
#        # YOLOv3 model has 9 anchors and 3 feature layers but
#        # Tiny YOLOv3 model has 6 anchors and 2 feature layers,
#        # so we can calculate feature layers number to get model type
#        num_feature_layers = num_anchors // 3
#
#        prenms_model = get_yolo3_prenms_model(self.model_type, self.anchors, num_classes, weights_path=weights_path, input_shape=self.model_image_size + (3,))
#        return prenms_model
#
#    def dump_model_file(self, output_model_file):
#        self.prenms_model.save(output_model_file)
#
#    def dump_saved_model(self, saved_model_path):
#        model = self.prenms_model
#        os.makedirs(saved_model_path, exist_ok=True)
#        tf.keras.experimental.export_saved_model(model, saved_model_path)
#        print('export inference model to %s' % str(saved_model_path))