本文整理汇总了Python中keras.backend.get_session方法的典型用法代码示例。如果您正苦于以下问题:Python backend.get_session方法的具体用法?Python backend.get_session怎么用?Python backend.get_session使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块keras.backend
的用法示例。
在下文中一共展示了backend.get_session方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: load_ckpt
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def load_ckpt(ckpt_name, var_scope_name, scope, constructor, input_tensor, label_offset, load_weights, **kwargs):
    """Build a slim model inside the given arg_scope and optionally restore it.

    Arguments
        ckpt_name       file name of the checkpoint
        var_scope_name  name of the variable scope
        scope           arg_scope
        constructor     constructor of the model
        input_tensor    tensor of input image
        label_offset    whether it is 1000 classes or 1001 classes, if it is 1001, remove class 0
        kwargs          forwarded to the constructor (is_training, create_aux_logits, ...)

    Returns the (logits, endpoints) pair produced by the constructor.
    """
    num_classes = 1000 + label_offset
    with slim.arg_scope(scope):
        logits, endpoints = constructor(
            input_tensor,
            num_classes=num_classes,
            scope=var_scope_name,
            **kwargs)
    if load_weights:
        # Restore the model's variables from the checkpoint into the
        # session that Keras is currently using.
        restore_fn = slim.assign_from_checkpoint_fn(
            ckpt_name, slim.get_model_variables(var_scope_name))
        restore_fn(K.get_session())
    return logits, endpoints
示例2: one_shot_method
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def one_shot_method(prediction, x, curr_sample, curr_target, p_t):
    """One-shot finite-difference estimate of the loss gradient w.r.t. the input.

    Draws a single random +/-1 perturbation DELTA and uses a symmetric
    finite difference of the model's target-class probability.

    prediction  : model output tensor (probabilities)
    x           : input placeholder fed to the session
    curr_sample : current batch of inputs
    curr_target : target class index per sample
    p_t         : predicted probability of the target class per sample
    """
    grad_est = np.zeros((BATCH_SIZE, IMAGE_ROWS, IMAGE_COLS, NUM_CHANNELS))
    # Rademacher-style perturbation: entries are +1 or -1.
    DELTA = np.random.randint(2, size=(BATCH_SIZE, IMAGE_ROWS, IMAGE_COLS, NUM_CHANNELS))
    np.place(DELTA, DELTA==0, -1)
    # Perturbed samples, clipped back to the valid input range.
    y_plus = np.clip(curr_sample + args.delta * DELTA, CLIP_MIN, CLIP_MAX)
    y_minus = np.clip(curr_sample - args.delta * DELTA, CLIP_MIN, CLIP_MAX)
    # NOTE(review): when args.CW_loss != 0, pred_plus_t / pred_minus_t are
    # never assigned and the finite-difference lines below raise NameError;
    # presumably only the args.CW_loss == 0 path is exercised — confirm.
    if args.CW_loss == 0:
        pred_plus = K.get_session().run([prediction], feed_dict={x: y_plus, K.learning_phase(): 0})[0]
        pred_plus_t = pred_plus[np.arange(BATCH_SIZE), list(curr_target)]
        pred_minus = K.get_session().run([prediction], feed_dict={x: y_minus, K.learning_phase(): 0})[0]
        pred_minus_t = pred_minus[np.arange(BATCH_SIZE), list(curr_target)]
    # Symmetric finite-difference estimate broadcast over the image dims.
    num_est = (pred_plus_t - pred_minus_t)
    grad_est = num_est[:, None, None, None]/(args.delta * DELTA)
    # Getting gradient of the loss
    if args.CW_loss == 0:
        # Cross-entropy-style gradient: -grad(p_t) / p_t.
        loss_grad = -1.0 * grad_est/p_t[:, None, None, None]
    return loss_grad
示例3: CW_est
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def CW_est(logits, x, x_plus_i, x_minus_i, curr_sample, curr_target):
    """Finite-difference estimates of the CW-loss logit gradients.

    Returns the halved symmetric-difference estimates of the gradient of
    the target-class logit and of the best non-target-class logit.
    """
    sess = K.get_session()
    batch_idx = np.arange(BATCH_SIZE)
    target_idx = list(curr_target)

    # Find, per sample, the highest-scoring class other than the target
    # by masking the target logit with a very negative value.
    base_logits = sess.run([logits], feed_dict={x: curr_sample})[0]
    base_logits[batch_idx, target_idx] = -1e4
    max_indices = np.argmax(base_logits, 1)
    max_idx = list(max_indices)

    # Evaluate the logits at the two perturbed points.
    plus = sess.run([logits], feed_dict={x: x_plus_i})[0]
    minus = sess.run([logits], feed_dict={x: x_minus_i})[0]

    # Symmetric finite differences for the target and the max logit.
    t_grad_est = (plus[batch_idx, target_idx] - minus[batch_idx, target_idx]) / args.delta
    max_grad_est = (plus[batch_idx, max_idx] - minus[batch_idx, max_idx]) / args.delta
    return t_grad_est / 2.0, max_grad_est / 2.0
示例4: test_ShapGradientExplainer
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def test_ShapGradientExplainer(self):
# model = VGG16(weights='imagenet', include_top=True)
# X, y = shap.datasets.imagenet50()
# to_explain = X[[39, 41]]
#
# url = "https://s3.amazonaws.com/deep-learning-models/image-models/imagenet_class_index.json"
# fname = shap.datasets.cache(url)
# with open(fname) as f:
# class_names = json.load(f)
#
# def map2layer(x, layer):
# feed_dict = dict(zip([model.layers[0].input], [preprocess_input(x.copy())]))
# return K.get_session().run(model.layers[layer].input, feed_dict)
#
# e = GradientExplainer((model.layers[7].input, model.layers[-1].output),
# map2layer(preprocess_input(X.copy()), 7))
# shap_values, indexes = e.explain_instance(map2layer(to_explain, 7), ranked_outputs=2)
#
print("Skipped Shap GradientExplainer")
示例5: to_savedmodel
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def to_savedmodel(model, export_path):
    """Convert the Keras HDF5 model into TensorFlow SavedModel."""
    builder = saved_model_builder.SavedModelBuilder(export_path)
    # Single-input / single-output serving signature; the output is exposed
    # under the key 'income'.
    signature = predict_signature_def(
        inputs={'input': model.inputs[0]}, outputs={'income': model.outputs[0]})
    # NOTE(review): using the Keras session as a context manager closes it on
    # exit, so any later Keras/TF work in this process needs a fresh session —
    # confirm this teardown is intended.
    with K.get_session() as sess:
        builder.add_meta_graph_and_variables(
            sess=sess,
            tags=[tag_constants.SERVING],
            signature_def_map={
                signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY: signature
            })
        builder.save()
示例6: compile_func
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def compile_func(inputs, outputs):
    """Build a callable that evaluates `outputs` for concrete input values.

    inputs  : a tensor or a list of input tensors
    outputs : tensor(s) to evaluate in the current Keras session

    Returns a function mapping a list of input values (one per input
    tensor) to the evaluated outputs.
    """
    # Accept a bare tensor by normalizing it to a one-element list.
    if not isinstance(inputs, list):
        print("Wrapping the inputs in a list...")
        inputs = [inputs]
    assert isinstance(inputs, list)

    def func_to_return(inp):
        # Allow a single bare value (not wrapped in a list) when exactly
        # one input tensor is expected.
        if len(inp) > len(inputs) and len(inputs)==1:
            print("Wrapping the inputs in a list...")
            inp = [inp]
        assert len(inp)==len(inputs),\
            ("length of provided list should be "
             +str(len(inputs))+" for tensors "+str(inputs)
             +" but got input of length "+str(len(inp)))
        # Pair each placeholder with its value and run in the session.
        feed_dict = dict(zip(inputs, inp))
        sess = get_session()
        return sess.run(outputs, feed_dict=feed_dict)

    return func_to_return
示例7: _average_metrics_in_place
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def _average_metrics_in_place(self, logs):
    """Average every metric in `logs` across workers, updating `logs` in place.

    logs : dict of metric name -> scalar value (may be None/empty).
    Presumably used with a Horovod-style allreduce (see self.allreduce_ops);
    confirm against the enclosing callback class.
    """
    logs = logs or {}
    reduced_logs = {}
    # Reduce every metric among workers. Sort metrics by name
    # to ensure consistent order.
    for metric, value in sorted(logs.items()):
        # Lazily create one variable + reduce op per metric the first time
        # that metric appears, so the graph is extended only once per name.
        if metric not in self.variables:
            self.variables[metric], self.allreduce_ops[metric] = \
                self._make_variable(metric, value)
        else:
            K.set_value(self.variables[metric], value)
        reduced_logs[metric] = \
            K.get_session().run(self.allreduce_ops[metric])
    # Override the reduced values back into logs dictionary
    # for other callbacks to use.
    for metric, value in reduced_logs.items():
        logs[metric] = value
示例8: keras_to_tensorflow
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def keras_to_tensorflow(keras_model, output_dir, model_name, out_prefix="output_", log_tensorboard=True):
    """Freeze a Keras model into a binary TensorFlow GraphDef file.

    keras_model     : compiled/loaded Keras model
    output_dir      : directory to write the frozen graph into (created if missing)
    model_name      : file name of the frozen graph
    out_prefix      : prefix for the renamed output nodes (output_1, output_2, ...)
    log_tensorboard : also import the frozen graph into TensorBoard logs
    """
    if not os.path.exists(output_dir):
        os.mkdir(output_dir)
    out_nodes = []
    # Give every model output a stable, predictable node name so consumers
    # of the frozen graph don't depend on Keras-generated names.
    for i, output_tensor in enumerate(keras_model.outputs):
        node_name = out_prefix + str(i + 1)
        out_nodes.append(node_name)
        # BUG FIX: index the `outputs` list, not the singular `output`
        # attribute — `keras_model.output[i]` sliced the output tensor
        # along its first axis instead of selecting the i-th output.
        tf.identity(output_tensor, node_name)
    sess = K.get_session()
    init_graph = sess.graph.as_graph_def()
    # Convert variables to constants, keeping only the subgraph reachable
    # from the named output nodes.
    main_graph = graph_util.convert_variables_to_constants(sess, init_graph, out_nodes)
    graph_io.write_graph(main_graph, output_dir, name=model_name, as_text=False)
    if log_tensorboard:
        import_pb_to_tensorboard.import_to_tensorboard(os.path.join(output_dir, model_name), output_dir)
示例9: export_savedmodel
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def export_savedmodel(model):
    """Export a Keras model as a versioned TensorFlow SavedModel under ./model/1."""
    print("input: {}, output: {}".format(model.input, model.output))

    # Serving signature wiring the model's single input and output tensors.
    model_signature = tf.saved_model.signature_def_utils.predict_signature_def(
        inputs={'input': model.input}, outputs={'output': model.output})

    base_dir = "model"
    version = 1
    # SavedModel convention: <base_dir>/<version>/ as a bytes path.
    export_path = os.path.join(
        compat.as_bytes(base_dir), compat.as_bytes(str(version)))
    logging.info("Export the model to {}".format(export_path))

    saved_model_builder = tf.saved_model.builder.SavedModelBuilder(export_path)
    saved_model_builder.add_meta_graph_and_variables(
        sess=K.get_session(),
        tags=[tf.saved_model.tag_constants.SERVING],
        clear_devices=True,
        signature_def_map={
            tf.saved_model.signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
                model_signature
        })
    saved_model_builder.save()
示例10: on_epoch_end
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def on_epoch_end(self, epoch, logs=None):
    """Per-epoch bookkeeping: log losses, periodically save weights, anneal hard mining.

    epoch : zero-based epoch index
    logs  : Keras metrics dict (may be None)
    """
    # BUG FIX: avoid the mutable default argument `logs={}`; normalize here.
    logs = logs or {}
    # Save training and validation losses
    logz.log_tabular('train_loss', logs.get('loss'))
    logz.log_tabular('val_loss', logs.get('val_loss'))
    logz.dump_tabular()
    # Save model every 'period' epochs
    if (epoch + 1) % self.period == 0:
        filename = self.filepath + '/model_weights_' + str(epoch) + '.h5'
        print("Saved model at {}".format(filename))
        self.model.save_weights(filename, overwrite=True)
    # Hard mining: exponentially decay (from batch_size down to a floor of
    # 10 / 5 samples, starting around epoch 30) how many of the hardest
    # samples contribute to the MSE and entropy losses.
    sess = K.get_session()
    mse_function = self.batch_size-(self.batch_size-10)*(np.maximum(0.0,1.0-np.exp(-1.0/30.0*(epoch-30.0))))
    entropy_function = self.batch_size-(self.batch_size-5)*(np.maximum(0.0,1.0-np.exp(-1.0/30.0*(epoch-30.0))))
    self.model.k_mse.load(int(np.round(mse_function)), sess)
    self.model.k_entropy.load(int(np.round(entropy_function)), sess)
示例11: __init__
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def __init__(self, model_path='model_data/yolo.h5', anchors_path='model_data/yolo_anchors.txt', yolo3_dir=None):
    """Set up YOLO detector configuration, class/anchor metadata, colors and the Keras session.

    model_path   : path to the trained Keras model weights (.h5)
    anchors_path : text file containing the YOLO anchor boxes
    yolo3_dir    : optional directory for the yolo3 code/resources
    """
    self.yolo3_dir = yolo3_dir
    self.model_path = model_path
    self.anchors_path = anchors_path
    self.classes_path = 'model_data/coco_classes.txt'
    self.score = 0.3   # minimum confidence to keep a detection
    self.iou = 0.45    # IoU threshold for non-max suppression
    self.class_names = self._get_class()
    self.anchors = self._get_anchors()
    self.sess = K.get_session()
    self.model_image_size = (416, 416) # fixed size or (None, None), hw
    self.session = None
    self.final_model = None
    # Generate colors for drawing bounding boxes.
    # One evenly spaced hue per class, full saturation and value.
    hsv_tuples = [(x / len(self.class_names), 1., 1.)
                  for x in range(len(self.class_names))]
    self.colors = list(map(lambda x: colorsys.hsv_to_rgb(*x), hsv_tuples))
    # Convert floats in [0, 1] to 8-bit RGB tuples.
    self.colors = list(
        map(lambda x: (int(x[0] * 255), int(x[1] * 255), int(x[2] * 255)),
            self.colors))
    np.random.seed(10101)  # Fixed seed for consistent colors across runs.
    np.random.shuffle(self.colors)  # Shuffle colors to decorrelate adjacent classes.
    np.random.seed(None)  # Reset seed to default.
    # Put Keras into inference mode for all subsequent calls.
    K.set_learning_phase(0)
示例12: predict_tfrecord
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def predict_tfrecord(self, x_batch):
    """Run the TFRecord-queue-backed predict function and return its output(s).

    NOTE(review): `x_batch` is never used in this body — inputs presumably
    come from the TFRecord queue wired into the predict function; confirm.
    """
    # Feed a literal 0 (test mode) for the learning phase only when the
    # model depends on it and it is still a tensor (not a fixed int).
    if self.uses_learning_phase and not isinstance(K.learning_phase(), int):
        ins = [0.]
    else:
        ins = []
    self._make_tfrecord_predict_function()
    try:
        sess = K.get_session()
        # Start the input queue runners so the TFRecord pipeline yields data.
        coord = tf.train.Coordinator()
        threads = tf.train.start_queue_runners(sess=sess, coord=coord)
        outputs = self.predict_function(ins)
    finally:
        # TODO: If you close the queue, you can't open it again..
        # if stop_queue_runners:
        #     coord.request_stop()
        #     coord.join(threads)
        pass
    # Unwrap single-output models to a bare value.
    if len(outputs) == 1:
        return outputs[0]
    return outputs
示例13: as_keras_metric
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def as_keras_metric(method):
    """Decorator adapting a TensorFlow streaming metric to a Keras metric.

    TF streaming metrics return a (value, update_op) pair and use local
    variables; Keras expects a single tensor. The wrapper initializes the
    local variables, forces the update op to run before the value is read,
    and returns the value tensor.
    """
    import functools
    from keras import backend as K
    import tensorflow as tf

    @functools.wraps(method)
    def wrapper(*args, **kwargs):
        """ Wrapper for turning tensorflow metrics into keras metrics """
        # Generalized to a true pass-through: the original hard-coded
        # exactly two positional parameters (self, args); *args accepts
        # the conventional (y_true, y_pred) call and any other arity.
        value, update_op = method(*args, **kwargs)
        # Streaming metrics allocate local variables that must be initialized.
        K.get_session().run(tf.local_variables_initializer())
        # Ensure the update op executes before the value is evaluated.
        with tf.control_dependencies([update_op]):
            value = tf.identity(value)
        return value

    return wrapper
示例14: __init__
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def __init__(self, **kwargs):
    """Initialize the detector: merge class defaults with user overrides,
    load class/anchor metadata, grab the Keras session and build the
    detection output tensors."""
    # User-supplied kwargs take precedence over the class defaults.
    settings = dict(self._defaults)
    settings.update(kwargs)
    self.__dict__.update(settings)
    self.class_names = self._get_class()
    self.anchors = self._get_anchors()
    self.sess = K.get_session()
    self.boxes, self.scores, self.classes = self.generate()
示例15: loss
# 需要导入模块: from keras import backend [as 别名]
# 或者: from keras.backend import get_session [as 别名]
def loss(X):
    """Top-class confidence, signed by whether the top class is the target.

    Returns the maximum predicted confidence as a positive value when the
    most confident class equals curr_target, otherwise its negation.
    """
    X = X.reshape((1, FLAGS.IMAGE_ROWS, FLAGS.IMAGE_COLS, FLAGS.NUM_CHANNELS))
    confidence = K.get_session().run(
        [prediction], feed_dict={x: X, K.learning_phase(): 0})[0]
    # confidence[:,curr_target] = 1e-4
    top_class = np.argmax(confidence, 1)
    top_conf = np.max(confidence, 1)[0]
    # Positive when the most confident class is the target, negative otherwise.
    if top_class == curr_target:
        return top_conf
    return -1.0 * top_conf