This article collects typical usage examples of keras.backend in Python. If you are wondering how keras.backend is used in practice, the curated code examples below may help; you can also explore other usage examples of the keras package it belongs to.
The 15 code examples shown below are ordered by popularity by default.
Example 1: on_epoch_end
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def on_epoch_end(self, epoch, logs={}):
    self.losses += [logs.get('val_loss')]
    if not self.losses[-1] < self.min_loss:
        self.steps = self.steps + 1
    else:
        self.steps = 0

    if self.steps > self.convergence_steps:
        lr = keras.backend.get_value(self.model.optimizer.lr)
        keras.backend.set_value(
            self.model.optimizer.lr, lr / self.lr_decay)
        self.steps = 0
        logger.info("\n Reduced learning rate to " + str(lr))

        if lr < self.lr_minimum:
            self.model.stop_training = True

    self.min_loss = min(self.min_loss, self.losses[-1])

################################################################################
# QRNN
################################################################################
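The two backend calls doing the real work here are keras.backend.get_value and keras.backend.set_value, which read and write the optimizer's learning-rate variable. A minimal standalone sketch of the same pattern (the optimizer choice and the values are illustrative, not taken from the snippet above):

import keras
import keras.backend as K

# Create an optimizer whose learning rate we will adjust manually.
opt = keras.optimizers.SGD(lr=0.1)

# Read the current learning rate as a plain Python float.
current_lr = K.get_value(opt.lr)

# Write a new value back into the backend variable; the optimizer
# uses the updated rate on the next training step.
K.set_value(opt.lr, current_lr / 10.0)
print(K.get_value(opt.lr))  # roughly 0.01 (stored as float32)

In newer tf.keras releases the optimizer attribute is typically named learning_rate, with lr kept as an alias, so the same pattern still applies.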
Example 2: __init__
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def __init__(self, mode, config, model_dir):
    """
    mode: Either "training" or "inference"
    config: A subclass of the Config class
    model_dir: Directory to save training logs and trained weights
    """
    assert mode in ['training', 'inference']

    if mode == 'training':
        import keras.backend.tensorflow_backend as KTF
        # Use a separately named variable so the TF session config does not
        # shadow (and overwrite) the `config` argument used below.
        tf_config = tf.ConfigProto()
        tf_config.gpu_options.allow_growth = True
        session = tf.Session(config=tf_config)
        KTF.set_session(session)

    self.mode = mode
    self.config = config
    self.model_dir = model_dir
    self.set_log_dir()
    self.keras_model = self.build(mode=mode, config=config)
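The ConfigProto/Session/set_session calls above are TensorFlow 1.x APIs. On TensorFlow 2.x, where tf.keras manages the session itself, the equivalent way to enable on-demand GPU memory growth is roughly the following sketch (API paths assume TF 2.x):

import tensorflow as tf

# Ask TensorFlow to allocate GPU memory on demand instead of
# reserving all of it up front.
for gpu in tf.config.list_physical_devices('GPU'):
    tf.config.experimental.set_memory_growth(gpu, True)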
Example 3: preres_activation
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def preres_activation(x,
                      name="preres_activation"):
    """
    PreResNet pure pre-activation block without a convolution layer.
    It is used by itself as the final block.

    Parameters
    ----------
    x : keras.backend tensor/variable/symbol
        Input tensor/variable/symbol.
    name : str, default 'preres_activation'
        Block name.

    Returns
    -------
    keras.backend tensor/variable/symbol
        Resulting tensor/variable/symbol.
    """
    x = batchnorm(
        x=x,
        name=name + "/bn")
    x = nn.Activation("relu", name=name + "/activ")(x)
    return x
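batchnorm and nn come from the surrounding module of the original project. Assuming batchnorm wraps keras.layers.BatchNormalization and nn is an alias for keras.layers, an equivalent standalone sketch (the input shape is made up) looks like this:

import keras
from keras import layers as nn

inputs = keras.layers.Input(shape=(7, 7, 64))

# Pre-activation: BatchNorm followed by ReLU, with no convolution.
x = nn.BatchNormalization(name="preres_activation/bn")(inputs)
x = nn.Activation("relu", name="preres_activation/activ")(x)

model = keras.models.Model(inputs=inputs, outputs=x)
model.summary()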
Example 4: compile
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def compile(self):
    """
    Compile model for training.

    Only `keras` native metrics are compiled together with the backend.
    MatchZoo metrics are evaluated only through :meth:`evaluate`.
    Notice that `keras` counts `loss` as one of the metrics while MatchZoo
    :class:`matchzoo.engine.BaseTask` does not.

    Examples:
        >>> from matchzoo import models
        >>> model = models.Naive()
        >>> model.guess_and_fill_missing_params(verbose=0)
        >>> model.params['task'].metrics = ['mse', 'map']
        >>> model.params['task'].metrics
        ['mse', mean_average_precision(0.0)]
        >>> model.build()
        >>> model.compile()

    """
    self._backend.compile(optimizer=self._params['optimizer'],
                          loss=self._params['task'].loss)
Example 5: __call__
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def __call__(self, inputs):
    if not isinstance(inputs, (list, tuple)):
        raise TypeError('`inputs` should be a list or tuple.')

    feed_dict = self.feed_dict.copy()
    for tensor, value in zip(self.inputs, inputs):
        if is_sparse(tensor):
            sparse_coo = value.tocoo()
            indices = np.concatenate((np.expand_dims(sparse_coo.row, 1),
                                      np.expand_dims(sparse_coo.col, 1)), 1)
            value = (indices, sparse_coo.data, sparse_coo.shape)
        feed_dict[tensor] = value

    fetches = self.outputs + [self.updates_op] + self.fetches
    session = get_session()
    updated = session.run(fetches=fetches, feed_dict=feed_dict,
                          **self.session_kwargs)
    return updated

# function_get_fetches adapted from function() in K.backend.tensorflow_backend
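The COO conversion above turns a SciPy sparse matrix into the (indices, values, dense_shape) triple that TensorFlow expects when feeding a sparse placeholder. As a standalone illustration (matrix shape and density are arbitrary):

import numpy as np
import scipy.sparse as sp

matrix = sp.random(4, 3, density=0.5, format='csr')
sparse_coo = matrix.tocoo()

# Stack row and column indices into an (nnz, 2) array, the layout a
# TensorFlow sparse feed expects.
indices = np.concatenate((np.expand_dims(sparse_coo.row, 1),
                          np.expand_dims(sparse_coo.col, 1)), 1)
value = (indices, sparse_coo.data, sparse_coo.shape)
print(value[0].shape, value[1].shape, value[2])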
Example 6: function_get_fetches
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def function_get_fetches(inputs, outputs, updates=None, **kwargs):
    """Instantiates a Keras function.

    # Arguments
        inputs: List of placeholder tensors.
        outputs: List of output tensors.
        updates: List of update ops.
        **kwargs: Passed to `tf.Session.run`.

    # Returns
        Output values as Numpy arrays.

    # Raises
        ValueError: if invalid kwargs are passed in.
    """
    if kwargs:
        for key in kwargs:
            if not (has_arg(tf.Session.run, key, True) or has_arg(Function.__init__, key, True)):
                msg = 'Invalid argument "%s" passed to K.function with TensorFlow backend' % key
                raise ValueError(msg)
    return FunctionGetFetches(inputs, outputs, updates=updates, **kwargs)

# _make_predict_function_get_fetches adapted from _make_predict_function() in K.backend.tensorflow_backend
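function_get_fetches mirrors the interface of the stock keras.backend.function, which the following minimal sketch demonstrates (placeholder shape and input data are made up):

import numpy as np
import keras.backend as K

x = K.placeholder(shape=(None, 3))
y = x * 2.0

# Standard backend function: a list of inputs in, a list of outputs out.
double = K.function([x], [y])
result = double([np.ones((2, 3), dtype='float32')])
print(result[0])  # array of 2.0s with shape (2, 3)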
Example 7: crf_loss
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def crf_loss(y_true, y_pred):
    """General CRF loss function depending on the learning mode.

    # Arguments
        y_true: tensor with true targets.
        y_pred: tensor with predicted targets.

    # Returns
        If the CRF layer is being trained in the join mode, returns the negative
        log-likelihood. Otherwise returns the categorical crossentropy implemented
        by the underlying Keras backend.

    # About GitHub
        If you open an issue or a pull request about CRF, please
        add `cc @lzfelix` to notify Luiz Felix.
    """
    crf, idx = y_pred._keras_history[:2]
    if crf.learn_mode == 'join':
        return crf_nll(y_true, y_pred)
    else:
        if crf.sparse_target:
            return sparse_categorical_crossentropy(y_true, y_pred)
        else:
            return categorical_crossentropy(y_true, y_pred)

# crf_marginal_accuracy, crf_viterbi_accuracy
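This loss comes from keras-contrib and is meant to be used when the model's output is a CRF layer. A minimal sketch, assuming the keras_contrib package is installed (vocabulary size, embedding size, and tag count are illustrative):

import keras
from keras_contrib.layers import CRF
from keras_contrib.losses import crf_loss
from keras_contrib.metrics import crf_viterbi_accuracy

num_tags = 5
inputs = keras.layers.Input(shape=(None,), dtype='int32')
x = keras.layers.Embedding(input_dim=1000, output_dim=64, mask_zero=True)(inputs)
crf = CRF(num_tags, learn_mode='join', sparse_target=True)
outputs = crf(x)

model = keras.models.Model(inputs, outputs)
# In join mode crf_loss dispatches to the negative log-likelihood branch above.
model.compile(optimizer='adam', loss=crf_loss, metrics=[crf_viterbi_accuracy])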
Example 8: assert_save_load
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def assert_save_load(self, model, metrics_fns, samples_fn):
    metrics = [m() for m in metrics_fns]
    custom_objects = {m.__name__: m for m in metrics}
    custom_objects["sin"] = keras.backend.sin
    custom_objects["abs"] = keras.backend.abs

    x, y = samples_fn(100)
    model.fit(x, y, epochs=10)

    with tempfile.NamedTemporaryFile() as file:
        model.save(file.name, overwrite=True)
        loaded_model = keras.models.load_model(
            file.name, custom_objects=custom_objects)

    expected = model.evaluate(x, y)[1:]
    received = loaded_model.evaluate(x, y)[1:]
    self.assertEqual(expected, received)
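custom_objects is how load_model resolves functions that were saved by name; here the test registers keras.backend.sin and keras.backend.abs, presumably because the models under test apply them inside Lambda layers. A hedged standalone sketch of that round trip (the toy architecture and file path are made up):

import keras
import keras.backend as K

inputs = keras.layers.Input(shape=(4,))
outputs = keras.layers.Lambda(K.sin)(inputs)
model = keras.models.Model(inputs, outputs)

model.save('/tmp/sin_model.h5', overwrite=True)

# The Lambda's function was stored by name, so it must be supplied on load.
loaded = keras.models.load_model('/tmp/sin_model.h5',
                                 custom_objects={'sin': K.sin})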
Example 9: _sort_weights_by_name
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def _sort_weights_by_name(self, weights):
    """Sorts weights by name and returns them."""
    if not weights:
        return []

    if K.backend() == 'theano':
        key = lambda x: x.name if x.name else x.auto_name
    else:
        key = lambda x: x.name

    weights.sort(key=key)
    return weights
Example 10: start_keras
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def start_keras(logger, job_backend):
    if 'KERAS_BACKEND' not in os.environ:
        os.environ['KERAS_BACKEND'] = 'tensorflow'

    from . import keras_model_utils

    # We need to import keras here, so we know which backend is used (and whether GPU is used).
    os.chdir(job_backend.git.work_tree)
    logger.debug("Start simple model")

    # We use the source from the job commit directly.
    with job_backend.git.batch_commit('Git Version'):
        job_backend.set_system_info('git_remote_url', job_backend.git.get_remote_url('origin'))
        job_backend.set_system_info('git_version', job_backend.git.job_id)

    # All our shapes follow the TensorFlow schema: (height, width, channels).
    import keras.backend

    if hasattr(keras.backend, 'set_image_dim_ordering'):
        keras.backend.set_image_dim_ordering('tf')

    if hasattr(keras.backend, 'set_image_data_format'):
        keras.backend.set_image_data_format('channels_last')

    from .KerasCallback import KerasCallback
    trainer = Trainer(job_backend)
    keras_logger = KerasCallback(job_backend, job_backend.logger)

    job_backend.progress(0, job_backend.job['config']['epochs'])

    logger.info("Start training")
    keras_model_utils.job_start(job_backend, trainer, keras_logger)

    job_backend.done()
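set_image_dim_ordering('tf') is the older Keras spelling and set_image_data_format('channels_last') the current one, which is why the code probes both with hasattr. A quick sketch of the current API:

import keras.backend as K

print(K.image_data_format())          # 'channels_last' or 'channels_first'
K.set_image_data_format('channels_last')
assert K.image_data_format() == 'channels_last'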
Example 11: on_signusr1
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def on_signusr1(self, signal, frame):
    self.logger.warning("USR1: backend job_id=%s (running=%s, ended=%s), client (online=%s, active=%s, registered=%s, "
                        "connected=%s, queue=%s), git (active_thread=%s, last_push_time=%s)." % (
        str(self.job_id),
        str(self.running),
        str(self.ended),
        str(self.client.online),
        str(self.client.active),
        str(self.client.registered),
        str(self.client.connected),
        str([str(i) + ':' + str(len(x)) for i, x in six.iteritems(self.client.queues)]),
        str(self.git.active_thread),
        str(self.git.last_push_time),
    ))
Example 12: is_master_process
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def is_master_process(self):
    """
    Master means that aetros.backend.start_job() has been called without using the command `aetros start`.

    If master is true, we collect and track some data that `aetros start` would usually handle and reset the job's
    temp files on the server.

    :return:
    """
    return os.getenv('AETROS_JOB_ID') is None
Example 13: sync_weights
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def sync_weights(self, push=True):
    if not os.path.exists(self.get_job_model().get_weights_filepath_latest()):
        return

    self.logger.debug("sync weights...")
    self.set_status('SYNC WEIGHTS', add_section=False)

    with open(self.get_job_model().get_weights_filepath_latest(), 'rb') as f:
        import keras.backend
        self.git.commit_file('Added weights', 'aetros/weights/latest.hdf5', f.read())

        image_data_format = None
        if hasattr(keras.backend, 'set_image_data_format'):
            image_data_format = keras.backend.image_data_format()

        info = {
            'framework': 'keras',
            'backend': keras.backend.backend(),
            'image_data_format': image_data_format
        }

        self.git.commit_file('Added weights', 'aetros/weights/latest.json', simplejson.dumps(info))

    if push:
        self.git.push()

    # todo, implement optional saving of self.get_job_model().get_weights_filepath_best()
Example 14: batch_eval
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def batch_eval(sess, tf_inputs, tf_outputs, numpy_inputs):
    """
    A helper function that computes a tensor on numpy inputs by batches.
    """
    n = len(numpy_inputs)
    assert n > 0
    assert n == len(tf_inputs)
    m = numpy_inputs[0].shape[0]
    for i in six.moves.xrange(1, n):
        assert numpy_inputs[i].shape[0] == m

    out = []
    for _ in tf_outputs:
        out.append([])

    with sess.as_default():
        for start in six.moves.xrange(0, m, FLAGS.batch_size):
            batch = start // FLAGS.batch_size
            if batch % 100 == 0 and batch > 0:
                print("Batch " + str(batch))

            # Compute batch start and end indices
            start = batch * FLAGS.batch_size
            end = start + FLAGS.batch_size
            numpy_input_batches = [numpy_input[start:end] for numpy_input in numpy_inputs]
            cur_batch_size = numpy_input_batches[0].shape[0]
            assert cur_batch_size <= FLAGS.batch_size
            for e in numpy_input_batches:
                assert e.shape[0] == cur_batch_size

            feed_dict = dict(zip(tf_inputs, numpy_input_batches))
            feed_dict[keras.backend.learning_phase()] = 0
            numpy_output_batches = sess.run(tf_outputs, feed_dict=feed_dict)
            for e in numpy_output_batches:
                assert e.shape[0] == cur_batch_size, e.shape
            for out_elem, numpy_output_batch in zip(out, numpy_output_batches):
                out_elem.append(numpy_output_batch)

    out = [np.concatenate(x, axis=0) for x in out]
    for e in out:
        assert e.shape[0] == m, e.shape
    return out
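The snippet relies on a module-level FLAGS.batch_size defined elsewhere in the project, and it feeds keras.backend.learning_phase() the value 0 so that layers such as Dropout and BatchNormalization run in inference mode. A sketch of how such a flag might be declared, assuming TensorFlow 1.x style flags (the flag name and default are only illustrative):

import tensorflow as tf

flags = tf.app.flags
flags.DEFINE_integer('batch_size', 128, 'Size of evaluation batches')
FLAGS = flags.FLAGS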
Example 15: model_argmax
# Required module import: import keras [as alias]
# Or: from keras import backend [as alias]
def model_argmax(sess, x, predictions, sample):
    """
    Helper function that computes the current class prediction.
    :param sess: TF session
    :param x: the input placeholder
    :param predictions: the model's symbolic output
    :param sample: (1 x 1 x img_rows x img_cols) numpy array with sample input
    :return: the argmax output of predictions, i.e. the current predicted class
    """
    feed_dict = {x: sample, keras.backend.learning_phase(): 0}
    probabilities = sess.run(predictions, feed_dict)
    return np.argmax(probabilities)
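A hedged usage sketch for model_argmax, assuming a Keras 2.x / TensorFlow 1.x setup where the backend session can be fetched with keras.backend.get_session() (the toy model and input shape are made up):

import numpy as np
import keras
import keras.backend

# A toy classifier standing in for the real model.
model = keras.models.Sequential([
    keras.layers.Dense(16, activation='relu', input_shape=(8,)),
    keras.layers.Dropout(0.5),
    keras.layers.Dense(3, activation='softmax'),
])

sess = keras.backend.get_session()
sample = np.random.rand(1, 8).astype('float32')

# learning_phase is fed 0 inside model_argmax, so Dropout behaves as at inference time.
predicted_class = model_argmax(sess, model.input, model.output, sample)
print(predicted_class)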