This article collects typical usage examples of the Python method keras.optimizers.serialize. If you are wondering what optimizers.serialize does, how to use it, or where to find working examples, the curated code samples below may help. You can also explore further usage examples from the enclosing module, keras.optimizers.
Below are 7 code examples of the optimizers.serialize method, sorted by popularity by default.
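Before diving into the examples, here is a minimal sketch of the round trip this method enables (assuming the standalone keras package; the dict shape shown in the comment is indicative):

from keras import optimizers

# serialize turns an optimizer instance into a JSON-friendly dict,
# and deserialize reconstructs an equivalent instance from it.
sgd = optimizers.SGD(lr=0.01, momentum=0.9)
config = optimizers.serialize(sgd)          # {'class_name': 'SGD', 'config': {...}}
restored = optimizers.deserialize(config)   # a new, equivalent SGD instance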
Example 1: __init__
# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import serialize [as alias]
def __init__(self, model, optimizer, loss, loss_weights, metrics=["accuracy"],
             features_col="features", label_col="label",
             batch_size=32, num_epoch=1, learning_rate=1.0):
    assert isinstance(optimizer, (str, Optimizer)), "'optimizer' must be a string or a Keras Optimizer instance"
    assert isinstance(features_col, (str, list)), "'features_col' must be a string or a list of strings"
    assert isinstance(label_col, (str, list)), "'label_col' must be a string or a list of strings"
    self.model = model
    self.optimizer = {'class_name': optimizer, 'config': {}} if isinstance(optimizer, str) else serialize(optimizer)
    self.loss = loss
    self.loss_weights = loss_weights
    self.metrics = metrics
    self.features_column = [features_col] if isinstance(features_col, str) else features_col
    self.label_column = [label_col] if isinstance(label_col, str) else label_col
    self.batch_size = batch_size
    self.num_epoch = num_epoch
    self.max_mini_batches = 100
    self.prefetching_thread = None
    self.mini_batches = None
    self.is_prefetching = True
    self.worker_id = -1
    self.learning_rate = learning_rate
    self.num_inputs = len(self.features_column)
    self.num_outputs = len(self.label_column)
    self.current_epoch = 0
Example 2: test_spark_ml_model
# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import serialize [as alias]
def test_spark_ml_model(spark_context):
    df = to_data_frame(spark_context, x_train, y_train, categorical=True)
    test_df = to_data_frame(spark_context, x_test, y_test, categorical=True)

    sgd = optimizers.SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
    sgd_conf = optimizers.serialize(sgd)

    # Initialize Spark ML Estimator
    estimator = ElephasEstimator()
    estimator.set_keras_model_config(model.to_yaml())
    estimator.set_optimizer_config(sgd_conf)
    estimator.set_mode("synchronous")
    estimator.set_loss("categorical_crossentropy")
    estimator.set_metrics(['acc'])
    estimator.set_epochs(epochs)
    estimator.set_batch_size(batch_size)
    estimator.set_validation_split(0.1)
    estimator.set_categorical_labels(True)
    estimator.set_nb_classes(nb_classes)

    # Fitting a model returns a Transformer
    pipeline = Pipeline(stages=[estimator])
    fitted_pipeline = pipeline.fit(df)

    # Evaluate Spark model by evaluating the underlying model
    prediction = fitted_pipeline.transform(test_df)
    pnl = prediction.select("label", "prediction")
    pnl.show(100)

    prediction_and_label = pnl.rdd.map(lambda row: (row.label, row.prediction))
    metrics = MulticlassMetrics(prediction_and_label)
    print(metrics.precision())
    print(metrics.recall())
Example 3: _test_optimizer
# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import serialize [as alias]
def _test_optimizer(optimizer, target=0.75):
    x_train, y_train = get_test_data()
    model = get_model(x_train.shape[1], 10, y_train.shape[1])
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
    assert history.history['acc'][-1] >= target

    config = optimizers.serialize(optimizer)
    custom_objects = {optimizer.__class__.__name__: optimizer.__class__}
    optim = optimizers.deserialize(config, custom_objects)
    new_config = optimizers.serialize(optim)
    assert config == new_config
Example 4: _test_optimizer
# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import serialize [as alias]
def _test_optimizer(optimizer, target=0.75):
    x_train, y_train = get_test_data()

    model = Sequential()
    model.add(Dense(10, input_shape=(x_train.shape[1],)))
    model.add(Activation('relu'))
    model.add(Dense(y_train.shape[1]))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
    assert history.history['acc'][-1] >= target

    config = optimizers.serialize(optimizer)
    optim = optimizers.deserialize(config)
    new_config = optimizers.serialize(optim)
    new_config['class_name'] = new_config['class_name'].lower()
    assert config == new_config

    # Test constraints.
    model = Sequential()
    dense = Dense(10,
                  input_shape=(x_train.shape[1],),
                  kernel_constraint=lambda x: 0. * x + 1.,
                  bias_constraint=lambda x: 0. * x + 2.,)
    model.add(dense)
    model.add(Activation('relu'))
    model.add(Dense(y_train.shape[1]))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    model.train_on_batch(x_train[:10], y_train[:10])

    kernel, bias = dense.get_weights()
    assert_allclose(kernel, 1.)
    assert_allclose(bias, 2.)
Example 5: _test_optimizer
# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import serialize [as alias]
def _test_optimizer(optimizer, target=0.75):
    x_train, y_train = get_test_data()

    model = Sequential()
    model.add(Dense(10, input_shape=(x_train.shape[1],)))
    model.add(Activation('relu'))
    model.add(Dense(y_train.shape[1]))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    history = model.fit(x_train, y_train, epochs=2, batch_size=16, verbose=0)
    # TODO PlaidML fails this test
    assert history.history['acc'][-1] >= target

    config = k_optimizers.serialize(optimizer)
    optim = k_optimizers.deserialize(config)
    new_config = k_optimizers.serialize(optim)
    new_config['class_name'] = new_config['class_name'].lower()
    assert config == new_config

    # Test constraints.
    model = Sequential()
    dense = Dense(10,
                  input_shape=(x_train.shape[1],),
                  kernel_constraint=lambda x: 0. * x + 1.,
                  bias_constraint=lambda x: 0. * x + 2.,)
    model.add(dense)
    model.add(Activation('relu'))
    model.add(Dense(y_train.shape[1]))
    model.add(Activation('softmax'))
    model.compile(loss='categorical_crossentropy',
                  optimizer=optimizer,
                  metrics=['accuracy'])
    model.train_on_batch(x_train[:10], y_train[:10])

    kernel, bias = dense.get_weights()
    assert_allclose(kernel, 1.)
    assert_allclose(bias, 2.)
Example 6: get_config
# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import serialize [as alias]
def get_config(self):
    config = {'optimizer': serialize(self.optimizer),
              'l2_full_step': float(K.get_value(self.l2_full_step)),
              'l2_full_ratio': float(K.get_value(self.l2_full_ratio)),
              'l2_difference_full_ratio': float(K.get_value(self.l2_difference_full_ratio))}
    return config
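Example 6 shows only the serializing half; a class that stores its optimizer this way typically restores it with optimizers.deserialize in a matching from_config. A minimal self-contained sketch, using a hypothetical WrappedOptimizer class that is not from the original project:

from keras import optimizers

class WrappedOptimizer(object):
    # Hypothetical wrapper: stores an inner optimizer and round-trips it
    # through serialize/deserialize in its own config.
    def __init__(self, optimizer):
        self.optimizer = optimizers.get(optimizer)  # accepts an instance or identifier

    def get_config(self):
        return {'optimizer': optimizers.serialize(self.optimizer)}

    @classmethod
    def from_config(cls, config):
        config = config.copy()
        config['optimizer'] = optimizers.deserialize(config['optimizer'])
        return cls(**config)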
Example 7: __init__
# Required import: from keras import optimizers [as alias]
# Or: from keras.optimizers import serialize [as alias]
def __init__(self, model, mode='asynchronous', frequency='epoch', parameter_server_mode='http', num_workers=None,
             custom_objects=None, batch_size=32, port=4000, *args, **kwargs):
    """SparkModel

    Base class for distributed training on RDDs. Spark model takes a Keras
    model as master network, an optimization scheme, a parallelisation mode
    and an averaging frequency.

    :param model: Compiled Keras model
    :param mode: String, choose from `asynchronous`, `synchronous` and `hogwild`
    :param frequency: String, either `epoch` or `batch`
    :param parameter_server_mode: String, either `http` or `socket`
    :param num_workers: int, number of workers used for training (defaults to None)
    :param custom_objects: Keras custom objects
    :param batch_size: batch size used for training and inference
    :param port: port used in case of 'http' parameter server mode
    """
    self._master_network = model
    if not hasattr(model, "loss"):
        raise Exception(
            "Compile your Keras model before initializing an Elephas model with it")
    metrics = model.metrics
    loss = model.loss
    optimizer = serialize_optimizer(model.optimizer)
    if custom_objects is None:
        custom_objects = {}
    if metrics is None:
        metrics = ["accuracy"]
    self.mode = mode
    self.frequency = frequency
    self.num_workers = num_workers
    self.weights = self._master_network.get_weights()
    self.pickled_weights = None
    self.master_optimizer = optimizer
    self.master_loss = loss
    self.master_metrics = metrics
    self.custom_objects = custom_objects
    self.parameter_server_mode = parameter_server_mode
    self.batch_size = batch_size
    self.port = port
    self.kwargs = kwargs
    self.serialized_model = model_to_dict(model)
    if self.mode != 'synchronous':
        if self.parameter_server_mode == 'http':
            self.parameter_server = HttpServer(
                self.serialized_model, self.mode, self.port)
            self.client = HttpClient(self.port)
        elif self.parameter_server_mode == 'socket':
            self.parameter_server = SocketServer(self.serialized_model)
            self.client = SocketClient()
        else:
            raise ValueError("Parameter server mode has to be either `http` or `socket`, "
                             "got {}".format(self.parameter_server_mode))