

Python models.model_from_yaml Method Code Examples

This article collects typical usage examples of the Python method keras.models.model_from_yaml. If you are wondering what model_from_yaml does, how to call it, or where to find working examples of it, the curated code examples below should help. You can also explore further usage examples from the keras.models module.


The following presents 15 code examples of models.model_from_yaml, sorted by popularity by default.
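Before the examples, here is a minimal round-trip sketch, not taken from any of the projects below; the layer sizes and file names are placeholders, and it assumes an older Keras/TensorFlow version in which model_from_yaml is still available. It shows the pattern all of the examples share: to_yaml() serializes only the architecture, the weights are saved separately, and model_from_yaml() rebuilds the architecture so the weights can be loaded back in.

from keras.models import Sequential, model_from_yaml
from keras.layers import Dense

model = Sequential([Dense(10, activation='softmax', input_shape=(784,))])

yaml_string = model.to_yaml()        # architecture only; weights are not included
model.save_weights('weights.h5')     # weights must be saved separately

restored_model = model_from_yaml(yaml_string)
restored_model.load_weights('weights.h5')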

Example 1: train

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def train(self, data_iterator):
        """Train a keras model on a worker
        """
        optimizer = get_optimizer(self.master_optimizer)
        self.model = model_from_yaml(self.yaml, self.custom_objects)
        self.model.compile(optimizer=optimizer,
                           loss=self.master_loss, metrics=self.master_metrics)
        self.model.set_weights(self.parameters.value)

        feature_iterator, label_iterator = tee(data_iterator, 2)
        x_train = np.asarray([x for x, y in feature_iterator])
        y_train = np.asarray([y for x, y in label_iterator])

        self.model.compile(optimizer=self.master_optimizer,
                           loss=self.master_loss,
                           metrics=self.master_metrics)

        weights_before_training = self.model.get_weights()
        if x_train.shape[0] > self.train_config.get('batch_size'):
            self.model.fit(x_train, y_train, **self.train_config)
        weights_after_training = self.model.get_weights()
        deltas = subtract_params(
            weights_before_training, weights_after_training)
        yield deltas 
Author: maxpumperla | Project: elephas | Lines: 26 | Source file: worker.py

Example 2: best_models

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def best_models(self, nb_models, model, data, max_evals):
        trials_list = self.compute_trials(model, data, max_evals)
        num_trials = sum(len(trials) for trials in trials_list)
        if num_trials < nb_models:
            nb_models = len(trials_list)
        scores = []
        for trials in trials_list:
            scores = scores + [trial.get('result').get('loss')
                               for trial in trials]
        cut_off = sorted(scores, reverse=True)[nb_models - 1]
        model_list = []
        for trials in trials_list:
            for trial in trials:
                if trial.get('result').get('loss') >= cut_off:
                    model = model_from_yaml(trial.get('result').get('model'))
                    model.set_weights(pickle.loads(
                        trial.get('result').get('weights')))
                    model_list.append(model)
        return model_list 
Author: maxpumperla | Project: elephas | Lines: 21 | Source file: hyperparam.py

Example 3: load_model

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_model(self):
        print('Loading model ...')
        # check existence of params
        assert os.path.exists(self.model_folder), 'model_folder is not found: {}'.format(self.model_folder)
        assert self.threshold is not None, 'Argument required: --threshold'
        assert self.weights_fname is not None, 'Argument required: --weights-file'
        checkExistence(self.weights_fname)
        model_graph = '{}/graph-arch.yaml'.format(self.model_folder)
        model_train_vars = '{}/other_vars.npz'.format(self.model_folder)
        checkExistence(model_graph)
        checkExistence(model_train_vars)
        # load models
        from keras.models import model_from_yaml
        with open(model_graph, 'r') as fgraph:
            self.model = model_from_yaml(fgraph.read())
            self.model.load_weights(self.weights_fname)
        npzfile = np.load(model_train_vars)
        self.agentAct_vocab_size = np.int32(npzfile['agentAct_vocab_size'][()])
        self.userTagIntent_vocab_size = np.int32(npzfile['userTagIntent_vocab_size'][()])
        self.id2agentAct = npzfile['id2agentAct'][()]
        self.window_size = np.int32(npzfile['window_size'][()]) 
Author: XuesongYang | Project: end2end_dialog | Lines: 23 | Source file: AgentActClassifyingModel.py

Example 4: load_model

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_model(self):
        print('Loading model ...')
        # check existence of params
        assert os.path.exists(self.model_folder), 'model_folder is not found: {}'.format(self.model_folder)
        assert self.weights_fname is not None, 'Argument required: --weights-file'
        checkExistence(self.weights_fname)
        model_graph = '{}/graph-arch.yaml'.format(self.model_folder)
        model_train_vars = '{}/other_vars.npz'.format(self.model_folder)
        checkExistence(model_graph)
        checkExistence(model_train_vars)
        from keras.models import model_from_yaml
        with open(model_graph, 'r') as fgraph:
            self.model = model_from_yaml(fgraph.read())
            self.model.load_weights(self.weights_fname)
        npzfile = np.load(model_train_vars)
        self.maxlen_userUtter = np.int32(npzfile['maxlen_userUtter'][()])
        self.word_vocab_size = np.int32(npzfile['word_vocab_size'][()])
        self.userTag_vocab_size = np.int32(npzfile['userTag_vocab_size'][()])
        self.userIntent_vocab_size = np.int32(
            npzfile['userIntent_vocab_size'][()])
        self.id2userTag = npzfile['id2userTag'][()]
        self.id2word = npzfile['id2word'][()]
        self.id2userIntent = npzfile['id2userIntent'][()]
        self.userTag2id = npzfile['userTag2id'][()] 
Author: XuesongYang | Project: end2end_dialog | Lines: 26 | Source file: SlotTaggingModel_multitask.py

Example 5: load_model

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_model(bin_dir):
    ''' Load model from .yaml and the weights from .h5

        Arguments:
            bin_dir: The directory of the bin (normally bin/)

        Returns:
            Loaded model from file
    '''

    # load YAML and create model
    yaml_file = open('%s/model.yaml' % bin_dir, 'r')
    loaded_model_yaml = yaml_file.read()
    yaml_file.close()
    model = model_from_yaml(loaded_model_yaml)

    # load weights into new model
    model.load_weights('%s/model.h5' % bin_dir)
    return model 
Author: Coopss | Project: EMNIST | Lines: 21 | Source file: server.py

Example 6: __init__

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def __init__(self, model_file, weights_file, dictionary):
        self.logger = logging.getLogger(__name__)

        with open(model_file, "r") as file:
            self.model = model_from_yaml(file.read())
            height = self.model.inputs[0].shape[1]
            self.img_size = (height, height)
        self.model.load_weights(weights_file)

        with open(dictionary, "r") as file:
            self.dictionary = {}
            data = file.read().split("\n")
            for index, character in enumerate(data):
                self.dictionary[index] = character

        self.logger.debug("Loaded model") 
Author: lars76 | Project: chinese-subtitle-ocr | Lines: 18 | Source file: recognition.py

Example 7: pred_data

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def pred_data():

    with open('./models/cat_dog.yaml') as yamlfile:
        loaded_model_yaml = yamlfile.read()
    model = model_from_yaml(loaded_model_yaml)
    model.load_weights('./models/cat_dog.h5')

    sgd = Adam(lr=0.0003)
    model.compile(loss='categorical_crossentropy',optimizer=sgd, metrics=['accuracy'])

    images = []
    path='./data/test/'
    for f in os.listdir(path):
        img = image.load_img(path + f, target_size=image_size)
        img_array = image.img_to_array(img)

        x = np.expand_dims(img_array, axis=0)
        x = preprocess_input(x)
        result = model.predict_classes(x,verbose=0)

        print(f,result[0]) 
Author: jarvisqi | Project: deep_learning | Lines: 23 | Source file: cat_dog.py

Example 8: load_model

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_model(model_path):
    custom_layers = {
        "multihead_attention": multihead_attention,
        "Conv2D": L.Conv2D,
        "split_heads_2d": split_heads_2d,
        "local_attention_2d": local_attention_2d,
        "combine_heads_2d": combine_heads_2d
    }
    model = model_from_yaml(open(os.path.join(model_path, "arch.yaml")).read(), custom_objects=custom_layers)

    full_path = os.path.join(model_path, "weights.h5")
    with h5py.File(full_path, "r") as w:
        keys = list(w.keys())
        is_para = any(["model" in k for k in keys])

    if is_para:
        para_model = multi_gpu_model(model, gpus=2)
        para_model.load_weights(full_path)
        model = para_model.layers[-2]
    else:
        model.load_weights(full_path)

    print("Model " + model_path + " loaded")
    return model 
Author: BreezeWhite | Project: Music-Transcription-with-Semantic-Segmentation | Lines: 26 | Source file: utils.py

Example 9: load_keras_model

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_keras_model(h5_file, json_file=None, yaml_file=None, is_weights=False, from_json=True):
    """
    Utility to load the whole model
    """
    # third-party imports
    from keras.models import load_model, model_from_json, model_from_yaml

    if is_weights:
        if from_json:
            json_string = open(json_file, "r").read()
            model = model_from_json(json_string)
        else:
            yaml_string = open(yaml_file, "r").read()
            model = model_from_yaml(yaml_string)
        model.load_weights(h5_file)
    else:
        model = load_model(h5_file)

    return model


# %% 
Author: kourouklides | Project: artificial_neural_networks | Lines: 24 | Source file: generic_utils.py
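A hypothetical call to the utility above, shown only for illustration; the file paths are placeholders, not from the original project. With is_weights=True and from_json=False it takes the YAML branch, rebuilding the architecture with model_from_yaml before loading the weights:

# Placeholder paths: rebuild from a YAML architecture file, then load the weights.
model = load_keras_model('weights.h5', yaml_file='arch.yaml',
                         is_weights=True, from_json=False)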

Example 10: _fit

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def _fit(self, df):
        """Private fit method of the Estimator, which trains the model.
        """
        simple_rdd = df_to_simple_rdd(df, categorical=self.get_categorical_labels(), nb_classes=self.get_nb_classes(),
                                      features_col=self.getFeaturesCol(), label_col=self.getLabelCol())
        simple_rdd = simple_rdd.repartition(self.get_num_workers())
        keras_model = model_from_yaml(self.get_keras_model_config())
        metrics = self.get_metrics()
        loss = self.get_loss()
        optimizer = get_optimizer(self.get_optimizer_config())
        keras_model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

        spark_model = SparkModel(model=keras_model,
                                 mode=self.get_mode(),
                                 frequency=self.get_frequency(),
                                 num_workers=self.get_num_workers())
        spark_model.fit(simple_rdd,
                        epochs=self.get_epochs(),
                        batch_size=self.get_batch_size(),
                        verbose=self.get_verbosity(),
                        validation_split=self.get_validation_split())

        model_weights = spark_model.master_network.get_weights()
        weights = simple_rdd.ctx.broadcast(model_weights)
        return ElephasTransformer(labelCol=self.getLabelCol(),
                                  outputCol='prediction',
                                  keras_model_config=spark_model.master_network.to_yaml(),
                                  weights=weights) 
Author: maxpumperla | Project: elephas | Lines: 30 | Source file: ml_model.py

Example 11: get_model

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def get_model(self):
        return model_from_yaml(self.get_keras_model_config()) 
Author: maxpumperla | Project: elephas | Lines: 4 | Source file: ml_model.py

Example 12: _transform

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def _transform(self, df):
        """Private transform method of a Transformer. This serves as batch-prediction method for our purposes.
        """
        output_col = self.getOutputCol()
        label_col = self.getLabelCol()
        new_schema = copy.deepcopy(df.schema)
        new_schema.add(StructField(output_col, StringType(), True))

        rdd = df.rdd.coalesce(1)
        features = np.asarray(
            rdd.map(lambda x: from_vector(x.features)).collect())
        # Note that we collect, since executing this on the rdd would require model serialization once again
        model = model_from_yaml(self.get_keras_model_config())
        model.set_weights(self.weights.value)
        predictions = rdd.ctx.parallelize(
            model.predict_classes(features)).coalesce(1)
        predictions = predictions.map(lambda x: tuple(str(x)))

        results_rdd = rdd.zip(predictions).map(lambda x: x[0] + x[1])
        results_df = df.sql_ctx.createDataFrame(results_rdd, new_schema)
        results_df = results_df.withColumn(
            output_col, results_df[output_col].cast(DoubleType()))
        results_df = results_df.withColumn(
            label_col, results_df[label_col].cast(DoubleType()))

        return results_df 
Author: maxpumperla | Project: elephas | Lines: 28 | Source file: ml_model.py

Example 13: save_model_yaml

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def save_model_yaml(model, model_fpath):
    """Save pre-trained Keras model."""
    from keras.models import model_from_yaml

    with open(model_fpath, "w") as yaml_file:
        yaml_file.write(model.to_yaml())
Author: developmentseed | Project: ml-hv-grid-pub | Lines: 8 | Source file: utils.py
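A hypothetical usage of this helper; the file names are placeholders and model is assumed to be an already trained Keras model. Since save_model_yaml writes only the architecture, the weights have to be saved separately so a loader such as the one in Example 14 can restore them:

# Placeholder paths: architecture goes to YAML, weights to a separate HDF5 file.
save_model_yaml(model, 'model_arch.yaml')
model.save_weights('model_weights.h5')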

Example 14: load_model

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_model(model_fpath, weights_fpath):
    """Load a model from yaml architecture and h5 weights."""
    assert model_fpath[-5:] == '.yaml'
    assert weights_fpath[-3:] == '.h5'

    with open(model_fpath, "r") as yaml_file:
        yaml_architecture = yaml_file.read()

    model = model_from_yaml(yaml_architecture)
    model.load_weights(weights_fpath)

    return model 
Author: developmentseed | Project: ml-hv-grid-pub | Lines: 14 | Source file: utils.py

Example 15: load_model

# Module to import: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_model(self):
        print('Loading model ...')
        # check existence of params
        assert os.path.exists(self.model_folder), 'model_folder is not found: {}'.format(self.model_folder)
        assert self.weights_fname is not None, 'Argument required: --weights-file'
        checkExistence(self.weights_fname)
        model_graph = '{}/graph-arch.yaml'.format(self.model_folder)
        model_train_vars = '{}/other_vars.npz'.format(self.model_folder)
        checkExistence(model_graph)
        checkExistence(model_train_vars)
        from keras.models import model_from_yaml
        with open(model_graph, 'r') as fgraph:
            self.model = model_from_yaml(fgraph.read())
            self.model.load_weights(self.weights_fname)
        npzfile = np.load(model_train_vars)
        self.id2agentAct = npzfile['id2agentAct'][()]
        self.id2word = npzfile['id2word'][()]
        self.id2userTag = npzfile['id2userTag'][()]
        self.userTag2id = npzfile['userTag2id'][()]
        self.id2userIntent = npzfile['id2userIntent'][()]
        self.agentAct_vocab_size = np.int32(npzfile['agentAct_vocab_size'][()])
        self.userIntent_vocab_size = np.int32(npzfile['userIntent_vocab_size'][()])
        self.userTag_vocab_size = np.int32(npzfile['userTag_vocab_size'][()])
        self.word_vocab_size = np.int32(npzfile['word_vocab_size'][()])
        self.maxlen_userUtter = npzfile['maxlen_userUtter'][()]
        self.window_size = np.int32(npzfile['window_size'][()]) 
Author: XuesongYang | Project: end2end_dialog | Lines: 28 | Source file: JointModel_multitask_jointraining.py


Note: The keras.models.model_from_yaml examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please follow the corresponding project's license when using or redistributing the code; do not reproduce this article without permission.