

Python models.model_from_yaml Method Code Examples

This article collects typical usage examples of the keras.models.model_from_yaml method in Python. If you are unsure what model_from_yaml does, how to call it, or what real-world usage looks like, the selected examples below should help. You can also explore further usage examples from the keras.models module.


The following 15 code examples demonstrate models.model_from_yaml; they are sorted by popularity by default.
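
Before diving into the collected examples, a minimal, self-contained round-trip sketch may help. The layer sizes and file names below are illustrative assumptions, not taken from any of the projects listed; note also that recent Keras/TensorFlow releases removed YAML serialization, so this assumes an older Keras version where model_from_yaml is still available.

# Minimal round-trip sketch (illustrative; layer sizes and file names are assumptions).
from keras.models import Sequential, model_from_yaml
from keras.layers import Dense

model = Sequential([
    Dense(8, activation='relu', input_shape=(4,)),
    Dense(1, activation='sigmoid'),
])

# to_yaml() serializes the architecture only; weights are stored separately.
with open('model.yaml', 'w') as f:
    f.write(model.to_yaml())
model.save_weights('model.h5')

# Rebuild the architecture from YAML, then restore the weights.
with open('model.yaml') as f:
    restored = model_from_yaml(f.read())
restored.load_weights('model.h5')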

Example 1: train

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def train(self, data_iterator):
        """Train a keras model on a worker
        """
        optimizer = get_optimizer(self.master_optimizer)
        self.model = model_from_yaml(self.yaml, self.custom_objects)
        self.model.compile(optimizer=optimizer,
                           loss=self.master_loss, metrics=self.master_metrics)
        self.model.set_weights(self.parameters.value)

        feature_iterator, label_iterator = tee(data_iterator, 2)
        x_train = np.asarray([x for x, y in feature_iterator])
        y_train = np.asarray([y for x, y in label_iterator])

        self.model.compile(optimizer=self.master_optimizer,
                           loss=self.master_loss,
                           metrics=self.master_metrics)

        weights_before_training = self.model.get_weights()
        if x_train.shape[0] > self.train_config.get('batch_size'):
            self.model.fit(x_train, y_train, **self.train_config)
        weights_after_training = self.model.get_weights()
        deltas = subtract_params(
            weights_before_training, weights_after_training)
        yield deltas 
Author: maxpumperla | Project: elephas | Lines: 26 | Source: worker.py

Example 2: best_models

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def best_models(self, nb_models, model, data, max_evals):
        trials_list = self.compute_trials(model, data, max_evals)
        num_trials = sum(len(trials) for trials in trials_list)
        if num_trials < nb_models:
            nb_models = len(trials_list)
        scores = []
        for trials in trials_list:
            scores = scores + [trial.get('result').get('loss')
                               for trial in trials]
        cut_off = sorted(scores, reverse=True)[nb_models - 1]
        model_list = []
        for trials in trials_list:
            for trial in trials:
                if trial.get('result').get('loss') >= cut_off:
                    model = model_from_yaml(trial.get('result').get('model'))
                    model.set_weights(pickle.loads(
                        trial.get('result').get('weights')))
                    model_list.append(model)
        return model_list 
Author: maxpumperla | Project: elephas | Lines: 21 | Source: hyperparam.py

Example 3: load_model

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_model(self):
        print('Loading model ...')
        # check existence of params
        assert os.path.exists(self.model_folder), 'model_folder is not found: {}'.format(self.model_folder)
        assert self.threshold is not None, 'Argument required: --threshold'
        assert self.weights_fname is not None, 'Argument required: --weights-file'
        checkExistence(self.weights_fname)
        model_graph = '{}/graph-arch.yaml'.format(self.model_folder)
        model_train_vars = '{}/other_vars.npz'.format(self.model_folder)
        checkExistence(model_graph)
        checkExistence(model_train_vars)
        # load models
        from keras.models import model_from_yaml
        with open(model_graph, 'r') as fgraph:
            self.model = model_from_yaml(fgraph.read())
            self.model.load_weights(self.weights_fname)
        npzfile = np.load(model_train_vars)
        self.agentAct_vocab_size = np.int32(npzfile['agentAct_vocab_size'][()])
        self.userTagIntent_vocab_size = np.int32(npzfile['userTagIntent_vocab_size'][()])
        self.id2agentAct = npzfile['id2agentAct'][()]
        self.window_size = np.int32(npzfile['window_size'][()]) 
Author: XuesongYang | Project: end2end_dialog | Lines: 23 | Source: AgentActClassifyingModel.py

Example 4: load_model

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_model(self):
        print('Loading model ...')
        # check existence of params
        assert os.path.exists(self.model_folder), 'model_folder is not found: {}'.format(self.model_folder)
        assert self.weights_fname is not None, 'Argument required: --weights-file'
        checkExistence(self.weights_fname)
        model_graph = '{}/graph-arch.yaml'.format(self.model_folder)
        model_train_vars = '{}/other_vars.npz'.format(self.model_folder)
        checkExistence(model_graph)
        checkExistence(model_train_vars)
        from keras.models import model_from_yaml
        with open(model_graph, 'r') as fgraph:
            self.model = model_from_yaml(fgraph.read())
            self.model.load_weights(self.weights_fname)
        npzfile = np.load(model_train_vars)
        self.maxlen_userUtter = np.int32(npzfile['maxlen_userUtter'][()])
        self.word_vocab_size = np.int32(npzfile['word_vocab_size'][()])
        self.userTag_vocab_size = np.int32(npzfile['userTag_vocab_size'][()])
        self.userIntent_vocab_size = np.int32(
            npzfile['userIntent_vocab_size'][()])
        self.id2userTag = npzfile['id2userTag'][()]
        self.id2word = npzfile['id2word'][()]
        self.id2userIntent = npzfile['id2userIntent'][()]
        self.userTag2id = npzfile['userTag2id'][()] 
Author: XuesongYang | Project: end2end_dialog | Lines: 26 | Source: SlotTaggingModel_multitask.py

Example 5: load_model

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_model(bin_dir):
    ''' Load model from .yaml and the weights from .h5

        Arguments:
            bin_dir: The directory of the bin (normally bin/)

        Returns:
            Loaded model from file
    '''

    # load YAML and create model
    yaml_file = open('%s/model.yaml' % bin_dir, 'r')
    loaded_model_yaml = yaml_file.read()
    yaml_file.close()
    model = model_from_yaml(loaded_model_yaml)

    # load weights into new model
    model.load_weights('%s/model.h5' % bin_dir)
    return model 
Author: Coopss | Project: EMNIST | Lines: 21 | Source: server.py

Example 6: __init__

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def __init__(self, model_file, weights_file, dictionary):
        self.logger = logging.getLogger(__name__)

        with open(model_file, "r") as file:
            self.model = model_from_yaml(file.read())
            height = self.model.inputs[0].shape[1]
            self.img_size = (height, height)
        self.model.load_weights(weights_file)

        with open(dictionary, "r") as file:
            self.dictionary = {}
            data = file.read().split("\n")
            for index, character in enumerate(data):
                self.dictionary[index] = character

        self.logger.debug("Loaded model") 
Author: lars76 | Project: chinese-subtitle-ocr | Lines: 18 | Source: recognition.py

Example 7: pred_data

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def pred_data():

    with open('./models/cat_dog.yaml') as yamlfile:
        loaded_model_yaml = yamlfile.read()
    model = model_from_yaml(loaded_model_yaml)
    model.load_weights('./models/cat_dog.h5')

    sgd = Adam(lr=0.0003)
    model.compile(loss='categorical_crossentropy',optimizer=sgd, metrics=['accuracy'])

    images = []
    path='./data/test/'
    for f in os.listdir(path):
        img = image.load_img(path + f, target_size=image_size)
        img_array = image.img_to_array(img)

        x = np.expand_dims(img_array, axis=0)
        x = preprocess_input(x)
        result = model.predict_classes(x,verbose=0)

        print(f,result[0]) 
Author: jarvisqi | Project: deep_learning | Lines: 23 | Source: cat_dog.py

Example 8: load_model

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_model(model_path):
    custom_layers = {
        "multihead_attention": multihead_attention,
        "Conv2D": L.Conv2D,
        "split_heads_2d": split_heads_2d,
        "local_attention_2d": local_attention_2d,
        "combine_heads_2d": combine_heads_2d
    }
    model = model_from_yaml(open(os.path.join(model_path, "arch.yaml")).read(), custom_objects=custom_layers)

    full_path = os.path.join(model_path, "weights.h5")
    with h5py.File(full_path, "r") as w:
        keys = list(w.keys())
        is_para = any(["model" in k for k in keys])

    if is_para:
        para_model = multi_gpu_model(model, gpus=2)
        para_model.load_weights(full_path)
        model = para_model.layers[-2]
    else:
        model.load_weights(full_path)

    print("Model " + model_path + " loaded")
    return model 
Author: BreezeWhite | Project: Music-Transcription-with-Semantic-Segmentation | Lines: 26 | Source: utils.py

Example 9: load_keras_model

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_keras_model(h5_file, json_file=None, yaml_file=None, is_weights=False, from_json=True):
    """
    Utility to load the whole model
    """
    # third-party imports
    from keras.models import load_model, model_from_json, model_from_yaml

    if is_weights:
        if from_json:
            json_string = open(json_file, "r").read()
            model = model_from_json(json_string)
        else:
            yaml_string = open(yaml_file, "r").read()
            model = model_from_yaml(yaml_string)
        model.load_weights(h5_file)
    else:
        model = load_model(h5_file)

    return model


# %% 
Author: kourouklides | Project: artificial_neural_networks | Lines: 24 | Source: generic_utils.py

Example 10: _fit

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def _fit(self, df):
        """Private fit method of the Estimator, which trains the model.
        """
        simple_rdd = df_to_simple_rdd(df, categorical=self.get_categorical_labels(), nb_classes=self.get_nb_classes(),
                                      features_col=self.getFeaturesCol(), label_col=self.getLabelCol())
        simple_rdd = simple_rdd.repartition(self.get_num_workers())
        keras_model = model_from_yaml(self.get_keras_model_config())
        metrics = self.get_metrics()
        loss = self.get_loss()
        optimizer = get_optimizer(self.get_optimizer_config())
        keras_model.compile(loss=loss, optimizer=optimizer, metrics=metrics)

        spark_model = SparkModel(model=keras_model,
                                 mode=self.get_mode(),
                                 frequency=self.get_frequency(),
                                 num_workers=self.get_num_workers())
        spark_model.fit(simple_rdd,
                        epochs=self.get_epochs(),
                        batch_size=self.get_batch_size(),
                        verbose=self.get_verbosity(),
                        validation_split=self.get_validation_split())

        model_weights = spark_model.master_network.get_weights()
        weights = simple_rdd.ctx.broadcast(model_weights)
        return ElephasTransformer(labelCol=self.getLabelCol(),
                                  outputCol='prediction',
                                  keras_model_config=spark_model.master_network.to_yaml(),
                                  weights=weights) 
Author: maxpumperla | Project: elephas | Lines: 30 | Source: ml_model.py

Example 11: get_model

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def get_model(self):
        return model_from_yaml(self.get_keras_model_config()) 
Author: maxpumperla | Project: elephas | Lines: 4 | Source: ml_model.py

Example 12: _transform

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def _transform(self, df):
        """Private transform method of a Transformer. This serves as batch-prediction method for our purposes.
        """
        output_col = self.getOutputCol()
        label_col = self.getLabelCol()
        new_schema = copy.deepcopy(df.schema)
        new_schema.add(StructField(output_col, StringType(), True))

        rdd = df.rdd.coalesce(1)
        features = np.asarray(
            rdd.map(lambda x: from_vector(x.features)).collect())
        # Note that we collect, since executing this on the rdd would require model serialization once again
        model = model_from_yaml(self.get_keras_model_config())
        model.set_weights(self.weights.value)
        predictions = rdd.ctx.parallelize(
            model.predict_classes(features)).coalesce(1)
        predictions = predictions.map(lambda x: tuple(str(x)))

        results_rdd = rdd.zip(predictions).map(lambda x: x[0] + x[1])
        results_df = df.sql_ctx.createDataFrame(results_rdd, new_schema)
        results_df = results_df.withColumn(
            output_col, results_df[output_col].cast(DoubleType()))
        results_df = results_df.withColumn(
            label_col, results_df[label_col].cast(DoubleType()))

        return results_df 
Author: maxpumperla | Project: elephas | Lines: 28 | Source: ml_model.py

Example 13: save_model_yaml

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def save_model_yaml(model, model_fpath):
    from keras.models import model_from_yaml

    """Save pre-trained Keras model."""
    with open(model_fpath, "w") as yaml_file:
        yaml_file.write(model.to_yaml()) 
Author: developmentseed | Project: ml-hv-grid-pub | Lines: 8 | Source: utils.py

Example 14: load_model

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_model(model_fpath, weights_fpath):
    """Load a model from yaml architecture and h5 weights."""
    assert model_fpath[-5:] == '.yaml'
    assert weights_fpath[-3:] == '.h5'

    with open(model_fpath, "r") as yaml_file:
        yaml_architecture = yaml_file.read()

    model = model_from_yaml(yaml_architecture)
    model.load_weights(weights_fpath)

    return model 
Author: developmentseed | Project: ml-hv-grid-pub | Lines: 14 | Source: utils.py

Example 15: load_model

# Required module: from keras import models [as alias]
# Or: from keras.models import model_from_yaml [as alias]
def load_model(self):
        print('Loading model ...')
        # check existence of params
        assert os.path.exists(self.model_folder), 'model_folder is not found: {}'.format(self.model_folder)
        assert self.weights_fname is not None, 'Argument required: --weights-file'
        checkExistence(self.weights_fname)
        model_graph = '{}/graph-arch.yaml'.format(self.model_folder)
        model_train_vars = '{}/other_vars.npz'.format(self.model_folder)
        checkExistence(model_graph)
        checkExistence(model_train_vars)
        from keras.models import model_from_yaml
        with open(model_graph, 'r') as fgraph:
            self.model = model_from_yaml(fgraph.read())
            self.model.load_weights(self.weights_fname)
        npzfile = np.load(model_train_vars)
        self.id2agentAct = npzfile['id2agentAct'][()]
        self.id2word = npzfile['id2word'][()]
        self.id2userTag = npzfile['id2userTag'][()]
        self.userTag2id = npzfile['userTag2id'][()]
        self.id2userIntent = npzfile['id2userIntent'][()]
        self.agentAct_vocab_size = np.int32(npzfile['agentAct_vocab_size'][()])
        self.userIntent_vocab_size = np.int32(npzfile['userIntent_vocab_size'][()])
        self.userTag_vocab_size = np.int32(npzfile['userTag_vocab_size'][()])
        self.word_vocab_size = np.int32(npzfile['word_vocab_size'][()])
        self.maxlen_userUtter = npzfile['maxlen_userUtter'][()]
        self.window_size = np.int32(npzfile['window_size'][()]) 
Author: XuesongYang | Project: end2end_dialog | Lines: 28 | Source: JointModel_multitask_jointraining.py


Note: The keras.models.model_from_yaml examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and any distribution or use should follow the corresponding project's license. Please do not reproduce without permission.