

Python cntk.load_model Method Code Examples

This article collects typical usage examples of the Python cntk.load_model method. If you are wondering what cntk.load_model does, how to call it, or what working examples look like, the curated code samples below may help. You can also explore further usage examples from the cntk module itself.


Below are 15 code examples of the cntk.load_model method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
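
Before diving into the examples, here is a minimal, self-contained sketch of the basic cntk.load_model workflow (load a serialized model, inspect its inputs and outputs, evaluate it on a dummy array). The file name "my_model.dnn" and the input shape are placeholders, not taken from any project below.

import numpy as np
import cntk as C

# load a serialized CNTK model from disk
model = C.load_model("my_model.dnn")

# inspect the model's input arguments and outputs
for arg in model.arguments:
    print(arg.name, arg.shape)
for out in model.outputs:
    print(out.name, out.shape)

# evaluate on a dummy batch matching the first argument's shape
dummy = np.zeros((1,) + model.arguments[0].shape, dtype=np.float32)
result = model.eval({model.arguments[0]: dummy})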

Example 1: evaluateimage

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def evaluateimage(file_path, mode, eval_model=None):

    #from plot_helpers import eval_and_plot_faster_rcnn
    if eval_model is None:
        print("Loading existing model from %s" % model_path)
        eval_model = load_model(model_path)
    img_shape = (num_channels, image_height, image_width)
    results_folder = globalvars['temppath']
    results=eval_faster_rcnn(eval_model, file_path, img_shape,
                              results_folder, feature_node_name, globalvars['classes'], mode,
                              drawUnregressedRois=cfg["CNTK"].DRAW_UNREGRESSED_ROIS,
                              drawNegativeRois=cfg["CNTK"].DRAW_NEGATIVE_ROIS,
                              nmsThreshold=cfg["CNTK"].RESULTS_NMS_THRESHOLD,
                              nmsConfThreshold=cfg["CNTK"].RESULTS_NMS_CONF_THRESHOLD,
                              bgrPlotThreshold=cfg["CNTK"].RESULTS_BGR_PLOT_THRESHOLD)
    return results 
Developer: karolzak | Project: cntk-python-web-service-on-azure | Lines: 18 | Source: evaluate.py

Example 2: test

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def test(model_path, num_episodes=10):

    root = cntk.load_model(model_path)
    observation = env.reset()  # reset environment for new episode
    for episode in range(num_episodes):
        done = False
        print(episode)
        while not done:
            try:
                env.render()
            except Exception:
                # this might fail on a VM without OpenGL
                pass

            action = np.argmax(root.eval([observation.astype(np.float32)]))
            observation, reward, done, info = env.step(action)
        if done:
            observation = env.reset()  # reset environment for new episode 
Developer: MattChanTK | Project: ai-gym | Lines: 20 | Source: cart_pole_dqn_cntk.py

Example 3: test

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def test(model_path, num_episodes=10):

    root = cntk.load_model(model_path)
    observation = env.reset()  # reset environment for new episode
    for episode in range(num_episodes):
        done = False  # reset the flag each episode, otherwise later episodes are skipped
        while not done:
            try:
                env.render()
            except Exception:
                # this might fail on a VM without OpenGL
                pass

            observation = preprocess_image(observation)
            action = np.argmax(root.eval(observation.astype(np.float32)))
            observation, reward, done, info = env.step(action)
        if done:
            observation = env.reset()  # reset environment for new episode 
Developer: MattChanTK | Project: ai-gym | Lines: 20 | Source: atari_breakout_dqn_cntk.py

Example 4: test_one_seq

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def test_one_seq(visualizer):
    # directory to store output video. It will be created if it doesn't exist
    save_dir = "H:/Speech_data/test_output_single"
    model_file = "H:/Speech_data/model_audio2exp_2018-08-01-05-14/model_audio2exp_2018-08-01-05-14.dnn"
    # video directory holding separate frames of the video. Each image should be square.
    video_dir = "H:/FrontalFaceData/RAVDESS/Actor_21/01-01-07-02-01-01-21"
    # spectrogram sequence is stored in a .csv file
    audio_file = "H:/Speech_data/RAVDESS_feat/Actor_21/01-01-07-02-01-01-21/dbspectrogram.csv"
    # AU labels are stored in an .npy file
    exp_file = "H:/Training_data_image/ExpLabels/RAVDESS/Actor_21/01-01-07-02-01-01-21.npy"

    video_list = get_items(video_dir, "full") # set to None if video_dir does not exist
    model = C.load_model(model_file)

    visualize_one_audio_seq(model, video_list, audio_file, exp_file, visualizer, save_dir)

#---------------------------------------------------------------------------------- 
Developer: haixpham | Project: end2end_AU_speech | Lines: 19 | Source: eval_speech.py

Example 5: init

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def init():
    """ Initialise ResNet 152 model
    """
    global trainedModel, labelLookup, mem_after_init

    start = t.default_timer()

    # Load the model and labels from disk
    with open(LABEL_FILE, 'r') as f:
        labelLookup = [l.rstrip() for l in f]

    # Load model and load the model from brainscript (3rd index)
    trainedModel = load_model(MODEL_FILE)
    trainedModel = combine([trainedModel.outputs[3].owner])
    end = t.default_timer()

    loadTimeMsg = "Model loading time: {0} ms".format(round((end - start) * 1000, 2))
    logger.info(loadTimeMsg) 
Developer: Azure | Project: DevOps-For-AI-Apps | Lines: 20 | Source: driver.py
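
A hedged sketch of how the globals initialised by init() above might be used for scoring. The 3x224x224 dummy input and the absence of real preprocessing (resizing, mean subtraction) are simplifications for illustration, not code from the original DevOps-For-AI-Apps driver.

import numpy as np

init()
# hypothetical: score one image already preprocessed to the network's (C, H, W) input
img = np.random.rand(3, 224, 224).astype(np.float32)
scores = trainedModel.eval({trainedModel.arguments[0]: [img]})
print(labelLookup[int(np.argmax(scores))])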

Example 6: create_model

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def create_model(base_model_file, input_features, num_classes,  dropout_rate = 0.5, freeze_weights = False):
    # Load the pretrained classification net and find nodes
    base_model   = load_model(base_model_file)
    feature_node = find_by_name(base_model, 'features')
    beforePooling_node = find_by_name(base_model, "z.x.x.r")
    #graph.plot(base_model, filename="base_model.pdf") # Write graph visualization

    # Clone model until right before the pooling layer, i.e. up to and including z.x.x.r
    modelCloned = combine([beforePooling_node.owner]).clone(
        CloneMethod.freeze if freeze_weights else CloneMethod.clone,
        {feature_node: placeholder(name='features')})

    # Center the input around zero and set model input.
    # Do this early, to avoid CNTK bug with wrongly estimated layer shapes
    feat_norm = input_features - constant(114)
    model = modelCloned(feat_norm)

    # Pool over all spatial dimensions and add dropout layer
    avgPool = GlobalAveragePooling(name = "poolingLayer")(model)
    if dropout_rate > 0:
        avgPoolDrop = Dropout(dropout_rate)(avgPool)
    else:
        avgPoolDrop = avgPool

    # Add new dense layer for class prediction
    finalModel = Dense(num_classes, activation=None, name="prediction") (avgPoolDrop)
    return finalModel


# Trains a transfer learning model 
Developer: Azure-Samples | Project: MachineLearningSamples-ImageClassificationUsingCntk | Lines: 32 | Source: helpers_cntk.py

Example 7: audio_encoder_3

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def audio_encoder_3(input, model_file, cloning=False):
    # Load and freeze pre-trained encoder
    last_layer_name = "t_conv3"
    model = C.load_model(model_file)
    input_node = model.find_by_name("input")
    last_conv = model.find_by_name(last_layer_name)
    if not last_conv:
        raise ValueError("the layer does not exist")
    h = C.combine([last_conv.owner]).clone(C.CloneMethod.clone if cloning else C.CloneMethod.freeze, {input_node: input})
    return h 
Developer: haixpham | Project: end2end_AU_speech | Lines: 12 | Source: train_end2end.py
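
A hypothetical way to wire up the frozen encoder above. The 128-dimensional per-frame spectrogram input and the model path are illustrative assumptions; the real input shape is defined by the pre-trained model's "input" node.

import cntk as C

# hypothetical input: a sequence of 128-dim spectrogram frames
audio = C.sequence.input_variable(128, name="audio")
encoder = audio_encoder_3(audio, "pretrained_audio_encoder.dnn", cloning=False)
# encoder is now a frozen feature extractor; further trainable layers can be stacked on it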

Example 8: estimate_one_audio_seq

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def estimate_one_audio_seq(model, audio_seq, small_mem=False):
    if isinstance(model, str):
        model = C.load_model(model)
    # set up 2 cases: if the model is recurrent or static
    if is_recurrent(model):
        n = audio_seq.shape[0]
        NNN = 125
        if n > NNN and small_mem:
            nseqs = n//NNN + 1
            indices = []
            for i in range(nseqs-1):
                indices.append(NNN*i + NNN)
            input_seqs = np.vsplit(audio_seq, indices)
            outputs = []
            for seq in input_seqs:
                output = model.eval({model.arguments[0]:[seq]})[0]
                outputs.append(output)
            output = np.concatenate(outputs)
        else:
            output = model.eval({model.arguments[0]:[audio_seq]})[0]
    else:
        output = model.eval({model.arguments[0]: audio_seq})
    return output


#----------------------- feed sequence ------------------------- 
Developer: haixpham | Project: end2end_AU_speech | Lines: 28 | Source: eval_speech.py

Example 9: load_model

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def load_model(self, model_path):
        '''
        Fill this method to write your own model loading python code
        save it in the self object if you would like to reference it later.

        Tips: you can access emd information through self.json_info.
        '''
        # Todo: fill in this method to load your model
        self.model = C.load_model(model_path) 
Developer: Esri | Project: raster-deep-learning | Lines: 11 | Source: AzurePixelLevelLandClassification.py

Example 10: inference

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def inference(self, batch, **kwargs):
        '''
        Fill this method to write your own inference python code, you can refer to the model instance that is created
        in the load_model method. Expected results format is described in the returns as below.

        Tips: you can access emd information through self.json_info.

        :param batch: numpy array with shape (B, H, W, D), B is batch size, H, W is specified and equal to
                      ImageHeight and ImageWidth in the emd file and D is the number of bands and equal to the length
                      of ExtractBands in the emd.
        :param kwargs: inference parameters, accessed by the parameter name,
                       i.e. score_threshold=float(kwargs['score_threshold']). If you want to have more inference
                       parameters, add it to the list of the following getParameterInfo method.
        :return: semantic segmentation, numpy array in the shape [B, 1, H, W] and type np.uint8, B is the batch size,
                 H and W are the tile size, equal to ImageHeight and ImageWidth in the emd file respectively if Padding
                 is not set
        '''
        # Todo: fill in this method to inference your model and return bounding boxes, scores and classes
        batch=batch.astype(np.float32)

        output = self.model.eval(
            {
                self.model.arguments[0]: batch
            }
        )
        semantic_predictions = np.argmax(output, axis=1)
        semantic_predictions = np.expand_dims(semantic_predictions, axis=1)

        return semantic_predictions 
Developer: Esri | Project: raster-deep-learning | Lines: 31 | Source: AzurePixelLevelLandClassification.py
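
A hypothetical driver for the inference method above. Here classifier stands for an already-initialised instance of this class (with load_model called), and the 256x256x3 tile size is a placeholder for whatever the emd file actually specifies.

import numpy as np

# hypothetical batch of 4 tiles, shape (B, H, W, D) as described in the docstring
batch = np.random.rand(4, 256, 256, 3).astype(np.float32)
masks = classifier.inference(batch)
print(masks.shape)  # (4, 1, 256, 256): one class-index map per tile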

Example 11: load_model

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def load_model(self, model_path):
        '''
        Fill this method to write your own model loading python code
        save it in the self object if you would like to reference it later.

        Tips: you can access emd information through self.json_info.
        '''
        #Todo: fill in this method to load your model
        self.model = C.load_model(model_path) 
Developer: Esri | Project: raster-deep-learning | Lines: 11 | Source: FasterRCNN.py

Example 12: load_cntk_model_from_binary

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def load_cntk_model_from_binary(model_bin, verbose=False):
    model_file = "tmp.model"
    with open(model_file, "wb") as file:
        file.write(model_bin)
    loaded_model = load_model(model_file)
    if verbose:
        print(len(loaded_model.constants))
        node_outputs = get_node_outputs(loaded_model)
        for out in node_outputs: print("{0} {1}".format(out.name, out.shape))
    return loaded_model 
Developer: Azure | Project: sql_python_deep_learning | Lines: 12 | Source: lung_cancer_utils.py
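
A hypothetical round trip with the helper above; the file name is a placeholder for wherever the serialized model bytes come from (for example a database blob or a file on disk).

# hypothetical: read a serialized CNTK model as raw bytes, then rebuild it in memory
with open("lung_cancer_model.model", "rb") as f:
    model_bin = f.read()
model = load_cntk_model_from_binary(model_bin, verbose=True)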

Example 13: get_cntk_model

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def get_cntk_model(model_name):
    node_name = "z.x"
    loaded_model  = load_model(model_name)
    node_in_graph = loaded_model.find_by_name(node_name)
    output_nodes  = combine([node_in_graph.owner])
    return output_nodes 
Developer: Azure | Project: sql_python_deep_learning | Lines: 8 | Source: lung_cancer_utils.py

Example 14: ensure_model_is_loaded

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def ensure_model_is_loaded(self):
        if not self.__model:
            self.load_model() 
Developer: microsoft | Project: CNTK-FastRCNNDetector | Lines: 5 | Source: frcnn_detector.py

Example 15: cntkRunner

# Required import: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def cntkRunner(model_path, inputs_path):
    import cntk as C
    model = C.load_model(model_path, device=C.device.cpu())
    input_dict = gen_io_dict(inputs_path, model.arguments, True)
    output = model.eval(input_dict)
    return output 
Developer: microsoft | Project: OLive | Lines: 8 | Source: check_model.py


Note: The cntk.load_model examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers; copyright of the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Please do not reproduce without permission.