This article collects typical usage examples of the cntk.load_model method in Python. If you are unsure what cntk.load_model does, how to call it, or where it is useful, the curated code examples below should help. You can also explore further usage examples from the cntk module that this method belongs to.
The following section shows 15 code examples of cntk.load_model, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
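Before the examples, here is a minimal, self-contained sketch of the basic pattern most of them share: load a serialized CNTK model from disk and evaluate it on a NumPy array. The file name model.dnn and the random input are placeholders, not values taken from any example below.

import numpy as np
import cntk as C

# Load a previously saved model (placeholder path).
model = C.load_model("model.dnn")

# Inspect what the model expects and produces.
print([(a.name, a.shape) for a in model.arguments])
print([(o.name, o.shape) for o in model.outputs])

# Evaluate on random data matching the first argument's shape (batch size 1).
x = np.random.rand(1, *model.arguments[0].shape).astype(np.float32)
output = model.eval({model.arguments[0]: x})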
Example 1: evaluateimage
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def evaluateimage(file_path, mode, eval_model=None):
    # from plot_helpers import eval_and_plot_faster_rcnn
    if eval_model is None:
        print("Loading existing model from %s" % model_path)
        eval_model = load_model(model_path)
    img_shape = (num_channels, image_height, image_width)
    results_folder = globalvars['temppath']
    results = eval_faster_rcnn(eval_model, file_path, img_shape,
                               results_folder, feature_node_name, globalvars['classes'], mode,
                               drawUnregressedRois=cfg["CNTK"].DRAW_UNREGRESSED_ROIS,
                               drawNegativeRois=cfg["CNTK"].DRAW_NEGATIVE_ROIS,
                               nmsThreshold=cfg["CNTK"].RESULTS_NMS_THRESHOLD,
                               nmsConfThreshold=cfg["CNTK"].RESULTS_NMS_CONF_THRESHOLD,
                               bgrPlotThreshold=cfg["CNTK"].RESULTS_BGR_PLOT_THRESHOLD)
    return results
Example 2: test
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def test(model_path, num_episodes=10):
    root = cntk.load_model(model_path)
    observation = env.reset()  # reset environment for new episode
    for episode in range(num_episodes):
        done = False
        print(episode)
        while not done:
            try:
                env.render()
            except Exception:
                # this might fail on a VM without OpenGL
                pass
            action = np.argmax(root.eval([observation.astype(np.float32)]))
            observation, reward, done, info = env.step(action)
            if done:
                observation = env.reset()  # reset environment for new episode
Example 3: test
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def test(model_path, num_episodes=10):
    root = cntk.load_model(model_path)
    observation = env.reset()  # reset environment for new episode
    for episode in range(num_episodes):
        done = False  # reset per episode so later episodes actually run
        while not done:
            try:
                env.render()
            except Exception:
                # this might fail on a VM without OpenGL
                pass
            observation = preprocess_image(observation)
            action = np.argmax(root.eval(observation.astype(np.float32)))
            observation, reward, done, info = env.step(action)
            if done:
                observation = env.reset()  # reset environment for new episode
Example 4: test_one_seq
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def test_one_seq(visualizer):
    # directory to store the output video; it will be created if it doesn't exist
    save_dir = "H:/Speech_data/test_output_single"
    model_file = "H:/Speech_data/model_audio2exp_2018-08-01-05-14/model_audio2exp_2018-08-01-05-14.dnn"
    # video directory holding the separate frames of the video; each image should be square
    video_dir = "H:/FrontalFaceData/RAVDESS/Actor_21/01-01-07-02-01-01-21"
    # the spectrogram sequence is stored in a .csv file
    audio_file = "H:/Speech_data/RAVDESS_feat/Actor_21/01-01-07-02-01-01-21/dbspectrogram.csv"
    # AU labels are stored in an .npy file
    exp_file = "H:/Training_data_image/ExpLabels/RAVDESS/Actor_21/01-01-07-02-01-01-21.npy"
    video_list = get_items(video_dir, "full")  # set to None if video_dir does not exist
    model = C.load_model(model_file)
    visualize_one_audio_seq(model, video_list, audio_file, exp_file, visualizer, save_dir)
#----------------------------------------------------------------------------------
Example 5: init
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def init():
    """ Initialise ResNet 152 model
    """
    global trainedModel, labelLookup, mem_after_init
    start = t.default_timer()
    # Load the labels from disk
    with open(LABEL_FILE, 'r') as f:
        labelLookup = [l.rstrip() for l in f]
    # Load the BrainScript model from disk and keep only its 4th output (index 3)
    trainedModel = load_model(MODEL_FILE)
    trainedModel = combine([trainedModel.outputs[3].owner])
    end = t.default_timer()
    loadTimeMsg = "Model loading time: {0} ms".format(round((end - start) * 1000, 2))
    logger.info(loadTimeMsg)
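The hard-coded index 3 above assumes the exported BrainScript model exposes several outputs. A hedged sketch of how you might verify which output to wrap before committing to an index; the model path is a placeholder standing in for MODEL_FILE:

from cntk import load_model, combine

model = load_model("resnet152.model")  # placeholder path; MODEL_FILE in the example above
# List every output with its name and shape to find the prediction node.
for i, out in enumerate(model.outputs):
    print(i, out.name, out.shape)

# Wrap only the chosen output so that eval() returns a single array.
chosen = 3  # index used in the example above; verify against the printout
prediction = combine([model.outputs[chosen].owner])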
Example 6: create_model
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def create_model(base_model_file, input_features, num_classes, dropout_rate=0.5, freeze_weights=False):
    # Load the pretrained classification net and find the relevant nodes
    base_model = load_model(base_model_file)
    feature_node = find_by_name(base_model, 'features')
    beforePooling_node = find_by_name(base_model, "z.x.x.r")
    # graph.plot(base_model, filename="base_model.pdf")  # write graph visualization
    # Clone the model up to and including the node right before the pooling layer, i.e. z.x.x.r
    modelCloned = combine([beforePooling_node.owner]).clone(
        CloneMethod.freeze if freeze_weights else CloneMethod.clone,
        {feature_node: placeholder(name='features')})
    # Center the input around zero and set the model input.
    # Do this early, to avoid a CNTK bug with wrongly estimated layer shapes
    feat_norm = input_features - constant(114)
    model = modelCloned(feat_norm)
    # Pool over all spatial dimensions and add a dropout layer
    avgPool = GlobalAveragePooling(name="poolingLayer")(model)
    if dropout_rate > 0:
        avgPoolDrop = Dropout(dropout_rate)(avgPool)
    else:
        avgPoolDrop = avgPool
    # Add a new dense layer for class prediction
    finalModel = Dense(num_classes, activation=None, name="prediction")(avgPoolDrop)
    return finalModel

# Trains a transfer learning model
Developer: Azure-Samples, Project: MachineLearningSamples-ImageClassificationUsingCntk, Lines of code: 32, Source file: helpers_cntk.py
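A possible way to call create_model for transfer learning on 224x224 RGB images. This sketch assumes the function above with its imports is available; the base-model path, image size, and class count are illustrative assumptions, not values from the sample:

import cntk as C

# Hypothetical inputs; adjust to your pretrained network and data.
input_var = C.input_variable((3, 224, 224), name='input')
tl_model = create_model("ResNet18_ImageNet_CNTK.model", input_var,
                        num_classes=10, dropout_rate=0.5, freeze_weights=True)
print(tl_model.output.shape)  # expected: (10,)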
Example 7: audio_encoder_3
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def audio_encoder_3(input, model_file, cloning=False):
    # Load and freeze the pre-trained encoder
    last_layer_name = "t_conv3"
    model = C.load_model(model_file)
    input_node = model.find_by_name("input")
    last_conv = model.find_by_name(last_layer_name)
    if not last_conv:
        raise ValueError("the layer does not exist")
    h = C.combine([last_conv.owner]).clone(
        C.CloneMethod.clone if cloning else C.CloneMethod.freeze,
        {input_node: input})
    return h
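A sketch of how the cloned encoder might be attached to a fresh input variable. The feature dimensionality (128 spectrogram bins per frame) and the sequence-style input are assumptions; they must match the shape of the "input" node in the saved encoder:

import cntk as C

# Hypothetical spectrogram input: a sequence of 128-dimensional frames.
audio_input = C.sequence.input_variable(128, name="audio")
encoder = audio_encoder_3(audio_input, "audio_encoder.dnn", cloning=False)
# With cloning=False the pre-trained encoder weights stay frozen.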
Example 8: estimate_one_audio_seq
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def estimate_one_audio_seq(model, audio_seq, small_mem=False):
    if isinstance(model, str):
        model = C.load_model(model)
    # handle two cases: the model is recurrent or static
    if is_recurrent(model):
        n = audio_seq.shape[0]
        NNN = 125
        if n > NNN and small_mem:
            nseqs = n // NNN + 1
            indices = []
            for i in range(nseqs - 1):
                indices.append(NNN * i + NNN)
            input_seqs = np.vsplit(audio_seq, indices)
            outputs = []
            for seq in input_seqs:
                output = model.eval({model.arguments[0]: [seq]})[0]
                outputs.append(output)
            output = np.concatenate(outputs)
        else:
            output = model.eval({model.arguments[0]: [audio_seq]})[0]
    else:
        output = model.eval({model.arguments[0]: audio_seq})
    return output
#----------------------- feed sequence -------------------------
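The helper is_recurrent is not shown in this example. One plausible implementation, assuming CNTK's convention that sequence inputs carry an extra dynamic (sequence) axis in addition to the batch axis, would be:

def is_recurrent(model):
    # Treat the model as recurrent if any input has a sequence axis,
    # i.e. more dynamic axes than the single default batch axis.
    return any(len(arg.dynamic_axes) > 1 for arg in model.arguments)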
Example 9: load_model
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def load_model(self, model_path):
    '''
    Fill in this method to write your own model-loading Python code.
    Save the model in the self object if you would like to reference it later.
    Tip: you can access EMD information through self.json_info.
    '''
    # Todo: fill in this method to load your model
    self.model = C.load_model(model_path)
Example 10: inference
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def inference(self, batch, **kwargs):
    '''
    Fill in this method to write your own inference Python code; you can refer to the model instance created
    in the load_model method. The expected result format is described in the returns section below.
    Tip: you can access EMD information through self.json_info.
    :param batch: numpy array with shape (B, H, W, D), where B is the batch size, H and W are equal to
                  ImageHeight and ImageWidth in the EMD file, and D is the number of bands, equal to the
                  length of ExtractBands in the EMD.
    :param kwargs: inference parameters, accessed by parameter name,
                   e.g. score_threshold=float(kwargs['score_threshold']). If you want more inference
                   parameters, add them to the list in the following getParameterInfo method.
    :return: semantic segmentation, a numpy array of shape [B, 1, H, W] and type np.uint8, where B is the
             batch size and H and W are the tile size, equal to ImageHeight and ImageWidth in the EMD file
             respectively if Padding is not set
    '''
    # Todo: fill in this method to run inference with your model and return bounding boxes, scores and classes
    batch = batch.astype(np.float32)
    output = self.model.eval(
        {
            self.model.arguments[0]: batch
        }
    )
    semantic_predictions = np.argmax(output, axis=1)
    semantic_predictions = np.expand_dims(semantic_predictions, axis=1)
    return semantic_predictions
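The argmax/expand_dims pair above collapses the per-class score planes into a single label plane per tile. A NumPy-only illustration of the shape flow, using a hypothetical 4-tile batch, 5 classes, and 256x256 tiles:

import numpy as np

output = np.random.rand(4, 5, 256, 256)              # (B, num_classes, H, W)
labels = np.argmax(output, axis=1)                   # (B, H, W), class index per pixel
labels = np.expand_dims(labels, axis=1).astype(np.uint8)  # (B, 1, H, W), as the EMD contract expects
print(labels.shape, labels.dtype)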
Example 11: load_model
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def load_model(self, model_path):
    '''
    Fill in this method to write your own model-loading Python code.
    Save the model in the self object if you would like to reference it later.
    Tip: you can access EMD information through self.json_info.
    '''
    # Todo: fill in this method to load your model
    self.model = C.load_model(model_path)
Example 12: load_cntk_model_from_binary
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def load_cntk_model_from_binary(model_bin, verbose=False):
    model_file = "tmp.model"
    with open(model_file, "wb") as file:
        file.write(model_bin)
    loaded_model = load_model(model_file)
    if verbose:
        print(len(loaded_model.constants))
        node_outputs = get_node_outputs(loaded_model)
        for out in node_outputs:
            print("{0} {1}".format(out.name, out.shape))
    return loaded_model
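Besides load_model, this helper relies on get_node_outputs, which comes from cntk.logging.graph. A hedged usage sketch: read a serialized model file into bytes (for example one fetched from blob storage) and round-trip it through the helper; the file name is a placeholder:

with open("ResNet_18.model", "rb") as f:  # placeholder path
    model_bin = f.read()

model = load_cntk_model_from_binary(model_bin, verbose=True)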
Example 13: get_cntk_model
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def get_cntk_model(model_name):
    node_name = "z.x"
    loaded_model = load_model(model_name)
    node_in_graph = loaded_model.find_by_name(node_name)
    output_nodes = combine([node_in_graph.owner])
    return output_nodes
Example 14: ensure_model_is_loaded
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def ensure_model_is_loaded(self):
    if not self.__model:
        self.load_model()
Example 15: cntkRunner
# Required module: import cntk [as alias]
# Or: from cntk import load_model [as alias]
def cntkRunner(model_path, inputs_path):
    import cntk as C
    model = C.load_model(model_path, device=C.device.cpu())
    input_dict = gen_io_dict(inputs_path, model.arguments, True)
    output = model.eval(input_dict)
    return output
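gen_io_dict is a helper from the surrounding repository and is not shown here. If you only need to smoke-test a model, an input dictionary can also be built directly from model.arguments with random data, as in this sketch (the model path is a placeholder):

import numpy as np
import cntk as C

model = C.load_model("model.dnn", device=C.device.cpu())  # placeholder path
# One random sample per input argument, batch size 1.
input_dict = {arg: np.random.rand(1, *arg.shape).astype(np.float32)
              for arg in model.arguments}
output = model.eval(input_dict)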