This article collects typical usage examples of the Python class gpumodel.IGPUModel. If you are unsure what IGPUModel is for or how to use it in practice, the curated class examples below should help. A total of 11 code examples of the IGPUModel class are shown, sorted by popularity by default.
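Most of the examples below share one pattern: load a saved checkpoint with IGPUModel.load_checkpoint, merge the options stored in that checkpoint into a freshly created options parser, and rebuild a ConvNet-style model from the result. The following is only a minimal sketch of that pattern; the checkpoint path is a placeholder and the choice of ShowConvNet as the model class is an assumption for illustration, not taken verbatim from any single example.

from gpumodel import IGPUModel
from shownet import ShowConvNet

# Placeholder path -- point this at your own saved checkpoint directory.
checkpoint_path = '/tmp/ConvNet__example_checkpoint'

# Load the pickled checkpoint: model state plus the options it was trained with.
load_dic = IGPUModel.load_checkpoint(checkpoint_path)

# Merge the stored options into a fresh options parser so the saved training
# settings and the current defaults are combined, then evaluate expression defaults.
op = ShowConvNet.get_options_parser()
old_op = load_dic["op"]
old_op.merge_from(op)
op = old_op
op.eval_expr_defaults()

# Reconstruct the model from the merged options and the loaded state.
model = ShowConvNet(op, load_dic)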
Example 1: load_net
def load_net(fname):
    cudaconv_net = IGPUModel.load_checkpoint(fname)
    layers = cudaconv_net["model_state"]["layers"]
    # Note: data dimensions are hardcoded here - not sure we have that info in the cudaconv object?
    decafnet = translator.translate_cuda_network(layers, {'data': (32, 32, 3)})
    return decafnet
Example 2: load_from_convnet
from math import sqrt

def load_from_convnet(filename):
    cudaconv_net = IGPUModel.load_checkpoint(filename)
    layers = cudaconv_net["model_state"]["layers"]
    data_layer = [l for l in layers if l["name"] == "data"][0]
    data_consumer = [l for l in layers if data_layer in l.get("inputLayers", [])][0]
    input_dim = int(sqrt(data_consumer["imgPixels"][0]))  # The width/height of the square images
    input_channels = data_consumer["channels"][0]         # The number of channels in the input images
    return translator.translate_cuda_network(layers, {"data": (input_dim, input_dim, input_channels)})
Example 3: plot_predictions
def plot_predictions(self, data_provider, output_file='/tmp/predictions.png', train=True, only_errors=True):
    op = shownet.ShowConvNet.get_options_parser()
    local_train = train
    predict_dict = {
        '--write-features': 'probs',
        '--feature-path': '/tmp/feature_path',
        '--test-range': '2',
        '--train-range': '1',
        '--show-preds': 'probs',
        '-f': self.last_model,
        '--data-provider': 'dp_scikit',
        '--multiview-test': 0,
        '--logreg-name': 'aaa'
    }
    op.parse_from_dictionary(predict_dict)
    load_dic = None
    options = op.options
    if options["load_file"].value_given:
        print 'load file option provided'
        load_dic = IGPUModel.load_checkpoint(options["load_file"].value)
        old_op = load_dic["op"]
        old_op.merge_from(op)
        op = old_op
    op.eval_expr_defaults()

    class MyConvNet(shownet.ShowConvNet):
        def get_data_dims(self, idx):
            return data_provider.get_data_dims(idx)

        def get_num_classes(self):
            return data_provider.get_num_classes()

        def get_next_batch(self, train=True):
            return data_provider.get_next_batch(local_train)

        def get_num_test_batches(self):
            return data_provider.get_num_test_batches()

        def get_plottable_data(self, data):
            return data_provider.get_plottable_data(data)

        def init_data_providers(self):
            data_provider.init_data_providers()

        def get_label_names(self):
            return data_provider.get_label_names()

    model = MyConvNet(op, load_dic=load_dic)
    model.only_errors = only_errors
    model.plot_predictions()
    pl.savefig(output_file)
    model.cleanup()
Example 4: get_model
def get_model():
    global _model
    if _model is None:
        # This code is adapted from gpumodel.py and shownet.py
        load_dic = IGPUModel.load_checkpoint(app.config["TRAINED_MODEL_PATH"])
        op = ShowConvNet.get_options_parser()
        old_op = load_dic["op"]
        old_op.merge_from(op)
        op = old_op
        _model = ShowConvNet(op, load_dic)
    return _model
Example 5: initBFrom
def initBFrom(name, shape, params=None):
    assert params is not None
    assert len(params) > 0
    (checkPointFile, layerName) = params[0].split('.')
    net = IGPUModel.load_checkpoint(checkPointFile)
    layernames = [layer['name'] for layer in net['model_state']['layers']]
    if layerName not in layernames:
        raise initWError("There is no layer named '%s' in file '%s'" % (layerName, checkPointFile))
    else:
        weightlist = net['model_state']['layers'][layernames.index(layerName)]['biases']
        assert len(weightlist) > 0
        assert weightlist.shape == shape
        return weightlist
Example 6: __init__
def __init__(self, model_path, data_processor, gpu, layers):
    op = ConvNetPredict.get_options_parser()
    op.set_value('load_file', model_path)
    op.set_value('gpu', str(gpu))
    load_dic = IGPUModel.load_checkpoint(model_path)
    old_op = load_dic["op"]
    old_op.merge_from(op)
    op = old_op
    op.eval_expr_defaults()
    ConvNet.__init__(self, op, load_dic)
    self.dp = data_processor
    self.ftr_layer_idx = map(self.get_layer_idx, layers)
Example 7: get_test_error
def get_test_error(self):
    data, filenames = self.get_test_patches_and_filenames()
    if self.test_on_images:
        print 'testing on images'
        probabilities, test_results = self.get_predictions(data)
        fileProbs, fileLabels, fileIDs = ModelEvaluation.GetUnormalizedJointLogProbability(probabilities, data[1].reshape(-1), filenames)
        filePredictions = ModelEvaluation.GetPredictions(fileProbs)
        fileAccuracy, misclassifiedFiles = ModelEvaluation.CalculateAccuracy(filePredictions, fileLabels)
        nExamples = test_results[1]
        results = ({'logprob': [test_results[0]['logprob'][0], (1 - fileAccuracy) * test_results[1]]}, test_results[1])
        if self.test_only:  # Print the individual batch results for safety
            print str(results)
    else:
        print 'not testing on images'
        self.libmodel.startBatch(data, True)
        results = self.finish_batch()
        self.regular_test_outputs += [IGPUModel.get_test_error(self)]
    return results
Example 8: float
        # result.append(item + ';' + model.ClassNameTest(item) + ';' + door + '\n')
        ground_truth = model.ClassNameTest(item)
        print ground_truth, door
        if not model.ClassNameTest(item) == '0':
            P_num += 1
        if not door == ground_truth:
            error += 1
    erro_ratio = float(error) / i
    print erro_ratio
    print i, P_num, len(result), error
    # result.append('error_ratio:' + str(erro_ratio) + ' Positive_num:' + str(P_num) + ' total_num:' + str(i))
    # myreslut = sorted(result, key=lambda result: result[0])
    # if P_num < 2000:
    #     my_result = file('myresult_p.txt', 'wb')
    # else:
    #     my_result = file('myresult_n.txt', 'wb')
    # my_result.writelines(myreslut)
    # my_result.close()
except (UnpickleError, ShowNetError, opt.GetoptError), e:
    print "----------------"
    print "Error:"
    print e

print 'finish_8'
op = ShowPredction.get_options_parser()
op, load_dic = IGPUModel.parse_options(op)
model = ShowPredction(op, load_dic)
print os.path.exists("G:\\door_data_sampling\\posture\\data_pos\\test\\test_value_p\\")
show_predict_dir('G:\\door_data_sampling\\posture\\test\\org_data\\')
Example 9: ShowConvNet
if not os.path.exists(outPred):
    os.makedirs(outPred)

####################################
################## load CNN here ######
import getopt as opt
from gpumodel import IGPUModel
from options import *

op = ShowConvNet.get_options_parser()
# op.options['load_file'].value = r'.\ConvNet_3DCNN\tmp\ConvNet__2014-05-28_01.59.00'  ### old
op.options['load_file'].value = r'I:\Kaggle_multimodal\StartingKit_track3\Final_project\ConvNet_3DCNN\tmp\ConvNet__2014-05-26_03.40.18'
op.options['write_features'].value = 'probs'
load_dic = IGPUModel.load_checkpoint(op.options["load_file"].value)
old_op = load_dic["op"]
old_op.merge_from(op)
op = old_op
op.eval_expr_defaults()
op.options['train_batch_range'].value = [1]
op.options['test_batch_range'].value = [1]
op.options['data_path'].value = r'.\ConvNet_3DCNN\storage_sk_final'
model = ShowConvNet(op, load_dic)
model.crop_border = 0
meta = pickle.load(open(r'.\ConvNet_3DCNN\storage_sk_final\batches.meta'))
data_mean = meta['data_mean']
Example 10: predict_proba
def predict_proba(self, X, train=True):
    op = shownet.ShowConvNet.get_options_parser()
    predict_dict = {
        '--write-features': 'probs',
        '--feature-path': '/tmp/feature_path',
        '--test-range': '2',
        '--train-range': '1',
        '-f': self.last_model,
        '--data-provider': 'dp_scikit',
        '--show-preds': '',
        '--multiview-test': 0,
        '--logreg-name': 'aaa'
    }
    op.parse_from_dictionary(predict_dict)
    load_dic = None
    options = op.options
    if options["load_file"].value_given:
        print 'load file option provided'
        load_dic = IGPUModel.load_checkpoint(options["load_file"].value)
        old_op = load_dic["op"]
        old_op.merge_from(op)
        op = old_op
    op.eval_expr_defaults()

    if hasattr(X, 'get_next_batch'):
        data_provider = X
    else:
        data_provider = InMemorySplitDataProvider(X, None, fraction_test=0.0)
    data_provider.init_data_providers()

    class MyConvNet(shownet.ShowConvNet):
        def init_data_providers(self):
            self.dp_params['convnet'] = self

        def compute_probs(self, X):
            out = None
            while True:
                data_all = data_provider.get_next_batch(train=train)
                epoch, batch = data_all[0], data_all[1]
                if epoch != 1:
                    break
                print 'working on epoch: {}, batch: {}'.format(epoch, batch)
                data = data_all[2]
                if isinstance(data[0], list):
                    data_point = data[0][4]
                else:
                    data_point = data[0].shape[1]
                print 'data points {}'.format(data_point)
                num_ftrs = self.layers[self.ftr_layer_idx]['outputs']
                ftrs = np.zeros((data_point, num_ftrs), dtype=np.single)
                self.libmodel.startFeatureWriter(data + [ftrs], self.ftr_layer_idx)
                self.finish_batch()
                if out is None:
                    out = ftrs
                else:
                    out = np.vstack((out, ftrs))
            return out

    model = MyConvNet(op, load_dic=load_dic)
    probs = model.compute_probs(data_provider)
    model.cleanup()
    return probs
Example 11: fit
def fit(self, X, y, use_starting_point=True, **kwargs):
    print 'about to fit ConvNetLearn'
    if use_starting_point and self.last_model is not None:
        self.dict['-f'] = self.last_model
    op = convnet.ConvNet.get_options_parser()
    op.parse_from_dictionary(self.dict)
    load_dic = None
    options = op.options
    if options["load_file"].value_given:
        print 'load file option provided'
        load_dic = IGPUModel.load_checkpoint(options["load_file"].value)
        # Optionally collect weights, biases and their momentum buffers from
        # previously saved checkpoints, keyed by layer name.
        name_to_weights = {}
        if self.init_states_models is not None:
            name_to_weights = {}
            for init_model in self.init_states_models:
                load_dic_local = IGPUModel.load_checkpoint(init_model)
                for k, v in load_dic_local['model_state'].iteritems():
                    if k == 'layers':
                        for elem in v:
                            name = elem.get('name')
                            weights = elem.get('weights')
                            if weights is not None:
                                print 'adding weights for layer {}'.format(name)
                                if name not in name_to_weights:
                                    name_to_weights[name] = {}
                                name_to_weights[name]['weights'] = weights
                                name_to_weights[name]['biases'] = elem.get('biases')
                                name_to_weights[name]['weightsInc'] = elem.get('weightsInc')
                                name_to_weights[name]['biasesInc'] = elem.get('biasesInc')
        if len(name_to_weights) > 0:
            print 'layer names with init arrays: {}'.format(name_to_weights.keys())
            # Overwrite the matching layers in the checkpoint the model is about to resume from.
            for k, v in load_dic['model_state'].iteritems():
                if k == 'layers':
                    for elem in v:
                        name = elem.get('name')
                        print 'name of layer to possibly be updated {}'.format(name)
                        weights = elem.get('weights')
                        if weights is not None:
                            if name in name_to_weights:
                                print 'changing init point of model for layer {}'.format(name)
                                coefs_name = name_to_weights.get(name)
                                if coefs_name is None or 'weights' not in coefs_name:
                                    raise Exception('coef names do not have weights for {}, coef names fields: {}'.format(name, coefs_name.keys()))
                                elem['weights'] = coefs_name['weights']
                                elem['biases'] = coefs_name['biases']
                                elem['weightsInc'] = coefs_name['weightsInc']
                                elem['biasesInc'] = coefs_name['biasesInc']
        old_op = load_dic["op"]
        old_op.merge_from(op)
        op = old_op
    op.eval_expr_defaults()
    try:
        self.dict.pop('-f')
    except:
        pass
    if hasattr(X, 'get_next_batch'):
        data_provider = X
    else:
        data_provider = InMemorySplitDataProvider(X, y, fraction_test=self.fraction_test)
    data_provider.init_data_providers()
    num_classes = data_provider.get_num_classes()
    logger.info('num classes {}'.format(num_classes))
    # We adjust the number of classes dynamically.
    for name in self.mcp_layers.sections():
        if self.mcp_layers.has_option(name, 'outputs'):
            if self.mcp_layers.get(name, 'outputs') == 'num_classes':
                self.mcp_layers.set(name, 'outputs', value='{}'.format(num_classes))

    ##################
    class MyConvNet(convnet.ConvNet):
        def __init__(self, op, load_dic, mcp_layers, mcp_params, fraction_test):
            self.layer_def_dict = mcp_layers
            self.layer_params_dict = mcp_params
            convnet.ConvNet.__init__(self, op, load_dic=load_dic, initialize_from_file=False)
            self.test_one = True
            self.epoch = 1
# ......... part of the code omitted here .........