This article collects typical usage examples of the svm_load_model function from Python's svmutil module. If you are unsure what svm_load_model does, how to call it, or how it is used in real projects, the curated code samples below should help.
A total of 15 svm_load_model code examples are shown below, ordered by popularity.
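Before the project-specific examples, here is a minimal, self-contained sketch of the typical svm_load_model workflow. The file names 'heart_scale.model' and 'heart_scale' are placeholder assumptions, not files used by any of the examples that follow:

# Minimal sketch (assumed file names): load a previously saved LIBSVM model
# and classify data read from a file in LIBSVM's sparse text format.
from svmutil import svm_load_model, svm_read_problem, svm_predict

model = svm_load_model('heart_scale.model')         # model saved earlier with svm_save_model
y, x = svm_read_problem('heart_scale')              # labels and sparse feature dicts
p_labels, p_acc, p_vals = svm_predict(y, x, model)  # predicted labels, (acc, MSE, SCC), decision values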
Example 1: matchPers
def matchPers(p1, rgdP, conf, score=None):
    global svmModel, mt_tmp, SVMfeatures
    if not svmModel:
        if 'featureSet' in conf:
            SVMfeatures = getattr(importlib.import_module('featureSet'), conf['featureSet'])
            svmModel = svm_load_model('conf/person_' + conf['featureSet'] + '.model')
        else:  # default
            SVMfeatures = getattr(importlib.import_module('featureSet'), 'personDefault')
            svmModel = svm_load_model('conf/personDefault.model')
    nodeScore = nodeSim(p1, rgdP)
    #pFam = conf['families'].find_one({'children': p1['_id']})  # find fam if p in 'children'
    pFam = getFamilyFromChild(p1['_id'], conf['families'], conf['relations'])
    #rgdFam = conf['match_families'].find_one({'children': rgdP['_id']})
    rgdFam = getFamilyFromChild(rgdP['_id'], conf['match_families'], conf['match_relations'])
    famScore = familySim(pFam, conf['persons'], rgdFam, conf['match_persons'])
    cand_matchtxt = mt_tmp.matchtextPerson(rgdP, conf['match_persons'], conf['match_families'], conf['match_relations'])
    matchtxt = mt_tmp.matchtextPerson(p1, conf['persons'], conf['families'], conf['relations'])
    cosScore = cos(matchtxt, cand_matchtxt)
    if score is None and 'featureSet' in conf:  # score not used by default
        try:  # Lucene FIX
            #from luceneUtils import search
            import traceback
            candidates = search(matchtxt, p1['sex'], ant=100, config=conf)  # Lucene search
            score = 0.0
            for (kid, sc) in candidates:
                if str(kid) == str(rgdP['_id']):
                    score = sc
                    break
        #except:  # use cos instead ?? if problems running Java in Bottle
        except Exception, e:
            exc_type, exc_value, exc_traceback = sys.exc_info()
            traceback.print_exception(exc_type, exc_value, exc_traceback)
Example 2: cons_train_sample_for_cla
def cons_train_sample_for_cla(filename, indexes, dic_path, glo_aff_path, result_save_path, model_path, LSA_path, LSA_model_path, decom_meas, delete):
    dic_list = read_dic(dic_path, dtype=str)
    glo_aff_list = read_list(glo_aff_path)
    f = file(filename, 'r')
    fs = file(result_save_path, 'w')
    fd = file(dust_save_path, 'w')
    m = svm_load_model(model_path)
    lsa_m = svm_load_model(LSA_model_path)
    U = load_lsa_model(LSA_path, "U")
    for line in f.readlines():
        text = line.strip().split(tc_splitTag)
        if len(text) != line_length:
            fd.write(line)
            continue
        text_temp = ""
        for i in indexes:
            text_temp += str_splitTag + text[i]
        vec = cons_vec_for_cla(text_temp.strip().split(str_splitTag), dic_list, glo_aff_list)
        y, x = cons_svm_problem(text[0], vec)
        p_lab, p_acc, p_sc = svm_predict(y, x, m)
        if decom_meas == 1:
            weight = cal_weight(p_sc[0][0])
            #vec = [value*weight for value in vec]
            vec = [0] * len(vec)
            for key in x[0].keys():
                vec[int(key) - 1] = weight * float(x[0][key])
            vec = pre_doc_svds(vec, U)
            y, x = cons_svm_problem(text[0], vec)
            lsa_lab, lsa_acc, lsa_sc = svm_predict(y, x, lsa_m)
            fs.write(text[0] + "\t" + str(p_sc[0][0]) + "\t" + str(lsa_sc[0][0]) + "\t" + text[1] + "\t" + text[2] + "\n")
        else:
            fs.write(text[0] + "\t" + str(p_sc[0][0]) + "\t" + text[1] + "\t" + text[2] + "\n")
    f.close()
    fs.close()
Example 3: get_fearure_weights
def get_fearure_weights(yprob, years=range(2004, 2017), normalize=False, binary_features=False, top10_type='sum', reg=1e-3):
    from svmutil import svm_load_model
    d = (102, 103)[binary_features]
    W = np.array([]).reshape(0, d)
    periods = []
    for y in years:
        periods.append(get_year_str(y))
        # alpha, SV = get_alpha_and_SV('/home/arya/out/model.'+periods[-1])
        model = svm_load_model('/home/arya/out/model.' + periods[-1])
        alpha = np.array(map(lambda x: abs(x[0]), model.get_sv_coef()))
        SV = model.get_SV()
        X = np.zeros((len(SV), d))
        for i in range(len(SV)):
            for k, v in SV[i].items():
                if k > 0:
                    X[i, k - 1] = v
        W = np.append(W, alpha.dot(X)[None, :], axis=0)
    np.set_printoptions(linewidth='1000', precision=3, edgeitems=55, suppress=True)
    W = W + reg
    if normalize:
        W = W / W.sum(1)[:, None]
    print ('UnNormalized', 'Normalized')[normalize], 'Feature Weights:'
    print W
    if top10_type == 'sum':
        sumW = W.sum(0)
        indices = range(len(sumW))
        indices.sort(lambda x, y: -cmp(sumW[x], sumW[y]))
        top10 = indices[:10]
    elif top10_type == 'genbank':
        yprob = yprob / yprob.sum(0)
        err0 = abs(W[:11, :] - yprob[:, 0][:, None]).sum(0)
        indices = range(len(err0))
        indices.sort(lambda x, y: -cmp(err0[x], err0[y]))
        top10 = indices[:10]
    elif top10_type == 'geo':
        yprob = yprob / yprob.sum(0)
        err1 = abs(W[:11, :] - yprob[:, 1][:, None]).sum(0)
        indices = range(len(err1))
        indices.sort(lambda x, y: -cmp(err1[x], err1[y]))
        top10 = indices[:10]
    elif top10_type == 'all':
        top10 = range(W.shape[1])
    else:
        print top10_type, 'not found'
        exit(1)
    top10 = sorted(top10)
    print top10, top10_type
    # exit(1)
    info = """
    W: t x d matrix of weights which each line contains A weight correponding to time t (periods[t]
    periods: t x 1 string list which each element contains the period, e.g. 2004-2008
    top10: Top 10 features which has A larger sum over all the periods
    """
    Data = {'W': W, 'periods': periods, 'top10': top10, 'info': info}
    save_data_pkl(Data, '/home/arya/out/trends{}{}.pkl'.format(('_unnormalized', '_normalized')[normalize], ('_integer', '_binary')[binary_features]))
    return W, periods, top10
Example 4: __setstate__
def __setstate__(self, state):
    self.svm_model_fp = state['svm_model_fp']
    self.svm_label_map_fp = state['svm_label_map_fp']
    self.train_params = state['train_params']
    self.normalize = state['normalize']
    # C libraries/pointers don't survive across processes.
    if '__LOCAL__' in state:
        fd, fp = tempfile.mkstemp()
        try:
            os.close(fd)
            self.svm_label_map = state['__LOCAL_LABELS__']
            # write model binary to file, then load via libSVM
            with open(fp, 'wb') as model_f:
                model_f.write(state['__LOCAL_MODEL__'])
            self.svm_model = svmutil.svm_load_model(fp)
        finally:
            os.remove(fp)
    else:
        self.svm_model = None
        self._reload_model()
Example 5: _get_classifier
def _get_classifier(svm_name=None):
    """
    If need be, initializes, and then returns a classifier trained to
    differentiate between different ions and water. Also returns options for
    gathering features.

    To use the classifier, you will need to pass it to
    svm.libsvm.svm_predict_probability. Ion prediction is already encapsulated
    by predict_ion, so most users should just call that.

    Parameters
    ----------
    svm_name : str, optional
        The SVM to use for prediction. By default, the SVM trained on heavy
        atoms and calcium in the presence of anomalous data is used. See
        chem_data/classifiers for a full list of SVMs available.

    Returns
    -------
    svm.svm_model
        The libsvm classifier used to predict the identities of ion sites.
    dict of str, bool
        Options to pass to ion_vector when collecting features about these sites.
    tuple of ((tuple of numpy.array of float, numpy.array of float),
              tuple of float)
        The scaling parameters passed to scale_to.
    numpy.array of bool
        The features of the vector that were selected as important for
        classification. Useful both for asserting that ion_vector is returning
        something of the correct size and for selecting only the features that
        actually affect classification.

    See Also
    --------
    svm.libsvm.svm_predict_probability
    mmtbx.ions.svm.predict_ion
    phenix_dev.ion_identification.nader_ml.ions_train_svms
    """
    assert (svmutil is not None)
    global _CLASSIFIER, _CLASSIFIER_OPTIONS
    if not svm_name or str(svm_name) == "Auto":
        svm_name = _DEFAULT_SVM_NAME
    if svm_name not in _CLASSIFIER:
        svm_path = os.path.join(CLASSIFIERS_PATH, "{}.model".format(svm_name))
        options_path = os.path.join(CLASSIFIERS_PATH,
                                    "{}_options.pkl".format(svm_name))
        try:
            _CLASSIFIER[svm_name] = svmutil.svm_load_model(svm_path)
        except IOError as err:
            if err.errno != errno.ENOENT:
                raise err
            else:
                _CLASSIFIER[svm_name] = None
                _CLASSIFIER_OPTIONS[svm_name] = (None, None, None)
        _CLASSIFIER_OPTIONS[svm_name] = load(options_path)
    vector_options, scaling, features = _CLASSIFIER_OPTIONS[svm_name]
    return _CLASSIFIER[svm_name], vector_options, scaling, features
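The docstring above says the returned classifier is meant to be passed to svm.libsvm.svm_predict_probability. As a rough, hypothetical sketch of the same idea through the higher-level svmutil wrapper (the feature vector and the '-b 1' probability option are assumptions, not part of this project's code):

# Hypothetical follow-up to Example 5: query the loaded classifier for class
# probabilities via svmutil's '-b 1' option instead of the low-level
# svm.libsvm.svm_predict_probability call mentioned in the docstring.
classifier, vector_options, scaling, features = _get_classifier()
site_vector = [0.12, 0.0, 3.4]  # placeholder feature values for one ion site
labels, accuracy, probabilities = svmutil.svm_predict(
    [0], [site_vector], classifier, '-b 1')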
Example 6: classify
def classify(filename, classLabel=0):
    str = "/Thu_Life/CS/SVM/data/trainData/Test_SVMFile/singleSVM_TestFile"
    f = open(str, "wb")
    t = VSM.TextToVector2(filename)
    slabel = ("%d ") % classLabel
    if len(t) > 0:
        f.write(slabel)
        for k in range(len(t)):
            str1 = ("%d:%d ") % (t[k][0], t[k][1])
            f.write(str1)
        f.write("\r\n")
    else:
        print "The text can't be classified to the Four Labels!"
        return "Can't be classified ! "
    f.close()
    y, x = svmutil.svm_read_problem(str)
    model = svmutil.svm_load_model("../SVMTrainFile250.model")
    label, b, c = svmutil.svm_predict(y, x, model)
    print "label", label
    if label[0] == 1:
        print "类别:财经"
        return "财经"  # Finance
    elif label[0] == 2:
        print "类别:IT"
        return "IT"
    elif label[0] == 3:
        print "类别:旅游"
        return "旅游"  # Travel
    elif label[0] == 4:
        print "类别:体育"
        return "体育"  # Sports
Example 7: predict_all
def predict_all(request):
    '''Predicts points in an array'''
    width = float(request.POST.get("width", "None"))
    height = float(request.POST.get("height", "None"))
    model = svmutil.svm_load_model('libsvm.model')
    # Get grid of points to query
    points = []
    for counterY in [1.0 / 15.0 * y for y in range(0, 15)]:
        for counterX in [1.0 / 15.0 * x for x in range(0, 15)]:
            points.append([counterX, counterY])
    #for counterY in [1.0 / 10.0 * x for x in range(0, 10)]:
    #    for counterX in [1.0 / 10.0 * y for y in range(0, 10)]:
    #        label, acc, val = svmutil.svm_predict([0], [[counterX, counterY]], model)
    #        results[i] = [counterX, counterY, label]
    #        i = i + 1
    #results["length"] = i
    # Get labels
    labels, acc, val = svmutil.svm_predict([0] * len(points), points, model)
    results = {}
    for index, value in enumerate(points):
        results[index] = {"x": points[index][0],
                          "y": points[index][1],
                          "label": labels[index]}
    results["length"] = len(points)
    return json(results)
Example 8: predict
def predict(request):
    predictX = float(request.POST.get("x", -1))
    predictY = float(request.POST.get("y", -1))
    predictLabel = int(request.POST.get("label", -1))
    if predictX == -1 or predictY == -1 or predictLabel == -1:
        return django.http.HttpResponse("Missing Params")
    points = models.Point2d.objects.all()
    # Storing the information to be presented to SVM
    labels = []
    inputs = []
    # For each point, store the information into arrays
    #for p in points:
    #    labels.append(p.label)
    #    inputs.append([p.x, p.y])
    #prob = svm.svm_problem(labels, inputs)
    #param = svm.svm_parameter('-t 2 -c 100')
    #model = svmutil.svm_train(prob, param)
    #svmutil.svm_save_model('libsvm.model', model)
    model = svmutil.svm_load_model('libsvm.model')
    p_label, acc, val = svmutil.svm_predict([0], [[predictX, predictY]], model)
    data = {'x': predictX, 'y': predictY, 'label': int(p_label[0])}
    return json(data)
Example 9: __init__
def __init__(self, train_feature_file=TRAIN_FEATURE_FILE):
    if os.path.exists(SAVED_MODEL):
        self.model = svmutil.svm_load_model(SAVED_MODEL)
    else:
        y, x = svmutil.svm_read_problem(train_feature_file)
        self.model = svmutil.svm_train(y, x, '-c 4')
        svmutil.svm_save_model(SAVED_MODEL, self.model)
Example 10: trainSVMAndSave
def trainSVMAndSave(modelLoc, kernel, labels):
    if os.path.exists(modelLoc):
        return svm_load_model(modelLoc)
    else:
        model = trainSVM(kernel, labels)
        svm_save_model(modelLoc, model)
        return model
Example 11: predict
def predict(V, yy):
    m = svmutil.svm_load_model("sample.model")
    x = [list(map(lambda z: z * 10, list(t))) for t in V]
    y = [0 if t < 0 else 1 for t in yy]
    p_label, p_acc, p_val = svmutil.svm_predict(y, x, m)
    print(y)
    print(p_label)
    print(x[10])
Example 12: think
def think(self, text):
    from twss.twss import twss_lite
    import pickle
    from svmutil import svm_load_model
    input = open(self.vocab)
    vocabList = pickle.load(input)
    input.close()
    model = svm_load_model(self.model)
    return "That's what she said!" if twss_lite(text, vocabList, model) == 1 else ""
Example 13: _reload_model
def _reload_model(self):
    """
    Reload SVM model from configured file path.
    """
    if self.svm_model_fp and os.path.isfile(self.svm_model_fp):
        self.svm_model = svmutil.svm_load_model(self.svm_model_fp)
    if self.svm_label_map_fp and os.path.isfile(self.svm_label_map_fp):
        with open(self.svm_label_map_fp, "rb") as f:
            self.svm_label_map = cPickle.load(f)
Example 14: main
def main():
    try:
        lModel = svmutil.svm_load_model(sys.argv[1])
        lRanges = read_ranges(sys.argv[2])
        lFile = sys.argv[3]
        lBlockSize = int(sys.argv[4])
    except IndexError, pExc:
        print "Usage: " + sys.argv[0] + " <model-file> <range-file> "\
            "<problem-file> <block-size>"
        sys.exit(-1)
Example 15: _reload_model
def _reload_model(self):
    """
    Reload SVM model from configured file path.
    """
    if self.svm_model_elem and not self.svm_model_elem.is_empty():
        svm_model_tmp_fp = self.svm_model_elem.write_temp()
        self.svm_model = svmutil.svm_load_model(svm_model_tmp_fp)
        self.svm_model_elem.clean_temp()
    if self.svm_label_map_elem and not self.svm_label_map_elem.is_empty():
        self.svm_label_map = \
            cPickle.loads(self.svm_label_map_elem.get_bytes())