This article collects typical usage examples of the Bio.LogisticRegression class in Python. If you are wondering what the LogisticRegression class is for or how to use it, the curated class code examples below may help.
A total of 4 code examples of the LogisticRegression class are shown below, sorted by popularity by default.
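Before the examples, here is a minimal, self-contained sketch of the Bio.LogisticRegression API that they all rely on. The training set is a made-up toy example (two features per sample, labels restricted to 0 and 1), and the sketch assumes a Biopython version that still ships the Bio.LogisticRegression module; it only illustrates the train, classify and calculate calls.

from Bio import LogisticRegression

# Hypothetical toy training data: two features per sample, binary labels (0 or 1).
xs = [[1.0, 1.0], [2.0, 2.0], [3.0, 1.0], [2.0, 0.0],
      [2.0, 1.0], [4.0, 2.0], [5.0, 1.0], [4.0, 0.0]]
ys = [0, 0, 0, 0, 1, 1, 1, 1]

model = LogisticRegression.train(xs, ys)       # fit by Newton-Raphson; returns a LogisticRegression object
print(model.beta)                              # fitted coefficients: intercept plus one weight per feature

new_x = [3.5, 0.5]
print(LogisticRegression.classify(model, new_x))   # hard prediction: 0 or 1
print(LogisticRegression.calculate(model, new_x))  # [P(class 0), P(class 1)]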
Example 1: test_calculate_model_with_update_callback
def test_calculate_model_with_update_callback(self):
    # xs, ys and show_progress are module-level fixtures of the surrounding test file.
    model = LogisticRegression.train(xs, ys, update_fn=show_progress)
    beta = model.beta
    self.assertAlmostEqual(beta[0], 8.9830, places=4)
Example 2: classifying the Iris dataset
from Bio import LogisticRegression
import numpy as np

# Load the Iris data as a structured array: four float features plus the species name.
all_data = np.loadtxt("../datasets/iris/iris.data", delimiter=",",
                      dtype="float, float, float, float, S11")
xs = []
ys = []
for i in all_data:
    # Keep only Iris-setosa and Iris-versicolor (drop Iris-virginica),
    # since Bio.LogisticRegression is a binary classifier.
    if 'virgi' not in str(i[-1]):
        xs.append([i[0], i[1], i[2], i[3]])
        if 'setosa' in str(i[-1]):
            ys.append(0)
        else:
            ys.append(1)

# Hold out the last sample for a sanity check.
test_xs = xs.pop()
test_ys = ys.pop()

def show_progress(iteration, loglikelihood):
    print("Iteration:", iteration, "Log-likelihood function:", loglikelihood)

model = LogisticRegression.train(xs, ys, update_fn=show_progress)
print("This should be Iris-versic (1): {}".format(LogisticRegression.classify(model, test_xs)))
Example 3: post
def post(self):
    alldata = self.getRequestData()
    user = self.objUserInfo
    s = Entity.model(self.db)
    print(alldata)
    if alldata['model_type'] == 1:
        # Logistic regression: parse the space-separated "x1,x2" pairs and the label list.
        xss = alldata['xs'].split()
        xs = []
        ys = []
        q = 0
        for i in xss:
            xs.append([float(i.split(',')[0]), float(i.split(',')[1])])
        for i in range(len(xs)):
            ys.append(int(alldata['ys'].split(',')[q]))
            q = q + 1
        print(len(xs), len(ys))
        model = LogisticRegression.train(xs, ys)
        if model.beta:
            # Persist the fitted coefficients.
            lsData = {
                "create_id": user['id'],
                "name": alldata['name'],
                "beta": str(model.beta),
                "note": alldata['note']
            }
            id = s.save(lsData, table='public.logistis')
            self.response(id)
    elif alldata['model_type'] == 2:
        # k-nearest neighbours: write a small module that rebuilds the model on demand.
        xss = alldata['xs'].split()
        xs = []
        ys = []
        q = 0
        for i in xss:
            xs.append([float(i.split(',')[0]), float(i.split(',')[1])])
        for i in range(len(xs)):
            ys.append(int(alldata['ys'].split(',')[q]))
            q = q + 1
        print(xs, ys)
        count = 1
        while count >= 0:
            # Pick a random module name that does not exist yet.
            rpath = str(random.randint(10000, 90000))
            pyfile = '/home/ubuntu/pythonff/mdt/mdt/mdtproject/trunk/service/data_mining/' + rpath + '.py'
            if not os.path.isfile(pyfile):
                count = -1
            else:
                count = 1
        f = open(pyfile, 'w')
        text = ('from Bio import kNN' + '\n' +
                'class model():' + '\n' +
                '    def knn(self):' + '\n' +
                '        xs = ' + str(xs) + '\n' +
                '        ys = ' + str(ys) + '\n' +
                '        k = ' + str(alldata['k']) + '\n' +
                '        model = kNN.train(xs,ys,k)' + '\n' +
                '        return model')
        print(text)
        f.write(text)
        f.close()
        if os.path.isfile(pyfile):
            lsData = {
                "create_id": user['id'],
                "name": alldata['name'],
                "file_name": rpath,
                "packpath": pyfile,
                "type": '2',
                "note": alldata['note']
            }
            id = s.save(lsData, table='public.pymodel')
            self.response(id)
    elif alldata['model_type'] == 3:
        # Naive Bayes: same pattern as the kNN branch, with a NaiveBayes module instead.
        xss = alldata['xs'].split()
        xs = []
        ys = []
        q = 0
        for i in xss:
            xs.append([float(i.split(',')[0]), float(i.split(',')[1])])
        for i in range(len(xs)):
            ys.append(int(alldata['ys'].split(',')[q]))
            q = q + 1
        print(xs, ys)
        count = 1
        while count >= 0:
            rpath = str(random.randint(10000, 90000))
            pyfile = '/home/ubuntu/pythonff/mdt/mdt/mdtproject/trunk/service/data_mining/' + rpath + '.py'
            if not os.path.isfile(pyfile):
                count = -1
            else:
                count = 1
        f = open(pyfile, 'w')
        text = ('from Bio import NaiveBayes' + '\n' +
                'class model():' + '\n' +
                '    def bayes(self):' + '\n' +
                '        xs = ' + str(xs) + '\n' +
                '        ys = ' + str(ys) + '\n' +
                '        model = NaiveBayes.train(xs,ys)' + '\n' +
                '        return model')
        print(text)
        f.write(text)
        f.close()
        if os.path.isfile(pyfile):
            lsData = {
                "create_id": user['id'],
                "name": alldata['name'],
                "file_name": rpath,
                "packpath": pyfile,
                "type": '3',
                "note": alldata['note']
            }
            id = s.save(lsData, table='public.pymodel')
# ......... part of the code is omitted here .........
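For readability, this is roughly what the generated kNN module from the branch above looks like once the concatenated string is written to disk; the xs, ys and k values shown here are hypothetical placeholders:

# Rendered contents of <random_name>.py for the kNN branch (placeholder values):
from Bio import kNN

class model():
    def knn(self):
        xs = [[1.0, 2.0], [2.0, 1.0], [3.0, 4.0], [4.0, 3.0]]   # hypothetical training features
        ys = [0, 0, 1, 1]                                       # hypothetical labels
        k = 3                                                   # hypothetical neighbour count
        model = kNN.train(xs, ys, k)
        return model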
Example 4: get
def get(self):
    offset = int(self.get_argument('o', default='1'))
    rowcount = int(self.get_argument('r', default='10'))
    offset = (offset - 1) * rowcount
    no = self.get_argument('no', default='')
    model_id = self.get_argument('model_id', default='')
    model_type = self.get_argument('model_type', default='')
    package = self.get_argument('model_name', default='')
    cur = self.db.getCursor()
    rowdata = {}
    # Query mode: return the stored model metadata.
    if no == '1':
        if model_type == '1':
            cur.execute(" select b.name,a.create_id,a.name,a.note,a.beta from public.logistis a "
                        " left join public.account b on a.create_id = b.id "
                        " where a.id='%s' " % (model_id))
            rows = cur.fetchall()
            print(rows)
            rowdata['struct'] = "id,create_id,name,note,beta "
            rowdata['rows'] = rows
        else:
            cur.execute(" select b.name,a.create_id,a.name,a.note,c.name,a.file_name from public.pymodel a "
                        " left join public.account b on a.create_id = b.id "
                        " left join public.model c on a.type = c.type "
                        " where a.id='%s' and a.type='%s' " % (model_id, model_type))
            rows = cur.fetchall()
            rowdata['struct'] = "id,create_id,name,note,type,filename "
            rowdata['rows'] = rows
        self.response(rowdata)
    elif no == '2':
        # Prediction mode: rebuild the stored model and score the posted sample.
        if model_type == '1':
            beta = self.get_argument('beta', default='')
            model_data = self.get_argument('model', default='')
            a = []
            q = 0
            print(model_data)
            a = list(eval(model_data))
            # Rebuild a LogisticRegression model from the stored coefficients.
            model = LogisticRegression.LogisticRegression()
            model.beta = list(eval(beta))
            rowdata = {}
            rowdata['op'] = LogisticRegression.calculate(model, a)
            rowdata['rows'] = LogisticRegression.classify(model, a)
        elif model_type == '2':
            # Import the generated kNN module written by the post handler.
            pack = 'data_mining.' + package
            import importlib
            bb = importlib.import_module(pack)
            ma = kNN.kNN()
            model = bb.model.knn(ma)
            model_data = self.get_argument('model', default='')
            a = []
            a = list(eval(model_data))
            rowdata = {}
            rowdata['op'] = kNN.calculate(model, a)
            rowdata['rows'] = kNN.classify(model, a)
        elif model_type == '3':
            # Import the generated NaiveBayes module written by the post handler.
            pack = 'data_mining.' + package
            import importlib
            bb = importlib.import_module(pack)
            ma = NaiveBayes.NaiveBayes()
            model = bb.model.bayes(ma)
            model_data = self.get_argument('model', default='')
            a = []
            a = list(eval(model_data))
            rowdata = {}
            rowdata['op'] = NaiveBayes.calculate(model, a)
            rowdata['rows'] = NaiveBayes.classify(model, a)
        self.response(rowdata)
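One design note on the handler above: the posted beta and model strings are parsed with eval, which will execute arbitrary Python expressions. A minimal alternative sketch using ast.literal_eval from the standard library, which only accepts plain literals, is shown below; the beta and sample values are hypothetical placeholders standing in for the request arguments:

import ast
from Bio import LogisticRegression

beta_str = "[8.98, -0.04, 0.02]"      # hypothetical stored coefficients (intercept + two weights)
sample_str = "[6, -173.14]"           # hypothetical sample to score

model = LogisticRegression.LogisticRegression()
model.beta = list(ast.literal_eval(beta_str))    # raises ValueError for anything but a literal
sample = list(ast.literal_eval(sample_str))

print(LogisticRegression.calculate(model, sample))   # [P(class 0), P(class 1)]
print(LogisticRegression.classify(model, sample))    # 0 or 1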