This article collects typical usage examples of the Python method Bio.LogisticRegression.train. If you are unsure what LogisticRegression.train does, how to call it, or how it is used in real code, the hand-picked examples below may help. You can also read more about the module this method belongs to, Bio.LogisticRegression.
Three code examples of LogisticRegression.train are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
Example 1: test_calculate_model_with_update_callback
# Required import: from Bio import LogisticRegression [as alias]
# Or: from Bio.LogisticRegression import train [as alias]
def test_calculate_model_with_update_callback(self):
    # xs, ys and show_progress are defined elsewhere in the test module.
    model = LogisticRegression.train(xs, ys, update_fn=show_progress)
    beta = model.beta
    self.assertAlmostEqual(beta[0], 8.9830, places=4)
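The excerpt above comes from a unit test, so xs, ys and show_progress live elsewhere in that test module, and the asserted beta[0] value depends on that test's own dataset. As a rough, self-contained sketch of the same call, the toy dataset below is invented for illustration (the two classes overlap on purpose so that Newton-Raphson converges to finite coefficients) and will not reproduce the 8.9830 value asserted in the test:

from Bio import LogisticRegression

# Invented two-feature samples with binary class labels.
xs = [[0.0, 0.0], [4.0, 0.0], [0.0, 4.0],
      [1.0, 1.0], [3.0, 3.0], [4.0, 4.0], [2.0, 4.0]]
ys = [0, 0, 0, 1, 1, 1, 1]

def show_progress(iteration, loglikelihood):
    # Called once per training iteration by LogisticRegression.train.
    print("Iteration:", iteration, "Log-likelihood:", loglikelihood)

model = LogisticRegression.train(xs, ys, update_fn=show_progress)
print(model.beta)  # intercept followed by the two feature coefficients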
Example 2: str
# Required import: from Bio import LogisticRegression [as alias]
# Or: from Bio.LogisticRegression import train [as alias]
from Bio import LogisticRegression
import numpy as np

all_data = np.loadtxt("../datasets/iris/iris.data", delimiter=",",
                      dtype="float, float, float, float, S11")
xs = []
ys = []
for i in all_data:
    # Keep only Iris-setosa and Iris-versicolor rows; drop Iris-virginica.
    if 'virgi' not in str(i[-1]):
        xs.append([i[0], i[1], i[2], i[3]])
        if 'setosa' in str(i[-1]):
            ys.append(0)
        else:
            ys.append(1)

# Hold back the last sample as a quick sanity check.
test_xs = xs.pop()
test_ys = ys.pop()

def show_progress(iteration, loglikelihood):
    print("Iteration:", iteration, "Log-likelihood function:", loglikelihood)

model = LogisticRegression.train(xs, ys, update_fn=show_progress)
print("This should be Iris-versic (1): {}".format(LogisticRegression.classify(model, test_xs)))
Example 3: post
# Required import: from Bio import LogisticRegression [as alias]
# Or: from Bio.LogisticRegression import train [as alias]
# os, random and Entity are assumed to be imported elsewhere in this handler module.
def post(self):
    alldata = self.getRequestData()
    user = self.objUserInfo
    s = Entity.model(self.db)
    print(alldata)
    if alldata['model_type'] == 1:
        # Parse whitespace-separated "x1,x2" pairs into feature vectors
        # and the comma-separated label string into class labels.
        xss = alldata['xs'].split()
        xs = []
        ys = []
        q = 0
        for i in xss:
            xs.append([float(i.split(',')[0]), float(i.split(',')[1])])
        for i in range(len(xs)):
            ys.append(int(alldata['ys'].split(',')[q]))
            q = q + 1
        print(len(xs), len(ys))
        model = LogisticRegression.train(xs, ys)
        if model.beta:
            lsData = {
                "create_id": user['id'],
                "name": alldata['name'],
                "beta": str(model.beta),
                "note": alldata['note']
            }
            id = s.save(lsData, table='public.logistis')
            self.response(id)
    elif alldata['model_type'] == 2:
        xss = alldata['xs'].split()
        xs = []
        ys = []
        q = 0
        for i in xss:
            xs.append([float(i.split(',')[0]), float(i.split(',')[1])])
        for i in range(len(xs)):
            ys.append(int(alldata['ys'].split(',')[q]))
            q = q + 1
        print(xs, ys)
        # Pick a random file name that is not already in use.
        count = 1
        while count >= 0:
            rpath = str(random.randint(10000, 90000))
            pyfile = '/home/ubuntu/pythonff/mdt/mdt/mdtproject/trunk/service/data_mining/' + rpath + '.py'
            if not os.path.isfile(pyfile):
                count = -1
            else:
                count = 1
        # Generate a small module that rebuilds a kNN model from the submitted data.
        f = open(pyfile, 'w')
        text = ('from Bio import kNN' + '\n'
                + 'class model():' + '\n'
                + '    def knn(self):' + '\n'
                + '        xs = ' + str(xs) + '\n'
                + '        ys =' + str(ys) + '\n'
                + '        k=' + str(alldata['k']) + '\n'
                + '        model = kNN.train(xs,ys,k)' + '\n'
                + '        return model')
        print(text)
        f.write(text)
        f.close()
        if os.path.isfile(pyfile):
            lsData = {
                "create_id": user['id'],
                "name": alldata['name'],
                "file_name": rpath,
                "packpath": pyfile,
                "type": '2',
                "note": alldata['note']
            }
            id = s.save(lsData, table='public.pymodel')
            self.response(id)
    elif alldata['model_type'] == 3:
        xss = alldata['xs'].split()
        xs = []
        ys = []
        q = 0
        for i in xss:
            xs.append([float(i.split(',')[0]), float(i.split(',')[1])])
        for i in range(len(xs)):
            ys.append(int(alldata['ys'].split(',')[q]))
            q = q + 1
        print(xs, ys)
        # Pick a random file name that is not already in use.
        count = 1
        while count >= 0:
            rpath = str(random.randint(10000, 90000))
            pyfile = '/home/ubuntu/pythonff/mdt/mdt/mdtproject/trunk/service/data_mining/' + rpath + '.py'
            if not os.path.isfile(pyfile):
                count = -1
            else:
                count = 1
        # Generate a small module that rebuilds a Naive Bayes model from the submitted data.
        f = open(pyfile, 'w')
        text = ('from Bio import NaiveBayes' + '\n'
                + 'class model():' + '\n'
                + '    def bayes(self):' + '\n'
                + '        xs = ' + str(xs) + '\n'
                + '        ys =' + str(ys) + '\n'
                + '        model = NaiveBayes.train(xs,ys)' + '\n'
                + '        return model')
        print(text)
        f.write(text)
        f.close()
        if os.path.isfile(pyfile):
            lsData = {
                "create_id": user['id'],
                "name": alldata['name'],
                "file_name": rpath,
                "packpath": pyfile,
                "type": '3',
                "note": alldata['note']
            }
            id = s.save(lsData, table='public.pymodel')
# ......... part of the code is omitted here .........
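For model types 2 and 3 the handler does not train anything directly; it writes a small Python module containing the submitted data and records its path in public.pymodel, presumably so another part of the service can rebuild the model later. That later step is not shown here, but a minimal sketch of loading one of the generated kNN files could look like the following; the file name 12345 is hypothetical, and the class name model and method knn simply follow the string template above:

import importlib.util

from Bio import kNN

# Hypothetical generated file; the real name is the random number stored
# in the "file_name" / "packpath" fields above.
pyfile = "/home/ubuntu/pythonff/mdt/mdt/mdtproject/trunk/service/data_mining/12345.py"

# Import the generated module from its file path.
spec = importlib.util.spec_from_file_location("generated_knn", pyfile)
module = importlib.util.module_from_spec(spec)
spec.loader.exec_module(module)

# model().knn() re-trains and returns the kNN model embedded in the file.
trained = module.model().knn()

# Classify a new two-feature sample with the rebuilt model.
print(kNN.classify(trained, [1.5, 2.0]))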