This article collects typical usage examples of the Bio.LogisticRegression.train method in Python. If you have been wondering what LogisticRegression.train does, how to call it, or what it looks like in real code, the hand-picked examples below should help. You can also look further into usage examples of Bio.LogisticRegression, the module this method belongs to.
Three code examples of LogisticRegression.train are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python examples.
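Before diving into the examples, a minimal sketch of the train API itself may help. This assumes a Biopython release that still ships Bio.LogisticRegression (the module has been deprecated in recent versions), and the tiny dataset below is made up purely for illustration: train expects a list of numeric feature vectors plus a parallel list of 0/1 labels, and returns a model whose beta attribute holds the fitted coefficients.

from Bio import LogisticRegression

# Toy, slightly overlapping two-class data (invented for illustration only).
xs = [[1.0, 1.0], [1.5, 2.0], [2.0, 1.5], [3.5, 3.0], [3.0, 3.5], [1.8, 1.7]]
ys = [0, 0, 0, 1, 1, 1]

model = LogisticRegression.train(xs, ys)
print(model.beta)                                      # [intercept, coef_1, coef_2]
print(LogisticRegression.classify(model, [3.2, 3.1]))  # hard 0/1 prediction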
Example 1: test_calculate_model_with_update_callback
# Required import: from Bio import LogisticRegression [as alias]
# Or: from Bio.LogisticRegression import train [as alias]
def test_calculate_model_with_update_callback(self):
    model = LogisticRegression.train(xs, ys, update_fn=show_progress)
    beta = model.beta
    self.assertAlmostEqual(beta[0], 8.9830, places=4)
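In Example 1, xs, ys and show_progress are module-level fixtures defined elsewhere in the surrounding test file, and the expected value of beta[0] only holds for that specific training set. The callback has the same shape as the one in Example 2; as a hypothetical stand-in:

def show_progress(iteration, loglikelihood):
    # update_fn is invoked once per training iteration with the current
    # iteration number and log-likelihood, so progress can be reported.
    print("Iteration:", iteration, "Log-likelihood:", loglikelihood)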
Example 2: str
# Required import: from Bio import LogisticRegression [as alias]
# Or: from Bio.LogisticRegression import train [as alias]
from Bio import LogisticRegression
import numpy as np

# Load the iris dataset; the last column is the species name.
all_data = np.loadtxt("../datasets/iris/iris.data", delimiter=",",
                      dtype="float, float, float, float, S11")
xs = []
ys = []
for i in all_data:
    # Keep only Iris-setosa and Iris-versicolor (drop Iris-virginica),
    # so the problem is two-class as LogisticRegression requires.
    if 'virgi' not in str(i[-1]):
        xs.append([i[0], i[1], i[2], i[3]])
        if 'setosa' in str(i[-1]):
            ys.append(0)
        else:
            ys.append(1)

# Hold out the last sample (an Iris-versicolor) as a sanity check.
test_xs = xs.pop()
test_ys = ys.pop()

def show_progress(iteration, loglikelihood):
    print("Iteration:", iteration, "Log-likelihood function:", loglikelihood)

model = LogisticRegression.train(xs, ys, update_fn=show_progress)
print("This should be Iris-versic (1): {}".format(LogisticRegression.classify(model, test_xs)))
Example 3: post
# Required import: from Bio import LogisticRegression [as alias]
# Or: from Bio.LogisticRegression import train [as alias]
def post(self):
    # Request handler excerpt: it relies on module-level imports of os, random,
    # LogisticRegression and the Entity model used elsewhere in this service.
    alldata = self.getRequestData()
    user = self.objUserInfo
    s = Entity.model(self.db)
    print(alldata)
    if alldata['model_type'] == 1:
        # Parse the whitespace-separated "x1,x2" pairs and the comma-separated
        # labels sent by the client, then fit a logistic regression model.
        xss = alldata['xs'].split()
        xs = []
        ys = []
        q = 0
        for i in xss:
            xs.append([float(i.split(',')[0]), float(i.split(',')[1])])
        for i in range(len(xs)):
            ys.append(int(alldata['ys'].split(',')[q]))
            q = q + 1
        print(len(xs), len(ys))
        model = LogisticRegression.train(xs, ys)
        if model.beta:
            # Persist the fitted coefficients.
            lsData = {
                "create_id": user['id'],
                "name": alldata['name'],
                "beta": str(model.beta),
                "note": alldata['note']
            }
            id = s.save(lsData, table='public.logistis')
            self.response(id)
    elif alldata['model_type'] == 2:
        # Same request parsing as above, but instead of fitting a model here,
        # write out a Python module that trains a Bio.kNN model on demand.
        xss = alldata['xs'].split()
        xs = []
        ys = []
        q = 0
        for i in xss:
            xs.append([float(i.split(',')[0]), float(i.split(',')[1])])
        for i in range(len(xs)):
            ys.append(int(alldata['ys'].split(',')[q]))
            q = q + 1
        print(xs, ys)
        # Pick a random, unused file name for the generated module.
        count = 1
        while count >= 0:
            rpath = str(random.randint(10000, 90000))
            pyfile = '/home/ubuntu/pythonff/mdt/mdt/mdtproject/trunk/service/data_mining/' + rpath + '.py'
            if not os.path.isfile(pyfile):
                count = -1
            else:
                count = 1
        f = open(pyfile, 'w')
        text = ('from Bio import kNN' + '\n'
                + 'class model():' + '\n'
                + '    def knn(self):' + '\n'
                + '        xs = ' + str(xs) + '\n'
                + '        ys = ' + str(ys) + '\n'
                + '        k = ' + str(alldata['k']) + '\n'
                + '        model = kNN.train(xs, ys, k)' + '\n'
                + '        return model')
        print(text)
        f.write(text)
        f.close()
        if os.path.isfile(pyfile):
            lsData = {
                "create_id": user['id'],
                "name": alldata['name'],
                "file_name": rpath,
                "packpath": pyfile,
                "type": '2',
                "note": alldata['note']
            }
            id = s.save(lsData, table='public.pymodel')
            self.response(id)
    elif alldata['model_type'] == 3:
        # As above, but the generated module trains a Bio.NaiveBayes model.
        xss = alldata['xs'].split()
        xs = []
        ys = []
        q = 0
        for i in xss:
            xs.append([float(i.split(',')[0]), float(i.split(',')[1])])
        for i in range(len(xs)):
            ys.append(int(alldata['ys'].split(',')[q]))
            q = q + 1
        print(xs, ys)
        count = 1
        while count >= 0:
            rpath = str(random.randint(10000, 90000))
            pyfile = '/home/ubuntu/pythonff/mdt/mdt/mdtproject/trunk/service/data_mining/' + rpath + '.py'
            if not os.path.isfile(pyfile):
                count = -1
            else:
                count = 1
        f = open(pyfile, 'w')
        text = ('from Bio import NaiveBayes' + '\n'
                + 'class model():' + '\n'
                + '    def bayes(self):' + '\n'
                + '        xs = ' + str(xs) + '\n'
                + '        ys = ' + str(ys) + '\n'
                + '        model = NaiveBayes.train(xs, ys)' + '\n'
                + '        return model')
        print(text)
        f.write(text)
        f.close()
        if os.path.isfile(pyfile):
            lsData = {
                "create_id": user['id'],
                "name": alldata['name'],
                "file_name": rpath,
                "packpath": pyfile,
                "type": '3',
                "note": alldata['note']
            }
            id = s.save(lsData, table='public.pymodel')
#.........the rest of this example is omitted here.........
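The handler in Example 3 only writes the generated module to disk and records its path; how that file is consumed later falls in the omitted part. Purely as a hypothetical sketch (the loader below is not part of the original code), the saved module could be imported by path and the kNN or NaiveBayes model it wraps trained on demand:

import importlib.util

def load_generated_model(pyfile):
    # Load the generated module from its absolute path and train the model it
    # wraps; use .bayes() instead of .knn() for the NaiveBayes variant.
    spec = importlib.util.spec_from_file_location("generated_model", pyfile)
    module = importlib.util.module_from_spec(spec)
    spec.loader.exec_module(module)
    return module.model().knn()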