本文整理汇总了Python中bigml.api.BigML.create_prediction方法的典型用法代码示例。如果您正苦于以下问题:Python BigML.create_prediction方法的具体用法?Python BigML.create_prediction怎么用?Python BigML.create_prediction使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类bigml.api.BigML
的用法示例。
在下文中一共展示了BigML.create_prediction方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1:
# 需要导入模块: from bigml.api import BigML [as 别名]
# 或者: from bigml.api.BigML import create_prediction [as 别名]
# Train an ensemble on the (externally created) training dataset and score
# it row-by-row against a held-out test set via remote predictions.
model = api.create_ensemble(train_dataset)
# <codecell>
# Read the test dataset (features and target come from separate CSV files)
test_X = pd.read_csv('test.csv')
test_y = pd.read_csv('test_target.csv')
# One {column: value} dict per test row, as expected by create_prediction
test_set = test_X.T.to_dict().values()
# <codecell>
# Holds predictions from all the samples in test set
prediction = []
for x in test_set:
    # Get a remote prediction for this test row
    predict = api.create_prediction(model, x)
    api.pprint(predict)
    # Append the predicted output value to the prediction list
    prediction.append(predict['object'].get('output'))
# <codecell>
# Classification error = fraction of mismatches.
# BUG FIX: the original computed np.sum(y == yhat)/len(y), which is the
# accuracy, not the error the comment and variable name promise.
y = np.array(test_y.target)
yhat = np.array(prediction)
error = np.sum(y != yhat) / float(len(y))
print(error)
示例2: BigML
# 需要导入模块: from bigml.api import BigML [as 别名]
# 或者: from bigml.api.BigML import create_prediction [as 别名]
# Minimal end-to-end flow: source -> dataset -> model -> one remote
# prediction, waiting for each resource to finish before moving on.
from bigml.api import BigML

api = BigML()

source1 = api.create_source("iris.csv")
api.ok(source1)

dataset1 = api.create_dataset(source1, {'name': u'iris'})
api.ok(dataset1)

model1 = api.create_model(dataset1, {'name': u'iris'})
api.ok(model1)

prediction1 = api.create_prediction(
    model1, {u'petal length': 0.5}, {'name': u'my_prediction_name'})
api.ok(prediction1)
示例3: BigML
# 需要导入模块: from bigml.api import BigML [as 别名]
# 或者: from bigml.api.BigML import create_prediction [as 别名]
#@see: http://bigml.readthedocs.org/en/latest/#local-predictions
from bigml.api import BigML
api = BigML('smarkit',"37b903bf765414b5e1c3164061cee5fa57e7e6ad",storage='./storage')
source = api.create_source('./data/red_bule_balls_2003.csv')
api.pprint(api.get_fields(source))
dataset = api.create_dataset(source)
model = api.create_model(dataset)
prediction = api.create_prediction(model, {'red':[1,2,3,4,5,6],'blue':7})
#prediction
api.pprint(prediction)
示例4: BigML
# 需要导入模块: from bigml.api import BigML [as 别名]
# 或者: from bigml.api.BigML import create_prediction [as 别名]
#!/usr/bin/env python
# Contrasts remote predictions (api.create_prediction) with local,
# in-process predictions via Model, Ensemble and Anomaly wrappers.
from bigml.api import BigML
from bigml.model import Model
from bigml.ensemble import Ensemble
from bigml.anomaly import Anomaly

api = BigML(dev_mode=True)

# Remote prediction against a stored model
model = api.get_model("model/563a1c7a3cd25747430023ce")
prediction = api.create_prediction(
    model, {"petal length": 4.07, "sepal width": 3.15, "petal width": 1.51})

# Local prediction: the model JSON is fetched once, then evaluated in-process
local_model = Model("model/56430eb8636e1c79b0001f90", api=api)
prediction = local_model.predict(
    {"petal length": 0.96, "sepal width": 4.1, "petal width": 2.52},
    2, add_confidence=True, multiple=3
)

# NOTE(review): the first Ensemble below is immediately overwritten; it is
# kept only because constructing it downloads/caches that ensemble.
local_model = Ensemble("ensemble/564a02d5636e1c79b5006e13", api=api)
local_model = Ensemble("ensemble/564a081bc6c19b6cf3011c60", api=api)
prediction = local_model.predict(
    {"petal length": 0.95, "sepal width": 3.9, "petal width": 1.51, "sepal length": 7.0},
    method=2, add_confidence=True
)

local_ensemble = Ensemble("ensemble/564623d4636e1c79b00051f7", api=api)
prediction = local_ensemble.predict(
    {"Price": 5.8, "Grape": "Pinot Grigio", "Country": "Italy", "Rating": 92}, True)

local_anomaly = Anomaly("anomaly/564c5a76636e1c3d52000007", api=api)
prediction = local_anomaly.anomaly_score(
    {"petal length": 4.07, "sepal width": 3.15, "petal width": 1.51, "sepal length": 6.02, "species": "Iris-setosa"},
    True,
)
# NOTE(review): a second anomaly_score call was truncated mid-statement in
# the original source; the dangling fragment was removed so the script is
# syntactically valid.
示例5: BigML
# 需要导入模块: from bigml.api import BigML [as 别名]
# 或者: from bigml.api.BigML import create_prediction [as 别名]
# Simple source -> dataset -> model -> prediction flow behind a main guard.
from bigml.api import BigML

if __name__ == "__main__":
    # BUG FIX: the original body was not indented under the guard,
    # which is an IndentationError; print is called as a function so the
    # script runs under both Python 2 and 3.
    print("test")
    # NOTE(review): credentials are hard-coded; prefer environment variables.
    api = BigML("onidzelskyi", "a5b11ebe462ad583478cf40daf17e92060dc5915",
                dev_mode=True)
    source = api.create_source("./data/iris.csv")
    dataset = api.create_dataset(source)
    model = api.create_model(dataset)
    prediction = api.create_prediction(model,
                                       {"sepal length": 5, "sepal width": 2.5})
    api.pprint(prediction)
示例6: BigML
# 需要导入模块: from bigml.api import BigML [as 别名]
# 或者: from bigml.api.BigML import create_prediction [as 别名]
# Train a model on the census training CSV, then request one remote
# prediction per row of the test CSV (target column stripped).
from bigml.api import BigML
import csv
import sys  # BUG FIX: sys.argv was used below without importing sys
import time

api = BigML(dev_mode=True)

# get args
# NOTE(review): these CLI arguments were never used by the original script —
# the paths below are hard-coded. TODO: decide which behavior is intended.
train_csv = sys.argv[1]
test_csv = sys.argv[2]

# train model
source_train = api.create_source('./../../data/census/train.csv')
# BUG FIX: the original called api.create_dataset(dataset_train) on an
# undefined name, and api.create_model(dataset) on another undefined name;
# the dataset is built from the source, and the model from that dataset.
dataset_train = api.create_dataset(source_train)
model = api.create_model(dataset_train)

# test model
with open('./data/census/test.csv', 'rb') as csv_test_file:
    test_csv_reader = csv.reader(csv_test_file, delimiter=',', quotechar='"')
    for row in test_csv_reader:
        # Drop the trailing target column, then map column index -> value
        row.pop()
        row = dict(zip(range(0, len(row)), row))
        prediction = api.create_prediction(model, row)
        api.pprint(prediction)
示例7:
# 需要导入模块: from bigml.api import BigML [as 别名]
# 或者: from bigml.api.BigML import create_prediction [as 别名]
# Full workflow with explicit configuration at every step: declare the field
# types on the source, pick the objective field for the dataset, tune the
# model, and request a probability-based prediction.
source1_file = "iris.csv"

source_args = {
    u'fields': {u'000000': {u'name': u'sepal length', u'optype': u'numeric'},
                u'000001': {u'name': u'sepal width', u'optype': u'numeric'},
                u'000002': {u'name': u'petal length', u'optype': u'numeric'},
                u'000003': {u'name': u'petal width', u'optype': u'numeric'},
                u'000004': {u'name': u'species',
                            u'optype': u'categorical',
                            u'term_analysis': {u'enabled': True}}},
}
source2 = api.create_source(source1_file, source_args)
api.ok(source2)

dataset_args = {u'objective_field': {u'id': u'000004'}}
dataset1 = api.create_dataset(source2, dataset_args)
api.ok(dataset1)

model_args = {u'split_candidates': 32}
model1 = api.create_model(dataset1, model_args)
api.ok(model1)

prediction_args = {
    u'input_data': {u'petal length': 0.5},
    u'operating_kind': u'probability',
}
prediction1 = api.create_prediction(model1, prediction_args)
api.ok(prediction1)