本文整理汇总了Python中bigml.api.BigML.create_evaluation方法的典型用法代码示例。如果您正苦于以下问题:Python BigML.create_evaluation方法的具体用法?Python BigML.create_evaluation怎么用?Python BigML.create_evaluation使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类bigml.api.BigML
的用法示例。
在下文中一共展示了BigML.create_evaluation方法的5个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: BigML
# 需要导入模块: from bigml.api import BigML [as 别名]
# 或者: from bigml.api.BigML import create_evaluation [as 别名]
from bigml.api import BigML

# Workflow: source -> dataset -> 30%/70% split -> model -> evaluation.
api = BigML()

src = api.create_source("iris.csv")
api.ok(src)

full_dataset = api.create_dataset(src)
api.ok(full_dataset)

# Hold-out set: out_of_bag=True with sample_rate 0.7 keeps the 30% complement.
holdout = api.create_dataset(
    full_dataset,
    {'name': u"iris' dataset - sample (30.00%)",
     'out_of_bag': True,
     'sample_rate': 0.7})
api.ok(holdout)

# Training set: the matching 70% sample.
training = api.create_dataset(
    full_dataset,
    {'name': u"iris' dataset - sample (70.00%)", 'sample_rate': 0.7})
api.ok(training)

tree = api.create_model(training)
api.ok(tree)

# Evaluate the model trained on the 70% split against the 30% hold-out.
evaluation = api.create_evaluation(
    tree, holdout, {'name': u'my_evaluation_name'})
api.ok(evaluation)
示例2:
# 需要导入模块: from bigml.api import BigML [as 别名]
# 或者: from bigml.api.BigML import create_evaluation [as 别名]
# NOTE(review): this snippet is truncated by the page scrape — the opening of
# the source `args` dict (its `fields` key) and the `source1_file` variable
# are missing, so the first lines below are the tail of a larger literal.
u'000001': {u'name': u'sepal width', u'optype': u'numeric'},
u'000002': {u'name': u'petal length', u'optype': u'numeric'},
u'000003': {u'name': u'petal width', u'optype': u'numeric'},
u'000004': {u'name': u'species',
u'optype': u'categorical',
u'term_analysis': {u'enabled': True}}},
}
# Create the source with the (partially shown) field configuration above.
source2 = api.create_source(source1_file, args)
api.ok(source2)
# Make field 000004 (species) the objective field of the dataset.
args = \
{u'objective_field': {u'id': u'000004'},
}
dataset1 = api.create_dataset(source2, args)
api.ok(dataset1)
# Model configuration: cap the number of split candidates per node.
args = \
{u'split_candidates': 32}
model1 = api.create_model(dataset1, args)
api.ok(model1)
# Evaluation configuration: identity fields_map (model field -> dataset field)
# and probability-based operating kind.
args = \
{u'fields_map': {u'000001': u'000001',
u'000002': u'000002',
u'000003': u'000003',
u'000004': u'000004'},
u'operating_kind': u'probability',
}
# NOTE(review): the model is evaluated on its own training dataset here, so
# the resulting metrics are optimistic (no held-out data).
evaluation1 = api.create_evaluation(model1, dataset1, args)
api.ok(evaluation1)
示例3: main
# 需要导入模块: from bigml.api import BigML [as 别名]
# 或者: from bigml.api.BigML import create_evaluation [as 别名]
def main(args=sys.argv[1:]):
    """Parse command-line parameters and run the activity-analysis workflow.

    Builds a BigML source/dataset from ``--source``, extends it with derived
    fields, splits it 80/20, trains and evaluates a model, retrains on the
    full dataset, and optionally shares the created resources.

    :param args: command-line argument list (defaults to ``sys.argv[1:]``)
    """
    parser = argparse.ArgumentParser(
        description="Dataset analysis",
        epilog="BigML, Inc")
    # source with activity data
    parser.add_argument('--source',
                        action='store',
                        dest='source',
                        default=None,
                        help="Full path to file")
    # create private links or not
    parser.add_argument('--share',
                        action='store_true',
                        default=False,
                        help="Share created resources or not")
    # weight models or not
    parser.add_argument('--balance',
                        action='store_true',
                        default=False,
                        help="Weight model or not")
    args = parser.parse_args(args)
    if not args.source:
        sys.exit("You need to provide a valid path to a source")
    api = BigML()
    name = "Sean's activity"
    log("Creating source...")
    source_args = {'name': name}
    source = api.create_source(args.source, source_args)
    if not api.ok(source):
        sys.exit("Source isn't ready...")
    log("Creating dataset...")
    dataset = api.create_dataset(source)
    if not api.ok(dataset):
        sys.exit("Dataset isn't ready...")
    log("Transforming dataset...")
    # Extends dataset with new field for previous activity, previous duration,
    # start day, and start hour. Removes first column, start, and end fields.
    new_dataset_args = {
        'name': name,
        'new_fields': new_fields(),
        'all_but': excluded_fields()}
    new_dataset = api.create_dataset(dataset, new_dataset_args)
    if not api.ok(new_dataset):
        sys.exit("Dataset isn't ready...")
    # Set objective field to activity
    fields = Fields(new_dataset['object']['fields'])
    objective_id = fields.field_id('activity')
    new_dataset_args = {
        'objective_field': {'id': objective_id}}
    new_dataset = api.update_dataset(new_dataset, new_dataset_args)
    # FIX: wait for the objective-field update to finish before splitting;
    # the original proceeded without checking, unlike every other resource.
    if not api.ok(new_dataset):
        sys.exit("Dataset isn't ready...")
    # Create training and test set for evaluation
    log("Splitting dataset...")
    training, test = train_test_split(api, new_dataset)
    log("Creating a model using the training dataset...")
    model_args = {
        'objective_field': objective_id,
        'balance_objective': args.balance,
        'name': training['object']['name']}
    model = api.create_model(training, model_args)
    if not api.ok(model):
        sys.exit("Model isn't ready...")
    # Creating an evaluation
    log("Evaluating model against the test dataset...")
    eval_args = {
        'name': name + ' - 80% vs 20%'}
    evaluation = api.create_evaluation(model, test, eval_args)
    if not api.ok(evaluation):
        sys.exit("Evaluation isn't ready...")
    log("Creating model for the full dataset...")
    model = api.create_model(new_dataset, model_args)
    if not api.ok(model):
        sys.exit("Model isn't ready...")
    # Create private links
    if args.share:
        log("Sharing resources...")
        dataset_private_link = share_dataset(api, new_dataset)
        model_private_link = share_model(api, model)
        evaluation_private_link = share_evaluation(api, evaluation)
        log(dataset_private_link)
        log(model_private_link)
        log(evaluation_private_link)
示例4: BigML
# 需要导入模块: from bigml.api import BigML [as 别名]
# 或者: from bigml.api.BigML import create_evaluation [as 别名]
from bigml.api import BigML

# Minimal workflow: evaluate a model against its own training dataset.
api = BigML()

iris_source = api.create_source("iris.csv")
api.ok(iris_source)

iris_dataset = api.create_dataset(iris_source)
api.ok(iris_dataset)

iris_model = api.create_model(iris_dataset)
api.ok(iris_model)

iris_evaluation = api.create_evaluation(
    iris_model, iris_dataset, {"name": u"my_evaluation_name"})
api.ok(iris_evaluation)
示例5: main
# 需要导入模块: from bigml.api import BigML [as 别名]
# 或者: from bigml.api.BigML import create_evaluation [as 别名]
def main(args=sys.argv[1:]):
    """Parse command-line parameters and run the market-sentiment workflow.

    Creates one BigML source/dataset per CSV under ``--data``, extends and
    merges them into a multi-dataset, splits it 80/20, evaluates both a
    single model and an ensemble, retrains on the full dataset, and
    optionally shares the created resources.

    :param args: command-line argument list (defaults to ``sys.argv[1:]``)
    """
    parser = argparse.ArgumentParser(description="Market sentiment analysis", epilog="BigML, Inc")
    # source with activity data
    parser.add_argument("--data", action="store", dest="data", default="data", help="Full path to data with csv files")
    # create private links or not
    # NOTE(review): action="store_true" with default=True makes this flag a
    # no-op (sharing is always on) — confirm whether default=False was meant.
    parser.add_argument("--share", action="store_true", default=True, help="Share created resources or not")
    args = parser.parse_args(args)
    if not args.data:
        sys.exit("You need to provide a valid path to a data directory")
    api = BigML()
    name = "UpOrDown?"
    log("Creating sources...")
    csvs = glob.glob(os.path.join(args.data, "*.csv"))
    sources = []
    for csv in csvs:
        source = api.create_source(csv)
        api.ok(source)
        sources.append(source)
    log("Creating datasets...")
    datasets = []
    for source in sources:
        dataset = api.create_dataset(source)
        api.ok(dataset)
        datasets.append(dataset)
    new_datasets = []
    for dataset in datasets:
        new_dataset = api.create_dataset(dataset, {"new_fields": new_fields(), "all_fields": False})
        # FIX: wait for each transformed dataset before merging; the original
        # appended without api.ok, so the merge below could be requested
        # against datasets that were not finished yet.
        api.ok(new_dataset)
        new_datasets.append(new_dataset)
    log("Merging datasets...")
    multi_dataset = api.create_dataset(new_datasets, {"name": name})
    api.ok(multi_dataset)
    # Create training and test set for evaluation
    log("Splitting dataset...")
    training, test = training_test_split(api, multi_dataset)
    log("Creating a model using the training dataset...")
    model = api.create_model(training, {"name": name + " (80%)"})
    api.ok(model)
    # Creating an evaluation
    log("Evaluating model against the test dataset...")
    eval_args = {"name": name + " - Single model: 80% vs 20%"}
    evaluation_model = api.create_evaluation(model, test, eval_args)
    api.ok(evaluation_model)
    log("Creating an ensemble using the training dataset...")
    ensemble = api.create_ensemble(training, {"name": name})
    api.ok(ensemble)
    # Creating an evaluation
    log("Evaluating ensemble against the test dataset...")
    eval_args = {"name": name + " - Ensemble: 80% vs 20%"}
    evaluation_ensemble = api.create_evaluation(ensemble, test, eval_args)
    api.ok(evaluation_ensemble)
    log("Creating model for the full dataset...")
    model = api.create_model(multi_dataset, {"name": name})
    api.ok(model)
    # Create private links
    if args.share:
        log("Sharing resources...")
        dataset_link = share_resource(api, multi_dataset)
        model_link = share_resource(api, model)
        evaluation_model_link = share_resource(api, evaluation_model)
        evaluation_ensemble_link = share_resource(api, evaluation_ensemble)
        log(dataset_link)
        log(model_link)
        log(evaluation_model_link)
        log(evaluation_ensemble_link)