本文整理汇总了Python中wekaexamples.helper.print_title函数的典型用法代码示例。如果您正苦于以下问题:Python print_title函数的具体用法?Python print_title怎么用?Python print_title使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了print_title函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: main
def main():
    """
    Just runs some example code: inspects the capabilities of a J48
    classifier and checks them against the iris dataset.
    """
    # instantiate a J48 tree and dump what kinds of data it can handle
    j48 = Classifier("weka.classifiers.trees.J48")
    helper.print_title("Capabilities")
    caps = j48.capabilities
    print(caps)
    # load a dataset
    iris_path = helper.get_data_dir() + os.sep + "iris.arff"
    helper.print_info("Loading dataset: " + iris_path)
    arff_loader = Loader("weka.core.converters.ArffLoader")
    dataset = arff_loader.load_file(iris_path)
    dataset.class_is_last()
    # derive the capabilities required by the dataset and compare
    dataset_caps = Capabilities.for_instances(dataset)
    print(dataset_caps)
    print("classifier handles dataset: " + str(caps.supports(dataset_caps)))
    # disable/enable
    helper.print_title("Disable/Enable")
    unary = Capability(member="UNARY_ATTRIBUTES")
    caps.disable(unary)
    caps.min_instances = 10
    print("Removing: " + str(unary))
    print(caps)
示例2: main
def main():
    """
    Just runs some example code: builds a flow that generates an Agrawal
    dataset and dumps it to the console.
    """
    # assemble the flow: data generator -> console sink
    helper.print_title("Generate dataset")
    flow = Flow(name="generate dataset")
    gen_actor = DataGenerator()
    gen_actor.config["setup"] = datagen.DataGenerator(
        classname="weka.datagenerators.classifiers.classification.Agrawal")
    flow.actors.append(gen_actor)
    flow.actors.append(Console())
    # run the flow, reporting setup/execution failures
    msg = flow.setup()
    if msg is None:
        print("\n" + flow.tree + "\n")
        msg = flow.execute()
        if msg is not None:
            print("Error executing flow:\n" + msg)
    else:
        print("Error setting up flow:\n" + msg)
    flow.wrapup()
    flow.cleanup()
示例3: gridsearch
def gridsearch():
    """
    Applies GridSearch to a dataset, optimizing the RBF kernel's gamma
    and SMOreg's C over a log10 grid.

    NB: the GridSearch package must NOT be installed separately, as the
    monolithic weka.jar already contains this package.
    """
    helper.print_title("GridSearch")
    # load a dataset
    fname = helper.get_data_dir() + os.sep + "bolts.arff"
    helper.print_info("Loading train: " + fname)
    loader = Loader(classname="weka.core.converters.ArffLoader")
    train = loader.load_file(fname)
    train.class_is_last()
    # classifier: search Y = kernel.gamma, X = C, both 10^-3 .. 10^3
    grid = GridSearch(options=["-sample-size", "100.0", "-traversal", "ROW-WISE", "-num-slots", "1", "-S", "1"])
    grid.evaluation = "CC"  # optimize the correlation coefficient
    grid.y = {"property": "kernel.gamma", "min": -3.0, "max": 3.0, "step": 1.0, "base": 10.0, "expression": "pow(BASE,I)"}
    grid.x = {"property": "C", "min": -3.0, "max": 3.0, "step": 1.0, "base": 10.0, "expression": "pow(BASE,I)"}
    cls = Classifier(
        classname="weka.classifiers.functions.SMOreg",
        options=["-K", "weka.classifiers.functions.supportVector.RBFKernel"])
    grid.classifier = cls
    grid.build_classifier(train)
    print("Model:\n" + str(grid))
    print("\nBest setup:\n" + grid.best.to_commandline())
示例4: main
def main():
    """
    Just runs some example code: loads data from a database via a flow
    and prints it to the console.
    """
    # NOTE(review): replace the HOSTNAME/DBNAME/DBUSER/DBPW/TABLE
    # placeholders with real connection details before running.
    # setup the flow
    helper.print_title("Load from database")
    flow = Flow(name="load from database")
    loaddatabase = LoadDatabase()
    loaddatabase.config["db_url"] = "jdbc:mysql://HOSTNAME:3306/DBNAME"
    loaddatabase.config["user"] = "DBUSER"
    loaddatabase.config["password"] = "DBPW"
    loaddatabase.config["query"] = "select * from TABLE"
    flow.actors.append(loaddatabase)
    console = Console()
    flow.actors.append(console)
    # run the flow
    msg = flow.setup()
    if msg is None:
        print("\n" + flow.tree + "\n")
        msg = flow.execute()
        if msg is not None:
            print("Error executing flow:\n" + msg)
    else:
        print("Error setting up flow:\n" + msg)
    flow.wrapup()
    flow.cleanup()
示例5: main
def main():
    """
    Just runs some example code: trains SimpleKMeans on iris (with the
    class attribute removed) and clusters every instance.
    """
    # load a dataset
    data_file = helper.get_data_dir() + os.sep + "iris.arff"
    helper.print_info("Loading dataset: " + data_file)
    arff_loader = Loader("weka.core.converters.ArffLoader")
    data = arff_loader.load_file(data_file)
    # clustering is unsupervised: drop the class attribute
    data.delete_last_attribute()
    # build a clusterer and output model
    helper.print_title("Training SimpleKMeans clusterer")
    kmeans = Clusterer(classname="weka.clusterers.SimpleKMeans", options=["-N", "3"])
    kmeans.build_clusterer(data)
    print(kmeans)
    # cluster data, printing assignment and membership distribution
    helper.print_info("Clustering data")
    for row, inst in enumerate(data, start=1):
        assigned = kmeans.cluster_instance(inst)
        dist = kmeans.distribution_for_instance(inst)
        print(str(row) + ": cluster=" + str(assigned) + ", distribution=" + str(dist))
示例6: main
def _print_generated(generator):
    """
    Defines the data format of the supplied generator and prints the
    generated data: one example at a time in single mode, otherwise the
    complete dataset in one go.

    :param generator: the configured DataGenerator to output data from
    """
    generator.dataset_format = generator.define_data_format()
    print(generator.dataset_format)
    if generator.single_mode_flag:
        # fix: original used Python-2-only xrange (NameError on Python 3)
        for _ in range(generator.num_examples_act):
            print(generator.generate_example())
    else:
        print(generator.generate_examples())


def main():
    """
    Just runs some example code: outputs artificial data created by the
    Agrawal and BayesNet data generators.
    """
    helper.print_title("Generate data (Agrawal)")
    _print_generated(DataGenerator(
        classname="weka.datagenerators.classifiers.classification.Agrawal",
        options=["-n", "10", "-r", "agrawal"]))
    helper.print_title("Generate data (BayesNet)")
    _print_generated(DataGenerator(
        classname="weka.datagenerators.classifiers.classification.BayesNet",
        options=["-S", "2", "-n", "10", "-C", "10"]))
示例7: main
def main():
    """
    Just runs some example code: serializes a trained J48 model to disk,
    both as a single object and together with the dataset header, then
    reads everything back.
    """
    # load a dataset
    iris_file = helper.get_data_dir() + os.sep + "iris.arff"
    helper.print_info("Loading dataset: " + iris_file)
    loader = Loader("weka.core.converters.ArffLoader")
    iris_data = loader.load_file(iris_file)
    iris_data.class_is_last()
    # train classifier
    classifier = Classifier("weka.classifiers.trees.J48")
    classifier.build_classifier(iris_data)
    # save and read object
    helper.print_title("I/O: single object")
    outfile = tempfile.gettempdir() + os.sep + "j48.model"
    serialization.write(outfile, classifier)
    model = Classifier(jobject=serialization.read(outfile))
    print(model)
    # save classifier and dataset header (multiple objects)
    # fix: this title previously repeated "I/O: single object" (copy-paste)
    helper.print_title("I/O: multiple objects")
    serialization.write_all(outfile, [classifier, Instances.template_instances(iris_data)])
    objects = serialization.read_all(outfile)
    for i, obj in enumerate(objects):
        helper.print_info("Object #" + str(i+1) + ":")
        # wrap the raw JNI object in the matching high-level wrapper class
        if javabridge.get_env().is_instance_of(obj, javabridge.get_env().find_class("weka/core/Instances")):
            obj = Instances(jobject=obj)
        elif javabridge.get_env().is_instance_of(obj, javabridge.get_env().find_class("weka/classifiers/Classifier")):
            obj = Classifier(jobject=obj)
        print(obj)
示例8: load_incremental
def load_incremental():
    """
    Loads a dataset incrementally.
    """
    # assemble the flow: file supplier -> incremental loader -> console
    helper.print_title("Load dataset (incremental)")
    iris_path = helper.get_data_dir() + os.sep + "iris.arff"
    flow = Flow(name="load dataset")
    supplier = FileSupplier()
    supplier.config["files"] = [iris_path]
    flow.actors.append(supplier)
    loader_actor = LoadDataset()
    loader_actor.config["incremental"] = True
    flow.actors.append(loader_actor)
    flow.actors.append(Console())
    # run the flow, reporting setup/execution failures
    msg = flow.setup()
    if msg is None:
        msg = flow.execute()
        if msg is not None:
            print("Error executing flow:\n" + msg)
    else:
        print("Error setting up flow:\n" + msg)
    flow.wrapup()
    flow.cleanup()
示例9: load_custom_loader
def load_custom_loader():
    """
    Loads a dataset using a custom loader.
    """
    # assemble the flow: file supplier -> CSV loader -> console
    helper.print_title("Load dataset (custom loader)")
    csv_path = helper.get_data_dir() + os.sep + "iris.csv"
    flow = Flow(name="load dataset")
    supplier = FileSupplier()
    supplier.config["files"] = [csv_path]
    flow.actors.append(supplier)
    loader_actor = LoadDataset()
    loader_actor.config["incremental"] = False
    # override the default ARFF loader with an explicit CSV one
    loader_actor.config["use_custom_loader"] = True
    loader_actor.config["custom_loader"] = Loader(classname="weka.core.converters.CSVLoader")
    flow.actors.append(loader_actor)
    flow.actors.append(Console())
    # run the flow, reporting setup/execution failures
    msg = flow.setup()
    if msg is None:
        msg = flow.execute()
        if msg is not None:
            print("Error executing flow:\n" + msg)
    else:
        print("Error setting up flow:\n" + msg)
    flow.wrapup()
    flow.cleanup()
示例10: main
def main():
    """
    Just runs some example code: plots a dataset, branching into a matrix
    plot and a line plot (with the class attribute removed).
    """
    # setup the flow
    helper.print_title("Plot dataset")
    iris = helper.get_data_dir() + os.sep + "iris.arff"
    flow = Flow(name="plot dataset")
    filesupplier = FileSupplier()
    filesupplier.config["files"] = [iris]
    flow.actors.append(filesupplier)
    loaddataset = LoadDataset()
    flow.actors.append(loaddataset)
    # branch: the same dataset feeds two independent plot sequences
    branch = Branch()
    flow.actors.append(branch)
    seq = Sequence(name="matrix plot")
    branch.actors.append(seq)
    mplot = MatrixPlot()
    mplot.config["percent"] = 50.0
    mplot.config["wait"] = False
    seq.actors.append(mplot)
    seq = Sequence(name="line plot")
    branch.actors.append(seq)
    # copy so the filter does not modify the dataset seen by the other branch
    copy = Copy()
    seq.actors.append(copy)
    flter = Filter()
    flter.config["setup"] = filters.Filter(
        classname="weka.filters.unsupervised.attribute.Remove", options=["-R", "last"])
    flter.config["keep_relationname"] = True
    seq.actors.append(flter)
    lplot = LinePlot()
    lplot.config["percent"] = 50.0
    lplot.config["wait"] = True
    seq.actors.append(lplot)
    # run the flow
    msg = flow.setup()
    if msg is None:
        print("\n" + flow.tree + "\n")
        msg = flow.execute()
        if msg is not None:
            print("Error executing flow:\n" + msg)
    else:
        print("Error setting up flow:\n" + msg)
    flow.wrapup()
    flow.cleanup()
示例11: main
def main():
    """
    Just runs some example code: prints the help text of a configured
    CrossValidate actor.
    """
    # configure the actor, then dump its help output
    helper.print_title("Output actor help")
    crossval = CrossValidate()
    crossval.config["setup"] = Classifier(classname="weka.classifiers.trees.J48")
    crossval.print_help()
示例12: main
def main():
    """
    Just runs some example code: trains an M5P model tree on the bodyfat
    dataset and prints the resulting model.
    """
    # load a dataset
    data_path = helper.get_data_dir() + os.sep + "bodyfat.arff"
    helper.print_info("Loading dataset: " + data_path)
    arff_loader = Loader("weka.core.converters.ArffLoader")
    dataset = arff_loader.load_file(data_path)
    dataset.class_is_last()
    # NOTE(review): the title says "help string" but the code builds and
    # prints the trained model — confirm against the original example
    helper.print_title("Creating help string")
    m5p = Classifier(classname="weka.classifiers.trees.M5P")
    m5p.build_classifier(dataset)
    print(m5p)
示例13: main
def main():
    """
    Just runs some example code: loads/filters a dataset incrementally
    and saves it to a new file.
    """
    # setup the flow
    helper.print_title("Load/filter/save dataset (incrementally)")
    iris = helper.get_data_dir() + os.sep + "iris.arff"
    flow = Flow(name="Load/filter/save dataset (incrementally)")
    filesupplier = FileSupplier()
    filesupplier.config["files"] = [iris]
    flow.actors.append(filesupplier)
    loaddataset = LoadDataset()
    loaddataset.config["incremental"] = True
    flow.actors.append(loaddataset)
    # drop the last (class) attribute from every incoming instance
    flter = Filter()
    flter.config["setup"] = filters.Filter(
        classname="weka.filters.unsupervised.attribute.Remove", options=["-R", "last"])
    flow.actors.append(flter)
    rename = RenameRelation()
    rename.config["name"] = "iris-reduced"
    flow.actors.append(rename)
    # write the filtered stream to a temp ARFF file
    dumper = InstanceDumper()
    dumper.config["output"] = tempfile.gettempdir() + os.sep + "out.arff"
    flow.actors.append(dumper)
    # run the flow
    msg = flow.setup()
    if msg is None:
        print("\n" + flow.tree + "\n")
        msg = flow.execute()
        if msg is not None:
            print("Error executing flow:\n" + msg)
    else:
        print("Error setting up flow:\n" + msg)
    flow.wrapup()
    flow.cleanup()
示例14: main
def main():
    """
    Just runs some example code: builds a SimpleKMeans clusterer in a
    flow and writes the resulting model to a temp file.
    """
    # assemble the flow
    helper.print_title("build and save clusterer")
    data_path = helper.get_data_dir() + os.sep + "iris_no_class.arff"
    flow = Flow(name="build and save clusterer")
    supplier = FileSupplier()
    supplier.config["files"] = [data_path]
    flow.actors.append(supplier)
    flow.actors.append(LoadDataset())
    trainer = Train()
    trainer.config["setup"] = Clusterer(classname="weka.clusterers.SimpleKMeans")
    flow.actors.append(trainer)
    # pick the built model out of the training container and echo it
    picker = ContainerValuePicker()
    picker.config["value"] = "Model"
    flow.actors.append(picker)
    picker.actors.append(Console())
    writer = ModelWriter()
    writer.config["output"] = str(tempfile.gettempdir()) + os.sep + "simplekmeans.model"
    flow.actors.append(writer)
    # run the flow, reporting setup/execution failures
    msg = flow.setup()
    if msg is None:
        print("\n" + flow.tree + "\n")
        msg = flow.execute()
        if msg is not None:
            print("Error executing flow:\n" + msg)
    else:
        print("Error setting up flow:\n" + msg)
    flow.wrapup()
    flow.cleanup()
示例15: main
def main():
    """
    Just runs some example code: cross-validates an EM clusterer on iris
    (class attribute removed) inside a flow.
    """
    # setup the flow
    helper.print_title("Cross-validate clusterer")
    iris = helper.get_data_dir() + os.sep + "iris.arff"
    flow = Flow(name="cross-validate clusterer")
    filesupplier = FileSupplier()
    filesupplier.config["files"] = [iris]
    flow.actors.append(filesupplier)
    loaddataset = LoadDataset()
    flow.actors.append(loaddataset)
    flter = Filter()
    flter.name = "Remove class"
    # fix: config key was "filter"; every other Filter actor in this file
    # configures its weka filter under the "setup" key
    flter.config["setup"] = filters.Filter(
        classname="weka.filters.unsupervised.attribute.Remove", options=["-R", "last"])
    flow.actors.append(flter)
    cv = CrossValidate()
    cv.config["setup"] = Clusterer(classname="weka.clusterers.EM")
    flow.actors.append(cv)
    console = Console()
    console.config["prefix"] = "Loglikelihood: "
    flow.actors.append(console)
    # run the flow
    msg = flow.setup()
    if msg is None:
        print("\n" + flow.tree + "\n")
        msg = flow.execute()
        if msg is not None:
            print("Error executing flow:\n" + msg)
    else:
        print("Error setting up flow:\n" + msg)
    flow.wrapup()
    flow.cleanup()