This page collects typical usage examples of the Python function weka.core.jvm.stop. If you have been wondering what the stop function does, how to use it, or where to find concrete examples, the curated code samples below may help.
Shown below are 15 code examples of the stop function, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
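Before the individual examples, it helps to see the canonical pattern they all build on: start the JVM once per process, do the Weka work, and call jvm.stop() in a finally block so it runs even when an error occurs. The following is a minimal sketch assuming python-weka-wrapper3; my_dataset.arff is a placeholder path.

import weka.core.jvm as jvm
from weka.core.converters import Loader

jvm.start(packages=True)  # start the JVM once per process
try:
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file("my_dataset.arff")  # placeholder dataset
    print(data.num_instances)
except Exception as e:
    print(e)
finally:
    jvm.stop()  # the JVM cannot be restarted once stopped

Note that jvm.stop() is final: once the JVM has been shut down it cannot be restarted in the same Python process, which is why several of the examples below guard or merge their start/stop calls.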
Example 1: start_search
def start_search(file_name, type):
    start = time.clock()  # time preprocessing
    tile_set, characteristic, nmrClass = read_file(file_name)
    calculate_char_heuristic(tile_set, characteristic)  # do before adding the placeholder tiles
    tile_set = generate_placeholders(tile_set, characteristic, nmrClass)  # gets placeholder tiles
    # kill the jvm that was started by read_file; the jvm is only needed
    # so that weka can run the machine-learning filtering
    jvm.stop()
    calculate_order_heuristic(tile_set)
    # add up the heuristic from all tiles and make the starting node
    heuristic_val = 0
    for tile in tile_set:
        heuristic_val += tile.heuristic_cost
        # print tile.heuristic_order_cost,
        # print tile.get_tile()
    root = Node(tile_set, [], heuristic_val, characteristic, 0, 0, heuristic=heuristic_val)  # makes start state for search
    end = time.clock()  # time preprocessing
    print("Preprocessing Time: " + str(end - start))
    # pick the algorithm
    if int(type) == 0:  # uniform cost search
        best_solution, node_count = aStar([root])
        output_soultion(best_solution, node_count)
    elif int(type) == 1:  # puzzle building
        best_solution = puzzle_building_search([root])
Example 2: main
def main():
    """
    Runs a data generator from the command-line. Calls JVM start/stop automatically.
    Use -h to see all options.
    """
    parser = argparse.ArgumentParser(
        description='Executes a data generator from the command-line. Calls JVM start/stop automatically.')
    parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
    parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
    parser.add_argument("datagenerator", help="data generator classname, e.g., "
                                              + "weka.datagenerators.classifiers.classification.LED24")
    parser.add_argument("option", nargs=argparse.REMAINDER, help="additional data generator options")
    parsed = parser.parse_args()
    jars = []
    if parsed.classpath is not None:
        jars = parsed.classpath.split(os.pathsep)
    jvm.start(jars, max_heap_size=parsed.heap, packages=True)
    logger.debug("Commandline: " + join_options(sys.argv[1:]))
    try:
        generator = DataGenerator(classname=parsed.datagenerator)
        if len(parsed.option) > 0:
            generator.options = parsed.option
        DataGenerator.make_data(generator, parsed.option)
    except Exception as e:
        print(e)
    finally:
        jvm.stop()
Example 3: main
def main():
    """
    Runs an associator from the command-line. Calls JVM start/stop automatically.
    Use -h to see all options.
    """
    parser = argparse.ArgumentParser(
        description='Executes an associator from the command-line. Calls JVM start/stop automatically.')
    parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
    parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
    parser.add_argument("-t", metavar="train", dest="train", required=True, help="training set file")
    parser.add_argument("associator", help="associator classname, e.g., weka.associations.Apriori")
    parser.add_argument("option", nargs=argparse.REMAINDER, help="additional associator options")
    parsed = parser.parse_args()
    jars = []
    if parsed.classpath is not None:
        jars = parsed.classpath.split(os.pathsep)
    jvm.start(jars, max_heap_size=parsed.heap, packages=True)
    logger.debug("Commandline: " + join_options(sys.argv[1:]))
    try:
        associator = Associator(classname=parsed.associator)
        if len(parsed.option) > 0:
            associator.options = parsed.option
        loader = converters.loader_for_file(parsed.train)
        data = loader.load_file(parsed.train)
        associator.build_associations(data)
        print(str(associator))
    except Exception as e:
        print(e)
    finally:
        jvm.stop()
Example 4: stop
def stop():
    """
    Stop a weka connection.
    May be called multiple times, but note that a new connection
    cannot be started after calling this.
    """
    if MODULE_SUPPORTED:
        jvm.stop()
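The MODULE_SUPPORTED flag is defined elsewhere in that project; a plausible minimal sketch of the import-guard pattern behind it (the flag name and structure here are assumptions, not the project's actual code) would be:

try:
    import weka.core.jvm as jvm
    MODULE_SUPPORTED = True  # python-weka-wrapper is importable
except ImportError:
    MODULE_SUPPORTED = False  # weka support unavailable; stop() becomes a no-op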
Example 5: main
def main():
    """
    Runs a clusterer from the command-line. Calls JVM start/stop automatically.
    Use -h to see all options.
    """
    parser = argparse.ArgumentParser(
        description='Performs clustering from the command-line. Calls JVM start/stop automatically.')
    parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
    parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
    parser.add_argument("-t", metavar="train", dest="train", required=True, help="training set file")
    parser.add_argument("-T", metavar="test", dest="test", help="test set file")
    parser.add_argument("-d", metavar="outmodel", dest="outmodel", help="model output file name")
    parser.add_argument("-l", metavar="inmodel", dest="inmodel", help="model input file name")
    parser.add_argument("-p", metavar="attributes", dest="attributes", help="attribute range")
    parser.add_argument("-x", metavar="num folds", dest="numfolds", help="number of folds")
    parser.add_argument("-s", metavar="seed", dest="seed", help="seed value for randomization")
    parser.add_argument("-c", metavar="class index", dest="classindex", help="1-based class attribute index")
    parser.add_argument("-g", metavar="graph", dest="graph", help="graph output file (if supported)")
    parser.add_argument("clusterer", help="clusterer classname, e.g., weka.clusterers.SimpleKMeans")
    parser.add_argument("option", nargs=argparse.REMAINDER, help="additional clusterer options")
    parsed = parser.parse_args()
    jars = []
    if parsed.classpath is not None:
        jars = parsed.classpath.split(os.pathsep)
    params = []
    if parsed.train is not None:
        params.extend(["-t", parsed.train])
    if parsed.test is not None:
        params.extend(["-T", parsed.test])
    if parsed.outmodel is not None:
        params.extend(["-d", parsed.outmodel])
    if parsed.inmodel is not None:
        params.extend(["-l", parsed.inmodel])
    if parsed.attributes is not None:
        params.extend(["-p", parsed.attributes])
    if parsed.numfolds is not None:
        params.extend(["-x", parsed.numfolds])
    if parsed.seed is not None:
        params.extend(["-s", parsed.seed])
    if parsed.classindex is not None:
        params.extend(["-c", parsed.classindex])
    if parsed.graph is not None:
        params.extend(["-g", parsed.graph])
    jvm.start(jars, max_heap_size=parsed.heap, packages=True)
    logger.debug("Commandline: " + join_options(sys.argv[1:]))
    try:
        clusterer = Clusterer(classname=parsed.clusterer)
        if len(parsed.option) > 0:
            clusterer.options = parsed.option
        print(ClusterEvaluation.evaluate_clusterer(clusterer, params))
    except Exception as e:
        print(e)
    finally:
        jvm.stop()
Example 6: start_search
def start_search(file_name, type):
    tile_set, characteristic, nmrClass = read_file(file_name)  # gets data from file
    tile_set = generate_placeholders(tile_set, characteristic, nmrClass)  # gets place holder tiles
    jvm.stop()
    root = Node(tile_set, [], 0.0, characteristic, 0, 0)  # makes start state for search
    # picks algorithm
    if int(type) == 0:  # uniform cost search
        best_solution, node_count = uniform_cost([root])
        output_soultion(best_solution, node_count)
    elif int(type) == 1:  # puzzle building
        best_solution = puzzle_building_search([root])
Example 7: generate_folds
def generate_folds(dataset_path, output_folder, n_folds=10, random_state=None):
    """
    Given a dataset, generate n_folds for it and store them in <output_folder>/<dataset_name>.
    :type dataset_path: str
    :param dataset_path: Path to dataset with .arff file extension (e.g., my_dataset.arff)
    :type output_folder: str
    :param output_folder: Path to store both the index file with folds and the fold files.
    :type n_folds: int
    :param n_folds: Optional - Number of folds to split the dataset into. Defaults to 10.
    :type random_state: int
    :param random_state: Optional - Seed to use in the splitting process. Defaults to None (no seed).
    """
    import warnings
    warnings.filterwarnings('error')
    dataset_name = dataset_path.split('/')[-1].split('.')[0]
    af = load_arff(dataset_path)
    df = load_dataframe(af)
    skf = StratifiedKFold(n_splits=n_folds, shuffle=True, random_state=random_state)
    fold_iter = skf.split(df[df.columns[:-1]], df[df.columns[-1]])
    fold_index = dict()
    jvm.start()
    csv_loader = Loader(classname="weka.core.converters.CSVLoader")
    arff_saver = Saver(classname='weka.core.converters.ArffSaver')
    for i, (arg_rest, arg_test) in enumerate(fold_iter):
        fold_index[i] = list(arg_test)
        _temp_path = 'temp_%s_%d.csv' % (dataset_name, i)
        fold_data = df.loc[arg_test]  # type: pd.DataFrame
        fold_data.to_csv(_temp_path, sep=',', index=False)
        java_arff_dataset = csv_loader.load_file(_temp_path)
        java_arff_dataset.relationname = af['relation']
        java_arff_dataset.class_is_last()
        arff_saver.save_file(java_arff_dataset, os.path.join(output_folder, '%s_fold_%d.arff' % (dataset_name, i)))
        os.remove(_temp_path)
    json.dump(
        fold_index, open(os.path.join(output_folder, dataset_name + '.json'), 'w'), indent=2
    )
    jvm.stop()
    warnings.filterwarnings('default')
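A hypothetical call (the paths below are placeholders, not from the original project) would write ten stratified .arff folds plus a JSON index of the test rows per fold:

generate_folds('datasets/iris.arff', 'folds', n_folds=10, random_state=42)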
Example 8: assign_classify
def assign_classify(file_location, output="classified.out", model="naivebayes.model"):
    data = read_csv_file(file_location)
    jvm.start()
    # load the serialized classifier
    obj = serialization.read(model)
    classifier = Classifier(jobject=obj)
    # create file with cluster group
    with open(output, 'w') as cluster_file:
        for index, attrs in enumerate(data):
            inst = Instance.create_instance(attrs[1:])
            pred = classifier.classify_instance(inst)
            print(str(index + 1) + ": label index=" + str(pred))
    jvm.stop()
Example 9: playback_speed_checker
def playback_speed_checker(inputFile, dirRef):
    TRAINING_ARFF = 'dataset_playback.arff'
    inputRef = ""
    # Start JVM (the original issued three separate jvm.start() calls; only the
    # first one takes effect, so the options are merged into a single call)
    jvm.start(system_cp=True, packages=True, max_heap_size="512m")
    # Find reference file
    for file in os.listdir(dirRef):
        if str(file).find(str(os.path.basename(inputFile))) != -1:
            inputRef = os.path.join(dirRef, file)
            break
    # Calculate distance
    (result, distance) = dtw_checker(inputFile, inputRef)
    # Load data
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file(TRAINING_ARFF)
    data.class_is_last()  # set class attribute
    # Train the classifier
    #cls = Classifier(classname="weka.classifiers.functions.SMO")
    cls = Classifier(classname="weka.classifiers.trees.J48", options=["-C", "0.3", "-M", "10"])
    cls.build_classifier(data)
    # Build the instance to classify (numpy.array(distance), not numpy.ndarray(distance),
    # which would have interpreted the argument as an array *shape*)
    speed_instance = Instance.create_instance(numpy.array(distance), classname='weka.core.DenseInstance', weight=1.0)
    speed_instance.dataset = data
    # Classify instance
    speed_flag = cls.classify_instance(speed_instance)
    if distance == 0:
        speed_class = 'nominal'
    else:
        if speed_flag == 0:
            speed_class = 'down_speed'
        else:  # the original tested speed_flag == 0 twice, overwriting 'down_speed'
            speed_class = 'up_speed'
    # print os.path.basename(inputFile) + ' --- ' + speed_class
    # Stop JVM
    jvm.stop()
    print("SPEED IS: " + speed_class)
    return speed_class
Example 10: predict
def predict(attributes):
    jvm.start()
    file_path = print_to_file(attributes)
    # load the saved model
    objects = serialization.read_all("/Users/hosyvietanh/Desktop/data_mining/trained_model.model")
    classifier = Classifier(jobject=objects[0])
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file(file_path)
    data.class_is_last()
    prediction = None
    for index, inst in enumerate(data):
        pred = classifier.classify_instance(inst)
        dist = classifier.distribution_for_instance(inst)
        prediction = int(pred)
        break  # only the first instance is classified
    # the original returned from inside the loop, so this jvm.stop() was unreachable
    jvm.stop()
    return prediction
Example 11: query_instance
def query_instance(attributes, model="out.model"):
    """
    Get the cluster for the given attributes.
    :param attributes: array or list
    :returns: cluster id
    """
    jvm.start()
    # create the instance from raw values (create_instance builds it from a list;
    # the Instance constructor itself expects a wrapped Java object)
    inst = Instance.create_instance(attributes)
    # load the serialized clusterer and get the cluster id
    obj = serialization.read(model)
    cluster = Clusterer(jobject=obj)
    cluster_id = cluster.cluster_instance(inst)
    jvm.stop()
    return cluster_id
Example 12: main
def main():
    """
    Runs attribute selection from the command-line. Calls JVM start/stop automatically.
    Use -h to see all options.
    """
    parser = argparse.ArgumentParser(
        description='Performs attribute selection from the command-line. Calls JVM start/stop automatically.')
    parser.add_argument("-j", metavar="classpath", dest="classpath", help="additional classpath, jars/directories")
    parser.add_argument("-X", metavar="heap", dest="heap", help="max heap size for jvm, e.g., 512m")
    parser.add_argument("-i", metavar="input", dest="input", required=True, help="input file")
    parser.add_argument("-c", metavar="class index", dest="classindex", help="1-based class attribute index")
    parser.add_argument("-s", metavar="search", dest="search", help="search method, classname and options")
    parser.add_argument("-x", metavar="num folds", dest="numfolds", help="number of folds")
    parser.add_argument("-n", metavar="seed", dest="seed", help="the seed value for randomization")
    parser.add_argument("evaluator", help="evaluator classname, e.g., weka.attributeSelection.CfsSubsetEval")
    parser.add_argument("option", nargs=argparse.REMAINDER, help="additional evaluator options")
    parsed = parser.parse_args()
    jars = []
    if parsed.classpath is not None:
        jars = parsed.classpath.split(os.pathsep)
    params = []
    if parsed.input is not None:
        params.extend(["-i", parsed.input])
    if parsed.classindex is not None:
        params.extend(["-c", parsed.classindex])
    if parsed.search is not None:
        params.extend(["-s", parsed.search])
    if parsed.numfolds is not None:
        params.extend(["-x", parsed.numfolds])
    if parsed.seed is not None:
        params.extend(["-n", parsed.seed])
    jvm.start(jars, max_heap_size=parsed.heap, packages=True)
    logger.debug("Commandline: " + join_options(sys.argv[1:]))
    try:
        evaluation = ASEvaluation(classname=parsed.evaluator)
        if len(parsed.option) > 0:
            evaluation.options = parsed.option
        print(AttributeSelection.attribute_selection(evaluation, params))
    except Exception as e:
        print(e)
    finally:
        jvm.stop()
Example 13: dict2arff
def dict2arff(self, fileIn, fileOut):
    '''
    :param fileIn: name of csv file
    :param fileOut: name of new arff file
    :return:
    '''
    dataIn = os.path.join(self.dataDir, fileIn)
    dataOut = os.path.join(self.dataDir, fileOut)
    logger.info('[%s] : [INFO] Starting conversion of %s to %s',
                datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), dataIn, dataOut)
    try:
        jvm.start()
        convertCsvtoArff(dataIn, dataOut)
    except Exception as inst:
        # log the failure here; the original logged it in the finally block,
        # where `inst` is undefined when no exception occurred
        logger.error('[%s] : [ERROR] Exception occurred while converting to arff with %s and %s',
                     datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), type(inst), inst.args)
    finally:
        jvm.stop()
    logger.info('[%s] : [INFO] Finished conversion of %s to %s',
                datetime.fromtimestamp(time.time()).strftime('%Y-%m-%d %H:%M:%S'), dataIn, dataOut)
Example 14: riaa_checker
def riaa_checker(inputFile):
    # raw string so the backslashes in the Windows path are not treated as escapes
    TRAINING_ARFF = r'C:\Users\ASUS\Desktop\IGNASI\SMC\Workspace\dataset_riaa.arff'
    # Start JVM (the original issued three separate jvm.start() calls; only the
    # first one takes effect, so the options are merged into a single call)
    jvm.start(system_cp=True, packages=True, max_heap_size="512m")
    # Calculate bark band information
    (absolute_bark, relative_bark, bark_ratios) = compute_bark_spectrum(inputFile)
    # Load data
    loader = Loader(classname="weka.core.converters.ArffLoader")
    data = loader.load_file(TRAINING_ARFF)
    data.class_is_last()  # set class attribute
    # Train the classifier
    cls = Classifier(classname="weka.classifiers.functions.SMO")
    #cls = Classifier(classname="weka.classifiers.trees.J48", options=["-C", "0.3", "-M", "10"])
    cls.build_classifier(data)
    # Build and classify the instance
    bark_instance = Instance.create_instance(bark_ratios, classname='weka.core.DenseInstance', weight=1.0)
    bark_instance.dataset = data
    riaa_flag = cls.classify_instance(bark_instance)
    if riaa_flag == 0:
        riaa_class = 'riaa_ok'
    else:
        riaa_class = 'riaa_ko'
    # print os.path.basename(inputFile) + ' --- ' + riaa_class
    # Stop JVM
    jvm.stop()
    print("RIAA FILTERING?: " + riaa_class)
    return riaa_class
Example 15: batch_riaa_checking
def batch_riaa_checking(inputDir):
    # Start JVM (the original issued three separate jvm.start() calls; only the
    # first one takes effect, so the options are merged into a single call)
    jvm.start(system_cp=True, packages=True, max_heap_size="512m")
    riaa_ok = 0
    riaa_ko = 0
    for file in os.listdir(inputDir):
        if file.endswith(".wav"):
            # note: riaa_checker() (Example 14) stops the JVM itself, and a stopped
            # JVM cannot be restarted in-process, so only the first file succeeds
            riaa_flag = riaa_checker(os.path.join(inputDir, file))
            if riaa_flag == 'riaa_ko':
                riaa_ko += 1
            if riaa_flag == 'riaa_ok':
                riaa_ok += 1
    # Stop JVM
    jvm.stop()
    return (riaa_ko, riaa_ok)