This article collects typical usage examples of the weka.core.converters.CSVLoader class in Java. If you are unsure what the CSVLoader class does, how to use it, or are simply looking for working examples, the curated code samples here may help.
The CSVLoader class belongs to the weka.core.converters package. A total of 14 code examples of the CSVLoader class are shown below, ordered by popularity.
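For orientation, the pattern shared by all of the examples below is the same: point a CSVLoader at a CSV source, call getDataSet() to obtain a weka.core.Instances object, and hand that object to the rest of the Weka API. The minimal sketch below illustrates only this core pattern; the file name data.csv and the choice of the last column as the class attribute are assumptions for illustration, not taken from any of the examples.

import java.io.File;
import weka.core.Instances;
import weka.core.converters.CSVLoader;

public class CsvLoaderSketch {
    public static void main(String[] args) throws Exception {
        CSVLoader loader = new CSVLoader();
        loader.setSource(new File("data.csv"));        // hypothetical input file
        Instances data = loader.getDataSet();          // parse the CSV into Instances
        data.setClassIndex(data.numAttributes() - 1);  // optional: treat the last column as the class
        System.out.println(data.numInstances() + " instances, " + data.numAttributes() + " attributes loaded");
    }
}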
Example 1: main
import weka.core.converters.CSVLoader; // import the required package/class
public static void main(String args[]) throws Exception {
    // read the input params
    readParams(args);
    CSVLoader loader = new CSVLoader();
    loader.setSource(inFile);
    Instances data = loader.getDataSet();
    System.setErr(err); // hack to avoid some error messages
    // Create the KMeans object.
    SimpleKMeans kmeans = new SimpleKMeans();
    kmeans.setNumClusters(K);
    kmeans.setMaxIterations(maxIteration);
    kmeans.setPreserveInstancesOrder(true);
    // Perform K-Means clustering.
    try {
        kmeans.buildClusterer(data);
    } catch (Exception ex) {
        System.err.println("Unable to build Clusterer: " + ex.getMessage());
        ex.printStackTrace();
    }
    // print out the cluster centroids
    Instances centroids = kmeans.getClusterCentroids();
    for (int i = 0; i < K; i++) {
        System.out.print("Cluster " + i + " size: " + kmeans.getClusterSizes()[i]);
        System.out.println(" Centroid: " + centroids.instance(i));
    }
    // Print assignments:
    // int[] assignments = kmeans.getAssignments();
    // System.out.println("Length: " + assignments.length);
    // for (int i = 0; i < assignments.length; i++) {
    //     System.out.println(assignments[i]);
    // }
}
Example 2: main
import weka.core.converters.CSVLoader; // import the required package/class
/**
 * Method for testing the class from the command line.
 *
 * @param args The supplied command line arguments.
 */
public static void main(String[] args) {
    if (args.length != 1) {
        System.err.println("Usage: CoverTree <ARFF file>");
        System.exit(-1);
    }
    try {
        Instances insts = null;
        if (args[0].endsWith(".csv")) {
            CSVLoader csv = new CSVLoader();
            csv.setFile(new File(args[0]));
            insts = csv.getDataSet();
        } else {
            insts = new Instances(new BufferedReader(new FileReader(args[0])));
        }
        CoverTree tree = new CoverTree();
        tree.setInstances(insts);
        print("Created data tree:\n");
        print(0, tree.m_Root);
        println("");
    } catch (Exception ex) {
        ex.printStackTrace();
    }
}
Example 3: CSVtoARFF
import weka.core.converters.CSVLoader; // import the required package/class
public static void CSVtoARFF(String csvfilename, String arfffilename) {
    try {
        // load CSV
        CSVLoader loader = new CSVLoader();
        loader.setSource(new File(csvfilename));
        Instances data = loader.getDataSet();
        // save ARFF
        ArffSaver saver = new ArffSaver();
        saver.setInstances(data);
        saver.setFile(new File(arfffilename));
        saver.setDestination(new File(arfffilename));
        saver.writeBatch();
    } catch (Exception e) {
        e.printStackTrace();
    }
}
Example 4: main
import weka.core.converters.CSVLoader; // import the required package/class
/**
 * Converts a CSV training set to ARFF. The input and output file
 * names are hard-coded; the command line arguments are ignored.
 */
public static void main(String[] cmdArgs) throws Exception {
    String[] args = {"trainingSet.csv", "trainingSet.arff"};
    // load CSV
    CSVLoader loader = new CSVLoader();
    loader.setSource(new File(args[0]));
    Instances data = loader.getDataSet();
    // save ARFF
    ArffSaver saver = new ArffSaver();
    saver.setInstances(data);
    File arffFile = new File(args[1]);
    saver.setFile(arffFile);
    // saver.setDestination(arffFile);
    saver.writeBatch();
}
Example 5: loadDataFromCsvFile
import weka.core.converters.CSVLoader; // import the required package/class
/**
 * Returns the data set loaded from the CSV file at the given path.
 *
 * @param path path to the CSV file
 * @return the loaded data set
 */
public static Instances loadDataFromCsvFile(String path) throws IOException {
    CSVLoader loader = new CSVLoader();
    loader.setSource(new File(path));
    Instances data = loader.getDataSet();
    System.out.println("\nHeader of dataset:\n");
    System.out.println(new Instances(data, 0));
    return data;
}
Example 6: loadCSV
import weka.core.converters.CSVLoader; // import the required package/class
/**
 * Loads a CSV file and uses the last attribute as the class attribute.
 *
 * @param path path to the CSV file
 * @return the loaded data as Instances
 * @throws Exception IO error.
 */
public static Instances loadCSV(String path) throws Exception {
    CSVLoader csv = new CSVLoader();
    csv.setSource(new File(path));
    Instances data = csv.getDataSet();
    data.setClassIndex(data.numAttributes() - 1);
    return data;
}
Example 7: generateArff
import weka.core.converters.CSVLoader; // import the required package/class
public void generateArff() throws Exception {
    // Load the tab-separated counts file
    CSVLoader loader = new CSVLoader();
    // Set options
    loader.setNominalAttributes("last");
    loader.setStringAttributes("");
    loader.setMissingValue("?");
    loader.setFieldSeparator("\t");
    loader.setFile(new File(seqConfig.getOutDir().getAbsolutePath() + File.separator + "tmpCounts.mat"));
    Instances data = loader.getDataSet();
    // Set subgroup index
    if (data.classIndex() == -1)
        data.setClassIndex(data.numAttributes() - 1);
    // First, get weight index
    int wInd = data.numAttributes() - 2;
    // Now set weights
    for (int i = 0; i < data.numInstances(); i++) {
        double weight = data.instance(i).value(wInd);
        data.instance(i).setWeight(weight);
    }
    // Now delete the weight attribute
    data.deleteAttributeAt(wInd);
    // Save the arff file
    ArffSaver saver = new ArffSaver();
    saver.setFile(new File(seqConfig.getOutDir().getAbsolutePath() + File.separator + seqConfig.getArffOutName()));
    saver.setInstances(data);
    saver.writeBatch();
}
Example 8: setInstancesFromFile
import weka.core.converters.CSVLoader; // import the required package/class
/**
 * Loads results from a set of instances contained in the supplied file.
 *
 * @param f a value of type 'File'
 */
protected void setInstancesFromFile(File f) {
    String fileType = f.getName();
    try {
        m_FromLab.setText("Reading from file...");
        if (f.getName().toLowerCase().endsWith(Instances.FILE_EXTENSION)) {
            fileType = "arff";
            Reader r = new BufferedReader(new FileReader(f));
            setInstances(new Instances(r));
            r.close();
        } else if (f.getName().toLowerCase().endsWith(CSVLoader.FILE_EXTENSION)) {
            fileType = "csv";
            CSVLoader cnv = new CSVLoader();
            cnv.setSource(f);
            Instances inst = cnv.getDataSet();
            setInstances(inst);
        } else {
            throw new Exception("Unrecognized file type");
        }
    } catch (Exception ex) {
        m_FromLab.setText("File '" + f.getName() + "' not recognised as an "
            + fileType + " file.");
        if (JOptionPane.showOptionDialog(ResultsPanel.this,
            "File '" + f.getName() + "' not recognised as an " + fileType
                + " file.\n" + "Reason:\n" + ex.getMessage(), "Load Instances", 0,
            JOptionPane.ERROR_MESSAGE, null, new String[] { "OK" }, null) == 1) {
        }
    }
}
Example 9: CSVToARFF
import weka.core.converters.CSVLoader; // import the required package/class
public static void CSVToARFF(File input, File output) throws IOException {
    CSVLoader csvDataset = new CSVLoader();
    csvDataset.setSource(input);
    Instances arffDataset = csvDataset.getDataSet();
    ArffSaver saver = new ArffSaver();
    saver.setInstances(arffDataset);
    saver.setFile(output);
    saver.writeBatch();
}
Example 10: trainClassifier
import weka.core.converters.CSVLoader; // import the required package/class
public void trainClassifier(Classifier classifier, File trainingDataset,
        FileOutputStream trainingModel, Integer crossValidationFoldNumber) throws Exception {
    CSVLoader csvLoader = new CSVLoader();
    csvLoader.setSource(trainingDataset);
    Instances instances = csvLoader.getDataSet();
    switch (classifier) {
        case KNN:
            int K = (int) Math.ceil(Math.sqrt(instances.numInstances()));
            this.classifier = new IBk(K);
            break;
        case NB:
            this.classifier = new NaiveBayes();
    }
    if (instances.classIndex() == -1) {
        instances.setClassIndex(instances.numAttributes() - 1);
    }
    this.classifier.buildClassifier(instances);
    if (crossValidationFoldNumber > 0) {
        Evaluation evaluation = new Evaluation(instances);
        evaluation.crossValidateModel(this.classifier, instances, crossValidationFoldNumber, new Random(1));
        kappa = evaluation.kappa();
        fMeasure = evaluation.weightedFMeasure();
        confusionMatrix = evaluation.toMatrixString("Confusion matrix: ");
    }
    ObjectOutputStream outputStream = new ObjectOutputStream(trainingModel);
    outputStream.writeObject(this.classifier);
    outputStream.flush();
    outputStream.close();
}
Example 11: parse
import weka.core.converters.CSVLoader; // import the required package/class
@Override
public List<ComplexDataObject> parse(String filename) throws IOException {
    CSVLoader loader = new CSVLoader();
    loader.setSource(new File(filename));
    Instances instances = loader.getDataSet();
    List<ComplexDataObject> data = new ArrayList<>();
    // Step 1: create metaMapping
    Map<Integer, Entry<String, Class<?>>> metaMapping = WekaTools.getAttributeSchema(instances);
    // Step 2: create ComplexDataObjects (zeile = row, spalte = column)
    for (int zeile = 0; zeile < instances.numInstances(); zeile++) {
        Instance instance = instances.instance(zeile);
        ComplexDataObject complexDataObject = new ComplexDataObject();
        // parse columns
        for (Integer spalte = 0; spalte < instances.numAttributes(); spalte++) {
            Entry<String, ?> entry = WekaTools.assignEntry(metaMapping, instance, spalte, missingValueIndicator);
            if (entry != null) {
                if (entry.getValue() != null && entry.getValue() instanceof String) {
                    Date date = ParserTools.parseDate((String) entry.getValue());
                    if (date != null)
                        complexDataObject.add(entry.getKey(), date);
                    else
                        complexDataObject.add(entry.getKey(), entry.getValue());
                } else
                    complexDataObject.add(entry.getKey(), entry.getValue());
            } else
                throw new NullArgumentException();
        }
        data.add(complexDataObject);
    }
    return data;
}
Example 12: setInstancesFromFile
import weka.core.converters.CSVLoader; // import the required package/class
/**
 * Loads results from a set of instances contained in the supplied file.
 *
 * @param f a value of type 'File'
 */
protected void setInstancesFromFile(File f) {
    String fileType = f.getName();
    try {
        m_FromLab.setText("Reading from file...");
        if (f.getName().toLowerCase().endsWith(Instances.FILE_EXTENSION)) {
            fileType = "arff";
            Reader r = new BufferedReader(new FileReader(f));
            setInstances(new Instances(r));
            r.close();
        } else if (f.getName().toLowerCase().endsWith(CSVLoader.FILE_EXTENSION)) {
            fileType = "csv";
            CSVLoader cnv = new CSVLoader();
            cnv.setSource(f);
            Instances inst = cnv.getDataSet();
            setInstances(inst);
        } else {
            throw new Exception("Unrecognized file type");
        }
    } catch (Exception ex) {
        m_FromLab.setText("File '" + f.getName() + "' not recognised as an "
            + fileType + " file.");
        if (JOptionPane.showOptionDialog(ResultsPanel.this,
            "File '" + f.getName()
                + "' not recognised as an "
                + fileType + " file.\n"
                + "Reason:\n" + ex.getMessage(),
            "Load Instances",
            0,
            JOptionPane.ERROR_MESSAGE,
            null,
            new String[] {"OK"},
            null) == 1) {
        }
    }
}
Example 13: setInstancesFromFile
import weka.core.converters.CSVLoader; // import the required package/class
/**
 * Loads results from a set of instances contained in the supplied file.
 *
 * @param f a value of type 'File'
 */
protected void setInstancesFromFile(File f) {
    String fileType = f.getName();
    try {
        m_FromLab.setText(Messages.getInstance().getString("ResultsPanel_SetInstancesFromFile_FromLab_Text"));
        if (f.getName().toLowerCase().endsWith(Instances.FILE_EXTENSION)) {
            fileType = "arff";
            Reader r = new BufferedReader(new FileReader(f));
            setInstances(new Instances(r));
            r.close();
        } else if (f.getName().toLowerCase().endsWith(CSVLoader.FILE_EXTENSION)) {
            fileType = "csv";
            CSVLoader cnv = new CSVLoader();
            cnv.setSource(f);
            Instances inst = cnv.getDataSet();
            setInstances(inst);
        } else {
            throw new Exception(Messages.getInstance().getString("ResultsPanel_SetInstancesFromFile_Error_Text"));
        }
    } catch (Exception ex) {
        m_FromLab.setText(Messages.getInstance().getString("ResultsPanel_SetInstancesFromFile_Error_FromLab_Text_First") + f.getName()
            + Messages.getInstance().getString("ResultsPanel_SetInstancesFromFile_Error_FromLab_Text_Second")
            + fileType + Messages.getInstance().getString("ResultsPanel_SetInstancesFromFile_Error_FromLab_Text_Third"));
        if (JOptionPane.showOptionDialog(ResultsPanel.this,
            Messages.getInstance().getString("ResultsPanel_SetInstancesFromFile_Error_JOptionPaneShowOptionDialog_Text_First") + f.getName()
                + Messages.getInstance().getString("ResultsPanel_SetInstancesFromFile_Error_JOptionPaneShowOptionDialog_Text_Second")
                + fileType + Messages.getInstance().getString("ResultsPanel_SetInstancesFromFile_Error_JOptionPaneShowOptionDialog_Text_Third")
                + Messages.getInstance().getString("ResultsPanel_SetInstancesFromFile_Error_JOptionPaneShowOptionDialog_Text_Fourth") + ex.getMessage(),
            Messages.getInstance().getString("ResultsPanel_SetInstancesFromFile_Error_JOptionPaneShowOptionDialog_Text_Fifth"),
            0,
            JOptionPane.ERROR_MESSAGE,
            null,
            new String[] {Messages.getInstance().getString("ResultsPanel_SetInstancesFromFile_Error_JOptionPaneShowOptionDialog_Text_Sixth")},
            null) == 1) {
        }
    }
}
Example 14: getNomNumMap
import weka.core.converters.CSVLoader; // import the required package/class
public Map<String, VariableType> getNomNumMap(File trainFile) throws Exception {
    // Read at most the first 50 lines of the training file as a sample
    StringBuilder sb = new StringBuilder();
    BufferedReader br = new BufferedReader(new FileReader(trainFile));
    int maxLines = 50, lineCounter = 0;
    String line;
    while ((line = br.readLine()) != null && lineCounter < maxLines) {
        sb.append(line).append("\n");
        lineCounter++;
    }
    br.close();
    // Let CSVLoader infer the attribute types from the sample
    Map<String, VariableType> nomNumMap = new HashMap<String, VariableType>();
    CSVLoader csvLoader = new CSVLoader();
    csvLoader.setSource(new ByteArrayInputStream(sb.toString().getBytes()));
    Instances wekaData = csvLoader.getDataSet();
    List<Attribute> attributes = Collections.list(wekaData.enumerateAttributes());
    for (Attribute attribute : attributes) {
        if (attribute.isNumeric() || attribute.isDate()) {
            nomNumMap.put(attribute.name(), VariableType.NUMERIC);
        } else if (attribute.isNominal() || attribute.isString()) {
            nomNumMap.put(attribute.name(), VariableType.CATEGORICAL);
        } else {
            new RuntimeException(this.getClass().getName() + ": impl me").printStackTrace();
        }
    }
    return nomNumMap;
}