本文整理汇总了Java中net.sf.javaml.core.Dataset.noAttributes方法的典型用法代码示例。如果您正苦于以下问题:Java Dataset.noAttributes方法的具体用法?Java Dataset.noAttributes怎么用?Java Dataset.noAttributes使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类net.sf.javaml.core.Dataset
的用法示例。
在下文中一共展示了Dataset.noAttributes方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: build
import net.sf.javaml.core.Dataset; //导入方法依赖的package包/类
@Override
public void build(Dataset data) {
    weights = new double[data.noAttributes()];
    /* Rescale every attribute into the [0,1] interval before weighting */
    NormalizeMidrange dnm = new NormalizeMidrange(0.5, 1);
    dnm.filter(data);
    /* One weight-update pass per instance in the data set */
    final int passes = data.size();
    for (int pass = 0; pass < passes; pass++) {
        /* Pick a random instance and locate its nearest hit/miss */
        Instance sample = data.instance(rg.nextInt(data.size()));
        findNearest(data, sample);
        /* Reward attributes that separate classes, punish those that do not */
        for (int att = 0; att < weights.length; att++)
            weights[att] = weights[att] - diff(att, sample, nearestHit) / passes
                    + diff(att, sample, nearestMiss) / passes;
    }
}
示例2: build
import net.sf.javaml.core.Dataset; //导入方法依赖的package包/类
@Override
public void build(Dataset data) {
    final int attributeCount = data.noAttributes();
    /* rankSums[i] accumulates the rank of attribute i over all members */
    double[] rankSums = new double[attributeCount];
    for (FeatureRanking ranker : aes) {
        /* Draw a bootstrap sample (with replacement) of the original size */
        Dataset bootstrap = new DefaultDataset();
        while (bootstrap.size() < data.size())
            bootstrap.add(data.get(rg.nextInt(data.size())));
        /* Rank on a copy so the member cannot modify the bootstrap sample */
        ranker.build(bootstrap.copy());
        for (int att = 0; att < attributeCount; att++)
            rankSums[att] += ranker.rank(att);
    }
    /* Convert the accumulated rank sums into the final consensus ranking */
    toRank(rankSums);
}
示例3: build
import net.sf.javaml.core.Dataset; //导入方法依赖的package包/类
@Override
public void build(Dataset data) {
    /*
     * Asking for more attributes than the data set has: simply keep every
     * attribute.
     */
    if (n > data.noAttributes()) {
        selectedAttributes = data.get(0).keySet();
        return;
    }
    /*
     * Greedy forward selection: keep adding the best remaining attribute
     * until n attributes have been selected.
     */
    Instance classInstance = DatasetTools.createInstanceFromClass(data);
    selectedAttributes = new HashSet<Integer>();
    while (selectedAttributes.size() < n)
        selectNext(data, classInstance);
}
示例4: selectNext
import net.sf.javaml.core.Dataset; //导入方法依赖的package包/类
/**
 * Adds to {@code selectedAttributes} the index of the best not-yet-selected
 * attribute, where "best" is judged by measuring each attribute (viewed as an
 * instance over the data set) against the class instance with {@code dm}.
 *
 * @param data
 *            data set to pick the next attribute from
 * @param classInstance
 *            instance representation of the class labels
 */
private void selectNext(Dataset data, Instance classInstance) {
    int bestIndex = -1;
    double bestScore = Double.NaN;
    for (int i = 0; i < data.noAttributes(); i++) {
        if (!selectedAttributes.contains(i)) {
            /* Attribute i as a vector of its values over all instances */
            Instance attributeInstance = DatasetTools.createInstanceFromAttribute(data, i);
            double score = dm.measure(attributeInstance, classInstance);
            /* First non-NaN score initializes the running best */
            if (!Double.isNaN(score) && bestIndex == -1) {
                bestIndex = i;
                bestScore = score;
            } else {
                /* dm.compare decides whether 'score' beats 'bestScore' */
                if (!Double.isNaN(score) && dm.compare(score, bestScore)) {
                    bestIndex = i;
                    bestScore = score;
                }
            }
        }
    }
    /*
     * NOTE(review): if every candidate score is NaN, bestIndex stays -1 and
     * -1 is added to the set (which also makes the caller's while-loop
     * progress) — presumably intentional as a sentinel; verify callers
     * tolerate a -1 entry.
     */
    selectedAttributes.add(bestIndex);
}
示例5: cluster
import net.sf.javaml.core.Dataset; //导入方法依赖的package包/类
/**
 * Clusters the data with a self-organizing map (JSom): trains a grid of
 * weight vectors, labels each instance with its best-matching unit, and
 * returns the non-empty clusters.
 *
 * @param data
 *            data set to cluster
 * @return one Dataset per non-empty map unit
 */
@Override
public Dataset[] cluster(Dataset data) {
    // grid topology: "hexa" || "rect"
    wV = new WeightVectors(xdim, ydim, data.noAttributes(), gridType.toString());
    InputVectors iV = convertDataset(data);
    JSomTraining jst = new JSomTraining(iV);
    // learning type: "exponential" || "inverse" || "linear"
    // neighbourhood function: "gaussian" || "step"
    jst.setTrainingInstructions(iterations, learningRate, initialRadius, learningType.toString(),
            neighbourhoodFunction.toString());
    // WeightVectors out = jst.doTraining();
    jst.doTraining();
    /* One (initially empty) cluster per map unit */
    Vector<Dataset> clusters = new Vector<Dataset>();
    for (int i = 0; i < wV.size(); i++) {
        clusters.add(new DefaultDataset());
    }
    /* Assign every instance to the cluster of its best-matching unit */
    wV = doLabeling(wV, iV, data, clusters);
    // Filter empty clusters out;
    int nonEmptyClusterCount = 0;
    for (int i = 0; i < clusters.size(); i++) {
        if (clusters.get(i).size() > 0)
            nonEmptyClusterCount++;
    }
    Dataset[] output = new Dataset[nonEmptyClusterCount];
    int index = 0;
    for (Dataset tmp : clusters) {
        if (tmp.size() > 0) {
            output[index] = tmp;
            index++;
        }
    }
    return output;
}
示例6: buildClassifier
import net.sf.javaml.core.Dataset; //导入方法依赖的package包/类
@Override
public void buildClassifier(Dataset data) {
    /* Keep a reference to the training data for later lookups */
    this.training = data;
    /* Index every instance in a KD-tree keyed on its attribute vector */
    tree = new KDTree(data.noAttributes());
    for (Instance inst : data)
        tree.insert(InstanceTools.array(inst), inst);
}
示例7: average
import net.sf.javaml.core.Dataset; //导入方法依赖的package包/类
/**
 * Creates an instance that holds, for every attribute, the mean of that
 * attribute's values over all instances in the data set.
 *
 * @param data
 *            data set to calculate average attribute values for
 * @return Instance representing the average attribute values
 */
public static Instance average(Dataset data) {
    final int attributeCount = data.noAttributes();
    final int instanceCount = data.size();
    double[] means = new double[attributeCount];
    for (int att = 0; att < attributeCount; att++) {
        double total = 0;
        for (int row = 0; row < instanceCount; row++)
            total += data.get(row).value(att);
        means[att] = total / instanceCount;
    }
    return new DenseInstance(means);
}
示例8: percentile
import net.sf.javaml.core.Dataset; //导入方法依赖的package包/类
/**
 * Creates an instance that holds, for every attribute, the requested
 * percentile of that attribute's values over the data set.
 *
 * @param data
 *            data set to calculate percentile for
 * @param perc
 *            percentile to calculate, Q1=25, Q2=median=50, Q3=75
 * @return Instance of per-attribute percentile values
 */
public static Instance percentile(Dataset data, double perc) {
    final int attributeCount = data.noAttributes();
    final int instanceCount = data.size();
    double[] result = new double[attributeCount];
    for (int att = 0; att < attributeCount; att++) {
        /* Collect all values of this attribute into a flat array */
        double[] values = new double[instanceCount];
        for (int row = 0; row < instanceCount; row++)
            values[row] = data.get(row).value(att);
        result[att] = StatUtils.percentile(values, perc);
    }
    return new DenseInstance(result);
}
示例9: build
import net.sf.javaml.core.Dataset; //导入方法依赖的package包/类
/**
 * Builds the wrapped attribute evaluator and converts its scores into a
 * ranking, where the attribute with the highest score receives rank 0.
 *
 * @param data
 *            data set to rank the attributes of
 */
public void build(Dataset data) {
    int attributeCount = data.noAttributes();
    ae.build(data);
    double[] scores = new double[attributeCount];
    for (int att = 0; att < scores.length; att++)
        scores[att] = ae.score(att);
    ranking = new int[scores.length];
    /* sort ascending, then invert positions so the best score gets rank 0 */
    int[] ascending = ArrayUtils.sort(scores);
    for (int pos = 0; pos < ascending.length; pos++) {
        ranking[ascending[pos]] = ascending.length - pos - 1;
    }
}
示例10: build
import net.sf.javaml.core.Dataset; //导入方法依赖的package包/类
/**
 * Discretizes every attribute of the data set into {@code bins} equal-width
 * bins and stores the result as the training data. After binning, every
 * attribute is expected to take integer values in [0, bins-1]; attributes
 * that do not are reported on stderr.
 *
 * @param data
 *            data set to discretize and train on (modified in place)
 */
public void build(Dataset data) {
    AbstractFilter discretize = new EqualWidthBinning(bins);
    discretize.build(data);
    discretize.filter(data);
    /* Sanity check: each attribute should now span the full bin range */
    Instance min = DatasetTools.minAttributes(data);
    Instance max = DatasetTools.maxAttributes(data);
    for (int i = 0; i < data.noAttributes(); i++) {
        /* was hard-coded '9', which only holds when bins == 10 */
        if (min.value(i) != 0 || max.value(i) != bins - 1) {
            System.err.println(i + " " + min.value(i) + "\t" + max.value(i));
        }
    }
    this.training = data;
}
示例11: build
import net.sf.javaml.core.Dataset; //导入方法依赖的package包/类
/**
 * Backward-elimination attribute selection: iteratively removes the worst
 * attribute until only {@code n} remain; the selection is the complement of
 * the removed set.
 *
 * @param data
 *            data set to select attributes from
 */
@Override
public void build(Dataset data) {
    /*
     * When more attributes should be selected than there are, keep all
     * attributes.
     */
    if (n > data.noAttributes()) {
        removedAttributes = new HashSet<Integer>();
        /*
         * Fix: also populate selectedAttributes here; the original early
         * return left it unset (null) even though the normal path below
         * always sets it.
         */
        selectedAttributes = new HashSet<Integer>();
        for (int i = 0; i < data.noAttributes(); i++)
            selectedAttributes.add(i);
        return;
    }
    /*
     * Regular procedure, remove the worst attribute till we have enough
     * attributes left.
     */
    Instance classInstance = DatasetTools.createInstanceFromClass(data);
    removedAttributes = new HashSet<Integer>();
    while (removedAttributes.size() < data.noAttributes() - n) {
        removeNext(data, classInstance);
    }
    /* Create the inverse of the removed attributes */
    selectedAttributes = new HashSet<Integer>();
    for (int i = 0; i < data.noAttributes(); i++)
        selectedAttributes.add(i);
    selectedAttributes.removeAll(removedAttributes);
}
示例12: removeNext
import net.sf.javaml.core.Dataset; //导入方法依赖的package包/类
/**
 * Adds to {@code removedAttributes} the index of the worst not-yet-removed
 * attribute, judged by measuring each attribute (as an instance over the
 * data set) against the class instance with {@code dm}.
 *
 * @param data
 *            data set to pick the next attribute to remove from
 * @param classInstance
 *            instance representation of the class labels
 */
private void removeNext(Dataset data, Instance classInstance) {
    int worstIndex = -1;
    double worstScore = Double.NaN;
    for (int i = 0; i < data.noAttributes(); i++) {
        if (!removedAttributes.contains(i)) {
            /* Attribute i as a vector of its values over all instances */
            Instance attributeInstance = DatasetTools.createInstanceFromAttribute(data, i);
            double score = dm.measure(attributeInstance, classInstance);
            /* When the score is NaN, remove the attribute immediately */
            if (Double.isNaN(score)) {
                worstIndex = i;
                break;
            }
            if (worstIndex == -1) {
                /* First valid score initializes the running worst */
                worstIndex = i;
                worstScore = score;
            } else {
                /* !dm.compare means 'score' is worse than 'worstScore' */
                if (!dm.compare(score, worstScore)) {
                    worstIndex = i;
                    worstScore = score;
                }
            }
        }
    }
    removedAttributes.add(worstIndex);
}
示例13: pairWise
import net.sf.javaml.core.Dataset; //导入方法依赖的package包/类
/**
 * Computes, for every attribute, the Kullback-Leibler divergence between the
 * (binned) value distributions of class {@code p} and class {@code q}, and
 * normalizes the result to [0,1] by the largest divergence found.
 *
 * @param p
 *            first class label
 * @param q
 *            second class label
 * @param data
 *            discretized data set (attribute values are bin indices)
 * @return per-attribute normalized divergence
 */
private double[] pairWise(Object p, Object q, Dataset data) {
    double[] divergence = new double[data.noAttributes()];
    /*
     * For probability distributions P and Q of a discrete random variable
     * the K-L divergence of Q from P is defined to be:
     *
     * D_KL(P|Q) = sum_i( P(i) * log(P(i)/Q(i)) )
     */
    double maxSum = 0;
    for (int i = 0; i < data.noAttributes(); i++) {
        double sum = 0;
        /* Histogram attribute i per class; value is used as bin index */
        double[] countQ = new double[bins];
        double[] countP = new double[bins];
        double pCount = 0, qCount = 0;
        for (Instance inst : data) {
            if (inst.classValue().equals(q)) {
                countQ[(int) inst.value(i)]++;
                qCount++;
            }
            if (inst.classValue().equals(p)) {
                countP[(int) inst.value(i)]++;
                pCount++;
            }
        }
        for (int j = 0; j < countP.length; j++) {
            countP[j] /= pCount;
            countQ[j] /= qCount;
            /*
             * Probabilities should never be really 0, they can be small
             * though
             */
            if (countP[j] == 0)
                countP[j] = 0.0000001;
            if (countQ[j] == 0)
                countQ[j] = 0.0000001;
            sum += countP[j] * Math.log(countP[j] / countQ[j]);
        }
        divergence[i] = sum;
        /* Keep track of highest value */
        if (sum > maxSum)
            maxSum = sum;
    }
    /*
     * Normalize to [0,1]. Fix: guard against maxSum == 0 (identical class
     * distributions for every attribute), which previously produced NaN for
     * every entry via 0/0.
     */
    if (maxSum > 0) {
        for (int i = 0; i < data.noAttributes(); i++) {
            divergence[i] /= maxSum;
        }
    }
    return divergence;
}