

Java ClusterWritable.getValue Method Code Examples

This article collects typical usage examples of the Java method org.apache.mahout.clustering.iterator.ClusterWritable.getValue. If you are wondering how ClusterWritable.getValue works in practice, or what a concrete call looks like, the curated method examples below should help. You can also explore further usage examples of the enclosing class, org.apache.mahout.clustering.iterator.ClusterWritable.


Below are 11 code examples of the ClusterWritable.getValue method, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
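Before the examples, here is a minimal self-contained sketch of the core pattern, assuming made-up illustration values (the 2-dimensional center, cluster id 0, and EuclideanDistanceMeasure are not taken from the examples below): a concrete Cluster is wrapped in a ClusterWritable for SequenceFile I/O, and getValue() unwraps it again on the read side, which is exactly what every example below does.

import org.apache.mahout.clustering.Cluster;
import org.apache.mahout.clustering.iterator.ClusterWritable;
import org.apache.mahout.clustering.kmeans.Kluster;
import org.apache.mahout.common.distance.EuclideanDistanceMeasure;
import org.apache.mahout.math.DenseVector;

public class ClusterWritableGetValueSketch {
  public static void main(String[] args) {
    // Build a simple k-means cluster around a made-up 2-dimensional center.
    Kluster prior = new Kluster(new DenseVector(new double[] {1.0, 2.0}), 0, new EuclideanDistanceMeasure());

    // A writer wraps the concrete cluster in a ClusterWritable before emitting it to a SequenceFile.
    ClusterWritable writable = new ClusterWritable();
    writable.setValue(prior);

    // A reader calls getValue() to recover the concrete Cluster; casting it to a subtype
    // (Kluster, SoftCluster, Canopy, ...) is then up to the caller, as in the examples below.
    Cluster restored = writable.getValue();
    System.out.println("Restored cluster " + restored.getId() + " with center " + restored.getCenter());
  }
}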

Example 1: configureWithClusterInfo

import org.apache.mahout.clustering.iterator.ClusterWritable; // import the package/class the method depends on
/**
 * Create a list of SoftClusters from whatever type is passed in as the prior
 * 
 * @param conf
 *          the Configuration
 * @param clusterPath
 *          the path to the prior Clusters
 * @param clusters
 *          a List<Cluster> to put values into
 */
public static void configureWithClusterInfo(Configuration conf, Path clusterPath, List<Cluster> clusters) {
  for (Writable value : new SequenceFileDirValueIterable<Writable>(clusterPath, PathType.LIST,
      PathFilters.partFilter(), conf)) {
    Class<? extends Writable> valueClass = value.getClass();
    
    if (valueClass.equals(ClusterWritable.class)) {
      ClusterWritable clusterWritable = (ClusterWritable) value;
      value = clusterWritable.getValue();
      valueClass = value.getClass();
    }
    
    if (valueClass.equals(Kluster.class)) {
      // get the cluster info
      Kluster cluster = (Kluster) value;
      clusters.add(new SoftCluster(cluster.getCenter(), cluster.getId(), cluster.getMeasure()));
    } else if (valueClass.equals(SoftCluster.class)) {
      // get the cluster info
      clusters.add((SoftCluster) value);
    } else if (valueClass.equals(Canopy.class)) {
      // get the cluster info
      Canopy canopy = (Canopy) value;
      clusters.add(new SoftCluster(canopy.getCenter(), canopy.getId(), canopy.getMeasure()));
    } else {
      throw new IllegalStateException("Bad value class: " + valueClass);
    }
  }
  
}
 
Developer: saradelrio, Project: Chi-FRBCS-BigDataCS, Lines: 39, Source: FuzzyKMeansUtil.java
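As a rough usage sketch for the utility above: the prior-cluster directory "output/clusters-0" and the surrounding main method are hypothetical, the import assumes the Mahout package org.apache.mahout.clustering.fuzzykmeans (the Chi-FRBCS-BigDataCS project may place the class elsewhere), and it relies on configureWithClusterInfo being accessible from the caller, as it is declared public in the snippet above.

import java.util.ArrayList;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.mahout.clustering.Cluster;
import org.apache.mahout.clustering.fuzzykmeans.FuzzyKMeansUtil; // package is an assumption, see lead-in

public class FuzzyKMeansPriorLoadingSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Hypothetical directory holding prior clusters written by an earlier clustering step.
    Path priorPath = new Path("output/clusters-0");
    List<Cluster> clusters = new ArrayList<Cluster>();
    // Fills the list with SoftClusters built from whatever prior cluster type is on disk.
    FuzzyKMeansUtil.configureWithClusterInfo(conf, priorPath, clusters);
    System.out.println("Loaded " + clusters.size() + " prior clusters");
  }
}

The same calling pattern applies to KMeansUtil.configureWithClusterInfo in Example 2, which fills the collection with Klusters instead of SoftClusters.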

Example 2: configureWithClusterInfo

import org.apache.mahout.clustering.iterator.ClusterWritable; // import the package/class the method depends on
/**
 * Create a list of Klusters from whatever Cluster type is passed in as the prior
 * 
 * @param conf
 *          the Configuration
 * @param clusterPath
 *          the path to the prior Clusters
 * @param clusters
 *          a List<Cluster> to put values into
 */
public static void configureWithClusterInfo(Configuration conf, Path clusterPath, Collection<Cluster> clusters) {
  for (Writable value : new SequenceFileDirValueIterable<Writable>(clusterPath, PathType.LIST,
      PathFilters.partFilter(), conf)) {
    Class<? extends Writable> valueClass = value.getClass();
    if (valueClass.equals(ClusterWritable.class)) {
      ClusterWritable clusterWritable = (ClusterWritable) value;
      value = clusterWritable.getValue();
      valueClass = value.getClass();
    }
    log.debug("Read 1 Cluster from {}", clusterPath);
    
    if (valueClass.equals(Kluster.class)) {
      // get the cluster info
      clusters.add((Kluster) value);
    } else if (valueClass.equals(Canopy.class)) {
      // get the cluster info
      Canopy canopy = (Canopy) value;
      clusters.add(new Kluster(canopy.getCenter(), canopy.getId(), canopy.getMeasure()));
    } else {
      throw new IllegalStateException("Bad value class: " + valueClass);
    }
  }
}
 
Developer: saradelrio, Project: Chi-FRBCS-BigDataCS, Lines: 34, Source: KMeansUtil.java

Example 3: populateClusterModels

import org.apache.mahout.clustering.iterator.ClusterWritable; // import the package/class the method depends on
/**
 * Populates a list with clusters present in clusters-*-final directory.
 * 
 * @param clusterOutputPath
 *            The output path of the clustering.
 * @param conf
 *            The Hadoop Configuration
 * @return The list of clusters found by the clustering.
 * @throws IOException
 */
private static List<Cluster> populateClusterModels(Path clusterOutputPath,
		Configuration conf) throws IOException {
	List<Cluster> clusterModels = Lists.newArrayList();
	Path finalClustersPath = finalClustersPath(conf, clusterOutputPath);
	Iterator<?> it = new SequenceFileDirValueIterator<Writable>(
			finalClustersPath, PathType.LIST, PathFilters.partFilter(),
			null, false, conf);
	while (it.hasNext()) {
		ClusterWritable next = (ClusterWritable) it.next();
		Cluster cluster = next.getValue();
		cluster.configure(conf);
		clusterModels.add(cluster);
	}
	return clusterModels;
}
 
Developer: pgorecki, Project: visearch, Lines: 26, Source: ImageToTextDriver.java

Example 4: readClusters

import org.apache.mahout.clustering.iterator.ClusterWritable; // import the package/class the method depends on
public static List<List<Cluster>> readClusters(Configuration conf, Path output)
		throws IOException {
	List<List<Cluster>> Clusters = Lists.newArrayList();
	FileSystem fs = FileSystem.get(output.toUri(), conf);

	for (FileStatus s : fs.listStatus(output, new ClustersFilter())) {
		List<Cluster> clusters = Lists.newArrayList();
		for (ClusterWritable value : new SequenceFileDirValueIterable<ClusterWritable>(
				s.getPath(), PathType.LIST, PathFilters.logsCRCFilter(),
				conf)) {
			Cluster cluster = value.getValue();
			clusters.add(cluster);
		}
		Clusters.add(clusters);
	}
	return Clusters;
}
 
Developer: tknandu, Project: recommender_pilot, Lines: 18, Source: ClusterHelper.java

Example 5: populateClusterModels

import org.apache.mahout.clustering.iterator.ClusterWritable; // import the package/class the method depends on
/**
 * Populates a list with clusters present in clusters-*-final directory.
 * 
 * @param clusterOutputPath
 *          The output path of the clustering.
 * @param conf
 *          The Hadoop Configuration
 * @return The list of clusters found by the clustering.
 * @throws IOException
 */
private static List<Cluster> populateClusterModels(Path clusterOutputPath, Configuration conf) throws IOException {
  List<Cluster> clusterModels = new ArrayList<Cluster>();
  Path finalClustersPath = finalClustersPath(conf, clusterOutputPath);
  Iterator<?> it = new SequenceFileDirValueIterator<Writable>(finalClustersPath, PathType.LIST,
      PathFilters.partFilter(), null, false, conf);
  while (it.hasNext()) {
    ClusterWritable next = (ClusterWritable) it.next();
    Cluster cluster = next.getValue();
    cluster.configure(conf);
    clusterModels.add(cluster);
  }
  return clusterModels;
}
 
Developer: saradelrio, Project: Chi-FRBCS-BigDataCS, Lines: 24, Source: ClusterClassificationDriver.java

Example 6: readFromSeqFiles

import org.apache.mahout.clustering.iterator.ClusterWritable; // import the package/class the method depends on
public void readFromSeqFiles(Configuration conf, Path path) throws IOException {
  Configuration config = new Configuration();
  List<Cluster> clusters = Lists.newArrayList();
  for (ClusterWritable cw : new SequenceFileDirValueIterable<ClusterWritable>(path, PathType.LIST,
      PathFilters.logsCRCFilter(), config)) {
    Cluster cluster = cw.getValue();
    cluster.configure(conf);
    clusters.add(cluster);
  }
  this.models = clusters;
  modelClass = models.get(0).getClass().getName();
  this.policy = readPolicy(path);
}
 
Developer: saradelrio, Project: Chi-FRBCS-BigDataCS, Lines: 14, Source: ClusterClassifier.java

Example 7: populateClusterModels

import org.apache.mahout.clustering.iterator.ClusterWritable; // import the package/class the method depends on
public static List<Cluster> populateClusterModels(Path clusterOutputPath, Configuration conf) throws IOException {
  List<Cluster> clusters = new ArrayList<Cluster>();
  FileSystem fileSystem = clusterOutputPath.getFileSystem(conf);
  FileStatus[] clusterFiles = fileSystem.listStatus(clusterOutputPath, PathFilters.finalPartFilter());
  Iterator<?> it = new SequenceFileDirValueIterator<Writable>(
      clusterFiles[0].getPath(), PathType.LIST, PathFilters.partFilter(),
      null, false, conf);
  while (it.hasNext()) {
    ClusterWritable next = (ClusterWritable) it.next();
    Cluster cluster = next.getValue();
    cluster.configure(conf);
    clusters.add(cluster);
  }
  return clusters;
}
 
Developer: saradelrio, Project: Chi-FRBCS-BigDataCS, Lines: 16, Source: ClusterClassificationMapper.java

Example 8: getCanopies

import org.apache.mahout.clustering.iterator.ClusterWritable; // import the package/class the method depends on
public static List<MeanShiftCanopy> getCanopies(Configuration conf) {
  String statePath = conf.get(MeanShiftCanopyDriver.STATE_IN_KEY);
  List<MeanShiftCanopy> canopies = Lists.newArrayList();
  Path path = new Path(statePath);
  for (ClusterWritable clusterWritable 
       : new SequenceFileDirValueIterable<ClusterWritable>(path, PathType.LIST, PathFilters.logsCRCFilter(), conf)) {
    MeanShiftCanopy canopy = (MeanShiftCanopy)clusterWritable.getValue();
    canopies.add(canopy);
  }
  return canopies;
}
 
Developer: saradelrio, Project: Chi-FRBCS-BigData-Max, Lines: 12, Source: MeanShiftCanopyClusterMapper.java

Example 9: map

import org.apache.mahout.clustering.iterator.ClusterWritable; // import the package/class the method depends on
@Override
protected void map(WritableComparable<?> key, ClusterWritable clusterWritable, Context context)
  throws IOException, InterruptedException {
  // canopies use canopyIds assigned when input vectors are processed as vectorIds too
  MeanShiftCanopy canopy = (MeanShiftCanopy) clusterWritable.getValue();
  int vectorId = canopy.getId();
  for (MeanShiftCanopy msc : canopies) {
    for (int containedId : msc.getBoundPoints().toList()) {
      if (vectorId == containedId) {
        context.write(new IntWritable(msc.getId()),
                      new WeightedVectorWritable(1, canopy.getCenter()));
      }
    }
  }
}
 
Developer: saradelrio, Project: Chi-FRBCS-BigData-Max, Lines: 16, Source: MeanShiftCanopyClusterMapper.java

Example 10: populateClusterModels

import org.apache.mahout.clustering.iterator.ClusterWritable; // import the package/class the method depends on
public static List<Cluster> populateClusterModels(Path clusterOutputPath, Configuration conf) throws IOException {
  List<Cluster> clusters = Lists.newArrayList();
  FileSystem fileSystem = clusterOutputPath.getFileSystem(conf);
  FileStatus[] clusterFiles = fileSystem.listStatus(clusterOutputPath, PathFilters.finalPartFilter());
  Iterator<?> it = new SequenceFileDirValueIterator<Writable>(
      clusterFiles[0].getPath(), PathType.LIST, PathFilters.partFilter(),
      null, false, conf);
  while (it.hasNext()) {
    ClusterWritable next = (ClusterWritable) it.next();
    Cluster cluster = next.getValue();
    cluster.configure(conf);
    clusters.add(cluster);
  }
  return clusters;
}
 
Developer: pgorecki, Project: visearch, Lines: 16, Source: ImageToTextMapper.java

Example 11: map

import org.apache.mahout.clustering.iterator.ClusterWritable; // import the package/class the method depends on
@Override
protected void map(WritableComparable<?> key, ClusterWritable clusterWritable, Context context)
  throws IOException, InterruptedException {
  MeanShiftCanopy canopy = (MeanShiftCanopy) clusterWritable.getValue();
  clusterer.mergeCanopy(canopy.shallowCopy(), canopies);
}
 
Developer: saradelrio, Project: Chi-FRBCS-BigDataCS, Lines: 7, Source: MeanShiftCanopyMapper.java


Note: The org.apache.mahout.clustering.iterator.ClusterWritable.getValue examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their authors, and the source code copyright remains with the original authors; consult the corresponding project's license before distributing or reusing the code, and do not reproduce this article without permission.