

Java Pair.getFirst Method Code Examples

This article collects typical usage examples of the Java method org.apache.mahout.common.Pair.getFirst. If you are wondering what Pair.getFirst does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore other usage examples of org.apache.mahout.common.Pair.


Six code examples of the Pair.getFirst method are shown below, ordered roughly by popularity.
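Before the examples, here is a minimal, self-contained sketch of the Pair API itself; the values are illustrative and not taken from any project below:

import org.apache.mahout.common.Pair;

public class PairGetFirstDemo {
  public static void main(String[] args) {
    // Pair is an immutable 2-tuple; getFirst() returns its first element.
    Pair<Integer, String> entry = new Pair<Integer, String>(42, "mahout");
    Integer id = entry.getFirst();    // 42
    String term = entry.getSecond();  // "mahout"
    System.out.println(id + " -> " + term);

    // Pair.of(...) is a static factory, used in Example 2 below.
    Pair<Integer, String> same = Pair.of(42, "mahout");
    System.out.println(entry.equals(same)); // true
  }
}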

Example 1: loadDictionary

import org.apache.mahout.common.Pair; // import the class the method depends on
import org.apache.mahout.common.iterator.sequencefile.SequenceFileIterable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Writable;
import com.google.common.collect.Lists;
import java.util.List;
private static String[] loadDictionary(String dictionaryPath, Configuration conf) {
  if (dictionaryPath == null) {
    return null;
  }
  Path dictionaryFile = new Path(dictionaryPath);
  List<Pair<Integer, String>> termList = Lists.newArrayList();
  int maxTermId = 0;
  // In the dictionary file the key is the term and the value is its integer id.
  for (Pair<Writable, IntWritable> record
          : new SequenceFileIterable<Writable, IntWritable>(dictionaryFile, true, conf)) {
    termList.add(new Pair<Integer, String>(record.getSecond().get(),
        record.getFirst().toString()));
    maxTermId = Math.max(maxTermId, record.getSecond().get());
  }
  String[] terms = new String[maxTermId + 1];
  for (Pair<Integer, String> pair : termList) {
    terms[pair.getFirst()] = pair.getSecond();
  }
  return terms;
}
 
Author ID: saradelrio, Project: Chi-FRBCS-BigDataCS, Lines: 21, Source file: InMemoryCollapsedVariationalBayes0.java
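A hedged usage sketch for the method above; the dictionary path is an assumption for illustration:

// Hypothetical call site: load a seq2sparse-style dictionary into a term array.
Configuration conf = new Configuration();
String[] dictionary = loadDictionary("/tmp/vectors/dictionary.file-0", conf);
if (dictionary != null) {
  System.out.println("term with id 0: " + dictionary[0]);
}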

Example 2: loadVectors

import org.apache.mahout.common.Pair; // import the class the method depends on
import org.apache.mahout.common.iterator.sequencefile.PathFilters;
import org.apache.mahout.common.iterator.sequencefile.SequenceFileIterable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.mahout.math.Matrix;
import org.apache.mahout.math.NamedVector;
import org.apache.mahout.math.SparseRowMatrix;
import org.apache.mahout.math.Vector;
import org.apache.mahout.math.VectorWritable;
import com.google.common.collect.Lists;
import java.io.IOException;
import java.util.List;
private static Matrix loadVectors(String vectorPathString, Configuration conf)
  throws IOException {
  Path vectorPath = new Path(vectorPathString);
  FileSystem fs = vectorPath.getFileSystem(conf);
  List<Path> subPaths = Lists.newArrayList();
  if (fs.isFile(vectorPath)) {
    subPaths.add(vectorPath);
  } else {
    for (FileStatus fileStatus : fs.listStatus(vectorPath, PathFilters.logsCRCFilter())) {
      subPaths.add(fileStatus.getPath());
    }
  }
  List<Pair<Integer, Vector>> rowList = Lists.newArrayList();
  int numRows = Integer.MIN_VALUE; // highest row id seen; +1 after the loop gives the count
  int numCols = -1;
  boolean sequentialAccess = false;
  for (Path subPath : subPaths) {
    for (Pair<IntWritable, VectorWritable> record
        : new SequenceFileIterable<IntWritable, VectorWritable>(subPath, true, conf)) {
      int id = record.getFirst().get();
      Vector vector = record.getSecond().get();
      if (vector instanceof NamedVector) {
        // Unwrap NamedVector so only the underlying delegate vector is stored.
        vector = ((NamedVector) vector).getDelegate();
      }
      if (numCols < 0) {
        numCols = vector.size();
        sequentialAccess = vector.isSequentialAccess();
      }
      rowList.add(Pair.of(id, vector));
      numRows = Math.max(numRows, id);
    }
  }
  numRows++;
  Vector[] rowVectors = new Vector[numRows];
  for (Pair<Integer, Vector> pair : rowList) {
    rowVectors[pair.getFirst()] = pair.getSecond();
  }
  return new SparseRowMatrix(numRows, numCols, rowVectors, true, !sequentialAccess);
}
 
Author ID: saradelrio, Project: Chi-FRBCS-BigDataCS, Lines: 41, Source file: InMemoryCollapsedVariationalBayes0.java
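A hedged usage sketch; the vector path is an assumption, and access to the private method above is assumed:

// Hypothetical call site: load a directory of (id, vector) SequenceFiles into memory.
Matrix corpus = loadVectors("/tmp/vectors/tfidf-vectors", new Configuration());
System.out.println("rows=" + corpus.numRows() + " cols=" + corpus.numCols());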

Example 3: TopicModel

import org.apache.mahout.common.Pair; // import the class the method depends on
import org.apache.mahout.math.Matrix;
import org.apache.mahout.math.Vector;

private TopicModel(Pair<Matrix, Vector> model, double eta, double alpha, String[] dict,
    int numThreads, double modelWeight) {
  // Unpack the pair: the topic-term matrix comes first, the topic-sums vector second.
  this(model.getFirst(), model.getSecond(), eta, alpha, dict, numThreads, modelWeight);
}
 
Author ID: saradelrio, Project: Chi-FRBCS-BigDataCS, Lines: 5, Source file: TopicModel.java

Example 4: createDictionaryChunks

import org.apache.mahout.common.Pair; // import the class the method depends on
import org.apache.mahout.common.iterator.sequencefile.PathType;
import org.apache.mahout.common.iterator.sequencefile.SequenceFileDirIterable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.SequenceFile;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.Writable;
import com.google.common.collect.Lists;
import com.google.common.io.Closeables;
import java.io.IOException;
import java.util.List;
/**
 * Reads the feature-frequency list built at the end of the Word Count job and assigns an id to
 * each entry. This uses constant memory and runs at the speed of the disk read.
 */
private static List<Path> createDictionaryChunks(Path wordCountPath,
                                                 Path dictionaryPathBase,
                                                 Configuration baseConf,
                                                 int chunkSizeInMegabytes,
                                                 int[] maxTermDimension) throws IOException {
  List<Path> chunkPaths = Lists.newArrayList();
  
  Configuration conf = new Configuration(baseConf);
  
  FileSystem fs = FileSystem.get(wordCountPath.toUri(), conf);

  long chunkSizeLimit = chunkSizeInMegabytes * 1024L * 1024L;
  int chunkIndex = 0;
  Path chunkPath = new Path(dictionaryPathBase, DICTIONARY_FILE + chunkIndex);
  chunkPaths.add(chunkPath);
  
  SequenceFile.Writer dictWriter = new SequenceFile.Writer(fs, conf, chunkPath, Text.class, IntWritable.class);

  try {
    long currentChunkSize = 0;
    Path filesPattern = new Path(wordCountPath, OUTPUT_FILES_PATTERN);
    int i = 0;
    for (Pair<Writable,Writable> record
         : new SequenceFileDirIterable<Writable,Writable>(filesPattern, PathType.GLOB, null, null, true, conf)) {
      if (currentChunkSize > chunkSizeLimit) {
        Closeables.closeQuietly(dictWriter);
        chunkIndex++;

        chunkPath = new Path(dictionaryPathBase, DICTIONARY_FILE + chunkIndex);
        chunkPaths.add(chunkPath);

        dictWriter = new SequenceFile.Writer(fs, conf, chunkPath, Text.class, IntWritable.class);
        currentChunkSize = 0;
      }

      Writable key = record.getFirst();
      // Rough size estimate: record overhead + UTF-16 bytes of the term + one int.
      int fieldSize = DICTIONARY_BYTE_OVERHEAD + key.toString().length() * 2 + Integer.SIZE / 8;
      currentChunkSize += fieldSize;
      dictWriter.append(key, new IntWritable(i++));
    }
    maxTermDimension[0] = i;
  } finally {
    Closeables.closeQuietly(dictWriter);
  }
  
  return chunkPaths;
}
 
Author ID: saradelrio, Project: Chi-FRBCS-BigDataCS, Lines: 52, Source file: DictionaryVectorizer.java
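To make the chunk format concrete, here is a hedged read-back sketch; it assumes the conf and chunkPaths values from the method above and the SequenceFileIterable import:

// Hypothetical read-back of the first dictionary chunk (Text term -> IntWritable id).
for (Pair<Text, IntWritable> entry
     : new SequenceFileIterable<Text, IntWritable>(chunkPaths.get(0), true, conf)) {
  System.out.println(entry.getSecond().get() + "\t" + entry.getFirst().toString());
}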

Example 5: createDictionaryChunks

import org.apache.mahout.common.Pair; // import the class the method depends on
import org.apache.mahout.common.iterator.sequencefile.PathType;
import org.apache.mahout.common.iterator.sequencefile.SequenceFileDirIterable;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.SequenceFile;
import com.google.common.collect.Lists;
import com.google.common.io.Closeables;
import java.io.IOException;
import java.util.List;
/**
 * Reads the document-frequency list built at the end of the DF Count job. This uses constant
 * memory and runs at the speed of the disk read.
 */
private static Pair<Long[], List<Path>> createDictionaryChunks(Path featureCountPath,
                                                               Path dictionaryPathBase,
                                                               Configuration baseConf,
                                                               int chunkSizeInMegabytes) throws IOException {
  List<Path> chunkPaths = Lists.newArrayList();
  Configuration conf = new Configuration(baseConf);

  FileSystem fs = FileSystem.get(featureCountPath.toUri(), conf);

  long chunkSizeLimit = chunkSizeInMegabytes * 1024L * 1024L;
  int chunkIndex = 0;
  Path chunkPath = new Path(dictionaryPathBase, FREQUENCY_FILE + chunkIndex);
  chunkPaths.add(chunkPath);
  SequenceFile.Writer freqWriter =
    new SequenceFile.Writer(fs, conf, chunkPath, IntWritable.class, LongWritable.class);

  try {
    long currentChunkSize = 0;
    long featureCount = 0;
    long vectorCount = Long.MAX_VALUE;
    Path filesPattern = new Path(featureCountPath, OUTPUT_FILES_PATTERN);
    for (Pair<IntWritable,LongWritable> record
         : new SequenceFileDirIterable<IntWritable,LongWritable>(filesPattern,
                                                                 PathType.GLOB,
                                                                 null,
                                                                 null,
                                                                 true,
                                                                 conf)) {

      if (currentChunkSize > chunkSizeLimit) {
        Closeables.closeQuietly(freqWriter);
        chunkIndex++;

        chunkPath = new Path(dictionaryPathBase, FREQUENCY_FILE + chunkIndex);
        chunkPaths.add(chunkPath);

        freqWriter = new SequenceFile.Writer(fs, conf, chunkPath, IntWritable.class, LongWritable.class);
        currentChunkSize = 0;
      }

      // Fixed-size estimate per entry: record overhead + one int key + one long value.
      int fieldSize = SEQUENCEFILE_BYTE_OVERHEAD + Integer.SIZE / 8 + Long.SIZE / 8;
      currentChunkSize += fieldSize;
      IntWritable key = record.getFirst();
      LongWritable value = record.getSecond();
      if (key.get() >= 0) {
        freqWriter.append(key, value);
      } else if (key.get() == -1) {
        // The special key -1 carries the total vector (document) count.
        vectorCount = value.get();
      }
      featureCount = Math.max(key.get(), featureCount);
    }
    featureCount++;
    Long[] counts = {featureCount, vectorCount};
    return new Pair<Long[], List<Path>>(counts, chunkPaths);
  } finally {
    Closeables.closeQuietly(freqWriter);
  }
}
 
Author ID: saradelrio, Project: Chi-FRBCS-BigDataCS, Lines: 64, Source file: TFIDFConverter.java
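A hedged sketch of unpacking the method's return value; featureCountPath, dictionaryPathBase, and conf are assumed to be in scope, and the 64 MB chunk size is illustrative:

// Hypothetical call: the counts array comes first in the pair, the chunk paths second.
Pair<Long[], List<Path>> dfData =
    createDictionaryChunks(featureCountPath, dictionaryPathBase, conf, 64);
long featureCount = dfData.getFirst()[0];  // number of distinct features
long vectorCount = dfData.getFirst()[1];   // total number of document vectors
List<Path> frequencyChunks = dfData.getSecond();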

Example 6: writeDictEntry

import org.apache.mahout.common.Pair; // import the class the method depends on
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.Text;
import java.io.IOException;
@Override
public void writeDictEntry(Pair<String, Integer> entry) throws IOException {
    // Persist one (term, id) dictionary entry as a Text/IntWritable record.
    Text key = new Text(entry.getFirst());
    IntWritable value = new IntWritable(entry.getSecond());
    dictWriter.append(key, value);
}
 
Author ID: project-asap, Project: IReS-Platform, Lines: 7, Source file: SparkOutput.java
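A hedged usage sketch; output is assumed to be an instance of the class that implements writeDictEntry above:

// Hypothetical call: append the term "mahout" with id 7 to the dictionary writer.
output.writeDictEntry(new Pair<String, Integer>("mahout", 7));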


Note: the org.apache.mahout.common.Pair.getFirst examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code belongs to its original authors, and you should consult each project's License before distributing or reusing the code. Do not reproduce without permission.