

Java Text.append Method Code Examples

This article collects and summarizes typical usage examples of the Java method org.apache.hadoop.io.Text.append. If you are wondering what Text.append does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.Text.


Five code examples of the Text.append method are shown below, sorted by popularity by default.
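
Before diving into the examples, here is a minimal, self-contained sketch of the method itself. Text.append(byte[] utf8, int start, int len) copies len bytes from the given array onto the end of the Text's current content; the class name below is illustrative only and does not come from the projects listed.

import org.apache.hadoop.io.Text;

public class TextAppendDemo {
    public static void main(String[] args) {
        Text text = new Text("Hello");
        byte[] suffix = ", Hadoop!".getBytes(java.nio.charset.StandardCharsets.UTF_8);

        // append the byte range [0, suffix.length) to the existing content
        text.append(suffix, 0, suffix.length);
        System.out.println(text); // Hello, Hadoop!

        // Text is mutable: clear() resets the length but keeps the backing
        // array, so one instance can be reused across records
        text.clear();
        text.append(suffix, 2, 6); // copy just the bytes spelling "Hadoop"
        System.out.println(text); // Hadoop
    }
}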

Example 1: lowLevelFastqRead

import org.apache.hadoop.io.Text; // import the package/class this method depends on
protected boolean lowLevelFastqRead(Text readName, Text value) throws IOException {
    // ID line
    readName.clear();
    long skipped = appendLineInto(readName, true);
    if (skipped == 0)
        return false; // EOF
    if (readName.getBytes()[0] != '@')
        throw new RuntimeException("unexpected fastq record didn't start with '@' at " + makePositionMessage() + ". Line: " + readName + ". \n");

    value.append(readName.getBytes(), 0, readName.getLength());

    // sequence
    appendLineInto(value, false);

    // separator line
    appendLineInto(value, false);

    // quality
    appendLineInto(value, false);

    return true;
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 23, Source: SingleFastqInputFormat.java
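
For context: a FASTQ record consists of four lines (an ID line starting with '@', the base sequence, a '+' separator line, and the quality string), which is why the method above appends exactly three more lines after validating the ID line.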

Example 2: nextKeyValue

import org.apache.hadoop.io.Text; // import the package/class this method depends on
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {

    if (eof || eos) {
        return noMorePairs();
    }

    // scan forward until a line that starts a new log record is found
    if (line == null && !findFirstFirstLine()) {
        return noMorePairs();
    }

    // key = (file path, byte position where this record starts)
    key = new Tuple2<>(hdfsPath, Long.valueOf(lastReadPos));

    Text text = new Text(line);

    fetchLine();

    // a record may span several physical lines; append continuation lines
    // until the next record header (or EOF) is reached
    while (!isFirstLine(line) && !eof) {
        byte[] bytes = (System.lineSeparator() + line).getBytes();
        text.append(bytes, 0, bytes.length);
        fetchLine();
    }

    value = new Text(text.toString());

    return true;
}
 
Developer: comdirect, Project: hadoop-logfile-inputformat, Lines: 28, Source: LogfileRecordReader.java
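
One detail worth noting in this reader: value = new Text(text.toString()) rebuilds the record by decoding the bytes into a String and re-encoding them. Text also has a copy constructor that duplicates the backing bytes directly. A minimal sketch of the difference, independent of the project above:

import org.apache.hadoop.io.Text;

public class TextCopyDemo {
    public static void main(String[] args) {
        Text assembled = new Text("line 1\nline 2");

        // round-trips through String: UTF-8 bytes -> String -> UTF-8 bytes
        Text viaString = new Text(assembled.toString());

        // the copy constructor duplicates the backing bytes directly,
        // skipping the decode/encode step
        Text viaCopy = new Text(assembled);

        System.out.println(viaString.equals(viaCopy)); // true
    }
}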

Example 3: buildTrie

import org.apache.hadoop.io.Text; // import the package/class this method depends on
/**
 * Given a sorted set of cut points, build a trie that will find the correct
 * partition quickly.
 * @param splits the list of cut points
 * @param lower the lower bound of partitions 0..numPartitions-1
 * @param upper the upper bound of partitions 0..numPartitions-1
 * @param prefix the prefix that we have already checked against
 * @param maxDepth the maximum depth we will build a trie for
 * @return the trie node that will divide the splits correctly
 */
private static TrieNode buildTrie(Text[] splits, int lower, int upper, 
                                  Text prefix, int maxDepth) {
  int depth = prefix.getLength();
  if (depth >= maxDepth || lower == upper) {
    return new LeafTrieNode(depth, splits, lower, upper);
  }
  InnerTrieNode result = new InnerTrieNode(depth);
  Text trial = new Text(prefix);
  // append an extra byte on to the prefix
  trial.append(new byte[1], 0, 1);
  int currentBound = lower;
  for(int ch = 0; ch < 255; ++ch) {
    trial.getBytes()[depth] = (byte) (ch + 1);
    lower = currentBound;
    while (currentBound < upper) {
      if (splits[currentBound].compareTo(trial) >= 0) {
        break;
      }
      currentBound += 1;
    }
    trial.getBytes()[depth] = (byte) ch;
    result.child[ch] = buildTrie(splits, lower, currentBound, trial, 
                                 maxDepth);
  }
  // pick up the rest
  trial.getBytes()[depth] = (byte) 255;
  result.child[255] = buildTrie(splits, currentBound, upper, trial,
                                maxDepth);
  return result;
}
 
Developer: naver, Project: hadoop, Lines: 41, Source: TeraSort.java
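
The Text.append call in this example only grows the buffer by a single placeholder byte; the loop then overwrites that byte in place through the array returned by getBytes(). A standalone sketch of the pattern (illustrative only, not part of TeraSort):

import org.apache.hadoop.io.Text;

public class PrefixByteDemo {
    public static void main(String[] args) {
        Text prefix = new Text("ab");
        int depth = prefix.getLength();

        // grow the Text by one zero byte; append() updates the length, so
        // getBytes() now exposes a writable slot at index depth
        prefix.append(new byte[1], 0, 1);

        // overwrite the placeholder in place, as buildTrie does per child
        for (byte ch = 'x'; ch <= 'z'; ch++) {
            prefix.getBytes()[depth] = ch;
            System.out.println(prefix); // abx, aby, abz
        }
    }
}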

Example 4: appendLineInto

import org.apache.hadoop.io.Text; // import the package/class this method depends on
private int appendLineInto(Text dest, boolean eofOk) throws EOFException, IOException {
    Text buf = new Text();
    int bytesRead = lineReader.readLine(buf, MAX_LINE_LENGTH);

    // readLine returns 0 at end of stream; fail unless the caller
    // declared that hitting EOF here is acceptable
    if (bytesRead < 0 || (bytesRead == 0 && !eofOk))
        throw new EOFException();

    // copy the line into dest and restore the newline that readLine stripped
    dest.append(buf.getBytes(), 0, buf.getLength());
    dest.append(newline, 0, 1);
    pos += bytesRead;

    return bytesRead;
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 14, Source: SingleFastqInputFormat.java

Example 5: readDefaultLine

import org.apache.hadoop.io.Text; // import the package/class this method depends on
/**
 * Read a line terminated by one of CR, LF, or CRLF.
 */
private int readDefaultLine(Text str, int maxLineLength, int maxBytesToConsume)
throws IOException {
  /* We're reading data from in, but the head of the stream may be
   * already buffered in buffer, so we have several cases:
   * 1. No newline characters are in the buffer, so we need to copy
   *    everything and read another buffer from the stream.
   * 2. An unambiguously terminated line is in buffer, so we just
   *    copy to str.
   * 3. Ambiguously terminated line is in buffer, i.e. buffer ends
   *    in CR.  In this case we copy everything up to CR to str, but
   *    we also need to see what follows CR: if it's LF, then we
   *    need consume LF as well, so next call to readLine will read
   *    from after that.
   * We use a flag prevCharCR to signal if previous character was CR
   * and, if it happens to be at the end of the buffer, delay
   * consuming it until we have a chance to look at the char that
   * follows.
   */
  str.clear();
  int txtLength = 0; //tracks str.getLength(), as an optimization
  int newlineLength = 0; //length of terminating newline
  boolean prevCharCR = false; //true if prev char was CR
  long bytesConsumed = 0;
  do {
    int startPosn = bufferPosn; //starting from where we left off the last time
    if (bufferPosn >= bufferLength) {
      startPosn = bufferPosn = 0;
      if (prevCharCR) {
        ++bytesConsumed; //account for CR from previous read
      }
      bufferLength = fillBuffer(in, buffer, prevCharCR);
      if (bufferLength <= 0) {
        break; // EOF
      }
    }
    for (; bufferPosn < bufferLength; ++bufferPosn) { //search for newline
      if (buffer[bufferPosn] == LF) {
        newlineLength = (prevCharCR) ? 2 : 1;
        ++bufferPosn; // at next invocation proceed from following byte
        break;
      }
      if (prevCharCR) { //CR + notLF, we are at notLF
        newlineLength = 1;
        break;
      }
      prevCharCR = (buffer[bufferPosn] == CR);
    }
    int readLength = bufferPosn - startPosn;
    if (prevCharCR && newlineLength == 0) {
      --readLength; //CR at the end of the buffer
    }
    bytesConsumed += readLength;
    int appendLength = readLength - newlineLength;
    if (appendLength > maxLineLength - txtLength) {
      appendLength = maxLineLength - txtLength;
    }
    if (appendLength > 0) {
      str.append(buffer, startPosn, appendLength);
      txtLength += appendLength;
    }
  } while (newlineLength == 0 && bytesConsumed < maxBytesToConsume);

  if (bytesConsumed > Integer.MAX_VALUE) {
    throw new IOException("Too many bytes before newline: " + bytesConsumed);
  }
  return (int)bytesConsumed;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 71, Source: LineReader.java
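
To see this terminator handling from the caller's side, here is a short sketch driving the public org.apache.hadoop.util.LineReader API over an in-memory stream; the mixed LF/CRLF/CR input is made up for illustration:

import java.io.ByteArrayInputStream;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;

public class LineReaderDemo {
    public static void main(String[] args) throws Exception {
        // four tokens separated by three different terminators: LF, CRLF, CR
        byte[] data = "alpha\nbeta\r\ngamma\rdelta"
                .getBytes(java.nio.charset.StandardCharsets.UTF_8);
        LineReader reader = new LineReader(new ByteArrayInputStream(data));

        Text line = new Text();
        int consumed;
        // readLine returns the number of bytes consumed, including the
        // terminator; 0 signals end of stream
        while ((consumed = reader.readLine(line, Integer.MAX_VALUE, Integer.MAX_VALUE)) > 0) {
            System.out.println(line + " (" + consumed + " bytes)");
        }
        reader.close();
    }
}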


Note: the org.apache.hadoop.io.Text.append examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are taken from open-source projects, and the copyright of the source code remains with the original authors; consult the corresponding project's License before redistributing or using it. Do not reproduce this article without permission.