

Java Text.append Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.io.Text.append. If you are wondering how Text.append works, how to call it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples for the enclosing class, org.apache.hadoop.io.Text.


The following presents 5 code examples of the Text.append method, sorted by popularity by default.
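
Before turning to the project examples, here is a minimal, self-contained sketch (the class name TextAppendDemo and the sample strings are ours, not from any project below) of what Text.append does: unlike Text.set, it does not replace the current contents, but copies the given byte range onto the end of the existing bytes, growing the backing array as needed.

import org.apache.hadoop.io.Text;

public class TextAppendDemo {
    public static void main(String[] args) {
        Text line = new Text("foo");           // starts as "foo"
        byte[] extra = ",bar".getBytes();      // bytes to append
        // append(byte[] utf8, int start, int len) copies len bytes,
        // beginning at start, onto the end of the current contents
        line.append(extra, 0, extra.length);
        System.out.println(line);              // foo,bar
        System.out.println(line.getLength());  // 7
    }
}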

Example 1: lowLevelFastqRead

import org.apache.hadoop.io.Text; // import the class the method depends on
protected boolean lowLevelFastqRead(Text readName, Text value) throws IOException {
    // ID line
    readName.clear();
    long skipped = appendLineInto(readName, true);
    if (skipped == 0)
        return false; // EOF
    if (readName.getBytes()[0] != '@')
        throw new RuntimeException("FASTQ record did not start with '@' at " + makePositionMessage() + ". Line: " + readName);

    value.append(readName.getBytes(), 0, readName.getLength());

    // sequence
    appendLineInto(value, false);

    // separator line
    appendLineInto(value, false);

    // quality
    appendLineInto(value, false);

    return true;
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 23, Source: SingleFastqInputFormat.java
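
The pattern here is typical for record readers: the four lines of a FASTQ record (ID, sequence, separator, quality) are accumulated into a single Text with repeated Text.append calls, each line terminated with '\n' by the appendLineInto helper (shown in example 4), avoiding intermediate String allocations.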

Example 2: nextKeyValue

import org.apache.hadoop.io.Text; // import the class the method depends on
@Override
public boolean nextKeyValue() throws IOException, InterruptedException {

    if (eof || eos) {
        return noMorePairs();
    }

    if (line == null && !findFirstFirstLine()) {
        return noMorePairs();
    }

    key = new Tuple2<>(hdfsPath, Long.valueOf(lastReadPos));

    Text text = new Text(line);

    fetchLine();

    while (!isFirstLine(line) && !eof) {
        byte[] bytes = (System.lineSeparator() + line).getBytes();
        text.append(bytes, 0, bytes.length);
        fetchLine();
    }

    value = new Text(text.toString());

    return true;
}
 
Developer: comdirect, Project: hadoop-logfile-inputformat, Lines: 28, Source: LogfileRecordReader.java
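
Here each continuation line of a multi-line log record is stitched onto the accumulator with Text.append, prefixed by System.lineSeparator(); the final new Text(text.toString()) copies the accumulated bytes into a fresh Text for the emitted value, so the record is independent of the working buffer.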

Example 3: buildTrie

import org.apache.hadoop.io.Text; // import the class the method depends on
/**
 * Given a sorted set of cut points, build a trie that will find the correct
 * partition quickly.
 * @param splits the list of cut points
 * @param lower the lower bound of partitions 0..numPartitions-1
 * @param upper the upper bound of partitions 0..numPartitions-1
 * @param prefix the prefix that we have already checked against
 * @param maxDepth the maximum depth we will build a trie for
 * @return the trie node that will divide the splits correctly
 */
private static TrieNode buildTrie(Text[] splits, int lower, int upper, 
                                  Text prefix, int maxDepth) {
  int depth = prefix.getLength();
  if (depth >= maxDepth || lower == upper) {
    return new LeafTrieNode(depth, splits, lower, upper);
  }
  InnerTrieNode result = new InnerTrieNode(depth);
  Text trial = new Text(prefix);
  // append an extra byte on to the prefix
  trial.append(new byte[1], 0, 1);
  int currentBound = lower;
  for(int ch = 0; ch < 255; ++ch) {
    trial.getBytes()[depth] = (byte) (ch + 1);
    lower = currentBound;
    while (currentBound < upper) {
      if (splits[currentBound].compareTo(trial) >= 0) {
        break;
      }
      currentBound += 1;
    }
    trial.getBytes()[depth] = (byte) ch;
    result.child[ch] = buildTrie(splits, lower, currentBound, trial, 
                                 maxDepth);
  }
  // pick up the rest
  trial.getBytes()[depth] = (byte) 255;
  result.child[255] = buildTrie(splits, currentBound, upper, trial,
                                maxDepth);
  return result;
}
 
Developer: naver, Project: hadoop, Lines: 41, Source: TeraSort.java
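
In this TeraSort example, Text.append is called only once, to extend the prefix by a single placeholder byte; the loop then rewrites that byte in place through getBytes()[depth] for each candidate value, reusing one Text object while currentBound scans the sorted cut points to assign each child its range of splits.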

Example 4: appendLineInto

import org.apache.hadoop.io.Text; // import the class the method depends on
/**
 * Reads the next line into dest, followed by a single newline byte, and
 * returns the number of bytes read; 0 signals EOF, which is only
 * tolerated when eofOk is true.
 */
private int appendLineInto(Text dest, boolean eofOk) throws EOFException, IOException {
    Text buf = new Text();
    int bytesRead = lineReader.readLine(buf, MAX_LINE_LENGTH);

    if (bytesRead < 0 || (bytesRead == 0 && !eofOk))
        throw new EOFException();

    dest.append(buf.getBytes(), 0, buf.getLength());
    dest.append(newline, 0, 1);
    pos += bytesRead;

    return bytesRead;
}
 
Developer: PAA-NCIC, Project: SparkSeq, Lines: 14, Source: SingleFastqInputFormat.java
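
This is the helper called by example 1 (both come from SingleFastqInputFormat.java): two Text.append calls per line, one for the line's own bytes and one for the newline field (presumably a one-byte '\n' array), rebuild the record with normalized line endings as it accumulates into dest.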

Example 5: readDefaultLine

import org.apache.hadoop.io.Text; // import the class the method depends on
/**
 * Read a line terminated by one of CR, LF, or CRLF.
 */
private int readDefaultLine(Text str, int maxLineLength, int maxBytesToConsume)
    throws IOException {
  /* We're reading data from in, but the head of the stream may be
   * already buffered in buffer, so we have several cases:
   * 1. No newline characters are in the buffer, so we need to copy
   *    everything and read another buffer from the stream.
   * 2. An unambiguously terminated line is in buffer, so we just
   *    copy to str.
   * 3. Ambiguously terminated line is in buffer, i.e. buffer ends
   *    in CR.  In this case we copy everything up to CR to str, but
   *    we also need to see what follows CR: if it's LF, then we
   *    need consume LF as well, so next call to readLine will read
   *    from after that.
   * We use a flag prevCharCR to signal if previous character was CR
   * and, if it happens to be at the end of the buffer, delay
   * consuming it until we have a chance to look at the char that
   * follows.
   */
  str.clear();
  int txtLength = 0; //tracks str.getLength(), as an optimization
  int newlineLength = 0; //length of terminating newline
  boolean prevCharCR = false; //true if prev char was CR
  long bytesConsumed = 0;
  do {
    int startPosn = bufferPosn; //starting from where we left off the last time
    if (bufferPosn >= bufferLength) {
      startPosn = bufferPosn = 0;
      if (prevCharCR) {
        ++bytesConsumed; //account for CR from previous read
      }
      bufferLength = fillBuffer(in, buffer, prevCharCR);
      if (bufferLength <= 0) {
        break; // EOF
      }
    }
    for (; bufferPosn < bufferLength; ++bufferPosn) { //search for newline
      if (buffer[bufferPosn] == LF) {
        newlineLength = (prevCharCR) ? 2 : 1;
        ++bufferPosn; // at next invocation proceed from following byte
        break;
      }
      if (prevCharCR) { //CR + notLF, we are at notLF
        newlineLength = 1;
        break;
      }
      prevCharCR = (buffer[bufferPosn] == CR);
    }
    int readLength = bufferPosn - startPosn;
    if (prevCharCR && newlineLength == 0) {
      --readLength; //CR at the end of the buffer
    }
    bytesConsumed += readLength;
    int appendLength = readLength - newlineLength;
    if (appendLength > maxLineLength - txtLength) {
      appendLength = maxLineLength - txtLength;
    }
    if (appendLength > 0) {
      str.append(buffer, startPosn, appendLength);
      txtLength += appendLength;
    }
  } while (newlineLength == 0 && bytesConsumed < maxBytesToConsume);

  if (bytesConsumed > Integer.MAX_VALUE) {
    throw new IOException("Too many bytes before newline: " + bytesConsumed);
  }
  return (int)bytesConsumed;
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 71, Source: LineReader.java
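
readDefaultLine is internal to LineReader, but its effect is easy to observe through the class's public API. Below is a hedged sketch (the input bytes are made up for illustration): each terminator style, CR, LF, or CRLF, is consumed in full, and only the line's own bytes are appended into the caller's Text.

import java.io.ByteArrayInputStream;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.util.LineReader;

public class LineReaderDemo {
    public static void main(String[] args) throws Exception {
        byte[] data = "first\r\nsecond\rthird\n".getBytes();
        LineReader reader = new LineReader(new ByteArrayInputStream(data));
        Text line = new Text();
        // readLine returns the bytes consumed (terminator included)
        // and 0 at EOF; the terminator itself never reaches 'line'
        while (reader.readLine(line) > 0) {
            System.out.println(line);          // first, second, third
        }
        reader.close();
    }
}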


Note: The org.apache.hadoop.io.Text.append method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets were selected from open-source projects contributed by their respective authors. Copyright of the source code remains with the original authors; consult the corresponding project's License before distributing or using it. Do not reproduce without permission.