

Java Entry Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.regionserver.wal.HLog.Entry. If you are wondering what the Entry class does, how to use it, or what example usage looks like, the curated code examples below may help.


The Entry class belongs to the org.apache.hadoop.hbase.regionserver.wal.HLog package. Fifteen code examples of the Entry class are presented below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.

Example 1: parseHLog

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
/**
 * Parse a single hlog and put the edits in entryBuffers.
 *
 * @param in the hlog reader
 * @param path the path of the log file
 * @param entryBuffers the buffer to hold the parsed edits
 * @param fs the file system
 * @param conf the configuration
 * @param skipErrors if true, wrap parse failures in CorruptedLogFileException
 *          rather than propagating the underlying IOException
 * @throws IOException
 * @throws CorruptedLogFileException if the hlog is corrupted
 */
private void parseHLog(final Reader in, Path path,
    EntryBuffers entryBuffers, final FileSystem fs,
    final Configuration conf, boolean skipErrors)
    throws IOException, CorruptedLogFileException {
  int editsCount = 0;
  try {
    Entry entry;
    while ((entry = getNextLogLine(in, path, skipErrors)) != null) {
      entryBuffers.appendEntry(entry);
      editsCount++;
    }
  } catch (InterruptedException ie) {
    IOException t = new InterruptedIOException();
    t.initCause(ie);
    throw t;
  } finally {
    LOG.debug("Pushed=" + editsCount + " entries from " + path);
  }
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 32, Source file: HLogSplitter.java
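
For orientation, here is a minimal sketch of the same read loop in isolation, assuming an already-opened WAL file and the usual imports; countEdits is an illustrative name, not part of HLogSplitter (HLog.getReader appears in Example 9):

// Hypothetical helper: drain a WAL file and count its edits.
static int countEdits(FileSystem fs, Path walPath, Configuration conf)
    throws IOException {
  HLog.Reader reader = HLog.getReader(fs, walPath, conf);
  int editsCount = 0;
  try {
    Entry entry;
    while ((entry = reader.next()) != null) { // same loop shape as parseHLog
      editsCount++;
    }
  } finally {
    reader.close();
  }
  return editsCount;
}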

Example 2: appendEntry

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
/**
 * Append a log entry into the corresponding region buffer.
 * Blocks if the total heap usage has crossed the specified threshold.
 *
 * @throws InterruptedException
 * @throws IOException
 */
void appendEntry(Entry entry) throws InterruptedException, IOException {
  HLogKey key = entry.getKey();

  RegionEntryBuffer buffer;
  long incrHeap;
  synchronized (this) {
    buffer = buffers.get(key.getEncodedRegionName());
    if (buffer == null) {
      buffer = new RegionEntryBuffer(key.getTablename(), key.getEncodedRegionName());
      buffers.put(key.getEncodedRegionName(), buffer);
    }
    incrHeap = buffer.appendEntry(entry);
  }

  // If we crossed the chunk threshold, wait for more space to be available
  synchronized (dataAvailable) {
    totalBuffered += incrHeap;
    while (totalBuffered > maxHeapUsage && thrown.get() == null) {
      LOG.debug("Used " + totalBuffered + " bytes of buffered edits, waiting for IO threads...");
      dataAvailable.wait(3000);
    }
    dataAvailable.notifyAll();
  }
  checkForErrors();
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 33, Source file: HLogSplitter.java
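
The blocking logic above is a classic bounded-buffer backpressure handshake: producers add to a shared counter under one monitor and wait while the cap is exceeded, while consumers decrement and notify. A self-contained sketch of just that pattern, with illustrative names and an assumed cap (no HBase types):

// Illustrative backpressure gate mirroring the appendEntry logic above.
class BackpressureGate {
  private final Object dataAvailable = new Object();
  private long totalBuffered = 0;
  private final long maxHeapUsage; // e.g. 128 * 1024 * 1024

  BackpressureGate(long maxHeapUsage) { this.maxHeapUsage = maxHeapUsage; }

  void produced(long heapDelta) throws InterruptedException {
    synchronized (dataAvailable) {
      totalBuffered += heapDelta;
      while (totalBuffered > maxHeapUsage) {
        dataAvailable.wait(3000); // woken when a consumer drains space
      }
      dataAvailable.notifyAll(); // tell waiting consumers data arrived
    }
  }

  void consumed(long heapDelta) {
    synchronized (dataAvailable) {
      totalBuffered -= heapDelta;
      dataAvailable.notifyAll(); // unblock producers waiting at the cap
    }
  }
}

Note the wait inside a loop: the condition is re-checked after every wakeup, the standard guard against spurious wakeups. (In appendEntry the loop also exits early if an IO thread has recorded an error in thrown.)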

Example 3: getChunkToWrite

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
synchronized RegionEntryBuffer getChunkToWrite() {
  long biggestSize = 0;
  byte[] biggestBufferKey = null;

  for (Map.Entry<byte[], RegionEntryBuffer> entry : buffers.entrySet()) {
    long size = entry.getValue().heapSize();
    if (size > biggestSize && !currentlyWriting.contains(entry.getKey())) {
      biggestSize = size;
      biggestBufferKey = entry.getKey();
    }
  }
  if (biggestBufferKey == null) {
    return null;
  }

  RegionEntryBuffer buffer = buffers.remove(biggestBufferKey);
  currentlyWriting.add(biggestBufferKey);
  return buffer;
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 20, Source file: HLogSplitter.java
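
Design note: picking the largest buffer that is not currently being written maximizes the batch handed to each writer pass, while the currentlyWriting set guarantees that at most one thread flushes a given region at a time. A companion method (not shown here) presumably removes the key from currentlyWriting once the flush completes, making the region eligible again.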

Example 4: createWAP

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
private WriterAndPath createWAP(byte[] region, Entry entry, Path rootdir,
    FileSystem fs, Configuration conf) throws IOException {
  Path regionedits = getRegionSplitEditsPath(fs, entry, rootdir, true);
  if (regionedits == null) {
    return null;
  }
  if (fs.exists(regionedits)) {
    LOG.warn("Found existing old edits file. It could be the "
        + "result of a previous failed split attempt. Deleting "
        + regionedits + ", length="
        + fs.getFileStatus(regionedits).getLen());
    if (!HBaseFileSystem.deleteFileFromFileSystem(fs, regionedits)) {
      LOG.warn("Failed delete of old " + regionedits);
    }
  }
  Writer w = createWriter(fs, regionedits, conf);
  LOG.debug("Creating writer path=" + regionedits + " region="
      + Bytes.toStringBinary(region));
  return (new WriterAndPath(regionedits, w));
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 22, Source file: HLogSplitter.java
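
Design note: deleting a pre-existing edits file before opening a new writer makes the split idempotent; a re-run after a crashed attempt overwrites the stale partial output instead of appending to it. HBaseFileSystem.deleteFileFromFileSystem is a 0.94-era wrapper specific to this fork; the variant in Example 13 calls fs.delete directly.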

Example 5: getWriterAndPath

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
/**
 * Get a writer and path for a log starting at the given entry.
 *
 * This function is threadsafe so long as multiple threads are always
 * acting on different regions.
 *
 * @return null if this region shouldn't output any logs
 */
WriterAndPath getWriterAndPath(Entry entry) throws IOException {
  byte region[] = entry.getKey().getEncodedRegionName();
  WriterAndPath ret = logWriters.get(region);
  if (ret != null) {
    return ret;
  }
  // If we already decided that this region doesn't get any output
  // we don't need to check again.
  if (blacklistedRegions.contains(region)) {
    return null;
  }
  ret = createWAP(region, entry, rootDir, fs, conf);
  if (ret == null) {
    blacklistedRegions.add(region);
    return null;
  }
  logWriters.put(region, ret);
  return ret;
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 28, Source file: HLogSplitter.java
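
blacklistedRegions acts as a negative cache: once createWAP returns null for a region, the miss itself is memoized so the path computation is never retried. (Since the keys are raw byte[], the backing collections in the HBase source are typically TreeMap/TreeSet built with Bytes.BYTES_COMPARATOR; a plain HashMap would compare arrays by identity.) A generic sketch of the same pattern, with illustrative names:

import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Illustrative negative cache: memoize hits and definitive misses alike.
// Not synchronized; like getWriterAndPath, safe only if callers partition keys.
abstract class NegativeCache<K, V> {
  private final Map<K, V> hits = new HashMap<K, V>();
  private final Set<K> misses = new HashSet<K>();

  /** Expensive factory; returns null to mean "no value for this key". */
  protected abstract V create(K key);

  V get(K key) {
    V v = hits.get(key);
    if (v != null) return v;
    if (misses.contains(key)) return null; // known miss: skip the factory
    v = create(key);
    if (v == null) { misses.add(key); return null; }
    hits.put(key, v);
    return v;
  }
}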

Example 6: transformFile

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
private static void transformFile(Path input, Path output)
    throws IOException {
  SequenceFileLogReader in = new SequenceFileLogReader();
  SequenceFileLogWriter out = new SequenceFileLogWriter();

  try {
    Configuration conf = HBaseConfiguration.create();

    FileSystem inFS = input.getFileSystem(conf);
    FileSystem outFS = output.getFileSystem(conf);

    in.init(inFS, input, conf);
    boolean compress = in.reader.isWALCompressionEnabled();

    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, !compress);
    out.init(outFS, output, conf);

    Entry e = null;
    while ((e = in.next()) != null) out.append(e);
  } finally {
    in.close();
    out.close();
  }
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 25, Source file: Compressor.java
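
Note the toggle on the conf line above: the writer is initialized with the opposite of the input's compression flag, so the same routine both compresses and decompresses a WAL. A hypothetical driver might look like this (the argument handling is an assumption, not the actual Compressor entry point):

// Hypothetical entry point: flip WAL compression on a single log file.
public static void main(String[] args) throws IOException {
  if (args.length != 2) {
    System.err.println("Usage: Compressor <input-wal> <output-wal>");
    System.exit(1);
  }
  transformFile(new Path(args[0]), new Path(args[1]));
}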

Example 7: testRecoveredEditsPathForMeta

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
/**
 * @throws IOException
 * @see https://issues.apache.org/jira/browse/HBASE-3020
 */
@Test 
public void testRecoveredEditsPathForMeta() throws IOException {
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  byte [] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
  Path tdir = new Path(hbaseDir, Bytes.toString(HConstants.META_TABLE_NAME));
  Path regiondir = new Path(tdir,
      HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
  fs.mkdirs(regiondir);
  long now = System.currentTimeMillis();
  HLog.Entry entry =
      new HLog.Entry(new HLogKey(encoded, HConstants.META_TABLE_NAME, 1, now,
          HConstants.DEFAULT_CLUSTER_ID), new WALEdit());
  Path p = HLogSplitter.getRegionSplitEditsPath(fs, entry, hbaseDir, true);
  String parentOfParent = p.getParent().getParent().getName();
  assertEquals(parentOfParent, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 22, Source file: TestHLogSplit.java
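
In other words: getRegionSplitEditsPath must place recovered edits for META under <hbaseDir>/.META./<encoded-region-name>/recovered.edits/, so the test checks that the grandparent directory of the returned file is the region directory named with the encoded region name.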

Example 8: testOldRecoveredEditsFileSidelined

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
/**
 * Test that an old recovered.edits file doesn't break HLogSplitter.
 * This is useful when upgrading old instances.
 */
@Test
public void testOldRecoveredEditsFileSidelined() throws IOException {
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  byte [] encoded = HRegionInfo.FIRST_META_REGIONINFO.getEncodedNameAsBytes();
  Path tdir = new Path(hbaseDir, Bytes.toString(HConstants.META_TABLE_NAME));
  Path regiondir = new Path(tdir,
      HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
  fs.mkdirs(regiondir);
  long now = System.currentTimeMillis();
  HLog.Entry entry =
      new HLog.Entry(new HLogKey(encoded, HConstants.META_TABLE_NAME, 1, now,
          HConstants.DEFAULT_CLUSTER_ID), new WALEdit());
  Path parent = HLog.getRegionDirRecoveredEditsDir(regiondir);
  assertEquals(parent.getName(), HLog.RECOVERED_EDITS_DIR);
  fs.createNewFile(parent); // create a recovered.edits file

  Path p = HLogSplitter.getRegionSplitEditsPath(fs, entry, hbaseDir, true);
  String parentOfParent = p.getParent().getParent().getName();
  assertEquals(parentOfParent, HRegionInfo.FIRST_META_REGIONINFO.getEncodedName());
  HLog.createWriter(fs, p, conf).close();
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 27, Source file: TestHLogSplit.java

Example 9: verify

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
/**
 * Verify the content of the WAL file.
 * Verify that sequence ids are ascending and that the file has the expected
 * number of edits.
 * @param wal the WAL file to read
 * @param verbose if true, log each sequence id
 * @return count of edits
 * @throws IOException
 */
private long verify(final Path wal, final boolean verbose) throws IOException {
  HLog.Reader reader = HLog.getReader(wal.getFileSystem(getConf()), wal, getConf());
  long previousSeqid = -1;
  long count = 0;
  try {
    while (true) {
      Entry e = reader.next();
      if (e == null) break;
      count++;
      long seqid = e.getKey().getLogSeqNum();
      if (verbose) LOG.info("seqid=" + seqid);
      if (previousSeqid >= seqid) {
        throw new IllegalStateException("wal=" + wal.getName() +
          ", previousSeqid=" + previousSeqid + ", seqid=" + seqid);
      }
      previousSeqid = seqid;
    }
  } finally {
    reader.close();
  }
  return count;
}
 
Developer ID: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 31, Source file: HLogPerformanceEvaluation.java
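
A tool might call this over every log in a directory; the wrapper below is a sketch under that assumption (verifyAll is an illustrative name, not part of HLogPerformanceEvaluation):

// Hypothetical wrapper: verify every WAL file under a directory.
private long verifyAll(FileSystem fs, Path walDir) throws IOException {
  long total = 0;
  for (FileStatus st : fs.listStatus(walDir)) {
    if (!st.isDir()) {
      total += verify(st.getPath(), false); // throws on out-of-order seqids
    }
  }
  return total;
}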

Example 10: appendEntry

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
/**
 * Append a log entry into the corresponding region buffer.
 * Blocks if the total heap usage has crossed the specified threshold.
 *
 * @throws InterruptedException
 * @throws IOException
 */
void appendEntry(Entry entry) throws InterruptedException, IOException {
  HLogKey key = entry.getKey();

  RegionEntryBuffer buffer;
  long incrHeap;
  synchronized (this) {
    buffer = buffers.get(key.getEncodedRegionName());
    if (buffer == null) {
      buffer = new RegionEntryBuffer(key.getTablename(), key.getEncodedRegionName());
      buffers.put(key.getEncodedRegionName(), buffer);
    }
    incrHeap = buffer.appendEntry(entry);
  }

  // If we crossed the chunk threshold, wait for more space to be available
  synchronized (dataAvailable) {
    totalBuffered += incrHeap;
    while (totalBuffered > maxHeapUsage && thrown.get() == null) {
      LOG.debug("Used " + totalBuffered + " bytes of buffered edits, waiting for IO threads...");
      dataAvailable.wait(2000);
    }
    dataAvailable.notifyAll();
  }
  checkForErrors();
}
 
Developer ID: tenggyut, Project: HIndex, Lines: 33, Source file: HLogSplitter.java

Example 11: getChunkToWrite

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
/**
 * @return RegionEntryBuffer a buffer of edits to be written or replayed.
 */
synchronized RegionEntryBuffer getChunkToWrite() {
  long biggestSize = 0;
  byte[] biggestBufferKey = null;

  for (Map.Entry<byte[], RegionEntryBuffer> entry : buffers.entrySet()) {
    long size = entry.getValue().heapSize();
    if (size > biggestSize && (!currentlyWriting.contains(entry.getKey()))) {
      biggestSize = size;
      biggestBufferKey = entry.getKey();
    }
  }
  if (biggestBufferKey == null) {
    return null;
  }

  RegionEntryBuffer buffer = buffers.remove(biggestBufferKey);
  currentlyWriting.add(biggestBufferKey);
  return buffer;
}
 
Developer ID: tenggyut, Project: HIndex, Lines: 23, Source file: HLogSplitter.java

Example 12: getWriterAndPath

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
/**
 * Get a writer and path for a log starting at the given entry. This function is threadsafe so
 * long as multiple threads are always acting on different regions.
 * @return null if this region shouldn't output any logs
 */
private WriterAndPath getWriterAndPath(Entry entry) throws IOException {
  byte region[] = entry.getKey().getEncodedRegionName();
  WriterAndPath ret = (WriterAndPath) writers.get(region);
  if (ret != null) {
    return ret;
  }
  // If we already decided that this region doesn't get any output
  // we don't need to check again.
  if (blacklistedRegions.contains(region)) {
    return null;
  }
  ret = createWAP(region, entry, rootDir, fs, conf);
  if (ret == null) {
    blacklistedRegions.add(region);
    return null;
  }
  writers.put(region, ret);
  return ret;
}
 
Developer ID: tenggyut, Project: HIndex, Lines: 25, Source file: HLogSplitter.java

Example 13: createWAP

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
private WriterAndPath createWAP(byte[] region, Entry entry, Path rootdir, FileSystem fs,
    Configuration conf) throws IOException {
  Path regionedits = getRegionSplitEditsPath(fs, entry, rootdir, true);
  if (regionedits == null) {
    return null;
  }
  if (fs.exists(regionedits)) {
    LOG.warn("Found old edits file. It could be the "
        + "result of a previous failed split attempt. Deleting " + regionedits + ", length="
        + fs.getFileStatus(regionedits).getLen());
    if (!fs.delete(regionedits, false)) {
      LOG.warn("Failed delete of old " + regionedits);
    }
  }
  Writer w = createWriter(fs, regionedits, conf);
  LOG.debug("Creating writer path=" + regionedits + " region=" + Bytes.toStringBinary(region));
  return (new WriterAndPath(regionedits, w));
}
 
Developer ID: tenggyut, Project: HIndex, Lines: 19, Source file: HLogSplitter.java

Example 14: processWorkItems

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
private void processWorkItems(String key, List<Pair<HRegionLocation, HLog.Entry>> actions)
    throws IOException {
  RegionServerWriter rsw = null;

  long startTime = System.nanoTime();
  try {
    rsw = getRegionServerWriter(key);
    rsw.sink.replayEntries(actions);

    // Pass along summary statistics
    rsw.incrementEdits(actions.size());
    rsw.incrementNanoTime(System.nanoTime() - startTime);
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.fatal("Got exception while writing log entry to log", e);
    throw e;
  }
}
 
Developer ID: tenggyut, Project: HIndex, Lines: 19, Source file: HLogSplitter.java
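
Timing note: System.nanoTime() is the right clock for the latency accounting here because it is monotonic; System.currentTimeMillis() can jump backwards or forwards with wall-clock adjustments, which would corrupt the accumulated statistics.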

Example 15: flush

import org.apache.hadoop.hbase.regionserver.wal.HLog.Entry; // import the required package/class
@Override
protected boolean flush() throws IOException {
  String curLoc = null;
  int curSize = 0;
  List<Pair<HRegionLocation, HLog.Entry>> curQueue = null;
  synchronized (this.serverToBufferQueueMap) {
    for (String locationKey : this.serverToBufferQueueMap.keySet()) {
      curQueue = this.serverToBufferQueueMap.get(locationKey);
      if (!curQueue.isEmpty()) {
        curSize = curQueue.size();
        curLoc = locationKey;
        break;
      }
    }
    if (curSize > 0) {
      this.serverToBufferQueueMap.remove(curLoc);
    }
  }

  if (curSize > 0) {
    this.processWorkItems(curLoc, curQueue);
    dataAvailable.notifyAll();
    return true;
  }
  return false;
}
 
Developer ID: tenggyut, Project: HIndex, Lines: 27, Source file: HLogSplitter.java
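
Concurrency note: flush copies the chosen queue reference out while holding serverToBufferQueueMap's lock, then runs the expensive processWorkItems outside it, keeping the critical section short. The bare dataAvailable.notifyAll() is only legal if the caller already holds dataAvailable's monitor, so flush is presumably invoked from a writer loop synchronized on dataAvailable; otherwise it would throw IllegalMonitorStateException.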


Note: The org.apache.hadoop.hbase.regionserver.wal.HLog.Entry class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers; copyright belongs to the original authors. Consult the corresponding project's License before distributing or using the code. Do not reproduce without permission.