

Java WAL.Entry Code Examples

This article collects typical usage examples of org.apache.hadoop.hbase.wal.WAL.Entry in Java. A WAL.Entry pairs a WALKey with a WALEdit and represents a single record in an HBase write-ahead log. If you are wondering what WAL.Entry is for, how to use it, or what real-world usage looks like, the curated examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.wal.WAL.


The following 15 code examples of WAL.Entry are shown, sorted by popularity.
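All fifteen examples revolve around one core pattern: open a WAL.Reader on a log file and pull WAL.Entry objects until next() returns null. As a warm-up, here is a minimal, self-contained sketch of just that loop, distilled from the examples below; the WAL path comes from the command line and is a placeholder, and error handling is reduced to a try/finally.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.wal.WAL;
import org.apache.hadoop.hbase.wal.WALFactory;

public class WalReadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    Path walPath = new Path(args[0]); // path to an existing WAL file (placeholder)
    FileSystem fs = walPath.getFileSystem(conf);

    WAL.Reader reader = WALFactory.createReader(fs, walPath, conf);
    try {
      WAL.Entry entry;
      while ((entry = reader.next()) != null) { // null signals end of file
        // Each entry pairs a WALKey (region, table, sequence id) with a WALEdit (cells).
        System.out.println(entry.getKey() + ": " + entry.getEdit().size() + " cell(s)");
      }
    } finally {
      reader.close();
    }
  }
}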

Example 1: testReplayingFlushRequestRestoresReadsEnabledState

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
/**
 * Test the case where the secondary region replica is not in reads-enabled state because it is
 * waiting for a flush or region open marker from the primary region. Replaying a CANNOT_FLUSH
 * flush marker entry should restore the reads-enabled status in the region and allow reads
 * to continue.
 */
@Test
public void testReplayingFlushRequestRestoresReadsEnabledState() throws IOException {
  disableReads(secondaryRegion);

  // Test case 1: Test that replaying CANNOT_FLUSH request marker assuming this came from
  // triggered flush restores readsEnabled
  primaryRegion.flushcache(true, true);
  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      secondaryRegion.replayWALFlushMarker(flush, entry.getKey().getLogSeqNum());
    }
  }

  // now reads should be enabled
  secondaryRegion.get(new Get(Bytes.toBytes(0)));
}
 
Author: fengchen8086, Project: ditb, Lines: 29, Source: TestHRegionReplayEvents.java

Example 2: transformFile

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
private static void transformFile(Path input, Path output)
    throws IOException {
  Configuration conf = HBaseConfiguration.create();

  FileSystem inFS = input.getFileSystem(conf);
  FileSystem outFS = output.getFileSystem(conf);

  WAL.Reader in = WALFactory.createReaderIgnoreCustomClass(inFS, input, conf);
  WALProvider.Writer out = null;

  try {
    if (!(in instanceof ReaderBase)) {
      System.err.println("Cannot proceed, invalid reader type: " + in.getClass().getName());
      return;
    }
    boolean compress = ((ReaderBase)in).hasCompression();
    conf.setBoolean(HConstants.ENABLE_WAL_COMPRESSION, !compress);
    out = WALFactory.createWALWriter(outFS, output, conf);

    WAL.Entry e = null;
    while ((e = in.next()) != null) out.append(e);
  } finally {
    in.close();
    if (out != null) {
      out.close();
      out = null;
    }
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 30, Source: Compressor.java
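The transformFile method above has no entry point in this excerpt. A hypothetical command-line driver for it might look like the following; the usage string and argument handling are assumptions, not part of the original snippet.

// Hypothetical driver for transformFile (argument handling is an assumption).
public static void main(String[] args) throws IOException {
  if (args.length != 2) {
    System.err.println("Usage: Compressor <input WAL path> <output WAL path>");
    System.exit(1);
  }
  transformFile(new Path(args[0]), new Path(args[1]));
}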

Example 3: testLogMoving

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
/**
 * Sanity check that we can move logs around while we are reading
 * from them. Should this test fail, ReplicationSource would have a hard
 * time reading logs that are being archived.
 * @throws Exception
 */
@Test
public void testLogMoving() throws Exception {
  Path logPath = new Path(logDir, "log");
  if (!FS.exists(logDir)) FS.mkdirs(logDir);
  if (!FS.exists(oldLogDir)) FS.mkdirs(oldLogDir);
  WALProvider.Writer writer = WALFactory.createWALWriter(FS, logPath,
      TEST_UTIL.getConfiguration());
  for (int i = 0; i < 3; i++) {
    byte[] b = Bytes.toBytes(Integer.toString(i));
    KeyValue kv = new KeyValue(b, b, b);
    WALEdit edit = new WALEdit();
    edit.add(kv);
    WALKey key = new WALKey(b, TableName.valueOf(b), 0, 0,
        HConstants.DEFAULT_CLUSTER_ID);
    writer.append(new WAL.Entry(key, edit));
    writer.sync();
  }
  writer.close();

  WAL.Reader reader = WALFactory.createReader(FS, logPath, TEST_UTIL.getConfiguration());
  WAL.Entry entry = reader.next();
  assertNotNull(entry);

  Path oldLogPath = new Path(oldLogDir, "log");
  FS.rename(logPath, oldLogPath);

  entry = reader.next();
  assertNotNull(entry);

  entry = reader.next();
  entry = reader.next();

  assertNull(entry);
  reader.close();
}
 
Author: fengchen8086, Project: ditb, Lines: 42, Source: TestReplicationSource.java

Example 4: append

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
@Override
public void append(WAL.Entry entry) throws IOException {
  entry.setCompressionContext(compressionContext);
  try {
    this.writer.append(entry.getKey(), entry.getEdit());
  } catch (NullPointerException npe) {
    // Concurrent close...
    throw new IOException(npe);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 11, Source: SequenceFileLogWriter.java

Example 5: createAEntry

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
/**
 * Creates a WALEdit for the passed KeyValues and returns a WAL.Entry instance composed of
 * the WALEdit and the passed WALKey.
 * @return WAL.Entry instance for the passed WALKey and KeyValues
 */
private WAL.Entry createAEntry(WALKey walKey, List<KeyValue> kvs) {
  WALEdit edit = new WALEdit();
  for (KeyValue kv : kvs) {
    edit.add(kv);
  }
  return new WAL.Entry(walKey, edit);
}
 
Author: fengchen8086, Project: ditb, Lines: 12, Source: TestReadOldRootAndMetaEdits.java
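A sketch of how createAEntry might be invoked, reusing the KeyValue and WALKey construction style from Example 3; the byte values are placeholders and the surrounding test class's imports are assumed.

// Hypothetical usage of createAEntry (byte values are placeholders).
byte[] b = Bytes.toBytes("row");
List<KeyValue> kvs = new ArrayList<KeyValue>();
kvs.add(new KeyValue(b, b, b));
WALKey walKey = new WALKey(b, TableName.valueOf(b), 0, 0, HConstants.DEFAULT_CLUSTER_ID);
WAL.Entry entry = createAEntry(walKey, kvs);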

Example 6: verifyWALCount

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
private void verifyWALCount(WALFactory wals, WAL log, int expected) throws Exception {
  Path walPath = DefaultWALProvider.getCurrentFileName(log);
  WAL.Reader reader = wals.createReader(FS, walPath);
  int count = 0;
  WAL.Entry entry = new WAL.Entry();
  while (reader.next(entry) != null) count++;
  reader.close();
  assertEquals(expected, count);
}
 
Author: fengchen8086, Project: ditb, Lines: 10, Source: TestDurability.java
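Note the reader API used in verifyWALCount: most examples on this page allocate a fresh entry per record via reader.next(), while this one passes a preallocated WAL.Entry to reader.next(entry), which refills the same object on each call and avoids per-record allocation. A minimal side-by-side sketch, assuming an already open WAL.Reader and the usual imports (java.io.IOException plus the WAL import above):

// Allocating variant: a new Entry object per record.
static int countAllocating(WAL.Reader reader) throws IOException {
  int count = 0;
  while (reader.next() != null) count++;
  return count;
}

// Reusing variant: one Entry is refilled on every call.
static int countReusing(WAL.Reader reader) throws IOException {
  int count = 0;
  WAL.Entry reuse = new WAL.Entry();
  while (reader.next(reuse) != null) count++;
  return count;
}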

Example 7: testOnlyReplayingFlushStartDoesNotHoldUpRegionClose

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
/**
 * Tests a case where we replay only a flush start marker and then the region is closed. The
 * region close should not block indefinitely.
 */
@Test (timeout = 60000)
public void testOnlyReplayingFlushStartDoesNotHoldUpRegionClose() throws IOException {
  // load some data to primary and flush
  int start = 0;
  LOG.info("-- Writing some data to primary from " +  start + " to " + (start+100));
  putData(primaryRegion, Durability.SYNC_WAL, start, 100, cq, families);
  LOG.info("-- Flushing primary, creating 3 files for 3 stores");
  primaryRegion.flush(true);

  // now replay the edits and the flush marker
  reader = createWALReaderForPrimary();

  LOG.info("-- Replaying edits and flush events in secondary");
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flushDesc
      = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      if (flushDesc.getAction() == FlushAction.START_FLUSH) {
        LOG.info("-- Replaying flush start in secondary");
        PrepareFlushResult result = secondaryRegion.replayWALFlushStartMarker(flushDesc);
      } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
        LOG.info("-- NOT Replaying flush commit in secondary");
      }
    } else {
      replayEdit(secondaryRegion, entry);
    }
  }

  assertTrue(rss.getRegionServerAccounting().getGlobalMemstoreSize() > 0);
  // now close the region; the uncommitted flush should not hold up the close
  secondaryRegion.close();

  // verify that the memstore size is back to zero
  assertEquals(0, rss.getRegionServerAccounting().getGlobalMemstoreSize());
}
 
Author: fengchen8086, Project: ditb, Lines: 44, Source: TestHRegionReplayEvents.java

Example 8: replayEdit

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
static int replayEdit(HRegion region, WAL.Entry entry) throws IOException {
  if (WALEdit.isMetaEditFamily(entry.getEdit().getCells().get(0))) {
    return 0; // handled elsewhere
  }
  Put put = new Put(entry.getEdit().getCells().get(0).getRow());
  for (Cell cell : entry.getEdit().getCells()) put.add(cell);
  put.setDurability(Durability.SKIP_WAL);
  MutationReplay mutation = new MutationReplay(MutationType.PUT, put, 0, 0);
  region.batchReplay(new MutationReplay[] {mutation},
    entry.getKey().getLogSeqNum());
  return Integer.parseInt(Bytes.toString(put.getRow()));
}
 
Author: fengchen8086, Project: ditb, Lines: 13, Source: TestHRegionReplayEvents.java

Example 9: testWriteFlushRequestMarker

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
/**
 * Tests the case where a flush request is sent to the region but the region cannot flush. It
 * should write the flush request marker instead.
 */
@Test
public void testWriteFlushRequestMarker() throws IOException {
  // primary region is empty at this point. Request a flush with writeFlushRequestWalMarker=false
  FlushResultImpl result = (FlushResultImpl)((HRegion)primaryRegion).flushcache(true, false);
  assertNotNull(result);
  assertEquals(result.result, FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY);
  assertFalse(result.wroteFlushWalMarker);

  // request flush again, but this time with writeFlushRequestWalMarker = true
  result = (FlushResultImpl)((HRegion)primaryRegion).flushcache(true, true);
  assertNotNull(result);
  assertEquals(result.result, FlushResultImpl.Result.CANNOT_FLUSH_MEMSTORE_EMPTY);
  assertTrue(result.wroteFlushWalMarker);

  List<FlushDescriptor> flushes = Lists.newArrayList();
  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flush = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flush != null) {
      flushes.add(flush);
    }
  }

  assertEquals(1, flushes.size());
  assertNotNull(flushes.get(0));
  assertEquals(FlushDescriptor.FlushAction.CANNOT_FLUSH, flushes.get(0).getAction());
}
 
Author: fengchen8086, Project: ditb, Lines: 36, Source: TestHRegionReplayEvents.java

Example 10: testReplayingRegionOpenEventRestoresReadsEnabledState

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
/**
 * Test the case where the secondary region replica is not in reads-enabled state because it is
 * waiting for a flush or region open marker from the primary region. Replaying a region open
 * event entry from the primary should restore the reads-enabled status in the region and allow
 * reads to continue.
 */
@Test
public void testReplayingRegionOpenEventRestoresReadsEnabledState() throws IOException {
  // Test case 3: Test that replaying region open event markers restores readsEnabled
  disableReads(secondaryRegion);

  primaryRegion.close();
  primaryRegion = HRegion.openHRegion(rootDir, primaryHri, htd, walPrimary, CONF, rss, null);

  reader = createWALReaderForPrimary();
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }

    RegionEventDescriptor regionEventDesc
      = WALEdit.getRegionEventDescriptor(entry.getEdit().getCells().get(0));

    if (regionEventDesc != null) {
      secondaryRegion.replayWALRegionEventMarker(regionEventDesc);
    }
  }

  // now reads should be enabled
  secondaryRegion.get(new Get(Bytes.toBytes(0)));
}
 
Author: fengchen8086, Project: ditb, Lines: 33, Source: TestHRegionReplayEvents.java

Example 11: readAllEntriesToReplicateOrNextFile

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
/**
 * Read all the entries from the current log file and retain those that need to be replicated;
 * otherwise, process the end of the current file.
 * @param currentWALisBeingWrittenTo whether the current WAL is still being written to
 * @param entries resulting entries to be replicated
 * @return true if we got nothing and went to the next file, false if we got entries
 * @throws IOException
 */
protected boolean readAllEntriesToReplicateOrNextFile(boolean currentWALisBeingWrittenTo,
    List<WAL.Entry> entries) throws IOException {
  long seenEntries = 0;
  if (LOG.isTraceEnabled()) {
    LOG.trace("Seeking in " + this.currentPath + " at position "
        + this.repLogReader.getPosition());
  }
  this.repLogReader.seek();
  long positionBeforeRead = this.repLogReader.getPosition();
  WAL.Entry entry = this.repLogReader.readNextAndSetPosition();
  while (entry != null) {
    metrics.incrLogEditsRead();
    seenEntries++;

    // don't replicate if the log entries have already been consumed by the cluster
    if (replicationEndpoint.canReplicateToSameCluster()
        || !entry.getKey().getClusterIds().contains(peerClusterId)) {
      // Remove all KVs that should not be replicated
      entry = walEntryFilter.filter(entry);
      WALEdit edit = null;
      WALKey logKey = null;
      if (entry != null) {
        edit = entry.getEdit();
        logKey = entry.getKey();
      }

      if (edit != null && edit.size() != 0) {
        // Mark that the current cluster has the change
        logKey.addClusterId(clusterId);
        currentNbOperations += countDistinctRowKeys(edit);
        entries.add(entry);
        currentSize += entry.getEdit().heapSize();
      } else {
        metrics.incrLogEditsFiltered();
      }
    }
    // Stop if too many entries or too big
    // FIXME check the relationship between single wal group and overall
    if (currentSize >= replicationQueueSizeCapacity
        || entries.size() >= replicationQueueNbCapacity) {
      break;
    }
    try {
      entry = this.repLogReader.readNextAndSetPosition();
    } catch (IOException ie) {
      LOG.debug("Break on IOE: " + ie.getMessage());
      break;
    }
  }
  metrics.incrLogReadInBytes(this.repLogReader.getPosition() - positionBeforeRead);
  if (currentWALisBeingWrittenTo) {
    return false;
  }
  // If we didn't get anything and the queue has an object, it means we
  // hit the end of the file for sure
  return seenEntries == 0 && processEndOfFile();
}
 
Author: fengchen8086, Project: ditb, Lines: 66, Source: ReplicationSource.java
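The walEntryFilter.filter(entry) call above follows HBase's WALEntryFilter contract: it returns the (possibly trimmed) entry, or null to drop it entirely, which is why the code re-checks entry for null after filtering. Below is a minimal sketch of such a filter that drops every entry belonging to one table; the table name is a placeholder, and real filters often also prune individual cells from the WALEdit.

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.replication.WALEntryFilter;
import org.apache.hadoop.hbase.wal.WAL;

// A minimal WALEntryFilter sketch: drop every entry for one (placeholder) table.
public class SkipTableWALEntryFilter implements WALEntryFilter {
  private final TableName skipped = TableName.valueOf("table_to_skip");

  @Override
  public WAL.Entry filter(WAL.Entry entry) {
    if (skipped.equals(entry.getKey().getTablename())) {
      return null; // null tells the caller to drop the entry entirely
    }
    return entry; // anything else passes through unchanged
  }
}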

Example 12: shipEdits

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
/**
 * Do the shipping logic.
 * @param currentWALisBeingWrittenTo whether the current WAL was (seemingly) being written to
 *          when this method was called
 * @param entries the entries to ship
 */
protected void shipEdits(boolean currentWALisBeingWrittenTo, List<WAL.Entry> entries) {
  int sleepMultiplier = 0;
  if (entries.isEmpty()) {
    LOG.warn("Was given 0 edits to ship");
    return;
  }
  while (isWorkerActive()) {
    try {
      if (throttler.isEnabled()) {
        long sleepTicks = throttler.getNextSleepInterval(currentSize);
        if (sleepTicks > 0) {
          try {
            if (LOG.isTraceEnabled()) {
              LOG.trace("To sleep " + sleepTicks + "ms for throttling control");
            }
            Thread.sleep(sleepTicks);
          } catch (InterruptedException e) {
            LOG.debug("Interrupted while sleeping for throttling control");
            Thread.currentThread().interrupt();
            // the current thread might have been interrupted to terminate;
            // go back to the while() condition to confirm this
            continue;
          }
          // reset throttler's cycle start tick when sleep for throttling occurs
          throttler.resetStartTick();
        }
      }
      // create replicateContext here, so the entries can be GC'd upon return from this call
      // stack
      ReplicationEndpoint.ReplicateContext replicateContext =
          new ReplicationEndpoint.ReplicateContext();
      replicateContext.setEntries(entries).setSize(currentSize);
      replicateContext.setWalGroupId(walGroupId);

      long startTimeNs = System.nanoTime();
      // send the edits to the endpoint. Will block until the edits are shipped and acknowledged
      boolean replicated = replicationEndpoint.replicate(replicateContext);
      long endTimeNs = System.nanoTime();

      if (!replicated) {
        continue;
      } else {
        sleepMultiplier = Math.max(sleepMultiplier - 1, 0);
      }

      if (this.lastLoggedPosition != this.repLogReader.getPosition()) {
        manager.logPositionAndCleanOldLogs(this.currentPath, peerClusterZnode,
          this.repLogReader.getPosition(), this.replicationQueueInfo.isQueueRecovered(),
          currentWALisBeingWrittenTo);
        this.lastLoggedPosition = this.repLogReader.getPosition();
      }
      if (throttler.isEnabled()) {
        throttler.addPushSize(currentSize);
      }
      totalReplicatedEdits.addAndGet(entries.size());
      totalReplicatedOperations.addAndGet(currentNbOperations);
      // FIXME check relationship between wal group and overall
      metrics.shipBatch(currentNbOperations, currentSize / 1024);
      metrics.setAgeOfLastShippedOp(entries.get(entries.size() - 1).getKey().getWriteTime(),
        walGroupId);
      if (LOG.isTraceEnabled()) {
        LOG.trace("Replicated " + totalReplicatedEdits + " entries in total, or "
            + totalReplicatedOperations + " operations in "
            + ((endTimeNs - startTimeNs) / 1000000) + " ms");
      }
      break;
    } catch (Exception ex) {
      LOG.warn(replicationEndpoint.getClass().getName() + " threw unknown exception:"
          + org.apache.hadoop.util.StringUtils.stringifyException(ex));
      if (sleepForRetries("ReplicationEndpoint threw exception", sleepMultiplier)) {
        sleepMultiplier++;
      }
    }
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 81, Source: ReplicationSource.java
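Stripped of throttling and metrics, shipEdits boils down to a retry loop with a backoff multiplier: retry the same batch until the endpoint acknowledges it, escalating the sleep after each thrown exception and easing off after a success. The self-contained sketch below shows just that skeleton; replicateOnce(), sleepForRetries(), and BASE_SLEEP_MS are hypothetical stand-ins for replicationEndpoint.replicate(...) and the configurable retry sleep.

import java.util.concurrent.ThreadLocalRandom;

public class ShipRetrySketch {
  static final long BASE_SLEEP_MS = 100; // assumed value; the real sleep comes from configuration

  // Stand-in for the blocking endpoint call; fails randomly to exercise the retry path.
  static boolean replicateOnce() {
    return ThreadLocalRandom.current().nextInt(4) != 0;
  }

  static boolean sleepForRetries(int sleepMultiplier) {
    try {
      Thread.sleep(BASE_SLEEP_MS * (sleepMultiplier + 1)); // sleep grows with the multiplier
      return true;
    } catch (InterruptedException e) {
      Thread.currentThread().interrupt();
      return false; // interrupted: do not escalate the backoff
    }
  }

  public static void main(String[] args) {
    int sleepMultiplier = 0;
    while (true) {
      try {
        if (replicateOnce()) {
          sleepMultiplier = Math.max(sleepMultiplier - 1, 0); // ease off after a success
          System.out.println("batch shipped and acknowledged");
          break; // hand control back to the read loop
        }
        // endpoint returned false: loop around and retry the same batch
      } catch (RuntimeException ex) {
        if (sleepForRetries(sleepMultiplier)) {
          sleepMultiplier++; // escalate the backoff after each failure
        }
      }
    }
  }
}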

Example 13: test

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
@Test
public void test() throws Exception {
  // Grab the path that was generated when the log rolled as part of its creation
  Path path = pathWatcher.currentPath;

  assertEquals(0, logManager.getPosition());

  appendToLog();

  // There's one edit in the log, read it. Reading past it needs to return nulls
  assertNotNull(logManager.openReader(path));
  logManager.seek();
  WAL.Entry entry = logManager.readNextAndSetPosition();
  assertNotNull(entry);
  entry = logManager.readNextAndSetPosition();
  assertNull(entry);
  logManager.closeReader();
  long oldPos = logManager.getPosition();

  appendToLog();

  // Read the newly added entry, make sure we made progress
  assertNotNull(logManager.openReader(path));
  logManager.seek();
  entry = logManager.readNextAndSetPosition();
  assertNotEquals(oldPos, logManager.getPosition());
  assertNotNull(entry);
  logManager.closeReader();
  oldPos = logManager.getPosition();

  log.rollWriter();

  // We rolled but we still should see the end of the first log and not get data
  assertNotNull(logManager.openReader(path));
  logManager.seek();
  entry = logManager.readNextAndSetPosition();
  assertEquals(oldPos, logManager.getPosition());
  assertNull(entry);
  logManager.finishCurrentFile();

  path = pathWatcher.currentPath;

  for (int i = 0; i < nbRows; i++) { appendToLogPlus(walEditKVs); }
  log.rollWriter();
  logManager.openReader(path);
  logManager.seek();
  for (int i = 0; i < nbRows; i++) {
    WAL.Entry e = logManager.readNextAndSetPosition();
    if (e == null) {
      fail("Should have enough entries");
    }
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 54, Source: TestReplicationWALReaderManager.java

Example 14: verifyAllEditsMadeItIn

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
/**
 * @param fs filesystem the recovered-edits file lives on
 * @param conf configuration used to open the WAL reader
 * @param edits path to the recovered-edits file
 * @param region region the edits should have been applied to
 * @return how many edits were seen
 * @throws IOException
 */
private int verifyAllEditsMadeItIn(final FileSystem fs, final Configuration conf,
    final Path edits, final HRegion region)
throws IOException {
  int count = 0;
  // Based on HRegion#replayRecoveredEdits
  WAL.Reader reader = null;
  try {
    reader = WALFactory.createReader(fs, edits, conf);
    WAL.Entry entry;
    while ((entry = reader.next()) != null) {
      WALKey key = entry.getKey();
      WALEdit val = entry.getEdit();
      count++;
      // Check this edit is for this region.
      if (!Bytes.equals(key.getEncodedRegionName(),
          region.getRegionInfo().getEncodedNameAsBytes())) {
        continue;
      }
      Cell previous = null;
      for (Cell cell: val.getCells()) {
        if (CellUtil.matchingFamily(cell, WALEdit.METAFAMILY)) continue;
        if (previous != null && CellComparator.compareRows(previous, cell) == 0) continue;
        previous = cell;
        Get g = new Get(CellUtil.cloneRow(cell));
        Result r = region.get(g);
        boolean found = false;
        for (CellScanner scanner = r.cellScanner(); scanner.advance();) {
          Cell current = scanner.current();
          if (CellComparator.compare(cell, current, true) == 0) {
            found = true;
            break;
          }
        }
        assertTrue("Failed to find " + cell, found);
      }
    }
  } finally {
    if (reader != null) reader.close();
  }
  return count;
}
 
Author: fengchen8086, Project: ditb, Lines: 50, Source: TestRecoveredEdits.java

Example 15: testReplayFlushSeqIds

import org.apache.hadoop.hbase.wal.WAL; // the package/class this example depends on
@Test
public void testReplayFlushSeqIds() throws IOException {
  // load some data to primary and flush
  int start = 0;
  LOG.info("-- Writing some data to primary from " +  start + " to " + (start+100));
  putData(primaryRegion, Durability.SYNC_WAL, start, 100, cq, families);
  LOG.info("-- Flushing primary, creating 3 files for 3 stores");
  primaryRegion.flush(true);

  // now replay the flush marker
  reader = createWALReaderForPrimary();

  long flushSeqId = -1;
  LOG.info("-- Replaying flush events in secondary");
  while (true) {
    WAL.Entry entry = reader.next();
    if (entry == null) {
      break;
    }
    FlushDescriptor flushDesc
      = WALEdit.getFlushDescriptor(entry.getEdit().getCells().get(0));
    if (flushDesc != null) {
      if (flushDesc.getAction() == FlushAction.START_FLUSH) {
        LOG.info("-- Replaying flush start in secondary");
        secondaryRegion.replayWALFlushStartMarker(flushDesc);
        flushSeqId = flushDesc.getFlushSequenceNumber();
      } else if (flushDesc.getAction() == FlushAction.COMMIT_FLUSH) {
        LOG.info("-- Replaying flush commit in secondary");
        secondaryRegion.replayWALFlushCommitMarker(flushDesc);
        assertEquals(flushSeqId, flushDesc.getFlushSequenceNumber());
      }
    }
    // else do not replay
  }

  // TODO: what to do with this?
  // assert that the newly picked up flush file is visible
  long readPoint = secondaryRegion.getMVCC().getReadPoint();
  assertEquals(flushSeqId, readPoint);

  // after replay verify that everything is still visible
  verifyData(secondaryRegion, 0, 100, cq, families);
}
 
Author: fengchen8086, Project: ditb, Lines: 44, Source: TestHRegionReplayEvents.java


Note: the org.apache.hadoop.hbase.wal.WAL.Entry examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by the community; copyright in the source code remains with the original authors, and redistribution or use should follow the corresponding project's license. Do not reproduce without permission.