本文整理汇总了Java中org.apache.hadoop.hbase.regionserver.wal.FSHLog类的典型用法代码示例。如果您正苦于以下问题:Java FSHLog类的具体用法?Java FSHLog怎么用?Java FSHLog使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
FSHLog类属于org.apache.hadoop.hbase.regionserver.wal包,在下文中一共展示了FSHLog类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: getNumLogFiles
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
/**
* iff the given WALFactory is using the BoundedRegionGroupingProvider for meta and/or non-meta,
* count the number of files (rolled and active). if either of them isn't, count 0
* for that provider.
* @param walFactory may not be null.
*/
/**
 * Iff the given WALFactory is using the BoundedRegionGroupingProvider for meta and/or non-meta,
 * counts the number of WAL files (rolled and active). A provider of any other type
 * contributes 0 to the total.
 * @param walFactory may not be null.
 * @return combined file count across all grouped delegates of both providers
 */
public static long getNumLogFiles(WALFactory walFactory) {
  long total = 0;
  if (walFactory.provider instanceof BoundedRegionGroupingProvider) {
    BoundedRegionGroupingProvider grouped = (BoundedRegionGroupingProvider) walFactory.provider;
    // Each delegate is a DefaultWALProvider whose underlying WAL is an FSHLog.
    for (WALProvider delegate : grouped.delegates) {
      total += ((FSHLog) ((DefaultWALProvider) delegate).log).getNumLogFiles();
    }
  }
  WALProvider metaProvider = walFactory.metaProvider.get();
  if (metaProvider instanceof BoundedRegionGroupingProvider) {
    BoundedRegionGroupingProvider groupedMeta = (BoundedRegionGroupingProvider) metaProvider;
    for (WALProvider delegate : groupedMeta.delegates) {
      total += ((FSHLog) ((DefaultWALProvider) delegate).log).getNumLogFiles();
    }
  }
  return total;
}
示例2: getLogFileSize
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
/**
* iff the given WALFactory is using the BoundedRegionGroupingProvider for meta and/or non-meta,
* count the size of files (rolled and active). if either of them isn't, count 0
* for that provider.
* @param walFactory may not be null.
*/
/**
 * Iff the given WALFactory is using the BoundedRegionGroupingProvider for meta and/or non-meta,
 * counts the total size of WAL files (rolled and active). A provider of any other type
 * contributes 0 to the total.
 * @param walFactory may not be null.
 * @return combined file size across all grouped delegates of both providers
 */
public static long getLogFileSize(WALFactory walFactory) {
  long total = 0;
  if (walFactory.provider instanceof BoundedRegionGroupingProvider) {
    BoundedRegionGroupingProvider grouped = (BoundedRegionGroupingProvider) walFactory.provider;
    // Each delegate is a DefaultWALProvider whose underlying WAL is an FSHLog.
    for (WALProvider delegate : grouped.delegates) {
      total += ((FSHLog) ((DefaultWALProvider) delegate).log).getLogFileSize();
    }
  }
  WALProvider metaProvider = walFactory.metaProvider.get();
  if (metaProvider instanceof BoundedRegionGroupingProvider) {
    BoundedRegionGroupingProvider groupedMeta = (BoundedRegionGroupingProvider) metaProvider;
    for (WALProvider delegate : groupedMeta.delegates) {
      total += ((FSHLog) ((DefaultWALProvider) delegate).log).getLogFileSize();
    }
  }
  return total;
}
示例3: initialize
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
/**
* Setting up a Store
* @throws IOException with error
*/
/**
 * Setting up a Store: builds a table descriptor with a single "family" column,
 * creates a fresh WAL and region on the test filesystem, then re-opens the
 * region and wraps an HStore plus a temp test file around it.
 * @throws IOException with error
 */
protected void initialize() throws IOException {
Path basedir = new Path(DIR);
String logName = "logs";
Path logdir = new Path(DIR, logName);
HColumnDescriptor hcd = new HColumnDescriptor(Bytes.toBytes("family"));
FileSystem fs = FileSystem.get(conf);
// Remove any WAL directory left over from a previous run.
fs.delete(logdir, true);
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(Bytes.toBytes("table")));
htd.addFamily(hcd);
HRegionInfo info = new HRegionInfo(htd.getTableName(), null, null, false);
hlog = new FSHLog(fs, basedir, logName, conf);
// Chunk pool must exist before any memstore is created.
ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
// Create the region on disk first, close it, then re-open it directly from
// its table dir so the test controls the HRegion instance it works with.
region = HRegion.createHRegion(info, basedir, conf, htd, hlog);
region.close();
Path tableDir = FSUtils.getTableDir(basedir, htd.getTableName());
region = new HRegion(tableDir, hlog, fs, conf, info, htd, null);
store = new HStore(region, hcd, conf);
TEST_FILE = region.getRegionFileSystem().createTempName();
fs.createNewFile(TEST_FILE);
}
示例4: getNumLogFiles
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
/**
* iff the given WALFactory is using the DefaultWALProvider for meta and/or non-meta,
* count the number of files (rolled and active). if either of them aren't, count 0
* for that provider.
* @param walFactory may not be null.
*/
/**
 * Iff the given WALFactory is using the DefaultWALProvider for meta and/or non-meta,
 * counts the number of WAL files (rolled and active). A provider of any other type
 * contributes 0 to the total.
 * @param walFactory may not be null.
 * @return combined file count of the default and meta providers
 */
public static long getNumLogFiles(WALFactory walFactory) {
  long total = 0;
  final WALProvider provider = walFactory.provider;
  if (provider instanceof DefaultWALProvider) {
    total += ((FSHLog) ((DefaultWALProvider) provider).log).getNumLogFiles();
  }
  final WALProvider metaProvider = walFactory.metaProvider.get();
  if (metaProvider instanceof DefaultWALProvider) {
    total += ((FSHLog) ((DefaultWALProvider) metaProvider).log).getNumLogFiles();
  }
  return total;
}
示例5: getLogFileSize
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
/**
* iff the given WALFactory is using the DefaultWALProvider for meta and/or non-meta,
* count the size of files (rolled and active). if either of them aren't, count 0
* for that provider.
* @param walFactory may not be null.
*/
/**
 * Iff the given WALFactory is using the DefaultWALProvider for meta and/or non-meta,
 * counts the total size of WAL files (rolled and active). A provider of any other type
 * contributes 0 to the total.
 * @param walFactory may not be null.
 * @return combined file size of the default and meta providers
 */
public static long getLogFileSize(WALFactory walFactory) {
  long total = 0;
  final WALProvider provider = walFactory.provider;
  if (provider instanceof DefaultWALProvider) {
    total += ((FSHLog) ((DefaultWALProvider) provider).log).getLogFileSize();
  }
  final WALProvider metaProvider = walFactory.metaProvider.get();
  if (metaProvider instanceof DefaultWALProvider) {
    total += ((FSHLog) ((DefaultWALProvider) metaProvider).log).getLogFileSize();
  }
  return total;
}
示例6: extractFileNumFromWAL
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
/**
* It returns the file create timestamp from the file name.
* For name format see {@link #validateWALFilename(String)}
* public until remaining tests move to o.a.h.h.wal
* @param wal must not be null
* @return the file number that is part of the WAL file name
*/
@VisibleForTesting
public static long extractFileNumFromWAL(final WAL wal) {
final Path walName = ((FSHLog)wal).getCurrentFileName();
if (walName == null) {
throw new IllegalArgumentException("The WAL path couldn't be null");
}
final String[] walPathStrs = walName.toString().split("\\" + WAL_FILE_NAME_DELIMITER);
return Long.parseLong(walPathStrs[walPathStrs.length - (isMetaFile(walName) ? 2:1)]);
}
示例7: testMemstoreSizeWithFlushCanceling
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
/**
* Test for HBASE-14229: Flushing canceled by coprocessor still leads to memstoreSize set down
*/
@Test
public void testMemstoreSizeWithFlushCanceling() throws IOException {
FileSystem fs = FileSystem.get(CONF);
Path rootDir = new Path(dir + "testMemstoreSizeWithFlushCanceling");
FSHLog hLog = new FSHLog(fs, rootDir, "testMemstoreSizeWithFlushCanceling", CONF);
HRegion region = initHRegion(tableName, null, null, name.getMethodName(),
CONF, false, Durability.SYNC_WAL, hLog, COLUMN_FAMILY_BYTES);
Store store = region.getStore(COLUMN_FAMILY_BYTES);
assertEquals(0, region.getMemstoreSize());
// Put some value and make sure flush could be completed normally
byte [] value = Bytes.toBytes(name.getMethodName());
Put put = new Put(value);
put.add(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
region.put(put);
long onePutSize = region.getMemstoreSize();
assertTrue(onePutSize > 0);
region.flush(true);
assertEquals("memstoreSize should be zero", 0, region.getMemstoreSize());
assertEquals("flushable size should be zero", 0, store.getFlushableSize());
// save normalCPHost and replaced by mockedCPHost, which will cancel flush requests
RegionCoprocessorHost normalCPHost = region.getCoprocessorHost();
RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class);
when(mockedCPHost.preFlush(Mockito.isA(HStore.class), Mockito.isA(InternalScanner.class))).
thenReturn(null);
region.setCoprocessorHost(mockedCPHost);
region.put(put);
region.flush(true);
assertEquals("memstoreSize should NOT be zero", onePutSize, region.getMemstoreSize());
assertEquals("flushable size should NOT be zero", onePutSize, store.getFlushableSize());
// set normalCPHost and flush again, the snapshot will be flushed
region.setCoprocessorHost(normalCPHost);
region.flush(true);
assertEquals("memstoreSize should be zero", 0, region.getMemstoreSize());
assertEquals("flushable size should be zero", 0, store.getFlushableSize());
HRegion.closeHRegion(region);
}
示例8: getRegion
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
/**
 * Builds a local test region (full key range, SKIP_WAL durability) backed by a
 * fresh FSHLog rooted at the utility's data test directory.
 * @param conf configuration to create the WAL and region with
 * @param tableName name of the table backing the region
 * @return the newly created local HRegion
 * @throws IOException if WAL or region creation fails
 */
private HRegion getRegion(final Configuration conf, final String tableName) throws IOException {
  final Path testDir = TEST_UTIL.getDataTestDir();
  final WAL wal = new FSHLog(FileSystem.get(conf), testDir, testDir.toString(), conf);
  return (HRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
      HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, tableName, conf,
      false, Durability.SKIP_WAL, wal, INCREMENT_BYTES);
}
示例9: createWAL
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
@Override
protected FSHLog createWAL() throws IOException {
return new FSHLog(CommonFSUtils.getWALFileSystem(conf), CommonFSUtils.getWALRootDir(conf),
getWALDirectoryName(factory.factoryId),
getWALArchiveDirectoryName(conf, factory.factoryId), conf, listeners, true, logPrefix,
META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null);
}
示例10: createWAL
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
/**
 * Creates the IOTestWAL used by this provider. The WAL name is prefixed with
 * "factoryId + delimiter + providerId" and rolled files go to the standard
 * old-logs directory.
 * @return a newly constructed IOTestWAL
 * @throws IOException if the WAL cannot be created
 */
private FSHLog createWAL() throws IOException {
  final String walPrefix = factory.factoryId + WAL_FILE_NAME_DELIMITER + providerId;
  // Tag as a meta WAL only when this provider serves the meta table.
  final String suffix = META_WAL_PROVIDER_ID.equals(providerId) ? META_WAL_PROVIDER_ID : null;
  return new IOTestWAL(CommonFSUtils.getWALFileSystem(conf), CommonFSUtils.getWALRootDir(conf),
      AbstractFSWALProvider.getWALDirectoryName(factory.factoryId),
      HConstants.HREGION_OLDLOGDIR_NAME, conf, listeners, true, walPrefix, suffix);
}
示例11: getWAL
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
@Override
public WAL getWAL(RegionInfo region) throws IOException {
FSHLog log = this.log;
if (log != null) {
return log;
}
synchronized (this) {
log = this.log;
if (log == null) {
log = createWAL();
this.log = log;
}
}
return log;
}
示例12: close
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
@Override
public void close() throws IOException {
FSHLog log = this.log;
if (log != null) {
log.close();
}
}
示例13: shutdown
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
@Override
public void shutdown() throws IOException {
FSHLog log = this.log;
if (log != null) {
log.shutdown();
}
}
示例14: testMemstoreSizeAccountingWithFailedPostBatchMutate
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
@Test
public void testMemstoreSizeAccountingWithFailedPostBatchMutate() throws IOException {
String testName = "testMemstoreSizeAccountingWithFailedPostBatchMutate";
FileSystem fs = FileSystem.get(CONF);
Path rootDir = new Path(dir + testName);
FSHLog hLog = new FSHLog(fs, rootDir, testName, CONF);
HRegion region = initHRegion(tableName, null, null, false, Durability.SYNC_WAL, hLog,
COLUMN_FAMILY_BYTES);
HStore store = region.getStore(COLUMN_FAMILY_BYTES);
assertEquals(0, region.getMemStoreSize());
// Put one value
byte [] value = Bytes.toBytes(method);
Put put = new Put(value);
put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("abc"), value);
region.put(put);
long onePutSize = region.getMemStoreSize();
assertTrue(onePutSize > 0);
RegionCoprocessorHost mockedCPHost = Mockito.mock(RegionCoprocessorHost.class);
doThrow(new IOException())
.when(mockedCPHost).postBatchMutate(Mockito.<MiniBatchOperationInProgress<Mutation>>any());
region.setCoprocessorHost(mockedCPHost);
put = new Put(value);
put.addColumn(COLUMN_FAMILY_BYTES, Bytes.toBytes("dfg"), value);
try {
region.put(put);
fail("Should have failed with IOException");
} catch (IOException expected) {
}
long expectedSize = onePutSize * 2;
assertEquals("memstoreSize should be incremented", expectedSize, region.getMemStoreSize());
assertEquals("flushable size should be incremented", expectedSize,
store.getFlushableSize().getDataSize());
region.setCoprocessorHost(null);
HBaseTestingUtility.closeRegionAndWAL(region);
}
示例15: getRegion
import org.apache.hadoop.hbase.regionserver.wal.FSHLog; //导入依赖的package包/类
/**
 * Builds a local test region (full key range, SKIP_WAL durability) backed by a
 * fresh FSHLog rooted at the utility's data test directory, initializing the
 * memstore chunk pool first.
 * @param conf configuration to create the WAL and region with
 * @param tableName name of the table backing the region
 * @return the newly created local HRegion
 * @throws IOException if WAL or region creation fails
 */
private HRegion getRegion(final Configuration conf, final String tableName) throws IOException {
  final Path testDir = TEST_UTIL.getDataTestDir();
  final WAL wal = new FSHLog(FileSystem.get(conf), testDir, testDir.toString(), conf);
  // Chunk pool must exist before any memstore is created.
  ChunkCreator.initialize(MemStoreLABImpl.CHUNK_SIZE_DEFAULT, false, 0, 0, 0, null);
  return (HRegion) TEST_UTIL.createLocalHRegion(Bytes.toBytes(tableName),
      HConstants.EMPTY_BYTE_ARRAY, HConstants.EMPTY_BYTE_ARRAY, tableName, conf,
      false, Durability.SKIP_WAL, wal, INCREMENT_BYTES);
}