当前位置: 首页>>代码示例>>Java>>正文


Java FSTableDescriptors类代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.util.FSTableDescriptors的典型用法代码示例。如果您正苦于以下问题:Java FSTableDescriptors类的具体用法?Java FSTableDescriptors怎么用?Java FSTableDescriptors使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


FSTableDescriptors类属于org.apache.hadoop.hbase.util包,在下文中一共展示了FSTableDescriptors类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: bootstrap

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Bootstraps the cluster filesystem layout by creating the hbase:meta region.
 *
 * @param rd root directory under which the meta region is created
 * @param c  configuration used to load the meta table descriptor
 * @throws IOException if the meta region cannot be created
 */
private static void bootstrap(final Path rd, final Configuration c)
throws IOException {
  LOG.info("BOOTSTRAP: creating hbase:meta region");
  try {
    // Bootstrapping, make sure blockcache is off.  Else, one will be
    // created here in bootstrap and it'll need to be cleaned up.  Better to
    // not make it in first place.  Turn off block caching for bootstrap.
    // Enable after.
    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
    // Load the meta table's descriptor from the filesystem layout.
    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
    setInfoFamilyCachingForMeta(metaDescriptor, false);
    HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null, true, true);
    // Re-enable block caching on the descriptor now that the region exists.
    setInfoFamilyCachingForMeta(metaDescriptor, true);
    HRegion.closeHRegion(meta);
  } catch (IOException e) {
    // Unwrap remote exceptions so callers see the underlying IOException.
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("bootstrap", e);
    throw e;
  }
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:21,代码来源:MasterFileSystem.java

示例2: verifyHColumnDescriptor

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Checks the column family descriptors of {@code tableName} against the
 * expected settings, both as reported by the master and as persisted in the
 * table descriptor on HDFS.
 */
private void verifyHColumnDescriptor(int expected, final TableName tableName,
    final byte[]... families) throws IOException {
  final Admin admin = TEST_UTIL.getHBaseAdmin();

  // Check the descriptor the master hands out.
  final HTableDescriptor masterDescriptor = admin.getTableDescriptor(tableName);
  verifyHColumnDescriptor(expected, masterDescriptor.getColumnFamilies(), tableName, families);

  // Check the descriptor persisted on the filesystem.
  final MasterFileSystem masterFs =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  final Path tableDir = FSUtils.getTableDir(masterFs.getRootDir(), tableName);
  final HTableDescriptor fsDescriptor =
      FSTableDescriptors.getTableDescriptorFromFs(masterFs.getFileSystem(), tableDir);
  verifyHColumnDescriptor(expected, fsDescriptor.getColumnFamilies(), tableName, families);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:17,代码来源:TestHColumnDescriptorDefaultVersions.java

示例3: testReadAndWriteHRegionInfoFile

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Verifies the .regioninfo file is written when the region is created, is
 * not rewritten when the region is re-opened, and round-trips through
 * serialization unchanged.
 */
@Test
public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException {
  final HBaseTestingUtility util = new HBaseTestingUtility();
  final HRegionInfo info = HRegionInfo.FIRST_META_REGIONINFO;
  final Path baseDir = util.getDataTestDir();
  final FSTableDescriptors descriptors = new FSTableDescriptors(util.getConfiguration());
  // Creating the region writes the .regioninfo file.
  HRegion region = HRegion.createHRegion(info, baseDir, util.getConfiguration(),
      descriptors.get(TableName.META_TABLE_NAME));
  final long firstModTime = getModTime(region);
  HRegion.closeHRegion(region);
  // Sleep past the filesystem's modtime resolution before re-opening.
  Thread.sleep(1001);
  region = HRegion.openHRegion(baseDir, info, descriptors.get(TableName.META_TABLE_NAME),
      null, util.getConfiguration());
  // Re-opening must not rewrite the file.
  assertEquals(firstModTime, getModTime(region));
  // The persisted content must deserialize back to the original HRegionInfo.
  final HRegionInfo roundTripped = HRegionFileSystem.loadRegionInfoFileContent(
      region.getRegionFileSystem().getFileSystem(), region.getRegionFileSystem().getRegionDir());
  assertTrue(info.equals(roundTripped));
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:24,代码来源:TestHRegionInfo.java

示例4: call

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Copies the snapshotted table's descriptor into the snapshot working
 * directory, checking for external failure before and after the read.
 *
 * @return always {@code null}
 * @throws Exception if the copy fails or the task was cancelled
 */
@Override
public Void call() throws Exception {
  LOG.debug("Running table info copy.");
  this.rethrowException();
  LOG.debug("Attempting to copy table info for snapshot:"
      + SnapshotDescriptionUtils.toString(this.snapshot));
  // Read the source table's descriptor.
  final HTableDescriptor sourceDescriptor = FSTableDescriptors.getTableDescriptor(fs, rootDir,
      Bytes.toBytes(this.snapshot.getTable()));
  this.rethrowException();
  // Write a copy of the descriptor into the snapshot working directory.
  final Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
  FSTableDescriptors.createTableDescriptorForTableDirectory(fs, workingDir, sourceDescriptor,
      false);
  LOG.debug("Finished copying tableinfo.");
  return null;
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:17,代码来源:TableInfoCopyTask.java

示例5: bootstrap

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Bootstraps the cluster filesystem layout by creating the hbase:meta region.
 *
 * @param rd root directory under which the meta region is created
 * @param c  configuration used to load the meta table descriptor
 * @throws IOException if the meta region cannot be created
 */
private static void bootstrap(final Path rd, final Configuration c)
throws IOException {
  LOG.info("BOOTSTRAP: creating hbase:meta region");
  try {
    // Bootstrapping, make sure blockcache is off.  Else, one will be
    // created here in bootstrap and it'll need to be cleaned up.  Better to
    // not make it in first place.  Turn off block caching for bootstrap.
    // Enable after.
    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
    // Load the meta table's descriptor from the filesystem layout.
    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
    setInfoFamilyCachingForMeta(metaDescriptor, false);
    HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor);
    // Re-enable block caching on the descriptor now that the region exists.
    setInfoFamilyCachingForMeta(metaDescriptor, true);
    HRegion.closeHRegion(meta);
  } catch (IOException e) {
    // Unwrap remote exceptions so callers see the underlying IOException.
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("bootstrap", e);
    throw e;
  }
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:21,代码来源:MasterFileSystem.java

示例6: call

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Copies the snapshotted table's descriptor into the snapshot working
 * directory, checking for external failure before and after the read.
 *
 * @return always {@code null}
 * @throws Exception if the copy fails or the task was cancelled
 */
@Override
public Void call() throws Exception {
  LOG.debug("Running table info copy.");
  this.rethrowException();
  LOG.debug("Attempting to copy table info for snapshot:"
      + ClientSnapshotDescriptionUtils.toString(this.snapshot));
  // Read the source table's descriptor from the filesystem.
  final HTableDescriptor sourceDescriptor = FSTableDescriptors.getTableDescriptorFromFs(fs,
      rootDir, TableName.valueOf(this.snapshot.getTable()));
  this.rethrowException();
  // Write a copy of the descriptor into the snapshot working directory.
  final Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
  new FSTableDescriptors(fs, rootDir)
      .createTableDescriptorForTableDirectory(workingDir, sourceDescriptor, false);
  LOG.debug("Finished copying tableinfo.");
  return null;
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:18,代码来源:TableInfoCopyTask.java

示例7: verifyHColumnDescriptor

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Checks the column family descriptors of {@code tableName} against the
 * expected settings, both as reported by the master and as persisted in the
 * table descriptor on HDFS.
 */
private void verifyHColumnDescriptor(int expected, final TableName tableName,
    final byte[]... families) throws IOException {
  final HBaseAdmin admin = TEST_UTIL.getHBaseAdmin();

  // Check the descriptor the master hands out.
  final HTableDescriptor masterDescriptor = admin.getTableDescriptor(tableName);
  verifyHColumnDescriptor(expected, masterDescriptor.getColumnFamilies(), tableName, families);

  // Check the descriptor persisted on the filesystem.
  final MasterFileSystem masterFs =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  final Path tableDir = FSUtils.getTableDir(masterFs.getRootDir(), tableName);
  final HTableDescriptor fsDescriptor =
      FSTableDescriptors.getTableDescriptorFromFs(masterFs.getFileSystem(), tableDir);
  verifyHColumnDescriptor(expected, fsDescriptor.getColumnFamilies(), tableName, families);
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:17,代码来源:TestHColumnDescriptorDefaultVersions.java

示例8: bootstrap

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Bootstraps the cluster filesystem layout by creating the hbase:meta region.
 *
 * @param rd root directory under which the meta region is created
 * @param c  configuration used to load the meta table descriptor
 * @throws IOException if the meta region cannot be created
 */
private static void bootstrap(final Path rd, final Configuration c)
throws IOException {
  LOG.info("BOOTSTRAP: creating hbase:meta region");
  try {
    // Bootstrapping, make sure blockcache is off.  Else, one will be
    // created here in bootstrap and it'll need to be cleaned up.  Better to
    // not make it in first place.  Turn off block caching for bootstrap.
    // Enable after.
    TableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
    HRegion meta = HRegion.createHRegion(RegionInfoBuilder.FIRST_META_REGIONINFO, rd,
        c, setInfoFamilyCachingForMeta(metaDescriptor, false), null);
    meta.close();
  } catch (IOException e) {
      // Unwrap remote exceptions so callers see the underlying IOException.
      e = e instanceof RemoteException ?
              ((RemoteException)e).unwrapRemoteException() : e;
    LOG.error("bootstrap", e);
    throw e;
  }
}
 
开发者ID:apache,项目名称:hbase,代码行数:20,代码来源:MasterFileSystem.java

示例9: createFsLayout

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Lays out a new table on the filesystem: writes the table descriptor and
 * regions under the temp directory, then moves the result into the hbase
 * root location.
 *
 * @return the regions created by {@code hdfsRegionHandler}
 * @throws IOException if any filesystem step fails
 */
protected static List<RegionInfo> createFsLayout(final MasterProcedureEnv env,
    final TableDescriptor tableDescriptor, List<RegionInfo> newRegions,
    final CreateHdfsRegions hdfsRegionHandler) throws IOException {
  final MasterFileSystem masterFs = env.getMasterServices().getMasterFileSystem();
  final Path tempDir = masterFs.getTempDir();
  final Path tempTableDir = FSUtils.getTableDir(tempDir, tableDescriptor.getTableName());

  // 1. Write the table descriptor under the temp directory; using a copy of
  // the descriptor, the table will be created enabling first.
  final FSTableDescriptors fsDescriptors =
      (FSTableDescriptors) env.getMasterServices().getTableDescriptors();
  fsDescriptors.createTableDescriptorForTableDirectory(tempTableDir, tableDescriptor, false);

  // 2. Create the regions under the temp directory.
  newRegions = hdfsRegionHandler.createHdfsRegions(env, tempDir,
      tableDescriptor.getTableName(), newRegions);

  // 3. Move the finished table layout into the hbase root location.
  moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir);

  return newRegions;
}
 
开发者ID:apache,项目名称:hbase,代码行数:23,代码来源:CreateTableProcedure.java

示例10: createFsLayout

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Create region layout in file system.
 * @param env MasterProcedureEnv
 * @throws IOException
 */
private List<RegionInfo> createFsLayout(
  final MasterProcedureEnv env,
  final TableDescriptor tableDescriptor,
  List<RegionInfo> newRegions,
  final CreateHdfsRegions hdfsRegionHandler) throws IOException {
  final MasterFileSystem mfs = env.getMasterServices().getMasterFileSystem();
  final Path tempdir = mfs.getTempDir();

  // 1. Create Table Descriptor
  // using a copy of descriptor, table will be created enabling first
  final Path tempTableDir = FSUtils.getTableDir(tempdir, tableDescriptor.getTableName());
  ((FSTableDescriptors)(env.getMasterServices().getTableDescriptors()))
    .createTableDescriptorForTableDirectory(tempTableDir,
            TableDescriptorBuilder.newBuilder(tableDescriptor).build(), false);

  // 2. Create Regions
  newRegions = hdfsRegionHandler.createHdfsRegions(
    env, tempdir, tableDescriptor.getTableName(), newRegions);

  // 3. Move Table temp directory to the hbase root location
  CreateTableProcedure.moveTempDirectoryToHBaseRoot(env, tableDescriptor, tempTableDir);

  return newRegions;
}
 
开发者ID:apache,项目名称:hbase,代码行数:30,代码来源:CloneSnapshotProcedure.java

示例11: verifyHColumnDescriptor

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Checks the column family descriptors of {@code tableName} against the
 * expected settings, both as reported by the master and as persisted in the
 * table descriptor on HDFS.
 */
private void verifyHColumnDescriptor(int expected, final TableName tableName,
    final byte[]... families) throws IOException {
  final Admin admin = TEST_UTIL.getAdmin();

  // Check the descriptor the master hands out.
  final TableDescriptor masterDescriptor = admin.getDescriptor(tableName);
  verifyHColumnDescriptor(expected, masterDescriptor.getColumnFamilies(), tableName, families);

  // Check the descriptor persisted on the filesystem.
  final MasterFileSystem masterFs =
      TEST_UTIL.getMiniHBaseCluster().getMaster().getMasterFileSystem();
  final Path tableDir = FSUtils.getTableDir(masterFs.getRootDir(), tableName);
  final TableDescriptor fsDescriptor =
      FSTableDescriptors.getTableDescriptorFromFs(masterFs.getFileSystem(), tableDir);
  verifyHColumnDescriptor(expected, fsDescriptor.getColumnFamilies(), tableName, families);
}
 
开发者ID:apache,项目名称:hbase,代码行数:17,代码来源:TestHColumnDescriptorDefaultVersions.java

示例12: testReadAndWriteHRegionInfoFile

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Verifies the .regioninfo file is written when the region is created, is
 * not rewritten when the region is re-opened, and round-trips through
 * serialization unchanged.
 */
@Test
public void testReadAndWriteHRegionInfoFile() throws IOException, InterruptedException {
  final HBaseTestingUtility util = new HBaseTestingUtility();
  final HRegionInfo info = HRegionInfo.FIRST_META_REGIONINFO;
  final Path baseDir = util.getDataTestDir();
  // Creating the region writes the .regioninfo file.
  final FSTableDescriptors descriptors = new FSTableDescriptors(util.getConfiguration());
  HRegion region = HBaseTestingUtility.createRegionAndWAL(info, baseDir, util.getConfiguration(),
      descriptors.get(TableName.META_TABLE_NAME));
  final long firstModTime = getModTime(region);
  HBaseTestingUtility.closeRegionAndWAL(region);
  // Sleep past the filesystem's modtime resolution before re-opening.
  Thread.sleep(1001);
  region = HRegion.openHRegion(baseDir, info, descriptors.get(TableName.META_TABLE_NAME),
      null, util.getConfiguration());
  // Re-opening must not rewrite the file.
  assertEquals(firstModTime, getModTime(region));
  // The persisted content must compare equal to the original region info.
  final org.apache.hadoop.hbase.client.RegionInfo roundTripped =
      HRegionFileSystem.loadRegionInfoFileContent(
          region.getRegionFileSystem().getFileSystem(),
          region.getRegionFileSystem().getRegionDir());
  assertTrue(org.apache.hadoop.hbase.client.RegionInfo.COMPARATOR.compare(info, roundTripped) == 0);
  HBaseTestingUtility.closeRegionAndWAL(region);
}
 
开发者ID:apache,项目名称:hbase,代码行数:25,代码来源:TestHRegionInfo.java

示例13: testReadAndWriteRegionInfoFile

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Verifies the .regioninfo file is written when the region is created, is
 * not rewritten when the region is re-opened, and round-trips through
 * serialization unchanged.
 */
@Test
public void testReadAndWriteRegionInfoFile() throws IOException, InterruptedException {
  HBaseTestingUtility htu = new HBaseTestingUtility();
  RegionInfo ri = RegionInfoBuilder.FIRST_META_REGIONINFO;
  Path basedir = htu.getDataTestDir();
  // Create a region.  That'll write the .regioninfo file.
  FSTableDescriptors fsTableDescriptors = new FSTableDescriptors(htu.getConfiguration());
  HRegion r = HBaseTestingUtility.createRegionAndWAL(convert(ri), basedir, htu.getConfiguration(),
      fsTableDescriptors.get(TableName.META_TABLE_NAME));
  // Get modtime on the file.
  long modtime = getModTime(r);
  HBaseTestingUtility.closeRegionAndWAL(r);
  // Sleep past the filesystem's modtime resolution before re-opening.
  Thread.sleep(1001);
  r = HRegion.openHRegion(basedir, convert(ri), fsTableDescriptors.get(TableName.META_TABLE_NAME),
      null, htu.getConfiguration());
  // Ensure the file is not written for a second time.
  long modtime2 = getModTime(r);
  assertEquals(modtime, modtime2);
  // Now load the file and verify it round-trips to the original RegionInfo.
  RegionInfo deserializedRi = HRegionFileSystem.loadRegionInfoFileContent(
      r.getRegionFileSystem().getFileSystem(), r.getRegionFileSystem().getRegionDir());
  // Fix: the loaded value was previously never checked, so the load verified nothing.
  assertEquals(0, RegionInfo.COMPARATOR.compare(ri, deserializedRi));
  HBaseTestingUtility.closeRegionAndWAL(r);
}
 
开发者ID:apache,项目名称:hbase,代码行数:24,代码来源:TestRegionInfoBuilder.java

示例14: testGetSetOfHTD

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
@Test
public void testGetSetOfHTD() throws IOException {
  HBaseTestingUtility HTU = new HBaseTestingUtility();
      final String tablename = "testGetSetOfHTD";

  // Delete the temporary table directory that might still be there from the
  // previous test run.
  FSTableDescriptors.deleteTableDescriptorIfExists(tablename,
      HTU.getConfiguration());

  HTableDescriptor htd = new HTableDescriptor(tablename);
  FSTableDescriptors.createTableDescriptor(htd, HTU.getConfiguration());
  HRegionInfo hri = new HRegionInfo(Bytes.toBytes("testGetSetOfHTD"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_END_ROW);
  HTableDescriptor htd2 = hri.getTableDesc();
  assertTrue(htd.equals(htd2));
  final String key = "SOME_KEY";
  assertNull(htd.getValue(key));
  final String value = "VALUE";
  htd.setValue(key, value);
  hri.setTableDesc(htd);
  HTableDescriptor htd3 = hri.getTableDesc();
  assertTrue(htd.equals(htd3));
}
 
开发者ID:zwqjsj0404,项目名称:HBase-Research,代码行数:25,代码来源:TestHRegionInfo.java

示例15: createFsLayout

import org.apache.hadoop.hbase.util.FSTableDescriptors; //导入依赖的package包/类
/**
 * Lays out a new table on the filesystem: writes the table descriptor and
 * regions under the temp directory, then swaps the result into the hbase
 * root location.
 *
 * @return the regions created by {@code hdfsRegionHandler}
 * @throws IOException if any filesystem step fails
 */
protected static List<HRegionInfo> createFsLayout(final MasterProcedureEnv env,
    final HTableDescriptor hTableDescriptor, List<HRegionInfo> newRegions,
    final CreateHdfsRegions hdfsRegionHandler) throws IOException {
  final MasterFileSystem masterFs = env.getMasterServices().getMasterFileSystem();
  final Path tempDir = masterFs.getTempDir();
  final TableName tableName = hTableDescriptor.getTableName();

  // 1. Write the table descriptor under the temp directory; using a copy of
  // the descriptor, the table will be created enabling first.
  final Path tempTableDir = FSUtils.getTableDir(tempDir, tableName);
  new FSTableDescriptors(env.getMasterConfiguration()).createTableDescriptorForTableDirectory(
    tempTableDir, hTableDescriptor, false);

  // 2. Create the regions under the temp directory.
  newRegions = hdfsRegionHandler.createHdfsRegions(env, tempDir, tableName, newRegions);

  // 3. Replace any stale table directory in the hbase root with the temp layout.
  final Path tableDir = FSUtils.getTableDir(masterFs.getRootDir(), tableName);
  final FileSystem fs = masterFs.getFileSystem();
  if (!fs.delete(tableDir, true) && fs.exists(tableDir)) {
    throw new IOException("Couldn't delete " + tableDir);
  }
  if (!fs.rename(tempTableDir, tableDir)) {
    throw new IOException("Unable to move table from temp=" + tempTableDir +
      " to hbase root=" + tableDir);
  }
  return newRegions;
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:29,代码来源:CreateTableProcedure.java


注:本文中的org.apache.hadoop.hbase.util.FSTableDescriptors类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。