当前位置: 首页>>代码示例>>Java>>正文


Java HFileLink.create方法代码示例

本文整理汇总了Java中org.apache.hadoop.hbase.io.HFileLink.create方法的典型用法代码示例。如果您正苦于以下问题:Java HFileLink.create方法的具体用法?Java HFileLink.create怎么用?Java HFileLink.create使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.hbase.io.HFileLink的用法示例。


在下文中一共展示了HFileLink.create方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: addStoreFile

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * Record the given store file in the snapshot statistics, accounting it either
 * against the live-file or the archived-file totals.
 * @param region region encoded name
 * @param family family name
 * @param hfile store file name
 * @return the store file information
 */
FileInfo addStoreFile(final String region, final String family, final String hfile)
      throws IOException {
  String table = this.snapshot.getTable();
  HFileLink hfileLink = HFileLink.create(conf, table, region, family, hfile);
  boolean inArchive = false;
  long size = -1;
  try {
    inArchive = fs.exists(hfileLink.getArchivePath());
    if (inArchive) {
      // File has already been moved to the archive: account for it there.
      size = fs.getFileStatus(hfileLink.getArchivePath()).getLen();
      hfileArchiveSize += size;
      hfileArchiveCount++;
    } else {
      // Still a live store file under the table directory.
      size = hfileLink.getFileStatus(fs).getLen();
      hfileSize += size;
      hfilesCount++;
    }
  } catch (FileNotFoundException e) {
    // Neither live nor archived: the snapshot references a missing file.
    hfilesMissing++;
  }
  return new FileInfo(inArchive, size);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:29,代码来源:SnapshotInfo.java

示例2: addStoreFile

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * Record the given store file in the snapshot statistics, accounting it either
 * against the live-file or the archived-file totals, and flag it as corrupted
 * when its on-disk size disagrees with the manifest.
 * @param region region info of the region owning the file
 * @param family family name
 * @param storeFile store file entry from the snapshot region manifest
 * @return the store file information
 */
FileInfo addStoreFile(final HRegionInfo region, final String family,
    final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
  HFileLink hfileLink = HFileLink.create(conf, snapshotTable, region.getEncodedName(),
                                    family, storeFile.getName());
  boolean isCorrupted = false;
  boolean inArchive = false;
  long size = -1;
  try {
    inArchive = fs.exists(hfileLink.getArchivePath());
    if (inArchive) {
      // File has already been moved to the archive: account for it there.
      size = fs.getFileStatus(hfileLink.getArchivePath()).getLen();
      hfileArchiveSize.addAndGet(size);
      hfileArchiveCount.incrementAndGet();
    } else {
      // Still a live store file under the table directory.
      size = hfileLink.getFileStatus(fs).getLen();
      hfileSize.addAndGet(size);
      hfilesCount.incrementAndGet();
    }
    // A manifest-recorded size that differs from the on-disk size marks corruption.
    isCorrupted = storeFile.hasFileSize() && storeFile.getFileSize() != size;
    if (isCorrupted) {
      hfilesCorrupted.incrementAndGet();
    }
  } catch (FileNotFoundException e) {
    // Neither live nor archived: the snapshot references a missing file.
    hfilesMissing.incrementAndGet();
  }
  return new FileInfo(inArchive, size, isCorrupted);
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:32,代码来源:SnapshotInfo.java

示例3: addStoreFile

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * Record the given store file in the snapshot statistics, accounting it either
 * against the live-file or the archived-file totals.
 * @param region region encoded name
 * @param family family name
 * @param hfile store file name
 * @return the store file information
 */
FileInfo addStoreFile(final String region, final String family, final String hfile)
      throws IOException {
  TableName table = snapshotTable;
  HFileLink hfileLink = HFileLink.create(conf, table, region, family, hfile);
  boolean inArchive = false;
  long size = -1;
  try {
    inArchive = fs.exists(hfileLink.getArchivePath());
    if (inArchive) {
      // File has already been moved to the archive: account for it there.
      size = fs.getFileStatus(hfileLink.getArchivePath()).getLen();
      hfileArchiveSize += size;
      hfileArchiveCount++;
    } else {
      // Still a live store file under the table directory.
      size = hfileLink.getFileStatus(fs).getLen();
      hfileSize += size;
      hfilesCount++;
    }
  } catch (FileNotFoundException e) {
    // Neither live nor archived: the snapshot references a missing file.
    hfilesMissing++;
  }
  return new FileInfo(inArchive, size);
}
 
开发者ID:tenggyut,项目名称:HIndex,代码行数:29,代码来源:SnapshotInfo.java

示例4: addStoreFile

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * Record the given store file in the snapshot statistics, accounting it either
 * against the live-file or the archived-file totals.
 * @param region region info of the region owning the file
 * @param family family name
 * @param storeFile store file entry from the snapshot region manifest
 * @return the store file information
 */
FileInfo addStoreFile(final HRegionInfo region, final String family,
    final SnapshotRegionManifest.StoreFile storeFile) throws IOException {
  HFileLink hfileLink = HFileLink.create(conf, snapshotTable, region.getEncodedName(),
                                    family, storeFile.getName());
  boolean inArchive = false;
  long size = -1;
  try {
    inArchive = fs.exists(hfileLink.getArchivePath());
    if (inArchive) {
      // File has already been moved to the archive: account for it there.
      size = fs.getFileStatus(hfileLink.getArchivePath()).getLen();
      hfileArchiveSize.addAndGet(size);
      hfileArchiveCount.incrementAndGet();
    } else {
      // Still a live store file under the table directory.
      size = hfileLink.getFileStatus(fs).getLen();
      hfileSize.addAndGet(size);
      hfilesCount.incrementAndGet();
    }
  } catch (FileNotFoundException e) {
    // Neither live nor archived: the snapshot references a missing file.
    hfilesMissing.incrementAndGet();
  }
  return new FileInfo(inArchive, size);
}
 
开发者ID:shenli-uiuc,项目名称:PyroDB,代码行数:29,代码来源:SnapshotInfo.java

示例5: testHFileLink

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * Verifies that a committed store file can be opened, recognized, and scanned
 * through an HFileLink.
 */
public void testHFileLink() throws IOException {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testHFileLinkTb"));
  // Keep temp data under hbase/target/test-data instead of /tmp/hbase-xxxx/.
  Configuration testConf = new Configuration(this.conf);
  FSUtils.setRootDir(testConf, this.testDir);
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
    testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);
  HFileContext fileContext = new HFileContextBuilder().withBlockSize(8 * 1024).build();

  // Create a store file and fill it with test data.
  StoreFile.Writer sfWriter = new StoreFile.WriterBuilder(conf, cacheConf, this.fs)
      .withFilePath(regionFs.createTempName())
      .withFileContext(fileContext)
      .build();
  writeStoreFile(sfWriter);

  // Commit the store file, then create a link to it from another region dir.
  Path committedPath = regionFs.commitStoreFile(TEST_FAMILY, sfWriter.getPath());
  Path linkDir = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY));
  HFileLink.create(testConf, this.fs, linkDir, hri, committedPath.getName());
  Path linkPath = new Path(linkDir,
      HFileLink.createHFileLinkName(hri, committedPath.getName()));

  // Open the store file through the link and confirm it is detected as a link.
  StoreFileInfo info = new StoreFileInfo(testConf, this.fs, linkPath);
  StoreFile linkedFile = new StoreFile(this.fs, info, testConf, cacheConf,
    BloomType.NONE);
  assertTrue(info.isLink());

  // Scan through the link and verify the expected number of rows comes back.
  int rows = 1;
  HFileScanner scanner = linkedFile.createReader().getScanner(false, false);
  scanner.seekTo();
  while (scanner.next()) {
    rows++;
  }
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), rows);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:38,代码来源:TestStoreFile.java

示例6: testHFileLink

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * Verifies that a store file can be opened, recognized, and scanned through
 * an HFileLink.
 */
public void testHFileLink() throws IOException {
  final String family = "f";

  // Keep temp data under the test dir instead of /tmp/hbase-xxxx/.
  Configuration testConf = new Configuration(this.conf);
  FSUtils.setRootDir(testConf, this.testDir);

  HRegionInfo hri = new HRegionInfo(Bytes.toBytes("table-link"));
  Path regionDir = new Path(hri.getTableNameAsString(), hri.getEncodedName());
  Path storeDir = new Path(new Path(this.testDir, regionDir), family);

  // Create a store file and fill it with test data.
  StoreFile.Writer sfWriter = new StoreFile.WriterBuilder(testConf, cacheConf,
       this.fs, 8 * 1024)
          .withOutputDir(storeDir)
          .build();
  Path sfPath = sfWriter.getPath();
  writeStoreFile(sfWriter);
  sfWriter.close();

  // Create a link pointing at the store file.
  Path linkDir = new Path(this.testDir, new Path("test-region", family));
  HFileLink.create(testConf, this.fs, linkDir, hri, sfPath.getName());
  Path linkPath = new Path(linkDir,
      HFileLink.createHFileLinkName(hri, sfPath.getName()));

  // Open the store file through the link and confirm it is detected as a link.
  StoreFile linkedFile = new StoreFile(this.fs, linkPath, testConf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
  assertTrue(linkedFile.isLink());

  // Scan through the link and verify the expected number of rows comes back.
  int rows = 1;
  HFileScanner scanner = linkedFile.createReader().getScanner(false, false);
  scanner.seekTo();
  while (scanner.next()) {
    rows++;
  }
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), rows);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:39,代码来源:TestStoreFile.java

示例7: testHFileLink

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * Verifies that a committed store file can be opened, recognized, and scanned
 * through an HFileLink.
 */
@Test
public void testHFileLink() throws IOException {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testHFileLinkTb"));
  // Keep temp data under hbase/target/test-data instead of /tmp/hbase-xxxx/.
  Configuration testConf = new Configuration(this.conf);
  FSUtils.setRootDir(testConf, testDir);
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
    testConf, fs, FSUtils.getTableDir(testDir, hri.getTable()), hri);
  HFileContext fileContext = new HFileContextBuilder().withBlockSize(8 * 1024).build();

  // Create a store file and fill it with test data.
  StoreFileWriter sfWriter = new StoreFileWriter.Builder(conf, cacheConf, this.fs)
      .withFilePath(regionFs.createTempName())
      .withFileContext(fileContext)
      .build();
  writeStoreFile(sfWriter);

  // Commit the store file, then create a link to it from another region dir.
  Path committedPath = regionFs.commitStoreFile(TEST_FAMILY, sfWriter.getPath());
  Path linkDir = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY));
  HFileLink.create(testConf, this.fs, linkDir, hri, committedPath.getName());
  Path linkPath = new Path(linkDir,
      HFileLink.createHFileLinkName(hri, committedPath.getName()));

  // Open the store file through the link and confirm it is detected as a link.
  StoreFileInfo info = new StoreFileInfo(testConf, this.fs, linkPath);
  HStoreFile linkedFile =
      new HStoreFile(this.fs, info, testConf, cacheConf, BloomType.NONE, true);
  assertTrue(info.isLink());
  linkedFile.initReader();

  // Scan through the link and verify the expected number of rows comes back.
  int rows = 1;
  HFileScanner scanner = linkedFile.getReader().getScanner(false, false);
  scanner.seekTo();
  while (scanner.next()) {
    rows++;
  }
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), rows);
}
 
开发者ID:apache,项目名称:hbase,代码行数:40,代码来源:TestHStoreFile.java

示例8: testHFileLink

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * Verifies that a committed store file can be opened, recognized, and scanned
 * through an HFileLink.
 */
public void testHFileLink() throws IOException {
  final HRegionInfo hri = new HRegionInfo(TableName.valueOf("testHFileLinkTb"));
  // Keep temp data under hbase/target/test-data instead of /tmp/hbase-xxxx/.
  Configuration testConf = new Configuration(this.conf);
  FSUtils.setRootDir(testConf, this.testDir);
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
    testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);

  // Create a store file and fill it with test data.
  StoreFile.Writer sfWriter = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs, 8 * 1024)
      .withFilePath(regionFs.createTempName())
      .build();
  writeStoreFile(sfWriter);

  // Commit the store file, then create a link to it from another region dir.
  Path committedPath = regionFs.commitStoreFile(TEST_FAMILY, sfWriter.getPath());
  Path linkDir = new Path(regionFs.getTableDir(), new Path("test-region", TEST_FAMILY));
  HFileLink.create(testConf, this.fs, linkDir, hri, committedPath.getName());
  Path linkPath = new Path(linkDir,
      HFileLink.createHFileLinkName(hri, committedPath.getName()));

  // Open the store file through the link and confirm it is detected as a link.
  StoreFileInfo info = new StoreFileInfo(testConf, this.fs, linkPath);
  StoreFile linkedFile = new StoreFile(this.fs, info, testConf, cacheConf,
    BloomType.NONE);
  assertTrue(info.isLink());

  // Scan through the link and verify the expected number of rows comes back.
  int rows = 1;
  HFileScanner scanner = linkedFile.createReader().getScanner(false, false);
  scanner.seekTo();
  while (scanner.next()) {
    rows++;
  }
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), rows);
}
 
开发者ID:cloud-software-foundation,项目名称:c5,代码行数:36,代码来源:TestStoreFile.java

示例9: testHFileLink

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * Verifies that a store file can be opened, recognized, and scanned through
 * an HFileLink.
 */
public void testHFileLink() throws IOException {
  final String family = "f";
  HRegionInfo hri = new HRegionInfo(Bytes.toBytes("table-link"));
  Path regionDir = new Path(hri.getTableNameAsString(), hri.getEncodedName());
  Path storeDir = new Path(new Path(FSUtils.getRootDir(conf), regionDir), family);

  // Create a store file and fill it with test data.
  StoreFile.Writer sfWriter = new StoreFile.WriterBuilder(conf, cacheConf,
       this.fs, 8 * 1024)
          .withOutputDir(storeDir)
          .build();
  Path sfPath = sfWriter.getPath();
  writeStoreFile(sfWriter);
  sfWriter.close();

  // Create a link pointing at the store file.
  Path linkDir = new Path(FSUtils.getRootDir(conf), new Path("test-region", family));
  HFileLink.create(conf, this.fs, linkDir, hri, sfPath.getName());
  Path linkPath = new Path(linkDir,
      HFileLink.createHFileLinkName(hri, sfPath.getName()));

  // Open the store file through the link and confirm it is detected as a link.
  StoreFile linkedFile = new StoreFile(this.fs, linkPath, conf, cacheConf,
      BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);
  assertTrue(linkedFile.isLink());

  // Scan through the link and verify the expected number of rows comes back.
  int rows = 1;
  HFileScanner scanner = linkedFile.createReader().getScanner(false, false);
  scanner.seekTo();
  while (scanner.next()) {
    rows++;
  }
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), rows);
}
 
开发者ID:daidong,项目名称:DominoHBase,代码行数:35,代码来源:TestStoreFile.java

示例10: testHFileLinkCleaning

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * Verifies HFileLinkCleaner behavior: while a link to an archived hfile exists,
 * neither the link back-reference nor the hfile may be deleted; once the link
 * table is archived and the TTL expires, both become removable.
 * Fix: removed the unused local {@code archiveLinkStoreDir}, which was computed
 * but never read.
 */
@Test
public void testHFileLinkCleaning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(conf);

  final TableName tableName = TableName.valueOf("test-table");
  final TableName tableLinkName = TableName.valueOf("test-link");
  final String hfileName = "1234567890";
  final String familyName = "cf";

  HRegionInfo hri = new HRegionInfo(tableName);
  HRegionInfo hriLink = new HRegionInfo(tableLinkName);

  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
        tableName, hri.getEncodedName(), familyName);

  // Create an hfile in the archive: <archive>/<table>/<region>/<cf>/<hfile>
  Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
  fs.mkdirs(familyPath);
  Path hfilePath = new Path(familyPath, hfileName);
  fs.createNewFile(hfilePath);

  // Create a link to the hfile from the link table; this also creates a
  // back-reference under the archived store directory.
  Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
                                      hriLink.getEncodedName(), familyName);
  fs.mkdirs(familyLinkPath);
  HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
  Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
  assertTrue(fs.exists(linkBackRefDir));
  FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
  assertEquals(1, backRefs.length);
  Path linkBackRef = backRefs[0].getPath();

  // Initialize the cleaner with a short TTL so expiry can be exercised quickly.
  final long ttl = 1000;
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);

  // While the link table still exists, nothing may be removed.
  cleaner.chore();
  assertTrue(fs.exists(linkBackRef));
  assertTrue(fs.exists(hfilePath));

  // Once the link table has been archived, the back-reference becomes removable.
  fs.rename(FSUtils.getTableDir(rootDir, tableLinkName),
      FSUtils.getTableDir(archiveDir, tableLinkName));
  cleaner.chore();
  assertFalse("Link should be deleted", fs.exists(linkBackRef));

  // After the TTL has passed, the hfile itself can be removed.
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertFalse("HFile should be deleted", fs.exists(hfilePath));

  // A few more passes clean up the remaining (now empty) directory structure.
  for (int i = 0; i < 4; ++i) {
    Thread.sleep(ttl * 2);
    cleaner.chore();
  }
  assertFalse("HFile should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableName)));
  assertFalse("Link should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableLinkName)));
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:70,代码来源:TestHFileLinkCleaner.java

示例11: testReferenceToHFileLink

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * This test creates an hfile and then the dir structures and files to verify that references
 * to hfilelinks (created by snapshot clones) can be properly interpreted.
 * Flow: write a store file, link to it from a "clone" region, split the link into
 * top/bottom daughter references, then scan both daughters and check the total row count.
 */
public void testReferenceToHFileLink() throws IOException {
  // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
  Configuration testConf = new Configuration(this.conf);
  FSUtils.setRootDir(testConf, this.testDir);

  // adding legal table name chars to verify regex handles it.
  HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
    testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);

  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
  StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs)
          .withFilePath(regionFs.createTempName())
          .withFileContext(meta)
          .build();
  writeStoreFile(writer);
  Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());

  // create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
  HRegionInfo hriClone = new HRegionInfo(TableName.valueOf("clone"));
  // NOTE(review): the clone region is created under the ORIGINAL table's dir
  // (hri.getTable(), not hriClone.getTable()) — presumably intentional for this
  // fixture layout; confirm against the project's other versions of this test.
  HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(
    testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()),
      hriClone);
  Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY);
  HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
  Path linkFilePath = new Path(dstPath,
                HFileLink.createHFileLinkName(hri, storeFilePath.getName()));

  // create splits of the link.
  // <root>/clone/splitA/<cf>/<reftohfilelink>,
  // <root>/clone/splitB/<cf>/<reftohfilelink>
  HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY);
  HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null);
  StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE);
  // Reader must be open while splitting; closed (and evicted) once both halves exist.
  f.createReader();
  Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top
  Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);// bottom
  f.closeReader(true);
  // OK test the thing
  FSUtils.logFileSystemState(fs, this.testDir, LOG);

  // There is a case where a file with the hfilelink pattern is actually a daughter
  // reference to a hfile link.  This code in StoreFile that handles this case.

  // Try to open store file from link
  StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf,
    BloomType.NONE);

  // Now confirm that I can read from the ref to link
  int count = 1;
  HFileScanner s = hsfA.createReader().getScanner(false, false);
  s.seekTo();
  while (s.next()) {
    count++;
  }
  assertTrue(count > 0); // read some rows here

  // Try to open store file from link
  StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf,
    BloomType.NONE);

  // Now confirm that I can read from the ref to link
  HFileScanner sB = hsfB.createReader().getScanner(false, false);
  sB.seekTo();
  
  //count++ as seekTo() will advance the scanner
  count++;
  while (sB.next()) {
    count++;
  }

  // Both halves together must cover every row exactly once.
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
 
开发者ID:fengchen8086,项目名称:ditb,代码行数:80,代码来源:TestStoreFile.java

示例12: testHFileLinkCleaning

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * Verifies HFileLinkCleaner behavior: while a link to an archived hfile exists,
 * neither the link back-reference nor the hfile may be deleted; once the link
 * table is archived and the TTL expires, both become removable.
 * Fix: removed the unused local {@code archiveLinkStoreDir}, which was computed
 * but never read.
 */
@Test
public void testHFileLinkCleaning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  conf.set(HConstants.HBASE_DIR, TEST_UTIL.getDataTestDir().toString());
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(conf);

  final String tableName = "test-table";
  final String tableLinkName = "test-link";
  final String hfileName = "1234567890";
  final String familyName = "cf";

  HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tableName));
  HRegionInfo hriLink = new HRegionInfo(Bytes.toBytes(tableLinkName));

  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
        tableName, hri.getEncodedName(), familyName);

  // Create an hfile in the archive: <archive>/<table>/<region>/<cf>/<hfile>
  Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
  fs.mkdirs(familyPath);
  Path hfilePath = new Path(familyPath, hfileName);
  fs.createNewFile(hfilePath);

  // Create a link to the hfile from the link table; this also creates a
  // back-reference under the archived store directory.
  Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
                                      hriLink.getEncodedName(), familyName);
  fs.mkdirs(familyLinkPath);
  HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
  Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
  assertTrue(fs.exists(linkBackRefDir));
  FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
  assertEquals(1, backRefs.length);
  Path linkBackRef = backRefs[0].getPath();

  // Initialize the cleaner with a short TTL so expiry can be exercised quickly.
  final long ttl = 1000;
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);

  // While the link table still exists, nothing may be removed.
  cleaner.chore();
  assertTrue(fs.exists(linkBackRef));
  assertTrue(fs.exists(hfilePath));

  // Once the link table has been archived, the back-reference becomes removable.
  fs.rename(new Path(rootDir, tableLinkName), new Path(archiveDir, tableLinkName));
  cleaner.chore();
  assertFalse("Link should be deleted", fs.exists(linkBackRef));

  // After the TTL has passed, the hfile itself can be removed.
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertFalse("HFile should be deleted", fs.exists(hfilePath));

  // A few more passes clean up the remaining (now empty) directory structure.
  for (int i = 0; i < 4; ++i) {
    Thread.sleep(ttl * 2);
    cleaner.chore();
  }
  assertFalse("HFile should be deleted", fs.exists(new Path(archiveDir, tableName)));
  assertFalse("Link should be deleted", fs.exists(new Path(archiveDir, tableLinkName)));

  // Stop the cleaner chore thread.
  cleaner.interrupt();
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:71,代码来源:TestHFileLinkCleaner.java

示例13: testReferenceToHFileLink

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * This test creates an hfile and then the dir structures and files to verify that references
 * to hfilelinks (created by snapshot clones) can be properly interpreted.
 * Flow: write a store file, link to it from a "clone" region, split the link into
 * top/bottom daughter references, then scan both daughters and check the total row count.
 */
public void testReferenceToHFileLink() throws IOException {
  final String columnFamily = "f";

  Path rootDir = FSUtils.getRootDir(conf);

  String tablename = "_original-evil-name"; // adding legal table name chars to verify regex handles it.
  HRegionInfo hri = new HRegionInfo(Bytes.toBytes(tablename));
  // store dir = <root>/<tablename>/<rgn>/<cf>
  Path storedir = new Path(new Path(rootDir,
    new Path(hri.getTableNameAsString(), hri.getEncodedName())), columnFamily);

  // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
  StoreFile.Writer writer = new StoreFile.WriterBuilder(conf, cacheConf,
       this.fs, 8 * 1024)
          .withOutputDir(storedir)
          .build();
  Path storeFilePath = writer.getPath();
  writeStoreFile(writer);
  writer.close();

  // create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
  String target = "clone";
  // "7e0102" is a fake encoded region name for the clone's region dir.
  Path dstPath = new Path(rootDir, new Path(new Path(target, "7e0102"), columnFamily));
  HFileLink.create(conf, this.fs, dstPath, hri, storeFilePath.getName());
  Path linkFilePath = new Path(dstPath,
                HFileLink.createHFileLinkName(hri, storeFilePath.getName()));

  // create splits of the link.
  // <root>/clone/splitA/<cf>/<reftohfilelink>,
  // <root>/clone/splitB/<cf>/<reftohfilelink>
  Path splitDirA = new Path(new Path(rootDir,
      new Path(target, "571A")), columnFamily);
  Path splitDirB = new Path(new Path(rootDir,
      new Path(target, "571B")), columnFamily);
  StoreFile f = new StoreFile(fs, linkFilePath, conf, cacheConf, BloomType.NONE,
      NoOpDataBlockEncoder.INSTANCE);
  byte[] splitRow = SPLITKEY;
  Path pathA = StoreFile.split(fs, splitDirA, f, splitRow, Range.top); // top
  Path pathB = StoreFile.split(fs, splitDirB, f, splitRow, Range.bottom); // bottom

  // OK test the thing
  FSUtils.logFileSystemState(fs, rootDir, LOG);

  // There is a case where a file with the hfilelink pattern is actually a daughter
  // reference to a hfile link.  This code in StoreFile that handles this case.

  // Try to open store file from link
  StoreFile hsfA = new StoreFile(this.fs, pathA,  conf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);

  // Now confirm that I can read from the ref to link
  int count = 1;
  HFileScanner s = hsfA.createReader().getScanner(false, false);
  s.seekTo();
  while (s.next()) {
    count++;
  }
  assertTrue(count > 0); // read some rows here

  // Try to open store file from link
  StoreFile hsfB = new StoreFile(this.fs, pathB,  conf, cacheConf,
      StoreFile.BloomType.NONE, NoOpDataBlockEncoder.INSTANCE);

  // Now confirm that I can read from the ref to link
  HFileScanner sB = hsfB.createReader().getScanner(false, false);
  sB.seekTo();
  
  //count++ as seekTo() will advance the scanner
  count++;
  while (sB.next()) {
    count++;
  }

  // Both halves together must cover every row exactly once.
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
 
开发者ID:fengchen8086,项目名称:LCIndex-HBase-0.94.16,代码行数:81,代码来源:TestStoreFile.java

示例14: testHFileLinkCleaning

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * Verifies HFileLinkCleaner behavior: while a link to an archived hfile exists,
 * neither the link back-reference nor the hfile may be deleted; once the link
 * table is archived and the TTL expires, both become removable.
 * Fix: removed the unused local {@code archiveLinkStoreDir}, which was computed
 * but never read.
 */
@Test
public void testHFileLinkCleaning() throws Exception {
  Configuration conf = TEST_UTIL.getConfiguration();
  FSUtils.setRootDir(conf, TEST_UTIL.getDataTestDir());
  conf.set(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS, HFileLinkCleaner.class.getName());
  Path rootDir = FSUtils.getRootDir(conf);
  FileSystem fs = FileSystem.get(conf);

  final TableName tableName = TableName.valueOf("test-table");
  final TableName tableLinkName = TableName.valueOf("test-link");
  final String hfileName = "1234567890";
  final String familyName = "cf";

  HRegionInfo hri = new HRegionInfo(tableName);
  HRegionInfo hriLink = new HRegionInfo(tableLinkName);

  Path archiveDir = HFileArchiveUtil.getArchivePath(conf);
  Path archiveStoreDir = HFileArchiveUtil.getStoreArchivePath(conf,
        tableName, hri.getEncodedName(), familyName);

  // Create an hfile in the archive: <archive>/<table>/<region>/<cf>/<hfile>
  Path familyPath = getFamilyDirPath(archiveDir, tableName, hri.getEncodedName(), familyName);
  fs.mkdirs(familyPath);
  Path hfilePath = new Path(familyPath, hfileName);
  fs.createNewFile(hfilePath);

  // Create a link to the hfile from the link table; this also creates a
  // back-reference under the archived store directory.
  Path familyLinkPath = getFamilyDirPath(rootDir, tableLinkName,
                                      hriLink.getEncodedName(), familyName);
  fs.mkdirs(familyLinkPath);
  HFileLink.create(conf, fs, familyLinkPath, hri, hfileName);
  Path linkBackRefDir = HFileLink.getBackReferencesDir(archiveStoreDir, hfileName);
  assertTrue(fs.exists(linkBackRefDir));
  FileStatus[] backRefs = fs.listStatus(linkBackRefDir);
  assertEquals(1, backRefs.length);
  Path linkBackRef = backRefs[0].getPath();

  // Initialize the cleaner with a short TTL so expiry can be exercised quickly.
  final long ttl = 1000;
  conf.setLong(TimeToLiveHFileCleaner.TTL_CONF_KEY, ttl);
  Server server = new DummyServer();
  HFileCleaner cleaner = new HFileCleaner(1000, server, conf, fs, archiveDir);

  // While the link table still exists, nothing may be removed.
  cleaner.chore();
  assertTrue(fs.exists(linkBackRef));
  assertTrue(fs.exists(hfilePath));

  // Once the link table has been archived, the back-reference becomes removable.
  fs.rename(FSUtils.getTableDir(rootDir, tableLinkName),
      FSUtils.getTableDir(archiveDir, tableLinkName));
  cleaner.chore();
  assertFalse("Link should be deleted", fs.exists(linkBackRef));

  // After the TTL has passed, the hfile itself can be removed.
  Thread.sleep(ttl * 2);
  cleaner.chore();
  assertFalse("HFile should be deleted", fs.exists(hfilePath));

  // A few more passes clean up the remaining (now empty) directory structure.
  for (int i = 0; i < 4; ++i) {
    Thread.sleep(ttl * 2);
    cleaner.chore();
  }
  assertFalse("HFile should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableName)));
  assertFalse("Link should be deleted", fs.exists(FSUtils.getTableDir(archiveDir, tableLinkName)));

  // Stop the cleaner chore thread.
  cleaner.interrupt();
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:72,代码来源:TestHFileLinkCleaner.java

示例15: testReferenceToHFileLink

import org.apache.hadoop.hbase.io.HFileLink; //导入方法依赖的package包/类
/**
 * This test creates an hfile and then the dir structures and files to verify that references
 * to hfilelinks (created by snapshot clones) can be properly interpreted.
 * Flow: write a store file, link to it from a "clone" region, split the link into
 * top/bottom daughter references, then scan both daughters and check the total row count.
 */
public void testReferenceToHFileLink() throws IOException {
  // force temp data in hbase/target/test-data instead of /tmp/hbase-xxxx/
  Configuration testConf = new Configuration(this.conf);
  FSUtils.setRootDir(testConf, this.testDir);

  // adding legal table name chars to verify regex handles it.
  HRegionInfo hri = new HRegionInfo(TableName.valueOf("_original-evil-name"));
  HRegionFileSystem regionFs = HRegionFileSystem.createRegionOnFileSystem(
    testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()), hri);

  HFileContext meta = new HFileContextBuilder().withBlockSize(8 * 1024).build();
  // Make a store file and write data to it. <root>/<tablename>/<rgn>/<cf>/<file>
  StoreFile.Writer writer = new StoreFile.WriterBuilder(testConf, cacheConf, this.fs)
          .withFilePath(regionFs.createTempName())
          .withFileContext(meta)
          .build();
  writeStoreFile(writer);
  Path storeFilePath = regionFs.commitStoreFile(TEST_FAMILY, writer.getPath());

  // create link to store file. <root>/clone/region/<cf>/<hfile>-<region>-<table>
  HRegionInfo hriClone = new HRegionInfo(TableName.valueOf("clone"));
  // NOTE(review): the clone region is created under the ORIGINAL table's dir
  // (hri.getTable(), not hriClone.getTable()) — presumably intentional for this
  // fixture layout; confirm against the project's other versions of this test.
  HRegionFileSystem cloneRegionFs = HRegionFileSystem.createRegionOnFileSystem(
    testConf, fs, FSUtils.getTableDir(this.testDir, hri.getTable()),
      hriClone);
  Path dstPath = cloneRegionFs.getStoreDir(TEST_FAMILY);
  HFileLink.create(testConf, this.fs, dstPath, hri, storeFilePath.getName());
  Path linkFilePath = new Path(dstPath,
                HFileLink.createHFileLinkName(hri, storeFilePath.getName()));

  // create splits of the link.
  // <root>/clone/splitA/<cf>/<reftohfilelink>,
  // <root>/clone/splitB/<cf>/<reftohfilelink>
  HRegionInfo splitHriA = new HRegionInfo(hri.getTable(), null, SPLITKEY);
  HRegionInfo splitHriB = new HRegionInfo(hri.getTable(), SPLITKEY, null);
  StoreFile f = new StoreFile(fs, linkFilePath, testConf, cacheConf, BloomType.NONE);
  Path pathA = splitStoreFile(cloneRegionFs, splitHriA, TEST_FAMILY, f, SPLITKEY, true); // top
  Path pathB = splitStoreFile(cloneRegionFs, splitHriB, TEST_FAMILY, f, SPLITKEY, false);// bottom

  // OK test the thing
  FSUtils.logFileSystemState(fs, this.testDir, LOG);

  // There is a case where a file with the hfilelink pattern is actually a daughter
  // reference to a hfile link.  This code in StoreFile that handles this case.

  // Try to open store file from link
  StoreFile hsfA = new StoreFile(this.fs, pathA, testConf, cacheConf,
    BloomType.NONE);

  // Now confirm that I can read from the ref to link
  int count = 1;
  HFileScanner s = hsfA.createReader().getScanner(false, false);
  s.seekTo();
  while (s.next()) {
    count++;
  }
  assertTrue(count > 0); // read some rows here

  // Try to open store file from link
  StoreFile hsfB = new StoreFile(this.fs, pathB, testConf, cacheConf,
    BloomType.NONE);

  // Now confirm that I can read from the ref to link
  HFileScanner sB = hsfB.createReader().getScanner(false, false);
  sB.seekTo();
  
  //count++ as seekTo() will advance the scanner
  count++;
  while (sB.next()) {
    count++;
  }

  // Both halves together must cover every row exactly once.
  assertEquals((LAST_CHAR - FIRST_CHAR + 1) * (LAST_CHAR - FIRST_CHAR + 1), count);
}
 
开发者ID:grokcoder,项目名称:pbase,代码行数:79,代码来源:TestStoreFile.java


注:本文中的org.apache.hadoop.hbase.io.HFileLink.create方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。