本文整理汇总了Java中org.apache.hadoop.hbase.backup.HFileArchiver.archiveStoreFiles方法的典型用法代码示例。如果您正苦于以下问题:Java HFileArchiver.archiveStoreFiles方法的具体用法?Java HFileArchiver.archiveStoreFiles怎么用?Java HFileArchiver.archiveStoreFiles使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.backup.HFileArchiver
的用法示例。
在下文中一共展示了HFileArchiver.archiveStoreFiles方法的8个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: completeCompaction
import org.apache.hadoop.hbase.backup.HFileArchiver; //导入方法依赖的package包/类
/**
 * Completes a compaction: moves the newly written compacted file (if any)
 * from its temporary location into this store's directory, swaps it into
 * {@code this.storefiles} in place of the files it replaced, notifies
 * changed-reader observers, archives the replaced files, and recomputes
 * the store's size counters.
 *
 * @param compactedFiles the store files that were compacted away and must
 *        be removed from the active set
 * @param compactedFile writer of the newly produced file; may be null if
 *        all cells were expired or deleted during compaction
 * @return the new StoreFile moved into place, or null if none was produced
 * @throws IOException if validating or renaming the compacted file fails.
 *         NOTE(review): IOExceptions raised during the list-swap/archive
 *         step are logged and swallowed by the catch block below, not
 *         rethrown.
 */
StoreFile completeCompaction(final Collection<StoreFile> compactedFiles,
final StoreFile.Writer compactedFile)
throws IOException {
// 1. Moving the new files into place -- if there is a new file (may not
// be if all cells were expired or deleted).
StoreFile result = null;
if (compactedFile != null) {
// Sanity-check the file is readable before exposing it to scanners.
validateStoreFile(compactedFile.getPath());
// Move the file into the right spot
Path origPath = compactedFile.getPath();
// homedir is presumably this store's directory — field not visible here.
Path destPath = new Path(homedir, origPath.getName());
LOG.info("Renaming compacted file at " + origPath + " to " + destPath);
if (!HBaseFileSystem.renameDirForFileSystem(fs, origPath, destPath)) {
LOG.error("Failed move of compacted file " + origPath + " to " +
destPath);
throw new IOException("Failed move of compacted file " + origPath +
" to " + destPath);
}
// Wrap the moved file in a StoreFile and eagerly open its reader so it
// is immediately usable once published in the store file list.
result = new StoreFile(this.fs, destPath, this.conf, this.cacheConf,
this.family.getBloomFilterType(), this.dataBlockEncoder);
passSchemaMetricsTo(result);
result.createReader();
}
try {
// 2. Swap the store file list under the write lock.
this.lock.writeLock().lock();
try {
// Change this.storefiles so it reflects new state but do not
// delete old store files until we have sent out notification of
// change in case old files are still being accessed by outstanding
// scanners.
ArrayList<StoreFile> newStoreFiles = Lists.newArrayList(storefiles);
newStoreFiles.removeAll(compactedFiles);
filesCompacting.removeAll(compactedFiles); // safe bc: lock.writeLock()
// If a StoreFile result, move it into place. May be null.
if (result != null) {
newStoreFiles.add(result);
}
this.storefiles = sortAndClone(newStoreFiles);
} finally {
// We need the lock, as long as we are updating the storefiles
// or changing the memstore. Let us release it before calling
// notifyChangeReadersObservers. See HBASE-4485 for a possible
// deadlock scenario that could have happened if continue to hold
// the lock.
this.lock.writeLock().unlock();
}
// Tell observers that list of StoreFiles has changed.
notifyChangedReadersObservers();
// let the archive util decide if we should archive or delete the files
LOG.debug("Removing store files after compaction...");
HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.region, this.family.getName(),
compactedFiles);
} catch (IOException e) {
// Best-effort cleanup: the failure is logged, not propagated, because
// the new file (if any) is already live in this.storefiles.
e = RemoteExceptionHandler.checkIOException(e);
LOG.error("Failed replacing compacted files in " + this +
". Compacted file is " + (result == null? "none": result.toString()) +
". Files replaced " + compactedFiles.toString() +
" some of which may have been already removed", e);
}
// 4. Compute new store size
this.storeSize = 0L;
this.totalUncompressedBytes = 0L;
for (StoreFile hsf : this.storefiles) {
StoreFile.Reader r = hsf.getReader();
if (r == null) {
// Tolerate files whose reader is gone; skip them in the size totals.
LOG.warn("StoreFile " + hsf + " has a null Reader");
continue;
}
this.storeSize += r.length();
this.totalUncompressedBytes += r.getTotalUncompressedBytes();
}
return result;
}
示例2: completeCompaction
import org.apache.hadoop.hbase.backup.HFileArchiver; //导入方法依赖的package包/类
/**
 * Completes a compaction: renames the newly written compacted file (if
 * any) into this store's directory, swaps it into {@code this.storefiles}
 * in place of the files it replaced, notifies changed-reader observers,
 * archives the replaced files, and recomputes the store's size counters.
 *
 * @param compactedFiles the store files that were compacted away and must
 *        be removed from the active set
 * @param compactedFile writer of the newly produced file; may be null if
 *        all cells were expired or deleted during compaction
 * @return the new StoreFile moved into place, or null if none was produced
 * @throws IOException if validating or renaming the compacted file fails.
 *         NOTE(review): IOExceptions raised during the list-swap/archive
 *         step are logged and swallowed by the catch block below, not
 *         rethrown.
 */
StoreFile completeCompaction(final Collection<StoreFile> compactedFiles,
final StoreFile.Writer compactedFile)
throws IOException {
// 1. Moving the new files into place -- if there is a new file (may not
// be if all cells were expired or deleted).
StoreFile result = null;
if (compactedFile != null) {
// Sanity-check the file is readable before exposing it to scanners.
validateStoreFile(compactedFile.getPath());
// Move the file into the right spot
Path origPath = compactedFile.getPath();
// homedir is presumably this store's directory — field not visible here.
Path destPath = new Path(homedir, origPath.getName());
LOG.info("Renaming compacted file at " + origPath + " to " + destPath);
if (!fs.rename(origPath, destPath)) {
LOG.error("Failed move of compacted file " + origPath + " to " +
destPath);
throw new IOException("Failed move of compacted file " + origPath +
" to " + destPath);
}
// Wrap the moved file in a StoreFile and eagerly open its reader so it
// is immediately usable once published in the store file list.
result = new StoreFile(this.fs, destPath, this.conf, this.cacheConf,
this.family.getBloomFilterType(), this.dataBlockEncoder);
result.createReader();
}
try {
// 2. Swap the store file list under the write lock.
this.lock.writeLock().lock();
try {
// Change this.storefiles so it reflects new state but do not
// delete old store files until we have sent out notification of
// change in case old files are still being accessed by outstanding
// scanners.
ArrayList<StoreFile> newStoreFiles = Lists.newArrayList(storefiles);
newStoreFiles.removeAll(compactedFiles);
filesCompacting.removeAll(compactedFiles); // safe bc: lock.writeLock()
// If a StoreFile result, move it into place. May be null.
if (result != null) {
newStoreFiles.add(result);
}
this.storefiles = sortAndClone(newStoreFiles);
} finally {
// We need the lock, as long as we are updating the storefiles
// or changing the memstore. Let us release it before calling
// notifyChangeReadersObservers. See HBASE-4485 for a possible
// deadlock scenario that could have happened if continue to hold
// the lock.
this.lock.writeLock().unlock();
}
// Tell observers that list of StoreFiles has changed.
notifyChangedReadersObservers();
// let the archive util decide if we should archive or delete the files
LOG.debug("Removing store files after compaction...");
HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.region,
this.family.getName(), compactedFiles);
} catch (IOException e) {
// Best-effort cleanup: the failure is logged, not propagated, because
// the new file (if any) is already live in this.storefiles.
e = RemoteExceptionHandler.checkIOException(e);
LOG.error("Failed replacing compacted files in " + this +
". Compacted file is " + (result == null? "none": result.toString()) +
". Files replaced " + compactedFiles.toString() +
" some of which may have been already removed", e);
}
// 4. Compute new store size
this.storeSize = 0L;
this.totalUncompressedBytes = 0L;
for (StoreFile hsf : this.storefiles) {
StoreFile.Reader r = hsf.getReader();
if (r == null) {
// Tolerate files whose reader is gone; skip them in the size totals.
LOG.warn("StoreFile " + hsf + " has a null Reader");
continue;
}
this.storeSize += r.length();
this.totalUncompressedBytes += r.getTotalUncompressedBytes();
}
return result;
}
示例3: removeStoreFiles
import org.apache.hadoop.hbase.backup.HFileArchiver; //导入方法依赖的package包/类
/**
 * Closes and archives the given store files belonging to the named family.
 * Delegates the archive-or-delete decision to {@link HFileArchiver}.
 *
 * @param familyName name of the column family that owns the files
 * @param storeFiles the store files to archive
 * @throws IOException if the archiving fails
 */
public void removeStoreFiles(final String familyName, final Collection<StoreFile> storeFiles)
    throws IOException {
  final byte[] familyBytes = Bytes.toBytes(familyName);
  HFileArchiver.archiveStoreFiles(
      this.conf, this.fs, this.regionInfoForFs, this.tableDir, familyBytes, storeFiles);
}
示例4: removeStoreFiles
import org.apache.hadoop.hbase.backup.HFileArchiver; //导入方法依赖的package包/类
/**
 * Archives (and thereby removes from the active set) the supplied store
 * files of the given column family.
 *
 * @param familyName the column family the files belong to
 * @param storeFiles the store files to archive
 * @throws IOException when archiving cannot be completed
 */
public void removeStoreFiles(final String familyName, final Collection<StoreFile> storeFiles)
    throws IOException {
  HFileArchiver.archiveStoreFiles(
      this.conf,
      this.fs,
      this.regionInfoForFs,
      this.tableDir,
      Bytes.toBytes(familyName),
      storeFiles);
}
示例5: removeStoreFiles
import org.apache.hadoop.hbase.backup.HFileArchiver; //导入方法依赖的package包/类
/**
 * Closes and archives the specified store files of one column family,
 * letting {@link HFileArchiver} decide whether to archive or delete them.
 *
 * @param familyName family that contains the store files
 * @param storeFiles the set of store files to remove
 * @throws IOException if the archiving fails
 */
public void removeStoreFiles(final String familyName, final Collection<StoreFile> storeFiles)
    throws IOException {
  final byte[] family = Bytes.toBytes(familyName);
  HFileArchiver.archiveStoreFiles(this.conf, this.fs, this.regionInfo, this.tableDir,
      family, storeFiles);
}
示例6: removeMobFiles
import org.apache.hadoop.hbase.backup.HFileArchiver; //导入方法依赖的package包/类
/**
 * Archives the given mob files by delegating to
 * {@link HFileArchiver#archiveStoreFiles}, using the table's synthetic mob
 * region as the owning region.
 *
 * @param conf the current configuration
 * @param fs the file system holding the files
 * @param tableName the table the mob files belong to
 * @param tableDir the table directory
 * @param family the column family name
 * @param storeFiles the mob files to be archived
 * @throws IOException if the archiving fails
 */
public static void removeMobFiles(Configuration conf, FileSystem fs, TableName tableName,
    Path tableDir, byte[] family, Collection<HStoreFile> storeFiles) throws IOException {
  HFileArchiver.archiveStoreFiles(
      conf, fs, getMobRegionInfo(tableName), tableDir, family, storeFiles);
}
示例7: removeStoreFiles
import org.apache.hadoop.hbase.backup.HFileArchiver; //导入方法依赖的package包/类
/**
 * Closes and archives the supplied store files of the named column family
 * via {@link HFileArchiver}.
 *
 * @param familyName family that contains the store files
 * @param storeFiles the store files to remove
 * @throws IOException if the archiving fails
 */
public void removeStoreFiles(String familyName, Collection<HStoreFile> storeFiles)
    throws IOException {
  final byte[] familyBytes = Bytes.toBytes(familyName);
  HFileArchiver.archiveStoreFiles(
      this.conf, this.fs, this.regionInfoForFs, this.tableDir, familyBytes, storeFiles);
}
示例8: removeMobFiles
import org.apache.hadoop.hbase.backup.HFileArchiver; //导入方法依赖的package包/类
/**
 * Archives the given mob files. The table directory is resolved under the
 * configured mob home, and the table's synthetic mob region is used as the
 * owning region for archiving.
 *
 * @param conf the current configuration
 * @param fs the file system holding the files
 * @param tableName the table the mob files belong to
 * @param family the column family name
 * @param storeFiles the mob files to be archived
 * @throws IOException if the archiving fails
 */
public static void removeMobFiles(Configuration conf, FileSystem fs, TableName tableName,
    byte[] family, Collection<StoreFile> storeFiles) throws IOException {
  final Path mobTableDir = FSUtils.getTableDir(MobUtils.getMobHome(conf), tableName);
  HFileArchiver.archiveStoreFiles(
      conf, fs, getMobRegionInfo(tableName), mobTableDir, family, storeFiles);
}