This article collects typical usage examples of the Java method org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.getWorkingSnapshotDir. If you are wondering what SnapshotDescriptionUtils.getWorkingSnapshotDir does, how to call it, or where to find real-world uses, the curated examples below should help; they also give a feel for the enclosing class org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.
The following presents 15 code examples of SnapshotDescriptionUtils.getWorkingSnapshotDir, taken from open-source projects.
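Before the examples, here is a minimal, self-contained sketch of the two call patterns that recur below: resolving the working (in-progress) directory for a specific snapshot, and resolving the temporary parent directory that holds all in-progress snapshots. It assumes the older protobuf-based SnapshotDescription API used in most of these examples and a hypothetical local hbase.rootdir; the exact on-disk layout (classically <rootDir>/.hbase-snapshot/.tmp/<snapshotName>) and the method signatures vary across HBase versions, so treat this as an orientation aid rather than a definitive recipe.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.protobuf.generated.HBaseProtos.SnapshotDescription;
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils;
import org.apache.hadoop.hbase.util.FSUtils;

public class WorkingSnapshotDirSketch {
  public static void main(String[] args) throws Exception {
    // Hypothetical setup: in a real deployment rootDir comes from the cluster configuration.
    Configuration conf = new Configuration();
    conf.set("hbase.rootdir", "file:///tmp/hbase");
    Path rootDir = FSUtils.getRootDir(conf);
    FileSystem fs = rootDir.getFileSystem(conf);

    // Per-snapshot working directory, used while the snapshot is being taken.
    SnapshotDescription snapshot =
        SnapshotDescription.newBuilder().setName("demo-snapshot").build();
    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
    System.out.println("working dir: " + workingDir);

    // Shared temporary parent holding all in-progress snapshots (cf. resetTempDir in Example 6).
    Path tmpParent = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir);
    System.out.println("tmp parent: " + tmpParent);

    // The cleanup pattern the examples use on failure: recursively delete the working directory.
    if (fs.exists(workingDir) && !fs.delete(workingDir, true)) {
      System.err.println("Couldn't delete working directory " + workingDir);
    }
  }
}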
Example 1: snapshotTable
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
/**
 * Take a snapshot using the specified handler.
 * On failure the snapshot temporary working directory is removed.
 * NOTE: prepareToTakeSnapshot(), called before this one, takes care of rejecting the
 * snapshot request if the table is busy with another snapshot/restore operation.
 * @param snapshot the snapshot description
 * @param handler the snapshot handler
 */
private synchronized void snapshotTable(SnapshotDescription snapshot,
    final TakeSnapshotHandler handler) throws HBaseSnapshotException {
  try {
    handler.prepare();
    this.executorService.submit(handler);
    this.snapshotHandlers.put(TableName.valueOf(snapshot.getTable()), handler);
  } catch (Exception e) {
    // cleanup the working directory by trying to delete it from the fs.
    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
    try {
      if (!this.master.getMasterFileSystem().getFileSystem().delete(workingDir, true)) {
        LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:" +
            ClientSnapshotDescriptionUtils.toString(snapshot));
      }
    } catch (IOException e1) {
      LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:" +
          ClientSnapshotDescriptionUtils.toString(snapshot));
    }
    // fail the snapshot
    throw new SnapshotCreationException("Could not build snapshot handler", e, snapshot);
  }
}
Example 2: snapshotTable
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
/**
 * Take a snapshot using the specified handler.
 * On failure the snapshot temporary working directory is removed.
 * NOTE: prepareToTakeSnapshot(), called before this one, takes care of rejecting the
 * snapshot request if the table is busy with another snapshot/restore operation.
 * @param snapshot the snapshot description
 * @param handler the snapshot handler
 */
private synchronized void snapshotTable(SnapshotDescription snapshot,
    final TakeSnapshotHandler handler) throws HBaseSnapshotException {
  try {
    handler.prepare();
    this.executorService.submit(handler);
    this.snapshotHandlers.put(snapshot.getTable(), handler);
  } catch (Exception e) {
    // cleanup the working directory by trying to delete it from the fs.
    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
    try {
      if (!this.master.getMasterFileSystem().getFileSystem().delete(workingDir, true)) {
        LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:" +
            SnapshotDescriptionUtils.toString(snapshot));
      }
    } catch (IOException e1) {
      LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:" +
          SnapshotDescriptionUtils.toString(snapshot));
    }
    // fail the snapshot
    throw new SnapshotCreationException("Could not build snapshot handler", e, snapshot);
  }
}
Example 3: testNoEditsDir
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
/**
 * Check that we don't get an exception if there is no recovered edits directory to copy
 * @throws Exception on failure
 */
@Test
public void testNoEditsDir() throws Exception {
  SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build();
  ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
  FileSystem fs = UTIL.getTestFileSystem();
  Path root = UTIL.getDataTestDir();
  String regionName = "regionA";
  Path regionDir = new Path(root, regionName);
  Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, root);
  try {
    // doesn't really matter where the region's snapshot directory is, but this is pretty close
    Path snapshotRegionDir = new Path(workingDir, regionName);
    fs.mkdirs(snapshotRegionDir);
    Path regionEdits = HLog.getRegionDirRecoveredEditsDir(regionDir);
    assertFalse("Edits dir exists already - it shouldn't", fs.exists(regionEdits));
    CopyRecoveredEditsTask task = new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir,
        snapshotRegionDir);
    task.call();
  } finally {
    // cleanup the working directory
    FSUtils.delete(fs, regionDir, true);
    FSUtils.delete(fs, workingDir, true);
  }
}
Example 4: testNoEditsDir
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
/**
 * Check that we don't get an exception if there is no recovered edits directory to copy
 * @throws Exception on failure
 */
@Test
public void testNoEditsDir() throws Exception {
  SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build();
  ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
  FileSystem fs = UTIL.getTestFileSystem();
  Path root = UTIL.getDataTestDir();
  String regionName = "regionA";
  Path regionDir = new Path(root, regionName);
  Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, root);
  try {
    // doesn't really matter where the region's snapshot directory is, but this is pretty close
    Path snapshotRegionDir = new Path(workingDir, regionName);
    fs.mkdirs(snapshotRegionDir);
    Path regionEdits = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
    assertFalse("Edits dir exists already - it shouldn't", fs.exists(regionEdits));
    CopyRecoveredEditsTask task = new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir,
        snapshotRegionDir);
    task.call();
  } finally {
    // cleanup the working directory
    FSUtils.delete(fs, regionDir, true);
    FSUtils.delete(fs, workingDir, true);
  }
}
Example 5: snapshotTable
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
/**
 * Take a snapshot using the specified handler.
 * On failure the snapshot temporary working directory is removed.
 * NOTE: prepareToTakeSnapshot(), called before this one, takes care of rejecting the
 * snapshot request if the table is busy with another snapshot/restore operation.
 * @param snapshot the snapshot description
 * @param handler the snapshot handler
 */
private synchronized void snapshotTable(SnapshotDescription snapshot,
    final TakeSnapshotHandler handler) throws HBaseSnapshotException {
  try {
    handler.prepare();
    this.executorService.submit(handler);
    this.snapshotHandlers.put(TableName.valueOf(snapshot.getTable()), handler);
  } catch (Exception e) {
    // cleanup the working directory by trying to delete it from the fs.
    Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
    try {
      if (!this.master.getMasterFileSystem().getFileSystem().delete(workingDir, true)) {
        LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:" +
            ClientSnapshotDescriptionUtils.toString(snapshot));
      }
    } catch (IOException e1) {
      LOG.error("Couldn't delete working directory (" + workingDir + " for snapshot:" +
          ClientSnapshotDescriptionUtils.toString(snapshot));
    }
    // fail the snapshot
    throw new SnapshotCreationException("Could not build snapshot handler", e,
        ProtobufUtil.createSnapshotDesc(snapshot));
  }
}
Example 6: resetTempDir
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
/**
 * Cleans up any snapshots in the snapshot/.tmp directory that were left from failed
 * snapshot attempts.
 *
 * @throws IOException if we can't reach the filesystem
 */
void resetTempDir() throws IOException {
  // cleanup any existing snapshots.
  Path tmpdir = SnapshotDescriptionUtils.getWorkingSnapshotDir(rootDir);
  if (master.getMasterFileSystem().getFileSystem().exists(tmpdir)) {
    if (!master.getMasterFileSystem().getFileSystem().delete(tmpdir, true)) {
      LOG.warn("Couldn't delete working snapshot directory: " + tmpdir);
    }
  }
}
Example 7: testLoadsTmpDir
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
@Test
public void testLoadsTmpDir() throws Exception {
  // don't refresh the cache unless we tell it to
  long period = Long.MAX_VALUE;
  Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
      "test-snapshot-file-cache-refresh", new SnapshotFiles());
  // create a file in a 'completed' snapshot
  Path snapshot = new Path(snapshotDir, "snapshot");
  Path region = new Path(snapshot, "7e91021");
  Path family = new Path(region, "fam");
  Path file1 = new Path(family, "file1");
  fs.create(file1);
  // create an 'in progress' snapshot
  SnapshotDescription desc = SnapshotDescription.newBuilder().setName("working").build();
  snapshot = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
  region = new Path(snapshot, "7e91021");
  family = new Path(region, "fam");
  Path file2 = new Path(family, "file2");
  fs.create(file2);
  FSUtils.logFileSystemState(fs, rootDir, LOG);
  // then make sure the cache finds both files
  assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
  assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
}
Example 8: testLoadsTmpDir
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
@Test
public void testLoadsTmpDir() throws Exception {
  // don't refresh the cache unless we tell it to
  long period = Long.MAX_VALUE;
  Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
      "test-snapshot-file-cache-refresh", new SnapshotFiles());
  // create a file in a 'completed' snapshot
  Path snapshot = new Path(snapshotDir, "snapshot");
  Path region = new Path(snapshot, "7e91021");
  Path family = new Path(region, "fam");
  Path file1 = new Path(family, "file1");
  fs.createNewFile(file1);
  // create an 'in progress' snapshot
  SnapshotDescription desc = SnapshotDescription.newBuilder().setName("working").build();
  snapshot = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
  region = new Path(snapshot, "7e91021");
  family = new Path(region, "fam");
  Path file2 = new Path(family, "file2");
  fs.createNewFile(file2);
  FSUtils.logFileSystemState(fs, rootDir, LOG);
  // then make sure the cache finds both files
  assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
  assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
}
Example 9: testCopyFiles
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
@Test
public void testCopyFiles() throws Exception {
  SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build();
  ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
  FileSystem fs = UTIL.getTestFileSystem();
  Path root = UTIL.getDataTestDir();
  String regionName = "regionA";
  Path regionDir = new Path(root, regionName);
  Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, root);
  try {
    // doesn't really matter where the region's snapshot directory is, but this is pretty close
    Path snapshotRegionDir = new Path(workingDir, regionName);
    fs.mkdirs(snapshotRegionDir);
    // put some stuff in the recovered.edits directory
    Path edits = HLog.getRegionDirRecoveredEditsDir(regionDir);
    fs.mkdirs(edits);
    // make a file with some data
    Path file1 = new Path(edits, "0000000000000002352");
    FSDataOutputStream out = fs.create(file1);
    byte[] data = new byte[] { 1, 2, 3, 4 };
    out.write(data);
    out.close();
    // make an empty file
    Path empty = new Path(edits, "empty");
    fs.createNewFile(empty);
    CopyRecoveredEditsTask task = new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir,
        snapshotRegionDir);
    CopyRecoveredEditsTask taskSpy = Mockito.spy(task);
    taskSpy.call();
    Path snapshotEdits = HLog.getRegionDirRecoveredEditsDir(snapshotRegionDir);
    FileStatus[] snapshotEditFiles = FSUtils.listStatus(fs, snapshotEdits);
    assertEquals("Got wrong number of files in the snapshot edits", 1, snapshotEditFiles.length);
    FileStatus file = snapshotEditFiles[0];
    assertEquals("Didn't copy expected file", file1.getName(), file.getPath().getName());
    Mockito.verify(monitor, Mockito.never()).receive(Mockito.any(ForeignException.class));
    Mockito.verify(taskSpy, Mockito.never()).snapshotFailure(Mockito.anyString(),
        Mockito.any(Exception.class));
  } finally {
    // cleanup the working directory
    FSUtils.delete(fs, regionDir, true);
    FSUtils.delete(fs, workingDir, true);
  }
}
Example 10: snapshotRegions
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
@Override
public void snapshotRegions(List<Pair<HRegionInfo, ServerName>> regionsAndLocations)
    throws IOException, KeeperException {
  try {
    timeoutInjector.start();
    Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, rootDir);
    // 1. get all the regions hosting this table.
    // extract each pair to separate lists
    Set<HRegionInfo> regions = new HashSet<HRegionInfo>();
    for (Pair<HRegionInfo, ServerName> p : regionsAndLocations) {
      regions.add(p.getFirst());
    }
    // 2. for each region, write all the info to disk
    String msg = "Starting to write region info and WALs for regions for offline snapshot:"
        + ClientSnapshotDescriptionUtils.toString(snapshot);
    LOG.info(msg);
    status.setStatus(msg);
    for (HRegionInfo regionInfo : regions) {
      snapshotDisabledRegion(regionInfo);
    }
    // 3. write the table info to disk
    LOG.info("Starting to copy tableinfo for offline snapshot: " +
        ClientSnapshotDescriptionUtils.toString(snapshot));
    TableInfoCopyTask tableInfoCopyTask = new TableInfoCopyTask(this.monitor, snapshot, fs,
        FSUtils.getRootDir(conf));
    tableInfoCopyTask.call();
    monitor.rethrowException();
    status.setStatus("Finished copying tableinfo for snapshot of table: " +
        snapshotTable);
  } catch (Exception e) {
    // make sure we capture the exception to propagate back to the client later
    String reason = "Failed snapshot " + ClientSnapshotDescriptionUtils.toString(snapshot)
        + " due to exception:" + e.getMessage();
    ForeignException ee = new ForeignException(reason, e);
    monitor.receive(ee);
    status.abort("Snapshot of table: " + snapshotTable +
        " failed because " + e.getMessage());
  } finally {
    LOG.debug("Marking snapshot" + ClientSnapshotDescriptionUtils.toString(snapshot)
        + " as finished.");
    // 6. mark the timer as finished - even if we got an exception, we don't need to time the
    // operation any further
    timeoutInjector.complete();
  }
}
Example 11: addRegionToSnapshot
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
/**
 * Complete taking the snapshot on the region. Writes the region info and adds references to the
 * working snapshot directory.
 *
 * TODO for api consistency, consider adding another version with no {@link ForeignExceptionSnare}
 * arg. (In the future other cancellable HRegion methods could eventually add a
 * {@link ForeignExceptionSnare}, or we could do something fancier).
 *
 * @param desc snapshot description object
 * @param exnSnare ForeignExceptionSnare that captures external exceptions in case we need to
 *   bail out. This is allowed to be null and will just be ignored in that case.
 * @throws IOException if there is an external or internal error causing the snapshot to fail
 */
public void addRegionToSnapshot(SnapshotDescription desc,
    ForeignExceptionSnare exnSnare) throws IOException {
  // This should be "fast" since we don't rewrite store files but instead
  // back up the store files by creating a reference
  Path rootDir = FSUtils.getRootDir(this.rsServices.getConfiguration());
  Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
  // 1. dump region meta info into the snapshot directory
  LOG.debug("Storing region-info for snapshot.");
  HRegionFileSystem snapshotRegionFs = HRegionFileSystem.createRegionOnFileSystem(conf,
      this.fs.getFileSystem(), snapshotDir, getRegionInfo());
  // 2. iterate through all the stores in the region
  LOG.debug("Creating references for hfiles");
  // This ensures that we have an atomic view of the directory as long as we have < ls limit
  // (batch size of the files in a directory) on the namenode. Otherwise, we get back the files in
  // batches and may miss files being added/deleted. This could be more robust (iteratively
  // checking to see if we have all the files until we are sure), but the limit is currently 1000
  // files/batch, far more than the number of store files under a single column family.
  for (Store store : stores.values()) {
    // 2.1. build the snapshot reference directory for the store
    Path dstStoreDir = snapshotRegionFs.getStoreDir(store.getFamily().getNameAsString());
    List<StoreFile> storeFiles = new ArrayList<StoreFile>(store.getStorefiles());
    if (LOG.isDebugEnabled()) {
      LOG.debug("Adding snapshot references for " + storeFiles + " hfiles");
    }
    // 2.2. iterate through all the store's files and create "references".
    int sz = storeFiles.size();
    for (int i = 0; i < sz; i++) {
      if (exnSnare != null) {
        exnSnare.rethrowException();
      }
      StoreFile storeFile = storeFiles.get(i);
      Path file = storeFile.getPath();
      LOG.debug("Creating reference for file (" + (i + 1) + "/" + sz + ") : " + file);
      Path referenceFile = new Path(dstStoreDir, file.getName());
      boolean success = true;
      if (storeFile.isReference()) {
        // write the Reference object to the snapshot
        storeFile.getFileInfo().getReference().write(fs.getFileSystem(), referenceFile);
      } else {
        // create "reference" to this store file. It is intentionally an empty file -- all
        // necessary information is captured by its fs location and filename. This allows us to
        // only figure out what needs to be done via a single nn operation (instead of having to
        // open and read the files as well).
        success = fs.getFileSystem().createNewFile(referenceFile);
      }
      if (!success) {
        throw new IOException("Failed to create reference file:" + referenceFile);
      }
    }
  }
}
Example 12: testCopyFiles
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
@Test
public void testCopyFiles() throws Exception {
  SnapshotDescription snapshot = SnapshotDescription.newBuilder().setName("snapshot").build();
  ForeignExceptionDispatcher monitor = Mockito.mock(ForeignExceptionDispatcher.class);
  FileSystem fs = UTIL.getTestFileSystem();
  Path root = UTIL.getDataTestDir();
  String regionName = "regionA";
  Path regionDir = new Path(root, regionName);
  Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(snapshot, root);
  try {
    // doesn't really matter where the region's snapshot directory is, but this is pretty close
    Path snapshotRegionDir = new Path(workingDir, regionName);
    fs.mkdirs(snapshotRegionDir);
    // put some stuff in the recovered.edits directory
    Path edits = HLogUtil.getRegionDirRecoveredEditsDir(regionDir);
    fs.mkdirs(edits);
    // make a file with some data
    Path file1 = new Path(edits, "0000000000000002352");
    FSDataOutputStream out = fs.create(file1);
    byte[] data = new byte[] { 1, 2, 3, 4 };
    out.write(data);
    out.close();
    // make an empty file
    Path empty = new Path(edits, "empty");
    fs.createNewFile(empty);
    CopyRecoveredEditsTask task = new CopyRecoveredEditsTask(snapshot, monitor, fs, regionDir,
        snapshotRegionDir);
    CopyRecoveredEditsTask taskSpy = Mockito.spy(task);
    taskSpy.call();
    Path snapshotEdits = HLogUtil.getRegionDirRecoveredEditsDir(snapshotRegionDir);
    FileStatus[] snapshotEditFiles = FSUtils.listStatus(fs, snapshotEdits);
    assertEquals("Got wrong number of files in the snapshot edits", 1, snapshotEditFiles.length);
    FileStatus file = snapshotEditFiles[0];
    assertEquals("Didn't copy expected file", file1.getName(), file.getPath().getName());
    Mockito.verify(monitor, Mockito.never()).receive(Mockito.any(ForeignException.class));
    Mockito.verify(taskSpy, Mockito.never()).snapshotFailure(Mockito.anyString(),
        Mockito.any(Exception.class));
  } finally {
    // cleanup the working directory
    FSUtils.delete(fs, regionDir, true);
    FSUtils.delete(fs, workingDir, true);
  }
}
Example 13: addRegionToSnapshot
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
/**
 * Complete taking the snapshot on the region. Writes the region info and adds references to the
 * working snapshot directory. TODO for api consistency, consider adding another version with no
 * {@link ForeignExceptionSnare} arg. (In the future other cancellable HRegion methods could
 * eventually add a {@link ForeignExceptionSnare}, or we could do something fancier).
 *
 * @param desc snapshot description object
 * @param exnSnare ForeignExceptionSnare that captures external exceptions in case we need to bail
 *   out. This is allowed to be null and will just be ignored in that case.
 * @throws IOException if there is an external or internal error causing the snapshot to fail
 */
public void addRegionToSnapshot(SnapshotDescription desc, ForeignExceptionSnare exnSnare)
    throws IOException {
  Path rootDir = FSUtils.getRootDir(conf);
  Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
  SnapshotManifest manifest =
      SnapshotManifest.create(conf, getFilesystem(), snapshotDir, desc, exnSnare);
  manifest.addRegion(this);
}
Example 14: addRegionToSnapshot
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
/**
 * Complete taking the snapshot on the region. Writes the region info and adds references to the
 * working snapshot directory.
 * <p/>
 * TODO for api consistency, consider adding another version with no {@link ForeignExceptionSnare}
 * arg. (In the future other cancellable HRegion methods could eventually add a
 * {@link ForeignExceptionSnare}, or we could do something fancier).
 *
 * @param desc snapshot description object
 * @param exnSnare ForeignExceptionSnare that captures external exceptions in case we need to
 *   bail out. This is allowed to be null and will just be ignored in that case.
 * @throws IOException if there is an external or internal error causing the snapshot to fail
 */
public void addRegionToSnapshot(SnapshotDescription desc,
    ForeignExceptionSnare exnSnare) throws IOException {
  Path rootDir = FSUtils.getRootDir(conf);
  Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
  SnapshotManifest manifest = SnapshotManifest.create(conf, getFilesystem(),
      snapshotDir, desc, exnSnare);
  manifest.addRegion(this);
}
Example 15: addRegionToSnapshot
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; // import the package/class that the method depends on
/**
 * Complete taking the snapshot on the region. Writes the region info and adds references to the
 * working snapshot directory.
 *
 * TODO for api consistency, consider adding another version with no {@link ForeignExceptionSnare}
 * arg. (In the future other cancellable HRegion methods could eventually add a
 * {@link ForeignExceptionSnare}, or we could do something fancier).
 *
 * @param desc snapshot description object
 * @param exnSnare ForeignExceptionSnare that captures external exceptions in case we need to
 *   bail out. This is allowed to be null and will just be ignored in that case.
 * @throws IOException if there is an external or internal error causing the snapshot to fail
 */
public void addRegionToSnapshot(SnapshotDescription desc,
    ForeignExceptionSnare exnSnare) throws IOException {
  Path rootDir = FSUtils.getRootDir(conf);
  Path snapshotDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
  SnapshotManifest manifest = SnapshotManifest.create(conf, getFilesystem(),
      snapshotDir, desc, exnSnare);
  manifest.addRegion(this);
}