本文整理汇总了Java中org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils.getSnapshotsDir方法的典型用法代码示例。如果您正苦于以下问题:Java SnapshotDescriptionUtils.getSnapshotsDir方法的具体用法?Java SnapshotDescriptionUtils.getSnapshotsDir怎么用?Java SnapshotDescriptionUtils.getSnapshotsDir使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils的用法示例。
在下文中一共展示了SnapshotDescriptionUtils.getSnapshotsDir方法的13个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: migrateFsTableDescriptors
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; //导入方法依赖的package包/类
/**
 * Migrates every snapshot, user table and system table that still needs migration.
 * Order matters: snapshots first, then each user table, then system tables;
 * hbase:meta is migrated last because its state marks the migration as complete.
 */
private static void migrateFsTableDescriptors(FileSystem fs, Path rootDir) throws IOException {
  // Snapshots go first: any completed snapshot dir containing a table info file is migrated.
  Path snapshotRoot = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  if (fs.exists(snapshotRoot)) {
    LOG.info("Migrating snapshots");
    FileStatus[] completed = fs.listStatus(snapshotRoot,
        new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
    for (FileStatus status : completed) {
      migrateTable(fs, status.getPath());
    }
  }
  LOG.info("Migrating user tables");
  for (Path tableDir : FSUtils.getTableDirs(fs, rootDir)) {
    migrateTable(fs, tableDir);
  }
  LOG.info("Migrating system tables");
  // ROOT should already be gone; meta last so its migration signals completion.
  migrateTableIfExists(fs, rootDir, TableName.META_TABLE_NAME);
}
示例2: SnapshotFileCache
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; //导入方法依赖的package包/类
/**
 * Create a snapshot file cache for all snapshots under the specified [root]/.snapshot on the
 * filesystem.
 * @param fs {@link FileSystem} where the snapshots are stored
 * @param rootDir hbase root directory
 * @param cacheRefreshPeriod period (ms) with which the cache should be refreshed
 * @param cacheRefreshDelay amount of time (ms) to wait before the first cache refresh
 * @param refreshThreadName name of the cache refresh thread
 * @param inspectSnapshotFiles Filter to apply to each snapshot to extract the files.
 */
public SnapshotFileCache(FileSystem fs, Path rootDir, long cacheRefreshPeriod,
long cacheRefreshDelay, String refreshThreadName, SnapshotFileInspector inspectSnapshotFiles) {
this.fs = fs;
this.fileInspector = inspectSnapshotFiles;
// resolve [root]/.snapshot once; every refresh scans this fixed directory
this.snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
// periodically refresh the file cache to make sure we aren't superfluously saving files.
// second Timer arg 'true' makes the refresh thread a daemon so it never blocks JVM shutdown;
// refreshTimer is assigned before scheduling in case the task fires immediately
this.refreshTimer = new Timer(refreshThreadName, true);
this.refreshTimer.scheduleAtFixedRate(new RefreshCacheTask(), cacheRefreshDelay,
cacheRefreshPeriod);
}
示例3: getCompletedSnapshots
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; //导入方法依赖的package包/类
/**
 * Gets the list of all completed snapshots.
 * @param snapshotDir snapshot directory; when {@code null}, the default
 *        [root]/.snapshot directory is used
 * @return list of SnapshotDescriptions; empty when the directory does not exist
 * @throws IOException File system exception
 */
private List<SnapshotDescription> getCompletedSnapshots(Path snapshotDir) throws IOException {
  List<SnapshotDescription> snapshotDescs = new ArrayList<>();
  // first create the snapshot root path and check to see if it exists
  FileSystem fs = master.getMasterFileSystem().getFileSystem();
  if (snapshotDir == null) {
    snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  }
  // if there are no snapshots, return an empty list
  if (!fs.exists(snapshotDir)) {
    return snapshotDescs;
  }
  // ignore all the snapshots in progress
  FileStatus[] snapshots = fs.listStatus(snapshotDir,
      new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
  // loop through all the completed snapshots
  for (FileStatus snapshot : snapshots) {
    Path info = new Path(snapshot.getPath(), SnapshotDescriptionUtils.SNAPSHOTINFO_FILE);
    // a snapshot dir without an info file is bad — report and keep going
    if (!fs.exists(info)) {
      LOG.error("Snapshot information for " + snapshot.getPath() + " doesn't exist");
      continue;
    }
    // try-with-resources guarantees the stream is closed even if parsing throws
    try (FSDataInputStream in = fs.open(info)) {
      snapshotDescs.add(SnapshotDescription.parseFrom(in));
    } catch (IOException e) {
      // a corrupted snapshot must not abort listing the remaining ones
      LOG.warn("Found a corrupted snapshot " + snapshot.getPath(), e);
    }
  }
  return snapshotDescs;
}
示例4: setupCluster
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; //导入方法依赖的package包/类
/**
 * Setup the config for the cluster: start the mini cluster and capture the
 * filesystem, master, root dir, snapshot dir and archive dir shared by the tests.
 */
@BeforeClass
public static void setupCluster() throws Exception {
setupConf(UTIL.getConfiguration());
// the cluster must be running before fs/master/rootDir below can be resolved
UTIL.startMiniCluster(NUM_RS);
fs = UTIL.getDFSCluster().getFileSystem();
master = UTIL.getMiniHBaseCluster().getMaster();
rootDir = master.getMasterFileSystem().getRootDir();
// [root]/.snapshot — where completed snapshots live
snapshots = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
// [root]/archive — where hfiles are moved instead of deleted
archiveDir = new Path(rootDir, HConstants.HFILE_ARCHIVE_DIRECTORY);
}
示例5: testReloadModifiedDirectory
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; //导入方法依赖的package包/类
@Test
public void testReloadModifiedDirectory() throws IOException {
  // effectively disable automatic refresh; the cache only reloads on demand
  long refreshPeriod = Long.MAX_VALUE;
  Path snapshotRoot = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, refreshPeriod, 10000000,
      "test-snapshot-file-cache-refresh", new SnapshotFiles());
  Path snapshotPath = new Path(snapshotRoot, "snapshot");
  Path familyPath = new Path(new Path(snapshotPath, "7e91021"), "fam");
  Path file1 = new Path(familyPath, "file1");
  Path file2 = new Path(familyPath, "file2");
  // lay down two hfiles inside the snapshot
  fs.createNewFile(file1);
  fs.createNewFile(file2);
  FSUtils.logFileSystemState(fs, rootDir, LOG);
  assertTrue("Cache didn't find " + file1, cache.contains(file1.getName()));
  // replace the snapshot contents: delete it and recreate with a differently named file
  fs.delete(snapshotPath, true);
  Path file3 = new Path(familyPath, "new_file");
  fs.createNewFile(file3);
  FSUtils.logFileSystemState(fs, rootDir, LOG);
  // the cache must notice the modified directory and pick up the new file
  assertTrue("Cache didn't find new file:" + file3, cache.contains(file3.getName()));
}
示例6: testLoadsTmpDir
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; //导入方法依赖的package包/类
@Test
public void testLoadsTmpDir() throws Exception {
  // don't refresh the cache unless we tell it to
  long period = Long.MAX_VALUE;
  Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
      "test-snapshot-file-cache-refresh", new SnapshotFiles());
  // create a file in a 'completed' snapshot
  Path snapshot = new Path(snapshotDir, "snapshot");
  Path region = new Path(snapshot, "7e91021");
  Path family = new Path(region, "fam");
  Path file1 = new Path(family, "file1");
  // createNewFile instead of create: create() returns an FSDataOutputStream that was
  // never closed (resource leak); the test only needs the file to exist
  fs.createNewFile(file1);
  // create an 'in progress' snapshot
  SnapshotDescription desc = SnapshotDescription.newBuilder().setName("working").build();
  snapshot = SnapshotDescriptionUtils.getWorkingSnapshotDir(desc, rootDir);
  region = new Path(snapshot, "7e91021");
  family = new Path(region, "fam");
  Path file2 = new Path(family, "file2");
  fs.createNewFile(file2);
  FSUtils.logFileSystemState(fs, rootDir, LOG);
  // then make sure the cache finds both files
  assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
  assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
}
示例7: testReloadModifiedDirectory
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; //导入方法依赖的package包/类
@Test
public void testReloadModifiedDirectory() throws IOException {
  // don't refresh the cache unless we tell it to
  long period = Long.MAX_VALUE;
  Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
      "test-snapshot-file-cache-refresh", new SnapshotFiles());
  Path snapshot = new Path(snapshotDir, "snapshot");
  Path region = new Path(snapshot, "7e91021");
  Path family = new Path(region, "fam");
  Path file1 = new Path(family, "file1");
  Path file2 = new Path(family, "file2");
  // create two hfiles under the snapshot; createNewFile instead of create because
  // create() returns an FSDataOutputStream that was never closed (resource leak)
  fs.createNewFile(file1);
  fs.createNewFile(file2);
  FSUtils.logFileSystemState(fs, rootDir, LOG);
  assertTrue("Cache didn't find " + file1, cache.contains(file1.getName()));
  // now delete the snapshot and add a file with a different name
  fs.delete(snapshot, true);
  Path file3 = new Path(family, "new_file");
  fs.createNewFile(file3);
  FSUtils.logFileSystemState(fs, rootDir, LOG);
  assertTrue("Cache didn't find new file:" + file3, cache.contains(file3.getName()));
}
示例8: testLoadsTmpDir
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; //导入方法依赖的package包/类
@Test
public void testLoadsTmpDir() throws Exception {
  // an effectively infinite refresh period: the cache only refreshes on demand
  long refreshPeriod = Long.MAX_VALUE;
  Path snapshotRoot = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, refreshPeriod, 10000000,
      "test-snapshot-file-cache-refresh", new SnapshotFiles());
  // lay down one hfile inside a 'completed' snapshot
  Path completedDir = new Path(snapshotRoot, "snapshot");
  Path file1 = new Path(new Path(new Path(completedDir, "7e91021"), "fam"), "file1");
  fs.createNewFile(file1);
  // and one inside an 'in progress' (working) snapshot
  SnapshotDescription working = SnapshotDescription.newBuilder().setName("working").build();
  Path workingDir = SnapshotDescriptionUtils.getWorkingSnapshotDir(working, rootDir);
  Path file2 = new Path(new Path(new Path(workingDir, "7e91021"), "fam"), "file2");
  fs.createNewFile(file2);
  FSUtils.logFileSystemState(fs, rootDir, LOG);
  // the cache must report both files, regardless of snapshot completion state
  assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
  assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
}
示例9: checkSnapshotSupport
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; //导入方法依赖的package包/类
/**
 * Called at startup, to verify if snapshot operation is supported, and to avoid
 * starting the master if there're snapshots present but the cleaners needed are missing.
 * Otherwise we can end up with snapshot data loss.
 * @param conf The {@link Configuration} object to use
 * @param mfs The MasterFileSystem to use
 * @throws IOException in case of file-system operation failure
 * @throws UnsupportedOperationException in case cleaners are missing and
 * there're snapshot in the system
 */
private void checkSnapshotSupport(final Configuration conf, final MasterFileSystem mfs)
throws IOException, UnsupportedOperationException {
// Verify if snapshot is disabled by the user
String enabled = conf.get(HBASE_SNAPSHOT_ENABLED);
boolean snapshotEnabled = conf.getBoolean(HBASE_SNAPSHOT_ENABLED, false);
// "user disabled" means the property was explicitly set to a non-empty value that
// parses as false — distinct from the property simply being absent
boolean userDisabled = (enabled != null && enabled.trim().length() > 0 && !snapshotEnabled);
// Extract cleaners from conf
Set<String> hfileCleaners = new HashSet<String>();
String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);
Set<String> logCleaners = new HashSet<String>();
cleaners = conf.getStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
if (cleaners != null) Collections.addAll(logCleaners, cleaners);
// check if an older version of snapshot directory was present
Path oldSnapshotDir = new Path(mfs.getRootDir(), HConstants.OLD_SNAPSHOT_DIR_NAME);
FileSystem fs = mfs.getFileSystem();
// NOTE(review): oldSnapshotDir is already rooted at mfs.getRootDir(); wrapping it in
// new Path(rootDir, ...) again looks redundant — confirm Hadoop Path(parent, child)
// semantics for an already-qualified child before changing this.
List<SnapshotDescription> ss = getCompletedSnapshots(new Path(rootDir, oldSnapshotDir));
if (ss != null && !ss.isEmpty()) {
// old-layout snapshots are only reported here, not migrated
LOG.error("Snapshots from an earlier release were found under: " + oldSnapshotDir);
LOG.error("Please rename the directory as " + HConstants.SNAPSHOT_DIR_NAME);
}
// If the user has enabled the snapshot, we force the cleaners to be present
// otherwise we still need to check if cleaners are enabled or not and verify
// that there're no snapshot in the .snapshot folder.
if (snapshotEnabled) {
// Inject snapshot cleaners, if snapshot.enable is true
hfileCleaners.add(SnapshotHFileCleaner.class.getName());
hfileCleaners.add(HFileLinkCleaner.class.getName());
logCleaners.add(SnapshotLogCleaner.class.getName());
// Set cleaners conf
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
hfileCleaners.toArray(new String[hfileCleaners.size()]));
conf.setStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS,
logCleaners.toArray(new String[logCleaners.size()]));
} else {
// Verify if cleaners are present: all three must be configured for snapshots to
// be considered supported
snapshotEnabled = logCleaners.contains(SnapshotLogCleaner.class.getName()) &&
hfileCleaners.contains(SnapshotHFileCleaner.class.getName()) &&
hfileCleaners.contains(HFileLinkCleaner.class.getName());
// Warn if the cleaners are enabled but the snapshot.enabled property is false/not set.
if (snapshotEnabled) {
LOG.warn("Snapshot log and hfile cleaners are present in the configuration, " +
"but the '" + HBASE_SNAPSHOT_ENABLED + "' property " +
(userDisabled ? "is set to 'false'." : "is not set."));
}
}
// Mark snapshot feature as enabled if cleaners are present and user has not disabled it.
this.isSnapshotSupported = snapshotEnabled && !userDisabled;
// If cleaners are not enabled, verify that there're no snapshot in the .snapshot folder
// otherwise we end up with snapshot data loss.
if (!snapshotEnabled) {
LOG.info("Snapshot feature is not enabled, missing log and hfile cleaners.");
Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(mfs.getRootDir());
if (fs.exists(snapshotDir)) {
FileStatus[] snapshots = FSUtils.listStatus(fs, snapshotDir,
new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
if (snapshots != null) {
LOG.error("Snapshots are present, but cleaners are not enabled.");
// presumably a no-arg overload that throws UnsupportedOperationException
// (not recursion into this method) — TODO confirm at the declaration site
checkSnapshotSupport();
}
}
}
}
示例10: cleanupFiles
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; //导入方法依赖的package包/类
@After
public void cleanupFiles() throws Exception {
  // wipe the snapshot directory recursively so each test starts from a clean slate
  Path snapshotRoot = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  fs.delete(snapshotRoot, true);
}
示例11: testLoadAndDelete
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; //导入方法依赖的package包/类
@Test(timeout = 10000000)
public void testLoadAndDelete() throws Exception {
  // don't refresh the cache unless we tell it to
  long period = Long.MAX_VALUE;
  Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, period, 10000000,
      "test-snapshot-file-cache-refresh", new SnapshotFiles());
  Path snapshot = new Path(snapshotDir, "snapshot");
  Path region = new Path(snapshot, "7e91021");
  Path family = new Path(region, "fam");
  Path file1 = new Path(family, "file1");
  Path file2 = new Path(family, "file2");
  // create two hfiles under the snapshot; createNewFile instead of create because
  // create() returns an FSDataOutputStream that was never closed (resource leak)
  fs.createNewFile(file1);
  fs.createNewFile(file2);
  FSUtils.logFileSystemState(fs, rootDir, LOG);
  // then make sure the cache finds them
  assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
  assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
  String not = "file-shouldn't-be-found";
  assertFalse("Cache found '" + not + "', but it shouldn't have.", cache.contains(not));
  // make sure we get a little bit of separation in the modification times
  // its okay if we sleep a little longer (b/c of GC pause), as long as we sleep a little
  Thread.sleep(10);
  LOG.debug("Deleting snapshot.");
  // then delete the snapshot and make sure that we can still find the files
  if (!fs.delete(snapshot, true)) {
    throw new IOException("Couldn't delete " + snapshot + " for an unknown reason.");
  }
  FSUtils.logFileSystemState(fs, rootDir, LOG);
  LOG.debug("Checking to see if file is deleted.");
  assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
  assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
  // then trigger a refresh
  cache.triggerCacheRefreshForTesting();
  // and now it shouldn't find those files
  assertFalse("Cache found '" + file1 + "', but it shouldn't have.",
      cache.contains(file1.getName()));
  assertFalse("Cache found '" + file2 + "', but it shouldn't have.",
      cache.contains(file2.getName()));
  fs.delete(snapshotDir, true);
}
示例12: testLoadAndDelete
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; //导入方法依赖的package包/类
@Test(timeout = 10000000)
public void testLoadAndDelete() throws Exception {
  // effectively never auto-refresh; all refreshes in this test are explicit
  long refreshPeriod = Long.MAX_VALUE;
  Path snapshotRoot = SnapshotDescriptionUtils.getSnapshotsDir(rootDir);
  SnapshotFileCache cache = new SnapshotFileCache(fs, rootDir, refreshPeriod, 10000000,
      "test-snapshot-file-cache-refresh", new SnapshotFiles());
  Path snapshotPath = new Path(snapshotRoot, "snapshot");
  Path familyPath = new Path(new Path(snapshotPath, "7e91021"), "fam");
  Path file1 = new Path(familyPath, "file1");
  Path file2 = new Path(familyPath, "file2");
  // lay down two hfiles under the snapshot
  fs.createNewFile(file1);
  fs.createNewFile(file2);
  FSUtils.logFileSystemState(fs, rootDir, LOG);
  // both must be visible through the cache, and an unknown name must not be
  assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
  assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
  String not = "file-shouldn't-be-found";
  assertFalse("Cache found '" + not + "', but it shouldn't have.", cache.contains(not));
  // sleep briefly so the deletion gets a later modification time than the creation;
  // sleeping longer (e.g. across a GC pause) is harmless
  Thread.sleep(10);
  LOG.debug("Deleting snapshot.");
  // removing the snapshot must not immediately evict its files from the cache
  if (!fs.delete(snapshotPath, true)) {
    throw new IOException("Couldn't delete " + snapshotPath + " for an unknown reason.");
  }
  FSUtils.logFileSystemState(fs, rootDir, LOG);
  LOG.debug("Checking to see if file is deleted.");
  assertTrue("Cache didn't find:" + file1, cache.contains(file1.getName()));
  assertTrue("Cache didn't find:" + file2, cache.contains(file2.getName()));
  // only an explicit refresh drops the deleted snapshot's files
  cache.triggerCacheRefreshForTesting();
  assertFalse("Cache found '" + file1 + "', but it shouldn't have.",
      cache.contains(file1.getName()));
  assertFalse("Cache found '" + file2 + "', but it shouldn't have.",
      cache.contains(file2.getName()));
  fs.delete(snapshotRoot, true);
}
示例13: checkSnapshotSupport
import org.apache.hadoop.hbase.snapshot.SnapshotDescriptionUtils; //导入方法依赖的package包/类
/**
 * Called at startup, to verify if snapshot operation is supported, and to avoid
 * starting the master if there're snapshots present but the cleaners needed are missing.
 * Otherwise we can end up with snapshot data loss.
 * @param conf The {@link Configuration} object to use
 * @param mfs The MasterFileSystem to use
 * @throws IOException in case of file-system operation failure
 * @throws UnsupportedOperationException in case cleaners are missing and
 * there're snapshot in the system
 */
private void checkSnapshotSupport(final Configuration conf, final MasterFileSystem mfs)
throws IOException, UnsupportedOperationException {
// Verify if snapshot is disabled by the user
String enabled = conf.get(HBASE_SNAPSHOT_ENABLED);
boolean snapshotEnabled = conf.getBoolean(HBASE_SNAPSHOT_ENABLED, false);
// "user disabled" = the property was explicitly set to a non-empty value that parses
// as false, as opposed to simply being absent
boolean userDisabled = (enabled != null && enabled.trim().length() > 0 && !snapshotEnabled);
// Extract cleaners from conf
Set<String> hfileCleaners = new HashSet<>();
String[] cleaners = conf.getStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS);
if (cleaners != null) Collections.addAll(hfileCleaners, cleaners);
Set<String> logCleaners = new HashSet<>();
cleaners = conf.getStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS);
if (cleaners != null) Collections.addAll(logCleaners, cleaners);
// check if an older version of snapshot directory was present
Path oldSnapshotDir = new Path(mfs.getRootDir(), HConstants.OLD_SNAPSHOT_DIR_NAME);
FileSystem fs = mfs.getFileSystem();
// NOTE(review): oldSnapshotDir is already rooted at mfs.getRootDir(); wrapping it in
// new Path(rootDir, ...) again looks redundant — confirm Path(parent, child) semantics.
List<SnapshotDescription> ss = getCompletedSnapshots(new Path(rootDir, oldSnapshotDir), false);
if (ss != null && !ss.isEmpty()) {
// old-layout snapshots are only reported here, not migrated
LOG.error("Snapshots from an earlier release were found under: " + oldSnapshotDir);
LOG.error("Please rename the directory as " + HConstants.SNAPSHOT_DIR_NAME);
}
// If the user has enabled the snapshot, we force the cleaners to be present
// otherwise we still need to check if cleaners are enabled or not and verify
// that there're no snapshot in the .snapshot folder.
if (snapshotEnabled) {
// Inject snapshot cleaners, if snapshot.enable is true
hfileCleaners.add(SnapshotHFileCleaner.class.getName());
hfileCleaners.add(HFileLinkCleaner.class.getName());
// Set cleaners conf
conf.setStrings(HFileCleaner.MASTER_HFILE_CLEANER_PLUGINS,
hfileCleaners.toArray(new String[hfileCleaners.size()]));
conf.setStrings(HConstants.HBASE_MASTER_LOGCLEANER_PLUGINS,
logCleaners.toArray(new String[logCleaners.size()]));
} else {
// Verify if cleaners are present: both hfile cleaners must be configured for
// snapshots to be considered supported (no log cleaner check in this variant)
snapshotEnabled =
hfileCleaners.contains(SnapshotHFileCleaner.class.getName()) &&
hfileCleaners.contains(HFileLinkCleaner.class.getName());
// Warn if the cleaners are enabled but the snapshot.enabled property is false/not set.
if (snapshotEnabled) {
LOG.warn("Snapshot log and hfile cleaners are present in the configuration, " +
"but the '" + HBASE_SNAPSHOT_ENABLED + "' property " +
(userDisabled ? "is set to 'false'." : "is not set."));
}
}
// Mark snapshot feature as enabled if cleaners are present and user has not disabled it.
this.isSnapshotSupported = snapshotEnabled && !userDisabled;
// If cleaners are not enabled, verify that there're no snapshot in the .snapshot folder
// otherwise we end up with snapshot data loss.
if (!snapshotEnabled) {
LOG.info("Snapshot feature is not enabled, missing log and hfile cleaners.");
Path snapshotDir = SnapshotDescriptionUtils.getSnapshotsDir(mfs.getRootDir());
if (fs.exists(snapshotDir)) {
FileStatus[] snapshots = FSUtils.listStatus(fs, snapshotDir,
new SnapshotDescriptionUtils.CompletedSnaphotDirectoriesFilter(fs));
if (snapshots != null) {
LOG.error("Snapshots are present, but cleaners are not enabled.");
// presumably a no-arg overload that throws UnsupportedOperationException
// (not recursion into this method) — TODO confirm at the declaration site
checkSnapshotSupport();
}
}
}
}