本文整理汇总了Java中org.apache.hadoop.fs.LocalFileSystem.delete方法的典型用法代码示例。如果您正苦于以下问题:Java LocalFileSystem.delete方法的具体用法?Java LocalFileSystem.delete怎么用?Java LocalFileSystem.delete使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.fs.LocalFileSystem
的用法示例。
在下文中一共展示了LocalFileSystem.delete方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: stop
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
/**
 * Stops the mini HiveServer2 instance and removes any leftover
 * {@code MiniMRCluster_*} working directories from the {@code target} dir.
 *
 * @throws Exception if stopping the server or deleting a directory fails
 */
public void stop() throws Exception {
LocalFileSystem localFileSystem = FileSystem.getLocal(miniHS2.getHiveConf());
miniHS2.stop();
FileFilter filter = new FileFilter() {
@Override
public boolean accept(File pathname) {
if (pathname.isDirectory() &&
pathname.getName().startsWith("MiniMRCluster_")) {
return true;
}
return false;
}
};
File targetDir = new File("target");
// listFiles() returns null when the directory does not exist or cannot be
// read; guard against it to avoid a NullPointerException in the loop below.
File[] files = targetDir.listFiles(filter);
if (files != null) {
for (File file : files) {
Path clusterRoot = new Path(file.getAbsolutePath());
localFileSystem.delete(clusterRoot, true);
}
}
}
示例2: setUp
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
@Before
public void setUp() throws Exception {
LocalFileSystem localFs = FileSystem.getLocal(conf);
// Remove any leftovers from a previous run; an undeletable root means the
// test cannot start from a clean slate.
if (localFs.exists(TEST_ROOT)) {
if (!localFs.delete(TEST_ROOT, true)) {
fail("Can't clean up test root dir");
}
}
localFs.mkdirs(TEST_ROOT);
}
示例3: setup
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
@Before
public void setup() throws Exception {
LocalFileSystem localFs = FileSystem.getLocal(conf);
// A stale test directory that cannot be removed is a fatal setup failure.
boolean staleDirRemains = localFs.exists(TEST_DIR) && !localFs.delete(TEST_DIR, true);
if (staleDirRemains) {
Assert.fail("Can't clean up test root dir");
}
localFs.mkdirs(TEST_DIR);
}
示例4: testFileCorruption
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
/**
 * Creates a file and verifies that checksum corruption (of either the
 * .crc side file or the data itself) results in a ChecksumException on
 * the local file system.
 *
 * @param fileSys the checksummed local file system under test
 * @throws IOException on unexpected I/O failure
 */
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
// create a file and verify that checksum corruption results in
// a checksum exception on LocalFS
String dir = PathUtils.getTestDirName(getClass());
Path file = new Path(dir + "/corruption-test.dat");
Path crcFile = new Path(dir + "/.corruption-test.dat.crc");
writeFile(fileSys, file);
int fileLen = (int)fileSys.getFileStatus(file).getLen();
byte [] buf = new byte[fileLen];
// try-with-resources ensures the stream is closed even if readFully throws
try (InputStream in = fileSys.open(file)) {
IOUtils.readFully(in, buf, 0, buf.length);
}
// check .crc corruption
checkFileCorruption(fileSys, file, crcFile);
fileSys.delete(file, true);
writeFile(fileSys, file);
// check data corruption
checkFileCorruption(fileSys, file, file);
fileSys.delete(file, true);
}
示例5: testCopyToLocal
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
@Test
/*
 * Tests copying from archive file system to a local file system
 */
public void testCopyToLocal() throws Exception {
final String fullHarPathStr = makeArchive();
// Destination directory on the local file system:
final String tmpDir =
System.getProperty("test.build.data", "build/test/data") + "/work-dir/har-fs-tmp";
final Path tmpPath = new Path(tmpDir);
final LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
localFs.delete(tmpPath, true);
localFs.mkdirs(tmpPath);
assertTrue(localFs.exists(tmpPath));
// Initialize a fresh HarFileSystem over the archive:
final HarFileSystem harFileSystem = new HarFileSystem(fs);
try {
harFileSystem.initialize(new URI(fullHarPathStr), fs.getConf());
final Path sourcePath = new Path(fullHarPathStr + Path.SEPARATOR + "a");
final Path targetPath = new Path(tmpPath, "straus");
// Copy one archived entry out to the local file system:
harFileSystem.copyToLocalFile(false, sourcePath, targetPath);
// the copied file should contain just 1 character:
final FileStatus straus = localFs.getFileStatus(targetPath);
assertEquals(1, straus.getLen());
} finally {
harFileSystem.close();
localFs.delete(tmpPath, true);
}
}
示例6: setUp
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
@Override
public void setUp() throws Exception {
LocalFileSystem localFs = FileSystem.getLocal(conf);
// Fail fast when a prior run's root directory cannot be removed.
boolean cleaned = !localFs.exists(TEST_ROOT) || localFs.delete(TEST_ROOT, true);
if (!cleaned) {
Assert.fail("Can't clean up test root dir");
}
localFs.mkdirs(TEST_ROOT);
}
示例7: tearDown
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
@AfterClass
public static void tearDown() throws IOException {
// Remove the shared test directory from HDFS, then stop the mini cluster.
Path testDir = new Path(hdfsBaseDir.getParent());
hdfs.delete(testDir, true);
cluster.shutdown();
// The same path may also exist on the local file system; clear it as well.
FileSystem.getLocal(conf).delete(testDir, true);
}
示例8: cleanUpSchedulerConfigFile
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
private void cleanUpSchedulerConfigFile() throws IOException {
// Best-effort removal of the generated capacity-scheduler config file.
LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
Path confFilePath = new Path(System.getProperty("test.build.data"),
CapacitySchedulerConf.SCHEDULER_CONF_FILE);
localFs.delete(confFilePath, false);
}
示例9: testFileCorruption
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
/**
 * Creates a file and verifies that checksum corruption (of either the
 * .crc side file or the data itself) results in a ChecksumException on
 * the local file system.
 *
 * @param fileSys the checksummed local file system under test
 * @throws IOException on unexpected I/O failure
 */
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
// create a file and verify that checksum corruption results in
// a checksum exception on LocalFS
String dir = System.getProperty("test.build.data", ".");
Path file = new Path(dir + "/corruption-test.dat");
Path crcFile = new Path(dir + "/.corruption-test.dat.crc");
writeFile(fileSys, file);
int fileLen = (int)fileSys.getFileStatus(file).getLen();
byte [] buf = new byte[fileLen];
// try-with-resources ensures the stream is closed even if readFully throws
try (InputStream in = fileSys.open(file)) {
IOUtils.readFully(in, buf, 0, buf.length);
}
// check .crc corruption
checkFileCorruption(fileSys, file, crcFile);
fileSys.delete(file, true);
writeFile(fileSys, file);
// check data corruption
checkFileCorruption(fileSys, file, file);
fileSys.delete(file, true);
}
示例10: tearDown
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
@After
public void tearDown() throws Exception {
// Delete the data directory tree from the cluster file system.
Path dataDir = new Path(
testDataPath.getParentFile().getParentFile().getParent());
fs.delete(dataDir, true);
// Mirror the cleanup on the local file system before shutting down.
File rootTestFile = new File(testDataPath.getParentFile().getParentFile().getParent());
Path rootTestPath = new Path(rootTestFile.getAbsolutePath());
LocalFileSystem localFs = FileSystem.getLocal(conf);
localFs.delete(rootTestPath, true);
cluster.shutdown();
}
示例11: testFileCorruption
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
/**
 * Creates a file and verifies that checksum corruption (of either the
 * .crc side file or the data itself) results in a ChecksumException on
 * the local file system.
 *
 * @param fileSys the checksummed local file system under test
 * @throws IOException on unexpected I/O failure
 */
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
// create a file and verify that checksum corruption results in
// a checksum exception on LocalFS
String dir = System.getProperty("test.build.data", ".");
Path file = new Path(dir + "/corruption-test.dat");
Path crcFile = new Path(dir + "/.corruption-test.dat.crc");
writeFile(fileSys, file);
int fileLen = (int) fileSys.getFileStatus(file).getLen();
byte[] buf = new byte[fileLen];
// try-with-resources ensures the stream is closed even if readFully throws
try (InputStream in = fileSys.open(file)) {
IOUtils.readFully(in, buf, 0, buf.length);
}
// check .crc corruption
checkFileCorruption(fileSys, file, crcFile);
fileSys.delete(file, true);
writeFile(fileSys, file);
// check data corruption
checkFileCorruption(fileSys, file, file);
fileSys.delete(file, true);
}
示例12: copyFromLocalFile
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
/**
 * The src file is on the local disk. Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if dst exists and overwrite is false, or if the
 *         upload fails or is interrupted
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src,
Path dst) throws IOException {
String key = pathToKey(dst);
if (!overwrite && exists(dst)) {
throw new IOException(dst + " already exists");
}
if (LOG.isDebugEnabled()) {
LOG.debug("Copying local file from " + src + " to " + dst);
}
// Since we have a local file, we don't need to stream into a temporary file
LocalFileSystem local = getLocal(getConf());
File srcfile = local.pathToFile(src);
final ObjectMetadata om = new ObjectMetadata();
if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
om.setServerSideEncryption(serverSideEncryptionAlgorithm);
}
PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
putObjectRequest.setCannedAcl(cannedACL);
putObjectRequest.setMetadata(om);
// Count each completed multipart chunk as one write operation.
ProgressListener progressListener = new ProgressListener() {
public void progressChanged(ProgressEvent progressEvent) {
switch (progressEvent.getEventCode()) {
case ProgressEvent.PART_COMPLETED_EVENT_CODE:
statistics.incrementWriteOps(1);
break;
default:
break;
}
}
};
Upload up = transfers.upload(putObjectRequest);
up.addProgressListener(progressListener);
try {
up.waitForUploadResult();
statistics.incrementWriteOps(1);
} catch (InterruptedException e) {
// Restore the interrupt status for callers and preserve the cause.
Thread.currentThread().interrupt();
throw new IOException("Got interrupted, cancelling", e);
}
// This will delete unnecessary fake parent directories
finishedWrite(key);
if (delSrc) {
local.delete(src, false);
}
}
示例13: copyFromLocalFile
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
/**
 * The src file is on the local disk. Add it to FS at
 * the given dst name.
 * <p/>
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 * <p/>
 * delSrc indicates if the source should be removed
 *
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if dst exists and overwrite is false, or if the
 *         OSS upload fails
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src,
Path dst) throws IOException {
String key = pathToKey(dst);
if (!overwrite && exists(dst)) {
throw new IOException(dst + " already exists");
}
if (LOG.isDebugEnabled()) {
LOG.debug("Copying local file from " + src + " to " + dst);
}
// Since we have a local file, we don't need to stream into a temporary file
LocalFileSystem local = getLocal(getConf());
File srcfile = local.pathToFile(src);
final ObjectMetadata om = new ObjectMetadata();
PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
putObjectRequest.setMetadata(om);
// Count each completed transfer part as one write operation.
putObjectRequest.setProgressListener(new ProgressListener() {
public void progressChanged(ProgressEvent progressEvent) {
switch (progressEvent.getEventType()) {
case TRANSFER_PART_COMPLETED_EVENT:
statistics.incrementWriteOps(1);
break;
default:
break;
}
}
});
try {
client.putObject(putObjectRequest);
statistics.incrementWriteOps(1);
} catch (OSSException | ClientException e) {
// These are service/client failures, not interruptions; report the real
// problem and keep the original exception as the cause.
throw new IOException("Failed to upload " + src + " to " + dst, e);
}
// This will delete unnecessary fake parent directories
finishedWrite(key);
if (delSrc) {
local.delete(src, false);
}
}
示例14: copyFromLocalFile
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
/**
 * The src file is on the local disk. Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if dst exists and overwrite is false, or if the
 *         upload fails or is interrupted
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src,
Path dst) throws IOException {
String key = pathToKey(dst);
if (!overwrite && exists(dst)) {
throw new IOException(dst + " already exists");
}
if (LOG.isDebugEnabled()) {
LOG.debug("Copying local file from " + src + " to " + dst);
}
// Since we have a local file, we don't need to stream into a temporary file
LocalFileSystem local = getLocal(getConf());
File srcfile = local.pathToFile(src);
final ObjectMetadata om = new ObjectMetadata();
if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
om.setSSEAlgorithm(serverSideEncryptionAlgorithm);
}
PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
putObjectRequest.setCannedAcl(cannedACL);
putObjectRequest.setMetadata(om);
// Count each completed transfer part as one write operation.
ProgressListener progressListener = new ProgressListener() {
public void progressChanged(ProgressEvent progressEvent) {
switch (progressEvent.getEventType()) {
case TRANSFER_PART_COMPLETED_EVENT:
statistics.incrementWriteOps(1);
break;
default:
break;
}
}
};
Upload up = transfers.upload(putObjectRequest);
up.addProgressListener(progressListener);
try {
up.waitForUploadResult();
statistics.incrementWriteOps(1);
} catch (InterruptedException e) {
// Restore the interrupt status for callers and preserve the cause.
Thread.currentThread().interrupt();
throw new IOException("Got interrupted, cancelling", e);
}
// This will delete unnecessary fake parent directories
finishedWrite(key);
if (delSrc) {
local.delete(src, false);
}
}
示例15: copyFromLocalFile
import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
/**
 * The src file is on the local disk. Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if dst exists and overwrite is false, or if the
 *         upload fails or is interrupted
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src,
Path dst) throws IOException {
String key = pathToKey(dst);
if (!overwrite && exists(dst)) {
throw new IOException(dst + " already exists");
}
if (LOG.isDebugEnabled()) {
LOG.debug("Copying local file from " + src + " to " + dst);
}
// Since we have a local file, we don't need to stream into a temporary file
LocalFileSystem local = getLocal(getConf());
File srcfile = local.pathToFile(src);
final ObjectMetadata om = new ObjectMetadata();
if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
om.setServerSideEncryption(serverSideEncryptionAlgorithm);
}
PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
putObjectRequest.setCannedAcl(cannedACL);
putObjectRequest.setMetadata(om);
// Count each completed multipart chunk as one write operation.
ProgressListener progressListener = new ProgressListener() {
public void progressChanged(ProgressEvent progressEvent) {
switch (progressEvent.getEventCode()) {
case ProgressEvent.PART_COMPLETED_EVENT_CODE:
statistics.incrementWriteOps(1);
break;
default:
// Other progress events are not counted.
break;
}
}
};
Upload up = transfers.upload(putObjectRequest);
up.addProgressListener(progressListener);
try {
up.waitForUploadResult();
statistics.incrementWriteOps(1);
} catch (InterruptedException e) {
// Restore the interrupt status for callers and preserve the cause.
Thread.currentThread().interrupt();
throw new IOException("Got interrupted, cancelling", e);
}
// This will delete unnecessary fake parent directories
finishedWrite(key);
if (delSrc) {
local.delete(src, false);
}
}