当前位置: 首页>>代码示例>>Java>>正文


Java LocalFileSystem.delete方法代码示例

本文整理汇总了Java中org.apache.hadoop.fs.LocalFileSystem.delete方法的典型用法代码示例。如果您正苦于以下问题:Java LocalFileSystem.delete方法的具体用法?Java LocalFileSystem.delete怎么用?Java LocalFileSystem.delete使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在org.apache.hadoop.fs.LocalFileSystem的用法示例。


在下文中一共展示了LocalFileSystem.delete方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: stop

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
/**
 * Stops the mini HiveServer2 cluster and removes any MiniMRCluster
 * scratch directories it left under the local "target" directory.
 *
 * @throws Exception if stopping the cluster or deleting a directory fails
 */
public void stop() throws Exception {
    LocalFileSystem localFileSystem = FileSystem.getLocal(miniHS2.getHiveConf());
    miniHS2.stop();
    FileFilter filter = new FileFilter() {
        @Override
        public boolean accept(File pathname) {
            // Match only the scratch directories created by MiniMRCluster.
            return pathname.isDirectory()
                    && pathname.getName().startsWith("MiniMRCluster_");
        }
    };
    File targetDir = new File("target");
    // listFiles() returns null when "target" does not exist or is not a
    // directory; the original code would NPE in that case.
    File[] files = targetDir.listFiles(filter);
    if (files != null) {
        for (File file : files) {
            Path clusterRoot = new Path(file.getAbsolutePath());
            localFileSystem.delete(clusterRoot, true);
        }
    }
}
 
开发者ID:bobfreitas,项目名称:hiveunit-mr2,代码行数:21,代码来源:HiveTestCluster.java

示例2: setUp

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
@Before
public void setUp() throws Exception {
  // Start each test from an empty TEST_ROOT on the local file system.
  LocalFileSystem localFs = FileSystem.getLocal(conf);
  boolean staleRootPresent = localFs.exists(TEST_ROOT);
  if (staleRootPresent && !localFs.delete(TEST_ROOT, true)) {
    fail("Can't clean up test root dir");
  }
  localFs.mkdirs(TEST_ROOT);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:9,代码来源:TestBloomMapFile.java

示例3: setup

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
@Before
public void setup() throws Exception {
  // Ensure a pristine TEST_DIR exists before the test runs.
  LocalFileSystem local = FileSystem.getLocal(conf);
  if (local.exists(TEST_DIR)) {
    if (!local.delete(TEST_DIR, true)) {
      Assert.fail("Can't clean up test root dir");
    }
  }
  local.mkdirs(TEST_DIR);
}
 
开发者ID:nucypher,项目名称:hadoop-oss,代码行数:9,代码来源:TestMapFile.java

示例4: testFileCorruption

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
  // Write a file, then verify that corrupting either the data or its
  // checksum side-file surfaces as a checksum exception on the local FS.
  String testDir = PathUtils.getTestDirName(getClass());
  Path dataFile = new Path(testDir + "/corruption-test.dat");
  Path checksumFile = new Path(testDir + "/.corruption-test.dat.crc");

  writeFile(fileSys, dataFile);

  // Read the whole file back once while it is still intact.
  int length = (int) fileSys.getFileStatus(dataFile).getLen();
  byte[] contents = new byte[length];
  InputStream stream = fileSys.open(dataFile);
  IOUtils.readFully(stream, contents, 0, contents.length);
  stream.close();

  // Corrupting the hidden .crc file must be detected.
  checkFileCorruption(fileSys, dataFile, checksumFile);
  fileSys.delete(dataFile, true);

  writeFile(fileSys, dataFile);

  // Corrupting the data file itself must be detected too.
  checkFileCorruption(fileSys, dataFile, dataFile);

  fileSys.delete(dataFile, true);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:30,代码来源:TestFSInputChecker.java

示例5: testCopyToLocal

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
@Test
/*
 * Tests copying from archive file system to a local file system
 */
public void testCopyToLocal() throws Exception {
  final String fullHarPathStr = makeArchive();

  // Prepare an empty scratch directory on the local file system.
  final String tmpDir =
      System.getProperty("test.build.data","build/test/data") + "/work-dir/har-fs-tmp";
  final Path tmpPath = new Path(tmpDir);
  final LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
  localFs.delete(tmpPath, true);
  localFs.mkdirs(tmpPath);
  assertTrue(localFs.exists(tmpPath));

  // Mount a fresh HarFileSystem over the archive and copy one entry out.
  final HarFileSystem harFileSystem = new HarFileSystem(fs);
  try {
    final URI harUri = new URI(fullHarPathStr);
    harFileSystem.initialize(harUri, fs.getConf());

    final Path sourcePath = new Path(fullHarPathStr + Path.SEPARATOR + "a");
    final Path targetPath = new Path(tmpPath, "straus");
    harFileSystem.copyToLocalFile(false, sourcePath, targetPath);

    // The archived entry "a" holds exactly one character.
    final FileStatus copied = localFs.getFileStatus(targetPath);
    assertEquals(1, copied.getLen());
  } finally {
    harFileSystem.close();
    localFs.delete(tmpPath, true);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:35,代码来源:TestHadoopArchives.java

示例6: setUp

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
@Override
public void setUp() throws Exception {
  // Wipe any leftover TEST_ROOT so the test starts from a clean slate.
  LocalFileSystem local = FileSystem.getLocal(conf);
  boolean leftoverExists = local.exists(TEST_ROOT);
  if (leftoverExists && !local.delete(TEST_ROOT, true)) {
    Assert.fail("Can't clean up test root dir");
  }
  local.mkdirs(TEST_ROOT);
}
 
开发者ID:naver,项目名称:hadoop,代码行数:9,代码来源:TestBloomMapFile.java

示例7: tearDown

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
@AfterClass
public static void tearDown() throws IOException {
    // Remove the shared test directory from HDFS, stop the mini cluster,
    // then clear the matching directory on the local file system.
    Path testDir = new Path(hdfsBaseDir.getParent());
    hdfs.delete(testDir, true);
    cluster.shutdown();
    FileSystem.getLocal(conf).delete(testDir, true);
}
 
开发者ID:vespa-engine,项目名称:vespa,代码行数:9,代码来源:MapReduceTest.java

示例8: cleanUpSchedulerConfigFile

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
private void cleanUpSchedulerConfigFile() throws IOException {
  // Delete the generated capacity-scheduler config from test.build.data.
  LocalFileSystem localFs = FileSystem.getLocal(new Configuration());
  Path configFilePath = new Path(System.getProperty("test.build.data"),
      CapacitySchedulerConf.SCHEDULER_CONF_FILE);
  localFs.delete(configFilePath, false);
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:9,代码来源:ClusterWithCapacityScheduler.java

示例9: testFileCorruption

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
  // Create a file on the local FS and verify that corrupting either the
  // checksum side-file or the data file triggers a checksum exception.
  String baseDir = System.getProperty("test.build.data", ".");
  Path dataPath = new Path(baseDir + "/corruption-test.dat");
  Path crcPath = new Path(baseDir + "/.corruption-test.dat.crc");

  writeFile(fileSys, dataPath);

  // Read it back fully once while it is still intact.
  int len = (int) fileSys.getFileStatus(dataPath).getLen();
  byte[] data = new byte[len];
  InputStream input = fileSys.open(dataPath);
  IOUtils.readFully(input, data, 0, data.length);
  input.close();

  // A corrupted .crc file must be detected.
  checkFileCorruption(fileSys, dataPath, crcPath);
  fileSys.delete(dataPath, true);

  writeFile(fileSys, dataPath);

  // A corrupted data file must be detected as well.
  checkFileCorruption(fileSys, dataPath, dataPath);

  fileSys.delete(dataPath, true);
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:30,代码来源:TestFSInputChecker.java

示例10: tearDown

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
@After
public void tearDown() throws Exception {
    // The test root sits three levels above testDataPath. Remove it from
    // HDFS first, then from the local file system, then stop the cluster.
    String rootDir = testDataPath.getParentFile().getParentFile().getParent();
    fs.delete(new Path(rootDir), true);
    File rootTestFile = new File(rootDir);
    LocalFileSystem localFileSystem = FileSystem.getLocal(conf);
    localFileSystem.delete(new Path(rootTestFile.getAbsolutePath()), true);
    cluster.shutdown();
}
 
开发者ID:bobfreitas,项目名称:hiveunit-mr2,代码行数:13,代码来源:BasicMRTest.java

示例11: testFileCorruption

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
private void testFileCorruption(LocalFileSystem fileSys) throws IOException {
  // Exercise checksum verification on the local FS: corruption of either
  // the hidden .crc file or the data file must raise a checksum exception.
  final String workDir = System.getProperty("test.build.data", ".");
  final Path target = new Path(workDir + "/corruption-test.dat");
  final Path targetCrc = new Path(workDir + "/.corruption-test.dat.crc");

  writeFile(fileSys, target);

  // Consume the entire file once so the intact copy verifies cleanly.
  final int size = (int) fileSys.getFileStatus(target).getLen();
  final byte[] bytes = new byte[size];
  final InputStream reader = fileSys.open(target);
  IOUtils.readFully(reader, bytes, 0, bytes.length);
  reader.close();

  // Case 1: corrupt the checksum side-file.
  checkFileCorruption(fileSys, target, targetCrc);
  fileSys.delete(target, true);

  writeFile(fileSys, target);

  // Case 2: corrupt the data file itself.
  checkFileCorruption(fileSys, target, target);

  fileSys.delete(target, true);
}
 
开发者ID:hopshadoop,项目名称:hops,代码行数:30,代码来源:TestFSInputChecker.java

示例12: copyFromLocalFile

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if dst exists and overwrite is false, or if the
 *         upload fails or is interrupted
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  // Count one write op per completed multipart part.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt status so callers can observe it, and keep
    // the original exception as the cause instead of dropping it.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
 
开发者ID:naver,项目名称:hadoop,代码行数:66,代码来源:S3AFileSystem.java

示例13: copyFromLocalFile

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 * <p/>
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 * <p/>
 * delSrc indicates if the source should be removed
 *
 * @param delSrc    whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src       path
 * @param dst       path
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src,
                              Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setMetadata(om);
  putObjectRequest.setProgressListener(new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventType()) {
        case TRANSFER_PART_COMPLETED_EVENT:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  });

  try {
    client.putObject(putObjectRequest);
    statistics.incrementWriteOps(1);
  } catch (OSSException | ClientException e) {
    throw new IOException("Got interrupted, cancelling");
  }
  // This will delete unnecessary fake parent directories
  finishedWrite(key);
  if (delSrc) {
    local.delete(src, false);
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:58,代码来源:OSSFileSystem.java

示例14: copyFromLocalFile

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if dst exists and overwrite is false, or if the
 *         upload fails or is interrupted
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setSSEAlgorithm(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  // Count one write op per completed multipart part.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventType()) {
        case TRANSFER_PART_COMPLETED_EVENT:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt status so callers can observe it, and keep
    // the original exception as the cause instead of dropping it.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:66,代码来源:S3AFileSystem.java

示例15: copyFromLocalFile

import org.apache.hadoop.fs.LocalFileSystem; //导入方法依赖的package包/类
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 */
/**
 * The src file is on the local disk.  Add it to FS at
 * the given dst name.
 *
 * This version doesn't need to create a temporary file to calculate the md5.
 * Sadly this doesn't seem to be used by the shell cp :(
 *
 * delSrc indicates if the source should be removed
 * @param delSrc whether to delete the src
 * @param overwrite whether to overwrite an existing file
 * @param src path
 * @param dst path
 * @throws IOException if dst exists and overwrite is false, or if the
 *         upload fails or is interrupted
 */
@Override
public void copyFromLocalFile(boolean delSrc, boolean overwrite, Path src, 
  Path dst) throws IOException {
  String key = pathToKey(dst);

  if (!overwrite && exists(dst)) {
    throw new IOException(dst + " already exists");
  }
  if (LOG.isDebugEnabled()) {
    LOG.debug("Copying local file from " + src + " to " + dst);
  }

  // Since we have a local file, we don't need to stream into a temporary file
  LocalFileSystem local = getLocal(getConf());
  File srcfile = local.pathToFile(src);

  final ObjectMetadata om = new ObjectMetadata();
  if (StringUtils.isNotBlank(serverSideEncryptionAlgorithm)) {
    om.setServerSideEncryption(serverSideEncryptionAlgorithm);
  }
  PutObjectRequest putObjectRequest = new PutObjectRequest(bucket, key, srcfile);
  putObjectRequest.setCannedAcl(cannedACL);
  putObjectRequest.setMetadata(om);

  // Count one write op per completed multipart part.
  ProgressListener progressListener = new ProgressListener() {
    public void progressChanged(ProgressEvent progressEvent) {
      switch (progressEvent.getEventCode()) {
        case ProgressEvent.PART_COMPLETED_EVENT_CODE:
          statistics.incrementWriteOps(1);
          break;
        default:
          break;
      }
    }
  };

  Upload up = transfers.upload(putObjectRequest);
  up.addProgressListener(progressListener);
  try {
    up.waitForUploadResult();
    statistics.incrementWriteOps(1);
  } catch (InterruptedException e) {
    // Restore the interrupt status so callers can observe it, and keep
    // the original exception as the cause instead of dropping it.
    Thread.currentThread().interrupt();
    throw new IOException("Got interrupted, cancelling", e);
  }

  // This will delete unnecessary fake parent directories
  finishedWrite(key);

  if (delSrc) {
    local.delete(src, false);
  }
}
 
开发者ID:Nextzero,项目名称:hadoop-2.6.0-cdh5.4.3,代码行数:64,代码来源:S3AFileSystem.java


注:本文中的org.apache.hadoop.fs.LocalFileSystem.delete方法示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。