

Java FileSystem.copyToLocalFile Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.copyToLocalFile. If you are asking how FileSystem.copyToLocalFile works, how to call it, or where to find examples of its use, the curated code samples below should help. You can also browse further usage examples for the enclosing class, org.apache.hadoop.fs.FileSystem.


The following presents 4 code examples of the FileSystem.copyToLocalFile method, sorted by popularity by default.
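Before diving into the examples, here is a minimal, self-contained sketch of the basic call. This is an illustration only: the namenode URI and both paths are placeholder values, not taken from the examples below.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CopyToLocalExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Resolve the FileSystem that owns the source path (HDFS in this sketch).
    Path source = new Path("hdfs://namenode:8020/data/example.avsc");
    FileSystem fs = source.getFileSystem(conf);
    // Copy the remote file to the local file system; delSrc = false keeps the source.
    fs.copyToLocalFile(false, source, new Path("/tmp/example.avsc"));
    fs.close();
  }
}

A two-argument overload, copyToLocalFile(Path src, Path dst), also exists and is equivalent to passing delSrc = false.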

Example 1: copy

import org.apache.hadoop.fs.FileSystem; // import the class that provides the method
private boolean copy(String source, String destination) throws IOException {
  if (source == null || destination == null) {
    return false;
  }

  if (temporaryDirectory == null) {
    temporaryDirectory = Files.createTempDirectory("avro-schema-download-folder");
    temporaryDirectory.toFile().deleteOnExit();
  }

  FileSystem sourceFileSystem = new Path(source).getFileSystem(sourceHiveConf);
  String tempPath = temporaryDirectory.toString();
  // delSrc = false: download the schema file without deleting the remote source.
  sourceFileSystem.copyToLocalFile(false, new Path(source), new Path(tempPath));

  FileSystem destinationFileSystem = new Path(destination).getFileSystem(replicaHiveConf);
  // delSrc = true: upload the temporary local copy and delete it once copied.
  destinationFileSystem.copyFromLocalFile(true, new Path(tempPath), new Path(destination));
  LOG.info("Avro schema has been copied from '{}' to '{}'", source, destination);

  return destinationFileSystem.exists(new Path(destination));
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 21, Source: AvroSerDeTransformation.java
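Note the design choice here: the copy is staged through a local temporary directory because the source and destination paths may live on two different file systems (resolved against sourceHiveConf and replicaHiveConf respectively), so a direct remote-to-remote copy is not generally available. Downloading with delSrc = false leaves the remote source intact, while re-uploading with delSrc = true cleans up the local staging copy.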

Example 2: copyToLocal

import org.apache.hadoop.fs.FileSystem; // import the class that provides the method
private void copyToLocal(Path sourceLocation, Path localLocation) {
  FileSystem sourceFileSystem;
  try {
    sourceFileSystem = sourceLocation.getFileSystem(sourceHiveConf);
    sourceFileSystem.copyToLocalFile(false, sourceLocation, localLocation);
  } catch (IOException e) {
    throw new CircusTrainException("Couldn't copy file from " + sourceLocation + " to " + localLocation, e);
  }
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines: 10, Source: SchemaCopier.java

Example 3: testReplayWorksThoughLotsOfFlushing

import org.apache.hadoop.fs.FileSystem; // import the class that provides the method
/**
 * HBASE-12782 ITBLL fails for me if generator does anything but 5M per maptask.
 * Create a region. Close it. Then copy into place a file to replay, one that is bigger than
 * configured flush size so we bring on lots of flushes.  Then reopen and confirm all edits
 * made it in.
 * @throws IOException
 */
@Test (timeout=60000)
public void testReplayWorksThoughLotsOfFlushing() throws IOException {
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  // Set it so we flush every 1M or so. That's a lot.
  conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
  // The file of recovered edits has a column family of 'meta'. Also has an encoded regionname
  // of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay.
  final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f";
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(testName.getMethodName()));
  final String columnFamily = "meta";
  byte [][] columnFamilyAsByteArray = new byte [][] {Bytes.toBytes(columnFamily)};
  htd.addFamily(new HColumnDescriptor(columnFamily));
  HRegionInfo hri = new HRegionInfo(htd.getTableName()) {
    @Override
    public synchronized String getEncodedName() {
      return encodedRegionName;
    }

    // Cache the name because lots of lookups.
    private byte [] encodedRegionNameAsBytes = null;
    @Override
    public synchronized byte[] getEncodedNameAsBytes() {
      if (encodedRegionNameAsBytes == null) {
        this.encodedRegionNameAsBytes = Bytes.toBytes(getEncodedName());
      }
      return this.encodedRegionNameAsBytes;
    }
  };
  Path hbaseRootDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  Path tableDir = FSUtils.getTableDir(hbaseRootDir, htd.getTableName());
  HRegionFileSystem hrfs =
      new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tableDir, hri);
  if (fs.exists(hrfs.getRegionDir())) {
    LOG.info("Region directory already exists. Deleting.");
    fs.delete(hrfs.getRegionDir(), true);
  }
  HRegion region = HRegion.createHRegion(hri, hbaseRootDir, conf, htd, null);
  assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
  List<String> storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
  // There should be no store files.
  assertTrue(storeFiles.isEmpty());
  region.close();
  Path regionDir = region.getRegionDir(hbaseRootDir, hri);
  Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regionDir);
  // Getting this path to a file of 10M of edits is a little fragile.
  Path recoveredEditsFile = new Path(
    System.getProperty("test.build.classes", "target/test-classes"),
      "0000000000000016310");
  // Copy this file under the region's recovered.edits dir so it is replayed on reopen.
  Path destination = new Path(recoveredEditsDir, recoveredEditsFile.getName());
  fs.copyToLocalFile(recoveredEditsFile, destination);
  assertTrue(fs.exists(destination));
  // Now the file 0000000000000016310 is under recovered.edits, reopen the region to replay.
  region = HRegion.openHRegion(region, null);
  assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
  storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
  // Our 0000000000000016310 is 10MB. Most of the edits are for one region. Let's assume that if
  // we flush at 1MB, that there are at least 3 flushed files that are there because of the
  // replay of edits.
  assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10);
  // Now verify all edits made it into the region.
  int count = verifyAllEditsMadeItIn(fs, conf, recoveredEditsFile, region);
  LOG.info("Checked " + count + " edits made it in");
}
 
Developer ID: fengchen8086, Project: ditb, Lines: 73, Source: TestRecoveredEdits.java

Example 4: copyOrMoveToLocalFile

import org.apache.hadoop.fs.FileSystem; // import the class that provides the method
/**
 * Copies or moves a file or directory from the distributed file system to the local file system.
 *
 * @param fileSystemInfo
 *            file system information
 * @param src
 *            path on the distributed file system
 * @param dist
 *            local destination path
 * @param deleteCrc
 *            whether to delete the locally generated .crc checksum files
 * @param deleteSrcDir
 *            whether to delete the source on the distributed file system (move semantics)
 */
public static void copyOrMoveToLocalFile(FileSystemInfo fileSystemInfo, String src, String dist, boolean deleteCrc,
		boolean deleteSrcDir) {
	FileSystem fs = getFileSystem(fileSystemInfo);
	Path hdfsPath = new Path(src);
	try {
		// throw if the source path does not exist
		pathNotExistCheck(src, fs, hdfsPath);
		// the path exists, so perform the download
		FileStatus fileStatus = fs.getFileStatus(hdfsPath);
		if (fileStatus.isDirectory()) {
			// directory: create the matching local directory and recurse
			dist = convertToPath(dist) + fileStatus.getPath().getName();
			File localFileDir = new File(dist);
			if (!localFileDir.exists()) {
				localFileDir.mkdirs();
			}
			// iterate over every entry in the HDFS directory
			FileStatus contents[] = fs.listStatus(hdfsPath);
			for (int i = 0; i < contents.length; i++) {
				copyOrMoveToLocalFile(fileSystemInfo, contents[i].getPath().toString(), dist, deleteCrc,
						deleteSrcDir);
			}
		} else {
			// plain file: copy it to the local path
			Path localPathOrFilePath = new Path(dist);
			fs.copyToLocalFile(hdfsPath, localPathOrFilePath);
			// schedule the locally generated .crc checksum file for deletion on JVM exit
			if (deleteCrc) {
				String crcFileName = "." + hdfsPath.getName() + ".crc";
				String crcFileAbsolutePath = convertToPath(dist) + crcFileName;
				File crcFile = new File(crcFileAbsolutePath);
				crcFile.deleteOnExit();
			}
		}
		if (deleteSrcDir) {
			fs.delete(hdfsPath, true);
		}

	} catch (IOException e) {
		e.printStackTrace();
	} finally {
		closeFileSystem(fs);
	}

}
 
Developer ID: zhangjunfang, Project: alluxio, Lines: 60, Source: HdfsAndAlluxioUtils_update.java
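A side note on the .crc handling above: copyToLocalFile writes through the local checksumming file system, which is what produces the .crc files this utility cleans up afterwards, and the method can also copy a directory recursively on its own, so the manual recursion here mainly serves the per-file .crc cleanup. If the checksum files are unwanted in the first place, the four-argument overload can bypass them. A minimal sketch reusing the variable names from the example above:

// delSrc = false keeps the HDFS source; useRawLocalFileSystem = true writes
// through the raw local file system, so no .crc checksum file is created.
fs.copyToLocalFile(false, hdfsPath, new Path(dist), true);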


Note: The org.apache.hadoop.fs.FileSystem.copyToLocalFile examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from community-contributed open-source projects; copyright of the source code belongs to the original authors. Consult the corresponding project's license before redistributing or using the code; do not reproduce without permission.