本文整理汇总了Java中org.apache.hadoop.fs.FileSystem.copyToLocalFile方法的典型用法代码示例。如果您正苦于以下问题:Java FileSystem.copyToLocalFile方法的具体用法?Java FileSystem.copyToLocalFile怎么用?Java FileSystem.copyToLocalFile使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.hadoop.fs.FileSystem
的用法示例。
在下文中一共展示了FileSystem.copyToLocalFile方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: copy
import org.apache.hadoop.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Copies the Avro schema file at {@code source} to {@code destination}, staging it through a
 * local temporary directory.
 *
 * @param source source location of the schema file; may be {@code null}
 * @param destination target location for the schema file; may be {@code null}
 * @return {@code true} if the copied file exists at {@code destination}; {@code false} if either
 *         argument was {@code null}
 * @throws IOException if any file system operation fails
 */
private boolean copy(String source, String destination) throws IOException {
  if (source == null || destination == null) {
    return false;
  }
  // Lazily create the local staging directory; it is removed when the JVM exits.
  if (temporaryDirectory == null) {
    temporaryDirectory = Files.createTempDirectory("avro-schema-download-folder");
    temporaryDirectory.toFile().deleteOnExit();
  }
  Path sourcePath = new Path(source);
  Path destinationPath = new Path(destination);
  Path stagingPath = new Path(temporaryDirectory.toString());
  // Download the schema into the staging directory from the source file system...
  sourcePath.getFileSystem(sourceHiveConf).copyToLocalFile(false, sourcePath, stagingPath);
  // ...then upload it (deleting the staged copy) to the replica file system.
  FileSystem destinationFileSystem = destinationPath.getFileSystem(replicaHiveConf);
  destinationFileSystem.copyFromLocalFile(true, stagingPath, destinationPath);
  LOG.info("Avro schema has been copied from '{}' to '{}'", source, destination);
  return destinationFileSystem.exists(destinationPath);
}
示例2: copyToLocal
import org.apache.hadoop.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Copies the file or directory at {@code sourceLocation} to {@code localLocation}, resolving the
 * source file system from {@code sourceHiveConf}.
 *
 * @param sourceLocation location on the source file system to copy from
 * @param localLocation local destination path
 * @throws CircusTrainException if the copy fails; wraps the underlying {@link IOException}
 */
private void copyToLocal(Path sourceLocation, Path localLocation) {
  FileSystem sourceFileSystem;
  try {
    sourceFileSystem = sourceLocation.getFileSystem(sourceHiveConf);
    sourceFileSystem.copyToLocalFile(false, sourceLocation, localLocation);
  } catch (IOException e) {
    // Fixed: the original message read "... to<path>" — there was no space before the destination.
    throw new CircusTrainException("Couldn't copy file from " + sourceLocation + " to " + localLocation, e);
  }
}
示例3: testReplayWorksThoughLotsOfFlushing
import org.apache.hadoop.fs.FileSystem; //导入方法依赖的package包/类
/**
 * HBASE-12782 ITBLL fails for me if generator does anything but 5M per maptask.
 * Create a region. Close it. Then copy into place a file to replay, one that is bigger than
 * configured flush size so we bring on lots of flushes. Then reopen and confirm all edits
 * made it in.
 * @throws IOException
 */
@Test (timeout=60000)
public void testReplayWorksThoughLotsOfFlushing() throws IOException {
Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
// Set it so we flush every 1M or so. Thats a lot.
conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
// The file of recovered edits has a column family of 'meta'. Also has an encoded regionname
// of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay.
final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f";
HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(testName.getMethodName()));
final String columnFamily = "meta";
byte [][] columnFamilyAsByteArray = new byte [][] {Bytes.toBytes(columnFamily)};
htd.addFamily(new HColumnDescriptor(columnFamily));
// Anonymous subclass pins the encoded name so it matches the canned recovered-edits file
// regardless of what HRegionInfo would otherwise generate.
HRegionInfo hri = new HRegionInfo(htd.getTableName()) {
@Override
public synchronized String getEncodedName() {
return encodedRegionName;
}
// Cache the name because lots of lookups.
private byte [] encodedRegionNameAsBytes = null;
@Override
public synchronized byte[] getEncodedNameAsBytes() {
if (encodedRegionNameAsBytes == null) {
this.encodedRegionNameAsBytes = Bytes.toBytes(getEncodedName());
}
return this.encodedRegionNameAsBytes;
}
};
Path hbaseRootDir = TEST_UTIL.getDataTestDir();
FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
Path tableDir = FSUtils.getTableDir(hbaseRootDir, htd.getTableName());
HRegionFileSystem hrfs =
new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tableDir, hri);
// Start from a clean region directory so a previous run cannot pollute the replay.
if (fs.exists(hrfs.getRegionDir())) {
LOG.info("Region directory already exists. Deleting.");
fs.delete(hrfs.getRegionDir(), true);
}
HRegion region = HRegion.createHRegion(hri, hbaseRootDir, conf, htd, null);
assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
List<String> storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
// There should be no store files.
assertTrue(storeFiles.isEmpty());
region.close();
Path regionDir = region.getRegionDir(hbaseRootDir, hri);
Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regionDir);
// This is a little fragile getting this path to a file of 10M of edits.
Path recoveredEditsFile = new Path(
System.getProperty("test.build.classes", "target/test-classes"),
"0000000000000016310");
// Copy this file under the region's recovered.edits dir so it is replayed on reopen.
// NOTE(review): copyToLocalFile is used between two paths on the same test fs here — this
// presumably works because the test file system is local; verify on non-local test setups.
Path destination = new Path(recoveredEditsDir, recoveredEditsFile.getName());
fs.copyToLocalFile(recoveredEditsFile, destination);
assertTrue(fs.exists(destination));
// Now the file 0000000000000016310 is under recovered.edits, reopen the region to replay.
region = HRegion.openHRegion(region, null);
assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
// Our 0000000000000016310 is 10MB. Most of the edits are for one region. Lets assume that if
// we flush at 1MB, that there are at least 3 flushed files that are there because of the
// replay of edits.
assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10);
// Now verify all edits made it into the region.
int count = verifyAllEditsMadeItIn(fs, conf, recoveredEditsFile, region);
LOG.info("Checked " + count + " edits made it in");
}
示例4: copyOrMoveToLocalFile
import org.apache.hadoop.fs.FileSystem; //导入方法依赖的package包/类
/**
 * Copies or moves a file or directory from the distributed file system to the local file system.
 * Directories are mirrored locally by recursing into their children.
 * (NOTE(review): the original Javadoc said the copy goes "to the distributed system", but the
 * code clearly downloads from it.)
 *
 * @param fileSystemInfo
 *            file system connection information
 * @param src
 *            source path on the distributed file system
 * @param dist
 *            local destination path
 * @param deleteCrc
 *            whether to delete the locally generated .crc checksum files
 * @param deleteSrcDir
 *            whether to delete the source on the distributed file system (move semantics)
 */
public static void copyOrMoveToLocalFile(FileSystemInfo fileSystemInfo, String src, String dist, boolean deleteCrc,
boolean deleteSrcDir) {
FileSystem fs = getFileSystem(fileSystemInfo);
Path hdfsPath = new Path(src);
try {
// Bail out (via the check's exception/return contract) if the source path does not exist.
pathNotExistCheck(src, fs, hdfsPath);
// The source exists; perform the download.
FileStatus fileStatus = fs.getFileStatus(hdfsPath);
if (fileStatus.isDirectory()) {
// Directory: create the matching local directory and recurse into every child entry.
dist = convertToPath(dist) + fileStatus.getPath().getName();
File localFileDir = new File(dist);
if (!localFileDir.exists()) {
localFileDir.mkdirs();
}
// Iterate over all entries of the remote directory.
FileStatus contents[] = fs.listStatus(hdfsPath);
for (int i = 0; i < contents.length; i++) {
copyOrMoveToLocalFile(fileSystemInfo, contents[i].getPath().toString(), dist, deleteCrc,
deleteSrcDir);
}
} else {
// Regular file: copy it to the local destination.
Path localPathOrFilePath = new Path(dist);
fs.copyToLocalFile(hdfsPath, localPathOrFilePath);
// Remove the .crc checksum file that copyToLocalFile generates locally.
// NOTE(review): deleteOnExit() only removes the file at JVM shutdown, not immediately —
// confirm that deferred deletion is the intended behavior here.
if (deleteCrc) {
String crcFileName = "." + hdfsPath.getName() + ".crc";
String crcFileAbsolutePath = convertToPath(dist) + crcFileName;
File crcFile = new File(crcFileAbsolutePath);
crcFile.deleteOnExit();
}
}
// Move semantics: remove the source after a successful copy.
if (deleteSrcDir) {
fs.delete(hdfsPath, true);
}
} catch (IOException e) {
// NOTE(review): failures are only printed, never propagated — callers cannot detect errors.
e.printStackTrace();
} finally {
// NOTE(review): if getFileSystem returns Hadoop's cached FileSystem instance, closing it in a
// recursive call also closes it for the enclosing call — verify closeFileSystem is safe here.
closeFileSystem(fs);
}
}