

Java FileSystem.copyToLocalFile Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.copyToLocalFile. If you are wondering what FileSystem.copyToLocalFile does, how to call it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


The sections below present 4 code examples of the FileSystem.copyToLocalFile method, sorted by popularity by default.
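Before the examples, here is a minimal sketch of the basic two-argument call, assuming a default-configured Hadoop client; both paths are hypothetical placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CopyToLocalExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();            // loads core-site.xml etc. from the classpath
    FileSystem fs = FileSystem.get(conf);                // file system for the configured default scheme
    Path remote = new Path("/data/input/example.txt");   // hypothetical source on the distributed FS
    Path local = new Path("/tmp/example.txt");           // hypothetical local destination
    fs.copyToLocalFile(remote, local);                   // equivalent to copyToLocalFile(false, remote, local)
    fs.close();
  }
}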

Example 1: copy

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private boolean copy(String source, String destination) throws IOException {
  if (source == null || destination == null) {
    return false;
  }

  if (temporaryDirectory == null) {
    temporaryDirectory = Files.createTempDirectory("avro-schema-download-folder");
    temporaryDirectory.toFile().deleteOnExit();
  }

  FileSystem sourceFileSystem = new Path(source).getFileSystem(sourceHiveConf);
  String tempPath = temporaryDirectory.toString();
  sourceFileSystem.copyToLocalFile(false, new Path(source), new Path(tempPath));

  FileSystem destinationFileSystem = new Path(destination).getFileSystem(replicaHiveConf);
  destinationFileSystem.copyFromLocalFile(true, new Path(tempPath), new Path(destination));
  LOG.info("Avro schema has been copied from '{}' to '{}'", source, destination);

  return destinationFileSystem.exists(new Path(destination));
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines of code: 21, Source: AvroSerDeTransformation.java
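Note on Example 1: because the source and destination paths may resolve to two different file systems (each is looked up from its own Hive configuration), the method stages the file in a local temp directory first: copyToLocalFile pulls it down from the source, and copyFromLocalFile(true, ...) pushes it to the destination while deleting the local staging copy.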

Example 2: copyToLocal

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
private void copyToLocal(Path sourceLocation, Path localLocation) {
  FileSystem sourceFileSystem;
  try {
    sourceFileSystem = sourceLocation.getFileSystem(sourceHiveConf);
    sourceFileSystem.copyToLocalFile(false, sourceLocation, localLocation);
  } catch (IOException e) {
    throw new CircusTrainException("Couldn't copy file from " + sourceLocation + " to " + localLocation, e);
  }
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines of code: 10, Source: SchemaCopier.java
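Example 2 passes delSrc=false, which leaves the source in place. Passing true instead deletes the source after a successful copy, turning the copy into a move. A minimal sketch, reusing the imports from the first sketch above (paths are hypothetical):

FileSystem fs = FileSystem.get(new Configuration());
// delSrc=true: remove the source once the copy to the local file system succeeds
fs.copyToLocalFile(true, new Path("/data/staging/part-00000"), new Path("/tmp/part-00000"));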

Example 3: testReplayWorksThoughLotsOfFlushing

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * HBASE-12782 ITBLL fails for me if generator does anything but 5M per maptask.
 * Create a region. Close it. Then copy into place a file to replay, one that is bigger than
 * the configured flush size so we bring on lots of flushes. Then reopen and confirm all edits
 * made it in.
 * @throws IOException
 */
@Test (timeout=60000)
public void testReplayWorksThoughLotsOfFlushing() throws IOException {
  Configuration conf = new Configuration(TEST_UTIL.getConfiguration());
  // Set it so we flush every 1M or so. That's a lot.
  conf.setInt(HConstants.HREGION_MEMSTORE_FLUSH_SIZE, 1024*1024);
  // The file of recovered edits has a column family of 'meta'. Also has an encoded regionname
  // of 4823016d8fca70b25503ee07f4c6d79f which needs to match on replay.
  final String encodedRegionName = "4823016d8fca70b25503ee07f4c6d79f";
  HTableDescriptor htd = new HTableDescriptor(TableName.valueOf(testName.getMethodName()));
  final String columnFamily = "meta";
  byte [][] columnFamilyAsByteArray = new byte [][] {Bytes.toBytes(columnFamily)};
  htd.addFamily(new HColumnDescriptor(columnFamily));
  HRegionInfo hri = new HRegionInfo(htd.getTableName()) {
    @Override
    public synchronized String getEncodedName() {
      return encodedRegionName;
    }

    // Cache the name because lots of lookups.
    private byte [] encodedRegionNameAsBytes = null;
    @Override
    public synchronized byte[] getEncodedNameAsBytes() {
      if (encodedRegionNameAsBytes == null) {
        this.encodedRegionNameAsBytes = Bytes.toBytes(getEncodedName());
      }
      return this.encodedRegionNameAsBytes;
    }
  };
  Path hbaseRootDir = TEST_UTIL.getDataTestDir();
  FileSystem fs = FileSystem.get(TEST_UTIL.getConfiguration());
  Path tableDir = FSUtils.getTableDir(hbaseRootDir, htd.getTableName());
  HRegionFileSystem hrfs =
      new HRegionFileSystem(TEST_UTIL.getConfiguration(), fs, tableDir, hri);
  if (fs.exists(hrfs.getRegionDir())) {
    LOG.info("Region directory already exists. Deleting.");
    fs.delete(hrfs.getRegionDir(), true);
  }
  HRegion region = HRegion.createHRegion(hri, hbaseRootDir, conf, htd, null);
  assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
  List<String> storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
  // There should be no store files.
  assertTrue(storeFiles.isEmpty());
  region.close();
  Path regionDir = region.getRegionDir(hbaseRootDir, hri);
  Path recoveredEditsDir = WALSplitter.getRegionDirRecoveredEditsDir(regionDir);
  // This is a little fragile getting this path to a file of 10M of edits.
  Path recoveredEditsFile = new Path(
    System.getProperty("test.build.classes", "target/test-classes"),
      "0000000000000016310");
  // Copy this file under the region's recovered.edits dir so it is replayed on reopen.
  Path destination = new Path(recoveredEditsDir, recoveredEditsFile.getName());
  fs.copyToLocalFile(recoveredEditsFile, destination);
  assertTrue(fs.exists(destination));
  // Now the file 0000000000000016310 is under recovered.edits, reopen the region to replay.
  region = HRegion.openHRegion(region, null);
  assertEquals(encodedRegionName, region.getRegionInfo().getEncodedName());
  storeFiles = region.getStoreFileList(columnFamilyAsByteArray);
  // Our 0000000000000016310 is 10MB. Most of the edits are for one region. Let's assume that if
  // we flush at 1MB, that there are at least 3 flushed files that are there because of the
  // replay of edits.
  assertTrue("Files count=" + storeFiles.size(), storeFiles.size() > 10);
  // Now verify all edits made it into the region.
  int count = verifyAllEditsMadeItIn(fs, conf, recoveredEditsFile, region);
  LOG.info("Checked " + count + " edits made it in");
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 73, Source: TestRecoveredEdits.java

Example 4: copyOrMoveToLocalFile

import org.apache.hadoop.fs.FileSystem; // import the package/class the method depends on
/**
 * Copies or moves a file or directory from the distributed file system to the local file system.
 *
 * @param fileSystemInfo
 *            file system information
 * @param src
 *            path on the distributed file system
 * @param dist
 *            local destination path
 * @param deleteCrc
 *            whether to delete the locally generated .crc checksum files
 * @param deleteSrcDir
 *            whether to delete the source on the distributed file system (true turns the copy into a move)
 */
public static void copyOrMoveToLocalFile(FileSystemInfo fileSystemInfo, String src, String dist, boolean deleteCrc,
		boolean deleteSrcDir) {
	FileSystem fs = getFileSystem(fileSystemInfo);
	Path hdfsPath = new Path(src);
	try {
		// bail out with an exception if the source path does not exist
		pathNotExistCheck(src, fs, hdfsPath);
		// the path exists, so perform the download
		FileStatus fileStatus = fs.getFileStatus(hdfsPath);
		if (fileStatus.isDirectory()) {
			// the source is a directory
			dist = convertToPath(dist) + fileStatus.getPath().getName();
			File localFileDir = new File(dist);
			if (!localFileDir.exists()) {
				localFileDir.mkdirs();
			}
			// recurse over every entry in the HDFS directory
			FileStatus[] contents = fs.listStatus(hdfsPath);
			for (int i = 0; i < contents.length; i++) {
				copyOrMoveToLocalFile(fileSystemInfo, contents[i].getPath().toString(), dist, deleteCrc,
						deleteSrcDir);
			}
		} else {
			// the source is a single file
			Path localPathOrFilePath = new Path(dist);
			fs.copyToLocalFile(hdfsPath, localPathOrFilePath);
			// delete the locally generated .crc checksum file
			if (deleteCrc) {
				String crcFileName = "." + hdfsPath.getName() + ".crc";
				String crcFileAbsolutePath = convertToPath(dist) + crcFileName;
				File crcFile = new File(crcFileAbsolutePath);
				crcFile.deleteOnExit(); // note: deleteOnExit defers removal until JVM shutdown
			}
		}
		if (deleteSrcDir) {
			fs.delete(hdfsPath, true);
		}

	} catch (IOException e) {
		e.printStackTrace();
	} finally {
		closeFileSystem(fs);
	}

}
 
Developer ID: zhangjunfang, Project: alluxio, Lines of code: 60, Source: HdfsAndAlluxioUtils_update.java
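Example 4 deletes the .crc checksum files after the copy. The four-argument overload of copyToLocalFile avoids creating them in the first place: passing useRawLocalFileSystem as true writes through the raw (non-checksumming) local file system, so no ".<name>.crc" sidecar files appear. A minimal sketch, reusing the imports from the first sketch above (paths are hypothetical):

FileSystem fs = FileSystem.get(new Configuration());
// useRawLocalFileSystem=true bypasses the checksumming local FS wrapper,
// so no .crc sidecar file is written next to the local copy
fs.copyToLocalFile(false, new Path("/data/output/result.avro"), new Path("/tmp/result.avro"), true);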


Note: The org.apache.hadoop.fs.FileSystem.copyToLocalFile method examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by the community, and copyright remains with the original authors; for distribution and use, refer to the corresponding project's license. Do not reproduce without permission.