

Java FileSystem.copyFromLocalFile Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.FileSystem.copyFromLocalFile. If you are wondering how FileSystem.copyFromLocalFile is used in practice, or are looking for working examples to start from, the curated code examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.fs.FileSystem.


The sections below present 8 code examples of the FileSystem.copyFromLocalFile method, ordered by popularity by default.
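
Before looking at the project-specific examples, here is a minimal, self-contained sketch of the basic usage: copyFromLocalFile(Path src, Path dst) copies a file from the local file system to the target FileSystem. The NameNode URI and the file paths below are placeholders chosen for illustration, not taken from any of the projects referenced in this article.

import java.net.URI;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class CopyFromLocalExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode address; adjust for your cluster.
    FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf);

    Path localSrc = new Path("/tmp/data.txt");         // source on the local file system (placeholder)
    Path hdfsDst = new Path("/user/hadoop/data.txt");  // destination on the target FileSystem (placeholder)

    // Basic form: the local source file is kept after the copy.
    fs.copyFromLocalFile(localSrc, hdfsDst);

    // Overload: delSrc = false keeps the local file, overwrite = true replaces an existing destination.
    fs.copyFromLocalFile(false, true, localSrc, hdfsDst);
  }
}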

Example 1: copy

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private boolean copy(String source, String destination) throws IOException {
  if (source == null || destination == null) {
    return false;
  }

  if (temporaryDirectory == null) {
    temporaryDirectory = Files.createTempDirectory("avro-schema-download-folder");
    temporaryDirectory.toFile().deleteOnExit();
  }

  FileSystem sourceFileSystem = new Path(source).getFileSystem(sourceHiveConf);
  String tempPath = temporaryDirectory.toString();
  sourceFileSystem.copyToLocalFile(false, new Path(source), new Path(tempPath));

  FileSystem destinationFileSystem = new Path(destination).getFileSystem(replicaHiveConf);
  destinationFileSystem.copyFromLocalFile(true, new Path(tempPath), new Path(destination));
  LOG.info("Avro schema has been copied from '{}' to '{}'", source, destination);

  return destinationFileSystem.exists(new Path(destination));
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines of code: 21, Source file: AvroSerDeTransformation.java
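
A note on Example 1: the three-argument overload copyFromLocalFile(boolean delSrc, Path src, Path dst) used above deletes the local source once the copy succeeds when delSrc is true, so the temporary staging copy does not linger. A minimal sketch with placeholder paths (the schema file name below is illustrative, not from the circus-train project):

// fs is assumed to be a FileSystem obtained as in the sketch near the top of this article.
// delSrc = true: the local staging copy is removed once the upload succeeds.
fs.copyFromLocalFile(true, new Path("/tmp/staging/schema.avsc"), new Path("/user/hive/schemas/schema.avsc"));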

Example 2: copyLocalFileToDfs

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static Path copyLocalFileToDfs(FileSystem fs, String appId,
    Path srcPath, String dstFileName) throws IOException {
  Path dstPath = new Path(fs.getHomeDirectory(),
      Constants.DEFAULT_APP_NAME + Path.SEPARATOR + appId + Path.SEPARATOR + dstFileName);
  LOG.info("Copying " + srcPath + " to " + dstPath);
  fs.copyFromLocalFile(srcPath, dstPath);
  return dstPath;
}
 
Developer ID: Intel-bigdata, Project: TensorFlowOnYARN, Lines of code: 9, Source file: Utils.java

Example 3: addToLocalResources

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void addToLocalResources(FileSystem fs, String fileSrcPath,
    String fileDstPath, String appId, Map<String, LocalResource> localResources,
    String resources) throws IOException {
  String suffix =
      "prkeyrotation" + "/" + appId + "/" + fileDstPath;
  Path dst =
      new Path(fs.getHomeDirectory(), suffix);
  if (fileSrcPath == null) {
    FSDataOutputStream ostream = null;
    try {
      ostream = FileSystem
          .create(fs, dst, new FsPermission((short) 0710));
      ostream.writeUTF(resources);
    } finally {
      IOUtils.closeQuietly(ostream);
    }
  } else {
    fs.copyFromLocalFile(new Path(fileSrcPath), dst);
  }
  FileStatus scFileStatus = fs.getFileStatus(dst);
  LocalResource scRsrc =
      LocalResource.newInstance(
          ConverterUtils.getYarnUrlFromPath(dst),
          LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
          scFileStatus.getLen(), scFileStatus.getModificationTime());
  localResources.put(fileDstPath, scRsrc);
}
 
Developer ID: nucypher, Project: hadoop-oss, Lines of code: 28, Source file: Client.java

Example 4: copyToRemote

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void copyToRemote(Path localLocation, Path remoteDestinationLocation) {
  FileSystem destinationFileSystem;
  try {
    destinationFileSystem = remoteDestinationLocation.getFileSystem(replicaHiveConf);
    destinationFileSystem.copyFromLocalFile(localLocation, remoteDestinationLocation);
  } catch (IOException e) {
    throw new CircusTrainException("Couldn't copy file from " + localLocation + " to " + remoteDestinationLocation,
        e);
  }
}
 
Developer ID: HotelsDotCom, Project: circus-train, Lines of code: 11, Source file: SchemaCopier.java

Example 5: main

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
public static void main(String[] args) throws IOException {
  String uri = "hdfs://localhost:9000";
  Configuration conf = new Configuration();
  FileSystem fs = FileSystem.get(URI.create(uri), conf);
  // copyFromLocalFile(src, dst): the source is a path on the local file system,
  // the destination is on the target FileSystem (here, HDFS).
  fs.copyFromLocalFile(new Path("/home/aadish/haha.txt"), new Path(uri + "/haha.txt"));
}
 
Developer ID: aadishgoel2013, Project: Hadoop-Codes, Lines of code: 8, Source file: putting.java

Example 6: addToLocalResources

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
private void addToLocalResources(FileSystem fs, String fileSrcPath,
    String fileDstPath, String appId, Map<String, LocalResource> localResources,
    String resources) throws IOException {
  String suffix =
      appName + "/" + appId + "/" + fileDstPath;
  Path dst =
      new Path(fs.getHomeDirectory(), suffix);
  if (fileSrcPath == null) {
    FSDataOutputStream ostream = null;
    try {
      ostream = FileSystem
          .create(fs, dst, new FsPermission((short) 0710));
      ostream.writeUTF(resources);
    } finally {
      IOUtils.closeQuietly(ostream);
    }
  } else {
    fs.copyFromLocalFile(new Path(fileSrcPath), dst);
  }
  FileStatus scFileStatus = fs.getFileStatus(dst);
  LocalResource scRsrc =
      LocalResource.newInstance(
          ConverterUtils.getYarnUrlFromURI(dst.toUri()),
          LocalResourceType.FILE, LocalResourceVisibility.APPLICATION,
          scFileStatus.getLen(), scFileStatus.getModificationTime());
  localResources.put(fileDstPath, scRsrc);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 28, Source file: Client.java

Example 7: testImport94Table

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
/**
 * Test importing data from a file exported by HBase 0.94
 * @throws Exception
 */
@Test
public void testImport94Table() throws Exception {
  final String name = "exportedTableIn94Format";
  URL url = TestImportExport.class.getResource(name);
  File f = new File(url.toURI());
  if (!f.exists()) {
    LOG.warn("FAILED TO FIND " + f + "; skipping out on test");
    return;
  }
  assertTrue(f.exists());
  LOG.info("FILE=" + f);
  Path importPath = new Path(f.toURI());
  FileSystem fs = FileSystem.get(UTIL.getConfiguration());
  fs.copyFromLocalFile(importPath, new Path(FQ_OUTPUT_DIR + Path.SEPARATOR + name));
  String IMPORT_TABLE = name;
  try (Table t = UTIL.createTable(TableName.valueOf(IMPORT_TABLE), Bytes.toBytes("f1"), 3);) {
    String[] args = new String[] {
        "-Dhbase.import.version=0.94" ,
        IMPORT_TABLE, FQ_OUTPUT_DIR
    };
    assertTrue(runImport(args));
    /* exportedTableIn94Format contains 5 rows
    ROW         COLUMN+CELL
    r1          column=f1:c1, timestamp=1383766761171, value=val1
    r2          column=f1:c1, timestamp=1383766771642, value=val2
    r3          column=f1:c1, timestamp=1383766777615, value=val3
    r4          column=f1:c1, timestamp=1383766785146, value=val4
    r5          column=f1:c1, timestamp=1383766791506, value=val5
    */
   assertEquals(5, UTIL.countRows(t));
  }
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 37, Source file: TestImportExport.java

Example 8: loadingClassFromLibDirInJar

import org.apache.hadoop.fs.FileSystem; // import the package/class this method depends on
void loadingClassFromLibDirInJar(String libPrefix) throws Exception {
  FileSystem fs = cluster.getFileSystem();

  File innerJarFile1 = buildCoprocessorJar(cpName1);
  File innerJarFile2 = buildCoprocessorJar(cpName2);
  File outerJarFile = new File(TEST_UTIL.getDataTestDir().toString(), "outer.jar");

  ClassLoaderTestHelper.addJarFilesToJar(
    outerJarFile, libPrefix, innerJarFile1, innerJarFile2);

  // copy the jars into dfs
  fs.copyFromLocalFile(new Path(outerJarFile.getPath()),
    new Path(fs.getUri().toString() + Path.SEPARATOR));
  String jarFileOnHDFS = fs.getUri().toString() + Path.SEPARATOR +
    outerJarFile.getName();
  assertTrue("Copy jar file to HDFS failed.",
    fs.exists(new Path(jarFileOnHDFS)));
  LOG.info("Copied jar file to HDFS: " + jarFileOnHDFS);

  // create a table that references the coprocessors
  HTableDescriptor htd = new HTableDescriptor(tableName);
  htd.addFamily(new HColumnDescriptor("test"));
    // without configuration values
  htd.setValue("COPROCESSOR$1", jarFileOnHDFS.toString() + "|" + cpName1 +
    "|" + Coprocessor.PRIORITY_USER);
    // with configuration values
  htd.setValue("COPROCESSOR$2", jarFileOnHDFS.toString() + "|" + cpName2 +
    "|" + Coprocessor.PRIORITY_USER + "|k1=v1,k2=v2,k3=v3");
  Admin admin = TEST_UTIL.getHBaseAdmin();
  if (admin.tableExists(tableName)) {
    if (admin.isTableEnabled(tableName)) {
      admin.disableTable(tableName);
    }
    admin.deleteTable(tableName);
  }
  admin.createTable(htd);
  waitForTable(htd.getTableName());

  // verify that the coprocessors were loaded
  boolean found1 = false, found2 = false, found2_k1 = false,
      found2_k2 = false, found2_k3 = false;
  MiniHBaseCluster hbase = TEST_UTIL.getHBaseCluster();
  for (Region region: hbase.getRegionServer(0).getOnlineRegionsLocalContext()) {
    if (region.getRegionInfo().getRegionNameAsString().startsWith(tableName.getNameAsString())) {
      CoprocessorEnvironment env;
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName1);
      if (env != null) {
        found1 = true;
      }
      env = region.getCoprocessorHost().findCoprocessorEnvironment(cpName2);
      if (env != null) {
        found2 = true;
        Configuration conf = env.getConfiguration();
        found2_k1 = conf.get("k1") != null;
        found2_k2 = conf.get("k2") != null;
        found2_k3 = conf.get("k3") != null;
      }
    }
  }
  assertTrue("Class " + cpName1 + " was missing on a region", found1);
  assertTrue("Class " + cpName2 + " was missing on a region", found2);
  assertTrue("Configuration key 'k1' was missing on a region", found2_k1);
  assertTrue("Configuration key 'k2' was missing on a region", found2_k2);
  assertTrue("Configuration key 'k3' was missing on a region", found2_k3);
}
 
Developer ID: fengchen8086, Project: ditb, Lines of code: 66, Source file: TestClassLoading.java


Note: the org.apache.hadoop.fs.FileSystem.copyFromLocalFile examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their respective developers, and copyright in the source code remains with the original authors; please consult each project's license before distributing or reusing the code. Do not reproduce this article without permission.