

Java DistCpOptionSwitch Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.tools.DistCpOptionSwitch. If you are wondering what DistCpOptionSwitch does, how to use it, or where to find working examples, the curated class snippets below should help.


The DistCpOptionSwitch class belongs to the org.apache.hadoop.tools package. Twelve code examples of the class are shown below, sorted by popularity.
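Before diving into the examples, here is a minimal sketch (not taken from any of the projects below; it relies only on the public Hadoop APIs that appear in the snippets, and the chosen option values are purely illustrative) of the basic idea behind DistCpOptionSwitch: each enum constant corresponds to one DistCp command-line switch and exposes its Configuration key via getConfigLabel(), so options can be written into and later read back from a job Configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.tools.DistCpOptionSwitch;

public class DistCpOptionSwitchSketch {
  public static void main(String[] args) {
    Configuration conf = new Configuration();

    // Each constant maps a DistCp command-line switch to a configuration key;
    // getConfigLabel() returns that key. The values below are illustrative.
    conf.setBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), true);  // corresponds to -update
    conf.setBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(), true);      // corresponds to -skipcrccheck
    conf.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(), "ugp");     // corresponds to -p with user/group/permission

    // Code such as CopyMapper.setup() (Examples 1, 3 and 7 below) reads the same keys back:
    boolean syncFolders =
        conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false);
    System.out.println("sync folders enabled: " + syncFolders);
  }
}

The string set for PRESERVE_STATUS uses the same single-letter attribute encoding seen in Examples 2 and 6 (for instance "br" for block size and replication); DistCpUtils.unpackAttributes converts it back into an EnumSet of DistCpOptions.FileAttribute values.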

Example 1: setup

import org.apache.hadoop.tools.DistCpOptionSwitch; // import the required package/class
/**
 * Implementation of the Mapper::setup() method. This extracts the DistCp-
 * options specified in the Job's configuration, to set up the Job.
 * @param context Mapper's context.
 * @throws IOException On IO failure.
 * @throws InterruptedException If the job is interrupted.
 */
@Override
public void setup(Context context) throws IOException, InterruptedException {
  conf = context.getConfiguration();

  syncFolders = conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false);
  ignoreFailures = conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false);
  skipCrc = conf.getBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(), false);
  overWrite = conf.getBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(), false);
  append = conf.getBoolean(DistCpOptionSwitch.APPEND.getConfigLabel(), false);
  preserve = DistCpUtils.unpackAttributes(conf.get(DistCpOptionSwitch.
      PRESERVE_STATUS.getConfigLabel()));

  targetWorkPath = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
  Path targetFinalPath = new Path(conf.get(
          DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
  targetFS = targetFinalPath.getFileSystem(conf);

  if (targetFS.exists(targetFinalPath) && targetFS.isFile(targetFinalPath)) {
    overWrite = true; // When target is an existing file, overwrite it.
  }

  if (conf.get(DistCpConstants.CONF_LABEL_SSL_CONF) != null) {
    initializeSSLConf(context);
  }
}
 
Developer: naver | Project: hadoop | Lines: 33 | Source: CopyMapper.java

Example 2: getConfiguration

import org.apache.hadoop.tools.DistCpOptionSwitch; // import the required package/class
private static Configuration getConfiguration() throws IOException {
  Configuration configuration = getConfigurationForCluster();
  final FileSystem fs = cluster.getFileSystem();
  Path workPath = new Path(TARGET_PATH)
          .makeQualified(fs.getUri(), fs.getWorkingDirectory());
  configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
          workPath.toString());
  configuration.set(DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
          workPath.toString());
  configuration.setBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(),
          false);
  configuration.setBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(),
          false);
  configuration.setBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(),
          true);
  configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
          "br");
  return configuration;
}
 
Developer: naver | Project: hadoop | Lines: 20 | Source: TestCopyMapper.java

Example 3: setup

import org.apache.hadoop.tools.DistCpOptionSwitch; // import the required package/class
/**
 * Implementation of the Mapper::setup() method. This extracts the DistCp-
 * options specified in the Job's configuration, to set up the Job.
 * @param context Mapper's context.
 * @throws IOException On IO failure.
 * @throws InterruptedException If the job is interrupted.
 */
@Override
public void setup(Context context) throws IOException, InterruptedException {
  conf = context.getConfiguration();

  syncFolders = conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false);
  ignoreFailures = conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false);
  skipCrc = conf.getBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(), false);
  overWrite = conf.getBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(), false);
  preserve = DistCpUtils.unpackAttributes(conf.get(DistCpOptionSwitch.
      PRESERVE_STATUS.getConfigLabel()));

  targetWorkPath = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
  Path targetFinalPath = new Path(conf.get(
          DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
  targetFS = targetFinalPath.getFileSystem(conf);

  if (targetFS.exists(targetFinalPath) && targetFS.isFile(targetFinalPath)) {
    overWrite = true; // When target is an existing file, overwrite it.
  }

  if (conf.get(DistCpConstants.CONF_LABEL_SSL_CONF) != null) {
    initializeSSLConf(context);
  }
}
 
Developer: ict-carch | Project: hadoop-plus | Lines: 32 | Source: CopyMapper.java

Example 4: testCopyWithAppend

import org.apache.hadoop.tools.DistCpOptionSwitch; // import the required package/class
@Test
public void testCopyWithAppend() throws Exception {
  final FileSystem fs = cluster.getFileSystem();
  // do the first distcp
  testCopy(false);
  // start appending data to source
  appendSourceData();

  // do the distcp again with -update and -append option
  CopyMapper copyMapper = new CopyMapper();
  StubContext stubContext = new StubContext(getConfiguration(), null, 0);
  Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
      stubContext.getContext();
  // Enable append 
  context.getConfiguration().setBoolean(
      DistCpOptionSwitch.APPEND.getConfigLabel(), true);
  copyMapper.setup(context);
  for (Path path: pathList) {
    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
            new CopyListingFileStatus(cluster.getFileSystem().getFileStatus(
                path)), context);
  }

  verifyCopy(fs, false);
  // verify that we only copied new appended data
  Assert.assertEquals(nFiles * DEFAULT_FILE_SIZE * 2, stubContext
      .getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
      .getValue());
  Assert.assertEquals(pathList.size(), stubContext.getReporter().
      getCounter(CopyMapper.Counter.COPY).getValue());
}
 
Developer: naver | Project: hadoop | Lines: 32 | Source: TestCopyMapper.java

Example 5: testCopyFailOnBlockSizeDifference

import org.apache.hadoop.tools.DistCpOptionSwitch; // import the required package/class
@Test(timeout=40000)
public void testCopyFailOnBlockSizeDifference() {
  try {

    deleteState();
    createSourceDataWithDifferentBlockSize();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    EnumSet<DistCpOptions.FileAttribute> fileAttributes
        = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
    configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
        DistCpUtils.packAttributes(fileAttributes));

    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
          new CopyListingFileStatus(fileStatus), context);
    }

    Assert.fail("Copy should have failed because of block-size difference.");
  }
  catch (Exception exception) {
    // Check that the exception suggests the use of -pb/-skipCrc.
    Assert.assertTrue("Failure exception should have suggested the use of -pb.", exception.getCause().getCause().getMessage().contains("pb"));
    Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.", exception.getCause().getCause().getMessage().contains("skipCrc"));
  }
}
 
Developer: naver | Project: hadoop | Lines: 36 | Source: TestCopyMapper.java

Example 6: testPreserveDefaults

import org.apache.hadoop.tools.DistCpOptionSwitch; // import the required package/class
@Test
public void testPreserveDefaults() throws IOException {
  FileSystem fs = FileSystem.get(config);
  
  // preserve replication, block size, user, group, permission, 
  // checksum type and timestamps    
  EnumSet<FileAttribute> attributes = 
      DistCpUtils.unpackAttributes(
          DistCpOptionSwitch.PRESERVE_STATUS_DEFAULT.substring(1));

  Path dst = new Path("/tmp/dest2");
  Path src = new Path("/tmp/src2");

  createFile(fs, src);
  createFile(fs, dst);

  fs.setPermission(src, fullPerm);
  fs.setOwner(src, "somebody", "somebody-group");
  fs.setTimes(src, 0, 0);
  fs.setReplication(src, (short) 1);

  fs.setPermission(dst, noPerm);
  fs.setOwner(dst, "nobody", "nobody-group");
  fs.setTimes(dst, 100, 100);
  fs.setReplication(dst, (short) 2);
  
  CopyListingFileStatus srcStatus = new CopyListingFileStatus(fs.getFileStatus(src));

  DistCpUtils.preserve(fs, dst, srcStatus, attributes, false);

  CopyListingFileStatus dstStatus = new CopyListingFileStatus(fs.getFileStatus(dst));

  // FileStatus.equals only compares path field, must explicitly compare all fields
  Assert.assertTrue(srcStatus.getPermission().equals(dstStatus.getPermission()));
  Assert.assertTrue(srcStatus.getOwner().equals(dstStatus.getOwner()));
  Assert.assertTrue(srcStatus.getGroup().equals(dstStatus.getGroup()));
  Assert.assertTrue(srcStatus.getAccessTime() == dstStatus.getAccessTime());
  Assert.assertTrue(srcStatus.getModificationTime() == dstStatus.getModificationTime());
  Assert.assertTrue(srcStatus.getReplication() == dstStatus.getReplication());
}
 
Developer: naver | Project: hadoop | Lines: 41 | Source: TestDistCpUtils.java

Example 7: setup

import org.apache.hadoop.tools.DistCpOptionSwitch; // import the required package/class
/**
 * Implementation of the Mapper::setup() method. This extracts the DistCp-
 * options specified in the Job's configuration, to set up the Job.
 * @param context Mapper's context.
 * @throws IOException On IO failure.
 * @throws InterruptedException If the job is interrupted.
 */
@Override
public void setup(Context context) throws IOException, InterruptedException {
  conf = context.getConfiguration();

  syncFolders = conf.getBoolean(DistCpOptionSwitch.SYNC_FOLDERS.getConfigLabel(), false);
  ignoreFailures = conf.getBoolean(DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), false);
  skipCrc = conf.getBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(), false);
  overWrite = conf.getBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(), false);
  append = conf.getBoolean(DistCpOptionSwitch.APPEND.getConfigLabel(), false);
  preserve = DistCpUtils.unpackAttributes(conf.get(DistCpOptionSwitch.
      PRESERVE_STATUS.getConfigLabel()));

  targetWorkPath = new Path(conf.get(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH));
  Path targetFinalPath = new Path(conf.get(
          DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH));
  targetFS = targetFinalPath.getFileSystem(conf);

  if (targetFS.exists(targetFinalPath) && targetFS.isFile(targetFinalPath)) {
    overWrite = true; // When target is an existing file, overwrite it.
  }

  if (conf.get(DistCpConstants.CONF_LABEL_SSL_CONF) != null) {
    initializeSSLConf(context);
  }
  startEpoch = System.currentTimeMillis();
}
 
Developer: aliyun-beta | Project: aliyun-oss-hadoop-fs | Lines: 34 | Source: CopyMapper.java

Example 8: testCopyFailOnBlockSizeDifference

import org.apache.hadoop.tools.DistCpOptionSwitch; // import the required package/class
@Test(timeout=40000)
public void testCopyFailOnBlockSizeDifference() {
  try {

    deleteState();
    createSourceDataWithDifferentBlockSize();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, FileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    EnumSet<DistCpOptions.FileAttribute> fileAttributes
        = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
    configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
        DistCpUtils.packAttributes(fileAttributes));

    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
          fileStatus, context);
    }

    Assert.fail("Copy should have failed because of block-size difference.");
  }
  catch (Exception exception) {
    // Check that the exception suggests the use of -pb/-skipCrc.
    Assert.assertTrue("Failure exception should have suggested the use of -pb.", exception.getCause().getCause().getMessage().contains("pb"));
    Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.", exception.getCause().getCause().getMessage().contains("skipCrc"));
  }
}
 
Developer: ict-carch | Project: hadoop-plus | Lines: 36 | Source: TestCopyMapper.java

Example 9: testCopyFailOnBlockSizeDifference

import org.apache.hadoop.tools.DistCpOptionSwitch; // import the required package/class
@Test(timeout=40000)
public void testCopyFailOnBlockSizeDifference() {
  try {
    deleteState();
    createSourceDataWithDifferentBlockSize();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    EnumSet<DistCpOptions.FileAttribute> fileAttributes
        = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
    configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
        DistCpUtils.packAttributes(fileAttributes));

    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH),
          path)), new CopyListingFileStatus(fileStatus), context);
    }

    Assert.fail("Copy should have failed because of block-size difference.");
  }
  catch (Exception exception) {
    // Check that the exception suggests the use of -pb/-skipCrc.
    Assert.assertTrue("Failure exception should have suggested the use of -pb.",
        exception.getCause().getCause().getMessage().contains("pb"));
    Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.",
        exception.getCause().getCause().getMessage().contains("skipCrc"));
  }
}
 
Developer: hopshadoop | Project: hops | Lines: 37 | Source: TestCopyMapper.java

Example 10: getFileAttributeSettings

import org.apache.hadoop.tools.DistCpOptionSwitch; // import the required package/class
private static EnumSet<DistCpOptions.FileAttribute>
        getFileAttributeSettings(Mapper.Context context) {
  String attributeString = context.getConfiguration().get(
          DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel());
  return DistCpUtils.unpackAttributes(attributeString);
}
 
Developer: naver | Project: hadoop | Lines: 7 | Source: CopyMapper.java

Example 11: testCopy

import org.apache.hadoop.tools.DistCpOptionSwitch; // import the required package/class
private void testCopy(boolean preserveChecksum) throws Exception {
  deleteState();
  if (preserveChecksum) {
    createSourceDataWithDifferentChecksumType();
  } else {
    createSourceData();
  }

  FileSystem fs = cluster.getFileSystem();
  CopyMapper copyMapper = new CopyMapper();
  StubContext stubContext = new StubContext(getConfiguration(), null, 0);
  Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
          = stubContext.getContext();

  Configuration configuration = context.getConfiguration();
  EnumSet<DistCpOptions.FileAttribute> fileAttributes
          = EnumSet.of(DistCpOptions.FileAttribute.REPLICATION);
  if (preserveChecksum) {
    fileAttributes.add(DistCpOptions.FileAttribute.CHECKSUMTYPE);
  }
  configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
          DistCpUtils.packAttributes(fileAttributes));

  copyMapper.setup(context);

  for (Path path: pathList) {
    copyMapper.map(
        new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
        new CopyListingFileStatus(fs.getFileStatus(path)), context);
  }

  // Check that the maps worked.
  verifyCopy(fs, preserveChecksum);
  Assert.assertEquals(pathList.size(), stubContext.getReporter()
      .getCounter(CopyMapper.Counter.COPY).getValue());
  if (!preserveChecksum) {
    Assert.assertEquals(nFiles * DEFAULT_FILE_SIZE, stubContext
        .getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
        .getValue());
  } else {
    Assert.assertEquals(nFiles * NON_DEFAULT_BLOCK_SIZE * 2, stubContext
        .getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
        .getValue());
  }

  testCopyingExistingFiles(fs, copyMapper, context);
  for (Text value : stubContext.getWriter().values()) {
    Assert.assertTrue(value.toString() + " is not skipped", value
        .toString().startsWith("SKIP:"));
  }
}
 
Developer: naver | Project: hadoop | Lines: 52 | Source: TestCopyMapper.java

Example 12: doTestIgnoreFailures

import org.apache.hadoop.tools.DistCpOptionSwitch; // import the required package/class
private void doTestIgnoreFailures(boolean ignoreFailures) {
  try {
    deleteState();
    createSourceData();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
            = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    configuration.setBoolean(
            DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), ignoreFailures);
    configuration.setBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(),
            true);
    configuration.setBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(),
            true);
    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      if (!fileStatus.isDirectory()) {
        fs.delete(path, true);
        copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
                new CopyListingFileStatus(fileStatus), context);
      }
    }
    if (ignoreFailures) {
      for (Text value : stubContext.getWriter().values()) {
        Assert.assertTrue(value.toString() + " is not skipped", value.toString().startsWith("FAIL:"));
      }
    }
    Assert.assertTrue("There should have been an exception.", ignoreFailures);
  }
  catch (Exception e) {
    Assert.assertTrue("Unexpected exception: " + e.getMessage(),
            !ignoreFailures);
    e.printStackTrace();
  }
}
 
Developer: naver | Project: hadoop | Lines: 42 | Source: TestCopyMapper.java


Note: The org.apache.hadoop.tools.DistCpOptionSwitch examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors, and copyright remains with the original authors; for redistribution and use, please refer to each project's License. Do not reproduce without permission.