

Java StubContext.getContext Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.tools.StubContext.getContext, drawn from open-source projects. If you are wondering what StubContext.getContext does or how to use it, the selected examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.tools.StubContext.


The 15 code examples of StubContext.getContext below are sorted by popularity by default.
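
Nearly every example below follows the same three-step pattern: construct a StubContext around a Configuration, call getContext() to obtain a Mapper.Context (or TaskAttemptContext), and hand that context to the code under test. The following minimal sketch distills that pattern from the examples; getConfiguration(), fs, pathList, and SOURCE_PATH stand in for test fixtures used by the excerpts and are not part of the StubContext API:

CopyMapper copyMapper = new CopyMapper();
StubContext stubContext = new StubContext(getConfiguration(), null, 0);
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
    stubContext.getContext();

copyMapper.setup(context);            // initialize the mapper with the stubbed context
for (Path path : pathList) {          // drive the mapper directly, one record at a time
  copyMapper.map(
      new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
      new CopyListingFileStatus(fs.getFileStatus(path)), context);
}

// Counters and output recorded through the stub are then available for assertions:
long bytesCopied = stubContext.getReporter()
    .getCounter(CopyMapper.Counter.BYTESCOPIED).getValue();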

Example 1: testCopyWithAppend

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
@Test
public void testCopyWithAppend() throws Exception {
  final FileSystem fs = cluster.getFileSystem();
  // do the first distcp
  testCopy(false);
  // start appending data to source
  appendSourceData();

  // do the distcp again with -update and -append option
  CopyMapper copyMapper = new CopyMapper();
  StubContext stubContext = new StubContext(getConfiguration(), null, 0);
  Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
      stubContext.getContext();
  // Enable append 
  context.getConfiguration().setBoolean(
      DistCpOptionSwitch.APPEND.getConfigLabel(), true);
  copyMapper.setup(context);
  for (Path path: pathList) {
    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
            new CopyListingFileStatus(cluster.getFileSystem().getFileStatus(
                path)), context);
  }

  verifyCopy(fs, false);
  // verify that we only copied new appended data
  Assert.assertEquals(nFiles * DEFAULT_FILE_SIZE * 2, stubContext
      .getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
      .getValue());
  Assert.assertEquals(pathList.size(), stubContext.getReporter().
      getCounter(CopyMapper.Counter.COPY).getValue());
}
 
Developer: naver, Project: hadoop, Lines: 32, Source: TestCopyMapper.java

Example 2: testMakeDirFailure

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
@Test(timeout=40000)
public void testMakeDirFailure() {
  try {
    deleteState();
    createSourceData();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
            = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    String workPath = new Path("hftp://localhost:1234/*/*/*/?/")
            .makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
    configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
            workPath);
    copyMapper.setup(context);

    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), pathList.get(0))),
            new CopyListingFileStatus(fs.getFileStatus(pathList.get(0))), context);

    Assert.assertTrue("There should have been an exception.", false);
  }
  catch (Exception ignore) {
  }
}
 
Developer: naver, Project: hadoop, Lines: 28, Source: TestCopyMapper.java

Example 3: testCopyFailOnBlockSizeDifference

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
@Test(timeout=40000)
public void testCopyFailOnBlockSizeDifference() {
  try {

    deleteState();
    createSourceDataWithDifferentBlockSize();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    EnumSet<DistCpOptions.FileAttribute> fileAttributes
        = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
    configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
        DistCpUtils.packAttributes(fileAttributes));

    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
          new CopyListingFileStatus(fileStatus), context);
    }

    Assert.fail("Copy should have failed because of block-size difference.");
  }
  catch (Exception exception) {
    // Check that the exception suggests the use of -pb/-skipCrc.
    Assert.assertTrue("Failure exception should have suggested the use of -pb.", exception.getCause().getCause().getMessage().contains("pb"));
    Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.", exception.getCause().getCause().getMessage().contains("skipCrc"));
  }
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: TestCopyMapper.java

Example 4: testMakeDirFailure

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
@Test(timeout=40000)
public void testMakeDirFailure() {
  try {
    deleteState();
    createSourceData();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
            = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    String workPath = new Path("webhdfs://localhost:1234/*/*/*/?/")
            .makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
    configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
            workPath);
    copyMapper.setup(context);

    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), pathList.get(0))),
            new CopyListingFileStatus(fs.getFileStatus(pathList.get(0))), context);

    Assert.assertTrue("There should have been an exception.", false);
  }
  catch (Exception ignore) {
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 28, Source: TestCopyMapper.java

Example 5: testMakeDirFailure

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
@Test(timeout=40000)
public void testMakeDirFailure() {
  try {
    deleteState();
    createSourceData();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, FileStatus, Text, Text>.Context context
            = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    String workPath = new Path("hftp://localhost:1234/*/*/*/?/")
            .makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
    configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
            workPath);
    copyMapper.setup(context);

    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), pathList.get(0))),
            fs.getFileStatus(pathList.get(0)), context);

    Assert.assertTrue("There should have been an exception.", false);
  }
  catch (Exception ignore) {
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 28, Source: TestCopyMapper.java

Example 6: testCopyFailOnBlockSizeDifference

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
@Test(timeout=40000)
public void testCopyFailOnBlockSizeDifference() {
  try {

    deleteState();
    createSourceDataWithDifferentBlockSize();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, FileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    EnumSet<DistCpOptions.FileAttribute> fileAttributes
        = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
    configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
        DistCpUtils.packAttributes(fileAttributes));

    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
          fileStatus, context);
    }

    Assert.fail("Copy should have failed because of block-size difference.");
  }
  catch (Exception exception) {
    // Check that the exception suggests the use of -pb/-skipCrc.
    Assert.assertTrue("Failure exception should have suggested the use of -pb.", exception.getCause().getCause().getMessage().contains("pb"));
    Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.", exception.getCause().getCause().getMessage().contains("skipCrc"));
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 36, Source: TestCopyMapper.java

Example 7: testMakeDirFailure

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
@Test(timeout=40000)
public void testMakeDirFailure() {
  try {
    deleteState();
    createSourceData();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
            = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    String workPath = new Path("hftp://localhost:1234/*/*/*/?/")
            .makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
    configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
            workPath);
    copyMapper.setup(context);

    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH),
        pathList.get(0))),
        new CopyListingFileStatus(fs.getFileStatus(pathList.get(0))), context);

    Assert.assertTrue("There should have been an exception.", false);
  }
  catch (Exception ignore) {
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 29, Source: TestCopyMapper.java

Example 8: testCopyFailOnBlockSizeDifference

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
@Test(timeout=40000)
public void testCopyFailOnBlockSizeDifference() {
  try {
    deleteState();
    createSourceDataWithDifferentBlockSize();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    EnumSet<DistCpOptions.FileAttribute> fileAttributes
        = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
    configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
        DistCpUtils.packAttributes(fileAttributes));

    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH),
          path)), new CopyListingFileStatus(fileStatus), context);
    }

    Assert.fail("Copy should have failed because of block-size difference.");
  }
  catch (Exception exception) {
    // Check that the exception suggests the use of -pb/-skipCrc.
    Assert.assertTrue("Failure exception should have suggested the use of -pb.",
        exception.getCause().getCause().getMessage().contains("pb"));
    Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.",
        exception.getCause().getCause().getMessage().contains("skipCrc"));
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 37, Source: TestCopyMapper.java

Example 9: testCopy

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
private void testCopy(boolean preserveChecksum) throws Exception {
  deleteState();
  if (preserveChecksum) {
    createSourceDataWithDifferentChecksumType();
  } else {
    createSourceData();
  }

  FileSystem fs = cluster.getFileSystem();
  CopyMapper copyMapper = new CopyMapper();
  StubContext stubContext = new StubContext(getConfiguration(), null, 0);
  Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
          = stubContext.getContext();

  Configuration configuration = context.getConfiguration();
  EnumSet<DistCpOptions.FileAttribute> fileAttributes
          = EnumSet.of(DistCpOptions.FileAttribute.REPLICATION);
  if (preserveChecksum) {
    fileAttributes.add(DistCpOptions.FileAttribute.CHECKSUMTYPE);
  }
  configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
          DistCpUtils.packAttributes(fileAttributes));

  copyMapper.setup(context);

  for (Path path: pathList) {
    copyMapper.map(
        new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
        new CopyListingFileStatus(fs.getFileStatus(path)), context);
  }

  // Check that the maps worked.
  verifyCopy(fs, preserveChecksum);
  Assert.assertEquals(pathList.size(), stubContext.getReporter()
      .getCounter(CopyMapper.Counter.COPY).getValue());
  if (!preserveChecksum) {
    Assert.assertEquals(nFiles * DEFAULT_FILE_SIZE, stubContext
        .getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
        .getValue());
  } else {
    Assert.assertEquals(nFiles * NON_DEFAULT_BLOCK_SIZE * 2, stubContext
        .getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
        .getValue());
  }

  testCopyingExistingFiles(fs, copyMapper, context);
  for (Text value : stubContext.getWriter().values()) {
    Assert.assertTrue(value.toString() + " is not skipped", value
        .toString().startsWith("SKIP:"));
  }
}
 
Developer: naver, Project: hadoop, Lines: 52, Source: TestCopyMapper.java

Example 10: doTestIgnoreFailures

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
private void doTestIgnoreFailures(boolean ignoreFailures) {
  try {
    deleteState();
    createSourceData();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
            = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    configuration.setBoolean(
            DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(),ignoreFailures);
    configuration.setBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(),
            true);
    configuration.setBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(),
            true);
    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      if (!fileStatus.isDirectory()) {
        fs.delete(path, true);
        copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
                new CopyListingFileStatus(fileStatus), context);
      }
    }
    if (ignoreFailures) {
      for (Text value : stubContext.getWriter().values()) {
        Assert.assertTrue(value.toString() + " did not fail",
            value.toString().startsWith("FAIL:"));
      }
    }
    Assert.assertTrue("There should have been an exception.", ignoreFailures);
  }
  catch (Exception e) {
    Assert.assertTrue("Unexpected exception: " + e.getMessage(),
            !ignoreFailures);
    e.printStackTrace();
  }
}
 
Developer: naver, Project: hadoop, Lines: 42, Source: TestCopyMapper.java

Example 11: testSingleFileCopy

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
/**
 * If a single file is being copied to a location where the file (of the same
 * name) already exists, then the file shouldn't be skipped.
 */
@Test(timeout=40000)
public void testSingleFileCopy() {
  try {
    deleteState();
    touchFile(SOURCE_PATH + "/1");
    Path sourceFilePath = pathList.get(0);
    Path targetFilePath = new Path(sourceFilePath.toString().replaceAll(
            SOURCE_PATH, TARGET_PATH));
    touchFile(targetFilePath.toString());

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
            = stubContext.getContext();

    context.getConfiguration().set(
            DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
            targetFilePath.getParent().toString()); // Parent directory.
    copyMapper.setup(context);

    final CopyListingFileStatus sourceFileStatus = new CopyListingFileStatus(
      fs.getFileStatus(sourceFilePath));

    long before = fs.getFileStatus(targetFilePath).getModificationTime();
    copyMapper.map(new Text(DistCpUtils.getRelativePath(
            new Path(SOURCE_PATH), sourceFilePath)), sourceFileStatus, context);
    long after = fs.getFileStatus(targetFilePath).getModificationTime();

    Assert.assertTrue("File should have been skipped", before == after);

    context.getConfiguration().set(
            DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
            targetFilePath.toString()); // Specify the file path.
    copyMapper.setup(context);

    before = fs.getFileStatus(targetFilePath).getModificationTime();
    try { Thread.sleep(2); } catch (Throwable ignore) {}
    copyMapper.map(new Text(DistCpUtils.getRelativePath(
            new Path(SOURCE_PATH), sourceFilePath)), sourceFileStatus, context);
    after = fs.getFileStatus(targetFilePath).getModificationTime();

    Assert.assertTrue("File should have been overwritten.", before < after);

  } catch (Exception exception) {
    Assert.fail("Unexpected exception: " + exception.getMessage());
    exception.printStackTrace();
  }
}
 
Developer: naver, Project: hadoop, Lines: 54, Source: TestCopyMapper.java

Example 12: testGetSplits

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
public void testGetSplits(int nMaps) throws Exception {
  DistCpOptions options = getOptions(nMaps);
  Configuration configuration = new Configuration();
  configuration.set("mapred.map.tasks",
                    String.valueOf(options.getMaxMaps()));
  Path listFile = new Path(cluster.getFileSystem().getUri().toString()
      + "/tmp/testGetSplits_1/fileList.seq");
  CopyListing.getCopyListing(configuration, CREDENTIALS, options).
      buildListing(listFile, options);

  JobContext jobContext = new JobContextImpl(configuration, new JobID());
  UniformSizeInputFormat uniformSizeInputFormat = new UniformSizeInputFormat();
  List<InputSplit> splits
          = uniformSizeInputFormat.getSplits(jobContext);

  int sizePerMap = totalFileSize/nMaps;

  checkSplits(listFile, splits);

  int doubleCheckedTotalSize = 0;
  int previousSplitSize = -1;
  for (int i=0; i<splits.size(); ++i) {
    InputSplit split = splits.get(i);
    int currentSplitSize = 0;
    RecordReader<Text, CopyListingFileStatus> recordReader =
      uniformSizeInputFormat.createRecordReader(split, null);
    StubContext stubContext = new StubContext(jobContext.getConfiguration(),
                                              recordReader, 0);
    final TaskAttemptContext taskAttemptContext
       = stubContext.getContext();
    recordReader.initialize(split, taskAttemptContext);
    while (recordReader.nextKeyValue()) {
      Path sourcePath = recordReader.getCurrentValue().getPath();
      FileSystem fs = sourcePath.getFileSystem(configuration);
      FileStatus[] fileStatus = fs.listStatus(sourcePath);
      if (fileStatus.length > 1) {
        continue;
      }
      currentSplitSize += fileStatus[0].getLen();
    }
    Assert.assertTrue(
         previousSplitSize == -1
             || Math.abs(currentSplitSize - previousSplitSize) < 0.1*sizePerMap
             || i == splits.size()-1);

    doubleCheckedTotalSize += currentSplitSize;
  }

  Assert.assertEquals(totalFileSize, doubleCheckedTotalSize);
}
 
Developer: naver, Project: hadoop, Lines: 51, Source: TestUniformSizeInputFormat.java

Example 13: testGetSplits

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
@Test
public void testGetSplits() throws Exception {
  DistCpOptions options = getOptions();
  Configuration configuration = new Configuration();
  configuration.set("mapred.map.tasks",
                    String.valueOf(options.getMaxMaps()));
  CopyListing.getCopyListing(configuration, CREDENTIALS, options).buildListing(
          new Path(cluster.getFileSystem().getUri().toString()
                  +"/tmp/testDynInputFormat/fileList.seq"), options);

  JobContext jobContext = new JobContextImpl(configuration, new JobID());
  DynamicInputFormat<Text, CopyListingFileStatus> inputFormat =
      new DynamicInputFormat<Text, CopyListingFileStatus>();
  List<InputSplit> splits = inputFormat.getSplits(jobContext);

  int nFiles = 0;
  int taskId = 0;

  for (InputSplit split : splits) {
    RecordReader<Text, CopyListingFileStatus> recordReader =
         inputFormat.createRecordReader(split, null);
    StubContext stubContext = new StubContext(jobContext.getConfiguration(),
                                              recordReader, taskId);
    final TaskAttemptContext taskAttemptContext
       = stubContext.getContext();
    
    recordReader.initialize(split, taskAttemptContext); // initialize with this iteration's split
    float previousProgressValue = 0f;
    while (recordReader.nextKeyValue()) {
      CopyListingFileStatus fileStatus = recordReader.getCurrentValue();
      String source = fileStatus.getPath().toString();
      System.out.println(source);
      Assert.assertTrue(expectedFilePaths.contains(source));
      final float progress = recordReader.getProgress();
      Assert.assertTrue(progress >= previousProgressValue);
      Assert.assertTrue(progress >= 0.0f);
      Assert.assertTrue(progress <= 1.0f);
      previousProgressValue = progress;
      ++nFiles;
    }
    Assert.assertTrue(recordReader.getProgress() == 1.0f);

    ++taskId;
  }

  Assert.assertEquals(expectedFilePaths.size(), nFiles);
}
 
Developer: naver, Project: hadoop, Lines: 48, Source: TestDynamicInputFormat.java

Example 14: testGetSplits

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
@Test
public void testGetSplits() throws Exception {
  DistCpOptions options = getOptions();
  Configuration configuration = new Configuration();
  configuration.set("mapred.map.tasks",
                    String.valueOf(options.getMaxMaps()));
  CopyListing.getCopyListing(configuration, CREDENTIALS, options).buildListing(
          new Path(cluster.getFileSystem().getUri().toString()
                  +"/tmp/testDynInputFormat/fileList.seq"), options);

  JobContext jobContext = new JobContextImpl(configuration, new JobID());
  DynamicInputFormat<Text, CopyListingFileStatus> inputFormat =
      new DynamicInputFormat<Text, CopyListingFileStatus>();
  List<InputSplit> splits = inputFormat.getSplits(jobContext);

  int nFiles = 0;
  int taskId = 0;

  for (InputSplit split : splits) {
    StubContext stubContext = new StubContext(jobContext.getConfiguration(),
                                              null, taskId);
    final TaskAttemptContext taskAttemptContext
       = stubContext.getContext();

    RecordReader<Text, CopyListingFileStatus> recordReader =
        inputFormat.createRecordReader(split, taskAttemptContext);
    stubContext.setReader(recordReader);
    recordReader.initialize(split, taskAttemptContext); // initialize with this iteration's split
    float previousProgressValue = 0f;
    while (recordReader.nextKeyValue()) {
      CopyListingFileStatus fileStatus = recordReader.getCurrentValue();
      String source = fileStatus.getPath().toString();
      System.out.println(source);
      Assert.assertTrue(expectedFilePaths.contains(source));
      final float progress = recordReader.getProgress();
      Assert.assertTrue(progress >= previousProgressValue);
      Assert.assertTrue(progress >= 0.0f);
      Assert.assertTrue(progress <= 1.0f);
      previousProgressValue = progress;
      ++nFiles;
    }
    Assert.assertTrue(recordReader.getProgress() == 1.0f);

    ++taskId;
  }

  Assert.assertEquals(expectedFilePaths.size(), nFiles);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 49, Source: TestDynamicInputFormat.java

Example 15: doTestIgnoreFailures

import org.apache.hadoop.tools.StubContext; // import the dependent package/class
private void doTestIgnoreFailures(boolean ignoreFailures) {
  try {
    deleteState();
    createSourceData();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, FileStatus, Text, Text>.Context context
            = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    configuration.setBoolean(
            DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(),ignoreFailures);
    configuration.setBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(),
            true);
    configuration.setBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(),
            true);
    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      if (!fileStatus.isDirectory()) {
        fs.delete(path, true);
        copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
                fileStatus, context);
      }
    }
    if (ignoreFailures) {
      for (Text value : stubContext.getWriter().values()) {
        Assert.assertTrue(value.toString() + " did not fail",
            value.toString().startsWith("FAIL:"));
      }
    }
    Assert.assertTrue("There should have been an exception.", ignoreFailures);
  }
  catch (Exception e) {
    Assert.assertTrue("Unexpected exception: " + e.getMessage(),
            !ignoreFailures);
    e.printStackTrace();
  }
}
 
Developer: ict-carch, Project: hadoop-plus, Lines: 42, Source: TestCopyMapper.java


Note: The org.apache.hadoop.tools.StubContext.getContext examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. Consult each project's license before redistributing or reusing the code, and do not repost without permission.