This article collects typical usage examples of the Java method org.apache.hadoop.tools.StubContext.getContext. If you are unsure what StubContext.getContext does or how to call it, the code samples curated below should help; you can also explore the enclosing class, org.apache.hadoop.tools.StubContext, for more detail.
The sections below show 15 code examples of StubContext.getContext, all taken from DistCp-related tests and ordered roughly by popularity.
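
Before the individual examples, here is a minimal sketch of the pattern they all share: build a StubContext around a Configuration (plus an optional RecordReader and a task id), call getContext() to obtain a Mapper.Context / TaskAttemptContext, and drive a CopyMapper (or a RecordReader) against it directly, without running a MapReduce job. The helper names getConfiguration(), relativePath, and sourceStatus are placeholders borrowed from the examples below, so treat this as an illustrative outline rather than a complete, compilable test.

// Sketch only: assumes a test harness that provides getConfiguration(), plus a
// pre-computed relative path and CopyListingFileStatus for one source file.
CopyMapper copyMapper = new CopyMapper();
StubContext stubContext = new StubContext(getConfiguration(), null, 0); // no reader, task id 0
Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
    stubContext.getContext();                                           // the method documented here

// Per-job DistCp switches can be toggled on the context's Configuration.
context.getConfiguration().setBoolean(
    DistCpOptionSwitch.SKIP_CRC.getConfigLabel(), true);

copyMapper.setup(context);
copyMapper.map(new Text(relativePath), sourceStatus, context);

// Counters and emitted records are then inspected through the stub.
long copied = stubContext.getReporter()
    .getCounter(CopyMapper.Counter.COPY).getValue();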
Example 1: testCopyWithAppend

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

@Test
public void testCopyWithAppend() throws Exception {
  final FileSystem fs = cluster.getFileSystem();
  // do the first distcp
  testCopy(false);
  // start appending data to source
  appendSourceData();

  // do the distcp again with -update and -append option
  CopyMapper copyMapper = new CopyMapper();
  StubContext stubContext = new StubContext(getConfiguration(), null, 0);
  Mapper<Text, CopyListingFileStatus, Text, Text>.Context context =
      stubContext.getContext();
  // Enable append
  context.getConfiguration().setBoolean(
      DistCpOptionSwitch.APPEND.getConfigLabel(), true);
  copyMapper.setup(context);
  for (Path path : pathList) {
    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
        new CopyListingFileStatus(cluster.getFileSystem().getFileStatus(path)), context);
  }

  verifyCopy(fs, false);
  // verify that we only copied new appended data
  Assert.assertEquals(nFiles * DEFAULT_FILE_SIZE * 2, stubContext
      .getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
      .getValue());
  Assert.assertEquals(pathList.size(), stubContext.getReporter()
      .getCounter(CopyMapper.Counter.COPY).getValue());
}
Example 2: testMakeDirFailure

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

@Test(timeout=40000)
public void testMakeDirFailure() {
  try {
    deleteState();
    createSourceData();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    String workPath = new Path("hftp://localhost:1234/*/*/*/?/")
        .makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
    configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
        workPath);
    copyMapper.setup(context);

    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), pathList.get(0))),
        new CopyListingFileStatus(fs.getFileStatus(pathList.get(0))), context);

    Assert.assertTrue("There should have been an exception.", false);
  }
  catch (Exception ignore) {
    // expected: the bogus work path cannot be created, so the map attempt fails
  }
}
Example 3: testCopyFailOnBlockSizeDifference

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

@Test(timeout=40000)
public void testCopyFailOnBlockSizeDifference() {
  try {
    deleteState();
    createSourceDataWithDifferentBlockSize();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    EnumSet<DistCpOptions.FileAttribute> fileAttributes
        = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
    configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
        DistCpUtils.packAttributes(fileAttributes));

    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
          new CopyListingFileStatus(fileStatus), context);
    }

    Assert.fail("Copy should have failed because of block-size difference.");
  }
  catch (Exception exception) {
    // Check that the exception suggests the use of -pb/-skipCrc.
    Assert.assertTrue("Failure exception should have suggested the use of -pb.",
        exception.getCause().getCause().getMessage().contains("pb"));
    Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.",
        exception.getCause().getCause().getMessage().contains("skipCrc"));
  }
}
Example 4: testMakeDirFailure

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

@Test(timeout=40000)
public void testMakeDirFailure() {
  try {
    deleteState();
    createSourceData();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    String workPath = new Path("webhdfs://localhost:1234/*/*/*/?/")
        .makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
    configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
        workPath);
    copyMapper.setup(context);

    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), pathList.get(0))),
        new CopyListingFileStatus(fs.getFileStatus(pathList.get(0))), context);

    Assert.assertTrue("There should have been an exception.", false);
  }
  catch (Exception ignore) {
    // expected: the bogus work path cannot be created, so the map attempt fails
  }
}
Example 5: testMakeDirFailure

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

@Test(timeout=40000)
public void testMakeDirFailure() {
  try {
    deleteState();
    createSourceData();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, FileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    String workPath = new Path("hftp://localhost:1234/*/*/*/?/")
        .makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
    configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
        workPath);
    copyMapper.setup(context);

    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), pathList.get(0))),
        fs.getFileStatus(pathList.get(0)), context);

    Assert.assertTrue("There should have been an exception.", false);
  }
  catch (Exception ignore) {
    // expected: the bogus work path cannot be created, so the map attempt fails
  }
}
Example 6: testCopyFailOnBlockSizeDifference

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

@Test(timeout=40000)
public void testCopyFailOnBlockSizeDifference() {
  try {
    deleteState();
    createSourceDataWithDifferentBlockSize();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, FileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    EnumSet<DistCpOptions.FileAttribute> fileAttributes
        = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
    configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
        DistCpUtils.packAttributes(fileAttributes));

    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
          fileStatus, context);
    }

    Assert.fail("Copy should have failed because of block-size difference.");
  }
  catch (Exception exception) {
    // Check that the exception suggests the use of -pb/-skipCrc.
    Assert.assertTrue("Failure exception should have suggested the use of -pb.",
        exception.getCause().getCause().getMessage().contains("pb"));
    Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.",
        exception.getCause().getCause().getMessage().contains("skipCrc"));
  }
}
Example 7: testMakeDirFailure

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

@Test(timeout=40000)
public void testMakeDirFailure() {
  try {
    deleteState();
    createSourceData();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    String workPath = new Path("hftp://localhost:1234/*/*/*/?/")
        .makeQualified(fs.getUri(), fs.getWorkingDirectory()).toString();
    configuration.set(DistCpConstants.CONF_LABEL_TARGET_WORK_PATH,
        workPath);
    copyMapper.setup(context);

    copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH),
            pathList.get(0))),
        new CopyListingFileStatus(fs.getFileStatus(pathList.get(0))), context);

    Assert.assertTrue("There should have been an exception.", false);
  }
  catch (Exception ignore) {
    // expected: the bogus work path cannot be created, so the map attempt fails
  }
}
Example 8: testCopyFailOnBlockSizeDifference

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

@Test(timeout=40000)
public void testCopyFailOnBlockSizeDifference() {
  try {
    deleteState();
    createSourceDataWithDifferentBlockSize();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    EnumSet<DistCpOptions.FileAttribute> fileAttributes
        = EnumSet.noneOf(DistCpOptions.FileAttribute.class);
    configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
        DistCpUtils.packAttributes(fileAttributes));

    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH),
          path)), new CopyListingFileStatus(fileStatus), context);
    }

    Assert.fail("Copy should have failed because of block-size difference.");
  }
  catch (Exception exception) {
    // Check that the exception suggests the use of -pb/-skipCrc.
    Assert.assertTrue("Failure exception should have suggested the use of -pb.",
        exception.getCause().getCause().getMessage().contains("pb"));
    Assert.assertTrue("Failure exception should have suggested the use of -skipCrc.",
        exception.getCause().getCause().getMessage().contains("skipCrc"));
  }
}
Example 9: testCopy

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

private void testCopy(boolean preserveChecksum) throws Exception {
  deleteState();
  if (preserveChecksum) {
    createSourceDataWithDifferentChecksumType();
  } else {
    createSourceData();
  }

  FileSystem fs = cluster.getFileSystem();
  CopyMapper copyMapper = new CopyMapper();
  StubContext stubContext = new StubContext(getConfiguration(), null, 0);
  Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
      = stubContext.getContext();

  Configuration configuration = context.getConfiguration();
  EnumSet<DistCpOptions.FileAttribute> fileAttributes
      = EnumSet.of(DistCpOptions.FileAttribute.REPLICATION);
  if (preserveChecksum) {
    fileAttributes.add(DistCpOptions.FileAttribute.CHECKSUMTYPE);
  }
  configuration.set(DistCpOptionSwitch.PRESERVE_STATUS.getConfigLabel(),
      DistCpUtils.packAttributes(fileAttributes));

  copyMapper.setup(context);

  for (Path path : pathList) {
    copyMapper.map(
        new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
        new CopyListingFileStatus(fs.getFileStatus(path)), context);
  }

  // Check that the maps worked.
  verifyCopy(fs, preserveChecksum);
  Assert.assertEquals(pathList.size(), stubContext.getReporter()
      .getCounter(CopyMapper.Counter.COPY).getValue());
  if (!preserveChecksum) {
    Assert.assertEquals(nFiles * DEFAULT_FILE_SIZE, stubContext
        .getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
        .getValue());
  } else {
    Assert.assertEquals(nFiles * NON_DEFAULT_BLOCK_SIZE * 2, stubContext
        .getReporter().getCounter(CopyMapper.Counter.BYTESCOPIED)
        .getValue());
  }

  testCopyingExistingFiles(fs, copyMapper, context);
  for (Text value : stubContext.getWriter().values()) {
    Assert.assertTrue(value.toString() + " is not skipped", value
        .toString().startsWith("SKIP:"));
  }
}
Example 10: doTestIgnoreFailures

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

private void doTestIgnoreFailures(boolean ignoreFailures) {
  try {
    deleteState();
    createSourceData();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    configuration.setBoolean(
        DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), ignoreFailures);
    configuration.setBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(),
        true);
    configuration.setBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(),
        true);
    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      if (!fileStatus.isDirectory()) {
        fs.delete(path, true);
        copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
            new CopyListingFileStatus(fileStatus), context);
      }
    }
    if (ignoreFailures) {
      for (Text value : stubContext.getWriter().values()) {
        Assert.assertTrue(value.toString() + " is not skipped",
            value.toString().startsWith("FAIL:"));
      }
    }
    Assert.assertTrue("There should have been an exception.", ignoreFailures);
  }
  catch (Exception e) {
    Assert.assertTrue("Unexpected exception: " + e.getMessage(),
        !ignoreFailures);
    e.printStackTrace();
  }
}
Example 11: testSingleFileCopy

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

/**
 * If a single file is being copied to a location where the file (of the same
 * name) already exists, then the file shouldn't be skipped.
 */
@Test(timeout=40000)
public void testSingleFileCopy() {
  try {
    deleteState();
    touchFile(SOURCE_PATH + "/1");
    Path sourceFilePath = pathList.get(0);
    Path targetFilePath = new Path(sourceFilePath.toString().replaceAll(
        SOURCE_PATH, TARGET_PATH));
    touchFile(targetFilePath.toString());

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, CopyListingFileStatus, Text, Text>.Context context
        = stubContext.getContext();

    context.getConfiguration().set(
        DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
        targetFilePath.getParent().toString()); // Parent directory.
    copyMapper.setup(context);

    final CopyListingFileStatus sourceFileStatus = new CopyListingFileStatus(
        fs.getFileStatus(sourceFilePath));

    long before = fs.getFileStatus(targetFilePath).getModificationTime();
    copyMapper.map(new Text(DistCpUtils.getRelativePath(
        new Path(SOURCE_PATH), sourceFilePath)), sourceFileStatus, context);
    long after = fs.getFileStatus(targetFilePath).getModificationTime();

    Assert.assertTrue("File should have been skipped", before == after);

    context.getConfiguration().set(
        DistCpConstants.CONF_LABEL_TARGET_FINAL_PATH,
        targetFilePath.toString()); // Specify the file path.
    copyMapper.setup(context);

    before = fs.getFileStatus(targetFilePath).getModificationTime();
    try { Thread.sleep(2); } catch (Throwable ignore) {}
    copyMapper.map(new Text(DistCpUtils.getRelativePath(
        new Path(SOURCE_PATH), sourceFilePath)), sourceFileStatus, context);
    after = fs.getFileStatus(targetFilePath).getModificationTime();

    Assert.assertTrue("File should have been overwritten.", before < after);

  } catch (Exception exception) {
    Assert.fail("Unexpected exception: " + exception.getMessage());
    exception.printStackTrace();
  }
}
Example 12: testGetSplits

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

public void testGetSplits(int nMaps) throws Exception {
  DistCpOptions options = getOptions(nMaps);
  Configuration configuration = new Configuration();
  configuration.set("mapred.map.tasks",
      String.valueOf(options.getMaxMaps()));
  Path listFile = new Path(cluster.getFileSystem().getUri().toString()
      + "/tmp/testGetSplits_1/fileList.seq");
  CopyListing.getCopyListing(configuration, CREDENTIALS, options)
      .buildListing(listFile, options);

  JobContext jobContext = new JobContextImpl(configuration, new JobID());
  UniformSizeInputFormat uniformSizeInputFormat = new UniformSizeInputFormat();
  List<InputSplit> splits
      = uniformSizeInputFormat.getSplits(jobContext);

  int sizePerMap = totalFileSize / nMaps;

  checkSplits(listFile, splits);

  int doubleCheckedTotalSize = 0;
  int previousSplitSize = -1;
  for (int i = 0; i < splits.size(); ++i) {
    InputSplit split = splits.get(i);
    int currentSplitSize = 0;
    RecordReader<Text, CopyListingFileStatus> recordReader =
        uniformSizeInputFormat.createRecordReader(split, null);
    StubContext stubContext = new StubContext(jobContext.getConfiguration(),
        recordReader, 0);
    final TaskAttemptContext taskAttemptContext
        = stubContext.getContext();
    recordReader.initialize(split, taskAttemptContext);
    while (recordReader.nextKeyValue()) {
      Path sourcePath = recordReader.getCurrentValue().getPath();
      FileSystem fs = sourcePath.getFileSystem(configuration);
      FileStatus[] fileStatus = fs.listStatus(sourcePath);
      if (fileStatus.length > 1) {
        continue;
      }
      currentSplitSize += fileStatus[0].getLen();
    }
    Assert.assertTrue(
        previousSplitSize == -1
            || Math.abs(currentSplitSize - previousSplitSize) < 0.1 * sizePerMap
            || i == splits.size() - 1);

    doubleCheckedTotalSize += currentSplitSize;
  }

  Assert.assertEquals(totalFileSize, doubleCheckedTotalSize);
}
Example 13: testGetSplits

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

@Test
public void testGetSplits() throws Exception {
  DistCpOptions options = getOptions();
  Configuration configuration = new Configuration();
  configuration.set("mapred.map.tasks",
      String.valueOf(options.getMaxMaps()));
  CopyListing.getCopyListing(configuration, CREDENTIALS, options).buildListing(
      new Path(cluster.getFileSystem().getUri().toString()
          + "/tmp/testDynInputFormat/fileList.seq"), options);

  JobContext jobContext = new JobContextImpl(configuration, new JobID());
  DynamicInputFormat<Text, CopyListingFileStatus> inputFormat =
      new DynamicInputFormat<Text, CopyListingFileStatus>();
  List<InputSplit> splits = inputFormat.getSplits(jobContext);

  int nFiles = 0;
  int taskId = 0;

  for (InputSplit split : splits) {
    RecordReader<Text, CopyListingFileStatus> recordReader =
        inputFormat.createRecordReader(split, null);
    StubContext stubContext = new StubContext(jobContext.getConfiguration(),
        recordReader, taskId);
    final TaskAttemptContext taskAttemptContext
        = stubContext.getContext();
    recordReader.initialize(splits.get(0), taskAttemptContext);
    float previousProgressValue = 0f;
    while (recordReader.nextKeyValue()) {
      CopyListingFileStatus fileStatus = recordReader.getCurrentValue();
      String source = fileStatus.getPath().toString();
      System.out.println(source);
      Assert.assertTrue(expectedFilePaths.contains(source));
      final float progress = recordReader.getProgress();
      Assert.assertTrue(progress >= previousProgressValue);
      Assert.assertTrue(progress >= 0.0f);
      Assert.assertTrue(progress <= 1.0f);
      previousProgressValue = progress;
      ++nFiles;
    }
    Assert.assertTrue(recordReader.getProgress() == 1.0f);

    ++taskId;
  }

  Assert.assertEquals(expectedFilePaths.size(), nFiles);
}
Example 14: testGetSplits

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

@Test
public void testGetSplits() throws Exception {
  DistCpOptions options = getOptions();
  Configuration configuration = new Configuration();
  configuration.set("mapred.map.tasks",
      String.valueOf(options.getMaxMaps()));
  CopyListing.getCopyListing(configuration, CREDENTIALS, options).buildListing(
      new Path(cluster.getFileSystem().getUri().toString()
          + "/tmp/testDynInputFormat/fileList.seq"), options);

  JobContext jobContext = new JobContextImpl(configuration, new JobID());
  DynamicInputFormat<Text, CopyListingFileStatus> inputFormat =
      new DynamicInputFormat<Text, CopyListingFileStatus>();
  List<InputSplit> splits = inputFormat.getSplits(jobContext);

  int nFiles = 0;
  int taskId = 0;

  for (InputSplit split : splits) {
    StubContext stubContext = new StubContext(jobContext.getConfiguration(),
        null, taskId);
    final TaskAttemptContext taskAttemptContext
        = stubContext.getContext();

    RecordReader<Text, CopyListingFileStatus> recordReader =
        inputFormat.createRecordReader(split, taskAttemptContext);
    stubContext.setReader(recordReader);
    recordReader.initialize(splits.get(0), taskAttemptContext);
    float previousProgressValue = 0f;
    while (recordReader.nextKeyValue()) {
      CopyListingFileStatus fileStatus = recordReader.getCurrentValue();
      String source = fileStatus.getPath().toString();
      System.out.println(source);
      Assert.assertTrue(expectedFilePaths.contains(source));
      final float progress = recordReader.getProgress();
      Assert.assertTrue(progress >= previousProgressValue);
      Assert.assertTrue(progress >= 0.0f);
      Assert.assertTrue(progress <= 1.0f);
      previousProgressValue = progress;
      ++nFiles;
    }
    Assert.assertTrue(recordReader.getProgress() == 1.0f);

    ++taskId;
  }

  Assert.assertEquals(expectedFilePaths.size(), nFiles);
}
Example 15: doTestIgnoreFailures

import org.apache.hadoop.tools.StubContext; // import the package/class this method depends on

private void doTestIgnoreFailures(boolean ignoreFailures) {
  try {
    deleteState();
    createSourceData();

    FileSystem fs = cluster.getFileSystem();
    CopyMapper copyMapper = new CopyMapper();
    StubContext stubContext = new StubContext(getConfiguration(), null, 0);
    Mapper<Text, FileStatus, Text, Text>.Context context
        = stubContext.getContext();

    Configuration configuration = context.getConfiguration();
    configuration.setBoolean(
        DistCpOptionSwitch.IGNORE_FAILURES.getConfigLabel(), ignoreFailures);
    configuration.setBoolean(DistCpOptionSwitch.OVERWRITE.getConfigLabel(),
        true);
    configuration.setBoolean(DistCpOptionSwitch.SKIP_CRC.getConfigLabel(),
        true);
    copyMapper.setup(context);

    for (Path path : pathList) {
      final FileStatus fileStatus = fs.getFileStatus(path);
      if (!fileStatus.isDirectory()) {
        fs.delete(path, true);
        copyMapper.map(new Text(DistCpUtils.getRelativePath(new Path(SOURCE_PATH), path)),
            fileStatus, context);
      }
    }
    if (ignoreFailures) {
      for (Text value : stubContext.getWriter().values()) {
        Assert.assertTrue(value.toString() + " is not skipped",
            value.toString().startsWith("FAIL:"));
      }
    }
    Assert.assertTrue("There should have been an exception.", ignoreFailures);
  }
  catch (Exception e) {
    Assert.assertTrue("Unexpected exception: " + e.getMessage(),
        !ignoreFailures);
    e.printStackTrace();
  }
}