This article collects typical usage of the Java method org.apache.hadoop.conf.Configuration.setClass. If you are wondering what Configuration.setClass does, how to call it, or where to find real-world usage, the curated code samples below should help. You can also read up on the enclosing class, org.apache.hadoop.conf.Configuration, for further context.
The following presents 15 code examples of Configuration.setClass, ordered by popularity.
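Before the examples, here is a minimal sketch of the setClass/getClass round trip that all of them rely on. Configuration.setClass(name, theClass, xface) checks that theClass is assignable to the interface xface and stores the class name under the given key; the matching getClass call reads it back, and ReflectionUtils.newInstance is the usual way to instantiate the result. The Codec interface, MyCodec class, and "my.codec.class" key are hypothetical placeholders for illustration, not Hadoop APIs.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.util.ReflectionUtils;

// Hypothetical interface and implementation, for illustration only.
interface Codec { byte[] encode(String s); }
class MyCodec implements Codec {
  public byte[] encode(String s) { return s.getBytes(); }
}

public class SetClassRoundTrip {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Store MyCodec under "my.codec.class"; setClass verifies that
    // MyCodec implements the Codec interface.
    conf.setClass("my.codec.class", MyCodec.class, Codec.class);
    // Read the class back (with a default) and instantiate it reflectively.
    Class<? extends Codec> clazz =
        conf.getClass("my.codec.class", MyCodec.class, Codec.class);
    Codec codec = ReflectionUtils.newInstance(clazz, conf);
  }
}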
Example 1: main
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public static void main(String[] args) {
  Configuration conf = new Configuration(); // assume defaults on the classpath
  conf.setClass("mapreduce.job.inputformat.class", DwCAInputFormat.class, InputFormat.class);
  conf.setStrings("mapreduce.input.fileinputformat.inputdir", "hdfs://ha-nn/tmp/dwca-lep5.zip");
  conf.setClass("key.class", Text.class, Object.class);
  conf.setClass("value.class", ExtendedRecord.class, Object.class);

  Pipeline p = newPipeline(args, conf);
  Coders.registerAvroCoders(p, UntypedOccurrence.class, TypedOccurrence.class, ExtendedRecord.class);

  PCollection<KV<Text, ExtendedRecord>> rawRecords =
      p.apply("Read DwC-A", HadoopInputFormatIO.<Text, ExtendedRecord>read().withConfiguration(conf));

  PCollection<UntypedOccurrence> verbatimRecords = rawRecords.apply(
      "Convert to Avro", ParDo.of(fromExtendedRecordKVP()));

  verbatimRecords.apply(
      "Write Avro files", AvroIO.write(UntypedOccurrence.class).to("hdfs://ha-nn/tmp/dwca-lep5.avro"));

  LOG.info("Starting the pipeline");
  PipelineResult result = p.run();
  result.waitUntilFinish();
  LOG.info("Pipeline finished with state: {} ", result.getState());
}
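A note on Example 1: Beam's HadoopInputFormatIO expects the supplied Configuration to carry "mapreduce.job.inputformat.class", "key.class", and "value.class" entries so that it can instantiate the InputFormat and infer the key/value types of the resulting PCollection, which is why setClass is used for all three above.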
Example 2: testAuxServiceRecoverySetup
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test
public void testAuxServiceRecoverySetup() throws IOException {
  Configuration conf = new YarnConfiguration();
  conf.setBoolean(YarnConfiguration.NM_RECOVERY_ENABLED, true);
  conf.set(YarnConfiguration.NM_RECOVERY_DIR, TEST_DIR.toString());
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES,
      new String[] { "Asrv", "Bsrv" });
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
      RecoverableServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
      RecoverableServiceB.class, Service.class);
  try {
    final AuxServices aux = new AuxServices();
    aux.init(conf);
    Assert.assertEquals(2, aux.getServices().size());
    File auxStorageDir = new File(TEST_DIR,
        AuxServices.STATE_STORE_ROOT_NAME);
    Assert.assertEquals(2, auxStorageDir.listFiles().length);
    aux.close();
  } finally {
    FileUtil.fullyDelete(TEST_DIR);
  }
}
Example 3: setUpCluster
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
public void setUpCluster() throws Exception {
  util = getTestingUtil(null);
  Configuration conf = util.getConfiguration();
  if (!util.isDistributedCluster()) {
    // Inject required configuration if we are not running in distributed mode
    conf.setInt(HFile.FORMAT_VERSION_KEY, 3);
    conf.set(HConstants.CRYPTO_KEYPROVIDER_CONF_KEY, KeyProviderForTesting.class.getName());
    conf.set(HConstants.CRYPTO_MASTERKEY_NAME_CONF_KEY, "hbase");
    conf.setClass("hbase.regionserver.hlog.reader.impl", SecureProtobufLogReader.class,
        Reader.class);
    conf.setClass("hbase.regionserver.hlog.writer.impl", SecureProtobufLogWriter.class,
        Writer.class);
    conf.setBoolean(HConstants.ENABLE_WAL_ENCRYPTION, true);
  }
  // Check if the cluster configuration can support this test
  try {
    EncryptionTest.testEncryption(conf, "AES", null);
  } catch (Exception e) {
    LOG.warn("Encryption configuration test did not pass, skipping test");
    return;
  }
  super.setUpCluster();
  initialized = true;
}
Example 4: testCreateSnapshot
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test(timeout = 30000)
public void testCreateSnapshot() throws Exception {
  Path snapRootPath = new Path("/snapPath");
  Path chRootedSnapRootPath = new Path("/a/b/snapPath");
  Configuration conf = new Configuration();
  conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
  URI chrootUri = URI.create("mockfs://foo/a/b");
  ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
  FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem())
      .getRawFileSystem();
  chrootFs.createSnapshot(snapRootPath, "snap1");
  verify(mockFs).createSnapshot(chRootedSnapRootPath, "snap1");
}
Example 5: testAuxUnexpectedStop
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test
public void testAuxUnexpectedStop() {
  Configuration conf = new Configuration();
  conf.setStrings(YarnConfiguration.NM_AUX_SERVICES, new String[] { "Asrv", "Bsrv" });
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Asrv"),
      ServiceA.class, Service.class);
  conf.setClass(String.format(YarnConfiguration.NM_AUX_SERVICE_FMT, "Bsrv"),
      ServiceB.class, Service.class);
  final AuxServices aux = new AuxServices();
  aux.init(conf);
  aux.start();
  Service s = aux.getServices().iterator().next();
  s.stop();
  assertEquals("Auxiliary service stopped, but AuxService unaffected.",
      STOPPED, aux.getServiceState());
  assertTrue(aux.getServices().isEmpty());
}
Example 6: testFailResultCodes
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test(timeout = 5000L)
public void testFailResultCodes() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.setClass("fs.file.impl", LocalFileSystem.class, FileSystem.class);
  LogCLIHelpers cliHelper = new LogCLIHelpers();
  cliHelper.setConf(conf);
  YarnClient mockYarnClient = createMockYarnClient(YarnApplicationState.FINISHED);
  LogsCLI dumper = new LogsCLIForTest(mockYarnClient);
  dumper.setConf(conf);
  // verify dumping a non-existent application's logs returns a failure code
  int exitCode = dumper.run(new String[] {
      "-applicationId", "application_0_0" });
  assertTrue("Should return an error code", exitCode != 0);
  // verify dumping a non-existent container's logs returns a failure code
  exitCode = cliHelper.dumpAContainersLogs("application_0_0", "container_0_0",
      "nonexistentnode:1234", "nobody");
  assertTrue("Should return an error code", exitCode != 0);
}
Example 7: testRandomCompress
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Test
public void testRandomCompress() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(Job.COMPLETION_POLL_INTERVAL_KEY, 100);
  Job job = Job.getInstance(conf);
  conf = job.getConfiguration();
  conf.setInt(MRJobConfig.IO_SORT_MB, 1);
  conf.setBoolean(MRJobConfig.MAP_OUTPUT_COMPRESS, true);
  conf.setClass("test.mapcollection.class", RandomFactory.class,
      RecordFactory.class);
  final Random r = new Random();
  final long seed = r.nextLong();
  LOG.info("SEED: " + seed);
  r.setSeed(seed);
  conf.set(MRJobConfig.MAP_SORT_SPILL_PERCENT,
      Float.toString(Math.max(0.1f, r.nextFloat())));
  RandomFactory.setLengths(conf, r, 1 << 14);
  conf.setInt("test.spillmap.records", r.nextInt(500));
  conf.setLong("test.randomfactory.seed", r.nextLong());
  runTest("randomCompress", job);
}
Example 8: testPluggableTrash
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public void testPluggableTrash() throws IOException {
  Configuration conf = new Configuration();
  // Test plugged TrashPolicy
  conf.setClass("fs.trash.classname", TestTrashPolicy.class, TrashPolicy.class);
  Trash trash = new Trash(conf);
  assertTrue(trash.getTrashPolicy().getClass().equals(TestTrashPolicy.class));
}
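For context, here is a minimal sketch of how the plugged policy above gets resolved on the Hadoop side, modeled on TrashPolicy.getInstance; the getClass call is the counterpart of the test's setClass call:

// Read the plugged class back, falling back to the default policy,
// then instantiate it reflectively with the Configuration injected.
Class<? extends TrashPolicy> trashClass = conf.getClass(
    "fs.trash.classname", TrashPolicyDefault.class, TrashPolicy.class);
TrashPolicy policy = ReflectionUtils.newInstance(trashClass, conf);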
Example 9: setUp
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Before
public void setUp() throws Exception {
  // We want to count additional events, so we reset here
  mockQueueConstructions = 0;
  mockQueuePuts = 0;
  int portRetries = 5;
  int nnPort;
  for (; portRetries > 0; --portRetries) {
    // Pick a random port in the range [30000,60000).
    nnPort = 30000 + rand.nextInt(30000);
    config = new Configuration();
    callQueueConfigKey = "ipc." + nnPort + ".callqueue.impl";
    config.setClass(callQueueConfigKey,
        MockCallQueue.class, BlockingQueue.class);
    config.set("hadoop.security.authorization", "true");
    FileSystem.setDefaultUri(config, "hdfs://localhost:" + nnPort);
    fs = FileSystem.get(config);
    try {
      cluster = new MiniDFSCluster.Builder(config).nameNodePort(nnPort).build();
      cluster.waitActive();
      break;
    } catch (BindException be) {
      // Retry with a different port number.
    }
  }
  if (portRetries == 0) {
    // Bail if we get very unlucky with our choice of ports.
    fail("Failed to pick an ephemeral port for the NameNode RPC server.");
  }
}
Example 10: createConfiguration
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public Configuration createConfiguration() {
  Configuration conf = new YarnConfiguration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
      ResourceScheduler.class);
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);
  conf.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB,
      1024);
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 10240);
  conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, false);
  conf.setFloat(FairSchedulerConfiguration.PREEMPTION_THRESHOLD, 0f);
  return conf;
}
Example 11: setupConf
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Before
public void setupConf() {
  conf = new Configuration();
  conf.setClass("rpc.engine." + StoppedProtocol.class.getName(),
      StoppedRpcEngine.class, RpcEngine.class);
  UserGroupInformation.setConfiguration(conf);
}
Example 12: serviceInit
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Override
protected synchronized void serviceInit(Configuration conf)
    throws Exception {
  appHistoryServer = new ApplicationHistoryServer();
  conf.setClass(YarnConfiguration.APPLICATION_HISTORY_STORE,
      MemoryApplicationHistoryStore.class, ApplicationHistoryStore.class);
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STORE,
      MemoryTimelineStore.class, TimelineStore.class);
  conf.setClass(YarnConfiguration.TIMELINE_SERVICE_STATE_STORE_CLASS,
      MemoryTimelineStateStore.class, TimelineStateStore.class);
  appHistoryServer.init(conf);
  super.serviceInit(conf);
}
Example 13: testAclMethodsPathTranslation
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
/**
 * Tests that ChRootedFileSystem delegates calls for every ACL method to the
 * underlying FileSystem with all Path arguments translated as required to
 * enforce chroot.
 */
@Test
public void testAclMethodsPathTranslation() throws IOException {
  Configuration conf = new Configuration();
  conf.setClass("fs.mockfs.impl", MockFileSystem.class, FileSystem.class);
  URI chrootUri = URI.create("mockfs://foo/a/b");
  ChRootedFileSystem chrootFs = new ChRootedFileSystem(chrootUri, conf);
  FileSystem mockFs = ((FilterFileSystem) chrootFs.getRawFileSystem())
      .getRawFileSystem();

  Path chrootPath = new Path("/c");
  Path rawPath = new Path("/a/b/c");
  List<AclEntry> entries = Collections.emptyList();

  chrootFs.modifyAclEntries(chrootPath, entries);
  verify(mockFs).modifyAclEntries(rawPath, entries);

  chrootFs.removeAclEntries(chrootPath, entries);
  verify(mockFs).removeAclEntries(rawPath, entries);

  chrootFs.removeDefaultAcl(chrootPath);
  verify(mockFs).removeDefaultAcl(rawPath);

  chrootFs.removeAcl(chrootPath);
  verify(mockFs).removeAcl(rawPath);

  chrootFs.setAcl(chrootPath, entries);
  verify(mockFs).setAcl(rawPath, entries);

  chrootFs.getAclStatus(chrootPath);
  verify(mockFs).getAclStatus(rawPath);
}
Example 14: setup
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
@Before
public void setup() {
  FileUtil.fullyDelete(testDir);
  testDir.mkdirs();
  conf = new Configuration();
  conf.setBoolean(JHAdminConfig.MR_HS_RECOVERY_ENABLE, true);
  conf.setClass(JHAdminConfig.MR_HS_STATE_STORE,
      HistoryServerLeveldbStateStoreService.class,
      HistoryServerStateStoreService.class);
  conf.set(JHAdminConfig.MR_HS_LEVELDB_STATE_STORE_PATH,
      testDir.getAbsoluteFile().toString());
}
Example 15: bindFileSystem
import org.apache.hadoop.conf.Configuration; // import the package/class the method depends on
public void bindFileSystem(Configuration configuration) {
  for (String scheme : AWSConstants.S3_FS_PROTOCOLS) {
    configuration.setBoolean(String.format("fs.%s.impl.disable.cache", scheme), true);
    configuration.setClass(String.format("fs.%s.impl", scheme),
        org.apache.hadoop.fs.s3a.S3AFileSystem.class, FileSystem.class);
  }
}
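As a usage sketch (the bucket name is a hypothetical placeholder): once bindFileSystem has populated the Configuration, any FileSystem lookup for one of the bound S3 schemes resolves to S3AFileSystem rather than whatever fs.s3a.impl the cluster defaults supply:

Configuration conf = new Configuration();
bindFileSystem(conf); // registers S3AFileSystem for each scheme in AWSConstants.S3_FS_PROTOCOLS
// "my-bucket" is a placeholder; the returned fs is an S3AFileSystem instance.
FileSystem fs = FileSystem.get(URI.create("s3a://my-bucket/data"), conf);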