This article collects typical usage examples of the Java method org.apache.hadoop.conf.Configuration.setFloat. If you are wondering what Configuration.setFloat does, how to call it, or what real-world uses look like, the curated code samples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.conf.Configuration.
The following presents 15 code examples of the Configuration.setFloat method, sorted by popularity by default.
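Before the collected examples, here is a minimal, self-contained sketch of the basic setFloat/getFloat round trip. The key names ("my.app.beta", "my.app.unset") and values are arbitrary placeholders chosen for illustration, not keys used by Hadoop itself.

import org.apache.hadoop.conf.Configuration;

public class SetFloatDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // Store a float value under an arbitrary example key.
    conf.setFloat("my.app.beta", 0.85f);
    // Read it back; the second argument is the default returned when the key is absent.
    float beta = conf.getFloat("my.app.beta", 0.0f);
    System.out.println("beta = " + beta);       // prints "beta = 0.85"
    // Keys that were never set fall back to the supplied default.
    float missing = conf.getFloat("my.app.unset", 1.0f);
    System.out.println("missing = " + missing); // prints "missing = 1.0"
  }
}

The examples below follow the same pattern: a value is written into the Configuration with setFloat before the consumer (a MapReduce job, an HBase mini-cluster, a YARN scheduler, and so on) is constructed from that configuration.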
Example 1: main
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  conf.setFloat("beta", Float.parseFloat(args[3]));
  Job job = Job.getInstance(conf);
  job.setJarByClass(UnitSum.class);
  ChainMapper.addMapper(job, PassMapper.class, Object.class, Text.class, Text.class, DoubleWritable.class, conf);
  ChainMapper.addMapper(job, BetaMapper.class, Text.class, DoubleWritable.class, Text.class, DoubleWritable.class, conf);
  job.setReducerClass(SumReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(DoubleWritable.class);
  MultipleInputs.addInputPath(job, new Path(args[0]), TextInputFormat.class, PassMapper.class);
  MultipleInputs.addInputPath(job, new Path(args[1]), TextInputFormat.class, BetaMapper.class);
  FileOutputFormat.setOutputPath(job, new Path(args[2]));
  job.waitForCompletion(true);
}
Example 2: beforeClass
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
@BeforeClass
public static void beforeClass() throws Exception {
  Configuration conf = HTU.getConfiguration();
  conf.setFloat("hbase.regionserver.logroll.multiplier", 0.0003f);
  conf.setInt("replication.source.size.capacity", 10240);
  conf.setLong("replication.source.sleepforretries", 100);
  conf.setInt("hbase.regionserver.maxlogs", 10);
  conf.setLong("hbase.master.logcleaner.ttl", 10);
  conf.setInt("zookeeper.recovery.retry", 1);
  conf.setInt("zookeeper.recovery.retry.intervalmill", 10);
  conf.setBoolean(HConstants.REPLICATION_ENABLE_KEY, true);
  conf.setBoolean(ServerRegionReplicaUtil.REGION_REPLICA_REPLICATION_CONF_KEY, true);
  conf.setLong(HConstants.THREAD_WAKE_FREQUENCY, 100);
  conf.setInt("replication.stats.thread.period.seconds", 5);
  conf.setBoolean("hbase.tests.use.shortcircuit.reads", false);
  conf.setInt(HConstants.HBASE_CLIENT_RETRIES_NUMBER, 5); // fewer retries are needed here
  conf.setInt("hbase.client.serverside.retries.multiplier", 1);
  HTU.startMiniCluster(NB_SERVERS);
}
Example 3: main
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
public static void main(String[] args) throws Exception {
  Configuration conf = new Configuration();
  conf.setFloat("beta", Float.parseFloat(args[3]));
  Job job = Job.getInstance(conf);
  job.setJarByClass(UnitMultiplication.class);
  ChainMapper.addMapper(job, TransitionMapper.class, Object.class, Text.class, Text.class, Text.class, conf);
  ChainMapper.addMapper(job, PRMapper.class, Object.class, Text.class, Text.class, Text.class, conf);
  job.setReducerClass(MultiplicationReducer.class);
  job.setOutputKeyClass(Text.class);
  job.setOutputValueClass(Text.class);
  MultipleInputs.addInputPath(job, new Path(args[0]), TextInputFormat.class, TransitionMapper.class);
  MultipleInputs.addInputPath(job, new Path(args[1]), TextInputFormat.class, PRMapper.class);
  FileOutputFormat.setOutputPath(job, new Path(args[2]));
  job.waitForCompletion(true);
}
Example 4: createConfiguration
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
public Configuration createConfiguration() {
  Configuration conf = new YarnConfiguration();
  conf.setClass(YarnConfiguration.RM_SCHEDULER, FairScheduler.class,
      ResourceScheduler.class);
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MINIMUM_ALLOCATION_MB, 0);
  conf.setInt(FairSchedulerConfiguration.RM_SCHEDULER_INCREMENT_ALLOCATION_MB,
      1024);
  conf.setInt(YarnConfiguration.RM_SCHEDULER_MAXIMUM_ALLOCATION_MB, 10240);
  conf.setBoolean(FairSchedulerConfiguration.ASSIGN_MULTIPLE, false);
  conf.setFloat(FairSchedulerConfiguration.PREEMPTION_THRESHOLD, 0f);
  return conf;
}
Example 5: testSplitOffStripe
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
@Test
public void testSplitOffStripe() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  // First test everything with the default split count of 2, then split into more.
  conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 2);
  Long[] toSplit = new Long[] { defaultSplitSize - 2, 1L, 1L };
  Long[] noSplit = new Long[] { defaultSplitSize - 2, 1L };
  long splitTargetSize = (long) (defaultSplitSize / defaultSplitCount);
  // Don't split if not eligible for compaction.
  StripeCompactionPolicy.StripeInformationProvider si =
      createStripesWithSizes(0, 0, new Long[] { defaultSplitSize - 2, 2L });
  assertNull(createPolicy(conf).selectCompaction(si, al(), false));
  // Make sure everything is eligible.
  conf.setFloat(CompactionConfiguration.HBASE_HSTORE_COMPACTION_RATIO_KEY, 500f);
  StripeCompactionPolicy policy = createPolicy(conf);
  verifyWholeStripesCompaction(policy, si, 0, 0, null, 2, splitTargetSize);
  // Add some extra stripes...
  si = createStripesWithSizes(0, 0, noSplit, noSplit, toSplit);
  verifyWholeStripesCompaction(policy, si, 2, 2, null, 2, splitTargetSize);
  // In the middle.
  si = createStripesWithSizes(0, 0, noSplit, toSplit, noSplit);
  verifyWholeStripesCompaction(policy, si, 1, 1, null, 2, splitTargetSize);
  // No split-off with a different config (larger split size).
  // However, in this case some eligible stripe will just be compacted alone.
  StripeCompactionPolicy specPolicy = createPolicy(
      conf, defaultSplitSize + 1, defaultSplitCount, defaultInitialCount, false);
  verifySingleStripeCompaction(specPolicy, si, 1, null);
}
Example 6: testAutoTunerShouldBeOffWhenMaxMinRangesForMemstoreIsNotGiven
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
@Test
public void testAutoTunerShouldBeOffWhenMaxMinRangesForMemstoreIsNotGiven() throws Exception {
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemorySizeUtil.MEMSTORE_SIZE_KEY, 0.02f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.03f);
  HeapMemoryManager manager = new HeapMemoryManager(new BlockCacheStub(0),
      new MemstoreFlusherStub(0), new RegionServerStub(conf), new RegionServerAccountingStub());
  assertFalse(manager.isTunerOn());
}
Example 7: initPolicy
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
private static void initPolicy(VolumeChoosingPolicy<FsVolumeSpi> policy,
    float preferencePercent) {
  Configuration conf = new Configuration();
  // Set the threshold for considering volumes imbalanced to 1MB.
  conf.setLong(
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_THRESHOLD_KEY,
      1024 * 1024); // 1MB
  conf.setFloat(
      DFS_DATANODE_AVAILABLE_SPACE_VOLUME_CHOOSING_POLICY_BALANCED_SPACE_PREFERENCE_FRACTION_KEY,
      preferencePercent);
  ((Configurable) policy).setConf(conf);
}
Example 8: testWhenClusterIsWriteHeavyWithEmptyMemstore
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
@Test
public void testWhenClusterIsWriteHeavyWithEmptyMemstore() throws Exception {
  BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
  MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
  RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub();
  // Empty block cache and memstore.
  blockCache.setTestBlockSize(0);
  regionServerAccounting.setTestMemstoreSize(0);
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.10f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.05f);
  conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
  conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
  // Let the system start with default values for memstore heap and block cache size.
  HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher,
      new RegionServerStub(conf), regionServerAccounting);
  long oldMemstoreHeapSize = memStoreFlusher.memstoreSize;
  long oldBlockCacheSize = blockCache.maxSize;
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
  heapMemoryManager.start(choreService);
  memStoreFlusher.flushType = FlushType.ABOVE_HIGHER_MARK;
  memStoreFlusher.requestFlush(null, false);
  memStoreFlusher.requestFlush(null, false);
  memStoreFlusher.requestFlush(null, false);
  memStoreFlusher.flushType = FlushType.ABOVE_LOWER_MARK;
  memStoreFlusher.requestFlush(null, false);
  // Allow the tuner to run once and make any necessary memory adjustments.
  Thread.sleep(1500);
  // The tuner should make no changes, since there is already plenty of empty space.
  assertEquals(oldMemstoreHeapSize, memStoreFlusher.memstoreSize);
  assertEquals(oldBlockCacheSize, blockCache.maxSize);
}
Example 9: testSplitOffStripeOffPeak
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
@Test
public void testSplitOffStripeOffPeak() throws Exception {
  // for HBASE-11439
  Configuration conf = HBaseConfiguration.create();
  conf.setInt(StripeStoreConfig.MIN_FILES_KEY, 2);
  // Select the last 2 files.
  StripeCompactionPolicy.StripeInformationProvider si =
      createStripesWithSizes(0, 0, new Long[] { defaultSplitSize - 2, 1L, 1L });
  assertEquals(2, createPolicy(conf).selectCompaction(si, al(), false).getRequest().getFiles()
      .size());
  // Make sure everything is eligible in offpeak.
  conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 500f);
  assertEquals(3, createPolicy(conf).selectCompaction(si, al(), true).getRequest().getFiles()
      .size());
}
Example 10: initialize
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
private void initialize() throws Exception {
  if (hbAdmin == null) {
    // make a copy, just to be sure we're not overriding someone else's config
    setConf(HBaseConfiguration.create(getConf()));
    Configuration conf = getConf();
    // disable blockcache for tool invocation, see HBASE-10500
    conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0);
    this.hbAdmin = new HBaseAdmin(conf);
    this.userProvider = UserProvider.instantiate(conf);
    this.fsDelegationToken = new FsDelegationToken(userProvider, "renewer");
    assignSeqIds = conf.getBoolean(ASSIGN_SEQ_IDS, true);
    maxFilesPerRegionPerFamily = conf.getInt(MAX_FILES_PER_REGION_PER_FAMILY, 32);
  }
}
Example 11: testWhenClusterIsReadHeavyWithEmptyBlockCache
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
@Test
public void testWhenClusterIsReadHeavyWithEmptyBlockCache() throws Exception {
  BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
  MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
  RegionServerAccountingStub regionServerAccounting = new RegionServerAccountingStub();
  // Empty block cache and memstore.
  blockCache.setTestBlockSize(0);
  regionServerAccounting.setTestMemstoreSize(0);
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.10f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.7f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.05f);
  conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
  conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
  // Let the system start with default values for memstore heap and block cache size.
  HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher,
      new RegionServerStub(conf), regionServerAccounting);
  long oldMemstoreHeapSize = memStoreFlusher.memstoreSize;
  long oldBlockCacheSize = blockCache.maxSize;
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
  heapMemoryManager.start(choreService);
  blockCache.evictBlock(null);
  blockCache.evictBlock(null);
  blockCache.evictBlock(null);
  // Allow the tuner to run once and make any necessary memory adjustments.
  Thread.sleep(1500);
  // The tuner should make no changes, since there is already plenty of empty space.
  assertEquals(oldMemstoreHeapSize, memStoreFlusher.memstoreSize);
  assertEquals(oldBlockCacheSize, blockCache.maxSize);
}
Example 12: testPluggingInHeapMemoryTuner
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
@Test
public void testPluggingInHeapMemoryTuner() throws Exception {
  BlockCacheStub blockCache = new BlockCacheStub((long) (maxHeapSize * 0.4));
  MemstoreFlusherStub memStoreFlusher = new MemstoreFlusherStub((long) (maxHeapSize * 0.4));
  Configuration conf = HBaseConfiguration.create();
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MAX_RANGE_KEY, 0.78f);
  conf.setFloat(HeapMemoryManager.MEMSTORE_SIZE_MIN_RANGE_KEY, 0.05f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MAX_RANGE_KEY, 0.75f);
  conf.setFloat(HeapMemoryManager.BLOCK_CACHE_SIZE_MIN_RANGE_KEY, 0.02f);
  conf.setLong(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_PERIOD, 1000);
  conf.setInt(DefaultHeapMemoryTuner.NUM_PERIODS_TO_IGNORE, 0);
  conf.setClass(HeapMemoryManager.HBASE_RS_HEAP_MEMORY_TUNER_CLASS, CustomHeapMemoryTuner.class,
      HeapMemoryTuner.class);
  // Let the system start with default values for memstore heap and block cache size.
  HeapMemoryManager heapMemoryManager = new HeapMemoryManager(blockCache, memStoreFlusher,
      new RegionServerStub(conf), new RegionServerAccountingStub());
  final ChoreService choreService = new ChoreService("TEST_SERVER_NAME");
  heapMemoryManager.start(choreService);
  // Now we want to be in write mode: set a bigger memstore size from CustomHeapMemoryTuner.
  CustomHeapMemoryTuner.memstoreSize = 0.78f;
  CustomHeapMemoryTuner.blockCacheSize = 0.02f;
  // Allow the tuner to run once and make any necessary memory adjustments.
  waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
  assertHeapSpace(0.78f, memStoreFlusher.memstoreSize); // Memstore
  assertHeapSpace(0.02f, blockCache.maxSize); // BlockCache
  // Now we want to be in read mode: set a bigger block cache size from CustomHeapMemoryTuner.
  CustomHeapMemoryTuner.blockCacheSize = 0.75f;
  CustomHeapMemoryTuner.memstoreSize = 0.05f;
  // Allow the tuner to run once and make any necessary memory adjustments.
  waitForTune(memStoreFlusher, memStoreFlusher.memstoreSize);
  assertHeapSpace(0.75f, blockCache.maxSize); // BlockCache
  assertHeapSpace(0.05f, memStoreFlusher.memstoreSize); // Memstore
}
Example 13: testGetFullDirs
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
@Test
public void testGetFullDirs() throws Exception {
  Configuration conf = new YarnConfiguration();
  conf.set(CommonConfigurationKeys.FS_PERMISSIONS_UMASK_KEY, "077");
  FileContext localFs = FileContext.getLocalFSFileContext(conf);
  String localDir1 = new File(testDir, "localDir1").getPath();
  String localDir2 = new File(testDir, "localDir2").getPath();
  String logDir1 = new File(testDir, "logDir1").getPath();
  String logDir2 = new File(testDir, "logDir2").getPath();
  Path localDir1Path = new Path(localDir1);
  Path logDir1Path = new Path(logDir1);
  FsPermission dirPermissions = new FsPermission((short) 0410);
  localFs.mkdir(localDir1Path, dirPermissions, true);
  localFs.mkdir(logDir1Path, dirPermissions, true);
  conf.set(YarnConfiguration.NM_LOCAL_DIRS, localDir1 + "," + localDir2);
  conf.set(YarnConfiguration.NM_LOG_DIRS, logDir1 + "," + logDir2);
  conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
      0.0f);
  NodeManagerMetrics nm = NodeManagerMetrics.create();
  LocalDirsHandlerService dirSvc = new LocalDirsHandlerService(nm);
  dirSvc.init(conf);
  Assert.assertEquals(0, dirSvc.getLocalDirs().size());
  Assert.assertEquals(0, dirSvc.getLogDirs().size());
  Assert.assertEquals(1, dirSvc.getDiskFullLocalDirs().size());
  Assert.assertEquals(1, dirSvc.getDiskFullLogDirs().size());
  // check the metrics
  Assert.assertEquals(2, nm.getBadLocalDirs());
  Assert.assertEquals(2, nm.getBadLogDirs());
  Assert.assertEquals(0, nm.getGoodLocalDirsDiskUtilizationPerc());
  Assert.assertEquals(0, nm.getGoodLogDirsDiskUtilizationPerc());
  conf.setFloat(YarnConfiguration.NM_MAX_PER_DISK_UTILIZATION_PERCENTAGE,
      100.0f);
  nm = NodeManagerMetrics.create();
  dirSvc = new LocalDirsHandlerService(nm);
  dirSvc.init(conf);
  Assert.assertEquals(1, dirSvc.getLocalDirs().size());
  Assert.assertEquals(1, dirSvc.getLogDirs().size());
  Assert.assertEquals(0, dirSvc.getDiskFullLocalDirs().size());
  Assert.assertEquals(0, dirSvc.getDiskFullLogDirs().size());
  // check the metrics
  File dir = new File(localDir1);
  int utilizationPerc =
      (int) ((dir.getTotalSpace() - dir.getUsableSpace()) * 100 /
          dir.getTotalSpace());
  Assert.assertEquals(1, nm.getBadLocalDirs());
  Assert.assertEquals(1, nm.getBadLogDirs());
  Assert.assertEquals(utilizationPerc,
      nm.getGoodLocalDirsDiskUtilizationPerc());
  Assert.assertEquals(utilizationPerc, nm.getGoodLogDirsDiskUtilizationPerc());
  // clean up all four test directories
  FileUtils.deleteDirectory(new File(localDir1));
  FileUtils.deleteDirectory(new File(localDir2));
  FileUtils.deleteDirectory(new File(logDir1));
  FileUtils.deleteDirectory(new File(logDir2));
  dirSvc.close();
}
Example 14: testCompletedMapsForReduceSlowstart
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
public void testCompletedMapsForReduceSlowstart() throws Exception {
  MRApp app = new MRApp(2, 1, false, this.getClass().getName(), true);
  Configuration conf = new Configuration();
  // after half of the maps complete, the reduce will start
  conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.5f);
  // uberization forces full slowstart (1.0), so disable that
  conf.setBoolean(MRJobConfig.JOB_UBERTASK_ENABLE, false);
  Job job = app.submit(conf);
  app.waitForState(job, JobState.RUNNING);
  // all maps would be running
  Assert.assertEquals("Num tasks not correct", 3, job.getTasks().size());
  Iterator<Task> it = job.getTasks().values().iterator();
  Task mapTask1 = it.next();
  Task mapTask2 = it.next();
  Task reduceTask = it.next();
  // all maps must be running
  app.waitForState(mapTask1, TaskState.RUNNING);
  app.waitForState(mapTask2, TaskState.RUNNING);
  TaskAttempt task1Attempt = mapTask1.getAttempts().values().iterator().next();
  TaskAttempt task2Attempt = mapTask2.getAttempts().values().iterator().next();
  // before sending the TA_DONE event, make sure the attempts have reached
  // the RUNNING state
  app.waitForState(task1Attempt, TaskAttemptState.RUNNING);
  app.waitForState(task2Attempt, TaskAttemptState.RUNNING);
  // reduces must be in NEW state
  Assert.assertEquals("Reduce Task state not correct",
      TaskState.NEW, reduceTask.getReport().getTaskState());
  // send the done signal to the 1st map task
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(
          mapTask1.getAttempts().values().iterator().next().getID(),
          TaskAttemptEventType.TA_DONE));
  // wait for the first map task to complete
  app.waitForState(mapTask1, TaskState.SUCCEEDED);
  // Once the first map completes, it will schedule the reduces;
  // now the reduce must be running
  app.waitForState(reduceTask, TaskState.RUNNING);
  // send the done signal to the 2nd map and the reduce to complete the job
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(
          mapTask2.getAttempts().values().iterator().next().getID(),
          TaskAttemptEventType.TA_DONE));
  app.getContext().getEventHandler().handle(
      new TaskAttemptEvent(
          reduceTask.getAttempts().values().iterator().next().getID(),
          TaskAttemptEventType.TA_DONE));
  app.waitForState(job, JobState.SUCCEEDED);
}
Example 15: testReducerRampdownDiagnostics
import org.apache.hadoop.conf.Configuration; // import the class this method depends on
@Test(timeout = 30000)
public void testReducerRampdownDiagnostics() throws Exception {
  LOG.info("Running testReducerRampdownDiagnostics");
  final Configuration conf = new Configuration();
  conf.setFloat(MRJobConfig.COMPLETED_MAPS_FOR_REDUCE_SLOWSTART, 0.0f);
  final MyResourceManager rm = new MyResourceManager(conf);
  rm.start();
  final DrainDispatcher dispatcher = (DrainDispatcher) rm.getRMContext()
      .getDispatcher();
  final RMApp app = rm.submitApp(1024);
  dispatcher.await();
  final String host = "host1";
  final MockNM nm = rm.registerNode(String.format("%s:1234", host), 2048);
  nm.nodeHeartbeat(true);
  dispatcher.await();
  final ApplicationAttemptId appAttemptId = app.getCurrentAppAttempt()
      .getAppAttemptId();
  rm.sendAMLaunched(appAttemptId);
  dispatcher.await();
  final JobId jobId = MRBuilderUtils
      .newJobId(appAttemptId.getApplicationId(), 0);
  final Job mockJob = mock(Job.class);
  when(mockJob.getReport()).thenReturn(
      MRBuilderUtils.newJobReport(jobId, "job", "user", JobState.RUNNING, 0,
          0, 0, 0, 0, 0, 0, "jobfile", null, false, ""));
  final MyContainerAllocator allocator = new MyContainerAllocator(rm, conf,
      appAttemptId, mockJob);
  // add resources to scheduler
  dispatcher.await();
  // create the container request
  final String[] locations = new String[] { host };
  allocator.sendRequest(createReq(jobId, 0, 1024, locations, false, true));
  for (int i = 0; i < 1;) {
    dispatcher.await();
    i += allocator.schedule().size();
    nm.nodeHeartbeat(true);
  }
  allocator.sendRequest(createReq(jobId, 0, 1024, locations, true, false));
  while (allocator.getTaskAttemptKillEvents().size() == 0) {
    dispatcher.await();
    allocator.schedule().size();
    nm.nodeHeartbeat(true);
  }
  final String killEventMessage = allocator.getTaskAttemptKillEvents().get(0)
      .getMessage();
  Assert.assertTrue("No reducer rampDown preemption message",
      killEventMessage.contains(RMContainerAllocator.RAMPDOWN_DIAGNOSTIC));
}