This article collects typical usages of the Java method org.apache.hadoop.yarn.api.records.Resource.equals. If you are wondering what Resource.equals does, or how and where to use it, the hand-picked code examples below may help. You can also read more about the enclosing class, org.apache.hadoop.yarn.api.records.Resource.
The following presents 7 code examples of the Resource.equals method, sorted by popularity by default.
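Before the examples, here is a minimal, hedged sketch of what Resource.equals compares: the resource dimensions of two records, such as memory and virtual cores. The two-argument Resource.newInstance factory used here is the stock YARN one; some examples later in this article come from a GPU-patched Resource that also carries GPU cores.

import org.apache.hadoop.yarn.api.records.Resource;

public class ResourceEqualsSketch {
  public static void main(String[] args) {
    Resource a = Resource.newInstance(1024, 1); // 1024 MB, 1 virtual core
    Resource b = Resource.newInstance(1024, 1);
    Resource c = Resource.newInstance(2048, 2);
    System.out.println(a.equals(b)); // true: identical memory and vcores
    System.out.println(a.equals(c)); // false: different memory and vcores
  }
}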
Example 1: compare
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class the method depends on
@Override
public int compare(Resource clusterResource, Resource lhs, Resource rhs) {
  if (lhs.equals(rhs)) {
    return 0;
  }
  // Per-dimension shares of the cluster (memory, virtual cores, GPU cores),
  // sorted in ascending order.
  float[] lValues = new float[] {
      (clusterResource.getMemory() != 0) ?
          (float) lhs.getMemory() / clusterResource.getMemory() : lhs.getMemory(),
      (clusterResource.getVirtualCores() != 0) ?
          (float) lhs.getVirtualCores() / clusterResource.getVirtualCores() : lhs.getVirtualCores(),
      (clusterResource.getGpuCores() != 0) ?
          (float) lhs.getGpuCores() / clusterResource.getGpuCores() : 0.0f };
  Arrays.sort(lValues);
  float[] rValues = new float[] {
      (clusterResource.getMemory() != 0) ?
          (float) rhs.getMemory() / clusterResource.getMemory() : rhs.getMemory(),
      (clusterResource.getVirtualCores() != 0) ?
          (float) rhs.getVirtualCores() / clusterResource.getVirtualCores() : rhs.getVirtualCores(),
      (clusterResource.getGpuCores() != 0) ?
          (float) rhs.getGpuCores() / clusterResource.getGpuCores() : 0.0f };
  Arrays.sort(rValues);
  // The last differing pair (i.e. the largest shares) decides the ordering.
  int diff = 0;
  for (int i = 0; i < 3; i++) {
    float l = lValues[i];
    float r = rValues[i];
    if (l < r) {
      diff = -1;
    } else if (l > r) {
      diff = 1;
    }
  }
  return diff;
}
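As a hedged walk-through of the comparison above, with made-up cluster and application sizes, the sorted share vectors can be computed with plain floats:

// Hypothetical cluster of 10240 MB and 10 vcores; GPU dimension left at 0.
float[] lhsShares = { 4096f / 10240f, 2f / 10f, 0.0f };  // {0.4, 0.2, 0.0}
float[] rhsShares = { 2048f / 10240f, 5f / 10f, 0.0f };  // {0.2, 0.5, 0.0}
java.util.Arrays.sort(lhsShares);  // {0.0, 0.2, 0.4}
java.util.Arrays.sort(rhsShares);  // {0.0, 0.2, 0.5}
// The loop keeps the result of the last differing comparison, so the largest
// shares (0.4 vs. 0.5) decide the order: compare(...) returns -1 for these inputs.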
Example 2: assignContainer
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class the method depends on
@Override
public Resource assignContainer(FSSchedulerNode node) {
  Resource assigned = Resources.none();
  if (LOG.isDebugEnabled()) {
    LOG.debug("Node " + node.getNodeName() + " offered to queue: " + getName());
  }
  if (!assignContainerPreCheck(node)) {
    return assigned;
  }
  Comparator<Schedulable> comparator = policy.getComparator();
  writeLock.lock();
  try {
    Collections.sort(runnableApps, comparator);
  } finally {
    writeLock.unlock();
  }
  // Release the write lock here for better performance and to avoid deadlocks.
  // runnableApps can become unsorted because of this gap, but that is
  // acceptable in practice since the probability is low.
  readLock.lock();
  try {
    for (FSAppAttempt sched : runnableApps) {
      if (SchedulerAppUtils.isBlacklisted(sched, node, LOG)) {
        continue;
      }
      assigned = sched.assignContainer(node);
      if (!assigned.equals(Resources.none())) {
        break;
      }
    }
  } finally {
    readLock.unlock();
  }
  return assigned;
}
Example 3: updateNodeResource
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class the method depends on
/**
 * Process resource update on a node.
 */
public synchronized void updateNodeResource(RMNode nm,
    ResourceOption resourceOption) {
  SchedulerNode node = getSchedulerNode(nm.getNodeID());
  Resource newResource = resourceOption.getResource();
  Resource oldResource = node.getTotalResource();
  if (!oldResource.equals(newResource)) {
    // Notify NodeLabelsManager about this change
    rmContext.getNodeLabelManager().updateNodeResource(nm.getNodeID(),
        newResource);
    // Log resource change
    LOG.info("Update resource on node: " + node.getNodeName()
        + " from: " + oldResource + ", to: " + newResource);
    nodes.remove(nm.getNodeID());
    updateMaximumAllocation(node, false);
    // update resource to node
    node.setTotalResource(newResource);
    nodes.put(nm.getNodeID(), (N) node);
    updateMaximumAllocation(node, true);
    // update resource to clusterResource
    Resources.subtractFrom(clusterResource, oldResource);
    Resources.addTo(clusterResource, newResource);
  } else {
    // Log resource change
    LOG.warn("Update resource on node: " + node.getNodeName()
        + " with the same resource: " + newResource);
  }
}
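A hedged sketch of driving this update path; the scheduler and rmNode references and the new capacity are hypothetical, and -1 is assumed here to mean "no over-commit timeout":

Resource updated = Resource.newInstance(16384, 16);
ResourceOption option = ResourceOption.newInstance(updated, -1); // -1: assumed no over-commit timeout
scheduler.updateNodeResource(rmNode, option); // hypothetical scheduler and RMNode references
// If the node already reports 16384 MB / 16 vcores, oldResource.equals(newResource)
// is true and only the warning branch runs; clusterResource stays untouched.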
Example 4: checkFSQueue
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class the method depends on
private void checkFSQueue(ResourceManager rm,
    SchedulerApplication schedulerApp, Resource usedResources,
    Resource availableResources) throws Exception {
  // waiting for RM's scheduling apps
  int retry = 0;
  Resource assumedFairShare = Resource.newInstance(8192, 8);
  while (true) {
    Thread.sleep(100);
    if (assumedFairShare.equals(((FairScheduler) rm.getResourceScheduler())
        .getQueueManager().getRootQueue().getFairShare())) {
      break;
    }
    retry++;
    if (retry > 30) {
      Assert.fail("Apps are not scheduled within assumed timeout");
    }
  }
  FairScheduler scheduler = (FairScheduler) rm.getResourceScheduler();
  FSParentQueue root = scheduler.getQueueManager().getRootQueue();
  // ************ check cluster used Resources ********
  assertTrue(root.getPolicy() instanceof DominantResourceFairnessPolicy);
  assertEquals(usedResources, root.getResourceUsage());
  // ************ check app headroom ****************
  FSAppAttempt schedulerAttempt =
      (FSAppAttempt) schedulerApp.getCurrentAppAttempt();
  assertEquals(availableResources, schedulerAttempt.getHeadroom());
  // ************ check queue metrics ****************
  QueueMetrics queueMetrics = scheduler.getRootQueueMetrics();
  assertMetrics(queueMetrics, 1, 0, 1, 0, 2, availableResources.getMemory(),
      availableResources.getVirtualCores(), usedResources.getMemory(),
      usedResources.getVirtualCores());
}
Example 5: equals
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class the method depends on
public static boolean equals(Resource lhs, Resource rhs) {
  return lhs.equals(rhs);
}
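This helper matches the signature of the equals method in YARN's Resources utility class; a hedged usage sketch follows (note that it delegates directly to lhs.equals, so a null lhs would throw a NullPointerException):

Resource allocated = Resource.newInstance(2048, 1);
Resource requested = Resource.newInstance(2048, 1);
boolean same = Resources.equals(allocated, requested); // true: both are 2048 MB / 1 vcore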
Example 6: addInterval
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class the method depends on
/**
 * Add a resource for the specified interval
 *
 * @param reservationInterval the interval for which the resource is to be
 *          added
 * @param capacity the resource to be added
 * @return true if addition is successful, false otherwise
 */
public boolean addInterval(ReservationInterval reservationInterval,
    ReservationRequest capacity) {
  Resource totCap =
      Resources.multiply(capacity.getCapability(),
          (float) capacity.getNumContainers());
  if (totCap.equals(ZERO_RESOURCE)) {
    return true;
  }
  writeLock.lock();
  try {
    long startKey = reservationInterval.getStartTime();
    long endKey = reservationInterval.getEndTime();
    NavigableMap<Long, Resource> ticks =
        cumulativeCapacity.headMap(endKey, false);
    if (ticks != null && !ticks.isEmpty()) {
      Resource updatedCapacity = Resource.newInstance(0, 0, 0);
      Entry<Long, Resource> lowEntry = ticks.floorEntry(startKey);
      if (lowEntry == null) {
        // This is the earliest starting interval
        cumulativeCapacity.put(startKey, totCap);
      } else {
        updatedCapacity = Resources.add(lowEntry.getValue(), totCap);
        // Add a new tick only if the updated value is different
        // from the previous tick
        if ((startKey == lowEntry.getKey())
            && (isSameAsPrevious(lowEntry.getKey(), updatedCapacity))) {
          cumulativeCapacity.remove(lowEntry.getKey());
        } else {
          cumulativeCapacity.put(startKey, updatedCapacity);
        }
      }
      // Increase all the capacities of overlapping intervals
      Set<Entry<Long, Resource>> overlapSet =
          ticks.tailMap(startKey, false).entrySet();
      for (Entry<Long, Resource> entry : overlapSet) {
        updatedCapacity = Resources.add(entry.getValue(), totCap);
        entry.setValue(updatedCapacity);
      }
    } else {
      // This is the first interval to be added
      cumulativeCapacity.put(startKey, totCap);
    }
    Resource nextTick = cumulativeCapacity.get(endKey);
    if (nextTick != null) {
      // If there is overlap, remove the duplicate entry
      if (isSameAsPrevious(endKey, nextTick)) {
        cumulativeCapacity.remove(endKey);
      }
    } else {
      // Decrease capacity as this is the end of the interval
      cumulativeCapacity.put(endKey, Resources.subtract(cumulativeCapacity
          .floorEntry(endKey).getValue(), totCap));
    }
    return true;
  } finally {
    writeLock.unlock();
  }
}
Example 7: removeInterval
import org.apache.hadoop.yarn.api.records.Resource; // import the package/class the method depends on
/**
 * Removes a resource for the specified interval
 *
 * @param reservationInterval the interval for which the resource is to be
 *          removed
 * @param capacity the resource to be removed
 * @return true if removal is successful, false otherwise
 */
public boolean removeInterval(ReservationInterval reservationInterval,
    ReservationRequest capacity) {
  Resource totCap =
      Resources.multiply(capacity.getCapability(),
          (float) capacity.getNumContainers());
  if (totCap.equals(ZERO_RESOURCE)) {
    return true;
  }
  writeLock.lock();
  try {
    long startKey = reservationInterval.getStartTime();
    long endKey = reservationInterval.getEndTime();
    // update the start key
    NavigableMap<Long, Resource> ticks =
        cumulativeCapacity.headMap(endKey, false);
    // Decrease all the capacities of overlapping intervals
    SortedMap<Long, Resource> overlapSet = ticks.tailMap(startKey);
    if (overlapSet != null && !overlapSet.isEmpty()) {
      Resource updatedCapacity = Resource.newInstance(0, 0, 0);
      long currentKey = -1;
      for (Iterator<Entry<Long, Resource>> overlapEntries =
          overlapSet.entrySet().iterator(); overlapEntries.hasNext();) {
        Entry<Long, Resource> entry = overlapEntries.next();
        currentKey = entry.getKey();
        updatedCapacity = Resources.subtract(entry.getValue(), totCap);
        // update each entry between start and end key
        cumulativeCapacity.put(currentKey, updatedCapacity);
      }
      // Remove the first overlap entry if it is the same as the previous one
      // after the update
      Long firstKey = overlapSet.firstKey();
      if (isSameAsPrevious(firstKey, overlapSet.get(firstKey))) {
        cumulativeCapacity.remove(firstKey);
      }
      // Remove the next entry if it is the same as the end entry after the update
      if ((currentKey != -1) && (isSameAsNext(currentKey, updatedCapacity))) {
        cumulativeCapacity.remove(cumulativeCapacity.higherKey(currentKey));
      }
    }
    return true;
  } finally {
    writeLock.unlock();
  }
}
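A hedged usage sketch of the two interval methods above; the rleAllocation instance is hypothetical, while ReservationInterval and ReservationRequest are the YARN reservation types from the method signatures:

ReservationInterval window = new ReservationInterval(0L, 10000L);
ReservationRequest request = ReservationRequest.newInstance(
    Resource.newInstance(1024, 1), 4);           // 4 containers of 1024 MB / 1 vcore
rleAllocation.addInterval(window, request);      // raises capacity by 4096 MB / 4 vcores over [0, 10000)
rleAllocation.removeInterval(window, request);   // restores the previous allocation
// A request whose total capability equals ZERO_RESOURCE is a no-op:
// both methods return true immediately because of the totCap.equals check.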