This article collects typical usage examples of the Java class org.apache.hadoop.fs.InvalidRequestException. If you have been wondering what InvalidRequestException is for, how to use it, or where to find real-world examples, the curated class examples below should help.
The InvalidRequestException class belongs to the org.apache.hadoop.fs package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
Example 1: validate
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
public static void validate(CachePoolInfo info) throws IOException {
if (info == null) {
throw new InvalidRequestException("CachePoolInfo is null");
}
if ((info.getLimit() != null) && (info.getLimit() < 0)) {
throw new InvalidRequestException("Limit is negative.");
}
if (info.getMaxRelativeExpiryMs() != null) {
long maxRelativeExpiryMs = info.getMaxRelativeExpiryMs();
if (maxRelativeExpiryMs < 0L) {
throw new InvalidRequestException("Max relative expiry is negative.");
}
if (maxRelativeExpiryMs > Expiration.MAX_RELATIVE_EXPIRY_MS) {
throw new InvalidRequestException("Max relative expiry is too big.");
}
}
validateName(info.poolName);
}
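As a companion to the validator above, here is a minimal client-side sketch (not part of the original example set; the pool name and cluster configuration are assumptions) that trips this check through DistributedFileSystem.addCachePool:
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CachePoolInfo;

public class AddCachePoolDemo {
  public static void main(String[] args) throws Exception {
    // Assumes fs.defaultFS in the Configuration points at a running HDFS cluster.
    Configuration conf = new Configuration();
    DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
    CachePoolInfo info = new CachePoolInfo("demo-pool") // hypothetical pool name
        .setLimit(-1L); // negative limit: validate() rejects this
    try {
      dfs.addCachePool(info);
    } catch (IOException e) {
      // Surfaces as InvalidRequestException ("Limit is negative."),
      // possibly wrapped in a RemoteException by the RPC layer.
      System.err.println("Pool rejected: " + e.getMessage());
    }
  }
}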
Example 2: checkLimit
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
/**
* Throws an exception if the CachePool does not have enough capacity to
* cache the given path at the replication factor.
*
* @param pool CachePool where the path is being cached
* @param path Path that is being cached
* @param replication Replication factor of the path
* @throws InvalidRequestException if the pool does not have enough capacity
*/
private void checkLimit(CachePool pool, String path,
short replication) throws InvalidRequestException {
CacheDirectiveStats stats = computeNeeded(path, replication);
if (pool.getLimit() == CachePoolInfo.LIMIT_UNLIMITED) {
return;
}
if (pool.getBytesNeeded() + (stats.getBytesNeeded() * replication) > pool
.getLimit()) {
throw new InvalidRequestException("Caching path " + path + " of size "
+ stats.getBytesNeeded() / replication + " bytes at replication "
+ replication + " would exceed pool " + pool.getPoolName()
+ "'s remaining capacity of "
+ (pool.getLimit() - pool.getBytesNeeded()) + " bytes.");
}
}
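To make the capacity check concrete: if a pool's limit is 10 GB and its existing directives already need 8 GB, only 2 GB of capacity remains; a new request whose replicated footprint is 3 GB (say, a 1 GB file at replication 3) would exceed it and is rejected with the message above.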
Example 3: removeInternal
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
private void removeInternal(CacheDirective directive)
throws InvalidRequestException {
assert namesystem.hasWriteLock();
// Remove the corresponding entry in directivesByPath.
String path = directive.getPath();
List<CacheDirective> directives = directivesByPath.get(path);
if (directives == null || !directives.remove(directive)) {
throw new InvalidRequestException("Failed to locate entry " +
directive.getId() + " by path " + directive.getPath());
}
if (directives.size() == 0) {
directivesByPath.remove(path);
}
// Fix up the stats from removing the pool
final CachePool pool = directive.getPool();
directive.addBytesNeeded(-directive.getBytesNeeded());
directive.addFilesNeeded(-directive.getFilesNeeded());
directivesById.remove(directive.getId());
pool.getDirectiveList().remove(directive);
assert directive.getPool() == null;
setNeedsRescan();
}
Example 4: unregisterSlot
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
public synchronized void unregisterSlot(SlotId slotId)
throws InvalidRequestException {
if (!enabled) {
if (LOG.isTraceEnabled()) {
LOG.trace("unregisterSlot: ShortCircuitRegistry is " +
"not enabled.");
}
throw new UnsupportedOperationException();
}
ShmId shmId = slotId.getShmId();
RegisteredShm shm = segments.get(shmId);
if (shm == null) {
throw new InvalidRequestException("there is no shared memory segment " +
"registered with shmId " + shmId);
}
Slot slot = shm.getSlot(slotId.getSlotIdx());
slot.makeInvalid();
shm.unregisterSlot(slotId.getSlotIdx());
slots.remove(slot.getBlockId(), slot);
}
Example 5: makeRequest
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
@Override
public BatchedEntries<CacheDirectiveEntry> makeRequest(Long prevKey)
throws IOException {
BatchedEntries<CacheDirectiveEntry> entries = null;
TraceScope scope = Trace.startSpan("listCacheDirectives", traceSampler);
try {
entries = namenode.listCacheDirectives(prevKey, filter);
} catch (IOException e) {
if (e.getMessage().contains("Filtering by ID is unsupported")) {
// Retry case for old servers, do the filtering client-side
long id = filter.getId();
filter = removeIdFromFilter(filter);
// Using id - 1 as prevId should get us a window containing the id
// This is somewhat brittle, since it depends on directives being
// returned in order of ascending ID.
entries = namenode.listCacheDirectives(id - 1, filter);
for (int i=0; i<entries.size(); i++) {
CacheDirectiveEntry entry = entries.get(i);
if (entry.getInfo().getId().equals((Long)id)) {
return new SingleEntry(entry);
}
}
throw new RemoteException(InvalidRequestException.class.getName(),
"Did not find requested id " + id);
}
throw e;
} finally {
scope.close();
}
Preconditions.checkNotNull(entries);
return entries;
}
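This makeRequest implementation is the batching engine behind the RemoteIterator that listCacheDirectives returns. A hedged usage sketch (the pool name is an illustrative assumption; dfs is a connected DistributedFileSystem, as in the earlier sketch):
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveEntry;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class ListDirectivesDemo {
  // Prints every cache directive in the given pool. Result batches are
  // fetched lazily via makeRequest() as the iterator advances.
  static void listPool(DistributedFileSystem dfs, String poolName)
      throws Exception {
    CacheDirectiveInfo filter = new CacheDirectiveInfo.Builder()
        .setPool(poolName)
        .build();
    RemoteIterator<CacheDirectiveEntry> it = dfs.listCacheDirectives(filter);
    while (it.hasNext()) {
      CacheDirectiveEntry entry = it.next();
      System.out.println(entry.getInfo().getId() + " -> "
          + entry.getInfo().getPath());
    }
  }
}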
Example 6: getSlot
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
synchronized public final Slot getSlot(int slotIdx)
throws InvalidRequestException {
if (!allocatedSlots.get(slotIdx)) {
throw new InvalidRequestException(this + ": slot " + slotIdx +
" does not exist.");
}
return slots[slotIdx];
}
Example 7: registerSlot
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
/**
* Register a slot.
*
* This function looks at a slot which has already been initialized (by
* another process), and registers it with us. Then, it returns the
* relevant Slot object.
*
* @return The slot.
*
* @throws InvalidRequestException
* If the slot index we're trying to allocate has not been
* initialized, or is already in use.
*/
synchronized public final Slot registerSlot(int slotIdx,
ExtendedBlockId blockId) throws InvalidRequestException {
if (slotIdx < 0) {
throw new InvalidRequestException(this + ": invalid negative slot " +
"index " + slotIdx);
}
if (slotIdx >= slots.length) {
throw new InvalidRequestException(this + ": invalid slot " +
"index " + slotIdx);
}
if (allocatedSlots.get(slotIdx)) {
throw new InvalidRequestException(this + ": slot " + slotIdx +
" is already in use.");
}
Slot slot = new Slot(calculateSlotAddress(slotIdx), blockId);
if (!slot.isValid()) {
throw new InvalidRequestException(this + ": slot " + slotIdx +
" is not marked as valid.");
}
slots[slotIdx] = slot;
allocatedSlots.set(slotIdx, true);
if (LOG.isTraceEnabled()) {
LOG.trace(this + ": registerSlot " + slotIdx + ": allocatedSlots=" + allocatedSlots +
StringUtils.getStackTrace(Thread.currentThread()));
}
return slot;
}
Example 8: validatePoolName
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
private static String validatePoolName(CacheDirectiveInfo directive)
throws InvalidRequestException {
String pool = directive.getPool();
if (pool == null) {
throw new InvalidRequestException("No pool specified.");
}
if (pool.isEmpty()) {
throw new InvalidRequestException("Invalid empty pool name.");
}
return pool;
}
Example 9: validatePath
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
private static String validatePath(CacheDirectiveInfo directive)
throws InvalidRequestException {
if (directive.getPath() == null) {
throw new InvalidRequestException("No path specified.");
}
String path = directive.getPath().toUri().getPath();
if (!DFSUtil.isValidName(path)) {
throw new InvalidRequestException("Invalid path '" + path + "'.");
}
return path;
}
Example 10: validateReplication
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
private static short validateReplication(CacheDirectiveInfo directive,
short defaultValue) throws InvalidRequestException {
short repl = (directive.getReplication() != null)
? directive.getReplication() : defaultValue;
if (repl <= 0) {
throw new InvalidRequestException("Invalid replication factor " + repl
+ " <= 0");
}
return repl;
}
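Examples 8 through 10 are the per-field validators that run when a directive is added. The following minimal sketch (path and pool name are illustrative assumptions) shows a client-side addCacheDirective call that must satisfy all three:
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class AddDirectiveDemo {
  static long addDirective(DistributedFileSystem dfs) throws Exception {
    // Pool must be non-empty, the path must be a valid HDFS name, and
    // replication must be > 0, or an InvalidRequestException results.
    return dfs.addCacheDirective(new CacheDirectiveInfo.Builder()
        .setPath(new Path("/data/hot"))  // hypothetical path
        .setPool("demo-pool")            // hypothetical pool
        .setReplication((short) 2)
        .build());
  }
}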
Example 11: validateExpiryTime
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
/**
* Calculates the absolute expiry time of the directive from the
* {@link CacheDirectiveInfo.Expiration}. This converts a relative Expiration
* into an absolute time based on the local clock.
*
* @param info to validate.
* @param maxRelativeExpiryTime of the info's pool.
* @return the expiration time, or the pool's max absolute expiration if the
* info's expiration was not set.
* @throws InvalidRequestException if the info's Expiration is invalid.
*/
private static long validateExpiryTime(CacheDirectiveInfo info,
long maxRelativeExpiryTime) throws InvalidRequestException {
LOG.trace("Validating directive {} pool maxRelativeExpiryTime {}", info,
maxRelativeExpiryTime);
final long now = new Date().getTime();
final long maxAbsoluteExpiryTime = now + maxRelativeExpiryTime;
if (info == null || info.getExpiration() == null) {
return maxAbsoluteExpiryTime;
}
Expiration expiry = info.getExpiration();
if (expiry.getMillis() < 0L) {
throw new InvalidRequestException("Cannot set a negative expiration: "
+ expiry.getMillis());
}
long relExpiryTime, absExpiryTime;
if (expiry.isRelative()) {
relExpiryTime = expiry.getMillis();
absExpiryTime = now + relExpiryTime;
} else {
absExpiryTime = expiry.getMillis();
relExpiryTime = absExpiryTime - now;
}
// Need to cap the expiry so we don't overflow a long when doing math
if (relExpiryTime > Expiration.MAX_RELATIVE_EXPIRY_MS) {
throw new InvalidRequestException("Expiration "
+ expiry.toString() + " is too far in the future!");
}
// Fail if the requested expiry is greater than the max
if (relExpiryTime > maxRelativeExpiryTime) {
throw new InvalidRequestException("Expiration " + expiry.toString()
+ " exceeds the max relative expiration time of "
+ maxRelativeExpiryTime + " ms.");
}
return absExpiryTime;
}
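The relative and absolute forms that validateExpiryTime normalizes are built by the caller via CacheDirectiveInfo.Expiration. A brief sketch of both (the one-hour value is an arbitrary illustration):
import java.util.Date;
import org.apache.hadoop.hdfs.protocol.CacheDirectiveInfo;

public class ExpirationDemo {
  public static void main(String[] args) {
    // Relative: "one hour from now", converted to an absolute time on the server.
    CacheDirectiveInfo.Expiration rel =
        CacheDirectiveInfo.Expiration.newRelative(60 * 60 * 1000L);
    // Absolute: a fixed wall-clock instant.
    CacheDirectiveInfo.Expiration abs =
        CacheDirectiveInfo.Expiration.newAbsolute(
            new Date().getTime() + 60 * 60 * 1000L);
    System.out.println(rel + " / " + abs);
  }
}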
Example 12: getById
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
/**
* Get a CacheDirective by ID, validating the ID and that the directive
* exists.
*/
private CacheDirective getById(long id) throws InvalidRequestException {
// Check for invalid IDs.
if (id <= 0) {
throw new InvalidRequestException("Invalid negative ID.");
}
// Find the directive.
CacheDirective directive = directivesById.get(id);
if (directive == null) {
throw new InvalidRequestException("No directive with ID " + id
+ " found.");
}
return directive;
}
Example 13: getCachePool
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
/**
* Get a CachePool by name, validating that it exists.
*/
private CachePool getCachePool(String poolName)
throws InvalidRequestException {
CachePool pool = cachePools.get(poolName);
if (pool == null) {
throw new InvalidRequestException("Unknown pool " + poolName);
}
return pool;
}
Example 14: addDirectiveFromEditLog
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
/**
* Adds a directive, skipping most error checking. This should only be called
* internally in special scenarios like edit log replay.
*/
CacheDirectiveInfo addDirectiveFromEditLog(CacheDirectiveInfo directive)
throws InvalidRequestException {
long id = directive.getId();
CacheDirective entry = new CacheDirective(directive);
CachePool pool = cachePools.get(directive.getPool());
addInternal(entry, pool);
if (nextDirectiveId <= id) {
nextDirectiveId = id + 1;
}
return entry.toInfo();
}
Example 15: modifyDirectiveFromEditLog
import org.apache.hadoop.fs.InvalidRequestException; // import the required package/class
/**
* Modifies a directive, skipping most error checking. This is for careful
* internal use only. modifyDirective can be non-deterministic since its error
* checking depends on current system time, which poses a problem for edit log
* replay.
*/
void modifyDirectiveFromEditLog(CacheDirectiveInfo info)
throws InvalidRequestException {
// Check for invalid IDs.
Long id = info.getId();
if (id == null) {
throw new InvalidRequestException("Must supply an ID.");
}
CacheDirective prevEntry = getById(id);
CacheDirectiveInfo newInfo = createFromInfoAndDefaults(info, prevEntry);
removeInternal(prevEntry);
addInternal(new CacheDirective(newInfo), getCachePool(newInfo.getPool()));
}