This article collects typical usage examples of the Java class org.apache.hadoop.HadoopIllegalArgumentException. If you are wondering what HadoopIllegalArgumentException is for and how to use it, the curated examples below should help.
The HadoopIllegalArgumentException class belongs to the org.apache.hadoop package. Fifteen code examples of the class are presented below, ordered by popularity by default.
Example 1: parseStaticMapping
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
private void parseStaticMapping(Configuration conf) {
  String staticMapping = conf.get(
      CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES,
      CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES_DEFAULT);
  Collection<String> mappings = StringUtils.getStringCollection(
      staticMapping, ";");
  for (String users : mappings) {
    Collection<String> userToGroups = StringUtils.getStringCollection(users,
        "=");
    if (userToGroups.size() < 1 || userToGroups.size() > 2) {
      throw new HadoopIllegalArgumentException("Configuration "
          + CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES
          + " is invalid");
    }
    String[] userToGroupsArray = userToGroups.toArray(new String[userToGroups
        .size()]);
    String user = userToGroupsArray[0];
    List<String> groups = Collections.emptyList();
    if (userToGroupsArray.length == 2) {
      groups = (List<String>) StringUtils
          .getStringCollection(userToGroupsArray[1]);
    }
    staticUserToGroupsMap.put(user, groups);
  }
}
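As a usage sketch, here is what the configuration value parsed above looks like (the key constant is the one used in the method; the user and group names are made up). Entries are separated by ';', a user and its comma-separated groups by '=', and an empty group list is allowed:

Configuration conf = new Configuration();
// Illustrative mapping: dr.who gets no groups, joe gets two.
conf.set(CommonConfigurationKeys.HADOOP_USER_GROUP_STATIC_OVERRIDES,
    "dr.who=;joe=analysts,admins");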
Example 2: joinElection
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
/**
 * To participate in election, the app will call joinElection. The result will
 * be notified by a callback on either the becomeActive or becomeStandby app
 * interfaces. <br/>
 * After this the elector will automatically monitor the leader status and
 * perform re-election if necessary<br/>
 * The app could potentially start off in standby mode and ignore the
 * becomeStandby call.
 *
 * @param data
 *          to be set by the app. non-null data must be set.
 * @throws HadoopIllegalArgumentException
 *           if valid data is not supplied
 */
public synchronized void joinElection(byte[] data)
    throws HadoopIllegalArgumentException {
  if (data == null) {
    throw new HadoopIllegalArgumentException("data cannot be null");
  }

  if (wantToBeInElection) {
    LOG.info("Already in election. Not re-connecting.");
    return;
  }

  appData = new byte[data.length];
  System.arraycopy(data, 0, appData, 0, data.length);

  if (LOG.isDebugEnabled()) {
    LOG.debug("Attempting active election for " + this);
  }
  joinElectionInternal();
}
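A minimal caller sketch, assuming this snippet comes from org.apache.hadoop.ha.ActiveStandbyElector (which it matches) and using a made-up payload; in HDFS the payload is typically a serialized description of the local node:

import java.nio.charset.StandardCharsets;

void enterElection(ActiveStandbyElector elector) {
  // Opaque bytes identifying this node to other electors; must be non-null.
  byte[] appData = "nn1.example.com:8020".getBytes(StandardCharsets.UTF_8);
  elector.joinElection(appData);  // null data -> HadoopIllegalArgumentException
}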
Example 3: processOptions
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
  name = StringUtils.popOptionWithArgument("-n", args);
  String v = StringUtils.popOptionWithArgument("-v", args);
  if (v != null) {
    value = XAttrCodec.decodeValue(v);
  }
  xname = StringUtils.popOptionWithArgument("-x", args);

  if (name != null && xname != null) {
    throw new HadoopIllegalArgumentException(
        "Can not specify both '-n name' and '-x name' option.");
  }
  if (name == null && xname == null) {
    throw new HadoopIllegalArgumentException(
        "Must specify '-n name' or '-x name' option.");
  }

  if (args.isEmpty()) {
    throw new HadoopIllegalArgumentException("<path> is missing.");
  }
  if (args.size() > 1) {
    throw new HadoopIllegalArgumentException("Too many arguments.");
  }
}
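The checks enforce: exactly one of '-n name' (set an attribute) or '-x name' (remove it), plus exactly one path. A sketch with made-up values:

// Hypothetical argument lists for the parser above.
LinkedList<String> setArgs = new LinkedList<>(
    Arrays.asList("-n", "user.owner", "-v", "alice", "/data/file"));
LinkedList<String> removeArgs = new LinkedList<>(
    Arrays.asList("-x", "user.owner", "/data/file"));
// Passing both -n and -x, or omitting <path>, throws HadoopIllegalArgumentException.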
Example 4: put
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
@Override
public E put(final E entry) {
  if (!(entry instanceof Entry)) {
    throw new HadoopIllegalArgumentException(
        "!(entry instanceof Entry), entry.getClass()=" + entry.getClass());
  }

  evictExpiredEntries();

  final E existing = super.put(entry);
  if (existing != null) {
    queue.remove(existing);
  }

  final Entry e = (Entry)entry;
  setExpirationTime(e, creationExpirationPeriod);
  queue.offer(e);

  evictEntries();
  return existing;
}
Example 5: build
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
/**
 * Build the RPC Server.
 * @throws IOException on error
 * @throws HadoopIllegalArgumentException when mandatory fields are not set
 */
public Server build() throws IOException, HadoopIllegalArgumentException {
  if (this.conf == null) {
    throw new HadoopIllegalArgumentException("conf is not set");
  }
  if (this.protocol == null) {
    throw new HadoopIllegalArgumentException("protocol is not set");
  }
  if (this.instance == null) {
    throw new HadoopIllegalArgumentException("instance is not set");
  }

  return getProtocolEngine(this.protocol, this.conf).getServer(
      this.protocol, this.instance, this.bindAddress, this.port,
      this.numHandlers, this.numReaders, this.queueSizePerHandler,
      this.verbose, this.conf, this.secretManager, this.portRangeConfig);
}
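A builder-usage sketch; the protocol class and implementation are hypothetical placeholders, and the setter names are assumed from org.apache.hadoop.ipc.RPC.Builder:

// MyProtocol / MyProtocolImpl stand in for a real RPC protocol pair.
Server server = new RPC.Builder(conf)
    .setProtocol(MyProtocol.class)
    .setInstance(new MyProtocolImpl())
    .setBindAddress("0.0.0.0")
    .setPort(9000)
    .setNumHandlers(5)
    .build();   // missing conf/protocol/instance -> HadoopIllegalArgumentException
server.start();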
Example 6: RSRawEncoder
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
public RSRawEncoder(int numDataUnits, int numParityUnits) {
  super(numDataUnits, numParityUnits);

  if (numDataUnits + numParityUnits >= RSUtil.GF.getFieldSize()) {
    throw new HadoopIllegalArgumentException(
        "Invalid numDataUnits and numParityUnits");
  }

  encodeMatrix = new byte[getNumAllUnits() * numDataUnits];
  RSUtil.genCauchyMatrix(encodeMatrix, getNumAllUnits(), numDataUnits);
  if (isAllowingVerboseDump()) {
    DumpUtil.dumpMatrix(encodeMatrix, numDataUnits, getNumAllUnits());
  }
  gfTables = new byte[getNumAllUnits() * numDataUnits * 32];
  RSUtil.initTables(numDataUnits, numParityUnits, encodeMatrix,
      numDataUnits * numDataUnits, gfTables);
  if (isAllowingVerboseDump()) {
    System.out.println(DumpUtil.bytesToHex(gfTables, -1));
  }
}
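The guard exists because the coder works over GF(2^8): the total of data units plus parity units must stay below the field size, 256. For instance:

RSRawEncoder ok = new RSRawEncoder(6, 3);   // RS(6,3): 9 units, well under 256
// new RSRawEncoder(200, 56);               // 256 >= 256 -> HadoopIllegalArgumentException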
Example 7: checkParameterBuffers
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
/**
 * Check and ensure the buffers are of the length specified by dataLen, and
 * that they are (or are not) direct buffers according to isDirectBuffer.
 * @param buffers the buffers to check
 * @param allowNull whether null elements are allowed
 * @param dataLen the expected number of bytes remaining in each buffer
 * @param isDirectBuffer whether the buffers must be direct buffers
 * @param isOutputs whether these are output buffers (which get reset)
 */
protected void checkParameterBuffers(ByteBuffer[] buffers, boolean
    allowNull, int dataLen, boolean isDirectBuffer, boolean isOutputs) {
  for (ByteBuffer buffer : buffers) {
    if (buffer == null && !allowNull) {
      throw new HadoopIllegalArgumentException(
          "Invalid buffer found, not allowing null");
    } else if (buffer != null) {
      if (buffer.remaining() != dataLen) {
        throw new HadoopIllegalArgumentException(
            "Invalid buffer, not of length " + dataLen);
      }
      if (buffer.isDirect() != isDirectBuffer) {
        throw new HadoopIllegalArgumentException(
            "Invalid buffer, isDirect should be " + isDirectBuffer);
      }

      if (isOutputs) {
        resetBuffer(buffer, dataLen);
      }
    }
  }
}
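A call sketch with made-up sizes: three direct output buffers, each with exactly dataLen bytes remaining, nulls disallowed:

ByteBuffer[] outputs = new ByteBuffer[3];
for (int i = 0; i < outputs.length; i++) {
  outputs[i] = ByteBuffer.allocateDirect(1024);   // remaining() == 1024
}
checkParameterBuffers(outputs, false, 1024, true, true);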
Example 8: processOptions
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
  name = StringUtils.popOptionWithArgument("-n", args);
  String en = StringUtils.popOptionWithArgument("-e", args);
  if (en != null) {
    try {
      encoding = enValueOfFunc.apply(StringUtils.toUpperCase(en));
    } catch (IllegalArgumentException e) {
      throw new IllegalArgumentException(
          "Invalid/unsupported encoding option specified: " + en);
    }
    Preconditions.checkArgument(encoding != null,
        "Invalid/unsupported encoding option specified: " + en);
  }

  boolean r = StringUtils.popOption("-R", args);
  setRecursive(r);
  dump = StringUtils.popOption("-d", args);

  if (!dump && name == null) {
    throw new HadoopIllegalArgumentException(
        "Must specify '-n name' or '-d' option.");
  }

  if (args.isEmpty()) {
    throw new HadoopIllegalArgumentException("<path> is missing.");
  }
  if (args.size() > 1) {
    throw new HadoopIllegalArgumentException("Too many arguments.");
  }
}
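This mirrors Example 3 for a getfattr-style command: either dump all attributes with -d or name one with -n, optionally picking a value encoding with -e. Made-up argument lists:

LinkedList<String> dumpAll = new LinkedList<>(
    Arrays.asList("-d", "/data/file"));
LinkedList<String> oneAttr = new LinkedList<>(
    Arrays.asList("-n", "user.owner", "-e", "hex", "/data/file"));
// Neither -d nor -n present -> HadoopIllegalArgumentException.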
Example 9: getRMDefaultPortNumber
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
@Private
public static int getRMDefaultPortNumber(String addressPrefix,
    Configuration conf) {
  if (addressPrefix.equals(YarnConfiguration.RM_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_PORT;
  } else if (addressPrefix.equals(YarnConfiguration.RM_SCHEDULER_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_SCHEDULER_PORT;
  } else if (addressPrefix.equals(YarnConfiguration.RM_WEBAPP_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_WEBAPP_PORT;
  } else if (addressPrefix.equals(YarnConfiguration.RM_WEBAPP_HTTPS_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_WEBAPP_HTTPS_PORT;
  } else if (addressPrefix
      .equals(YarnConfiguration.RM_RESOURCE_TRACKER_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_RESOURCE_TRACKER_PORT;
  } else if (addressPrefix.equals(YarnConfiguration.RM_ADMIN_ADDRESS)) {
    return YarnConfiguration.DEFAULT_RM_ADMIN_PORT;
  } else {
    throw new HadoopIllegalArgumentException(
        "Invalid RM RPC address Prefix: " + addressPrefix
        + ". The valid value should be one of "
        + getServiceAddressConfKeys(conf));
  }
}
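A usage sketch; the concrete port number in the comment assumes stock YARN defaults:

int rpcPort = getRMDefaultPortNumber(YarnConfiguration.RM_ADDRESS, conf);
// rpcPort == YarnConfiguration.DEFAULT_RM_PORT (8032 in stock YARN).
// An unrecognized prefix such as "yarn.foo.address" throws HadoopIllegalArgumentException.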
Example 10: getPolicy
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
private static Policy getPolicy(final Configuration conf) {
  final boolean enabled = conf.getBoolean(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_KEY,
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_ENABLE_DEFAULT);
  if (!enabled) {
    return Policy.DISABLE;
  }

  final String policy = conf.get(
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY,
      DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_DEFAULT);
  // Start at index 1: DISABLE (index 0) is only reachable via the enable flag.
  for (int i = 1; i < Policy.values().length; i++) {
    final Policy p = Policy.values()[i];
    if (p.name().equalsIgnoreCase(policy)) {
      return p;
    }
  }
  throw new HadoopIllegalArgumentException("Illegal configuration value for "
      + DFSConfigKeys.DFS_CLIENT_WRITE_REPLACE_DATANODE_ON_FAILURE_POLICY_KEY
      + ": " + policy);
}
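A client configuration sketch that would select a non-default policy. The key strings below are the stock HDFS names behind the DFSConfigKeys constants above; treat them as assumptions if your build differs:

Configuration conf = new Configuration();
conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "ALWAYS");
// A misspelled value such as "ALWYS" ends in HadoopIllegalArgumentException.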
Example 11: processOptions
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
@Override
protected void processOptions(LinkedList<String> args) throws IOException {
  CommandFormat cf = new CommandFormat(0, Integer.MAX_VALUE, "R");
  cf.parse(args);
  setRecursive(cf.getOpt("R"));
  if (args.isEmpty()) {
    throw new HadoopIllegalArgumentException("<path> is missing");
  }
  if (args.size() > 1) {
    throw new HadoopIllegalArgumentException("Too many arguments");
  }
}
Example 12: addToParent
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
private void addToParent(INodeDirectory parent, INode child) {
  if (parent == dir.rootDir && FSDirectory.isReservedName(child)) {
    throw new HadoopIllegalArgumentException("File name \""
        + child.getLocalName() + "\" is reserved. Please "
        + "change the name of the existing file or directory to another "
        + "name before upgrading to this release.");
  }

  // NOTE: This does not update space counts for parents
  if (!parent.addChild(child)) {
    return;
  }
  dir.cacheName(child);

  if (child.isFile()) {
    updateBlocksMap(child.asFile(), fsn.getBlockManager());
  }
}
Example 13: checkXAttrSize
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
/**
 * Verifies that the combined size of the name and value of an xattr is within
 * the configured limit. Setting a limit of zero disables this check.
 */
private static void checkXAttrSize(FSDirectory fsd, XAttr xAttr) {
  if (fsd.getXattrMaxSize() == 0) {
    return;
  }
  int size = xAttr.getName().getBytes(Charsets.UTF_8).length;
  if (xAttr.getValue() != null) {
    size += xAttr.getValue().length;
  }
  if (size > fsd.getXattrMaxSize()) {
    throw new HadoopIllegalArgumentException(
        "The XAttr is too big. The maximum combined size of the"
        + " name and value is " + fsd.getXattrMaxSize()
        + ", but the total size is " + size);
  }
}
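A worked example of the arithmetic (the attribute itself is made up): the limit applies to the UTF-8 byte length of the name plus the raw byte length of the value:

byte[] nameBytes = "user.checksum".getBytes(StandardCharsets.UTF_8); // 13 bytes
byte[] valueBytes = new byte[32];                                    // 32 bytes
int size = nameBytes.length + valueBytes.length;                     // 45 in total
// 45 passes easily against a 16384-byte limit (a common default);
// anything above the configured limit triggers the throw above.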
Example 14: create
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may run zkfc on the node other than namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);

  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
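A sketch of the HA wiring the checks above expect; all host and nameservice names are illustrative, and the keys are assumed to be the standard HDFS HA ones:

Configuration conf = new Configuration();
conf.set("dfs.nameservices", "mycluster");
conf.set("dfs.ha.namenodes.mycluster", "nn1,nn2");
conf.set("dfs.namenode.rpc-address.mycluster.nn1", "host1:8020");
conf.set("dfs.namenode.rpc-address.mycluster.nn2", "host2:8020");
// Without the dfs.ha.namenodes.* entry, create(conf) throws
// HadoopIllegalArgumentException ("HA is not enabled for this namenode.").
DFSZKFailoverController zkfc = DFSZKFailoverController.create(conf);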
Example 15: leastPowerOfTwo
import org.apache.hadoop.HadoopIllegalArgumentException; // import the required package/class
/**
 * @return the least power of two greater than or equal to n, i.e. return
 *         the least integer x with x >= n and x a power of two.
 *
 * @throws HadoopIllegalArgumentException
 *           if n <= 0.
 */
public static int leastPowerOfTwo(final int n) {
  if (n <= 0) {
    throw new HadoopIllegalArgumentException("n = " + n + " <= 0");
  }

  final int highestOne = Integer.highestOneBit(n);
  if (highestOne == n) {
    return n; // n is a power of two.
  }
  final int roundUp = highestOne << 1;
  if (roundUp < 0) {
    final long overflow = ((long) highestOne) << 1;
    throw new ArithmeticException(
        "Overflow: for n = " + n + ", the least power of two (the least"
        + " integer x with x >= n and x a power of two) = "
        + overflow + " > Integer.MAX_VALUE = " + Integer.MAX_VALUE);
  }
  return roundUp;
}
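A few worked values to make the rounding and both failure modes concrete:

leastPowerOfTwo(1);                // 1 (already a power of two)
leastPowerOfTwo(1000);             // 1024
// leastPowerOfTwo(0);             // n <= 0 -> HadoopIllegalArgumentException
// leastPowerOfTwo((1 << 30) + 1); // would round to 2^31 -> ArithmeticException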