This article collects typical usage examples of the Java annotation org.apache.hadoop.classification.InterfaceStability.Unstable. If you are wondering what InterfaceStability.Unstable does, how to use it, or where to find real-world examples, the curated code samples below may help. You can also explore further usage examples of its enclosing class, org.apache.hadoop.classification.InterfaceStability.
The following presents 15 code examples of InterfaceStability.Unstable, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
Example 1: getPropertySources
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
/**
* Gets information about why a property was set. Typically this is the
* path to the resource objects (file, URL, etc.) the property came from, but
* it can also indicate that it was set programmatically, or because of the
* command line.
*
* @param name - The property name to get the source of.
* @return null - If the property or its source wasn't found. Otherwise,
* returns a list of the sources of the resource. The older sources are
* the first ones in the list. So for example if a configuration is set from
the command line, and then written out to a file that is read back in, the
* first entry would indicate that it was set from the command line, while
* the second one would indicate the file that the new configuration was read
* in from.
*/
@InterfaceStability.Unstable
public synchronized String[] getPropertySources(String name) {
if (properties == null) {
// If properties is null, it means a resource was newly added
// but the props were cleared so as to load it upon future
// requests. So let's force a load by asking for the properties list.
getProps();
}
// Return a null right away if our properties still
// haven't loaded or the resource mapping isn't defined
if (properties == null || updatingResource == null) {
return null;
} else {
String[] source = updatingResource.get(name);
if (source == null) {
return null;
} else {
return Arrays.copyOf(source, source.length);
}
}
}
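Where this might be used in practice: the short sketch below is an illustrative snippet (not taken from the example's project) that sets a property programmatically on a Configuration and then asks getPropertySources where it came from; the property name demo.key is invented for the demonstration.
import org.apache.hadoop.conf.Configuration;

public class PropertySourceDemo {
  public static void main(String[] args) {
    Configuration conf = new Configuration();
    // "demo.key" is a hypothetical property used only for this illustration
    conf.set("demo.key", "demo-value");
    String[] sources = conf.getPropertySources("demo.key");
    if (sources == null) {
      System.out.println("demo.key has no recorded source");
    } else {
      // older sources come first; a programmatic set() is typically reported
      // as having been set programmatically rather than from a file
      for (String source : sources) {
        System.out.println("demo.key was set from: " + source);
      }
    }
  }
}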
Example 2: getPropertySources
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
/**
* Gets information about why a property was set. Typically this is the
* path to the resource objects (file, URL, etc.) the property came from, but
* it can also indicate that it was set programmatically, or because of the
* command line.
*
* @param name - The property name to get the source of.
* @return null - If the property or its source wasn't found. Otherwise,
* returns a list of the sources of the resource. The older sources are
* the first ones in the list. So for example if a configuration is set from
the command line, and then written out to a file that is read back in, the
* first entry would indicate that it was set from the command line, while
* the second one would indicate the file that the new configuration was read
* in from.
*/
@InterfaceStability.Unstable
public synchronized String[] getPropertySources(String name) {
if (properties == null) {
// If properties is null, it means a resource was newly added
// but the props were cleared so as to load it upon future
// requests. So let's force a load by asking for the properties list.
getProps();
}
// Return a null right away if our properties still
// haven't loaded or the resource mapping isn't defined
if (properties == null || updatingResource == null) {
return null;
} else {
String[] source = updatingResource.get(name);
if (source == null) {
return null;
} else {
return Arrays.copyOf(source, source.length);
}
}
}
Example 3: newInstance
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
@InterfaceAudience.Private
@InterfaceStability.Unstable
public static QueueStatistics newInstance(long submitted, long running,
long pending, long completed, long killed, long failed, long activeUsers,
long availableMemoryMB, long allocatedMemoryMB, long pendingMemoryMB,
long reservedMemoryMB, long availableVCores, long allocatedVCores,
long pendingVCores, long reservedVCores) {
QueueStatistics statistics = Records.newRecord(QueueStatistics.class);
statistics.setNumAppsSubmitted(submitted);
statistics.setNumAppsRunning(running);
statistics.setNumAppsPending(pending);
statistics.setNumAppsCompleted(completed);
statistics.setNumAppsKilled(killed);
statistics.setNumAppsFailed(failed);
statistics.setNumActiveUsers(activeUsers);
statistics.setAvailableMemoryMB(availableMemoryMB);
statistics.setAllocatedMemoryMB(allocatedMemoryMB);
statistics.setPendingMemoryMB(pendingMemoryMB);
statistics.setReservedMemoryMB(reservedMemoryMB);
statistics.setAvailableVCores(availableVCores);
statistics.setAllocatedVCores(allocatedVCores);
statistics.setPendingVCores(pendingVCores);
statistics.setReservedVCores(reservedVCores);
return statistics;
}
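A hedged usage sketch: in normal applications a QueueStatistics instance is obtained from YARN (for example via queue information) rather than built by hand, but the factory above can be exercised directly as shown below; all figures are invented for illustration, and the getter names are assumed to mirror the setters used above.
import org.apache.hadoop.yarn.api.records.QueueStatistics;

public class QueueStatisticsDemo {
  public static void main(String[] args) {
    // All numbers are made up; arguments follow the order of the factory above:
    // submitted, running, pending, completed, killed, failed, activeUsers,
    // availableMemoryMB, allocatedMemoryMB, pendingMemoryMB, reservedMemoryMB,
    // availableVCores, allocatedVCores, pendingVCores, reservedVCores
    QueueStatistics stats = QueueStatistics.newInstance(
        100L, 10L, 5L, 80L, 3L, 2L, 4L,
        8192L, 4096L, 1024L, 0L,
        16L, 8L, 2L, 0L);
    System.out.println("apps submitted: " + stats.getNumAppsSubmitted());
    System.out.println("available memory (MB): " + stats.getAvailableMemoryMB());
  }
}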
Example 4: toString
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
@Override
@InterfaceStability.Unstable
public String toString() {
StringBuilder content = new StringBuilder();
content.append("CreateEvent [INodeType=").append(iNodeType)
.append(", path=").append(path)
.append(", ctime=").append(ctime)
.append(", replication=").append(replication)
.append(", ownerName=").append(ownerName)
.append(", groupName=").append(groupName)
.append(", perms=").append(perms).append(", ");
if (symlinkTarget != null) {
content.append("symlinkTarget=").append(symlinkTarget).append(", ");
}
content.append("overwrite=").append(overwrite)
.append(", defaultBlockSize=").append(defaultBlockSize)
.append("]");
return content.toString();
}
Example 5: toString
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
/**
* String value includes statistics as well as stream state.
* <b>Important: there are no guarantees as to the stability
* of this value.</b>
* @return a string value for printing in logs/diagnostics
*/
@Override
@InterfaceStability.Unstable
public String toString() {
synchronized (this) {
final StringBuilder sb = new StringBuilder(
"COSInputStream{");
sb.append(uri);
sb.append(" wrappedStream=")
.append(wrappedStream != null ? "open" : "closed");
sb.append(" read policy=").append(inputPolicy);
sb.append(" pos=").append(pos);
sb.append(" nextReadPos=").append(nextReadPos);
sb.append(" contentLength=").append(contentLength);
sb.append(" contentRangeStart=").append(contentRangeStart);
sb.append(" contentRangeFinish=").append(contentRangeFinish);
sb.append(" remainingInCurrentRequest=")
.append(remainingInCurrentRequest());
sb.append('\n');
sb.append('}');
return sb.toString();
}
}
Example 6: SaslRpcServer
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
@InterfaceAudience.Private
@InterfaceStability.Unstable
public SaslRpcServer(AuthMethod authMethod) throws IOException {
this.authMethod = authMethod;
mechanism = authMethod.getMechanismName();
switch (authMethod) {
case SIMPLE: {
return; // no sasl for simple
}
case TOKEN: {
protocol = "";
serverId = SaslRpcServer.SASL_DEFAULT_REALM;
break;
}
case KERBEROS: {
String fullName = UserGroupInformation.getCurrentUser().getUserName();
if (LOG.isDebugEnabled())
LOG.debug("Kerberos principal name is " + fullName);
// don't use KerberosName because we don't want auth_to_local
String[] parts = fullName.split("[/@]", 3);
protocol = parts[0];
// should verify service host is present here rather than in create()
// but lazy tests are using a UGI that isn't a SPN...
serverId = (parts.length < 2) ? "" : parts[1];
break;
}
default:
// we should never be able to get here
throw new AccessControlException(
"Server does not support SASL " + authMethod);
}
}
Example 7: setLoginUser
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
@InterfaceAudience.Private
@InterfaceStability.Unstable
@VisibleForTesting
public synchronized static void setLoginUser(UserGroupInformation ugi) {
// if this is to become stable, should probably logout the currently
// logged in ugi if it's different
loginUser = ugi;
}
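Because the method is marked @VisibleForTesting, it is normally only called from tests. A minimal test-style sketch (illustrative only; the user name is made up) might look like this:
import org.apache.hadoop.security.UserGroupInformation;

public class LoginUserTestSketch {
  public static void main(String[] args) throws Exception {
    // Create a UGI that is not backed by a real login, then install it as the
    // process-wide login user so code under test sees "test-user".
    UserGroupInformation fakeLogin =
        UserGroupInformation.createRemoteUser("test-user");
    UserGroupInformation.setLoginUser(fakeLogin);
    System.out.println("login user is now: "
        + UserGroupInformation.getLoginUser().getUserName());
  }
}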
Example 8: sendResponse
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
@InterfaceStability.Unstable
@InterfaceAudience.LimitedPrivate({"HDFS"})
public void sendResponse() throws IOException {
int count = responseWaitCount.decrementAndGet();
assert count >= 0 : "response has already been sent";
if (count == 0) {
connection.sendResponse(this);
}
}
Example 9: abortResponse
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
@InterfaceStability.Unstable
@InterfaceAudience.LimitedPrivate({"HDFS"})
public void abortResponse(Throwable t) throws IOException {
// don't send response if the call was already sent or aborted.
if (responseWaitCount.getAndSet(-1) > 0) {
// clone the call to prevent a race with the other thread stomping
// on the response while being sent. the original call is
// effectively discarded since the wait count won't hit zero
Call call = new Call(this);
setupResponse(new ByteArrayOutputStream(), call,
RpcStatusProto.FATAL, RpcErrorCodeProto.ERROR_RPC_SERVER,
null, t.getClass().getName(), StringUtils.stringifyException(t));
call.sendResponse();
}
}
Example 10: getClient
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
@VisibleForTesting
@InterfaceAudience.Private
@InterfaceStability.Unstable
static Client getClient(Configuration conf) {
return CLIENTS.getClient(conf, SocketFactory.getDefault(),
RpcResponseWrapper.class);
}
Example 11: getConnectionIds
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
@InterfaceAudience.Private
@InterfaceStability.Unstable
Set<ConnectionId> getConnectionIds() {
synchronized (connections) {
return connections.keySet();
}
}
Example 12: resetConnection
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
/**
* Forcibly reset the stream, by aborting the connection. The next
* {@code read()} operation will trigger the opening of a new HTTPS
* connection.
*
* This is potentially very inefficient, and should only be invoked
* in extreme circumstances. It logs at info for this reason.
* @return true if the connection was actually reset
* @throws IOException if invoked on a closed stream
*/
@InterfaceStability.Unstable
public synchronized boolean resetConnection() throws IOException {
checkNotClosed();
boolean connectionOpen = wrappedStream != null;
if (connectionOpen) {
LOG.info("Forced reset of connection to {}", uri);
closeStream("reset()", contentRangeFinish, true);
}
return connectionOpen;
}
Example 13: getClient
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
@VisibleForTesting
@InterfaceAudience.Private
@InterfaceStability.Unstable
static Client getClient(Configuration conf) {
return CLIENTS.getClient(conf, SocketFactory.getDefault(),
RpcWritable.Buffer.class);
}
Example 14: newInstance
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
@InterfaceAudience.Public
@InterfaceStability.Unstable
public static UpdateContainerRequest newInstance(int version,
ContainerId containerId, ContainerUpdateType updateType,
Resource targetCapability) {
UpdateContainerRequest request =
Records.newRecord(UpdateContainerRequest.class);
request.setContainerVersion(version);
request.setContainerId(containerId);
request.setContainerUpdateType(updateType);
request.setCapability(targetCapability);
return request;
}
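As a rough illustration (not from the source project), an application master that wants more resources for a running container could build such a request as sketched below; the ContainerId is assumed to come from a previously allocated Container, and the 2 GB / 2 vcore target is arbitrary.
import org.apache.hadoop.yarn.api.records.ContainerId;
import org.apache.hadoop.yarn.api.records.ContainerUpdateType;
import org.apache.hadoop.yarn.api.records.Resource;
import org.apache.hadoop.yarn.api.records.UpdateContainerRequest;

public class ContainerUpdateSketch {
  // containerVersion should be the version reported for the running container;
  // both parameters are assumed to be supplied by the caller
  static UpdateContainerRequest buildIncreaseRequest(ContainerId containerId,
      int containerVersion) {
    Resource target = Resource.newInstance(2048, 2); // 2048 MB, 2 vcores
    return UpdateContainerRequest.newInstance(containerVersion, containerId,
        ContainerUpdateType.INCREASE_RESOURCE, target);
  }
}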
Example 15: getAsyncReturn
import org.apache.hadoop.classification.InterfaceStability; // import the package/class this method depends on
/** @return the async return value from {@link AsyncCallHandler}. */
@InterfaceStability.Unstable
@SuppressWarnings("unchecked")
public static <R, T extends Throwable> AsyncGet<R, T> getAsyncReturn() {
final AsyncGet<R, T> asyncGet = (AsyncGet<R, T>)ASYNC_RETURN.get();
if (asyncGet != null) {
ASYNC_RETURN.set(null);
return asyncGet;
} else {
return (AsyncGet<R, T>) getLowerLayerAsyncReturn();
}
}