This article collects typical usage examples of the Java annotation org.apache.hadoop.classification.InterfaceAudience.LimitedPrivate. If you are wondering what InterfaceAudience.LimitedPrivate is for, or how it is used in practice, the curated code examples here may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.classification.InterfaceAudience.
The following shows 15 code examples of InterfaceAudience.LimitedPrivate, sorted by popularity by default.
Example 1: removeContext
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
/**
 * Removes the context from the context config items.
 *
 * @param contextCfgItemName name of the context config item to remove
 */
@Deprecated
@InterfaceAudience.LimitedPrivate({"MapReduce"})
public static void removeContext(String contextCfgItemName) {
  synchronized (contexts) {
    contexts.remove(contextCfgItemName);
  }
}
Example 2: getDelegationTokens
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
/**
 * Get delegation tokens for the file systems accessed for a given
 * path.
 * @param p Path for which delegation tokens are requested.
 * @param renewer the account name that is allowed to renew the token.
 * @return List of delegation tokens.
 * @throws IOException
 */
@InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
public List<Token<?>> getDelegationTokens(
    Path p, String renewer) throws IOException {
  Set<AbstractFileSystem> afsSet = resolveAbstractFileSystems(p);
  List<Token<?>> tokenList =
      new ArrayList<Token<?>>();
  for (AbstractFileSystem afs : afsSet) {
    List<Token<?>> afsTokens = afs.getDelegationTokens(renewer);
    tokenList.addAll(afsTokens);
  }
  return tokenList;
}
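Below is a minimal usage sketch for this method, not taken from Hadoop itself: the path, the "yarn" renewer name, and the class name are made up for illustration, and the LimitedPrivate annotation above means the method is really intended for HDFS and MapReduce internals. The sketch asks a FileContext for the delegation tokens needed to reach a path and stores them in a Credentials bag, keyed by each token's service name.

import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class DelegationTokenFetch {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    // the path and the "yarn" renewer are hypothetical values
    List<Token<?>> tokens = fc.getDelegationTokens(new Path("/user/alice/input"), "yarn");
    Credentials creds = new Credentials();
    for (Token<?> t : tokens) {
      creds.addToken(t.getService(), t); // key each token by its service name
    }
    System.out.println("collected " + creds.numberOfTokens() + " token(s)");
  }
}

A job client would typically ship such a Credentials object with the submitted job so that tasks can authenticate to the underlying file systems.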
Example 3: sendResponse
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
@InterfaceStability.Unstable
@InterfaceAudience.LimitedPrivate({"HDFS"})
public void sendResponse() throws IOException {
  int count = responseWaitCount.decrementAndGet();
  assert count >= 0 : "response has already been sent";
  if (count == 0) {
    connection.sendResponse(this);
  }
}
Example 4: abortResponse
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
@InterfaceStability.Unstable
@InterfaceAudience.LimitedPrivate({"HDFS"})
public void abortResponse(Throwable t) throws IOException {
  // don't send a response if the call was already sent or aborted.
  if (responseWaitCount.getAndSet(-1) > 0) {
    // clone the call to prevent a race with the other thread stomping
    // on the response while it is being sent. the original call is
    // effectively discarded since the wait count won't hit zero
    Call call = new Call(this);
    setupResponse(new ByteArrayOutputStream(), call,
        RpcStatusProto.FATAL, RpcErrorCodeProto.ERROR_RPC_SERVER,
        null, t.getClass().getName(), StringUtils.stringifyException(t));
    call.sendResponse();
  }
}
Example 5: DataNode (test-only constructor)
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
/**
 * Creates a dummy DataNode for testing purposes.
 */
@VisibleForTesting
@InterfaceAudience.LimitedPrivate("HDFS")
DataNode(final Configuration conf) {
  super(conf);
  this.blockScanner = new BlockScanner(this, conf);
  this.fileDescriptorPassingDisabledReason = null;
  this.maxNumberOfBlocksToLog = 0;
  this.confVersion = null;
  this.usersWithLocalPathAccess = null;
  this.connectToDnViaHostname = false;
  this.getHdfsBlockLocationsEnabled = false;
  this.pipelineSupportECN = false;
}
Example 6: getServiceAuthorizationManager
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
@InterfaceAudience.LimitedPrivate({"HDFS", "MapReduce"})
public ServiceAuthorizationManager getServiceAuthorizationManager() {
  return serviceAuthorizationManager;
}
Example 7: postponeResponse
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
@InterfaceStability.Unstable
@InterfaceAudience.LimitedPrivate({"HDFS"})
public void postponeResponse() {
  int count = responseWaitCount.incrementAndGet();
  assert count > 0 : "response has already been sent";
}
Example 8: access
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
/**
 * Checks if the user can access a path. The mode specifies which access
 * checks to perform. If the requested permissions are granted, then the
 * method returns normally. If access is denied, then the method throws an
 * {@link AccessControlException}.
 * <p/>
 * The default implementation of this method calls {@link #getFileStatus(Path)}
 * and checks the returned permissions against the requested permissions.
 * Note that the getFileStatus call will be subject to authorization checks.
 * Typically, this requires search (execute) permissions on each directory in
 * the path's prefix, but this is implementation-defined. Any file system
 * that provides a richer authorization model (such as ACLs) may override the
 * default implementation so that it checks against that model instead.
 * <p>
 * In general, applications should avoid using this method, due to the risk of
 * time-of-check/time-of-use race conditions. The permissions on a file may
 * change immediately after the access call returns. Most applications should
 * prefer running specific file system actions as the desired user represented
 * by a {@link UserGroupInformation}.
 *
 * @param path Path to check
 * @param mode type of access to check
 * @throws AccessControlException if access is denied
 * @throws FileNotFoundException if the path does not exist
 * @throws UnsupportedFileSystemException if the file system for <code>path</code>
 *   is not supported
 * @throws IOException see specific implementation
 *
 * Exceptions applicable to file systems accessed over RPC:
 * @throws RpcClientException If an exception occurred in the RPC client
 * @throws RpcServerException If an exception occurred in the RPC server
 * @throws UnexpectedServerException If the server implementation throws an
 *   undeclared exception to the RPC server
 */
@InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
public void access(final Path path, final FsAction mode)
    throws AccessControlException, FileNotFoundException,
    UnsupportedFileSystemException, IOException {
  final Path absPath = fixRelativePart(path);
  new FSLinkResolver<Void>() {
    @Override
    public Void next(AbstractFileSystem fs, Path p) throws IOException,
        UnresolvedLinkException {
      fs.access(p, mode);
      return null;
    }
  }.resolve(this, absPath);
}
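To show how a caller might use this, here is a hedged sketch (the configuration, the path, and the class name are invented for illustration): it probes whether the current user may read and traverse a directory and handles the exceptions listed in the Javadoc above.

import java.io.FileNotFoundException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileContext;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class AccessCheckExample {
  public static void main(String[] args) throws Exception {
    FileContext fc = FileContext.getFileContext(new Configuration());
    Path dir = new Path("/data/input"); // hypothetical path
    try {
      // throws AccessControlException if the current user lacks read+execute access
      fc.access(dir, FsAction.READ_EXECUTE);
      System.out.println("access granted");
    } catch (AccessControlException e) {
      System.out.println("access denied: " + e.getMessage());
    } catch (FileNotFoundException e) {
      System.out.println("no such path: " + dir);
    }
  }
}

As the Javadoc warns, such a check is only advisory: permissions may change between the check and the later use, so the real file operation should still handle AccessControlException itself.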
Example 9: getCanonicalServiceName
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
/**
 * Get a canonical service name for this file system. The token cache is
 * the only user of the canonical service name, and uses it to look up this
 * filesystem's service tokens.
 * If the file system provides a token of its own then it must have a canonical
 * name, otherwise the canonical name can be null.
 *
 * Default Impl: If the file system has child file systems
 * (such as an embedded file system) then it is assumed that the fs has no
 * tokens of its own and hence returns a null name; otherwise a service
 * name is built using Uri and port.
 *
 * @return a service string that uniquely identifies this file system, null
 *   if the filesystem does not implement tokens
 * @see SecurityUtil#buildDTServiceName(URI, int)
 */
@InterfaceAudience.LimitedPrivate({ "HDFS", "MapReduce" })
public String getCanonicalServiceName() {
  return (getChildFileSystems() == null)
      ? SecurityUtil.buildDTServiceName(getUri(), getDefaultPort())
      : null;
}
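A hedged sketch of the lookup side described in the Javadoc (the helper class is invented; real Hadoop code does this inside its token cache machinery): when the canonical service name is non-null, it is the key under which this file system's delegation token is stored.

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.token.Token;

public class ServiceNameLookup {
  // Look up a previously stored token for this filesystem, if it issues tokens at all.
  static Token<?> lookup(FileSystem fs, Credentials creds) {
    String service = fs.getCanonicalServiceName();
    if (service == null) {
      return null; // filesystem delegates token handling to its child file systems
    }
    return creds.getToken(new Text(service)); // tokens are keyed by the service name
  }
}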
Example 10: getWrappedStream
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
/**
 * Get a reference to the wrapped input stream. Used by unit tests.
 *
 * @return the underlying input stream
 */
@InterfaceAudience.LimitedPrivate({"HDFS"})
public InputStream getWrappedStream() {
  return in;
}
Example 11: access
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
/**
 * Checks if the user can access a path. The mode specifies which access
 * checks to perform. If the requested permissions are granted, then the
 * method returns normally. If access is denied, then the method throws an
 * {@link AccessControlException}.
 * <p/>
 * The default implementation of this method calls {@link #getFileStatus(Path)}
 * and checks the returned permissions against the requested permissions.
 * Note that the getFileStatus call will be subject to authorization checks.
 * Typically, this requires search (execute) permissions on each directory in
 * the path's prefix, but this is implementation-defined. Any file system
 * that provides a richer authorization model (such as ACLs) may override the
 * default implementation so that it checks against that model instead.
 * <p>
 * In general, applications should avoid using this method, due to the risk of
 * time-of-check/time-of-use race conditions. The permissions on a file may
 * change immediately after the access call returns. Most applications should
 * prefer running specific file system actions as the desired user represented
 * by a {@link UserGroupInformation}.
 *
 * @param path Path to check
 * @param mode type of access to check
 * @throws AccessControlException if access is denied
 * @throws FileNotFoundException if the path does not exist
 * @throws IOException see specific implementation
 */
@InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
public void access(Path path, FsAction mode) throws AccessControlException,
    FileNotFoundException, IOException {
  checkAccessPermissions(this.getFileStatus(path), mode);
}
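The Javadoc notes that a file system with a richer authorization model may override this default. The following is a hypothetical sketch of such an override, not an actual Hadoop class: AclAwareFileSystem and its in-memory ACL map are invented, extending RawLocalFileSystem only to keep the sketch short, and a real implementation would consult its own metadata store.

import java.io.FileNotFoundException;
import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RawLocalFileSystem;
import org.apache.hadoop.fs.permission.FsAction;
import org.apache.hadoop.security.AccessControlException;

public class AclAwareFileSystem extends RawLocalFileSystem {
  // hypothetical in-memory ACL table: qualified path -> actions granted to callers
  private final Map<Path, FsAction> acls = new HashMap<>();

  @Override
  public void access(Path path, FsAction mode) throws AccessControlException,
      FileNotFoundException, IOException {
    getFileStatus(path); // existence check; throws FileNotFoundException like the default
    FsAction granted = acls.getOrDefault(makeQualified(path), FsAction.NONE);
    if (!granted.implies(mode)) {
      throw new AccessControlException(
          "Permission denied: path=" + path + ", requested=" + mode + ", granted=" + granted);
    }
  }
}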
Example 12: getWrappedStream
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
/**
 * Get a reference to the wrapped output stream.
 *
 * @return the underlying output stream
 */
@InterfaceAudience.LimitedPrivate({"HDFS"})
public OutputStream getWrappedStream() {
  return wrappedStream;
}
Example 13: access
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
/**
 * The specification of this method matches that of
 * {@link FileContext#access(Path, FsAction)}
 * except that an UnresolvedLinkException may be thrown if a symlink is
 * encountered in the path.
 */
@InterfaceAudience.LimitedPrivate({"HDFS", "Hive"})
public void access(Path path, FsAction mode) throws AccessControlException,
    FileNotFoundException, UnresolvedLinkException, IOException {
  FileSystem.checkAccessPermissions(this.getFileStatus(path), mode);
}
Example 14: getDelegationTokens
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
/**
 * Get one or more delegation tokens associated with the filesystem. Normally
 * a file system returns a single delegation token. A file system that manages
 * multiple file systems underneath could return a set of delegation tokens for
 * all the file systems it manages.
 *
 * @param renewer the account name that is allowed to renew the token.
 * @return List of delegation tokens.
 *   If delegation tokens are not supported then return a list of size zero.
 * @throws IOException
 */
@InterfaceAudience.LimitedPrivate( { "HDFS", "MapReduce" })
public List<Token<?>> getDelegationTokens(String renewer) throws IOException {
  return new ArrayList<Token<?>>(0);
}
Example 15: getChildFileSystems
import org.apache.hadoop.classification.InterfaceAudience; //import the package/class this example depends on
/**
 * Get all the immediate child FileSystems embedded in this FileSystem.
 * It does not recurse and get grandchildren. If a FileSystem
 * has multiple child FileSystems, then it should return a unique list
 * of those FileSystems. Default is to return null to signify no children.
 *
 * @return FileSystems used by this FileSystem
 */
@InterfaceAudience.LimitedPrivate({ "HDFS" })
@VisibleForTesting
public FileSystem[] getChildFileSystems() {
  return null;
}
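For a composite file system, the contract above amounts to returning each immediate child exactly once, without recursing into grandchildren. A hypothetical override illustrating this (CompositeFileSystem and its mounts list are invented for the sketch, and it extends RawLocalFileSystem only to stay short and compilable):

import java.util.ArrayList;
import java.util.LinkedHashSet;
import java.util.List;
import java.util.Set;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.RawLocalFileSystem;

public class CompositeFileSystem extends RawLocalFileSystem {
  // hypothetical list of file systems this composite delegates to
  private final List<FileSystem> mounts = new ArrayList<>();

  @Override
  public FileSystem[] getChildFileSystems() {
    // return each immediate child once; do not recurse into grandchildren
    Set<FileSystem> unique = new LinkedHashSet<>(mounts);
    return unique.toArray(new FileSystem[0]);
  }
}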