This article collects typical usage examples of the Java class org.apache.hadoop.nfs.nfs3.Nfs3Constant. If you are wondering what Nfs3Constant is for and how to use it, the curated examples below should help.
The Nfs3Constant class lives in the org.apache.hadoop.nfs.nfs3 package. Fifteen code examples are shown below, sorted by popularity.
Example 1: getAccessRights
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
public static int getAccessRights(int mode, int type) {
  int rtn = 0;
  if (isSet(mode, Nfs3Constant.ACCESS_MODE_READ)) {
    rtn |= Nfs3Constant.ACCESS3_READ;
    // LOOKUP is only meaningful for a directory
    if (type == NfsFileType.NFSDIR.toValue()) {
      rtn |= Nfs3Constant.ACCESS3_LOOKUP;
    }
  }
  if (isSet(mode, Nfs3Constant.ACCESS_MODE_WRITE)) {
    rtn |= Nfs3Constant.ACCESS3_MODIFY;
    rtn |= Nfs3Constant.ACCESS3_EXTEND;
    // Set the delete bit; UNIX may ignore it for a regular file, since deletion
    // is governed by the parent directory's permissions.
    rtn |= Nfs3Constant.ACCESS3_DELETE;
  }
  if (isSet(mode, Nfs3Constant.ACCESS_MODE_EXECUTE)) {
    if (type == NfsFileType.NFSREG.toValue()) {
      rtn |= Nfs3Constant.ACCESS3_EXECUTE;
    } else {
      rtn |= Nfs3Constant.ACCESS3_LOOKUP;
    }
  }
  return rtn;
}
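As a quick usage sketch (assuming the method lives on the HDFS NFS gateway's Nfs3Utils helper, where Hadoop defines it), this computes the ACCESS3 mask for a readable and writable directory:

import org.apache.hadoop.nfs.NfsFileType;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

// Read + write permission bits for a directory:
int mode = Nfs3Constant.ACCESS_MODE_READ | Nfs3Constant.ACCESS_MODE_WRITE;
int rights = Nfs3Utils.getAccessRights(mode, NfsFileType.NFSDIR.toValue());
// READ and LOOKUP come from the read branch; MODIFY, EXTEND and DELETE from the write branch.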
Example 2: addDeprecatedKeys
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
private static void addDeprecatedKeys() {
  Configuration.addDeprecations(new DeprecationDelta[] {
      new DeprecationDelta("nfs3.server.port",
          NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY),
      new DeprecationDelta("nfs3.mountd.port",
          NfsConfigKeys.DFS_NFS_MOUNTD_PORT_KEY),
      new DeprecationDelta("dfs.nfs.exports.cache.size",
          Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_KEY),
      new DeprecationDelta("dfs.nfs.exports.cache.expirytime.millis",
          Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY),
      new DeprecationDelta("hadoop.nfs.userupdate.milly",
          IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY),
      new DeprecationDelta("nfs.usergroup.update.millis",
          IdMappingConstant.USERGROUPID_UPDATE_MILLIS_KEY),
      new DeprecationDelta("nfs.static.mapping.file",
          IdMappingConstant.STATIC_ID_MAPPING_FILE_KEY),
      new DeprecationDelta("dfs.nfs3.enableDump",
          NfsConfigKeys.DFS_NFS_FILE_DUMP_KEY),
      new DeprecationDelta("dfs.nfs3.dump.dir",
          NfsConfigKeys.DFS_NFS_FILE_DUMP_DIR_KEY),
      new DeprecationDelta("dfs.nfs3.max.open.files",
          NfsConfigKeys.DFS_NFS_MAX_OPEN_FILES_KEY),
      new DeprecationDelta("dfs.nfs3.stream.timeout",
          NfsConfigKeys.DFS_NFS_STREAM_TIMEOUT_KEY),
      new DeprecationDelta("dfs.nfs3.export.point",
          NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY),
      new DeprecationDelta("nfs.allow.insecure.ports",
          NfsConfigKeys.DFS_NFS_PORT_MONITORING_DISABLED_KEY),
      new DeprecationDelta("dfs.nfs.keytab.file",
          NfsConfigKeys.DFS_NFS_KEYTAB_FILE_KEY),
      new DeprecationDelta("dfs.nfs.kerberos.principal",
          NfsConfigKeys.DFS_NFS_KERBEROS_PRINCIPAL_KEY),
      new DeprecationDelta("dfs.nfs.rtmax",
          NfsConfigKeys.DFS_NFS_MAX_READ_TRANSFER_SIZE_KEY),
      new DeprecationDelta("dfs.nfs.wtmax",
          NfsConfigKeys.DFS_NFS_MAX_WRITE_TRANSFER_SIZE_KEY),
      new DeprecationDelta("dfs.nfs.dtmax",
          NfsConfigKeys.DFS_NFS_MAX_READDIR_TRANSFER_SIZE_KEY) });
}
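To see a deprecation mapping in action, here is a minimal sketch (assuming addDeprecatedKeys() has already run, e.g. from the enclosing class's static initializer): a value set under an old key becomes readable under its new name.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.nfs.conf.NfsConfigKeys;

Configuration conf = new Configuration(false);
conf.set("nfs3.server.port", "2049");  // deprecated key from the table above
// Configuration resolves deprecated names, so the new key sees the same value:
String port = conf.get(NfsConfigKeys.DFS_NFS_SERVER_PORT_KEY);  // "2049"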
Example 3: testCreate
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
@Test(timeout = 60000)
public void testCreate() throws Exception {
  HdfsFileStatus status = nn.getRpcServer().getFileInfo(testdir);
  long dirId = status.getFileId();
  XDR xdr_req = new XDR();
  FileHandle handle = new FileHandle(dirId);
  CREATE3Request req = new CREATE3Request(handle, "fubar",
      Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
  req.serialize(xdr_req);

  // An attempt by an unprivileged user should fail.
  CREATE3Response response1 = nfsd.create(xdr_req.asReadOnlyWrap(),
      securityHandlerUnpriviledged,
      new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3ERR_ACCES,
      response1.getStatus());

  // An attempt by a privileged user should pass.
  CREATE3Response response2 = nfsd.create(xdr_req.asReadOnlyWrap(),
      securityHandler, new InetSocketAddress("localhost", 1234));
  assertEquals("Incorrect return code:", Nfs3Status.NFS3_OK,
      response2.getStatus());
}
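Note that both create calls replay the same serialized request: xdr_req.asReadOnlyWrap() hands each caller a read-only view of the underlying buffer, so the second call reads the bytes from the start again rather than finding them already consumed.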
Example 4: deserialize
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
public static CREATE3Request deserialize(XDR xdr) throws IOException {
  FileHandle handle = readHandle(xdr);
  String name = xdr.readString();
  int mode = xdr.readInt();
  SetAttr3 objAttr = new SetAttr3();
  long verf = 0;
  if ((mode == Nfs3Constant.CREATE_UNCHECKED)
      || (mode == Nfs3Constant.CREATE_GUARDED)) {
    objAttr.deserialize(xdr);
  } else if (mode == Nfs3Constant.CREATE_EXCLUSIVE) {
    verf = xdr.readHyper();
  } else {
    throw new IOException("Wrong create mode: " + mode);
  }
  return new CREATE3Request(handle, name, mode, objAttr, verf);
}
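A minimal round-trip sketch (file handle and name are hypothetical): UNCHECKED and GUARDED requests carry initial attributes, while EXCLUSIVE carries only a 64-bit verifier, which is why deserialize branches on the mode.

XDR xdr = new XDR();
CREATE3Request original = new CREATE3Request(new FileHandle(1L), "hello.txt",
    Nfs3Constant.CREATE_UNCHECKED, new SetAttr3(), 0);
original.serialize(xdr);  // writes handle, name, mode, then the SetAttr3 payload
CREATE3Request roundTripped = CREATE3Request.deserialize(xdr.asReadOnlyWrap());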
Example 5: getInstance
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
public static synchronized NfsExports getInstance(Configuration conf) {
  if (exports == null) {
    String matchHosts = conf.get(
        CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY,
        CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY_DEFAULT);
    int cacheSize = conf.getInt(Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_KEY,
        Nfs3Constant.NFS_EXPORTS_CACHE_SIZE_DEFAULT);
    long expirationPeriodNano = conf.getLong(
        Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_KEY,
        Nfs3Constant.NFS_EXPORTS_CACHE_EXPIRYTIME_MILLIS_DEFAULT) * 1000 * 1000;
    try {
      exports = new NfsExports(cacheSize, expirationPeriodNano, matchHosts);
    } catch (IllegalArgumentException e) {
      LOG.error("Invalid NFS Exports provided: ", e);
      return exports;
    }
  }
  return exports;
}
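A minimal configuration sketch (the subnet below is hypothetical; CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY resolves to nfs.exports.allowed.hosts and defaults to "* rw"):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.CommonConfigurationKeys;
import org.apache.hadoop.nfs.NfsExports;

Configuration conf = new Configuration();
conf.set(CommonConfigurationKeys.NFS_EXPORTS_ALLOWED_HOSTS_KEY, "192.168.0.0/22 rw");
NfsExports exports = NfsExports.getInstance(conf);  // cached singleton thereafter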
Example 6: RpcProgramNfs3
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
public RpcProgramNfs3(List<String> exports, Configuration config)
    throws IOException {
  super("NFS3", "localhost", Nfs3Constant.PORT, Nfs3Constant.PROGRAM,
      Nfs3Constant.VERSION, Nfs3Constant.VERSION, 100);

  config.set(FsPermission.UMASK_LABEL, "000");
  iug = new IdUserGroup();
  writeManager = new WriteManager(iug, config);
  clientCache = new DFSClientCache(config);
  superUserClient = new DFSClient(NameNode.getAddress(config), config);
  replication = (short) config.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
      DFSConfigKeys.DFS_REPLICATION_DEFAULT);
  blockSize = config.getLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY,
      DFSConfigKeys.DFS_BLOCK_SIZE_DEFAULT);
  bufferSize = config.getInt("io.file.buffer.size", 4096);

  writeDumpDir = config.get("dfs.nfs3.dump.dir", "/tmp/.hdfs-nfs");
  boolean enableDump = config.getBoolean("dfs.nfs3.enableDump", true);
  if (!enableDump) {
    writeDumpDir = null;
  } else {
    clearDirectory(writeDumpDir);
  }
}
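A hedged construction sketch (the export list and settings are hypothetical). Note that "dfs.nfs3.enableDump" and "dfs.nfs3.dump.dir" are the literal pre-NfsConfigKeys names that Example 2 registers as deprecated:

import java.util.Collections;
import org.apache.hadoop.conf.Configuration;

Configuration config = new Configuration();
config.setBoolean("dfs.nfs3.enableDump", false);  // skip creating the write-dump directory
RpcProgramNfs3 nfs3 = new RpcProgramNfs3(Collections.singletonList("/"), config);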
Example 7: getAccessRights
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
public static int getAccessRights(int mode, int type) {
  int rtn = 0;
  if (isSet(mode, Nfs3Constant.ACCESS_MODE_READ)) {
    rtn |= Nfs3Constant.ACCESS3_READ;
    // LOOKUP is only meaningful for a directory
    if (type == NfsFileType.NFSDIR.toValue()) {
      rtn |= Nfs3Constant.ACCESS3_LOOKUP;
    }
  }
  if (isSet(mode, Nfs3Constant.ACCESS_MODE_WRITE)) {
    rtn |= Nfs3Constant.ACCESS3_MODIFY;
    rtn |= Nfs3Constant.ACCESS3_EXTEND;
    // Set the delete bit; UNIX may ignore it for a regular file, since deletion
    // is governed by the parent directory's permissions.
    rtn |= Nfs3Constant.ACCESS3_DELETE;
  }
  if (isSet(mode, Nfs3Constant.ACCESS_MODE_EXECUTE)) {
    if (type == NfsFileType.NFSREG.toValue()) {
      rtn |= Nfs3Constant.ACCESS3_EXECUTE;
    }
  }
  return rtn;
}
Example 8: create
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
static XDR create() {
  XDR request = new XDR();
  RpcCall.write(request, 0x8000004c, Nfs3Constant.PROGRAM,
      Nfs3Constant.VERSION, Nfs3Constant.NFSPROC3_CREATE);

  // credentials
  request.writeInt(0); // AUTH_NULL
  request.writeInt(0); // length zero
  // verifier
  request.writeInt(0); // AUTH_NULL
  request.writeInt(0); // length zero

  SetAttr3 objAttr = new SetAttr3();
  CREATE3Request createReq = new CREATE3Request(new FileHandle("/"),
      "out-of-order-write" + System.currentTimeMillis(), 0, objAttr, 0);
  createReq.serialize(request);
  return request;
}
Example 9: write
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
static XDR write(FileHandle handle, int xid, long offset, int count,
    byte[] data) {
  XDR request = new XDR();
  RpcCall.write(request, xid, Nfs3Constant.PROGRAM, Nfs3Constant.VERSION,
      Nfs3Constant.NFSPROC3_WRITE);

  // credentials
  request.writeInt(0); // AUTH_NULL
  request.writeInt(0); // length zero
  // verifier
  request.writeInt(0); // AUTH_NULL
  request.writeInt(0); // length zero

  WRITE3Request write1 = new WRITE3Request(handle, offset, count,
      WriteStableHow.UNSTABLE, ByteBuffer.wrap(data));
  write1.serialize(request);
  return request;
}
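For instance, the test can build two WRITE calls against the same handle with deliberately out-of-order offsets (xids and sizes below are hypothetical), so the server has to hold the later chunk until the gap before it is filled:

byte[] data = new byte[128];
// Offset 128 is sent first, offset 0 second: intentionally out of order.
XDR writeReq2 = write(handle, /* xid */ 2, /* offset */ 128, data.length, data);
XDR writeReq1 = write(handle, /* xid */ 1, /* offset */ 0, data.length, data);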
Example 10: main
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
public static void main(String[] args) throws InterruptedException {
  PortmapMapping mapEntry = new PortmapMapping(RpcProgramMountd.PROGRAM,
      RpcProgramMountd.VERSION_1, PortmapMapping.TRANSPORT_UDP,
      RpcProgramMountd.PORT);
  XDR mappingRequest = PortmapRequest.create(mapEntry);
  RegistrationClient registrationClient = new RegistrationClient(
      "localhost", Nfs3Constant.SUN_RPCBIND, mappingRequest);
  registrationClient.run();

  Thread t1 = new Runtest1();
  // Thread t2 = testa.new Runtest2();
  t1.start();
  // t2.start();
  t1.join();
  // t2.join();
  // testDump();
}
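Nfs3Constant.SUN_RPCBIND here is the standard rpcbind/portmapper port (111): the client first registers the MOUNT program's UDP port mapping with rpcbind before the test thread starts issuing requests.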
Example 11: streamCleanup
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
/**
 * Check the stream status to decide whether it should be closed.
 *
 * @return true to remove the stream; false to keep it
 */
public synchronized boolean streamCleanup(long fileId, long streamTimeout) {
  Preconditions.checkState(
      streamTimeout >= Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
  if (!activeState) {
    return true;
  }
  boolean flag = false;
  // Check the stream timeout
  if (checkStreamTimeout(streamTimeout)) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Stream can be closed for fileId: " + fileId);
    }
    flag = true;
  }
  return flag;
}
Example 12: WriteManager
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
WriteManager(IdUserGroup iug, final Configuration config) {
  this.iug = iug;
  this.config = config;
  streamTimeout = config.getLong(Nfs3Constant.OUTPUT_STREAM_TIMEOUT,
      Nfs3Constant.OUTPUT_STREAM_TIMEOUT_DEFAULT);
  LOG.info("Stream timeout is " + streamTimeout + "ms.");
  if (streamTimeout < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
    LOG.info("Reset stream timeout to minimum value "
        + Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT + "ms.");
    streamTimeout = Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT;
  }
  maxStreams = config.getInt(Nfs3Constant.MAX_OPEN_FILES,
      Nfs3Constant.MAX_OPEN_FILES_DEFAULT);
  LOG.info("Maximum open streams is " + maxStreams);
  this.fileContextCache = new OpenFileCtxCache(config, streamTimeout);
}
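A minimal sketch of the clamping behavior (the one-second value is deliberately below the minimum; assumes the no-arg IdUserGroup constructor used in Example 6):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.nfs.nfs3.Nfs3Constant;

Configuration config = new Configuration();
config.setLong(Nfs3Constant.OUTPUT_STREAM_TIMEOUT, 1000L);  // 1s, below the minimum
WriteManager wm = new WriteManager(new IdUserGroup(), config);
// The constructor logs the reset and clamps streamTimeout up to
// Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT (10s in this Hadoop line).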
Example 13: NfsExports
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
/**
 * Constructor.
 * @param cacheSize The size of the access privilege cache.
 * @param expirationPeriodNano The cache expiration period, in nanoseconds.
 * @param matchHosts A string specifying one or more host matchers.
 */
NfsExports(int cacheSize, long expirationPeriodNano, String matchHosts) {
  this.cacheExpirationPeriod = expirationPeriodNano;
  accessCache = new LightWeightCache<AccessCacheEntry, AccessCacheEntry>(
      cacheSize, cacheSize, expirationPeriodNano, 0);
  String[] matchStrings = matchHosts.split(
      Nfs3Constant.EXPORTS_ALLOWED_HOSTS_SEPARATOR);
  mMatches = new ArrayList<Match>(matchStrings.length);
  for (String mStr : matchStrings) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Processing match string '" + mStr + "'");
    }
    mStr = mStr.trim();
    if (!mStr.isEmpty()) {
      mMatches.add(getMatch(mStr));
    }
  }
}
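Nfs3Constant.EXPORTS_ALLOWED_HOSTS_SEPARATOR is the ';' character, so a multi-matcher spec looks like the sketch below (hosts and privileges are hypothetical; outside callers normally go through NfsExports.getInstance, since this constructor is package-private):

// Each ';'-separated entry is one matcher: an exact host, a wildcard, or a CIDR range.
String matchHosts = "192.168.0.0/22 rw ; host1.example.com ro";
NfsExports exports =
    new NfsExports(512, 15L * 60 * 1000 * 1000 * 1000, matchHosts);  // 15 min in nanos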
Example 14: streamCleanup
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
/**
 * Check the stream status to decide whether it should be closed.
 * @return true to remove the stream; false to keep it
 */
public synchronized boolean streamCleanup(long fileId, long streamTimeout) {
  Preconditions.checkState(
      streamTimeout >= Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT);
  if (!activeState) {
    return true;
  }
  boolean flag = false;
  // Check the stream timeout
  if (checkStreamTimeout(streamTimeout)) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Stream can be closed for fileId: " + fileId);
    }
    flag = true;
  }
  return flag;
}
Example 15: WriteManager
import org.apache.hadoop.nfs.nfs3.Nfs3Constant; // import the dependent package/class
WriteManager(IdUserGroup iug, final Configuration config) {
  this.iug = iug;
  this.config = config;
  streamTimeout = config.getLong(Nfs3Constant.OUTPUT_STREAM_TIMEOUT,
      Nfs3Constant.OUTPUT_STREAM_TIMEOUT_DEFAULT);
  LOG.info("Stream timeout is " + streamTimeout + "ms.");
  if (streamTimeout < Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT) {
    LOG.info("Reset stream timeout to minimum value "
        + Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT + "ms.");
    streamTimeout = Nfs3Constant.OUTPUT_STREAM_TIMEOUT_MIN_DEFAULT;
  }
  maxStreams = config.getInt(Nfs3Constant.MAX_OPEN_FILES,
      Nfs3Constant.MAX_OPEN_FILES_DEFAULT);
  LOG.info("Maximum open streams is " + maxStreams);
  this.fileContextCache = new OpenFileCtxCache(config, streamTimeout);
}