This article collects typical usage examples of the Java method org.apache.hadoop.net.DNS.getDefaultIP. If you are wondering what DNS.getDefaultIP does, how to call it, or what real code that uses it looks like, the curated examples here should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.net.DNS.
Below are 15 code examples of DNS.getDefaultIP, sorted by popularity by default.
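Before the collected examples, here is a minimal, self-contained sketch of the call itself. It is an illustration, not taken from the examples below: the class name DefaultIpDemo, the fallback value, and the mention of "eth0" are assumptions. DNS.getDefaultIP takes a network interface name, returns an IP address for it, and throws UnknownHostException when the interface cannot be resolved; the pseudo-interface name "default" asks Hadoop to fall back to the local host address, which is the pattern every example below relies on.

import java.net.UnknownHostException;
import org.apache.hadoop.net.DNS;

public class DefaultIpDemo {
  public static void main(String[] args) {
    String ip;
    try {
      // "default" is the pseudo-interface used throughout the examples below;
      // a concrete interface name such as "eth0" could be passed instead.
      ip = DNS.getDefaultIP("default");
    } catch (UnknownHostException e) {
      // The examples below typically fall back to a placeholder on failure.
      ip = "unknownIP";
    }
    System.out.println("default IP = " + ip);
  }
}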
Example 1: register
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports,
      new BlockReportContext(1, 0, System.nanoTime()));
}
Example 2: register
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = dataNodeProto.registerDatanode(dnRegistration);
  dnRegistration.setNamespaceInfo(nsInfo);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage, BlockListAsLongs.EMPTY)
  };
  dataNodeProto.blockReport(dnRegistration, bpid, reports,
      new BlockReportContext(1, 0, System.nanoTime(), 0L));
}
Example 3: register
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports,
      new BlockReportContext(1, 0, System.nanoTime()));
}
Example 4: getUniqueRackPrefix
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
static private String getUniqueRackPrefix() {
  String ip = "unknownIP";
  try {
    ip = DNS.getDefaultIP("default");
  } catch (UnknownHostException ignored) {
    System.out.println("Could not find ip address of \"default\" interface.");
  }
  int rand = 0;
  try {
    rand = SecureRandom.getInstance("SHA1PRNG").nextInt(Integer.MAX_VALUE);
  } catch (NoSuchAlgorithmException e) {
    rand = (new Random()).nextInt(Integer.MAX_VALUE);
  }
  return "/Rack-" + rand + "-" + ip + "-" +
      System.currentTimeMillis();
}
Example 5: createNewStorageId
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
public static String createNewStorageId(int port) {
  /* Return
   * "DS-randInt-ipaddr-currentTimeMillis"
   * It is considered extremely rare for all these numbers to match
   * on a different machine accidentally for the following reasons:
   * a) SecureRandom(INT_MAX) is pretty much random (1 in 2 billion), and
   * b) Good chance ip address would be different, and
   * c) Even on the same machine, Datanode is designed to use different ports.
   * d) Good chance that these are started at different times.
   * For a conflict to occur all the 4 above have to match!
   * The format of this string can be changed anytime in future without
   * affecting its functionality.
   */
  String ip = "unknownIP";
  try {
    ip = DNS.getDefaultIP("default");
  } catch (UnknownHostException ignored) {
    LOG.warn("Could not find ip address of \"default\" interface.");
  }
  int rand = getSecureRandom().nextInt(Integer.MAX_VALUE);
  return "DS-" + rand + "-" + ip + "-" + port + "-" +
      System.currentTimeMillis();
}
Example 6: createNewStorageId
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
/**
 * @return a unique storage ID of form "DS-randInt-ipaddr-port-timestamp"
 */
static String createNewStorageId(int port) {
  // It is unlikely that we will create a non-unique storage ID
  // for the following reasons:
  // a) SecureRandom is a cryptographically strong random number generator
  // b) IP addresses will likely differ on different hosts
  // c) DataNode xfer ports will differ on the same host
  // d) StorageIDs will likely be generated at different times (in ms)
  // A conflict requires that all four conditions are violated.
  // NB: The format of this string can be changed in the future without
  // requiring that old StorageIDs be updated.
  String ip = "unknownIP";
  try {
    ip = DNS.getDefaultIP("default");
  } catch (UnknownHostException ignored) {
    LOG.warn("Could not find an IP address for the \"default\" interface.");
  }
  int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
  return "DS-" + rand + "-" + ip + "-" + port + "-" + Time.now();
}
Example 7: register
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
Example 8: register
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          DataNode.generateUuid(), getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(DatanodeStorage.generateUuid());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
Example 9: register
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"), "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""), new ExportedBlockKeys(),
      VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {new StorageBlockReport(storage,
      BlockReport.builder(NUM_BUCKETS).build())};
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
Example 10: register
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
void register() throws IOException {
  // get versions from the namenode
  nsInfo = nameNodeProto.versionRequest();
  dnRegistration = new DatanodeRegistration(
      new DatanodeID(DNS.getDefaultIP("default"),
          DNS.getDefaultHost("default", "default"),
          "", getNodePort(dnIdx),
          DFSConfigKeys.DFS_DATANODE_HTTP_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_HTTPS_DEFAULT_PORT,
          DFSConfigKeys.DFS_DATANODE_IPC_DEFAULT_PORT),
      new DataStorage(nsInfo, ""),
      new ExportedBlockKeys(), VersionInfo.getVersion());
  DataNode.setNewStorageID(dnRegistration);
  // register datanode
  dnRegistration = nameNodeProto.registerDatanode(dnRegistration);
  // first block reports
  storage = new DatanodeStorage(dnRegistration.getStorageID());
  final StorageBlockReport[] reports = {
      new StorageBlockReport(storage,
          new BlockListAsLongs(null, null).getBlockListAsLongs())
  };
  nameNodeProto.blockReport(dnRegistration,
      nameNode.getNamesystem().getBlockPoolId(), reports);
}
Example 11: newBlockPoolID
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
/**
 * Generate a new blockpoolID.
 *
 * @return new blockpoolID
 */
static String newBlockPoolID() throws UnknownHostException {
  String ip = "unknownIP";
  try {
    ip = DNS.getDefaultIP("default");
  } catch (UnknownHostException e) {
    LOG.warn("Could not find ip address of \"default\" interface.");
    throw e;
  }
  int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
  String bpid = "BP-" + rand + "-" + ip + "-" + Time.now();
  return bpid;
}
Example 12: getUniqueRackPrefix
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
static private String getUniqueRackPrefix() {
  String ip = "unknownIP";
  try {
    ip = DNS.getDefaultIP("default");
  } catch (UnknownHostException ignored) {
    System.out.println("Could not find ip address of \"default\" interface.");
  }
  int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
  return "/Rack-" + rand + "-" + ip + "-" + Time.now();
}
Example 13: getNodeName
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
/**
 * Get a data-node name in the form
 * <host name> : <port>
 * where port is a 6 digit integer.
 * This is necessary in order to provide lexicographic ordering.
 * Host names are all the same, the ordering goes by port numbers.
 */
private static String getNodeName(int port) throws IOException {
  String machineName = DNS.getDefaultIP("default");
  String sPort = String.valueOf(100000 + port);
  if (sPort.length() > 6)
    throw new IOException("Too many data-nodes.");
  return machineName + ":" + sPort;
}
Example 14: newBlockPoolID
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
static String newBlockPoolID() throws UnknownHostException {
  String ip = "unknownIP";
  try {
    ip = DNS.getDefaultIP("default");
  } catch (UnknownHostException e) {
    System.out.println("Could not find ip address of \"default\" interface.");
    throw e;
  }
  int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
  String bpid = "BP-" + rand + "-" + ip + "-" + Time.now();
  return bpid;
}
Example 15: getUniqueRackPrefix
import org.apache.hadoop.net.DNS; // import the package/class on which this method depends
static private String getUniqueRackPrefix() {
  String ip = "unknownIP";
  try {
    ip = DNS.getDefaultIP("default");
  } catch (UnknownHostException ignored) {
    System.out.println("Could not find ip address of \"default\" interface.");
  }
  int rand = DFSUtil.getSecureRandom().nextInt(Integer.MAX_VALUE);
  return "/Rack-" + rand + "-" + ip + "-" + Time.now();
}