本文整理匯總了Java中org.apache.hadoop.classification.InterfaceAudience.Private方法的典型用法代碼示例。如果您正苦於以下問題:Java InterfaceAudience.Private方法的具體用法?Java InterfaceAudience.Private怎麽用?Java InterfaceAudience.Private使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類org.apache.hadoop.classification.InterfaceAudience的用法示例。
在下文中一共展示了InterfaceAudience.Private方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: createDefaultChannelConnector
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
/**
 * Creates a Jetty NIO connector configured with the defaults used by
 * Hadoop's embedded HTTP servers (bounded accept queue, no name
 * resolution, heap buffers, 64&nbsp;KB header buffer).
 *
 * @return a newly configured {@code SelectChannelConnector}
 */
@InterfaceAudience.Private
public static Connector createDefaultChannelConnector() {
  SelectChannelConnector connector = new SelectChannelConnector();
  connector.setLowResourceMaxIdleTime(10000);
  connector.setAcceptQueueSize(128);
  connector.setResolveNames(false);
  connector.setUseDirectBuffers(false);
  if (Shell.WINDOWS) {
    // result of setting the SO_REUSEADDR flag is different on Windows
    // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
    // without this 2 NN's can start on the same machine and listen on
    // the same port with indeterminate routing of incoming requests to them
    connector.setReuseAddress(false);
  }
  connector.setHeaderBufferSize(1024*64);
  return connector;
}
示例2: getClient
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
/**
 * Obtains an RPC {@code Client} for the given configuration from the
 * shared client cache, using the default socket factory. Exposed for
 * testing only.
 *
 * @param conf configuration used to look up / build the client
 * @return the cached client instance
 */
@VisibleForTesting
@InterfaceAudience.Private
@InterfaceStability.Unstable
static Client getClient(Configuration conf) {
  SocketFactory defaultFactory = SocketFactory.getDefault();
  return CLIENTS.getClient(conf, defaultFactory, RpcResponseWrapper.class);
}
示例3: getReader
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
/**
 * Builds a reflection-based Avro reader for the given class.
 *
 * @param clazz class to deserialize into
 * @return a {@code ReflectDatumReader} for {@code clazz}
 * @throws RuntimeException wrapping any failure during construction
 */
@InterfaceAudience.Private
@Override
public DatumReader getReader(Class<Object> clazz) {
  final DatumReader reader;
  try {
    reader = new ReflectDatumReader(clazz);
  } catch (Exception e) {
    // Surface any construction failure unchecked, keeping the cause.
    throw new RuntimeException(e);
  }
  return reader;
}
示例4: getOptions
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
/** Returns the current find options, creating them if necessary. */
@InterfaceAudience.Private
FindOptions getOptions() {
  // Lazy initialization: build the options only on first access.
  if (options != null) {
    return options;
  }
  options = createOptions();
  return options;
}
示例5: init
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
@Override
@InterfaceAudience.Private
public void init(String contextName, ContextFactory factory) {
// Run the base context initialization first, then read all
// Ganglia-specific attributes from the factory configuration.
super.init(contextName, factory);
// Emission period is configured via PERIOD_PROPERTY.
parseAndSetPeriod(PERIOD_PROPERTY);
// Target metric servers; DEFAULT_PORT applies when an entry omits a port.
metricsServers =
Util.parse(getAttribute(SERVERS_PROPERTY), DEFAULT_PORT);
// Lookup tables keyed by the corresponding configured property.
unitsTable = getAttributeTable(UNITS_PROPERTY);
slopeTable = getAttributeTable(SLOPE_PROPERTY);
tmaxTable = getAttributeTable(TMAX_PROPERTY);
dmaxTable = getAttributeTable(DMAX_PROPERTY);
// Optional multicast mode; TTL falls back to the default when unset.
multicastEnabled = Boolean.parseBoolean(getAttribute(MULTICAST_PROPERTY));
String multicastTtlValue = getAttribute(MULTICAST_TTL_PROPERTY);
if (multicastEnabled) {
if (multicastTtlValue == null) {
multicastTtl = DEFAULT_MULTICAST_TTL;
} else {
// May throw NumberFormatException on a malformed TTL value.
multicastTtl = Integer.parseInt(multicastTtlValue);
}
}
// NOTE(review): a socket-creation failure below is only logged, leaving
// datagramSocket unset — presumably intended as best-effort; confirm
// that later emission tolerates this.
try {
if (multicastEnabled) {
LOG.info("Enabling multicast for Ganglia with TTL " + multicastTtl);
datagramSocket = new MulticastSocket();
((MulticastSocket) datagramSocket).setTimeToLive(multicastTtl);
} else {
datagramSocket = new DatagramSocket();
}
} catch (IOException e) {
LOG.error(e);
}
}
示例6: close
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
/** Closes every child metrics context held by this composite context. */
@InterfaceAudience.Private
@Override
public void close() {
  for (MetricsContext child : subctxt) {
    child.close();
  }
}
示例7: setTokenServiceUseIp
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
/**
 * For use only by tests and initialization
 */
@InterfaceAudience.Private
@VisibleForTesting
public static void setTokenServiceUseIp(boolean flag) {
  useIpForTokenService = flag;
  // Pick the matching resolver: IP-based services use the standard
  // resolver, hostname-based services use the qualified-host resolver.
  if (useIpForTokenService) {
    hostResolver = new StandardHostResolver();
  } else {
    hostResolver = new QualifiedHostResolver();
  }
}
示例8: emitRecord
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
@Override
@InterfaceAudience.Private
public void emitRecord(String contextName, String recordName,
                       OutputRecord outRec)
    throws IOException {
  // Setup so that the records have the proper leader names so they are
  // unambiguous at the ganglia level, and this prevents a lot of rework
  StringBuilder namePrefix = new StringBuilder();
  namePrefix.append(contextName).append('.');
  if (contextName.equals("jvm") && outRec.getTag("processName") != null) {
    namePrefix.append(outRec.getTag("processName")).append('.');
  }
  namePrefix.append(recordName).append('.');
  final int prefixLength = namePrefix.length();
  // emit each metric in turn
  for (String metricName : outRec.getMetricNames()) {
    Object metric = outRec.getMetric(metricName);
    String type = typeTable.get(metric.getClass());
    if (type == null) {
      LOG.warn("Unknown metrics type: " + metric.getClass());
      continue;
    }
    namePrefix.append(metricName);
    emitMetric(namePrefix.toString(), type, metric.toString());
    // Truncate back to the shared prefix for the next metric.
    namePrefix.setLength(prefixLength);
  }
}
示例9: write
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
@Override
@InterfaceAudience.Private
public void write(DataOutput out) throws IOException {
// Serializes this object's fields. The field order defines the wire
// format and must mirror the corresponding read/deserialization path —
// do not reorder these writes.
out.writeLong(blockSize);
out.writeInt(bytesPerChecksum);
out.writeInt(writePacketSize);
out.writeShort(replication);
out.writeInt(fileBufferSize);
// Checksum type enum is written via Hadoop's WritableUtils helper.
WritableUtils.writeEnum(out, checksumType);
}
示例10: restartResourceManager
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
/**
 * Stops the ResourceManager at the given index (if running), then creates,
 * re-initializes and restarts it using the cluster's current configuration.
 *
 * @param index index of the ResourceManager to restart
 * @throws InterruptedException if interrupted while restarting
 */
@InterfaceAudience.Private
@VisibleForTesting
public synchronized void restartResourceManager(int index)
    throws InterruptedException {
  if (resourceManagers[index] != null) {
    resourceManagers[index].stop();
    resourceManagers[index] = null;
  }
  // Fix: removed a dead local (`Configuration conf = getConfig();`) that
  // was assigned but never used — init below re-reads the config itself.
  resourceManagers[index] = new ResourceManager();
  initResourceManager(index, getConfig());
  startResourceManager(index);
}
示例11: flush
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
/**
 * Flushes each child metrics context. A flush failure in one child is
 * logged and does not prevent the remaining children from being flushed.
 */
@InterfaceAudience.Private
@Override
protected void flush() throws IOException {
  for (MetricsContext child : subctxt) {
    try {
      ((AbstractMetricsContext) child).flush();
    } catch (IOException e) {
      LOG.warn("flush failed: " + child.getContextName(), e);
    }
  }
}
示例12: AbstractCounters
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
/**
 * Constructs a counters instance that uses the supplied factory to create
 * counter groups.
 *
 * @param gf the counter-group factory to use
 */
@InterfaceAudience.Private
public AbstractCounters(CounterGroupFactory<C, G> gf) {
  this.groupFactory = gf;
}
示例13: getWriter
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
/**
 * Builds a reflection-based Avro writer.
 *
 * @param clazz class to serialize (not needed to construct the writer)
 * @return a new {@code ReflectDatumWriter}
 */
@InterfaceAudience.Private
@Override
public DatumWriter getWriter(Class<Object> clazz) {
  DatumWriter writer = new ReflectDatumWriter();
  return writer;
}
示例14: isShowQuotas
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
/**
 * Should quotas get shown as part of the report?
 * @return true if quotas should be shown, otherwise false
 */
@InterfaceAudience.Private
boolean isShowQuotas() {
  return this.showQuotas;
}
示例15: isShowQuotabyType
import org.apache.hadoop.classification.InterfaceAudience; //導入方法依賴的package包/類
/**
 * Should quota usage be reported per storage type?
 * @return true if quota-by-storage-type reporting is enabled
 */
@InterfaceAudience.Private
boolean isShowQuotabyType() {
  return this.showQuotabyType;
}