

Java InterfaceAudience Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.classification.InterfaceAudience, drawn from open-source projects. If you are wondering what InterfaceAudience is for or how to use it, the curated examples below should help.


The InterfaceAudience class belongs to the org.apache.hadoop.hbase.classification package. The sections below present 15 code examples of the class, sorted by popularity by default.
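
Before the examples, a note on what the class is: InterfaceAudience is not used directly as a type but as a container of audience-marker annotations (Public, LimitedPrivate, Private in the Hadoop/HBase convention) that declare who may depend on a class or member. A minimal sketch of applying the Private marker, the variant used in every example below (the class name InternalHelper is hypothetical):

import org.apache.hadoop.hbase.classification.InterfaceAudience;

// Marks this class as HBase-internal: no compatibility guarantees are made
// for code outside the HBase project that depends on it.
@InterfaceAudience.Private
public class InternalHelper {
  // internal implementation details
}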

Example 1: getWriteEntry

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
/**
 * Will block until a write entry has been assigned by the WAL subsystem.
 * @return A WriteEntry obtained from the local WAL subsystem. Must be completed by calling
 * mvcc#complete or mvcc#completeAndWait.
 * @throws InterruptedIOException
 * @see
 * #setWriteEntry(org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl.WriteEntry)
 */
@InterfaceAudience.Private // For internal use only.
public MultiVersionConcurrencyControl.WriteEntry getWriteEntry() throws InterruptedIOException {
  try {
    this.seqNumAssignedLatch.await();
  } catch (InterruptedException ie) {
    // If interrupted... clear out our entry else we can block up mvcc.
    MultiVersionConcurrencyControl mvcc = getMvcc();
    LOG.debug("mvcc=" + mvcc + ", writeEntry=" + this.writeEntry);
    if (mvcc != null) {
      if (this.writeEntry != null) {
        mvcc.complete(this.writeEntry);
      }
    }
    InterruptedIOException iie = new InterruptedIOException();
    iie.initCause(ie);
    throw iie;
  }
  return this.writeEntry;
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: WALKey.java
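
The contract in the javadoc above is easy to get wrong: a WriteEntry that is never completed can stall MVCC readers behind it. Here is a hedged sketch of a caller honoring that contract, assuming HBase 1.x package locations; the wrapper class and method are hypothetical, while getWriteEntry, complete, and completeAndWait come from the javadoc above:

import java.io.InterruptedIOException;
import org.apache.hadoop.hbase.regionserver.MultiVersionConcurrencyControl;
import org.apache.hadoop.hbase.wal.WALKey;

// Hypothetical caller: complete the entry on every path, or MVCC stalls.
public class WriteEntryUsage {
  static void applyEdit(WALKey key, MultiVersionConcurrencyControl mvcc)
      throws InterruptedIOException {
    MultiVersionConcurrencyControl.WriteEntry entry = key.getWriteEntry();
    try {
      // ... apply the edit tracked by this WAL key ...
    } finally {
      mvcc.completeAndWait(entry); // or mvcc.complete(entry)
    }
  }
}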

Example 2: Context

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
@InterfaceAudience.Private
public Context(
    final Configuration conf,
    final FileSystem fs,
    final ReplicationPeerConfig peerConfig,
    final String peerId,
    final UUID clusterId,
    final ReplicationPeer replicationPeer,
    final MetricsSource metrics,
    final TableDescriptors tableDescriptors) {
  this.peerConfig = peerConfig;
  this.conf = conf;
  this.fs = fs;
  this.clusterId = clusterId;
  this.peerId = peerId;
  this.replicationPeer = replicationPeer;
  this.metrics = metrics;
  this.tableDescriptors = tableDescriptors;
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: ReplicationEndpoint.java

Example 3: init

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
@InterfaceAudience.Private
protected void init(final byte[] encodedRegionName,
                    final TableName tablename,
                    long logSeqNum,
                    final long now,
                    List<UUID> clusterIds,
                    long nonceGroup,
                    long nonce,
                    MultiVersionConcurrencyControl mvcc) {
  this.logSeqNum = logSeqNum;
  this.writeTime = now;
  this.clusterIds = clusterIds;
  this.encodedRegionName = encodedRegionName;
  this.tablename = tablename;
  this.nonceGroup = nonceGroup;
  this.nonce = nonce;
  this.mvcc = mvcc;
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: WALKey.java

Example 4: createDefaultChannelConnector

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
@InterfaceAudience.Private
public static Connector createDefaultChannelConnector() {
  SelectChannelConnector ret = new SelectChannelConnector();
  ret.setLowResourceMaxIdleTime(10000);
  ret.setAcceptQueueSize(128);
  ret.setResolveNames(false);
  ret.setUseDirectBuffers(false);
  if(Shell.WINDOWS) {
    // The semantics of the SO_REUSEADDR flag differ on Windows:
    // http://msdn.microsoft.com/en-us/library/ms740621(v=vs.85).aspx
    // Without disabling it here, two NameNodes could start on the same machine,
    // listen on the same port, and see indeterminate routing of incoming requests.
    ret.setReuseAddress(false);
  }
  ret.setHeaderBufferSize(1024*64);
  return ret;
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: HttpServer.java

Example 5: createProcedureInfo

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
/**
 * Helper to create the ProcedureInfo from Procedure.
 */
@InterfaceAudience.Private
public static ProcedureInfo createProcedureInfo(final Procedure proc, final NonceKey nonceKey) {
  RemoteProcedureException exception = proc.hasException() ? proc.getException() : null;
  return new ProcedureInfo(
    proc.getProcId(),
    proc.toStringClass(),
    proc.getOwner(),
    proc.getState(),
    proc.hasParent() ? proc.getParentProcId() : -1,
    nonceKey,
    exception != null ?
        RemoteProcedureException.toProto(exception.getSource(), exception.getCause()) : null,
    proc.getLastUpdate(),
    proc.getStartTime(),
    proc.getResult());
}
 
Developer: fengchen8086, Project: ditb, Lines: 20, Source: Procedure.java

Example 6: getDefaultExecutor

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
@InterfaceAudience.Private
public static ThreadPoolExecutor getDefaultExecutor(Configuration conf) {
  int maxThreads = conf.getInt("hbase.htable.threads.max", Integer.MAX_VALUE);
  if (maxThreads == 0) {
    maxThreads = 1; // is there a better default?
  }
  long keepAliveTime = conf.getLong("hbase.htable.threads.keepalivetime", 60);

  // With the "direct handoff" approach, new threads are created only when
  // necessary, and the pool can grow without bound. That could be bad, but in
  // HConnectionManager (HCM) we only create as many Runnables as there are
  // region servers, so the pool also scales as new region servers are added.
  ThreadPoolExecutor pool = new ThreadPoolExecutor(1, maxThreads, keepAliveTime, TimeUnit.SECONDS,
      new SynchronousQueue<Runnable>(), Threads.newDaemonThreadFactory("htable"));
  pool.allowCoreThreadTimeOut(true);
  return pool;
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: HTable.java
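
As a usage sketch: because the pool hands tasks off through a SynchronousQueue, each submission either reuses an idle thread or spawns a new daemon thread immediately, up to maxThreads, instead of queueing. The demo class below is hypothetical; getDefaultExecutor is the public static method shown above:

import java.util.concurrent.ThreadPoolExecutor;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.HTable;

public class PoolDemo {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    ThreadPoolExecutor pool = HTable.getDefaultExecutor(conf);
    // Direct handoff: this task starts on a fresh (or idle) "htable" daemon
    // thread rather than waiting in a work queue.
    pool.submit(new Runnable() {
      @Override
      public void run() {
        System.out.println("running on " + Thread.currentThread().getName());
      }
    });
    pool.shutdown();
  }
}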

Example 7: callMethod

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
@Override
@InterfaceAudience.Private
public void callMethod(Descriptors.MethodDescriptor method,
                       RpcController controller,
                       Message request, Message responsePrototype,
                       RpcCallback<Message> callback) {
  Message response = null;
  try {
    response = callExecService(controller, method, request, responsePrototype);
  } catch (IOException ioe) {
    LOG.warn("Call failed on IOException", ioe);
    ResponseConverter.setControllerException(controller, ioe);
  }
  if (callback != null) {
    callback.run(response);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 18, Source: CoprocessorRpcChannel.java

Example 8: ProcedureInfo

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
@InterfaceAudience.Private
public ProcedureInfo(
    final long procId,
    final String procName,
    final String procOwner,
    final ProcedureState procState,
    final long parentId,
    final NonceKey nonceKey,
    final ForeignExceptionMessage exception,
    final long lastUpdate,
    final long startTime,
    final byte[] result) {
  this.procId = procId;
  this.procName = procName;
  this.procOwner = procOwner;
  this.procState = procState;
  this.parentId = parentId;
  this.nonceKey = nonceKey;
  this.lastUpdate = lastUpdate;
  this.startTime = startTime;

  // If the procedure is completed, we should treat exception and result differently
  this.exception = exception;
  this.result = result;
}
 
Developer: fengchen8086, Project: ditb, Lines: 26, Source: ProcedureInfo.java

Example 9: convert

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
/**
 * Helper to convert the protobuf object.
 * @return A {@link ProcedureInfo} instance converted from the given
 * Protocol Buffers Procedure.
 */
@InterfaceAudience.Private
public static ProcedureInfo convert(final ProcedureProtos.Procedure procProto) {
  NonceKey nonceKey = null;
  if (procProto.getNonce() != HConstants.NO_NONCE) {
    nonceKey = new NonceKey(procProto.getNonceGroup(), procProto.getNonce());
  }

  return new ProcedureInfo(
    procProto.getProcId(),
    procProto.getClassName(),
    procProto.getOwner(),
    procProto.getState(),
    procProto.hasParentId() ? procProto.getParentId() : -1,
    nonceKey,
    procProto.hasException() ? procProto.getException() : null,
    procProto.getLastUpdate(),
    procProto.getStartTime(),
    procProto.hasResult() ? procProto.getResult().toByteArray() : null);
}
 
Developer: fengchen8086, Project: ditb, Lines: 25, Source: ProcedureInfo.java

Example 10: Context

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
@InterfaceAudience.Private
public Context(
    final Configuration conf,
    final FileSystem fs,
    final ReplicationPeerConfig peerConfig,
    final String peerId,
    final UUID clusterId,
    final ReplicationPeer replicationPeer,
    final MetricsSource metrics) {
  this.peerConfig = peerConfig;
  this.conf = conf;
  this.fs = fs;
  this.clusterId = clusterId;
  this.peerId = peerId;
  this.replicationPeer = replicationPeer;
  this.metrics = metrics;
}
 
Developer: grokcoder, Project: pbase, Lines: 18, Source: ReplicationEndpoint.java

Example 11: callMethod

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
@Override
@InterfaceAudience.Private
public void callMethod(Descriptors.MethodDescriptor method,
                       RpcController controller,
                       Message request, Message responsePrototype,
                       RpcCallback<Message> callback) {
  Message response = null;
  try {
    response = callExecService(method, request, responsePrototype);
  } catch (IOException ioe) {
    LOG.warn("Call failed on IOException", ioe);
    ResponseConverter.setControllerException(controller, ioe);
  }
  if (callback != null) {
    callback.run(response);
  }
}
 
Developer: grokcoder, Project: pbase, Lines: 18, Source: CoprocessorRpcChannel.java

Example 12: legacyWarning

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
/**
 * Limits the logging to once per coprocessor class.
 * Used in concert with {@link #useLegacyMethod(Class, String, Class[])} when a runtime issue
 * prevents properly supporting the legacy version of a coprocessor API.
 * Since coprocessors can run in tight loops, this limits the amount of log spam we create.
 */
@InterfaceAudience.Private
protected void legacyWarning(final Class<? extends Coprocessor> clazz, final String message) {
  if(legacyWarning.add(clazz)) {
    LOG.error("You have a legacy coprocessor loaded and there are events we can't map to the " +
        " deprecated API. Your coprocessor will not see these events.  Please update '" + clazz +
        "'. Details of the problem: " + message);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 15, Source: CoprocessorHost.java

Example 13: RegionStateTransitionContext

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
@InterfaceAudience.Private
public RegionStateTransitionContext(TransitionCode code, long openSeqNum, long masterSystemTime,
    HRegionInfo... hris) {
  this.code = code;
  this.openSeqNum = openSeqNum;
  this.masterSystemTime = masterSystemTime;
  this.hris = hris;
}
 
Developer: fengchen8086, Project: ditb, Lines: 9, Source: RegionServerServices.java

Example 14: setTimeoutFailure

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
@InterfaceAudience.Private
protected synchronized boolean setTimeoutFailure() {
  if (state == ProcedureState.WAITING_TIMEOUT) {
    long timeDiff = EnvironmentEdgeManager.currentTime() - lastUpdate;
    setFailure("ProcedureExecutor", new TimeoutIOException(
      "Operation timed out after " + StringUtils.humanTimeDiff(timeDiff)));
    return true;
  }
  return false;
}
 
Developer: fengchen8086, Project: ditb, Lines: 11, Source: Procedure.java

Example 15: setProcId

import org.apache.hadoop.hbase.classification.InterfaceAudience; // import the required package/class
/**
 * Called by the ProcedureExecutor to assign the ID to the newly created procedure.
 */
@VisibleForTesting
@InterfaceAudience.Private
protected void setProcId(final long procId) {
  this.procId = procId;
  this.startTime = EnvironmentEdgeManager.currentTime();
  setState(ProcedureState.RUNNABLE);
}
 
Developer: fengchen8086, Project: ditb, Lines: 11, Source: Procedure.java


Note: The org.apache.hadoop.hbase.classification.InterfaceAudience class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's License. Do not reproduce without permission.