

Java MemoryBoundedLogMessageBuffer Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer. If you are wondering what exactly the MemoryBoundedLogMessageBuffer class does, how to use it, or where to find examples of it in use, the hand-picked class examples below should help.


The MemoryBoundedLogMessageBuffer class belongs to the org.apache.hadoop.hbase.monitoring package. A total of 8 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
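Before the HMaster examples, here is a minimal standalone sketch of how the class itself is typically used: construct the buffer with a byte bound, append messages, and dump whatever is still retained to a writer. It assumes the buffer's add(String) and dumpTo(PrintWriter) methods from the HBase monitoring package, and the 1 MB bound mirrors the "hbase.master.buffer.for.rs.fatals" default seen in the examples below; treat it as an illustrative sketch rather than code taken from any of the listed projects.

import java.io.PrintWriter;

import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;

public class FatalLogBufferSketch {
  public static void main(String[] args) {
    // Bound the buffer to 1 MB, matching the default of
    // "hbase.master.buffer.for.rs.fatals" used by the HMaster constructors below.
    MemoryBoundedLogMessageBuffer buffer =
        new MemoryBoundedLogMessageBuffer(1024 * 1024);

    // Append messages; once the memory bound is exceeded, the oldest entries are dropped.
    buffer.add("ABORTING region server rs1,16020,1700000000000: OutOfMemoryError");
    buffer.add("ABORTING region server rs2,16020,1700000000001: WAL sync failed");

    // Dump the retained, timestamped messages, e.g. for a diagnostics page.
    PrintWriter out = new PrintWriter(System.out, true);
    buffer.dumpTo(out);
    out.flush();
  }
}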

Example 1: HMaster

import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; // import the required package/class
/**
 * Initializes the HMaster. The steps are as follows:
 * <p>
 * <ol>
 * <li>Initialize the local HRegionServer
 * <li>Start the ActiveMasterManager.
 * </ol>
 * <p>
 * Remaining steps of initialization occur in
 * #finishActiveMasterInitialization(MonitoredTask) after
 * the master becomes the active one.
 *
 * @throws InterruptedException
 * @throws KeeperException
 * @throws IOException
 */
public HMaster(final Configuration conf, CoordinatedStateManager csm)
    throws IOException, KeeperException, InterruptedException {
  super(conf, csm);
  this.rsFatals = new MemoryBoundedLogMessageBuffer(
    conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));

  LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
    ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));

  // Disable usage of meta replicas in the master
  this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);

  Replication.decorateMasterConfiguration(this.conf);

  // Hack! Maps DFSClient => Master for logs.  HDFS made this
  // config param for task trackers, but we can piggyback off of it.
  if (this.conf.get("mapreduce.task.attempt.id") == null) {
    this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
  }

  // should we check the compression codec type at master side, default true, HBASE-6370
  this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);

  // should we check encryption settings at master side, default true
  this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);

  this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));

  // preload table descriptor at startup
  this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);

  // Do we publish the status?

  boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
      HConstants.STATUS_PUBLISHED_DEFAULT);
  Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
      conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.Publisher.class);

  if (shouldPublish) {
    if (publisherClass == null) {
      LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
          ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
          " is not set - not publishing status");
    } else {
      clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
      getChoreService().scheduleChore(clusterStatusPublisherChore);
    }
  }

  // Some unit tests don't need a cluster, so no zookeeper at all
  if (!conf.getBoolean("hbase.testing.nocluster", false)) {
    activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
    int infoPort = putUpJettyServer();
    startActiveMasterManager(infoPort);
  } else {
    activeMasterManager = null;
  }
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 77, Source: HMaster.java

Example 2: getRegionServerFatalLogBuffer

import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; // import the required package/class
public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
  return rsFatals;
}
 
Developer: fengchen8086, Project: ditb, Lines of code: 4, Source: HMaster.java
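This getter is how other parts of the master reach the rsFatals buffer created in Example 1, for instance when region servers report fatal errors or when a debug dump of recent fatals is requested. The helper below is a hypothetical consumer showing one way the returned buffer might be dumped; the class and method names and the HMaster parameter are illustrative, and only getRegionServerFatalLogBuffer() comes from the code shown on this page (dumpTo(PrintWriter) is assumed from the upstream monitoring package, as in the sketch above).

import java.io.PrintWriter;

import org.apache.hadoop.hbase.master.HMaster;
import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer;

// Hypothetical helper: not part of any project listed on this page.
public class RsFatalsDumper {
  /**
   * Writes every region-server fatal message that is still retained within
   * the buffer's memory bound to the given writer.
   */
  static void dumpRegionServerFatals(HMaster master, PrintWriter out) {
    MemoryBoundedLogMessageBuffer fatals = master.getRegionServerFatalLogBuffer();
    out.println("Recent region server fatal errors:");
    fatals.dumpTo(out); // prints the retained, timestamped messages
  }
}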

Example 3: HMaster

import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; // import the required package/class
/**
 * Initializes the HMaster. The steps are as follows:
 * <p/>
 * <ol>
 * <li>Initialize the local HRegionServer
 * <li>Start the ActiveMasterManager.
 * </ol>
 * <p/>
 * Remaining steps of initialization occur in
 * #finishActiveMasterInitialization(MonitoredTask) after
 * the master becomes the active one.
 *
 * @throws InterruptedException
 * @throws KeeperException
 * @throws IOException
 */
public HMaster(final Configuration conf, CoordinatedStateManager csm)
        throws IOException, KeeperException, InterruptedException {
    super(conf, csm);
    this.rsFatals = new MemoryBoundedLogMessageBuffer(
            conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));

    LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
            ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));

    Replication.decorateMasterConfiguration(this.conf);

    // Hack! Maps DFSClient => Master for logs.  HDFS made this
    // config param for task trackers, but we can piggyback off of it.
    if (this.conf.get("mapreduce.task.attempt.id") == null) {
        this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
    }

    // should we check the compression codec type at master side, default true, HBASE-6370
    this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);

    // should we check encryption settings at master side, default true
    this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);

    this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));
    // interface exposed for use by Hadoop

    // preload table descriptor at startup
    this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);

    // Do we publish the status?

    boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
            HConstants.STATUS_PUBLISHED_DEFAULT);
    Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
            conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
                    ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
                    ClusterStatusPublisher.Publisher.class);

    if (shouldPublish) {
        if (publisherClass == null) {
            LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
                    ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
                    " is not set - not publishing status");
        } else {
            clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
            Threads.setDaemonThreadRunning(clusterStatusPublisherChore.getThread());
        }
    }
    activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
    int infoPort = putUpJettyServer();
    startActiveMasterManager(infoPort);
}
 
Developer: grokcoder, Project: pbase, Lines of code: 69, Source: HMaster.java

Example 4: getRegionServerFatalLogBuffer

import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; // import the required package/class
public MemoryBoundedLogMessageBuffer getRegionServerFatalLogBuffer() {
    return rsFatals;
}
 
Developer: grokcoder, Project: pbase, Lines of code: 4, Source: HMaster.java

Example 5: HMaster

import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; // import the required package/class
/**
 * Initializes the HMaster. The steps are as follows:
 * <p>
 * <ol>
 * <li>Initialize the local HRegionServer
 * <li>Start the ActiveMasterManager.
 * </ol>
 * <p>
 * Remaining steps of initialization occur in
 * #finishActiveMasterInitialization(MonitoredTask) after
 * the master becomes the active one.
 */
public HMaster(final Configuration conf)
    throws IOException, KeeperException {
  super(conf);
  TraceUtil.initTracer(conf);
  try {
    this.rsFatals = new MemoryBoundedLogMessageBuffer(
        conf.getLong("hbase.master.buffer.for.rs.fatals", 1 * 1024 * 1024));
    LOG.info("hbase.rootdir=" + getRootDir() +
        ", hbase.cluster.distributed=" + this.conf.getBoolean(HConstants.CLUSTER_DISTRIBUTED, false));

    // Disable usage of meta replicas in the master
    this.conf.setBoolean(HConstants.USE_META_REPLICAS, false);

    decorateMasterConfiguration(this.conf);

    // Hack! Maps DFSClient => Master for logs.  HDFS made this
    // config param for task trackers, but we can piggyback off of it.
    if (this.conf.get("mapreduce.task.attempt.id") == null) {
      this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
    }

    // should we check the compression codec type at master side, default true, HBASE-6370
    this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);

    // should we check encryption settings at master side, default true
    this.masterCheckEncryption = conf.getBoolean("hbase.master.check.encryption", true);

    this.metricsMaster = new MetricsMaster(new MetricsMasterWrapperImpl(this));

    // preload table descriptor at startup
    this.preLoadTableDescriptors = conf.getBoolean("hbase.master.preload.tabledescriptors", true);

    this.maxBlancingTime = getMaxBalancingTime();
    this.maxRitPercent = conf.getDouble(HConstants.HBASE_MASTER_BALANCER_MAX_RIT_PERCENT,
        HConstants.DEFAULT_HBASE_MASTER_BALANCER_MAX_RIT_PERCENT);

    // Do we publish the status?

    boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
        HConstants.STATUS_PUBLISHED_DEFAULT);
    Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
        conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
            ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
            ClusterStatusPublisher.Publisher.class);

    if (shouldPublish) {
      if (publisherClass == null) {
        LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
            ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
            " is not set - not publishing status");
      } else {
        clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
        getChoreService().scheduleChore(clusterStatusPublisherChore);
      }
    }

    // Some unit tests don't need a cluster, so no zookeeper at all
    if (!conf.getBoolean("hbase.testing.nocluster", false)) {
      this.activeMasterManager = new ActiveMasterManager(zooKeeper, this.serverName, this);
    } else {
      this.activeMasterManager = null;
    }
  } catch (Throwable t) {
    // Make sure we log the exception. HMaster is often started via reflection and the
    // cause of failed startup is lost.
    LOG.error("Failed construction of Master", t);
    throw t;
  }
}
 
Developer: apache, Project: hbase, Lines of code: 82, Source: HMaster.java

Example 6: HMaster

import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; // import the required package/class
/**
 * Initializes the HMaster. The steps are as follows:
 * <p>
 * <ol>
 * <li>Initialize HMaster RPC and address
 * <li>Connect to ZooKeeper.
 * </ol>
 * <p>
 * Remaining steps of initialization occur in {@link #run()} so that they
 * run in their own thread rather than within the context of the constructor.
 * @throws InterruptedException
 */
public HMaster(final Configuration conf)
throws IOException, KeeperException, InterruptedException {
  this.conf = new Configuration(conf);
  // Disable the block cache on the master
  this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
  // Set how many times to retry talking to another server over HConnection.
  HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG);
  // Server to handle client requests.
  String hostname = DNS.getDefaultHost(
    conf.get("hbase.master.dns.interface", "default"),
    conf.get("hbase.master.dns.nameserver", "default"));
  int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
  // Creation of a HSA will force a resolve.
  InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
  if (initialIsa.getAddress() == null) {
    throw new IllegalArgumentException("Failed resolve of " + this.isa);
  }
  int numHandlers = conf.getInt("hbase.master.handler.count",
    conf.getInt("hbase.regionserver.handler.count", 25));
  this.rpcServer = HBaseRPC.getServer(this,
    new Class<?>[]{HMasterInterface.class, HMasterRegionInterface.class},
      initialIsa.getHostName(), // BindAddress is IP we got for this server.
      initialIsa.getPort(),
      numHandlers,
      0, // we dont use high priority handlers in master
      conf.getBoolean("hbase.rpc.verbose", false), conf,
      0); // this is a DNC w/o high priority handlers
  // Set our address.
  this.isa = this.rpcServer.getListenerAddress();
  this.serverName = new ServerName(this.isa.getHostName(),
    this.isa.getPort(), System.currentTimeMillis());
  this.rsFatals = new MemoryBoundedLogMessageBuffer(
      conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));

  // initialize server principal (if using secure Hadoop)
  User.login(conf, "hbase.master.keytab.file",
    "hbase.master.kerberos.principal", this.isa.getHostName());

  // set the thread name now we have an address
  setName(MASTER + "-" + this.serverName.toString());

  Replication.decorateMasterConfiguration(this.conf);

  // Hack! Maps DFSClient => Master for logs.  HDFS made this
  // config param for task trackers, but we can piggyback off of it.
  if (this.conf.get("mapred.task.id") == null) {
    this.conf.set("mapred.task.id", "hb_m_" + this.serverName.toString());
  }

  this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" + isa.getPort(), this, true);
  this.rpcServer.startThreads();
  this.metrics = new MasterMetrics(getServerName().toString());
}
 
Developer: lifeng5042, Project: RStore, Lines of code: 66, Source: HMaster.java

Example 7: HMaster

import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; // import the required package/class
/**
 * Initializes the HMaster. The steps are as follows:
 * <p>
 * <ol>
 * <li>Initialize the local HRegionServer
 * <li>Start the ActiveMasterManager.
 * </ol>
 * <p>
 * Remaining steps of initialization occur in
 * {@link #finishActiveMasterInitialization(MonitoredTask)} after
 * the master becomes the active one.
 *
 * @throws InterruptedException
 * @throws KeeperException
 * @throws IOException
 */
public HMaster(final Configuration conf, CoordinatedStateManager csm)
    throws IOException, KeeperException, InterruptedException {
  super(conf, csm);
  this.rsFatals = new MemoryBoundedLogMessageBuffer(
    conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));

  LOG.info("hbase.rootdir=" + FSUtils.getRootDir(this.conf) +
      ", hbase.cluster.distributed=" + this.conf.getBoolean("hbase.cluster.distributed", false));

  Replication.decorateMasterConfiguration(this.conf);

  // Hack! Maps DFSClient => Master for logs.  HDFS made this
  // config param for task trackers, but we can piggyback off of it.
  if (this.conf.get("mapreduce.task.attempt.id") == null) {
    this.conf.set("mapreduce.task.attempt.id", "hb_m_" + this.serverName.toString());
  }

  //should we check the compression codec type at master side, default true, HBASE-6370
  this.masterCheckCompression = conf.getBoolean("hbase.master.check.compression", true);

  this.metricsMaster = new MetricsMaster( new MetricsMasterWrapperImpl(this));

  // Do we publish the status?
  boolean shouldPublish = conf.getBoolean(HConstants.STATUS_PUBLISHED,
      HConstants.STATUS_PUBLISHED_DEFAULT);
  Class<? extends ClusterStatusPublisher.Publisher> publisherClass =
      conf.getClass(ClusterStatusPublisher.STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS,
          ClusterStatusPublisher.Publisher.class);

  if (shouldPublish) {
    if (publisherClass == null) {
      LOG.warn(HConstants.STATUS_PUBLISHED + " is true, but " +
          ClusterStatusPublisher.DEFAULT_STATUS_PUBLISHER_CLASS +
          " is not set - not publishing status");
    } else {
      clusterStatusPublisherChore = new ClusterStatusPublisher(this, conf, publisherClass);
      Threads.setDaemonThreadRunning(clusterStatusPublisherChore.getThread());
    }
  }
  startActiveMasterManager();
  putUpJettyServer();
  LOG.info("Shen Li: HMaster hostname " + getServerName().getHostname());
}
 
Developer: shenli-uiuc, Project: PyroDB, Lines of code: 61, Source: HMaster.java

Example 8: HMaster

import org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer; // import the required package/class
/**
 * Initializes the HMaster. The steps are as follows:
 * <p>
 * <ol>
 * <li>Initialize HMaster RPC and address
 * <li>Connect to ZooKeeper.
 * </ol>
 * <p>
 * Remaining steps of initialization occur in {@link #run()} so that they
 * run in their own thread rather than within the context of the constructor.
 * @throws InterruptedException
 */
public HMaster(final Configuration conf)
throws IOException, KeeperException, InterruptedException {
  this.conf = new Configuration(conf);
  // Disable the block cache on the master
  this.conf.setFloat(HConstants.HFILE_BLOCK_CACHE_SIZE_KEY, 0.0f);
  // Set how many times to retry talking to another server over HConnection.
  HConnectionManager.setServerSideHConnectionRetries(this.conf, LOG);
  // Server to handle client requests.
  String hostname = conf.get("hbase.master.ipc.address",
    Strings.domainNamePointerToHostName(DNS.getDefaultHost(
      conf.get("hbase.master.dns.interface", "default"),
      conf.get("hbase.master.dns.nameserver", "default"))));
  int port = conf.getInt(HConstants.MASTER_PORT, HConstants.DEFAULT_MASTER_PORT);
  // Test that the hostname is reachable
  InetSocketAddress initialIsa = new InetSocketAddress(hostname, port);
  if (initialIsa.getAddress() == null) {
    throw new IllegalArgumentException("Failed resolve of hostname " + initialIsa);
  }
  int numHandlers = conf.getInt("hbase.master.handler.count",
    conf.getInt("hbase.regionserver.handler.count", 25));
  this.rpcServer = HBaseRPC.getServer(this,
    new Class<?>[]{HMasterInterface.class, HMasterRegionInterface.class},
      initialIsa.getHostName(), // This is bindAddress if set else it's hostname
      initialIsa.getPort(),
      numHandlers,
      0, // we dont use high priority handlers in master
      conf.getBoolean("hbase.rpc.verbose", false), conf,
      0); // this is a DNC w/o high priority handlers
  // Set our address.
  this.isa = this.rpcServer.getListenerAddress();
  this.serverName = new ServerName(this.isa.getHostName(),
    this.isa.getPort(), System.currentTimeMillis());
  this.rsFatals = new MemoryBoundedLogMessageBuffer(
      conf.getLong("hbase.master.buffer.for.rs.fatals", 1*1024*1024));

  // login the zookeeper client principal (if using security)
  ZKUtil.loginClient(this.conf, "hbase.zookeeper.client.keytab.file",
    "hbase.zookeeper.client.kerberos.principal", this.isa.getHostName());

  // initialize server principal (if using secure Hadoop)
  User.login(conf, "hbase.master.keytab.file",
    "hbase.master.kerberos.principal", this.isa.getHostName());

  // set the thread name now we have an address
  setName(MASTER + "-" + this.serverName.toString());

  Replication.decorateMasterConfiguration(this.conf);

  // Hack! Maps DFSClient => Master for logs.  HDFS made this
  // config param for task trackers, but we can piggyback off of it.
  if (this.conf.get("mapred.task.id") == null) {
    this.conf.set("mapred.task.id", "hb_m_" + this.serverName.toString());
  }

  this.zooKeeper = new ZooKeeperWatcher(conf, MASTER + ":" + isa.getPort(), this, true);
  this.rpcServer.startThreads();
  this.metrics = new MasterMetrics(getServerName().toString());

  // Health checker thread.
  int sleepTime = this.conf.getInt(HConstants.HEALTH_CHORE_WAKE_FREQ,
    HConstants.DEFAULT_THREAD_WAKE_FREQUENCY);
  if (isHealthCheckerConfigured()) {
    healthCheckChore = new HealthCheckChore(sleepTime, this, getConfiguration());
  }

  this.shouldSplitMetaSeparately = conf.getBoolean(HLog.SEPARATE_HLOG_FOR_META, false);
  waitingOnLogSplitting = this.conf.getBoolean("hbase.master.wait.for.log.splitting", false);
}
 
Developer: zwqjsj0404, Project: HBase-Research, Lines of code: 81, Source: HMaster.java


Note: The org.apache.hadoop.hbase.monitoring.MemoryBoundedLogMessageBuffer class examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors, and redistribution or use should follow the corresponding project's license. Please do not reproduce this article without permission.