

Java HConstants Class Code Examples

This article compiles typical usage examples of the Java class org.apache.hadoop.hbase.HConstants. If you are wondering what HConstants is for and how to use it in practice, the curated class examples below may help.


The HConstants class belongs to the org.apache.hadoop.hbase package. Fifteen HConstants code examples are shown below, sorted by popularity by default.
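Before diving in, here is a minimal sketch of what HConstants is for: it collects HBase-wide constant keys and values so configuration code does not hard-code raw strings. The constants used are real HConstants fields; the host name and size values are illustrative only.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.HConstants;

public class HConstantsSketch {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Point a client at a ZooKeeper quorum using constant keys instead of raw strings.
    conf.set(HConstants.ZOOKEEPER_QUORUM, "zk1.example.com"); // illustrative host
    conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, 2181);
    // Region split threshold, which several examples below also set.
    conf.setLong(HConstants.HREGION_MAX_FILESIZE, 10L * 1024 * 1024 * 1024);
  }
}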

Example 1: getServerNameFromWALDirectoryName

import org.apache.hadoop.hbase.HConstants; // import the required package/class
/**
 * Returns the region server name from a WAL file path in one of the following
 * formats:
 * <ul>
 *   <li>hdfs://&lt;name node&gt;/hbase/.logs/&lt;server name&gt;-splitting/...</li>
 *   <li>hdfs://&lt;name node&gt;/hbase/.logs/&lt;server name&gt;/...</li>
 * </ul>
 * @param logFile path of a WAL file, or of the server log directory containing it
 * @return the parsed server name, or null if the passed-in logFile isn't a valid WAL file path
 */
public static ServerName getServerNameFromWALDirectoryName(Path logFile) {
  String logDirName = logFile.getParent().getName();
  // We were passed the directory and not a file in it.
  if (logDirName.equals(HConstants.HREGION_LOGDIR_NAME)) {
    logDirName = logFile.getName();
  }
  ServerName serverName = null;
  if (logDirName.endsWith(SPLITTING_EXT)) {
    logDirName = logDirName.substring(0, logDirName.length() - SPLITTING_EXT.length());
  }
  try {
    serverName = ServerName.parseServerName(logDirName);
  } catch (IllegalArgumentException ex) {
    serverName = null;
    LOG.warn("Cannot parse a server name from path=" + logFile + "; " + ex.getMessage());
  }
  if (serverName != null && serverName.getStartcode() < 0) {
    LOG.warn("Invalid log file path=" + logFile);
    serverName = null;
  }
  return serverName;
}
 
Author: fengchen8086; Project: ditb; Lines: 33; Source: DefaultWALProvider.java
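As a usage sketch for the method above (the HDFS path is made up, but follows the &lt;server name&gt;-splitting format from the Javadoc; HBase encodes server names as hostname,port,startcode):

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hbase.ServerName;

// Hypothetical WAL file under a -splitting directory:
Path logFile = new Path(
    "hdfs://namenode/hbase/.logs/rs1.example.com,16020,1473454444444-splitting/wal.1");
ServerName sn = DefaultWALProvider.getServerNameFromWALDirectoryName(logFile);
// sn.getHostname() -> "rs1.example.com", sn.getPort() -> 16020,
// sn.getStartcode() -> 1473454444444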

Example 2: setupOnce

import org.apache.hadoop.hbase.HConstants; // import the required package/class
static void setupOnce() throws Exception {
  // Use our load balancer to control region plans
  conf.setClass(HConstants.HBASE_MASTER_LOADBALANCER_CLASS,
    MyLoadBalancer.class, LoadBalancer.class);
  conf.setClass(CoprocessorHost.REGION_COPROCESSOR_CONF_KEY,
    MyRegionObserver.class, RegionObserver.class);
  // Reduce the maximum attempts to speed up the test
  conf.setInt("hbase.assignment.maximum.attempts", 3);
  // Put meta on master to avoid meta server shutdown handling
  conf.set("hbase.balancer.tablesOnMaster", "hbase:meta");
  conf.setInt("hbase.master.maximum.ping.server.attempts", 3);
  conf.setInt("hbase.master.ping.server.retry.sleep.interval", 1);

  TEST_UTIL.startMiniCluster(1, 4, null, MyMaster.class, MyRegionServer.class);
  admin = TEST_UTIL.getHBaseAdmin();
}
 
Author: fengchen8086; Project: ditb; Lines: 17; Source: TestAssignmentManagerOnCluster.java

Example 3: testHBaseFsckWithFewerMetaReplicaZnodes

import org.apache.hadoop.hbase.HConstants; // import the required package/class
@Test
public void testHBaseFsckWithFewerMetaReplicaZnodes() throws Exception {
  ClusterConnection c = (ClusterConnection)ConnectionFactory.createConnection(
      TEST_UTIL.getConfiguration());
  RegionLocations rl = c.locateRegion(TableName.META_TABLE_NAME, HConstants.EMPTY_START_ROW,
      false, false);
  HBaseFsckRepair.closeRegionSilentlyAndWait(c,
      rl.getRegionLocation(2).getServerName(), rl.getRegionLocation(2).getRegionInfo());
  ZooKeeperWatcher zkw = TEST_UTIL.getZooKeeperWatcher();
  ZKUtil.deleteNode(zkw, zkw.getZNodeForReplica(2));
  // check that problem exists
  HBaseFsck hbck = doFsck(TEST_UTIL.getConfiguration(), false);
  assertErrors(hbck, new ERROR_CODE[]{ERROR_CODE.UNKNOWN, ERROR_CODE.NO_META_REGION});
  // fix the problem
  hbck = doFsck(TEST_UTIL.getConfiguration(), true);
  // run hbck again to make sure we don't see any errors
  hbck = doFsck(TEST_UTIL.getConfiguration(), false);
  assertErrors(hbck, new ERROR_CODE[]{});
}
 
Author: fengchen8086; Project: ditb; Lines: 20; Source: TestMetaWithReplicas.java

Example 4: initSplit

import org.apache.hadoop.hbase.HConstants; // import the required package/class
Configuration initSplit() {
  // Always compact if there is more than one store file.
  TEST_UTIL.getConfiguration().setInt("hbase.hstore.compactionThreshold", 2);
  // Make lease timeout longer, lease checks less frequent
  TEST_UTIL.getConfiguration().setInt(
      "hbase.master.lease.thread.wakefrequency", 5 * 1000);
  TEST_UTIL.getConfiguration().setInt(HConstants.HBASE_CLIENT_SCANNER_TIMEOUT_PERIOD, 10 * 1000);
  // Increase the amount of time between client retries
  TEST_UTIL.getConfiguration().setLong("hbase.client.pause", 15 * 1000);
  // This size should make it so we always split using the addContent
  // below.  After adding all data, the first region is 1.3M
  TEST_UTIL.getConfiguration().setLong(HConstants.HREGION_MAX_FILESIZE,
      1024 * 128);
  TEST_UTIL.getConfiguration().setBoolean("hbase.testing.nocluster",
      true);
  TEST_UTIL.getConfiguration().setBoolean(CoprocessorHost.ABORT_ON_ERROR_KEY, false);

  return TEST_UTIL.getConfiguration();
}
 
Author: fengchen8086; Project: ditb; Lines: 20; Source: TestCoprocessorInterface.java

Example 5: addEdits

import org.apache.hadoop.hbase.HConstants; // import the required package/class
protected void addEdits(WAL log,
                        HRegionInfo hri,
                        HTableDescriptor htd,
                        int times,
                        MultiVersionConcurrencyControl mvcc)
    throws IOException {
  final byte[] row = Bytes.toBytes("row");
  for (int i = 0; i < times; i++) {
    long timestamp = System.currentTimeMillis();
    WALEdit cols = new WALEdit();
    cols.add(new KeyValue(row, row, row, timestamp, row));
    WALKey key = new WALKey(hri.getEncodedNameAsBytes(), htd.getTableName(),
        WALKey.NO_SEQUENCE_ID, timestamp, WALKey.EMPTY_UUIDS, HConstants.NO_NONCE,
        HConstants.NO_NONCE, mvcc);
    log.append(htd, hri, key, cols, true);
  }
  log.sync();
}
 
Author: fengchen8086; Project: ditb; Lines: 19; Source: TestFSHLog.java

Example 6: start

import org.apache.hadoop.hbase.HConstants; // import the required package/class
private int start() throws Exception {
  Configuration conf = getConf();
  HRegionServer.loadWinterConf(conf, null);
  CoordinatedStateManager cp = CoordinatedStateManagerFactory.getCoordinatedStateManager(conf);
  try {
    // If 'local', don't start a region server here. Defer to
    // LocalHBaseCluster. It manages 'local' clusters.
    if (LocalHBaseCluster.isLocal(conf)) {
      LOG.warn("Not starting a distinct region server because " + HConstants.CLUSTER_DISTRIBUTED
          + " is false");
    } else {
      logProcessInfo(getConf());
      HRegionServer hrs = HRegionServer.constructRegionServer(regionServerClass, conf, cp);
      hrs.start();
      hrs.join();
      if (hrs.isAborted()) {
        throw new RuntimeException("HRegionServer Aborted");
      }
    }
  } catch (Throwable t) {
    LOG.error("Region server exiting", t);
    return 1;
  }
  return 0;
}
 
Author: fengchen8086; Project: ditb; Lines: 26; Source: HRegionServerCommandLine.java

Example 7: handleReportForDutyResponse

import org.apache.hadoop.hbase.HConstants; // import the required package/class
@Override
protected void handleReportForDutyResponse(RegionServerStartupResponse c) throws IOException {
  if (firstRS.getAndSet(false)) {
    InetSocketAddress address = super.getRpcServer().getListenerAddress();
    if (address == null) {
      throw new IOException("Listener channel is closed");
    }
    for (NameStringPair e : c.getMapEntriesList()) {
      String key = e.getName();
      // The hostname the master sees us as.
      if (key.equals(HConstants.KEY_FOR_HOSTNAME_SEEN_BY_MASTER)) {
        String hostnameFromMasterPOV = e.getValue();
        assertEquals(address.getHostName(), hostnameFromMasterPOV);
      }
    }
    while (!masterActive) {
      Threads.sleep(100);
    }
    super.kill();
  } else {
    super.handleReportForDutyResponse(c);
  }
}
 
Author: fengchen8086; Project: ditb; Lines: 24; Source: TestRSKilledWhenInitializing.java

Example 8: revokeGlobal

import org.apache.hadoop.hbase.HConstants; // import the required package/class
/**
 * Revoke permissions globally from the given user. Will wait until all active
 * AccessController instances have updated their permissions caches or will
 * throw an exception upon timeout (10 seconds).
 */
public static void revokeGlobal(final HBaseTestingUtility util, final String user,
    final Permission.Action... actions) throws Exception {
  SecureTestUtil.updateACLs(util, new Callable<Void>() {
    @Override
    public Void call() throws Exception {
      try (Connection connection = ConnectionFactory.createConnection(util.getConfiguration())) {
        try (Table acl = connection.getTable(AccessControlLists.ACL_TABLE_NAME)) {
          BlockingRpcChannel service = acl.coprocessorService(HConstants.EMPTY_START_ROW);
          AccessControlService.BlockingInterface protocol =
              AccessControlService.newBlockingStub(service);
          ProtobufUtil.revoke(null, protocol, user, actions);
        }
      }
      return null;
    }
  });
}
 
Author: fengchen8086; Project: ditb; Lines: 23; Source: SecureTestUtil.java
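A hedged usage sketch of the helper above (the user name and the actions revoked are illustrative):

import org.apache.hadoop.hbase.security.access.Permission;

// Revoke ADMIN and CREATE globally from a test user, blocking until every
// active AccessController has refreshed its cache (or the 10 s timeout fires).
SecureTestUtil.revokeGlobal(TEST_UTIL, "test_user",
    Permission.Action.ADMIN, Permission.Action.CREATE);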

Example 9: moreRowsMayExistAfter

import org.apache.hadoop.hbase.HConstants; // import the required package/class
public boolean moreRowsMayExistAfter(Cell kv) {
  if (this.isReversed) {
    // Reversed scan: rows remain only while kv is still strictly after stopRow.
    return rowComparator.compareRows(kv.getRowArray(), kv.getRowOffset(),
        kv.getRowLength(), stopRow, 0, stopRow.length) > 0;
  }
  // Forward scan: if a stop row is set and KV >= STOPROW, nothing is left.
  return Bytes.equals(stopRow, HConstants.EMPTY_END_ROW) ||
      rowComparator.compareRows(kv.getRowArray(), kv.getRowOffset(),
          kv.getRowLength(), stopRow, 0, stopRow.length) < 0;
}
 
Author: fengchen8086; Project: ditb; Lines: 20; Source: ScanQueryMatcher.java

Example 10: Writer

import org.apache.hadoop.hbase.HConstants; // import the required package/class
/**
 * @param dataBlockEncoder data block encoding algorithm to use
 * @param fileContext HFile context describing block layout and checksum settings
 */
public Writer(HFileDataBlockEncoder dataBlockEncoder, HFileContext fileContext) {
  this.dataBlockEncoder = dataBlockEncoder != null
      ? dataBlockEncoder : NoOpDataBlockEncoder.INSTANCE;
  defaultBlockEncodingCtx = new HFileBlockDefaultEncodingContext(null,
      HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);
  dataBlockEncodingCtx = this.dataBlockEncoder
      .newDataBlockEncodingContext(HConstants.HFILEBLOCK_DUMMY_HEADER, fileContext);

  if (fileContext.getBytesPerChecksum() < HConstants.HFILEBLOCK_HEADER_SIZE) {
    throw new RuntimeException("Unsupported value of bytesPerChecksum. " +
        "Minimum is " + HConstants.HFILEBLOCK_HEADER_SIZE + " but the configured value is " +
        fileContext.getBytesPerChecksum());
  }

  baosInMemory = new ByteArrayOutputStream();

  prevOffsetByType = new long[BlockType.values().length];
  for (int i = 0; i < prevOffsetByType.length; ++i) {
    prevOffsetByType[i] = -1;
  }

  this.fileContext = fileContext;
}
 
Author: fengchen8086; Project: ditb; Lines: 26; Source: HFileBlock.java

Example 11: startWriting

import org.apache.hadoop.hbase.HConstants; // import the required package/class
/**
 * Starts writing into the block. The previous block's data is discarded.
 *
 * @return the stream the user can write their data into
 * @throws IOException
 */
public DataOutputStream startWriting(BlockType newBlockType)
    throws IOException {
  if (state == State.BLOCK_READY && startOffset != -1) {
    // We had a previous block that was written to a stream at a specific
    // offset. Save that offset as the last offset of a block of that type.
    prevOffsetByType[blockType.getId()] = startOffset;
  }

  startOffset = -1;
  blockType = newBlockType;

  baosInMemory.reset();
  baosInMemory.write(HConstants.HFILEBLOCK_DUMMY_HEADER);

  state = State.WRITING;

  // We will compress it later in finishBlock()
  userDataStream = new DataOutputStream(baosInMemory);
  if (newBlockType == BlockType.DATA) {
    this.dataBlockEncoder.startBlockEncoding(dataBlockEncodingCtx, userDataStream);
  }
  this.unencodedDataSizeWritten = 0;
  return userDataStream;
}
 
Author: fengchen8086; Project: ditb; Lines: 31; Source: HFileBlock.java
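The offset bookkeeping at the top of startWriting() is easy to miss, so here is a standalone toy (not HBase API) that isolates the idea: before switching to a new block, the writer records where the last block of the previous type started, so index and meta blocks can later link back to it.

import java.util.Arrays;

class OffsetTrackerSketch {
  private final long[] prevOffsetByType; // last start offset seen per block-type id
  private long startOffset = -1;         // -1 means no block has been placed yet
  private int currentTypeId = -1;

  OffsetTrackerSketch(int numTypes) {
    prevOffsetByType = new long[numTypes];
    Arrays.fill(prevOffsetByType, -1);   // mirrors the constructor in Example 10
  }

  void startWriting(int newTypeId, long newStartOffset) {
    if (startOffset != -1) {
      // Save the previous block's start offset under its type before moving on.
      prevOffsetByType[currentTypeId] = startOffset;
    }
    currentTypeId = newTypeId;
    startOffset = newStartOffset;
  }
}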

Example 12: testRegionOpenFailsDueToIOException

import org.apache.hadoop.hbase.HConstants; // import the required package/class
/**
 * If region open fails with an IOException in openRegion() while doing
 * tableDescriptors.get(), the region should not be added to the
 * regionsInTransitionInRS map.
 * @throws Exception
 */
@Test
public void testRegionOpenFailsDueToIOException() throws Exception {
  HRegionInfo REGIONINFO = new HRegionInfo(TableName.valueOf("t"),
      HConstants.EMPTY_START_ROW, HConstants.EMPTY_START_ROW);
  HRegionServer regionServer = TEST_UTIL.getHBaseCluster().getRegionServer(0);
  TableDescriptors htd = Mockito.mock(TableDescriptors.class);
  Object originalState = Whitebox.getInternalState(regionServer, "tableDescriptors");
  Whitebox.setInternalState(regionServer, "tableDescriptors", htd);
  Mockito.doThrow(new IOException()).when(htd).get((TableName) Mockito.any());
  try {
    ProtobufUtil.openRegion(null, regionServer.getRSRpcServices(),
      regionServer.getServerName(), REGIONINFO);
    fail("It should throw IOException");
  } catch (IOException e) {
    // expected
  }
  Whitebox.setInternalState(regionServer, "tableDescriptors", originalState);
  assertFalse("Region should not be in RIT",
      regionServer.getRegionsInTransitionInRS().containsKey(REGIONINFO.getEncodedNameAsBytes()));
}
 
Author: fengchen8086; Project: ditb; Lines: 25; Source: TestZKBasedOpenCloseRegion.java

Example 13: process

import org.apache.hadoop.hbase.HConstants; // import the required package/class
/**
 * Pass a processor to the region to process multiple rows atomically.
 *
 * The RowProcessor implementations should be inner classes of your
 * RowProcessorEndpoint. This way the RowProcessor can be class-loaded with
 * the coprocessor endpoint together.
 *
 * See {@code TestRowProcessorEndpoint} for an example.
 *
 * The request contains the information for constructing the processor
 * (see {@link #constructRowProcessorFromRequest}). The processor object
 * defines the read-modify-write procedure.
 */
@Override
public void process(RpcController controller, ProcessRequest request,
    RpcCallback<ProcessResponse> done) {
  ProcessResponse resultProto = null;
  try {
    RowProcessor<S,T> processor = constructRowProcessorFromRequest(request);
    Region region = env.getRegion();
    long nonceGroup = request.hasNonceGroup() ? request.getNonceGroup() : HConstants.NO_NONCE;
    long nonce = request.hasNonce() ? request.getNonce() : HConstants.NO_NONCE;
    region.processRowsWithLocks(processor, nonceGroup, nonce);
    T result = processor.getResult();
    ProcessResponse.Builder b = ProcessResponse.newBuilder();
    b.setRowProcessorResult(result.toByteString());
    resultProto = b.build();
  } catch (Exception e) {
    ResponseConverter.setControllerException(controller, new IOException(e));
  }
  done.run(resultProto);
}
 
Author: fengchen8086; Project: ditb; Lines: 33; Source: BaseRowProcessorEndpoint.java

Example 14: mutateLabelsRegion

import org.apache.hadoop.hbase.HConstants; // import the required package/class
/**
 * Adds the mutations to the labels region and sets the results in finalOpStatus.
 * finalOpStatus may already contain entries whose OpStatus is FAILURE; those are
 * left as-is and the remaining entries are filled in order.
 * @param mutations
 * @param finalOpStatus
 * @return whether we need a ZK update or not.
 */
private boolean mutateLabelsRegion(List<Mutation> mutations, OperationStatus[] finalOpStatus)
    throws IOException {
  OperationStatus[] opStatus = this.labelsRegion.batchMutate(mutations
    .toArray(new Mutation[mutations.size()]), HConstants.NO_NONCE, HConstants.NO_NONCE);
  int i = 0;
  boolean updateZk = false;
  for (OperationStatus status : opStatus) {
    // Update ZK when at least one of the mutations was added successfully.
    updateZk = updateZk || (status.getOperationStatusCode() == OperationStatusCode.SUCCESS);
    for (; i < finalOpStatus.length; i++) {
      if (finalOpStatus[i] == null) {
        finalOpStatus[i] = status;
        break;
      }
    }
  }
  return updateZk;
}
 
Author: fengchen8086; Project: ditb; Lines: 27; Source: DefaultVisibilityLabelServiceImpl.java

Example 15: ProcessBasedLocalHBaseCluster

import org.apache.hadoop.hbase.HConstants; // import the required package/class
/**
 * Constructor. Modifies the passed configuration in place.
 * @param conf the configuration to adjust for the local cluster
 */
public ProcessBasedLocalHBaseCluster(Configuration conf,
    int numDataNodes, int numRegionServers) {
  this.conf = conf;
  this.hbaseHome = HBaseHomePath.getHomePath();
  this.numMasters = 1;
  this.numRegionServers = numRegionServers;
  this.workDir = hbaseHome + "/target/local_cluster";
  this.numDataNodes = numDataNodes;

  hbaseDaemonScript = hbaseHome + "/bin/hbase-daemon.sh";
  zkClientPort = HBaseTestingUtility.randomFreePort();

  this.rsPorts = sortedPorts(numRegionServers);
  this.masterPorts = sortedPorts(numMasters);

  conf.set(HConstants.ZOOKEEPER_QUORUM, HConstants.LOCALHOST);
  conf.setInt(HConstants.ZOOKEEPER_CLIENT_PORT, zkClientPort);
}
 
Author: fengchen8086; Project: ditb; Lines: 23; Source: ProcessBasedLocalHBaseCluster.java
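A hedged construction sketch (the node counts are arbitrary):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

Configuration conf = HBaseConfiguration.create();
// Two data nodes, three region servers; note that the constructor rewrites
// conf in place to point at the local ZooKeeper quorum it chooses.
ProcessBasedLocalHBaseCluster cluster =
    new ProcessBasedLocalHBaseCluster(conf, 2, 3);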


Note: the org.apache.hadoop.hbase.HConstants class examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers; copyright of the source code remains with the original authors. For distribution and use, refer to the corresponding project's license. Do not reproduce without permission.