

Java TraditionalBinaryPrefix Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix. If you are wondering what the TraditionalBinaryPrefix class does, how to use it, or are looking for working examples, the curated code samples below should help.


The TraditionalBinaryPrefix class belongs to the org.apache.hadoop.util.StringUtils package. Fifteen code examples of the class are shown below, sorted by popularity by default.
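Before the examples, here is a minimal, self-contained sketch (not taken from any of the projects below) of the two TraditionalBinaryPrefix methods the examples rely on: long2String(long, String, int), which renders a byte count with a traditional binary prefix (K, M, G, ...), and string2long(String), which parses such a string back into a long. The exact output strings in the comments are illustrative assumptions; consult the Hadoop StringUtils javadoc for the precise formatting rules.

import org.apache.hadoop.util.StringUtils;
import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix;

public class TraditionalBinaryPrefixDemo {
  public static void main(String[] args) {
    long bytes = 3L * 1024 * 1024; // 3 MiB

    // Format a raw byte count with a traditional binary prefix,
    // no unit suffix, at most one decimal place -- e.g. "3 M".
    String human = TraditionalBinaryPrefix.long2String(bytes, "", 1);
    System.out.println("formatted: " + human);

    // Parse a human-readable size back into bytes; prefix letters are
    // case-insensitive, so "128k" and "128K" both mean 128 * 1024.
    long parsed = StringUtils.TraditionalBinaryPrefix.string2long("128k");
    System.out.println("parsed: " + parsed); // expected 131072
  }
}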

Example 1: debugDumpState

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
private void debugDumpState(String string) {
  if (!LOG.isDebugEnabled()) return;
  StringBuilder sb = new StringBuilder();
  sb.append("\n" + string + "; current stripe state is as such:");
  sb.append("\n level 0 with ")
      .append(state.level0Files.size())
      .append(
        " files: "
            + TraditionalBinaryPrefix.long2String(
              StripeCompactionPolicy.getTotalFileSize(state.level0Files), "", 1) + ";");
  for (int i = 0; i < state.stripeFiles.size(); ++i) {
    String endRow = (i == state.stripeEndRows.length)
        ? "(end)" : "[" + Bytes.toString(state.stripeEndRows[i]) + "]";
    sb.append("\n stripe ending in ")
        .append(endRow)
        .append(" with ")
        .append(state.stripeFiles.get(i).size())
        .append(
          " files: "
              + TraditionalBinaryPrefix.long2String(
                StripeCompactionPolicy.getTotalFileSize(state.stripeFiles.get(i)), "", 1) + ";");
  }
  sb.append("\n").append(state.stripeFiles.size()).append(" stripes total.");
  sb.append("\n").append(getStorefileCount()).append(" files total.");
  LOG.debug(sb.toString());
}
 
Developer: fengchen8086, Project: ditb, Lines: 27, Source: StripeStoreFileManager.java

Example 2: MemStoreFlusher

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
/**
 * @param conf
 * @param server
 */
public MemStoreFlusher(final Configuration conf,
    final HRegionServer server) {
  super();
  this.conf = conf;
  this.server = server;
  this.threadWakeFrequency =
    conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
  long max = ManagementFactory.getMemoryMXBean().getHeapMemoryUsage().getMax();
  float globalMemStorePercent = HeapMemorySizeUtil.getGlobalMemStorePercent(conf, true);
  this.globalMemStoreLimit = (long) (max * globalMemStorePercent);
  this.globalMemStoreLimitLowMarkPercent =
      HeapMemorySizeUtil.getGlobalMemStoreLowerMark(conf, globalMemStorePercent);
  this.globalMemStoreLimitLowMark =
      (long) (this.globalMemStoreLimit * this.globalMemStoreLimitLowMarkPercent);

  this.blockingWaitTime = conf.getInt("hbase.hstore.blockingWaitTime",
    90000);
  int handlerCount = conf.getInt("hbase.hstore.flusher.count", 2);
  this.flushHandlers = new FlushHandler[handlerCount];
  LOG.info("globalMemStoreLimit="
      + TraditionalBinaryPrefix.long2String(this.globalMemStoreLimit, "", 1)
      + ", globalMemStoreLimitLowMark="
      + TraditionalBinaryPrefix.long2String(this.globalMemStoreLimitLowMark, "", 1)
      + ", maxHeap=" + TraditionalBinaryPrefix.long2String(max, "", 1));
}
 
Developer: fengchen8086, Project: ditb, Lines: 30, Source: MemStoreFlusher.java

Example 3: commitFile

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
private StoreFile commitFile(final Path path, final long logCacheFlushId, MonitoredTask status)
    throws IOException {
  // Write-out finished successfully, move into the right spot
  Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path, this, true);

  status.setStatus("Flushing " + this + ": reopening flushed file");
  StoreFile sf = createStoreFileAndReader(dstPath);

  StoreFile.Reader r = sf.getReader();
  this.storeSize += r.length();
  this.totalUncompressedBytes += r.getTotalUncompressedBytes();

  if (LOG.isInfoEnabled()) {
    LOG.info("Added " + sf + ", entries=" + r.getEntries() + ", sequenceid=" + logCacheFlushId
        + ", filesize=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1));
  }
  return sf;
}
 
Developer: fengchen8086, Project: ditb, Lines: 19, Source: HStore.java

Example 4: MemStoreFlusher

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
/**
 * @param conf
 * @param server
 */
public MemStoreFlusher(final Configuration conf,
    final HRegionServer server) {
  super();
  this.conf = conf;
  this.server = server;
  this.threadWakeFrequency =
      conf.getLong(HConstants.THREAD_WAKE_FREQUENCY, 10 * 1000);
  this.blockingWaitTime = conf.getInt("hbase.hstore.blockingWaitTime",
    90000);
  int handlerCount = conf.getInt("hbase.hstore.flusher.count", 2);
  this.flushHandlers = new FlushHandler[handlerCount];
  LOG.info("globalMemStoreLimit="
      + TraditionalBinaryPrefix
          .long2String(this.server.getRegionServerAccounting().getGlobalMemStoreLimit(), "", 1)
      + ", globalMemStoreLimitLowMark="
      + TraditionalBinaryPrefix.long2String(
        this.server.getRegionServerAccounting().getGlobalMemStoreLimitLowMark(), "", 1)
      + ", Offheap="
      + (this.server.getRegionServerAccounting().isOffheap()));
}
 
Developer: apache, Project: hbase, Lines: 25, Source: MemStoreFlusher.java

Example 5: commitFile

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
/**
 * @param path The pathname of the tmp file into which the store was flushed
 * @param logCacheFlushId
 * @param status
 * @return store file created.
 * @throws IOException
 */
private HStoreFile commitFile(Path path, long logCacheFlushId, MonitoredTask status)
    throws IOException {
  // Write-out finished successfully, move into the right spot
  Path dstPath = fs.commitStoreFile(getColumnFamilyName(), path);

  status.setStatus("Flushing " + this + ": reopening flushed file");
  HStoreFile sf = createStoreFileAndReader(dstPath);

  StoreFileReader r = sf.getReader();
  this.storeSize.addAndGet(r.length());
  this.totalUncompressedBytes.addAndGet(r.getTotalUncompressedBytes());

  if (LOG.isInfoEnabled()) {
    LOG.info("Added " + sf + ", entries=" + r.getEntries() +
      ", sequenceid=" + logCacheFlushId +
      ", filesize=" + TraditionalBinaryPrefix.long2String(r.length(), "", 1));
  }
  return sf;
}
 
Developer: apache, Project: hbase, Lines: 27, Source: HStore.java

Example 6: formatUsageString

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
private String formatUsageString(long currentVmemUsage, long vmemLimit,
    long currentPmemUsage, long pmemLimit) {
  return String.format("%sB of %sB physical memory used; " +
      "%sB of %sB virtual memory used",
      TraditionalBinaryPrefix.long2String(currentPmemUsage, "", 1),
      TraditionalBinaryPrefix.long2String(pmemLimit, "", 1),
      TraditionalBinaryPrefix.long2String(currentVmemUsage, "", 1),
      TraditionalBinaryPrefix.long2String(vmemLimit, "", 1));
}
 
Developer: naver, Project: hadoop, Lines: 10, Source: ContainersMonitorImpl.java

Example 7: parseLong

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
private long parseLong(String[] args, int offset) {
  if (offset ==  args.length) {
    throw new IllegalArgumentException("<n> not specified in " + cmd);
  }
  long n = StringUtils.TraditionalBinaryPrefix.string2long(args[offset]);
  if (n <= 0) {
    throw new IllegalArgumentException("n = " + n + " <= 0 in " + cmd);
  }
  return n;
}
 
Developer: naver, Project: hadoop, Lines: 11, Source: DistCpV1.java

Example 8: doCopyFile

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
/**
 * Copies single file to the path specified by tmpfile.
 * @param srcstat  src path and metadata
 * @param tmpfile  temporary file to which copy is to be done
 * @param absdst   actual destination path to which copy is to be done
 * @param reporter
 * @return Number of bytes copied
 */
private long doCopyFile(FileStatus srcstat, Path tmpfile, Path absdst,
                        Reporter reporter) throws IOException {
  long bytesCopied = 0L;
  Path srcPath = srcstat.getPath();
  // open src file
  try (FSDataInputStream in = srcPath.getFileSystem(job).open(srcPath)) {
    reporter.incrCounter(Counter.BYTESEXPECTED, srcstat.getLen());
    // open tmp file
    try (FSDataOutputStream out = create(tmpfile, reporter, srcstat)) {
      LOG.info("Copying file " + srcPath + " of size " +
               srcstat.getLen() + " bytes...");
    
      // copy file
      for(int bytesRead; (bytesRead = in.read(buffer)) >= 0; ) {
        out.write(buffer, 0, bytesRead);
        bytesCopied += bytesRead;
        reporter.setStatus(
            String.format("%.2f ", bytesCopied*100.0/srcstat.getLen())
            + absdst + " [ " +
            TraditionalBinaryPrefix.long2String(bytesCopied, "", 1) + " / "
            + TraditionalBinaryPrefix.long2String(srcstat.getLen(), "", 1)
            + " ]");
      }
    }
  }
  return bytesCopied;
}
 
Developer: naver, Project: hadoop, Lines: 36, Source: DistCpV1.java

Example 9: getFileDetails

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
/**
 * Extracts some details about the files to compact that are commonly needed by compactors.
 *
 * @param filesToCompact Files.
 * @param allFiles       Whether all files are included for compaction
 * @return The result.
 */
protected FileDetails getFileDetails(Collection<StoreFile> filesToCompact, boolean allFiles)
    throws IOException {
  FileDetails fd = new FileDetails();
  long oldestHFileTimeStampToKeepMVCC =
      System.currentTimeMillis() - (1000L * 60 * 60 * 24 * this.keepSeqIdPeriod);

  for (StoreFile file : filesToCompact) {
    if (allFiles && (file.getModificationTimeStamp() < oldestHFileTimeStampToKeepMVCC)) {
      // when isAllFiles is true, all files are compacted so we can calculate the smallest
      // MVCC value to keep
      if (fd.minSeqIdToKeep < file.getMaxMemstoreTS()) {
        fd.minSeqIdToKeep = file.getMaxMemstoreTS();
      }
    }
    long seqNum = file.getMaxSequenceId();
    fd.maxSeqId = Math.max(fd.maxSeqId, seqNum);
    StoreFile.Reader r = file.getReader();
    if (r == null) {
      LOG.warn("Null reader for " + file.getPath());
      continue;
    }
    // NOTE: use getEntries when compacting instead of getFilterEntries, otherwise under-sized
    // blooms can cause progress to be miscalculated or if the user switches bloom
    // type (e.g. from ROW to ROWCOL)
    long keyCount = r.getEntries();
    fd.maxKeyCount += keyCount;
    // calculate the latest MVCC readpoint in any of the involved store files
    Map<byte[], byte[]> fileInfo = r.loadFileInfo();
    byte tmp[] = null;
    // Get and set the real MVCCReadpoint for bulk loaded files, which is the
    // SeqId number.
    if (r.isBulkLoaded()) {
      fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, r.getSequenceID());
    } else {
      tmp = fileInfo.get(HFileWriterV2.MAX_MEMSTORE_TS_KEY);
      if (tmp != null) {
        fd.maxMVCCReadpoint = Math.max(fd.maxMVCCReadpoint, Bytes.toLong(tmp));
      }
    }
    tmp = fileInfo.get(FileInfo.MAX_TAGS_LEN);
    if (tmp != null) {
      fd.maxTagsLength = Math.max(fd.maxTagsLength, Bytes.toInt(tmp));
    }
    // If required, calculate the earliest put timestamp of all involved storefiles.
    // This is used to remove family delete marker during compaction.
    long earliestPutTs = 0;
    if (allFiles) {
      tmp = fileInfo.get(StoreFile.EARLIEST_PUT_TS);
      if (tmp == null) {
        // There's a file with no information, must be an old one
        // assume we have very old puts
        fd.earliestPutTs = earliestPutTs = HConstants.OLDEST_TIMESTAMP;
      } else {
        earliestPutTs = Bytes.toLong(tmp);
        fd.earliestPutTs = Math.min(fd.earliestPutTs, earliestPutTs);
      }
    }
    if (LOG.isDebugEnabled()) {
      LOG.debug("Compacting " + file + ", keycount=" + keyCount + ", bloomtype=" + r
          .getBloomFilterType().toString() + ", size=" + TraditionalBinaryPrefix
          .long2String(r.length(), "", 1) + ", encoding=" + r.getHFileReader()
          .getDataBlockEncoding() + ", seqNum=" + seqNum + (allFiles ?
          ", earliestPutTs=" + earliestPutTs :
          ""));
    }
  }
  return fd;
}
 
Developer: fengchen8086, Project: ditb, Lines: 76, Source: Compactor.java

Example 10: removeUnneededFiles

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
private void removeUnneededFiles() throws IOException {
  if (!conf.getBoolean("hbase.store.delete.expired.storefile", true)) return;
  if (getFamily().getMinVersions() > 0) {
    LOG.debug("Skipping expired store file removal due to min version being " + getFamily()
        .getMinVersions());
    return;
  }
  this.lock.readLock().lock();
  Collection<StoreFile> delSfs = null;
  try {
    synchronized (filesCompacting) {
      long cfTtl = getStoreFileTtl();
      if (cfTtl != Long.MAX_VALUE) {
        delSfs = storeEngine.getStoreFileManager()
            .getUnneededFiles(EnvironmentEdgeManager.currentTime() - cfTtl, filesCompacting);
        addToCompactingFiles(delSfs);
      }
    }
  } finally {
    this.lock.readLock().unlock();
  }
  if (delSfs == null || delSfs.isEmpty()) return;

  Collection<StoreFile> newFiles = new ArrayList<StoreFile>(); // No new
  // files.
  writeCompactionWalRecord(delSfs, newFiles);
  replaceStoreFiles(delSfs, newFiles);
  completeCompaction(delSfs);
  LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in " + this
      + " of " + this.getRegionInfo().getRegionNameAsString() + "; total size for store is "
      + TraditionalBinaryPrefix.long2String(storeSize, "", 1));
}
 
Developer: fengchen8086, Project: ditb, Lines: 33, Source: HStore.java

Example 11: toString

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
@Override
public String toString() {
  String fsList = filesToCompact.stream().filter(f -> f.getReader() != null)
      .map(f -> TraditionalBinaryPrefix.long2String(f.getReader().length(), "", 1))
      .collect(Collectors.joining(", "));

  return "regionName=" + regionName + ", storeName=" + storeName + ", fileCount=" +
      this.getFiles().size() + ", fileSize=" +
      TraditionalBinaryPrefix.long2String(totalSize, "", 1) +
      ((fsList.isEmpty()) ? "" : " (" + fsList + ")") + ", priority=" + priority + ", time=" +
      selectionTime;
}
 
Developer: apache, Project: hbase, Lines: 13, Source: CompactionRequestImpl.java

Example 12: logCompactionEndMessage

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
/**
 * Log a very elaborate compaction completion message.
 * @param cr Request.
 * @param sfs Resulting files.
 * @param compactionStartTime Start time.
 */
private void logCompactionEndMessage(
    CompactionRequestImpl cr, List<HStoreFile> sfs, long now, long compactionStartTime) {
  StringBuilder message = new StringBuilder(
    "Completed" + (cr.isMajor() ? " major" : "") + " compaction of "
    + cr.getFiles().size() + (cr.isAllFiles() ? " (all)" : "") + " file(s) in "
    + this + " of " + this.getRegionInfo().getShortNameToLog() + " into ");
  if (sfs.isEmpty()) {
    message.append("none, ");
  } else {
    for (HStoreFile sf: sfs) {
      message.append(sf.getPath().getName());
      message.append("(size=");
      message.append(TraditionalBinaryPrefix.long2String(sf.getReader().length(), "", 1));
      message.append("), ");
    }
  }
  message.append("total size for store is ")
    .append(StringUtils.TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1))
    .append(". This selection was in queue for ")
    .append(StringUtils.formatTimeDiff(compactionStartTime, cr.getSelectionTime()))
    .append(", and took ").append(StringUtils.formatTimeDiff(now, compactionStartTime))
    .append(" to execute.");
  LOG.info(message.toString());
  if (LOG.isTraceEnabled()) {
    int fileCount = storeEngine.getStoreFileManager().getStorefileCount();
    long resultSize = getTotalSize(sfs);
    String traceMessage = "COMPACTION start,end,size out,files in,files out,store size,"
      + "store files [" + compactionStartTime + "," + now + "," + resultSize + ","
        + cr.getFiles().size() + "," + sfs.size() + "," +  storeSize + "," + fileCount + "]";
    LOG.trace(traceMessage);
  }
}
 
Developer: apache, Project: hbase, Lines: 39, Source: HStore.java

Example 13: removeUnneededFiles

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
private void removeUnneededFiles() throws IOException {
  if (!conf.getBoolean("hbase.store.delete.expired.storefile", true)) return;
  if (getColumnFamilyDescriptor().getMinVersions() > 0) {
    LOG.debug("Skipping expired store file removal due to min version being {}",
        getColumnFamilyDescriptor().getMinVersions());
    return;
  }
  this.lock.readLock().lock();
  Collection<HStoreFile> delSfs = null;
  try {
    synchronized (filesCompacting) {
      long cfTtl = getStoreFileTtl();
      if (cfTtl != Long.MAX_VALUE) {
        delSfs = storeEngine.getStoreFileManager().getUnneededFiles(
            EnvironmentEdgeManager.currentTime() - cfTtl, filesCompacting);
        addToCompactingFiles(delSfs);
      }
    }
  } finally {
    this.lock.readLock().unlock();
  }

  if (CollectionUtils.isEmpty(delSfs)) {
    return;
  }

  Collection<HStoreFile> newFiles = Collections.emptyList(); // No new files.
  writeCompactionWalRecord(delSfs, newFiles);
  replaceStoreFiles(delSfs, newFiles);
  completeCompaction(delSfs);
  LOG.info("Completed removal of " + delSfs.size() + " unnecessary (expired) file(s) in "
      + this + " of " + this.getRegionInfo().getRegionNameAsString()
      + "; total size for store is "
      + TraditionalBinaryPrefix.long2String(storeSize.get(), "", 1));
}
 
Developer: apache, Project: hbase, Lines: 36, Source: HStore.java

Example 14: replayFlush

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
/**
 * Similar to commit, but called in secondary region replicas for replaying the
 * flush cache from primary region. Adds the new files to the store, and drops the
 * snapshot depending on dropMemstoreSnapshot argument.
 * @param fileNames names of the flushed files
 * @param dropMemstoreSnapshot whether to drop the prepared memstore snapshot
 * @throws IOException
 */
@Override
public void replayFlush(List<String> fileNames, boolean dropMemstoreSnapshot)
    throws IOException {
  List<HStoreFile> storeFiles = new ArrayList<>(fileNames.size());
  for (String file : fileNames) {
    // open the file as a store file (hfile link, etc)
    StoreFileInfo storeFileInfo = fs.getStoreFileInfo(getColumnFamilyName(), file);
    HStoreFile storeFile = createStoreFileAndReader(storeFileInfo);
    storeFiles.add(storeFile);
    HStore.this.storeSize.addAndGet(storeFile.getReader().length());
    HStore.this.totalUncompressedBytes
        .addAndGet(storeFile.getReader().getTotalUncompressedBytes());
    if (LOG.isInfoEnabled()) {
      LOG.info("Region: " + HStore.this.getRegionInfo().getEncodedName() +
        " added " + storeFile + ", entries=" + storeFile.getReader().getEntries() +
          ", sequenceid=" + +storeFile.getReader().getSequenceID() + ", filesize="
          + TraditionalBinaryPrefix.long2String(storeFile.getReader().length(), "", 1));
    }
  }

  long snapshotId = -1; // -1 means do not drop
  if (dropMemstoreSnapshot && snapshot != null) {
    snapshotId = snapshot.getId();
    snapshot.close();
  }
  HStore.this.updateStorefiles(storeFiles, snapshotId);
}
 
Developer: apache, Project: hbase, Lines: 36, Source: HStore.java

Example 15: bytesString

import org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix; // import the required package/class
static String bytesString(long b) {
  return b + " bytes (" +
      TraditionalBinaryPrefix.long2String(b, "", 1) + ")";
}
 
Developer: naver, Project: hadoop, Lines: 5, Source: DistCpV1.java


Note: The org.apache.hadoop.util.StringUtils.TraditionalBinaryPrefix class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers; source-code copyrights remain with the original authors. Refer to each project's License before distributing or using the code; do not reproduce without permission.