

Java RemoteExceptionHandler.checkIOException Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hbase.RemoteExceptionHandler.checkIOException. If you have been wondering how exactly to use RemoteExceptionHandler.checkIOException, or are looking for examples of it in real code, the curated examples below may help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.hbase.RemoteExceptionHandler.


A total of 15 code examples of the RemoteExceptionHandler.checkIOException method are shown below, sorted by popularity by default.
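Before the collected examples, here is a minimal sketch of the pattern they all share: catch an IOException from an RPC-backed operation, pass it through RemoteExceptionHandler.checkIOException (which, in the HBase versions shown here, converts a wrapped Hadoop RemoteException back into the original IOException subclass where possible), log it, and rethrow. The class MetaRowReader and the helper fetchRowFromMeta below are hypothetical placeholders used only for illustration; they are not part of HBase.

import java.io.IOException;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.hbase.RemoteExceptionHandler;

// Hypothetical illustration of the common usage pattern; not taken from any of the projects below.
public class MetaRowReader {
  private static final Log LOG = LogFactory.getLog(MetaRowReader.class);

  public byte[] readMetaRow() throws IOException {
    try {
      // Some remote read that may fail with a RemoteException wrapped in an IOException.
      return fetchRowFromMeta();
    } catch (IOException e) {
      // Unwrap a RemoteException so callers see (and can catch) the real failure type.
      e = RemoteExceptionHandler.checkIOException(e);
      LOG.error("failed reading meta row", e);
      throw e;
    }
  }

  // Placeholder for an operation that talks to a remote region server.
  private byte[] fetchRowFromMeta() throws IOException {
    throw new IOException("not implemented in this sketch");
  }
}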

Example 1: processWorkItems

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
private void processWorkItems(String key, List<Pair<HRegionLocation, Entry>> actions)
    throws IOException {
  RegionServerWriter rsw = null;

  long startTime = System.nanoTime();
  try {
    rsw = getRegionServerWriter(key);
    rsw.sink.replayEntries(actions);

    // Pass along summary statistics
    rsw.incrementEdits(actions.size());
    rsw.incrementNanoTime(System.nanoTime() - startTime);
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.fatal(" Got while writing log entry to log", e);
    throw e;
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 19, Source: WALSplitter.java

Example 2: bootstrap

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
private static void bootstrap(final Path rd, final Configuration c)
throws IOException {
  LOG.info("BOOTSTRAP: creating hbase:meta region");
  try {
    // Bootstrapping, make sure blockcache is off.  Else, one will be
    // created here in bootstrap and it'll need to be cleaned up.  Better to
    // not make it in first place.  Turn off block caching for bootstrap.
    // Enable after.
    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
    setInfoFamilyCachingForMeta(metaDescriptor, false);
    HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor, null, true, true);
    setInfoFamilyCachingForMeta(metaDescriptor, true);
    HRegion.closeHRegion(meta);
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("bootstrap", e);
    throw e;
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 21, Source: MasterFileSystem.java

Example 3: nextRegion

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
private HRegionInfo nextRegion() throws IOException {
  try {
    Result results = getMetaRow();
    if (results == null) {
      return null;
    }
    HRegionInfo region = HRegionInfo.getHRegionInfo(results);
    if (region == null) {
      throw new NoSuchElementException("meta region entry missing " +
          Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER));
    }
    if (!region.getTable().equals(this.tableName)) {
      return null;
    }
    return region;
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("meta scanner error", e);
    metaScanner.close();
    throw e;
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 24, Source: HMerge.java

Example 4: nextRegion

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
private HRegionInfo nextRegion() throws IOException {
  try {
    Result results = getMetaRow();
    if (results == null) {
      return null;
    }
    byte[] regionInfoValue = results.getValue(HConstants.CATALOG_FAMILY,
        HConstants.REGIONINFO_QUALIFIER);
    if (regionInfoValue == null || regionInfoValue.length == 0) {
      throw new NoSuchElementException("meta region entry missing " +
          Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER));
    }
    HRegionInfo region = Writables.getHRegionInfo(regionInfoValue);
    if (!Bytes.equals(region.getTableName(), this.tableName)) {
      return null;
    }
    return region;
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("meta scanner error", e);
    metaScanner.close();
    throw e;
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 26, Source: HMerge.java

Example 5: bootstrap

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
private static void bootstrap(final Path rd, final Configuration c)
throws IOException {
  LOG.info("BOOTSTRAP: creating hbase:meta region");
  try {
    // Bootstrapping, make sure blockcache is off.  Else, one will be
    // created here in bootstrap and it'll need to be cleaned up.  Better to
    // not make it in first place.  Turn off block caching for bootstrap.
    // Enable after.
    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
    HTableDescriptor metaDescriptor = new FSTableDescriptors(c).get(TableName.META_TABLE_NAME);
    setInfoFamilyCachingForMeta(metaDescriptor, false);
    HRegion meta = HRegion.createHRegion(metaHRI, rd, c, metaDescriptor);
    setInfoFamilyCachingForMeta(metaDescriptor, true);
    HRegion.closeHRegion(meta);
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("bootstrap", e);
    throw e;
  }
}
 
Author: grokcoder, Project: pbase, Lines: 21, Source: MasterFileSystem.java

Example 6: bootstrap

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
private static void bootstrap(final Path rd, final Configuration c)
throws IOException {
  LOG.info("BOOTSTRAP: creating hbase:meta region");
  try {
    // Bootstrapping, make sure blockcache is off.  Else, one will be
    // created here in bootstrap and it'll need to be cleaned up.  Better to
    // not make it in first place.  Turn off block caching for bootstrap.
    // Enable after.
    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
    setInfoFamilyCachingForMeta(false);
    HRegion meta = HRegion.createHRegion(metaHRI, rd, c,
        HTableDescriptor.META_TABLEDESC);
    setInfoFamilyCachingForMeta(true);
    HRegion.closeHRegion(meta);
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("bootstrap", e);
    throw e;
  }
}
 
Author: tenggyut, Project: HIndex, Lines: 21, Source: MasterFileSystem.java

Example 7: processWorkItems

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
private void processWorkItems(String key, List<Pair<HRegionLocation, HLog.Entry>> actions)
    throws IOException {
  RegionServerWriter rsw = null;

  long startTime = System.nanoTime();
  try {
    rsw = getRegionServerWriter(key);
    rsw.sink.replayEntries(actions);

    // Pass along summary statistics
    rsw.incrementEdits(actions.size());
    rsw.incrementNanoTime(System.nanoTime() - startTime);
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.fatal(" Got while writing log entry to log", e);
    throw e;
  }
}
 
Author: tenggyut, Project: HIndex, Lines: 19, Source: HLogSplitter.java

Example 8: nextRegion

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
private HRegionInfo nextRegion() throws IOException {
  try {
    Result results = getMetaRow();
    if (results == null) {
      return null;
    }
    HRegionInfo region = HRegionInfo.getHRegionInfo(results);
    if (region == null) {
      throw new NoSuchElementException("meta region entry missing " +
          Bytes.toString(HConstants.CATALOG_FAMILY) + ":" +
          Bytes.toString(HConstants.REGIONINFO_QUALIFIER));
    }
    if (!Bytes.equals(region.getTableName(), this.tableName)) {
      return null;
    }
    return region;
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("meta scanner error", e);
    metaScanner.close();
    throw e;
  }
}
 
Author: daidong, Project: DominoHBase, Lines: 24, Source: HMerge.java

Example 9: chore

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
@Override
protected void chore() {
  try {
    FileStatus[] files = FSUtils.listStatus(this.fs, this.oldFileDir);
    checkAndDeleteEntries(files);
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.warn("Error while cleaning the logs", e);
  }
}
 
Author: fengchen8086, Project: ditb, Lines: 11, Source: CleanerChore.java

Example 10: bootstrap

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
private static void bootstrap(final Path rd, final Configuration c)
throws IOException {
  LOG.info("BOOTSTRAP: creating ROOT and first META regions");
  try {
    // Bootstrapping, make sure blockcache is off.  Else, one will be
    // created here in bootstrap and it'll need to be cleaned up.  Better to
    // not make it in first place.  Turn off block caching for bootstrap.
    // Enable after.
    HRegionInfo rootHRI = new HRegionInfo(HRegionInfo.ROOT_REGIONINFO);
    setInfoFamilyCachingForRoot(false);
    HRegionInfo metaHRI = new HRegionInfo(HRegionInfo.FIRST_META_REGIONINFO);
    setInfoFamilyCachingForMeta(false);
    HRegion root = HRegion.createHRegion(rootHRI, rd, c,
        HTableDescriptor.ROOT_TABLEDESC);
    HRegion meta = HRegion.createHRegion(metaHRI, rd, c,
        HTableDescriptor.META_TABLEDESC);
    setInfoFamilyCachingForRoot(true);
    setInfoFamilyCachingForMeta(true);
    // Add first region from the META table to the ROOT region.
    HRegion.addRegionToMETA(root, meta);
    root.close();
    root.getLog().closeAndDelete();
    meta.close();
    meta.getLog().closeAndDelete();
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("bootstrap", e);
    throw e;
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 31, Source: MasterFileSystem.java

Example 11: writeBuffer

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
private void writeBuffer(RegionEntryBuffer buffer) throws IOException {
  List<Entry> entries = buffer.entryBuffer;
  if (entries.isEmpty()) {
    LOG.warn(this.getName() + " got an empty buffer, skipping");
    return;
  }

  WriterAndPath wap = null;

  long startTime = System.nanoTime();
  try {
    int editsCount = 0;

    for (Entry logEntry : entries) {
      if (wap == null) {
        wap = outputSink.getWriterAndPath(logEntry);
        if (wap == null) {
          // getWriterAndPath decided we don't need to write these edits
          // Message was already logged
          return;
        }
      }
      wap.w.append(logEntry);
      outputSink.updateRegionMaximumEditLogSeqNum(logEntry);
      editsCount++;
    }
    // Pass along summary statistics
    wap.incrementEdits(editsCount);
    wap.incrementNanoTime(System.nanoTime() - startTime);
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.fatal(this.getName() + " Got while writing log entry to log", e);
    throw e;
  }
}
 
Author: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 36, Source: HLogSplitter.java

Example 12: append

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
@Override
void append(RegionEntryBuffer buffer) throws IOException {
  List<Entry> entries = buffer.entryBuffer;
  if (entries.isEmpty()) {
    LOG.warn("got an empty buffer, skipping");
    return;
  }

  WriterAndPath wap = null;

  long startTime = System.nanoTime();
  try {
    int editsCount = 0;

    for (Entry logEntry : entries) {
      if (wap == null) {
        wap = getWriterAndPath(logEntry);
        if (wap == null) {
          if (LOG.isDebugEnabled()) {
            LOG.debug("getWriterAndPath decided we don't need to write edits for " + logEntry);
          }
          return;
        }
      }
      wap.w.append(logEntry);
      this.updateRegionMaximumEditLogSeqNum(logEntry);
      editsCount++;
    }
    // Pass along summary statistics
    wap.incrementEdits(editsCount);
    wap.incrementNanoTime(System.nanoTime() - startTime);
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.fatal(" Got while writing log entry to log", e);
    throw e;
  }
}
 
Author: grokcoder, Project: pbase, Lines: 38, Source: WALSplitter.java

Example 13: append

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
void append(RegionEntryBuffer buffer) throws IOException {
  List<Entry> entries = buffer.entryBuffer;
  if (entries.isEmpty()) {
    LOG.warn("got an empty buffer, skipping");
    return;
  }

  WriterAndPath wap = null;

  long startTime = System.nanoTime();
  try {
    int editsCount = 0;

    for (Entry logEntry : entries) {
      if (wap == null) {
        wap = getWriterAndPath(logEntry);
        if (wap == null) {
          // getWriterAndPath decided we don't need to write these edits
          return;
        }
      }
      wap.w.append(logEntry);
      this.updateRegionMaximumEditLogSeqNum(logEntry);
      editsCount++;
    }
    // Pass along summary statistics
    wap.incrementEdits(editsCount);
    wap.incrementNanoTime(System.nanoTime() - startTime);
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.fatal(" Got while writing log entry to log", e);
    throw e;
  }
}
 
Author: tenggyut, Project: HIndex, Lines: 35, Source: HLogSplitter.java

Example 14: completeCompaction

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
@VisibleForTesting
protected void completeCompaction(final Collection<StoreFile> compactedFiles)
    throws IOException {
  try {
    // Do not delete old store files until we have sent out notification of
    // change in case old files are still being accessed by outstanding scanners.
    // Don't do this under writeLock; see HBASE-4485 for a possible deadlock
    // scenario that could have happened if continue to hold the lock.
    notifyChangedReadersObservers();
    // At this point the store will use new files for all scanners.

    // let the archive util decide if we should archive or delete the files
    LOG.debug("Removing store files after compaction...");
    for (StoreFile compactedFile : compactedFiles) {
      compactedFile.closeReader(true);
    }
    this.fs.removeStoreFiles(this.getColumnFamilyName(), compactedFiles);
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.error("Failed removing compacted files in " + this +
      ". Files we were trying to remove are " + compactedFiles.toString() +
      "; some of them may have been already removed", e);
  }

  // 4. Compute new store size
  this.storeSize = 0L;
  this.totalUncompressedBytes = 0L;
  for (StoreFile hsf : this.storeEngine.getStoreFileManager().getStorefiles()) {
    StoreFile.Reader r = hsf.getReader();
    if (r == null) {
      LOG.warn("StoreFile " + hsf + " has a null Reader");
      continue;
    }
    this.storeSize += r.length();
    this.totalUncompressedBytes += r.getTotalUncompressedBytes();
  }
}
 
Author: tenggyut, Project: HIndex, Lines: 38, Source: HStore.java

Example 15: chore

import org.apache.hadoop.hbase.RemoteExceptionHandler; // import the package/class the method depends on
@Override
protected void chore() {
  try {
    FileStatus [] files = FSUtils.listStatus(this.fs, this.oldLogDir, null);
    if (files == null) return;
    FILE: for (FileStatus file : files) {
      Path filePath = file.getPath();
      if (HLog.validateHLogFilename(filePath.getName())) {
        for (LogCleanerDelegate logCleaner : logCleanersChain) {
          if (logCleaner.isStopped()) {
            LOG.warn("A log cleaner is stopped, won't delete any log.");
            return;
          }

          if (!logCleaner.isLogDeletable(filePath) ) {
            // this log is not deletable, continue to process next log file
            continue FILE;
          }
        }
        // delete this log file if it passes all the log cleaners
        this.fs.delete(filePath, true);
      } else {
        LOG.warn("Found a wrongly formated file: "
            + file.getPath().getName());
        this.fs.delete(filePath, true);
      }
    }
  } catch (IOException e) {
    e = RemoteExceptionHandler.checkIOException(e);
    LOG.warn("Error while cleaning the logs", e);
  }
}
 
Author: lifeng5042, Project: RStore, Lines: 33, Source: LogCleaner.java


Note: The org.apache.hadoop.hbase.RemoteExceptionHandler.checkIOException examples in this article were compiled from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and the source code remains the copyright of its original authors; please consult each project's license before redistributing or using it. Do not reproduce this article without permission.