Java MultipleIOException.createIOException Method Code Examples

This article collects typical usage examples of the MultipleIOException.createIOException method from the Java class org.apache.hadoop.io.MultipleIOException. If you are wondering what exactly MultipleIOException.createIOException does, how to call it, or where to find working examples, the curated method examples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.io.MultipleIOException.


The following presents 15 code examples of MultipleIOException.createIOException, drawn from open-source projects and ordered by popularity by default.
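
Every example below follows the same best-effort pattern: iterate over a collection of resources or files, collect each IOException in a list instead of failing fast, and finally combine the accumulated failures into a single exception with MultipleIOException.createIOException. The following minimal sketch distills that pattern; it assumes hadoop-common is on the classpath, and the class name CloseAllExample and the resources parameter are hypothetical names chosen for illustration.

import java.io.Closeable;
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;

import org.apache.hadoop.io.MultipleIOException;

public class CloseAllExample {
  /**
   * Close every resource, collecting failures instead of stopping at the
   * first one, then surface them all as one combined IOException.
   */
  static void closeAll(List<? extends Closeable> resources) throws IOException {
    List<IOException> exceptions = new ArrayList<>();
    for (Closeable resource : resources) {
      try {
        resource.close();
      } catch (IOException ioe) {
        exceptions.add(ioe); // remember the failure, keep closing the rest
      }
    }
    if (!exceptions.isEmpty()) {
      // createIOException returns the sole exception when the list has one
      // element and wraps longer lists in a MultipleIOException.
      throw MultipleIOException.createIOException(exceptions);
    }
  }
}

On the catching side, a MultipleIOException exposes the individual failures through its getExceptions() accessor, so callers that need more than the aggregated message can still inspect each underlying IOException.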

Example 1: closeAll

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
/**
 * Close all DFSClient instances in the Cache.
 * @param onlyAutomatic only close those that are marked for automatic closing
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();

  ConcurrentMap<String, DFSClient> map = clientCache.asMap();

  for (Entry<String, DFSClient> item : map.entrySet()) {
    final DFSClient client = item.getValue();
    if (client != null) {
      try {
        client.close();
      } catch (IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
 
Developer: naver, Project: hadoop, Lines: 25, Source: DFSClientCache.java

Example 2: finishWritingAndClose

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
/**
 * @return null if we failed to report progress
 * @throws IOException
 */
@Override
public List<Path> finishWritingAndClose() throws IOException {
  boolean isSuccessful = false;
  List<Path> result = null;
  try {
    isSuccessful = finishWriting(false);
  } finally {
    result = close();
    List<IOException> thrown = closeLogWriters(null);
    if (thrown != null && !thrown.isEmpty()) {
      throw MultipleIOException.createIOException(thrown);
    }
  }
  if (isSuccessful) {
    splits = result;
  }
  return splits;
}
 
Developer: fengchen8086, Project: ditb, Lines: 23, Source: WALSplitter.java

Example 3: deleteStoreFilesWithoutArchiving

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
/**
 * Just do a simple delete of the given store files
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * This method is preferable to {@link #deleteFilesWithoutArchiving(Collection)} since it consumes
 * fewer resources, but is more limited in terms of usefulness
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. Deletion is attempted for all files before
 *           the exception is thrown, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<StoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting store files without archiving.");
  List<IOException> errors = new ArrayList<IOException>(0);
  for (StoreFile hsf : compactedFiles) {
    try {
      hsf.deleteReader();
    } catch (IOException e) {
      LOG.error("Failed to delete store file:" + hsf.getPath());
      errors.add(e);
    }
  }
  if (errors.size() > 0) {
    throw MultipleIOException.createIOException(errors);
  }
}
 
Developer: fengchen8086, Project: ditb, Lines: 28, Source: HFileArchiver.java

Example 4: deleteFilesWithoutArchiving

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
/**
 * Simple delete of regular files from the {@link FileSystem}.
 * <p>
 * This method is a more generic implementation than the other deleteXXX methods in this class,
 * allowing more code reuse at the cost of a few more short-lived objects (which should have
 * minimal impact on the JVM).
 * @param files {@link Collection} of files to be deleted
 * @throws IOException if a file cannot be deleted. Deletion is attempted for all files before
 *           the exception is thrown, rather than failing at the first file.
 */
private static void deleteFilesWithoutArchiving(Collection<File> files) throws IOException {
  List<IOException> errors = new ArrayList<IOException>(0);
  for (File file : files) {
    try {
      LOG.debug("Deleting region file:" + file);
      file.delete();
    } catch (IOException e) {
      LOG.error("Failed to delete file:" + file);
      errors.add(e);
    }
  }
  if (errors.size() > 0) {
    throw MultipleIOException.createIOException(errors);
  }
}
 
Developer: fengchen8086, Project: LCIndex-HBase-0.94.16, Lines: 27, Source: HFileArchiver.java

Example 5: finishWritingAndClose

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
/**
 * @return null if we failed to report progress
 * @throws IOException
 */
@Override
List<Path> finishWritingAndClose() throws IOException {
  boolean isSuccessful = false;
  List<Path> result = null;
  try {
    isSuccessful = finishWriting();
  } finally {
    result = close();
    List<IOException> thrown = closeLogWriters(null);
    if (thrown != null && !thrown.isEmpty()) {
      throw MultipleIOException.createIOException(thrown);
    }
  }
  if (isSuccessful) {
    splits = result;
  }
  return splits;
}
 
Developer: grokcoder, Project: pbase, Lines: 23, Source: WALSplitter.java

Example 6: closeAll

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
/**
 * Close all DFSClient instances in the Cache.
 *
 * @param onlyAutomatic
 *     only close those that are marked for automatic closing
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();

  ConcurrentMap<String, DFSClient> map = clientCache.asMap();

  for (Entry<String, DFSClient> item : map.entrySet()) {
    final DFSClient client = item.getValue();
    if (client != null) {
      try {
        client.close();
      } catch (IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
 
Developer: hopshadoop, Project: hops, Lines: 27, Source: DFSClientCache.java

Example 7: deleteFilesWithoutArchiving

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
/**
 * Simple delete of regular files from the {@link FileSystem}.
 * <p>
 * This method is a more generic implementation than the other deleteXXX
 * methods in this class, allowing more code reuse at the cost of a few
 * more short-lived objects (which should have minimal impact on the JVM).
 * @param files {@link Collection} of files to be deleted
 * @throws IOException if a file cannot be deleted. Deletion is attempted
 *           for all files before the exception is thrown, rather than
 *           failing at the first file.
 */
private static void deleteFilesWithoutArchiving(Collection<File> files) throws IOException {
  List<IOException> errors = new ArrayList<IOException>(0);
  for (File file : files) {
    try {
      LOG.debug("Deleting region file:" + file);
      file.delete();
    } catch (IOException e) {
      LOG.error("Failed to delete file:" + file);
      errors.add(e);
    }
  }
  if (errors.size() > 0) {
    throw MultipleIOException.createIOException(errors);
  }
}
 
Developer: wanhao, Project: IRIndex, Lines: 28, Source: HFileArchiver.java

Example 8: finishWritingAndClose

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
/**
 * @return null if we failed to report progress
 * @throws IOException
 */
@Override
public List<Path> finishWritingAndClose() throws IOException {
  boolean isSuccessful = false;
  List<Path> result = null;
  try {
    isSuccessful = finishWriting(false);
  } finally {
    result = close();
    List<IOException> thrown = closeLogWriters(null);
    if (CollectionUtils.isNotEmpty(thrown)) {
      throw MultipleIOException.createIOException(thrown);
    }
  }
  if (isSuccessful) {
    splits = result;
  }
  return splits;
}
 
Developer: apache, Project: hbase, Lines: 23, Source: WALSplitter.java

Example 9: writeThenClose

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
private Path writeThenClose(RegionEntryBuffer buffer) throws IOException {
  WriterAndPath wap = appendBuffer(buffer, false);
  if (wap != null) {
    String encodedRegionName = Bytes.toString(buffer.encodedRegionName);
    Long value = regionRecoverStatMap.putIfAbsent(encodedRegionName, wap.editsWritten);
    if (value != null) {
      Long newValue = regionRecoverStatMap.get(encodedRegionName) + wap.editsWritten;
      regionRecoverStatMap.put(encodedRegionName, newValue);
    }
  }

  Path dst = null;
  List<IOException> thrown = new ArrayList<>();
  if (wap != null) {
    dst = closeWriter(Bytes.toString(buffer.encodedRegionName), wap, thrown);
  }
  if (!thrown.isEmpty()) {
    throw MultipleIOException.createIOException(thrown);
  }
  return dst;
}
 
Developer: apache, Project: hbase, Lines: 22, Source: WALSplitter.java

Example 10: deleteStoreFilesWithoutArchiving

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
/**
 * Just do a simple delete of the given store files
 * <p>
 * A best effort is made to delete each of the files, rather than bailing on the first failure.
 * <p>
 * @param compactedFiles store files to delete from the file system.
 * @throws IOException if a file cannot be deleted. Deletion is attempted for all files before
 *           the exception is thrown, rather than failing at the first file.
 */
private static void deleteStoreFilesWithoutArchiving(Collection<HStoreFile> compactedFiles)
    throws IOException {
  LOG.debug("Deleting store files without archiving.");
  List<IOException> errors = new ArrayList<>(0);
  for (HStoreFile hsf : compactedFiles) {
    try {
      hsf.deleteStoreFile();
    } catch (IOException e) {
      LOG.error("Failed to delete store file:" + hsf.getPath());
      errors.add(e);
    }
  }
  if (errors.size() > 0) {
    throw MultipleIOException.createIOException(errors);
  }
}
 
Developer: apache, Project: hbase, Lines: 26, Source: HFileArchiver.java

Example 11: closeAll

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
synchronized void closeAll() throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();
  while (!map.isEmpty()) {
    Map.Entry<Key, FileSystem> e = map.entrySet().iterator().next();
    final Key key = e.getKey();
    final FileSystem fs = e.getValue();

    //remove from cache
    remove(key, fs);

    if (fs != null) {
      try {
        fs.close();
      }
      catch(IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
 
Developer: Seagate, Project: hadoop-on-lustre, Lines: 25, Source: FileSystem.java

Example 12: closeAll

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
/**
 * Close all FileSystem instances in the Cache.
 * @param onlyAutomatic only close those that are marked for automatic closing
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();

  // Make a copy of the keys in the map since we'll be modifying
  // the map while iterating over it, which isn't safe.
  List<Key> keys = new ArrayList<Key>();
  keys.addAll(map.keySet());

  for (Key key : keys) {
    final FileSystem fs = map.get(key);

    if (onlyAutomatic && !toAutoClose.contains(key)) {
      continue;
    }

    //remove from cache
    map.remove(key);
    toAutoClose.remove(key);

    if (fs != null) {
      try {
        fs.close();
      }
      catch(IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
 
Developer: nucypher, Project: hadoop-oss, Lines: 38, Source: FileSystem.java

Example 13: closeAll

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
/**
 * Close all FileSystem instances in the Cache.
 * @param onlyAutomatic only close those that are marked for automatic closing
 */
synchronized void closeAll(boolean onlyAutomatic) throws IOException {
  List<IOException> exceptions = new ArrayList<IOException>();

  // Make a copy of the keys in the map since we'll be modifying
  // the map while iterating over it, which isn't safe.
  List<Key> keys = new ArrayList<Key>();
  keys.addAll(map.keySet());

  for (Key key : keys) {
    final FileSystem fs = map.get(key);

    if (onlyAutomatic && !toAutoClose.contains(key)) {
      continue;
    }

    //remove from cache
    remove(key, fs);

    if (fs != null) {
      try {
        fs.close();
      }
      catch(IOException ioe) {
        exceptions.add(ioe);
      }
    }
  }

  if (!exceptions.isEmpty()) {
    throw MultipleIOException.createIOException(exceptions);
  }
}
 
Developer: naver, Project: hadoop, Lines: 37, Source: FileSystem.java

Example 14: addVolumeAndBlockPool

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
private void addVolumeAndBlockPool(Collection<StorageLocation> dataLocations,
    Storage.StorageDirectory sd, final Collection<String> bpids)
    throws IOException {
  final File dir = sd.getCurrentDir();
  final StorageType storageType =
      getStorageTypeFromLocations(dataLocations, sd.getRoot());

  final FsVolumeImpl fsVolume = new FsVolumeImpl(
      this, sd.getStorageUuid(), dir, this.conf, storageType);
  final ReplicaMap tempVolumeMap = new ReplicaMap(fsVolume);

  List<IOException> exceptions = Lists.newArrayList();
  for (final String bpid : bpids) {
    try {
      fsVolume.addBlockPool(bpid, this.conf);
      fsVolume.getVolumeMap(bpid, tempVolumeMap, ramDiskReplicaTracker);
    } catch (IOException e) {
      LOG.warn("Caught exception when adding " + fsVolume +
          ". Will throw later.", e);
      exceptions.add(e);
    }
  }
  if (!exceptions.isEmpty()) {
    // The state of FsDatasetImpl is not modified, so there is no need to roll back.
    throw MultipleIOException.createIOException(exceptions);
  }

  volumeMap.addAll(tempVolumeMap);
  storageMap.put(sd.getStorageUuid(),
      new DatanodeStorage(sd.getStorageUuid(),
          DatanodeStorage.State.NORMAL,
          storageType));
  asyncDiskService.addVolume(sd.getCurrentDir());
  volumes.addVolume(fsVolume);

  LOG.info("Added volume - " + dir + ", StorageType: " + storageType);
}
 
Developer: yncxcw, Project: FlexMap, Lines: 38, Source: FsDatasetImpl.java

Example 15: loadTableInfo

import org.apache.hadoop.io.MultipleIOException; // import the package/class this method depends on
/**
 * Populate hbi entries from regionInfos loaded from the file system.
 */
private void loadTableInfo() throws IOException {
  List<IOException> ioes = new ArrayList<IOException>();
  // generate region split structure
  for (HbckInfo hbi : regionInfo.values()) {
    // only load entries that haven't been loaded yet.
    if (hbi.metaEntry == null) {
      try {
        loadMetaEntry(hbi);
      } catch (IOException ioe) {
        String msg = "Unable to load region info for table " + hbi.hdfsTableName
          + "!  It may be an invalid format or version file.  You may want to "
          + "remove " + hbi.foundRegionDir.getPath()
          + " region from hdfs and retry.";
        errors.report(msg);
        LOG.error(msg, ioe);
        ioes.add(new RegionInfoLoadException(msg, ioe));
        continue;
      }
    }

    // get table name from hdfs, populate various HBaseFsck tables.
    String tableName = hbi.hdfsTableName;
    TInfo modTInfo = tablesInfo.get(tableName);
    if (modTInfo == null) {
      modTInfo = new TInfo(tableName);
    }
    modTInfo.addRegionInfo(hbi);
    tablesInfo.put(tableName, modTInfo);
  }

  if (ioes.size() != 0) {
    throw MultipleIOException.createIOException(ioes);
  }
}
 
Developer: lifeng5042, Project: RStore, Lines: 38, Source: HBaseFsck.java


Note: The org.apache.hadoop.io.MultipleIOException.createIOException method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from community-contributed open-source projects; copyright in the source code belongs to the original authors, and any distribution or use should follow the corresponding project's license. Do not reproduce without permission.