

Java Canceler Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.util.Canceler. If you have been wondering what the Canceler class does, how to use it, or where to find examples of it in real code, the curated class examples below may help.


The Canceler class belongs to the org.apache.hadoop.hdfs.util package. Fifteen code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Java code examples.
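
Before walking through the examples, it may help to see how a Canceler is typically driven on its own: one thread does long-running work in chunks and polls the canceler, while another thread requests cancellation. The sketch below assumes the commonly used cancel(String), isCancelled() and getCancellationReason() methods; treat it as an illustrative sketch rather than authoritative API documentation.

import org.apache.hadoop.hdfs.util.Canceler;

public class CancelerSketch {
  public static void main(String[] args) throws InterruptedException {
    final Canceler canceler = new Canceler();

    // Worker thread: does chunked work and polls the canceler between chunks.
    Thread worker = new Thread(new Runnable() {
      @Override
      public void run() {
        for (int chunk = 0; chunk < 100; chunk++) {
          if (canceler.isCancelled()) {
            System.out.println("Aborted: " + canceler.getCancellationReason());
            return;
          }
          // ... process one chunk of work here ...
        }
      }
    });
    worker.start();

    // Controller thread: request cancellation shortly after starting.
    Thread.sleep(50);
    canceler.cancel("checkpoint superseded by a newer one");
    worker.join();
  }
}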

Example 1: uploadImageFromStorage

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
/**
 * Requests that the NameNode download an image from this node.  Allows for
 * optional external cancelation.
 *
 * @param fsName the http address for the remote NN
 * @param conf Configuration
 * @param storage the storage directory to transfer the image from
 * @param nnf the NameNodeFile type of the image
 * @param txid the transaction ID of the image to be uploaded
 * @param canceler optional canceler to check for abort of upload
 * @throws IOException if there is an I/O error or cancellation
 */
public static void uploadImageFromStorage(URL fsName, Configuration conf,
    NNStorage storage, NameNodeFile nnf, long txid, Canceler canceler)
    throws IOException {
  URL url = new URL(fsName, ImageServlet.PATH_SPEC);
  long startTime = Time.monotonicNow();
  try {
    uploadImage(url, conf, storage, nnf, txid, canceler);
  } catch (HttpPutFailedException e) {
    if (e.getResponseCode() == HttpServletResponse.SC_CONFLICT) {
      // this is OK - this means that a previous attempt to upload
      // this checkpoint succeeded even though we thought it failed.
      LOG.info("Image upload with txid " + txid + 
          " conflicted with a previous image upload to the " +
          "same NameNode. Continuing...", e);
      return;
    } else {
      throw e;
    }
  }
  double xferSec = Math.max(
      ((float) (Time.monotonicNow() - startTime)) / 1000.0, 0.001);
  LOG.info("Uploaded image with txid " + txid + " to namenode at " + fsName
      + " in " + xferSec + " seconds");
}
 
Developer ID: naver, Project: hadoop, Lines of code: 37, Source file: TransferFsImage.java

Example 2: testCheckpointWhenNoNewTransactionsHappened

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
/**
 * Test for the case when the SBN is configured to checkpoint based
 * on a time period, but no transactions are happening on the
 * active. Thus, it would want to save a second checkpoint at the
 * same txid, which is a no-op. This test makes sure this doesn't
 * cause any problem.
 */
@Test(timeout = 300000)
public void testCheckpointWhenNoNewTransactionsHappened()
    throws Exception {
  // Checkpoint as fast as we can, in a tight loop.
  cluster.getConfiguration(1).setInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
  cluster.restartNameNode(1);
  nn1 = cluster.getNameNode(1);
 
  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);
  
  // We shouldn't save any checkpoints at txid=0
  Thread.sleep(1000);
  Mockito.verify(spyImage1, Mockito.never())
    .saveNamespace((FSNamesystem) Mockito.anyObject());
 
  // Roll the primary and wait for the standby to catch up
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  Thread.sleep(2000);
  
  // We should make exactly one checkpoint at this new txid. 
  Mockito.verify(spyImage1, Mockito.times(1)).saveNamespace(
      (FSNamesystem) Mockito.anyObject(), Mockito.eq(NameNodeFile.IMAGE),
      (Canceler) Mockito.anyObject());
}
 
Developer ID: naver, Project: hadoop, Lines of code: 33, Source file: TestStandbyCheckpoints.java

Example 3: uploadImageFromStorage

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
/**
 * Requests that the NameNode download an image from this node.  Allows for
 * optional external cancelation.
 *
 * @param fsName the http address for the remote NN
 * @param conf Configuration
 * @param storage the storage directory to transfer the image from
 * @param nnf the NameNodeFile type of the image
 * @param txid the transaction ID of the image to be uploaded
 * @param canceler optional canceler to check for abort of upload
 * @throws IOException if there is an I/O error or cancellation
 */
public static TransferResult uploadImageFromStorage(URL fsName, Configuration conf,
    NNStorage storage, NameNodeFile nnf, long txid, Canceler canceler)
    throws IOException {
  URL url = new URL(fsName, ImageServlet.PATH_SPEC);
  long startTime = Time.monotonicNow();
  try {
    uploadImage(url, conf, storage, nnf, txid, canceler);
  } catch (HttpPutFailedException e) {
    // translate the error code to a result, which is a bit more obvious in usage
    TransferResult result = TransferResult.getResultForCode(e.getResponseCode());
    if (result.shouldReThrowException) {
      throw e;
    }
    return result;
  }
  double xferSec = Math.max(
      ((float) (Time.monotonicNow() - startTime)) / 1000.0, 0.001);
  LOG.info("Uploaded image with txid " + txid + " to namenode at " + fsName
      + " in " + xferSec + " seconds");
  return TransferResult.SUCCESS;
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 34, Source file: TransferFsImage.java

Example 4: testCheckpointWhenNoNewTransactionsHappened

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
/**
 * Test for the case when the SBN is configured to checkpoint based
 * on a time period, but no transactions are happening on the
 * active. Thus, it would want to save a second checkpoint at the
 * same txid, which is a no-op. This test makes sure this doesn't
 * cause any problem.
 */
@Test(timeout = 300000)
public void testCheckpointWhenNoNewTransactionsHappened()
    throws Exception {
  // Checkpoint as fast as we can, in a tight loop.
  cluster.getConfiguration(1).setInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
  cluster.restartNameNode(1);
  nns[1] = cluster.getNameNode(1);

  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nns[1]);

  // We shouldn't save any checkpoints at txid=0
  Thread.sleep(1000);
  Mockito.verify(spyImage1, Mockito.never())
    .saveNamespace((FSNamesystem) Mockito.anyObject());
 
  // Roll the primary and wait for the standby to catch up
  HATestUtil.waitForStandbyToCatchUp(nns[0], nns[1]);
  Thread.sleep(2000);
  
  // We should make exactly one checkpoint at this new txid. 
  Mockito.verify(spyImage1, Mockito.times(1)).saveNamespace(
      (FSNamesystem) Mockito.anyObject(), Mockito.eq(NameNodeFile.IMAGE),
      (Canceler) Mockito.anyObject());
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 33, Source file: TestStandbyCheckpoints.java

Example 5: saveNamespace

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
/**
 * Save the contents of the FS image to a new image file in each of the
 * current storage directories.
 * @param canceler 
 */
public synchronized void saveNamespace(FSNamesystem source,
    Canceler canceler) throws IOException {
  assert editLog != null : "editLog must be initialized";
  storage.attemptRestoreRemovedStorage();

  boolean editLogWasOpen = editLog.isSegmentOpen();
  
  if (editLogWasOpen) {
    editLog.endCurrentLogSegment(true);
  }
  long imageTxId = getLastAppliedOrWrittenTxId();
  try {
    saveFSImageInAllDirs(source, imageTxId, canceler);
    storage.writeAll();
  } finally {
    if (editLogWasOpen) {
      editLog.startLogSegment(imageTxId + 1, true);
      // Take this opportunity to note the current transaction.
      // Even if the namespace save was cancelled, this marker
      // is only used to determine what transaction ID is required
      // for startup. So, it doesn't hurt to update it unnecessarily.
      storage.writeTransactionIdFileToStorage(imageTxId + 1);
    }
  }
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 31, Source file: FSImage.java

Example 6: testCheckpointWhenNoNewTransactionsHappened

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
/**
 * Test for the case when the SBN is configured to checkpoint based
 * on a time period, but no transactions are happening on the
 * active. Thus, it would want to save a second checkpoint at the
 * same txid, which is a no-op. This test makes sure this doesn't
 * cause any problem.
 */
@Test
public void testCheckpointWhenNoNewTransactionsHappened()
    throws Exception {
  // Checkpoint as fast as we can, in a tight loop.
  cluster.getConfiguration(1).setInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
  cluster.restartNameNode(1);
  nn1 = cluster.getNameNode(1);
 
  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);
  
  // We shouldn't save any checkpoints at txid=0
  Thread.sleep(1000);
  Mockito.verify(spyImage1, Mockito.never())
    .saveNamespace((FSNamesystem) Mockito.anyObject());
 
  // Roll the primary and wait for the standby to catch up
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  Thread.sleep(2000);
  
  // We should make exactly one checkpoint at this new txid. 
  Mockito.verify(spyImage1, Mockito.times(1))
    .saveNamespace((FSNamesystem) Mockito.anyObject(),
        (Canceler)Mockito.anyObject());       
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 33, Source file: TestStandbyCheckpoints.java

Example 7: testCheckpointWhenNoNewTransactionsHappened

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
/**
 * Test for the case when the SBN is configured to checkpoint based
 * on a time period, but no transactions are happening on the
 * active. Thus, it would want to save a second checkpoint at the
 * same txid, which is a no-op. This test makes sure this doesn't
 * cause any problem.
 */
@Test
public void testCheckpointWhenNoNewTransactionsHappened()
    throws Exception {
  // Checkpoint as fast as we can, in a tight loop.
  cluster.getConfiguration(1).setInt(
      DFSConfigKeys.DFS_NAMENODE_CHECKPOINT_PERIOD_KEY, 0);
  cluster.restartNameNode(1);
  nn1 = cluster.getNameNode(1);
 
  FSImage spyImage1 = NameNodeAdapter.spyOnFsImage(nn1);
  
  // We shouldn't save any checkpoints at txid=0
  Thread.sleep(1000);
  Mockito.verify(spyImage1, Mockito.never())
    .saveNamespace((FSNamesystem) Mockito.anyObject());
 
  // Roll the primary and wait for the standby to catch up
  HATestUtil.waitForStandbyToCatchUp(nn0, nn1);
  Thread.sleep(2000);
  
  // We should make exactly one checkpoint at this new txid. 
  Mockito.verify(spyImage1, Mockito.times(1)).saveNamespace(
      (FSNamesystem) Mockito.anyObject(), Mockito.eq(NameNodeFile.IMAGE),
      (Canceler) Mockito.anyObject());
}
 
Developer ID: Seagate, Project: hadoop-on-lustre2, Lines of code: 33, Source file: TestStandbyCheckpoints.java

Example 8: SaveNamespaceContext

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
SaveNamespaceContext(
    FSNamesystem sourceNamesystem,
    long txid,
    Canceler canceller) {
  this.sourceNamesystem = sourceNamesystem;
  this.txid = txid;
  this.canceller = canceller;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 9, Source file: SaveNamespaceContext.java
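
The context above simply records the Canceler; its purpose is to let the image savers periodically ask whether the save has been called off. A checkCancelled-style helper along the following lines (the method name and exception choice are assumptions for illustration, not taken from the snippet above) is the usual shape of that check:

// Hypothetical sketch: how a SaveNamespaceContext-like class could surface
// cancellation to the image savers. Names here are illustrative assumptions.
void checkCancelled() throws IOException {
  if (canceller.isCancelled()) {
    throw new IOException(
        "Namespace save cancelled: " + canceller.getCancellationReason());
  }
}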

Example 9: writeFileToPutRequest

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
private static void writeFileToPutRequest(Configuration conf,
    HttpURLConnection connection, File imageFile, Canceler canceler)
    throws FileNotFoundException, IOException {
  connection.setRequestProperty(CONTENT_TYPE, "application/octet-stream");
  connection.setRequestProperty(CONTENT_TRANSFER_ENCODING, "binary");
  OutputStream output = connection.getOutputStream();
  FileInputStream input = new FileInputStream(imageFile);
  try {
    copyFileToStream(output, imageFile, input,
        ImageServlet.getThrottler(conf), canceler);
  } finally {
    IOUtils.closeStream(input);
    IOUtils.closeStream(output);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 16, Source file: TransferFsImage.java
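
Example 9 delegates the byte copying to copyFileToStream, handing the Canceler along. A cancellation-aware copy loop usually checks the canceler between buffer writes, roughly as in the hypothetical sketch below (the buffer size, method name and abort behaviour are assumptions, not the real copyFileToStream):

// Hypothetical sketch of a cancellation-aware copy loop; the real
// copyFileToStream also applies throttling and checksum handling.
private static void copyWithCancelChecks(InputStream in, OutputStream out,
    Canceler canceler) throws IOException {
  byte[] buf = new byte[8192];
  int n;
  while ((n = in.read(buf)) > 0) {
    if (canceler != null && canceler.isCancelled()) {
      throw new IOException(
          "Transfer cancelled: " + canceler.getCancellationReason());
    }
    out.write(buf, 0, n);
  }
}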

Example 10: saveLegacyOIVImage

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
/**
 * Save FSimage in the legacy format. This is not for NN consumption,
 * but for tools like OIV.
 */
public void saveLegacyOIVImage(FSNamesystem source, String targetDir,
    Canceler canceler) throws IOException {
  FSImageCompression compression =
      FSImageCompression.createCompression(conf);
  long txid = getLastAppliedOrWrittenTxId();
  SaveNamespaceContext ctx = new SaveNamespaceContext(source, txid,
      canceler);
  FSImageFormat.Saver saver = new FSImageFormat.Saver(ctx);
  String imageFileName = NNStorage.getLegacyOIVImageFileName(txid);
  File imageFile = new File(targetDir, imageFileName);
  saver.save(imageFile, compression);
  archivalManager.purgeOldLegacyOIVImages(targetDir, txid);
}
 
Developer ID: naver, Project: hadoop, Lines of code: 18, Source file: FSImage.java
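
A caller would normally hand saveLegacyOIVImage a fresh Canceler so the OIV dump can be aborted independently of other checkpoint work. A minimal hypothetical invocation (fsImage, namesystem and the target directory are placeholder names) might look like:

// Hypothetical usage sketch; fsImage, namesystem and the path are placeholders.
Canceler canceler = new Canceler();
fsImage.saveLegacyOIVImage(namesystem, "/data/oiv-images", canceler);
// Later, from another thread, if the dump needs to stop early:
canceler.cancel("shutting down standby NameNode");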

Example 11: saveNamespace

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
/**
 * Save the contents of the FS image to a new image file in each of the
 * current storage directories.
 */
public synchronized void saveNamespace(FSNamesystem source, NameNodeFile nnf,
    Canceler canceler) throws IOException {
  assert editLog != null : "editLog must be initialized";
  LOG.info("Save namespace ...");
  storage.attemptRestoreRemovedStorage();

  boolean editLogWasOpen = editLog.isSegmentOpen();
  
  if (editLogWasOpen) {
    editLog.endCurrentLogSegment(true);
  }
  long imageTxId = getLastAppliedOrWrittenTxId();
  if (!addToCheckpointing(imageTxId)) {
    throw new IOException(
        "FS image is being downloaded from another NN at txid " + imageTxId);
  }
  try {
    try {
      saveFSImageInAllDirs(source, nnf, imageTxId, canceler);
      storage.writeAll();
    } finally {
      if (editLogWasOpen) {
        editLog.startLogSegment(imageTxId + 1, true);
        // Take this opportunity to note the current transaction.
        // Even if the namespace save was cancelled, this marker
        // is only used to determine what transaction ID is required
        // for startup. So, it doesn't hurt to update it unnecessarily.
        storage.writeTransactionIdFileToStorage(imageTxId + 1);
      }
    }
  } finally {
    removeFromCheckpointing(imageTxId);
  }
}
 
Developer ID: naver, Project: hadoop, Lines of code: 39, Source file: FSImage.java

Example 12: saveFSImageToTempFile

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
/** Save the fsimage to a temp file */
private File saveFSImageToTempFile() throws IOException {
  SaveNamespaceContext context = new SaveNamespaceContext(fsn, txid,
      new Canceler());
  FSImageFormatProtobuf.Saver saver = new FSImageFormatProtobuf.Saver(context);
  FSImageCompression compression = FSImageCompression.createCompression(conf);
  File imageFile = getImageFile(testDir, txid);
  fsn.readLock();
  try {
    saver.save(imageFile, compression);
  } finally {
    fsn.readUnlock();
  }
  return imageFile;
}
 
Developer ID: naver, Project: hadoop, Lines of code: 16, Source file: TestFSImageWithSnapshot.java

Example 13: saveNamespace

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
/**
 * Save the contents of the FS image to a new image file in each of the
 * current storage directories.
 */
public synchronized void saveNamespace(FSNamesystem source, NameNodeFile nnf,
    Canceler canceler) throws IOException {
  assert editLog != null : "editLog must be initialized";
  LOG.info("Save namespace ...");
  storage.attemptRestoreRemovedStorage();

  boolean editLogWasOpen = editLog.isSegmentOpen();
  
  if (editLogWasOpen) {
    editLog.endCurrentLogSegment(true);
  }
  long imageTxId = getLastAppliedOrWrittenTxId();
  if (!addToCheckpointing(imageTxId)) {
    throw new IOException(
        "FS image is being downloaded from another NN at txid " + imageTxId);
  }
  try {
    try {
      saveFSImageInAllDirs(source, nnf, imageTxId, canceler);
      if (!source.isRollingUpgrade()) {
        storage.writeAll();
      }
    } finally {
      if (editLogWasOpen) {
        editLog.startLogSegmentAndWriteHeaderTxn(imageTxId + 1,
            source.getEffectiveLayoutVersion());
        // Take this opportunity to note the current transaction.
        // Even if the namespace save was cancelled, this marker
        // is only used to determine what transaction ID is required
        // for startup. So, it doesn't hurt to update it unnecessarily.
        storage.writeTransactionIdFileToStorage(imageTxId + 1);
      }
    }
  } finally {
    removeFromCheckpointing(imageTxId);
  }
  //Update NameDirSize Metric
  getStorage().updateNameDirSize();
}
 
Developer ID: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines of code: 44, Source file: FSImage.java

Example 14: saveNamespace

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
/**
 * Save the contents of the FS image to a new image file in each of the
 * current storage directories.
 */
public synchronized void saveNamespace(FSNamesystem source, NameNodeFile nnf,
    Canceler canceler) throws IOException {
  assert editLog != null : "editLog must be initialized";
  LOG.info("Save namespace ...");
  storage.attemptRestoreRemovedStorage();

  boolean editLogWasOpen = editLog.isSegmentOpen();
  
  if (editLogWasOpen) {
    editLog.endCurrentLogSegment(true);
  }
  long imageTxId = getLastAppliedOrWrittenTxId();
  try {
    saveFSImageInAllDirs(source, nnf, imageTxId, canceler);
    storage.writeAll();
  } finally {
    if (editLogWasOpen) {
      editLog.startLogSegment(imageTxId + 1, true);
      // Take this opportunity to note the current transaction.
      // Even if the namespace save was cancelled, this marker
      // is only used to determine what transaction ID is required
      // for startup. So, it doesn't hurt to update it unnecessarily.
      storage.writeTransactionIdFileToStorage(imageTxId + 1);
    }
  }
}
 
Developer ID: Nextzero, Project: hadoop-2.6.0-cdh5.4.3, Lines of code: 31, Source file: FSImage.java

Example 15: saveFSImageToTempFile

import org.apache.hadoop.hdfs.util.Canceler; // import the required package/class
/** Save the fsimage to a temp file */
private File saveFSImageToTempFile() throws IOException {
  SaveNamespaceContext context = new SaveNamespaceContext(fsn, txid,
      new Canceler());
  FSImageFormat.Saver saver = new FSImageFormat.Saver(context);
  FSImageCompression compression = FSImageCompression.createCompression(conf);
  File imageFile = getImageFile(testDir, txid);
  fsn.readLock();
  try {
    saver.save(imageFile, compression);
  } finally {
    fsn.readUnlock();
  }
  return imageFile;
}
 
Developer ID: ict-carch, Project: hadoop-plus, Lines of code: 16, Source file: TestFSImageWithSnapshot.java


Note: the org.apache.hadoop.hdfs.util.Canceler class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets come from open-source projects contributed by their respective authors, and copyright in the source code remains with the original authors; please consult each project's license before redistributing or reusing the code. Do not reproduce this article without permission.