当前位置: 首页>>代码示例>>Java>>正文


Java TransferFsImage类代码示例

本文整理汇总了Java中org.apache.hadoop.hdfs.server.namenode.TransferFsImage的典型用法代码示例。如果您正苦于以下问题:Java TransferFsImage类的具体用法?Java TransferFsImage怎么用?Java TransferFsImage使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


TransferFsImage类属于org.apache.hadoop.hdfs.server.namenode包,在下文中一共展示了TransferFsImage类的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: fetchImage

import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; //导入依赖的package包/类
/**
 * Downloads the most recent fsimage from the active NameNode and stores it
 * in the local directory named on the command line.
 *
 * @param argv the full array of command-line parameters
 * @param idx  index in {@code argv} of the destination directory argument
 * @return 0, the success exit code
 * @throws IOException if the active NameNode's info server cannot be resolved
 */
public int fetchImage(final String[] argv, final int idx) throws IOException {
  final Configuration conf = getConf();
  // Resolve the HTTP(S) endpoint of the active NameNode's info server,
  // honoring whichever client scheme (http/https) the configuration selects.
  final URL imageSource = DFSUtil.getInfoServer(
      HAUtil.getAddressOfActive(getDFS()), conf,
      DFSUtil.getHttpClientScheme(conf)).toURL();
  final File destinationDir = new File(argv[idx]);
  // Perform the transfer as the current user so security credentials
  // (e.g. Kerberos tickets) are applied to the HTTP request.
  SecurityUtil.doAsCurrentUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      TransferFsImage.downloadMostRecentImageToDirectory(imageSource,
          destinationDir);
      return null;
    }
  });
  return 0;
}
 
开发者ID:naver,项目名称:hadoop,代码行数:27,代码来源:DFSAdmin.java

示例2: setupInputStream

import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; //导入依赖的package包/类
/**
 * Opens an HTTP connection to the image URL and prepares the input stream,
 * validating the response status, the advertised content length, and the
 * MD5 digest header before exposing the stream.
 */
private void setupInputStream() throws IOException {
  final HttpURLConnection conn = (HttpURLConnection) url.openConnection();

  // Apply the same timeout to both the connect and the read phase.
  conn.setConnectTimeout(httpTimeout);
  conn.setReadTimeout(httpTimeout);

  final int status = conn.getResponseCode();
  if (status != HttpURLConnection.HTTP_OK) {
    throw new IOException("Fetch of " + url + " failed with status code "
        + status + "\nResponse message:\n"
        + conn.getResponseMessage());
  }

  final String lengthHeader = conn
      .getHeaderField(TransferFsImage.CONTENT_LENGTH);
  // The server must advertise a positive length or the image is unusable.
  if (lengthHeader == null) {
    throw new IOException(TransferFsImage.CONTENT_LENGTH
        + " header is not provided by the server when trying to fetch "
        + url);
  }
  advertisedSize = Long.parseLong(lengthHeader);
  if (advertisedSize <= 0) {
    throw new IOException("Invalid " + TransferFsImage.CONTENT_LENGTH
        + " header: " + lengthHeader);
  }

  // A digest is mandatory: without it the downloaded image cannot be
  // verified, so the image is treated as invalid.
  digest = TransferFsImage.parseMD5Header(conn);
  if (digest == null) {
    throw new IOException("Image digest not provided for url: " + url);
  }

  // Expose the connection's stream directly; the caller reads the image body.
  inputStream = conn.getInputStream();
  initialized = true;
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:43,代码来源:URLImageInputStream.java

示例3: setVerificationHeaders

import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; //导入依赖的package包/类
/**
 * Stamps the response with the headers a downloader needs to verify the
 * served file: its exact byte length and its stored MD5 digest.
 *
 * @param response the HTTP response whose headers are populated
 * @param file     the file being served
 * @throws IOException if no stored MD5 digest exists for the file, since an
 *                     image without a digest cannot be validated downstream
 */
private static void setVerificationHeaders(HttpServletResponse response,
    File file) throws IOException {
  final long length = file.length();
  response.setHeader(TransferFsImage.CONTENT_LENGTH, String.valueOf(length));

  final MD5Hash digest = MD5FileUtils.readStoredMd5ForFile(file);
  // Refuse to serve files lacking a digest rather than send an unverifiable image.
  if (digest == null) {
    throw new IOException("No md5 digest could be obtained for image file: "
        + file);
  }
  response.setHeader(TransferFsImage.MD5_HEADER, digest.toString());
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:19,代码来源:GetJournalImageServlet.java

示例4: fetchImage

import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; //导入依赖的package包/类
/**
 * Downloads the most recent fsimage from the active NameNode and stores it
 * in the local directory named on the command line.
 *
 * @param argv the full array of command-line parameters
 * @param idx  index in {@code argv} of the destination directory argument
 * @return 0, the success exit code
 * @throws IOException if the active NameNode's info server cannot be resolved
 */
public int fetchImage(final String[] argv, final int idx) throws IOException {
  // Locate the HTTP info server of the currently active NameNode.
  final String infoServer = DFSUtil.getInfoServer(
      HAUtil.getAddressOfActive(getDFS()), getConf(), false);
  final File saveDir = new File(argv[idx]);
  // Run the download as the current user so security credentials apply.
  SecurityUtil.doAsCurrentUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      TransferFsImage.downloadMostRecentImageToDirectory(infoServer, saveDir);
      return null;
    }
  });
  return 0;
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:25,代码来源:DFSAdmin.java

示例5: doGet

import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; //导入依赖的package包/类
/**
 * Streams an edit-log segment from this JournalNode's storage to an HTTP
 * client. The segment is identified by its starting transaction id; the
 * request must pass a requestor check and a storage-info consistency check
 * before any data is sent. Verification headers (length, digest, filename)
 * are attached before the body is streamed, and the transfer may be
 * throttled per configuration.
 */
@Override
public void doGet(final HttpServletRequest request,
    final HttpServletResponse response) throws ServletException, IOException {
  FileInputStream editFileIn = null;
  try {
    final ServletContext context = getServletContext();
    final Configuration conf = (Configuration) getServletContext()
        .getAttribute(JspHelper.CURRENT_CONF);
    final String journalId = request.getParameter(JOURNAL_ID_PARAM);
    QuorumJournalManager.checkJournalId(journalId);
    final JNStorage storage = JournalNodeHttpServer
        .getJournalFromContext(context, journalId).getStorage();

    // Check security: reject callers that are not authorized requestors.
    if (!checkRequestorOrSendError(conf, request, response)) {
      return;
    }

    // Check that the namespace info is correct
    if (!checkStorageInfoOrSendError(storage, request, response)) {
      return;
    }
    
    long segmentTxId = ServletUtil.parseLongParam(request,
        SEGMENT_TXID_PARAM);

    FileJournalManager fjm = storage.getJournalManager();
    File editFile;

    synchronized (fjm) {
      // Synchronize on the FJM so that the file doesn't get finalized
      // out from underneath us while we're in the process of opening
      // it up.
      EditLogFile elf = fjm.getLogFile(
          segmentTxId);
      if (elf == null) {
        response.sendError(HttpServletResponse.SC_NOT_FOUND,
            "No edit log found starting at txid " + segmentTxId);
        return;
      }
      editFile = elf.getFile();
      // Headers must be set while still holding the lock, before streaming,
      // so they describe the exact file snapshot we opened.
      ImageServlet.setVerificationHeadersForGet(response, editFile);
      ImageServlet.setFileNameHeaders(response, editFile);
      editFileIn = new FileInputStream(editFile);
    }
    
    DataTransferThrottler throttler = ImageServlet.getThrottler(conf);

    // send edits, optionally rate-limited by the throttler
    TransferFsImage.copyFileToStream(response.getOutputStream(), editFile,
        editFileIn, throttler);

  } catch (Throwable t) {
    // Report the failure both to the client and to the caller.
    String errMsg = "getedit failed. " + StringUtils.stringifyException(t);
    response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, errMsg);
    throw new IOException(errMsg);
  } finally {
    // Always release the edit file's stream, even on error paths.
    IOUtils.closeStream(editFileIn);
  }
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:61,代码来源:GetJournalEditServlet.java

示例6: doGet

import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; //导入依赖的package包/类
/**
 * Serves an fsimage stored on this JournalNode over HTTP. Only getImage
 * requests are accepted; the requested image is located by transaction id,
 * verification headers are attached, and the file is streamed back through
 * a (possibly throttled) transfer.
 */
@Override
public void doGet(final HttpServletRequest request,
    final HttpServletResponse response) throws ServletException, IOException {
  try {
    final GetImageParams params = new GetImageParams(request, response);
    if (!params.isGetImage()) {
      // This servlet serves images only; edits go through another servlet.
      throw new IOException("Only getImage requests are supported");
    }

    // Resolve the journal named in the request and its image storage.
    final ServletContext context = getServletContext();
    final Configuration conf = (Configuration) context
        .getAttribute(JspHelper.CURRENT_CONF);
    final String journalId = request.getParameter(JOURNAL_ID_PARAM);
    QuorumJournalManager.checkJournalId(journalId);

    final Journal journal = JournalNodeHttpServer.getJournalFromContext(
        context, journalId);
    final JNStorage imageStorage = journal.getImageStorage();

    // Count the request if metrics are enabled for this journal.
    final JournalMetrics metrics = journal.getMetrics();
    if (metrics != null) {
      metrics.numGetImageDoGet.inc();
    }

    // Reject requests whose namespace info does not match this storage.
    if (!GetJournalEditServlet.checkStorageInfoOrSendError(imageStorage,
        request, response)) {
      return;
    }

    // Locate the image for the requested transaction id.
    final long txid = params.getTxId();
    final File imageFile = imageStorage.getImageFile(txid);
    if (imageFile == null) {
      throw new IOException("Could not find image with txid " + txid);
    }

    // Length and md5 headers must be in place before the body is streamed.
    setVerificationHeaders(response, imageFile);

    // Stream the fsimage, honoring the configured throttler settings.
    TransferFsImage.getFileServer(response.getOutputStream(), imageFile,
        GetImageServlet.getThrottler(conf, params.isThrottlerDisabled()));

  } catch (Throwable t) {
    GetJournalEditServlet.handleFailure(t, response, "getImage");
  } 
}
 
开发者ID:rhli,项目名称:hadoop-EAR,代码行数:54,代码来源:GetJournalImageServlet.java

示例7: doGet

import org.apache.hadoop.hdfs.server.namenode.TransferFsImage; //导入依赖的package包/类
/**
 * Streams an edit-log segment from this JournalNode's storage to an HTTP
 * client. The segment is identified by its starting transaction id; the
 * request must pass a requestor check and a storage-info consistency check
 * before any data is sent. Verification and filename headers are attached
 * before the body is streamed, and the transfer may be throttled.
 */
@Override
public void doGet(final HttpServletRequest request,
    final HttpServletResponse response) throws ServletException, IOException {
  FileInputStream editFileIn = null;
  try {
    final ServletContext context = getServletContext();
    final Configuration conf = (Configuration) getServletContext()
        .getAttribute(JspHelper.CURRENT_CONF);
    final String journalId = request.getParameter(JOURNAL_ID_PARAM);
    QuorumJournalManager.checkJournalId(journalId);
    final JNStorage storage = JournalNodeHttpServer
        .getJournalFromContext(context, journalId).getStorage();

    // Check security: reject callers that are not authorized requestors.
    if (!checkRequestorOrSendError(conf, request, response)) {
      return;
    }

    // Check that the namespace info is correct
    if (!checkStorageInfoOrSendError(storage, request, response)) {
      return;
    }
    
    long segmentTxId = ServletUtil.parseLongParam(request,
        SEGMENT_TXID_PARAM);

    FileJournalManager fjm = storage.getJournalManager();
    File editFile;

    synchronized (fjm) {
      // Synchronize on the FJM so that the file doesn't get finalized
      // out from underneath us while we're in the process of opening
      // it up.
      EditLogFile elf = fjm.getLogFile(
          segmentTxId);
      if (elf == null) {
        response.sendError(HttpServletResponse.SC_NOT_FOUND,
            "No edit log found starting at txid " + segmentTxId);
        return;
      }
      editFile = elf.getFile();
      // Headers must be set while still holding the lock, before streaming,
      // so they describe the exact file snapshot we opened.
      GetImageServlet.setVerificationHeaders(response, editFile);
      GetImageServlet.setFileNameHeaders(response, editFile);
      editFileIn = new FileInputStream(editFile);
    }
    
    DataTransferThrottler throttler = GetImageServlet.getThrottler(conf);

    // send edits, optionally rate-limited by the throttler
    TransferFsImage.getFileServer(response, editFile, editFileIn, throttler);

  } catch (Throwable t) {
    // Report the failure both to the client and to the caller.
    String errMsg = "getedit failed. " + StringUtils.stringifyException(t);
    response.sendError(HttpServletResponse.SC_INTERNAL_SERVER_ERROR, errMsg);
    throw new IOException(errMsg);
  } finally {
    // Always release the edit file's stream, even on error paths.
    IOUtils.closeStream(editFileIn);
  }
}
 
开发者ID:ict-carch,项目名称:hadoop-plus,代码行数:60,代码来源:GetJournalEditServlet.java


注:本文中的org.apache.hadoop.hdfs.server.namenode.TransferFsImage类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。