

Java FSConstants Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hdfs.protocol.FSConstants. If you are wondering what FSConstants is for and how it is used in practice, the curated class examples below may help.


The FSConstants class belongs to the org.apache.hadoop.hdfs.protocol package. A total of 15 code examples of the class are shown below, ordered by popularity.
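
Before the examples, here is a minimal, self-contained sketch (hypothetical, not drawn from any of the projects below) of what referencing FSConstants looks like: the class is a collection of protocol-level constants, several of which (the quota sentinels, the storage layout version) appear in the examples on this page. The class name FSConstantsSketch is invented; the constants are assumed to exist as they do in the hadoop-EAR branch the examples come from.

import org.apache.hadoop.hdfs.protocol.FSConstants;

public class FSConstantsSketch {
  public static void main(String[] args) {
    // QUOTA_DONT_SET leaves an existing quota unchanged in setQuota calls;
    // QUOTA_RESET removes the quota (see Examples 5 and 10 below).
    System.out.println("QUOTA_DONT_SET = " + FSConstants.QUOTA_DONT_SET);
    System.out.println("QUOTA_RESET    = " + FSConstants.QUOTA_RESET);
    // LAYOUT_VERSION identifies the on-disk storage format (see Example 4).
    System.out.println("LAYOUT_VERSION = " + FSConstants.LAYOUT_VERSION);
  }
}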

Example 1: verifyFile

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
/**
 * Verify the file length and file CRC.
 */
private static boolean verifyFile(FileSystem fs, Path filePath, 
    int fileLen, DataChecksum checksum) throws IOException {
  FileStatus stat = fs.getFileStatus(filePath);
  if (stat.getLen() != fileLen) {
    return false;
  }
  
  int fileCRC = fs.getFileCrc(filePath);

  LOG.info("Expected checksum: " + (int) checksum.getValue() + ", got: " + fileCRC);

  InputStream in = fs.open(filePath);
  try {
    DataChecksum newChecksum =
        DataChecksum.newDataChecksum(FSConstants.CHECKSUM_TYPE, 1);
    int toRead = fileLen;
    byte[] buffer = new byte[1024 * 1024];
    while (toRead > 0) {
      int numRead = in.read(buffer);
      if (numRead < 0) {
        // Unexpected EOF: the file is shorter than its reported length.
        return false;
      }
      newChecksum.update(buffer, 0, numRead);
      toRead -= numRead;
    }
    LOG.info("Read CRC: " + (int) newChecksum.getValue());
    return (int) checksum.getValue() == fileCRC
        && (int) newChecksum.getValue() == fileCRC;
  } finally {
    in.close();
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 29, Source: TestAppendStress.java

Example 2: testNonFederation

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
/**
 * Tests that the returned addresses are correct for a default
 * configuration with no federation.
 */
@Test
public void testNonFederation() throws Exception {
  Configuration conf = new Configuration();

  // Returned namenode address should match default address
  conf.set("fs.default.name", "hdfs://localhost:1000");
  verifyAddresses(conf, TestType.NAMENODE, "127.0.0.1:1000");

  // Returned namenode address should match service RPC address
  conf = new Configuration();
  conf.set(NameNode.DATANODE_PROTOCOL_ADDRESS, "localhost:1000");
  conf.set(FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
  verifyAddresses(conf, TestType.NAMENODE, "127.0.0.1:1000");

  // Returned address should match RPC address
  conf = new Configuration();
  conf.set(FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY, "localhost:1001");
  verifyAddresses(conf, TestType.NAMENODE, "127.0.0.1:1001");
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 24, Source: TestGetConf.java

Example 3: corruptReplica

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
public static boolean corruptReplica(Block block, int replica, MiniDFSCluster cluster) throws IOException {
  Random random = new Random();
  boolean corrupted = false;
  for (int i=replica*2; i<replica*2+2; i++) {
    File blockFile = new File(cluster.getBlockDirectory("data" + (i+1)), block.getBlockName());
    if (blockFile.exists()) {
      corruptFile(blockFile, random);
      corrupted = true;
      continue;
    }
    File blockFileInlineChecksum = new File(cluster.getBlockDirectory("data"
        + (i + 1)), BlockInlineChecksumWriter.getInlineChecksumFileName(
        block, FSConstants.CHECKSUM_TYPE, cluster.conf.getInt(
            "io.bytes.per.checksum", FSConstants.DEFAULT_BYTES_PER_CHECKSUM)));
    if (blockFileInlineChecksum.exists()) {
      corruptFile(blockFileInlineChecksum, random);
      corrupted = true;
      continue;
    }
  }
  return corrupted;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 23, Source: TestDatanodeBlockScanner.java

Example 4: canRollBack

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
public static boolean canRollBack(StorageDirectory sd, Storage storage)
    throws IOException {
  File prevDir = sd.getPreviousDir();
  if (!prevDir.exists()) { // use current directory then
    LOG.info("Storage directory " + sd.getRoot()
        + " does not contain previous fs state.");
    // read and verify consistency with other directories
    sd.read();
    return false;
  }

  // read and verify consistency of the prev dir
  sd.read(sd.getPreviousVersionFile());

  if (storage.getLayoutVersion() != FSConstants.LAYOUT_VERSION) {
    throw new IOException("Cannot rollback to storage version "
        + storage.getLayoutVersion()
        + " using this version of the NameNode, which uses storage version "
        + FSConstants.LAYOUT_VERSION + ". "
        + "Please use the previous version of HDFS to perform the rollback.");
  }
  return true;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 24, Source: NNStorage.java

Example 5: testEditsLog

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
/**
 * Perform operations such as setting quotas, deleting files, and renaming,
 * and ensure the system can apply the edits log during startup.
 */
public void testEditsLog() throws Exception {
  DistributedFileSystem fs = (DistributedFileSystem) cluster.getFileSystem();
  Path src1 = new Path(dir, "testEditsLog/srcdir/src1");
  Path dst1 = new Path(dir, "testEditsLog/dstdir/dst1");
  createFile(fs, src1);
  fs.mkdirs(dst1.getParent());
  createFile(fs, dst1);
  
  // Set a quota so that dst1's parent cannot accept any new files/directories under it
  fs.setQuota(dst1.getParent(), 2, FSConstants.QUOTA_DONT_SET);
  // Free up quota for a subsequent rename
  fs.delete(dst1, true);
  rename(src1, dst1, true, false);
  
  // Restart the cluster and ensure the above operations can be
  // loaded from the edits log
  restartCluster();
  fs = (DistributedFileSystem)cluster.getFileSystem();
  assertFalse(fs.exists(src1));   // ensure src1 is already renamed
  assertTrue(fs.exists(dst1));    // ensure rename dst exists
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 26, Source: TestDFSRename.java

Example 6: oneTimeSetUp

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
/**
 * Set up a Hadoop mini-cluster for the test.
 */
private static void oneTimeSetUp() throws IOException {
  ((Log4JLogger)HftpFileSystem.LOG).getLogger().setLevel(Level.ALL);

  final long seed = RAN.nextLong();
  System.out.println("seed=" + seed);
  RAN.setSeed(seed);

  config = new Configuration();
  config.set(FSConstants.SLAVE_HOST_NAME, "localhost");

  cluster = new MiniDFSCluster(config, 2, true, null);
  hdfs = cluster.getFileSystem();
  final String hftpuri = "hftp://" + config.get("dfs.http.address");
  System.out.println("hftpuri=" + hftpuri);
  hftpFs = (HftpFileSystem) new Path(hftpuri).getFileSystem(config);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 20, Source: TestHftpFileSystem.java

Example 7: setupDatanodeAddress

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
private void setupDatanodeAddress(Configuration conf, InetSocketAddress toSet,
    boolean setupHostsFile) throws IOException {
  String rpcAddress = "127.0.0.1:0";
  if (setupHostsFile) {
    String hostsFile = conf.get(FSConstants.DFS_HOSTS, "").trim();
    if (hostsFile.length() == 0) {
      throw new IOException("Parameter dfs.hosts is not setup in conf");
    }
    // Setup datanode in the include file, if it is defined in the conf
    String addressInString = NetUtils.toIpPort(toSet);
    if (addressInString != null) {
      rpcAddress = addressInString;
    }
  }
  conf.set(FSConstants.DFS_DATANODE_ADDRESS_KEY, rpcAddress);
  conf.set(FSConstants.DFS_DATANODE_HTTP_ADDRESS_KEY, "127.0.0.1:0");
  conf.set(FSConstants.DFS_DATANODE_IPC_ADDRESS_KEY, "127.0.0.1:0");
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 19, Source: MiniDFSCluster.java

Example 8: testIncludesExcludesConfigure

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
/**
 * Test that missing include/exclude files are ignored
 * when dfs.hosts.ignoremissing is set.
 */
@Test
public void testIncludesExcludesConfigure() throws IOException {
  String inFile = "/tmp/inFileNotExists";
  String exFile = "/tmp/exFileNotExists";
  File include = new File(inFile);
  File exclude = new File(exFile);
  include.delete();
  exclude.delete();
  assertFalse(include.exists());
  assertFalse(exclude.exists());

  Configuration conf = new Configuration();
  conf.set("dfs.hosts.ignoremissing", "true");
  conf.set(FSConstants.DFS_HOSTS, inFile);
  conf.set("dfs.hosts.exclude", exFile);
  cluster = new MiniDFSCluster(conf, 3, true, null);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 22, Source: TestNameNodeReconfigure.java

Example 9: initialize

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
@Override
public void initialize(URI name, Configuration conf) throws IOException {
  super.initialize(name, conf);
  setConf(conf);
  try {
    this.ugi = UnixUserGroupInformation.login(conf, true);
  } catch (LoginException le) {
    throw new IOException(StringUtils.stringifyException(le));
  }
  initializedWith = name;
  if (conf.getBoolean(FSConstants.CLIENT_CONFIGURATION_LOOKUP_DONE, false)) {
    try {
      initializedWith = new URI(conf.get(FileSystem.FS_DEFAULT_NAME_KEY));
    } catch (URISyntaxException e) {
      LOG.error(e);
    }
  }
  nnAddr = NetUtils.createSocketAddr(name.toString());
  doStrictContentLengthCheck = conf.getBoolean(STRICT_CONTENT_LENGTH, false);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 21, Source: HftpFileSystem.java

Example 10: setQuota

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
/**
 * Sets or resets quotas for a directory.
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)
 */
void setQuota(String src, long namespaceQuota, long diskspaceQuota)
                                               throws IOException {
  // sanity check
  if ((namespaceQuota <= 0 && namespaceQuota != FSConstants.QUOTA_DONT_SET &&
       namespaceQuota != FSConstants.QUOTA_RESET) ||
      (diskspaceQuota <= 0 && diskspaceQuota != FSConstants.QUOTA_DONT_SET &&
       diskspaceQuota != FSConstants.QUOTA_RESET)) {
    throw new IllegalArgumentException("Invalid values for quota: " +
                                       namespaceQuota + " and " +
                                       diskspaceQuota);
  }

  try {
    namenode.setQuota(src, namespaceQuota, diskspaceQuota);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   NSQuotaExceededException.class,
                                   DSQuotaExceededException.class);
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 27, Source: DFSClient.java
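
As a complement to the sanity check above, here is a hedged caller-side sketch using the public DistributedFileSystem.setQuota wrapper (seen in Example 5), which forwards to the DFSClient method above. The class QuotaCalls and its helper methods are hypothetical names for illustration.

import java.io.IOException;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.FSConstants;

class QuotaCalls {
  // Cap the number of names under dir while leaving the diskspace quota
  // untouched, via the QUOTA_DONT_SET sentinel.
  static void capNamespace(DistributedFileSystem fs, Path dir, long limit)
      throws IOException {
    fs.setQuota(dir, limit, FSConstants.QUOTA_DONT_SET);
  }

  // Clear both quotas on dir with the QUOTA_RESET sentinel.
  static void clearQuotas(DistributedFileSystem fs, Path dir) throws IOException {
    fs.setQuota(dir, FSConstants.QUOTA_RESET, FSConstants.QUOTA_RESET);
  }
}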

Example 11: getNNServiceRpcAddresses

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
/**
 * Returns list of InetSocketAddresses corresponding to namenodes from the
 * configuration. Note this is to be used by datanodes to get the list of
 * namenode addresses to talk to.
 * 
 * Returns namenode address specifically configured for datanodes (using
 * service ports), if found. If not, regular RPC address configured for other
 * clients is returned.
 * 
 * @param conf configuration
 * @return list of InetSocketAddress
 * @throws IOException on error
 */
public static List<InetSocketAddress> getNNServiceRpcAddresses(
    Configuration conf) throws IOException {
  // Use default address as fall back
  String defaultAddress;
  try {
    defaultAddress = NameNode.getDefaultAddress(conf);
  } catch (IllegalArgumentException e) {
    defaultAddress = null;
  }
  
  List<InetSocketAddress> addressList = getAddresses(conf, defaultAddress,
      NameNode.DATANODE_PROTOCOL_ADDRESS, FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY);
  if (addressList == null) {
    throw new IOException("Incorrect configuration: namenode address "
        + NameNode.DATANODE_PROTOCOL_ADDRESS + " or "  
        + FSConstants.DFS_NAMENODE_RPC_ADDRESS_KEY
        + " is not configured.");
  }
  return addressList;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 34, Source: DFSUtil.java
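
A hedged sketch of how a datanode-side caller might use the helper above; the ListNamenodes class and the printing are illustrative only, assuming DFSUtil from org.apache.hadoop.hdfs is on the classpath as shown.

import java.io.IOException;
import java.net.InetSocketAddress;
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.DFSUtil;

class ListNamenodes {
  public static void main(String[] args) throws IOException {
    // Resolves service RPC addresses first, falling back to the
    // client-facing RPC addresses, as described in the Javadoc above.
    Configuration conf = new Configuration();
    List<InetSocketAddress> namenodes = DFSUtil.getNNServiceRpcAddresses(conf);
    for (InetSocketAddress nn : namenodes) {
      System.out.println("namenode: " + nn);
    }
  }
}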

Example 12: initializeStreams

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
public void initializeStreams(int bytesPerChecksum, int checksumSize,
    Block block, String inAddr, int namespaceId, DataNode datanode)
    throws FileNotFoundException, IOException {
  if (this.blockDataWriter == null) {
    blockDataWriter = blockDataFile.getWriter(-1);
  }
  if (this.cout == null) {
    this.cout = new FileOutputStream(
        new RandomAccessFile(metafile, "rw").getFD());
  }
  checksumOut = new DataOutputStream(new BufferedOutputStream(cout,
      FSConstants.SMALL_BUFFER_SIZE));

  setParameters(bytesPerChecksum, checksumSize, block, inAddr, namespaceId,
      datanode);
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 17, Source: BlockWithChecksumFileWriter.java

Example 13: NameNodeRouter

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
public NameNodeRouter(ClientProxyCommons commons) throws IOException {
  this.commons = commons;
  this.clusterId = commons.conf.getInt(FSConstants.DFS_CLUSTER_ID, RequestMetaInfo.NO_CLUSTER_ID);
  if (this.clusterId == RequestMetaInfo.NO_CLUSTER_ID) {
    String msg = "Cluster ID is not set in configuration.";
    LOG.error(msg);
    throw new IllegalArgumentException(msg);
  }
  handlers = new HashMap<String, NameNodeHandler>();
  try {
    for (String nameserviceId : commons.conf.getStringCollection(
        FSConstants.DFS_FEDERATION_NAMESERVICES)) {
      LOG.info("Initializing NameNodeHandler for clusterId: " + clusterId +
          "nameserviceId: " + nameserviceId);
      handlers.put(nameserviceId, new NameNodeHandler(commons, nameserviceId));
    }
  } catch (URISyntaxException e) {
    LOG.error("Malformed URI", e);
    throw new IOException(e);
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 22, Source: NameNodeRouter.java

Example 14: setUp

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
public void setUp() throws Exception {
  try {
    Configuration conf = new Configuration();
    // Bind port automatically
    conf.setInt(StorageServiceConfigKeys.PROXY_THRIFT_PORT_KEY, 0);
    conf.setInt(StorageServiceConfigKeys.PROXY_RPC_PORT_KEY, 0);

    cluster = new MiniAvatarCluster(conf, 2, true, null, null, 1, true);

    proxyService = new ClientProxyService(new ClientProxyCommons(conf, conf.get(
        FSConstants.DFS_CLUSTER_NAME)));

    benchmark = new NNLatencyBenchmark();
    benchmark.setConf(conf);
  } catch (IOException e) {
    tearDown();
    throw e;
  }
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 20, Source: TestNNLatencyBenchmark.java

Example 15: isServiceSpecified

import org.apache.hadoop.hdfs.protocol.FSConstants; // import the required package/class
/**
 * Checks if the service argument is specified in the command arguments.
 */
public static boolean isServiceSpecified(String command, Configuration conf,
    String[] argv) {
  if (conf.get(FSConstants.DFS_FEDERATION_NAMESERVICES) != null) {
    for (int i = 0; i < argv.length; i++) {
      if (argv[i].equals("-service")) {
        // found service specs
        return true;
      }
    }
    // no service specs
    printServiceErrorMessage(command, conf);
    return false;
  }
  return true;
}
 
Developer: rhli, Project: hadoop-EAR, Lines: 19, Source: AvatarShell.java
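
A hedged sketch of how a shell entry point might use this guard; ShellGuard and requireService are invented names, and the AvatarShell import is omitted because its package is specific to this branch.

import org.apache.hadoop.conf.Configuration;
// Import for AvatarShell omitted: its package is specific to the hadoop-EAR branch.

class ShellGuard {
  // Hypothetical guard for a shell entry point: refuse to run a federated
  // command unless the caller selected a nameservice via -service.
  static void requireService(String command, Configuration conf, String[] argv) {
    if (!AvatarShell.isServiceSpecified(command, conf, argv)) {
      System.exit(1);
    }
  }
}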

