

Java DomainSocket.getLoadingFailureReason Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.net.unix.DomainSocket.getLoadingFailureReason. If you are unsure what DomainSocket.getLoadingFailureReason does, how to call it, or where to find examples of its use, the curated code samples below should help. You can also explore further usage examples of the enclosing class, org.apache.hadoop.net.unix.DomainSocket.


The following presents 15 code examples of the DomainSocket.getLoadingFailureReason method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples. A minimal sketch of the pattern shared by all 15 examples follows.
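Every example below uses the same guard pattern: getLoadingFailureReason() returns null when the native libhadoop support for DomainSocket loaded successfully, and a human-readable failure reason otherwise, so callers probe it before relying on UNIX domain sockets. Here is a minimal sketch of that pattern; the class name DomainSocketGuard is hypothetical, while the DomainSocket call matches the API used in the examples that follow.

import org.apache.hadoop.net.unix.DomainSocket;

public class DomainSocketGuard {
  public static void main(String[] args) {
    // null means native DomainSocket support is available;
    // a non-null string explains why it could not be loaded.
    String reason = DomainSocket.getLoadingFailureReason();
    if (reason != null) {
      // Fall back to ordinary TCP sockets rather than failing hard.
      System.err.println("UNIX domain sockets unavailable: " + reason);
    } else {
      System.out.println("UNIX domain socket support is available.");
    }
  }
}

Note the design choice this reflects: loading failures are surfaced as a queryable reason string rather than an exception, which lets production code (DomainSocketFactory, DataNode) degrade gracefully and lets tests skip themselves when native support is absent.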

Example 1: DomainSocketFactory

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
public DomainSocketFactory(Conf conf) {
  final String feature;
  if (conf.isShortCircuitLocalReads() && (!conf.isUseLegacyBlockReaderLocal())) {
    feature = "The short-circuit local reads feature";
  } else if (conf.isDomainSocketDataTraffic()) {
    feature = "UNIX domain socket data traffic";
  } else {
    feature = null;
  }

  if (feature == null) {
    PerformanceAdvisory.LOG.debug(
        "Both short-circuit local reads and UNIX domain socket are disabled.");
  } else {
    if (conf.getDomainSocketPath().isEmpty()) {
      throw new HadoopIllegalArgumentException(feature + " is enabled but "
          + DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY + " is not set.");
    } else if (DomainSocket.getLoadingFailureReason() != null) {
      LOG.warn(feature + " cannot be used because "
          + DomainSocket.getLoadingFailureReason());
    } else {
      LOG.debug(feature + " is enabled.");
    }
  }
}
 
Developer: naver, Project: hadoop, Lines: 26, Source: DomainSocketFactory.java

Example 2: getPathInfo

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
/**
 * Get information about a domain socket path.
 *
 * @param addr         The inet address to use.
 * @param conf         The client configuration.
 *
 * @return             Information about the socket path.
 */
public PathInfo getPathInfo(InetSocketAddress addr, DFSClient.Conf conf) {
  // If there is no domain socket path configured, we can't use domain
  // sockets.
  if (conf.getDomainSocketPath().isEmpty()) return PathInfo.NOT_CONFIGURED;
  // If we can't do anything with the domain socket, don't create it.
  if (!conf.isDomainSocketDataTraffic() &&
      (!conf.isShortCircuitLocalReads() || conf.isUseLegacyBlockReaderLocal())) {
    return PathInfo.NOT_CONFIGURED;
  }
  // If the DomainSocket code is not loaded, we can't create
  // DomainSocket objects.
  if (DomainSocket.getLoadingFailureReason() != null) {
    return PathInfo.NOT_CONFIGURED;
  }
  // UNIX domain sockets can only be used to talk to local peers
  if (!DFSClient.isLocalAddress(addr)) return PathInfo.NOT_CONFIGURED;
  String escapedPath = DomainSocket.getEffectivePath(
      conf.getDomainSocketPath(), addr.getPort());
  PathState status = pathMap.getIfPresent(escapedPath);
  if (status == null) {
    return new PathInfo(escapedPath, PathState.VALID);
  } else {
    return new PathInfo(escapedPath, status);
  }
}
 
Developer: naver, Project: hadoop, Lines: 34, Source: DomainSocketFactory.java

Example 3: getPathInfo

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
/**
 * Get information about a domain socket path.
 *
 * @param addr         The inet address to use.
 * @param conf         The client configuration.
 *
 * @return             Information about the socket path.
 */
public PathInfo getPathInfo(InetSocketAddress addr, ShortCircuitConf conf) {
  // If there is no domain socket path configured, we can't use domain
  // sockets.
  if (conf.getDomainSocketPath().isEmpty()) return PathInfo.NOT_CONFIGURED;
  // If we can't do anything with the domain socket, don't create it.
  if (!conf.isDomainSocketDataTraffic() &&
      (!conf.isShortCircuitLocalReads() || conf.isUseLegacyBlockReaderLocal())) {
    return PathInfo.NOT_CONFIGURED;
  }
  // If the DomainSocket code is not loaded, we can't create
  // DomainSocket objects.
  if (DomainSocket.getLoadingFailureReason() != null) {
    return PathInfo.NOT_CONFIGURED;
  }
  // UNIX domain sockets can only be used to talk to local peers
  if (!DFSUtilClient.isLocalAddress(addr)) return PathInfo.NOT_CONFIGURED;
  String escapedPath = DomainSocket.getEffectivePath(
      conf.getDomainSocketPath(), addr.getPort());
  PathState status = pathMap.getIfPresent(escapedPath);
  if (status == null) {
    return new PathInfo(escapedPath, PathState.VALID);
  } else {
    return new PathInfo(escapedPath, status);
  }
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 34, Source: DomainSocketFactory.java

Example 4: getDomainPeerServer

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
static DomainPeerServer getDomainPeerServer(Configuration conf,
    int port) throws IOException {
  String domainSocketPath =
      conf.getTrimmed(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
  if (domainSocketPath.isEmpty()) {
    if (conf.getBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
          DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT) &&
       (!conf.getBoolean(DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
        DFSConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) {
      LOG.warn("Although short-circuit local reads are configured, " +
          "they are disabled because you didn't configure " +
          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
    }
    return null;
  }
  if (DomainSocket.getLoadingFailureReason() != null) {
    throw new RuntimeException("Although a UNIX domain socket " +
        "path is configured as " + domainSocketPath + ", we cannot " +
        "start a localDataXceiverServer because " +
        DomainSocket.getLoadingFailureReason());
  }
  DomainPeerServer domainPeerServer =
    new DomainPeerServer(domainSocketPath, port);
  domainPeerServer.setReceiveBufferSize(
      HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);
  return domainPeerServer;
}
 
Developer: naver, Project: hadoop, Lines: 29, Source: DataNode.java

Example 5: setupCluster

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
@BeforeClass
static public void setupCluster() throws Exception {
  if (DomainSocket.getLoadingFailureReason() != null) return;
  sockDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
    new File(sockDir.getDir(), 
      "TestParallelShortCircuitReadUnCached._PORT.sock").getAbsolutePath());
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  // Enabling data transfer encryption should have no effect when using
  // short-circuit local reads.  This is a regression test for HDFS-5353.
  conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.setBoolean(DFSConfigKeys.
      DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  conf.setBoolean(DFSConfigKeys.
      DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
  // We want to test reading from stale sockets.
  conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
      5 * 60 * 1000);
  conf.setInt(DFSConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 32);
  // Avoid using the FileInputStreamCache.
  conf.setInt(DFSConfigKeys.
      DFS_CLIENT_READ_SHORTCIRCUIT_STREAMS_CACHE_SIZE_KEY, 0);
  DomainSocket.disableBindPathValidation();
  DFSInputStream.tcpReadsDisabledForTesting = true;
  setupCluster(1, conf);
}
 
Developer: naver, Project: hadoop, Lines: 30, Source: TestParallelShortCircuitReadUnCached.java

Example 6: setupCluster

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
@BeforeClass
static public void setupCluster() throws Exception {
  if (DomainSocket.getLoadingFailureReason() != null) return;
  DFSInputStream.tcpReadsDisabledForTesting = true;
  sockDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
    new File(sockDir.getDir(), "TestParallelLocalRead.%d.sock").getAbsolutePath());
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.
      DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, true);
  DomainSocket.disableBindPathValidation();
  setupCluster(1, conf);
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: TestParallelShortCircuitReadNoChecksum.java

Example 7: setupCluster

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
@BeforeClass
static public void setupCluster() throws Exception {
  if (DomainSocket.getLoadingFailureReason() != null) return;
  DFSInputStream.tcpReadsDisabledForTesting = true;
  sockDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
    new File(sockDir.getDir(), "TestParallelLocalRead.%d.sock").getAbsolutePath());
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, true);
  conf.setBoolean(DFSConfigKeys.
      DFS_CLIENT_READ_SHORTCIRCUIT_SKIP_CHECKSUM_KEY, false);
  DomainSocket.disableBindPathValidation();
  setupCluster(1, conf);
}
 
Developer: naver, Project: hadoop, Lines: 15, Source: TestParallelShortCircuitRead.java

Example 8: setupCluster

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
@BeforeClass
static public void setupCluster() throws Exception {
  if (DomainSocket.getLoadingFailureReason() != null) return;
  DFSInputStream.tcpReadsDisabledForTesting = true;
  sockDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
    new File(sockDir.getDir(), "TestParallelLocalRead.%d.sock").getAbsolutePath());
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY, false);
  conf.setBoolean(DFSConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
  DomainSocket.disableBindPathValidation();
  setupCluster(1, conf);
}
 
Developer: naver, Project: hadoop, Lines: 14, Source: TestParallelUnixDomainRead.java

Example 9: getDomainPeerServer

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
private static DomainPeerServer getDomainPeerServer(Configuration conf,
    int port) throws IOException {
  String domainSocketPath =
      conf.getTrimmed(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_DEFAULT);
  if (domainSocketPath.isEmpty()) {
    if (conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,
          HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT) &&
       (!conf.getBoolean(HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL,
        HdfsClientConfigKeys.DFS_CLIENT_USE_LEGACY_BLOCKREADERLOCAL_DEFAULT))) {
      LOG.warn("Although short-circuit local reads are configured, " +
          "they are disabled because you didn't configure " +
          DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY);
    }
    return null;
  }
  if (DomainSocket.getLoadingFailureReason() != null) {
    throw new RuntimeException("Although a UNIX domain socket " +
        "path is configured as " + domainSocketPath + ", we cannot " +
        "start a localDataXceiverServer because " +
        DomainSocket.getLoadingFailureReason());
  }
  DomainPeerServer domainPeerServer =
    new DomainPeerServer(domainSocketPath, port);
  int recvBufferSize = conf.getInt(
      DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_KEY,
      DFSConfigKeys.DFS_DATANODE_TRANSFER_SOCKET_RECV_BUFFER_SIZE_DEFAULT);
  if (recvBufferSize > 0) {
    domainPeerServer.setReceiveBufferSize(recvBufferSize);
  }
  return domainPeerServer;
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 33, Source: DataNode.java

Example 10: setupCluster

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
@BeforeClass
static public void setupCluster() throws Exception {
  if (DomainSocket.getLoadingFailureReason() != null) return;
  sockDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
    new File(sockDir.getDir(), 
      "TestParallelShortCircuitReadUnCached._PORT.sock").getAbsolutePath());
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
  // Enabling data transfer encryption should have no effect when using
  // short-circuit local reads.  This is a regression test for HDFS-5353.
  conf.setBoolean(DFSConfigKeys.DFS_ENCRYPT_DATA_TRANSFER_KEY, true);
  conf.setBoolean(DFSConfigKeys.DFS_BLOCK_ACCESS_TOKEN_ENABLE_KEY, true);
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
      false);
  conf.setBoolean(HdfsClientConfigKeys.
      DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
  // We want to test reading from stale sockets.
  conf.setInt(DFSConfigKeys.DFS_DATANODE_SOCKET_REUSE_KEEPALIVE_KEY, 1);
  conf.setLong(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_EXPIRY_MSEC_KEY,
      5 * 60 * 1000);
  conf.setInt(HdfsClientConfigKeys.DFS_CLIENT_SOCKET_CACHE_CAPACITY_KEY, 32);
  // Avoid using the FileInputStreamCache.
  conf.setInt(HdfsClientConfigKeys.Read.ShortCircuit.STREAMS_CACHE_SIZE_KEY,
      0);
  DomainSocket.disableBindPathValidation();
  DFSInputStream.tcpReadsDisabledForTesting = true;
  setupCluster(1, conf);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 30, Source: TestParallelShortCircuitReadUnCached.java

Example 11: setupCluster

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
@BeforeClass
static public void setupCluster() throws Exception {
  if (DomainSocket.getLoadingFailureReason() != null) return;
  DFSInputStream.tcpReadsDisabledForTesting = true;
  sockDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
    new File(sockDir.getDir(), "TestParallelLocalRead.%d.sock").getAbsolutePath());
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, true);
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.SKIP_CHECKSUM_KEY,
      false);
  DomainSocket.disableBindPathValidation();
  setupCluster(1, conf);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 15, Source: TestParallelShortCircuitRead.java

Example 12: setupCluster

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
@BeforeClass
static public void setupCluster() throws Exception {
  if (DomainSocket.getLoadingFailureReason() != null) return;
  DFSInputStream.tcpReadsDisabledForTesting = true;
  sockDir = new TemporarySocketDirectory();
  HdfsConfiguration conf = new HdfsConfiguration();
  conf.set(DFSConfigKeys.DFS_DOMAIN_SOCKET_PATH_KEY,
    new File(sockDir.getDir(), "TestParallelLocalRead.%d.sock").getAbsolutePath());
  conf.setBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY, false);
  conf.setBoolean(HdfsClientConfigKeys.DFS_CLIENT_DOMAIN_SOCKET_DATA_TRAFFIC, true);
  DomainSocket.disableBindPathValidation();
  setupCluster(1, conf);
}
 
Developer: aliyun-beta, Project: aliyun-oss-hadoop-fs, Lines: 14, Source: TestParallelUnixDomainRead.java

Example 13: DataNode

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
/**
 * Create the DataNode given a configuration, an array of dataDirs,
 * and a namenode proxy
 */
DataNode(final Configuration conf,
         final List<StorageLocation> dataDirs,
         final SecureResources resources) throws IOException {
  super(conf);
  this.blockScanner = new BlockScanner(this, conf);
  this.lastDiskErrorCheck = 0;
  this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
      DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);

  this.usersWithLocalPathAccess = Arrays.asList(
      conf.getTrimmedStrings(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
  this.connectToDnViaHostname = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
      DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
  this.getHdfsBlockLocationsEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED, 
      DFSConfigKeys.DFS_HDFS_BLOCKS_METADATA_ENABLED_DEFAULT);
  this.supergroup = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
      DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
  this.isPermissionEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,
      DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT);
  this.pipelineSupportECN = conf.getBoolean(
      DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED,
      DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED_DEFAULT);

  confVersion = "core-" +
      conf.get("hadoop.common.configuration.version", "UNSPECIFIED") +
      ",hdfs-" +
      conf.get("hadoop.hdfs.configuration.version", "UNSPECIFIED");

  // Determine whether we should try to pass file descriptors to clients.
  if (conf.getBoolean(DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_KEY,
            DFSConfigKeys.DFS_CLIENT_READ_SHORTCIRCUIT_DEFAULT)) {
    String reason = DomainSocket.getLoadingFailureReason();
    if (reason != null) {
      LOG.warn("File descriptor passing is disabled because " + reason);
      this.fileDescriptorPassingDisabledReason = reason;
    } else {
      LOG.info("File descriptor passing is enabled.");
      this.fileDescriptorPassingDisabledReason = null;
    }
  } else {
    this.fileDescriptorPassingDisabledReason =
        "File descriptor passing was not configured.";
    LOG.debug(this.fileDescriptorPassingDisabledReason);
  }

  try {
    hostName = getHostName(conf);
    LOG.info("Configured hostname is " + hostName);
    startDataNode(conf, dataDirs, resources);
  } catch (IOException ie) {
    shutdown();
    throw ie;
  }
  final int dncCacheMaxSize =
      conf.getInt(DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_KEY,
          DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_DEFAULT);
  datanodeNetworkCounts =
      CacheBuilder.newBuilder()
          .maximumSize(dncCacheMaxSize)
          .build(new CacheLoader<String, Map<String, Long>>() {
            @Override
            public Map<String, Long> load(String key) throws Exception {
              final Map<String, Long> ret = new HashMap<String, Long>();
              ret.put("networkErrors", 0L);
              return ret;
            }
          });
}
 
Developer: naver, Project: hadoop, Lines: 76, Source: DataNode.java

Example 14: teardownCluster

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
@AfterClass
static public void teardownCluster() throws Exception {
  if (DomainSocket.getLoadingFailureReason() != null) return;
  sockDir.close();
  TestParallelReadUtil.teardownCluster();
}
 
Developer: naver, Project: hadoop, Lines: 7, Source: TestParallelShortCircuitReadUnCached.java

Example 15: DataNode

import org.apache.hadoop.net.unix.DomainSocket; // import the package/class this method depends on
/**
 * Create the DataNode given a configuration, an array of dataDirs,
 * and a namenode proxy.
 */
DataNode(final Configuration conf,
         final List<StorageLocation> dataDirs,
         final SecureResources resources) throws IOException {
  super(conf);
  this.tracer = createTracer(conf);
  this.tracerConfigurationManager =
      new TracerConfigurationManager(DATANODE_HTRACE_PREFIX, conf);
  this.blockScanner = new BlockScanner(this, conf);
  this.lastDiskErrorCheck = 0;
  this.maxNumberOfBlocksToLog = conf.getLong(DFS_MAX_NUM_BLOCKS_TO_LOG_KEY,
      DFS_MAX_NUM_BLOCKS_TO_LOG_DEFAULT);

  this.usersWithLocalPathAccess = Arrays.asList(
      conf.getTrimmedStrings(DFSConfigKeys.DFS_BLOCK_LOCAL_PATH_ACCESS_USER_KEY));
  this.connectToDnViaHostname = conf.getBoolean(
      DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME,
      DFSConfigKeys.DFS_DATANODE_USE_DN_HOSTNAME_DEFAULT);
  this.supergroup = conf.get(DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_KEY,
      DFSConfigKeys.DFS_PERMISSIONS_SUPERUSERGROUP_DEFAULT);
  this.isPermissionEnabled = conf.getBoolean(
      DFSConfigKeys.DFS_PERMISSIONS_ENABLED_KEY,
      DFSConfigKeys.DFS_PERMISSIONS_ENABLED_DEFAULT);
  this.pipelineSupportECN = conf.getBoolean(
      DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED,
      DFSConfigKeys.DFS_PIPELINE_ECN_ENABLED_DEFAULT);

  confVersion = "core-" +
      conf.get("hadoop.common.configuration.version", "UNSPECIFIED") +
      ",hdfs-" +
      conf.get("hadoop.hdfs.configuration.version", "UNSPECIFIED");

  this.checkDiskErrorInterval =
      ThreadLocalRandom.current().nextInt(5000, (int) (5000 * 1.25));

  // Determine whether we should try to pass file descriptors to clients.
  if (conf.getBoolean(HdfsClientConfigKeys.Read.ShortCircuit.KEY,
            HdfsClientConfigKeys.Read.ShortCircuit.DEFAULT)) {
    String reason = DomainSocket.getLoadingFailureReason();
    if (reason != null) {
      LOG.warn("File descriptor passing is disabled because " + reason);
      this.fileDescriptorPassingDisabledReason = reason;
    } else {
      LOG.info("File descriptor passing is enabled.");
      this.fileDescriptorPassingDisabledReason = null;
    }
  } else {
    this.fileDescriptorPassingDisabledReason =
        "File descriptor passing was not configured.";
    LOG.debug(this.fileDescriptorPassingDisabledReason);
  }

  try {
    hostName = getHostName(conf);
    LOG.info("Configured hostname is " + hostName);
    startDataNode(conf, dataDirs, resources);
  } catch (IOException ie) {
    shutdown();
    throw ie;
  }
  final int dncCacheMaxSize =
      conf.getInt(DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_KEY,
          DFS_DATANODE_NETWORK_COUNTS_CACHE_MAX_SIZE_DEFAULT);
  datanodeNetworkCounts =
      CacheBuilder.newBuilder()
          .maximumSize(dncCacheMaxSize)
          .build(new CacheLoader<String, Map<String, Long>>() {
            @Override
            public Map<String, Long> load(String key) throws Exception {
              final Map<String, Long> ret = new HashMap<String, Long>();
              ret.put("networkErrors", 0L);
              return ret;
            }
          });

  initOOBTimeout();
}
 
开发者ID:aliyun-beta,项目名称:aliyun-oss-hadoop-fs,代码行数:81,代码来源:DataNode.java


Note: The org.apache.hadoop.net.unix.DomainSocket.getLoadingFailureReason method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets are selected from open-source projects contributed by the community, and copyright in the source code remains with the original authors. Consult the corresponding project's license before redistributing or using the code; do not repost without permission.