當前位置: 首頁>>代碼示例>>Java>>正文


Java LocalFileSystem.getLocal方法代碼示例

本文整理匯總了Java中org.apache.hadoop.fs.LocalFileSystem.getLocal方法的典型用法代碼示例。如果您正苦於以下問題:Java LocalFileSystem.getLocal方法的具體用法?Java LocalFileSystem.getLocal怎麼用?Java LocalFileSystem.getLocal使用的例子?那麼, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在org.apache.hadoop.fs.LocalFileSystem的用法示例。


在下文中一共展示了LocalFileSystem.getLocal方法的4個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於系統推薦出更棒的Java代碼示例。

示例1: testLocalFs

import org.apache.hadoop.fs.LocalFileSystem; //導入方法依賴的package包/類
/**
 * Smoke-tests the local filesystem: lists a fixed test directory, seeding it
 * with one sub-directory and one copied file if it is empty, then prints
 * whether each entry is a directory or a regular file.
 *
 * @throws IOException if any filesystem operation fails
 */
public static void testLocalFs() throws IOException {
  // getLocal is inherited from FileSystem and returns the checksummed local FS.
  LocalFileSystem fs = LocalFileSystem.getLocal(conf);
  System.out.println(fs);
  String testDir = "/home/winter/temp/temp";
  String innerDir = "test";
  FileStatus[] statusArray = fs.listStatus(new Path(testDir));
  if (statusArray == null || statusArray.length == 0) {
    // Directory is missing or empty: seed it with a sub-dir and a copied file.
    Path innerDirPath = new Path(new Path(testDir), innerDir);
    Path innerFilePath = new Path(new Path(testDir), "hdfs-site");
    fs.mkdirs(innerDirPath);
    fs.copyFromLocalFile(new Path("/home/winter/hdfs-site.xml"), innerFilePath);
    fs.setReplication(innerFilePath, (short) 3);
    // BUG FIX: re-list so the loop below sees the freshly created entries
    // instead of iterating the stale (and possibly null) first listing.
    statusArray = fs.listStatus(new Path(testDir));
  }
  if (statusArray == null) {
    // BUG FIX: listStatus may return null; iterating null would throw NPE.
    statusArray = new FileStatus[0];
  }
  for (FileStatus status : statusArray) {
    if (status.isDir()) {
      System.out.println("winter file is a dir: " + status.getPath());
    } else {
      System.out.println("winter file is a file: " + status.getPath().getName());
    }
  }
}
 
開發者ID:fengchen8086,項目名稱:LCIndex-HBase-0.94.16,代碼行數:22,代碼來源:TestLocalFS.java

示例2: LCIndexParameters

import org.apache.hadoop.fs.LocalFileSystem; //導入方法依賴的package包/類
/**
 * Builds the LCIndex path/filesystem parameters for one region.
 *
 * Derives the region id and table name from the HDFS region directory, resolves
 * the configured local index root, and selects either the local filesystem or
 * the supplied default filesystem depending on configuration. Also parses the
 * configured list of region-server hostnames.
 *
 * @param defaultFS     filesystem used when local-FS mode is disabled
 * @param conf          configuration holding all LCIndex settings
 * @param indexType     the index type this region uses
 * @param hdfsRegionDir the region directory on HDFS (…/tableName/regionId)
 * @throws IOException if no usable filesystem can be obtained or the
 *                     region-server host list is missing
 */
public LCIndexParameters(FileSystem defaultFS, Configuration conf, IndexType indexType,
    Path hdfsRegionDir) throws IOException {
  this.conf = conf;
  this.indexType = indexType;
  localIP = InetAddress.getLocalHost();
  this.hdfsRegionDir = hdfsRegionDir;
  // Region dir layout is …/tableName/regionId, so name/parent give both ids.
  regionId = hdfsRegionDir.getName();
  tableName = hdfsRegionDir.getParent().getName();
  // The local index root directory is mandatory configuration.
  String localRootSetting = conf.get(LCIndexConstant.LCINDEX_LOCAL_DIR);
  if (localRootSetting == null) {
    throw new RuntimeException(
        "set " + LCIndexConstant.LCINDEX_LOCAL_DIR + " when using LCIndex");
  }
  localRoot = new Path(localRootSetting);
  localRegionDir = new Path(new Path(localRoot, tableName), regionId);
  localTmpDir = new Path(localRegionDir, LCIndexConstant.LCINDEX_TMP_DIR_NAME);
  // Choose the local FS (default) or fall back to the caller-provided FS.
  boolean preferLocalFs = conf.getBoolean(LCIndexConstant.LCINDEX_USE_LOCAL_FS, true);
  fs = preferLocalFs ? LocalFileSystem.getLocal(conf) : defaultFS;
  if (fs == null) {
    throw new IOException("localFS is null in LCIndex");
  }
  String hosts = conf.get(LCIndexConstant.LCINDEX_REGIONSERVER_HOSTNAMES);
  if (hosts == null) {
    throw new IOException("lcindex miss RegionServer hosts, assign "
        + LCIndexConstant.LCINDEX_REGIONSERVER_HOSTNAMES + " at first, multi hosts with "
        + LCIndexConstant.LCINDEX_REGIONSERVER_HOSTNAMES_DELIMITER);
  }
  // Split the configured host string and collect each entry.
  String[] hostEntries = hosts.split(LCIndexConstant.LCINDEX_REGIONSERVER_HOSTNAMES_DELIMITER);
  lcRegionServerHostnames = new ArrayList<>();
  for (String host : hostEntries) {
    lcRegionServerHostnames.add(host);
  }
}
 
開發者ID:fengchen8086,項目名稱:ditb,代碼行數:37,代碼來源:LCIndexParameters.java

示例3: readHFile

import org.apache.hadoop.fs.LocalFileSystem; //導入方法依賴的package包/類
/**
 * Opens an HFile from either the local filesystem ({@code fsStr} equals
 * "local", case-insensitive) or the default filesystem, then scans it from
 * the lowest key and prints each key-value plus summary statistics to stdout.
 *
 * @param hadoopConf Hadoop configuration used to obtain the filesystem
 * @param hbaseConf  HBase configuration used for cache config and store file
 * @param fsStr      "local" for the local FS, anything else for the default FS
 * @param fileName   path of the HFile to read
 * @throws IOException if the file cannot be opened or scanned
 */
private void readHFile(Configuration hadoopConf, Configuration hbaseConf, String fsStr,
    String fileName) throws IOException {
  CacheConfig tmpCacheConfig = new CacheConfig(hbaseConf);
  FileSystem fs = null;
  if (fsStr.equalsIgnoreCase("local")) {
    fs = LocalFileSystem.getLocal(hadoopConf);
  } else {
    fs = FileSystem.get(hadoopConf);
  }
  Path path = new Path(fileName);
  if (!fs.exists(path)) {
    System.out.println("WinterTestAID file not exists: " + path);
  } else {
    System.out.println("WinterTestAID reading lccindex hfile: " + path);
    StoreFile sf = new StoreFile(fs, path, hbaseConf, tmpCacheConfig, BloomType.NONE, null);
    Reader reader = sf.createReader();
    // BUG FIX: close the reader and scanner in finally blocks so an
    // IOException during the scan no longer leaks the open HFile handles.
    try {
      System.out.println("WinterTestAID store file attr: " + sf.mWinterGetAttribute());
      StoreFileScanner sss = reader.getStoreFileScanner(false, false);
      try {
        sss.seek(KeyValue.LOWESTKEY);
        System.out.println("WinterTestAID store peek value: "
            + LCCIndexConstant.mWinterToPrint(sss.peek()));
        KeyValue kv;
        // printInterval == 1 means every key-value is printed.
        int counter = 0, printInterval = 1, totalSize = 0;
        while ((kv = sss.next()) != null) {
          if (counter == 0) {
            counter = printInterval;
            System.out
                .println("WinterTestAID hfile keyvalue: " + LCCIndexConstant.mWinterToPrint(kv));
          }
          --counter;
          ++totalSize;
        }
        System.out.println("WinterTestAID total size: " + totalSize);
        System.out.println("WinterTestAID winter inner mWinterGetScannersForStoreFiles start: "
            + LCCIndexConstant.convertUnknownBytes(reader.getFirstKey()));
      } finally {
        sss.close();
      }
    } finally {
      // close(false) keeps cached blocks; do not evict on close.
      reader.close(false);
    }
  }
}
 
開發者ID:fengchen8086,項目名稱:LCIndex-HBase-0.94.16,代碼行數:40,代碼來源:ReadHFile.java

示例4: read

import org.apache.hadoop.fs.LocalFileSystem; //導入方法依賴的package包/類
/**
 * Reads back the records previously written into {@code inMemoryFile} through
 * a {@link SimpleSeekableFormatInputStream}, after seeking forward, and
 * verifies the bytes match what the deterministic random generator produces.
 *
 * When {@code useFileSystem} is true the data is first written to a real file
 * so that seeking goes through a Seekable FSInputStream; otherwise an
 * in-memory stream that caps {@code available()} at {@code availableBytes}
 * simulates a still-growing file.
 *
 * @param dataRandom2  random source re-seeded identically to the writer, used
 *                     to regenerate the expected record bytes
 * @param inMemoryFile the previously written stream contents
 * @throws Exception on any I/O or assertion failure
 */
@Override
void read(final Random dataRandom2, final ByteArrayOutputStream inMemoryFile)
    throws Exception {

  byte[] data = inMemoryFile.toByteArray();
  // Open the in-memory file for read
  InputStream fileForRead = null;
  if (useFileSystem) {
    // Write data to a file and then test it.
    // This is useful for testing "seek" because FSInputStream is a Seekable.
    Configuration conf = new Configuration();
    FileSystem fs = LocalFileSystem.getLocal(conf);
    Path file = new Path(System.getProperty("user.dir") + "/test_seek.ssf");
    // BUG FIX: the temp file was never removed; schedule it for deletion so
    // repeated runs do not litter user.dir.
    fs.deleteOnExit(file);
    OutputStream out = fs.create(file);
    out.write(data);
    out.close();
    fileForRead = fs.open(file);
  } else {
    fileForRead = new ByteArrayInputStream(data) {
      /**
       * Only expose at most availableBytes until those bytes are all read.
       * This is to simulate a growing file.
       */
      @Override
      public int available() {
        if (pos < availableBytes) {
          return Math.min(super.available(), availableBytes - pos);
        } else {
          return super.available();
        }
      }
    };
  }

  SimpleSeekableFormatInputStream in = new SimpleSeekableFormatInputStream(fileForRead);
  DataInputStream dataIn = new DataInputStream(in);

  long seekedPosition = in.seekForward();
  {
    // We should not be at the beginning of the stream any more.
    InterleavedInputStream interleavedIn = in.getInterleavedIn();
    long blocks = interleavedIn.getRawOffset() / interleavedIn.getCompleteBlockSize();
    long blocksAvailable = (availableBytes - interleavedIn.getMetaDataBlockSize()) / interleavedIn.getCompleteBlockSize();
    blocksAvailable = Math.max(0, blocksAvailable);
    Assert.assertTrue(blocks >= blocksAvailable);
  }

  long currentUncompressedPosition = 0;
  for (int r = 0; r < numRecord; r++) {
    // Regenerate the same random bytes
    byte[] b = new byte[dataRandom2.nextInt(maxRecordSize)];
    UtilsForTests.nextBytes(dataRandom2, b, 16);
    if (currentUncompressedPosition >= seekedPosition) {
      // Read from the file
      byte[] b2 = new byte[b.length];
      dataIn.readFully(b2);
      UtilsForTests.assertArrayEquals("record " + r + " with length " + b.length,
          b, b2);
    }
    currentUncompressedPosition += b.length;
  }

  // Verify EOF
  Assert.assertEquals(-1, in.read());
  byte[] temp = new byte[100];
  Assert.assertEquals(-1, in.read(temp));
  // BUG FIX: close the stream chain (also closes the underlying file/stream)
  // instead of leaking the open handle.
  dataIn.close();
}
 
開發者ID:rhli,項目名稱:hadoop-EAR,代碼行數:68,代碼來源:TestSimpleSeekableFormatStreams.java


注:本文中的org.apache.hadoop.fs.LocalFileSystem.getLocal方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。