

Java SwiftTestUtils.compareByteArrays Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.fs.swift.util.SwiftTestUtils.compareByteArrays. If you are wondering what SwiftTestUtils.compareByteArrays does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore other usage examples of org.apache.hadoop.fs.swift.util.SwiftTestUtils.


Seven code examples of SwiftTestUtils.compareByteArrays are shown below, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Java code examples.
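All seven examples follow the same write-then-read-back verification pattern: generate a deterministic dataset, write it through the filesystem under test, read it back, and let compareByteArrays fail the test on any mismatch. As a quick orientation, here is a minimal, self-contained sketch of that pattern. It is not taken from the examples below: the local filesystem, the path, and the comments describing compareByteArrays' failure behaviour are illustrative assumptions.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.swift.util.SwiftTestUtils;

public class CompareByteArraysSketch {
  public static void main(String[] args) throws Exception {
    // Any Hadoop FileSystem works for the pattern; the examples below run it
    // against a Swift-backed fs, but a local fs is enough to show the calls.
    FileSystem fs = FileSystem.getLocal(new Configuration());
    Path path = new Path("/tmp/compareByteArraysSketch");  // illustrative path

    int len = 4096;
    // dataset(len, base, modulo) builds a deterministic, repeating byte pattern
    byte[] src = SwiftTestUtils.dataset(len, 32, 144);

    // write the dataset, then read it back through the same filesystem
    SwiftTestUtils.writeDataset(fs, path, src, len, 1024, true);
    byte[] dest = SwiftTestUtils.readDataset(fs, path, len);

    // passes silently if the first len bytes of both arrays match,
    // otherwise raises an AssertionError identifying the mismatch
    SwiftTestUtils.compareByteArrays(src, dest, len);

    fs.delete(path, false);
  }
}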

Example 1: testFilePartUpload

import org.apache.hadoop.fs.swift.util.SwiftTestUtils; //import the package/class the method depends on
/**
 * Tests upload functionality for big files (> 5GB).
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUpload() throws Throwable {

  final Path path = new Path("/test/testFilePartUpload");

  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  try {
    int totalPartitionsToWrite = len / PART_SIZE_BYTES;
    assertPartitionsWritten("Startup", out, 0);
    //write 2048
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    //assert
    long expected = getExpectedPartitionsWritten(firstWriteLen,
                                                 PART_SIZE_BYTES,
                                                 false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written",
                     expected);
    assertPartitionsWritten("First write completed", out, expected);
    //write the rest
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);

    out.write(src, firstWriteLen, remainder);
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    out.close();
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);

    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }

    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    //compare data
    SwiftTestUtils.compareByteArrays(src, dest, len);
    FileStatus status;

    final Path qualifiedPath = path.makeQualified(fs);
    status = fs.getFileStatus(qualifiedPath);
    //now see what block location info comes back.
    //This will vary depending on the Swift version, so the results
    //aren't checked -merely that the test actually worked
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()",
               locations.length > 0);

    //last bit of test - this seems to play up on partitions, so any
    //failure here is downgraded to a skip
    try {
      validatePathLen(path, len);
    } catch (AssertionError e) {
      //downgrade to a skip
      throw new AssumptionViolatedException(e, null);
    }

  } finally {
    IOUtils.closeStream(out);
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 77, Source: TestSwiftFileSystemPartitionedUploads.java
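The tests on this page call two helpers defined in TestSwiftFileSystemPartitionedUploads but not reproduced here: assertPartitionsWritten and getExpectedPartitionsWritten. The sketch below shows the partition arithmetic the second helper presumably performs, inferred from how the tests use it (full PART_SIZE_BYTES blocks are uploaded as they fill; a smaller remainder is only uploaded when the stream is closed). It is an assumption, not the upstream implementation.

final class PartitionMath {
  /**
   * Inferred expected-partition count: while the stream is open, only
   * complete partSizeBytes blocks have been uploaded; on close() any
   * buffered remainder is uploaded as one final, smaller partition.
   */
  static long getExpectedPartitionsWritten(long bytesWritten,
                                           int partSizeBytes,
                                           boolean closed) {
    long fullPartitions = bytesWritten / partSizeBytes;
    if (!closed) {
      return fullPartitions;  // remainder is still buffered client-side
    }
    return fullPartitions + (bytesWritten % partSizeBytes == 0 ? 0 : 1);
  }
}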

Example 2: testManyPartitionedFile

import org.apache.hadoop.fs.swift.util.SwiftTestUtils; //import the package/class the method depends on
/**
 * Test sticks up a very large partitioned file and verifies that
 * it comes back unchanged.
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
  final Path path = new Path("/test/testManyPartitionedFile");

  int len = PART_SIZE_BYTES * 15;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  out.write(src, 0, src.length);
  int expected =
    getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
  out.close();
  assertPartitionsWritten("write completed", out, expected);
  assertEquals("too few bytes written", len,
               SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded", len,
               SwiftNativeFileSystem.getBytesUploaded(out));
  //now we verify that the data comes back. If it
  //doesn't, it means that the ordering of the partitions
  //isn't right
  byte[] dest = readDataset(fs, path, len);
  //compare data
  SwiftTestUtils.compareByteArrays(src, dest, len);
  //finally, check the data
  FileStatus[] stats = fs.listStatus(path);
  assertEquals("wrong entry count in "
               + SwiftTestUtils.dumpStats(path.toString(), stats),
               expected, stats.length);
}
 
Developer: naver, Project: hadoop, Lines of code: 39, Source: TestSwiftFileSystemPartitionedUploads.java

Example 3: testOverwritePartitionedFile

import org.apache.hadoop.fs.swift.util.SwiftTestUtils; //import the package/class the method depends on
/**
 * Test that when a partitioned file is overwritten by a smaller one,
 * all the old partitioned files go away
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testOverwritePartitionedFile() throws Throwable {
  final Path path = new Path("/test/testOverwritePartitionedFile");

  final int len1 = 8192;
  final byte[] src1 = SwiftTestUtils.dataset(len1, 'A', 'Z');
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     1024);
  out.write(src1, 0, len1);
  out.close();
  long expected = getExpectedPartitionsWritten(len1,
                                               PART_SIZE_BYTES,
                                               false);
  assertPartitionsWritten("initial upload", out, expected);
  assertExists("Exists", path);
  FileStatus status = fs.getFileStatus(path);
  assertEquals("Length", len1, status.getLen());
  //now write a shorter file with a different dataset
  final int len2 = 4095;
  final byte[] src2 = SwiftTestUtils.dataset(len2, 'a', 'z');
  out = fs.create(path,
                  true,
                  getBufferSize(),
                  (short) 1,
                  1024);
  out.write(src2, 0, len2);
  out.close();
  status = fs.getFileStatus(path);
  assertEquals("Length", len2, status.getLen());
  byte[] dest = readDataset(fs, path, len2);
  //compare data
  SwiftTestUtils.compareByteArrays(src2, dest, len2);
}
 
Developer: naver, Project: hadoop, Lines of code: 42, Source: TestSwiftFileSystemPartitionedUploads.java

Example 4: testRenamePartitionedFile

import org.apache.hadoop.fs.swift.util.SwiftTestUtils; //import the package/class the method depends on
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testRenamePartitionedFile() throws Throwable {
  Path src = new Path("/test/testRenamePartitionedFileSrc");

  int len = data.length;
  SwiftTestUtils.writeDataset(fs, src, data, len, 1024, false);
  assertExists("Exists", src);

  String partOneName = SwiftUtils.partitionFilenameFromNumber(1);
  Path srcPart = new Path(src, partOneName);
  Path dest = new Path("/test/testRenamePartitionedFileDest");
  Path destPart = new Path(src, partOneName);
  assertExists("Partition Exists", srcPart);
  fs.rename(src, dest);
  assertPathExists(fs, "dest file missing", dest);
  FileStatus status = fs.getFileStatus(dest);
  assertEquals("Length of renamed file is wrong", len, status.getLen());
  byte[] destData = readDataset(fs, dest, len);
  //compare data
  SwiftTestUtils.compareByteArrays(data, destData, len);
  String srcLs = SwiftTestUtils.ls(fs, src);
  String destLs = SwiftTestUtils.ls(fs, dest);

  assertPathDoesNotExist("deleted file still found in " + srcLs, src);

  assertPathDoesNotExist("partition file still found in " + srcLs, srcPart);
}
 
Developer: naver, Project: hadoop, Lines of code: 28, Source: TestSwiftFileSystemPartitionedUploads.java

Example 5: testManyPartitionedFile

import org.apache.hadoop.fs.swift.util.SwiftTestUtils; //import the package/class the method depends on
/**
 * Test sticks up a very large partitioned file and verifies that
 * it comes back unchanged.
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testManyPartitionedFile() throws Throwable {
  final Path path = new Path("/test/testManyPartitionedFile");

  int len = PART_SIZE_BYTES * 15;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  out.write(src, 0, src.length);
  int expected =
    getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
  out.close();
  assertPartitionsWritten("write completed", out, expected);
  assertEquals("too few bytes written", len,
               SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded", len,
               SwiftNativeFileSystem.getBytesUploaded(out));
  //now we verify that the data comes back. If it
  //doesn't, it means that the ordering of the partitions
  //isn't right
  byte[] dest = readDataset(fs, path, len);
  //compare data
  SwiftTestUtils.compareByteArrays(src, dest, len);
  //finally, check the data
  FileStatus[] stats = getStore().listSegments(fs.getFileStatus(path), true);
  assertEquals("wrong entry count in "
               + SwiftTestUtils.dumpStats(path.toString(), stats),
               expected, stats.length);
}
 
Developer: openstack, Project: sahara-extra, Lines of code: 39, Source: TestSwiftFileSystemPartitionedUploads.java

Example 6: testQualifiedPath

import org.apache.hadoop.fs.swift.util.SwiftTestUtils; //import the package/class the method depends on
/**
 * Tests that writing a partitioned file works when the path is qualified.
 * @throws Throwable
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testQualifiedPath() throws Throwable {
  final Path path = path("/test/qualifiedPath");
  int len = PART_SIZE_BYTES * 4;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  out.write(src, 0, src.length);
  int expected =
    getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
  out.close();
  assertPartitionsWritten("write completed", out, expected);
  assertEquals("too few bytes written", len,
               SwiftNativeFileSystem.getBytesWritten(out));
  assertEquals("too few bytes uploaded", len,
               SwiftNativeFileSystem.getBytesUploaded(out));
  //now we verify that the data comes back. If it
  //doesn't, it means that the ordering of the partitions
  //isn't right
  byte[] dest = readDataset(fs, path, len);
  //compare data
  SwiftTestUtils.compareByteArrays(src, dest, len);
  //finally, check the data
  FileStatus[] stats = getStore().listSegments(fs.getFileStatus(path), true);
  assertEquals("wrong entry count in "
               + SwiftTestUtils.dumpStats(path.toString(), stats),
               expected, stats.length);
}
 
Developer: openstack, Project: sahara-extra, Lines of code: 37, Source: TestSwiftFileSystemPartitionedUploads.java

Example 7: testFilePartUploadNoLengthCheck

import org.apache.hadoop.fs.swift.util.SwiftTestUtils; //import the package/class the method depends on
/**
 * Tests upload functionality for big files (> 5GB).
 */
@Test(timeout = SWIFT_BULK_IO_TEST_TIMEOUT)
public void testFilePartUploadNoLengthCheck() throws IOException, URISyntaxException {

  final Path path = new Path("/test/testFilePartUploadLengthCheck");

  int len = 8192;
  final byte[] src = SwiftTestUtils.dataset(len, 32, 144);
  FSDataOutputStream out = fs.create(path,
                                     false,
                                     getBufferSize(),
                                     (short) 1,
                                     BLOCK_SIZE);

  try {
    int totalPartitionsToWrite = len / PART_SIZE_BYTES;
    assertPartitionsWritten("Startup", out, 0);
    //write 2048
    int firstWriteLen = 2048;
    out.write(src, 0, firstWriteLen);
    //assert
    long expected = getExpectedPartitionsWritten(firstWriteLen,
                                                 PART_SIZE_BYTES,
                                                 false);
    SwiftUtils.debug(LOG, "First write: predict %d partitions written",
                     expected);
    assertPartitionsWritten("First write completed", out, expected);
    //write the rest
    int remainder = len - firstWriteLen;
    SwiftUtils.debug(LOG, "remainder: writing: %d bytes", remainder);

    out.write(src, firstWriteLen, remainder);
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, false);
    assertPartitionsWritten("Remaining data", out, expected);
    out.close();
    expected =
      getExpectedPartitionsWritten(len, PART_SIZE_BYTES, true);
    assertPartitionsWritten("Stream closed", out, expected);

    Header[] headers = fs.getStore().getObjectHeaders(path, true);
    for (Header header : headers) {
      LOG.info(header.toString());
    }

    byte[] dest = readDataset(fs, path, len);
    LOG.info("Read dataset from " + path + ": data length =" + len);
    //compare data
    SwiftTestUtils.compareByteArrays(src, dest, len);
    FileStatus status = fs.getFileStatus(path);

    //now see what block location info comes back.
    //This will vary depending on the Swift version, so the results
    //aren't checked -merely that the test actually worked
    BlockLocation[] locations = fs.getFileBlockLocations(status, 0, len);
    assertNotNull("Null getFileBlockLocations()", locations);
    assertTrue("empty array returned for getFileBlockLocations()",
               locations.length > 0);
  } finally {
    IOUtils.closeStream(out);
  }
}
 
Developer: naver, Project: hadoop, Lines of code: 65, Source: TestSwiftFileSystemPartitionedUploads.java


Note: The org.apache.hadoop.fs.swift.util.SwiftTestUtils.compareByteArrays examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright remains with the original authors. Please consult the corresponding project's License before distributing or using the code. Do not reproduce without permission.