This page collects typical usage examples of the Java class org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator. If you are unsure what HashBucketIterator does, how to use it, or are looking for working examples, the curated class code examples below should help.
HashBucketIterator is an inner class of MutableHashTable in the org.apache.flink.runtime.operators.hash package. Eleven code examples of the HashBucketIterator class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Java code examples.
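
Before the individual examples, here is a condensed sketch of the pattern they all share: a MutableHashTable is built from one input and probed with the other, and a HashBucketIterator walks the matching build-side records for each probe record. The sketch uses only calls that appear in the examples on this page; the class name HashJoinSketch, the method name countJoinedRecords, and the idea of passing the serializers, comparators, memory segments, and I/O manager in as parameters are illustrative scaffolding, not part of the original test code, and the import package names follow the Flink version these tests target (adjust if yours differs).

import java.io.IOException;
import java.util.List;

import org.apache.flink.api.common.typeutils.TypeComparator;
import org.apache.flink.api.common.typeutils.TypePairComparator;
import org.apache.flink.api.common.typeutils.TypeSerializer;
import org.apache.flink.core.memory.MemorySegment;
import org.apache.flink.runtime.io.disk.iomanager.IOManager;
import org.apache.flink.runtime.operators.hash.MutableHashTable;
import org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator;
import org.apache.flink.types.Record;
import org.apache.flink.util.MutableObjectIterator;

public class HashJoinSketch {

    /**
     * Builds a hash table from {@code buildInput}, probes it with {@code probeInput},
     * and returns the number of (build, probe) record combinations found.
     * The serializers, comparators, memory segments, and I/O manager are assumed to be
     * supplied by the caller, just as the test fixtures supply them in the examples below.
     */
    public static int countJoinedRecords(
            MutableObjectIterator<Record> buildInput,
            MutableObjectIterator<Record> probeInput,
            TypeSerializer<Record> serializer,
            TypeComparator<Record> comparator,
            TypePairComparator<Record, Record> pairComparator,
            List<MemorySegment> memSegments,
            IOManager ioManager) throws IOException {

        final MutableHashTable<Record, Record> join = new MutableHashTable<Record, Record>(
                serializer, serializer, comparator, comparator, pairComparator,
                memSegments, ioManager);
        join.open(buildInput, probeInput);

        final Record recordReuse = new Record();
        int numRecordsInJoinResult = 0;

        // as in the examples below: nextRecord() advances to the next probe-side record,
        // and getBuildSideIterator() then yields the matching build-side records for it
        while (join.nextRecord()) {
            HashBucketIterator<Record, Record> buildSide = join.getBuildSideIterator();
            while (buildSide.next(recordReuse) != null) {
                numRecordsInJoinResult++;
            }
        }

        join.close();
        return numRecordsInJoinResult;
    }
}
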
Example 1: testMutableHashMapPerformance
import org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator; // import of the required package/class

@Test
public void testMutableHashMapPerformance() {
    try {
        final int NUM_MEM_PAGES = SIZE * NUM_PAIRS / PAGE_SIZE;

        MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_PAIRS, 1, false);
        MutableObjectIterator<IntPair> probeInput = new UniformIntPairGenerator(0, 1, false);
        MutableObjectIterator<IntPair> probeTester = new UniformIntPairGenerator(NUM_PAIRS, 1, false);
        MutableObjectIterator<IntPair> updater = new UniformIntPairGenerator(NUM_PAIRS, 1, false);
        MutableObjectIterator<IntPair> updateTester = new UniformIntPairGenerator(NUM_PAIRS, 1, false);

        long start = 0L;
        long end = 0L;
        long first = System.currentTimeMillis();

        System.out.println("Creating and filling MutableHashMap...");
        start = System.currentTimeMillis();
        MutableHashTable<IntPair, IntPair> table = new MutableHashTable<IntPair, IntPair>(
                serializer, serializer, comparator, comparator, pairComparator,
                getMemory(NUM_MEM_PAGES, PAGE_SIZE), ioManager);
        table.open(buildInput, probeInput);
        end = System.currentTimeMillis();
        System.out.println("HashMap ready. Time: " + (end-start) + " ms");

        System.out.println("Starting first probing run...");
        start = System.currentTimeMillis();
        IntPair compare = new IntPair();
        HashBucketIterator<IntPair, IntPair> iter;
        IntPair target = new IntPair();
        while (probeTester.next(compare) != null) {
            iter = table.getMatchesFor(compare);
            iter.next(target);
            assertEquals(target.getKey(), compare.getKey());
            assertEquals(target.getValue(), compare.getValue());
            assertTrue(iter.next(target) == null);
        }
        end = System.currentTimeMillis();
        System.out.println("Probing done. Time: " + (end-start) + " ms");

        System.out.println("Starting update...");
        start = System.currentTimeMillis();
        while (updater.next(compare) != null) {
            compare.setValue(compare.getValue()*-1);
            iter = table.getMatchesFor(compare);
            iter.next(target);
            iter.writeBack(compare);
            //assertFalse(iter.next(target));
        }
        end = System.currentTimeMillis();
        System.out.println("Update done. Time: " + (end-start) + " ms");

        System.out.println("Starting second probing run...");
        start = System.currentTimeMillis();
        while (updateTester.next(compare) != null) {
            compare.setValue(compare.getValue()*-1);
            iter = table.getMatchesFor(compare);
            iter.next(target);
            assertEquals(target.getKey(), compare.getKey());
            assertEquals(target.getValue(), compare.getValue());
            assertTrue(iter.next(target) == null);
        }
        end = System.currentTimeMillis();
        System.out.println("Probing done. Time: " + (end-start) + " ms");

        table.close();
        end = System.currentTimeMillis();
        System.out.println("Overall time: " + (end-first) + " ms");

        assertEquals("Memory lost", NUM_MEM_PAGES, table.getFreedMemory().size());
    }
    catch (Exception e) {
        e.printStackTrace();
        fail("Error: " + e.getMessage());
    }
}
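
For completeness, here is the lookup-and-update step from Example 1 reduced to its core: MutableHashTable.getMatchesFor(record) returns a HashBucketIterator over the build-side entries whose key matches the given record, and HashBucketIterator.writeBack(record) overwrites the entry the iterator last returned. The helper below is a minimal sketch that only assumes the calls shown above; the class name, method name, and parameter names are illustrative, and the IntPair import refers to the test utility type used by Example 1 (package assumed).

import java.io.IOException;

import org.apache.flink.runtime.operators.hash.MutableHashTable;
import org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator;
import org.apache.flink.runtime.operators.testutils.types.IntPair; // IntPair test type as in Example 1 (package assumed)

public class HashBucketLookupSketch {

    /**
     * Looks up the key of {@code newPair} in the table's build side and, if a match
     * exists, overwrites the matched entry in place via writeBack().
     * Returns true if an entry was updated, false if the key was not present.
     */
    public static boolean updateInPlace(MutableHashTable<IntPair, IntPair> table, IntPair newPair)
            throws IOException {
        IntPair match = new IntPair();

        // position a bucket iterator over the build-side entries matching newPair's key
        HashBucketIterator<IntPair, IntPair> iter = table.getMatchesFor(newPair);
        if (iter.next(match) == null) {
            return false;              // no build-side entry with this key
        }
        iter.writeBack(newPair);       // overwrite the matched entry in place
        return true;
    }
}
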
Example 2: testInMemoryMutableHashTable
import org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator; // import of the required package/class

@Test
public void testInMemoryMutableHashTable() throws IOException
{
    final int NUM_KEYS = 100000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;

    // create a build input that gives 300k pairs with 3 values sharing the same key
    MutableObjectIterator<Record> buildInput = new UniformRecordGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);

    // create a probe input that gives 1 million pairs with 10 values sharing a key
    MutableObjectIterator<Record> probeInput = new UniformRecordGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 896);
    }
    catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    // ----------------------------------------------------------------------------------------

    final MutableHashTable<Record, Record> join = new MutableHashTable<Record, Record>(
            this.recordBuildSideAccesssor, this.recordProbeSideAccesssor,
            this.recordBuildSideComparator, this.recordProbeSideComparator, this.pactRecordComparator,
            memSegments, ioManager);
    join.open(buildInput, probeInput);

    final Record recordReuse = new Record();
    int numRecordsInJoinResult = 0;

    while (join.nextRecord()) {
        HashBucketIterator<Record, Record> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", NUM_KEYS * BUILD_VALS_PER_KEY * PROBE_VALS_PER_KEY, numRecordsInJoinResult);

    join.close();

    // ----------------------------------------------------------------------------------------

    this.memManager.release(join.getFreedMemory());
}
Example 3: testSpillingHashJoinOneRecursionPerformance
import org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator; // import of the required package/class

@Test
public void testSpillingHashJoinOneRecursionPerformance() throws IOException
{
    final int NUM_KEYS = 1000000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;

    // create a build input that gives 3 million pairs with 3 values sharing the same key
    MutableObjectIterator<Record> buildInput = new UniformRecordGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);

    // create a probe input that gives 10 million pairs with 10 values sharing a key
    MutableObjectIterator<Record> probeInput = new UniformRecordGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 896);
    }
    catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    // ----------------------------------------------------------------------------------------

    final MutableHashTable<Record, Record> join = new MutableHashTable<Record, Record>(
            this.recordBuildSideAccesssor, this.recordProbeSideAccesssor,
            this.recordBuildSideComparator, this.recordProbeSideComparator, this.pactRecordComparator,
            memSegments, ioManager);
    join.open(buildInput, probeInput);

    final Record recordReuse = new Record();
    int numRecordsInJoinResult = 0;

    while (join.nextRecord()) {
        HashBucketIterator<Record, Record> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", NUM_KEYS * BUILD_VALS_PER_KEY * PROBE_VALS_PER_KEY, numRecordsInJoinResult);

    join.close();

    // ----------------------------------------------------------------------------------------

    this.memManager.release(join.getFreedMemory());
}
Example 4: testFailingHashJoinTooManyRecursions
import org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator; // import of the required package/class

@Test
public void testFailingHashJoinTooManyRecursions() throws IOException
{
    // the following two values are known to have a hash-code collision on the first recursion level.
    // we use them to make sure one partition grows over-proportionally large
    final int REPEATED_VALUE_1 = 40559;
    final int REPEATED_VALUE_2 = 92882;
    final int REPEATED_VALUE_COUNT = 3000000;

    final int NUM_KEYS = 1000000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;

    // create a build input that gives 3 million pairs with 3 values sharing the same key,
    // plus 6 million pairs spread over just the two heavily repeated keys
    MutableObjectIterator<Record> build1 = new UniformRecordGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);
    MutableObjectIterator<Record> build2 = new ConstantsKeyValuePairsIterator(REPEATED_VALUE_1, 17, REPEATED_VALUE_COUNT);
    MutableObjectIterator<Record> build3 = new ConstantsKeyValuePairsIterator(REPEATED_VALUE_2, 23, REPEATED_VALUE_COUNT);
    List<MutableObjectIterator<Record>> builds = new ArrayList<MutableObjectIterator<Record>>();
    builds.add(build1);
    builds.add(build2);
    builds.add(build3);
    MutableObjectIterator<Record> buildInput = new UnionIterator<Record>(builds);

    // create a probe input that gives 10 million pairs with 10 values sharing a key,
    // plus another 6 million pairs for the two repeated keys
    MutableObjectIterator<Record> probe1 = new UniformRecordGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);
    MutableObjectIterator<Record> probe2 = new ConstantsKeyValuePairsIterator(REPEATED_VALUE_1, 17, REPEATED_VALUE_COUNT);
    MutableObjectIterator<Record> probe3 = new ConstantsKeyValuePairsIterator(REPEATED_VALUE_2, 23, REPEATED_VALUE_COUNT);
    List<MutableObjectIterator<Record>> probes = new ArrayList<MutableObjectIterator<Record>>();
    probes.add(probe1);
    probes.add(probe2);
    probes.add(probe3);
    MutableObjectIterator<Record> probeInput = new UnionIterator<Record>(probes);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 896);
    }
    catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    // ----------------------------------------------------------------------------------------

    final MutableHashTable<Record, Record> join = new MutableHashTable<Record, Record>(
            this.recordBuildSideAccesssor, this.recordProbeSideAccesssor,
            this.recordBuildSideComparator, this.recordProbeSideComparator, this.pactRecordComparator,
            memSegments, ioManager);
    join.open(buildInput, probeInput);

    final Record recordReuse = new Record();
    try {
        while (join.nextRecord()) {
            HashBucketIterator<Record, Record> buildSide = join.getBuildSideIterator();
            if (buildSide.next(recordReuse) == null) {
                fail("No build side values found for a probe key.");
            }
            while (buildSide.next(recordReuse) != null);
        }
        fail("Hash Join must have failed due to too many recursions.");
    }
    catch (Exception ex) {
        // expected
    }

    join.close();

    // ----------------------------------------------------------------------------------------

    this.memManager.release(join.getFreedMemory());
}
Example 5: testSparseProbeSpilling
import org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator; // import of the required package/class

@Test
public void testSparseProbeSpilling() throws IOException, MemoryAllocationException
{
    final int NUM_BUILD_KEYS = 1000000;
    final int NUM_BUILD_VALS = 1;
    final int NUM_PROBE_KEYS = 20;
    final int NUM_PROBE_VALS = 1;

    MutableObjectIterator<Record> buildInput = new UniformRecordGenerator(
            NUM_BUILD_KEYS, NUM_BUILD_VALS, false);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 128);
    }
    catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    final MutableHashTable<Record, Record> join = new MutableHashTable<Record, Record>(
            this.recordBuildSideAccesssor, this.recordProbeSideAccesssor,
            this.recordBuildSideComparator, this.recordProbeSideComparator, this.pactRecordComparator,
            memSegments, ioManager);
    join.open(buildInput, new UniformRecordGenerator(NUM_PROBE_KEYS, NUM_PROBE_VALS, true));

    int expectedNumResults = (Math.min(NUM_PROBE_KEYS, NUM_BUILD_KEYS) * NUM_BUILD_VALS)
            * NUM_PROBE_VALS;

    final Record recordReuse = new Record();
    int numRecordsInJoinResult = 0;

    while (join.nextRecord()) {
        HashBucketIterator<Record, Record> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", expectedNumResults, numRecordsInJoinResult);

    join.close();
    this.memManager.release(join.getFreedMemory());
}
Example 6: validateSpillingDuringInsertion
import org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator; // import of the required package/class

@Test
public void validateSpillingDuringInsertion() throws IOException, MemoryAllocationException
{
    final int NUM_BUILD_KEYS = 500000;
    final int NUM_BUILD_VALS = 1;
    final int NUM_PROBE_KEYS = 10;
    final int NUM_PROBE_VALS = 1;

    MutableObjectIterator<Record> buildInput = new UniformRecordGenerator(NUM_BUILD_KEYS, NUM_BUILD_VALS, false);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 85);
    }
    catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    final MutableHashTable<Record, Record> join = new MutableHashTable<Record, Record>(
            this.recordBuildSideAccesssor, this.recordProbeSideAccesssor,
            this.recordBuildSideComparator, this.recordProbeSideComparator, this.pactRecordComparator,
            memSegments, ioManager);
    join.open(buildInput, new UniformRecordGenerator(NUM_PROBE_KEYS, NUM_PROBE_VALS, true));

    final Record recordReuse = new Record();
    int numRecordsInJoinResult = 0;
    int expectedNumResults = (Math.min(NUM_PROBE_KEYS, NUM_BUILD_KEYS) * NUM_BUILD_VALS)
            * NUM_PROBE_VALS;

    while (join.nextRecord()) {
        HashBucketIterator<Record, Record> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", expectedNumResults, numRecordsInJoinResult);

    join.close();
    this.memManager.release(join.getFreedMemory());
}
Example 7: testInMemoryMutableHashTableIntPair
import org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator; // import of the required package/class

@Test
public void testInMemoryMutableHashTableIntPair() throws IOException
{
    final int NUM_KEYS = 100000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;

    // create a build input that gives 300k pairs with 3 values sharing the same key
    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);

    // create a probe input that gives 1 million pairs with 10 values sharing a key
    MutableObjectIterator<IntPair> probeInput = new UniformIntPairGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 896);
    }
    catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    // create the I/O access for spilling
    final IOManager ioManager = new IOManager();

    // ----------------------------------------------------------------------------------------

    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
            this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
            this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
            memSegments, ioManager);
    join.open(buildInput, probeInput);

    final IntPair recordReuse = new IntPair();
    int numRecordsInJoinResult = 0;

    while (join.nextRecord()) {
        HashBucketIterator<IntPair, IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", NUM_KEYS * BUILD_VALS_PER_KEY * PROBE_VALS_PER_KEY, numRecordsInJoinResult);

    join.close();

    // ----------------------------------------------------------------------------------------

    this.memManager.release(join.getFreedMemory());
}
Example 8: testSpillingHashJoinOneRecursionPerformanceIntPair
import org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator; // import of the required package/class

@Test
public void testSpillingHashJoinOneRecursionPerformanceIntPair() throws IOException
{
    final int NUM_KEYS = 1000000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;

    // create a build input that gives 3 million pairs with 3 values sharing the same key
    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);

    // create a probe input that gives 10 million pairs with 10 values sharing a key
    MutableObjectIterator<IntPair> probeInput = new UniformIntPairGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 896);
    }
    catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    // ----------------------------------------------------------------------------------------

    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
            this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
            this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
            memSegments, ioManager);
    join.open(buildInput, probeInput);

    final IntPair recordReuse = new IntPair();
    int numRecordsInJoinResult = 0;

    while (join.nextRecord()) {
        HashBucketIterator<IntPair, IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", NUM_KEYS * BUILD_VALS_PER_KEY * PROBE_VALS_PER_KEY, numRecordsInJoinResult);

    join.close();

    // ----------------------------------------------------------------------------------------

    this.memManager.release(join.getFreedMemory());
}
Example 9: testFailingHashJoinTooManyRecursionsIntPair
import org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator; // import of the required package/class

@Test
public void testFailingHashJoinTooManyRecursionsIntPair() throws IOException
{
    // the following two values are known to have a hash-code collision on the first recursion level.
    // we use them to make sure one partition grows over-proportionally large
    final int REPEATED_VALUE_1 = 40559;
    final int REPEATED_VALUE_2 = 92882;
    final int REPEATED_VALUE_COUNT = 3000000;

    final int NUM_KEYS = 1000000;
    final int BUILD_VALS_PER_KEY = 3;
    final int PROBE_VALS_PER_KEY = 10;

    // create a build input that gives 3 million pairs with 3 values sharing the same key,
    // plus 6 million pairs spread over just the two heavily repeated keys
    MutableObjectIterator<IntPair> build1 = new UniformIntPairGenerator(NUM_KEYS, BUILD_VALS_PER_KEY, false);
    MutableObjectIterator<IntPair> build2 = new ConstantsIntPairsIterator(REPEATED_VALUE_1, 17, REPEATED_VALUE_COUNT);
    MutableObjectIterator<IntPair> build3 = new ConstantsIntPairsIterator(REPEATED_VALUE_2, 23, REPEATED_VALUE_COUNT);
    List<MutableObjectIterator<IntPair>> builds = new ArrayList<MutableObjectIterator<IntPair>>();
    builds.add(build1);
    builds.add(build2);
    builds.add(build3);
    MutableObjectIterator<IntPair> buildInput = new UnionIterator<IntPair>(builds);

    // create a probe input that gives 10 million pairs with 10 values sharing a key,
    // plus another 6 million pairs for the two repeated keys
    MutableObjectIterator<IntPair> probe1 = new UniformIntPairGenerator(NUM_KEYS, PROBE_VALS_PER_KEY, true);
    MutableObjectIterator<IntPair> probe2 = new ConstantsIntPairsIterator(REPEATED_VALUE_1, 17, REPEATED_VALUE_COUNT);
    MutableObjectIterator<IntPair> probe3 = new ConstantsIntPairsIterator(REPEATED_VALUE_2, 23, REPEATED_VALUE_COUNT);
    List<MutableObjectIterator<IntPair>> probes = new ArrayList<MutableObjectIterator<IntPair>>();
    probes.add(probe1);
    probes.add(probe2);
    probes.add(probe3);
    MutableObjectIterator<IntPair> probeInput = new UnionIterator<IntPair>(probes);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 896);
    }
    catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    // ----------------------------------------------------------------------------------------

    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
            this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
            this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
            memSegments, ioManager);
    join.open(buildInput, probeInput);

    final IntPair recordReuse = new IntPair();
    try {
        while (join.nextRecord()) {
            HashBucketIterator<IntPair, IntPair> buildSide = join.getBuildSideIterator();
            if (buildSide.next(recordReuse) == null) {
                fail("No build side values found for a probe key.");
            }
            while (buildSide.next(recordReuse) != null);
        }
        fail("Hash Join must have failed due to too many recursions.");
    }
    catch (Exception ex) {
        // expected
    }

    join.close();

    // ----------------------------------------------------------------------------------------

    this.memManager.release(join.getFreedMemory());
}
Example 10: testSparseProbeSpillingIntPair
import org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator; // import of the required package/class

@Test
public void testSparseProbeSpillingIntPair() throws IOException, MemoryAllocationException
{
    final int NUM_BUILD_KEYS = 1000000;
    final int NUM_BUILD_VALS = 1;
    final int NUM_PROBE_KEYS = 20;
    final int NUM_PROBE_VALS = 1;

    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_BUILD_KEYS, NUM_BUILD_VALS, false);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 128);
    }
    catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
            this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
            this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
            memSegments, ioManager);
    join.open(buildInput, new UniformIntPairGenerator(NUM_PROBE_KEYS, NUM_PROBE_VALS, true));

    int expectedNumResults = (Math.min(NUM_PROBE_KEYS, NUM_BUILD_KEYS) * NUM_BUILD_VALS)
            * NUM_PROBE_VALS;

    final IntPair recordReuse = new IntPair();
    int numRecordsInJoinResult = 0;

    while (join.nextRecord()) {
        HashBucketIterator<IntPair, IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", expectedNumResults, numRecordsInJoinResult);

    join.close();
    this.memManager.release(join.getFreedMemory());
}
Example 11: validateSpillingDuringInsertionIntPair
import org.apache.flink.runtime.operators.hash.MutableHashTable.HashBucketIterator; // import of the required package/class

@Test
public void validateSpillingDuringInsertionIntPair() throws IOException, MemoryAllocationException
{
    final int NUM_BUILD_KEYS = 500000;
    final int NUM_BUILD_VALS = 1;
    final int NUM_PROBE_KEYS = 10;
    final int NUM_PROBE_VALS = 1;

    MutableObjectIterator<IntPair> buildInput = new UniformIntPairGenerator(NUM_BUILD_KEYS, NUM_BUILD_VALS, false);

    // allocate the memory for the HashTable
    List<MemorySegment> memSegments;
    try {
        memSegments = this.memManager.allocatePages(MEM_OWNER, 85);
    }
    catch (MemoryAllocationException maex) {
        fail("Memory for the Join could not be provided.");
        return;
    }

    final MutableHashTable<IntPair, IntPair> join = new MutableHashTable<IntPair, IntPair>(
            this.pairBuildSideAccesssor, this.pairProbeSideAccesssor,
            this.pairBuildSideComparator, this.pairProbeSideComparator, this.pairComparator,
            memSegments, ioManager);
    join.open(buildInput, new UniformIntPairGenerator(NUM_PROBE_KEYS, NUM_PROBE_VALS, true));

    final IntPair recordReuse = new IntPair();
    int numRecordsInJoinResult = 0;
    int expectedNumResults = (Math.min(NUM_PROBE_KEYS, NUM_BUILD_KEYS) * NUM_BUILD_VALS)
            * NUM_PROBE_VALS;

    while (join.nextRecord()) {
        HashBucketIterator<IntPair, IntPair> buildSide = join.getBuildSideIterator();
        while (buildSide.next(recordReuse) != null) {
            numRecordsInJoinResult++;
        }
    }
    Assert.assertEquals("Wrong number of records in join result.", expectedNumResults, numRecordsInJoinResult);

    join.close();
    this.memManager.release(join.getFreedMemory());
}