This article collects typical usage examples of TFileImpl.DefaultTFileImpl in Java, from the class com.datatorrent.contrib.hdht.tfile.TFileImpl. (Strictly speaking, DefaultTFileImpl is a nested class of TFileImpl; the examples below invoke its constructor.) If you have been wondering what TFileImpl.DefaultTFileImpl is for and how to use it, the curated code examples here may help. You can also explore further usage examples of the enclosing class, com.datatorrent.contrib.hdht.tfile.TFileImpl.
Below are 14 code examples of TFileImpl.DefaultTFileImpl, sorted by popularity by default.
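Nearly every example below follows the same setup pattern: instantiate TFileImpl.DefaultTFileImpl, point it at a base path, and hand it to an HDHT store as its file store. Here is that pattern distilled into a minimal sketch; the base path literal is a placeholder (the tests below use testMeta.getDir()), and the store type is simply the one most of the examples use.

import com.datatorrent.contrib.hdht.tfile.TFileImpl;

// Minimal sketch of the recurring setup pattern shared by the examples below.
TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
hdsFile.setBasePath("/tmp/hdht");  // placeholder directory for the TFile data
AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();
store.setFileStore(hdsFile);  // DefaultTFileImpl provides the store's on-disk file access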
Example 1: setupStore
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
public TestStoreHDHT setupStore(TestInfo testMeta)
{
  String eventSchemaString = SchemaUtils.jarResourceFileToString(configureFile);
  String basePath = testMeta.getDir();
  TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
  hdsFile.setBasePath(basePath);
  store = new TestStoreHDHT();
  store.setCacheWindowDuration(2);
  store.setConfigurationSchemaJSON(eventSchemaString);
  store.setFileStore(hdsFile);
  store.setFlushIntervalCount(1);
  store.setFlushSize(0);
  store.setup(new OperatorContextTestHelper.TestIdOperatorContext(1, new DefaultAttributeMap()));
  return store;
}
Example 2: serializationTest
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
@Test
public void serializationTest() throws Exception
{
  String eventSchemaString = SchemaUtils.jarResourceFileToString("dimensionsTestSchema.json");
  String basePath = testMeta.getDir();
  TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
  hdsFile.setBasePath(basePath);
  AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();
  store.setConfigurationSchemaJSON(eventSchemaString);
  store.setFileStore(hdsFile);
  store.setFlushIntervalCount(1);
  store.setFlushSize(0);
  store.setup(new OperatorContextTestHelper.TestIdOperatorContext(1, new DefaultAttributeMap()));
  // The operator must survive a Kryo round trip, as happens during engine checkpointing.
  KryoCloneUtils.cloneObject(new Kryo(), store);
  store.beginWindow(0L);
  store.endWindow();
  store.teardown();
}
Example 3: storeFormatTest
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
@Test
public void storeFormatTest() throws Exception
{
  String eventSchemaString = SchemaUtils.jarResourceFileToString("dimensionsTestSchema.json");
  String basePath = testMeta.getDir();
  TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
  hdsFile.setBasePath(basePath);
  AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();
  store.setConfigurationSchemaJSON(eventSchemaString);
  store.setFileStore(hdsFile);
  store.setFlushIntervalCount(1);
  store.setFlushSize(0);
  long windowId = 0L;
  store.setup(new OperatorContextTestHelper.TestIdOperatorContext(1, new DefaultAttributeMap()));
  store.beginWindow(windowId);
  store.endWindow();
  store.checkpointed(windowId);
  store.committed(windowId);
  windowId++;
  store.beginWindow(windowId);
  // After the first committed window, the persisted store format version must be readable.
  byte[] storeFormat = store.load(AppDataSingleSchemaDimensionStoreHDHT.DEFAULT_BUCKET_ID,
      DimensionsStoreHDHT.STORE_FORMAT_KEY);
  Assert.assertEquals(DimensionsStoreHDHT.STORE_FORMAT_VERSION, GPOUtils.deserializeInt(storeFormat));
  store.endWindow();
  store.teardown();
}
Example 4: testTFile
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
private void testTFile(String compression) throws IOException
{
  TFileImpl timpl = new TFileImpl.DefaultTFileImpl();
  timpl.setCompressName(compression);
  writeFile(0, timpl, "TFileUnit" + compression);
  testSeqRead(0, timpl, "TFileUnit" + compression);
  testRandomRead(0, timpl, "TFileUnit" + compression);
}
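Example 4 is a private helper parameterized by the compression codec name; its callers are not shown here. As a hedged usage sketch, invocations would presumably look like the following, using codec names Hadoop's TFile understands ("none" and "gz" are built in; "lzo" additionally requires the native codec):

// Hypothetical invocations of the helper above, one per codec.
testTFile("none");
testTFile("gz");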
Example 5: testDefaultTFileHDSFileAccess
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
@Test
public void testDefaultTFileHDSFileAccess() throws Exception
{
  // Create DefaultTFileImpl
  TFileImpl timpl = new TFileImpl.DefaultTFileImpl();
  testHDSFileAccess(timpl);
}
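TFileImpl also provides a second nested implementation, TFileImpl.DTFileImpl, which reads through DTFile's cached reader. A companion test could reuse the same helper; the following is a sketch, assuming DTFileImpl is available in your Malhar version:

@Test
public void testDTFileHDSFileAccess() throws Exception
{
  // Same access checks, against the DTFile-based implementation.
  TFileImpl timpl = new TFileImpl.DTFileImpl();
  testHDSFileAccess(timpl);
}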
Example 6: simpleQueueManagerTest
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
@Test
public void simpleQueueManagerTest() throws Exception
{
  final int numQueries = 3;
  String eventSchemaString = SchemaUtils.jarResourceFileToString("dimensionsTestSchema.json");
  String basePath = testMeta.getDir();
  TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
  hdsFile.setBasePath(basePath);
  AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();
  store.setCacheWindowDuration(2);
  store.setConfigurationSchemaJSON(eventSchemaString);
  store.setFileStore(hdsFile);
  store.setFlushIntervalCount(1);
  store.setFlushSize(0);
  store.setup(new OperatorContextTestHelper.TestIdOperatorContext(1, new DefaultAttributeMap()));
  DimensionalConfigurationSchema eventSchema = store.configurationSchema;
  DimensionsQueueManager dqm = new DimensionsQueueManager(store, store.schemaRegistry);
  Map<String, Set<String>> fieldToAggregator = Maps.newHashMap();
  fieldToAggregator.put("impressions", Sets.newHashSet("SUM"));
  fieldToAggregator.put("cost", Sets.newHashSet("SUM"));
  FieldsAggregatable fieldsAggregatable = new FieldsAggregatable(fieldToAggregator);
  GPOMutable key = AppDataSingleSchemaDimensionStoreHDHTTest.createQueryKey(eventSchema,
      "google",
      "safeway");
  DataQueryDimensional dqd = new DataQueryDimensional("1",
      DataQueryDimensional.TYPE,
      numQueries,
      TimeBucket.MINUTE,
      key,
      fieldsAggregatable,
      true);
  LOG.debug("{}", dqd.getDimensionsDescriptor());
  LOG.debug("{}", ((DimensionalSchema)store.schemaRegistry.getSchema(dqd.getSchemaKeys()))
      .getDimensionalConfigurationSchema().getDimensionsDescriptorToID());
  dqm.enqueue(dqd, null, null);
  Assert.assertEquals(numQueries, store.getQueries().size());
}
Example 7: simpleQueueManagerTestCustomTimeBucket
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
@Test
public void simpleQueueManagerTestCustomTimeBucket() throws Exception
{
  final int numQueries = 3;
  String eventSchemaString = SchemaUtils.jarResourceFileToString("dimensionsTestSchemaCustomTimeBucket.json");
  String basePath = testMeta.getDir();
  TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
  hdsFile.setBasePath(basePath);
  AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();
  store.setCacheWindowDuration(2);
  store.setConfigurationSchemaJSON(eventSchemaString);
  store.setFileStore(hdsFile);
  store.setFlushIntervalCount(1);
  store.setFlushSize(0);
  store.setUseSystemTimeForLatestTimeBuckets(false);
  store.setMinTimestamp(600000L);
  store.setMaxTimestamp(1000000L);
  store.setup(new OperatorContextTestHelper.TestIdOperatorContext(1, new DefaultAttributeMap()));
  DimensionalConfigurationSchema eventSchema = store.configurationSchema;
  DimensionsQueueManager dqm = new DimensionsQueueManager(store, store.schemaRegistry);
  Map<String, Set<String>> fieldToAggregator = Maps.newHashMap();
  fieldToAggregator.put("impressions", Sets.newHashSet("SUM"));
  fieldToAggregator.put("cost", Sets.newHashSet("SUM"));
  FieldsAggregatable fieldsAggregatable = new FieldsAggregatable(fieldToAggregator);
  GPOMutable key = AppDataSingleSchemaDimensionStoreHDHTTest.createQueryKey(eventSchema,
      "google",
      "safeway");
  DataQueryDimensional dqd = new DataQueryDimensional("1",
      DataQueryDimensional.TYPE,
      numQueries,
      TimeBucket.MINUTE,
      key,
      fieldsAggregatable,
      true);
  LOG.debug("{}", dqd.getDimensionsDescriptor());
  LOG.debug("{}", ((DimensionalSchema)store.schemaRegistry.getSchema(dqd.getSchemaKeys()))
      .getDimensionalConfigurationSchema().getDimensionsDescriptorToID());
  dqm.enqueue(dqd, null, null);
  QueryBundle<DataQueryDimensional, QueryMeta, MutableLong> qb = dqm.dequeue();
  for (Map<String, EventKey> eventKeys : qb.getMetaQuery().getEventKeys()) {
    Assert.assertEquals(0, eventKeys.get("SUM").getDimensionDescriptorID());
  }
  Assert.assertEquals(numQueries, store.getQueries().size());
}
Example 8: simpleRollingQueueManagerTest
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
@Test
public void simpleRollingQueueManagerTest() throws Exception
{
  final int numQueries = 3;
  final int rollingCount = 5;
  // A sliding aggregate of size 5 over 3 result windows needs 3 + 5 - 1 = 7 underlying HDHT queries.
  final int hdhtQueryCount = 7;
  String eventSchemaString = SchemaUtils.jarResourceFileToString("dimensionsTestSchema.json");
  String basePath = testMeta.getDir();
  TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
  hdsFile.setBasePath(basePath);
  AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();
  store.setCacheWindowDuration(2);
  store.setConfigurationSchemaJSON(eventSchemaString);
  store.setFileStore(hdsFile);
  store.setFlushIntervalCount(1);
  store.setFlushSize(0);
  store.setup(new OperatorContextTestHelper.TestIdOperatorContext(1, new DefaultAttributeMap()));
  DimensionalConfigurationSchema eventSchema = store.configurationSchema;
  DimensionsQueueManager dqm = new DimensionsQueueManager(store, store.schemaRegistry);
  Map<String, Set<String>> fieldToAggregator = Maps.newHashMap();
  fieldToAggregator.put("impressions", Sets.newHashSet("SUM"));
  fieldToAggregator.put("cost", Sets.newHashSet("SUM"));
  FieldsAggregatable fieldsAggregatable = new FieldsAggregatable(fieldToAggregator);
  GPOMutable key = AppDataSingleSchemaDimensionStoreHDHTTest.createQueryKey(eventSchema,
      "google",
      "safeway");
  DataQueryDimensional dqd = new DataQueryDimensional("1",
      DataQueryDimensional.TYPE,
      numQueries,
      TimeBucket.MINUTE,
      key,
      fieldsAggregatable,
      true);
  dqd.setSlidingAggregateSize(rollingCount);
  dqm.enqueue(dqd, null, null);
  Assert.assertEquals(hdhtQueryCount, store.getQueries().size());
}
Example 9: queryStarQueueManagerTest
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
@Test
public void queryStarQueueManagerTest() throws Exception
{
  String eventSchemaString = SchemaUtils.jarResourceFileToString("dimensionsTestSchema.json");
  String basePath = testMeta.getDir();
  TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
  hdsFile.setBasePath(basePath);
  AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();
  store.setCacheWindowDuration(2);
  store.setConfigurationSchemaJSON(eventSchemaString);
  store.setFileStore(hdsFile);
  store.setFlushIntervalCount(1);
  store.setFlushSize(0);
  store.setup(new OperatorContextTestHelper.TestIdOperatorContext(1, new DefaultAttributeMap()));
  DimensionalConfigurationSchema eventSchema = store.configurationSchema;
  @SuppressWarnings("unchecked")
  DimensionsQueueManager dqm = new DimensionsQueueManager(store, store.schemaRegistry,
      new SimpleDataQueryDimensionalExpander((Map)store.seenEnumValues));
  Map<String, Set<String>> fieldToAggregator = Maps.newHashMap();
  fieldToAggregator.put("impressions", Sets.newHashSet("SUM"));
  fieldToAggregator.put("cost", Sets.newHashSet("SUM"));
  FieldsAggregatable fieldsAggregatable = new FieldsAggregatable(fieldToAggregator);
  GPOMutable key = AppDataSingleSchemaDimensionStoreHDHTTest.createQueryKey(eventSchema,
      "google",
      "safeway");
  // Empty value sets act as wildcards ("star" queries), expanded over the enum values the store has seen.
  Map<String, Set<Object>> keyToValues = Maps.newHashMap();
  keyToValues.put("publisher", Sets.newHashSet());
  keyToValues.put("advertiser", Sets.newHashSet());
  DataQueryDimensional dqd = new DataQueryDimensional("1",
      DataQueryDimensional.TYPE,
      1,
      new CustomTimeBucket(TimeBucket.MINUTE),
      key.getFieldDescriptor(),
      keyToValues,
      fieldsAggregatable,
      true);
  dqm.enqueue(dqd, null, null);
  Assert.assertEquals(9, store.getQueries().size());
}
Example 10: storeWindowIDTest
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
@Test
public void storeWindowIDTest()
{
  String eventSchemaString = SchemaUtils.jarResourceFileToString("dimensionsTestSchema.json");
  String basePath = testMeta.getDir();
  TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
  hdsFile.setBasePath(basePath);
  AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();
  store.setConfigurationSchemaJSON(eventSchemaString);
  store.setFileStore(hdsFile);
  store.setFlushIntervalCount(1);
  store.setFlushSize(0);
  long windowId = 0L;
  store.setup(new OperatorContextTestHelper.TestIdOperatorContext(1, new DefaultAttributeMap()));
  store.beginWindow(windowId);
  // Nothing has been committed yet, so no window ID is persisted.
  byte[] windowIDBytes = store.load(AppDataSingleSchemaDimensionStoreHDHT.DEFAULT_BUCKET_ID,
      DimensionsStoreHDHT.WINDOW_ID_KEY);
  Assert.assertArrayEquals(null, windowIDBytes);
  store.endWindow();
  store.checkpointed(windowId);
  store.committed(windowId);
  for (int windowCounter = 0; windowCounter < 2; windowCounter++) {
    windowId++;
    store.beginWindow(windowId);
    // Each committed window persists its ID; the previous window's ID should now be readable.
    windowIDBytes = store.load(AppDataSingleSchemaDimensionStoreHDHT.DEFAULT_BUCKET_ID,
        DimensionsStoreHDHT.WINDOW_ID_KEY);
    Assert.assertEquals(windowId - 1L, GPOUtils.deserializeLong(windowIDBytes));
    store.endWindow();
    store.checkpointed(windowId);
    store.committed(windowId);
  }
  store.teardown();
}
Example 11: dataSerializationTest
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
@Test
public void dataSerializationTest()
{
  final String publisher = "google";
  final String advertiser = "safeway";
  final long impressions = 10L;
  final double cost = 1.0;
  String eventSchemaString = SchemaUtils.jarResourceFileToString("dimensionsTestSchema.json");
  String basePath = testMeta.getDir();
  TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
  hdsFile.setBasePath(basePath);
  AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();
  store.setConfigurationSchemaJSON(eventSchemaString);
  store.setFileStore(hdsFile);
  store.setup(new OperatorContextTestHelper.TestIdOperatorContext(1, new DefaultAttributeMap()));
  // Aggregate event
  DimensionalConfigurationSchema eventSchema = store.configurationSchema;
  Aggregate ae = createEvent(eventSchema,
      publisher,
      advertiser,
      60000L,
      TimeBucket.MINUTE,
      impressions,
      cost);
  // Key bytes
  byte[] keyBytes = store.getKeyBytesGAE(ae);
  // Value bytes
  byte[] valueBytes = store.getValueBytesGAE(ae);
  Aggregate deserializedAE = store.fromKeyValueGAE(new Slice(keyBytes), valueBytes);
  deserializedAE.getEventKey().getKey().setFieldDescriptor(ae.getEventKey().getKey().getFieldDescriptor());
  deserializedAE.getAggregates().setFieldDescriptor(ae.getAggregates().getFieldDescriptor());
  Assert.assertEquals("Test aggregates", ae.getAggregates(), deserializedAE.getAggregates());
  Assert.assertEquals("event keys must be equal", ae.getEventKey(), deserializedAE.getEventKey());
  store.beginWindow(0L);
  store.endWindow();
  store.teardown();
}
Example 12: readTestHelper
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
private void readTestHelper(boolean useHDHTPut) throws Exception
{
  final String publisher = "google";
  final String advertiser = "safeway";
  final long impressions = 10L;
  final double cost = 1.0;
  String eventSchemaString = SchemaUtils.jarResourceFileToString("dimensionsTestSchema.json");
  String basePath = testMeta.getDir();
  TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
  hdsFile.setBasePath(basePath);
  AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();
  store.setConfigurationSchemaJSON(eventSchemaString);
  store.setFileStore(hdsFile);
  store.setFlushIntervalCount(1);
  store.setFlushSize(0);
  long windowId = 0;
  store.setup(new OperatorContextTestHelper.TestIdOperatorContext(1, new DefaultAttributeMap()));
  // STARTING WINDOW 0
  store.beginWindow(windowId);
  DimensionalConfigurationSchema eventSchema = store.configurationSchema;
  // Aggregate event
  Aggregate ae = createEvent(eventSchema,
      publisher,
      advertiser,
      60000L,
      TimeBucket.MINUTE,
      impressions,
      cost);
  if (!useHDHTPut) {
    store.input.put(ae);
    Assert.assertEquals("The item must be in the cache.", ae, store.cache.get(ae.getEventKey()));
  } else {
    store.put(AppDataSingleSchemaDimensionStoreHDHT.DEFAULT_BUCKET_ID,
        new Slice(store.getKeyBytesGAE(ae)),
        store.getValueBytesGAE(ae));
    Assert.assertEquals("The item must be in the cache.", ae, store.load(ae.getEventKey()));
  }
  store.endWindow();
  store.checkpointed(windowId);
  store.committed(windowId);
  // STARTING WINDOW 1
  windowId++;
  store.beginWindow(windowId);
  store.endWindow();
  store.checkpointed(windowId);
  store.committed(windowId);
  // STARTING WINDOW 2
  windowId++;
  store.beginWindow(windowId);
  byte[] keyBytes = store.getKeyBytesGAE(ae);
  byte[] valueBytes =
      store.getUncommitted(AppDataSingleSchemaDimensionStoreHDHT.DEFAULT_BUCKET_ID, new Slice(keyBytes));
  if (valueBytes == null) {
    valueBytes = store.get(AppDataSingleSchemaDimensionStoreHDHT.DEFAULT_BUCKET_ID, new Slice(keyBytes));
  }
  LOG.debug("value bytes size {}", valueBytes.length);
  Aggregate aeDeserialized = store.fromKeyValueGAE(new Slice(keyBytes), valueBytes);
  aeDeserialized.getKeys().setFieldDescriptor(ae.getKeys().getFieldDescriptor());
  aeDeserialized.getAggregates().setFieldDescriptor(ae.getAggregates().getFieldDescriptor());
  Assert.assertEquals("The values must be equal", ae, aeDeserialized);
  store.endWindow();
  store.teardown();
}
Example 13: cacheFlushTest
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
@Test
public void cacheFlushTest()
{
  final String publisher = "google";
  final String advertiser = "safeway";
  final long impressions = 10L;
  final double cost = 1.0;
  String eventSchemaString = SchemaUtils.jarResourceFileToString("dimensionsTestSchema.json");
  String basePath = testMeta.getDir();
  TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
  hdsFile.setBasePath(basePath);
  AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();
  store.setCacheWindowDuration(1);
  store.setConfigurationSchemaJSON(eventSchemaString);
  store.setFileStore(hdsFile);
  store.setFlushIntervalCount(1);
  store.setFlushSize(0);
  store.setup(new OperatorContextTestHelper.TestIdOperatorContext(1, new DefaultAttributeMap()));
  DimensionalConfigurationSchema eventSchema = store.configurationSchema;
  // Aggregate event
  Aggregate ae = createEvent(eventSchema,
      publisher,
      advertiser,
      60000L,
      TimeBucket.MINUTE,
      impressions,
      cost);
  long windowId = 0L;
  store.beginWindow(windowId);
  Assert.assertEquals(0, store.cache.size());
  store.input.put(ae);
  Assert.assertEquals(1, store.cache.size());
  store.endWindow();
  store.checkpointed(windowId);
  store.committed(windowId);
  windowId++;
  store.beginWindow(windowId);
  // With a cache window duration of 1, the cache is flushed after each window,
  // but the flushed event remains loadable from the backing store.
  Assert.assertEquals(0, store.cache.size());
  Assert.assertEquals(ae, store.load(ae.getEventKey()));
  store.endWindow();
  store.checkpointed(windowId);
  store.committed(windowId);
  windowId++;
  store.teardown();
}
Example 14: testResponseDelayMillis
import com.datatorrent.contrib.hdht.tfile.TFileImpl; // import the package/class this method depends on
@Test
public void testResponseDelayMillis()
{
  String eventSchemaString = SchemaUtils.jarResourceFileToString("dimensionsTestSchema.json");
  String basePath = testMeta.getDir();
  TFileImpl hdsFile = new TFileImpl.DefaultTFileImpl();
  hdsFile.setBasePath(basePath);
  AppDataSingleSchemaDimensionStoreHDHT store = new AppDataSingleSchemaDimensionStoreHDHT();
  store.setCacheWindowDuration(2);
  store.setConfigurationSchemaJSON(eventSchemaString);
  store.setFileStore(hdsFile);
  store.setFlushIntervalCount(1);
  store.setFlushSize(0);
  store.queryResult.setSink((Sink)resultSink);
  //AttributeMap attributeMap = new StreamContext(id);
  final OperatorContext context = new com.datatorrent.stram.engine.OperatorContext(1, null, null);
  store.setup(context);
  long windowId = 1;
  store.beginWindow(windowId);
  doSchemaQuery(store);
  try {
    Thread.sleep(100);
  } catch (Exception e) {
    // ignore
  }
  store.endWindow();
  // Get the result from the result sink and extract the responseDelayMillis field.
  String result = (String)resultSink.tuple;
  int offset = result.indexOf(FIELD_responseDelayMillis);
  Assert.assertTrue(offset > 0);
  String subResult = result.substring(offset);
  offset = subResult.indexOf(",");
  int delayMillis = Integer.valueOf(subResult.substring(FIELD_responseDelayMillis.length(), offset));
  Assert.assertTrue(delayMillis == WindowUtils.getAppWindowDurationMs(context));
}