

Java Struct Class Code Examples

This article collects typical usage examples of the Java class org.apache.kafka.connect.data.Struct. If you are wondering what the Struct class is for, how to use it, or what real-world usage looks like, the curated examples below should help.


The Struct class belongs to the org.apache.kafka.connect.data package. The sections below present 15 code examples of the class, ordered by popularity.
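
Before diving into the examples, here is a minimal sketch of the typical Struct workflow: declare a Schema with SchemaBuilder, fill the fields with put(), call validate(), and read values back through the typed getters. The class and field names below are invented for illustration and do not come from any of the projects cited later.

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;

public class StructBasics {
    public static void main(String[] args) {
        // Every Struct is bound to a Schema that declares its fields.
        Schema userSchema = SchemaBuilder.struct()
                .name("com.example.User")              // invented schema name
                .field("name", Schema.STRING_SCHEMA)
                .field("age", Schema.INT32_SCHEMA)
                .build();

        // put() returns the Struct itself, so calls can be chained.
        Struct user = new Struct(userSchema)
                .put("name", "alice")
                .put("age", 30);

        // Throws DataException if a required field is missing or mistyped.
        user.validate();

        // Typed getters mirror the schema types.
        System.out.println(user.getString("name") + " is " + user.getInt32("age"));
    }
}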

Example 1: delete

import org.apache.kafka.connect.data.Struct; // import the required package/class
@Test
public void delete() {
  final Schema keySchema = SchemaBuilder.struct()
      .field("id", Schema.STRING_SCHEMA)
      .build();
  final Struct key = new Struct(keySchema)
      .put("id", "asdf");
  final SinkRecord record = new SinkRecord(
      "testing",
      1,
      keySchema,
      key,
      null,   // null value schema
      null,   // null value: a tombstone record, which the sink treats as a delete
      123L
  );

  this.task.put(Arrays.asList(record));
}
 
Developer: jcustenborder, Project: kafka-connect-cassandra, Lines: 20, Source: CassandraSinkTaskTest.java

Example 2: readerWithProjection

import org.apache.kafka.connect.data.Struct; // import the required package/class
@Test(expected = DataException.class)
public void readerWithProjection() throws Throwable {
    Map<String, Object> cfg = new HashMap<String, Object>() {{
        put(ParquetFileReader.FILE_READER_PARQUET_PROJECTION, projectionSchema.toString());
        put(AgnosticFileReader.FILE_READER_AGNOSTIC_EXTENSIONS_PARQUET, getFileExtension());
    }};
    reader = getReader(FileSystem.newInstance(fsUri, new Configuration()), dataFile, cfg);
    while (reader.hasNext()) {
        Struct record = reader.next();
        assertNotNull(record.schema().field(FIELD_INDEX));
        assertNotNull(record.schema().field(FIELD_NAME));
        assertNull(record.schema().field(FIELD_SURNAME));
    }

    reader = getReader(FileSystem.newInstance(fsUri, new Configuration()), dataFile, cfg);
    readAllData();
}
 
Developer: mmolimar, Project: kafka-connect-fs, Lines: 18, Source: ParquetFileReaderTest.java
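
The projectionSchema used in this test is an Avro read schema that lists only the columns the Parquet reader should materialize; requesting a field outside the projection is what ultimately triggers the expected DataException. A hedged sketch of building such a projection with Avro's SchemaBuilder follows; the field names "index" and "name" are assumptions mirroring the test's FIELD_INDEX and FIELD_NAME constants.

import org.apache.avro.Schema;
import org.apache.avro.SchemaBuilder;

public class ProjectionSchemaSketch {
    // Keep only two of the file's columns; the test passes
    // projectionSchema.toString() as FILE_READER_PARQUET_PROJECTION.
    static Schema projectionSchema() {
        return SchemaBuilder.record("projection").fields()
                .requiredLong("index")   // assumed value of FIELD_INDEX
                .requiredString("name")  // assumed value of FIELD_NAME
                .endRecord();
    }
}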

Example 3: readAllDataWithoutHeader

import org.apache.kafka.connect.data.Struct; // import the required package/class
@Test
public void readAllDataWithoutHeader() throws Throwable {
    Path file = createDataFile(false);
    FileReader reader = getReader(fs, file, new HashMap<String, Object>() {{
        put(DelimitedTextFileReader.FILE_READER_DELIMITED_TOKEN, ",");
        put(DelimitedTextFileReader.FILE_READER_DELIMITED_HEADER, "false");
        put(AgnosticFileReader.FILE_READER_AGNOSTIC_EXTENSIONS_DELIMITED, getFileExtension());
    }});

    assertTrue(reader.hasNext());

    int recordCount = 0;
    while (reader.hasNext()) {
        Struct record = reader.next();
        checkData(record, recordCount);
        recordCount++;
    }
    assertEquals("The number of records in the file does not match", NUM_RECORDS, recordCount);

}
 
Developer: mmolimar, Project: kafka-connect-fs, Lines: 21, Source: DelimitedTextFileReaderTest.java

Example 4: simpleKeyValue

import org.apache.kafka.connect.data.Struct; // import the required package/class
@Test
public void simpleKeyValue() {
    Map<String, String> map = new HashMap<>();
    map.put(FluentdSourceConnectorConfig.FLUENTD_SCHEMAS_ENABLE, "true");
    FluentdSourceConnectorConfig config = new FluentdSourceConnectorConfig(map);
    EventEntry eventEntry = EventEntry.of(
            Instant.now(),
            ValueFactory.newMap(
                    ValueFactory.newString("message"),
                    ValueFactory.newString("This is a message."),
                    ValueFactory.newString("flag"),
                    ValueFactory.newBoolean(true)));

    MessagePackConverter converter = new MessagePackConverter(config);
    SourceRecord sourceRecord = converter.convert("topic", "tag", 0L, eventEntry);

    assertEquals(Schema.STRING_SCHEMA, sourceRecord.keySchema());
    assertEquals("tag", sourceRecord.key());
    assertEquals("topic", sourceRecord.valueSchema().name());
    Struct struct = (Struct) sourceRecord.value();
    assertEquals("This is a message.", struct.get("message"));
    assertTrue(struct.getBoolean("flag"));
}
 
Developer: fluent, Project: kafka-connect-fluentd, Lines: 24, Source: MessagePackConverterTest.java

Example 5: readerWithProjection

import org.apache.kafka.connect.data.Struct; // import the required package/class
@Test(expected = DataException.class)
public void readerWithProjection() throws Throwable {
    Map<String, Object> cfg = new HashMap<String, Object>() {{
        put(ParquetFileReader.FILE_READER_PARQUET_PROJECTION, projectionSchema.toString());
    }};
    reader = getReader(FileSystem.newInstance(fsUri, new Configuration()), dataFile, cfg);
    while (reader.hasNext()) {
        Struct record = reader.next();
        assertNotNull(record.schema().field(FIELD_INDEX));
        assertNotNull(record.schema().field(FIELD_NAME));
        assertNull(record.schema().field(FIELD_SURNAME));
    }

    reader = getReader(FileSystem.newInstance(fsUri, new Configuration()), dataFile, cfg);
    readAllData();
}
 
Developer: mmolimar, Project: kafka-connect-fs, Lines: 17, Source: ParquetFileReaderTest.java

Example 6: expectStart

import org.apache.kafka.connect.data.Struct; // import the required package/class
private void expectStart(final List<ConsumerRecord<String, byte[]>> preexistingRecords,
                         final Map<byte[], Struct> deserializations) throws Exception {
    storeLog.start();
    PowerMock.expectLastCall().andAnswer(new IAnswer<Object>() {
        @Override
        public Object answer() throws Throwable {
            for (ConsumerRecord<String, byte[]> rec : preexistingRecords)
                capturedConsumedCallback.getValue().onCompletion(null, rec);
            return null;
        }
    });
    for (Map.Entry<byte[], Struct> deserializationEntry : deserializations.entrySet()) {
        // Note null schema because default settings for internal serialization are schema-less
        EasyMock.expect(converter.toConnectData(EasyMock.eq(TOPIC), EasyMock.aryEq(deserializationEntry.getKey())))
                .andReturn(new SchemaAndValue(null, structToMap(deserializationEntry.getValue())));
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 18, Source: KafkaConfigBackingStoreTest.java

Example 7: readAllDataWithoutHeader

import org.apache.kafka.connect.data.Struct; // import the required package/class
@Test
public void readAllDataWithoutHeader() throws Throwable {
    Path file = createDataFile(false);
    FileReader reader = getReader(fs, file, new HashMap<String, Object>() {{
        put(DelimitedTextFileReader.FILE_READER_DELIMITED_TOKEN, ",");
        put(DelimitedTextFileReader.FILE_READER_DELIMITED_HEADER, "false");
    }});

    assertTrue(reader.hasNext());

    int recordCount = 0;
    while (reader.hasNext()) {
        Struct record = reader.next();
        checkData(record, recordCount);
        recordCount++;
    }
    assertEquals("The number of records in the file does not match", NUM_RECORDS, recordCount);

}
 
Developer: mmolimar, Project: kafka-connect-fs, Lines: 20, Source: DelimitedTextFileReaderTest.java

Example 8: defaultFieldNames

import org.apache.kafka.connect.data.Struct; // import the required package/class
@Test
public void defaultFieldNames() throws Throwable {
    Map<String, Object> customReaderCfg = new HashMap<>();
    reader = getReader(fs, dataFile, customReaderCfg);
    assertTrue(reader.getFilePath().equals(dataFile));

    assertTrue(reader.hasNext());

    int recordCount = 0;
    while (reader.hasNext()) {
        Struct record = reader.next();
        checkData(SequenceFileReader.FIELD_NAME_KEY_DEFAULT, SequenceFileReader.FIELD_NAME_VALUE_DEFAULT, record, recordCount);
        recordCount++;
    }
    assertEquals("The number of records in the file does not match", NUM_RECORDS, recordCount);
}
 
Developer: mmolimar, Project: kafka-connect-fs, Lines: 17, Source: SequenceFileReaderTest.java

Example 9: updateSchemaOfStruct

import org.apache.kafka.connect.data.Struct; // import the required package/class
@Test
public void updateSchemaOfStruct() {
    final String fieldName1 = "f1";
    final String fieldName2 = "f2";
    final String fieldValue1 = "value1";
    final int fieldValue2 = 1;
    final Schema schema = SchemaBuilder.struct()
                                  .name("my.orig.SchemaDefn")
                                  .field(fieldName1, Schema.STRING_SCHEMA)
                                  .field(fieldName2, Schema.INT32_SCHEMA)
                                  .build();
    final Struct value = new Struct(schema).put(fieldName1, fieldValue1).put(fieldName2, fieldValue2);

    final Schema newSchema = SchemaBuilder.struct()
                                  .name("my.updated.SchemaDefn")
                                  .field(fieldName1, Schema.STRING_SCHEMA)
                                  .field(fieldName2, Schema.INT32_SCHEMA)
                                  .build();

    Struct newValue = (Struct) SetSchemaMetadata.updateSchemaIn(value, newSchema);
    assertMatchingSchema(newValue, newSchema);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 23, Source: SetSchemaMetadataTest.java
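
updateSchemaIn, exercised above, is the helper SetSchemaMetadata uses to rewrap a Struct once its schema has been renamed. For context, here is a hedged sketch of performing the same rename through the public SMT; it assumes the connect-transforms artifact is on the classpath, and the topic, offset, and field values are invented.

import java.util.Collections;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.transforms.SetSchemaMetadata;

public class SetSchemaMetadataDemo {
    public static void main(String[] args) {
        Schema schema = SchemaBuilder.struct()
                .name("my.orig.SchemaDefn")
                .field("f1", Schema.STRING_SCHEMA)
                .build();
        Struct value = new Struct(schema).put("f1", "value1");
        SinkRecord record = new SinkRecord("topic", 0, null, null, schema, value, 42L);

        // Rename the value schema; internally the Struct is rewrapped
        // with the new schema via updateSchemaIn.
        SetSchemaMetadata.Value<SinkRecord> smt = new SetSchemaMetadata.Value<>();
        smt.configure(Collections.singletonMap("schema.name", "my.updated.SchemaDefn"));

        SinkRecord renamed = smt.apply(record);
        System.out.println(renamed.valueSchema().name()); // my.updated.SchemaDefn
        smt.close();
    }
}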

Example 10: verify

import org.apache.kafka.connect.data.Struct; // import the required package/class
private void verify(Set<Path> expectedFiles, Struct[] records, Schema schema) throws IOException {
  Path path = new Path(FileUtils.topicDirectory(url, topicsDir, TOPIC));
  FileStatus[] statuses = FileUtils.traverse(storage, path, new CommittedFileFilter());
  assertEquals(expectedFiles.size(), statuses.length);
  int index = 0;
  for (FileStatus status : statuses) {
    Path filePath = status.getPath();
    assertTrue(expectedFiles.contains(status.getPath()));
    Collection<Object> avroRecords = schemaFileReader.readData(conf, filePath);
    assertEquals(3, avroRecords.size());
    for (Object avroRecord: avroRecords) {
      assertEquals(avroData.fromConnectData(schema, records[index]), avroRecord);
    }
    index++;
  }
}
 
Developer: jiangxiluning, Project: kafka-connect-hdfs, Lines: 17, Source: TopicPartitionWriterTest.java

Example 11: prepareData

import org.apache.kafka.connect.data.Struct; // import the required package/class
private void prepareData(String topic, int partition) throws Exception {
  TopicPartition tp = new TopicPartition(topic, partition);
  DataWriter hdfsWriter = createWriter(context, avroData);
  hdfsWriter.recover(tp);
  String key = "key";
  Schema schema = createSchema();
  Struct record = createRecord(schema);

  Collection<SinkRecord> sinkRecords = new ArrayList<>();
  for (long offset = 0; offset < 7; offset++) {
    SinkRecord sinkRecord =
        new SinkRecord(topic, partition, Schema.STRING_SCHEMA, key, schema, record, offset);
    sinkRecords.add(sinkRecord);
  }
  hdfsWriter.write(sinkRecords);
  hdfsWriter.close(assignment);
  hdfsWriter.stop();
}
 
Developer: jiangxiluning, Project: kafka-connect-hdfs, Lines: 19, Source: ParquetHiveUtilTest.java

Example 12: envelope

import org.apache.kafka.connect.data.Struct; // import the required package/class
@Test
public void envelope() {
  final Envelope input = new Envelope(
      13246312L,
      true,
      "exchange",
      "routingKey"
  );

  final Struct actual = MessageConverter.envelope(input);
  assertNotNull(actual, "actual should not be null.");
  assertField(input.getDeliveryTag(), actual, MessageConverter.FIELD_ENVELOPE_DELIVERYTAG);
  assertField(input.getExchange(), actual, MessageConverter.FIELD_ENVELOPE_EXCHANGE);
  assertField(input.getRoutingKey(), actual, MessageConverter.FIELD_ENVELOPE_ROUTINGKEY);
  assertField(input.isRedeliver(), actual, MessageConverter.FIELD_ENVELOPE_ISREDELIVER);
}
 
Developer: jcustenborder, Project: kafka-connect-rabbitmq, Lines: 17, Source: MessageConverterTest.java

Example 13: applyWithSchema

import org.apache.kafka.connect.data.Struct; // import the required package/class
private R applyWithSchema(R record) {
    final Struct value = requireStruct(operatingValue(record), PURPOSE);

    Schema updatedSchema = schemaUpdateCache.get(value.schema());
    if (updatedSchema == null) {
        final SchemaBuilder builder = SchemaUtil.copySchemaBasics(value.schema(), SchemaBuilder.struct());
        Struct defaultValue = (Struct) value.schema().defaultValue();
        buildUpdatedSchema(value.schema(), "", builder, value.schema().isOptional(), defaultValue);
        updatedSchema = builder.build();
        schemaUpdateCache.put(value.schema(), updatedSchema);
    }

    final Struct updatedValue = new Struct(updatedSchema);
    buildWithSchema(value, "", updatedValue);
    return newRecord(record, updatedSchema, updatedValue);
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 17, Source: Flatten.java
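
applyWithSchema above is the schema-aware path of Kafka's Flatten transform: it recursively walks the nested value schema, builds a flattened schema (cached in schemaUpdateCache), and copies every leaf field into one flat Struct. A hedged usage sketch of the public Flatten.Value SMT follows; "delimiter" is the transform's own config key, while the topic and data values are invented.

import java.util.Collections;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.SchemaBuilder;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.transforms.Flatten;

public class FlattenDemo {
    public static void main(String[] args) {
        Schema addressSchema = SchemaBuilder.struct()
                .field("city", Schema.STRING_SCHEMA)
                .build();
        Schema schema = SchemaBuilder.struct()
                .field("name", Schema.STRING_SCHEMA)
                .field("address", addressSchema)
                .build();
        Struct value = new Struct(schema)
                .put("name", "alice")
                .put("address", new Struct(addressSchema).put("city", "Tokyo"));
        SinkRecord record = new SinkRecord("topic", 0, null, null, schema, value, 0L);

        // Nested fields are hoisted to the top level, joined by the delimiter.
        Flatten.Value<SinkRecord> smt = new Flatten.Value<>();
        smt.configure(Collections.singletonMap("delimiter", "_"));

        Struct flat = (Struct) smt.apply(record).value();
        System.out.println(flat.getString("address_city")); // Tokyo
        smt.close();
    }
}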

Example 14: putSafeWithNoPreviousValueIsPropagated

import org.apache.kafka.connect.data.Struct; // import the required package/class
@Test
public void putSafeWithNoPreviousValueIsPropagated() {
    final Converter converter = mock(Converter.class);
    final KafkaBasedLog<String, byte[]> kafkaBasedLog = mock(KafkaBasedLog.class);
    final KafkaStatusBackingStore store = new KafkaStatusBackingStore(new MockTime(), converter, STATUS_TOPIC, kafkaBasedLog);

    final byte[] value = new byte[0];

    final Capture<Struct> statusValueStruct = newCapture();
    converter.fromConnectData(eq(STATUS_TOPIC), anyObject(Schema.class), capture(statusValueStruct));
    EasyMock.expectLastCall().andReturn(value);

    kafkaBasedLog.send(eq("status-connector-" + CONNECTOR), eq(value), anyObject(Callback.class));
    expectLastCall();

    replayAll();

    final ConnectorStatus status = new ConnectorStatus(CONNECTOR, ConnectorStatus.State.FAILED, WORKER_ID, 0);
    store.putSafe(status);

    verifyAll();

    assertEquals(status.state().toString(), statusValueStruct.getValue().get(KafkaStatusBackingStore.STATE_KEY_NAME));
    assertEquals(status.workerId(), statusValueStruct.getValue().get(KafkaStatusBackingStore.WORKER_ID_KEY_NAME));
    assertEquals(status.generation(), statusValueStruct.getValue().get(KafkaStatusBackingStore.GENERATION_KEY_NAME));
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 27, Source: KafkaStatusBackingStoreTest.java

Example 15: apply

import org.apache.kafka.connect.data.Struct; // import the required package/class
@Override
public R apply(R record) {
    final Schema schema = operatingSchema(record);
    final Object value = operatingValue(record);

    if (schema == null) {
        return newRecord(record, null, Collections.singletonMap(fieldName, value));
    } else {
        Schema updatedSchema = schemaUpdateCache.get(schema);
        if (updatedSchema == null) {
            updatedSchema = SchemaBuilder.struct().field(fieldName, schema).build();
            schemaUpdateCache.put(schema, updatedSchema);
        }

        final Struct updatedValue = new Struct(updatedSchema).put(fieldName, value);

        return newRecord(record, updatedSchema, updatedValue);
    }
}
 
Developer: YMCoding, Project: kafka-0.11.0.0-src-with-comment, Lines: 20, Source: HoistField.java
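
apply here is the core of Kafka's HoistField transform: a schemaless value is wrapped in a singleton map, while a schema'd value is wrapped in a new single-field Struct whose schema is cached. A hedged usage sketch of the public HoistField.Value SMT follows; "field" is the transform's own config key, and the record contents are invented.

import java.util.Collections;

import org.apache.kafka.connect.data.Schema;
import org.apache.kafka.connect.data.Struct;
import org.apache.kafka.connect.sink.SinkRecord;
import org.apache.kafka.connect.transforms.HoistField;

public class HoistFieldDemo {
    public static void main(String[] args) {
        // A record whose value is a bare string rather than a Struct.
        SinkRecord record = new SinkRecord("topic", 0, null, null,
                Schema.STRING_SCHEMA, "hello", 0L);

        // Wrap the value in a single-field Struct named "line".
        HoistField.Value<SinkRecord> smt = new HoistField.Value<>();
        smt.configure(Collections.singletonMap("field", "line"));

        Struct hoisted = (Struct) smt.apply(record).value();
        System.out.println(hoisted.getString("line")); // hello
        smt.close();
    }
}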


Note: The org.apache.kafka.connect.data.Struct class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are excerpted from open-source projects contributed by their authors, and copyright remains with the original owners; consult the corresponding project's License before using or redistributing the code. Do not repost without permission.