当前位置: 首页>>代码示例>>Java>>正文


Java Type类代码示例

本文整理汇总了Java中org.apache.kudu.Type的典型用法代码示例。如果您正苦于以下问题:Java Type类的具体用法?Java Type怎么用?Java Type使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。


Type类属于org.apache.kudu包,在下文中一共展示了Type类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。

示例1: getSqlTypeFromKuduType

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Maps a Kudu column {@link Type} to the equivalent Calcite SQL type.
 *
 * @param typeFactory factory used to instantiate the SQL type
 * @param type        Kudu column type to convert
 * @return the corresponding {@link RelDataType}
 * @throws UnsupportedOperationException if the Kudu type has no SQL mapping
 */
private RelDataType getSqlTypeFromKuduType(RelDataTypeFactory typeFactory, Type type) {
  switch (type) {
  case BOOL:
    return typeFactory.createSqlType(SqlTypeName.BOOLEAN);
  case DOUBLE:
    return typeFactory.createSqlType(SqlTypeName.DOUBLE);
  case FLOAT:
    return typeFactory.createSqlType(SqlTypeName.FLOAT);
  case INT16:
  case INT32:
  case INT8:
    // Narrow integer types widen safely to a 32-bit SQL INTEGER.
    return typeFactory.createSqlType(SqlTypeName.INTEGER);
  case INT64:
    // BUG FIX: INT64 previously mapped to INTEGER (32-bit), silently
    // truncating the representable range; it must map to BIGINT.
    return typeFactory.createSqlType(SqlTypeName.BIGINT);
  case STRING:
    return typeFactory.createSqlType(SqlTypeName.VARCHAR);
  case UNIXTIME_MICROS:
    return typeFactory.createSqlType(SqlTypeName.TIMESTAMP);
  case BINARY:
    return typeFactory.createSqlType(SqlTypeName.VARBINARY, Integer.MAX_VALUE);
  default:
    // Include the offending type so the failure is diagnosable.
    throw new UnsupportedOperationException("Unsupported type: " + type);
  }
}
 
开发者ID:axbaretto,项目名称:drill,代码行数:24,代码来源:DrillKuduTable.java

示例2: convertFromKuduType

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Translates a Kudu column {@link Type} into the matching SDC {@link Field.Type}.
 *
 * @param kuduType Kudu column type to translate
 * @return the equivalent SDC field type
 * @throws UnsupportedOperationException for Kudu types with no SDC equivalent
 */
public static Field.Type convertFromKuduType(Type kuduType){
  switch(kuduType) {
    case INT8: return Field.Type.BYTE;
    case INT16: return Field.Type.SHORT;
    case INT32: return Field.Type.INTEGER;
    case INT64: return Field.Type.LONG;
    case FLOAT: return Field.Type.FLOAT;
    case DOUBLE: return Field.Type.DOUBLE;
    case BOOL: return Field.Type.BOOLEAN;
    case STRING: return Field.Type.STRING;
    case BINARY: return Field.Type.BYTE_ARRAY;
    // Microsecond timestamps surface as DATETIME fields on the SDC side.
    case UNIXTIME_MICROS: return Field.Type.DATETIME;
    default:
      throw new UnsupportedOperationException("Unknown data type: " + kuduType.getName());
  }
}
 
开发者ID:streamsets,项目名称:datacollector,代码行数:22,代码来源:KuduUtils.java

示例3: testNullColumnWillEndInError

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Ensure that if a given field is null and the column doesn't support that, the record will
 * end up in the error stream rather than terminating the whole pipeline execution.
 */
@Test
public void testNullColumnWillEndInError() throws Exception{
  // Target configured to divert unsupported operations to the error stream
  // instead of failing the pipeline.
  TargetRunner targetRunner = setTargetRunner(tableName, KuduOperationType.INSERT, UnsupportedOperationAction.SEND_TO_ERROR);
  targetRunner.runInit();

  // Build one record whose "value" and "name" fields are explicitly null.
  // NOTE(review): presumably those columns are non-nullable in the table set up
  // by setTargetRunner -- confirm against that helper.
  Record record =  RecordCreator.create();
  LinkedHashMap<String, Field> field = new LinkedHashMap<>();
  field.put("key", Field.create(1));
  field.put("value", Field.create(Field.Type.STRING, null));
  field.put("name", Field.create(Field.Type.STRING, null));
  record.set(Field.createListMap(field));

  try {
    targetRunner.runWrite(ImmutableList.of(record));

    // Exactly one record must land in the error stream; the write itself returns normally.
    List<Record> errors = targetRunner.getErrorRecords();
    Assert.assertEquals(1, errors.size());
  } finally {
    // Always release target resources, even if assertions fail.
    targetRunner.runDestroy();
  }
}
 
开发者ID:streamsets,项目名称:datacollector,代码行数:26,代码来源:TestKuduTarget.java

示例4: init

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Connects to the Kudu master, creates the benchmark table with three columns
 * (id/value/hash, range-partitioned on "id"), and opens a table handle plus a
 * write session for subsequent operations.
 *
 * @throws KuduException if connecting or creating the table fails
 */
public void init() throws KuduException {
    client = new KuduClient.KuduClientBuilder(KUDU_MASTER).build();
    // BUG FIX: was a raw-typed "new ArrayList(2)" although three columns are
    // added below; use the diamond operator and the correct initial capacity.
    List<ColumnSchema> columns = new ArrayList<>(3);
    columns.add(new ColumnSchema.ColumnSchemaBuilder("id", Type.INT32).key(true).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("value", Type.STRING).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("hash", Type.INT32).build());
    Schema schema = new Schema(columns);
    List<String> rangeColumns = new ArrayList<>(1);
    rangeColumns.add("id");
    client.createTable(KUDU_TABLE_NAME, schema, new CreateTableOptions().setRangePartitionColumns(rangeColumns));
    table = client.openTable(KUDU_TABLE_NAME);
    session = client.newSession();
}
 
开发者ID:xausky,项目名称:big-data-store-benchmark,代码行数:14,代码来源:KuduKeyValueTest.java

示例5: getType

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Resolves the Kudu column {@link Type} for a Drill {@link MajorType}.
 *
 * @param t Drill type to translate
 * @return the matching Kudu column type
 * @throws UserException (dataWriteError) for repeated modes or unmapped types
 */
private Type getType(MajorType t) {
  // Kudu has no array column type, so repeated Drill fields cannot be written.
  if (t.getMode() == DataMode.REPEATED) {
    throw UserException
        .dataWriteError()
        .message("Kudu does not support array types.")
        .build(logger);
  }

  switch (t.getMinorType()) {
  case BIT:
    return Type.BOOL;
  case INT:
    return Type.INT32;
  case BIGINT:
    return Type.INT64;
  case FLOAT4:
    return Type.FLOAT;
  case FLOAT8:
    return Type.DOUBLE;
  case VARCHAR:
    return Type.STRING;
  case VARBINARY:
    return Type.BINARY;
  case TIMESTAMP:
    return Type.UNIXTIME_MICROS;
  default:
    throw UserException
        .dataWriteError()
        .message("Data type: '%s' not supported in Kudu.", t.getMinorType().name())
        .build(logger);
  }
}
 
开发者ID:axbaretto,项目名称:drill,代码行数:34,代码来源:KuduRecordWriterImpl.java

示例6: createField

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Creates an SDC Field carrying the value of one column from a scanned row.
 *
 * @param result    row returned by a Kudu scan
 * @param fieldName name of the column/field to extract
 * @param type      Kudu type of the column
 * @return the populated field
 * @throws StageException if the Kudu type is unsupported (KUDU_10) or the
 *                        binary value cannot be read (KUDU_35, as a record error)
 */
public static Field createField(RowResult result, String fieldName, Type type) throws StageException {
  switch (type) {
    case BOOL:
      return Field.create(Field.Type.BOOLEAN, result.getBoolean(fieldName));
    case INT8:
      return Field.create(Field.Type.BYTE, result.getByte(fieldName));
    case INT16:
      return Field.create(Field.Type.SHORT, result.getShort(fieldName));
    case INT32:
      return Field.create(Field.Type.INTEGER, result.getInt(fieldName));
    case INT64:
      return Field.create(Field.Type.LONG, result.getLong(fieldName));
    case FLOAT:
      return Field.create(Field.Type.FLOAT, result.getFloat(fieldName));
    case DOUBLE:
      return Field.create(Field.Type.DOUBLE, result.getDouble(fieldName));
    case STRING:
      return Field.create(Field.Type.STRING, result.getString(fieldName));
    case BINARY:
      try {
        return Field.create(Field.Type.BYTE_ARRAY, result.getBinary(fieldName));
      } catch (IllegalArgumentException ex) {
        // Surface an unreadable binary value as a per-record error, not a stage failure.
        throw new OnRecordErrorException(Errors.KUDU_35, fieldName);
      }
    case UNIXTIME_MICROS:
      // Kudu stores microseconds since epoch; java.util.Date expects milliseconds.
      return Field.create(Field.Type.DATETIME, new Date(result.getLong(fieldName)/1000L));
    default:
      throw new StageException(Errors.KUDU_10, fieldName, type.getName());
  }
}
 
开发者ID:streamsets,项目名称:datacollector,代码行数:40,代码来源:KuduUtils.java

示例7: KuduRecordConverter

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Wires up the converter with immutable snapshots of the supplied mappings.
 *
 * @param columnsToFieldTypes column-name to SDC field-type mapping
 * @param fieldsToColumns     field-path to column-name mapping
 * @param schema              Kudu table schema used during conversion
 * @param converter           resolves field paths within a record
 */
public KuduRecordConverter(Map<String, Field.Type> columnsToFieldTypes, Map<String, String> fieldsToColumns,
                           Schema schema, FieldPathConverter converter) {
  this.schema = schema;
  this.fieldConverter = converter;
  // Defensive copies: the mappings cannot be mutated after construction.
  this.columnsToFieldTypes = ImmutableMap.copyOf(columnsToFieldTypes);
  this.fieldsToColumns = ImmutableMap.copyOf(fieldsToColumns);
}
 
开发者ID:streamsets,项目名称:datacollector,代码行数:8,代码来源:KuduRecordConverter.java

示例8: setup

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Builds a three-column sample schema and stubs out the Kudu client/session
 * plumbing with PowerMock so lookup tests never touch a live cluster.
 */
@Before
public void setup() {
  // Sample table and schema. BUG FIX: was a raw-typed "new ArrayList(2)"
  // although three columns are added; use the diamond operator and capacity 3.
  List<ColumnSchema> columns = new ArrayList<>(3);
  columns.add(new ColumnSchema.ColumnSchemaBuilder("key", Type.INT32).key(true).build());
  columns.add(new ColumnSchema.ColumnSchemaBuilder("value", Type.STRING).build());
  columns.add(new ColumnSchema.ColumnSchemaBuilder("name", Type.STRING).build());
  final Schema schema = new Schema(columns);

  // Mock KuduTable class.
  // NOTE(review): spy(mock(...)) is redundant -- a plain mock should suffice;
  // left as-is to avoid changing PowerMock interception behavior.
  KuduTable table = PowerMockito.spy(PowerMockito.mock(KuduTable.class));
  PowerMockito.suppress(PowerMockito.method(AsyncKuduClient.class, "getTablesList"));
  PowerMockito.when(table.getSchema()).thenReturn(schema);

  // Mock KuduSession class: suppress real apply/flush so no RPCs are issued.
  PowerMockito.suppress(PowerMockito.method(
      AsyncKuduSession.class,
      "apply",
      Operation.class
  ));
  PowerMockito.suppress(PowerMockito.method(
      AsyncKuduSession.class,
      "flush"
  ));
  // Suppress processor teardown so tests control the lifecycle themselves.
  PowerMockito.suppress(PowerMockito.method(
      KuduLookupProcessor.class,
      "destroy"
  ));
}
 
开发者ID:streamsets,项目名称:datacollector,代码行数:30,代码来源:TestKuduLookup.java

示例9: testUpdate

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * An UPDATE whose SHORT field is null must convert cleanly: a null value
 * means "column not set" and must not trigger a NullPointerException.
 */
@Test
public void testUpdate() throws Exception {
  record.set("/str", Field.create("val1"));
  record.set("/long", Field.create((long)10));
  // Null-valued field: the converter must skip it rather than dereference it.
  record.set("/short1", Field.create(Field.Type.SHORT, null));
  kuduRecordConverter.convert(record, partialRow, KuduOperationType.UPDATE.code); // must not throw NPE
}
 
开发者ID:streamsets,项目名称:datacollector,代码行数:8,代码来源:TestKuduRecordConverter.java

示例10: testDelete

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * A DELETE with a null non-key field must convert without NPE, and the
 * primary-key value must survive into the row's stringified key.
 */
@Test
public void testDelete() throws Exception {
  record.set("/str", Field.create("primary key"));
  record.set("/long", Field.create((long)10));
  // Null-valued field: must be tolerated during conversion.
  record.set("/short1", Field.create(Field.Type.SHORT, null));
  kuduRecordConverter.convert(record, partialRow, KuduOperationType.DELETE.code); // must not throw NPE
  // The key column ("/str" maps to the key, per the converter fixture) must be present in the row key.
  Assert.assertTrue(Utils.format("Message: {}", partialRow.stringifyRowKey()), partialRow.stringifyRowKey().contains("primary key"));
}
 
开发者ID:streamsets,项目名称:datacollector,代码行数:9,代码来源:TestKuduRecordConverter.java

示例11: initCols

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Walks the Kudu schema, allocating one Drill value vector per supported
 * column and recording projection metadata. Columns whose Kudu type has no
 * Drill mapping are logged with full context and skipped.
 *
 * @param schema Kudu table schema to project
 * @throws SchemaChangeException if the output mutator rejects a field
 */
private void initCols(Schema schema) throws SchemaChangeException {
  ImmutableList.Builder<ProjectedColumnInfo> infoBuilder = ImmutableList.builder();

  for (int colIdx = 0; colIdx < schema.getColumnCount(); colIdx++) {
    ColumnSchema column = schema.getColumnByIndex(colIdx);
    final String columnName = column.getName();
    final Type kuduType = column.getType();

    MinorType minorType = TYPES.get(kuduType);
    if (minorType == null) {
      // Unsupported Kudu type: warn (with column name and type) and move on.
      logger.warn("Ignoring column that is unsupported.", UserException
          .unsupportedError()
          .message(
              "A column you queried has a data type that is not currently supported by the Kudu storage plugin. "
                  + "The column's name was %s and its Kudu data type was %s. ",
              columnName, kuduType.toString())
          .addContext("column Name", columnName)
          .addContext("plugin", "kudu")
          .build(logger));
      continue;
    }

    // Nullability of the Kudu column determines the Drill data mode.
    MajorType majorType = column.isNullable()
        ? Types.optional(minorType)
        : Types.required(minorType);

    MaterializedField field = MaterializedField.create(columnName, majorType);
    final Class<? extends ValueVector> vectorClass =
        TypeHelper.getValueVectorClass(minorType, majorType.getMode());
    ValueVector vector = output.addField(field, vectorClass);
    vector.allocateNew();

    ProjectedColumnInfo info = new ProjectedColumnInfo();
    info.vv = vector;
    info.kuduColumn = column;
    info.index = colIdx;
    infoBuilder.add(info);
  }

  projectedCols = infoBuilder.build();
}
 
开发者ID:axbaretto,项目名称:drill,代码行数:44,代码来源:KuduRecordReader.java

示例12: createKuduTable

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Drops any existing table with the given name, creates a fresh five-column
 * table with the requested range partitioning and replication, inserts sample
 * rows, then scans the "float" column back and prints each row.
 *
 * @param tableName name of the table to (re)create
 * @param tablets   number of tablets; split rows are placed at multiples of 1000
 * @param replicas  replication factor for the table
 * @param rows      number of sample rows to insert
 * @throws Exception on any Kudu client failure
 */
public static void createKuduTable(String tableName, int tablets, int replicas, int rows) throws Exception {

  try (KuduClient client = new KuduClient.KuduClientBuilder(KUDU_MASTER).build()) {

    // Start from a clean slate: drop the table if a previous run left it behind.
    ListTablesResponse tables = client.getTablesList(tableName);
    if (!tables.getTablesList().isEmpty()) {
      client.deleteTable(tableName);
    }

    List<ColumnSchema> columns = new ArrayList<>(5);
    columns.add(new ColumnSchema.ColumnSchemaBuilder("key", Type.INT32).key(true).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("binary", Type.BINARY).nullable(false).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("boolean", Type.BOOL).nullable(true).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("float", Type.FLOAT).nullable(false).build());
    columns.add(new ColumnSchema.ColumnSchemaBuilder("string", Type.STRING).nullable(true).build());

    Schema schema = new Schema(columns);

    CreateTableOptions builder = new CreateTableOptions();
    builder.setNumReplicas(replicas);
    builder.setRangePartitionColumns(Arrays.asList("key"));
    for (int i = 1; i < tablets; i++) {
      PartialRow splitRow = schema.newPartialRow();
      splitRow.addInt("key", i*1000);
      builder.addSplitRow(splitRow);
    }

    client.createTable(tableName, schema, builder);

    KuduTable table = client.openTable(tableName);

    // BUG FIX: the session was never closed, leaking client-side buffers;
    // close it once all inserts are applied.
    KuduSession session = client.newSession();
    try {
      session.setFlushMode(SessionConfiguration.FlushMode.AUTO_FLUSH_SYNC);
      for (int i = 0; i < rows; i++) {
        Insert insert = table.newInsert();
        PartialRow row = insert.getRow();
        row.addInt(0, i);
        // NOTE(review): getBytes() uses the platform default charset here.
        row.addBinary(1, ("Row " + i).getBytes());
        row.addBoolean(2, i % 2 == 0);
        row.addFloat(3, i + 0.01f);
        row.addString(4, ("Row " + i));
        session.apply(insert);
      }
    } finally {
      session.close();
    }

    List<String> projectColumns = new ArrayList<>(1);
    projectColumns.add("float");
    // BUG FIX: the scanner was never closed; close it so the server can
    // release scanner resources promptly.
    KuduScanner scanner = client.newScannerBuilder(table)
        .setProjectedColumnNames(projectColumns)
        .build();
    try {
      while (scanner.hasMoreRows()) {
        RowResultIterator results = scanner.nextRows();
        while (results.hasNext()) {
          RowResult result = results.next();
          System.out.println(result.toStringLongFormat());
        }
      }
    } finally {
      scanner.close();
    }
  }
}
 
开发者ID:axbaretto,项目名称:drill,代码行数:59,代码来源:TestKuduConnect.java

示例13: testWriteShouldWriteToATableWithTheSameNameAsTheProperty

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * With kudu.table.field=_table the writer must route each record to the table
 * named by its "_table" field (never to the topic name), must skip records
 * whose table name matches kudu.table.filter ("e_2" here -- presumably a
 * substring/suffix match, so TABLE_2 is filtered; verify against KuduWriter),
 * and must not write the routing field itself as a column.
 */
@Test
public void testWriteShouldWriteToATableWithTheSameNameAsTheProperty() throws KuduException {
  // Target Kudu schema contains only the "id" column -- "_table" is metadata.
  org.apache.kudu.Schema kuduSchema = new org.apache.kudu.Schema(Arrays.asList(
    new ColumnSchema.ColumnSchemaBuilder("id", Type.STRING).build()
  ));
  Schema schema = SchemaBuilder.struct().name("record")
    .version(1)
    .field("id", STRING_SCHEMA)
    .field("_table", STRING_SCHEMA)
    .build();

  final String TABLE_1 = "table_1";
  final String TABLE_2 = "table_2";
  final String TABLE_3 = "table_3";

  // Three records routed to three different tables via their "_table" field.
  Struct struct1 = new Struct(schema)
    .put("id", "a")
    .put("_table", TABLE_1);
  SinkRecord record1 = new SinkRecord("topic", 1, STRING_SCHEMA, "key", struct1.schema(), struct1, 1);

  Struct struct2 = new Struct(schema)
    .put("id", "a")
    .put("_table", TABLE_2);
  SinkRecord record2 = new SinkRecord("topic", 1, STRING_SCHEMA, "key", struct2.schema(), struct2, 2);

  Struct struct3 = new Struct(schema)
    .put("id", "b")
    .put("_table", TABLE_3);
  SinkRecord record3 = new SinkRecord("topic", 1, STRING_SCHEMA, "key", struct3.schema(), struct3, 3);

  // Stub the Kudu client chain: session -> table -> upsert -> row.
  when(client.newSession()).thenReturn(session);
  when(session.apply(upsert)).thenReturn(operationResponse);
  when(client.openTable(TABLE_1)).thenReturn(table);
  when(table.getSchema()).thenReturn(kuduSchema);
  when(table.newUpsert()).thenReturn(upsert);
  when(upsert.getRow()).thenReturn(row);

  Map<String,String> props = new HashMap<String, String>();
  props.put("kudu.master", "0.0.0.0");
  props.put("kudu.table.field", "_table");
  // Filter value is the last three chars of TABLE_2 ("e_2").
  props.put("kudu.table.filter", TABLE_2.substring(TABLE_2.length() - 3, TABLE_2.length()));
  KuduSinkConfig config = new KuduSinkConfig(props);

  KuduWriter writer = new KuduWriter(config, client);

  writer.write(Arrays.asList(record1, record2, record3));

  // Routed tables are opened once each; the filtered table is never opened.
  verify(client, times(1)).openTable(TABLE_1);
  verify(client, never()).openTable(TABLE_2);
  verify(client, times(1)).openTable(TABLE_3);

  // The topic name must never be used as a table name when routing by field.
  verify(client, never()).openTable("topic");

  // Data column is written; the routing field is not.
  verify(row, times(1)).addString("id", "a");
  verify(row, never()).addString("_table", TABLE_1);
  verify(session, times(1)).flush();
}
 
开发者ID:onfocusio,项目名称:kafka-connect-kudu,代码行数:59,代码来源:KuduWriterTest.java

示例14: testWriteShouldInsertKeyFields

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * With key.insert=true the writer must copy the record-key struct's fields
 * ("ks", "ki") into the upsert row alongside the value fields.
 */
@Test
public void testWriteShouldInsertKeyFields() throws KuduException {
  // Target Kudu schema: two key-derived columns plus one value column.
  org.apache.kudu.Schema kuduSchema = new org.apache.kudu.Schema(Arrays.asList(
    new ColumnSchema.ColumnSchemaBuilder("ks", Type.STRING).build(),
    new ColumnSchema.ColumnSchemaBuilder("ki", Type.INT32).build(),
    new ColumnSchema.ColumnSchemaBuilder("int_field", Type.INT32).build()
  ));

  // Record key: a struct with one string and one int field.
  Schema keySchema = SchemaBuilder.struct().name("record")
    .version(1)
    .field("ks", STRING_SCHEMA)
    .field("ki", INT32_SCHEMA)
    .build();
  Struct key = new Struct(keySchema)
    .put("ks", "z")
    .put("ki", 9);

  // Record value: a single int field.
  Schema valueSchema = SchemaBuilder.struct().name("record")
    .version(1)
    .field("int_field", INT32_SCHEMA)
    .build();
  Struct value = new Struct(valueSchema)
    .put("int_field", 1);

  SinkRecord record = new SinkRecord("topic", 1, key.schema(), key, value.schema(), value, 1);

  // Stub the Kudu client chain: session -> table -> upsert -> row.
  when(client.newSession()).thenReturn(session);
  when(session.apply(upsert)).thenReturn(operationResponse);
  when(client.openTable("topic")).thenReturn(table);
  when(table.getSchema()).thenReturn(kuduSchema);
  when(table.newUpsert()).thenReturn(upsert);
  when(upsert.getRow()).thenReturn(row);

  Map<String,String> props = new HashMap<String, String>();
  props.put("kudu.master","0.0.0.0");
  props.put("key.insert", "true");
  KuduSinkConfig config = new KuduSinkConfig(props);

  KuduWriter writer = new KuduWriter(config, client);

  writer.write(Arrays.asList(record));

  // Without a table field, the topic name is used as the table name.
  verify(client, times(1)).openTable("topic");
  // Both key fields and the value field must be written to the row.
  verify(row, times(1)).addString("ks", "z");
  verify(row, times(1)).addInt("ki", 9);
  verify(row, times(1)).addInt("int_field", 1);
  verify(session, times(1)).flush();
}
 
开发者ID:onfocusio,项目名称:kafka-connect-kudu,代码行数:49,代码来源:KuduWriterTest.java

示例15: testWriteShouldHandleArrays

import org.apache.kudu.Type; //导入依赖的package包/类
/**
 * Array-valued key/value fields must be flattened onto numbered columns
 * ("k" -> k_1..k_3, "a" -> a_1..a_2); array elements with no corresponding
 * column in the target schema ("k_4", "a_3") must be silently dropped.
 */
@Test
public void testWriteShouldHandleArrays() throws KuduException {
  // Target Kudu schema: three flattened key columns and two flattened value columns.
  org.apache.kudu.Schema kuduSchema = new org.apache.kudu.Schema(Arrays.asList(
    new ColumnSchema.ColumnSchemaBuilder("k_1", Type.STRING).build(),
    new ColumnSchema.ColumnSchemaBuilder("k_2", Type.STRING).build(),
    new ColumnSchema.ColumnSchemaBuilder("k_3", Type.STRING).build(),
    new ColumnSchema.ColumnSchemaBuilder("a_1", Type.STRING).build(),
    new ColumnSchema.ColumnSchemaBuilder("a_2", Type.STRING).build()
  ));

  // Record key: a 4-element string array (one element more than the schema holds).
  Schema keySchema = SchemaBuilder.struct().name("record")
    .version(1)
    .field("k", SchemaBuilder.array(STRING_SCHEMA))
    .build();
  Struct keyStruct = new Struct(keySchema)
    .put("k", Arrays.asList(new String[] {"a", "b", "c", "d"}));


  // Record value: a 3-element string array (one element more than the schema holds).
  Schema valueSchema = SchemaBuilder.struct().name("record")
    .version(1)
    .field("a", SchemaBuilder.array(STRING_SCHEMA))
    .build();

  Struct valueStruct = new Struct(valueSchema)
    .put("a", Arrays.asList(new String[] {"e", "f", "g"}));


  SinkRecord record = new SinkRecord("topic", 1, keyStruct.schema(), keyStruct, valueStruct.schema(), valueStruct, 1);

  // Stub the Kudu client chain: session -> table -> upsert -> row.
  when(client.newSession()).thenReturn(session);
  when(session.apply(upsert)).thenReturn(operationResponse);
  when(client.openTable("topic")).thenReturn(table);
  when(table.getSchema()).thenReturn(kuduSchema);
  when(table.newUpsert()).thenReturn(upsert);
  when(upsert.getRow()).thenReturn(row);

  Map<String,String> props = new HashMap<String, String>();
  props.put("kudu.master","0.0.0.0");
  props.put("key.insert", "true");
  KuduSinkConfig config = new KuduSinkConfig(props);

  KuduWriter writer = new KuduWriter(config, client);

  writer.write(Arrays.asList(record));

  verify(client, times(1)).openTable("topic");
  // Value array fills a_1..a_2; the surplus element "g" has no column and is dropped.
  verify(row, times(1)).addString("a_1", "e");
  verify(row, times(1)).addString("a_2", "f");
  verify(row, never()).addString("a_3", "g");
  // Key array fills k_1..k_3; the surplus element "d" has no column and is dropped.
  verify(row, times(1)).addString("k_1", "a");
  verify(row, times(1)).addString("k_2", "b");
  verify(row, times(1)).addString("k_3", "c");
  verify(row, never()).addString("k_4", "d");
  verify(session, times(1)).flush();
}
 
开发者ID:onfocusio,项目名称:kafka-connect-kudu,代码行数:56,代码来源:KuduWriterTest.java


注:本文中的org.apache.kudu.Type类示例由纯净天空整理自Github/MSDocs等开源代码及文档管理平台,相关代码片段筛选自各路编程大神贡献的开源项目,源码版权归原作者所有,传播和使用请参考对应项目的License;未经允许,请勿转载。