

Java HiveIgnoreKeyTextOutputFormat Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat. If you have been wondering what HiveIgnoreKeyTextOutputFormat is for and how it is used in practice, the curated examples below should help.


The HiveIgnoreKeyTextOutputFormat class belongs to the org.apache.hadoop.hive.ql.io package. Twelve code examples of the class are shown below, sorted by popularity by default.
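
Before the examples, here is a minimal sketch (illustrative only, not drawn from the projects below) of the most common role of the class: it is registered by its fully qualified name as the output format of a TEXTFILE table's metastore StorageDescriptor. The class and method names other than the Hive/Hadoop imports are assumptions made for illustration.

import org.apache.hadoop.hive.metastore.api.StorageDescriptor;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.mapred.TextInputFormat;

public class TextFileStorageExample {
  // Configure a metastore StorageDescriptor the way Hive does for TEXTFILE tables.
  // HiveIgnoreKeyTextOutputFormat writes only the value of each key/value record,
  // which is why it serves as the standard output format for plain text tables.
  public static void configureAsTextFile(StorageDescriptor sd) {
    sd.setInputFormat(TextInputFormat.class.getName());
    sd.setOutputFormat(HiveIgnoreKeyTextOutputFormat.class.getName());
  }
}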

Example 1: getStoreType

import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; // import the required package/class
public static String getStoreType(String fileFormat) {
  Preconditions.checkNotNull(fileFormat);

  String[] fileFormatArrary = fileFormat.split("\\.");
  if(fileFormatArrary.length < 1) {
    throw new CatalogException("Hive file output format is wrong. - file output format:" + fileFormat);
  }

  String outputFormatClass = fileFormatArrary[fileFormatArrary.length-1];
  if(outputFormatClass.equals(HiveIgnoreKeyTextOutputFormat.class.getSimpleName())) {
    return CatalogProtos.StoreType.CSV.name();
  } else if(outputFormatClass.equals(RCFileOutputFormat.class.getSimpleName())) {
    return CatalogProtos.StoreType.RCFILE.name();
  } else {
    throw new CatalogException("Not supported file output format. - file output format:" + fileFormat);
  }
}
 
Developer: apache, Project: incubator-tajo, Lines: 18, Source: HCatalogUtil.java
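
A quick, hypothetical usage note for the method above (the variable name and call site are illustrative, not from the source project): the lookup keys off the simple class name after the last dot, so passing the fully qualified name of HiveIgnoreKeyTextOutputFormat maps to the CSV store type.

// Hypothetical call, assuming the getStoreType method above is in scope.
String storeType = getStoreType(HiveIgnoreKeyTextOutputFormat.class.getName());
// storeType is now "CSV": the simple name after the last '.' matches HiveIgnoreKeyTextOutputFormat.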

Example 2: getStoreType

import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; // import the required package/class
public static String getStoreType(String fileFormat) {
  Preconditions.checkNotNull(fileFormat);

  String[] fileFormatArrary = fileFormat.split("\\.");
  if(fileFormatArrary.length < 1) {
    throw new CatalogException("Hive file output format is wrong. - file output format:" + fileFormat);
  }

  String outputFormatClass = fileFormatArrary[fileFormatArrary.length-1];
  if(outputFormatClass.equals(HiveIgnoreKeyTextOutputFormat.class.getSimpleName())) {
    return CatalogProtos.StoreType.CSV.name();
  } else if(outputFormatClass.equals(HiveSequenceFileOutputFormat.class.getSimpleName())) {
    return CatalogProtos.StoreType.SEQUENCEFILE.name();
  } else if(outputFormatClass.equals(RCFileOutputFormat.class.getSimpleName())) {
    return CatalogProtos.StoreType.RCFILE.name();
  } else {
    throw new CatalogException("Not supported file output format. - file output format:" + fileFormat);
  }
}
 
Developer: gruter, Project: tajo-cdh, Lines: 20, Source: HCatalogUtil.java

Example 3: testTableUsingRegex

import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; // import the required package/class
@Test
public void testTableUsingRegex() throws Exception {
  TableMeta meta = new TableMeta(BuiltinStorages.REGEX, new KeyValueSet());
  meta.putProperty(StorageConstants.TEXT_REGEX, "([^ ]*)");
  meta.putProperty(StorageConstants.TEXT_REGEX_OUTPUT_FORMAT_STRING, "%1$s");

  org.apache.tajo.catalog.Schema schema = SchemaBuilder.builder()
      .add("c_custkey", TajoDataTypes.Type.TEXT)
      .build();

  TableDesc table = new TableDesc(IdentifierUtil.buildFQName(DB_NAME, CUSTOMER), schema, meta,
      new Path(warehousePath, new Path(DB_NAME, CUSTOMER)).toUri());
  store.createTable(table.getProto());
  assertTrue(store.existTable(DB_NAME, CUSTOMER));

  org.apache.hadoop.hive.ql.metadata.Table hiveTable = store.getHiveTable(DB_NAME, CUSTOMER);
  assertEquals(TextInputFormat.class.getName(), hiveTable.getSd().getInputFormat());
  assertEquals(HiveIgnoreKeyTextOutputFormat.class.getName(), hiveTable.getSd().getOutputFormat());
  assertEquals(RegexSerDe.class.getName(), hiveTable.getSerializationLib());

  TableDesc table1 = new TableDesc(store.getTable(DB_NAME, CUSTOMER));
  assertEquals(table.getName(), table1.getName());
  assertEquals(table.getUri(), table1.getUri());
  assertEquals(table.getSchema().size(), table1.getSchema().size());
  for (int i = 0; i < table.getSchema().size(); i++) {
    assertEquals(table.getSchema().getColumn(i).getSimpleName(), table1.getSchema().getColumn(i).getSimpleName());
  }

  store.dropTable(DB_NAME, CUSTOMER);
}
 
Developer: apache, Project: tajo, Lines: 31, Source: TestHiveCatalogStore.java

Example 4: testTableUsingTextFile

import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; // import the required package/class
@Test
public void testTableUsingTextFile() throws Exception {
  TableMeta meta = new TableMeta(BuiltinStorages.TEXT, new KeyValueSet());

  org.apache.tajo.catalog.Schema schema = SchemaBuilder.builder()
      .add("c_custkey", TajoDataTypes.Type.INT4)
      .add("c_name", TajoDataTypes.Type.TEXT)
      .add("c_address", TajoDataTypes.Type.TEXT)
      .add("c_nationkey", TajoDataTypes.Type.INT4)
      .add("c_phone", TajoDataTypes.Type.TEXT)
      .add("c_acctbal", TajoDataTypes.Type.FLOAT8)
      .add("c_mktsegment", TajoDataTypes.Type.TEXT)
      .add("c_comment", TajoDataTypes.Type.TEXT)
      .build();

  TableDesc table = new TableDesc(IdentifierUtil.buildFQName(DB_NAME, CUSTOMER), schema, meta,
      new Path(warehousePath, new Path(DB_NAME, CUSTOMER)).toUri());
  store.createTable(table.getProto());
  assertTrue(store.existTable(DB_NAME, CUSTOMER));

  StorageFormatDescriptor descriptor = formatFactory.get(IOConstants.TEXTFILE);
  org.apache.hadoop.hive.ql.metadata.Table hiveTable = store.getHiveTable(DB_NAME, CUSTOMER);
  assertEquals(descriptor.getInputFormat(), hiveTable.getSd().getInputFormat());
  //IgnoreKeyTextOutputFormat was deprecated
  assertEquals(HiveIgnoreKeyTextOutputFormat.class.getName(), hiveTable.getSd().getOutputFormat());

  TableDesc table1 = new TableDesc(store.getTable(DB_NAME, CUSTOMER));
  assertEquals(table.getName(), table1.getName());
  assertEquals(table.getUri(), table1.getUri());
  assertEquals(table.getSchema().size(), table1.getSchema().size());
  for (int i = 0; i < table.getSchema().size(); i++) {
    assertEquals(table.getSchema().getColumn(i).getSimpleName(), table1.getSchema().getColumn(i).getSimpleName());
  }

  assertEquals(StringEscapeUtils.escapeJava(StorageConstants.DEFAULT_FIELD_DELIMITER),
      table1.getMeta().getProperty(StorageConstants.TEXT_DELIMITER));

  Map<String, String> expected = getProperties(DB_NAME, CUSTOMER);
  Map<String, String> toSet = new ImmutableMap.Builder<String, String>()
      .put("key1", "value1")
      .put("key2", "value2")
      .build();
  expected.putAll(toSet);

  setProperty(DB_NAME, CUSTOMER, toSet);
  Map<String, String> actual = getProperties(DB_NAME, CUSTOMER);
  assertEquals(actual.get(StorageConstants.TEXT_DELIMITER), expected.get(StorageConstants.TEXT_DELIMITER));
  assertEquals(actual.get("key1"), expected.get("key1"));
  assertEquals(actual.get("key2"), expected.get("key2"));

  Set<String> toUnset = Sets.newHashSet("key2", "key3");
  for (String key : toUnset) {
    expected.remove(key);
  }
  unSetProperty(DB_NAME, CUSTOMER, toUnset);
  actual = getProperties(DB_NAME, CUSTOMER);
  assertEquals(actual.get(StorageConstants.TEXT_DELIMITER), expected.get(StorageConstants.TEXT_DELIMITER));
  assertEquals(actual.get("key1"), expected.get("key1"));
  assertNull(actual.get("key2"));

  store.dropTable(DB_NAME, CUSTOMER);
}
 
Developer: apache, Project: tajo, Lines: 63, Source: TestHiveCatalogStore.java

Example 5: testTableWithNullValue

import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; // import the required package/class
@Test
public void testTableWithNullValue() throws Exception {
  KeyValueSet options = new KeyValueSet();
  options.set(StorageConstants.TEXT_DELIMITER, StringEscapeUtils.escapeJava("\u0002"));
  options.set(StorageConstants.TEXT_NULL, StringEscapeUtils.escapeJava("\u0003"));
  TableMeta meta = new TableMeta(BuiltinStorages.TEXT, options);

  org.apache.tajo.catalog.Schema schema = SchemaBuilder.builder()
      .add("s_suppkey", TajoDataTypes.Type.INT4)
      .add("s_name", TajoDataTypes.Type.TEXT)
      .add("s_address", TajoDataTypes.Type.TEXT)
      .add("s_nationkey", TajoDataTypes.Type.INT4)
      .add("s_phone", TajoDataTypes.Type.TEXT)
      .add("s_acctbal", TajoDataTypes.Type.FLOAT8)
      .add("s_comment", TajoDataTypes.Type.TEXT)
      .build();

  TableDesc table = new TableDesc(IdentifierUtil.buildFQName(DB_NAME, SUPPLIER), schema, meta,
      new Path(warehousePath, new Path(DB_NAME, SUPPLIER)).toUri());

  store.createTable(table.getProto());
  assertTrue(store.existTable(DB_NAME, SUPPLIER));

  StorageFormatDescriptor descriptor = formatFactory.get(IOConstants.TEXTFILE);
  org.apache.hadoop.hive.ql.metadata.Table hiveTable = store.getHiveTable(DB_NAME, SUPPLIER);
  assertEquals(descriptor.getInputFormat(), hiveTable.getSd().getInputFormat());
  //IgnoreKeyTextOutputFormat was deprecated
  assertEquals(HiveIgnoreKeyTextOutputFormat.class.getName(), hiveTable.getSd().getOutputFormat());

  TableDesc table1 = new TableDesc(store.getTable(DB_NAME, SUPPLIER));
  assertEquals(table.getName(), table1.getName());
  assertEquals(table.getUri(), table1.getUri());
  assertEquals(table.getSchema().size(), table1.getSchema().size());
  for (int i = 0; i < table.getSchema().size(); i++) {
    assertEquals(table.getSchema().getColumn(i).getSimpleName(), table1.getSchema().getColumn(i).getSimpleName());
  }

  assertEquals(table.getMeta().getProperty(StorageConstants.TEXT_DELIMITER),
      table1.getMeta().getProperty(StorageConstants.TEXT_DELIMITER));

  assertEquals(table.getMeta().getProperty(StorageConstants.TEXT_NULL),
      table1.getMeta().getProperty(StorageConstants.TEXT_NULL));

  assertEquals(table1.getMeta().getProperty(StorageConstants.TEXT_DELIMITER),
      StringEscapeUtils.escapeJava("\u0002"));

  assertEquals(table1.getMeta().getProperty(StorageConstants.TEXT_NULL),
      StringEscapeUtils.escapeJava("\u0003"));

  Map<String, String> expected = getProperties(DB_NAME, SUPPLIER);
  Map<String, String> toSet = new ImmutableMap.Builder<String, String>()
          .put("key1", "value1")
          .put("key2", "value2")
          .build();
  expected.putAll(toSet);

  setProperty(DB_NAME, SUPPLIER, toSet);
  Map<String, String> actual = getProperties(DB_NAME, SUPPLIER);
  assertEquals(actual.get(StorageConstants.TEXT_DELIMITER), expected.get(StorageConstants.TEXT_DELIMITER));
  assertEquals(actual.get("key1"), expected.get("key1"));
  assertEquals(actual.get("key2"), expected.get("key2"));

  Set<String> toUnset = Sets.newHashSet("key2", "key3");
  for (String key : toUnset) {
    expected.remove(key);
  }
  unSetProperty(DB_NAME, SUPPLIER, toUnset);
  actual = getProperties(DB_NAME, SUPPLIER);
  assertEquals(actual.get(StorageConstants.TEXT_DELIMITER), expected.get(StorageConstants.TEXT_DELIMITER));
  assertEquals(actual.get("key1"), expected.get("key1"));
  assertNull(actual.get("key2"));

  store.dropTable(DB_NAME, SUPPLIER);

}
 
Developer: apache, Project: tajo, Lines: 76, Source: TestHiveCatalogStore.java

Example 6: testSkipPartitionsOlderThanFactStartTime

import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; // import the required package/class
@Test(priority = 2)
public void testSkipPartitionsOlderThanFactStartTime() throws Exception {
  Date now = new Date();
  SimpleDateFormat dateFormat = new SimpleDateFormat(DateUtil.ABSDATE_FMT);

  List<FieldSchema> factColumns = new ArrayList<>(cubeMeasures.size());
  String factNameSkipPart = "testFactSkipPart";

  for (CubeMeasure measure : cubeMeasures) {
    factColumns.add(measure.getColumn());
  }

  // add some dimensions of the cube
  factColumns.add(new FieldSchema("dim1", "string", "dim1"));
  factColumns.add(new FieldSchema("dim2", "string", "dim2"));

  List<FieldSchema> factPartColumns = Lists.newArrayList(new FieldSchema("region", "string", "region part"));

  Set<UpdatePeriod> updates = Sets.newHashSet(HOURLY);
  ArrayList<FieldSchema> partCols = Lists.newArrayList(getDatePartition(), factPartColumns.get(0));
  Map<String, String> factProps = new HashMap<>();

  factProps.put(MetastoreConstants.FACT_RELATIVE_START_TIME, "now -30 days");
  factProps.put(MetastoreConstants.FACT_RELATIVE_END_TIME, "now +10 days");

  StorageTableDesc s1 = new StorageTableDesc(TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class, partCols,
          datePartKeySingleton);
  Map<String, Set<UpdatePeriod>> updatePeriods = getHashMap(c1, updates);
  Map<String, StorageTableDesc> storageTables = getHashMap(c1, s1);

  CubeFactTable cubeFactWithParts = new CubeFactTable(CUBE_NAME, factNameSkipPart, factColumns, updatePeriods);
  // create cube fact
  client.createCubeFactTable(CUBE_NAME, factNameSkipPart, factColumns,
          updatePeriods, 0L, factProps, storageTables);

  assertTrue(client.tableExists(factNameSkipPart));
  Table cubeTbl = client.getHiveTable(factNameSkipPart);
  assertTrue(client.isFactTable(cubeTbl));
  assertTrue(client.isFactTableForCube(cubeTbl, CUBE_NAME));
  CubeFactTable cubeFact2 = new CubeFactTable(cubeTbl);
  assertTrue(cubeFactWithParts.equals(cubeFact2));

  // Assert for storage tables
  for (String entry : storageTables.keySet()) {
    String storageTableName = getFactOrDimtableStorageTableName(factNameSkipPart, entry);
    assertTrue(client.tableExists(storageTableName));
  }

  Map<String, String> partSpec = getHashMap(factPartColumns.get(0).getName(), "APAC");
  Map<String, Date> timePartsNow = getHashMap(getDatePartitionKey(), NOW);
  Map<String, Date> timePartsBeforeTwoMonths = getHashMap(getDatePartitionKey(), TWO_MONTHS_BACK);

  // test partition
  List<StoragePartitionDesc> storageDescs = new ArrayList<>();
  StoragePartitionDesc sPartSpecNow =
          new StoragePartitionDesc(cubeFactWithParts.getName(), timePartsNow, partSpec, HOURLY);
  StoragePartitionDesc sPartSpecTwoMonthsBack =
          new StoragePartitionDesc(cubeFactWithParts.getName(), timePartsBeforeTwoMonths, partSpec, HOURLY);
  storageDescs.add(sPartSpecNow);
  storageDescs.add(sPartSpecTwoMonthsBack);

  client.addPartitions(storageDescs, c1, CubeTableType.FACT);
  assertTrue(client.factPartitionExists(cubeFactWithParts.getName(), c1, HOURLY, timePartsNow, partSpec));
  assertFalse(client.factPartitionExists(cubeFactWithParts.getName(), c1, HOURLY,
          timePartsBeforeTwoMonths, partSpec));
}
 
Developer: apache, Project: lens, Lines: 67, Source: TestCubeMetastoreClient.java

Example 7: testSkipPartitionsOlderThanStorageStartTime

import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; // import the required package/class
@Test(priority = 2)
public void testSkipPartitionsOlderThanStorageStartTime() throws Exception {
  Date now = new Date();
  SimpleDateFormat dateFormat = new SimpleDateFormat(DateUtil.ABSDATE_FMT);

  List<FieldSchema> factColumns = new ArrayList<>(cubeMeasures.size());
  String factNameSkipPart = "testStorageSkipPart";

  for (CubeMeasure measure : cubeMeasures) {
    factColumns.add(measure.getColumn());
  }

  // add some dimensions of the cube
  factColumns.add(new FieldSchema("dim1", "string", "dim1"));
  factColumns.add(new FieldSchema("dim2", "string", "dim2"));

  List<FieldSchema> factPartColumns = Lists.newArrayList(new FieldSchema("region", "string", "region part"));

  Set<UpdatePeriod> updates = Sets.newHashSet(HOURLY);
  ArrayList<FieldSchema> partCols = Lists.newArrayList(getDatePartition(), factPartColumns.get(0));

  Map<String, String> storageProps = new HashMap<>();
  storageProps.put(getStoragetableStartTimesKey(), "now -30 days");
  storageProps.put(getStoragetableEndTimesKey(), "now +10 days");

  StorageTableDesc s1 = new StorageTableDesc(TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class, partCols,
          datePartKeySingleton);
  s1.getTblProps().putAll(storageProps);

  Map<String, Set<UpdatePeriod>> updatePeriods = getHashMap(c1, updates);
  Map<String, StorageTableDesc> storageTables = getHashMap(c1, s1);

  CubeFactTable cubeFactWithParts = new CubeFactTable(CUBE_NAME, factNameSkipPart, factColumns, updatePeriods);
  // create cube fact
  client.createCubeFactTable(CUBE_NAME, factNameSkipPart, factColumns,
          updatePeriods, 0L, null, storageTables);

  assertTrue(client.tableExists(factNameSkipPart));
  Table cubeTbl = client.getHiveTable(factNameSkipPart);
  assertTrue(client.isFactTable(cubeTbl));
  assertTrue(client.isFactTableForCube(cubeTbl, CUBE_NAME));
  CubeFactTable cubeFact2 = new CubeFactTable(cubeTbl);
  assertTrue(cubeFactWithParts.equals(cubeFact2));

  // Assert for storage tables
  for (String entry : storageTables.keySet()) {
    String storageTableName = getFactOrDimtableStorageTableName(factNameSkipPart, entry);
    assertTrue(client.tableExists(storageTableName));
  }

  Map<String, String> partSpec = getHashMap(factPartColumns.get(0).getName(), "APAC");
  Map<String, Date> timePartsNow = getHashMap(getDatePartitionKey(), NOW);
  Map<String, Date> timePartsBeforeTwoMonths = getHashMap(getDatePartitionKey(), TWO_MONTHS_BACK);

  // test partition
  List<StoragePartitionDesc> storageDescs = new ArrayList<>();
  StoragePartitionDesc sPartSpecNow =
          new StoragePartitionDesc(cubeFactWithParts.getName(), timePartsNow, partSpec, HOURLY);
  StoragePartitionDesc sPartSpecTwoMonthsBack =
          new StoragePartitionDesc(cubeFactWithParts.getName(), timePartsBeforeTwoMonths, partSpec, HOURLY);
  storageDescs.add(sPartSpecNow);
  storageDescs.add(sPartSpecTwoMonthsBack);

  client.getTimelines(factNameSkipPart, null, null, null);
  client.addPartitions(storageDescs, c1, CubeTableType.FACT);
  assertTrue(client.factPartitionExists(cubeFactWithParts.getName(), c1, HOURLY, timePartsNow, partSpec));
  assertFalse(client.factPartitionExists(cubeFactWithParts.getName(), c1, HOURLY,
          timePartsBeforeTwoMonths, partSpec));
}
 
Developer: apache, Project: lens, Lines: 70, Source: TestCubeMetastoreClient.java

Example 8: testCubeDimWithNonTimeParts

import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; // import the required package/class
@Test(priority = 2)
public void testCubeDimWithNonTimeParts() throws Exception {
  String dimName = "countrytable_partitioned";

  List<FieldSchema> dimColumns = new ArrayList<>();
  dimColumns.add(new FieldSchema("id", "int", "code"));
  dimColumns.add(new FieldSchema("name", "string", "field1"));
  dimColumns.add(new FieldSchema("capital", "string", "field2"));

  Set<String> storageNames = new HashSet<>();

  ArrayList<FieldSchema> partCols = Lists.newArrayList(new FieldSchema("region", "string", "region name"),
    getDatePartition());
  StorageTableDesc s1 = new StorageTableDesc(TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class, partCols,
    datePartKeySingleton);
  storageNames.add(c3);

  Map<String, StorageTableDesc> storageTables = getHashMap(c3, s1);

  client.createCubeDimensionTable(countryDim.getName(), dimName, dimColumns, 0L, storageNames, null, storageTables);
  // test partition
  String storageTableName = getFactOrDimtableStorageTableName(dimName, c3);
  assertFalse(client.dimTableLatestPartitionExists(storageTableName));
  Map<String, Date> expectedLatestValues = Maps.newHashMap();
  Map<String, Date> timeParts = new HashMap<>();
  Map<String, String> nonTimeParts = new HashMap<>();

  timeParts.put(getDatePartitionKey(), NOW);
  nonTimeParts.put("region", "asia");
  StoragePartitionDesc sPartSpec = new StoragePartitionDesc(dimName, timeParts, nonTimeParts, HOURLY);
  client.addPartition(sPartSpec, c3, CubeTableType.DIM_TABLE);
  expectedLatestValues.put("asia", NOW);
  assertLatestForRegions(storageTableName, expectedLatestValues);

  timeParts.put(getDatePartitionKey(), getDateWithOffset(HOURLY, -1));
  nonTimeParts.put("region", "africa");
  sPartSpec = new StoragePartitionDesc(dimName, timeParts, nonTimeParts, HOURLY);
  client.addPartition(sPartSpec, c3, CubeTableType.DIM_TABLE);
  expectedLatestValues.put("asia", NOW);
  expectedLatestValues.put("africa", getDateWithOffset(HOURLY, -1));
  assertLatestForRegions(storageTableName, expectedLatestValues);

  timeParts.put(getDatePartitionKey(), getDateWithOffset(HOURLY, 1));
  nonTimeParts.put("region", "africa");
  sPartSpec = new StoragePartitionDesc(dimName, timeParts, nonTimeParts, HOURLY);
  client.addPartition(sPartSpec, c3, CubeTableType.DIM_TABLE);
  expectedLatestValues.put("asia", NOW);
  expectedLatestValues.put("africa", getDateWithOffset(HOURLY, 1));
  assertLatestForRegions(storageTableName, expectedLatestValues);

  timeParts.put(getDatePartitionKey(), getDateWithOffset(HOURLY, 3));
  nonTimeParts.put("region", "asia");
  sPartSpec = new StoragePartitionDesc(dimName, timeParts, nonTimeParts, HOURLY);
  client.addPartition(sPartSpec, c3, CubeTableType.DIM_TABLE);
  expectedLatestValues.put("asia", getDateWithOffset(HOURLY, 3));
  expectedLatestValues.put("africa", getDateWithOffset(HOURLY, 1));
  assertLatestForRegions(storageTableName, expectedLatestValues);

  client.dropPartition(dimName, c3, timeParts, nonTimeParts, HOURLY);
  expectedLatestValues.put("asia", NOW);
  expectedLatestValues.put("africa", getDateWithOffset(HOURLY, 1));
  assertLatestForRegions(storageTableName, expectedLatestValues);

  timeParts.put(getDatePartitionKey(), NOW);
  client.dropPartition(dimName, c3, timeParts, nonTimeParts, HOURLY);
  expectedLatestValues.remove("asia");
  assertLatestForRegions(storageTableName, expectedLatestValues);

  nonTimeParts.put("region", "africa");
  timeParts.put(getDatePartitionKey(), getDateWithOffset(HOURLY, -1));
  assertLatestForRegions(storageTableName, expectedLatestValues);

  timeParts.put(getDatePartitionKey(), getDateWithOffset(HOURLY, 3));
  nonTimeParts.remove("africa");
  assertLatestForRegions(storageTableName, expectedLatestValues);
}
 
Developer: apache, Project: lens, Lines: 77, Source: TestCubeMetastoreClient.java

Example 9: testCubeDimWithoutDumps

import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; // import the required package/class
@Test(priority = 2)
public void testCubeDimWithoutDumps() throws Exception {
  String dimName = "countrytableMeta";

  List<FieldSchema> dimColumns = new ArrayList<>();
  dimColumns.add(new FieldSchema("id", "int", "code"));
  dimColumns.add(new FieldSchema("name", "string", "field1"));
  dimColumns.add(new FieldSchema("capital", "string", "field2"));
  dimColumns.add(new FieldSchema("region", "string", "region name"));

  Set<String> storageNames = new HashSet<>();

  StorageTableDesc s1 = new StorageTableDesc(TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class, null, null);
  storageNames.add(c1);
  Map<String, StorageTableDesc> storageTables = getHashMap(c1, s1);
  CubeDimensionTable cubeDim = new CubeDimensionTable(countryDim.getName(), dimName, dimColumns, 0L, storageNames);
  client.createCubeDimensionTable(countryDim.getName(), dimName, dimColumns, 0L, storageNames, null, storageTables);

  assertTrue(client.tableExists(dimName));
  Table cubeTbl = client.getHiveTable(dimName);
  assertTrue(client.isDimensionTable(cubeTbl));

  List<CubeDimensionTable> tbls = client.getAllDimensionTables(countryDim);
  boolean found = false;
  for (CubeDimensionTable dim : tbls) {
    if (dim.getName().equalsIgnoreCase(dimName)) {
      found = true;
      break;
    }
  }
  assertTrue(found);

  CubeDimensionTable cubeDim2 = new CubeDimensionTable(cubeTbl);
  assertTrue(cubeDim.equals(cubeDim2));

  // Assert for storage tables
  for (String storageName : storageTables.keySet()) {
    String storageTableName = getFactOrDimtableStorageTableName(dimName, storageName);
    assertTrue(client.tableExists(storageTableName));
    assertTrue(!client.getHiveTable(storageTableName).isPartitioned());
  }
}
 
Developer: apache, Project: lens, Lines: 43, Source: TestCubeMetastoreClient.java

Example 10: testCubeDimWithTwoStorages

import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; // import the required package/class
@Test(priority = 2)
public void testCubeDimWithTwoStorages() throws Exception {
  String dimName = "citytableMeta";

  List<FieldSchema> dimColumns = new ArrayList<>();
  dimColumns.add(new FieldSchema("id", "int", "code"));
  dimColumns.add(new FieldSchema("name", "string", "field1"));
  dimColumns.add(new FieldSchema("stateid", "int", "state id"));

  StorageTableDesc s1 = new StorageTableDesc(TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class,
    datePartSingleton, datePartKeySingleton);
  StorageTableDesc s2 = new StorageTableDesc(TextInputFormat.class, HiveIgnoreKeyTextOutputFormat.class, null, null);

  Map<String, UpdatePeriod> dumpPeriods = getHashMap(c1, HOURLY, c2, null);
  Map<String, StorageTableDesc> storageTables = getHashMap(c1, s1, c2, s2);

  CubeDimensionTable cubeDim = new CubeDimensionTable(cityDim.getName(), dimName, dimColumns, 0L, dumpPeriods);
  client.createCubeDimensionTable(cityDim.getName(), dimName, dimColumns, 0L, dumpPeriods, null, storageTables);

  assertTrue(client.tableExists(dimName));
  Table cubeTbl = client.getHiveTable(dimName);
  assertTrue(client.isDimensionTable(cubeTbl));

  List<CubeDimensionTable> tbls = client.getAllDimensionTables(cityDim);
  boolean found = false;
  for (CubeDimensionTable dim : tbls) {
    if (dim.getName().equalsIgnoreCase(dimName)) {
      found = true;
      break;
    }
  }
  assertTrue(found);

  CubeDimensionTable cubeDim2 = new CubeDimensionTable(cubeTbl);
  assertTrue(cubeDim.equals(cubeDim2));

  // Assert for storage tables
  String storageTableName1 = getFactOrDimtableStorageTableName(dimName, c1);
  assertTrue(client.tableExists(storageTableName1));
  String storageTableName2 = getFactOrDimtableStorageTableName(dimName, c2);
  assertTrue(client.tableExists(storageTableName2));
  assertTrue(!client.getHiveTable(storageTableName2).isPartitioned());
}
 
Developer: apache, Project: lens, Lines: 44, Source: TestCubeMetastoreClient.java

Example 11: getOutputFormatClass

import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; // import the required package/class
@SuppressWarnings("rawtypes")
public Class<? extends OutputFormat> getOutputFormatClass() {
	return HiveIgnoreKeyTextOutputFormat.class;
}
 
Developer: simonellistonball, Project: hive-azuretables, Lines: 5, Source: AzureTablesStorageHandler.java

Example 12: getFileExtension

import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat; // import the required package/class
/**
 * Based on compression option, output format, and configured output codec -
 * get extension for output file. Text files require an extension, whereas
 * others, like sequence files, do not.
 * <p>
 * The property <code>hive.output.file.extension</code> is used to determine
 * the extension - if set, it will override other logic for choosing an
 * extension.
 *
 * @param jc
 *          Job Configuration
 * @param isCompressed
 *          Whether the output file is compressed or not
 * @param hiveOutputFormat
 *          The output format, used to detect if the format is text
 * @return the required file extension (example: .gz)
 */
public static String getFileExtension(JobConf jc, boolean isCompressed,
    HiveOutputFormat<?, ?> hiveOutputFormat) {
  String extension = HiveConf.getVar(jc, HiveConf.ConfVars.OUTPUT_FILE_EXTENSION);
  if (!StringUtils.isEmpty(extension)) {
    return extension;
  }
  if ((hiveOutputFormat instanceof HiveIgnoreKeyTextOutputFormat) && isCompressed) {
    Class<? extends CompressionCodec> codecClass = FileOutputFormat.getOutputCompressorClass(jc,
        DefaultCodec.class);
    CompressionCodec codec = ReflectionUtil.newInstance(codecClass, jc);
    return codec.getDefaultExtension();
  }
  return "";
}
 
Developer: mini666, Project: hive-phoenix-handler, Lines: 32, Source: Utilities.java
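
For context, here is a minimal, hypothetical call site for the method above, assuming it is the public static getFileExtension in org.apache.hadoop.hive.ql.exec.Utilities as the source file suggests; the class and variable names in the sketch are illustrative.

import org.apache.hadoop.hive.ql.exec.Utilities;
import org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat;
import org.apache.hadoop.hive.ql.io.HiveOutputFormat;
import org.apache.hadoop.mapred.JobConf;

public class FileExtensionExample {
  public static void main(String[] args) {
    JobConf jc = new JobConf();
    // A raw instance is enough here; getFileExtension only checks the runtime type.
    HiveOutputFormat<?, ?> textFormat = new HiveIgnoreKeyTextOutputFormat();
    // With compression enabled, no hive.output.file.extension override, and the
    // default codec (DefaultCodec), this is expected to print ".deflate".
    System.out.println(Utilities.getFileExtension(jc, true, textFormat));
    // With compression disabled the method returns an empty string.
    System.out.println(Utilities.getFileExtension(jc, false, textFormat));
  }
}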


Note: The org.apache.hadoop.hive.ql.io.HiveIgnoreKeyTextOutputFormat class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from community-contributed open-source projects, and copyright of the source code remains with the original authors; consult the corresponding project's license before distributing or reusing the code, and do not republish without permission.