

Java HiveConf Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hive.conf.HiveConf, gathered from open-source projects. If you are unsure what the HiveConf class does or how to use it, the curated examples below should help.


The HiveConf class belongs to the org.apache.hadoop.hive.conf package. Fifteen code examples of the class are shown below, sorted by popularity by default.
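Before the examples, here is a minimal, self-contained sketch of the basic HiveConf API: constructing a configuration, setting the metastore URI both by raw key and through the type-safe ConfVars enum, and reading the value back. The thrift URI is a placeholder, not a real endpoint.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class HiveConfBasics {
  public static void main(String[] args) {
    // Picks up hive-site.xml from the classpath if present.
    HiveConf conf = new HiveConf();

    // Set a property by its raw key...
    conf.set("hive.metastore.uris", "thrift://metastore-host:9083"); // placeholder host

    // ...or equivalently through the type-safe ConfVars enum.
    conf.setVar(ConfVars.METASTOREURIS, "thrift://metastore-host:9083");

    // Both forms resolve to the same underlying key.
    System.out.println(conf.getVar(ConfVars.METASTOREURIS));
  }
}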

Example 1: newHiveConf

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
private HiveConf newHiveConf(TunnelMetastoreCatalog hiveCatalog, Configuration baseConf) {
  List<String> siteXml = hiveCatalog.getSiteXml();
  if (CollectionUtils.isEmpty(siteXml)) {
    LOG.info("No Hadoop site XML is defined for catalog {}.", hiveCatalog.getName());
  }
  Map<String, String> properties = new HashMap<>();
  for (Entry<String, String> entry : baseConf) {
    properties.put(entry.getKey(), entry.getValue());
  }
  if (hiveCatalog.getHiveMetastoreUris() != null) {
    properties.put(ConfVars.METASTOREURIS.varname, hiveCatalog.getHiveMetastoreUris());
  }
  configureMetastoreTunnel(hiveCatalog.getMetastoreTunnel(), properties);
  putConfigurationProperties(hiveCatalog.getConfigurationProperties(), properties);
  HiveConf hiveConf = new HiveConfFactory(siteXml, properties).newInstance();
  return hiveConf;
}
 
Developer: HotelsDotCom · Project: circus-train · Lines: 18 · Source: CommonBeans.java

Example 2: HiveMetaStore

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
public HiveMetaStore(Configuration conf, HdfsSinkConnectorConfig connectorConfig) throws HiveMetaStoreException {
  HiveConf hiveConf = new HiveConf(conf, HiveConf.class);
  String hiveConfDir = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_CONF_DIR_CONFIG);
  String hiveMetaStoreURIs = connectorConfig.getString(HdfsSinkConnectorConfig.HIVE_METASTORE_URIS_CONFIG);
  if (hiveMetaStoreURIs.isEmpty()) {
    log.warn("hive.metastore.uris empty, an embedded Hive metastore will be "
             + "created in the directory the connector is started. "
             + "You need to start Hive in that specific directory to query the data.");
  }
  if (!hiveConfDir.isEmpty()) {
    String hiveSitePath = hiveConfDir + "/hive-site.xml";
    File hiveSite = new File(hiveSitePath);
    if (!hiveSite.exists()) {
      log.warn("hive-site.xml does not exist in provided Hive configuration directory {}.", hiveConf);
    }
    hiveConf.addResource(new Path(hiveSitePath));
  }
  hiveConf.set("hive.metastore.uris", hiveMetaStoreURIs);
  try {
    client = HCatUtil.getHiveMetastoreClient(hiveConf);
  } catch (IOException | MetaException e) {
    throw new HiveMetaStoreException(e);
  }
}
 
Developer: jiangxiluning · Project: kafka-connect-hdfs · Lines: 25 · Source: HiveMetaStore.java
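A side note on Example 2: the raw key "hive.metastore.uris" set near the end of the constructor is the same property that Example 1 references through ConfVars.METASTOREURIS.varname, so a type-safe equivalent would be:

hiveConf.setVar(HiveConf.ConfVars.METASTOREURIS, hiveMetaStoreURIs);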

Example 3: VacuumToolApplication

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
@Autowired
VacuumToolApplication(
    @Value("#{replicaHiveConf}") HiveConf conf,
    @Value("#{replicaMetaStoreClientSupplier}") Supplier<CloseableMetaStoreClient> clientSupplier,
    LegacyReplicaPathRepository legacyReplicaPathRepository,
    HousekeepingService housekeepingService,
    TableReplications replications,
    @Value("${dry-run:false}") boolean isDryRun,
    @Value("${partition-batch-size:1000}") short batchSize,
    @Value("${expected-path-count:10000}") int expectedPathCount) {
  this.conf = conf;
  this.clientSupplier = clientSupplier;
  this.legacyReplicaPathRepository = legacyReplicaPathRepository;
  this.housekeepingService = housekeepingService;
  this.isDryRun = isDryRun;
  this.batchSize = batchSize;
  this.expectedPathCount = expectedPathCount;
  tableReplications = replications.getTableReplications();
  vacuumEventId = "vacuum-" + DateTime.now(DateTimeZone.UTC);
}
 
Developer: HotelsDotCom · Project: circus-train · Lines: 21 · Source: VacuumToolApplication.java

Example 4: DatasetBuilder

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
private DatasetBuilder(HiveClient client, String user, NamespaceKey datasetPath, boolean ignoreAuthzErrors, HiveConf hiveConf, String dbName, String tableName, Table table, DatasetConfig oldConfig){
  if(oldConfig == null){
    datasetConfig = new DatasetConfig()
        .setPhysicalDataset(new PhysicalDataset())
        .setId(new EntityId().setId(UUID.randomUUID().toString()));
  } else {
    datasetConfig = oldConfig;
    // We're rewriting the read definition. Delete the old one.
    oldConfig.setReadDefinition(null);
  }
  this.client = client;
  this.user = user;
  this.datasetPath = datasetPath;
  this.hiveConf = hiveConf;
  this.table = table;
  this.dbName = dbName;
  this.tableName = tableName;
  this.ignoreAuthzErrors = ignoreAuthzErrors;
}
 
Developer: dremio · Project: dremio-oss · Lines: 20 · Source: DatasetBuilder.java

Example 5: HdfsSnapshotLocationManager

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
HdfsSnapshotLocationManager(
    HiveConf sourceHiveConf,
    String eventId,
    Table sourceTable,
    boolean snapshotsDisabled,
    SourceCatalogListener sourceCatalogListener) throws IOException {
  this(sourceHiveConf, eventId, sourceTable, snapshotsDisabled, null, sourceCatalogListener);
}
 
Developer: HotelsDotCom · Project: circus-train · Lines: 9 · Source: HdfsSnapshotLocationManager.java

Example 6: getDatasetBuilder

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
/**
 * @return null if datasetPath is not canonical and no corresponding table could be found in the source
 */
static DatasetBuilder getDatasetBuilder(
    HiveClient client,
    String user,
    NamespaceKey datasetPath,
    boolean isCanonicalDatasetPath,
    boolean ignoreAuthzErrors,
    HiveConf hiveConf,
    DatasetConfig oldConfig) throws TException {
  final List<String> noSourceSchemaPath =
    datasetPath.getPathComponents().subList(1, datasetPath.getPathComponents().size());

  // extract database and table names from dataset path
  final String dbName;
  final String tableName;
  switch (noSourceSchemaPath.size()) {
  case 1:
    dbName = "default";
    tableName = noSourceSchemaPath.get(0);
    break;
  case 2:
    dbName = noSourceSchemaPath.get(0);
    tableName = noSourceSchemaPath.get(1);
    break;
  default:
    // invalid: more than two path components
    return null;
  }

  // if the dataset path is not canonical we need to get the canonical form from the source
  final Table table;
  final String canonicalTableName;
  final String canonicalDbName;
  if (isCanonicalDatasetPath) {
    canonicalDbName = dbName;
    canonicalTableName = tableName;
    table = null;
  } else {
    // passed datasetPath is not canonical, we need to get it from the source
    table = client.getTable(dbName, tableName, ignoreAuthzErrors);
    if(table == null){
      return null;
    }
    canonicalTableName = table.getTableName();
    canonicalDbName = table.getDbName();
  }

  final List<String> canonicalDatasetPath = Lists.newArrayList(datasetPath.getRoot(), canonicalDbName, canonicalTableName);
  return new DatasetBuilder(client, user, new NamespaceKey(canonicalDatasetPath), ignoreAuthzErrors, hiveConf, canonicalDbName, canonicalTableName, table, oldConfig);
}
 
Developer: dremio · Project: dremio-oss · Lines: 53 · Source: DatasetBuilder.java
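A note on Example 6: the switch on noSourceSchemaPath.size() implements Hive's usual name resolution — a one-component path like [source, orders] resolves to the "default" database, a two-component path like [source, sales, orders] resolves to database "sales" and table "orders", and anything longer is rejected by returning null.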

Example 7: SourceFactory

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
@Autowired
public SourceFactory(
    SourceCatalog sourceCatalog,
    @Value("#{sourceHiveConf}") HiveConf sourceHiveConf,
    @Value("#{sourceMetaStoreClientSupplier}") Supplier<CloseableMetaStoreClient> sourceMetaStoreClientSupplier,
    SourceCatalogListener sourceCatalogListener) {
  this.sourceCatalog = sourceCatalog;
  this.sourceHiveConf = sourceHiveConf;
  this.sourceMetaStoreClientSupplier = sourceMetaStoreClientSupplier;
  this.sourceCatalogListener = sourceCatalogListener;
}
 
Developer: HotelsDotCom · Project: circus-train · Lines: 12 · Source: SourceFactory.java

Example 8: get

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
@Override
public CloseableMetaStoreClient get() {
  LOG.debug("Creating tunnel: {}:? -> {} -> {}:{}", localHost, sshRoute, remoteHost, remotePort);
  try {
    TunnelConnectionManager tunnelConnectionManager = tunnelConnectionManagerFactory.create(sshRoute, localHost,
        FIRST_AVAILABLE_PORT, remoteHost, remotePort);
    int localPort = tunnelConnectionManager.getTunnel(remoteHost, remotePort).getAssignedLocalPort();
    tunnelConnectionManager.open();
    LOG.debug("Tunnel created: {}:{} -> {} -> {}:{}", localHost, localPort, sshRoute, remoteHost, remotePort);

    localPort = tunnelConnectionManager.getTunnel(remoteHost, remotePort).getAssignedLocalPort();
    HiveConf localHiveConf = new HiveConf(hiveConf);
    String proxyMetaStoreUris = "thrift://" + localHost + ":" + localPort;
    localHiveConf.setVar(ConfVars.METASTOREURIS, proxyMetaStoreUris);
    LOG.info("Metastore URI {} is being proxied to {}", hiveConf.getVar(ConfVars.METASTOREURIS), proxyMetaStoreUris);
    InvocationHandler handler = new TunnellingMetaStoreClientInvocationHandler(
        metaStoreClientFactory.newInstance(localHiveConf, name), tunnelConnectionManager);
    return (CloseableMetaStoreClient) Proxy.newProxyInstance(getClass().getClassLoader(), INTERFACES, handler);
  } catch (Exception e) {
    String message = String.format("Unable to establish SSH tunnel: '%s:?' -> '%s' -> '%s:%s'", localHost, sshRoute,
        remoteHost, remotePort);
    throw new MetaStoreClientException(message, e);
  }
}
 
Developer: HotelsDotCom · Project: circus-train · Lines: 25 · Source: TunnellingMetaStoreClientSupplier.java
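Example 8 hinges on Proxy.newProxyInstance: every call on the returned metastore client is routed through the InvocationHandler, which lets the handler tie the SSH tunnel's lifecycle to the client's. The following is a generic sketch of that JDK dynamic-proxy pattern, with a hypothetical Tunnel interface standing in for TunnelConnectionManager; it is not the project's actual handler.

import java.io.Closeable;
import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;

public final class TunnellingProxies {

  // Hypothetical stand-in for the SSH tunnel manager.
  interface Tunnel extends Closeable {}

  /** Wraps a delegate so that closing the proxy also closes the tunnel. */
  @SuppressWarnings("unchecked")
  static <T extends Closeable> T wrap(Class<T> iface, T delegate, Tunnel tunnel) {
    InvocationHandler handler = (proxy, method, args) -> {
      try {
        // (a production handler would unwrap InvocationTargetException)
        return method.invoke(delegate, args);
      } finally {
        if ("close".equals(method.getName())) {
          tunnel.close(); // tear the tunnel down together with the client
        }
      }
    };
    return (T) Proxy.newProxyInstance(iface.getClassLoader(), new Class<?>[] { iface }, handler);
  }
}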

Example 9: setUp

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
@Before
public void setUp() throws Exception {
  setupHiveTables();
  when(client.listPartitionNames(table1.getDbName(), table1.getTableName(), (short) -1))
      .thenReturn(table1PartitionNames);
  when(client.getPartitionsByNames(table1.getDbName(), table1.getTableName(), table1PartitionNames))
      .thenReturn(table1Partitions);
  when(factory.newInstance(any(HiveConf.class), anyString())).thenReturn(client);
  when(tableReplication.getSourceTable()).thenReturn(sourceTable);
  when(tableReplication.getPartitionIteratorBatchSize()).thenReturn((short) 100);
  when(tableReplication.getPartitionFetcherBufferSize()).thenReturn((short) 100);
  HiveConf hiveConf = new HiveConf();
  supplier = new DefaultMetaStoreClientSupplier(hiveConf, "test", factory);
  when(source.getHiveConf()).thenReturn(hiveConf);
  predicate = new DiffGeneratedPartitionPredicate(source, replica, tableReplication, checksumFunction);
  when(source.getMetaStoreClientSupplier()).thenReturn(supplier);
  when(replica.getMetaStoreClientSupplier()).thenReturn(supplier);
  when(source.getTableAndStatistics(tableReplication)).thenReturn(sourceTableAndStats);
  when(sourceTableAndStats.getTable()).thenReturn(table1);
}
 
Developer: HotelsDotCom · Project: circus-train · Lines: 24 · Source: DiffGeneratedPartitionPredicateTest.java

Example 10: create

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
@Override
public ProducerOperator create(FragmentExecutionContext fragmentExecContext, OperatorContext context, HiveSubScan config) throws ExecutionSetupException {
  try{
    HiveStoragePlugin2 storagePlugin = (HiveStoragePlugin2) fragmentExecContext.getStoragePlugin(config.getPluginId());
    HiveConf conf = storagePlugin.getHiveConf();
    final HiveTableXattr tableAttr = HiveTableXattr.parseFrom(config.getExtendedProperty().toByteArray());
    final CompositeReaderConfig compositeConfig = CompositeReaderConfig.getCompound(config.getSchema(), config.getColumns(), config.getPartitionColumns());

    switch(tableAttr.getReaderType()){
    case NATIVE_PARQUET:
      return createNativeParquet(conf, fragmentExecContext, context, config, tableAttr, compositeConfig);
    case BASIC:
      return createBasicReader(conf, fragmentExecContext, context, config, tableAttr, compositeConfig);
    default:
      throw new UnsupportedOperationException(tableAttr.getReaderType().name());
    }
  } catch (InvalidProtocolBufferException e) {
    throw new ExecutionSetupException("Failure parsing table extended properties.", e);
  }

}
 
Developer: dremio · Project: dremio-oss · Lines: 22 · Source: HiveScanBatchCreator.java

Example 11: prepHiveConfAndData

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
protected static void prepHiveConfAndData() throws Exception {
  hiveConf = new HiveConf();

  // Configure metastore persistence db location on local filesystem
  final String dbUrl = String.format("jdbc:derby:;databaseName=%s;create=true",  getTempDir("metastore_db"));
  hiveConf.set(ConfVars.METASTORECONNECTURLKEY.varname, dbUrl);

  hiveConf.set(ConfVars.SCRATCHDIR.varname, "file:///" + getTempDir("scratch_dir"));
  hiveConf.set(ConfVars.LOCALSCRATCHDIR.varname, getTempDir("local_scratch_dir"));

  // Set MiniDFS conf in HiveConf
  hiveConf.set(FS_DEFAULT_NAME_KEY, dfsConf.get(FS_DEFAULT_NAME_KEY));

  whDir = hiveConf.get(ConfVars.METASTOREWAREHOUSE.varname);
  FileSystem.mkdirs(fs, new Path(whDir), new FsPermission((short) 0777));

  studentData = getPhysicalFileFromResource("student.txt");
  voterData = getPhysicalFileFromResource("voter.txt");
}
 
Developer: skhalifa · Project: QDrill · Lines: 20 · Source: BaseTestHiveImpersonation.java
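For reference, the ConfVars constants in Example 11 map to the familiar hive-site.xml keys, so the same embedded-Derby metastore setup can be written with plain string keys. A sketch with placeholder paths:

HiveConf hiveConf = new HiveConf();
// ConfVars.METASTORECONNECTURLKEY.varname is "javax.jdo.option.ConnectionURL"
hiveConf.set("javax.jdo.option.ConnectionURL",
    "jdbc:derby:;databaseName=/tmp/metastore_db;create=true"); // placeholder path
// ConfVars.SCRATCHDIR.varname is "hive.exec.scratchdir"
hiveConf.set("hive.exec.scratchdir", "file:///tmp/scratch_dir"); // placeholder path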

Example 12: hiveConfForTunneling

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
@Test
public void hiveConfForTunneling() throws Exception {
  ArgumentCaptor<HiveConf> hiveConfCaptor = ArgumentCaptor.forClass(HiveConf.class);

  MetastoreTunnel metastoreTunnel = new MetastoreTunnel();
  metastoreTunnel.setLocalhost("local-machine");
  metastoreTunnel.setPort(2222);
  metastoreTunnel.setRoute("a -> b -> c");
  metastoreTunnel.setKnownHosts("knownHosts");
  metastoreTunnel.setPrivateKeys("privateKeys");
  AbstractMetaStore federatedMetaStore = newFederatedInstance("fed1", THRIFT_URI);
  federatedMetaStore.setMetastoreTunnel(metastoreTunnel);

  factory.newInstance(federatedMetaStore);
  verify(metaStoreClientFactory).newInstance(hiveConfCaptor.capture(), anyString(), anyInt());

  HiveConf hiveConf = hiveConfCaptor.getValue();
  assertThat(hiveConf.getVar(ConfVars.METASTOREURIS), is(THRIFT_URI));
  assertThat(hiveConf.get(WaggleDanceHiveConfVars.SSH_LOCALHOST.varname), is("local-machine"));
  assertThat(hiveConf.get(WaggleDanceHiveConfVars.SSH_PORT.varname), is("2222"));
  assertThat(hiveConf.get(WaggleDanceHiveConfVars.SSH_ROUTE.varname), is("a -> b -> c"));
  assertThat(hiveConf.get(WaggleDanceHiveConfVars.SSH_KNOWN_HOSTS.varname), is("knownHosts"));
  assertThat(hiveConf.get(WaggleDanceHiveConfVars.SSH_PRIVATE_KEYS.varname), is("privateKeys"));
}
 
Developer: HotelsDotCom · Project: waggle-dance · Lines: 25 · Source: CloseableThriftHiveMetastoreIfaceClientFactoryTest.java
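A note on Example 12: Mockito's ArgumentCaptor is the standard way to assert on an object constructed inside the code under test — here it intercepts the HiveConf that the factory hands to metaStoreClientFactory.newInstance, so the test can verify that the tunnel settings were copied into the configuration.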

Example 13: HiveExec

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
/**
 * HiveExec constructor
 * @param config HDFS Connector configuration
 */
public HiveExec(HdfsSinkConnectorConfig config) {
  hiveConf = new HiveConf();
  String hiveConfDir = config.getString(HdfsSinkConnectorConfig.HIVE_CONF_DIR_CONFIG);
  hiveConf.addResource(new Path(hiveConfDir, "hive-site.xml"));
  SessionState.start(new CliSessionState(hiveConf));
  cliDriver = new CliDriver();
}
 
Developer: jiangxiluning · Project: kafka-connect-hdfs · Lines: 12 · Source: HiveExec.java
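Example 13 bootstraps an embedded Hive CLI session: SessionState.start installs the session for the current thread, after which CliDriver can execute HiveQL directly. A hedged usage sketch (the statement is illustrative and error handling is simplified):

// Assuming the constructor above has run; CliDriver.processLine returns 0 on success.
int rc = cliDriver.processLine("SHOW TABLES;");
if (rc != 0) {
  throw new RuntimeException("Hive statement failed with exit code " + rc);
}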

Example 14: init

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
@Override
protected void init() throws Throwable {
  super.init();
  try (ServerSocket socket = new ServerSocket(0)) {
    port = socket.getLocalPort();
  }
  conf.setIntVar(HiveConf.ConfVars.HIVE_SERVER2_THRIFT_PORT, port);
}
 
Developer: HotelsDotCom · Project: beeju · Lines: 9 · Source: HiveServer2JUnitRule.java
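A note on Example 14: binding a ServerSocket to port 0 asks the OS for a free ephemeral port, and the try-with-resources block releases it immediately so HiveServer2 can bind it. There is an inherent race — another process could claim the port between the socket closing and the server starting — which is generally an acceptable trade-off in test fixtures like this JUnit rule.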

Example 15: Reader

import org.apache.hadoop.hive.conf.HiveConf; // import the required class
Reader(
    HiveTableXattr tableAttr,
    DatasetSplit split,
    List<SchemaPath> projectedColumns,
    List<String> partitionColumns,
    OperatorContext context,
    final HiveConf hiveConf) throws ExecutionSetupException {
  super(tableAttr, split, projectedColumns, partitionColumns, context, hiveConf);
}
 
Developer: dremio · Project: dremio-oss · Lines: 10 · Source: HiveRecordReaders.java


Note: The org.apache.hadoop.hive.conf.HiveConf examples in this article were collected from open-source projects hosted on GitHub and similar platforms. The code snippets remain the property of their original authors; consult each project's license before redistributing.