

Java ConfVars Class Code Examples

This article collects typical usage examples of the Java class org.apache.hadoop.hive.conf.HiveConf.ConfVars. If you are unsure what the ConfVars class does, how to use it, or what working code looks like, the curated examples below should help.


The ConfVars class belongs to the org.apache.hadoop.hive.conf.HiveConf package. Fifteen code examples of the class are presented below, ordered by popularity by default.
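Before the examples, here is a minimal, self-contained sketch of the two ways ConfVars is typically used: through HiveConf's typed accessors (setVar/getVar), and through the public varname field when a plain String key is needed. The metastore URI and warehouse path below are placeholder values.

import org.apache.hadoop.hive.conf.HiveConf;
import org.apache.hadoop.hive.conf.HiveConf.ConfVars;

public class ConfVarsBasics {
  public static void main(String[] args) {
    HiveConf conf = new HiveConf();

    // Typed accessors: the ConfVars constant carries both the key and its type.
    conf.setVar(ConfVars.METASTOREURIS, "thrift://localhost:9083");
    String uris = conf.getVar(ConfVars.METASTOREURIS);

    // The raw property key is exposed via the public 'varname' field, which is
    // handy for String-keyed maps and the generic Configuration methods.
    conf.set(ConfVars.METASTOREWAREHOUSE.varname, "file:///tmp/warehouse");

    System.out.println(uris + " -> " + conf.get(ConfVars.METASTOREWAREHOUSE.varname));
  }
}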

Example 1: newHiveConf

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
private HiveConf newHiveConf(TunnelMetastoreCatalog hiveCatalog, Configuration baseConf) {
  List<String> siteXml = hiveCatalog.getSiteXml();
  if (CollectionUtils.isEmpty(siteXml)) {
    LOG.info("No Hadoop site XML is defined for catalog {}.", hiveCatalog.getName());
  }
  Map<String, String> properties = new HashMap<>();
  for (Entry<String, String> entry : baseConf) {
    properties.put(entry.getKey(), entry.getValue());
  }
  if (hiveCatalog.getHiveMetastoreUris() != null) {
    properties.put(ConfVars.METASTOREURIS.varname, hiveCatalog.getHiveMetastoreUris());
  }
  configureMetastoreTunnel(hiveCatalog.getMetastoreTunnel(), properties);
  putConfigurationProperties(hiveCatalog.getConfigurationProperties(), properties);
  HiveConf hiveConf = new HiveConfFactory(siteXml, properties).newInstance();
  return hiveConf;
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 18, Source: CommonBeans.java
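HiveConfFactory is a circus-train class not shown in this article. A rough stand-in, assuming it simply layers the collected properties onto a fresh HiveConf, might look like this (reusing the properties map built above):

// Hedged stand-in for HiveConfFactory (assumed behavior, not the actual class):
// apply each collected property to a freshly created HiveConf.
HiveConf hiveConf = new HiveConf();
for (Map.Entry<String, String> entry : properties.entrySet()) {
  hiveConf.set(entry.getKey(), entry.getValue());
}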

Example 2: get

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
@Override
public CloseableMetaStoreClient get() {
  LOG.debug("Creating tunnel: {}:? -> {} -> {}:{}", localHost, sshRoute, remoteHost, remotePort);
  try {
    TunnelConnectionManager tunnelConnectionManager = tunnelConnectionManagerFactory.create(sshRoute, localHost,
        FIRST_AVAILABLE_PORT, remoteHost, remotePort);
    int localPort = tunnelConnectionManager.getTunnel(remoteHost, remotePort).getAssignedLocalPort();
    tunnelConnectionManager.open();
    LOG.debug("Tunnel created: {}:{} -> {} -> {}:{}", localHost, localPort, sshRoute, remoteHost, remotePort);

    localPort = tunnelConnectionManager.getTunnel(remoteHost, remotePort).getAssignedLocalPort(); // re-read the assigned local port now that the tunnel is open
    HiveConf localHiveConf = new HiveConf(hiveConf);
    String proxyMetaStoreUris = "thrift://" + localHost + ":" + localPort;
    localHiveConf.setVar(ConfVars.METASTOREURIS, proxyMetaStoreUris);
    LOG.info("Metastore URI {} is being proxied to {}", hiveConf.getVar(ConfVars.METASTOREURIS), proxyMetaStoreUris);
    InvocationHandler handler = new TunnellingMetaStoreClientInvocationHandler(
        metaStoreClientFactory.newInstance(localHiveConf, name), tunnelConnectionManager);
    return (CloseableMetaStoreClient) Proxy.newProxyInstance(getClass().getClassLoader(), INTERFACES, handler);
  } catch (Exception e) {
    String message = String.format("Unable to establish SSH tunnel: '%s:?' -> '%s' -> '%s:%s'", localHost, sshRoute,
        remoteHost, remotePort);
    throw new MetaStoreClientException(message, e);
  }
}
 
Developer: HotelsDotCom, Project: circus-train, Lines: 25, Source: TunnellingMetaStoreClientSupplier.java
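TunnellingMetaStoreClientInvocationHandler is not shown above. A minimal sketch of the delegation pattern it presumably implements (forward every call to the real client, tear the tunnel down on close()) could look like the following; the class and field names here are illustrative, not the project's actual code, and AutoCloseable merely stands in for TunnelConnectionManager.

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Method;

class TunnellingHandlerSketch implements InvocationHandler {
  private final Object delegate;      // the real metastore client
  private final AutoCloseable tunnel; // stand-in for TunnelConnectionManager

  TunnellingHandlerSketch(Object delegate, AutoCloseable tunnel) {
    this.delegate = delegate;
    this.tunnel = tunnel;
  }

  @Override
  public Object invoke(Object proxy, Method method, Object[] args) throws Throwable {
    try {
      return method.invoke(delegate, args);
    } finally {
      if ("close".equals(method.getName())) {
        tunnel.close(); // the tunnel's lifetime is tied to the client's
      }
    }
  }
}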

Example 3: prepHiveConfAndData

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
protected static void prepHiveConfAndData() throws Exception {
  hiveConf = new HiveConf();

  // Configure metastore persistence db location on local filesystem
  final String dbUrl = String.format("jdbc:derby:;databaseName=%s;create=true",  getTempDir("metastore_db"));
  hiveConf.set(ConfVars.METASTORECONNECTURLKEY.varname, dbUrl);

  hiveConf.set(ConfVars.SCRATCHDIR.varname, "file:///" + getTempDir("scratch_dir"));
  hiveConf.set(ConfVars.LOCALSCRATCHDIR.varname, getTempDir("local_scratch_dir"));

  // Set MiniDFS conf in HiveConf
  hiveConf.set(FS_DEFAULT_NAME_KEY, dfsConf.get(FS_DEFAULT_NAME_KEY));

  whDir = hiveConf.get(ConfVars.METASTOREWAREHOUSE.varname);
  FileSystem.mkdirs(fs, new Path(whDir), new FsPermission((short) 0777));

  studentData = getPhysicalFileFromResource("student.txt");
  voterData = getPhysicalFileFromResource("voter.txt");
}
 
Developer: skhalifa, Project: QDrill, Lines: 20, Source: BaseTestHiveImpersonation.java

Example 4: newInstance

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
public CloseableThriftHiveMetastoreIface newInstance(AbstractMetaStore metaStore) {
  Map<String, String> properties = new HashMap<>();
  String uris = normaliseMetaStoreUris(metaStore.getRemoteMetaStoreUris());
  String name = metaStore.getName().toLowerCase();
  MetastoreTunnel metastoreTunnel = metaStore.getMetastoreTunnel();
  properties.put(ConfVars.METASTOREURIS.varname, uris);
  if (metastoreTunnel != null) {
    properties.put(WaggleDanceHiveConfVars.SSH_LOCALHOST.varname, metastoreTunnel.getLocalhost());
    properties.put(WaggleDanceHiveConfVars.SSH_PORT.varname, String.valueOf(metastoreTunnel.getPort()));
    properties.put(WaggleDanceHiveConfVars.SSH_ROUTE.varname, metastoreTunnel.getRoute());
    properties.put(WaggleDanceHiveConfVars.SSH_KNOWN_HOSTS.varname, metastoreTunnel.getKnownHosts());
    properties.put(WaggleDanceHiveConfVars.SSH_PRIVATE_KEYS.varname, metastoreTunnel.getPrivateKeys());
  }
  HiveConfFactory confFactory = new HiveConfFactory(Collections.<String> emptyList(), properties);
  return metaStoreClientFactory.newInstance(confFactory.newInstance(), "waggledance-" + name, 3);
}
 
Developer: HotelsDotCom, Project: waggle-dance, Lines: 17, Source: CloseableThriftHiveMetastoreIfaceClientFactory.java

Example 5: createServerSocket

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
private TServerSocket createServerSocket(boolean useSSL, int port) throws IOException, TTransportException {
  TServerSocket serverSocket = null;
  // enable SSL support for HMS
  List<String> sslVersionBlacklist = new ArrayList<>();
  for (String sslVersion : hiveConf.getVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST).split(",")) {
    sslVersionBlacklist.add(sslVersion);
  }
  if (!useSSL) {
    serverSocket = HiveAuthUtils.getServerSocket(null, port);
  } else {
    String keyStorePath = hiveConf.getVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH).trim();
    if (keyStorePath.isEmpty()) {
      throw new IllegalArgumentException(
          ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH.varname + " is not configured for SSL connection");
    }
    String keyStorePassword = ShimLoader.getHadoopShims().getPassword(hiveConf,
        HiveConf.ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD.varname);
    serverSocket = HiveAuthUtils.getServerSSLSocket(null, port, keyStorePath, keyStorePassword, sslVersionBlacklist);
  }
  return serverSocket;
}
 
Developer: HotelsDotCom, Project: waggle-dance, Lines: 22, Source: MetaStoreProxyServer.java
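For reference, a hedged configuration sketch covering the ConfVars this method reads; the keystore path, password, and protocol names are placeholders, and how the useSSL flag itself is derived is outside this snippet.

HiveConf conf = new HiveConf();
conf.setVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PATH, "/path/to/keystore.jks");
conf.setVar(ConfVars.HIVE_METASTORE_SSL_KEYSTORE_PASSWORD, "changeit");
conf.setVar(ConfVars.HIVE_SSL_PROTOCOL_BLACKLIST, "SSLv2,SSLv3");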

Example 6: hiveConfForTunneling

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
@Test
public void hiveConfForTunneling() throws Exception {
  ArgumentCaptor<HiveConf> hiveConfCaptor = ArgumentCaptor.forClass(HiveConf.class);

  MetastoreTunnel metastoreTunnel = new MetastoreTunnel();
  metastoreTunnel.setLocalhost("local-machine");
  metastoreTunnel.setPort(2222);
  metastoreTunnel.setRoute("a -> b -> c");
  metastoreTunnel.setKnownHosts("knownHosts");
  metastoreTunnel.setPrivateKeys("privateKeys");
  AbstractMetaStore federatedMetaStore = newFederatedInstance("fed1", THRIFT_URI);
  federatedMetaStore.setMetastoreTunnel(metastoreTunnel);

  factory.newInstance(federatedMetaStore);
  verify(metaStoreClientFactory).newInstance(hiveConfCaptor.capture(), anyString(), anyInt());

  HiveConf hiveConf = hiveConfCaptor.getValue();
  assertThat(hiveConf.getVar(ConfVars.METASTOREURIS), is(THRIFT_URI));
  assertThat(hiveConf.get(WaggleDanceHiveConfVars.SSH_LOCALHOST.varname), is("local-machine"));
  assertThat(hiveConf.get(WaggleDanceHiveConfVars.SSH_PORT.varname), is("2222"));
  assertThat(hiveConf.get(WaggleDanceHiveConfVars.SSH_ROUTE.varname), is("a -> b -> c"));
  assertThat(hiveConf.get(WaggleDanceHiveConfVars.SSH_KNOWN_HOSTS.varname), is("knownHosts"));
  assertThat(hiveConf.get(WaggleDanceHiveConfVars.SSH_PRIVATE_KEYS.varname), is("privateKeys"));
}
 
Developer: HotelsDotCom, Project: waggle-dance, Lines: 25, Source: CloseableThriftHiveMetastoreIfaceClientFactoryTest.java

Example 7: serializePlan

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
private static void serializePlan(Object plan, OutputStream out, Configuration conf, boolean cloningPlan) {
  PerfLogger perfLogger = PerfLogger.getPerfLogger();
  perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.SERIALIZE_PLAN);
  String serializationType = conf.get(HiveConf.ConfVars.PLAN_SERIALIZATION.varname, "kryo");
  LOG.info("Serializing " + plan.getClass().getSimpleName() + " via " + serializationType);
  if("javaXML".equalsIgnoreCase(serializationType)) {
    serializeObjectByJavaXML(plan, out);
  } else {
    if(cloningPlan) {
      serializeObjectByKryo(cloningQueryPlanKryo.get(), plan, out);
    } else {
      serializeObjectByKryo(runtimeSerializationKryo.get(), plan, out);
    }
  }
  perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.SERIALIZE_PLAN);
}
 
Developer: mini666, Project: hive-phoenix-handler, Lines: 17, Source: Utilities.java

Example 8: deserializePlan

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
private static <T> T deserializePlan(InputStream in, Class<T> planClass, Configuration conf, boolean cloningPlan) {
  PerfLogger perfLogger = PerfLogger.getPerfLogger();
  perfLogger.PerfLogBegin(CLASS_NAME, PerfLogger.DESERIALIZE_PLAN);
  T plan;
  String serializationType = conf.get(HiveConf.ConfVars.PLAN_SERIALIZATION.varname, "kryo");
  LOG.info("Deserializing " + planClass.getSimpleName() + " via " + serializationType);
  if("javaXML".equalsIgnoreCase(serializationType)) {
    plan = deserializeObjectByJavaXML(in);
  } else {
    if(cloningPlan) {
      plan = deserializeObjectByKryo(cloningQueryPlanKryo.get(), in, planClass);
    } else {
      plan = deserializeObjectByKryo(runtimeSerializationKryo.get(), in, planClass);
    }
  }
  perfLogger.PerfLogEnd(CLASS_NAME, PerfLogger.DESERIALIZE_PLAN);
  return plan;
}
 
Developer: mini666, Project: hive-phoenix-handler, Lines: 19, Source: Utilities.java
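serializePlan and deserializePlan read the same switch, so a round trip only works when both sides agree on the format. Selecting the format is a one-line configuration change; "kryo" is the default fallback in both methods and "javaXML" selects the XML path:

// Flip the plan serialization format consulted by the two methods above.
Configuration conf = new Configuration();
conf.set(HiveConf.ConfVars.PLAN_SERIALIZATION.varname, "javaXML");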

Example 9: estimateNumberOfReducers

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
/**
 * Estimate the number of reducers needed for this job, based on job input,
 * and configuration parameters.
 *
 * The output of this method should only be used if the output of this
 * MapRedTask is not being used to populate a bucketed table and the user
 * has not specified the number of reducers to use.
 *
 * @return the number of reducers.
 */
public static int estimateNumberOfReducers(HiveConf conf, ContentSummary inputSummary,
                                           MapWork work, boolean finalMapRed) throws IOException {
  long bytesPerReducer = conf.getLongVar(HiveConf.ConfVars.BYTESPERREDUCER);
  int maxReducers = conf.getIntVar(HiveConf.ConfVars.MAXREDUCERS);

  double samplePercentage = getHighestSamplePercentage(work);
  long totalInputFileSize = getTotalInputFileSize(inputSummary, work, samplePercentage);

  // if all inputs are sampled, we should shrink the size of reducers accordingly.
  if (totalInputFileSize != inputSummary.getLength()) {
    LOG.info("BytesPerReducer=" + bytesPerReducer + " maxReducers="
        + maxReducers + " estimated totalInputFileSize=" + totalInputFileSize);
  } else {
    LOG.info("BytesPerReducer=" + bytesPerReducer + " maxReducers="
      + maxReducers + " totalInputFileSize=" + totalInputFileSize);
  }

  // If this map reduce job writes final data to a table and bucketing is being inferred,
  // and the user has configured Hive to do this, make sure the number of reducers is a
  // power of two
  boolean powersOfTwo = conf.getBoolVar(HiveConf.ConfVars.HIVE_INFER_BUCKET_SORT_NUM_BUCKETS_POWER_TWO) &&
      finalMapRed && !work.getBucketedColsByDirectory().isEmpty();

  return estimateReducers(totalInputFileSize, bytesPerReducer, maxReducers, powersOfTwo);
}
 
Developer: mini666, Project: hive-phoenix-handler, Lines: 36, Source: Utilities.java
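The estimateReducers call at the end is not shown. A simplified sketch of the arithmetic it boils down to (divide total input size by the per-reducer target, clamp to maxReducers, optionally round up to a power of two) might be:

// Simplified sketch of the reducer estimate, not Hive's exact estimateReducers.
static int estimateReducersSketch(long totalInputFileSize, long bytesPerReducer,
                                  int maxReducers, boolean powersOfTwo) {
  // at least one reducer, at most maxReducers
  int reducers = (int) Math.ceil(totalInputFileSize / (double) bytesPerReducer);
  reducers = Math.max(1, Math.min(maxReducers, reducers));
  if (powersOfTwo) {
    int pow = 1;
    while (pow < reducers) {
      pow <<= 1; // round up to the next power of two
    }
    reducers = Math.min(pow, maxReducers);
  }
  return reducers;
}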

Example 10: prepHiveConfAndData

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
protected static void prepHiveConfAndData() throws Exception {
  hiveConf = new HiveConf();

  File scratchDir = createFileWithPermissions(dirTestWatcher.getRootDir(), "scratch_dir");
  File localScratchDir = createFileWithPermissions(dirTestWatcher.getRootDir(), "local_scratch_dir");
  File metaStoreDBDir = new File(dirTestWatcher.getRootDir(), "metastore_db");

  // Configure metastore persistence db location on local filesystem
  final String dbUrl = String.format("jdbc:derby:;databaseName=%s;create=true",  metaStoreDBDir.getAbsolutePath());
  hiveConf.set(ConfVars.METASTORECONNECTURLKEY.varname, dbUrl);

  hiveConf.set(ConfVars.SCRATCHDIR.varname, "file://" + scratchDir.getAbsolutePath());
  hiveConf.set(ConfVars.LOCALSCRATCHDIR.varname, localScratchDir.getAbsolutePath());

  // Set MiniDFS conf in HiveConf
  hiveConf.set(FS_DEFAULT_NAME_KEY, dfsConf.get(FS_DEFAULT_NAME_KEY));

  whDir = hiveConf.get(ConfVars.METASTOREWAREHOUSE.varname);
  FileSystem.mkdirs(fs, new Path(whDir), new FsPermission((short) 0777));

  studentData = getPhysicalFileFromResource("student.txt");
  voterData = getPhysicalFileFromResource("voter.txt");
}
 
Developer: axbaretto, Project: drill, Lines: 24, Source: BaseTestHiveImpersonation.java

Example 11: start

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
public void start(Map<String, String> confOverlay) throws Exception {
  if (isMetastoreRemote) {
    int metaStorePort = MetaStoreUtils.findFreePort();
    getHiveConf().setVar(ConfVars.METASTOREURIS, "thrift://localhost:" + metaStorePort);
    MetaStoreUtils.startMetaStore(metaStorePort,
        ShimLoader.getHadoopThriftAuthBridge(), getHiveConf());
  }

  hiveServer2 = new HiveServer2();
  // Set confOverlay parameters
  for (Map.Entry<String, String> entry : confOverlay.entrySet()) {
    setConfProperty(entry.getKey(), entry.getValue());
  }
  hiveServer2.init(getHiveConf());
  hiveServer2.start();
  waitForStartup();
  setStarted(true);
}
 
Developer: bobfreitas, Project: hiveunit-mr2, Lines: 19, Source: MiniHS2.java
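A hedged usage sketch: the confOverlay map lets a test override individual HiveConf entries for just this MiniHS2 instance. The miniHS2 variable and the overridden key below are illustrative:

Map<String, String> confOverlay = new HashMap<>();
confOverlay.put(ConfVars.HIVE_SUPPORT_CONCURRENCY.varname, "false"); // example override
miniHS2.start(confOverlay);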

Example 12: main

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
public static void main(String[] args) throws Exception {
    HiveConf hiveConf = new HiveConf();
    hiveConf.set("javax.jdo.option.ConnectionURL", "jdbc:derby:;databaseName=metastore_db;create=true");
    hiveConf.set("javax.jdo.option.ConnectionDriverName", "org.apache.derby.jdbc.EmbeddedDriver");
    hiveConf.set("hive.metastore.warehouse.dir", "file:///tmp");
    //hiveConf.set("hive.server2.thrift.port", "11100");
    hiveConf.setBoolVar(ConfVars.HIVE_SERVER2_ENABLE_DOAS, false);
    hiveConf.setVar(ConfVars.HIVE_SERVER2_THRIFT_BIND_HOST, HOST);
    hiveConf.setIntVar(ConfVars.HIVE_SERVER2_THRIFT_PORT, PORT);
    hiveConf.setVar(ConfVars.HIVE_SERVER2_AUTHENTICATION, AuthTypes.NOSASL.toString());
    hiveConf.setVar(ConfVars.HIVE_SERVER2_TRANSPORT_MODE, "binary");

    /* Other settings sometimes paired with an embedded HiveServer2:
         hive.metastore.local=true
         mapreduce.framework.name=yarn
         hive.exec.submitviachild=false
         hive.debug.localtask=true
         hive.auto.convert.join.use.nonstaged=true */
    HiveServer2 server = new HiveServer2();
    server.init(hiveConf);
    server.start();

    initClient(createBinaryTransport());
}
 
Developer: bbonnin, Project: hadoop-mongodb, Lines: 24, Source: HiveServer2Launcher.java
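initClient and createBinaryTransport are not shown. With NOSASL authentication and binary transport as configured above, a plain JDBC client is an equivalent way to talk to this server; the host and port below are assumptions standing in for the HOST and PORT constants, and NOSASL maps to the ';auth=noSasl' suffix on the JDBC URL.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.ResultSet;
import java.sql.Statement;

public class HiveJdbcClientSketch {
  public static void main(String[] args) throws Exception {
    Class.forName("org.apache.hive.jdbc.HiveDriver");
    try (Connection conn = DriverManager.getConnection(
            "jdbc:hive2://localhost:10000/default;auth=noSasl", "", "");
         Statement stmt = conn.createStatement();
         ResultSet rs = stmt.executeQuery("SHOW TABLES")) {
      while (rs.next()) {
        System.out.println(rs.getString(1));
      }
    }
  }
}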

Example 13: doPasswdAuth

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
/**
 * Do passwd auth.
 *
 * @param userName the user name
 * @param password the password
 */
private void doPasswdAuth(String userName, String password) {
  // Lens confs to Hive Confs.
  for (ConfVars var : new ConfVars[]{ConfVars.HIVE_SERVER2_PLAIN_LDAP_DOMAIN}) {
    if (cliService.getHiveConf().getVar(var) == null) {
      cliService.getHiveConf().setVar(var, cliService.getHiveConf().get(LensConfConstants.SERVER_DOMAIN));
    }
  }
  String authType = getHiveConf().getVar(ConfVars.HIVE_SERVER2_AUTHENTICATION);
  // No-op when authType is NOSASL
  if (!authType.equalsIgnoreCase(HiveAuthFactory.AuthTypes.NOSASL.toString())) {
    try {
      AuthenticationProviderFactory.AuthMethods authMethod = AuthenticationProviderFactory.AuthMethods
        .getValidAuthMethod(authType);
      PasswdAuthenticationProvider provider = AuthenticationProviderFactory
        .getAuthenticationProvider(authMethod, getHiveConf());
      provider.Authenticate(userName, password);
    } catch (Exception e) {
      log.error("Auth error: ", e);
      throw new NotAuthorizedException(e);
    }
  }
}
 
Developer: apache, Project: lens, Lines: 29, Source: BaseLensService.java

Example 14: checkLockManager

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
private boolean checkLockManager() {
  boolean supportConcurrency = conf.getBoolVar(HiveConf.ConfVars.HIVE_SUPPORT_CONCURRENCY);
  if (!supportConcurrency) {
    return false;
  }
  if (hiveLockMgr == null) {
    try {
      setLockManager();
    } catch (SemanticException e) {
      errorMessage = "FAILED: Error in semantic analysis: " + e.getMessage();
      SQLState = ErrorMsg.findSQLState(e.getMessage());
      downstreamError = e;
      console.printError(errorMessage, "\n"
          + org.apache.hadoop.util.StringUtils.stringifyException(e));
      return false;
    }
  }
  // We set the lock manager on the ctx here because each query has its own
  // ctx object. The hiveLockMgr is shared across the same Driver instance,
  // which can run multiple queries.
  ctx.setHiveLockMgr(hiveLockMgr);
  return hiveLockMgr != null;
}
 
Developer: adrian-wang, Project: project-panthera-skin, Lines: 24, Source: SkinDriver.java

Example 15: testAccessConfigRestrictions

import org.apache.hadoop.hive.conf.HiveConf.ConfVars; // import the required package/class
/**
 * Test that the required access configs are set by session hook
 */
@Test
public void testAccessConfigRestrictions() throws Exception {
  context = createContext(properties);
  policyFile
      .setUserGroupMapping(StaticUserGroup.getStaticMapping())
      .write(context.getPolicyFile());

  String testUser = USER1_1;
  // verify the config is set correctly by session hook
  verifyConfig(testUser, ConfVars.SEMANTIC_ANALYZER_HOOK.varname,
      HiveAuthzBindingSessionHook.SEMANTIC_HOOK);
  verifyConfig(testUser, ConfVars.HIVE_CAPTURE_TRANSFORM_ENTITY.varname,
      "true");
  verifyConfig(testUser, ConfVars.HIVE_SECURITY_COMMAND_WHITELIST.varname, "set");
  verifyConfig(testUser, ConfVars.SCRATCHDIRPERMISSION.varname, HiveAuthzBindingSessionHook.SCRATCH_DIR_PERMISSIONS);
  verifyConfig(testUser, HiveConf.ConfVars.HIVE_CONF_RESTRICTED_LIST.varname,
      HiveAuthzBindingSessionHook.ACCESS_RESTRICT_LIST);
  verifyConfig(testUser, HiveAuthzConf.HIVE_ACCESS_SUBJECT_NAME, testUser);
 }
 
Developer: apache, Project: incubator-sentry, Lines: 23, Source: TestServerConfiguration.java


Note: The org.apache.hadoop.hive.conf.HiveConf.ConfVars examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by many developers; copyright in the source code remains with the original authors, and any use or distribution should follow the corresponding project's license. Do not reproduce this article without permission.