

Java StorageDescriptor.getLocation Method Code Examples

This article collects typical usage examples of the Java method org.apache.hadoop.hive.metastore.api.StorageDescriptor.getLocation. If you are wondering what StorageDescriptor.getLocation does or how to call it, the curated examples below should help. You can also explore further usage of org.apache.hadoop.hive.metastore.api.StorageDescriptor, the class this method belongs to.


Eight code examples of the StorageDescriptor.getLocation method are shown below, ordered by popularity. A minimal usage sketch comes first, followed by the project examples themselves.
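Before the project examples, here is a minimal sketch of the method in isolation. StorageDescriptor is a Thrift-generated bean, so getLocation() simply returns whatever location string was previously set on the descriptor, or null if none was set. The HDFS path below is a made-up placeholder, not a real cluster location.

import org.apache.hadoop.hive.metastore.api.StorageDescriptor;

public class GetLocationSketch {
  public static void main(String[] args) {
    // Build a bare StorageDescriptor and assign its storage location.
    StorageDescriptor sd = new StorageDescriptor();
    sd.setLocation("hdfs://namenode:8020/warehouse/demo_table"); // placeholder path

    // getLocation() returns the location set above, or null if unset.
    String location = sd.getLocation();
    System.out.println("Table location: " + location);
  }
}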

Example 1: StorageDescriptorWrapper

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
public StorageDescriptorWrapper(StorageDescriptor sd) {
  this.sd = sd;
  this.cols = Lists.newArrayList();
  for (FieldSchema f : sd.getCols()) {
    this.cols.add(new FieldSchemaWrapper(f));
  }
  this.location = sd.getLocation();
  this.inputFormat = sd.getInputFormat();
  this.outputFormat = sd.getOutputFormat();
  this.compressed = sd.isCompressed();
  this.numBuckets = sd.getNumBuckets();
  this.serDeInfo = new SerDeInfoWrapper(sd.getSerdeInfo());
//  this.bucketCols = sd.getBucketCols();
  this.sortCols = Lists.newArrayList();
  for (Order o : sd.getSortCols()) {
    this.sortCols.add(new OrderWrapper(o));
  }
  this.parameters = sd.getParameters();
}
 
Developer: skhalifa; Project: QDrill; Lines: 20; Source: HiveTable.java

Example 2: StorageDescriptorWrapper

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
public StorageDescriptorWrapper(StorageDescriptor storageDescriptor) {
  sd = storageDescriptor;
  location = storageDescriptor.getLocation();
  inputFormat = storageDescriptor.getInputFormat();
  outputFormat = storageDescriptor.getOutputFormat();
  compressed = storageDescriptor.isCompressed();
  numBuckets = storageDescriptor.getNumBuckets();
  serDeInfo = new SerDeInfoWrapper(storageDescriptor.getSerdeInfo());
  if (sd.getSortCols() != null) {
    sortCols = Lists.newArrayList();
    for (Order order : sd.getSortCols()) {
      sortCols.add(new OrderWrapper(order));
    }
  }
  parameters = storageDescriptor.getParameters();
  if (sd.getCols() != null) {
    this.columns = Lists.newArrayList();
    for (FieldSchema fieldSchema : sd.getCols()) {
      this.columns.add(new FieldSchemaWrapper(fieldSchema));
    }
  }
}
 
Developer: axbaretto; Project: drill; Lines: 23; Source: HiveTableWrapper.java

Example 3: extractPartInfo

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
private static PartInfo extractPartInfo(HCatSchema schema, StorageDescriptor sd,
                    Map<String, String> parameters, Configuration conf,
                    InputJobInfo inputJobInfo) throws IOException {

  StorerInfo storerInfo = InternalUtil.extractStorerInfo(sd, parameters);

  Properties hcatProperties = new Properties();
  HiveStorageHandler storageHandler = HCatUtil.getStorageHandler(conf, storerInfo);

  // copy the properties from storageHandler to jobProperties
  Map<String, String> jobProperties =
      HCatRSUtil.getInputJobProperties(storageHandler, inputJobInfo);

  for (String key : parameters.keySet()) {
    hcatProperties.put(key, parameters.get(key));
  }
  // FIXME
  // Bloating partinfo with inputJobInfo is not good
  return new PartInfo(schema, storageHandler, sd.getLocation(),
    hcatProperties, jobProperties, inputJobInfo.getTableInfo());
}
 
Developer: cloudera; Project: RecordServiceClient; Lines: 22; Source: InitializeInput.java

Example 4: extractPartInfo

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
/**
 * Extract partition info.
 *
 * @param schema Table schema
 * @param sd Storage descriptor
 * @param parameters Parameters
 * @param conf Configuration
 * @param inputJobInfo Input job info
 * @return Partition info
 * @throws IOException
 */
private static PartInfo extractPartInfo(
    HCatSchema schema, StorageDescriptor sd, Map<String, String> parameters,
    Configuration conf, InputJobInfo inputJobInfo) throws IOException {
  StorerInfo storerInfo = InternalUtil.extractStorerInfo(sd, parameters);

  Properties hcatProperties = new Properties();
  HCatStorageHandler storageHandler = HCatUtil.getStorageHandler(conf,
      storerInfo);

  // Copy the properties from storageHandler to jobProperties
  Map<String, String> jobProperties =
      HCatUtil.getInputJobProperties(storageHandler, inputJobInfo);

  for (Map.Entry<String, String> param : parameters.entrySet()) {
    hcatProperties.put(param.getKey(), param.getValue());
  }

  return new PartInfo(schema, storageHandler, sd.getLocation(),
      hcatProperties, jobProperties, inputJobInfo.getTableInfo());
}
 
Developer: renato2099; Project: giraph-gora; Lines: 32; Source: HCatUtils.java

Example 5: splitInput

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
private void splitInput(final Properties properties, final StorageDescriptor sd, final Partition partition)
    throws ReflectiveOperationException, IOException {
  final JobConf job = new JobConf();
  for (final Object obj : properties.keySet()) {
    job.set((String) obj, (String) properties.get(obj));
  }
  for (final Map.Entry<String, String> entry : hiveReadEntry.hiveConfigOverride.entrySet()) {
    job.set(entry.getKey(), entry.getValue());
  }
  InputFormat<?, ?> format = (InputFormat<?, ?>)
      Class.forName(sd.getInputFormat()).getConstructor().newInstance();
  job.setInputFormat(format.getClass());
  final Path path = new Path(sd.getLocation());
  final FileSystem fs = path.getFileSystem(job);

  if (fs.exists(path)) {
    FileInputFormat.addInputPath(job, path);
    format = job.getInputFormat();
    for (final InputSplit split : format.getSplits(job, 1)) {
      inputSplits.add(split);
      partitionMap.put(split, partition);
    }
  }
  final String numRowsProp = properties.getProperty("numRows");
  logger.trace("HiveScan num rows property = {}", numRowsProp);
  if (numRowsProp != null) {
    final long numRows = Long.valueOf(numRowsProp);
    // starting from hive-0.13, when no statistics are available, this property is set to -1
    // it's important to note that the value returned by hive may not be up to date
    if (numRows > 0) {
      rowCount += numRows;
    }
  }
}
 
Developer: skhalifa; Project: QDrill; Lines: 35; Source: HiveScan.java

Example 6: addInputPath

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
private static boolean addInputPath(StorageDescriptor sd, JobConf job) throws IOException {
  final Path path = new Path(sd.getLocation());
  final FileSystem fs = FileSystemWrapper.get(path, job);

  if (fs.exists(path)) {
    FileInputFormat.addInputPath(job, path);
    return true;
  }

  return false;
}
 
Developer: dremio; Project: dremio-oss; Lines: 12; Source: DatasetBuilder.java

Example 7: getSdLocation

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
private String getSdLocation(StorageDescriptor sd) {
  if (sd == null) {
    return "";
  } else {
    return sd.getLocation();
  }
}
 
Developer: apache; Project: incubator-sentry; Lines: 8; Source: MetastoreAuthzBinding.java

Example 8: locationOnS3

import org.apache.hadoop.hive.metastore.api.StorageDescriptor; // import the class the method depends on
private boolean locationOnS3(StorageDescriptor sd) {
  String location = sd.getLocation();

  return location != null && (location.startsWith("s3n") || location.startsWith("s3a"));
}
 
Developer: airbnb; Project: reair; Lines: 6; Source: ObjectConflictHandler.java


Note: The org.apache.hadoop.hive.metastore.api.StorageDescriptor.getLocation examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers; copyright remains with the original authors, and redistribution or use should follow each project's license. Please do not repost without permission.