

Java HadoopCompat Class Code Examples

This article collects typical usage examples of the Java class org.apache.cassandra.hadoop.HadoopCompat. If you are wondering what exactly HadoopCompat does, how to use it, or where to find usage examples, the curated code examples below should help.


The HadoopCompat class belongs to the org.apache.cassandra.hadoop package. A total of 14 code examples of the class are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Java code examples.
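
Before the numbered examples, here is a minimal sketch (our own illustration, not one of the 14 examples below) of the pattern that recurs throughout them: HadoopCompat papers over the API differences between Hadoop 1.x and 2.x (TaskAttemptContext, for instance, changed from a class to an interface), and the two calls you will meet most often are getConfiguration() and progress().

import org.apache.cassandra.hadoop.HadoopCompat;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.mapreduce.TaskAttemptContext;

public class HadoopCompatSketch
{
    static Configuration configOf(TaskAttemptContext context)
    {
        // Hides the getConfiguration() split between Hadoop 1.x and 2.x context types
        return HadoopCompat.getConfiguration(context);
    }

    static void reportProgress(TaskAttemptContext context)
    {
        // Signals liveness so long-running tasks are not killed as unresponsive
        if (context != null)
            HadoopCompat.progress(context);
    }
}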

Example 1: write

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
/**
 * <p>
 * The column values must correspond to the order in which
 * they appear in the insert stored procedure.
 *
 * Key is not used, so it can be null or any object.
 * </p>
 *
 * @param key
 *            any object or null.
 * @param values
 *            the values to write.
 * @throws IOException
 */
@Override
public void write(Object key, List<ByteBuffer> values) throws IOException
{
    prepareWriter();
    try
    {
        ((CQLSSTableWriter) writer).rawAddRow(values);
        
        if (null != progress)
            progress.progress();
        if (null != context)
            HadoopCompat.progress(context);
    } 
    catch (InvalidRequestException e)
    {
        throw new IOException("Error adding row with key: " + key, e);
    }
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 32, Source file: CqlBulkRecordWriter.java
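
As a usage illustration, here is a hypothetical caller (not code from the project): the values list must follow the column order of the INSERT statement the writer was configured with, and since the key is ignored, null is fine. The column types below are assumptions.

import java.nio.ByteBuffer;
import java.util.ArrayList;
import java.util.List;
import org.apache.cassandra.utils.ByteBufferUtil;

// recordWriter is the CqlBulkRecordWriter shown above, already configured
List<ByteBuffer> values = new ArrayList<>();
values.add(ByteBufferUtil.bytes("some-id")); // first bound column (assumed text)
values.add(ByteBufferUtil.bytes(42L));       // second bound column (assumed bigint)
recordWriter.write(null, values);            // key is unused, so null works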

Example 2: write

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
/**
 * <p>
 * The column values must correspond to the order in which
 * they appear in the insert stored procedure. 
 * 
 * Key is not used, so it can be null or any object.
 * </p>
 *
 * @param key
 *            any object or null.
 * @param values
 *            the values to write.
 * @throws IOException
 */
@Override
public void write(Object key, List<ByteBuffer> values) throws IOException
{
    prepareWriter();
    try
    {
        ((CQLSSTableWriter) writer).rawAddRow(values);
        
        if (null != progress)
            progress.progress();
        if (null != context)
            HadoopCompat.progress(context);
    } 
    catch (InvalidRequestException e)
    {
        throw new IOException("Error adding row with key: " + key, e);
    }
}
 
Author: scylladb, Project: scylla-tools-java, Lines: 33, Source file: CqlBulkRecordWriter.java

Example 3: getRecordReader

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
public RecordReader<Map<String, ByteBuffer>, Map<String, ByteBuffer>> getRecordReader(InputSplit split, JobConf jobConf, final Reporter reporter)
        throws IOException
{
    // Bridge the old mapred API to the new mapreduce API: wrap the old-style
    // Reporter and build a TaskAttemptContext that the new-style reader accepts
    TaskAttemptContext tac = HadoopCompat.newMapContext(
            jobConf,
            TaskAttemptID.forName(jobConf.get(MAPRED_TASK_ID)),
            null,
            null,
            null,
            new ReporterWrapper(reporter),
            null);

    CqlPagingRecordReader recordReader = new CqlPagingRecordReader();
    recordReader.initialize((org.apache.hadoop.mapreduce.InputSplit)split, tac);
    return recordReader;
}
 
Author: rajath26, Project: cassandra-trunk, Lines: 17, Source file: CqlPagingInputFormat.java
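
For context, a hedged sketch of how this mapred-style entry point might be driven; the host, port, partitioner, keyspace, and table below are assumptions, and the ConfigHelper setters are the same ones used in the storage examples later in this article. mapred.task.id is normally supplied by the framework; the sketch sets it manually only so TaskAttemptID.forName() has something to parse.

JobConf jobConf = new JobConf();
ConfigHelper.setInputInitialAddress(jobConf, "127.0.0.1");             // assumed host
ConfigHelper.setInputRpcPort(jobConf, "9160");                         // assumed Thrift port
ConfigHelper.setInputPartitioner(jobConf, "Murmur3Partitioner");       // assumed partitioner
ConfigHelper.setInputColumnFamily(jobConf, "my_keyspace", "my_table"); // assumed names
jobConf.set("mapred.task.id", "attempt_200707121733_0001_m_000000_0"); // normally set by the framework

CqlPagingInputFormat inputFormat = new CqlPagingInputFormat();
for (InputSplit split : inputFormat.getSplits(jobConf, 1))
{
    RecordReader<Map<String, ByteBuffer>, Map<String, ByteBuffer>> reader =
            inputFormat.getRecordReader(split, jobConf, Reporter.NULL);
    // consume rows with reader.next(key, value) here
}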

Example 4: write

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
@Override
public void write(final ByteBuffer ignoredKey, final CQLRecord record)  {
  prepareWriter();
  // To ensure Crunch doesn't reuse CQLSSTableWriter's objects
  List<ByteBuffer> bb = Lists.newArrayList();
  for (ByteBuffer v : record.getValues()) {
    bb.add(ByteBufferUtil.clone(v));
  }
  try {
    ((CQLSSTableWriter) writer).rawAddRow(bb);
    if (null != progress)
      progress.progress();
    if (null != context)
      HadoopCompat.progress(context);
  } catch (InvalidRequestException | IOException e) {
    LOG.error(e.getMessage());
    throw new CrunchRuntimeException("Error adding row : " + e.getMessage());
  }
}
 
Author: spotify, Project: hdfs2cass, Lines: 20, Source file: CrunchCqlBulkRecordWriter.java

Example 5: setStoreLocation

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
/** set store configuration settings */
public void setStoreLocation(String location, Job job) throws IOException
{
    conf = HadoopCompat.getConfiguration(job);
    setLocationFromUri(location);

    if (username != null && password != null)
        ConfigHelper.setOutputKeyspaceUserNameAndPassword(conf, username, password);
    if (splitSize > 0)
        ConfigHelper.setInputSplitSize(conf, splitSize);
    if (partitionerClass != null)
        ConfigHelper.setOutputPartitioner(conf, partitionerClass);
    if (rpcPort != null)
    {
        ConfigHelper.setOutputRpcPort(conf, rpcPort);
        ConfigHelper.setInputRpcPort(conf, rpcPort);
    }
    if (initHostAddress != null)
    {
        ConfigHelper.setOutputInitialAddress(conf, initHostAddress);
        ConfigHelper.setInputInitialAddress(conf, initHostAddress);
    }

    ConfigHelper.setOutputColumnFamily(conf, keyspace, column_family);
    CqlConfigHelper.setOutputCql(conf, outputQuery);

    setConnectionInformation();

    if (ConfigHelper.getOutputRpcPort(conf) == 0)
        throw new IOException("PIG_OUTPUT_RPC_PORT or PIG_RPC_PORT environment variable not set");
    if (ConfigHelper.getOutputInitialAddress(conf) == null)
        throw new IOException("PIG_OUTPUT_INITIAL_ADDRESS or PIG_INITIAL_ADDRESS environment variable not set");
    if (ConfigHelper.getOutputPartitioner(conf) == null)
        throw new IOException("PIG_OUTPUT_PARTITIONER or PIG_PARTITIONER environment variable not set");

    initSchema(storeSignature);
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 38, Source file: CqlNativeStorage.java

Example 6: close

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
private void close() throws IOException
{
    if (writer != null)
    {
        writer.close();
        Future<StreamState> future = loader.stream(ignores);
        while (true)
        {
            try
            {
                future.get(1000, TimeUnit.MILLISECONDS);
                break;
            }
            catch (ExecutionException | TimeoutException te)
            {
                if (null != progress)
                    progress.progress();
                if (null != context)
                    HadoopCompat.progress(context);
            }
            catch (InterruptedException e)
            {
                throw new IOException(e);
            }
        }
        if (loader.getFailedHosts().size() > 0)
        {
            if (loader.getFailedHosts().size() > maxFailures)
                throw new IOException("Too many hosts failed: " + loader.getFailedHosts());
            else
                logger.warn("Some hosts failed: {}", loader.getFailedHosts());
        }
    }
}
 
Author: scylladb, Project: scylla-tools-java, Lines: 35, Source file: CqlBulkRecordWriter.java

Example 7: CrunchBulkRecordWriter

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
public CrunchBulkRecordWriter(TaskAttemptContext context) {
  Config.setClientMode(true);
  Config.setOutboundBindAny(true);
  this.conf = HadoopCompat.getConfiguration(context);
  this.context = context;
  int megabitsPerSec = Integer.parseInt(conf.get(STREAM_THROTTLE_MBITS, "0"));
  LOG.info("Setting stream throttling to " + megabitsPerSec);
  DatabaseDescriptor.setStreamThroughputOutboundMegabitsPerSec(megabitsPerSec);
  DatabaseDescriptor.setInterDCStreamThroughputOutboundMegabitsPerSec(megabitsPerSec);
  heartbeat = new ProgressHeartbeat(context, 120);
}
 
Author: spotify, Project: hdfs2cass, Lines: 12, Source file: CrunchBulkRecordWriter.java
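
Since the throttle value is read from the job configuration, a driver can set it before submitting the job. A minimal sketch follows; the property key is an assumption based on Cassandra's bulk output format, so verify it against the STREAM_THROTTLE_MBITS constant in the project source.

// Driver-side setup; the key string is assumed, not confirmed by this article
Configuration conf = HadoopCompat.getConfiguration(job);
conf.set("mapreduce.output.bulkoutputformat.streamthrottlembits", "200"); // cap streaming at 200 Mbit/s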

Example 8: initialize

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException
{
    this.split = (ColumnFamilySplit) split;
    Configuration conf = HadoopCompat.getConfiguration(context);
    totalRowCount = (this.split.getLength() < Long.MAX_VALUE)
                  ? (int) this.split.getLength()
                  : ConfigHelper.getInputSplitSize(conf);
    cfName = ConfigHelper.getInputColumnFamily(conf);
    keyspace = ConfigHelper.getInputKeyspace(conf);
    partitioner = ConfigHelper.getInputPartitioner(conf);
    inputColumns = CqlConfigHelper.getInputcolumns(conf);
    userDefinedWhereClauses = CqlConfigHelper.getInputWhereClauses(conf);

    try
    {
        if (cluster != null)
            return;

        // create a Cluster instance
        String[] locations = split.getLocations();
        cluster = CqlConfigHelper.getInputCluster(locations, conf);
    }
    catch (Exception e)
    {
        throw new RuntimeException(e);
    }

    if (cluster != null)
        session = cluster.connect(quote(keyspace));

    if (session == null)
      throw new RuntimeException("Can't create connection session");

    //get negotiated serialization protocol
    nativeProtocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion();

    // If the user provides a CQL query then we will use it without validation
    // otherwise we will fall back to building a query using the:
    //   inputColumns
    //   whereClauses
    cqlQuery = CqlConfigHelper.getInputCql(conf);
    // validate that the user hasn't tried to give us a custom query along with input columns
    // and where clauses
    if (StringUtils.isNotEmpty(cqlQuery) && (StringUtils.isNotEmpty(inputColumns) ||
                                             StringUtils.isNotEmpty(userDefinedWhereClauses)))
    {
        throw new AssertionError("Cannot define a custom query with input columns and / or where clauses");
    }

    if (StringUtils.isEmpty(cqlQuery))
        cqlQuery = buildQuery();
    logger.debug("cqlQuery {}", cqlQuery);

    rowIterator = new RowIterator();
    logger.debug("created {}", rowIterator);
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 58, Source file: CqlRecordReader.java

Example 9: setLocation

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
/** set read configuration settings */
public void setLocation(String location, Job job) throws IOException
{
    conf = HadoopCompat.getConfiguration(job);
    setLocationFromUri(location);

    if (ConfigHelper.getInputSlicePredicate(conf) == null)
    {
        SliceRange range = new SliceRange(slice_start, slice_end, slice_reverse, limit);
        SlicePredicate predicate = new SlicePredicate().setSlice_range(range);
        ConfigHelper.setInputSlicePredicate(conf, predicate);
    }
    if (System.getenv(PIG_WIDEROW_INPUT) != null)
        widerows = Boolean.parseBoolean(System.getenv(PIG_WIDEROW_INPUT));
    if (System.getenv(PIG_USE_SECONDARY) != null)
        usePartitionFilter = Boolean.parseBoolean(System.getenv(PIG_USE_SECONDARY));
    if (System.getenv(PIG_INPUT_SPLIT_SIZE) != null)
    {
        try
        {
            ConfigHelper.setInputSplitSize(conf, Integer.parseInt(System.getenv(PIG_INPUT_SPLIT_SIZE)));
        }
        catch (NumberFormatException e)
        {
            throw new IOException("PIG_INPUT_SPLIT_SIZE is not a number", e);
        }           
    } 

    if (usePartitionFilter && getIndexExpressions() != null)
        ConfigHelper.setInputRange(conf, getIndexExpressions());

    if (username != null && password != null)
        ConfigHelper.setInputKeyspaceUserNameAndPassword(conf, username, password);

    if (splitSize > 0)
        ConfigHelper.setInputSplitSize(conf, splitSize);
    if (partitionerClass != null)
        ConfigHelper.setInputPartitioner(conf, partitionerClass);
    if (rpcPort != null)
        ConfigHelper.setInputRpcPort(conf, rpcPort);
    if (initHostAddress != null)
        ConfigHelper.setInputInitialAddress(conf, initHostAddress);

    ConfigHelper.setInputColumnFamily(conf, keyspace, column_family, widerows);
    setConnectionInformation();

    if (ConfigHelper.getInputRpcPort(conf) == 0)
        throw new IOException("PIG_INPUT_RPC_PORT or PIG_RPC_PORT environment variable not set");
    if (ConfigHelper.getInputInitialAddress(conf) == null)
        throw new IOException("PIG_INPUT_INITIAL_ADDRESS or PIG_INITIAL_ADDRESS environment variable not set");
    if (ConfigHelper.getInputPartitioner(conf) == null)
        throw new IOException("PIG_INPUT_PARTITIONER or PIG_PARTITIONER environment variable not set");
    if (loadSignature == null)
        loadSignature = location;
    initSchema(loadSignature);
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 57, Source file: CassandraStorage.java

Example 10: setStoreLocation

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
/** set store configuration settings */
public void setStoreLocation(String location, Job job) throws IOException
{
    conf = HadoopCompat.getConfiguration(job);
    
    // don't combine mappers to a single mapper per node
    conf.setBoolean("pig.noSplitCombination", true);
    setLocationFromUri(location);

    if (username != null && password != null)
        ConfigHelper.setOutputKeyspaceUserNameAndPassword(conf, username, password);
    if (splitSize > 0)
        ConfigHelper.setInputSplitSize(conf, splitSize);
    if (partitionerClass != null)
        ConfigHelper.setOutputPartitioner(conf, partitionerClass);
    if (rpcPort != null)
    {
        ConfigHelper.setOutputRpcPort(conf, rpcPort);
        ConfigHelper.setInputRpcPort(conf, rpcPort);
    }
    if (initHostAddress != null)
    {
        ConfigHelper.setOutputInitialAddress(conf, initHostAddress);
        ConfigHelper.setInputInitialAddress(conf, initHostAddress);
    }

    ConfigHelper.setOutputColumnFamily(conf, keyspace, column_family);
    setConnectionInformation();

    if (ConfigHelper.getOutputRpcPort(conf) == 0)
        throw new IOException("PIG_OUTPUT_RPC_PORT or PIG_RPC_PORT environment variable not set");
    if (ConfigHelper.getOutputInitialAddress(conf) == null)
        throw new IOException("PIG_OUTPUT_INITIAL_ADDRESS or PIG_INITIAL_ADDRESS environment variable not set");
    if (ConfigHelper.getOutputPartitioner(conf) == null)
        throw new IOException("PIG_OUTPUT_PARTITIONER or PIG_PARTITIONER environment variable not set");

    // we have to do this again here for the check in writeColumnsFromTuple
    if (System.getenv(PIG_USE_SECONDARY) != null)
        usePartitionFilter = Boolean.parseBoolean(System.getenv(PIG_USE_SECONDARY));

    initSchema(storeSignature);
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 43, Source file: CassandraStorage.java

Example 11: CqlBulkRecordWriter

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
CqlBulkRecordWriter(TaskAttemptContext context) throws IOException
{
    this(HadoopCompat.getConfiguration(context));
    this.context = context;
    setConfigs();
}
 
Author: scylladb, Project: scylla-tools-java, Lines: 7, Source file: CqlBulkRecordWriter.java

Example 12: checkOutputSpecs

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
@Override
public void checkOutputSpecs(JobContext context)
{
    checkOutputSpecs(HadoopCompat.getConfiguration(context));
}
 
Author: scylladb, Project: scylla-tools-java, Lines: 6, Source file: CqlBulkOutputFormat.java

Example 13: initialize

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
@Override
public void initialize(InputSplit split, TaskAttemptContext context) throws IOException
{
    this.split = (ColumnFamilySplit) split;
    Configuration conf = HadoopCompat.getConfiguration(context);
    totalRowCount = (this.split.getLength() < Long.MAX_VALUE)
                  ? (int) this.split.getLength()
                  : ConfigHelper.getInputSplitSize(conf);
    cfName = ConfigHelper.getInputColumnFamily(conf);
    keyspace = ConfigHelper.getInputKeyspace(conf);
    partitioner = ConfigHelper.getInputPartitioner(conf);
    inputColumns = CqlConfigHelper.getInputcolumns(conf);
    userDefinedWhereClauses = CqlConfigHelper.getInputWhereClauses(conf);

    try
    {
        if (cluster != null)
            return;

        // create a Cluster instance
        String[] locations = split.getLocations();
        cluster = CqlConfigHelper.getInputCluster(locations, conf);
    }
    catch (Exception e)
    {
        throw new RuntimeException(e);
    }

    if (cluster != null)
        session = cluster.connect(quote(keyspace));

    if (session == null)
      throw new RuntimeException("Can't create connection session");

    //get negotiated serialization protocol
    nativeProtocolVersion = cluster.getConfiguration().getProtocolOptions().getProtocolVersion().toInt();

    // If the user provides a CQL query then we will use it without validation
    // otherwise we will fall back to building a query using the:
    //   inputColumns
    //   whereClauses
    cqlQuery = CqlConfigHelper.getInputCql(conf);
    // validate that the user hasn't tried to give us a custom query along with input columns
    // and where clauses
    if (StringUtils.isNotEmpty(cqlQuery) && (StringUtils.isNotEmpty(inputColumns) ||
                                             StringUtils.isNotEmpty(userDefinedWhereClauses)))
    {
        throw new AssertionError("Cannot define a custom query with input columns and / or where clauses");
    }

    if (StringUtils.isEmpty(cqlQuery))
        cqlQuery = buildQuery();
    logger.trace("cqlQuery {}", cqlQuery);

    rowIterator = new RowIterator();
    logger.trace("created {}", rowIterator);
}
 
Author: scylladb, Project: scylla-tools-java, Lines: 58, Source file: CqlRecordReader.java

Example 14: CqlRecordWriter

import org.apache.cassandra.hadoop.HadoopCompat; // import the required package/class
/**
 * Upon construction, obtain the map that this writer will use to collect
 * mutations, and the ring cache for the given keyspace.
 *
 * @param context the task attempt context
 * @throws IOException
 */
CqlRecordWriter(TaskAttemptContext context) throws IOException
{
    this(HadoopCompat.getConfiguration(context));
    this.context = context;
}
 
Author: vcostet, Project: cassandra-kmean, Lines: 13, Source file: CqlRecordWriter.java


Note: the org.apache.cassandra.hadoop.HadoopCompat class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For distribution and use, refer to the corresponding project's license. Do not reproduce this article without permission.