本文整理汇总了Java中org.apache.cassandra.hadoop.HadoopCompat.progress方法的典型用法代码示例。如果您正苦于以下问题:Java HadoopCompat.progress方法的具体用法?Java HadoopCompat.progress怎么用?Java HadoopCompat.progress使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类org.apache.cassandra.hadoop.HadoopCompat
的用法示例。
在下文中一共展示了HadoopCompat.progress方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: write
import org.apache.cassandra.hadoop.HadoopCompat; //导入方法依赖的package包/类
/**
 * <p>
 * The column values must correspond to the order in which
 * they appear in the insert stored procedure.
 *
 * Key is not used, so it can be null or any object.
 * </p>
 *
 * @param key
 *            any object or null; only used for error reporting.
 * @param values
 *            the values to write.
 * @throws IOException
 *             if the underlying SSTable writer rejects the row.
 */
@Override
public void write(Object key, List<ByteBuffer> values) throws IOException
{
    prepareWriter();
    try
    {
        ((CQLSSTableWriter) writer).rawAddRow(values);
        // Report liveness through both the old-API Progressable and the
        // new-API context (via HadoopCompat) so the Hadoop framework does
        // not kill the task as unresponsive during long writes.
        if (null != progress)
            progress.progress();
        if (null != context)
            HadoopCompat.progress(context);
    }
    catch (InvalidRequestException e)
    {
        // Wrap with the offending key for context; cause is preserved.
        throw new IOException("Error adding row with key: " + key, e);
    }
}
示例2: write
import org.apache.cassandra.hadoop.HadoopCompat; //导入方法依赖的package包/类
/**
 * <p>
 * Writes one row. The values must match, in order, the columns of the
 * insert statement used by the writer.
 *
 * The key is ignored apart from error reporting and may be {@code null}.
 * </p>
 *
 * @param key
 *            any object or null.
 * @param values
 *            the values to write.
 * @throws IOException
 *             if the row cannot be added.
 */
@Override
public void write(Object key, List<ByteBuffer> values) throws IOException {
    prepareWriter();
    try {
        ((CQLSSTableWriter) writer).rawAddRow(values);
    } catch (InvalidRequestException e) {
        throw new IOException("Error adding row with key: " + key, e);
    }
    // Row accepted: signal liveness through both progress APIs.
    if (progress != null) {
        progress.progress();
    }
    if (context != null) {
        HadoopCompat.progress(context);
    }
}
示例3: write
import org.apache.cassandra.hadoop.HadoopCompat; //导入方法依赖的package包/类
/**
 * Writes one CQL record to the SSTable writer; the key is ignored.
 *
 * @param ignoredKey
 *            unused.
 * @param record
 *            the record whose values are written.
 */
@Override
public void write(final ByteBuffer ignoredKey, final CQLRecord record) {
    prepareWriter();
    // Deep-copy each value: Crunch reuses its objects, while the
    // CQLSSTableWriter may retain the buffers past this call.
    List<ByteBuffer> bb = Lists.newArrayList();
    for (ByteBuffer v : record.getValues()) {
        bb.add(ByteBufferUtil.clone(v));
    }
    try {
        ((CQLSSTableWriter) writer).rawAddRow(bb);
        if (null != progress)
            progress.progress();
        if (null != context)
            HadoopCompat.progress(context);
    } catch (InvalidRequestException | IOException e) {
        // Log the full throwable and propagate it as the cause so the
        // stack trace is not lost (previously only getMessage() survived).
        LOG.error("Error adding row", e);
        throw new CrunchRuntimeException("Error adding row : " + e.getMessage(), e);
    }
}
示例4: close
import org.apache.cassandra.hadoop.HadoopCompat; //导入方法依赖的package包/类
/**
 * Closes the SSTable writer and streams the generated SSTables to the
 * cluster, blocking until streaming completes.
 *
 * @throws IOException
 *             if streaming fails, is interrupted, or too many hosts fail.
 */
private void close() throws IOException
{
    if (writer != null)
    {
        writer.close();
        Future<StreamState> future = loader.stream(ignores);
        while (true)
        {
            try
            {
                // Poll in 1s slices so progress can be reported while waiting.
                future.get(1000, TimeUnit.MILLISECONDS);
                break;
            }
            catch (TimeoutException te)
            {
                // Still streaming: ping both progress APIs so the Hadoop
                // framework does not kill the task as unresponsive.
                if (null != progress)
                    progress.progress();
                if (null != context)
                    HadoopCompat.progress(context);
            }
            catch (ExecutionException e)
            {
                // Streaming failed. A failed future rethrows the same
                // ExecutionException on every get(), so retrying (as the
                // old code did) spun forever; surface the failure instead.
                throw new IOException("Error streaming SSTables", e);
            }
            catch (InterruptedException e)
            {
                // Restore the interrupt flag before converting to IOException.
                Thread.currentThread().interrupt();
                throw new IOException(e);
            }
        }
        if (loader.getFailedHosts().size() > 0)
        {
            if (loader.getFailedHosts().size() > maxFailures)
                throw new IOException("Too many hosts failed: " + loader.getFailedHosts());
            else
                logger.warn("Some hosts failed: {}", loader.getFailedHosts());
        }
    }
}