本文整理汇总了Java中org.apache.cassandra.streaming.StreamState类的典型用法代码示例。如果您正苦于以下问题:Java StreamState类的具体用法?Java StreamState怎么用?Java StreamState使用的例子?那么恭喜您, 这里精选的类代码示例或许可以为您提供帮助。
StreamState类属于org.apache.cassandra.streaming包,在下文中一共展示了StreamState类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: fromCompositeData
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
/**
 * Reconstructs a {@link StreamState} from its JMX {@code CompositeData} form.
 * Inverse of {@code toCompositeData}: unpacks the plan id, description and the
 * per-session composites in the order defined by {@code ITEM_NAMES}.
 *
 * @param cd composite data previously produced by {@code toCompositeData}
 * @return the rebuilt StreamState
 */
public static StreamState fromCompositeData(CompositeData cd)
{
    assert cd.getCompositeType().equals(COMPOSITE_TYPE);
    Object[] fields = cd.getAll(ITEM_NAMES);
    UUID plan = UUID.fromString((String) fields[0]);
    String desc = (String) fields[1];
    CompositeData[] sessionData = (CompositeData[]) fields[2];
    // Convert each session composite back into a SessionInfo.
    Set<SessionInfo> sessions =
        Sets.newHashSet(Iterables.transform(Arrays.asList(sessionData),
                                            input -> SessionInfoCompositeData.fromCompositeData(input)));
    return new StreamState(plan, desc, sessions);
}
示例2: close
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
/**
 * Closes the SSTable writer and streams the generated tables to the cluster,
 * emitting a Hadoop progress heartbeat while waiting so the task is not killed
 * as idle. Throws IOException if streaming fails or too many hosts fail.
 */
private void close() throws IOException {
    if (writer != null) {
        writer.close();
        Future<StreamState> future = loader.stream();
        while (true) {
            try {
                // Poll in 1s slices so we can heartbeat between attempts.
                future.get(1000, TimeUnit.MILLISECONDS);
                break;
            } catch (ExecutionException ee0) {
                // A failed future rethrows the same ExecutionException on every
                // get(); retrying would loop forever, so surface the failure.
                throw new IOException(ee0);
            } catch (TimeoutException te) {
                progress.progress();
            } catch (InterruptedException e) {
                // Restore the interrupt flag before converting to IOException.
                Thread.currentThread().interrupt();
                throw new IOException(e);
            }
        }
        if (loader.getFailedHosts().size() > 0) {
            if (loader.getFailedHosts().size() > maxFailures) {
                throw new IOException("Too many hosts failed: " + loader.getFailedHosts());
            } else {
                logger.warn("Some hosts failed: " + loader.getFailedHosts());
            }
        }
    }
}
示例3: toCompositeData
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
/**
 * Converts a {@link StreamState} into JMX {@code CompositeData}: plan id,
 * description, per-session composites, and aggregate rx/tx byte counters with
 * completion percentages.
 *
 * @param streamState the stream state to expose over JMX
 * @return composite data keyed by {@code ITEM_NAMES}
 */
public static CompositeData toCompositeData(final StreamState streamState) {
    Map<String, Object> valueMap = new HashMap<>();
    valueMap.put(ITEM_NAMES[0], streamState.planId.toString());
    valueMap.put(ITEM_NAMES[1], streamState.description);
    CompositeData[] sessions = new CompositeData[streamState.sessions.size()];
    Lists.newArrayList(Iterables.transform(streamState.sessions,
            input -> SessionInfoCompositeData.toCompositeData(streamState.planId, input))).toArray(sessions);
    valueMap.put(ITEM_NAMES[2], sessions);
    // Aggregate byte counters across all sessions.
    long currentRxBytes = 0;
    long totalRxBytes = 0;
    long currentTxBytes = 0;
    long totalTxBytes = 0;
    for (SessionInfo sessInfo : streamState.sessions) {
        currentRxBytes += sessInfo.getTotalSizeReceived();
        totalRxBytes += sessInfo.getTotalSizeToReceive();
        currentTxBytes += sessInfo.getTotalSizeSent();
        totalTxBytes += sessInfo.getTotalSizeToSend();
    }
    // Use floating-point arithmetic: the former long division (x * 100L / y)
    // truncated the fractional part of the percentage before widening to double.
    // Nothing to transfer counts as 100% complete.
    double rxPercentage = (totalRxBytes == 0 ? 100.0 : currentRxBytes * 100.0 / totalRxBytes);
    double txPercentage = (totalTxBytes == 0 ? 100.0 : currentTxBytes * 100.0 / totalTxBytes);
    valueMap.put(ITEM_NAMES[3], currentRxBytes);
    valueMap.put(ITEM_NAMES[4], totalRxBytes);
    valueMap.put(ITEM_NAMES[5], rxPercentage);
    valueMap.put(ITEM_NAMES[6], currentTxBytes);
    valueMap.put(ITEM_NAMES[7], totalTxBytes);
    valueMap.put(ITEM_NAMES[8], txPercentage);
    try {
        return new CompositeDataSupport(COMPOSITE_TYPE, valueMap);
    } catch (OpenDataException e) {
        throw Throwables.propagate(e);
    }
}
示例4: fromCompositeData
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
/**
 * Rebuilds a {@link StreamState} from the {@code CompositeData} produced by
 * {@code toCompositeData}: values are unpacked in {@code ITEM_NAMES} order.
 *
 * @param cd composite data to decode
 * @return the reconstructed StreamState
 */
public static StreamState fromCompositeData(CompositeData cd) {
    assert cd.getCompositeType().equals(COMPOSITE_TYPE);
    Object[] fields = cd.getAll(ITEM_NAMES);
    UUID plan = UUID.fromString((String) fields[0]);
    String desc = (String) fields[1];
    CompositeData[] sessionData = (CompositeData[]) fields[2];
    Set<SessionInfo> sessions =
        Sets.newHashSet(Iterables.transform(Arrays.asList(sessionData),
                                            SessionInfoCompositeData::fromCompositeData));
    return new StreamState(plan, desc, sessions);
}
示例5: prepareWriter
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
// Lazily initializes the CQLSSTableWriter and the SSTableLoader used by this
// record writer. Safe to call repeatedly: each is only built once.
// Any failure during setup is wrapped in an IOException.
private void prepareWriter() throws IOException
{
    try
    {
        if (writer == null)
        {
            // Build the SSTable writer from the configured schema/statement;
            // buffer size defaults to 64 MB unless overridden via BUFFER_SIZE_IN_MB.
            writer = CQLSSTableWriter.builder()
                     .forTable(schema)
                     .using(insertStatement)
                     .withPartitioner(ConfigHelper.getOutputPartitioner(conf))
                     .inDirectory(outputDir)
                     .withBufferSizeInMB(Integer.parseInt(conf.get(BUFFER_SIZE_IN_MB, "64")))
                     .build();
        }
        if (loader == null)
        {
            ExternalClient externalClient = new ExternalClient(conf);
            externalClient.addKnownCfs(keyspace, schema);
            // On successful streaming, optionally delete the local SSTable
            // directory (controlled by the deleteSrc flag).
            this.loader = new SSTableLoader(outputDir, externalClient, new BulkRecordWriter.NullOutputHandler()) {
                @Override
                public void onSuccess(StreamState finalState)
                {
                    if (deleteSrc)
                        FileUtils.deleteRecursive(outputDir);
                }
            };
        }
    }
    catch (Exception e)
    {
        throw new IOException(e);
    }
}
示例6: close
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
/**
 * Closes the SSTable writer and streams the generated tables to the cluster.
 * Reports liveness to Hadoop (via progress and/or context) while waiting.
 * Throws IOException if streaming fails or more than maxFailures hosts fail.
 */
private void close() throws IOException
{
    if (writer != null)
    {
        writer.close();
        Future<StreamState> future = loader.stream();
        while (true)
        {
            try
            {
                // Wait in 1s slices so we can heartbeat between polls.
                future.get(1000, TimeUnit.MILLISECONDS);
                break;
            }
            catch (ExecutionException ee)
            {
                // A failed future rethrows the same ExecutionException on every
                // get(); retrying would busy-loop forever, so fail fast instead.
                throw new IOException(ee);
            }
            catch (TimeoutException te)
            {
                if (null != progress)
                    progress.progress();
                if (null != context)
                    HadoopCompat.progress(context);
            }
            catch (InterruptedException e)
            {
                // Restore the interrupt flag before converting to IOException.
                Thread.currentThread().interrupt();
                throw new IOException(e);
            }
        }
        if (loader.getFailedHosts().size() > 0)
        {
            if (loader.getFailedHosts().size() > maxFailures)
                throw new IOException("Too many hosts failed: " + loader.getFailedHosts());
            else
                logger.warn("Some hosts failed: {}", loader.getFailedHosts());
        }
    }
}
示例7: streamHints
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
/**
 * Streams all stored hints to the closest live peer (as ranked by the snitch)
 * before this node leaves the ring.
 *
 * @return a future for the hint transfer, or an immediate null future when no
 *         live destination exists
 */
private Future<StreamState> streamHints()
{
    // StreamPlan will not fail if there are zero files to transfer, so flush anyway (need to get any in-memory hints, as well)
    ColumnFamilyStore hintsCF = Keyspace.open(Keyspace.SYSTEM_KS).getColumnFamilyStore(SystemKeyspace.HINTS_CF);
    FBUtilities.waitOnFuture(hintsCF.forceFlush());

    // Candidates: every endpoint that will still be in the ring after all
    // leaving nodes are gone, except ourselves and anything marked dead.
    List<InetAddress> candidates =
        new ArrayList<>(StorageService.instance.getTokenMetadata().cloneAfterAllLeft().getAllEndpoints());
    candidates.remove(FBUtilities.getBroadcastAddress());
    candidates.removeIf(endpoint -> !FailureDetector.instance.isAlive(endpoint));

    if (candidates.isEmpty())
    {
        logger.warn("Unable to stream hints since no live endpoints seen");
        return Futures.immediateFuture(null);
    }

    // Stream to the closest peer as chosen by the snitch.
    DatabaseDescriptor.getEndpointSnitch().sortByProximity(FBUtilities.getBroadcastAddress(), candidates);
    InetAddress destination = candidates.get(0);
    InetAddress preferred = SystemKeyspace.getPreferredIP(destination);

    // Stream all hints -- the range list is a singleton covering the entire ring.
    Token token = StorageService.getPartitioner().getMinimumToken();
    List<Range<Token>> ranges = Collections.singletonList(new Range<>(token, token));
    return new StreamPlan("Hints").transferRanges(destination,
                                                  preferred,
                                                  Keyspace.SYSTEM_KS,
                                                  ranges,
                                                  SystemKeyspace.HINTS_CF)
                                  .execute();
}
示例8: getStreamStatus
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
/**
 * Returns the state of every currently active stream, decoded from the JMX
 * proxy's CompositeData representation.
 *
 * @return a set of StreamState objects, one per active stream plan
 */
public Set<StreamState> getStreamStatus()
{
    return Sets.newHashSet(Iterables.transform(streamProxy.getCurrentStreams(),
                                               StreamStateCompositeData::fromCompositeData));
}
示例9: close
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
/**
 * Closes the SSTable writer and streams the generated tables to the cluster,
 * heartbeating progress while waiting. Throws IOException if streaming fails
 * or more than maxFailures hosts fail.
 */
private void close() throws IOException
{
    if (writer != null)
    {
        writer.close();
        Future<StreamState> future = loader.stream();
        while (true)
        {
            try
            {
                // Wait in 1s slices so we can heartbeat between polls.
                future.get(1000, TimeUnit.MILLISECONDS);
                break;
            }
            catch (ExecutionException ee)
            {
                // A failed future rethrows the same ExecutionException on every
                // get(); retrying would busy-loop forever, so fail fast instead.
                throw new IOException(ee);
            }
            catch (TimeoutException te)
            {
                progress.progress();
            }
            catch (InterruptedException e)
            {
                // Restore the interrupt flag before converting to IOException.
                Thread.currentThread().interrupt();
                throw new IOException(e);
            }
        }
        if (loader.getFailedHosts().size() > 0)
        {
            if (loader.getFailedHosts().size() > maxFailures)
                throw new IOException("Too many hosts failed: " + loader.getFailedHosts());
            else
                logger.warn("Some hosts failed: " + loader.getFailedHosts());
        }
    }
}
示例10: close
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
/**
 * Closes the SSTable writer and streams the generated tables to the cluster,
 * skipping the configured ignored endpoints. Reports liveness to Hadoop while
 * waiting. Throws IOException if streaming fails or too many hosts fail.
 */
private void close() throws IOException
{
    if (writer != null)
    {
        writer.close();
        Future<StreamState> future = loader.stream(ignores);
        while (true)
        {
            try
            {
                // Wait in 1s slices so we can heartbeat between polls.
                future.get(1000, TimeUnit.MILLISECONDS);
                break;
            }
            catch (ExecutionException ee)
            {
                // A failed future rethrows the same ExecutionException on every
                // get(); retrying would busy-loop forever, so fail fast instead.
                throw new IOException(ee);
            }
            catch (TimeoutException te)
            {
                if (null != progress)
                    progress.progress();
                if (null != context)
                    HadoopCompat.progress(context);
            }
            catch (InterruptedException e)
            {
                // Restore the interrupt flag before converting to IOException.
                Thread.currentThread().interrupt();
                throw new IOException(e);
            }
        }
        if (loader.getFailedHosts().size() > 0)
        {
            if (loader.getFailedHosts().size() > maxFailures)
                throw new IOException("Too many hosts failed: " + loader.getFailedHosts());
            else
                logger.warn("Some hosts failed: {}", loader.getFailedHosts());
        }
    }
}
示例11: onSuccess
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
/**
 * Callback invoked when the sync stream between the two endpoints completes
 * successfully: logs and traces the completion, then fulfils this task's
 * result with the accumulated stats.
 */
public void onSuccess(StreamState result)
{
    String completionMsg = String.format("Sync complete using session %s between %s and %s on %s",
                                         desc.sessionId, r1.endpoint, r2.endpoint, desc.columnFamily);
    logger.info("[repair #{}] {}", desc.sessionId, completionMsg);
    Tracing.traceRepair(completionMsg);
    set(stat);
}
示例12: close
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
/**
 * Closes the SSTable writer and streams the generated tables to the cluster,
 * heartbeating progress while waiting. Throws IOException if streaming fails
 * or more than maxFailures hosts fail.
 */
private void close() throws IOException
{
    if (writer != null)
    {
        writer.close();
        Future<StreamState> future = loader.stream();
        while (true)
        {
            try
            {
                // Wait in 1s slices so we can heartbeat between polls.
                future.get(1000, TimeUnit.MILLISECONDS);
                break;
            }
            catch (ExecutionException ee)
            {
                // A failed future rethrows the same ExecutionException on every
                // get(); retrying would busy-loop forever, so fail fast instead.
                throw new IOException(ee);
            }
            catch (TimeoutException te)
            {
                progress.progress();
            }
            catch (InterruptedException e)
            {
                // Restore the interrupt flag before converting to IOException.
                Thread.currentThread().interrupt();
                throw new IOException(e);
            }
        }
        if (loader.getFailedHosts().size() > 0)
        {
            if (loader.getFailedHosts().size() > maxFailures)
                throw new IOException("Too many hosts failed: " + loader.getFailedHosts());
            else
                logger.warn("Some hosts failed: {}", loader.getFailedHosts());
        }
    }
}
示例13: close
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
/**
 * Streams the SSTables built by this writer to the cluster, keeping the task
 * heartbeat running for the (potentially long) duration of the transfer.
 * Throws RuntimeException if the stream itself fails.
 */
private void close() throws IOException {
    LOG.info("SSTables built. Now starting streaming");
    heartbeat.startHeartbeat();
    try {
        if (writer == null) {
            LOG.info("SSTableWriter wasn't instantiated, no streaming happened.");
        } else {
            writer.close();
            Future<StreamState> streamFuture =
                loader.stream(Collections.<InetAddress>emptySet(), new ProgressIndicator());
            try {
                // Block (immune to interrupts) until all sessions complete.
                StreamState finalState = Uninterruptibles.getUninterruptibly(streamFuture);
                if (finalState.hasFailedSession()) {
                    LOG.warn("Some streaming sessions failed");
                } else {
                    LOG.info("Streaming finished successfully");
                }
            } catch (ExecutionException e) {
                throw new RuntimeException("Streaming to the following hosts failed: " +
                    loader.getFailedHosts(), e);
            }
        }
    } finally {
        // Always stop the heartbeat, even when streaming blew up.
        heartbeat.stopHeartbeat();
    }
    LOG.info("Successfully closed bulk record writer");
}
示例14: stream
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
// Executes the prepared StreamPlan asynchronously and returns the future for
// its final StreamState.
public Future<StreamState> stream()
{
    return streamPlan.execute();
}
示例15: toCompositeData
import org.apache.cassandra.streaming.StreamState; //导入依赖的package包/类
/**
 * Converts a {@link StreamState} into JMX {@code CompositeData}: plan id,
 * description, per-session composites, and aggregate rx/tx byte counters with
 * completion percentages.
 *
 * @param streamState the stream state to expose over JMX
 * @return composite data keyed by {@code ITEM_NAMES}
 */
public static CompositeData toCompositeData(final StreamState streamState)
{
    Map<String, Object> valueMap = new HashMap<>();
    valueMap.put(ITEM_NAMES[0], streamState.planId.toString());
    valueMap.put(ITEM_NAMES[1], streamState.description);
    CompositeData[] sessions = new CompositeData[streamState.sessions.size()];
    Lists.newArrayList(Iterables.transform(streamState.sessions, new Function<SessionInfo, CompositeData>()
    {
        public CompositeData apply(SessionInfo input)
        {
            return SessionInfoCompositeData.toCompositeData(streamState.planId, input);
        }
    })).toArray(sessions);
    valueMap.put(ITEM_NAMES[2], sessions);
    // Aggregate byte counters across all sessions.
    long currentRxBytes = 0;
    long totalRxBytes = 0;
    long currentTxBytes = 0;
    long totalTxBytes = 0;
    for (SessionInfo sessInfo : streamState.sessions)
    {
        currentRxBytes += sessInfo.getTotalSizeReceived();
        totalRxBytes += sessInfo.getTotalSizeToReceive();
        currentTxBytes += sessInfo.getTotalSizeSent();
        totalTxBytes += sessInfo.getTotalSizeToSend();
    }
    // Use floating-point arithmetic: the former long division (x * 100L / y)
    // truncated the fractional part of the percentage before widening to double.
    // Nothing to transfer counts as 100% complete.
    double rxPercentage = (totalRxBytes == 0 ? 100.0 : currentRxBytes * 100.0 / totalRxBytes);
    double txPercentage = (totalTxBytes == 0 ? 100.0 : currentTxBytes * 100.0 / totalTxBytes);
    valueMap.put(ITEM_NAMES[3], currentRxBytes);
    valueMap.put(ITEM_NAMES[4], totalRxBytes);
    valueMap.put(ITEM_NAMES[5], rxPercentage);
    valueMap.put(ITEM_NAMES[6], currentTxBytes);
    valueMap.put(ITEM_NAMES[7], totalTxBytes);
    valueMap.put(ITEM_NAMES[8], txPercentage);
    try
    {
        return new CompositeDataSupport(COMPOSITE_TYPE, valueMap);
    }
    catch (OpenDataException e)
    {
        throw Throwables.propagate(e);
    }
}