本文整理汇总了C++中Publisher::send方法的典型用法代码示例。如果您正苦于以下问题:C++ Publisher::send方法的具体用法?C++ Publisher::send怎么用?C++ Publisher::send使用的例子?那么, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类Publisher
的用法示例。
在下文中一共展示了Publisher::send方法的6个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: welcome
void ChatGreeter::welcome(Publisher atPub, const std::string& nodeId, const std::string& subId) {
	// A new subscriber arrived: greet it directly (toSubscriber targets only
	// that subscriber, not the whole channel) with our name and subscriber id.
	Message* hello = Message::toSubscriber(subId);
	hello->putMeta("subscriber", _subId);
	hello->putMeta("participant", _username);
	atPub.send(hello);
	delete hello;
}
示例2: receive
void receive(Message* msg) {
	// Throughput receiver: parse the server's 20-byte header, update the
	// shared per-run counters and periodically publish a report back.
	// The scope lock guards the whole body.
	RScopeLock lock(mutex);
	uint64_t currServerTimeStamp;
	// First 20 bytes of the payload: sequence number, server timestamp,
	// report interval (written by the server with Message::write).
	Message::read(msg->data(), &currSeqNr);
	Message::read(msg->data() + 8, &currServerTimeStamp);
	Message::read(msg->data() + 16, &reportInterval);

	if (currSeqNr < lastSeqNr) {
		// Sequence number went backwards -> the server started a new throughput run.
		// NOTE(review): timeStampServerLast is not reset here, unlike the other
		// receive() variant in this file — presumably fine if server timestamps
		// are monotonic wall-clock ms, but confirm against Thread::getTimeStampMs().
		lastSeqNr = 0;
		timeStampServerFirst = 0;
		currReportNr = 0;
		bytesRcvd = 0;
		pktsRecvd = 0;
		pktsDropped = 0;
	}

	bytesRcvd += msg->size();
	pktsRecvd++;

	if (timeStampServerFirst == 0)
		timeStampServerFirst = currServerTimeStamp;

	// A gap in sequence numbers means packets were lost in between.
	if (lastSeqNr > 0 && lastSeqNr != currSeqNr - 1) {
		pktsDropped += currSeqNr - lastSeqNr;
	}
	lastSeqNr = currSeqNr;

	// Time to send a report back to the server?
	if (currServerTimeStamp - reportInterval >= timeStampServerLast) {
		// The mutex is already held by the scope lock above; the original
		// re-acquired it here (recursively), which was redundant and removed.
		timeStampServerLast = currServerTimeStamp;

		// Distinct name for the outgoing report: the original shadowed the
		// incoming `msg` parameter, which was legal but error-prone.
		Message* report = new Message();
		report->putMeta("bytes.rcvd", toStr(bytesRcvd));
		report->putMeta("pkts.dropped", toStr(pktsDropped));
		report->putMeta("pkts.rcvd", toStr(pktsRecvd));
		report->putMeta("last.seq", toStr(lastSeqNr));
		report->putMeta("report.seq", toStr(currReportNr++));
		report->putMeta("timestamp.server.last", toStr(timeStampServerLast));
		report->putMeta("timestamp.server.first", toStr(timeStampServerFirst));
		report->putMeta("hostname", umundo::Host::getHostname());
		reporter.send(report);
		delete report;

		// Reset the per-interval counters.
		pktsDropped = 0;
		pktsRecvd = 0;
		bytesRcvd = 0;
	}
}
示例3: testDomainReception
void testDomainReception() {
	// Three nodes — two in domain "foo", one in "bar" — all share one
	// publisher and one subscriber on channel "test1"; publish a batch of
	// messages and report how many were received.
	Node* fooNode1 = new Node("foo");
	Node* fooNode2 = new Node("foo");
	Node* barNode = new Node("bar");
	assert(Node::instances == 3);

	Subscriber* sub = new Subscriber("test1", new TestReceiver("test1"));
	Publisher* pub = new Publisher("test1");

	fooNode1->addPublisher(pub);
	fooNode2->addPublisher(pub);
	barNode->addPublisher(pub);

	fooNode1->addSubscriber(sub);
	fooNode2->addSubscriber(sub);
	barNode->addSubscriber(sub);

	char buffer[BUFFER_SIZE];
	for (int i = 0; i < BUFFER_SIZE; i++) {
		// Repeating 0..254 byte pattern. The original wrote `(char)i%255`:
		// the cast binds tighter than `%`, so the modulo applied to the
		// already-truncated char and was effectively a no-op. Parenthesize
		// to get the intended pattern.
		buffer[i] = (char)(i % 255);
	}

	// Plain call first so we still block for a subscriber when asserts are
	// compiled out (NDEBUG); the assert then merely checks the count.
	pub->waitForSubscribers(1);
	assert(pub->waitForSubscribers(1) >= 1);
	Thread::sleepMs(100);

	int iterations = 10; // this has to be less or equal to the high water mark / 3

	receives = 0;
	for (int i = 0; i < iterations; i++) {
		Message* msg = new Message();
		msg->setData(buffer, BUFFER_SIZE);
		msg->setMeta("type", "foo!");
		pub->send(msg);
		delete(msg);
	}

	// Give the transport some time to deliver before counting.
	Thread::sleepMs(200);
	std::cout << "Received " << receives << " messages, expected " << iterations << " messages" << std::endl;
	// assert(receives == iterations); // delivery is timing-dependent, left disabled
	delete(fooNode1);
	delete(fooNode2);
	delete(barNode);
	delete(sub);
	delete(pub);
}
示例4: runAsServer
void runAsServer() {
ThroughputGreeter tpGreeter;
Publisher pub;
switch (type) {
case PUB_RTP: {
PublisherConfigRTP config("throughput.rtp");
config.setTimestampIncrement(166);
pub = Publisher(&config);
pub.setGreeter(&tpGreeter);
break;
}
case PUB_MCAST: {
PublisherConfigMCast config("throughput.mcast");
config.setTimestampIncrement(166);
// config.setPortbase(42142);
pub = Publisher(&config);
pub.setGreeter(&tpGreeter);
break;
}
case PUB_TCP: {
pub = Publisher("throughput.tcp");
pub.setGreeter(&tpGreeter);
break;
}
}
node.addPublisher(pub);
disc.add(node);
timeStampStartedAt = Thread::getTimeStampMs();
if (duration > 0)
duration *= 1000; // convert to ms
pub.waitForSubscribers(waitForSubs);
// reserve 20 bytes for timestamp, sequence number and report interval
size_t dataSize = (std::max)(mtu, (size_t)20);
char* data = (char*)malloc(dataSize);
Message* msg = NULL;
if (useZeroCopy) {
msg = new Message(data, dataSize, doneCallback, (void*)NULL);
} else {
msg = new Message();
}
uint64_t lastReportAt = Thread::getTimeStampMs();
while(1) {
uint64_t now = Thread::getTimeStampMs();
if (duration > 0 && now - timeStampStartedAt > duration)
break;
// first 16 bytes are seqNr and timestamp
Message::write(&data[0], ++currSeqNr);
Message::write(&data[8], now);
Message::write(&data[16], reportInterval);
if (!useZeroCopy) {
msg->setData(data, dataSize);
}
pub.send(msg);
intervalFactor = 1000.0 / (double)reportInterval;
bytesWritten += dataSize;
bytesTotal += dataSize;
packetsWritten++;
// sleep just enough to reach the desired bps
{
RScopeLock lock(mutex);
size_t packetsPerSecond = (std::max)(bytesPerSecond / dataSize, (size_t)1);
delay = (std::max)((1000000) / (packetsPerSecond), (size_t)1);
}
// every report interval we are recalculating bandwith
if (now - lastReportAt > reportInterval) {
RScopeLock lock(mutex);
std::string scaleInfo("--");
// and recalculate bytes to send
if (reports.size() > 0) {
double decreasePressure = 0;
// print report messages
std::map<std::string, Report>::iterator repIter = reports.begin();
// bandwidth is not fixed
if (fixedBytesPerSecond == 0) {
while(repIter != reports.end()) {
const Report& report = repIter->second;
double reportPressure = 0;
// see if we need to decrease bandwidth
if (report.pktsLate > packetsWritten / 2) {
// client is lagging more than half a second, scale back somewhat
reportPressure = (std::max)(1.1, reportPressure);
//.........这里部分代码省略.........
示例5: runAsServer
//.........这里部分代码省略.........
msgDataOffset += toRead;
streamDataOffset += toRead;
// UM_LOG_WARN("%d: %d / %d", streamData.size(), msgDataOffset, streamDataOffset);
if (msgDataOffset == mtu)
break;
if (streamData.size() == streamDataOffset)
streamDataOffset = 0;
}
// UM_LOG_WARN("%d: %s", msg->size(), md5(msg->data(), msg->size()).c_str());
uint64_t now = Thread::getTimeStampMs();
if (duration > 0 && now - timeStampStartedPublishing > duration)
break;
// first 16 bytes are seqNr and timestamp
Message::write(&msg->data()[0], ++currSeqNr);
Message::write(&msg->data()[8], now);
Message::write(&msg->data()[16], reportInterval);
// msg->putMeta("md5", md5(msg->data(), msg->size()));
intervalFactor = 1000.0 / (double)reportInterval;
bytesWritten += msg->size();
bytesTotal += msg->size();
packetsWritten++;
// sleep just enough to reach the desired bps
{
RScopeLock lock(mutex);
size_t packetsPerSecond = (std::max)(bytesPerSecond / msg->size(), (size_t)1);
delay = (std::max)((1000000) / (packetsPerSecond), (size_t)1);
}
// sending with compression will alter msg->size(); not anymore ...
pub.send(msg);
// every report interval we are recalculating bandwith
if (now - lastReportAt > reportInterval) {
RScopeLock lock(mutex);
std::string scaleInfo("--");
// and recalculate bytes to send
if (reports.size() > 0) {
double decreasePressure = 0;
// print report messages
std::map<std::string, Report>::iterator repIter = reports.begin();
// bandwidth is fixed
if (fixedBytesPerSecond == 0) {
while(repIter != reports.end()) {
const Report& report = repIter->second;
double reportPressure = 0;
// see if we need to decrease bandwidth
if (report.pktsLate > packetsWritten / 2) {
// client is lagging more than half a second, scale back somewhat
reportPressure = (std::max)(1.1, reportPressure);
}
if (report.pcntLoss > pcntLossOk) {
// we lost packages, scale back somewhat
reportPressure = (std::max)((double)report.pktsDropped / report.pktsRcvd, reportPressure);
reportPressure = (std::min)(reportPressure, 1.4);
}
if (report.pktsLate > 3 * packetsWritten) {
// queues explode! scale back alot!
reportPressure = (std::max)((double)report.pktsLate / packetsWritten, reportPressure);
示例6: receive
void receive(Message* msg) {
	// Throughput receiver with integrity and compression-ratio tracking:
	// parse the 20-byte header, update the shared per-run counters and
	// periodically publish a report back. The scope lock guards the whole body.
	RScopeLock lock(mutex);

	// Optional integrity check if the sender attached an md5 digest.
	if (msg->getMeta("md5").size() > 0 && msg->getMeta("md5") != md5(msg->data(), msg->size())) {
		UM_LOG_WARN("Corrupted message received");
	}

	uint64_t currServerTimeStamp;
	// First 20 bytes of the payload: sequence number, server timestamp,
	// report interval (written by the server with Message::write).
	Message::read(msg->data(), &currSeqNr);
	Message::read(msg->data() + 8, &currServerTimeStamp);
	Message::read(msg->data() + 16, &reportInterval);

	if (currSeqNr < lastSeqNr) {
		// Sequence number went backwards -> the server started a new throughput run.
		lastSeqNr = 0;
		timeStampServerFirst = 0;
		timeStampServerLast = 0;
		currReportNr = 0;
		bytesRcvd = 0;
		pktsRecvd = 0;
		pktsDropped = 0;
	}

	bytesRcvd += msg->size();
	pktsRecvd++;

	if (timeStampServerFirst == 0)
		timeStampServerFirst = currServerTimeStamp;

	// A gap in sequence numbers means packets were lost in between.
	if (lastSeqNr > 0 && lastSeqNr != currSeqNr - 1) {
		pktsDropped += currSeqNr - lastSeqNr;
	}
	lastSeqNr = currSeqNr;

	// Accumulate compression ratios; count 100 (uncompressed) when the
	// transport did not attach the respective meta field.
	compressRatioHead += (msg->getMeta().find("um.compressRatio.head") != msg->getMeta().end() ? strTo<double>(msg->getMeta("um.compressRatio.head")) : 100);
	compressRatioPayload += (msg->getMeta().find("um.compressRatio.payload") != msg->getMeta().end() ? strTo<double>(msg->getMeta("um.compressRatio.payload")) : 100);

	// may happen when initializing timeStampServerLast in client
	if (timeStampServerLast > currServerTimeStamp)
		timeStampServerLast = currServerTimeStamp;

	// Time to send a report back to the server?
	if (currServerTimeStamp - reportInterval >= timeStampServerLast) {
		// The mutex is already held by the scope lock above; the original
		// re-acquired it here (recursively), which was redundant and removed.
		timeStampServerLast = currServerTimeStamp;

		// Distinct name for the outgoing report: the original shadowed the
		// incoming `msg` parameter, which was legal but error-prone.
		Message* report = new Message();
		report->putMeta("bytes.rcvd", toStr(bytesRcvd));
		report->putMeta("pkts.dropped", toStr(pktsDropped));
		// pktsRecvd >= 1 here (incremented above), so the division is safe.
		report->putMeta("compress.ratio.head", toStr(compressRatioHead / (double)pktsRecvd));
		report->putMeta("compress.ratio.payload", toStr(compressRatioPayload / (double)pktsRecvd));
		report->putMeta("pkts.rcvd", toStr(pktsRecvd));
		report->putMeta("last.seq", toStr(lastSeqNr));
		report->putMeta("report.seq", toStr(currReportNr++));
		report->putMeta("timestamp.server.last", toStr(timeStampServerLast));
		report->putMeta("timestamp.server.first", toStr(timeStampServerFirst));
		report->putMeta("hostname", umundo::Host::getHostname());
		reporter.send(report);
		delete report;

		// Reset the per-interval counters.
		pktsDropped = 0;
		pktsRecvd = 0;
		bytesRcvd = 0;
		compressRatioHead = 0;
		compressRatioPayload = 0;
	}
}