本文整理汇总了C++中RtpHeader类的典型用法代码示例。如果您正苦于以下问题:C++ RtpHeader类的具体用法?C++ RtpHeader怎么用?C++ RtpHeader使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了RtpHeader类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: ELOG_DEBUG
// Rewrites the SSRC field of an outgoing packet in place.
// buf/len: the serialized packet bytes; ssrc: new SSRC in host byte order.
// Handles both plain RTP (single SSRC in the fixed header) and compound RTCP
// (one SSRC per sub-packet).
void WebRtcConnection::writeSsrc(char* buf, int len, unsigned int ssrc) {
ELOG_DEBUG("LEN %d", len);
RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf);
// If it is RTCP we check whether it is a compound packet and walk every sub-packet.
if (chead->isRtcp()) {
char* movingBuf = buf;
int rtcpLength = 0;
int totalLength = 0;
do{
movingBuf+=rtcpLength;
// NOTE: deliberately shadows the outer chead — this one points at the
// current sub-packet inside the compound RTCP packet.
RtcpHeader *chead= reinterpret_cast<RtcpHeader*>(movingBuf);
// RTCP length field counts 32-bit words minus one (RFC 3550), hence (+1)*4 bytes.
rtcpLength= (ntohs(chead->length)+1)*4;
totalLength+= rtcpLength;
ELOG_DEBUG("Is RTCP, prev SSRC %u, new %u, len %d ", chead->getSSRC(), ssrc, rtcpLength);
chead->ssrc=htonl(ssrc);
// Payload-specific feedback with fmt == 4 is a FIR; respond by sending a PLI.
if (chead->packettype == RTCP_PS_Feedback_PT){
FirHeader *thefir = reinterpret_cast<FirHeader*>(movingBuf);
if (thefir->fmt == 4){ // It is a FIR Packet, we generate it
this->sendPLI();
}
}
} while(totalLength<len);
} else {
// Plain RTP: single SSRC in the fixed header.
head->setSSRC(ssrc);
}
}
示例2: ELOG_DEBUG
// Strips the RTP header from a VP8 packet and copies the VP8 payload into outBuff.
// Returns the payload length in bytes, or -1 on error (unpackager not ready,
// unexpected payload type, or parse failure). *gotFrame is set to 1 when this
// packet carries the RTP marker bit (last packet of a video frame).
int InputProcessor::unpackageVideo(unsigned char* inBuff, int inBuffLen, unsigned char* outBuff, int* gotFrame) {
  if (videoUnpackager == 0) {
    ELOG_DEBUG("Unpackager not correctly initialized");
    return -1;
  }
  int inBuffOffset = 0;
  *gotFrame = 0;
  RtpHeader* head = reinterpret_cast<RtpHeader*>(inBuff);
  // 100 is the dynamic payload type this pipeline uses for VP8 — TODO confirm
  // against the negotiated SDP mapping.
  if (head->getPayloadType() != 100) {
    return -1;
  }
  int l = inBuffLen - head->getHeaderLength();
  inBuffOffset += head->getHeaderLength();
  erizo::RTPPayloadVP8* parsed = pars.parseVP8((unsigned char*) &inBuff[inBuffOffset], l);
  // Defensive: the original dereferenced the parse result unconditionally.
  if (parsed == nullptr) {
    return -1;
  }
  memcpy(outBuff, parsed->data, parsed->dataLength);
  if (head->getMarker()) {
    *gotFrame = 1;
  }
  int ret = parsed->dataLength;
  delete parsed;
  return ret;
}
示例3: if
// Entry point for packets arriving from a transport. Copies the packet, tags it
// with the transport's media type, and hands it to the pipeline on the worker
// thread (SSRC-based reclassification happens there, off the transport thread).
void MediaStream::onTransportData(std::shared_ptr<DataPacket> incoming_packet, Transport *transport) {
if ((audio_sink_ == nullptr && video_sink_ == nullptr && fb_sink_ == nullptr)) {
return;
}
// Copy so the async task owns its own packet, independent of the caller's.
std::shared_ptr<DataPacket> packet = std::make_shared<DataPacket>(*incoming_packet);
if (transport->mediaType == AUDIO_TYPE) {
packet->type = AUDIO_PACKET;
} else if (transport->mediaType == VIDEO_TYPE) {
packet->type = VIDEO_PACKET;
}
// Keep the stream alive until the queued task runs.
auto stream_ptr = shared_from_this();
worker_->task([stream_ptr, packet]{
if (!stream_ptr->pipeline_initialized_) {
ELOG_DEBUG("%s message: Pipeline not initialized yet.", stream_ptr->toLog());
return;
}
char* buf = packet->data;
RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf);
// For plain RTP the SSRC is authoritative for audio/video classification,
// overriding the transport-derived type set above.
if (!chead->isRtcp()) {
uint32_t recvSSRC = head->getSSRC();
if (stream_ptr->isVideoSourceSSRC(recvSSRC)) {
packet->type = VIDEO_PACKET;
} else if (stream_ptr->isAudioSourceSSRC(recvSSRC)) {
packet->type = AUDIO_PACKET;
}
}
stream_ptr->pipeline_->read(std::move(packet));
});
}
示例4: main
// Small self-test: builds a fake RTP header in network byte order in a local
// buffer, then dumps every RtpHeader accessor to stdout.
int main(int argc, const char * argv[]) {
  // data setup
  uint32_t first = 0xFFFF | 0x1FFFFFFF;  // fills the version/flags/PT/seq word
  uint32_t timestamp = 0x0128;
  // NOTE(review): 0x256 is a hex literal (598 decimal), not 256 — looks like
  // arbitrary test data, but confirm 0x100 was not intended.
  uint32_t ssrc = 0x01 | 0x02 | 0x04 | 0x08 | 0x256;
  int somedata[4];
  // The htonl() function converts the unsigned integer hostlong from host byte order to network byte order.
  // on the other side:
  // The ntohl() function converts the unsigned integer netlong from network byte order to host byte order.
  // RTP fields are big-endian on the wire, so store each word converted.
  somedata[0] = htonl(first);
  somedata[1] = htonl(timestamp);
  somedata[2] = htonl(ssrc);  // (a redundant duplicate assignment was removed here)
  somedata[3] = htonl(ssrc);
  RtpHeader* head = reinterpret_cast<RtpHeader*>(somedata);
  printf("version: %" PRIu8 "\n", head->getVersion());
  printf("padding: %" PRIu8 "\n", head->hasPadding());
  printf("extension: %" PRIu8 "\n", head->getExtension());
  printf("marker: %" PRIu8 "\n", head->getMarker());
  printf("payload type: %" PRIu8 "\n", head->getPayloadType());
  printf("sequence number: %" PRIu16 "\n", head->getSeqNumber());
  printf("timestamp %" PRIu32 "\n", head->getTimestamp());
  printf("ssrc %" PRIu32 "\n", head->getSSRC());
  printf("header length: %u\n", head->getHeaderLength());
  return 0;
}
示例5: writeVideoData
// Feeds one RTP video packet toward the recorder. On a sequence-number
// discontinuity the depacketizer is reset so it re-syncs at the next frame
// start; only VP8/H264 payloads are actually written.
void ExternalOutput::writeVideoData(char* buf, int len) {
  RtpHeader* head = reinterpret_cast<RtpHeader*>(buf);
  uint16_t current_video_sequence_number = head->getSeqNumber();
  // Compute the expected successor in 16-bit arithmetic so the legitimate
  // 65535 -> 0 sequence wrap is not misreported as a discontinuity (the
  // original int addition produced 65536, which never matched).
  uint16_t expected_sequence_number = static_cast<uint16_t>(last_video_sequence_number_ + 1);
  if (current_video_sequence_number != expected_sequence_number) {
    // Something screwy. We should always see sequence numbers incrementing monotonically.
    ELOG_DEBUG("Unexpected video sequence number; current %d, previous %d",
               current_video_sequence_number, last_video_sequence_number_);
    // Restart the depacketizer so it looks for the start of a frame
    if (depacketizer_ != nullptr) {
      depacketizer_->reset();
    }
  }
  last_video_sequence_number_ = current_video_sequence_number;
  // Remember the timestamp of the first packet so later timestamps can be
  // normalized relative to it (-1 is the "unset" sentinel).
  if (first_video_timestamp_ == -1) {
    first_video_timestamp_ = head->getTimestamp();
  }
  auto map_iterator = video_maps_.find(head->getPayloadType());
  if (map_iterator != video_maps_.end()) {
    updateVideoCodec(map_iterator->second);
    if (map_iterator->second.encoding_name == "VP8" || map_iterator->second.encoding_name == "H264") {
      maybeWriteVideoPacket(buf, len);
    }
  }
}
示例6: if
void MediaStream::onTransportData(std::shared_ptr<DataPacket> packet, Transport *transport) {
if ((audio_sink_ == nullptr && video_sink_ == nullptr && fb_sink_ == nullptr)) {
return;
}
if (transport->mediaType == AUDIO_TYPE) {
packet->type = AUDIO_PACKET;
} else if (transport->mediaType == VIDEO_TYPE) {
packet->type = VIDEO_PACKET;
}
char* buf = packet->data;
RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf);
if (!chead->isRtcp()) {
uint32_t recvSSRC = head->getSSRC();
if (isVideoSourceSSRC(recvSSRC)) {
packet->type = VIDEO_PACKET;
} else if (isAudioSourceSSRC(recvSSRC)) {
packet->type = AUDIO_PACKET;
}
}
if (!pipeline_initialized_) {
ELOG_DEBUG("%s message: Pipeline not initialized yet.", toLog());
return;
}
pipeline_->read(std::move(packet));
}
示例7: deliverVideoData_
int ExternalOutput::deliverVideoData_(char* buf, int len) {
if (videoSourceSsrc_ == 0) {
RtpHeader* h = reinterpret_cast<RtpHeader*>(buf);
videoSourceSsrc_ = h->getSSRC();
}
this->queueData(buf, len, VIDEO_PACKET);
return 0;
}
示例8: deliverVideoData_
int ExternalOutput::deliverVideoData_(std::shared_ptr<dataPacket> video_packet) {
std::shared_ptr<dataPacket> copied_packet = std::make_shared<dataPacket>(*video_packet);
if (videoSourceSsrc_ == 0) {
RtpHeader* h = reinterpret_cast<RtpHeader*>(copied_packet->data);
videoSourceSsrc_ = h->getSSRC();
}
this->queueData(copied_packet->data, copied_packet->length, VIDEO_PACKET);
return 0;
}
示例9: writeAudioData
// Unpacks one RTP audio packet and writes it as an AVPacket to the recording
// context. Lazily selects the audio codec (PCMU or Opus) from the first
// packet's payload type, then normalizes RTP timestamps to the stream timebase.
void ExternalOutput::writeAudioData(char* buf, int len){
RtpHeader* head = reinterpret_cast<RtpHeader*>(buf);
// Remember the first timestamp (-1 = unset sentinel) to normalize later ones.
if (firstAudioTimestamp_ == -1) {
firstAudioTimestamp_ = head->getTimestamp();
}
timeval time;
gettimeofday(&time, NULL);
// Figure out our audio codec.
if(context_->oformat->audio_codec == AV_CODEC_ID_NONE) {
//We dont need any other payload at this time
if(head->getPayloadType() == PCMU_8000_PT){
context_->oformat->audio_codec = AV_CODEC_ID_PCM_MULAW;
} else if (head->getPayloadType() == OPUS_48000_PT) {
context_->oformat->audio_codec = AV_CODEC_ID_OPUS;
}
}
initContext();
if (audio_stream_ == NULL) {
// not yet.
return;
}
// Strip the RTP header; ret is the payload length (<=0 means nothing to write).
int ret = inputProcessor_->unpackageAudio(reinterpret_cast<unsigned char*>(buf), len, unpackagedAudioBuffer_);
if (ret <= 0)
return;
long long currentTimestamp = head->getTimestamp();
if (currentTimestamp - firstAudioTimestamp_ < 0) {
// we wrapped. add 2^32 to correct this. We only handle a single wrap around since that's 13 hours of recording, minimum.
currentTimestamp += 0xFFFFFFFF;
}
// Convert from the RTP clock to the stream timebase.
long long timestampToWrite = (currentTimestamp - firstAudioTimestamp_) / (audio_stream_->codec->time_base.den / audio_stream_->time_base.den);
// Adjust for our start time offset
timestampToWrite += audioOffsetMsec_ / (1000 / audio_stream_->time_base.den); // in practice, our timebase den is 1000, so this operation is a no-op.
/* ELOG_DEBUG("Writing audio frame %d with timestamp %u, normalized timestamp %u, audio offset msec %u, length %d, input timebase: %d/%d, target timebase: %d/%d", */
/* head->getSeqNumber(), head->getTimestamp(), timestampToWrite, audioOffsetMsec_, ret, */
/* audio_stream_->codec->time_base.num, audio_stream_->codec->time_base.den, // timebase we requested */
/* audio_stream_->time_base.num, audio_stream_->time_base.den); // actual timebase */
// avpkt borrows unpackagedAudioBuffer_; av_free_packet only clears the struct here.
AVPacket avpkt;
av_init_packet(&avpkt);
avpkt.data = unpackagedAudioBuffer_;
avpkt.size = ret;
avpkt.pts = timestampToWrite;
avpkt.stream_index = 1;
av_write_frame(context_, &avpkt);
av_free_packet(&avpkt);
}
示例10: deliverVideoData_
int ExternalOutput::deliverVideoData_(std::shared_ptr<DataPacket> video_packet) {
if (video_source_ssrc_ == 0) {
RtpHeader* h = reinterpret_cast<RtpHeader*>(video_packet->data);
video_source_ssrc_ = h->getSSRC();
}
std::shared_ptr<DataPacket> copied_packet = std::make_shared<DataPacket>(*video_packet);
copied_packet->type = VIDEO_PACKET;
queueDataAsync(copied_packet);
return 0;
}
示例11: read
// Pipeline sink: routes a packet coming out of the pipeline to the right
// consumer — feedback packets to fb_sink_, media (RTP or RTCP SR) to the
// audio/video sinks, keyed by SSRC when bundling or by packet type otherwise.
void MediaStream::read(std::shared_ptr<DataPacket> packet) {
  char* buf = packet->data;
  int len = packet->length;
  // PROCESS RTCP
  RtpHeader *head = reinterpret_cast<RtpHeader*> (buf);
  RtcpHeader *chead = reinterpret_cast<RtcpHeader*> (buf);
  uint32_t recvSSRC = 0;
  if (!chead->isRtcp()) {
    recvSSRC = head->getSSRC();
  } else if (chead->packettype == RTCP_Sender_PT) {  // Sender Report
    recvSSRC = chead->getSSRC();
  }
  // DELIVER FEEDBACK (RR, FEEDBACK PACKETS)
  if (chead->isFeedback()) {
    if (fb_sink_ != nullptr && should_send_feedback_) {
      fb_sink_->deliverFeedback(std::move(packet));
    }
  } else {
    // RTP or RTCP Sender Report
    if (bundle_) {
      // Check incoming SSRC
      // Deliver data
      if (isVideoSourceSSRC(recvSSRC)) {
        parseIncomingPayloadType(buf, len, VIDEO_PACKET);
        // Guard against a null sink: the guard at the top only requires one
        // of the three sinks to be set (the original dereferenced blindly).
        if (video_sink_ != nullptr) {
          video_sink_->deliverVideoData(std::move(packet));
        }
      } else if (isAudioSourceSSRC(recvSSRC)) {
        parseIncomingPayloadType(buf, len, AUDIO_PACKET);
        if (audio_sink_ != nullptr) {
          audio_sink_->deliverAudioData(std::move(packet));
        }
      } else {
        ELOG_DEBUG("%s read video unknownSSRC: %u, localVideoSSRC: %u, localAudioSSRC: %u",
                   toLog(), recvSSRC, this->getVideoSourceSSRC(), this->getAudioSourceSSRC());
      }
    } else {
      if (packet->type == AUDIO_PACKET && audio_sink_ != nullptr) {
        parseIncomingPayloadType(buf, len, AUDIO_PACKET);
        // Firefox does not send SSRC in SDP
        if (getAudioSourceSSRC() == 0) {
          ELOG_DEBUG("%s discoveredAudioSourceSSRC:%u", toLog(), recvSSRC);
          this->setAudioSourceSSRC(recvSSRC);
        }
        audio_sink_->deliverAudioData(std::move(packet));
      } else if (packet->type == VIDEO_PACKET && video_sink_ != nullptr) {
        parseIncomingPayloadType(buf, len, VIDEO_PACKET);
        // Firefox does not send SSRC in SDP
        if (getVideoSourceSSRC() == 0) {
          ELOG_DEBUG("%s discoveredVideoSourceSSRC:%u", toLog(), recvSSRC);
          this->setVideoSourceSSRC(recvSSRC);
        }
        // change ssrc for RTP packets, don't touch here if RTCP
        video_sink_->deliverVideoData(std::move(packet));
      }
    }  // if not bundle
  }  // if not Feedback
}
示例12: handleRtpPacket
void SRPacketHandler::handleRtpPacket(std::shared_ptr<dataPacket> packet) {
RtpHeader *head = reinterpret_cast<RtpHeader*>(packet->data);
uint32_t ssrc = head->getSSRC();
auto sr_selected_info_iter = sr_info_map_.find(ssrc);
std::shared_ptr<SRInfo> selected_info;
if (sr_selected_info_iter == sr_info_map_.end()) {
ELOG_DEBUG("message: Inserting new SSRC in sr_info_map, ssrc: %u", ssrc);
sr_info_map_[ssrc] = std::make_shared<SRInfo>();
}
selected_info = sr_info_map_[ssrc];
selected_info->sent_packets++;
selected_info->sent_octets += (packet->length - head->getHeaderLength());
}
示例13: isRetransmitOfOldPacket
// Heuristic: returns true if this packet looks like a retransmission of an
// already-seen packet — i.e. its sequence number is older than the max seen
// AND it arrived much later than its RTP timestamp implies it should have.
// Used to keep retransmissions out of the jitter estimate.
bool RtcpRrGenerator::isRetransmitOfOldPacket(std::shared_ptr<dataPacket> packet) {
RtpHeader *head = reinterpret_cast<RtpHeader*>(packet->data);
// Newer-or-equal sequence numbers, or no jitter estimate yet, cannot be retransmits.
if (!RtpUtils::sequenceNumberLessThan(head->getSeqNumber(), rr_info_.max_seq) || rr_info_.jitter.jitter == 0) {
return false;
}
// Wall-clock gap since the last received packet (truncated to 32 bits to
// match last_recv_ts's width before the signed subtraction).
int64_t time_diff_ms = static_cast<uint32_t>(packet->received_time_ms) - rr_info_.last_recv_ts;
// RTP-clock gap; the int32 cast keeps timestamp wrap-around differences signed.
int64_t timestamp_diff = static_cast<int32_t>(head->getTimestamp() - rr_info_.last_rtp_ts);
uint16_t clock_rate = type_ == VIDEO_PACKET ? getVideoClockRate(head->getPayloadType()) :
getAudioClockRate(head->getPayloadType());
// NOTE(review): divides by clock_rate — assumes the clock-rate getters never
// return 0 for payload types that reach here; confirm.
int64_t rtp_time_stamp_diff_ms = timestamp_diff / clock_rate;
int64_t max_delay_ms = ((2 * rr_info_.jitter.jitter) / clock_rate);
return time_diff_ms > rtp_time_stamp_diff_ms + max_delay_ms;
}
示例14: handleRtpPacket
// Updates Receiver Report state (packet counts, extended highest sequence
// number, interarrival jitter) for one incoming RTP packet. Returns true when
// it is time to emit an RR for this SSRC, false otherwise.
bool RtcpRrGenerator::handleRtpPacket(std::shared_ptr<dataPacket> packet) {
RtpHeader *head = reinterpret_cast<RtpHeader*>(packet->data);
// Only track the single SSRC this generator was created for.
if (ssrc_ != head->getSSRC()) {
ELOG_DEBUG("message: handleRtpPacket ssrc not found, ssrc: %u", head->getSSRC());
return false;
}
uint16_t seq_num = head->getSeqNumber();
rr_info_.packets_received++;
// base_seq / max_seq use -1 as an "unset" sentinel.
if (rr_info_.base_seq == -1) {
rr_info_.base_seq = head->getSeqNumber();
}
if (rr_info_.max_seq == -1) {
rr_info_.max_seq = seq_num;
} else if (!RtpUtils::sequenceNumberLessThan(seq_num, rr_info_.max_seq)) {
// seq_num is logically newer; if it is numerically smaller, the 16-bit
// counter wrapped, so bump the cycle count (RFC 3550 extended seq).
if (seq_num < rr_info_.max_seq) {
rr_info_.cycle++;
}
rr_info_.max_seq = seq_num;
}
rr_info_.extended_seq = (rr_info_.cycle << 16) | rr_info_.max_seq;
uint16_t clock_rate = type_ == VIDEO_PACKET ? getVideoClockRate(head->getPayloadType()) :
getAudioClockRate(head->getPayloadType());
// Jitter update (RFC 3550 A.8-style smoothing, gain 1/16), skipping packets
// with a repeated timestamp and suspected retransmissions.
if (head->getTimestamp() != rr_info_.last_rtp_ts &&
!isRetransmitOfOldPacket(packet)) {
int transit_time = static_cast<int>((packet->received_time_ms * clock_rate) - head->getTimestamp());
int delta = abs(transit_time - rr_info_.jitter.transit_time);
if (rr_info_.jitter.transit_time != 0 && delta < MAX_DELAY) {
rr_info_.jitter.jitter +=
(1. / 16.) * (static_cast<double>(delta) - rr_info_.jitter.jitter);
}
rr_info_.jitter.transit_time = transit_time;
}
rr_info_.last_rtp_ts = head->getTimestamp();
rr_info_.last_recv_ts = static_cast<uint32_t>(packet->received_time_ms);
uint64_t now = ClockUtils::timePointToMs(clock_->now());
if (rr_info_.next_packet_ms == 0) { // Schedule the first packet
uint16_t selected_interval = selectInterval();
rr_info_.next_packet_ms = now + selected_interval;
return false;
}
// Due: tell the caller to send an RR now.
if (now >= rr_info_.next_packet_ms) {
ELOG_DEBUG("message: should send packet, ssrc: %u", ssrc_);
return true;
}
return false;
}
示例15: memcpy
// Routes one incoming video packet: payload type 100 goes to the input
// processor (via a member buffer copy), everything else to receiveRtpData.
// Always returns 0.
int OneToManyTranscoder::deliverVideoData_(char* buf, int len) {
// NOTE(review): unbounded memcpy — assumes len never exceeds the capacity of
// sendVideoBuffer_; confirm the caller enforces a maximum packet size.
memcpy(sendVideoBuffer_, buf, len);
RtpHeader* theHead = reinterpret_cast<RtpHeader*>(buf);
// ELOG_DEBUG("extension %d pt %u", theHead->getExtension(),
// theHead->getPayloadType());
// Payload type 100 — presumably the negotiated VP8 dynamic PT; verify
// against the SDP mapping used elsewhere in this project.
if (theHead->getPayloadType() == 100) {
ip_->deliverVideoData(sendVideoBuffer_, len);
} else {
this->receiveRtpData((unsigned char*) buf, len);
}
sentPackets_++;
return 0;
}