This article collects typical usage examples of the C++ ELOG_DEBUG function. If you are wondering how ELOG_DEBUG is used in C++, what it is for, or what real call sites look like, the hand-picked code samples here may help.
Fifteen code examples of ELOG_DEBUG are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
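Before the examples, here is a minimal self-contained sketch of the call pattern they all share: ELOG_DEBUG takes a printf-style format string followed by arguments. The macro stub, the main() driver, and the sample values are assumptions for illustration only; in the Licode/Erizo sources these snippets come from, ELOG_DEBUG is backed by a per-class logger rather than this stand-in.
// Illustrative stand-in only, not the project's real ELOG_DEBUG definition.
// Relies on the common ##__VA_ARGS__ compiler extension (GCC/Clang/MSVC) so
// calls with no extra arguments, e.g. ELOG_DEBUG("Candidates SET"), also work.
#include <cstdio>

#define ELOG_DEBUG(fmt, ...) \
  std::fprintf(stderr, "[DEBUG] " fmt "\n", ##__VA_ARGS__)

int main() {
  int len = 1200;             // hypothetical packet length
  unsigned int ssrc = 55543;  // hypothetical SSRC
  ELOG_DEBUG("LEN %d", len);
  ELOG_DEBUG("Unknown SSRC %u, ignoring", ssrc);
  return 0;
}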
Example 1: ELOG_DEBUG
void WebRtcConnection::writeSsrc(char* buf, int len, unsigned int ssrc) {
  ELOG_DEBUG("LEN %d", len);
  RtpHeader *head = reinterpret_cast<RtpHeader*>(buf);
  RtcpHeader *chead = reinterpret_cast<RtcpHeader*>(buf);
  // If it is RTCP we check if it is a compound packet
  if (chead->isRtcp()) {
    char* movingBuf = buf;
    int rtcpLength = 0;
    int totalLength = 0;
    do {
      movingBuf += rtcpLength;
      RtcpHeader *chead = reinterpret_cast<RtcpHeader*>(movingBuf);
      rtcpLength = (ntohs(chead->length) + 1) * 4;
      totalLength += rtcpLength;
      ELOG_DEBUG("Is RTCP, prev SSRC %u, new %u, len %d ", chead->getSSRC(), ssrc, rtcpLength);
      chead->ssrc = htonl(ssrc);
      if (chead->packettype == RTCP_PS_Feedback_PT) {
        FirHeader *thefir = reinterpret_cast<FirHeader*>(movingBuf);
        if (thefir->fmt == 4) {  // It is a FIR packet, we generate it
          this->sendPLI();
        }
      }
    } while (totalLength < len);
  } else {
    head->setSSRC(ssrc);
  }
}
Example 2: ELOG_DEBUG
bool NiceConnection::setRemoteCandidates(
    std::vector<CandidateInfo> &candidates) {
  ELOG_DEBUG("Setting remote candidates %lu", (unsigned long) candidates.size());
  for (unsigned int compId = 1; compId <= iceComponents_; compId++) {
    GSList* candList = NULL;
    for (unsigned int it = 0; it < candidates.size(); it++) {
      NiceCandidateType nice_cand_type;
      CandidateInfo cinfo = candidates[it];
      if (cinfo.mediaType != this->mediaType
          || this->transportName->compare(cinfo.transProtocol)
          || cinfo.componentId != compId)
        continue;
      switch (cinfo.hostType) {
        case HOST:
          nice_cand_type = NICE_CANDIDATE_TYPE_HOST;
          break;
        case SRLFX:
          nice_cand_type = NICE_CANDIDATE_TYPE_SERVER_REFLEXIVE;
          break;
        case PRFLX:
          nice_cand_type = NICE_CANDIDATE_TYPE_PEER_REFLEXIVE;
          break;
        case RELAY:
          nice_cand_type = NICE_CANDIDATE_TYPE_RELAYED;
          break;
        default:
          nice_cand_type = NICE_CANDIDATE_TYPE_HOST;
          break;
      }
      NiceCandidate* thecandidate = nice_candidate_new(nice_cand_type);
      NiceAddress* naddr = nice_address_new();
      nice_address_set_from_string(naddr, cinfo.hostAddress.c_str());
      nice_address_set_port(naddr, cinfo.hostPort);
      thecandidate->addr = *naddr;
      // snprintf instead of sprintf avoids overflowing the fixed-size foundation buffer
      snprintf(thecandidate->foundation, sizeof(thecandidate->foundation), "%s", cinfo.foundation.c_str());
      thecandidate->username = strdup(cinfo.username.c_str());
      thecandidate->password = strdup(cinfo.password.c_str());
      thecandidate->stream_id = (guint) 1;
      thecandidate->component_id = cinfo.componentId;
      thecandidate->priority = cinfo.priority;
      thecandidate->transport = NICE_CANDIDATE_TRANSPORT_UDP;
      candList = g_slist_append(candList, thecandidate);
      ELOG_DEBUG("New Candidate SET %s %d", cinfo.hostAddress.c_str(), cinfo.hostPort);
    }
    nice_agent_set_remote_candidates(agent_, (guint) 1, compId, candList);
  }
  ELOG_DEBUG("Candidates SET");
  this->updateIceState(NICE_CANDIDATES_RECEIVED);
  return true;
}
Example 3: ELOG_DEBUG
ExternalOutput::~ExternalOutput(){
  ELOG_DEBUG("Destructor");
  ELOG_DEBUG("Closing Sink");
  delete in;
  in = NULL;
  if (context_ != NULL){
    if (writeheadres_ >= 0)
      av_write_trailer(context_);
    // The original compared the avio_close function pointer to 0 (always true);
    // guard on the AVIOContext instead before closing it.
    if (context_->pb != NULL)
      avio_close(context_->pb);
    avformat_free_context(context_);
    context_ = NULL;
  }
  if (videoCodec_ != NULL){
    avcodec_close(videoCodecCtx_);
    videoCodec_ = NULL;
  }
  if (audioCodec_ != NULL){
    avcodec_close(audioCodecCtx_);
    audioCodec_ = NULL;
  }
  sending_ = false;
  cond_.notify_one();
  thread_.join();
  /* boost::unique_lock<boost::mutex> lock(queueMutex_); */
  ELOG_DEBUG("ExternalOutput closed Successfully");
}
Example 4: avcodec_find_encoder
bool OutputProcessor::initAudioCoder() {
  aCoder = avcodec_find_encoder(static_cast<AVCodecID>(mediaInfo.audioCodec.codec));
  if (!aCoder) {
    ELOG_DEBUG("Audio encoder not found");
    return false;
  }
  aCoderContext = avcodec_alloc_context3(aCoder);
  if (!aCoderContext) {
    ELOG_DEBUG("Memory error allocating audio coder context");
    return false;
  }
  aCoderContext->sample_fmt = AV_SAMPLE_FMT_S16;
  aCoderContext->bit_rate = mediaInfo.audioCodec.bitRate;
  aCoderContext->sample_rate = mediaInfo.audioCodec.sampleRate;
  aCoderContext->channels = 1;
  if (avcodec_open2(aCoderContext, aCoder, NULL) < 0) {
    ELOG_DEBUG("Error opening audio coder");
    return false;
  }
  audioCoder = 1;
  return true;
}
Example 5: getContext
void RRGenerationHandler::notifyUpdate() {
  if (initialized_) {
    return;
  }
  auto pipeline = getContext()->getPipelineShared();
  if (!pipeline) {
    return;
  }
  connection_ = pipeline->getService<WebRtcConnection>().get();
  if (!connection_) {
    return;
  }
  uint32_t video_ssrc = connection_->getVideoSourceSSRC();
  if (video_ssrc != 0) {
    auto video_packets = std::make_shared<RRPackets>();
    video_packets->ssrc = video_ssrc;
    video_packets->type = VIDEO_PACKET;
    rr_info_map_[video_ssrc] = video_packets;
    ELOG_DEBUG("%s, message: Initialized video, ssrc: %u", connection_->toLog(), video_ssrc);
    initialized_ = true;
  }
  uint32_t audio_ssrc = connection_->getAudioSourceSSRC();
  if (audio_ssrc != 0) {
    auto audio_packets = std::make_shared<RRPackets>();
    audio_packets->ssrc = audio_ssrc;
    audio_packets->type = AUDIO_PACKET;
    rr_info_map_[audio_ssrc] = audio_packets;
    initialized_ = true;
    ELOG_DEBUG("%s, message: Initialized audio, ssrc: %u", connection_->toLog(), audio_ssrc);
  }
}
Example 6: malloc
int OutputProcessor::init(const MediaInfo& info, RTPDataReceiver* rtpReceiver) {
  this->mediaInfo = info;
  this->rtpReceiver_ = rtpReceiver;
  encodedBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE);
  packagedBuffer_ = (unsigned char*) malloc(PACKAGED_BUFFER_SIZE);
  rtpBuffer_ = (unsigned char*) malloc(PACKAGED_BUFFER_SIZE);
  if (info.processorType == PACKAGE_ONLY) {
    this->initVideoPackager();
    this->initAudioPackager();
    return 0;
  }
  if (mediaInfo.hasVideo) {
    this->mediaInfo.videoCodec.codec = VIDEO_CODEC_VP8;
    if (vCoder.initEncoder(mediaInfo.videoCodec)) {
      ELOG_DEBUG("Error initializing encoder");
    }
    this->initVideoPackager();
  }
  if (mediaInfo.hasAudio) {
    ELOG_DEBUG("Init AUDIO processor");
    mediaInfo.audioCodec.codec = AUDIO_CODEC_PCM_U8;
    mediaInfo.audioCodec.sampleRate = 44100;
    mediaInfo.audioCodec.bitRate = 64000;
    encodedAudioBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE);
    packagedAudioBuffer_ = (unsigned char*) malloc(UNPACKAGED_BUFFER_SIZE);
    this->initAudioCoder();
    this->initAudioPackager();
  }
  return 0;
}
Example 7: lock
void WebRtcConnection::onTransportData(char* buf, int len, Transport *transport) {
  boost::mutex::scoped_lock lock(writeMutex_);
  if (audioSink_ == NULL && videoSink_ == NULL && fbSink_ == NULL)
    return;
  int length = len;
  rtcpheader *chead = reinterpret_cast<rtcpheader*>(buf);
  if (chead->packettype == RTCP_Receiver_PT || chead->packettype == RTCP_PS_Feedback_PT || chead->packettype == RTCP_RTP_Feedback_PT){
    if (fbSink_ != NULL) {
      fbSink_->deliverFeedback(buf, length);
    }
  } else {
    // RTP or RTCP Sender Report
    if (bundle_) {
      // Check incoming SSRC
      rtpheader *head = reinterpret_cast<rtpheader*>(buf);
      rtcpheader *chead = reinterpret_cast<rtcpheader*>(buf);
      unsigned int recvSSRC = ntohl(head->ssrc);
      if (chead->packettype == RTCP_Sender_PT) {  // Sender Report
        ELOG_DEBUG("RTP Sender Report %d length %d ", chead->packettype, ntohs(chead->length));
        recvSSRC = ntohl(chead->ssrc);
      }
      // Deliver data
      if (recvSSRC == this->getVideoSourceSSRC() || recvSSRC == this->getVideoSinkSSRC()) {
        videoSink_->deliverVideoData(buf, length);
      } else if (recvSSRC == this->getAudioSourceSSRC() || recvSSRC == this->getAudioSinkSSRC()) {
        audioSink_->deliverAudioData(buf, length);
      } else {
        ELOG_DEBUG("Unknown SSRC %u, localVideo %u, remoteVideo %u, ignoring", recvSSRC, this->getVideoSourceSSRC(), this->getVideoSinkSSRC());
      }
    } else if (transport->mediaType == AUDIO_TYPE) {
      if (audioSink_ != NULL) {
        rtpheader *head = (rtpheader*) buf;
        // Firefox does not send SSRC in SDP
        if (this->getAudioSourceSSRC() == 0) {
          ELOG_DEBUG("Audio Source SSRC is %u", ntohl(head->ssrc));
          this->setAudioSourceSSRC(ntohl(head->ssrc));
          this->updateState(TRANSPORT_READY, transport);
        }
        head->ssrc = htonl(this->getAudioSinkSSRC());
        audioSink_->deliverAudioData(buf, length);
      }
    } else if (transport->mediaType == VIDEO_TYPE) {
      if (videoSink_ != NULL) {
        rtpheader *head = (rtpheader*) buf;
        // Firefox does not send SSRC in SDP
        if (this->getVideoSourceSSRC() == 0) {
          ELOG_DEBUG("Video Source SSRC is %u", ntohl(head->ssrc));
          this->setVideoSourceSSRC(ntohl(head->ssrc));
          this->updateState(TRANSPORT_READY, transport);
        }
        head->ssrc = htonl(this->getVideoSinkSSRC());
        videoSink_->deliverVideoData(buf, length);
      }
    }
  }
}
Example 8: cands
bool NicerConnection::setRemoteCandidates(const std::vector<CandidateInfo> &candidates, bool is_bundle) {
  std::vector<CandidateInfo> cands(candidates);
  auto remote_candidates_promise = std::make_shared<std::promise<void>>();
  nr_ice_peer_ctx *peer = peer_;
  nr_ice_media_stream *stream = stream_;
  std::shared_ptr<NicerInterface> nicer = nicer_;
  async([cands, is_bundle, nicer, peer, stream, this, remote_candidates_promise] {
    ELOG_DEBUG("%s message: adding remote candidates (%ld)", toLog(), cands.size());
    for (const CandidateInfo &cand : cands) {
      std::string sdp = cand.sdp;
      std::size_t pos = sdp.find(",");
      std::string candidate = sdp.substr(0, pos);
      ELOG_DEBUG("%s message: New remote ICE candidate (%s)", toLog(), candidate.c_str());
      UINT4 r = nicer->IcePeerContextParseTrickleCandidate(peer, stream, const_cast<char *>(candidate.c_str()));
      if (r && r != R_ALREADY) {
        ELOG_WARN("%s message: Couldn't add remote ICE candidate (%s) (%d)", toLog(), candidate.c_str(), r);
      }
    }
    remote_candidates_promise->set_value();
  });
  std::future<void> remote_candidates_future = remote_candidates_promise->get_future();
  std::future_status status = remote_candidates_future.wait_for(std::chrono::seconds(1));
  if (status == std::future_status::timeout) {
    ELOG_WARN("%s message: Could not set remote candidates", toLog());
    return false;
  }
  return true;
}
Example 9: ELOG_DEBUG
ExternalOutput::~ExternalOutput(){
  ELOG_DEBUG("Destructing");
  // Stop our thread so we can safely nuke libav stuff and close our file.
  recording_ = false;
  cond_.notify_one();
  thread_.join();
  if (audio_stream_ != NULL && video_stream_ != NULL && context_ != NULL){
    av_write_trailer(context_);
  }
  if (video_stream_ && video_stream_->codec != NULL){
    avcodec_close(video_stream_->codec);
  }
  if (audio_stream_ && audio_stream_->codec != NULL){
    avcodec_close(audio_stream_->codec);
  }
  if (context_ != NULL){
    avio_close(context_->pb);
    avformat_free_context(context_);
    context_ = NULL;
  }
  ELOG_DEBUG("Closed Successfully");
}
Example 10: ELOG_DEBUG
bool MediaStream::setRemoteSdp(std::shared_ptr<SdpInfo> sdp) {
  ELOG_DEBUG("%s message: setting remote SDP", toLog());
  remote_sdp_ = sdp;
  if (remote_sdp_->videoBandwidth != 0) {
    ELOG_DEBUG("%s message: Setting remote BW, maxVideoBW: %u", toLog(), remote_sdp_->videoBandwidth);
    this->rtcp_processor_->setMaxVideoBW(remote_sdp_->videoBandwidth * 1000);
  }
  if (pipeline_initialized_) {
    pipeline_->notifyUpdate();
    return true;
  }
  bundle_ = remote_sdp_->isBundle;
  setVideoSourceSSRCList(remote_sdp_->video_ssrc_list);
  setAudioSourceSSRC(remote_sdp_->audio_ssrc);
  audio_enabled_ = remote_sdp_->hasAudio;
  video_enabled_ = remote_sdp_->hasVideo;
  rtcp_processor_->addSourceSsrc(getAudioSourceSSRC());
  std::for_each(video_source_ssrc_list_.begin(), video_source_ssrc_list_.end(), [this] (uint32_t new_ssrc){
    rtcp_processor_->addSourceSsrc(new_ssrc);
  });
  initializePipeline();
  return true;
}
Example 11: avcodec_find_encoder
bool ExternalOutput::initContext() {
  if (context_->oformat->video_codec != AV_CODEC_ID_NONE &&
      context_->oformat->audio_codec != AV_CODEC_ID_NONE &&
      video_stream_ == NULL &&
      audio_stream_ == NULL) {
    AVCodec* videoCodec = avcodec_find_encoder(context_->oformat->video_codec);
    if (videoCodec == NULL) {
      ELOG_ERROR("Could not find video codec");
      return false;
    }
    // Only log the codec name after the NULL check; the original dereferenced it first.
    ELOG_DEBUG("Found Video Codec %s", videoCodec->name);
    video_stream_ = avformat_new_stream(context_, videoCodec);
    video_stream_->id = 0;
    video_stream_->codec->codec_id = context_->oformat->video_codec;
    video_stream_->codec->width = 640;
    video_stream_->codec->height = 480;
    // A decent guess here suffices; if processing the file with ffmpeg,
    // use -vsync 0 to force it not to duplicate frames.
    video_stream_->codec->time_base = (AVRational) { 1, 30 };
    video_stream_->codec->pix_fmt = PIX_FMT_YUV420P;
    if (context_->oformat->flags & AVFMT_GLOBALHEADER) {
      video_stream_->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    context_->oformat->flags |= AVFMT_VARIABLE_FPS;
    AVCodec* audioCodec = avcodec_find_encoder(context_->oformat->audio_codec);
    if (audioCodec == NULL) {
      ELOG_ERROR("Could not find audio codec");
      return false;
    }
    ELOG_DEBUG("Found Audio Codec %s", audioCodec->name);
    audio_stream_ = avformat_new_stream(context_, audioCodec);
    audio_stream_->id = 1;
    audio_stream_->codec->codec_id = context_->oformat->audio_codec;
    audio_stream_->codec->sample_rate = context_->oformat->audio_codec == AV_CODEC_ID_PCM_MULAW ? 8000 : 48000;  // TODO is it always 48 khz for opus?
    audio_stream_->codec->time_base = (AVRational) { 1, audio_stream_->codec->sample_rate };
    audio_stream_->codec->channels = context_->oformat->audio_codec == AV_CODEC_ID_PCM_MULAW ? 1 : 2;  // TODO is it always two channels for opus?
    if (context_->oformat->flags & AVFMT_GLOBALHEADER) {
      audio_stream_->codec->flags |= CODEC_FLAG_GLOBAL_HEADER;
    }
    context_->streams[0] = video_stream_;
    context_->streams[1] = audio_stream_;
    if (avio_open(&context_->pb, context_->filename, AVIO_FLAG_WRITE) < 0) {
      ELOG_ERROR("Error opening output file");
      return false;
    }
    if (avformat_write_header(context_, NULL) < 0) {
      ELOG_ERROR("Error writing header");
      return false;
    }
    ELOG_DEBUG("avformat configured");
  }
  return true;
}
Example 12: lock
void DtlsTransport::onHandshakeCompleted(DtlsSocketContext* ctx, std::string clientKey, std::string serverKey, std::string srtp_profile) {
  boost::mutex::scoped_lock lock(sessionMutex_);
  if (ctx == dtlsRtp_.get()) {
    ELOG_DEBUG("%s - Setting RTP srtp params", transport_name.c_str());
    srtp_.reset(new SrtpChannel());
    if (srtp_->setRtpParams((char*)clientKey.c_str(), (char*)serverKey.c_str())) {
      readyRtp_ = true;
    } else {
      updateTransportState(TRANSPORT_FAILED);
    }
    if (dtlsRtcp_ == NULL) {
      readyRtcp_ = true;
    }
  }
  if (ctx == dtlsRtcp_.get()) {
    ELOG_DEBUG("%s - Setting RTCP srtp params", transport_name.c_str());
    srtcp_.reset(new SrtpChannel());
    if (srtcp_->setRtpParams((char*)clientKey.c_str(), (char*)serverKey.c_str())) {
      readyRtcp_ = true;
    } else {
      updateTransportState(TRANSPORT_FAILED);
    }
  }
  ELOG_DEBUG("%s - Ready? %d %d", transport_name.c_str(), readyRtp_, readyRtcp_);
  if (readyRtp_ && readyRtcp_) {
    ELOG_DEBUG("%s - Ready!!!", transport_name.c_str());
    updateTransportState(TRANSPORT_READY);
  }
}
Example 13: ELOG_DEBUG
int VideoDecoder::initDecoder(const VideoCodecInfo& info){
  ELOG_DEBUG("Init Decoder");
  vDecoder = avcodec_find_decoder(VideoCodecID2ffmpegDecoderID(info.codec));
  if (!vDecoder) {
    ELOG_DEBUG("Error getting video decoder");
    return -1;
  }
  vDecoderContext = avcodec_alloc_context3(vDecoder);
  if (!vDecoderContext) {
    ELOG_DEBUG("Error allocating decoder context");
    return -1;
  }
  vDecoderContext->width = info.width;
  vDecoderContext->height = info.height;
  if (avcodec_open2(vDecoderContext, vDecoder, NULL) < 0) {
    ELOG_DEBUG("Error opening video decoder");
    return -1;
  }
  dPicture = av_frame_alloc();
  if (!dPicture) {
    ELOG_DEBUG("Error allocating video frame");
    return -1;
  }
  return 0;
}
Example 14: ELOG_DEBUG
// memory is only valid for duration of callback; must be copied if queueing is required
DtlsSocketContext::DtlsSocketContext() {
  started = false;
  mSocket = NULL;
  receiver = NULL;
  DtlsSocketContext::Init();
  ELOG_DEBUG("Creating Dtls factory, Openssl v %s", OPENSSL_VERSION_TEXT);
  mContext = SSL_CTX_new(DTLSv1_method());
  assert(mContext);
  int r = SSL_CTX_use_certificate(mContext, mCert);
  assert(r == 1);
  r = SSL_CTX_use_PrivateKey(mContext, privkey);
  assert(r == 1);
  SSL_CTX_set_cipher_list(mContext, "ALL:!ADH:!LOW:!EXP:!MD5:@STRENGTH");
  SSL_CTX_set_info_callback(mContext, SSLInfoCallback);
  SSL_CTX_set_verify(mContext, SSL_VERIFY_PEER | SSL_VERIFY_FAIL_IF_NO_PEER_CERT,
                     SSLVerifyCallback);
  // SSL_CTX_set_session_cache_mode(mContext, SSL_SESS_CACHE_OFF);
  // SSL_CTX_set_options(mContext, SSL_OP_NO_TICKET);
  // Set SRTP profiles
  r = SSL_CTX_set_tlsext_use_srtp(mContext, DefaultSrtpProfile);
  assert(r == 0);  // SSL_CTX_set_tlsext_use_srtp returns 0 on success
  SSL_CTX_set_verify_depth(mContext, 2);
  SSL_CTX_set_read_ahead(mContext, 1);
  ELOG_DEBUG("DtlsSocketContext %p created", this);
}
Example 15: ELOG_DEBUG
int AudioEncoder::initEncoder(const AudioCodecInfo& mediaInfo) {
  ELOG_DEBUG("Init audioEncoder begin");
  aCoder_ = avcodec_find_encoder(AudioCodecID2ffmpegDecoderID(mediaInfo.codec));
  if (!aCoder_) {
    ELOG_DEBUG("Audio Codec not found");
    return false;
  }
  aCoderContext_ = avcodec_alloc_context3(aCoder_);
  if (!aCoderContext_) {
    ELOG_DEBUG("Memory error allocating audio coder context");
    return false;
  }
  aCoderContext_->sample_fmt = AV_SAMPLE_FMT_FLT;
  // aCoderContext_->bit_rate = mediaInfo.bitRate;
  aCoderContext_->sample_rate = 8 /*mediaInfo.sampleRate*/;  // note: 8 Hz looks like a placeholder; mediaInfo.sampleRate is probably what was intended
  aCoderContext_->channels = 1;
  char errbuff[500];
  int res = avcodec_open2(aCoderContext_, aCoder_, NULL);
  if (res != 0) {
    av_strerror(res, errbuff, sizeof(errbuff));
    ELOG_DEBUG("fail when opening input %s", errbuff);
    return -1;
  }
  ELOG_DEBUG("Init audioEncoder end");
  return true;
}