本文整理汇总了C++中DPTR_D函数的典型用法代码示例。如果您正苦于以下问题:C++ DPTR_D函数的具体用法?C++ DPTR_D怎么用?C++ DPTR_D使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了DPTR_D函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: DPTR_D
void AVThread::resetState()
{
    DPTR_D(AVThread);
    // Wake everything up first: a paused thread or a waiting writer would
    // otherwise keep blocking while the flags below are reset.
    pause(false);
    if (d.writer) {
        d.writer->pause(false); //stop waiting. Important when replay
    }
    d.demux_end = false;
    d.stop = false;
    // Restore blocking queue semantics and discard any leftover packets.
    d.packets.setBlocking(true);
    d.packets.clear();
    // Not deleted here: the context is owned and managed by the filters.
    d.filter_context = 0;
}
示例2: DPTR_D
// Opens the decoder: resolves the AVCodec (by explicit name or by the context's
// codec id), runs subclass/hardware init via d.open(), applies user options and
// calls avcodec_open2(). Returns false and emits error() on failure.
bool AVDecoder::open()
{
    DPTR_D(AVDecoder);
    // The FFmpeg context must have been set up (e.g. by the demuxer) first.
    if (!d.codec_ctx) {
        qWarning("FFmpeg codec context not ready");
        return false;
    }
    AVCodec *codec = 0;
    if (!d.codec_name.isEmpty()) {
        // An explicit codec name overrides the id stored in the context.
        codec = avcodec_find_decoder_by_name(d.codec_name.toUtf8().constData());
    } else {
        codec = avcodec_find_decoder(d.codec_ctx->codec_id);
    }
    if (!codec) {
        QString es(tr("No codec could be found for '%1'"));
        if (d.codec_name.isEmpty()) {
            es = es.arg(avcodec_get_name(d.codec_ctx->codec_id));
        } else {
            es = es.arg(d.codec_name);
        }
        qWarning() << es;
        AVError::ErrorCode ec(AVError::CodecError);
        // BUG FIX: this switched on coder_type (an encoder entropy-coder flag,
        // e.g. FF_CODER_TYPE_VLC), which never equals an AVMEDIA_TYPE_* value,
        // so the media-specific error codes were unreachable. codec_type is
        // the field holding the media type.
        switch (d.codec_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            ec = AVError::VideoCodecNotFound;
            break;
        case AVMEDIA_TYPE_AUDIO:
            ec = AVError::AudioCodecNotFound;
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ec = AVError::SubtitleCodecNotFound;
            break;
        default:
            break;
        }
        emit error(AVError(ec, es));
        return false;
    }
    // hwa extra init can be here
    if (!d.open()) {
        d.close();
        return false;
    }
    d.applyOptionsForDict();
    int ret = avcodec_open2(d.codec_ctx, codec, d.options.isEmpty() ? NULL : &d.dict);
    if (ret < 0) {
        // BUG FIX: message said "video codec" but this base class opens any
        // media type (audio/subtitle too).
        qWarning("open codec failed: %s", av_err2str(ret));
        return false;
    }
    d.is_open = true;
    return true;
}
示例3: DPTR_D
void OSDFilterGL::process()
{
if (mShowType == ShowNone)
return;
DPTR_D(Filter);
GLFilterContext *ctx = static_cast<GLFilterContext*>(d.context);
//TODO: render off screen
#if QTAV_HAVE(GL)
QGLWidget *glw = static_cast<QGLWidget*>(ctx->paint_device);
if (!glw)
return;
glw->renderText(ctx->rect.x(), ctx->rect.y(), text(d.statistics), font());
#endif //QTAV_HAVE(GL)
}
示例4: Q_UNUSED
// Resize handler: the viewport is taken from the precomputed output rect
// (aspect-ratio-corrected), not from the raw widget size, so w/h are unused.
void GLWidgetRenderer::resizeGL(int w, int h)
{
    Q_UNUSED(w);
    Q_UNUSED(h);
    DPTR_D(GLWidgetRenderer);
    qDebug("%s @%d %dx%d", __FUNCTION__, __LINE__, d.out_rect.width(), d.out_rect.height());
    //TODO: if whole widget as viewport, we can set rect by glVertex, thus paint logic is the same as others
    glViewport(d.out_rect.x(), d.out_rect.y(), d.out_rect.width(), d.out_rect.height());
    //??
    // Reset the fixed-function projection and modelview matrices to identity.
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
}
示例5: DPTR_D
bool AudioDecoder::prepare()
{
DPTR_D(AudioDecoder);
if (!d.codec_ctx)
return false;
if (!d.resampler)
return true;
d.resampler->setInChannelLayout(d.codec_ctx->channel_layout);
d.resampler->setInChannels(d.codec_ctx->channels);
d.resampler->setInSampleFormat(d.codec_ctx->sample_fmt);
d.resampler->setInSampleRate(d.codec_ctx->sample_rate);
d.resampler->prepare();
return true;
}
示例6: DPTR_D
void OpenGLRendererBase::onInitializeGL()
{
DPTR_D(OpenGLRendererBase);
//makeCurrent();
QOpenGLContext *ctx = const_cast<QOpenGLContext*>(QOpenGLContext::currentContext()); //qt4 returns const
d.glv.setOpenGLContext(ctx);
//const QByteArray extensions(reinterpret_cast<const char *>(glGetString(GL_EXTENSIONS)));
bool hasGLSL = QOpenGLShaderProgram::hasOpenGLShaderPrograms();
qDebug("OpenGL version: %d.%d hasGLSL: %d", ctx->format().majorVersion(), ctx->format().minorVersion(), hasGLSL);
initializeOpenGLFunctions();
glEnable(GL_TEXTURE_2D);
glDisable(GL_DEPTH_TEST);
glClearColor(0.0, 0.0, 0.0, 0.0);
}
示例7: DPTR_D
// Resize handler (full-widget-viewport variant): covers the whole widget with
// the viewport and lets setupAspectRatio() handle letterboxing in vertex space.
void GLWidgetRenderer::resizeGL(int w, int h)
{
    DPTR_D(GLWidgetRenderer);
    qDebug("%s @%d %dx%d", __FUNCTION__, __LINE__, d.out_rect.width(), d.out_rect.height());
    glViewport(0, 0, w, h);
    d.setupAspectRatio();
#ifndef QT_OPENGL_ES_2
    //??
    // Fixed-function pipeline only (not available on GLES2): reset matrices.
    glMatrixMode(GL_PROJECTION);
    glLoadIdentity();
    glMatrixMode(GL_MODELVIEW);
    glLoadIdentity();
#endif //QT_OPENGL_ES_2
}
示例8: DPTR_D
// Creates the OpenSL ES playback chain (buffer-queue source -> output mix),
// registers the queue and play callbacks, pre-queues nb_buffers of silence and
// starts playback. Returns false on any SL failure (via SL_RUN_CHECK_FALSE,
// which returns false from this function when the wrapped call fails).
bool AudioOutputOpenSL::open()
{
    DPTR_D(AudioOutputOpenSL);
    d.available = false;
    resetStatus();
    // Data source: an app-fed buffer queue carrying the negotiated PCM format.
    SLDataLocator_BufferQueue bufferQueueLocator = { SL_DATALOCATOR_BUFFERQUEUE, (SLuint32)d.nb_buffers };
    SLDataFormat_PCM pcmFormat = audioFormatToSL(audioFormat());
    SLDataSource audioSrc = { &bufferQueueLocator, &pcmFormat };
    // OutputMix
    SL_RUN_CHECK_FALSE((*d.engine)->CreateOutputMix(d.engine, &d.m_outputMixObject, 0, NULL, NULL));
    SL_RUN_CHECK_FALSE((*d.m_outputMixObject)->Realize(d.m_outputMixObject, SL_BOOLEAN_FALSE));
    // Data sink: the realized output mix.
    SLDataLocator_OutputMix outputMixLocator = { SL_DATALOCATOR_OUTPUTMIX, d.m_outputMixObject };
    SLDataSink audioSink = { &outputMixLocator, NULL };
    // Required interfaces for the player (volume interface currently disabled).
    const SLInterfaceID ids[] = { SL_IID_BUFFERQUEUE};//, SL_IID_VOLUME };
    const SLboolean req[] = { SL_BOOLEAN_TRUE};//, SL_BOOLEAN_TRUE };
    // AudioPlayer
    SL_RUN_CHECK_FALSE((*d.engine)->CreateAudioPlayer(d.engine, &d.m_playerObject, &audioSrc, &audioSink, sizeof(ids)/sizeof(ids[0]), ids, req));
    SL_RUN_CHECK_FALSE((*d.m_playerObject)->Realize(d.m_playerObject, SL_BOOLEAN_FALSE));
    // Buffer interface
    SL_RUN_CHECK_FALSE((*d.m_playerObject)->GetInterface(d.m_playerObject, SL_IID_BUFFERQUEUE, &d.m_bufferQueueItf));
    // Queue callback gets the private class so it can refill buffers.
    SL_RUN_CHECK_FALSE((*d.m_bufferQueueItf)->RegisterCallback(d.m_bufferQueueItf, AudioOutputOpenSLPrivate::bufferQueueCallback, &d));
    // Play interface
    SL_RUN_CHECK_FALSE((*d.m_playerObject)->GetInterface(d.m_playerObject, SL_IID_PLAY, &d.m_playItf));
    // call when SL_PLAYSTATE_STOPPED
    SL_RUN_CHECK_FALSE((*d.m_playItf)->RegisterCallback(d.m_playItf, AudioOutputOpenSLPrivate::playCallback, this));
#if 0
    SLuint32 mask = SL_PLAYEVENT_HEADATEND;
    // TODO: what does this do?
    SL_RUN_CHECK_FALSE((*d.m_playItf)->SetPositionUpdatePeriod(d.m_playItf, 100));
    SL_RUN_CHECK_FALSE((*d.m_playItf)->SetCallbackEventsMask(d.m_playItf, mask));
#endif
    // Volume interface
    //SL_RUN_CHECK_FALSE((*d.m_playerObject)->GetInterface(d.m_playerObject, SL_IID_VOLUME, &d.m_volumeItf));
    // Prime the queue with silent buffers so playback starts immediately and
    // the buffer-queue callback begins firing.
    const int kBufferSize = 1024*4;
    static char init_data[kBufferSize];
    memset(init_data, 0, sizeof(init_data));
    for (quint32 i = 0; i < d.nb_buffers; ++i) {
        SL_RUN_CHECK_FALSE((*d.m_bufferQueueItf)->Enqueue(d.m_bufferQueueItf, init_data, sizeof(init_data)));
        d.nextEnqueueInfo().data_size = sizeof(init_data);
        d.nextEnqueueInfo().timestamp = 0;
        d.bufferAdded();
        d.buffers_queued++;
    }
    SL_RUN_CHECK_FALSE((*d.m_playItf)->SetPlayState(d.m_playItf, SL_PLAYSTATE_PLAYING));
    d.available = true;
    return true;
}
示例9: DPTR_D
void QuickFBORenderer::drawFrame()
{
DPTR_D(QuickFBORenderer);
if (d.glctx != QOpenGLContext::currentContext()) {
d.glctx = QOpenGLContext::currentContext();
d.glv.setOpenGLContext(d.glctx);
}
if (!d.video_frame.isValid()) {
d.glv.fill(QColor(0, 0, 0, 0));
return;
}
//d.glv.setCurrentFrame(d.video_frame);
d.glv.render(d.out_rect, normalizedROI(), d.matrix);
}
示例10: DPTR_D
// Opens the decoder: resolves the AVCodec (by explicit name or by the context's
// codec id), runs subclass/hardware init via d.open(), applies user options,
// enables refcounted frames and calls avcodec_open2(). Emits error() and
// returns false on failure.
bool AVDecoder::open()
{
    DPTR_D(AVDecoder);
    // codec_ctx can't be null for none-ffmpeg based decoders because we may use it's properties in those decoders
    if (!d.codec_ctx) {
        qWarning("FFmpeg codec context not ready");
        return false;
    }
    AVCodec *codec = 0;
    if (!d.codec_name.isEmpty()) {
        // An explicit codec name overrides the id stored in the context.
        codec = avcodec_find_decoder_by_name(d.codec_name.toUtf8().constData());
    } else {
        codec = avcodec_find_decoder(d.codec_ctx->codec_id);
    }
    if (!codec) { // TODO: can be null for none-ffmpeg based decoders
        QString es(tr("No codec could be found for '%1'"));
        if (d.codec_name.isEmpty()) {
            es = es.arg(avcodec_get_name(d.codec_ctx->codec_id));
        } else {
            es = es.arg(d.codec_name);
        }
        qWarning() << es;
        AVError::ErrorCode ec(AVError::CodecError);
        // BUG FIX: this switched on coder_type (an encoder entropy-coder flag),
        // which never equals an AVMEDIA_TYPE_* value, so the media-specific
        // error codes were unreachable. codec_type holds the media type.
        switch (d.codec_ctx->codec_type) {
        case AVMEDIA_TYPE_VIDEO:
            ec = AVError::VideoCodecNotFound;
            break;
        case AVMEDIA_TYPE_AUDIO:
            ec = AVError::AudioCodecNotFound;
            break;
        case AVMEDIA_TYPE_SUBTITLE:
            ec = AVError::SubtitleCodecNotFound;
            break;
        default:
            break;
        }
        emit error(AVError(ec, es));
        return false;
    }
    // hwa extra init can be here
    if (!d.open()) {
        d.close();
        return false;
    }
    // TODO: skip for none-ffmpeg based decoders
    d.applyOptionsForDict();
    av_opt_set_int(d.codec_ctx, "refcounted_frames", d.enableFrameRef(), 0); // why dict may have no effect?
    AV_ENSURE_OK(avcodec_open2(d.codec_ctx, codec, d.options.isEmpty() ? NULL : &d.dict), false);
    d.is_open = true;
    return true;
}
示例11: DPTR_D
// Maps the decoded DXVA D3D9 surface into system memory and copies it into a
// VideoFrame. Returns an invalid VideoFrame if there is no decoded data, the
// decoder state is incomplete, the surface cannot be locked, or the render
// fourcc has no supported pixel format.
VideoFrame VideoDecoderDXVA::frame()
{
    DPTR_D(VideoDecoderDXVA);
    if (!d.frame->opaque || !d.frame->data[0])
        return VideoFrame();
    if (d.width <= 0 || d.height <= 0 || !d.codec_ctx)
        return VideoFrame();
    // RAII guard: locks the surface in the ctor, unlocks it in the dtor.
    class ScopedD3DLock {
    public:
        ScopedD3DLock(IDirect3DSurface9* d3d, D3DLOCKED_RECT *rect)
            : mpD3D(d3d)
        {
            if (FAILED(mpD3D->LockRect(rect, NULL, D3DLOCK_READONLY))) {
                qWarning("Failed to lock surface");
                mpD3D = 0; // mark the lock as failed so the dtor won't unlock
            }
        }
        ~ScopedD3DLock() {
            if (mpD3D)
                mpD3D->UnlockRect();
        }
    private:
        IDirect3DSurface9 *mpD3D;
    };
    // For DXVA2 frames, data[3] carries the IDirect3DSurface9 pointer.
    IDirect3DSurface9 *d3d = (IDirect3DSurface9*)(uintptr_t)d.frame->data[3];
    //picth >= desc.Width
    //D3DSURFACE_DESC desc;
    //d3d->GetDesc(&desc);
    D3DLOCKED_RECT lock;
    // BUG FIX: "ScopedD3DLock(d3d, &lock);" constructed an unnamed temporary
    // that was destroyed at the end of that statement, unlocking the surface
    // BEFORE lock.Pitch/lock.pBits were read below. The guard must be a named
    // local so the lock is held until this function returns.
    ScopedD3DLock d3d_lock(d3d, &lock);
    if (lock.Pitch == 0) {
        return VideoFrame();
    }
    const VideoFormat fmt = VideoFormat((int)D3dFindFormat(d.render)->avpixfmt);
    if (!fmt.isValid()) {
        qWarning("unsupported dxva pixel format: %#x", d.render);
        return VideoFrame();
    }
    //YV12 need swap, not imc3?
    // imc3 U V pitch == Y pitch, but half of the U/V plane is space. we convert to yuv420p here
    // nv12 bpp(1)==1
    // 3rd plane is not used for nv12
    int pitch[3] = { lock.Pitch, 0, 0}; //compute chroma later
    uint8_t *src[] = { (uint8_t*)lock.pBits, 0, 0}; //compute chroma later
    const bool swap_uv = d.render == MAKEFOURCC('I','M','C','3');
    return copyToFrame(fmt, d.surface_height, src, pitch, swap_uv);
}
示例12: DPTR_D
void XVRenderer::drawFrame()
{
DPTR_D(XVRenderer);
QRect roi = realROI();
if (!d.use_shm)
XvPutImage(d.display, d.xv_port, winId(), d.gc, d.xv_image
, roi.x(), roi.y(), roi.width(), roi.height()
, d.out_rect.x(), d.out_rect.y(), d.out_rect.width(), d.out_rect.height());
else
XvShmPutImage(d.display, d.xv_port, winId(), d.gc, d.xv_image
, roi.x(), roi.y(), roi.width(), roi.height()
, d.out_rect.x(), d.out_rect.y(), d.out_rect.width(), d.out_rect.height()
, false /*true: send event*/);
}
示例13: DPTR_D
// Stores the user option hash, rebuilds the FFmpeg AVDictionary from the
// "avcodec" (and legacy "FFmpeg") sub-hash, and applies any decoder-specific
// sub-hash (keyed by this decoder's name) as Qt properties on this object.
void AVDecoder::setOptions(const QVariantHash &dict)
{
    DPTR_D(AVDecoder);
    d.options = dict;
    // Rebuild the AVDictionary from scratch on every call.
    if (d.dict) {
        av_dict_free(&d.dict);
        d.dict = 0; //aready 0 in av_free
    }
    if (dict.isEmpty())
        return;
    // Options for avcodec live under the "avcodec" key when present;
    // otherwise the whole hash is treated as avcodec options.
    QVariantHash avcodec_dict(dict);
    if (dict.contains("avcodec"))
        avcodec_dict = dict.value("avcodec").toHash();
    // workaround for VideoDecoderFFmpeg. now it does not call av_opt_set_xxx, so set here in dict
    if (dict.contains("FFmpeg"))
        avcodec_dict.unite(dict.value("FFmpeg").toHash());
    QHashIterator<QString, QVariant> i(avcodec_dict);
    while (i.hasNext()) {
        i.next();
        switch (i.value().type()) {
        case QVariant::Hash: // for example "vaapi": {...}
            continue;
        case QVariant::Bool:
            // QVariant.toByteArray(): "true" or "false", can not recognized by avcodec
            av_dict_set(&d.dict, i.key().toLower().toUtf8().constData(), QByteArray::number(i.value().toBool()), 0);
            break;
        default:
            // avcodec key and value are in lower case
            av_dict_set(&d.dict, i.key().toLower().toUtf8().constData(), i.value().toByteArray().toLower().constData(), 0);
            break;
        }
        qDebug("avcodec option: %s=>%s", i.key().toUtf8().constData(), i.value().toByteArray().constData());
    }
    // For the plain FFmpeg decoder everything was already handled above.
    if (name() == "avcodec")
        return;
    // Decoder-specific options: try the exact name first, then lower case.
    QVariantHash property_dict(dict.value(name()).toHash());
    if (property_dict.isEmpty())
        property_dict = dict.value(name().toLower()).toHash();
    if (property_dict.isEmpty())
        return;
    // Reuse the iterator over the decoder-specific hash.
    i = QHashIterator<QString, QVariant>(property_dict);
    while (i.hasNext()) {
        i.next();
        if (i.value().type() == QVariant::Hash) // for example "vaapi": {...}
            continue;
        setProperty(i.key().toUtf8().constData(), i.value());
        qDebug("decoder property: %s=>%s", i.key().toUtf8().constData(), i.value().toByteArray().constData());
    }
}
示例14: DPTR_D
void OpenGLRendererBase::onPaintGL()
{
    DPTR_D(OpenGLRendererBase);
    // GL and QPainter can be mixed like so:
    //   QPainter painter(this);
    //   painter.beginNativePainting();
    //   ... gl calls ...
    //   painter.endNativePainting();
    //   swapBuffers();
    handlePaintEvent();
    //context()->swapBuffers(this);
    // Close out any painter the paint handlers left active.
    if (d.painter && d.painter->isActive())
        d.painter->end();
}
示例15: DPTR_D
// Appends a filter to this thread's filter chain, optionally under the
// thread's mutex. Returns false (without modifying the chain) if the filter
// is already installed.
bool AVThread::installFilter(Filter *filter, bool lock)
{
    DPTR_D(AVThread);
    // QMutexLocker is a documented no-op when given a null mutex, so the
    // previously duplicated locked/unlocked branches collapse into one path.
    QMutexLocker locker(lock ? &d.mutex : 0);
    Q_UNUSED(locker);
    if (d.filters.contains(filter))
        return false;
    d.filters.push_back(filter);
    return true;
}