

C++ QVideoFrame::bits Method Code Examples

This article collects typical usage examples of the C++ QVideoFrame::bits method. If you are unsure how QVideoFrame::bits is used in C++, or are looking for concrete examples of calling it, the hand-picked code samples below should help. You can also explore further usage examples of the QVideoFrame class that the method belongs to.


The following presents 15 code examples of the QVideoFrame::bits method, sorted by popularity by default.
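
Before the examples, here is a minimal, hedged sketch of the pattern almost all of them share: map the frame for reading, access the raw pixel data through QVideoFrame::bits(), and unmap when finished. Only the Qt 5 calls are real API; the function name inspectFrame and the debug output are illustrative.

#include <QVideoFrame>
#include <QAbstractVideoBuffer>
#include <QDebug>

// Minimal sketch (Qt 5): map a QVideoFrame, read its raw bytes via bits(), then unmap.
void inspectFrame(QVideoFrame frame)              // take a copy so it can be mapped
{
    if (!frame.map(QAbstractVideoBuffer::ReadOnly)) {
        qDebug() << "Unable to map frame";
        return;
    }

    const uchar *data = frame.bits();             // valid only while the frame is mapped
    qDebug() << "format:" << frame.pixelFormat()
             << "size:" << frame.width() << "x" << frame.height()
             << "bytesPerLine:" << frame.bytesPerLine()
             << "first byte:" << (data ? data[0] : 0);

    frame.unmap();                                // bits() must not be used after this
}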

Example 1: processFrame

void FrameProcessor::processFrame(QVideoFrame frame )
{
    double tot = 0;

    do {

        if (!frame.map(QAbstractVideoBuffer::ReadOnly)){
            qDebug() << "Unable to map frame!";
            break;
        }

        if (frame.pixelFormat() == QVideoFrame::Format_YUV420P ||
            frame.pixelFormat() == QVideoFrame::Format_NV12) {
            // Process YUV data
            uchar *b = frame.bits();
            for (int y = 0; y < frame.height(); y++) {
                uchar *lastPixel = b + frame.width();
                for (uchar *curPixel = b; curPixel < lastPixel; curPixel++){
                    if(*curPixel != 16 ) tot += *curPixel;
                    //histogram[(*curPixel * levels) >> 8] += 1.0;
                }
                b += frame.bytesPerLine();
            }
        } else {
            QImage::Format imageFormat = QVideoFrame::imageFormatFromPixelFormat(frame.pixelFormat());
            if (imageFormat != QImage::Format_Invalid) {
                // Process RGB data
                QImage image(frame.bits(), frame.width(), frame.height(), imageFormat);
                image = image.convertToFormat(QImage::Format_RGB32);

                const QRgb* b = (const QRgb*)image.bits();
                for (int y = 0; y < image.height(); y++) {
                    const QRgb *lastPixel = b + frame.width();
                    for (const QRgb *curPixel = b; curPixel < lastPixel; curPixel++){
                        //histogram[(qGray(*curPixel) * levels) >> 8] += 1.0;
                        if(*curPixel != 16 ) tot+= qGray(*curPixel);
                    }
                    b = (const QRgb*)((uchar*)b + image.bytesPerLine());
                }
            }
        }

        frame.unmap();
    } while (false);

    // Compute mean
    int mean = tot/frame.width()/frame.height();
    int timestamp = frame.startTime()/1000;
    emit dataReady(timestamp,mean);
}
Author: fcinema, Project: standalone-intf, Lines: 50, Source: xcorr.cpp

Example 2: present

bool ChilitagsSurface::present(const QVideoFrame &frame) {

    //qDebug("time: %d", m_timer.elapsed());
    //qDebug("newFrame: %dx%d", frame.width(), frame.height());
    //m_timer.restart();

    QVideoFrame copy = frame;
    //if (m_frame.pixelFormat() == QVideoFrame::Format_UYVY) {
    if (copy.map(QAbstractVideoBuffer::ReadOnly)) {
        const cv::Mat mat(copy.height(), copy.width(), CV_8UC3,
                          copy.bits());
        if (m_videoSurface) {
            m_converted.create(copy.height(), copy.width(), CV_8UC4);
            cv::cvtColor(mat, m_converted, CV_BGR2RGBA);
        }

        m_item.setTags(m_chilitags.estimate(mat));

        copy.unmap();
    }
    //qDebug("%lu tags", m_tags.size());

    if (m_videoSurface) {
        QImage image(m_converted.data,
                     m_converted.cols, m_converted.rows,
                     QImage::Format_ARGB32);
        return m_videoSurface->present(QVideoFrame(image));
    }

    return true;
}
Author: heejeongkim, Project: qimchi, Lines: 31, Source: chilitagssurface.cpp

Example 3: updateFrame

/*!
  Stores the frame as member to allow it to be processed on paint.
  Returns false when there is error, otherwise returns true.
*/
bool CustomCamera::updateFrame(const QVideoFrame &frame)
{
    if (!frame.isValid()) {
        qDebug() << "CustomCameras::updateFrame: Invalid frame";
        return false;
    }
    
    if (m_processedFrameCounter != m_incomingFrameCounter) {
        // Discard frame.
        return true;
    }
    
    m_incomingFrameCounter++;
    
    QVideoFrame f = frame;
    
    if (f.map(QAbstractVideoBuffer::ReadOnly)) {
        if (m_imageFrame.isNull() || m_imageFrame.width() != f.width() ||
                m_imageFrame.height() != f.height()) {
            m_imageFrame = QImage(f.width(), f.height(), QImage::Format_RGB32);
        }
        
        memcpy(m_imageFrame.bits(), f.bits(), f.mappedBytes());
        
        f.unmap();
        
        update();
    }
    
    return true;
}
Author: tomsplx, Project: qt-camera-demo, Lines: 35, Source: customcamera.cpp

Example 4: processFrame

void ImageSource::processFrame(QVideoFrame frame)
{
  qDebug() << recorder->duration();
  if (sendFrame) {
    if (frame.map(QAbstractVideoBuffer::ReadOnly)) {
      if (frame.pixelFormat() == QVideoFrame::Format_NV21) {
        QImage img(frame.size(), QImage::Format_RGB32);
        qt_convert_NV21_to_ARGB32((uchar*)frame.bits(), (quint32*)img.bits(), frame.width(), frame.height());
        img.save(QString("/sdcard/DCIM/DashCam/Images/%1.jpg").arg(QDateTime::currentDateTime().toString(Qt::ISODate)), "JPG");
        emit newFrame(img);
        qDebug() << "Saving Frame" << counter;
        sendFrame = false;
      }
      frame.unmap();
    }
  }

  if (startRecording) {
    QTimer::singleShot(300000, this, SLOT(newVideo()));
    recorder->setOutputLocation(QUrl::fromLocalFile(QString("/sdcard/DCIM/DashCam/Video/%1.mp4").arg(QDateTime::currentDateTime().toString(Qt::ISODate))));
    recorder->record();

    qDebug() << recorder->supportedResolutions();
    qDebug() << recorder->state();
    qDebug() << recorder->status();
    qDebug() << recorder->error();
    startRecording = false;
  }
}
Author: thatisazam, Project: android-dashcam, Lines: 29, Source: imagesource.cpp

Example 5: ConvertFormat

//If the conversion succeeds, the caller must call avpicture_free(&outFrame) to release the memory once it is done with outFrame
//Returns 0 on success, non-zero on failure
int CTool::ConvertFormat(/*[in]*/ const QVideoFrame &inFrame,
                         /*[out]*/AVPicture &outFrame,
                         /*[in]*/ int nOutWidth,
                         /*[in]*/ int nOutHeight,
                         /*[in]*/ AVPixelFormat pixelFormat)
{
    int nRet = 0;
    
    AVPicture pic;
    nRet = avpicture_fill(&pic, (uint8_t*) inFrame.bits(),
                  QVideoFrameFormatToFFMpegPixFormat(inFrame.pixelFormat()),
                  inFrame.width(),
                  inFrame.height());
    if(nRet < 0)
    {
        LOG_MODEL_DEBUG("Tool", "avpicture_fill fail:%x", nRet);
        return nRet;
    }
    
    nRet = ConvertFormat(pic, inFrame.width(), inFrame.height(),
                  QVideoFrameFormatToFFMpegPixFormat(inFrame.pixelFormat()),
                  outFrame, nOutWidth, nOutHeight, pixelFormat);

    return nRet;
}
Author: corefan, Project: rabbitim, Lines: 27, Source: Tool.cpp
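
The comment above places a requirement on the caller: a successful conversion leaves outFrame owning FFmpeg-allocated memory that must be released with avpicture_free(). A hedged usage sketch follows; the wrapper name convertForEncoding, the header name Tool.h, the choice of AV_PIX_FMT_YUV420P, and the assumption that ConvertFormat is a static member are all illustrative, while the Qt/FFmpeg calls and CTool::ConvertFormat itself come from the snippet above.

#include <QVideoFrame>
#include <QAbstractVideoBuffer>
extern "C" {
#include <libavcodec/avcodec.h>   // AVPicture, AVPixelFormat, avpicture_free()
}
#include "Tool.h"                 // assumed header declaring CTool (the source file is Tool.cpp)

void convertForEncoding(QVideoFrame frame)        // hypothetical helper, for illustration only
{
    if (!frame.map(QAbstractVideoBuffer::ReadOnly))
        return;

    AVPicture outPic;
    int nRet = CTool::ConvertFormat(frame, outPic,
                                    frame.width(), frame.height(),
                                    AV_PIX_FMT_YUV420P);
    if (nRet == 0) {
        // ... hand outPic.data / outPic.linesize to an encoder or scaler ...
        avpicture_free(&outPic);  // required on success, per the comment above
    }
    frame.unmap();                // the input frame's bits() are no longer needed
}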

Example 6: processFrame

void MyProbe::processFrame(QVideoFrame frame)
{
    // convert input to cv::mat here
    if (!frame.isValid())
        return;

    if (frame.map(QAbstractVideoBuffer::ReadOnly))
    {
        if (frame.pixelFormat() == QVideoFrame::Format_YUV420P ||
                frame.pixelFormat() == QVideoFrame::Format_NV12 ||
                frame.pixelFormat() == QVideoFrame::Format_NV21)
        {
            if (processMutex.tryLock())
            {
                // extracts gray channel from yuv image
                img = QImage(frame.bits(), frame.width(), frame.height(),
                             frame.bytesPerLine(), QImage::Format_Grayscale8).copy();
                processMutex.unlock();
                emit finished();
            }
        }
        else if (frame.pixelFormat() == QVideoFrame::Format_BGR32)
        {
            if (processMutex.tryLock())
            {
                QImage img_tmp;
                // wraps the BGR32 frame as an ARGB32 image, then converts to grayscale below
                img_tmp = QImage(frame.bits(), frame.width(), frame.height(),
                                 frame.bytesPerLine(), QImage::Format_ARGB32).copy();
                img = img_tmp.convertToFormat(QImage::Format_Grayscale8);
                processMutex.unlock();
                emit finished();
            }
        }
        else
            return;

        frame.unmap();
        return;
    }
    else
        return;
}
Author: the-max0r, Project: QVidProbeTemplate, Lines: 43, Source: calibration.cpp

Example 7: present

bool QtKCaptureBuffer::present(const QVideoFrame &frame)
//qtmultimedia\src\plugins\directshow\camera\dscamerasession.cpp
{
	static int cnt = 0;		
	if(!this->m_doCapture) return false;
	
	m_mutexA.lock();
	QVideoFrame tFrame = frame;	
    if(tFrame.map(QAbstractVideoBuffer::ReadOnly))
    {	
		this->m_doCapture = false;
		if(this->m_widthScale == 0) this->m_widthScale = frame.width();
		switch(this->m_mirrorSetting)
		{
			case mirrorVertical:
				this->m_lastFrame = QImage(frame.bits(), frame.width(), frame.height(), frame.bytesPerLine(), getQImageFormat(tFrame.pixelFormat())).mirrored(0, 1).scaledToWidth(this->m_widthScale, (Qt::TransformationMode)this->m_scaleMode);
				break;

			case mirrorHorizontal:
				this->m_lastFrame = QImage(frame.bits(), frame.width(), frame.height(), frame.bytesPerLine(), getQImageFormat(tFrame.pixelFormat())).mirrored(1, 0).scaledToWidth(this->m_widthScale, (Qt::TransformationMode)this->m_scaleMode);
				break;

			case mirrorAll:
				this->m_lastFrame = QImage(frame.bits(), frame.width(), frame.height(), frame.bytesPerLine(), getQImageFormat(tFrame.pixelFormat())).mirrored(1, 1).scaledToWidth(this->m_widthScale, (Qt::TransformationMode)this->m_scaleMode);
				break;

			case mirrorNone:			
			default:
			this->m_lastFrame = QImage(frame.bits(), frame.width(), frame.height(), frame.bytesPerLine(), getQImageFormat(tFrame.pixelFormat())).scaledToWidth(this->m_widthScale, (Qt::TransformationMode)this->m_scaleMode);
			break;
		}
		
        tFrame.unmap();
		m_mutexA.unlock();	
		
		emit imageCaptured(cnt++, this->m_lastFrame);				
		return true;

	}
	m_mutexA.unlock();
    return false;
}
Author: oskrs111, Project: qtkVideoServer, Lines: 42, Source: qtkCaptureBuffer.cpp

Example 8: paint

void DCameraView::paint(const QVideoFrame &frame)
{
    QPainter painter(this);

    QImage image(
                frame.bits(),
                frame.width(),
                frame.height(),
                frame.bytesPerLine(),
                QVideoFrame::imageFormatFromPixelFormat(frame.pixelFormat()));

    painter.drawImage(0, 0, image.mirrored(m_mirroredHorizontal, m_mirroredVertical));
}
Author: linuxdeepin, Project: libdui, Lines: 13, Source: dcameraview.cpp

Example 9: processFrame

void FrameProcessor::processFrame(QVideoFrame frame, int levels)
{
    QVector<qreal> histogram(levels);

    do {
        if (!levels)
            break;

        if (!frame.map(QAbstractVideoBuffer::ReadOnly))
            break;

        if (frame.pixelFormat() == QVideoFrame::Format_YUV420P ||
            frame.pixelFormat() == QVideoFrame::Format_NV12) {
            // Process YUV data
            uchar *b = frame.bits();
            for (int y = 0; y < frame.height(); y++) {
                uchar *lastPixel = b + frame.width();
                for (uchar *curPixel = b; curPixel < lastPixel; curPixel++)
                    histogram[(*curPixel * levels) >> 8] += 1.0;
                b += frame.bytesPerLine();
            }
        } else {
            QImage::Format imageFormat = QVideoFrame::imageFormatFromPixelFormat(frame.pixelFormat());
            if (imageFormat != QImage::Format_Invalid) {
                // Process RGB data
                QImage image(frame.bits(), frame.width(), frame.height(), imageFormat);
                image = image.convertToFormat(QImage::Format_RGB32);

                const QRgb* b = (const QRgb*)image.bits();
                for (int y = 0; y < image.height(); y++) {
                    const QRgb *lastPixel = b + frame.width();
                    for (const QRgb *curPixel = b; curPixel < lastPixel; curPixel++)
                        histogram[(qGray(*curPixel) * levels) >> 8] += 1.0;
                    b = (const QRgb*)((uchar*)b + image.bytesPerLine());
                }
            }
        }

        frame.unmap();
    } while (false);

    // Closing restored here; the upstream Qt histogram example goes on to emit
    // the computed histogram (signal name assumed).
    emit histogramReady(histogram);
}
Author: SchleunigerAG, Project: WinEC7_Qt5.3.1_Fixes, Lines: 37, Source: histogramwidget.cpp

Example 10: concatenateFrames

void DataController::concatenateFrames(DataController::WhichFrame which, QVideoFrame frame)
{
    if (!concatenatingFrameInitialized) {
        int width = frame.width();
        int height = frame.height();
        //concatenatingImage = new QImage(width*2, height, QImage::Format_RGB888);
        //concatenationPainter = new QPainter(concatenatingImage);
        concatenatingFrame = new QVideoFrame(width * 2 * height * 3,
                                             QSize(width * 2, height),
                                             width * 2 * 3, // bytes per line: 3 bytes per RGB24 pixel
                                             QVideoFrame::Format_RGB24);
        qDebug() << "Creating a concatenating frame of size " << 2*width << " x " << height;
        concatenatingFrameInitialized = true;
    }

    if (!frame.map(QAbstractVideoBuffer::ReadOnly))
        qDebug() << "Failed to map current frame";
    else {
        if (!concatenatingFrame->map(QAbstractVideoBuffer::WriteOnly))
            qDebug() << "Failed to map concatenating frame";
        else {
            //concatenationPainter->drawImage(frame.width() * (which==right),0,frame);
            for (int i=0; i < frame.height(); i++)
                memcpy(concatenatingFrame->bits() + concatenatingFrame->width()*3*i
                       + frame.width()*3*(which==right),
                       frame.bits() + frame.width()*3*i, frame.width()*3);
            concatenatingFrame->unmap();

            if (frameConcatenationState == NOT_STARTED) {
                frameConcatenationState = (which==left) ? LEFT_READY : RIGHT_READY;
            } else if (frameConcatenationState == LEFT_READY) {
                if (which == left)
                    qDebug() << "Two left frames received before a right frame";
                else {
                    frameConcatenationState = NOT_STARTED;
                    emit newFrame(*concatenatingFrame);
                }
            } else if (frameConcatenationState == RIGHT_READY) {
                if (which == right)
                    qDebug() << "Two right frames received before a right frame";
                else {
                    frameConcatenationState = NOT_STARTED;
                    emit newFrame(*concatenatingFrame);
                }
            }
        }
        frame.unmap();
    }


}
Author: kemerelab, Project: BehavioralVideo, Lines: 49, Source: DataController.cpp

Example 11: onImageAvailable

void MainWindow::onImageAvailable( int id, const QVideoFrame& buffer )
{
    qDebug() << "Capture image available...";

    QImage::Format imageFormat = QVideoFrame::imageFormatFromPixelFormat( buffer.pixelFormat() );
    QImage img( buffer.bits(), buffer.width(), buffer.height(), buffer.bytesPerLine(), imageFormat );

    QPixmap image = QPixmap::fromImage( img );
    QLabel* l = new QLabel();

    ui->tabWidget->addTab( l, QString( "%1" ).arg( id ) );

    l->setPixmap( image );
    l->show();
}
Author: aatwo, Project: TestProjects, Lines: 15, Source: mainwindow.cpp

Example 12: newFrame

void VideoWriter::newFrame(QVideoFrame frame)
{
    if ((frame.width() != width) || (frame.height() != height)) {
        width = frame.width();
        height = frame.height();
    }
    if (waitingToInitialize) {
        initialize(*vFilename);
    }
    if (currentlyWriting) {
        if (!frame.map(QAbstractVideoBuffer::ReadOnly)) {
            qDebug() << "Failure to map video frame in writer";
            return;
        }

        AVCodecContext *c = video_st->codec;
        avpicture_fill((AVPicture *)tmp_picture, frame.bits(),
                       PIX_FMT_RGB24, c->width, c->height);
        sws_scale(sws_ctx, tmp_picture->data, tmp_picture->linesize,
                  0, c->height, picture->data, picture->linesize);
        picture->pts = frameCounter++;
        frame.unmap();

        /* encode the image */
        /* if zero size, it means the image was buffered */
        /* write the compressed frame in the media file */
        /* XXX: in case of B frames, the pts is not yet valid */
        int out_size = avcodec_encode_video(c, video_outbuf, video_outbuf_size, picture);
        if (out_size > 0) {
            AVPacket pkt;
            av_init_packet(&pkt);
            if (c->coded_frame->pts != AV_NOPTS_VALUE)
                pkt.pts= av_rescale_q(c->coded_frame->pts, c->time_base, video_st->time_base);
            if(c->coded_frame->key_frame)
                pkt.flags |= AV_PKT_FLAG_KEY;
            pkt.stream_index= video_st->index;
            pkt.data= video_outbuf;
            pkt.size= out_size;

            /* write the compressed frame in the media file */
            //ret = av_interleaved_write_frame(oc, &pkt);
            int ret = av_write_frame(oc, &pkt);
        }


        // Save time stamp
    }
}
Author: kemerelab, Project: BehavioralVideo, Lines: 48, Source: VideoWriter.cpp

Example 13: onBufferAvailable

void c3::onBufferAvailable(int id, const QVideoFrame& pFrame)
{
    this->camera->unlock();
    this->camera->stop();

    QVideoFrame lvFrame = pFrame;
    if (!lvFrame.map(QAbstractVideoBuffer::ReadOnly)) {
        return;
    }
    QImage lvImage;
    lvImage.loadFromData((const uchar*)lvFrame.bits(), lvFrame.mappedBytes(), (const char*)"JPEG");
    lvFrame.unmap();

/* here you can process lvImage before saving */
//    lvImage.invertPixels(QImage::InvertRgb);

    lvImage.save(defaultSaveFileName, "JPEG");
    this->camera->start();
}
Author: vladkrylov, Project: qtcamera, Lines: 19, Source: c3.cpp

Example 14: convertAllFrames

    void convertAllFrames() {
        mProgressBar->setMaximum(mFrames.length() - 1);
        int count = 0;

        foreach(QVideoFrame frame, mFrames) {
            mProgressBar->setValue(count++);
            QImage image;
            if (frame.pixelFormat() == QVideoFrame::Format_RGB32) {
                // Copy const QVideoFrame to mutable QVideoFrame.
                QVideoFrame nonConstFrame = frame;
                // Unlock for reading the stack frame (increment ref pointer)
                nonConstFrame.map(QAbstractVideoBuffer::ReadOnly);
                // Create new image from the frame bits
                image = QImage(
                        nonConstFrame.bits(),
                        nonConstFrame.width(),
                        nonConstFrame.height(),
                        nonConstFrame.bytesPerLine(),
                        QVideoFrame::imageFormatFromPixelFormat(nonConstFrame.pixelFormat()));
                nonConstFrame.unmap();
            } else {
                image = QImage(frame.size(), QImage::Format_RGB32);
                mFrameConverter->convertFrame(frame, &image);
            }

            QString imgFileName = QString("%1.%2.png").arg(mFileName).arg(++mCount, 2, 10, QChar('0'));
            //QFile file(imgFileName);
            //file.open(QFile::WriteOnly);

            bool saved = image.save(imgFileName, "png");
            if (saved) {
                log->info("File: %1 saved", imgFileName);
            } else {
                log->info("File: %1 not saved", imgFileName);
            }

        }
    }
Author: CaptEmulation, Project: morpheus-converter, Lines: 37, Source: QMPanConverter.cpp

Example 15: convertFrame

    void convertFrame(QVideoFrame &frame, QImage *image) {
        if(frame.pixelFormat() == QVideoFrame::Format_YUV420P && frame.map(QAbstractVideoBuffer::ReadOnly)) {
            const qint32 frameWidth = frame.width();
            const qint32 frameHeight = frame.height();
            const qint32 frameSize = frameHeight * frameWidth;
            const uchar *lumaYBytes = frame.bits();
            const uchar *chromaUBytes = lumaYBytes + frameSize;
            const uchar *chromaVBytes = chromaUBytes + (frameSize / 4);

            for (int y = 0; y < frameHeight; y++) {
                for (int x = 0; x < frameWidth; x++) {
                    const int Y = lumaYBytes[y * frameWidth + x];
                    const int U = chromaUBytes[(y / 2) * (frameWidth / 2) + (x / 2)];
                    const int V = chromaVBytes[(y / 2) * (frameWidth / 2) + (x / 2)];
                    const int r = qBound(0.0, 1.164 * (Y - 16) + 1.596 * (V - 128), 255.0);
                    const int g = qBound(0.0, 1.164 * (Y - 16) - 0.813 * (V - 128) - 0.391 * (U - 128), 255.0);
                    const int b = qBound(0.0, 1.164 * (Y - 16) + 2.018 * (U - 128), 255.0);

                    image->setPixel(x, y, qRgb(r, g, b));
                }
            }
            frame.unmap();
        }
    }
Author: CaptEmulation, Project: morpheus-converter, Lines: 24, Source: QMFrameSoftwareRenderYuv.cpp
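
For reference, the per-pixel arithmetic in Example 15 uses the familiar BT.601 limited-range YUV-to-RGB coefficients, with each chroma sample (U, V) shared by a 2x2 block of luma pixels, which is why the code indexes them at (x/2, y/2). In LaTeX form, with clamp() denoting clipping to [0, 255]:

\begin{aligned}
R &= \operatorname{clamp}\bigl(1.164\,(Y-16) + 1.596\,(V-128)\bigr) \\
G &= \operatorname{clamp}\bigl(1.164\,(Y-16) - 0.813\,(V-128) - 0.391\,(U-128)\bigr) \\
B &= \operatorname{clamp}\bigl(1.164\,(Y-16) + 2.018\,(U-128)\bigr)
\end{aligned}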


Note: The QVideoFrame::bits method examples in this article were collected by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright remains with the original authors. When redistributing or using the code, please follow the license of the corresponding project; do not reproduce this article without permission.