

C++ duration Function Code Examples

This article collects typical usage examples of the duration function in C++. If you are unsure what duration does, how to use it, or what real-world usage looks like, the curated code examples below may help.


The following presents 15 code examples of the duration function, sorted by popularity by default. Note that the examples come from different projects, so duration variously names a std::chrono-style duration type, a class member function, or a project-specific helper.

Example 1: sprintf

void cSemaineEmotionSender::sendDimensionsFSRE_I( cComponentMessage *_msg )
{
  // range check:
  //if (_msg->floatData[0] < 0) _msg->floatData[0] = 0.0;
  //if (_msg->floatData[0] > 1) _msg->floatData[0] = 1.0;
  //--

  char strtmp[50];
  sprintf(strtmp,"%.2f",_msg->floatData[0]);
  std::string dimVal(strtmp);

  sprintf(strtmp,"%ld",smileTimeToSemaineTime(_msg->userTime1));
  std::string startTm(strtmp);
  sprintf(strtmp,"%ld",(long long)round((_msg->userTime2 - _msg->userTime1)*1000.0));
  std::string duration(strtmp);

  sprintf(strtmp,"%s",(const char *)(_msg->msgname));
  std::string codername(strtmp);

  // Create and fill a simple EMMA EmotionML document
  XERCESC_NS::DOMDocument * document = XMLTool::newDocument(EMMA::E_EMMA, EMMA::namespaceURI, EMMA::version);
  XMLTool::setPrefix(document->getDocumentElement(), "emma");

  XERCESC_NS::DOMElement * interpretation = XMLTool::appendChildElement(document->getDocumentElement(), EMMA::E_INTERPRETATION);
  XMLTool::setAttribute(interpretation, EMMA::A_OFFSET_TO_START, startTm);
  XMLTool::setAttribute(interpretation, EMMA::A_DURATION, duration);
  XMLTool::setAttribute(interpretation, EMMA::A_CONFIDENCE, "1.0");
  XMLTool::setPrefix(interpretation, "emma");
  
  XERCESC_NS::DOMElement * emotion = XMLTool::appendChildElement(interpretation, EmotionML::E_EMOTION, EmotionML::namespaceURI);
  XMLTool::setAttribute(emotion, EmotionML::A_DIMENSION_VOCABULARY, EmotionML::VOC_FSRE_DIMENSION_DEFINITION);
  XMLTool::setAttribute(emotion, EmotionML::A_MODALITY, "voice");
  XMLTool::setPrefix(emotion, "emotion");

  int i; int intIdx = -1;
  sClassifierResults * re = ((sClassifierResults*)(_msg->custData));
  for (i=0; i<re->nFilled; i++) {
    if (!strcmp(re->resnameA[i],intensityStr)) {
      intIdx = i; continue;
    }

    char strtmp[50];
    if (!strcmp(re->resnameA[i],unpredictabilityStr)) {
      re->res[i] = (1.0 - re->res[i])/2.0;   // invert, then rescale to [0,1] (admittedly a hack)
    } else {
      re->res[i] = (re->res[i] + 1.0)/2.0;
    } 
    if (re->res[i] < 0.0) re->res[i] = 0.0;
    if (re->res[i] > 1.0) re->res[i] = 1.0;

    sprintf(strtmp,"%.2f",re->res[i]);
    std::string dimVal(strtmp);
    sprintf(strtmp,"%s",re->resnameA[i]);
    std::string dimStr(strtmp);

    XERCESC_NS::DOMElement * dimension = XMLTool::appendChildElement(emotion, EmotionML::E_DIMENSION, EmotionML::namespaceURI);
    XMLTool::setAttribute(dimension, EmotionML::A_NAME, dimStr); // dimensionStr
    XMLTool::setAttribute(dimension, EmotionML::A_VALUE, dimVal);
    XMLTool::setPrefix(dimension, "emotion");
  }

  XERCESC_NS::DOMElement * info = XMLTool::appendChildElement(emotion, EmotionML::E_INFO);
  XERCESC_NS::DOMElement * coder = XMLTool::appendChildElement(info, "predictor");
  XMLTool::setAttribute(coder, "value", codername);
  XMLTool::setPrefix(coder, "emotion");
  XMLTool::setPrefix(info, "emotion");

  if (intIdx >= 0) {
    XERCESC_NS::DOMElement * intensity = XMLTool::appendChildElement(interpretation, EmotionML::E_EMOTION, EmotionML::namespaceURI);
    XMLTool::setAttribute(intensity, EmotionML::A_DIMENSION_VOCABULARY, EmotionML::VOC_SEMAINE_INTENSITY_DIMENSION_DEFINITON);
    XMLTool::setAttribute(intensity, EmotionML::A_MODALITY, "voice");
    XMLTool::setPrefix(intensity, "emotion");

    sprintf(strtmp,"%.2f",re->res[intIdx]);
    std::string dimVal(strtmp);
    sprintf(strtmp,"%s",re->resnameA[intIdx]);
    std::string dimStr(strtmp);

    XERCESC_NS::DOMElement * idim = XMLTool::appendChildElement(intensity, EmotionML::E_DIMENSION, EmotionML::namespaceURI);
    XMLTool::setAttribute(idim, EmotionML::A_NAME, dimStr); // dimensionStr
    XMLTool::setAttribute(idim, EmotionML::A_VALUE, dimVal);
    XMLTool::setPrefix(idim, "emotion");

    XERCESC_NS::DOMElement * iinfo = XMLTool::appendChildElement(intensity, EmotionML::E_INFO);
    XERCESC_NS::DOMElement * icoder = XMLTool::appendChildElement(iinfo, "predictor");
    XMLTool::setAttribute(icoder, "value", codername);
    XMLTool::setPrefix(icoder, "emotion");
    XMLTool::setPrefix(iinfo, "emotion");
  }

  sendDocument(document);
}
Developer: AlexHung780312, Project: opensmile, Lines: 92, Source: semaineEmotionSender.cpp

Example 2: time_point

 system_clock::time_point system_clock::from_time_t(std::time_t t)
 {
     return time_point(duration(static_cast<system_clock::rep>(t) * 1000000000));
 }
Developer: AsgeirSH, Project: Client, Lines: 4, Source: chrono.hpp
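
For comparison, the same scale conversion written directly against std::chrono; a minimal sketch with a hypothetical helper name (time_t counts whole seconds since the epoch, so multiplying by 10^9 gives nanoseconds):

#include <chrono>
#include <ctime>

// Sketch of the conversion above in portable std::chrono terms: scale the
// time_t second count up to nanoseconds, then cast to the clock's own
// duration type (hypothetical helper, not part of the project above).
std::chrono::system_clock::time_point from_time_t_ns(std::time_t t)
{
    using namespace std::chrono;
    nanoseconds ns(static_cast<long long>(t) * 1000000000LL);
    return system_clock::time_point(duration_cast<system_clock::duration>(ns));
}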

Example 3: vrpn_gettimeofday

int vrpn_Tng3::syncDatastream (double seconds) {

    struct timeval miniDelay;
    miniDelay.tv_sec = 0;
    miniDelay.tv_usec = 50000;

    unsigned long maxDelay = 1000000L * (long) seconds;
    struct timeval start_time;
    vrpn_gettimeofday(&start_time, NULL);

    int loggedOn = 0;
    int numRead;

    if (serial_fd < 0) {
	return 0;
    }

    // ensure that the packet start byte is valid
    if ( bDataPacketStart != 0x55 && bDataPacketStart != 0xAA ) {
      bDataPacketStart = 0x55;
    }

    vrpn_flush_input_buffer(serial_fd);

//    vrpn_write_characters(serial_fd, (const unsigned char *)"E", 1);
    pause (0.01);

    while (!loggedOn) {
	struct timeval current_time;
	vrpn_gettimeofday(&current_time, NULL);
	if (duration(current_time, start_time) > maxDelay ) {
	    // if we've timed out, go back unhappy
	    fprintf(stderr,"vrpn_Tng3::syncDatastream timeout expired: %d secs\n", (int)seconds);
	    return 0;  // go back unhappy
	}
	
	// get a byte
        if (1 != vrpn_read_available_characters(serial_fd, _buffer, 1, &miniDelay)) {
	    continue;
        }
	// if not a record start, skip
        if (_buffer[0] != bDataPacketStart) {
	    continue;
        }
	// invert the packet start byte for the next test
	bDataPacketStart ^= 0xFF;
	
	// get an entire report
	numRead = vrpn_read_available_characters(serial_fd, 
		    _buffer, DATA_RECORD_LENGTH, &miniDelay);

        if (numRead < DATA_RECORD_LENGTH) {
	    continue;
        }

	// get the start byte for the next packet
        if (1 != vrpn_read_available_characters(serial_fd, _buffer, 1, &miniDelay)) {
	    continue;
        }

	// if not the anticipated record start, things are not yet sync'd
        if (_buffer[0] != bDataPacketStart) {
	    continue;
        }

	// invert the packet start byte in anticipation of normal operation
	bDataPacketStart ^= 0xFF;

	// get an entire report
	numRead = vrpn_read_available_characters(serial_fd, 
			_buffer, DATA_RECORD_LENGTH, &miniDelay);

        if (numRead < DATA_RECORD_LENGTH) {
	    continue;
        }

	return 1;
    }
    return 0;
}
Developer: BlueBrain, Project: vrpn, Lines: 80, Source: vrpn_Tng3.C
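
The duration(current_time, start_time) call above is compared against maxDelay, which is in microseconds, so it is presumably a VRPN helper returning the elapsed microseconds between two timevals. A minimal sketch under that assumption (not necessarily the library's actual implementation):

#include <sys/time.h>

// Hypothetical helper matching the usage above: elapsed time between two
// timevals, in microseconds (the same unit as maxDelay).
static unsigned long duration(const struct timeval &now,
                              const struct timeval &start)
{
    return 1000000L * (now.tv_sec - start.tv_sec)
           + (now.tv_usec - start.tv_usec);
}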

Example 4: LOG

void MediaPlayerPrivateAVFoundation::updateStates()
{
    MediaPlayer::NetworkState oldNetworkState = m_networkState;
    MediaPlayer::ReadyState oldReadyState = m_readyState;

    LOG(Media, "MediaPlayerPrivateAVFoundation::updateStates(%p) - entering with networkState = %i, readyState = %i", 
        this, static_cast<int>(m_networkState), static_cast<int>(m_readyState));

    if (m_loadingMetadata)
        m_networkState = MediaPlayer::Loading;
    else {
        // -loadValuesAsynchronouslyForKeys:completionHandler: has invoked its handler; test status of keys and determine state.
        AVAssetStatus avAssetStatus = assetStatus();
        ItemStatus itemStatus = playerItemStatus();
        
        m_assetIsPlayable = (avAssetStatus == MediaPlayerAVAssetStatusPlayable);
        if (m_readyState < MediaPlayer::HaveMetadata && avAssetStatus > MediaPlayerAVAssetStatusLoading) {
            if (m_assetIsPlayable) {
                if (itemStatus == MediaPlayerAVPlayerItemStatusUnknown) {
                    if (avAssetStatus == MediaPlayerAVAssetStatusFailed || m_preload > MediaPlayer::MetaData) {
                        // We may have a playable asset that doesn't support inspection prior to playback; go ahead 
                        // and create the AVPlayerItem now. When the AVPlayerItem becomes ready to play, we will 
                        // have access to its metadata. Or we may have been asked to become ready to play immediately.
                        m_networkState = MediaPlayer::Loading;
                        prepareToPlay();
                    } else
                        m_networkState = MediaPlayer::Idle;
                }
                if (avAssetStatus == MediaPlayerAVAssetStatusLoaded)
                    m_readyState = MediaPlayer::HaveMetadata;
            } else {
                // FIX ME: fetch the error associated with the @"playable" key to distinguish between format 
                // and network errors.
                m_networkState = MediaPlayer::FormatError;
            }
        }
        
        if (avAssetStatus >= MediaPlayerAVAssetStatusLoaded && itemStatus > MediaPlayerAVPlayerItemStatusUnknown) {
            if (seeking())
                m_readyState = m_readyState >= MediaPlayer::HaveMetadata ? MediaPlayer::HaveMetadata : MediaPlayer::HaveNothing;
            else {
                float maxLoaded = maxTimeLoaded();
                switch (itemStatus) {
                case MediaPlayerAVPlayerItemStatusUnknown:
                    break;
                case MediaPlayerAVPlayerItemStatusFailed:
                    m_networkState = MediaPlayer::DecodeError;
                    break;
                case MediaPlayerAVPlayerItemStatusPlaybackLikelyToKeepUp:
                    m_readyState = MediaPlayer::HaveEnoughData;
                    break;
                case MediaPlayerAVPlayerItemStatusReadyToPlay:
                case MediaPlayerAVPlayerItemStatusPlaybackBufferEmpty:
                case MediaPlayerAVPlayerItemStatusPlaybackBufferFull:
                    if (maxLoaded > currentTime())
                        m_readyState = MediaPlayer::HaveFutureData;
                    else
                        m_readyState = MediaPlayer::HaveCurrentData;
                    break;
                }

                if (itemStatus >= MediaPlayerAVPlayerItemStatusReadyToPlay)
                    m_networkState = (maxLoaded == duration()) ? MediaPlayer::Loaded : MediaPlayer::Loading;
            }
        }
    }

    if (isReadyForVideoSetup() && currentRenderingMode() != preferredRenderingMode())
        setUpVideoRendering();

    if (m_networkState != oldNetworkState)
        m_player->networkStateChanged();

    if (m_readyState != oldReadyState)
        m_player->readyStateChanged();

    LOG(Media, "MediaPlayerPrivateAVFoundation::updateStates(%p) - exiting with networkState = %i, readyState = %i", 
        this, static_cast<int>(m_networkState), static_cast<int>(m_readyState));
}
Developer: dankurka, Project: webkit_titanium, Lines: 79, Source: MediaPlayerPrivateAVFoundation.cpp

Example 5: Q_UNUSED

void RGBMatrix::write(MasterTimer* timer, QList<Universe *> universes)
{
    Q_UNUSED(timer);

    {
        QMutexLocker algorithmLocker(&m_algorithmMutex);
        if (m_group == NULL)
        {
            // No fixture group to control
            stop(FunctionParent::master());
            return;
        }

        // No time to do anything.
        if (duration() == 0)
            return;

        // Invalid/nonexistent script
        if (m_algorithm == NULL || m_algorithm->apiVersion() == 0)
            return;

        if (isPaused() == false)
        {
            // Get a new map every time elapsed is reset to zero
            if (elapsed() < MasterTimer::tick())
            {
                if (tempoType() == Beats)
                    m_stepBeatDuration = beatsToTime(duration(), timer->beatTimeDuration());

                //qDebug() << "RGBMatrix step" << m_stepHandler->currentStepIndex() << ", color:" << QString::number(m_stepHandler->stepColor().rgb(), 16);
                RGBMap map = m_algorithm->rgbMap(m_group->size(), m_stepHandler->stepColor().rgb(), m_stepHandler->currentStepIndex());
                updateMapChannels(map, m_group);
            }
        }
    }

    // Run the generic fader that takes care of fading in/out individual channels
    m_fader->write(universes, isPaused());

    if (isPaused() == false)
    {
        // Increment the ms elapsed time
        incrementElapsed();

        /* Check if we need to change direction, stop completely or go to next step
         * The cases are:
         * 1- time tempo type: act normally, on ms elapsed time
         * 2- beat tempo type, beat occurred: check if the elapsed beats is a multiple of
         *    the step beat duration. If so, proceed to the next step
         * 3- beat tempo type, not beat: if the ms elapsed time reached the step beat
         *    duration in ms, and the ms time to the next beat is not less than 1/16 of
         *    the step beat duration in ms, then proceed to the next step. If the ms time to the
         *    next beat is less than 1/16 of the step beat duration in ms, then defer the step
         *    change to case #2, to resync the matrix to the next beat
         */
        if (tempoType() == Time && elapsed() >= duration())
        {
            roundCheck();
        }
        else if (tempoType() == Beats)
        {
            if (timer->isBeat())
            {
                incrementElapsedBeats();
                qDebug() << "Elapsed beats:" << elapsedBeats() << ", time elapsed:" << elapsed() << ", step time:" << m_stepBeatDuration;
                if (elapsedBeats() % duration() == 0)
                {
                    roundCheck();
                    resetElapsed();
                }
            }
            else if (elapsed() >= m_stepBeatDuration && (uint)timer->timeToNextBeat() > m_stepBeatDuration / 16)
            {
                qDebug() << "Elapsed exceeded";
                roundCheck();
            }
        }
    }
}
Developer: janosvitok, Project: qlcplus, Lines: 79, Source: rgbmatrix.cpp

Example 6: ipAddressStr

char* ServerMediaSession::generateSDPDescription() {
  AddressString ipAddressStr(ourIPAddress(envir()));
  unsigned ipAddressStrSize = strlen(ipAddressStr.val());

  // For a SSM sessions, we need a "a=source-filter: incl ..." line also:
  char* sourceFilterLine;
  if (fIsSSM) {
    char const* const sourceFilterFmt =
      "a=source-filter: incl IN IP4 * %s\r\n"
      "a=rtcp-unicast: reflection\r\n";
    unsigned const sourceFilterFmtSize = strlen(sourceFilterFmt) + ipAddressStrSize + 1;

    sourceFilterLine = new char[sourceFilterFmtSize];
    sprintf(sourceFilterLine, sourceFilterFmt, ipAddressStr.val());
  } else {
    sourceFilterLine = strDup("");
  }

  char* rangeLine = NULL; // for now
  char* sdp = NULL; // for now

  do {
    // Count the lengths of each subsession's media-level SDP lines.
    // (We do this first, because the call to "subsession->sdpLines()"
    // causes correct subsession 'duration()'s to be calculated later.)
    unsigned sdpLength = 0;
    ServerMediaSubsession* subsession;
    for (subsession = fSubsessionsHead; subsession != NULL;
	 subsession = subsession->fNext) {
      char const* sdpLines = subsession->sdpLines();
      if (sdpLines == NULL) continue; // the media's not available
      sdpLength += strlen(sdpLines);
    }
    if (sdpLength == 0) break; // the session has no usable subsessions

    // Unless subsessions have differing durations, we also have a "a=range:" line:
    float dur = duration();
    if (dur == 0.0) {
      rangeLine = strDup("a=range:npt=0-\r\n");
    } else if (dur > 0.0) {
      char buf[100];
      sprintf(buf, "a=range:npt=0-%.3f\r\n", dur);
      rangeLine = strDup(buf);
    } else { // subsessions have differing durations, so "a=range:" lines go there
      rangeLine = strDup("");
    }

    char const* const sdpPrefixFmt =
      "v=0\r\n"
      "o=- %ld%06ld %d IN IP4 %s\r\n"
      "s=%s\r\n"
      "i=%s\r\n"
      "t=0 0\r\n"
      "a=tool:%s%s\r\n"
      "a=type:broadcast\r\n"
      "a=control:*\r\n"
      "%s"
      "%s"
      "a=x-qt-text-nam:%s\r\n"
      "a=x-qt-text-inf:%s\r\n"
      "%s";
    sdpLength += strlen(sdpPrefixFmt)
      + 20 + 6 + 20 + ipAddressStrSize
      + strlen(fDescriptionSDPString)
      + strlen(fInfoSDPString)
      + strlen(libNameStr) + strlen(libVersionStr)
      + strlen(sourceFilterLine)
      + strlen(rangeLine)
      + strlen(fDescriptionSDPString)
      + strlen(fInfoSDPString)
      + strlen(fMiscSDPLines);
    sdpLength += 1000; // in case the length of the "subsession->sdpLines()" calls below change
    sdp = new char[sdpLength];
    if (sdp == NULL) break;

    // Generate the SDP prefix (session-level lines):
    snprintf(sdp, sdpLength, sdpPrefixFmt,
	     fCreationTime.tv_sec, fCreationTime.tv_usec, // o= <session id>
	     1, // o= <version> // (needs to change if params are modified)
	     ipAddressStr.val(), // o= <address>
	     fDescriptionSDPString, // s= <description>
	     fInfoSDPString, // i= <info>
	     libNameStr, libVersionStr, // a=tool:
	     sourceFilterLine, // a=source-filter: incl (if a SSM session)
	     rangeLine, // a=range: line
	     fDescriptionSDPString, // a=x-qt-text-nam: line
	     fInfoSDPString, // a=x-qt-text-inf: line
	     fMiscSDPLines); // miscellaneous session SDP lines (if any)

    // Then, add the (media-level) lines for each subsession:
    char* mediaSDP = sdp;
    for (subsession = fSubsessionsHead; subsession != NULL;
	 subsession = subsession->fNext) {
      unsigned mediaSDPLength = strlen(mediaSDP);
      mediaSDP += mediaSDPLength;
      sdpLength -= mediaSDPLength;
      if (sdpLength <= 1) break; // the SDP has somehow become too long

      char const* sdpLines = subsession->sdpLines();
      if (sdpLines != NULL) snprintf(mediaSDP, sdpLength, "%s", sdpLines);
//......... rest of the code omitted .........
Developer: viso89, Project: public, Lines: 101, Source: ServerMediaSession.cpp

Example 7: time_point

cf_clock::time_point cf_clock::from_time_t(const time_t &__t) noexcept
{
    return time_point(duration(CFAbsoluteTime(__t) - kCFAbsoluteTimeIntervalSince1970));
}
Developer: becka11y, Project: readium-sdk, Lines: 4, Source: run_loop_cf.cpp

Example 8: duration

quint32 Function::totalDuration()
{
    // fall back to duration in case a
    // subclass doesn't provide this method
    return duration();
}
Developer: mcallegari, Project: qlcplus, Lines: 6, Source: function.cpp

Example 9: qDebug

void AVPlayer::seekBackward()
{
    demuxer_thread->seekBackward();
    qDebug("seek %f%%", clock->value()/duration()*100.0);
}
Developer: tianqizi, Project: QtAV, Lines: 5, Source: AVPlayer.cpp

Example 10: duration

double GameTime::GetTotalElapsed(void)
{
	boost::posix_time::time_duration duration(m_latestTime - m_startTime);
	return double(duration.total_nanoseconds()*0.000000001);
}
Developer: tcsavage, Project: aspectgtk, Lines: 5, Source: GameTime.cpp
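
The same computation expressed with std::chrono, for comparison: duration<double> counts seconds directly, so no manual nanosecond scaling is needed (hypothetical helper, assuming both time points come from the same clock):

#include <chrono>

// Elapsed seconds between two time points, as a double; equivalent to the
// total_nanoseconds() * 1e-9 conversion in the boost version above.
double total_elapsed_seconds(std::chrono::steady_clock::time_point start,
                             std::chrono::steady_clock::time_point latest)
{
    return std::chrono::duration<double>(latest - start).count();
}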

Example 11: durationChanged

void MplVideoPlayerBackend::durationIsKnown()
{
    emit durationChanged(duration());
}
Developer: Wellsen, Project: bluecherry-client, Lines: 4, Source: MplVideoPlayerBackend.cpp

Example 12: assert

void media_input::open(const std::vector<std::string> &urls, const device_request &dev_request)
{
    assert(urls.size() > 0);

    // Open media objects
    _is_device = dev_request.is_device();
    _media_objects.resize(urls.size());
    for (size_t i = 0; i < urls.size(); i++)
    {
        _media_objects[i].open(urls[i], dev_request);
    }

    // Construct id for this input
    _id = basename(_media_objects[0].url());
    for (size_t i = 1; i < _media_objects.size(); i++)
    {
        _id += '/';
        _id += basename(_media_objects[i].url());
    }

    // Gather metadata
    for (size_t i = 0; i < _media_objects.size(); i++)
    {
        // Note that we may have multiple identical tag names in our metadata
        for (size_t j = 0; j < _media_objects[i].tags(); j++)
        {
            _tag_names.push_back(_media_objects[i].tag_name(j));
            _tag_values.push_back(_media_objects[i].tag_value(j));
        }
    }

    // Gather streams and stream names
    for (size_t i = 0; i < _media_objects.size(); i++)
    {
        for (int j = 0; j < _media_objects[i].video_streams(); j++)
        {
            _video_stream_names.push_back(_media_objects[i].video_frame_template(j).format_info());
        }
    }
    if (_video_stream_names.size() > 1)
    {
        for (size_t i = 0; i < _video_stream_names.size(); i++)
        {
            _video_stream_names[i].insert(0,
                    std::string(1, '#') + str::from(i + 1) + '/'
                    + str::from(_video_stream_names.size()) + ": ");
        }
    }
    for (size_t i = 0; i < _media_objects.size(); i++)
    {
        for (int j = 0; j < _media_objects[i].audio_streams(); j++)
        {
            _audio_stream_names.push_back(_media_objects[i].audio_blob_template(j).format_info());
        }
    }
    if (_audio_stream_names.size() > 1)
    {
        for (size_t i = 0; i < _audio_stream_names.size(); i++)
        {
            _audio_stream_names[i].insert(0,
                    std::string(1, '#') + str::from(i + 1) + '/'
                    + str::from(_audio_stream_names.size()) + ": ");
        }
    }
    for (size_t i = 0; i < _media_objects.size(); i++)
    {
        for (int j = 0; j < _media_objects[i].subtitle_streams(); j++)
        {
            _subtitle_stream_names.push_back(_media_objects[i].subtitle_box_template(j).format_info());
        }
    }
    if (_subtitle_stream_names.size() > 1)
    {
        for (size_t i = 0; i < _subtitle_stream_names.size(); i++)
        {
            _subtitle_stream_names[i].insert(0,
                    std::string(1, '#') + str::from(i + 1) + '/'
                    + str::from(_subtitle_stream_names.size()) + ": ");
        }
    }

    // Set duration information
    _duration = std::numeric_limits<int64_t>::max();
    for (size_t i = 0; i < _media_objects.size(); i++)
    {
        for (int j = 0; j < _media_objects[i].video_streams(); j++)
        {
            int64_t d = _media_objects[i].video_duration(j);
            if (d < _duration)
            {
                _duration = d;
            }
        }
        for (int j = 0; j < _media_objects[i].audio_streams(); j++)
        {
            int64_t d = _media_objects[i].audio_duration(j);
            if (d < _duration)
            {
                _duration = d;
            }
//......... rest of the code omitted .........
Developer: 63n, Project: bino, Lines: 101, Source: media_input.cpp

Example 13: algorithmLocker

void RGBMatrix::roundCheck(const QSize& size)
{
    QMutexLocker algorithmLocker(&m_algorithmMutex);
    if (m_algorithm == NULL)
        return;

    if (runOrder() == PingPong)
    {
        if (m_direction == Forward && (m_step + 1) == m_algorithm->rgbMapStepCount(size))
        {
            m_direction = Backward;
            m_step = m_algorithm->rgbMapStepCount(size) - 2;
            if (m_endColor.isValid())
                m_stepColor = m_endColor;

            updateStepColor(m_step);
        }
        else if (m_direction == Backward && (m_step - 1) < 0)
        {
            m_direction = Forward;
            m_step = 1;
            m_stepColor = m_startColor;
            updateStepColor(m_step);
        }
        else
        {
            if (m_direction == Forward)
                m_step++;
            else
                m_step--;
            updateStepColor(m_step);
        }
    }
    else if (runOrder() == SingleShot)
    {
        if (m_direction == Forward)
        {
            if (m_step >= m_algorithm->rgbMapStepCount(size) - 1)
                stop();
            else
            {
                m_step++;
                updateStepColor(m_step);
            }
        }
        else
        {
            if (m_step <= 0)
                stop();
            else
            {
                m_step--;
                updateStepColor(m_step);
            }
        }
    }
    else
    {
        if (m_direction == Forward)
        {
            if (m_step >= m_algorithm->rgbMapStepCount(size) - 1)
            {
                m_step = 0;
                m_stepColor = m_startColor;
            }
            else
            {
                m_step++;
                updateStepColor(m_step);
            }
        }
        else
        {
            if (m_step <= 0)
            {
                m_step = m_algorithm->rgbMapStepCount(size) - 1;
                if (m_endColor.isValid())
                    m_stepColor = m_endColor;
            }
            else
            {
                m_step--;
                updateStepColor(m_step);
            }
        }
    }

    m_roundTime->restart();
    roundElapsed(duration());
}
Developer: puryearn, Project: qlcplus, Lines: 90, Source: rgbmatrix.cpp

Example 14: create_ffaudiofileformats

static int create_ffaudiofileformats(JNIEnv *env, AVFormatContext *format_context, jobjectArray *array, jstring url) {
    int res = 0;
    jlong duration_in_microseconds = -1;
    jfloat frame_rate = -1;
    jobject vbr = NULL;
    jboolean big_endian = 1;
    jobject audio_format = NULL;
    jint frame_size = -1;
    jint sample_size = 0;
    int audio_stream_count = 0;
    int audio_stream_number = 0;

    // count possible audio streams
    int i;
    for (i=0; i<format_context->nb_streams; i++) {
        AVStream* stream = format_context->streams[i];
        if (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            audio_stream_count++;
        }
    }

#ifdef DEBUG
    fprintf(stderr, "Found %i audio streams.\n", audio_stream_count);
#endif

    // create output array
    *array = (*env)->NewObjectArray(env, audio_stream_count, (*env)->FindClass(env, "javax/sound/sampled/AudioFileFormat"), NULL);
    if (*array == NULL) { // check the allocation itself, not the out-parameter pointer
        goto bail;
    }

#ifdef DEBUG
    fprintf(stderr, "Created audio file format array.\n");
#endif

    // iterate over audio streams
    for (i=0; i<format_context->nb_streams; i++) {
        AVStream* stream = format_context->streams[i];
        if (stream->codec->codec_type == AVMEDIA_TYPE_AUDIO) {
            res = ff_open_stream(env, stream);
            if (res) {
                goto bail;
            }

            // create object
            duration_in_microseconds = duration(format_context, stream);
            frame_rate = get_frame_rate(stream, duration_in_microseconds);
            big_endian = ff_big_endian(stream->codec->codec_id);
            if (is_pcm(stream->codec->codec_id)) {
                frame_size = (stream->codec->bits_per_coded_sample / 8) * stream->codec->channels;
            }
            // TODO: Support VBR.

            sample_size = stream->codec->bits_per_coded_sample
                ? stream->codec->bits_per_coded_sample
                : stream->codec->bits_per_raw_sample;

            #ifdef DEBUG
                fprintf(stderr, "stream->codec->bits_per_coded_sample: %i\n", stream->codec->bits_per_coded_sample);
                fprintf(stderr, "stream->codec->bits_per_raw_sample  : %i\n", stream->codec->bits_per_raw_sample);
                fprintf(stderr, "stream->codec->bit_rate             : %i\n", stream->codec->bit_rate);
                fprintf(stderr, "format_context->packet_size         : %i\n", format_context->packet_size);
                fprintf(stderr, "frames     : %" PRId64 "\n", stream->nb_frames);
                fprintf(stderr, "sample_rate: %i\n", stream->codec->sample_rate);
                fprintf(stderr, "sampleSize : %i\n", stream->codec->bits_per_coded_sample);
                fprintf(stderr, "channels   : %i\n", stream->codec->channels);
                fprintf(stderr, "frame_size : %i\n", (int)frame_size);
                fprintf(stderr, "codec_id   : %i\n", stream->codec->codec_id);
                fprintf(stderr, "duration   : %" PRId64 "\n", (int64_t)duration_in_microseconds);
                fprintf(stderr, "frame_rate : %f\n", frame_rate);
                if (big_endian) {
                    fprintf(stderr, "big_endian  : true\n");
                } else {
                    fprintf(stderr, "big_endian  : false\n");
                }
            #endif
            audio_format = create_ffaudiofileformat(env, url,
                                                           stream->codec->codec_id,
                                                           (jfloat)stream->codec->sample_rate,
                                                           sample_size,
                                                           stream->codec->channels,
                                                           frame_size,
                                                           frame_rate,
                                                           big_endian,
                                                           duration_in_microseconds,
                                                           stream->codec->bit_rate,
                                                           vbr);

            (*env)->SetObjectArrayElement(env, *array, audio_stream_number, audio_format);
            audio_stream_number++;

            // clean up
            if (stream && stream->codec) {
                avcodec_close(stream->codec);
            }
        }
    }

bail:
    return res;
//......... rest of the code omitted .........
Developer: Vanco, Project: ffsampledsp, Lines: 101, Source: FFAudioFileReader.c
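
The duration(format_context, stream) helper called above is not shown in this snippet; since its result is stored in microseconds, it presumably rescales the stream duration from its time_base to microseconds. A sketch under that assumption (not the project's actual implementation):

extern "C" {
#include <libavformat/avformat.h>
#include <libavutil/mathematics.h>
}

// Hypothetical helper matching the call above: rescale the stream duration
// from stream->time_base units to microseconds, falling back to the
// container-level duration, which FFmpeg already stores in AV_TIME_BASE
// (microsecond) units.
static int64_t duration(AVFormatContext *format_context, AVStream *stream)
{
    if (stream->duration != AV_NOPTS_VALUE) {
        AVRational microseconds = {1, 1000000};
        return av_rescale_q(stream->duration, stream->time_base, microseconds);
    }
    if (format_context->duration != AV_NOPTS_VALUE)
        return format_context->duration;
    return -1;
}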

Example 15: display

  void display()
  {
    std::vector<cv::Point2f> pointsColor, pointsIr;
    cv::Mat color, ir, irGrey, depth;
    cv::Mat colorDisp, irDisp;
    bool foundColor = false;
    bool foundIr = false;
    bool save = false;
    bool running = true;

    std::chrono::milliseconds duration(1);
    while(!update && ros::ok())
    {
      std::this_thread::sleep_for(duration);
    }

    for(; ros::ok() && running;)
    {
      if(update)
      {
        lock.lock();
        color = this->color;
        ir = this->ir;
        irGrey = this->irGrey;
        depth = this->depth;
        foundColor = this->foundColor;
        foundIr = this->foundIr;
        pointsColor = this->pointsColor;
        pointsIr = this->pointsIr;
        update = false;
        lock.unlock();

        if(mode == COLOR || mode == SYNC)
        {
          cv::cvtColor(color, colorDisp, CV_GRAY2BGR);
          cv::drawChessboardCorners(colorDisp, boardDims, pointsColor, foundColor);
          //cv::resize(colorDisp, colorDisp, cv::Size(), 0.5, 0.5);
          //cv::flip(colorDisp, colorDisp, 1);
        }
        if(mode == IR || mode == SYNC)
        {
          cv::cvtColor(irGrey, irDisp, CV_GRAY2BGR);
          cv::drawChessboardCorners(irDisp, boardDims, pointsIr, foundIr);
          //cv::resize(irDisp, irDisp, cv::Size(), 0.5, 0.5);
          //cv::flip(irDisp, irDisp, 1);
        }
      }

      switch(mode)
      {
      case COLOR:
        cv::imshow("color", colorDisp);
        break;
      case IR:
        cv::imshow("ir", irDisp);
        break;
      case SYNC:
        cv::imshow("color", colorDisp);
        cv::imshow("ir", irDisp);
        break;
      }

      int key = cv::waitKey(10);
      switch(key & 0xFF)
      {
      case ' ':
      case 's':
        save = true;
        break;
      case 27:
      case 'q':
        running = false;
        break;
      case '1':
        minIr = std::max(0, minIr - 100);
        break;
      case '2':
        minIr = std::min(maxIr - 1, minIr + 100);
        break;
      case '3':
        maxIr = std::max(minIr + 1, maxIr - 100);
        break;
      case '4':
        maxIr = std::min(0xFFFF, maxIr + 100);
        break;
      case 'l':
        minIr = std::max(0, minIr - 100);
        maxIr = std::max(minIr + 1, maxIr - 100);
        break;
      case 'h':
        maxIr = std::min(0x7FFF, maxIr + 100);
        minIr = std::min(maxIr - 1, minIr + 100);
        break;
      }

      if(save && ((mode == COLOR && foundColor) || (mode == IR && foundIr) || (mode == SYNC && foundColor && foundIr)))
      {
        store(color, ir, irGrey, depth, pointsColor, pointsIr);
        save = false;
      }
//......... rest of the code omitted .........
Developer: mitchellwills, Project: iai_kinect2, Lines: 101, Source: kinect2_calibration.cpp
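
In this last example, duration is simply a std::chrono::milliseconds value used to throttle the polling loops. The initial wait condensed to a minimal standalone sketch (assuming update is a flag set by a subscriber thread, as in the class above):

#include <atomic>
#include <chrono>
#include <thread>

std::atomic<bool> update(false); // set by the image-subscriber thread

// Sleep 1 ms per iteration while waiting for the first frame, as in the
// display() loop above, so the wait does not spin a core at 100%.
void waitForFirstFrame()
{
    std::chrono::milliseconds duration(1);
    while (!update.load())
        std::this_thread::sleep_for(duration);
}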


Note: The duration function examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open source code and documentation platforms. The code snippets were selected from open source projects contributed by their respective authors, and copyright remains with the original authors. For distribution and use, refer to the corresponding project's License; do not reproduce without permission.