

C++ MediaStreamTrackVector Class Code Examples

This article collects typical usage examples of the C++ MediaStreamTrackVector class. If you are wondering what MediaStreamTrackVector is for, how it is used, or what real code that uses it looks like, the selected class examples below may help.


The following presents 15 code examples of the MediaStreamTrackVector class, sorted by popularity by default.
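Before the examples themselves, here is a minimal sketch of the pattern most of them share: collect MediaStreamTrack objects into a MediaStreamTrackVector, then hand that vector to MediaStream::create. In Blink/WebKit the type is an alias for a reference-counted or garbage-collected vector of MediaStreamTrack pointers (the exact alias differs between engines); the stand-in below is not the engines' real API, it models the vector with std::vector and std::shared_ptr and uses simplified placeholder MediaStreamTrack/MediaStream types so that it compiles on its own.

// Illustrative stand-in only: in the engines, MediaStreamTrackVector is an alias for a
// vector of ref-counted/garbage-collected MediaStreamTrack pointers. Here it is modeled
// with std::vector<std::shared_ptr<...>> so the sketch is self-contained and compilable.
#include <iostream>
#include <memory>
#include <string>
#include <vector>

struct MediaStreamTrack {
    std::string kind; // "audio" or "video"
    std::string id;
};

using MediaStreamTrackVector = std::vector<std::shared_ptr<MediaStreamTrack>>;

// Simplified MediaStream: create() splits the incoming track vector into audio and
// video tracks, mirroring the MediaStream::create(context, tracks) calls in the
// examples below (which additionally take an execution context).
struct MediaStream {
    static std::shared_ptr<MediaStream> create(const MediaStreamTrackVector& tracks) {
        auto stream = std::make_shared<MediaStream>();
        for (const auto& track : tracks) {
            if (track->kind == "audio")
                stream->audioTracks.push_back(track);
            else
                stream->videoTracks.push_back(track);
        }
        return stream;
    }
    MediaStreamTrackVector audioTracks;
    MediaStreamTrackVector videoTracks;
};

int main() {
    // Build a MediaStreamTrackVector, then create a stream from it.
    MediaStreamTrackVector tracks;
    tracks.push_back(std::make_shared<MediaStreamTrack>(MediaStreamTrack{"video", "canvas-capture-0"}));
    tracks.push_back(std::make_shared<MediaStreamTrack>(MediaStreamTrack{"audio", "microphone-0"}));

    auto stream = MediaStream::create(tracks);
    std::cout << "audio tracks: " << stream->audioTracks.size()
              << ", video tracks: " << stream->videoTracks.size() << std::endl;
    return 0;
}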

Example 1: size

MediaStream* HTMLCanvasElementCapture::captureStream(HTMLCanvasElement& element, bool givenFrameRate, double frameRate, ExceptionState& exceptionState)
{
    if (!element.originClean()) {
        exceptionState.throwSecurityError("Canvas is not origin-clean.");
        return nullptr;
    }

    WebMediaStreamTrack track;
    WebSize size(element.width(), element.height());
    OwnPtr<WebCanvasCaptureHandler> handler;
    if (givenFrameRate)
        handler = adoptPtr(Platform::current()->createCanvasCaptureHandler(size, frameRate, &track));
    else
        handler = adoptPtr(Platform::current()->createCanvasCaptureHandler(size, kDefaultFrameRate, &track));
    ASSERT(handler);
    if (!handler) {
        exceptionState.throwDOMException(NotSupportedError, "No CanvasCapture handler can be created.");
        return nullptr;
    }

    MediaStreamTrackVector tracks;
    if (givenFrameRate)
        tracks.append(CanvasCaptureMediaStreamTrack::create(track, &element, handler.release(), frameRate));
    else
        tracks.append(CanvasCaptureMediaStreamTrack::create(track, &element, handler.release()));
    // We want to capture one frame in the beginning.
    element.notifyListenersCanvasChanged();
    return MediaStream::create(element.executionContext(), tracks);
}
Developer: astojilj, Project: chromium-crosswalk, Lines: 29, Source file: HTMLCanvasElementCapture.cpp

Example 2: ASSERT

PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& es)
{
    ASSERT(mediaStream);
    if (!mediaStream) {
        es.throwDOMException(InvalidStateError);
        return 0;
    }

    ASSERT(isMainThread());
    lazyInitialize();

    AudioSourceProvider* provider = 0;

    MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();

    // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
    for (size_t i = 0; i < audioTracks.size(); ++i) {
        RefPtr<MediaStreamTrack> localAudio = audioTracks[i];
        if (localAudio->component()->audioSourceProvider()) {
            provider = localAudio->component()->audioSourceProvider();
            break;
        }
    }

    RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, provider);

    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
    node->setFormat(2, sampleRate());

    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
Developer: huningxin, Project: blink-crosswalk, Lines: 32, Source file: AudioContext.cpp

Example 3: ASSERT

PassRefPtr<MediaStreamAudioSourceNode> AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionCode& ec)
{
    ASSERT(mediaStream);
    if (!mediaStream) {
        ec = INVALID_STATE_ERR;
        return 0;
    }

    ASSERT(isMainThread());
    lazyInitialize();

    AudioSourceProvider* provider = 0;

    MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
    if (mediaStream->isLocal() && audioTracks.size()) {
        // Enable input for the specific local audio device specified in the MediaStreamSource.
        RefPtr<MediaStreamTrack> localAudio = audioTracks[0];
        MediaStreamSource* source = localAudio->component()->source();
        destination()->enableInput(source->deviceId());
        provider = destination()->localAudioInputProvider();
    } else {
        // FIXME: get a provider for non-local MediaStreams (like from a remote peer).
        provider = 0;
    }

    RefPtr<MediaStreamAudioSourceNode> node = MediaStreamAudioSourceNode::create(this, mediaStream, provider);

    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
    node->setFormat(2, sampleRate());

    refNode(node.get()); // context keeps reference until node is disconnected
    return node;
}
Developer: KnightSwarm, Project: WebKitTi, Lines: 33, Source file: AudioContext.cpp

Example 4: removeRemoteTrack

void MediaStream::removeRemoteTrack(MediaStreamComponent* component)
{
    if (ended())
        return;

    MediaStreamTrackVector* tracks = 0;
    switch (component->source()->type()) {
    case MediaStreamSource::TypeAudio:
        tracks = &m_audioTracks;
        break;
    case MediaStreamSource::TypeVideo:
        tracks = &m_videoTracks;
        break;
    }

    size_t index = notFound;
    for (size_t i = 0; i < tracks->size(); ++i) {
        if ((*tracks)[i]->component() == component) {
            index = i;
            break;
        }
    }
    if (index == notFound)
        return;

    RefPtr<MediaStreamTrack> track = (*tracks)[index];
    tracks->remove(index);
    scheduleDispatchEvent(MediaStreamTrackEvent::create(eventNames().removetrackEvent, false, false, track));
}
Developer: fatman2021, Project: webkitgtk, Lines: 29, Source file: MediaStream.cpp

Example 5: removeRemoteSource

void MediaStream::removeRemoteSource(MediaStreamSource* source)
{
    if (ended())
        return;

    MediaStreamTrackVector* tracks = 0;
    switch (source->type()) {
    case MediaStreamSource::Audio:
        tracks = &m_audioTracks;
        break;
    case MediaStreamSource::Video:
        tracks = &m_videoTracks;
        break;
    }

    size_t index = notFound;
    for (size_t i = 0; i < tracks->size(); ++i) {
        if ((*tracks)[i]->source() == source) {
            index = i;
            break;
        }
    }
    if (index == notFound)
        return;

    m_descriptor->removeSource(source);

    RefPtr<MediaStreamTrack> track = (*tracks)[index];
    tracks->remove(index);
    scheduleDispatchEvent(MediaStreamTrackEvent::create(eventNames().removetrackEvent, false, false, track));
}
Developer: webOS-ports, Project: webkit, Lines: 31, Source file: MediaStream.cpp

Example 6: ASSERT

MediaStreamAudioSourceNode* AudioContext::createMediaStreamSource(MediaStream* mediaStream, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());

    if (isContextClosed()) {
        throwExceptionForClosedState(exceptionState);
        return nullptr;
    }

    if (!mediaStream) {
        exceptionState.throwDOMException(
            InvalidStateError,
            "invalid MediaStream source");
        return nullptr;
    }

    MediaStreamTrackVector audioTracks = mediaStream->getAudioTracks();
    if (audioTracks.isEmpty()) {
        exceptionState.throwDOMException(
            InvalidStateError,
            "MediaStream has no audio track");
        return nullptr;
    }

    // Use the first audio track in the media stream.
    MediaStreamTrack* audioTrack = audioTracks[0];
    OwnPtr<AudioSourceProvider> provider = audioTrack->createWebAudioSource();
    MediaStreamAudioSourceNode* node = MediaStreamAudioSourceNode::create(this, mediaStream, audioTrack, provider.release());

    // FIXME: Only stereo streams are supported right now. We should be able to accept multi-channel streams.
    node->mediaStreamAudioSourceHandler().setFormat(2, sampleRate());

    refNode(node); // context keeps reference until node is disconnected
    return node;
}
Developer: kingysu, Project: blink-crosswalk, Lines: 35, Source file: AudioContext.cpp

Example 7: getTracks

MediaStreamTrackVector MediaStream::getTracks() const
{
    MediaStreamTrackVector tracks;
    tracks.reserveCapacity(m_trackSet.size());
    copyValuesToVector(m_trackSet, tracks);

    return tracks;
}
Developer: srinivas-kakarla, Project: WebKitForWayland, Lines: 8, Source file: MediaStream.cpp

Example 8: clone

MediaStream* MediaStream::clone(ExecutionContext* context)
{
    MediaStreamTrackVector tracks;
    for (MediaStreamTrackVector::iterator iter = m_audioTracks.begin(); iter != m_audioTracks.end(); ++iter)
        tracks.append((*iter)->clone(context));
    for (MediaStreamTrackVector::iterator iter = m_videoTracks.begin(); iter != m_videoTracks.end(); ++iter)
        tracks.append((*iter)->clone(context));
    return MediaStream::create(context, tracks);
}
Developer: howardroark2018, Project: chromium, Lines: 9, Source file: MediaStream.cpp

Example 9: getTracks

MediaStreamTrackVector MediaStream::getTracks()
{
    MediaStreamTrackVector tracks;
    for (MediaStreamTrackVector::iterator iter = m_audioTracks.begin(); iter != m_audioTracks.end(); ++iter)
        tracks.append(iter->get());
    for (MediaStreamTrackVector::iterator iter = m_videoTracks.begin(); iter != m_videoTracks.end(); ++iter)
        tracks.append(iter->get());
    return tracks;
}
Developer: howardroark2018, Project: chromium, Lines: 9, Source file: MediaStream.cpp

Example 10: trackVectorForType

MediaStreamTrackVector MediaStream::trackVectorForType(RealtimeMediaSource::Type filterType) const
{
    MediaStreamTrackVector tracks;
    for (auto& track : m_trackSet.values()) {
        if (track->source().type() == filterType)
            tracks.append(track);
    }

    return tracks;
}
Developer: srinivas-kakarla, Project: WebKitForWayland, Lines: 10, Source file: MediaStream.cpp

Example 11: create

RefPtr<MediaStream> MediaStream::clone()
{
    MediaStreamTrackVector clonedTracks;
    clonedTracks.reserveCapacity(m_trackSet.size());

    for (auto& track : m_trackSet.values())
        clonedTracks.append(track->clone());

    return MediaStream::create(*scriptExecutionContext(), clonedTracks);
}
Developer: srinivas-kakarla, Project: WebKitForWayland, Lines: 10, Source file: MediaStream.cpp

Example 12: createFromSourceVectors

PassRefPtr<MediaStream> MediaStream::create(ScriptExecutionContext* context, const MediaStreamTrackVector& tracks)
{
    MediaStreamSourceVector audioSources;
    MediaStreamSourceVector videoSources;

    for (size_t i = 0; i < tracks.size(); ++i)
        processTrack(tracks[i].get(), tracks[i]->kind() == "audio" ? audioSources : videoSources);

    return createFromSourceVectors(context, audioSources, videoSources);
}
Developer: fatman2021, Project: webkitgtk, Lines: 10, Source file: MediaStream.cpp

Example 13: size

MediaStream* HTMLCanvasElementCapture::captureStream(
    HTMLCanvasElement& element,
    bool givenFrameRate,
    double frameRate,
    ExceptionState& exceptionState) {
    if (!element.originClean()) {
        exceptionState.throwSecurityError("Canvas is not origin-clean.");
        return nullptr;
    }

    WebMediaStreamTrack track;
    const WebSize size(element.width(), element.height());
    std::unique_ptr<WebCanvasCaptureHandler> handler;
    if (givenFrameRate)
        handler = wrapUnique(Platform::current()->createCanvasCaptureHandler(
                                 size, frameRate, &track));
    else
        handler = wrapUnique(Platform::current()->createCanvasCaptureHandler(
                                 size, kDefaultFrameRate, &track));

    if (!handler) {
        exceptionState.throwDOMException(
            NotSupportedError, "No CanvasCapture handler can be created.");
        return nullptr;
    }

    CanvasCaptureMediaStreamTrack* canvasTrack;
    if (givenFrameRate)
        canvasTrack = CanvasCaptureMediaStreamTrack::create(
                          track, &element, std::move(handler), frameRate);
    else
        canvasTrack = CanvasCaptureMediaStreamTrack::create(track, &element,
                      std::move(handler));
    // We want to capture a frame in the beginning.
    canvasTrack->requestFrame();

    MediaStreamTrackVector tracks;
    tracks.append(canvasTrack);
    return MediaStream::create(element.getExecutionContext(), tracks);
}
Developer: mirror, Project: chromium, Lines: 40, Source file: HTMLCanvasElementCapture.cpp

Example 14: ContextDestructionObserver

MediaStream::MediaStream(ScriptExecutionContext& context, const MediaStreamTrackVector& tracks)
    : ContextDestructionObserver(&context)
    , m_activityEventTimer(*this, &MediaStream::activityEventTimerFired)
{
    // This constructor preserves MediaStreamTrack instances and must be used by calls originating
    // from the JavaScript MediaStream constructor.
    MediaStreamTrackPrivateVector trackPrivates;
    trackPrivates.reserveCapacity(tracks.size());

    for (auto& track : tracks) {
        track->addObserver(this);
        m_trackSet.add(track->id(), track);
        trackPrivates.append(&track->privateTrack());
    }

    m_private = MediaStreamPrivate::create(trackPrivates);
    setIsActive(m_private->active());
    m_private->addObserver(*this);
    MediaStreamRegistry::shared().registerStream(*this);
}
Developer: srinivas-kakarla, Project: WebKitForWayland, Lines: 20, Source file: MediaStream.cpp

Example 15: succeed

void UserMediaRequest::succeed(PassRefPtr<MediaStreamDescriptor> streamDescriptor)
{
    if (!executionContext())
        return;

    RefPtrWillBeRawPtr<MediaStream> stream = MediaStream::create(executionContext(), streamDescriptor);

    MediaStreamTrackVector audioTracks = stream->getAudioTracks();
    for (MediaStreamTrackVector::iterator iter = audioTracks.begin(); iter != audioTracks.end(); ++iter) {
        (*iter)->component()->source()->setConstraints(m_audio);
    }

    MediaStreamTrackVector videoTracks = stream->getVideoTracks();
    for (MediaStreamTrackVector::iterator iter = videoTracks.begin(); iter != videoTracks.end(); ++iter) {
        (*iter)->component()->source()->setConstraints(m_video);
    }

    m_successCallback->handleEvent(stream.get());
}
Developer: darktears, Project: blink-crosswalk, Lines: 19, Source file: UserMediaRequest.cpp


Note: The MediaStreamTrackVector class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and redistribution and use should follow the License of the corresponding project. Please do not reproduce without permission.