

C++ AudioContext Class Code Examples

This article collects and summarizes typical usage examples of the AudioContext class in C++. If you have been wondering what the C++ AudioContext class is used for in practice, how to use it, or what real code that uses it looks like, the hand-picked class code examples below may help.


The following presents 15 code examples of the AudioContext class, sorted by popularity by default.

Example 1: ASSERT

AbstractAudioContext* AudioContext::create(Document& document, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    if (s_hardwareContextCount >= MaxHardwareContexts) {
        exceptionState.throwDOMException(
            NotSupportedError,
            ExceptionMessages::indexExceedsMaximumBound(
                "number of hardware contexts",
                s_hardwareContextCount,
                MaxHardwareContexts));
        return nullptr;
    }

    AudioContext* audioContext = new AudioContext(document);
    audioContext->suspendIfNeeded();

    // This starts the audio thread. The destination node's
    // provideInput() method will now be called repeatedly to render
    // audio.  Each time provideInput() is called, a portion of the
    // audio stream is rendered. Let's call this time period a "render
    // quantum". NOTE: for now AudioContext does not need an explicit
    // startRendering() call from JavaScript.  We may want to consider
    // requiring it for symmetry with OfflineAudioContext.
    audioContext->startRendering();
    ++s_hardwareContextCount;
#if DEBUG_AUDIONODE_REFERENCES
    fprintf(stderr, "%p: AudioContext::AudioContext(): %u #%u\n",
        audioContext, audioContext->m_contextId, s_hardwareContextCount);
#endif

    return audioContext;
}
Developer ID: dstockwell, Project: blink, Lines: 32, Source: AudioContext.cpp
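
As a side note, the cap on concurrent hardware contexts enforced above can be illustrated in isolation. The following standalone sketch is not Blink code: the type, the cap value, and the error reporting are placeholders chosen for illustration, but the guard logic mirrors the check in the example.

#include <cstdio>
#include <memory>

constexpr unsigned kMaxHardwareContexts = 6;   // illustrative cap; the real constant lives in Blink
static unsigned s_hardwareContextCount = 0;

struct FakeContext {};

std::unique_ptr<FakeContext> createContext()
{
    if (s_hardwareContextCount >= kMaxHardwareContexts) {
        // The real code throws a NotSupportedError DOMException instead of logging.
        std::fprintf(stderr, "rejected: %u hardware contexts already exist (max %u)\n",
                     s_hardwareContextCount, kMaxHardwareContexts);
        return nullptr;
    }
    ++s_hardwareContextCount;   // the real code also starts the audio rendering thread at this point
    return std::make_unique<FakeContext>();
}

int main()
{
    for (int i = 0; i < 8; ++i)
        std::printf("attempt %d -> %s\n", i, createContext() ? "created" : "rejected");
}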

Example 2: AudioBasicProcessorNode

WaveShaperNode::WaveShaperNode(AudioContext& context)
    : AudioBasicProcessorNode(context, context.sampleRate())
{
    m_processor = std::make_unique<WaveShaperProcessor>(context.sampleRate(), 1);
    setNodeType(NodeTypeWaveShaper);

    initialize();
}
Developer ID: ollie314, Project: webkit, Lines: 8, Source: WaveShaperNode.cpp

Example 3: uninitializeDispatch

void AudioContext::uninitializeDispatch(void* userData)
{
    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
    ASSERT(context);
    if (!context)
        return;

    context->uninitialize();
}
Developer ID: , Project: , Lines: 9, Source: 

Example 4: deleteMarkedNodesDispatch

void AudioContext::deleteMarkedNodesDispatch(void* userData)
{
    AudioContext* context = reinterpret_cast<AudioContext*>(userData);
    ASSERT(context);
    if (!context)
        return;

    context->deleteMarkedNodes();
    context->deref();
}
Developer ID: Happy-Ferret, Project: webkit.js, Lines: 10, Source: AudioContext.cpp

Example 5: DispatchAudioProcessEvent

      // Sets up |output| iff buffers are set in event handlers.
      void DispatchAudioProcessEvent(ScriptProcessorNode* aNode,
                                     AudioChunk* aOutput)
      {
        AudioContext* context = aNode->Context();
        if (!context) {
          return;
        }

        AutoJSAPI jsapi;
        if (NS_WARN_IF(!jsapi.Init(aNode->GetOwner()))) {
          return;
        }
        JSContext* cx = jsapi.cx();
        uint32_t inputChannelCount = aNode->ChannelCount();

        // Create the input buffer
        RefPtr<AudioBuffer> inputBuffer;
        if (mInputBuffer) {
          ErrorResult rv;
          inputBuffer =
            AudioBuffer::Create(context->GetOwner(), inputChannelCount,
                                aNode->BufferSize(), context->SampleRate(),
                                mInputBuffer.forget(), rv);
          if (rv.Failed()) {
            rv.SuppressException();
            return;
          }
        }

        // Ask content to produce data in the output buffer
        // Note that we always avoid creating the output buffer here, and we try to
        // avoid creating the input buffer as well.  The AudioProcessingEvent class
        // knows how to lazily create them if needed once the script tries to access
        // them.  Otherwise, we may be able to get away without creating them!
        RefPtr<AudioProcessingEvent> event =
          new AudioProcessingEvent(aNode, nullptr, nullptr);
        event->InitEvent(inputBuffer, inputChannelCount, mPlaybackTime);
        aNode->DispatchTrustedEvent(event);

        // Steal the output buffers if they have been set.
        // Don't create a buffer if it hasn't been used to return output;
        // FinishProducingOutputBuffer() will optimize output = null.
        // GetThreadSharedChannelsForRate() may also return null after OOM.
        if (event->HasOutputBuffer()) {
          ErrorResult rv;
          AudioBuffer* buffer = event->GetOutputBuffer(rv);
          // HasOutputBuffer() returning true means that GetOutputBuffer()
          // will not fail.
          MOZ_ASSERT(!rv.Failed());
          *aOutput = buffer->GetThreadSharedChannelsForRate(cx);
          MOZ_ASSERT(aOutput->IsNull() ||
                     aOutput->mBufferFormat == AUDIO_FORMAT_FLOAT32,
                     "AudioBuffers initialized from JS have float data");
        }
      }
Developer ID: heiher, Project: gecko-dev, Lines: 56, Source: ScriptProcessorNode.cpp

Example 6: AudioNodeExternalInputStream

/* static */
already_AddRefed<AudioNodeExternalInputStream>
AudioNodeExternalInputStream::Create(MediaStreamGraph* aGraph,
                                     AudioNodeEngine* aEngine) {
  AudioContext* ctx = aEngine->NodeMainThread()->Context();
  MOZ_ASSERT(NS_IsMainThread());
  MOZ_ASSERT(aGraph->GraphRate() == ctx->SampleRate());

  RefPtr<AudioNodeExternalInputStream> stream =
      new AudioNodeExternalInputStream(aEngine, aGraph->GraphRate());
  stream->mSuspendedCount += ctx->ShouldSuspendNewStream();
  aGraph->AddStream(stream);
  return stream.forget();
}
Developer ID: jasonLaster, Project: gecko-dev, Lines: 14, Source: AudioNodeExternalInputStream.cpp

Example 7: throwError

v8::Handle<v8::Value> V8AudioContext::createBufferCallback(const v8::Arguments& args)
{
    if (args.Length() < 2)
        return throwError("Not enough arguments", V8Proxy::SyntaxError);

    AudioContext* audioContext = toNative(args.Holder());
    ASSERT(audioContext);

    v8::Handle<v8::Value> arg = args[0];
    
    // AudioBuffer createBuffer(in ArrayBuffer buffer, in boolean mixToMono);
    if (V8ArrayBuffer::HasInstance(arg)) {
        v8::Handle<v8::Object> object = v8::Handle<v8::Object>::Cast(arg);
        ArrayBuffer* arrayBuffer = V8ArrayBuffer::toNative(object);
        ASSERT(arrayBuffer);

        if (arrayBuffer) {
            bool mixToMono = args[1]->ToBoolean()->Value();

            RefPtr<AudioBuffer> audioBuffer = audioContext->createBuffer(arrayBuffer, mixToMono);
            if (!audioBuffer.get())
                return throwError("Error decoding audio file data", V8Proxy::SyntaxError);

            return toV8(audioBuffer.get());
        }
        
        return v8::Undefined();
    }
    
    // AudioBuffer createBuffer(in unsigned long numberOfChannels, in unsigned long numberOfFrames, in float sampleRate);
    if (args.Length() < 3)
        return throwError("Not enough arguments", V8Proxy::SyntaxError);

    bool ok = false;
    
    int32_t numberOfChannels = toInt32(args[0], ok);
    if (!ok || numberOfChannels <= 0 || numberOfChannels > 10)
        return throwError("Invalid number of channels", V8Proxy::SyntaxError);
    
    int32_t numberOfFrames = toInt32(args[1], ok);
    if (!ok || numberOfFrames <= 0)
        return throwError("Invalid number of frames", V8Proxy::SyntaxError);
    
    float sampleRate = toFloat(args[2]);
    
    RefPtr<AudioBuffer> audioBuffer = audioContext->createBuffer(numberOfChannels, numberOfFrames, sampleRate);
    if (!audioBuffer.get())
        return throwError("Error creating AudioBuffer", V8Proxy::SyntaxError);

    return toV8(audioBuffer.get());
}
Developer ID: 1833183060, Project: wke, Lines: 51, Source: V8AudioContextCustom.cpp

Example 8: throwError

JSValue JSAudioContext::createBuffer(ExecState* exec)
{
    if (exec->argumentCount() < 2)
        return throwError(exec, createSyntaxError(exec, "Not enough arguments"));

    AudioContext* audioContext = static_cast<AudioContext*>(impl());
    ASSERT(audioContext);

    // AudioBuffer createBuffer(in ArrayBuffer buffer, in boolean mixToMono);
    JSValue val = exec->argument(0);
    if (val.inherits(&JSArrayBuffer::s_info)) {
        ArrayBuffer* arrayBuffer = toArrayBuffer(val);
        ASSERT(arrayBuffer);
        if (arrayBuffer) {
            bool mixToMono = exec->argument(1).toBoolean(exec);

            RefPtr<AudioBuffer> audioBuffer = audioContext->createBuffer(arrayBuffer, mixToMono);
            if (!audioBuffer.get())
                return throwError(exec, createSyntaxError(exec, "Error decoding audio file data"));

            return toJS(exec, globalObject(), audioBuffer.get());
        }

        return jsUndefined();
    }
    
    // AudioBuffer createBuffer(in unsigned long numberOfChannels, in unsigned long numberOfFrames, in float sampleRate);
    if (exec->argumentCount() < 3)
        return throwError(exec, createSyntaxError(exec, "Not enough arguments"));
    
    int32_t numberOfChannels = exec->argument(0).toInt32(exec);
    int32_t numberOfFrames = exec->argument(1).toInt32(exec);
    float sampleRate = exec->argument(2).toFloat(exec);

    if (numberOfChannels <= 0 || numberOfChannels > 10)
        return throwError(exec, createSyntaxError(exec, "Invalid number of channels"));

    if (numberOfFrames <= 0)
        return throwError(exec, createSyntaxError(exec, "Invalid number of frames"));

    if (sampleRate <= 0)
        return throwError(exec, createSyntaxError(exec, "Invalid sample rate"));

    RefPtr<AudioBuffer> audioBuffer = audioContext->createBuffer(numberOfChannels, numberOfFrames, sampleRate);
    if (!audioBuffer.get())
        return throwError(exec, createSyntaxError(exec, "Error creating AudioBuffer"));

    return toJS(exec, globalObject(), audioBuffer.get());
}
Developer ID: Treeeater, Project: WebPermission, Lines: 49, Source: JSAudioContextCustom.cpp
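
Both bindings (Examples 7 and 8) validate the three-argument form of createBuffer the same way: at most 10 channels, a positive frame count, and (in the JSC version) a positive sample rate. A minimal standalone sketch of that shared validation, with hypothetical names and hard-coded sample values, might look like this:

#include <cstdio>

// Returns true when the (numberOfChannels, numberOfFrames, sampleRate) triple
// would pass the checks in the bindings above; the concrete limits come from
// those checks, not from the Web Audio specification.
bool isValidBufferRequest(int numberOfChannels, int numberOfFrames, float sampleRate)
{
    if (numberOfChannels <= 0 || numberOfChannels > 10)
        return false;   // "Invalid number of channels"
    if (numberOfFrames <= 0)
        return false;   // "Invalid number of frames"
    if (sampleRate <= 0)
        return false;   // "Invalid sample rate" (checked only in the JSC binding)
    return true;
}

int main()
{
    std::printf("%d\n", isValidBufferRequest(2, 44100, 44100.0f));   // 1: stereo, one second at 44.1 kHz
    std::printf("%d\n", isValidBufferRequest(16, 44100, 44100.0f));  // 0: too many channels
    std::printf("%d\n", isValidBufferRequest(2, 0, 44100.0f));       // 0: empty buffer
}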

Example 9: ASSERT

AudioContext* AudioContext::create(Document& document, ExceptionState& exceptionState)
{
    ASSERT(isMainThread());
    if (s_hardwareContextCount >= MaxHardwareContexts) {
        exceptionState.throwDOMException(
            NotSupportedError,
            ExceptionMessages::indexExceedsMaximumBound(
                "number of hardware contexts",
                s_hardwareContextCount,
                MaxHardwareContexts));
        return nullptr;
    }

    AudioContext* audioContext = new AudioContext(&document);
    audioContext->suspendIfNeeded();
    return audioContext;
}
Developer ID: kingysu, Project: blink-crosswalk, Lines: 17, Source: AudioContext.cpp

Example 10: valueForContextTime

float AudioParamTimeline::valueForContextTime(AudioContext& context, float defaultValue, bool& hasValue)
{
    {
        std::unique_lock<Lock> lock(m_eventsMutex, std::try_to_lock);
        if (!lock.owns_lock() || !m_events.size() || context.currentTime() < m_events[0].time()) {
            hasValue = false;
            return defaultValue;
        }
    }

    // Ask for just a single value.
    float value;
    double sampleRate = context.sampleRate();
    double startTime = context.currentTime();
    double endTime = startTime + 1.1 / sampleRate; // time just beyond one sample-frame
    double controlRate = sampleRate / AudioNode::ProcessingSizeInFrames; // one parameter change per render quantum
    value = valuesForTimeRange(startTime, endTime, defaultValue, &value, 1, sampleRate, controlRate);

    hasValue = true;
    return value;
}
Developer ID: Comcast, Project: WebKitForWayland, Lines: 21, Source: AudioParamTimeline.cpp
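
The two derived quantities in this example are easy to check numerically: endTime lands just over one sample-frame past startTime, and controlRate works out to one automation value per render quantum. A small standalone sketch (the sample rate and the 128-frame quantum are assumed values here, not taken from a live context) prints both:

#include <cstdio>

int main()
{
    const double sampleRate = 44100.0;    // assumed context sample rate, for illustration
    const double quantumFrames = 128.0;   // assumed render-quantum size in frames
    const double startTime = 1.0;         // an arbitrary context currentTime

    // "time just beyond one sample-frame": 1.1 frames keeps the range safely past startTime.
    const double endTime = startTime + 1.1 / sampleRate;
    // one parameter change per render quantum
    const double controlRate = sampleRate / quantumFrames;

    std::printf("endTime - startTime = %.8f s\n", endTime - startTime);   // ~0.00002494 s
    std::printf("controlRate = %.2f changes/s\n", controlRate);           // 344.53
}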

Example 11: AudioNode

MediaElementAudioSourceNode::MediaElementAudioSourceNode(AudioContext& context, HTMLMediaElement& mediaElement)
    : AudioNode(context, context.sampleRate())
    , m_mediaElement(mediaElement)
    , m_sourceNumberOfChannels(0)
    , m_sourceSampleRate(0)
{
    // Default to stereo. This could change depending on what the media element .src is set to.
    addOutput(std::make_unique<AudioNodeOutput>(this, 2));

    setNodeType(NodeTypeMediaElementAudioSource);

    initialize();
}
Developer ID: Comcast, Project: WebKitForWayland, Lines: 13, Source: MediaElementAudioSourceNode.cpp

Example 12: MediaStreamAudioDestinationNode

/* static */ already_AddRefed<MediaStreamAudioDestinationNode>
MediaStreamAudioDestinationNode::Create(AudioContext& aAudioContext,
                                        const AudioNodeOptions& aOptions,
                                        ErrorResult& aRv)
{
  if (aAudioContext.IsOffline()) {
    aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
    return nullptr;
  }

  if (aAudioContext.CheckClosed(aRv)) {
    return nullptr;
  }

  RefPtr<MediaStreamAudioDestinationNode> audioNode =
    new MediaStreamAudioDestinationNode(&aAudioContext);

  audioNode->Initialize(aOptions, aRv);
  if (NS_WARN_IF(aRv.Failed())) {
    return nullptr;
  }

  return audioNode.forget();
}
Developer ID: heiher, Project: gecko-dev, Lines: 24, Source: MediaStreamAudioDestinationNode.cpp

Example 13: Create

/* static */ already_AddRefed<AudioBuffer>
AudioBuffer::Constructor(const GlobalObject& aGlobal,
                         AudioContext& aAudioContext,
                         const AudioBufferOptions& aOptions,
                         ErrorResult& aRv)
{
  if (!aOptions.mNumberOfChannels) {
    aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
    return nullptr;
  }

  float sampleRate = aOptions.mSampleRate.WasPassed()
                       ? aOptions.mSampleRate.Value()
                       : aAudioContext.SampleRate();
  return Create(&aAudioContext, aOptions.mNumberOfChannels, aOptions.mLength,
                sampleRate, aRv);
}
Developer ID: mephisto41, Project: gecko-dev, Lines: 17, Source: AudioBuffer.cpp

Example 14: create

Ref<AudioBuffer> AudioBuffer::read(AudioContext& context, const std::string& sampleName)
{
  ResourceCache& cache = context.cache();

  std::string name;
  name += "sample:";
  name += sampleName;

  if (Ref<AudioBuffer> buffer = cache.find<AudioBuffer>(name))
    return buffer;

  Ref<Sample> data = Sample::read(cache, sampleName);
  if (!data)
  {
    logError("Failed to read sample for buffer %s", name.c_str());
    return nullptr;
  }

  return create(ResourceInfo(cache, name), context, *data);
}
Developer ID: elmindreda, Project: Nori, Lines: 20, Source: Audio.cpp
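
The lookup-or-load pattern in this example (consult the resource cache under a "sample:" key before decoding the sample) can be sketched generically. The snippet below is not Nori's API; it uses a plain std::unordered_map and std::shared_ptr as stand-ins to show the same control flow:

#include <cstdio>
#include <memory>
#include <string>
#include <unordered_map>

struct Buffer { std::string name; };

using BufferRef = std::shared_ptr<Buffer>;
static std::unordered_map<std::string, BufferRef> cache;

BufferRef readBuffer(const std::string& sampleName)
{
    const std::string key = "sample:" + sampleName;

    // Return the cached buffer if this sample has been read before.
    if (auto it = cache.find(key); it != cache.end())
        return it->second;

    // Otherwise "decode" the sample and cache the result
    // (stand-in for Sample::read followed by create in the example above).
    auto buffer = std::make_shared<Buffer>(Buffer{key});
    cache.emplace(key, buffer);
    return buffer;
}

int main()
{
    BufferRef a = readBuffer("explosion.ogg");
    BufferRef b = readBuffer("explosion.ogg");
    std::printf("same object: %s\n", (a == b) ? "yes" : "no");   // yes: the second call hits the cache
}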

Example 15: AudioNode

PannerNode::PannerNode(AudioContext& context, float sampleRate)
    : AudioNode(context, sampleRate)
    , m_panningModel(PanningModelType::HRTF)
    , m_lastGain(-1.0)
    , m_connectionCount(0)
{
    // Load the HRTF database asynchronously so we don't block the Javascript thread while creating the HRTF database.
    m_hrtfDatabaseLoader = HRTFDatabaseLoader::createAndLoadAsynchronouslyIfNecessary(context.sampleRate());

    addInput(std::make_unique<AudioNodeInput>(this));
    addOutput(std::make_unique<AudioNodeOutput>(this, 2));

    // Node-specific default mixing rules.
    m_channelCount = 2;
    m_channelCountMode = ClampedMax;
    m_channelInterpretation = AudioBus::Speakers;

    m_distanceGain = AudioParam::create(context, "distanceGain", 1.0, 0.0, 1.0);
    m_coneGain = AudioParam::create(context, "coneGain", 1.0, 0.0, 1.0);

    m_position = FloatPoint3D(0, 0, 0);
    m_orientation = FloatPoint3D(1, 0, 0);
    m_velocity = FloatPoint3D(0, 0, 0);

    setNodeType(NodeTypePanner);

    initialize();
}
Developer ID: eocanha, Project: webkit, Lines: 28, Source: PannerNode.cpp


Note: The AudioContext class examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by their respective developers; copyright in the source code remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.