本文整理汇总了C++中Float32Array类的典型用法代码示例。如果您正苦于以下问题:C++ Float32Array类的具体用法?C++ Float32Array怎么用?C++ Float32Array使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
在下文中一共展示了Float32Array类的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的C++代码示例。
示例1: jsFloat32ArrayPrototypeFunctionSubarray
EncodedJSValue JSC_HOST_CALL jsFloat32ArrayPrototypeFunctionSubarray(ExecState* exec)
{
    // JSC binding thunk for Float32Array.prototype.subarray(start[, end]).
    JSValue thisValue = exec->hostThisValue();
    if (!thisValue.inherits(&JSFloat32Array::s_info))
        return throwVMTypeError(exec);
    JSFloat32Array* wrapper = static_cast<JSFloat32Array*>(asObject(thisValue));
    ASSERT_GC_OBJECT_INHERITS(wrapper, &JSFloat32Array::s_info);
    Float32Array* array = static_cast<Float32Array*>(wrapper->impl());

    // First argument: start index (the toInt32 conversion can raise a JS exception).
    int begin = exec->argument(0).toInt32(exec);
    if (exec->hadException())
        return JSValue::encode(jsUndefined());

    // One-argument form: subarray(start) — view from start to the end.
    if (exec->argumentCount() <= 1)
        return JSValue::encode(toJS(exec, wrapper->globalObject(), WTF::getPtr(array->subarray(begin))));

    // Two-argument form: subarray(start, end); the second conversion can throw too.
    int finish = exec->argument(1).toInt32(exec);
    if (exec->hadException())
        return JSValue::encode(jsUndefined());
    return JSValue::encode(toJS(exec, wrapper->globalObject(), WTF::getPtr(array->subarray(begin, finish))));
}
示例2: PodMove
void
AudioBuffer::CopyToChannel(JSContext* aJSContext, const Float32Array& aSource,
                           uint32_t aChannelNumber, uint32_t aStartInChannel,
                           ErrorResult& aRv)
{
    // Copies the whole of aSource into channel aChannelNumber starting at
    // sample aStartInChannel. Throws IndexSizeError for out-of-range
    // requests and OOM if the JS channel data cannot be restored.
    aSource.ComputeLengthAndData();
    uint32_t sampleCount = aSource.Length();

    // Overflow-safe end position of the write range.
    CheckedInt<uint32_t> writeEnd = aStartInChannel;
    writeEnd += sampleCount;

    bool channelOutOfRange = aChannelNumber >= NumberOfChannels();
    bool writeOutOfRange = !writeEnd.isValid() || writeEnd.value() > mLength;
    if (channelOutOfRange || writeOutOfRange) {
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }

    if (!mSharedChannels && JS_GetTypedArrayLength(mJSChannels[aChannelNumber]) != mLength) {
        // The array was probably neutered
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }

    if (!RestoreJSChannelData(aJSContext)) {
        aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
        return;
    }

    PodMove(JS_GetFloat32ArrayData(mJSChannels[aChannelNumber]) + aStartInChannel,
            aSource.Data(), sampleCount);
}
示例3: PodCopy
void
AudioBuffer::CopyToChannel(JSContext* aJSContext, const Float32Array& aSource,
                           uint32_t aChannelNumber, uint32_t aStartInChannel,
                           ErrorResult& aRv)
{
    // Copies all of aSource into channel aChannelNumber starting at
    // aStartInChannel. Throws IndexSizeError when the channel index or the
    // destination range is out of bounds, and OOM if the JS channel data
    // cannot be restored.
    uint32_t length = aSource.Length();
    // BUGFIX: the previous check `aStartInChannel + length >= mLength` could
    // wrap around for large aStartInChannel (unsigned overflow) and wrongly
    // rejected copies that end exactly at mLength. Rearranged so it cannot
    // overflow and uses the correct inclusive bound (matching the other
    // CopyToChannel variants, which test `end > mLength`).
    if (aChannelNumber >= NumberOfChannels() ||
        aStartInChannel > mLength || length > mLength - aStartInChannel) {
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }
    if (!mSharedChannels && JS_GetTypedArrayLength(mJSChannels[aChannelNumber]) != mLength) {
        // The array was probably neutered
        aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
        return;
    }
    if (!RestoreJSChannelData(aJSContext)) {
        aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
        return;
    }
    PodCopy(JS_GetFloat32ArrayData(mJSChannels[aChannelNumber]) + aStartInChannel,
            aSource.Data(), length);
}
示例4: jsFloat32ArrayLength
JSValue jsFloat32ArrayLength(ExecState* exec, JSValue slotBase, const Identifier&)
{
    UNUSED_PARAM(exec);
    // "length" property getter: element count of the wrapped Float32Array.
    JSFloat32Array* wrapper = static_cast<JSFloat32Array*>(asObject(slotBase));
    Float32Array* array = static_cast<Float32Array*>(wrapper->impl());
    return jsNumber(array->length());
}
示例5: DOMMatrix
already_AddRefed<DOMMatrix>
DOMMatrix::Constructor(const GlobalObject& aGlobal, const Float32Array& aArray32, ErrorResult& aRv)
{
    // Builds a DOMMatrix from a Float32Array; element-count validation is
    // performed by SetDataInMatrix, which reports failure through aRv.
    aArray32.ComputeLengthAndData();  // materialize the view before Data()/Length()
    RefPtr<DOMMatrix> matrix = new DOMMatrix(aGlobal.GetAsSupports());
    SetDataInMatrix(matrix, aArray32.Data(), aArray32.Length(), aRv);
    return matrix.forget();
}
示例6:
PassRefPtr<Float32Array> AudioBuffer::getChannelData(unsigned channelIndex, ExceptionCode& ec)
{
    // Out-of-range channel index -> SYNTAX_ERR and no data.
    if (channelIndex >= m_channels.size()) {
        ec = SYNTAX_ERR;
        return nullptr;
    }
    // Hand back a fresh view that shares the channel's underlying buffer.
    Float32Array* source = m_channels[channelIndex].get();
    return Float32Array::create(source->buffer(), source->byteOffset(), source->length());
}
示例7: index
PassRefPtr<Float32Array> AudioBuffer::getChannelData(unsigned channelIndex, ExceptionState& exceptionState)
{
    // Reject out-of-range indices with a descriptive IndexSizeError.
    if (channelIndex >= m_channels.size()) {
        exceptionState.throwDOMException(IndexSizeError, "channel index (" + String::number(channelIndex) + ") exceeds number of channels (" + String::number(m_channels.size()) + ")");
        return nullptr;
    }
    // Hand back a fresh view that shares the channel's underlying buffer.
    Float32Array* source = m_channels[channelIndex].get();
    return Float32Array::create(source->buffer(), source->byteOffset(), source->length());
}
示例8:
void
AnalyserNode::GetFloatTimeDomainData(const Float32Array& aArray)
{
    // Fills aArray with samples from the internal ring buffer, reading
    // forward from mWriteIndex and wrapping at the end of the buffer.
    // NOTE(review): unlike other Float32Array consumers in this codebase,
    // this does not call aArray.ComputeLengthAndData() first — confirm the
    // binding guarantees Length()/Data() are already valid here.
    float* buffer = aArray.Data();
    // Hoisted loop-invariant mBuffer.Length() out of the modulo expression.
    const uint32_t bufferLength = mBuffer.Length();
    // Never write past the destination or read past the ring buffer.
    uint32_t length = std::min(aArray.Length(), bufferLength);
    for (uint32_t i = 0; i < length; ++i) {
        // Fixed: removed a stray empty statement (";;") from the original.
        buffer[i] = mBuffer[(i + mWriteIndex) % bufferLength];
    }
}
示例9: frequencies
void
BiquadFilterNode::GetFrequencyResponse(const Float32Array& aFrequencyHz,
const Float32Array& aMagResponse,
const Float32Array& aPhaseResponse)
{
aFrequencyHz.ComputeLengthAndData();
aMagResponse.ComputeLengthAndData();
aPhaseResponse.ComputeLengthAndData();
uint32_t length = std::min(std::min(aFrequencyHz.Length(), aMagResponse.Length()),
aPhaseResponse.Length());
if (!length) {
return;
}
nsAutoArrayPtr<float> frequencies(new float[length]);
float* frequencyHz = aFrequencyHz.Data();
const double nyquist = Context()->SampleRate() * 0.5;
// Normalize the frequencies
for (uint32_t i = 0; i < length; ++i) {
frequencies[i] = static_cast<float>(frequencyHz[i] / nyquist);
}
const double currentTime = Context()->CurrentTime();
double freq = mFrequency->GetValueAtTime(currentTime);
double q = mQ->GetValueAtTime(currentTime);
double gain = mGain->GetValueAtTime(currentTime);
double detune = mDetune->GetValueAtTime(currentTime);
WebCore::Biquad biquad;
SetParamsOnBiquad(biquad, Context()->SampleRate(), mType, freq, q, gain, detune);
biquad.getFrequencyResponse(int(length), frequencies, aMagResponse.Data(), aPhaseResponse.Data());
}
示例10: subarrayCallback
// V8 binding for Float32Array.prototype.subarray(start[, end]).
static v8::Handle<v8::Value> subarrayCallback(const v8::Arguments& args)
{
INC_STATS("DOM.Float32Array.subarray");
Float32Array* imp = V8Float32Array::toNative(args.Holder());
// EXCEPTION_BLOCK converts the argument and returns early if the
// conversion raised a JS exception.
EXCEPTION_BLOCK(int, start, toInt32(args[0]));
if (args.Length() <= 1) {
// One-argument form: view from 'start' to the end of the array.
return toV8(imp->subarray(start));
}
EXCEPTION_BLOCK(int, end, toInt32(args[1]));
// Two-argument form: view over [start, end).
return toV8(imp->subarray(start, end));
}
示例11: ASSERT
// Applies the processor's shaping curve to each input sample. When no usable
// curve is configured, the signal is copied through unchanged.
void WaveShaperDSPKernel::processCurve(const float* source, float* destination, size_t framesToProcess)
{
ASSERT(source && destination && waveShaperProcessor());
Float32Array* curve = waveShaperProcessor()->curve();
if (!curve) {
// Act as "straight wire" pass-through if no curve is set.
memcpy(destination, source, sizeof(float) * framesToProcess);
return;
}
float* curveData = curve->data();
int curveLength = curve->length();
ASSERT(curveData);
if (!curveData || !curveLength) {
// An empty curve likewise degenerates to a pass-through.
memcpy(destination, source, sizeof(float) * framesToProcess);
return;
}
// Apply waveshaping curve.
for (unsigned i = 0; i < framesToProcess; ++i) {
const float input = source[i];
// Calculate a virtual index based on input -1 -> +1 with -1 being curve[0], +1 being
// curve[curveLength - 1], and 0 being at the center of the curve data. Then linearly
// interpolate between the two points in the curve.
double virtualIndex = 0.5 * (input + 1) * (curveLength - 1);
double output;
if (virtualIndex < 0) {
// input < -1, so use curve[0]
output = curveData[0];
} else if (virtualIndex >= curveLength - 1) {
// input >= 1, so use last curve value
output = curveData[curveLength - 1];
} else {
// The general case where -1 <= input < 1, where 0 <= virtualIndex < curveLength - 1,
// so interpolate between the nearest samples on the curve.
unsigned index1 = static_cast<unsigned>(virtualIndex);
unsigned index2 = index1 + 1;
double interpolationFactor = virtualIndex - index1;
double value1 = curveData[index1];
double value2 = curveData[index2];
output = (1.0 - interpolationFactor) * value1 + interpolationFactor * value2;
}
destination[i] = output;
}
}
示例12: ASSERT
// Applies the processor's shaping curve to each input sample; passes the
// signal through untouched when no usable curve is configured.
void WaveShaperDSPKernel::processCurve(const float* source, float* destination, size_t framesToProcess)
{
ASSERT(source && destination && waveShaperProcessor());
Float32Array* curve = waveShaperProcessor()->curve();
if (!curve) {
// Act as "straight wire" pass-through if no curve is set.
memcpy(destination, source, sizeof(float) * framesToProcess);
return;
}
float* curveData = curve->data();
int curveLength = curve->length();
ASSERT(curveData);
if (!curveData || !curveLength) {
// An empty curve likewise degenerates to a pass-through.
memcpy(destination, source, sizeof(float) * framesToProcess);
return;
}
// Apply waveshaping curve.
for (unsigned i = 0; i < framesToProcess; ++i) {
const float input = source[i];
// Calculate a virtual index based on input -1 -> +1 with 0 being at the center of the curve data.
// Then linearly interpolate between the two points in the curve.
// NOTE(review): this scales by curveLength rather than curveLength - 1
// (cf. the other processCurve variant in this file), so input == +1 maps
// past the last sample before the clamping below — confirm this mapping
// is the intended one.
double virtualIndex = 0.5 * (input + 1) * curveLength;
int index1 = static_cast<int>(virtualIndex);
int index2 = index1 + 1;
double interpolationFactor = virtualIndex - index1;
// Clip index to the input range of the curve.
// This takes care of input outside of nominal range -1 -> +1
index1 = max(index1, 0);
index1 = min(index1, curveLength - 1);
index2 = max(index2, 0);
index2 = min(index2, curveLength - 1);
double value1 = curveData[index1];
double value2 = curveData[index2];
double output = (1.0 - interpolationFactor) * value1 + interpolationFactor * value2;
destination[i] = output;
}
}
示例13: Length
// Copies up to aDestination.Length() samples out of channel aChannelNumber,
// starting at sample aStartInChannel, into the caller-supplied array.
// Throws IndexSizeError for out-of-range requests.
void
AudioBuffer::CopyFromChannel(const Float32Array& aDestination, uint32_t aChannelNumber,
uint32_t aStartInChannel, ErrorResult& aRv)
{
// Materialize the typed array's view before calling Length()/Data().
aDestination.ComputeLengthAndData();
uint32_t length = aDestination.Length();
// Overflow-safe computation of the end of the read range.
CheckedInt<uint32_t> end = aStartInChannel;
end += length;
if (aChannelNumber >= NumberOfChannels() ||
!end.isValid() || end.value() > Length()) {
aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
return;
}
// No GC may run while we hold a raw pointer into the JS typed array.
JS::AutoCheckCannotGC nogc;
JSObject* channelArray = mJSChannels[aChannelNumber];
if (channelArray) {
if (JS_GetTypedArrayLength(channelArray) != Length()) {
// The array's buffer was detached.
aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
return;
}
bool isShared = false;
const float* sourceData =
JS_GetFloat32ArrayData(channelArray, &isShared, nogc);
// The sourceData arrays should all have originated in
// RestoreJSChannelData, where they are created unshared.
MOZ_ASSERT(!isShared);
PodMove(aDestination.Data(), sourceData + aStartInChannel, length);
return;
}
// No JS-side copy of this channel: fall back to the shared channel data.
if (!mSharedChannels.IsNull()) {
CopyChannelDataToFloat(mSharedChannels, aChannelNumber, aStartInChannel,
aDestination.Data(), length);
return;
}
// Neither representation exists, so the channel reads as silence.
PodZero(aDestination.Data(), length);
}
示例14: JS_GetFloat32ArrayData
// Copies all of aSource into channel aChannelNumber starting at sample
// aStartInChannel. Throws IndexSizeError for out-of-range requests and
// OOM if the JS channel arrays cannot be (re)created.
void
AudioBuffer::CopyToChannel(JSContext* aJSContext, const Float32Array& aSource,
uint32_t aChannelNumber, uint32_t aStartInChannel,
ErrorResult& aRv)
{
// Materialize the typed array's view before calling Length()/Data().
aSource.ComputeLengthAndData();
uint32_t length = aSource.Length();
// Overflow-safe computation of the end of the destination write range.
CheckedInt<uint32_t> end = aStartInChannel;
end += length;
if (aChannelNumber >= NumberOfChannels() ||
!end.isValid() || end.value() > mLength) {
aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
return;
}
if (!RestoreJSChannelData(aJSContext)) {
aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
return;
}
// No GC may run while we hold a raw pointer into the JS typed array.
JS::AutoCheckCannotGC nogc;
JSObject* channelArray = mJSChannels[aChannelNumber];
if (JS_GetTypedArrayLength(channelArray) != mLength) {
// The array's buffer was detached.
aRv.Throw(NS_ERROR_DOM_INDEX_SIZE_ERR);
return;
}
bool isShared = false;
float* channelData = JS_GetFloat32ArrayData(channelArray, &isShared, nogc);
// The channelData arrays should all have originated in
// RestoreJSChannelData, where they are created unshared.
MOZ_ASSERT(!isShared);
PodMove(channelData + aStartInChannel, aSource.Data(), length);
}
示例15: PeriodicWave
already_AddRefed<PeriodicWave>
AudioContext::CreatePeriodicWave(const Float32Array& aRealData,
                                 const Float32Array& aImagData,
                                 ErrorResult& aRv)
{
    // The real and imaginary coefficient arrays must have the same non-zero
    // length, capped at 4096 entries; anything else is NotSupportedError.
    const uint32_t realLength = aRealData.Length();
    const uint32_t imagLength = aImagData.Length();
    const bool lengthsMatch = realLength == imagLength;
    const bool lengthInRange = realLength != 0 && realLength <= 4096;
    if (!lengthsMatch || !lengthInRange) {
        aRv.Throw(NS_ERROR_DOM_NOT_SUPPORTED_ERR);
        return nullptr;
    }
    nsRefPtr<PeriodicWave> wave =
        new PeriodicWave(this, aRealData.Data(), realLength,
                         aImagData.Data(), imagLength);
    return wave.forget();
}