This page collects typical usage examples of the C++ method ThreeDPoint::IsZero. If you are wondering how ThreeDPoint::IsZero is used in practice, the hand-picked code examples below may help. You can also explore further usage examples of the containing class, ThreeDPoint.
Four code examples of the ThreeDPoint::IsZero method are shown below, sorted by popularity by default.
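The examples appear to come from Mozilla's WebAudio PannerNode implementation (PannerNodeEngine); the ThreeDPoint class itself is not shown on this page. As a rough point of reference, a minimal sketch of the interface these snippets rely on might look as follows. It is inferred from the calls the examples make (IsZero, Normalize, DotProduct, CrossProduct, the arithmetic operators and operator==), not copied from the actual Mozilla header.

// Minimal sketch of an assumed ThreeDPoint interface (not the Mozilla class).
#include <cmath>

struct ThreeDPoint {
  double x, y, z;

  bool IsZero() const { return x == 0.0 && y == 0.0 && z == 0.0; }

  void Normalize() {
    // Callers are expected to check IsZero() before normalizing.
    double inv = 1.0 / std::sqrt(x * x + y * y + z * z);
    x *= inv; y *= inv; z *= inv;
  }

  double DotProduct(const ThreeDPoint& o) const {
    return x * o.x + y * o.y + z * o.z;
  }

  ThreeDPoint CrossProduct(const ThreeDPoint& o) const {
    return { y * o.z - z * o.y, z * o.x - x * o.z, x * o.y - y * o.x };
  }
};

inline ThreeDPoint operator-(const ThreeDPoint& a, const ThreeDPoint& b) {
  return { a.x - b.x, a.y - b.y, a.z - b.z };
}

inline ThreeDPoint operator*(const ThreeDPoint& a, double s) {
  return { a.x * s, a.y * s, a.z * s };
}

inline bool operator==(const ThreeDPoint& a, const ThreeDPoint& b) {
  return a.x == b.x && a.y == b.y && a.z == b.z;
}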
Example 1: acos
// This algorithm is specified in the webaudio spec.
void
PannerNodeEngine::ComputeAzimuthAndElevation(float& aAzimuth, float& aElevation)
{
  ThreeDPoint sourceListener = mPosition - mListenerPosition;
  if (sourceListener.IsZero()) {
    aAzimuth = 0.0;
    aElevation = 0.0;
    return;
  }
  sourceListener.Normalize();

  // Project the source-listener vector on the x-z plane.
  const ThreeDPoint& listenerFront = mListenerFrontVector;
  const ThreeDPoint& listenerRight = mListenerRightVector;
  ThreeDPoint up = listenerRight.CrossProduct(listenerFront);

  double upProjection = sourceListener.DotProduct(up);
  aElevation = 90 - 180 * acos(upProjection) / M_PI;
  if (aElevation > 90) {
    aElevation = 180 - aElevation;
  } else if (aElevation < -90) {
    aElevation = -180 - aElevation;
  }

  ThreeDPoint projectedSource = sourceListener - up * upProjection;
  if (projectedSource.IsZero()) {
    // source - listener direction is up or down.
    aAzimuth = 0.0;
    return;
  }
  projectedSource.Normalize();

  // Actually compute the angle, and convert to degrees
  double projection = projectedSource.DotProduct(listenerRight);
  aAzimuth = 180 * acos(projection) / M_PI;

  // Compute whether the source is in front or behind the listener.
  double frontBack = projectedSource.DotProduct(listenerFront);
  if (frontBack < 0) {
    aAzimuth = 360 - aAzimuth;
  }

  // Rotate the azimuth so it is relative to the listener front vector instead
  // of the right vector.
  if ((aAzimuth >= 0) && (aAzimuth <= 270)) {
    aAzimuth = 90 - aAzimuth;
  } else {
    aAzimuth = 450 - aAzimuth;
  }
}
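To make the convention of the result concrete, here is a small standalone sketch (not taken from the sources above) that runs the same azimuth math on plain arrays: a source straight ahead of the listener comes out at 0 degrees, one to the listener's right at +90, and one to the left at -90. Vec3, Dot, Cross, Normalized and Azimuth are helpers invented for this illustration.

// Standalone sketch of the azimuth computation, for illustration only.
#include <array>
#include <cmath>
#include <cstdio>

using Vec3 = std::array<double, 3>;

static double Dot(const Vec3& a, const Vec3& b) {
  return a[0] * b[0] + a[1] * b[1] + a[2] * b[2];
}

static Vec3 Cross(const Vec3& a, const Vec3& b) {
  return { a[1] * b[2] - a[2] * b[1],
           a[2] * b[0] - a[0] * b[2],
           a[0] * b[1] - a[1] * b[0] };
}

static Vec3 Normalized(Vec3 v) {
  double len = std::sqrt(Dot(v, v));
  if (len > 0.0) { v[0] /= len; v[1] /= len; v[2] /= len; }
  return v;
}

static double Azimuth(const Vec3& sourceRelativeToListener,
                      const Vec3& front, const Vec3& right) {
  Vec3 s = Normalized(sourceRelativeToListener);
  Vec3 up = Cross(right, front);
  double upProjection = Dot(s, up);
  // Remove the vertical component, then measure against the right vector.
  Vec3 projected = { s[0] - up[0] * upProjection,
                     s[1] - up[1] * upProjection,
                     s[2] - up[2] * upProjection };
  projected = Normalized(projected);
  double azimuth = 180.0 * std::acos(Dot(projected, right)) / M_PI;
  if (Dot(projected, front) < 0) {
    azimuth = 360.0 - azimuth;
  }
  // Rotate so 0 degrees points along the front vector.
  return (azimuth >= 0 && azimuth <= 270) ? 90.0 - azimuth : 450.0 - azimuth;
}

int main() {
  Vec3 front = { 0, 0, -1 };  // WebAudio default listener orientation
  Vec3 right = { 1, 0, 0 };
  std::printf("in front:     %g\n", Azimuth({ 0, 0, -1 }, front, right));  // 0
  std::printf("to the right: %g\n", Azimuth({ 1, 0, 0 }, front, right));   // 90
  std::printf("to the left:  %g\n", Azimuth({ -1, 0, 0 }, front, right));  // -90
  return 0;
}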
Example 2: acos
// This algorithm is described in the WebAudio spec.
float
PannerNodeEngine::ComputeConeGain(const ThreeDPoint& position,
                                  const ThreeDPoint& orientation)
{
  // Omnidirectional source
  if (orientation.IsZero() || ((mConeInnerAngle == 360) && (mConeOuterAngle == 360))) {
    return 1;
  }

  // Normalized source-listener vector
  ThreeDPoint sourceToListener = mListenerPosition - position;
  sourceToListener.Normalize();

  // Angle between the source orientation vector and the source-listener vector
  double dotProduct = sourceToListener.DotProduct(orientation);
  double angle = 180 * acos(dotProduct) / M_PI;
  double absAngle = fabs(angle);

  // Divide by 2 here since API is entire angle (not half-angle)
  double absInnerAngle = fabs(mConeInnerAngle) / 2;
  double absOuterAngle = fabs(mConeOuterAngle) / 2;
  double gain = 1;

  if (absAngle <= absInnerAngle) {
    // No attenuation
    gain = 1;
  } else if (absAngle >= absOuterAngle) {
    // Max attenuation
    gain = mConeOuterGain;
  } else {
    // Between inner and outer cones
    // inner -> outer, x goes from 0 -> 1
    double x = (absAngle - absInnerAngle) / (absOuterAngle - absInnerAngle);
    gain = (1 - x) + mConeOuterGain * x;
  }

  return gain;
}
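The cone attenuation itself is just a clamped linear blend between full gain and mConeOuterGain, driven by the angle between the source's orientation and the source-to-listener direction. Below is a standalone sketch of that curve; ConeGain and its parameters are invented for this illustration and simply mirror the logic above.

// Standalone sketch of the cone attenuation curve, for illustration only.
#include <cmath>
#include <cstdio>

double ConeGain(double angleDeg, double innerAngle, double outerAngle,
                double outerGain) {
  double absAngle = std::fabs(angleDeg);
  double absInner = std::fabs(innerAngle) / 2;  // API exposes full angles
  double absOuter = std::fabs(outerAngle) / 2;
  if (absAngle <= absInner) {
    return 1.0;                                 // inside the inner cone
  }
  if (absAngle >= absOuter) {
    return outerGain;                           // outside the outer cone
  }
  double x = (absAngle - absInner) / (absOuter - absInner);
  return (1.0 - x) + outerGain * x;             // linear blend in between
}

int main() {
  // Inner cone 90 degrees, outer cone 180 degrees, outer gain 0.25.
  std::printf("%g %g %g\n",
              ConeGain(30, 90, 180, 0.25),    // 1     (inside inner cone)
              ConeGain(67.5, 90, 180, 0.25),  // 0.625 (halfway in between)
              ConeGain(120, 90, 180, 0.25));  // 0.25  (outside outer cone)
  return 0;
}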
Example 3: ComputeAzimuthAndElevation
void
PannerNodeEngine::HRTFPanningFunction(const AudioBlock& aInput,
                                      AudioBlock* aOutput,
                                      StreamTime tick)
{
  // The output of this node is always stereo, no matter what the inputs are.
  aOutput->AllocateChannels(2);

  float azimuth, elevation;
  ThreeDPoint position = ConvertAudioParamTimelineTo3DP(mPositionX, mPositionY, mPositionZ, tick);
  ThreeDPoint orientation = ConvertAudioParamTimelineTo3DP(mOrientationX, mOrientationY, mOrientationZ, tick);
  if (!orientation.IsZero()) {
    orientation.Normalize();
  }
  ComputeAzimuthAndElevation(position, azimuth, elevation);

  AudioBlock input = aInput;
  // Gain is applied before the delay and convolution of the HRTF.
  input.mVolume *= ComputeConeGain(position, orientation) * ComputeDistanceGain(position);

  mHRTFPanner->pan(azimuth, elevation, &input, aOutput);
}
Example 4: position
void
PannerNodeEngine::EqualPowerPanningFunction(const AudioBlock& aInput,
                                            AudioBlock* aOutput,
                                            StreamTime tick)
{
  float azimuth, elevation, gainL, gainR, normalizedAzimuth, distanceGain, coneGain;
  int inputChannels = aInput.ChannelCount();

  // Optimize the case where the position and orientation are constant for this
  // processing block: we can just apply a constant gain on the left and right
  // channel.
  if (mPositionX.HasSimpleValue() &&
      mPositionY.HasSimpleValue() &&
      mPositionZ.HasSimpleValue() &&
      mOrientationX.HasSimpleValue() &&
      mOrientationY.HasSimpleValue() &&
      mOrientationZ.HasSimpleValue()) {
    ThreeDPoint position = ConvertAudioParamTimelineTo3DP(mPositionX, mPositionY, mPositionZ, tick);
    ThreeDPoint orientation = ConvertAudioParamTimelineTo3DP(mOrientationX, mOrientationY, mOrientationZ, tick);
    if (!orientation.IsZero()) {
      orientation.Normalize();
    }
    // If both the listener and the source are in the same spot, and no cone
    // gain is specified, this node is a no-op.
    if (mListenerPosition == position &&
        mConeInnerAngle == 360 &&
        mConeOuterAngle == 360) {
      *aOutput = aInput;
      return;
    }

    // The output of this node is always stereo, no matter what the inputs are.
    aOutput->AllocateChannels(2);

    ComputeAzimuthAndElevation(position, azimuth, elevation);
    coneGain = ComputeConeGain(position, orientation);

    // The following algorithm is described in the spec.
    // Clamp azimuth to [-180, 180]; the wrap-around below folds it into [-90, 90].
    azimuth = min(180.f, max(-180.f, azimuth));

    // Wrap around
    if (azimuth < -90.f) {
      azimuth = -180.f - azimuth;
    } else if (azimuth > 90) {
      azimuth = 180.f - azimuth;
    }

    // Normalize the value in the [0, 1] range.
    if (inputChannels == 1) {
      normalizedAzimuth = (azimuth + 90.f) / 180.f;
    } else {
      if (azimuth <= 0) {
        normalizedAzimuth = (azimuth + 90.f) / 90.f;
      } else {
        normalizedAzimuth = azimuth / 90.f;
      }
    }

    distanceGain = ComputeDistanceGain(position);

    // Actually compute the left and right gain.
    gainL = cos(0.5 * M_PI * normalizedAzimuth);
    gainR = sin(0.5 * M_PI * normalizedAzimuth);

    // Compute the output.
    ApplyStereoPanning(aInput, aOutput, gainL, gainR, azimuth <= 0);

    aOutput->mVolume = aInput.mVolume * distanceGain * coneGain;
  } else {
    float positionX[WEBAUDIO_BLOCK_SIZE];
    float positionY[WEBAUDIO_BLOCK_SIZE];
    float positionZ[WEBAUDIO_BLOCK_SIZE];
    float orientationX[WEBAUDIO_BLOCK_SIZE];
    float orientationY[WEBAUDIO_BLOCK_SIZE];
    float orientationZ[WEBAUDIO_BLOCK_SIZE];

    // The output of this node is always stereo, no matter what the inputs are.
    aOutput->AllocateChannels(2);

    if (!mPositionX.HasSimpleValue()) {
      mPositionX.GetValuesAtTime(tick, positionX, WEBAUDIO_BLOCK_SIZE);
    } else {
      positionX[0] = mPositionX.GetValueAtTime(tick);
    }
    if (!mPositionY.HasSimpleValue()) {
      mPositionY.GetValuesAtTime(tick, positionY, WEBAUDIO_BLOCK_SIZE);
    } else {
      positionY[0] = mPositionY.GetValueAtTime(tick);
    }
    if (!mPositionZ.HasSimpleValue()) {
      mPositionZ.GetValuesAtTime(tick, positionZ, WEBAUDIO_BLOCK_SIZE);
    } else {
      positionZ[0] = mPositionZ.GetValueAtTime(tick);
    }
    if (!mOrientationX.HasSimpleValue()) {
      mOrientationX.GetValuesAtTime(tick, orientationX, WEBAUDIO_BLOCK_SIZE);
    } else {
      //......... (the remaining code is omitted here) .........
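For reference, the clamp/wrap and cos/sin gain computation used in the constant-value branch above can be lifted into a small standalone sketch. EqualPowerGains is invented for this illustration and follows the mono-input path; the cos/sin pair keeps gainL^2 + gainR^2 equal to 1 across the whole range, which is what makes the pan "equal power".

// Standalone sketch of the equal-power gain mapping, for illustration only.
#include <cmath>
#include <cstdio>

void EqualPowerGains(float azimuth, float* gainL, float* gainR) {
  // Clamp to [-180, 180], then wrap into [-90, 90] (behind maps to the front).
  azimuth = std::fmin(180.f, std::fmax(-180.f, azimuth));
  if (azimuth < -90.f) {
    azimuth = -180.f - azimuth;
  } else if (azimuth > 90.f) {
    azimuth = 180.f - azimuth;
  }
  // Mono input: map [-90, 90] onto [0, 1] and split with a cos/sin pan law.
  float normalized = (azimuth + 90.f) / 180.f;
  *gainL = static_cast<float>(std::cos(0.5 * M_PI * normalized));
  *gainR = static_cast<float>(std::sin(0.5 * M_PI * normalized));
}

int main() {
  float l, r;
  EqualPowerGains(0.f, &l, &r);    // centered: l == r == sqrt(0.5)
  std::printf("center: %f %f\n", l, r);
  EqualPowerGains(-90.f, &l, &r);  // hard left: l == 1, r == 0
  std::printf("left:   %f %f\n", l, r);
  EqualPowerGains(90.f, &l, &r);   // hard right: l == 0, r == 1
  std::printf("right:  %f %f\n", l, r);
  return 0;
}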