This article collects and summarizes typical usage examples of the C++ AutoTArray::Elements method. If you have been wondering what exactly AutoTArray::Elements does and how to use it, the curated code samples below may help. You can also explore further usage examples of the containing class, AutoTArray.
The 15 code examples of AutoTArray::Elements shown below are sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better C++ code samples.
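Before the examples, here is a minimal sketch of the core pattern they all share, assuming the Gecko nsTArray header; the element type, buffer size, and helper name are illustrative rather than taken from any example below. AutoTArray<T, N> keeps up to N elements in inline (stack) storage and only falls back to the heap for larger lengths; SetLength() sizes the array, and Elements() exposes the underlying contiguous buffer as a raw pointer that can be handed to C-style APIs.
#include "nsTArray.h"  // AutoTArray (Gecko)
#include <cstdint>
#include <cstring>     // memset

// Hypothetical helper: zero-fill a sample buffer held in inline storage.
static void ZeroSamples(uint32_t aFrames, uint32_t aChannels)
{
  AutoTArray<float, 1024> buf;         // inline storage for 1024 floats
  buf.SetLength(aFrames * aChannels);  // spills to the heap if larger
  // Elements() returns float*; the pointer stays valid until the array is
  // resized, cleared, or destroyed.
  memset(buf.Elements(), 0, buf.Length() * sizeof(float));
}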
Example 1: ceil
void
AudioStream::GetTimeStretched(AudioBufferWriter& aWriter)
{
mMonitor.AssertCurrentThreadOwns();
// We need to call the non-locking version, because we already have the lock.
if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
return;
}
uint32_t toPopFrames =
ceil(aWriter.Available() * mAudioClock.GetPlaybackRate());
while (mTimeStretcher->numSamples() < aWriter.Available()) {
UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
if (c->Frames() == 0) {
break;
}
MOZ_ASSERT(c->Frames() <= toPopFrames);
if (IsValidAudioFormat(c.get())) {
mTimeStretcher->putSamples(c->Data(), c->Frames());
} else {
// Write silence if invalid format.
AutoTArray<AudioDataValue, 1000> buf;
buf.SetLength(mOutChannels * c->Frames());
memset(buf.Elements(), 0, buf.Length() * sizeof(AudioDataValue));
mTimeStretcher->putSamples(buf.Elements(), c->Frames());
}
}
auto timeStretcher = mTimeStretcher;
aWriter.Write([timeStretcher] (AudioDataValue* aPtr, uint32_t aFrames) {
return timeStretcher->receiveSamples(aPtr, aFrames);
}, aWriter.Available());
}
Example 2: PodZero
template <typename T>  // T is the sample type of the source chunk
static void
CopyChunkToBlock(AudioChunk& aInput, AudioBlock *aBlock,
uint32_t aOffsetInBlock)
{
uint32_t blockChannels = aBlock->ChannelCount();
AutoTArray<const T*,2> channels;
if (aInput.IsNull()) {
channels.SetLength(blockChannels);
PodZero(channels.Elements(), blockChannels);
} else {
const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
channels.SetLength(inputChannels.Length());
PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
if (channels.Length() != blockChannels) {
// We only need to upmix here because aBlock's channel count has been
// chosen to be a superset of the channel count of every chunk.
AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
}
}
for (uint32_t c = 0; c < blockChannels; ++c) {
float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
if (channels[c]) {
ConvertAudioSamplesWithScale(channels[c], outputData, aInput.GetDuration(), aInput.mVolume);
} else {
PodZero(outputData, aInput.GetDuration());
}
}
}
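The PodZero and PodCopy calls in Example 2 are Gecko's typed wrappers around memset and memcpy for plain-old-data, and Elements() supplies the raw destination pointer they need. Below is a reduced sketch of that pattern, assuming mozilla/PodOperations.h; the helper name and the channel-pointer element type are invented for illustration.
#include "nsTArray.h"
#include "mozilla/PodOperations.h"  // PodZero, PodCopy
#include <algorithm>                // std::min
#include <cstdint>

// Hypothetical helper: mirror an optional list of channel pointers into a
// local array, zeroing the slots when there is no input.
static void SnapshotChannels(const nsTArray<const float*>* aInput,
                             uint32_t aChannelCount)
{
  AutoTArray<const float*, 2> channels;
  channels.SetLength(aChannelCount);
  if (!aInput) {
    // Null input: zero bytes over the pointer slots, i.e. null channel pointers.
    mozilla::PodZero(channels.Elements(), channels.Length());
  } else {
    size_t n = std::min<size_t>(channels.Length(), aInput->Length());
    mozilla::PodCopy(channels.Elements(), aInput->Elements(), n);
  }
}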
Example 3:
void
nsShmImage::Put(const mozilla::LayoutDeviceIntRegion& aRegion)
{
AutoTArray<xcb_rectangle_t, 32> xrects;
xrects.SetCapacity(aRegion.GetNumRects());
for (auto iter = aRegion.RectIter(); !iter.Done(); iter.Next()) {
const mozilla::LayoutDeviceIntRect &r = iter.Get();
xcb_rectangle_t xrect = { (short)r.x, (short)r.y, (unsigned short)r.width, (unsigned short)r.height };
xrects.AppendElement(xrect);
}
if (!mGC) {
mGC = xcb_generate_id(mConnection);
xcb_create_gc(mConnection, mGC, mWindow, 0, nullptr);
}
xcb_set_clip_rectangles(mConnection, XCB_CLIP_ORDERING_YX_BANDED, mGC, 0, 0,
xrects.Length(), xrects.Elements());
if (mPixmap != XCB_NONE) {
mLastRequest = xcb_copy_area_checked(mConnection, mPixmap, mWindow, mGC,
0, 0, 0, 0, mSize.width, mSize.height);
} else {
mLastRequest = xcb_shm_put_image_checked(mConnection, mWindow, mGC,
mSize.width, mSize.height,
0, 0, mSize.width, mSize.height,
0, 0, mDepth,
XCB_IMAGE_FORMAT_Z_PIXMAP, 0,
mShmSeg, 0);
}
xcb_flush(mConnection);
}
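Example 3 shows the other common shape of the pattern: build the array with SetCapacity() and AppendElement(), then pass Length() and Elements() together to a C interface that expects a count plus a pointer to contiguous structs (here xcb_set_clip_rectangles). A stripped-down sketch follows; the Rect struct and the C function are stand-ins declared purely for illustration.
#include "nsTArray.h"
#include <cstdint>

struct Rect { int16_t x, y; uint16_t w, h; };

// Stand-in for a C API such as xcb_set_clip_rectangles(): takes a count and a
// pointer to contiguous rectangles.
extern "C" void SetClipRects(uint32_t aCount, const Rect* aRects);

static void PushClip(const Rect* aSource, uint32_t aCount)
{
  AutoTArray<Rect, 32> rects;   // most clip regions fit the 32 inline slots
  rects.SetCapacity(aCount);    // reserve once so AppendElement never reallocates
  for (uint32_t i = 0; i < aCount; ++i) {
    rects.AppendElement(aSource[i]);
  }
  SetClipRects(rects.Length(), rects.Elements());
}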
Example 4: ceil
void
AudioStream::GetTimeStretched(AudioBufferWriter& aWriter)
{
mMonitor.AssertCurrentThreadOwns();
// We need to call the non-locking version, because we already have the lock.
if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
return;
}
uint32_t toPopFrames =
ceil(aWriter.Available() * mAudioClock.GetPlaybackRate());
while (mTimeStretcher->numSamples() < aWriter.Available()) {
UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
if (c->Frames() == 0) {
break;
}
MOZ_ASSERT(c->Frames() <= toPopFrames);
if (IsValidAudioFormat(c.get())) {
mTimeStretcher->putSamples(c->Data(), c->Frames());
} else {
// Write silence if invalid format.
AutoTArray<AudioDataValue, 1000> buf;
auto size = CheckedUint32(mOutChannels) * c->Frames();
if (!size.isValid()) {
// Overflow should not happen in the normal case.
LOGW("Invalid member data: %d channels, %d frames", mOutChannels, c->Frames());
return;
}
buf.SetLength(size.value());
size = size * sizeof(AudioDataValue);
if (!size.isValid()) {
LOGW("The required memory size is too large.");
return;
}
memset(buf.Elements(), 0, size.value());
mTimeStretcher->putSamples(buf.Elements(), c->Frames());
}
}
auto timeStretcher = mTimeStretcher;
aWriter.Write([timeStretcher] (AudioDataValue* aPtr, uint32_t aFrames) {
return timeStretcher->receiveSamples(aPtr, aFrames);
}, aWriter.Available());
}
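Example 4 is the same routine as Example 1; the difference is the overflow guard. The buffer size is computed with CheckedUint32 before it reaches SetLength(), so a bogus channel or frame count cannot silently wrap around. Here is a reduced sketch of just that guard, assuming mozilla/CheckedInt.h; the helper name is a placeholder.
#include "nsTArray.h"
#include "mozilla/CheckedInt.h"  // CheckedUint32
#include <cstdint>
#include <cstring>

// Hypothetical helper: allocate and zero a sample buffer, refusing sizes that
// would overflow uint32_t instead of silently wrapping.
static bool ZeroSamplesChecked(uint32_t aChannels, uint32_t aFrames)
{
  mozilla::CheckedUint32 samples = mozilla::CheckedUint32(aChannels) * aFrames;
  if (!samples.isValid()) {
    return false;  // aChannels * aFrames overflowed
  }
  AutoTArray<float, 1024> buf;
  buf.SetLength(samples.value());

  mozilla::CheckedUint32 bytes = samples * sizeof(float);
  if (!bytes.isValid()) {
    return false;  // the byte count overflowed
  }
  memset(buf.Elements(), 0, bytes.value());
  return true;
}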
Example 5: ReadChunk
// Read the audio data in aChunk, resample it if needed,
// and then send the result to the OMX input buffer (or several buffers if one is not enough).
// aSamplesRead will be the number of samples that have been read from aChunk.
BufferState ReadChunk(AudioChunk& aChunk, size_t* aSamplesRead)
{
size_t chunkSamples = aChunk.GetDuration();
size_t bytesToCopy = chunkSamples * mOMXAEncoder.mResamplingRatio
* mOMXAEncoder.mChannels * sizeof(AudioDataValue);
size_t bytesCopied = 0;
if (bytesToCopy <= AvailableSize()) {
if (aChunk.IsNull()) {
bytesCopied = SendSilenceToBuffer(chunkSamples);
} else {
bytesCopied = SendChunkToBuffer(aChunk, chunkSamples);
}
UpdateAfterSendChunk(chunkSamples, bytesCopied, aSamplesRead);
} else {
// Interleave data into a temporary buffer.
AutoTArray<AudioDataValue, 9600> pcm;
pcm.SetLength(bytesToCopy);
AudioDataValue* interleavedSource = pcm.Elements();
AudioTrackEncoder::InterleaveTrackData(aChunk, chunkSamples,
mOMXAEncoder.mChannels,
interleavedSource);
// When the chunk's data is larger than the buffer capacity,
// we split it into sub-chunks to fill the buffers.
size_t subChunkSamples = 0;
while (GetNextSubChunk(bytesToCopy, subChunkSamples)) {
// To avoid enqueueing an empty buffer, we follow this order:
// submit the current buffer first, then request a new one, and send data to it last.
if (!IsEmpty()) {
// Submit the filled-up buffer and request a new buffer.
status_t result = Enqueue(mOMXAEncoder.mTimestamp,
mInputFlags & ~OMXCodecWrapper::BUFFER_EOS);
if (result != OK) {
return BUFFER_FAIL;
}
result = Dequeue();
if (result == -EAGAIN) {
return WAIT_FOR_NEW_BUFFER;
}
if (result != OK) {
return BUFFER_FAIL;
}
}
if (aChunk.IsNull()) {
bytesCopied = SendSilenceToBuffer(subChunkSamples);
} else {
bytesCopied = SendInterleavedSubChunkToBuffer(interleavedSource, subChunkSamples);
}
UpdateAfterSendChunk(subChunkSamples, bytesCopied, aSamplesRead);
// Advance to the samples that have not yet been sent to the buffer.
interleavedSource += subChunkSamples * mOMXAEncoder.mChannels;
}
}
return BUFFER_OK;
}
Example 6: memset
void
NS_GetComplexLineBreaks(const char16_t* aText, uint32_t aLength,
uint8_t* aBreakBefore)
{
NS_ASSERTION(aText, "aText shouldn't be null");
int outItems = 0;
HRESULT result;
AutoTArray<SCRIPT_ITEM, 64> items;
char16ptr_t text = aText;
memset(aBreakBefore, false, aLength);
if (!items.AppendElements(64))
return;
do {
result = ScriptItemize(text, aLength, items.Length(), nullptr, nullptr,
items.Elements(), &outItems);
if (result == E_OUTOFMEMORY) {
if (!items.AppendElements(items.Length()))
return;
}
} while (result == E_OUTOFMEMORY);
for (int iItem = 0; iItem < outItems; ++iItem) {
uint32_t endOffset = (iItem + 1 == outItems ? aLength : items[iItem + 1].iCharPos);
uint32_t startOffset = items[iItem].iCharPos;
AutoTArray<SCRIPT_LOGATTR, 64> sla;
if (!sla.AppendElements(endOffset - startOffset))
return;
if (ScriptBreak(text + startOffset, endOffset - startOffset,
&items[iItem].a, sla.Elements()) < 0)
return;
for (uint32_t j=0; j+startOffset < endOffset; ++j) {
aBreakBefore[j+startOffset] = sla[j].fSoftBreak;
}
}
}
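Example 6 adds a retry idiom on top of Elements(): the output array starts at a guessed size, and whenever the API reports that the buffer was too small (E_OUTOFMEMORY from ScriptItemize), the array is grown with AppendElements() and the call is repeated. A generic sketch of that loop follows; FillItems and its boolean failure convention are invented stand-ins for ScriptItemize and its error code.
#include "nsTArray.h"
#include <cstdint>

struct Item { uint32_t pos; };

// Stand-in for an API that writes at most aCapacity items and returns false
// when the buffer is too small.
extern "C" bool FillItems(Item* aOut, uint32_t aCapacity, int* aWritten);

static int ItemizeWithRetry()
{
  AutoTArray<Item, 64> items;
  if (!items.AppendElements(64)) {  // start with the inline capacity
    return 0;
  }
  int written = 0;
  // Double the buffer until the call fits, as Example 6 does on E_OUTOFMEMORY.
  while (!FillItems(items.Elements(), items.Length(), &written)) {
    if (!items.AppendElements(items.Length())) {
      return 0;
    }
  }
  return written;
}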
Example 7: fwrite
template <typename T>  // T is the sample type; this overload is selected for float via EnableIf
typename EnableIf<IsSame<T, float>::value, void>::Type
WriteDumpFileHelper(T* aInput, size_t aSamples, FILE* aFile) {
AutoTArray<uint8_t, 1024*2> buf;
buf.SetLength(aSamples*2);
uint8_t* output = buf.Elements();
for (uint32_t i = 0; i < aSamples; ++i) {
SetUint16LE(output + i*2, int16_t(aInput[i]*32767.0f));
}
fwrite(output, 2, aSamples, aFile);
fflush(aFile);
}
Example 8: SendChunkToBuffer
// Interleave the chunk data, send it to the buffer,
// and return the number of bytes of audio data copied.
size_t SendChunkToBuffer(AudioChunk& aSource, size_t aSamplesNum)
{
AudioDataValue* dst = reinterpret_cast<AudioDataValue*>(GetPointer());
size_t bytesToCopy = aSamplesNum * mOMXAEncoder.mResamplingRatio
* mOMXAEncoder.mChannels * sizeof(AudioDataValue);
uint32_t dstSamplesCopied = aSamplesNum;
if (mOMXAEncoder.mResampler) {
AutoTArray<AudioDataValue, 9600> pcm;
pcm.SetLength(bytesToCopy);
AudioTrackEncoder::InterleaveTrackData(aSource, aSamplesNum,
mOMXAEncoder.mChannels,
pcm.Elements());
int16_t* tempSource = reinterpret_cast<int16_t*>(pcm.Elements());
speex_resampler_process_interleaved_int(mOMXAEncoder.mResampler, tempSource,
&aSamplesNum, dst,
&dstSamplesCopied);
} else {
AudioTrackEncoder::InterleaveTrackData(aSource, aSamplesNum,
mOMXAEncoder.mChannels, dst);
}
return dstSamplesCopied * mOMXAEncoder.mChannels * sizeof(AudioDataValue);
}
Example 9: GetDataFromMatrix
void
DOMMatrixReadOnly::ToFloat64Array(JSContext* aCx, JS::MutableHandle<JSObject*> aResult, ErrorResult& aRv) const
{
AutoTArray<double, 16> arr;
arr.SetLength(16);
GetDataFromMatrix(this, arr.Elements());
JS::Rooted<JS::Value> value(aCx);
if (!ToJSValue(aCx, TypedArrayCreator<Float64Array>(arr), &value)) {
aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
return;
}
aResult.set(&value.toObject());
}
Example 10: memset
void
NS_GetComplexLineBreaks(const char16_t* aText, uint32_t aLength,
uint8_t* aBreakBefore)
{
NS_ASSERTION(aText, "aText shouldn't be null");
memset(aBreakBefore, false, aLength * sizeof(uint8_t));
AutoTArray<PangoLogAttr, 2000> attrBuffer;
if (!attrBuffer.AppendElements(aLength + 1))
return;
NS_ConvertUTF16toUTF8 aUTF8(aText, aLength);
const gchar* p = aUTF8.Data();
const gchar* end = p + aUTF8.Length();
uint32_t u16Offset = 0;
static PangoLanguage* language = pango_language_from_string("en");
while (p < end)
{
PangoLogAttr* attr = attrBuffer.Elements();
pango_get_log_attrs(p, end - p, -1, language, attr, attrBuffer.Length());
while (p < end)
{
aBreakBefore[u16Offset] = attr->is_line_break;
if (NS_IS_LOW_SURROGATE(aText[u16Offset]))
aBreakBefore[++u16Offset] = false; // Skip high surrogate
++u16Offset;
bool err;
uint32_t ch = UTF8CharEnumerator::NextChar(&p, end, &err);
++attr;
if (ch == 0 || err) {
// pango_break (pango 1.16.2) only analyses text before the
// first NUL (but sets one extra attr). Workaround loop to call
// pango_break again to analyse after the NUL is done somewhere else
// (gfx/thebes/gfxFontconfigFonts.cpp: SetupClusterBoundaries()).
// So, we do the same here for pango_get_log_attrs.
break;
}
}
}
}
Example 11: if
void
AudioNodeStream::UpMixDownMixChunk(const AudioBlock* aChunk,
uint32_t aOutputChannelCount,
nsTArray<const float*>& aOutputChannels,
DownmixBufferType& aDownmixBuffer)
{
for (uint32_t i = 0; i < aChunk->ChannelCount(); i++) {
aOutputChannels.AppendElement(static_cast<const float*>(aChunk->mChannelData[i]));
}
if (aOutputChannels.Length() < aOutputChannelCount) {
if (mChannelInterpretation == ChannelInterpretation::Speakers) {
AudioChannelsUpMix<float>(&aOutputChannels, aOutputChannelCount, nullptr);
NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
"We called GetAudioChannelsSuperset to avoid this");
} else {
// Fill up the remaining aOutputChannels by zeros
for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) {
aOutputChannels.AppendElement(nullptr);
}
}
} else if (aOutputChannels.Length() > aOutputChannelCount) {
if (mChannelInterpretation == ChannelInterpretation::Speakers) {
AutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
outputChannels.SetLength(aOutputChannelCount);
aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE);
for (uint32_t j = 0; j < aOutputChannelCount; ++j) {
outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
}
AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(),
aOutputChannelCount, WEBAUDIO_BLOCK_SIZE);
aOutputChannels.SetLength(aOutputChannelCount);
for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) {
aOutputChannels[j] = outputChannels[j];
}
} else {
// Drop the remaining aOutputChannels
aOutputChannels.RemoveElementsAt(aOutputChannelCount,
aOutputChannels.Length() - aOutputChannelCount);
}
}
}
Example 12: Intl
NS_IMETHODIMP
xpcAccessibleTable::GetSelectedRowIndices(uint32_t* aRowsArraySize,
int32_t** aRowsArray) {
NS_ENSURE_ARG_POINTER(aRowsArraySize);
*aRowsArraySize = 0;
NS_ENSURE_ARG_POINTER(aRowsArray);
*aRowsArray = 0;
if (!Intl()) return NS_ERROR_FAILURE;
AutoTArray<uint32_t, XPC_TABLE_DEFAULT_SIZE> rowsArray;
Intl()->SelectedRowIndices(&rowsArray);
*aRowsArraySize = rowsArray.Length();
*aRowsArray =
static_cast<int32_t*>(moz_xmalloc(*aRowsArraySize * sizeof(int32_t)));
memcpy(*aRowsArray, rowsArray.Elements(), *aRowsArraySize * sizeof(int32_t));
return NS_OK;
}
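Example 12 is the copy-out pattern used by XPCOM getters: results are gathered in a stack-friendly AutoTArray, then memcpy'd from Elements() into a heap buffer owned by the caller. Below is a trimmed sketch, with the allocation done via moz_xmalloc as in the example; the helper name and the fixed element types are illustrative.
#include "nsTArray.h"
#include "mozilla/mozalloc.h"  // moz_xmalloc
#include <cstdint>
#include <cstring>

// Hypothetical getter: hand the caller a heap-allocated copy of the collected
// indices; the caller releases it with free().
static void CopyOutIndices(const AutoTArray<uint32_t, 16>& aIndices,
                           uint32_t* aCount, int32_t** aOut)
{
  *aCount = aIndices.Length();
  *aOut = static_cast<int32_t*>(moz_xmalloc(*aCount * sizeof(int32_t)));
  // Elements() gives the contiguous source buffer for the copy.
  memcpy(*aOut, aIndices.Elements(), *aCount * sizeof(int32_t));
}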
Example 13:
void
nsShmImage::Put(const mozilla::LayoutDeviceIntRegion& aRegion)
{
AutoTArray<xcb_rectangle_t, 32> xrects;
xrects.SetCapacity(aRegion.GetNumRects());
for (auto iter = aRegion.RectIter(); !iter.Done(); iter.Next()) {
const mozilla::LayoutDeviceIntRect &r = iter.Get();
xcb_rectangle_t xrect = { (short)r.x, (short)r.y, (unsigned short)r.width, (unsigned short)r.height };
xrects.AppendElement(xrect);
}
if (!mGC) {
mGC = xcb_generate_id(mConnection);
xcb_create_gc(mConnection, mGC, mWindow, 0, nullptr);
}
xcb_set_clip_rectangles(mConnection, XCB_CLIP_ORDERING_YX_BANDED, mGC, 0, 0,
xrects.Length(), xrects.Elements());
if (mPixmap != XCB_NONE) {
mPutRequest = xcb_copy_area_checked(mConnection, mPixmap, mWindow, mGC,
0, 0, 0, 0, mSize.width, mSize.height);
} else {
mPutRequest = xcb_shm_put_image_checked(mConnection, mWindow, mGC,
mSize.width, mSize.height,
0, 0, mSize.width, mSize.height,
0, 0, mDepth,
XCB_IMAGE_FORMAT_Z_PIXMAP, 0,
mShmSeg, 0);
}
// Send a request that returns a response so that we don't have to start a
// sync in nsShmImage::CreateDrawTarget to retrieve the result of mPutRequest.
mSyncRequest = xcb_get_input_focus(mConnection);
mRequestPending = true;
xcb_flush(mConnection);
}
Example 14: FormatTMTime
//......... part of the code is omitted here .........
break;
case kDateFormatYearMonth:
case kDateFormatWeekday:
dateStyle = kCFDateFormatterNoStyle; // formats handled below
break;
case kDateFormatNone:
dateStyle = kCFDateFormatterNoStyle;
break;
default:
NS_ERROR("Unknown nsDateFormatSelector");
res = NS_ERROR_FAILURE;
dateStyle = kCFDateFormatterNoStyle;
}
// Get the time style for the formatter:
CFDateFormatterStyle timeStyle;
switch (timeFormatSelector) {
case kTimeFormatSeconds:
case kTimeFormatSecondsForce24Hour: // 24 hour part fixed below
timeStyle = kCFDateFormatterMediumStyle;
break;
case kTimeFormatNoSeconds:
case kTimeFormatNoSecondsForce24Hour: // 24 hour part fixed below
timeStyle = kCFDateFormatterShortStyle;
break;
case kTimeFormatNone:
timeStyle = kCFDateFormatterNoStyle;
break;
default:
NS_ERROR("Unknown nsTimeFormatSelector");
res = NS_ERROR_FAILURE;
timeStyle = kCFDateFormatterNoStyle;
}
// Create the formatter and fix up its formatting as necessary:
CFDateFormatterRef formatter =
CFDateFormatterCreate(nullptr, formatterLocale, dateStyle, timeStyle);
CFRelease(formatterLocale);
if (dateFormatSelector == kDateFormatYearMonth ||
dateFormatSelector == kDateFormatWeekday) {
CFStringRef dateFormat =
dateFormatSelector == kDateFormatYearMonth ? CFSTR("yyyy/MM ") : CFSTR("EEE ");
CFStringRef oldFormat = CFDateFormatterGetFormat(formatter);
CFMutableStringRef newFormat = CFStringCreateMutableCopy(nullptr, 0, oldFormat);
CFStringInsert(newFormat, 0, dateFormat);
CFDateFormatterSetFormat(formatter, newFormat);
CFRelease(newFormat); // note we don't own oldFormat
}
if (timeFormatSelector == kTimeFormatSecondsForce24Hour ||
timeFormatSelector == kTimeFormatNoSecondsForce24Hour) {
// Replace "h" with "H", and remove "a":
CFStringRef oldFormat = CFDateFormatterGetFormat(formatter);
CFMutableStringRef newFormat = CFStringCreateMutableCopy(nullptr, 0, oldFormat);
CFIndex replaceCount = CFStringFindAndReplace(newFormat,
CFSTR("h"), CFSTR("H"),
CFRangeMake(0, CFStringGetLength(newFormat)),
0);
NS_ASSERTION(replaceCount <= 2, "Unexpected number of \"h\" occurrences");
replaceCount = CFStringFindAndReplace(newFormat,
CFSTR("a"), CFSTR(""),
CFRangeMake(0, CFStringGetLength(newFormat)),
0);
NS_ASSERTION(replaceCount <= 1, "Unexpected number of \"a\" occurrences");
CFDateFormatterSetFormat(formatter, newFormat);
CFRelease(newFormat); // note we don't own oldFormat
}
// Now get the formatted date:
CFGregorianDate date;
date.second = tmTime->tm_sec;
date.minute = tmTime->tm_min;
date.hour = tmTime->tm_hour;
date.day = tmTime->tm_mday; // Mac is 1-based, tm is 1-based
date.month = tmTime->tm_mon + 1; // Mac is 1-based, tm is 0-based
date.year = tmTime->tm_year + 1900;
CFTimeZoneRef timeZone = CFTimeZoneCopySystem(); // tmTime is in local time
CFAbsoluteTime absTime = CFGregorianDateGetAbsoluteTime(date, timeZone);
CFRelease(timeZone);
CFStringRef formattedDate = CFDateFormatterCreateStringWithAbsoluteTime(nullptr,
formatter,
absTime);
CFIndex stringLen = CFStringGetLength(formattedDate);
AutoTArray<UniChar, 256> stringBuffer;
stringBuffer.SetLength(stringLen + 1);
CFStringGetCharacters(formattedDate, CFRangeMake(0, stringLen), stringBuffer.Elements());
stringOut.Assign(reinterpret_cast<char16_t*>(stringBuffer.Elements()), stringLen);
CFRelease(formattedDate);
CFRelease(formatter);
return res;
}
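Example 14 ends with another recurring move: a temporary AutoTArray of UTF-16 code units is filled through Elements() (here by CFStringGetCharacters) and then copied into an nsString with Assign(). Reduced to its essentials below; the filler function is a hypothetical stand-in.
#include "nsTArray.h"
#include "nsString.h"
#include <cstdint>

// Stand-in for an API such as CFStringGetCharacters() that writes aLength
// UTF-16 code units into a caller-provided buffer.
extern "C" void GetUtf16Chars(char16_t* aOut, uint32_t aLength);

static void ReadIntoString(uint32_t aLength, nsString& aOut)
{
  AutoTArray<char16_t, 256> buffer;
  buffer.SetLength(aLength + 1);            // mirror the example's extra slot
  GetUtf16Chars(buffer.Elements(), aLength);
  aOut.Assign(buffer.Elements(), aLength);  // copy exactly aLength code units
}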
Example 15: while
//......... part of the code is omitted here .........
c.baseGlyph = gIndex;
c.nGlyphs = 0;
++cIndex;
}
// increment cluster's glyph count to include current slot
NS_ASSERTION(cIndex < aLength, "cIndex beyond word length");
++clusters[cIndex].nGlyphs;
// bump |after| index if it falls in the middle of a surrogate pair
if (NS_IS_HIGH_SURROGATE(aText[after]) && after < aLength - 1 &&
NS_IS_LOW_SURROGATE(aText[after + 1])) {
after++;
}
// extend cluster if necessary to reach the glyph's "after" index
if (clusters[cIndex].baseChar + clusters[cIndex].nChars < after + 1) {
clusters[cIndex].nChars = after + 1 - clusters[cIndex].baseChar;
}
}
bool roundX, roundY;
GetRoundOffsetsToPixels(aDrawTarget, &roundX, &roundY);
gfxShapedText::CompressedGlyph *charGlyphs =
aShapedText->GetCharacterGlyphs() + aOffset;
// now put glyphs into the textrun, one cluster at a time
for (uint32_t i = 0; i <= cIndex; ++i) {
const Cluster& c = clusters[i];
float adv; // total advance of the cluster
if (rtl) {
if (i == 0) {
adv = gr_seg_advance_X(aSegment) - xLocs[c.baseGlyph];
} else {
adv = xLocs[clusters[i-1].baseGlyph] - xLocs[c.baseGlyph];
}
} else {
if (i == cIndex) {
adv = gr_seg_advance_X(aSegment) - xLocs[c.baseGlyph];
} else {
adv = xLocs[clusters[i+1].baseGlyph] - xLocs[c.baseGlyph];
}
}
// Check for default-ignorable char that didn't get filtered, combined,
// etc by the shaping process, and skip it.
uint32_t offs = c.baseChar;
NS_ASSERTION(offs < aLength, "unexpected offset");
if (c.nGlyphs == 1 && c.nChars == 1 &&
aShapedText->FilterIfIgnorable(aOffset + offs, aText[offs])) {
continue;
}
uint32_t appAdvance = roundX ? NSToIntRound(adv) * dev2appUnits :
NSToIntRound(adv * dev2appUnits);
if (c.nGlyphs == 1 &&
gfxShapedText::CompressedGlyph::IsSimpleGlyphID(gids[c.baseGlyph]) &&
gfxShapedText::CompressedGlyph::IsSimpleAdvance(appAdvance) &&
charGlyphs[offs].IsClusterStart() &&
yLocs[c.baseGlyph] == 0)
{
charGlyphs[offs].SetSimpleGlyph(appAdvance, gids[c.baseGlyph]);
} else {
// not a one-to-one mapping with simple metrics: use DetailedGlyph
AutoTArray<gfxShapedText::DetailedGlyph,8> details;
float clusterLoc;
for (uint32_t j = c.baseGlyph; j < c.baseGlyph + c.nGlyphs; ++j) {
gfxShapedText::DetailedGlyph* d = details.AppendElement();
d->mGlyphID = gids[j];
d->mYOffset = roundY ? NSToIntRound(-yLocs[j]) * dev2appUnits :
-yLocs[j] * dev2appUnits;
if (j == c.baseGlyph) {
d->mXOffset = 0;
d->mAdvance = appAdvance;
clusterLoc = xLocs[j];
} else {
float dx = rtl ? (xLocs[j] - clusterLoc) :
(xLocs[j] - clusterLoc - adv);
d->mXOffset = roundX ? NSToIntRound(dx) * dev2appUnits :
dx * dev2appUnits;
d->mAdvance = 0;
}
}
gfxShapedText::CompressedGlyph g;
g.SetComplex(charGlyphs[offs].IsClusterStart(),
true, details.Length());
aShapedText->SetGlyphs(aOffset + offs, g, details.Elements());
}
for (uint32_t j = c.baseChar + 1; j < c.baseChar + c.nChars; ++j) {
NS_ASSERTION(j < aLength, "unexpected offset");
gfxShapedText::CompressedGlyph &g = charGlyphs[j];
NS_ASSERTION(!g.IsSimpleGlyph(), "overwriting a simple glyph");
g.SetComplex(g.IsClusterStart(), false, 0);
}
}
return NS_OK;
}