

C++ AutoTArray::SetLength Method Code Examples

This article collects typical usage examples of the C++ AutoTArray::SetLength method. If you are wondering what exactly C++ AutoTArray::SetLength does, how to use it, or where to find practical examples, the curated code samples below may help. You can also explore further usage examples of AutoTArray, the class this method belongs to.


The following presents 15 code examples of the AutoTArray::SetLength method, sorted by popularity by default.
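Before the examples, here is a minimal sketch of the pattern they all share. This sketch is not taken from any of the projects below; it assumes a Gecko build environment where nsTArray.h and mozilla/fallible.h are available, and FillSilence is a hypothetical helper. AutoTArray<T, N> is an nsTArray with inline storage for N elements; SetLength resizes the array and only allocates on the heap when the requested length exceeds N. The plain SetLength() is infallible (it aborts on out-of-memory), while the fallible overload seen in Example 15 returns false on allocation failure.

#include <stdint.h>
#include <string.h>
#include "nsTArray.h"          // Gecko array types: AutoTArray, nsTArray
#include "mozilla/fallible.h"  // mozilla::fallible tag for fallible allocation

// Hypothetical helper: size a per-channel pointer array and a zeroed sample buffer.
void FillSilence(uint32_t aChannels, uint32_t aFrames)
{
  // Inline storage for 2 pointers: no heap allocation while aChannels <= 2.
  AutoTArray<float*, 2> channelPtrs;
  channelPtrs.SetLength(aChannels);  // infallible; aborts the process on OOM

  // Large buffers typically use the fallible overload and check the result.
  AutoTArray<float, 1024> samples;
  if (!samples.SetLength(aChannels * aFrames, mozilla::fallible)) {
    return;  // allocation failed; bail out instead of crashing
  }
  memset(samples.Elements(), 0, samples.Length() * sizeof(float));
}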

Example 1: PodCopy

void
AudioCaptureStream::MixerCallback(AudioDataValue* aMixedBuffer,
                                  AudioSampleFormat aFormat, uint32_t aChannels,
                                  uint32_t aFrames, uint32_t aSampleRate)
{
  AutoTArray<nsTArray<AudioDataValue>, MONO> output;
  AutoTArray<const AudioDataValue*, MONO> bufferPtrs;
  output.SetLength(MONO);
  bufferPtrs.SetLength(MONO);

  uint32_t written = 0;
  // We need to copy here, because the mixer will reuse the storage, we should
  // not hold onto it. Buffers are in planar format.
  for (uint32_t channel = 0; channel < aChannels; channel++) {
    AudioDataValue* out = output[channel].AppendElements(aFrames);
    PodCopy(out, aMixedBuffer + written, aFrames);
    bufferPtrs[channel] = out;
    written += aFrames;
  }
  AudioChunk chunk;
  chunk.mBuffer = new mozilla::SharedChannelArrayBuffer<AudioDataValue>(&output);
  chunk.mDuration = aFrames;
  chunk.mBufferFormat = aFormat;
  chunk.mVolume = 1.0f;
  chunk.mChannelData.SetLength(MONO);
  for (uint32_t channel = 0; channel < aChannels; channel++) {
    chunk.mChannelData[channel] = bufferPtrs[channel];
  }

  // Now we have mixed data, simply append it to our track.
  EnsureTrack(mTrackId)->Get<AudioSegment>()->AppendAndConsumeChunk(&chunk);
}
Developer ID: Shaif95, Project: gecko-dev, Lines: 32, Source: AudioCaptureStream.cpp

Example 2: switch

/*static*/
void
AudioTrackEncoder::InterleaveTrackData(AudioChunk& aChunk,
                                       int32_t aDuration,
                                       uint32_t aOutputChannels,
                                       AudioDataValue* aOutput)
{
    switch(aChunk.mBufferFormat) {
    case AUDIO_FORMAT_S16: {
        AutoTArray<const int16_t*, 2> array;
        array.SetLength(aOutputChannels);
        for (uint32_t i = 0; i < array.Length(); i++) {
            array[i] = static_cast<const int16_t*>(aChunk.mChannelData[i]);
        }
        InterleaveTrackData(array, aDuration, aOutputChannels, aOutput, aChunk.mVolume);
        break;
    }
    case AUDIO_FORMAT_FLOAT32: {
        AutoTArray<const float*, 2> array;
        array.SetLength(aOutputChannels);
        for (uint32_t i = 0; i < array.Length(); i++) {
            array[i] = static_cast<const float*>(aChunk.mChannelData[i]);
        }
        InterleaveTrackData(array, aDuration, aOutputChannels, aOutput, aChunk.mVolume);
        break;
    }
    case AUDIO_FORMAT_SILENCE: {
        MOZ_ASSERT(false, "To implement.");
    }
    };
}
Developer ID: leplatrem, Project: gecko-dev, Lines: 31, Source: TrackEncoder.cpp

Example 3: PodZero

template <typename T>
static void
CopyChunkToBlock(AudioChunk& aInput, AudioBlock *aBlock,
                 uint32_t aOffsetInBlock)
{
  uint32_t blockChannels = aBlock->ChannelCount();
  AutoTArray<const T*,2> channels;
  if (aInput.IsNull()) {
    channels.SetLength(blockChannels);
    PodZero(channels.Elements(), blockChannels);
  } else {
    const nsTArray<const T*>& inputChannels = aInput.ChannelData<T>();
    channels.SetLength(inputChannels.Length());
    PodCopy(channels.Elements(), inputChannels.Elements(), channels.Length());
    if (channels.Length() != blockChannels) {
      // We only need to upmix here because aBlock's channel count has been
      // chosen to be a superset of the channel count of every chunk.
      AudioChannelsUpMix(&channels, blockChannels, static_cast<T*>(nullptr));
    }
  }

  for (uint32_t c = 0; c < blockChannels; ++c) {
    float* outputData = aBlock->ChannelFloatsForWrite(c) + aOffsetInBlock;
    if (channels[c]) {
      ConvertAudioSamplesWithScale(channels[c], outputData, aInput.GetDuration(), aInput.mVolume);
    } else {
      PodZero(outputData, aInput.GetDuration());
    }
  }
}
Developer ID: Wafflespeanut, Project: gecko-dev, Lines: 29, Source: AudioNodeExternalInputStream.cpp

Example 4: calculateNormalizationScale

Reverb::Reverb(ThreadSharedFloatArrayBufferList* impulseResponse, size_t impulseResponseBufferLength, size_t maxFFTSize, bool useBackgroundThreads, bool normalize, float sampleRate)
{
    float scale = 1;

    AutoTArray<const float*,4> irChannels;
    for (size_t i = 0; i < impulseResponse->GetChannels(); ++i) {
        irChannels.AppendElement(impulseResponse->GetData(i));
    }
    AutoTArray<float,1024> tempBuf;

    if (normalize) {
        scale = calculateNormalizationScale(impulseResponse, impulseResponseBufferLength, sampleRate);

        if (scale) {
            tempBuf.SetLength(irChannels.Length()*impulseResponseBufferLength);
            for (uint32_t i = 0; i < irChannels.Length(); ++i) {
                float* buf = &tempBuf[i*impulseResponseBufferLength];
                AudioBufferCopyWithScale(irChannels[i], scale, buf,
                                         impulseResponseBufferLength);
                irChannels[i] = buf;
            }
        }
    }

    initialize(irChannels, impulseResponseBufferLength,
               maxFFTSize, useBackgroundThreads);
}
Developer ID: bgrins, Project: gecko-dev, Lines: 27, Source: Reverb.cpp

Example 5: ceil

void
AudioStream::GetTimeStretched(AudioBufferWriter& aWriter)
{
  mMonitor.AssertCurrentThreadOwns();

  // We need to call the non-locking version, because we already have the lock.
  if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
    return;
  }

  uint32_t toPopFrames =
    ceil(aWriter.Available() * mAudioClock.GetPlaybackRate());

  while (mTimeStretcher->numSamples() < aWriter.Available()) {
    UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
    if (c->Frames() == 0) {
      break;
    }
    MOZ_ASSERT(c->Frames() <= toPopFrames);
    if (IsValidAudioFormat(c.get())) {
      mTimeStretcher->putSamples(c->Data(), c->Frames());
    } else {
      // Write silence if invalid format.
      AutoTArray<AudioDataValue, 1000> buf;
      buf.SetLength(mOutChannels * c->Frames());
      memset(buf.Elements(), 0, buf.Length() * sizeof(AudioDataValue));
      mTimeStretcher->putSamples(buf.Elements(), c->Frames());
    }
  }

  auto timeStretcher = mTimeStretcher;
  aWriter.Write([timeStretcher] (AudioDataValue* aPtr, uint32_t aFrames) {
    return timeStretcher->receiveSamples(aPtr, aFrames);
  }, aWriter.Available());
}
Developer ID: SJasoria, Project: gecko-dev, Lines: 35, Source: AudioStream.cpp

Example 6: ReadChunk

  // Read audio data from aChunk, resample it if needed, and then send the
  // result to the OMX input buffer (or buffers, if one buffer is not enough).
  // aSamplesRead will be the number of samples that have been read from aChunk.
  BufferState ReadChunk(AudioChunk& aChunk, size_t* aSamplesRead)
  {
    size_t chunkSamples = aChunk.GetDuration();
    size_t bytesToCopy = chunkSamples * mOMXAEncoder.mResamplingRatio
                         * mOMXAEncoder.mChannels * sizeof(AudioDataValue);
    size_t bytesCopied = 0;
    if (bytesToCopy <= AvailableSize()) {
      if (aChunk.IsNull()) {
        bytesCopied = SendSilenceToBuffer(chunkSamples);
      } else {
        bytesCopied = SendChunkToBuffer(aChunk, chunkSamples);
      }
      UpdateAfterSendChunk(chunkSamples, bytesCopied, aSamplesRead);
    } else {
      // Interleave data to a temporary buffer.
      AutoTArray<AudioDataValue, 9600> pcm;
      pcm.SetLength(bytesToCopy);
      AudioDataValue* interleavedSource = pcm.Elements();
      AudioTrackEncoder::InterleaveTrackData(aChunk, chunkSamples,
                                             mOMXAEncoder.mChannels,
                                             interleavedSource);

      // When the data size of chunk is larger than the buffer capacity,
      // we split it into sub-chunks to fill up buffers.
      size_t subChunkSamples = 0;
      while(GetNextSubChunk(bytesToCopy, subChunkSamples)) {
        // To avoid enqueueing an empty buffer, we follow the order that
        // clear up buffer first, then create one, send data to it in the end.
        if (!IsEmpty()) {
          // Submit the filled-up buffer and request a new buffer.
          status_t result = Enqueue(mOMXAEncoder.mTimestamp,
                                    mInputFlags & ~OMXCodecWrapper::BUFFER_EOS);
          if (result != OK) {
            return BUFFER_FAIL;
          }

          result = Dequeue();
          if (result == -EAGAIN) {
            return WAIT_FOR_NEW_BUFFER;
          }
          if (result != OK) {
            return BUFFER_FAIL;
          }
        }
        if (aChunk.IsNull()) {
          bytesCopied = SendSilenceToBuffer(subChunkSamples);
        } else {
          bytesCopied = SendInterleavedSubChunkToBuffer(interleavedSource, subChunkSamples);
        }
        UpdateAfterSendChunk(subChunkSamples, bytesCopied, aSamplesRead);
        // Move to the position where samples have not yet been sent to the buffer.
        interleavedSource += subChunkSamples * mOMXAEncoder.mChannels;
      }
    }
    return BUFFER_OK;
  }
Developer ID: MichaelKohler, Project: gecko-dev, Lines: 59, Source: OMXCodecWrapper.cpp

Example 7: fwrite

template <typename T>
typename EnableIf<IsSame<T, float>::value, void>::Type
WriteDumpFileHelper(T* aInput, size_t aSamples, FILE* aFile) {
  AutoTArray<uint8_t, 1024*2> buf;
  buf.SetLength(aSamples*2);
  uint8_t* output = buf.Elements();
  for (uint32_t i = 0; i < aSamples; ++i) {
    SetUint16LE(output + i*2, int16_t(aInput[i]*32767.0f));
  }
  fwrite(output, 2, aSamples, aFile);
  fflush(aFile);
}
Developer ID: SJasoria, Project: gecko-dev, Lines: 11, Source: AudioStream.cpp

Example 8: GetDataFromMatrix

void
DOMMatrixReadOnly::ToFloat64Array(JSContext* aCx, JS::MutableHandle<JSObject*> aResult, ErrorResult& aRv) const
{
  AutoTArray<double, 16> arr;
  arr.SetLength(16);
  GetDataFromMatrix(this, arr.Elements());
  JS::Rooted<JS::Value> value(aCx);
  if (!ToJSValue(aCx, TypedArrayCreator<Float64Array>(arr), &value)) {
    aRv.Throw(NS_ERROR_OUT_OF_MEMORY);
    return;
  }
  aResult.set(&value.toObject());
}
Developer ID: GuanWen-Chen, Project: gecko-b2g, Lines: 13, Source: DOMMatrix.cpp

Example 9: ceil

void
AudioStream::GetTimeStretched(AudioBufferWriter& aWriter)
{
  mMonitor.AssertCurrentThreadOwns();

  // We need to call the non-locking version, because we already have the lock.
  if (EnsureTimeStretcherInitializedUnlocked() != NS_OK) {
    return;
  }

  uint32_t toPopFrames =
    ceil(aWriter.Available() * mAudioClock.GetPlaybackRate());

  while (mTimeStretcher->numSamples() < aWriter.Available()) {
    UniquePtr<Chunk> c = mDataSource.PopFrames(toPopFrames);
    if (c->Frames() == 0) {
      break;
    }
    MOZ_ASSERT(c->Frames() <= toPopFrames);
    if (IsValidAudioFormat(c.get())) {
      mTimeStretcher->putSamples(c->Data(), c->Frames());
    } else {
      // Write silence if invalid format.
      AutoTArray<AudioDataValue, 1000> buf;
      auto size = CheckedUint32(mOutChannels) * c->Frames();
      if (!size.isValid()) {
        // The overflow should not happen in normal case.
        LOGW("Invalid member data: %d channels, %d frames", mOutChannels, c->Frames());
        return;
      }
      buf.SetLength(size.value());
      size = size * sizeof(AudioDataValue);
      if (!size.isValid()) {
        LOGW("The required memory size is too large.");
        return;
      }
      memset(buf.Elements(), 0, size.value());
      mTimeStretcher->putSamples(buf.Elements(), c->Frames());
    }
  }

  auto timeStretcher = mTimeStretcher;
  aWriter.Write([timeStretcher] (AudioDataValue* aPtr, uint32_t aFrames) {
    return timeStretcher->receiveSamples(aPtr, aFrames);
  }, aWriter.Available());
}
Developer ID: staktrace, Project: gecko-dev, Lines: 46, Source: AudioStream.cpp

Example 10: if

void
AudioNodeStream::UpMixDownMixChunk(const AudioBlock* aChunk,
                                   uint32_t aOutputChannelCount,
                                   nsTArray<const float*>& aOutputChannels,
                                   DownmixBufferType& aDownmixBuffer)
{
  for (uint32_t i = 0; i < aChunk->ChannelCount(); i++) {
    aOutputChannels.AppendElement(static_cast<const float*>(aChunk->mChannelData[i]));
  }
  if (aOutputChannels.Length() < aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      AudioChannelsUpMix<float>(&aOutputChannels, aOutputChannelCount, nullptr);
      NS_ASSERTION(aOutputChannelCount == aOutputChannels.Length(),
                   "We called GetAudioChannelsSuperset to avoid this");
    } else {
      // Fill up the remaining aOutputChannels by zeros
      for (uint32_t j = aOutputChannels.Length(); j < aOutputChannelCount; ++j) {
        aOutputChannels.AppendElement(nullptr);
      }
    }
  } else if (aOutputChannels.Length() > aOutputChannelCount) {
    if (mChannelInterpretation == ChannelInterpretation::Speakers) {
      AutoTArray<float*,GUESS_AUDIO_CHANNELS> outputChannels;
      outputChannels.SetLength(aOutputChannelCount);
      aDownmixBuffer.SetLength(aOutputChannelCount * WEBAUDIO_BLOCK_SIZE);
      for (uint32_t j = 0; j < aOutputChannelCount; ++j) {
        outputChannels[j] = &aDownmixBuffer[j * WEBAUDIO_BLOCK_SIZE];
      }

      AudioChannelsDownMix(aOutputChannels, outputChannels.Elements(),
                           aOutputChannelCount, WEBAUDIO_BLOCK_SIZE);

      aOutputChannels.SetLength(aOutputChannelCount);
      for (uint32_t j = 0; j < aOutputChannels.Length(); ++j) {
        aOutputChannels[j] = outputChannels[j];
      }
    } else {
      // Drop the remaining aOutputChannels
      aOutputChannels.RemoveElementsAt(aOutputChannelCount,
        aOutputChannels.Length() - aOutputChannelCount);
    }
  }
}
Developer ID: Wafflespeanut, Project: gecko-dev, Lines: 43, Source: AudioNodeStream.cpp

Example 11: SendChunkToBuffer

 // Interleave chunk data and send it to the buffer;
 // returns the number of bytes of audio data copied.
 size_t SendChunkToBuffer(AudioChunk& aSource, size_t aSamplesNum)
 {
   AudioDataValue* dst = reinterpret_cast<AudioDataValue*>(GetPointer());
   size_t bytesToCopy = aSamplesNum * mOMXAEncoder.mResamplingRatio
                        * mOMXAEncoder.mChannels * sizeof(AudioDataValue);
   uint32_t dstSamplesCopied = aSamplesNum;
   if (mOMXAEncoder.mResampler) {
     AutoTArray<AudioDataValue, 9600> pcm;
     pcm.SetLength(bytesToCopy);
     AudioTrackEncoder::InterleaveTrackData(aSource, aSamplesNum,
                                            mOMXAEncoder.mChannels,
                                            pcm.Elements());
     int16_t* tempSource = reinterpret_cast<int16_t*>(pcm.Elements());
     speex_resampler_process_interleaved_int(mOMXAEncoder.mResampler, tempSource,
                                             &aSamplesNum, dst,
                                             &dstSamplesCopied);
   } else {
     AudioTrackEncoder::InterleaveTrackData(aSource, aSamplesNum,
                                            mOMXAEncoder.mChannels, dst);
   }
   return dstSamplesCopied * mOMXAEncoder.mChannels * sizeof(AudioDataValue);
 }
Developer ID: MichaelKohler, Project: gecko-dev, Lines: 24, Source: OMXCodecWrapper.cpp

Example 12: mon

nsresult
VorbisTrackEncoder::GetEncodedTrack(EncodedFrameContainer& aData)
{
  if (mEosSetInEncoder) {
    return NS_OK;
  }

  PROFILER_LABEL("VorbisTrackEncoder", "GetEncodedTrack",
    js::ProfileEntry::Category::OTHER);

  nsAutoPtr<AudioSegment> sourceSegment;
  sourceSegment = new AudioSegment();
  {
    // Move all the samples from mRawSegment to sourceSegment. We only hold
    // the monitor in this block.
    ReentrantMonitorAutoEnter mon(mReentrantMonitor);

    // Wait if mEncoder is not initialized or there is not enough raw data,
    // unless we have reached the end of the stream or have been canceled.
    while (!mCanceled && mRawSegment.GetDuration() < GetPacketDuration() &&
           !mEndOfStream) {
      mon.Wait();
    }
    VORBISLOG("GetEncodedTrack passes wait, duration is %lld\n",
      mRawSegment.GetDuration());
    if (mCanceled || mEncodingComplete) {
      return NS_ERROR_FAILURE;
    }

    sourceSegment->AppendFrom(&mRawSegment);
  }

  if (mEndOfStream && (sourceSegment->GetDuration() == 0)
      && !mEosSetInEncoder) {
    mEncodingComplete = true;
    mEosSetInEncoder = true;
    VORBISLOG("[Vorbis] Done encoding.");
    vorbis_analysis_wrote(&mVorbisDsp, 0);
    GetEncodedFrames(aData);

    return NS_OK;
  }

  // Start encoding data.
  AudioSegment::ChunkIterator iter(*sourceSegment);

  AudioDataValue **vorbisBuffer =
    vorbis_analysis_buffer(&mVorbisDsp, (int)sourceSegment->GetDuration());

  int framesCopied = 0;
  AutoTArray<AudioDataValue, 9600> interleavedPcm;
  AutoTArray<AudioDataValue, 9600> nonInterleavedPcm;
  interleavedPcm.SetLength(sourceSegment->GetDuration() * mChannels);
  nonInterleavedPcm.SetLength(sourceSegment->GetDuration() * mChannels);
  while (!iter.IsEnded()) {
    AudioChunk chunk = *iter;
    int frameToCopy = chunk.GetDuration();
    if (!chunk.IsNull()) {
      InterleaveTrackData(chunk, frameToCopy, mChannels,
                          interleavedPcm.Elements() + framesCopied * mChannels);
    } else { // empty data
      memset(interleavedPcm.Elements() + framesCopied * mChannels, 0,
             frameToCopy * mChannels * sizeof(AudioDataValue));
    }
    framesCopied += frameToCopy;
    iter.Next();
  }
  // De-interleave the interleavedPcm.
  DeInterleaveTrackData(interleavedPcm.Elements(), framesCopied, mChannels,
                        nonInterleavedPcm.Elements());
  // Copy the nonInterleavedPcm to vorbis buffer.
  for(uint8_t i = 0; i < mChannels; ++i) {
    memcpy(vorbisBuffer[i], nonInterleavedPcm.Elements() + framesCopied * i,
           framesCopied * sizeof(AudioDataValue));
  }

  // Now vorbisBuffer contains all the data in non-interleaved form.
  // Tell the library how much we actually submitted.
  vorbis_analysis_wrote(&mVorbisDsp, framesCopied);
  VORBISLOG("vorbis_analysis_wrote framesCopied %d\n", framesCopied);
  GetEncodedFrames(aData);

  return NS_OK;
}
Developer ID: Shaif95, Project: gecko-dev, Lines: 84, Source: VorbisTrackEncoder.cpp

Example 13: LogTime

void
MediaEngineWebRTCMicrophoneSource::InsertInGraph(const T* aBuffer,
                                                 size_t aFrames,
                                                 uint32_t aChannels)
{
  if (mState != kStarted) {
    return;
  }

  if (MOZ_LOG_TEST(AudioLogModule(), LogLevel::Debug)) {
    mTotalFrames += aFrames;
    if (mTotalFrames > mLastLogFrames + mSampleFrequency) { // ~ 1 second
      MOZ_LOG(AudioLogModule(), LogLevel::Debug,
              ("%p: Inserting %zu samples into graph, total frames = %" PRIu64,
               (void*)this, aFrames, mTotalFrames));
      mLastLogFrames = mTotalFrames;
    }
  }

  size_t len = mSources.Length();
  for (size_t i = 0; i < len; i++) {
    if (!mSources[i]) {
      continue;
    }

    TimeStamp insertTime;
    // Make sure we include the stream and the track.
    // The 0:1 is a flag to note when we've done the final insert for a given input block.
    LogTime(AsyncLatencyLogger::AudioTrackInsertion,
            LATENCY_STREAM_ID(mSources[i].get(), mTrackID),
            (i+1 < len) ? 0 : 1, insertTime);

    // Bug 971528 - Support stereo capture in gUM
    MOZ_ASSERT(aChannels == 1 || aChannels == 2,
        "GraphDriver only supports mono and stereo audio for now");

    nsAutoPtr<AudioSegment> segment(new AudioSegment());
    RefPtr<SharedBuffer> buffer =
      SharedBuffer::Create(aFrames * aChannels * sizeof(T));
    AutoTArray<const T*, 8> channels;
    if (aChannels == 1) {
      PodCopy(static_cast<T*>(buffer->Data()), aBuffer, aFrames);
      channels.AppendElement(static_cast<T*>(buffer->Data()));
    } else {
      channels.SetLength(aChannels);
      AutoTArray<T*, 8> write_channels;
      write_channels.SetLength(aChannels);
      T * samples = static_cast<T*>(buffer->Data());

      size_t offset = 0;
      for(uint32_t i = 0; i < aChannels; ++i) {
        channels[i] = write_channels[i] = samples + offset;
        offset += aFrames;
      }

      DeinterleaveAndConvertBuffer(aBuffer,
                                   aFrames,
                                   aChannels,
                                   write_channels.Elements());
    }

    MOZ_ASSERT(aChannels == channels.Length());
    segment->AppendFrames(buffer.forget(), channels, aFrames,
                         mPrincipalHandles[i]);
    segment->GetStartTime(insertTime);

    mSources[i]->AppendToTrack(mTrackID, segment);
  }
}
Developer ID: bgrins, Project: gecko-dev, Lines: 69, Source: MediaEngineWebRTCAudio.cpp

Example 14: FormatTMTime


//......... part of the code omitted .........
      break;
    case kDateFormatYearMonth:
    case kDateFormatWeekday:
      dateStyle = kCFDateFormatterNoStyle; // formats handled below
      break;
    case kDateFormatNone:
      dateStyle = kCFDateFormatterNoStyle;
      break;
    default:
      NS_ERROR("Unknown nsDateFormatSelector");
      res = NS_ERROR_FAILURE;
      dateStyle = kCFDateFormatterNoStyle;
  }
  
  // Get the time style for the formatter:
  CFDateFormatterStyle timeStyle;
  switch (timeFormatSelector) {
    case kTimeFormatSeconds:
    case kTimeFormatSecondsForce24Hour: // 24 hour part fixed below
      timeStyle = kCFDateFormatterMediumStyle;
      break;
    case kTimeFormatNoSeconds:
    case kTimeFormatNoSecondsForce24Hour: // 24 hour part fixed below
      timeStyle = kCFDateFormatterShortStyle;
      break;
    case kTimeFormatNone:
      timeStyle = kCFDateFormatterNoStyle;
      break;
    default:
      NS_ERROR("Unknown nsTimeFormatSelector");
      res = NS_ERROR_FAILURE;
      timeStyle = kCFDateFormatterNoStyle;
  }
  
  // Create the formatter and fix up its formatting as necessary:
  CFDateFormatterRef formatter =
    CFDateFormatterCreate(nullptr, formatterLocale, dateStyle, timeStyle);
  
  CFRelease(formatterLocale);
  
  if (dateFormatSelector == kDateFormatYearMonth ||
      dateFormatSelector == kDateFormatWeekday) {
    CFStringRef dateFormat =
      dateFormatSelector == kDateFormatYearMonth ? CFSTR("yyyy/MM ") : CFSTR("EEE ");
    
    CFStringRef oldFormat = CFDateFormatterGetFormat(formatter);
    CFMutableStringRef newFormat = CFStringCreateMutableCopy(nullptr, 0, oldFormat);
    CFStringInsert(newFormat, 0, dateFormat);
    CFDateFormatterSetFormat(formatter, newFormat);
    CFRelease(newFormat); // note we don't own oldFormat
  }
  
  if (timeFormatSelector == kTimeFormatSecondsForce24Hour ||
      timeFormatSelector == kTimeFormatNoSecondsForce24Hour) {
    // Replace "h" with "H", and remove "a":
    CFStringRef oldFormat = CFDateFormatterGetFormat(formatter);
    CFMutableStringRef newFormat = CFStringCreateMutableCopy(nullptr, 0, oldFormat);
    CFIndex replaceCount = CFStringFindAndReplace(newFormat,
                                                  CFSTR("h"), CFSTR("H"),
                                                  CFRangeMake(0, CFStringGetLength(newFormat)),	
                                                  0);
    NS_ASSERTION(replaceCount <= 2, "Unexpected number of \"h\" occurrences");
    replaceCount = CFStringFindAndReplace(newFormat,
                                          CFSTR("a"), CFSTR(""),
                                          CFRangeMake(0, CFStringGetLength(newFormat)),	
                                          0);
    NS_ASSERTION(replaceCount <= 1, "Unexpected number of \"a\" occurrences");
    CFDateFormatterSetFormat(formatter, newFormat);
    CFRelease(newFormat); // note we don't own oldFormat
  }
  
  // Now get the formatted date:
  CFGregorianDate date;
  date.second = tmTime->tm_sec;
  date.minute = tmTime->tm_min;
  date.hour = tmTime->tm_hour;
  date.day = tmTime->tm_mday;      // Mac is 1-based, tm is 1-based
  date.month = tmTime->tm_mon + 1; // Mac is 1-based, tm is 0-based
  date.year = tmTime->tm_year + 1900;

  CFTimeZoneRef timeZone = CFTimeZoneCopySystem(); // tmTime is in local time
  CFAbsoluteTime absTime = CFGregorianDateGetAbsoluteTime(date, timeZone);
  CFRelease(timeZone);

  CFStringRef formattedDate = CFDateFormatterCreateStringWithAbsoluteTime(nullptr,
                                                                          formatter,
                                                                          absTime);

  CFIndex stringLen = CFStringGetLength(formattedDate);

  AutoTArray<UniChar, 256> stringBuffer;
  stringBuffer.SetLength(stringLen + 1);
  CFStringGetCharacters(formattedDate, CFRangeMake(0, stringLen), stringBuffer.Elements());
  stringOut.Assign(reinterpret_cast<char16_t*>(stringBuffer.Elements()), stringLen);

  CFRelease(formattedDate);
  CFRelease(formatter);

  return res;
}
Developer ID: MekliCZ, Project: positron, Lines: 101, Source: nsDateTimeFormatMac.cpp

Example 15: while

nsresult
gfxGraphiteShaper::SetGlyphsFromSegment(DrawTarget      *aDrawTarget,
                                        gfxShapedText   *aShapedText,
                                        uint32_t         aOffset,
                                        uint32_t         aLength,
                                        const char16_t *aText,
                                        gr_segment      *aSegment)
{
    int32_t dev2appUnits = aShapedText->GetAppUnitsPerDevUnit();
    bool rtl = aShapedText->IsRightToLeft();

    uint32_t glyphCount = gr_seg_n_slots(aSegment);

    // identify clusters; graphite may have reordered/expanded/ligated glyphs.
    AutoTArray<Cluster,SMALL_GLYPH_RUN> clusters;
    AutoTArray<uint16_t,SMALL_GLYPH_RUN> gids;
    AutoTArray<float,SMALL_GLYPH_RUN> xLocs;
    AutoTArray<float,SMALL_GLYPH_RUN> yLocs;

    if (!clusters.SetLength(aLength, fallible) ||
        !gids.SetLength(glyphCount, fallible) ||
        !xLocs.SetLength(glyphCount, fallible) ||
        !yLocs.SetLength(glyphCount, fallible))
    {
        return NS_ERROR_OUT_OF_MEMORY;
    }

    // walk through the glyph slots and check which original character
    // each is associated with
    uint32_t gIndex = 0; // glyph slot index
    uint32_t cIndex = 0; // current cluster index
    for (const gr_slot *slot = gr_seg_first_slot(aSegment);
         slot != nullptr;
         slot = gr_slot_next_in_segment(slot), gIndex++)
    {
        uint32_t before =
            gr_cinfo_base(gr_seg_cinfo(aSegment, gr_slot_before(slot)));
        uint32_t after =
            gr_cinfo_base(gr_seg_cinfo(aSegment, gr_slot_after(slot)));
        gids[gIndex] = gr_slot_gid(slot);
        xLocs[gIndex] = gr_slot_origin_X(slot);
        yLocs[gIndex] = gr_slot_origin_Y(slot);

        // if this glyph has a "before" character index that precedes the
        // current cluster's char index, we need to merge preceding
        // clusters until it gets included
        while (before < clusters[cIndex].baseChar && cIndex > 0) {
            clusters[cIndex-1].nChars += clusters[cIndex].nChars;
            clusters[cIndex-1].nGlyphs += clusters[cIndex].nGlyphs;
            --cIndex;
        }

        // if there's a gap between the current cluster's base character and
        // this glyph's, extend the cluster to include the intervening chars
        if (gr_slot_can_insert_before(slot) && clusters[cIndex].nChars &&
            before >= clusters[cIndex].baseChar + clusters[cIndex].nChars)
        {
            NS_ASSERTION(cIndex < aLength - 1, "cIndex at end of word");
            Cluster& c = clusters[cIndex + 1];
            c.baseChar = clusters[cIndex].baseChar + clusters[cIndex].nChars;
            c.nChars = before - c.baseChar;
            c.baseGlyph = gIndex;
            c.nGlyphs = 0;
            ++cIndex;
        }

        // increment cluster's glyph count to include current slot
        NS_ASSERTION(cIndex < aLength, "cIndex beyond word length");
        ++clusters[cIndex].nGlyphs;

        // bump |after| index if it falls in the middle of a surrogate pair
        if (NS_IS_HIGH_SURROGATE(aText[after]) && after < aLength - 1 &&
            NS_IS_LOW_SURROGATE(aText[after + 1])) {
            after++;
        }
        // extend cluster if necessary to reach the glyph's "after" index
        if (clusters[cIndex].baseChar + clusters[cIndex].nChars < after + 1) {
            clusters[cIndex].nChars = after + 1 - clusters[cIndex].baseChar;
        }
    }

    bool roundX, roundY;
    GetRoundOffsetsToPixels(aDrawTarget, &roundX, &roundY);

    gfxShapedText::CompressedGlyph *charGlyphs =
        aShapedText->GetCharacterGlyphs() + aOffset;

    // now put glyphs into the textrun, one cluster at a time
    for (uint32_t i = 0; i <= cIndex; ++i) {
        const Cluster& c = clusters[i];

        float adv; // total advance of the cluster
        if (rtl) {
            if (i == 0) {
                adv = gr_seg_advance_X(aSegment) - xLocs[c.baseGlyph];
            } else {
                adv = xLocs[clusters[i-1].baseGlyph] - xLocs[c.baseGlyph];
            }
        } else {
            if (i == cIndex) {
//......... part of the code omitted .........
Developer ID: MichaelKohler, Project: gecko-dev, Lines: 101, Source: gfxGraphiteShaper.cpp


Note: The C++ AutoTArray::SetLength method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various programmers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce without permission.