This article collects typical usage examples of the Java property com.google.android.exoplayer2.C.TRACK_TYPE_AUDIO. If you are wondering what C.TRACK_TYPE_AUDIO is for, how to use it, or where to find usage examples, the curated examples below should help. See also the enclosing class, com.google.android.exoplayer2.C, for further details.
Below are 10 code examples of C.TRACK_TYPE_AUDIO, sorted by popularity by default.
Example 1: deriveFormat
/**
 * Derives a track format corresponding to a given container format, by combining it with sample
 * level information obtained from the samples.
 *
 * @param containerFormat The container format for which the track format should be derived.
 * @param sampleFormat A sample format from which to obtain sample level information.
 * @return The derived track format.
 */
private static Format deriveFormat(Format containerFormat, Format sampleFormat) {
  if (containerFormat == null) {
    return sampleFormat;
  }
  String codecs = null;
  int sampleTrackType = MimeTypes.getTrackType(sampleFormat.sampleMimeType);
  if (sampleTrackType == C.TRACK_TYPE_AUDIO) {
    codecs = getAudioCodecs(containerFormat.codecs);
  } else if (sampleTrackType == C.TRACK_TYPE_VIDEO) {
    codecs = getVideoCodecs(containerFormat.codecs);
  }
  return sampleFormat.copyWithContainerInfo(containerFormat.id, codecs, containerFormat.bitrate,
      containerFormat.width, containerFormat.height, containerFormat.selectionFlags,
      containerFormat.language);
}
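getAudioCodecs and getVideoCodecs are private helpers not shown here. A minimal sketch of the underlying idea, assuming a comma-separated RFC 6381 codecs string; codecsOfType is a hypothetical stand-in, not the library's implementation:

import com.google.android.exoplayer2.C;
import com.google.android.exoplayer2.util.MimeTypes;

public final class CodecSplitSketch {

  // Hypothetical helper: returns the first codec in a comma-separated codecs
  // string whose MIME type maps to the requested track type.
  static String codecsOfType(String codecs, int trackType) {
    if (codecs == null) {
      return null;
    }
    for (String codec : codecs.split(",")) {
      String mimeType = MimeTypes.getMediaMimeType(codec.trim());
      if (MimeTypes.getTrackType(mimeType) == trackType) {
        return codec.trim();
      }
    }
    return null;
  }

  public static void main(String[] args) {
    String containerCodecs = "avc1.64001f,mp4a.40.2";
    System.out.println(codecsOfType(containerCodecs, C.TRACK_TYPE_AUDIO)); // mp4a.40.2
    System.out.println(codecsOfType(containerCodecs, C.TRACK_TYPE_VIDEO)); // avc1.64001f
  }
}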
Example 2: parseHdlr
/**
 * Parses an hdlr atom.
 *
 * @param hdlr The hdlr atom to decode.
 * @return The track type.
 */
private static int parseHdlr(ParsableByteArray hdlr) {
  hdlr.setPosition(Atom.FULL_HEADER_SIZE + 4);
  int trackType = hdlr.readInt();
  if (trackType == TYPE_soun) {
    return C.TRACK_TYPE_AUDIO;
  } else if (trackType == TYPE_vide) {
    return C.TRACK_TYPE_VIDEO;
  } else if (trackType == TYPE_text || trackType == TYPE_sbtl || trackType == TYPE_subt
      || trackType == TYPE_clcp) {
    return C.TRACK_TYPE_TEXT;
  } else if (trackType == TYPE_meta) {
    return C.TRACK_TYPE_METADATA;
  } else {
    return C.TRACK_TYPE_UNKNOWN;
  }
}
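TYPE_soun, TYPE_vide, and the other constants compared against are four-character atom codes packed into ints. A minimal sketch of how such a constant is computed, mirroring what Util.getIntegerCodeForString does in the library:

public final class FourCcSketch {

  // Packs an atom type such as "soun" into a big-endian int, one byte per
  // character, mirroring Util.getIntegerCodeForString.
  static int fourCc(String code) {
    int result = 0;
    for (int i = 0; i < code.length(); i++) {
      result = (result << 8) | code.charAt(i);
    }
    return result;
  }

  public static void main(String[] args) {
    // parseHdlr compares this value against the int read from the hdlr payload.
    System.out.printf("0x%08X%n", fourCc("soun")); // 0x736F756E
  }
}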
Example 3: getDefaultBufferSize
/**
 * Maps a {@link C} {@code TRACK_TYPE_*} constant to the corresponding {@link C}
 * {@code DEFAULT_*_BUFFER_SIZE} constant.
 *
 * @param trackType The track type.
 * @return The corresponding default buffer size in bytes.
 */
public static int getDefaultBufferSize(int trackType) {
  switch (trackType) {
    case C.TRACK_TYPE_DEFAULT:
      return C.DEFAULT_MUXED_BUFFER_SIZE;
    case C.TRACK_TYPE_AUDIO:
      return C.DEFAULT_AUDIO_BUFFER_SIZE;
    case C.TRACK_TYPE_VIDEO:
      return C.DEFAULT_VIDEO_BUFFER_SIZE;
    case C.TRACK_TYPE_TEXT:
      return C.DEFAULT_TEXT_BUFFER_SIZE;
    case C.TRACK_TYPE_METADATA:
      return C.DEFAULT_METADATA_BUFFER_SIZE;
    default:
      throw new IllegalStateException();
  }
}
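A usage sketch: the returned sizes are plain byte counts, so a total buffer target for a muxed stream's renderers can be summed directly. This assumes the method lives on com.google.android.exoplayer2.util.Util, as in ExoPlayer 2.x:

import com.google.android.exoplayer2.C;
import com.google.android.exoplayer2.util.Util;

public final class BufferSizeSketch {
  public static void main(String[] args) {
    // One buffer allocation target per renderer track type in the stream.
    int[] trackTypes = {C.TRACK_TYPE_VIDEO, C.TRACK_TYPE_AUDIO, C.TRACK_TYPE_TEXT};
    int totalBufferSize = 0;
    for (int trackType : trackTypes) {
      totalBufferSize += Util.getDefaultBufferSize(trackType);
    }
    System.out.println("Target buffer: " + totalBufferSize + " bytes");
  }
}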
Example 4: getTrackType
/**
 * Returns the {@link C}{@code .TRACK_TYPE_*} constant that corresponds to a specified mime type,
 * or {@link C#TRACK_TYPE_UNKNOWN} if the mime type is not known or the mapping cannot be
 * established.
 *
 * @param mimeType The mime type.
 * @return The {@link C}{@code .TRACK_TYPE_*} constant that corresponds to the mime type.
 */
public static int getTrackType(String mimeType) {
  if (TextUtils.isEmpty(mimeType)) {
    return C.TRACK_TYPE_UNKNOWN;
  } else if (isAudio(mimeType)) {
    return C.TRACK_TYPE_AUDIO;
  } else if (isVideo(mimeType)) {
    return C.TRACK_TYPE_VIDEO;
  } else if (isText(mimeType) || APPLICATION_CEA608.equals(mimeType)
      || APPLICATION_CEA708.equals(mimeType) || APPLICATION_MP4CEA608.equals(mimeType)
      || APPLICATION_SUBRIP.equals(mimeType) || APPLICATION_TTML.equals(mimeType)
      || APPLICATION_TX3G.equals(mimeType) || APPLICATION_MP4VTT.equals(mimeType)
      || APPLICATION_RAWCC.equals(mimeType) || APPLICATION_VOBSUB.equals(mimeType)
      || APPLICATION_PGS.equals(mimeType) || APPLICATION_DVBSUBS.equals(mimeType)) {
    return C.TRACK_TYPE_TEXT;
  } else if (APPLICATION_ID3.equals(mimeType)
      || APPLICATION_EMSG.equals(mimeType)
      || APPLICATION_SCTE35.equals(mimeType)
      || APPLICATION_CAMERA_MOTION.equals(mimeType)) {
    return C.TRACK_TYPE_METADATA;
  } else {
    return C.TRACK_TYPE_UNKNOWN;
  }
}
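A usage sketch showing the mapping in action, using MIME type constants that MimeTypes itself defines:

import com.google.android.exoplayer2.C;
import com.google.android.exoplayer2.util.MimeTypes;

public final class TrackTypeSketch {
  public static void main(String[] args) {
    // Each call maps a MIME type onto one of the C.TRACK_TYPE_* constants.
    System.out.println(MimeTypes.getTrackType(MimeTypes.AUDIO_AAC) == C.TRACK_TYPE_AUDIO);          // true
    System.out.println(MimeTypes.getTrackType(MimeTypes.VIDEO_H264) == C.TRACK_TYPE_VIDEO);         // true
    System.out.println(MimeTypes.getTrackType(MimeTypes.APPLICATION_ID3) == C.TRACK_TYPE_METADATA); // true
    System.out.println(MimeTypes.getTrackType("application/unknown") == C.TRACK_TYPE_UNKNOWN);      // true
  }
}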
Example 5: updateButtonVisibilities
private void updateButtonVisibilities() {
  debugRootView.removeAllViews();
  retryButton.setVisibility(playerNeedsSource ? View.VISIBLE : View.GONE);
  debugRootView.addView(retryButton);
  if (player == null) {
    return;
  }
  MappedTrackInfo mappedTrackInfo = trackSelector.getCurrentMappedTrackInfo();
  if (mappedTrackInfo == null) {
    return;
  }
  for (int i = 0; i < mappedTrackInfo.length; i++) {
    TrackGroupArray trackGroups = mappedTrackInfo.getTrackGroups(i);
    if (trackGroups.length != 0) {
      Button button = new Button(this);
      int label;
      switch (player.getRendererType(i)) {
        case C.TRACK_TYPE_AUDIO:
          label = R.string.audio;
          break;
        case C.TRACK_TYPE_VIDEO:
          label = R.string.video;
          break;
        case C.TRACK_TYPE_TEXT:
          label = R.string.text;
          break;
        default:
          continue;
      }
      button.setText(label);
      button.setTag(i);
      button.setOnClickListener(this);
      debugRootView.addView(button, debugRootView.getChildCount() - 1);
    }
  }
}
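The switch above is where the demo maps renderer types to button labels. A small standalone helper with the same shape, useful for logging; the class and method names are ours, not part of the demo app:

import com.google.android.exoplayer2.C;

final class TrackLabels {
  // Hypothetical helper: human-readable label for a renderer's track type,
  // mirroring the switch in updateButtonVisibilities.
  static String trackTypeLabel(int trackType) {
    switch (trackType) {
      case C.TRACK_TYPE_AUDIO:
        return "Audio";
      case C.TRACK_TYPE_VIDEO:
        return "Video";
      case C.TRACK_TYPE_TEXT:
        return "Text";
      default:
        return null; // Not user-selectable in this UI.
    }
  }
}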
Example 6: processMoovAtom
/**
 * Updates the stored track metadata to reflect the contents of the specified moov atom.
 */
private void processMoovAtom(ContainerAtom moov) throws ParserException {
  long durationUs = C.TIME_UNSET;
  List<Mp4Track> tracks = new ArrayList<>();
  long earliestSampleOffset = Long.MAX_VALUE;
  Metadata metadata = null;
  GaplessInfoHolder gaplessInfoHolder = new GaplessInfoHolder();
  Atom.LeafAtom udta = moov.getLeafAtomOfType(Atom.TYPE_udta);
  if (udta != null) {
    metadata = AtomParsers.parseUdta(udta, isQuickTime);
    if (metadata != null) {
      gaplessInfoHolder.setFromMetadata(metadata);
    }
  }
  for (int i = 0; i < moov.containerChildren.size(); i++) {
    Atom.ContainerAtom atom = moov.containerChildren.get(i);
    if (atom.type != Atom.TYPE_trak) {
      continue;
    }
    Track track = AtomParsers.parseTrak(atom, moov.getLeafAtomOfType(Atom.TYPE_mvhd),
        C.TIME_UNSET, null, isQuickTime);
    if (track == null) {
      continue;
    }
    Atom.ContainerAtom stblAtom = atom.getContainerAtomOfType(Atom.TYPE_mdia)
        .getContainerAtomOfType(Atom.TYPE_minf).getContainerAtomOfType(Atom.TYPE_stbl);
    TrackSampleTable trackSampleTable = AtomParsers.parseStbl(track, stblAtom, gaplessInfoHolder);
    if (trackSampleTable.sampleCount == 0) {
      continue;
    }
    Mp4Track mp4Track = new Mp4Track(track, trackSampleTable,
        extractorOutput.track(i, track.type));
    // Each sample has up to three bytes of overhead for the start code that replaces its length.
    // Allow ten source samples per output sample, like the platform extractor.
    int maxInputSize = trackSampleTable.maximumSize + 3 * 10;
    Format format = track.format.copyWithMaxInputSize(maxInputSize);
    if (track.type == C.TRACK_TYPE_AUDIO) {
      if (gaplessInfoHolder.hasGaplessInfo()) {
        format = format.copyWithGaplessInfo(gaplessInfoHolder.encoderDelay,
            gaplessInfoHolder.encoderPadding);
      }
      if (metadata != null) {
        format = format.copyWithMetadata(metadata);
      }
    }
    mp4Track.trackOutput.format(format);
    durationUs = Math.max(durationUs, track.durationUs);
    tracks.add(mp4Track);
    long firstSampleOffset = trackSampleTable.offsets[0];
    if (firstSampleOffset < earliestSampleOffset) {
      earliestSampleOffset = firstSampleOffset;
    }
  }
  this.durationUs = durationUs;
  this.tracks = tracks.toArray(new Mp4Track[tracks.size()]);
  extractorOutput.endTracks();
  extractorOutput.seekMap(this);
}
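The maxInputSize arithmetic above is easy to misread, so here it is spelled out with a concrete, made-up number:

// Suppose the track's stbl tables report a largest sample of 256 KiB.
int maximumSize = 256 * 1024;
// Up to 3 bytes of start-code overhead per sample, with headroom for 10
// source samples per output sample, exactly as in processMoovAtom above.
int maxInputSize = maximumSize + 3 * 10; // 262174 bytes.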
Example 7: selectTracks
@Override
protected TrackSelection[] selectTracks(RendererCapabilities[] rendererCapabilities,
    TrackGroupArray[] rendererTrackGroupArrays, int[][][] rendererFormatSupports)
    throws ExoPlaybackException {
  // Make a track selection for each renderer.
  int rendererCount = rendererCapabilities.length;
  TrackSelection[] rendererTrackSelections = new TrackSelection[rendererCount];
  Parameters params = paramsReference.get();
  boolean videoTrackAndRendererPresent = false;
  for (int i = 0; i < rendererCount; i++) {
    if (C.TRACK_TYPE_VIDEO == rendererCapabilities[i].getTrackType()) {
      rendererTrackSelections[i] = selectVideoTrack(rendererCapabilities[i],
          rendererTrackGroupArrays[i], rendererFormatSupports[i], params.maxVideoWidth,
          params.maxVideoHeight, params.maxVideoBitrate, params.allowNonSeamlessAdaptiveness,
          params.allowMixedMimeAdaptiveness, params.viewportWidth, params.viewportHeight,
          params.orientationMayChange, adaptiveTrackSelectionFactory,
          params.exceedVideoConstraintsIfNecessary, params.exceedRendererCapabilitiesIfNecessary);
      videoTrackAndRendererPresent |= rendererTrackGroupArrays[i].length > 0;
    }
  }
  for (int i = 0; i < rendererCount; i++) {
    switch (rendererCapabilities[i].getTrackType()) {
      case C.TRACK_TYPE_VIDEO:
        // Already done. Do nothing.
        break;
      case C.TRACK_TYPE_AUDIO:
        rendererTrackSelections[i] = selectAudioTrack(rendererTrackGroupArrays[i],
            rendererFormatSupports[i], params.preferredAudioLanguage,
            params.exceedRendererCapabilitiesIfNecessary, params.allowMixedMimeAdaptiveness,
            videoTrackAndRendererPresent ? null : adaptiveTrackSelectionFactory);
        break;
      case C.TRACK_TYPE_TEXT:
        rendererTrackSelections[i] = selectTextTrack(rendererTrackGroupArrays[i],
            rendererFormatSupports[i], params.preferredTextLanguage,
            params.preferredAudioLanguage, params.exceedRendererCapabilitiesIfNecessary);
        break;
      default:
        rendererTrackSelections[i] = selectOtherTrack(rendererCapabilities[i].getTrackType(),
            rendererTrackGroupArrays[i], rendererFormatSupports[i],
            params.exceedRendererCapabilitiesIfNecessary);
        break;
    }
  }
  return rendererTrackSelections;
}
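selectTracks is invoked by the playback framework; applications steer it through the selector's Parameters. A configuration sketch, hedged because constructor and parameter APIs shifted across 2.x releases, and with bandwidthMeter standing in for the caller's existing DefaultBandwidthMeter instance:

// Illustrative only: builds a selector whose selectTracks pass (above) will
// prefer English audio and use adaptive selections where possible.
TrackSelection.Factory adaptiveFactory = new AdaptiveTrackSelection.Factory(bandwidthMeter);
DefaultTrackSelector trackSelector = new DefaultTrackSelector(adaptiveFactory);
trackSelector.setParameters(
    trackSelector.getParameters().withPreferredAudioLanguage("en"));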
Example 8: maybeConfigureRenderersForTunneling
/**
 * Determines whether tunneling should be enabled, replacing {@link RendererConfiguration}s in
 * {@code rendererConfigurations} with configurations that enable tunneling on the appropriate
 * renderers if so.
 *
 * @param rendererCapabilities The {@link RendererCapabilities} of the renderers for which
 *     {@link TrackSelection}s are to be generated.
 * @param rendererTrackGroupArrays An array of {@link TrackGroupArray}s where each entry
 *     corresponds to the renderer of equal index in {@code renderers}.
 * @param rendererFormatSupports Maps every available track to a specific level of support as
 *     defined by the renderer {@code FORMAT_*} constants.
 * @param rendererConfigurations The renderer configurations. Configurations may be replaced with
 *     ones that enable tunneling as a result of this call.
 * @param trackSelections The renderer track selections.
 * @param tunnelingAudioSessionId The audio session id to use when tunneling, or
 *     {@link C#AUDIO_SESSION_ID_UNSET} if tunneling should not be enabled.
 */
private static void maybeConfigureRenderersForTunneling(
    RendererCapabilities[] rendererCapabilities, TrackGroupArray[] rendererTrackGroupArrays,
    int[][][] rendererFormatSupports, RendererConfiguration[] rendererConfigurations,
    TrackSelection[] trackSelections, int tunnelingAudioSessionId) {
  if (tunnelingAudioSessionId == C.AUDIO_SESSION_ID_UNSET) {
    return;
  }
  // Check whether we can enable tunneling. To enable tunneling we require exactly one audio and
  // one video renderer to support tunneling and have a selection.
  int tunnelingAudioRendererIndex = -1;
  int tunnelingVideoRendererIndex = -1;
  boolean enableTunneling = true;
  for (int i = 0; i < rendererCapabilities.length; i++) {
    int rendererType = rendererCapabilities[i].getTrackType();
    TrackSelection trackSelection = trackSelections[i];
    if ((rendererType == C.TRACK_TYPE_AUDIO || rendererType == C.TRACK_TYPE_VIDEO)
        && trackSelection != null) {
      if (rendererSupportsTunneling(rendererFormatSupports[i], rendererTrackGroupArrays[i],
          trackSelection)) {
        if (rendererType == C.TRACK_TYPE_AUDIO) {
          if (tunnelingAudioRendererIndex != -1) {
            enableTunneling = false;
            break;
          } else {
            tunnelingAudioRendererIndex = i;
          }
        } else {
          if (tunnelingVideoRendererIndex != -1) {
            enableTunneling = false;
            break;
          } else {
            tunnelingVideoRendererIndex = i;
          }
        }
      }
    }
  }
  enableTunneling &= tunnelingAudioRendererIndex != -1 && tunnelingVideoRendererIndex != -1;
  if (enableTunneling) {
    RendererConfiguration tunnelingRendererConfiguration =
        new RendererConfiguration(tunnelingAudioSessionId);
    rendererConfigurations[tunnelingAudioRendererIndex] = tunnelingRendererConfiguration;
    rendererConfigurations[tunnelingVideoRendererIndex] = tunnelingRendererConfiguration;
  }
}
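From application code, tunneling is opted into by handing the selector an audio session id, which flows into tunnelingAudioSessionId above. A minimal sketch, assuming ExoPlayer 2.x on API 21+, with context standing in for the caller's Android Context:

// The selector will then attempt to pair one audio and one video renderer
// for tunneled playback during the next track selection.
DefaultTrackSelector trackSelector = new DefaultTrackSelector();
trackSelector.setTunnelingAudioSessionId(C.generateAudioSessionIdV21(context));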
Example 9: SimpleDecoderAudioRenderer
/**
 * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
 *     null if delivery of events is not required.
 * @param eventListener A listener of events. May be null if delivery of events is not required.
 * @param audioCapabilities The audio capabilities for playback on this device. May be null if the
 *     default capabilities (no encoded audio passthrough support) should be assumed.
 * @param drmSessionManager For use with encrypted media. May be null if support for encrypted
 *     media is not required.
 * @param playClearSamplesWithoutKeys Encrypted media may contain clear (un-encrypted) regions.
 *     For example a media file may start with a short clear region so as to allow playback to
 *     begin in parallel with key acquisition. This parameter specifies whether the renderer is
 *     permitted to play clear regions of encrypted media files before {@code drmSessionManager}
 *     has obtained the keys necessary to decrypt encrypted regions of the media.
 * @param audioProcessors Optional {@link AudioProcessor}s that will process audio before output.
 */
public SimpleDecoderAudioRenderer(Handler eventHandler,
    AudioRendererEventListener eventListener, AudioCapabilities audioCapabilities,
    DrmSessionManager<ExoMediaCrypto> drmSessionManager, boolean playClearSamplesWithoutKeys,
    AudioProcessor... audioProcessors) {
  super(C.TRACK_TYPE_AUDIO);
  this.drmSessionManager = drmSessionManager;
  this.playClearSamplesWithoutKeys = playClearSamplesWithoutKeys;
  eventDispatcher = new EventDispatcher(eventHandler, eventListener);
  audioTrack = new AudioTrack(audioCapabilities, audioProcessors, new AudioTrackListener());
  formatHolder = new FormatHolder();
  flagsOnlyBuffer = DecoderInputBuffer.newFlagsOnlyInstance();
  decoderReinitializationState = REINITIALIZATION_STATE_NONE;
  audioTrackNeedsConfigure = true;
}
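The super(C.TRACK_TYPE_AUDIO) call is how a renderer declares which track type it consumes; track selection then routes audio TrackGroups to it. A skeletal custom renderer for illustration only, a do-nothing sketch rather than a working decoder, assuming the BaseRenderer abstract methods of ExoPlayer 2.x:

import com.google.android.exoplayer2.BaseRenderer;
import com.google.android.exoplayer2.C;
import com.google.android.exoplayer2.Format;

// Hypothetical skeleton: advertises itself as an audio renderer via the
// super constructor, so getTrackType() reports C.TRACK_TYPE_AUDIO.
final class NoOpAudioRenderer extends BaseRenderer {

  NoOpAudioRenderer() {
    super(C.TRACK_TYPE_AUDIO);
  }

  @Override
  public int supportsFormat(Format format) {
    return FORMAT_UNSUPPORTED_TYPE; // Declines every format.
  }

  @Override
  public void render(long positionUs, long elapsedRealtimeUs) {
    // A real renderer would decode and output samples here.
  }

  @Override
  public boolean isReady() {
    return false;
  }

  @Override
  public boolean isEnded() {
    return true;
  }
}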
Example 10: MediaCodecAudioRenderer
/**
 * @param mediaCodecSelector A decoder selector.
 * @param drmSessionManager For use with encrypted content. May be null if support for encrypted
 *     content is not required.
 * @param playClearSamplesWithoutKeys Encrypted media may contain clear (un-encrypted) regions.
 *     For example a media file may start with a short clear region so as to allow playback to
 *     begin in parallel with key acquisition. This parameter specifies whether the renderer is
 *     permitted to play clear regions of encrypted media files before {@code drmSessionManager}
 *     has obtained the keys necessary to decrypt encrypted regions of the media.
 * @param eventHandler A handler to use when delivering events to {@code eventListener}. May be
 *     null if delivery of events is not required.
 * @param eventListener A listener of events. May be null if delivery of events is not required.
 * @param audioCapabilities The audio capabilities for playback on this device. May be null if the
 *     default capabilities (no encoded audio passthrough support) should be assumed.
 * @param audioProcessors Optional {@link AudioProcessor}s that will process PCM audio before
 *     output.
 */
public MediaCodecAudioRenderer(MediaCodecSelector mediaCodecSelector,
    DrmSessionManager<FrameworkMediaCrypto> drmSessionManager,
    boolean playClearSamplesWithoutKeys, Handler eventHandler,
    AudioRendererEventListener eventListener, AudioCapabilities audioCapabilities,
    AudioProcessor... audioProcessors) {
  super(C.TRACK_TYPE_AUDIO, mediaCodecSelector, drmSessionManager, playClearSamplesWithoutKeys);
  audioTrack = new AudioTrack(audioCapabilities, audioProcessors, new AudioTrackListener());
  eventDispatcher = new EventDispatcher(eventHandler, eventListener);
}
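A wiring sketch: most applications obtain this renderer indirectly via DefaultRenderersFactory, but it can be constructed directly; eventHandler and eventListener below are placeholders for the caller's own objects:

// Audio-only renderer with platform (MediaCodec) decoding, no DRM, and
// default audio capabilities (no encoded audio passthrough).
MediaCodecAudioRenderer audioRenderer = new MediaCodecAudioRenderer(
    MediaCodecSelector.DEFAULT,
    /* drmSessionManager= */ null,
    /* playClearSamplesWithoutKeys= */ false,
    eventHandler,
    eventListener,
    /* audioCapabilities= */ null);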