本文整理匯總了Java中javax.sound.midi.Sequence.getDivisionType方法的典型用法代碼示例。如果您正苦於以下問題:Java Sequence.getDivisionType方法的具體用法?Java Sequence.getDivisionType怎麽用?Java Sequence.getDivisionType使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類javax.sound.midi.Sequence
的用法示例。
在下文中一共展示了Sequence.getDivisionType方法的10個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: setSequence
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Installs a new sequence for playback bookkeeping, or resets this object
 * to its initial state when {@code seq} is null. Synchronized because it
 * rewrites shared track/timing state read by other methods of this class.
 */
synchronized void setSequence(Sequence seq) {
if (seq == null) {
// No sequence: revert to the pristine (empty) state.
init();
return;
}
tracks = seq.getTracks();
// NOTE(review): called right after 'tracks' is assigned -- presumably it
// recomputes mute/solo state from the new tracks; confirm against the class.
muteSoloChanged();
resolution = seq.getResolution();
divisionType = seq.getDivisionType();
// One read cursor per track, all starting at event index 0.
trackReadPos = new int[tracks.length];
// trigger re-initialization
checkPointMillis = 0;
needReindex = true;
}
示例2: ChangeResolution
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Change resolution (TPQN) and retime events.
 *
 * @param sourceSeq Sequence to be processed; must use PPQ (tick-based) division.
 * @param resolution Ticks per quarter note of the new sequence.
 * @return New sequence with the new resolution and rescaled event ticks.
 * @throws InvalidMidiDataException if MIDI data is invalid.
 * @throws UnsupportedOperationException if the source sequence is SMPTE-based.
 */
public static Sequence ChangeResolution(Sequence sourceSeq, int resolution) throws InvalidMidiDataException
{
    // Retiming by a tick ratio only makes sense for tick-based sequences.
    if (sourceSeq.getDivisionType() != Sequence.PPQ)
    {
        throw new UnsupportedOperationException("SMPTE is not supported.");
    }
    Sequence seq = new Sequence(sourceSeq.getDivisionType(), resolution);
    // Scale factor applied to every tick (loop-invariant, hoisted out of the track loop).
    double timingRate = (double) resolution / sourceSeq.getResolution();
    // Copy every track, rescaling each event's tick to the new resolution.
    for (int trackIndex = 0; trackIndex < sourceSeq.getTracks().length; trackIndex++)
    {
        Track sourceTrack = sourceSeq.getTracks()[trackIndex];
        Track track = seq.createTrack();
        for (int eventIndex = 0; eventIndex < sourceTrack.size(); eventIndex++)
        {
            MidiEvent sourceEvent = sourceTrack.get(eventIndex);
            MidiEvent event = new MidiEvent(sourceEvent.getMessage(), Math.round(sourceEvent.getTick() * timingRate));
            track.add(event);
        }
    }
    // If the target resolution is coarser than the source resolution, events
    // at different source ticks may collapse onto the same tick, producing
    // zero-length notes and/or duplicate control changes at one timing.
    // They are intentionally left in place because they cause no major problems.
    return seq;
}
示例3: printSeqInfo
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Debug helper for inspecting sequence/sequencer timing parameters.
 * Every print statement is currently commented out, so the method only
 * queries the division type and otherwise has no observable effect.
 */
protected void printSeqInfo(Sequence seq) {
//System.out.println("Score Title: " + scoreTitle);
//System.out.println("Score TempoEvent: " + m_currentTempo + " BPM");
//System.out.print("Sequence Division Type = ");
float type = seq.getDivisionType(); // unused while the printing below is disabled
/*
if (Sequence.PPQ == type)
System.out.println("PPQ");
else if (Sequence.SMPTE_24 == type)
System.out.println("SMPTE 24 (24 fps)");
else if (Sequence.SMPTE_25 == type)
System.out.println("SMPTE 25 (25 fps)");
else if (Sequence.SMPTE_30 == type)
System.out.println("SMPTE 30 (30 fps)");
else if (Sequence.SMPTE_30DROP == type)
System.out.println("SMPTE 30 Drop (29.97 fps)");
else
System.out.println("Unknown");
System.out.println("Sequence Resolution = " +
seq.getResolution());
System.out.println("Sequence TickLength = " +
seq.getTickLength());
System.out.println("Sequence Microsecond Length = " +
seq.getMicrosecondLength());
System.out.println("Sequencer TempoEvent (BPM) = " +
m_sequencer.getTempoInBPM());
System.out.println("Sequencer TempoEvent (MPQ) = " +
m_sequencer.getTempoInMPQ());
System.out.println("Sequencer TempoFactor = " +
m_sequencer.getTempoFactor());
*/
}
示例4: FrinikaSequence
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
public FrinikaSequence(Sequence sequence) throws InvalidMidiDataException
{
super(sequence.getDivisionType(),sequence.getResolution());
for(Track track : sequence.getTracks())
{
FrinikaTrackWrapper trackWrapper = new FrinikaTrackWrapper(track);
trackWrapper.setSequence(this);
tracks.add(track);
frinikaTrackWrappers.add(trackWrapper);
}
}
示例5: tick2microsecond
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Given a tick, convert to microsecond
 * @param seq sequence providing the division type and resolution
 * @param tick tick position to convert
 * @param cache tempo info and current tempo
 * @return elapsed time at {@code tick}, in microseconds
 */
public static long tick2microsecond(Sequence seq, long tick, TempoCache cache) {
if (seq.getDivisionType() != Sequence.PPQ ) {
// SMPTE timing: divisionType is the frame rate and resolution is ticks
// per frame, so tick / (divisionType * resolution) is seconds directly
// -- no tempo events are involved.
double seconds = ((double)tick / (double)(seq.getDivisionType() * seq.getResolution()));
return (long) (1000000 * seconds);
}
if (cache == null) {
cache = new TempoCache(seq);
}
int resolution = seq.getResolution();
long[] ticks = cache.ticks;
int[] tempos = cache.tempos; // in MPQ
int cacheCount = tempos.length;
// optimization to not always go through entire list of tempo events
int snapshotIndex = cache.snapshotIndex;
int snapshotMicro = cache.snapshotMicro;
// walk through all tempo changes and add time for the respective blocks
long us = 0; // microsecond
// The cached snapshot is only usable if it lies at or before the requested
// tick; otherwise restart the walk from the beginning of the tempo list.
if (snapshotIndex <= 0
|| snapshotIndex >= cacheCount
|| ticks[snapshotIndex] > tick) {
snapshotMicro = 0;
snapshotIndex = 0;
}
if (cacheCount > 0) {
// this implementation needs a tempo event at tick 0!
int i = snapshotIndex + 1;
// Accumulate whole tempo segments ending at or before the target tick.
while (i < cacheCount && ticks[i] <= tick) {
snapshotMicro += ticks2microsec(ticks[i] - ticks[i - 1], tempos[i - 1], resolution);
snapshotIndex = i;
i++;
}
// Add the partial segment from the last tempo change up to the target tick.
us = snapshotMicro
+ ticks2microsec(tick - ticks[snapshotIndex],
tempos[snapshotIndex],
resolution);
}
// Persist the walk position so a later (monotonically increasing) query
// can resume from here instead of rescanning the whole list.
cache.snapshotIndex = snapshotIndex;
cache.snapshotMicro = snapshotMicro;
return us;
}
示例6: getAudioInputStream
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Renders the sequence to audio: merges events from all tracks in tick
 * order, converts ticks to microseconds, and pushes every non-meta message
 * into a software synthesizer's receiver with that timestamp.
 *
 * @param seq the MIDI sequence to render (PPQ or SMPTE division)
 * @return the synthesizer's audio stream, truncated to the sequence length
 *         plus 4 seconds (extra time presumably for note decay -- TODO confirm)
 * @throws InvalidMidiDataException if no synthesizer stream/receiver can be
 *         opened or a tempo meta event carries fewer than 3 data bytes
 */
private AudioInputStream getAudioInputStream(final Sequence seq)
throws InvalidMidiDataException {
AudioSynthesizer synth = (AudioSynthesizer) new SoftSynthesizer();
AudioInputStream stream;
Receiver recv;
try {
// 'format' is an enclosing-class field holding the target audio format.
stream = synth.openStream(format, null);
recv = synth.getReceiver();
} catch (MidiUnavailableException e) {
// Wrapped so this method only exposes InvalidMidiDataException.
throw new InvalidMidiDataException(e.toString());
}
float divtype = seq.getDivisionType();
Track[] tracks = seq.getTracks();
// Per-track read cursors for the k-way merge below.
int[] trackspos = new int[tracks.length];
// Current tempo in microseconds per quarter note (MIDI default = 120 BPM).
int mpq = 500000;
int seqres = seq.getResolution();
long lasttick = 0;
long curtime = 0; // elapsed time in microseconds
while (true) {
// Select the earliest pending event across all tracks.
MidiEvent selevent = null;
int seltrack = -1;
for (int i = 0; i < tracks.length; i++) {
int trackpos = trackspos[i];
Track track = tracks[i];
if (trackpos < track.size()) {
MidiEvent event = track.get(trackpos);
if (selevent == null || event.getTick() < selevent.getTick()) {
selevent = event;
seltrack = i;
}
}
}
if (seltrack == -1)
break;
trackspos[seltrack]++;
long tick = selevent.getTick();
if (divtype == Sequence.PPQ)
// PPQ: advance by the tick delta at the current tempo.
curtime += ((tick - lasttick) * mpq) / seqres;
else
// SMPTE: divtype is frames/second, seqres is ticks/frame.
curtime = (long) ((tick * 1000000.0 * divtype) / seqres);
lasttick = tick;
MidiMessage msg = selevent.getMessage();
if (msg instanceof MetaMessage) {
// Meta events are consumed here (tempo tracking), never forwarded.
if (divtype == Sequence.PPQ) {
if (((MetaMessage) msg).getType() == 0x51) { // 0x51 = set-tempo
byte[] data = ((MetaMessage) msg).getData();
if (data.length < 3) {
throw new InvalidMidiDataException();
}
mpq = ((data[0] & 0xff) << 16)
| ((data[1] & 0xff) << 8) | (data[2] & 0xff);
}
}
} else {
recv.send(msg, curtime);
}
}
long totallen = curtime / 1000000; // total length in whole seconds
// Bound the (otherwise open-ended) synth stream to the song length + 4 s.
long len = (long) (stream.getFormat().getFrameRate() * (totallen + 4));
stream = new AudioInputStream(stream, stream.getFormat(), len);
return stream;
}
示例7: getAudioInputStream
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Converts a MIDI sequence into rendered audio. All tracks are walked in
 * tick order; each non-meta message is delivered to a software synthesizer
 * with its microsecond timestamp, and the synthesizer's output stream is
 * returned, limited to the song duration plus 4 seconds (the padding is
 * presumably for note release tails -- TODO confirm).
 *
 * @param seq the MIDI sequence to render
 * @return the rendered audio stream
 * @throws UnsupportedAudioFileException if a tempo meta event has fewer than 3 data bytes
 * @throws IOException if the synthesizer cannot be opened
 */
public AudioInputStream getAudioInputStream(Sequence seq)
throws UnsupportedAudioFileException, IOException {
AudioSynthesizer synth = (AudioSynthesizer) new SoftSynthesizer();
AudioInputStream stream;
Receiver recv;
try {
// 'format' is an enclosing-class field holding the target audio format.
stream = synth.openStream(format, null);
recv = synth.getReceiver();
} catch (MidiUnavailableException e) {
// Re-thrown as IOException to fit this method's checked signature.
throw new IOException(e.toString());
}
float divtype = seq.getDivisionType();
Track[] tracks = seq.getTracks();
// One read cursor per track; together they drive a k-way merge by tick.
int[] trackspos = new int[tracks.length];
// Tempo in microseconds per quarter note; 500000 is the MIDI default (120 BPM).
int mpq = 500000;
int seqres = seq.getResolution();
long lasttick = 0;
long curtime = 0; // elapsed time in microseconds
while (true) {
// Find the track whose next event has the smallest tick.
MidiEvent selevent = null;
int seltrack = -1;
for (int i = 0; i < tracks.length; i++) {
int trackpos = trackspos[i];
Track track = tracks[i];
if (trackpos < track.size()) {
MidiEvent event = track.get(trackpos);
if (selevent == null || event.getTick() < selevent.getTick()) {
selevent = event;
seltrack = i;
}
}
}
if (seltrack == -1)
break;
trackspos[seltrack]++;
long tick = selevent.getTick();
if (divtype == Sequence.PPQ)
// Tick delta converted to microseconds at the current tempo.
curtime += ((tick - lasttick) * mpq) / seqres;
else
// SMPTE division: divtype is the frame rate, seqres ticks per frame.
curtime = (long) ((tick * 1000000.0 * divtype) / seqres);
lasttick = tick;
MidiMessage msg = selevent.getMessage();
if (msg instanceof MetaMessage) {
// Meta events only update tempo state; they are not sent to the synth.
if (divtype == Sequence.PPQ) {
if (((MetaMessage) msg).getType() == 0x51) { // set-tempo meta event
byte[] data = ((MetaMessage) msg).getData();
if (data.length < 3) {
throw new UnsupportedAudioFileException();
}
mpq = ((data[0] & 0xff) << 16)
| ((data[1] & 0xff) << 8) | (data[2] & 0xff);
}
}
} else {
recv.send(msg, curtime);
}
}
long totallen = curtime / 1000000; // whole seconds
// Cap the stream length: frames for (song length + 4 s) of audio.
long len = (long) (stream.getFormat().getFrameRate() * (totallen + 4));
stream = new AudioInputStream(stream, stream.getFormat(), len);
return stream;
}
示例8: send
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Streams the entire sequence into {@code recv}, timestamping each message
 * via sendMidiMsg, and returns the total duration.
 *
 * @param seq the sequence to send; asserted (only under -ea) to be PPQ-based
 * @param recv destination receiver; may be null, in which case only the
 *             duration is computed
 * @return curtime / SEC2US -- total time in seconds, assuming SEC2US is the
 *         microseconds-per-second constant (TODO confirm its declared value)
 */
private double send(Sequence seq, Receiver recv) {
float divtype = seq.getDivisionType();
// Only enforced with assertions enabled; non-PPQ input otherwise falls
// through to the raw-tick branch below.
assert (seq.getDivisionType() == Sequence.PPQ);
Track[] tracks = seq.getTracks();
// Per-track read cursors for the k-way merge over all tracks.
int[] trackspos = new int[tracks.length];
// Tempo in microseconds per quarter note (SEC2US / 2 = MIDI default, 120 BPM).
int mpq = (int)SEC2US / 2;
int seqres = seq.getResolution();
long lasttick = 0;
long curtime = 0;
while (true) {
// Pick the earliest pending event among all tracks.
MidiEvent selevent = null;
int seltrack = -1;
for (int i = 0; i < tracks.length; i++) {
int trackpos = trackspos[i];
Track track = tracks[i];
if (trackpos < track.size()) {
MidiEvent event = track.get(trackpos);
if (selevent == null
|| event.getTick() < selevent.getTick()) {
selevent = event;
seltrack = i;
}
}
}
if (seltrack == -1)
break;
trackspos[seltrack]++;
long tick = selevent.getTick();
if (divtype == Sequence.PPQ)
curtime += ((tick - lasttick) * mpq) / seqres;
else
// NOTE(review): for non-PPQ division the raw tick is used as the
// time value directly -- unlike the SMPTE conversion performed by
// the sibling send/getAudioInputStream variants. Confirm intended.
curtime = tick;
lasttick = tick;
MidiMessage msg = selevent.getMessage();
if (msg instanceof MetaMessage) {
// Meta events update the tempo but are never forwarded.
if (divtype == Sequence.PPQ)
if (((MetaMessage) msg).getType() == 0x51) { // set-tempo meta event
byte[] data = ((MetaMessage) msg).getData();
// NOTE(review): no length check -- a malformed tempo event
// (<3 bytes) would raise ArrayIndexOutOfBoundsException here.
mpq = ((data[0] & 0xff) << 16)
| ((data[1] & 0xff) << 8) | (data[2] & 0xff);
}
} else {
if(recv != null)
sendMidiMsg(recv, msg, curtime);
}
}
return curtime / SEC2US;
}
示例9: AssumeResolution
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Change resolution (TPQN) without retiming events.
 *
 * @param sourceSeq Sequence to be processed; must use PPQ (tick-based) division.
 * @param resolution Ticks per quarter note of the new sequence.
 * @param adjustTempo true to scale tempo values so the song keeps its real-time speed.
 * @return New sequence with the new resolution.
 * @throws InvalidMidiDataException if MIDI data is invalid (e.g. malformed tempo event).
 * @throws UnsupportedOperationException if the source sequence is SMPTE-based.
 */
public static Sequence AssumeResolution(Sequence sourceSeq, int resolution, boolean adjustTempo) throws InvalidMidiDataException
{
    // sequence must be tick-based
    if (sourceSeq.getDivisionType() != Sequence.PPQ)
    {
        throw new UnsupportedOperationException("SMPTE is not supported.");
    }
    Sequence seq = new Sequence(sourceSeq.getDivisionType(), resolution);
    // Ratio old/new resolution: tempo values are divided by this so that the
    // unchanged tick positions still map to the same wall-clock time.
    double tempoScale = (double) sourceSeq.getResolution() / seq.getResolution();
    // process all input tracks
    for (int trackIndex = 0; trackIndex < sourceSeq.getTracks().length; trackIndex++)
    {
        Track sourceTrack = sourceSeq.getTracks()[trackIndex];
        Track track = seq.createTrack();
        // process all events
        for (int eventIndex = 0; eventIndex < sourceTrack.size(); eventIndex++)
        {
            MidiEvent sourceEvent = sourceTrack.get(eventIndex);
            MidiMessage newMessage = sourceEvent.getMessage();
            if (adjustTempo && newMessage instanceof MetaMessage)
            {
                MetaMessage message = (MetaMessage) newMessage;
                if (message.getType() == MidiUtil.META_TEMPO)
                {
                    byte[] data = message.getData();
                    if (data.length != 3)
                    {
                        throw new InvalidMidiDataException("Illegal tempo event.");
                    }
                    int sourceTempo = ((data[0] & 0xff) << 16) | ((data[1] & 0xff) << 8) | (data[2] & 0xff);
                    int newTempo = (int) Math.floor(sourceTempo / tempoScale);
                    byte[] newData = new byte[] { (byte) ((newTempo >> 16) & 0xff), (byte) ((newTempo >> 8) & 0xff), (byte) (newTempo & 0xff) };
                    // BUGFIX: build a fresh MetaMessage instead of calling
                    // setMessage() on the source event's message object, which
                    // mutated the SOURCE sequence as a side effect.
                    newMessage = new MetaMessage(MidiUtil.META_TEMPO, newData, newData.length);
                }
            }
            track.add(new MidiEvent(newMessage, sourceEvent.getTick()));
        }
    }
    return seq;
}
示例10: send
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Send entire MIDI Sequence into Receiver using timestamps.
 *
 * @param seq the sequence to play; expected to be PPQ (tick) based.
 * @param recv destination receiver; may be null, in which case only the
 *             total duration is computed.
 * @return the length of the sequence in seconds.
 */
public static double send(Sequence seq, Receiver recv)
{
    float divtype = seq.getDivisionType();
    assert (seq.getDivisionType() == Sequence.PPQ);
    Track[] tracks = seq.getTracks();
    // Per-track read cursor for the k-way merge over all tracks.
    int[] trackspos = new int[tracks.length];
    // Current tempo in microseconds per quarter note (MIDI default: 120 BPM).
    int mpq = 500000;
    int seqres = seq.getResolution();
    long lasttick = 0;
    long curtime = 0; // elapsed time in microseconds
    while (true)
    {
        // Select the earliest pending event across all tracks.
        MidiEvent selevent = null;
        int seltrack = -1;
        for (int i = 0; i < tracks.length; i++)
        {
            int trackpos = trackspos[i];
            Track track = tracks[i];
            if (trackpos < track.size())
            {
                MidiEvent event = track.get(trackpos);
                if (selevent == null || event.getTick() < selevent.getTick())
                {
                    selevent = event;
                    seltrack = i;
                }
            }
        }
        if (seltrack == -1)
            break; // all tracks exhausted
        trackspos[seltrack]++;
        long tick = selevent.getTick();
        if (divtype == Sequence.PPQ)
            // Ticks -> microseconds at the current tempo.
            curtime += ((tick - lasttick) * mpq) / seqres;
        else
            // SMPTE: divtype is the frame rate, seqres is ticks per frame.
            curtime = (long) ((tick * 1000000.0 * divtype) / seqres);
        lasttick = tick;
        MidiMessage msg = selevent.getMessage();
        if (msg instanceof MetaMessage)
        {
            // Track tempo changes (meta 0x51); meta events are not forwarded.
            if (divtype == Sequence.PPQ)
                if (((MetaMessage) msg).getType() == 0x51)
                {
                    byte[] data = ((MetaMessage) msg).getData();
                    // Guard against malformed tempo events (<3 data bytes),
                    // which previously caused ArrayIndexOutOfBoundsException.
                    if (data.length >= 3)
                    {
                        mpq = ((data[0] & 0xff) << 16) | ((data[1] & 0xff) << 8) | (data[2] & 0xff);
                    }
                }
        }
        else
        {
            if (recv != null)
                recv.send(msg, curtime);
        }
    }
    return curtime / 1000000.0;
}