本文整理匯總了Java中javax.sound.midi.Sequence.getResolution方法的典型用法代碼示例。如果您正苦於以下問題:Java Sequence.getResolution方法的具體用法?Java Sequence.getResolution怎麽用?Java Sequence.getResolution使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類javax.sound.midi.Sequence
的用法示例。
在下文中一共展示了Sequence.getResolution方法的13個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Java代碼示例。
示例1: render
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Adds this Note at the specified time on the specified Track and
 * channel in the specified Sequence, then returns the time that a
 * sequential Note should be added.
 * @param seq the Sequence to which to add this Note
 * @param track the Track in the Sequence to which to add this Note
 * @param time the tick at which to start this Note
 * @param ch the channel on which to put this Note
 * @param transposition amount by which to transpose this note in semitones
 * @param sendBankSelect currently has no effect -- see the empty branch below
 * @return the tick at which a sequential Note should start (this note's off time)
 * @throws javax.sound.midi.InvalidMidiDataException if the underlying render call rejects the data
 */
public long render(Sequence seq,
Track track,
long time,
int ch,
int transposition,
boolean sendBankSelect)
throws InvalidMidiDataException
{
if( sendBankSelect )
{
// NOTE(review): intentionally empty? sendBankSelect has no effect here -- confirm
}
// duration is in rhythm-value units; BEAT converts it to ticks via the
// sequence's ticks-per-quarter-note resolution
int dur = getRhythmValue();
long offTime = time + dur * seq.getResolution() / BEAT;
render(seq, track, time, offTime, ch, transposition);
return offTime;
}
示例2: writeToTrack
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Loads each configured effect MIDI file and appends its events to the
 * given track, shifted onto higher channels and rescaled to this tempo.
 *
 * @param track The track to write to
 * @param channel The channel to write to (NOTE(review): unused in this body -- confirm intent)
 */
@Override
public void writeToTrack(Track track, int channel) {
long lastEnd = 0; // tick where the previously placed effect ended
for (String name : effects.keySet()) {
Sequence sequence = FileUtils.LoadMidiFile("effects/" + name + ".mid");
if (sequence != null) {
// Place the effect proportionally inside a 15-second window based on
// the word index stored in the effects map.
int start =// Position effect in track
(int) (QUARTER * tempo.averageBpm / 60.0 * //beats per second
15 * //because 15 seconds
effects.get(name) //i-th word in text
);
// never overlap the previous effect
if (start < lastEnd)
start = (int) lastEnd;
// pull the start back if the effect would run past the 15-second window
if (sequence.getMicrosecondLength() / 1000000.0 + TicksInSecs(start, tempo.resolution) > 15.0) {
start -= SecsInTicks(TicksInSecs(start, tempo.resolution) + sequence.getMicrosecondLength() / 1000000.0 - 15, tempo.resolution);
}
float scale = tempo.resolution / (float) sequence.getResolution(); // Make the tempo fit
for (Track t : sequence.getTracks()) {
for (int i = 0; i < t.size(); i++) {
MidiEvent event = t.get(i);
byte[] data = event.getMessage().getMessage();//(command & 0xF0) | (channel & 0x0F)
// NOTE(review): raw add assumes a channel-voice status byte; for meta/sysex
// messages (status 0xF0+) this would corrupt the status -- confirm inputs
data[0] += 2; // Keep channel 1 and 2 free
long tick = (long) (event.getTick() * scale) + start;
// minimal anonymous wrapper around the raw bytes; clone() is never used here
MidiEvent ev = new MidiEvent(new MidiMessage(data) {
@Override
public Object clone() {
return null;
}
}, tick);
track.add(ev);
if (tick > lastEnd)
lastEnd = tick;
}
}
}
}
}
示例3: setSequence
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Installs a new sequence for playback: caches its tracks, division type
 * and resolution, and resets per-track read positions. A null sequence
 * resets this object via init().
 */
synchronized void setSequence(Sequence seq) {
if (seq == null) {
init();
return;
}
tracks = seq.getTracks();
muteSoloChanged();
resolution = seq.getResolution();
divisionType = seq.getDivisionType();
trackReadPos = new int[tracks.length]; // one read cursor per track
// trigger re-initialization
checkPointMillis = 0;
needReindex = true;
}
示例4: ChangeResolution
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Re-times a tick-based sequence to a new resolution (TPQN).
 *
 * Every event is copied into a fresh sequence with its tick scaled by
 * (new resolution / old resolution), preserving musical timing.
 *
 * @param sourceSeq tick-based (PPQ) sequence to be processed.
 * @param resolution ticks per quarter note of the new sequence.
 * @return a new Sequence with the requested resolution.
 * @throws InvalidMidiDataException throw if MIDI data is invalid.
 * @throws UnsupportedOperationException if the source uses SMPTE timing.
 */
public static Sequence ChangeResolution(Sequence sourceSeq, int resolution) throws InvalidMidiDataException
{
    // only tick-based (PPQ) sequences can be re-timed this way
    if (sourceSeq.getDivisionType() != Sequence.PPQ)
    {
        throw new UnsupportedOperationException("SMPTE is not supported.");
    }
    Sequence converted = new Sequence(sourceSeq.getDivisionType(), resolution);
    // tick scale factor: new TPQN over old TPQN
    double scale = (double) resolution / sourceSeq.getResolution();
    for (Track sourceTrack : sourceSeq.getTracks())
    {
        Track targetTrack = converted.createTrack();
        int eventCount = sourceTrack.size();
        for (int index = 0; index < eventCount; index++)
        {
            MidiEvent original = sourceTrack.get(index);
            long scaledTick = Math.round(original.getTick() * scale);
            targetTrack.add(new MidiEvent(original.getMessage(), scaledTick));
        }
    }
    // When shrinking the resolution, events at distinct source ticks may
    // collapse onto the same target tick (zero-length notes, duplicate
    // control changes). They are deliberately kept, since in practice they
    // do not cause major problems.
    return converted;
}
示例5: setSequence
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Sets the sequence and recomputes cached statistics: the number of
 * NOTE_ON events and the pattern length, both in beats and in ticks,
 * spanned between the first and last NOTE_ON.
 */
public void setSequence(Sequence sequence) {
this.sequence = sequence;
// calculate length in beats and notes count
Track track = getTrack();
notesCount = 0;
long start = -1; // tick of the first NOTE_ON seen (-1 until found)
long end = -1; // tick of the last NOTE_ON seen
int size = track.size();
if (size > 0) {
for (int i = 0; i < size; i++) {
MidiEvent ev = track.get(i);
MidiMessage msg = ev.getMessage();
if ((msg instanceof ShortMessage) && (((ShortMessage)msg).getCommand() == ShortMessage.NOTE_ON)) {
notesCount++;
if (start == -1) {
start = ev.getTick();
}
end = ev.getTick();
}
}
int resolution = sequence.getResolution();
// NOTE(review): comments below say res/2 but the code uses res/4 -- confirm which is intended.
// Also: if the track has events but no NOTE_ONs, start/end stay -1 here.
long b1 = (start + (resolution/4)) / resolution; // +res/2: allow notes to start a bit earlier but still be counted to other bar
long b2 = (end - (resolution/4)) / resolution; // - res/2: ...
lengthInBeats = (int)(b2 - b1 + 1);
length = lengthInBeats * resolution;
} else {
length = 0;
}
}
示例6: FrinikaSequence
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Builds a FrinikaSequence mirroring the given Sequence: copies its
 * division type and resolution, and wraps each source track in a
 * FrinikaTrackWrapper bound to this sequence.
 *
 * @param sequence the source Sequence to mirror
 * @throws InvalidMidiDataException if the division type/resolution pair is rejected
 */
public FrinikaSequence(Sequence sequence) throws InvalidMidiDataException
{
super(sequence.getDivisionType(),sequence.getResolution());
for(Track track : sequence.getTracks())
{
FrinikaTrackWrapper trackWrapper = new FrinikaTrackWrapper(track);
trackWrapper.setSequence(this);
// the original Track object is shared: both this sequence's track list
// and the wrapper reference the same instance
tracks.add(track);
frinikaTrackWrappers.add(trackWrapper);
}
}
示例7: tick2microsecond
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Given a tick, convert to microsecond
 * @param seq the sequence supplying division type and resolution
 * @param tick the tick position to convert
 * @param cache tempo info and current tempo; created on demand when null
 * @return absolute time of {@code tick} in microseconds
 */
public static long tick2microsecond(Sequence seq, long tick, TempoCache cache) {
// SMPTE: divisionType is frames/second, so
// seconds = tick / (frames-per-second * ticks-per-frame)
if (seq.getDivisionType() != Sequence.PPQ ) {
double seconds = ((double)tick / (double)(seq.getDivisionType() * seq.getResolution()));
return (long) (1000000 * seconds);
}
if (cache == null) {
cache = new TempoCache(seq);
}
int resolution = seq.getResolution();
long[] ticks = cache.ticks;
int[] tempos = cache.tempos; // in MPQ
int cacheCount = tempos.length;
// optimization to not always go through entire list of tempo events
int snapshotIndex = cache.snapshotIndex;
int snapshotMicro = cache.snapshotMicro;
// walk through all tempo changes and add time for the respective blocks
long us = 0; // microsecond
// discard the cached snapshot if it is out of range or already past the
// requested tick (the walk below only moves forward)
if (snapshotIndex <= 0
|| snapshotIndex >= cacheCount
|| ticks[snapshotIndex] > tick) {
snapshotMicro = 0;
snapshotIndex = 0;
}
if (cacheCount > 0) {
// this implementation needs a tempo event at tick 0!
int i = snapshotIndex + 1;
while (i < cacheCount && ticks[i] <= tick) {
snapshotMicro += ticks2microsec(ticks[i] - ticks[i - 1], tempos[i - 1], resolution);
snapshotIndex = i;
i++;
}
// time accumulated up to the last tempo change before `tick`,
// plus the remainder at that tempo
us = snapshotMicro
+ ticks2microsec(tick - ticks[snapshotIndex],
tempos[snapshotIndex],
resolution);
}
// persist the snapshot so the next (likely nearby) query resumes here
cache.snapshotIndex = snapshotIndex;
cache.snapshotMicro = snapshotMicro;
return us;
}
示例8: getAudioInputStream
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Renders the sequence offline through a software synthesizer and returns
 * the produced audio as a stream. Events from all tracks are merged in
 * tick order and pushed into the synthesizer's receiver with microsecond
 * timestamps; the stream length is capped at the song length plus a
 * 4-second tail.
 *
 * @param seq the sequence to render
 * @return audio stream in {@code format} containing the rendered song
 * @throws InvalidMidiDataException if the synth is unavailable or a tempo event is malformed
 */
private AudioInputStream getAudioInputStream(final Sequence seq)
throws InvalidMidiDataException {
AudioSynthesizer synth = (AudioSynthesizer) new SoftSynthesizer();
AudioInputStream stream;
Receiver recv;
try {
stream = synth.openStream(format, null);
recv = synth.getReceiver();
} catch (MidiUnavailableException e) {
throw new InvalidMidiDataException(e.toString());
}
float divtype = seq.getDivisionType();
Track[] tracks = seq.getTracks();
int[] trackspos = new int[tracks.length]; // next unread event per track
int mpq = 500000; // microseconds per quarter note (default: 120 BPM)
int seqres = seq.getResolution();
long lasttick = 0;
long curtime = 0; // running time in microseconds
while (true) {
// k-way merge: pick the earliest pending event across all tracks
MidiEvent selevent = null;
int seltrack = -1;
for (int i = 0; i < tracks.length; i++) {
int trackpos = trackspos[i];
Track track = tracks[i];
if (trackpos < track.size()) {
MidiEvent event = track.get(trackpos);
if (selevent == null || event.getTick() < selevent.getTick()) {
selevent = event;
seltrack = i;
}
}
}
if (seltrack == -1)
break; // all tracks exhausted
trackspos[seltrack]++;
long tick = selevent.getTick();
// PPQ: accumulate tempo-dependent deltas; SMPTE: absolute conversion
if (divtype == Sequence.PPQ)
curtime += ((tick - lasttick) * mpq) / seqres;
else
curtime = (long) ((tick * 1000000.0 * divtype) / seqres);
lasttick = tick;
MidiMessage msg = selevent.getMessage();
if (msg instanceof MetaMessage) {
// meta events are not sent to the synth; only tempo (0x51) is interpreted
if (divtype == Sequence.PPQ) {
if (((MetaMessage) msg).getType() == 0x51) {
byte[] data = ((MetaMessage) msg).getData();
if (data.length < 3) {
throw new InvalidMidiDataException();
}
mpq = ((data[0] & 0xff) << 16)
| ((data[1] & 0xff) << 8) | (data[2] & 0xff);
}
}
} else {
recv.send(msg, curtime);
}
}
// cap stream length: song length plus 4 seconds for release/reverb tails
long totallen = curtime / 1000000;
long len = (long) (stream.getFormat().getFrameRate() * (totallen + 4));
stream = new AudioInputStream(stream, stream.getFormat(), len);
return stream;
}
示例9: getAudioInputStream
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Renders the sequence offline through a software synthesizer and returns
 * the resulting audio. All tracks are merged in tick order; each non-meta
 * event is delivered to the synth receiver with a microsecond timestamp.
 * The returned stream is truncated to the song length plus a 4-second tail.
 *
 * @param seq the sequence to render
 * @return rendered audio in {@code format}
 * @throws UnsupportedAudioFileException if a tempo meta event is malformed
 * @throws IOException if the synthesizer cannot be opened
 */
public AudioInputStream getAudioInputStream(Sequence seq)
throws UnsupportedAudioFileException, IOException {
AudioSynthesizer synth = (AudioSynthesizer) new SoftSynthesizer();
AudioInputStream stream;
Receiver recv;
try {
stream = synth.openStream(format, null);
recv = synth.getReceiver();
} catch (MidiUnavailableException e) {
throw new IOException(e.toString());
}
float divtype = seq.getDivisionType();
Track[] tracks = seq.getTracks();
int[] trackspos = new int[tracks.length]; // per-track cursor into its events
int mpq = 500000; // microseconds per quarter note until a tempo event says otherwise
int seqres = seq.getResolution();
long lasttick = 0;
long curtime = 0; // microseconds elapsed at the current event
while (true) {
// select the event with the smallest tick among all tracks
MidiEvent selevent = null;
int seltrack = -1;
for (int i = 0; i < tracks.length; i++) {
int trackpos = trackspos[i];
Track track = tracks[i];
if (trackpos < track.size()) {
MidiEvent event = track.get(trackpos);
if (selevent == null || event.getTick() < selevent.getTick()) {
selevent = event;
seltrack = i;
}
}
}
if (seltrack == -1)
break; // no events remain
trackspos[seltrack]++;
long tick = selevent.getTick();
// PPQ accumulates deltas scaled by the current tempo; SMPTE is absolute
if (divtype == Sequence.PPQ)
curtime += ((tick - lasttick) * mpq) / seqres;
else
curtime = (long) ((tick * 1000000.0 * divtype) / seqres);
lasttick = tick;
MidiMessage msg = selevent.getMessage();
if (msg instanceof MetaMessage) {
// only tempo changes (type 0x51) affect rendering; other metas are skipped
if (divtype == Sequence.PPQ) {
if (((MetaMessage) msg).getType() == 0x51) {
byte[] data = ((MetaMessage) msg).getData();
if (data.length < 3) {
throw new UnsupportedAudioFileException();
}
mpq = ((data[0] & 0xff) << 16)
| ((data[1] & 0xff) << 8) | (data[2] & 0xff);
}
}
} else {
recv.send(msg, curtime);
}
}
// truncate to song length + 4 s so note tails are kept but the stream ends
long totallen = curtime / 1000000;
long len = (long) (stream.getFormat().getFrameRate() * (totallen + 4));
stream = new AudioInputStream(stream, stream.getFormat(), len);
return stream;
}
示例10: send
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Streams the whole sequence into the receiver (if any), merging tracks in
 * tick order and timestamping messages in microseconds.
 *
 * @param seq the sequence to play; asserted to be PPQ (tick-based)
 * @param recv destination receiver; may be null to only compute the duration
 * @return total duration in seconds (curtime / SEC2US)
 */
private double send(Sequence seq, Receiver recv) {
float divtype = seq.getDivisionType();
assert (seq.getDivisionType() == Sequence.PPQ);
Track[] tracks = seq.getTracks();
int[] trackspos = new int[tracks.length]; // next unread event per track
int mpq = (int)SEC2US / 2; // 500000 us per quarter = 120 BPM default
int seqres = seq.getResolution();
long lasttick = 0;
long curtime = 0; // microseconds
while (true) {
// merge step: find earliest pending event across tracks
MidiEvent selevent = null;
int seltrack = -1;
for (int i = 0; i < tracks.length; i++) {
int trackpos = trackspos[i];
Track track = tracks[i];
if (trackpos < track.size()) {
MidiEvent event = track.get(trackpos);
if (selevent == null
|| event.getTick() < selevent.getTick()) {
selevent = event;
seltrack = i;
}
}
}
if (seltrack == -1)
break; // done: every track consumed
trackspos[seltrack]++;
long tick = selevent.getTick();
if (divtype == Sequence.PPQ)
curtime += ((tick - lasttick) * mpq) / seqres;
else
curtime = tick; // NOTE(review): non-PPQ path treats ticks as time directly -- unreachable under the assert above
lasttick = tick;
MidiMessage msg = selevent.getMessage();
if (msg instanceof MetaMessage) {
// tempo meta (0x51) updates microseconds-per-quarter; metas are never sent
if (divtype == Sequence.PPQ)
if (((MetaMessage) msg).getType() == 0x51) {
byte[] data = ((MetaMessage) msg).getData();
mpq = ((data[0] & 0xff) << 16)
| ((data[1] & 0xff) << 8) | (data[2] & 0xff);
}
} else {
if(recv != null)
sendMidiMsg(recv, msg, curtime);
}
}
return curtime / SEC2US;
}
示例11: AssumeResolution
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Change resolution (TPQN) without retiming events.
 *
 * Event ticks are copied verbatim into the new sequence; optionally the
 * tempo events are rescaled so the audible song tempo stays the same
 * under the new resolution.
 *
 * @param sourceSeq Sequence to be processed (must be tick-based/PPQ).
 * @param resolution Ticks per quarter note of new sequence.
 * @param adjustTempo true if adjust the tempo value to keep the song tempo.
 * @return New sequence with new resolution.
 * @throws InvalidMidiDataException throw if MIDI data is invalid.
 * @throws UnsupportedOperationException if the source uses SMPTE timing.
 */
public static Sequence AssumeResolution(Sequence sourceSeq, int resolution, boolean adjustTempo) throws InvalidMidiDataException
{
    // sequence must be tick-based
    if (sourceSeq.getDivisionType() != Sequence.PPQ)
    {
        throw new UnsupportedOperationException("SMPTE is not supported.");
    }
    Sequence seq = new Sequence(sourceSeq.getDivisionType(), resolution);
    // tempo scale factor: old TPQN over new TPQN
    double tempoScale = (double) sourceSeq.getResolution() / seq.getResolution();
    // process all input tracks
    for (int trackIndex = 0; trackIndex < sourceSeq.getTracks().length; trackIndex++)
    {
        Track sourceTrack = sourceSeq.getTracks()[trackIndex];
        Track track = seq.createTrack();
        // process all events, keeping their original ticks
        for (int eventIndex = 0; eventIndex < sourceTrack.size(); eventIndex++)
        {
            MidiEvent sourceEvent = sourceTrack.get(eventIndex);
            MidiMessage newMessage = sourceEvent.getMessage();
            if (adjustTempo && newMessage instanceof MetaMessage)
            {
                MetaMessage message = (MetaMessage) newMessage;
                if (message.getType() == MidiUtil.META_TEMPO)
                {
                    byte[] data = message.getData();
                    if (data.length != 3)
                    {
                        throw new InvalidMidiDataException("Illegal tempo event.");
                    }
                    // tempo value is a 24-bit big-endian microseconds-per-quarter
                    int sourceTempo = ((data[0] & 0xff) << 16) | ((data[1] & 0xff) << 8) | (data[2] & 0xff);
                    int newTempo = (int) Math.floor(sourceTempo / tempoScale);
                    byte[] newData = new byte[] { (byte) ((newTempo >> 16) & 0xff), (byte) ((newTempo >> 8) & 0xff), (byte) (newTempo & 0xff) };
                    // BUGFIX: build a fresh MetaMessage instead of calling
                    // setMessage() on the message shared with the source event;
                    // the old code mutated the input sequence's tempo events
                    // as a side effect.
                    newMessage = new MetaMessage(MidiUtil.META_TEMPO, newData, newData.length);
                }
            }
            track.add(new MidiEvent(newMessage, sourceEvent.getTick()));
        }
    }
    return seq;
}
示例12: normalize
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Copies all note-events from seq to track 0, channel 0, starting at offset 4 beats
 *
 * (Might fail if multiple tracks are imported and a track other than #0 has an earlier
 * note than track #0. However, this method is mainly intended for loading internally
 * saved grooved patterns.)
 *
 * @param seq source sequence whose note on/off events are copied
 * @param sequence empty 1-track sequence to be filled
 */
static void normalize(Sequence seq, Sequence sequence) {
int srcRes = seq.getResolution();
int dstRes = sequence.getResolution();
Track track = sequence.getTracks()[0];
boolean firstNote = true;
long shift = 4 * srcRes; // offset start: 4 beats
Track[] srcTracks = seq.getTracks();
for (int i = 0; i < srcTracks.length; i++) {
Track srcTrack = srcTracks[i];
int size = srcTrack.size();
for (int j = 0; j < size; j++) {
MidiEvent ev = srcTrack.get(j);
MidiMessage msg = ev.getMessage();
if (msg instanceof ShortMessage) {
ShortMessage sh = (ShortMessage)msg;
int cmd = sh.getCommand();
if (cmd == ShortMessage.NOTE_ON || cmd == ShortMessage.NOTE_OFF) {
int note = sh.getData1();
int vel = sh.getData2();
long start = ev.getTick();
if (firstNote) {
// NOTE(review): comment says resolution/2 but code adds srcRes/4 -- confirm
shift -= ((start + (srcRes / 4)) / srcRes) * srcRes ; // correct offset by leading number of empty beats (so pattern will always start in first beat (or a little earlier for negative groove-shifts, this is why resolution/2 is added))
firstNote = false;
}
start += shift;
// re-time to the destination resolution if it differs
if (srcRes != dstRes) {
start = translateResolution(start, srcRes, dstRes);
}
// insert new event into target sequence, forced onto channel 0
try {
ShortMessage sm = new ShortMessage();
sm.setMessage(cmd, 0, note, vel);
MidiEvent event = new MidiEvent(sm, start);
track.add(event);
} catch (InvalidMidiDataException imde) {
imde.printStackTrace();
}
}
}
}
}
}
示例13: send
import javax.sound.midi.Sequence; //導入方法依賴的package包/類
/**
 * Streams an entire MIDI sequence into the given receiver, timestamping
 * every message in microseconds. Tracks are merged on the fly by always
 * forwarding the event with the smallest tick; tempo meta events (0x51)
 * update the microseconds-per-quarter value and are not forwarded.
 *
 * @param seq the sequence to send; asserted to be tick-based (PPQ)
 * @param recv the destination receiver; may be null to only compute the duration
 * @return the total duration of the sequence in seconds
 */
public static double send(Sequence seq, Receiver recv)
{
    float divisionType = seq.getDivisionType();
    assert (seq.getDivisionType() == Sequence.PPQ);
    Track[] allTracks = seq.getTracks();
    int[] nextIndex = new int[allTracks.length]; // per-track cursor
    int microsPerQuarter = 500000; // default tempo: 120 BPM
    int ticksPerQuarter = seq.getResolution();
    long previousTick = 0;
    long timeMicros = 0;
    while (true)
    {
        // pick the earliest pending event across all tracks
        MidiEvent earliest = null;
        int earliestTrack = -1;
        for (int i = 0; i < allTracks.length; i++)
        {
            Track candidateTrack = allTracks[i];
            int pos = nextIndex[i];
            if (pos >= candidateTrack.size())
                continue; // this track is exhausted
            MidiEvent candidate = candidateTrack.get(pos);
            if (earliest == null || candidate.getTick() < earliest.getTick())
            {
                earliest = candidate;
                earliestTrack = i;
            }
        }
        if (earliestTrack < 0)
            break; // every track consumed
        nextIndex[earliestTrack]++;
        long tick = earliest.getTick();
        // PPQ accumulates tempo-scaled deltas; the SMPTE branch converts absolutely
        if (divisionType == Sequence.PPQ)
            timeMicros += ((tick - previousTick) * microsPerQuarter) / ticksPerQuarter;
        else
            timeMicros = (long) ((tick * 1000000.0 * divisionType) / ticksPerQuarter);
        previousTick = tick;
        MidiMessage msg = earliest.getMessage();
        if (msg instanceof MetaMessage)
        {
            // only tempo changes are interpreted; no meta event is forwarded
            if (divisionType == Sequence.PPQ && ((MetaMessage) msg).getType() == 0x51)
            {
                byte[] data = ((MetaMessage) msg).getData();
                microsPerQuarter = ((data[0] & 0xff) << 16) | ((data[1] & 0xff) << 8) | (data[2] & 0xff);
            }
        }
        else if (recv != null)
        {
            recv.send(msg, timeMicros);
        }
    }
    return timeMicros / 1000000.0;
}