本文整理汇总了Java中edu.cmu.sphinx.frontend.FrontEnd类的典型用法代码示例。如果您正苦于以下问题:Java FrontEnd类的具体用法?Java FrontEnd怎么用?Java FrontEnd使用的例子?那么, 这里精选的类代码示例或许可以为您提供帮助。
FrontEnd类属于edu.cmu.sphinx.frontend包,在下文中一共展示了FrontEnd类的10个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Java代码示例。
示例1: VoiceActivityDetector
import edu.cmu.sphinx.frontend.FrontEnd; //导入依赖的package包/类
/**
 * Creates a new voice activity detector reading from the given audio stream.
 *
 * The pipeline blocks incoming audio into frames, classifies each frame as
 * speech or non-speech, marks speech start/end, and drops non-speech data.
 *
 * @param ais input stream like LocalMicrophone or SocketMicrophone
 * @param AisName name of the microphone LocalMicrophone or SocketMicrophone
 */
public VoiceActivityDetector(AudioInputStream ais, String AisName) {
    super(null, new AudioFormat(16000, 16, 1, true, false), 0);
    // Audio source delivering 3200 bytes per read.
    AudioFileDataSource audioDataSource = new AudioFileDataSource(3200, null);
    audioDataSource.setInputStream(ais, AisName);
    // Typed pipeline: no raw types, so no @SuppressWarnings needed.
    ArrayList<DataProcessor> pipeline = new ArrayList<DataProcessor>();
    pipeline.add(audioDataSource);
    // Blocks data into frames.
    pipeline.add(new DataBlocker(10));
    // Classifies speech frames.
    pipeline.add(new SpeechClassifier(10, 0.015, 10, 0));
    // Marks start/end of speech regions.
    pipeline.add(new SpeechMarker(200, 300, 100, 30, 100, 15.0));
    // Removes non-speech frames from the stream.
    pipeline.add(new NonSpeechDataFilter());
    frontend = new FrontEnd(pipeline);
}
示例2: VQVADPipeline
import edu.cmu.sphinx.frontend.FrontEnd; //导入依赖的package包/类
/**
 * Creates the VQVAD front-end pipeline on top of the given audio source.
 *
 * The learning rate determines how a newly trained model is merged
 * with the existing one according to:
 *
 *   model_values = lambda * new_model_values + (1-lambda) * old_model_values
 *
 * That means if the learning rate is 0, no new values are applied and
 * the model is unchanged. If the learning rate is 1, the new model replaces
 * the old model. Intermediate values merge the values of both models.
 *
 * To disable training, set the learning rate to 0.
 *
 * @param src audio source feeding the pipeline
 * @param learning_rate model merge factor in [0, 1]; 0 disables training
 */
public VQVADPipeline(AudioFileDataSource src, double learning_rate) {
    ArrayList<DataProcessor> pipeline = new ArrayList<DataProcessor>();
    // Framing parameters: 30 ms windows shifted by 10 ms.
    float frame_length_ms = 30;
    float frame_shift_ms = 10;
    double lower_freq = 0;
    trainer = new VQVADTrainer();
    pipeline.add(src);
    pipeline.add(new Dither());
    pipeline.add(new RaisedCosineWindower(0, frame_length_ms, frame_shift_ms));
    // 27 mel filters spanning lower_freq up to the Nyquist frequency.
    pipeline.add(new MFCCPipeline(lower_freq, src.getSampleRate()/2, 27));
    pipeline.add(trainer);
    pipeline.add(new VQVADClassifier(learning_rate));
    // Undo the frame overlap, then smooth short classification gaps.
    pipeline.add(new FrameOverlapFilter(frame_length_ms, frame_shift_ms));
    pipeline.add(new GapSmoothing(12));
    frontend = new FrontEnd(pipeline);
}
示例3: processFile
import edu.cmu.sphinx.frontend.FrontEnd; //导入依赖的package包/类
/**
 * Runs the configured endpointer front end over a single audio file;
 * the configured WavWriter emits the endpointed audio as a side effect.
 *
 * @param inputFile path of the audio file to process
 * @param outputFile output file pattern handed to the WavWriter
 * @param cm configuration manager providing the pipeline components
 * @throws MalformedURLException if the input path cannot be resolved
 * @throws IOException on read/write failure
 */
static private void processFile(String inputFile, String outputFile,
        ConfigurationManager cm) throws MalformedURLException, IOException {
    FrontEnd frontend = (FrontEnd) cm.lookup("endpointer");
    AudioFileDataSource dataSource =
            (AudioFileDataSource) cm.lookup("audioFileDataSource");
    System.out.println(inputFile);
    dataSource.setAudioFile(new File(inputFile), null);
    WavWriter wavWriter = (WavWriter) cm.lookup("wavWriter");
    wavWriter.setOutFilePattern(outputFile);
    frontend.initialize();
    // Drain the pipeline; writing happens inside the WavWriter stage.
    while (frontend.getData() != null) {
        // intentionally empty — pulling data is the side effect
    }
}
示例4: setupFrontend
import edu.cmu.sphinx.frontend.FrontEnd; //导入依赖的package包/类
/**
 * Assembles the default front end: VQVAD classification followed by
 * speech start/end marking.
 *
 * @param audioDataSource the audio source feeding the pipeline
 * @return a FrontEnd wrapping the VQVAD pipeline and a speech marker
 */
protected FrontEnd setupFrontend(AudioFileDataSource audioDataSource) {
    final ArrayList<DataProcessor> stages = new ArrayList<DataProcessor>();
    stages.add(new VQVADPipeline(audioDataSource));             // VQVAD classification
    stages.add(new SpeechMarker(200, 400, 100, 30, 100, 15.0)); // mark speech boundaries
    return new FrontEnd(stages);
}
示例5: main
import edu.cmu.sphinx.frontend.FrontEnd; //导入依赖的package包/类
/**
 * Runs the VQVAD pipeline over a single WAV file and dumps the
 * per-frame classification to /tmp/vqvad_classification_result.
 *
 * @param args unused
 * @throws IOException if the audio file cannot be read
 */
public static void main(String[] args) throws IOException {
    // Input recording; point this URL at another file from the train set to test it.
    URL path = new URL("file:///home/nemo/Documents/Studium/Master/study/code/VQVAD/trainset/noizeus_train/clean/sp07.wav");

    AudioFileDataSource source = new AudioFileDataSource(3200, null);
    source.setAudioFile(path, "in");

    ArrayList<DataProcessor> stages = new ArrayList<DataProcessor>();
    stages.add(new VQVADPipeline(source));
    stages.add(new ClassificationResultDumper(path.getFile(), 30, 10, "/tmp/vqvad_classification_result"));

    FrontEnd frontend = new FrontEnd(stages);

    // Drain the pipeline until the end-of-data signal arrives.
    Data d = frontend.getData();
    while (d != null && !(d instanceof DataEndSignal)) {
        d = frontend.getData();
    }
}
示例6: main
import edu.cmu.sphinx.frontend.FrontEnd; //导入依赖的package包/类
/**
 * Runs a simple energy-based speech classifier over a single WAV file and
 * dumps the per-frame classification to /tmp/vqvad_classification_result.
 *
 * @param args unused
 * @throws MalformedURLException if the hard-coded file URL is malformed
 */
public static void main(String[] args) throws MalformedURLException {
    // Input recording; point this URL at another file to test it.
    URL path = new URL("file:///home/nemo/Documents/Studium/Master/study/code/VQVAD/trainset/noizeus_train/train_5dB/sp07_train_sn5.wav");

    AudioFileDataSource source = new AudioFileDataSource(3200, null);
    source.setAudioFile(path, "in");

    ArrayList<DataProcessor> stages = new ArrayList<DataProcessor>();
    stages.add(source);
    stages.add(new DataBlocker(10));                    // block audio into frames
    stages.add(new SpeechClassifier(10, 0.015, 10, 0)); // energy-based speech detection
    stages.add(new GapSmoothing(12));                   // smooth short classification gaps
    stages.add(new ClassificationResultDumper(path.getFile(), 10, 10, "/tmp/vqvad_classification_result"));

    FrontEnd frontend = new FrontEnd(stages);

    // Drain the pipeline until the end-of-data signal arrives.
    Data d = frontend.getData();
    while (d != null && !(d instanceof DataEndSignal)) {
        d = frontend.getData();
    }
}
示例7: MFCCPipeline
import edu.cmu.sphinx.frontend.FrontEnd; //导入依赖的package包/类
/**
 * Builds the MFCC extraction sub-pipeline: FFT, spectral denoising,
 * mel filter bank, and DCT.
 *
 * @param minFreq lowest mel filter frequency
 * @param maxFreq highest mel filter frequency
 * @param numFilters number of mel filters
 */
public MFCCPipeline(double minFreq, double maxFreq, int numFilters) {
    singleDataBuffer = new SingleDataBuffer();
    denoisedFrameBuffer = new SingleDataBuffer();

    final ArrayList<DataProcessor> stages = new ArrayList<DataProcessor>();
    stages.add(singleDataBuffer);              // buffers the raw frame
    stages.add(new DiscreteFourierTransform());
    stages.add(new Denoise(0.7, 0.995, 0.5));  // spectral noise reduction
    stages.add(denoisedFrameBuffer);           // buffers the denoised frame
    stages.add(new MelFrequencyFilterBank(minFreq, maxFreq, numFilters));
    stages.add(new DiscreteCosineTransform2(numFilters, 12)); // 12 cepstral coefficients
    frontend = new FrontEnd(stages);
}
示例8: getFrontEndProcessor
import edu.cmu.sphinx.frontend.FrontEnd; //导入依赖的package包/类
/**
 * Returns the next {@code DataProcessor} of type {@code predecClass} that
 * precedes {@code dp} in the front-end chain, descending into nested
 * {@code FrontEnd}s via their last processor.
 *
 * @param dp processor at which to start the backwards search
 * @param predecClass type of predecessor to look for
 * @return the matching predecessor, or {@code null} if none is found
 */
public static <T extends DataProcessor> T getFrontEndProcessor(DataProcessor dp, Class<T> predecClass) {
    DataProcessor current = dp;
    while (!predecClass.isInstance(current)) {
        // A FrontEnd wraps a sub-chain; step into its last stage rather
        // than skipping over the whole wrapper.
        current = (current instanceof FrontEnd)
                ? ((FrontEnd) current).getLastDataProcessor()
                : current.getPredecessor();
        if (current == null) {
            return null;
        }
    }
    return predecClass.cast(current);
}
示例9: SpectrogramPanel
import edu.cmu.sphinx.frontend.FrontEnd; //导入依赖的package包/类
/**
 * Creates a new SpectrogramPanel for the given AudioData.
 *
 * @param frontEnd the front end to use
 * @param dataSource the source of audio
 * @param audioData the AudioData
 */
public SpectrogramPanel(FrontEnd frontEnd,
        StreamDataSource dataSource, AudioData audioData) {
    this.audio = audioData;
    this.frontEnd = frontEnd;
    this.dataSource = dataSource;
    // Recompute the spectrogram whenever the underlying audio changes.
    audio.addChangeListener(event -> computeSpectrogram());
}
示例10: main
import edu.cmu.sphinx.frontend.FrontEnd; //导入依赖的package包/类
/**
 * Runs the VQVAD pipeline over a sequence of recordings, carrying the
 * trained model over from one file to the next, and dumps each file's
 * per-frame classification to /tmp/vqvad_classification_result.
 *
 * @param args unused
 * @throws IOException if a recording cannot be read
 */
public static void main(String[] args) throws IOException {
    // Common prefix of all test recordings.
    final String base = "file:///home/nemo/Documents/Studium/Master/study/code/VQVAD/trainset/";
    URL[] paths = new URL[]{
            new URL(base + "noizeus_train/clean/sp12.wav"),
            new URL(base + "noizeus_train/clean/sp03.wav"),
            new URL(base + "noizeus_train/clean/sp05.wav"),
            new URL(base + "noizeus_train/clean/sp06.wav"),
            new URL(base + "noizeus_train/clean/sp07.wav"),
            new URL(base + "noizeus_train/train_10dB/sp05_train_sn10.wav"),
            new URL(base + "noizeus_train/train_10dB/sp06_train_sn10.wav"),
            new URL(base + "noizeus_train/train_10dB/sp07_train_sn10.wav"),
            new URL(base + "noizeus_train/train_5dB/sp07_train_sn5.wav"),
            new URL(base + "sp07_train_sn10_long.wav"),
            new URL(base + "noizeus_train/train_10dB/sp12_train_sn10.wav"),
            new URL(base + "noizeus_train/train_5dB/sp12_train_sn5.wav"),
            new URL(base + "noizeus_train/car_10dB/sp12_car_sn10.wav"),
            new URL(base + "sp12_train_sn10_then_noise_only.wav"),
            new URL(base + "sp12_train_sn10_preceeding_noise.wav"),
            new URL(base + "sp12_train_sn10_preceeding_noise_sp12_train_sn10_again.wav"),
    };

    VQVADModel lastModel = null;
    for (URL path : paths) {
        System.out.println(path);
        AudioFileDataSource source = new AudioFileDataSource(3200, null);
        source.setAudioFile(path, "in");

        VQVADPipeline vadpipe = new VQVADPipeline(source);
        // Seed each run with the model trained on the previous file.
        if (lastModel != null) {
            vadpipe.setStartingModel(lastModel);
        }

        ArrayList<DataProcessor> stages = new ArrayList<DataProcessor>();
        stages.add(vadpipe);
        stages.add(new ClassificationResultDumper(path.getFile(), 30, 10, "/tmp/vqvad_classification_result"));
        FrontEnd frontend = new FrontEnd(stages);

        // Drain the pipeline until the end-of-data signal arrives.
        Data d = frontend.getData();
        while (d != null && !(d instanceof DataEndSignal)) {
            d = frontend.getData();
        }
        lastModel = vadpipe.getCurrentModel();
    }
}