This article collects typical usage examples of the Python method pipeline.Pipeline.process. If you have been struggling with questions such as: how exactly is Python Pipeline.process used? What does a Pipeline.process call look like in practice? Then the curated method examples here may help. You can also explore further usage examples of the containing class, pipeline.Pipeline.
A total of 3 code examples of the Pipeline.process method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: run_batch_mode
# Required import: from pipeline import Pipeline [as alias]
# Or alternatively: from pipeline.Pipeline import process [as alias]
def run_batch_mode(pipeline_name, image_name):
    # Requires `import os`; ExtensionLoader and the get_* path helpers are
    # project-specific imports not shown in this excerpt
    ext_loader = ExtensionLoader()
    pipeline = Pipeline(ext_loader.cats_container)
    # Resolve the pipeline definition (JSON) and the input image from the assets directory
    pipeline_url = get_assets_resource_path_pipelines(pipeline_name)
    image_url = get_assets_resource_path_image(image_name)
    # Output directory: <batch output path>/<image stem>_<pipeline stem>
    directory = os.path.join(get_batch_mode_output_path(),
                             image_name.split(".")[0] + "_" + pipeline_name.split(".")[0])
    pipeline.load_pipeline_json(pipeline_url)
    pipeline.set_input(image_url)
    if not os.path.exists(directory):
        os.makedirs(directory)
    pipeline.set_output_dir(directory)
    pipeline.process()
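A hypothetical invocation of the helper above; the pipeline definition edges.json and the image cat.png are placeholder names for illustration, not taken from the source:
run_batch_mode("edges.json", "cat.png")
# the processed results would be written to <batch output path>/cat_edges/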
Example 2: main
# Required import: from pipeline import Pipeline [as alias]
# Or alternatively: from pipeline.Pipeline import process [as alias]
def main():
    # Also requires: import sys, glob, logging, plus the project modules providing
    # Config, BrianTransducer, the pitch extractors, and CsvExporter
    # Read input file path from command line argument
    if len(sys.argv) != 2:
        print('Please use the application as follows: python main.py <filepath>.wav')
        sys.exit(2)
    fpath = sys.argv[1]
    # If the path ends with .wav, only that file is processed;
    # otherwise it is treated as a directory of .wav files to batch-process
    audio_files = []
    if fpath.endswith('.wav'):
        audio_files.append(fpath)
    else:
        audio_files.extend(glob.glob(fpath + '/*.wav'))
    # TODO: Compare human/model recognition
    # TODO: Maybe save (for instance) spectral images of sound_generation that failed
    # Set up logging
    logging.basicConfig(filename='pitch_perception.log', level=logging.INFO)
    n_channels = int(Config.get_config_option('n_channels'))
    transducer = BrianTransducer(n_channels)
    # Map config keys to the available pitch extractors
    available_pitch_extractors = {'naive': NaivePitchExtractor, 'spectral': SpectralPitchExtractor(n_channels),
                                  'temporal': TemporalPitchExtractor, 'xcorr': XcorrPitchExtractor(n_channels)}
    pitch_extractor = available_pitch_extractors[Config.get_config_option('pitch_extraction')]
    # Init pipeline
    pipeline = Pipeline(transducer, pitch_extractor, test_mode=False)
    # Collect results
    results = []
    for af in audio_files:
        # Run processing
        pitch = pipeline.process(af)
        log_string = 'File: %s\tPitch: %i' % (af, pitch)
        logging.info(log_string)
        # Output final pitch
        print(log_string)
        results.append((af, pitch))
    # Export results
    CsvExporter.export('results.csv', pitch_extractor.__class__.__name__, results)
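A minimal sketch of driving the same pipeline directly, without the command-line wrapper; the channel count, extractor choice, and file name are assumptions for illustration, not taken from the source:
n_channels = 64  # assumed value; normally read from the config
transducer = BrianTransducer(n_channels)
pitch_extractor = XcorrPitchExtractor(n_channels)
pipeline = Pipeline(transducer, pitch_extractor, test_mode=False)
pitch = pipeline.process('a4_reference.wav')  # hypothetical input file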
Example 3: TopsyReader
# Required import: from pipeline import Pipeline [as alias]
# Or alternatively: from pipeline.Pipeline import process [as alias]
import sys

from tokenizer import Tokenizer
from normalizer import Normalizer
from languageTagger import LanguageTagger
from tokenTagger import CommonTwitterTokenTagger, PunctuationTagger, StopwordTagger, NamedEntityTagger
from casConsumer import DefaultWriter
# TopsyReader, TwitterReader, RawReader and the joinp path helper come from
# project modules whose import lines are not part of this excerpt

input_directory = sys.argv[1]
output_directory = sys.argv[2]
corpus = sys.argv[3]

# Readers for the three input sources
topsy_reader = TopsyReader(joinp(input_directory, "topsy"))
twitter_reader = TwitterReader(joinp(input_directory, "twitter"))
raw_reader = RawReader(joinp(input_directory, "raw"))

# Annotators applied to every document
lang_tagger = LanguageTagger()
tokenizer = Tokenizer()
normalizer = Normalizer()
ner_tagger = NamedEntityTagger("preprocessor/config/data/multiwordlist")
stopword_tagger = StopwordTagger("preprocessor/config/data/german_stopwords")
punctuation_tagger = PunctuationTagger()
other_tagger = CommonTwitterTokenTagger()
writer = DefaultWriter(output_directory, corpus)

# One pipeline per reader; the annotators and writer are shared
topsy_pipe = Pipeline(topsy_reader, lang_tagger, tokenizer, normalizer, ner_tagger, stopword_tagger,
                      punctuation_tagger, other_tagger, writer)
twitter_pipe = Pipeline(twitter_reader, lang_tagger, tokenizer, normalizer, ner_tagger, stopword_tagger,
                        punctuation_tagger, other_tagger, writer)

counter = twitter_pipe.process()
counter = topsy_pipe.process(counter=counter)
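A short note on the final two calls: process() apparently returns a running document counter, and handing it to the second pipeline via counter=counter lets the Topsy documents continue the numbering started by the Twitter run, so both corpora end up in one consecutive ID space (an inference from the call signature, not stated in the source).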