This article collects typical usage examples of the Python method preprocessor.Preprocessor.load_sample. If you have been wondering how Preprocessor.load_sample works or how to call it in your own code, the curated examples here may help. You can also explore usage examples of the containing class, preprocessor.Preprocessor.
Below, 1 code example of the Preprocessor.load_sample method is shown; examples are sorted by popularity by default. You can upvote examples you like or find useful; your ratings help the system recommend better Python code examples.
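Judging from the example below, load_sample takes a file path and returns an indexable sample whose first element holds the signal data, which is then passed on to Preprocessor.preprocess. A minimal call-pattern sketch under that assumption (the constructor arguments and the path are illustrative placeholders, not part of the original example):

from preprocessor import Preprocessor
import numpy as np

some_path = "path/to/recording.wav"           # hypothetical input file
preprocessor = Preprocessor()                 # constructor arguments not shown in the example
sample = preprocessor.load_sample(some_path)  # load one sample from disk
if np.max(sample[0]) > 0:                     # sample[0] appears to hold the signal data
    spec = preprocessor.preprocess(sample)    # turn the raw sample into a spectrogram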
Example 1: Bird
# Required import: from preprocessor import Preprocessor [as alias]
# Alternatively: from preprocessor.Preprocessor import load_sample [as alias]
# ......... part of the code is omitted here .........
# loads labels and metadata, shuffles the files, splits them into
# training and validation sets, and computes per-class weights
def load_data(self):
    (paths, labels) = self.load_labels(self.label_path, self.label_bg_path)
    self.load_meta_data(self.meta_path)
    nr_files = len(paths)
    # shuffle paths and labels with the same random permutation
    mask = np.arange(nr_files)
    np.random.shuffle(mask)
    train_size = int(nr_files * (1 - self.train_val_ratio))
    paths = np.array(paths)[mask]
    labels = np.array(labels)[mask]
    # inverse-frequency class weights: rare classes get larger weights
    self.class_weights = {}
    for i in range(self.nb_species):
        weight_mask = labels == str(i)  # labels are stored as strings
        nb_class = np.sum(weight_mask)
        if nb_class == 0:
            print("No data for class", str(i))
            continue
        self.class_weights[i] = nr_files / nb_class
    self.paths = paths[:train_size]
    self.labels = labels[:train_size]
    self.nr_files = train_size
    self.val_paths = paths[train_size:]
    self.val_labels = labels[train_size:]
    # round the validation count down to a whole number of batches
    self.nr_val_files = (nr_files - train_size) // self.batch_size * self.batch_size
# endless generator that yields one training batch at a time
def train_data_generator(self):
    while True:
        specs = []
        labels = []
        for i in range(self.batch_size):
            (spec, label) = self.get_random_training_sample()
            # add a trailing channel axis: (freq, time) -> (freq, time, 1)
            specs.append(np.array([spec]).transpose((1, 2, 0)))
            labels.append(np.array([label]))
        yield (np.array(specs), np.array(labels))
# generator over the fixed validation set; yields full batches and,
# at the end, one final partial batch if any samples remain
def val_data_generator(self):
    specs = []
    labels = []
    for val_path, val_label in zip(self.val_paths, self.val_labels):
        sample = self.preprocessor.load_sample(val_path)
        # skip silent/empty recordings
        if np.max(sample[0]) <= 0:
            continue
        spec = self.preprocessor.preprocess(sample)
        # validation data is not augmented:
        # spec = self.augmenter.augment_transform(spec, val_label)
        specs.append(np.array([spec[0]]).transpose((1, 2, 0)))
        labels.append(np.array([val_label]))
        if len(specs) == self.batch_size:
            yield (np.array(specs), np.array(labels))
            specs = []
            labels = []
    if len(specs) > 0:
        yield (np.array(specs), np.array(labels))
# loads a single new training sample from disk, then preprocesses
# and augments it; silent samples are rejected and a new one is drawn
def get_random_training_sample(self):
    r = random.randint(0, self.nr_files - 1)
    path = self.paths[r]
    label = self.labels[r]
    sample = self.preprocessor.load_sample(path)
    if np.max(sample[0]) <= 0:
        return self.get_random_training_sample()
    spec = self.preprocessor.preprocess(sample)
    spec = self.augmenter.augment_transform(spec, label)
    return (spec[0], label)
# builds the model and starts the training process
def train(self):
    self.load_data()
    self.model = models.model_fg_bg(self.nb_species,
                                    (self.nb_f_steps, self.nb_t_steps))
    sgd = SGD(lr=0.01, decay=0.0, momentum=0.9, nesterov=True)
    self.model.compile(loss='sparse_categorical_crossentropy',
                       optimizer=sgd, metrics=['accuracy'])
    self.model.summary()
    # save a checkpoint after every epoch, named by epoch and validation loss
    modelCheckpoint = keras.callbacks.ModelCheckpoint(
        "/" + self.training_description + "/{epoch:02d}-{val_loss:.2f}.hdf5")
    # reduce the learning rate by 10x when val_loss stops improving
    reduce_lr = keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.1,
                                                  patience=2, verbose=1,
                                                  min_lr=0.0001, epsilon=1e-5)
    # legacy Keras 1.x generator API (samples_per_epoch, nb_epoch, ...)
    history = self.model.fit_generator(self.train_data_generator(),
                                       samples_per_epoch=self.nr_files,
                                       nb_epoch=self.nr_epoch, verbose=1,
                                       max_q_size=self.batch_size,
                                       validation_data=self.val_data_generator(),
                                       nb_val_samples=self.nr_val_files,
                                       nb_worker=4, pickle_safe=True,
                                       callbacks=[modelCheckpoint, reduce_lr])
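A hedged usage sketch of the class above: the excerpt only shows methods of Bird, so its constructor and configuration attributes (label paths, batch size, epoch count, and so on) are not part of the original example. The lines below are illustrative assumptions, not the real signature:

trainer = Bird()          # constructor not shown; assumed here to take no arguments
trainer.batch_size = 32   # illustrative values for attributes the excerpt
trainer.nr_epoch = 50     # reads but never shows being set
trainer.train()           # loads the data, builds the model, starts training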