本文整理匯總了Python中utils.load方法的典型用法代碼示例。如果您正苦於以下問題:Python utils.load方法的具體用法?Python utils.load怎麽用?Python utils.load使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在類utils的用法示例。
在下文中一共展示了utils.load方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: parse_cycles
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def parse_cycles():
    """Build the per-cycle search configuration list from the global ``args``.

    Cycle ``i`` grows the base network by ``args.add_layers[i]`` layers and
    ``args.add_width[i]`` initial channels.  A checkpoint load is only
    attempted when the network dimensions are unchanged from the previous
    cycle (a checkpoint for a differently-shaped net would not fit).

    Returns:
        list: one ``Cycle`` per configured search cycle.
    """
    logging.debug(locals())
    # All per-cycle option lists must be the same length.
    assert len(args.add_width) == len(args.add_layers) == len(args.dropout_rate) == len(args.num_to_keep)
    assert len(args.add_width) == len(args.num_morphs) == len(args.grace_epochs) == len(args.epochs)
    cycles = []
    for i in range(len(args.add_width)):
        # Never try to load a checkpoint into the very first cycle.
        try_load = args.try_load and i > 0
        net_layers = args.layers + int(args.add_layers[i])
        net_init_c = args.init_channels + int(args.add_width[i])
        if cycles and try_load:
            # Checkpoint only compatible when the architecture did not change.
            if cycles[-1].net_layers != net_layers or cycles[-1].net_init_c != net_init_c:
                try_load = False
        cycles.append(Cycle(
            num=i,
            net_layers=net_layers,  # reuse the values computed above (was duplicated expressions)
            net_init_c=net_init_c,
            net_dropout=float(args.dropout_rate[i]),
            ops_keep=args.num_to_keep[i],
            epochs=args.epochs[i],
            grace_epochs=args.grace_epochs[i] if not args.test else 0,
            morphs=args.num_morphs[i],
            init_morphed=try_load,
            load=try_load,
            is_last=(i == len(args.num_to_keep) - 1)))
    return cycles
示例2: evaluate
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def evaluate(args, dataset, model, instruments):
    """Run BSS-eval (SDR/ISR/SIR/SAR) for each song in *dataset*.

    Returns a list with one dict per song, mapping instrument name to its
    metric dict.
    """
    results = []
    model.eval()
    with torch.no_grad():
        for example in dataset:
            print("Evaluating " + example["mix"])
            # Reference stems at their original sample rate and channel count.
            references = np.stack(
                [utils.load(example[inst], sr=None, mono=False)[0].T for inst in instruments])
            # Separate the mixture, then order the estimates like the references.
            estimates_by_name = predict_song(args, example["mix"], model)
            estimates = np.stack([estimates_by_name[name].T for name in instruments])
            SDR, ISR, SIR, SAR, _ = museval.metrics.bss_eval(references, estimates)
            song = {name: {"SDR": SDR[idx], "ISR": ISR[idx], "SIR": SIR[idx], "SAR": SAR[idx]}
                    for idx, name in enumerate(instruments)}
            results.append(song)
    return results
示例3: _get_annotation
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def _get_annotation(self, label_path: str) -> dict:
    """Parse one ICDAR-style label file into polygons, texts and ignore flags.

    Each line is ``x1,y1,...,x4,y4,text``.  Boxes with non-positive area are
    dropped.  The three output lists are always kept the same length.

    :param label_path: path to the UTF-8 annotation file
    :return: dict with keys 'text_polys' (ndarray), 'texts', 'ignore_tags'
    """
    boxes = []
    texts = []
    ignores = []
    with open(label_path, encoding='utf-8', mode='r') as f:
        for line in f.readlines():
            # Strip BOM markers some annotation tools prepend to the line.
            params = line.strip().strip('\ufeff').strip('\xef\xbb\xbf').split(',')
            try:
                box = order_points_clockwise(np.array(list(map(float, params[:8]))).reshape(-1, 2))
                # Read the label BEFORE appending: the original appended the box
                # first, so a missing text field raised IndexError and left
                # `boxes` one element longer than `texts`/`ignores`.
                label = params[8]
                if cv2.contourArea(box) > 0:
                    boxes.append(box)
                    texts.append(label)
                    ignores.append(label in self.ignore_tags)
            except (ValueError, IndexError):
                # Malformed line: non-numeric coordinates or too few fields.
                print('load label failed on {}'.format(label_path))
    data = {
        'text_polys': np.array(boxes),
        'texts': texts,
        'ignore_tags': ignores,
    }
    return data
示例4: main
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def main():
    """Resume ImageNet training from the checkpoint in ``args.output_dir``."""
    if not torch.cuda.is_available():
        logging.info('No GPU found!')
        sys.exit(1)
    # Seed every RNG source for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = True
    logging.info("Args = %s", args)
    # Restore training state; counters start at zero when no checkpoint exists.
    _, model_state_dict, epoch, step, optimizer_state_dict, best_acc_top1 = utils.load(args.output_dir)
    train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = build_imagenet(
        model_state_dict, optimizer_state_dict, epoch=epoch - 1)
    while epoch < args.epochs:
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj, step = train(train_queue, model, optimizer, step, train_criterion)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_acc_top5, valid_obj = valid(valid_queue, model, eval_criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)
        epoch += 1
        # Checkpoint after every epoch, flagging new best top-1 accuracy.
        is_best = valid_acc_top1 > best_acc_top1
        if is_best:
            best_acc_top1 = valid_acc_top1
        utils.save(args.output_dir, args, model, epoch, step, optimizer, best_acc_top1, is_best)
示例5: main
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def main():
    """Resume training on ``args.dataset`` with fully deterministic cuDNN."""
    if not torch.cuda.is_available():
        logging.info('No GPU found!')
        sys.exit(1)
    # Seed python, numpy and every CUDA device for reproducibility.
    random.seed(args.seed)
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = False
    cudnn.deterministic = True
    # Total optimizer steps, assuming a 50k-sample training set.
    args.steps = int(np.ceil(50000 / args.batch_size)) * args.epochs
    logging.info("Args = %s", args)
    _, model_state_dict, epoch, step, optimizer_state_dict, best_acc_top1 = utils.load(args.output_dir)
    builder = get_builder(args.dataset)
    train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = builder(
        model_state_dict, optimizer_state_dict, epoch=epoch - 1)
    while epoch < args.epochs:
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj, step = train(train_queue, model, optimizer, step, train_criterion)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_obj = valid(valid_queue, model, eval_criterion)
        logging.info('valid_acc %f', valid_acc_top1)
        epoch += 1
        # Persist a checkpoint each epoch, marking new best top-1 accuracy.
        is_best = valid_acc_top1 > best_acc_top1
        if is_best:
            best_acc_top1 = valid_acc_top1
        utils.save(args.output_dir, args, model, epoch, step, optimizer, best_acc_top1, is_best)
示例6: main
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def main():
    """Resume training on ``args.dataset`` with cuDNN autotuning enabled."""
    if not torch.cuda.is_available():
        logging.info('No GPU found!')
        sys.exit(1)
    # Seed numpy and every CUDA device for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = True
    # Total optimizer steps, assuming a 50k-sample training set.
    args.steps = int(np.ceil(50000 / args.batch_size)) * args.epochs
    logging.info("Args = %s", args)
    _, model_state_dict, epoch, step, optimizer_state_dict, best_acc_top1 = utils.load(args.output_dir)
    builder = get_builder(args.dataset)
    train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = builder(
        model_state_dict, optimizer_state_dict, epoch=epoch - 1)
    while epoch < args.epochs:
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj, step = train(train_queue, model, optimizer, step, train_criterion)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_obj = valid(valid_queue, model, eval_criterion)
        logging.info('valid_acc %f', valid_acc_top1)
        epoch += 1
        # Checkpoint every epoch, flagging new best top-1 accuracy.
        is_best = valid_acc_top1 > best_acc_top1
        if is_best:
            best_acc_top1 = valid_acc_top1
        utils.save(args.output_dir, args, model, epoch, step, optimizer, best_acc_top1, is_best)
示例7: main
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def main():
    """Evaluate the pretrained genotype ``args.arch`` on the CIFAR-10 test set."""
    if not torch.cuda.is_available():
        logging.info('no gpu device available')
        sys.exit(1)
    # Pin the GPU and seed all RNG sources.
    np.random.seed(args.seed)
    torch.cuda.set_device(args.gpu)
    cudnn.benchmark = True
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    logging.info('gpu device = %d' % args.gpu)
    logging.info("args = %s", args)
    # Look up the genotype by name and rebuild the trained network on GPU.
    genotype = eval("genotypes.%s" % args.arch)
    model = Network(args.init_channels, CIFAR_CLASSES, args.layers, args.auxiliary, genotype).cuda()
    utils.load(model, args.model_path)
    logging.info("param size = %fMB", utils.count_parameters_in_MB(model))
    criterion = nn.CrossEntropyLoss().cuda()
    _, test_transform = utils._data_transforms_cifar10(args)
    test_data = dset.CIFAR10(root=args.data, train=False, download=True, transform=test_transform)
    test_queue = torch.utils.data.DataLoader(
        test_data, batch_size=args.batch_size, shuffle=False, pin_memory=True, num_workers=2)
    model.drop_path_prob = args.drop_path_prob
    test_acc, test_obj = infer(test_queue, model, criterion)
    logging.info('test_acc %f', test_acc)
示例8: main
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def main():
    """Resume ImageNet training with deterministic cuDNN kernels."""
    if not torch.cuda.is_available():
        logging.info('No GPU found!')
        sys.exit(1)
    # Seed numpy and every CUDA device for reproducibility.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    torch.cuda.manual_seed(args.seed)
    torch.cuda.manual_seed_all(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = False
    cudnn.deterministic = True
    logging.info("Args = %s", args)
    # Restore training state; counters start at zero without a checkpoint.
    _, model_state_dict, epoch, step, optimizer_state_dict, best_acc_top1 = utils.load(args.output_dir)
    train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = build_imagenet(
        model_state_dict, optimizer_state_dict, epoch=epoch - 1)
    while epoch < args.epochs:
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj, step = train(train_queue, model, optimizer, step, train_criterion)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_acc_top5, valid_obj = valid(valid_queue, model, eval_criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)
        epoch += 1
        # Checkpoint after each epoch, flagging new best top-1 accuracy.
        is_best = valid_acc_top1 > best_acc_top1
        if is_best:
            best_acc_top1 = valid_acc_top1
        utils.save(args.output_dir, args, model, epoch, step, optimizer, best_acc_top1, is_best)
示例9: main
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def main():
    """Resume ImageNet training (deterministic cuDNN, single-device seeding)."""
    if not torch.cuda.is_available():
        logging.info('No GPU found!')
        sys.exit(1)
    # Seed numpy, torch and the current CUDA device; cuDNN runs deterministically.
    np.random.seed(args.seed)
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    cudnn.benchmark = False
    cudnn.deterministic = True
    torch.cuda.manual_seed(args.seed)
    logging.info("Args = %s", args)
    _, model_state_dict, epoch, step, optimizer_state_dict, best_acc_top1 = utils.load(args.output_dir)
    train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = build_imagenet(
        model_state_dict, optimizer_state_dict, epoch=epoch - 1)
    while epoch < args.epochs:
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj, step = train(train_queue, model, optimizer, step, train_criterion)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_acc_top5, valid_obj = valid(valid_queue, model, eval_criterion)
        logging.info('valid_acc_top1 %f', valid_acc_top1)
        logging.info('valid_acc_top5 %f', valid_acc_top5)
        epoch += 1
        # Save a checkpoint every epoch, marking new best top-1 accuracy.
        is_best = valid_acc_top1 > best_acc_top1
        if is_best:
            best_acc_top1 = valid_acc_top1
        utils.save(args.output_dir, args, model, epoch, step, optimizer, best_acc_top1, is_best)
示例10: main
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def main():
    """Resume training on ``args.dataset`` (benchmark off, single-device seed)."""
    if not torch.cuda.is_available():
        logging.info('No GPU found!')
        sys.exit(1)
    # Seed numpy, torch and the current CUDA device.
    np.random.seed(args.seed)
    cudnn.benchmark = False
    torch.manual_seed(args.seed)
    cudnn.enabled = True
    torch.cuda.manual_seed(args.seed)
    # Total optimizer steps, assuming a 50k-sample training set.
    args.steps = int(np.ceil(50000 / args.batch_size)) * args.epochs
    logging.info("Args = %s", args)
    _, model_state_dict, epoch, step, optimizer_state_dict, best_acc_top1 = utils.load(args.output_dir)
    builder = get_builder(args.dataset)
    train_queue, valid_queue, model, train_criterion, eval_criterion, optimizer, scheduler = builder(
        model_state_dict, optimizer_state_dict, epoch=epoch - 1)
    while epoch < args.epochs:
        scheduler.step()
        logging.info('epoch %d lr %e', epoch, scheduler.get_lr()[0])
        train_acc, train_obj, step = train(train_queue, model, optimizer, step, train_criterion)
        logging.info('train_acc %f', train_acc)
        valid_acc_top1, valid_obj = valid(valid_queue, model, eval_criterion)
        logging.info('valid_acc %f', valid_acc_top1)
        epoch += 1
        # Checkpoint each epoch, flagging new best top-1 accuracy.
        is_best = valid_acc_top1 > best_acc_top1
        if is_best:
            best_acc_top1 = valid_acc_top1
        utils.save(args.output_dir, args, model, epoch, step, optimizer, best_acc_top1, is_best)
示例11: test_examples
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def test_examples(platforms, path):
    """Load the enaml example at *path* and run its view on each mock platform."""
    # Resolve the examples directory relative to this test file's parent.
    root = os.path.abspath(os.path.split(os.path.dirname(__file__))[0])
    enaml_file = os.path.join(root, 'examples', os.path.normpath(path))
    for platform in platforms:
        app = MockApplication.instance(platform)
        # Compile the .enaml source inside an enaml import context.
        with enaml.imports():
            with open(enaml_file, 'rb') as f:
                ContentView = load(f.read())
        app.view = ContentView()
        app.run()
示例12: test_demo_app
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def test_demo_app():
    """Smoke-test the demo view on the mock Android backend."""
    # Compile the .enaml source inside an enaml import context.
    with enaml.imports(), open('examples/demo/view.enaml', 'rb') as f:
        ContentView = load(f.read())
    app = MockApplication.instance('android')
    app.view = ContentView()
    app.run()
示例13: test_playground_app
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def test_playground_app():
    """Smoke-test the playground view on the mock Android backend."""
    # Compile the .enaml source inside an enaml import context.
    with enaml.imports(), open('examples/playground/view.enaml', 'rb') as f:
        ContentView = load(f.read())
    app = MockApplication.instance('android')
    app.view = ContentView()
    app.run()
示例14: getMUSDBHQ
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def getMUSDBHQ(database_path):
    """Index the MUSDB-HQ dataset, writing accompaniment.wav files on demand.

    Returns ``[train_samples, test_samples]`` where each sample is a dict
    mapping stem name ('mix', 'bass', 'drums', 'other', 'vocals',
    'accompaniment') to its wav path.
    """
    subsets = list()
    for subset in ["train", "test"]:
        print("Loading " + subset + " set...")
        samples = list()
        for track_folder in sorted(glob.glob(os.path.join(database_path, subset, "*"))):
            example = dict()
            # On disk the mixture stem is named "mixture.wav".
            for stem in ["mix", "bass", "drums", "other", "vocals"]:
                filename = "mixture" if stem == "mix" else stem
                example[stem] = os.path.join(track_folder, filename + ".wav")
            # Sum bass/drums/other into an accompaniment track if not cached yet.
            acc_path = os.path.join(track_folder, "accompaniment.wav")
            if not os.path.exists(acc_path):
                print("Writing accompaniment to " + track_folder)
                stem_audio = []
                for stem in ["bass", "drums", "other"]:
                    audio, sr = load(example[stem], sr=None, mono=False)
                    stem_audio.append(audio)
                write_wav(acc_path, np.clip(sum(stem_audio), -1.0, 1.0), sr)
            example["accompaniment"] = acc_path
            samples.append(example)
        subsets.append(samples)
    return subsets
示例15: load_data
# 需要導入模塊: import utils [as 別名]
# 或者: from utils import load [as 別名]
def load_data(self, data_path: list) -> list:
    """Read text-line polygons/gts (and optionally per-char ones) from json files.

    :param data_path: list of json annotation file paths (each readable by ``load``);
        the original annotation said ``str`` but the value is iterated as a list
    :return: list of dicts with keys 'img_path', 'img_name', 'text_polys',
        'texts', 'ignore_tags'
    """
    data_list = []
    for path in data_path:
        content = load(path)
        for gt in tqdm(content['data_list'], desc='read file {}'.format(path)):
            img_path = os.path.join(content['data_root'], gt['img_name'])
            polygons = []
            texts = []
            illegibility_list = []
            # NOTE: the original also collected per-annotation 'language' values,
            # but never used them in the returned dict; that dead code is removed.
            for annotation in gt['annotations']:
                # Skip annotations without geometry or text.
                if len(annotation['polygon']) == 0 or len(annotation['text']) == 0:
                    continue
                # NOTE(review): expands boxes with text longer than one char when
                # expand_one_char is set — confirm this condition is intended.
                if len(annotation['text']) > 1 and self.expand_one_char:
                    annotation['polygon'] = expand_polygon(annotation['polygon'])
                polygons.append(annotation['polygon'])
                texts.append(annotation['text'])
                illegibility_list.append(annotation['illegibility'])
                if self.load_char_annotation:
                    # Also emit each character box as its own polygon/text pair.
                    for char_annotation in annotation['chars']:
                        if len(char_annotation['polygon']) == 0 or len(char_annotation['char']) == 0:
                            continue
                        polygons.append(char_annotation['polygon'])
                        texts.append(char_annotation['char'])
                        illegibility_list.append(char_annotation['illegibility'])
            data_list.append({'img_path': img_path, 'img_name': gt['img_name'], 'text_polys': np.array(polygons),
                              'texts': texts, 'ignore_tags': illegibility_list})
    return data_list