本文整理汇总了Python中progress.bar.Bar方法的典型用法代码示例。如果您正苦于以下问题:Python bar.Bar方法的具体用法?Python bar.Bar怎么用?Python bar.Bar使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类progress.bar
的用法示例。
在下文中一共展示了bar.Bar方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: preprocess_midi_files_under
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def preprocess_midi_files_under(midi_root, save_dir):
    """Preprocess every MIDI file found under *midi_root* and pickle the results.

    Each ``.mid``/``.midi`` file is run through ``preprocess_midi`` and the
    result is written to ``<save_dir>/<basename>.pickle``. A progress bar is
    shown while iterating. Ctrl-C or a truncated MIDI file (EOFError) aborts
    the whole run.
    """
    midi_paths = list(utils.find_files_by_extensions(midi_root, ['.mid', '.midi']))
    os.makedirs(save_dir, exist_ok=True)
    for path in Bar('Processing').iter(midi_paths):
        print(' ', end='[{}]'.format(path), flush=True)
        try:
            data = preprocess_midi(path)
        except KeyboardInterrupt:
            print(' Abort')
            return
        except EOFError:
            print('EOF Error')
            return
        # os.path.basename is portable; the original split on '/' only,
        # which breaks on Windows paths. (Unused `out_fmt` constant removed.)
        with open('{}/{}.pickle'.format(save_dir, os.path.basename(path)), 'wb') as f:
            pickle.dump(data, f)
示例2: main
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def main():
    """Convert an HDF5 log/camera dataset pair into a ROS bag.

    Reads ``../dataset/log/<name>.h5`` and ``../dataset/camera/<name>.h5``
    (name taken from argv[1]) and writes ``<name>.bag`` containing the
    camera frames followed by the fiber-optic IMU samples.
    """
    if len(sys.argv) < 2:
        print("Usage: {} dataset_name".format(sys.argv[0]))
        exit(1)

    file_name = sys.argv[1]
    log_file = h5py.File('../dataset/log/{}.h5'.format(file_name))
    camera_file = h5py.File('../dataset/camera/{}.h5'.format(file_name))
    zipped_log = izip(
        log_file['times'],
        log_file['fiber_accel'],
        log_file['fiber_gyro'])

    with rosbag.Bag('{}.bag'.format(file_name), 'w') as bag:
        frames = camera_file['X']
        progress = Bar('Camera', max=len(frames))
        for idx, frame in enumerate(frames):
            msg = Image()
            # Frames arrive channel-first; transpose to H x W x C for rgb8.
            msg.header.stamp = rospy.Time.from_sec(0.01 * idx)
            msg.height = frame.shape[1]
            msg.width = frame.shape[2]
            msg.step = 3 * frame.shape[2]
            msg.encoding = 'rgb8'
            msg.data = np.transpose(frame, (1, 2, 0)).flatten().tolist()
            bag.write('/camera/image_raw', msg, msg.header.stamp)
            progress.next()
        progress.finish()

        progress = Bar('IMU', max=len(log_file['times']))
        for stamp, accel, gyro in zipped_log:
            imu_msg = Imu()
            imu_msg.header.stamp = rospy.Time.from_sec(stamp)
            for axis_idx, axis in enumerate('xyz'):
                setattr(imu_msg.linear_acceleration, axis, accel[axis_idx])
                setattr(imu_msg.angular_velocity, axis, gyro[axis_idx])
            bag.write('/fiber_imu', imu_msg, imu_msg.header.stamp)
            progress.next()
        progress.finish()
示例3: undeploy
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def undeploy(self, lab_hash, selected_machines=None):
    """Tear down the machines of the lab identified by *lab_hash* in parallel.

    When *selected_machines* is given, the progress bar is sized to that
    selection instead of every machine found; the per-machine filtering is
    handled by ``_undeploy_machine``.
    """
    machines = self.get_machines_by_filters(lab_hash=lab_hash)

    workers = utils.get_pool_size()
    pool = Pool(workers)

    bar_total = len(selected_machines) if selected_machines else len(machines)
    progress_bar = Bar("Deleting machines...", max=bar_total)

    undeploy_one = partial(self._undeploy_machine, selected_machines, True, progress_bar)
    for batch in utils.chunk_list(machines, workers):
        pool.map(func=undeploy_one, iterable=batch)

    progress_bar.finish()
示例4: deploy_links
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def deploy_links(self, lab):
    """Create every link of *lab* in parallel, then attach the Docker bridge.

    Links are deployed in chunks sized to the worker pool; afterwards the
    default Docker bridge network is registered on the lab object.
    """
    workers = utils.get_pool_size()
    pool = Pool(workers)

    links = lab.links.items()
    progress_bar = Bar('Deploying links...', max=len(links))

    deploy_one = partial(self._deploy_link, progress_bar)
    for batch in utils.chunk_list(links, workers):
        pool.map(func=deploy_one, iterable=batch)
    progress_bar.finish()

    # Create a docker bridge link in the lab object and assign the Docker
    # Network object associated to it.
    bridge_network = self.get_docker_bridge()
    bridge_link = lab.get_or_new_link(BRIDGE_LINK_NAME)
    bridge_link.api_object = bridge_network
示例5: exifJSON
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def exifJSON():
    """Dump exiftool metadata for every file in ``<ROOT_DIR>/media`` as JSON.

    For each media file a ``<name>.json`` is written to
    ``<ROOT_DIR>/exifdata/json``. The function chdir()s between the two
    directories because filenames are used relative to the media dir.
    """
    print("Running exiftool to JSON")
    os.chdir(ROOT_DIR + "/media/")
    mediafiles = len(os.listdir())
    jsonbar = Bar('Processing', max=mediafiles)
    # The original wrapped this loop in `for i in range(mediafiles): ...; break`,
    # which ran it exactly once; the redundant outer loop is removed.
    for filename in os.listdir("."):
        exifoutputjson = exif.get_json(filename)
        os.chdir(ROOT_DIR + "/exifdata/json")
        # Context manager closes the output file — the original leaked one
        # open handle per file via print(..., file=open(...)).
        with open(filename + ".json", "w") as out:
            print(json.dumps(exifoutputjson, sort_keys=True, indent=0,
                             separators=(',', ': ')), file=out)
        jsonbar.next()
        os.chdir(ROOT_DIR + "/media")
    jsonbar.finish()
#exiftool in HTML
示例6: exifHTML
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def exifHTML():
    """Dump exiftool's HTML report for every file in ``<ROOT_DIR>/media``.

    For each media file a ``<name>.html`` is written to
    ``<ROOT_DIR>/exifdata/html`` using ``exiftool -h``.
    """
    print("Running exiftool to HTML")
    os.chdir(ROOT_DIR + "/media/")
    mediafiles = len(os.listdir())
    htmlbar = Bar('Processing', max=mediafiles)
    # The original's `for i in range(mediafiles): ...; break` ran the inner
    # loop once; the redundant outer loop is removed.
    for filename in os.listdir("."):
        exifoutputhtml = exif.command_line(['exiftool', '-h', filename])
        os.chdir(ROOT_DIR + "/exifdata/html")
        # Context manager closes the output file — the original leaked one
        # open handle per file via print(..., file=open(...)).
        with open(filename + ".html", "w") as out:
            print(exifoutputhtml, file=out)
        htmlbar.next()
        os.chdir(ROOT_DIR + "/media")
    htmlbar.finish()
#exiftool hex dump to html
示例7: exifHTMLDump
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def exifHTMLDump():
    """Write exiftool's binary hex dump (``-htmlDump``) for each media file.

    Output goes to ``<ROOT_DIR>/exifdata/hex_html/<name>.html``, written in
    binary mode since ``exif.command_line`` output is fed straight to
    ``write``.
    """
    print("Running exiftool to HTML Dump")
    # Original chdir'd to the media dir twice in a row; once is enough.
    os.chdir(ROOT_DIR + "/media/")
    mediafiles = len(os.listdir())
    htmldumpbar = Bar('Processing', max=mediafiles)
    # The original's `for i in range(mediafiles): ...; break` ran the inner
    # loop once; the redundant outer loop is removed.
    for filename in os.listdir("."):
        exifoutputhtmldump = exif.command_line(['exiftool', '-htmlDump', filename])
        os.chdir(ROOT_DIR + "/exifdata/hex_html")
        # `with` guarantees the handle is closed even if write() raises.
        with open(filename + ".html", 'wb') as htmldumpfile:
            htmldumpfile.write(exifoutputhtmldump)
        htmldumpbar.next()
        os.chdir(ROOT_DIR + "/media")
    htmldumpbar.finish()
示例8: test
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def test(model, test_loader):
    """
    test a model on a given dataset
    """
    seen, hits = 0, 0
    progress = Bar('Testing', max=len(test_loader))
    model.eval()
    with torch.no_grad():
        for step, (batch_x, batch_y) in enumerate(test_loader):
            batch_x, batch_y = batch_x.cuda(), batch_y.cuda()
            logits = model(batch_x)
            _, pred = logits.max(1)
            seen += batch_y.size(0)
            hits += pred.eq(batch_y).sum().item()
            acc = hits / seen
            progress.suffix = f'({step + 1}/{len(test_loader)}) | ETA: {progress.eta_td} | top1: {acc}'
            progress.next()
    print('\nFinal acc: %.2f%% (%d/%d)' % (100. * acc, hits, seen))
    progress.finish()
    model.train()
    return acc
示例9: __init__
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def __init__(self, root, verbose=False):
    """Load every preprocessed ``.data`` file under *root* into memory.

    Populates ``self.samples`` with (eventseq, controlseq) pairs and
    ``self.seqlens`` with their lengths; ``self.avglen`` is the mean length.
    """
    assert os.path.isdir(root), root
    self.root = root
    self.samples = []
    self.seqlens = []

    data_paths = utils.find_files_by_extensions(root, ['.data'])
    if verbose:
        # Wrap the path iterator in a progress bar labelled with the root dir.
        data_paths = Bar(root).iter(list(data_paths))

    for data_path in data_paths:
        eventseq, controlseq = torch.load(data_path)
        controlseq = ControlSeq.recover_compressed_array(controlseq)
        assert len(eventseq) == len(controlseq)
        self.samples.append((eventseq, controlseq))
        self.seqlens.append(len(eventseq))

    self.avglen = np.mean(self.seqlens)
示例10: preprocess_midi_files_under
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def preprocess_midi_files_under(midi_root, save_dir, num_workers):
    """Preprocess all MIDI files under *midi_root* in a process pool.

    Results are saved as ``<basename>-<md5(path)>.data`` in *save_dir*, the
    hash keeping files with identical basenames from different folders from
    colliding. Ctrl-C during submission aborts the whole run.
    """
    midi_paths = list(utils.find_files_by_extensions(midi_root, ['.mid', '.midi']))
    os.makedirs(save_dir, exist_ok=True)
    out_fmt = '{}-{}.data'

    results = []
    executor = ProcessPoolExecutor(num_workers)
    for path in midi_paths:
        try:
            results.append((path, executor.submit(preprocess_midi, path)))
        except KeyboardInterrupt:
            print(' Abort')
            return
        # Was a bare `except:`, which would also swallow SystemExit and mask
        # real interpreter-level failures.
        except Exception:
            print(' Error')
            continue

    for path, future in Bar('Processing').iter(results):
        print(' ', end='[{}]'.format(path), flush=True)
        name = os.path.basename(path)
        code = hashlib.md5(path.encode()).hexdigest()
        save_path = os.path.join(save_dir, out_fmt.format(name, code))
        torch.save(future.result(), save_path)
    print('Done')
示例11: generate
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def generate(self,
             prior: torch.Tensor,
             length=2048,
             tf_board_writer: SummaryWriter = None):
    """Autoregressively sample *length* tokens starting from *prior*.

    At each step the decoder's last-position distribution is sampled
    categorically and the drawn token is appended both to the rolling decode
    context and to the returned sequence. The context is capped at
    ``config.threshold_len`` tokens by dropping the oldest one.
    Returns the first batch row of the accumulated token tensor.
    """
    decode_array = prior
    result_array = prior
    print(config)
    print(length)
    for i in Bar('generating').iter(range(length)):
        # Keep the decode context at most `threshold_len` tokens long.
        if decode_array.size(1) >= config.threshold_len:
            decode_array = decode_array[:, 1:]
        _, _, look_ahead_mask = \
            utils.get_masked_with_pad_tensor(decode_array.size(1), decode_array,
                                             decode_array, pad_token=config.pad_token)
        # NOTE(review): the mask is built but not passed to Decoder here —
        # mirrors the original, which also called self.Decoder(decode_array, None).
        result, _ = self.Decoder(decode_array, None)
        result = self.fc(result)
        result = result.softmax(-1)
        if tf_board_writer:
            tf_board_writer.add_image("logits", result, global_step=i)
        # Removed the original's unreachable greedy-argmax branch guarded by
        # `u = 0; if u > 1:` — only this sampling path ever executed.
        pdf = dist.OneHotCategorical(probs=result[:, -1])
        result = pdf.sample().argmax(-1).unsqueeze(-1)
        decode_array = torch.cat((decode_array, result), dim=-1)
        result_array = torch.cat((result_array, result), dim=-1)
        del look_ahead_mask
    result_array = result_array[0]
    return result_array
示例12: train
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def train(train_loader, model, optimizer, lr_now=None, max_norm=True, is_cuda=False, dim_used=[], dct_n=15):
    """Run one training epoch and return ``(lr_now, average_loss)``.

    Loss is the per-joint position error from ``loss_funcs.mpjpe_error_p3d``,
    accumulated batch-weighted in ``t_l``. Note: ``dim_used=[]`` is a mutable
    default kept for interface compatibility; it is only passed through, never
    mutated here.
    """
    t_l = utils.AccumLoss()
    model.train()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):
        batch_size = inputs.shape[0]
        if batch_size == 1:
            # presumably skipped because batch-norm fails on size-1 batches — TODO confirm
            continue
        bt = time.time()
        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            # Fix: `async=True` is a SyntaxError on Python 3.7+ (async became a
            # keyword); `non_blocking=True` is the PyTorch >= 0.4 replacement.
            all_seq = Variable(all_seq.cuda(non_blocking=True)).float()
        outputs = model(inputs)
        # calculate loss and backward
        loss = loss_funcs.mpjpe_error_p3d(outputs, all_seq, dct_n, dim_used)
        optimizer.zero_grad()
        loss.backward()
        if max_norm:
            # clip_grad_norm was renamed clip_grad_norm_ in PyTorch 0.4.
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()
        # update the training loss; .item() replaces .data.numpy()[0], which
        # fails on the 0-dim loss tensors of PyTorch >= 0.4.
        t_l.update(loss.item() * batch_size, batch_size)
        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(i + 1, len(train_loader), time.time() - bt,
                                                                         time.time() - st)
        bar.next()
    bar.finish()
    return lr_now, t_l.avg
示例13: val
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def val(train_loader, model, is_cuda=False, dim_used=[], dct_n=15):
    """Evaluate *model* on *train_loader* and return the average 3D joint error.

    Note: ``dim_used=[]`` is a mutable default kept for interface
    compatibility; it is only passed through, never mutated here.
    """
    t_3d = utils.AccumLoss()
    model.eval()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):
        bt = time.time()
        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            # Fix: `async=True` is a SyntaxError on Python 3.7+ (async became a
            # keyword); `non_blocking=True` is the PyTorch >= 0.4 replacement.
            all_seq = Variable(all_seq.cuda(non_blocking=True)).float()
        outputs = model(inputs)
        n, _, _ = all_seq.data.shape
        m_err = loss_funcs.mpjpe_error_p3d(outputs, all_seq, dct_n, dim_used)
        # update the running error; .item() replaces .data.numpy()[0], which
        # fails on the 0-dim tensors of PyTorch >= 0.4.
        t_3d.update(m_err.item() * n, n)
        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(i + 1, len(train_loader), time.time() - bt,
                                                                         time.time() - st)
        bar.next()
    bar.finish()
    return t_3d.avg
示例14: train
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def train(train_loader, model, optimizer, lr_now=None, max_norm=True, is_cuda=False, dim_used=[], dct_n=15):
    """Run one training epoch and return ``(lr_now, average_loss)``.

    Loss is the per-joint position error from ``loss_funcs.mpjpe_error_p3d``,
    accumulated batch-weighted in ``t_l``. Note: ``dim_used=[]`` is a mutable
    default kept for interface compatibility; it is only passed through, never
    mutated here.
    """
    t_l = utils.AccumLoss()
    model.train()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):
        batch_size = inputs.shape[0]
        if batch_size == 1:
            # presumably skipped because batch-norm fails on size-1 batches — TODO confirm
            continue
        bt = time.time()
        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            # Fix: `async=True` is a SyntaxError on Python 3.7+ (async became a
            # keyword); `non_blocking=True` is the PyTorch >= 0.4 replacement.
            all_seq = Variable(all_seq.cuda(non_blocking=True)).float()
        outputs = model(inputs)
        # calculate loss and backward
        loss = loss_funcs.mpjpe_error_p3d(outputs, all_seq, dct_n, dim_used)
        optimizer.zero_grad()
        loss.backward()
        if max_norm:
            # clip_grad_norm was renamed clip_grad_norm_ in PyTorch 0.4.
            nn.utils.clip_grad_norm_(model.parameters(), max_norm=1)
        optimizer.step()
        # update the training loss; .item() replaces .data.numpy()[0], which
        # fails on the 0-dim loss tensors of PyTorch >= 0.4.
        t_l.update(loss.item() * batch_size, batch_size)
        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(i+1, len(train_loader), time.time() - bt,
                                                                         time.time() - st)
        bar.next()
    bar.finish()
    return lr_now, t_l.avg
示例15: val
# 需要导入模块: from progress import bar [as 别名]
# 或者: from progress.bar import Bar [as 别名]
def val(train_loader, model, is_cuda=False, dim_used=[], dct_n=15):
    """Evaluate *model* on *train_loader* and return the average 3D joint error.

    Note: ``dim_used=[]`` is a mutable default kept for interface
    compatibility; it is only passed through, never mutated here.
    """
    t_3d = utils.AccumLoss()
    model.eval()
    st = time.time()
    bar = Bar('>>>', fill='>', max=len(train_loader))
    for i, (inputs, targets, all_seq) in enumerate(train_loader):
        bt = time.time()
        if is_cuda:
            inputs = Variable(inputs.cuda()).float()
            # Fix: `async=True` is a SyntaxError on Python 3.7+ (async became a
            # keyword); `non_blocking=True` is the PyTorch >= 0.4 replacement.
            all_seq = Variable(all_seq.cuda(non_blocking=True)).float()
        outputs = model(inputs)
        n, _, _ = all_seq.data.shape
        m_err = loss_funcs.mpjpe_error_p3d(outputs, all_seq, dct_n, dim_used)
        # update the running error; .item() replaces .data.numpy()[0], which
        # fails on the 0-dim tensors of PyTorch >= 0.4.
        t_3d.update(m_err.item() * n, n)
        bar.suffix = '{}/{}|batch time {:.4f}s|total time{:.2f}s'.format(i+1, len(train_loader), time.time() - bt,
                                                                         time.time() - st)
        bar.next()
    bar.finish()
    return t_3d.avg