This article collects typical usage examples of the Python method utils.progress_bar. If you are wondering what utils.progress_bar does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also browse further usage examples from the utils module that this method belongs to.
The following shows 9 code examples of utils.progress_bar, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
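Note that utils is not part of the standard library: each project below ships its own utils helpers, so the exact signature of progress_bar varies between examples. In the downloader examples (Examples 1-3, which come from pySmartDL), it takes a completion fraction and a bar length and returns the bar as a string. As a rough, hypothetical sketch of that variant (not the actual pySmartDL implementation), such a helper could be as simple as:

def progress_bar(progress, length=20):
    # progress is a float in [0.0, 1.0]; returns e.g. "[##########----------]"
    filled = int(round(progress * length))
    return "[" + "#" * filled + "-" * (length - filled) + "]"

print(progress_bar(0.5))  # prints "[##########----------]"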
Example 1: __init__

# Required module: import utils [as alias]
# Or: from utils import progress_bar [as alias]
def __init__(self, obj):
    threading.Thread.__init__(self)
    self.obj = obj
    self.progress_bar = obj.progress_bar
    self.logger = obj.logger
    self.shared_var = obj.shared_var

    self.dl_speed = 0
    self.eta = 0
    self.lastBytesSamples = []  # list of the last 50 byte-count samples
    self.last_calculated_totalBytes = 0
    self.calcETA_queue = []
    self.calcETA_i = 0
    self.calcETA_val = 0
    self.dl_time = -1.0

    self.daemon = True
    self.start()
Example 2: get_progress_bar

# Required module: import utils [as alias]
# Or: from utils import progress_bar [as alias]
def get_progress_bar(self, length=20):
    '''
    Returns the current progress of the download as a string containing a progress bar.

    .. NOTE::
        This is an alias for pySmartDL.utils.progress_bar(obj.get_progress()).

    :param length: The length of the progress bar in chars. Default is 20.
    :type length: int
    :rtype: string
    '''
    return utils.progress_bar(self.get_progress(), length)
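As a usage sketch, assuming a pySmartDL-style downloader object (the URL below is purely illustrative, and a polling loop is only one way to consume this method):

from pySmartDL import SmartDL
import time

obj = SmartDL("http://example.com/file.zip", progress_bar=False)  # illustrative URL
obj.start(blocking=False)
while not obj.isFinished():
    print(obj.get_progress_bar(length=20))  # e.g. "[#####---------------]"
    time.sleep(0.5)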
Example 3: run

# Required module: import utils [as alias]
# Or: from utils import progress_bar [as alias]
def run(self):
    t1 = time.time()

    while not self.obj.pool.done():
        self.dl_speed = self.calcDownloadSpeed(self.shared_var.value)
        if self.dl_speed > 0:
            self.eta = self.calcETA((self.obj.filesize-self.shared_var.value)/self.dl_speed)

        if self.progress_bar:
            if self.obj.filesize:
                status = r"[*] %s / %s @ %s/s %s [%3.1f%%, %s left] " % (utils.sizeof_human(self.shared_var.value), utils.sizeof_human(self.obj.filesize), utils.sizeof_human(self.dl_speed), utils.progress_bar(1.0*self.shared_var.value/self.obj.filesize), self.shared_var.value * 100.0 / self.obj.filesize, utils.time_human(self.eta, fmt_short=True))
            else:
                status = r"[*] %s / ??? MB @ %s/s " % (utils.sizeof_human(self.shared_var.value), utils.sizeof_human(self.dl_speed))
            status = status + chr(8)*(len(status)+1)
            print(status, end='')  # overwrite the previous status line in place
        time.sleep(0.1)

    if self.obj._killed:
        self.logger.debug("File download process has been stopped.")
        return

    if self.progress_bar:
        if self.obj.filesize:
            print(r"[*] %s / %s @ %s/s %s [100%%, 0s left] " % (utils.sizeof_human(self.obj.filesize), utils.sizeof_human(self.obj.filesize), utils.sizeof_human(self.dl_speed), utils.progress_bar(1.0)))
        else:
            print(r"[*] %s / %s @ %s/s " % (utils.sizeof_human(self.shared_var.value), self.shared_var.value / 1024.0**2, utils.sizeof_human(self.dl_speed)))

    t2 = time.time()
    self.dl_time = float(t2-t1)

    while self.obj.post_threadpool_thread.is_alive():
        time.sleep(0.1)

    self.obj.pool.shutdown()
    self.obj.status = "finished"
    if not self.obj.errors:
        self.logger.debug("File downloaded within %.2f seconds." % self.dl_time)
Example 4: train

# Required module: import utils [as alias]
# Or: from utils import progress_bar [as alias]
def train(epoch):
    print('\nEpoch: %d' % epoch)
    global Train_acc
    net.train()
    train_loss = 0
    correct = 0
    total = 0

    if epoch > learning_rate_decay_start and learning_rate_decay_start >= 0:
        frac = (epoch - learning_rate_decay_start) // learning_rate_decay_every
        decay_factor = learning_rate_decay_rate ** frac
        current_lr = opt.lr * decay_factor
        utils.set_lr(optimizer, current_lr)  # set the decayed rate
    else:
        current_lr = opt.lr
    print('learning_rate: %s' % str(current_lr))

    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        utils.clip_gradient(optimizer, 0.1)
        optimizer.step()

        train_loss += loss.data[0]  # pre-0.4 PyTorch API; newer versions use loss.item()
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        utils.progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
            % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))

    Train_acc = 100.*correct/total
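In Examples 4 through 9, utils.progress_bar has a different shape from the pySmartDL helper above: it takes the current batch index, the total number of batches, and a status message, and is used for its side effect of rendering the bar to the terminal rather than for a return value. A hypothetical minimal helper in that style (the real ones in these training scripts are more elaborate) might look like this:

import sys

def progress_bar(current, total, msg=None, bar_len=65):
    # Draw "[=====>....] current+1/total | msg" on one terminal line, overwriting it in place.
    filled = max(1, int(bar_len * (current + 1) / total))
    bar = '=' * (filled - 1) + '>' + '.' * (bar_len - filled)
    line = ' [%s] %d/%d' % (bar, current + 1, total)
    if msg:
        line += ' | ' + msg
    sys.stdout.write('\r' + line)
    if current == total - 1:
        sys.stdout.write('\n')
    sys.stdout.flush()

A training loop can then call progress_bar(batch_idx, len(trainloader), 'Loss: ...') exactly as in the examples below.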
Example 5: PublicTest

# Required module: import utils [as alias]
# Or: from utils import progress_bar [as alias]
def PublicTest(epoch):
    global PublicTest_acc
    global best_PublicTest_acc
    global best_PublicTest_acc_epoch
    net.eval()
    PublicTest_loss = 0
    correct = 0
    total = 0

    for batch_idx, (inputs, targets) in enumerate(PublicTestloader):
        bs, ncrops, c, h, w = np.shape(inputs)
        inputs = inputs.view(-1, c, h, w)
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs = net(inputs)
        outputs_avg = outputs.view(bs, ncrops, -1).mean(1)  # avg over crops
        loss = criterion(outputs_avg, targets)
        PublicTest_loss += loss.data[0]
        _, predicted = torch.max(outputs_avg.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        utils.progress_bar(batch_idx, len(PublicTestloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
            % (PublicTest_loss / (batch_idx + 1), 100. * correct / total, correct, total))

    # Save checkpoint.
    PublicTest_acc = 100.*correct/total
    if PublicTest_acc > best_PublicTest_acc:
        print('Saving..')
        print("best_PublicTest_acc: %0.3f" % PublicTest_acc)
        state = {
            'net': net.state_dict() if use_cuda else net,
            'acc': PublicTest_acc,
            'epoch': epoch,
        }
        if not os.path.isdir(path):
            os.mkdir(path)
        torch.save(state, os.path.join(path, 'PublicTest_model.t7'))
        best_PublicTest_acc = PublicTest_acc
        best_PublicTest_acc_epoch = epoch
Example 6: train

# Required module: import utils [as alias]
# Or: from utils import progress_bar [as alias]
def train(epoch):
    print('\nEpoch: %d' % epoch)
    global Train_acc
    net.train()
    train_loss = 0
    correct = 0
    total = 0

    if epoch > learning_rate_decay_start and learning_rate_decay_start >= 0:
        frac = (epoch - learning_rate_decay_start) // learning_rate_decay_every
        decay_factor = learning_rate_decay_rate ** frac
        current_lr = opt.lr * decay_factor
        utils.set_lr(optimizer, current_lr)  # set the decayed rate
    else:
        current_lr = opt.lr
    print('learning_rate: %s' % str(current_lr))

    for batch_idx, (inputs, targets) in enumerate(trainloader):
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        optimizer.zero_grad()
        inputs, targets = Variable(inputs), Variable(targets)
        outputs = net(inputs)
        loss = criterion(outputs, targets)
        loss.backward()
        utils.clip_gradient(optimizer, 0.1)
        optimizer.step()

        train_loss += loss.data[0]
        _, predicted = torch.max(outputs.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        utils.progress_bar(batch_idx, len(trainloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
            % (train_loss/(batch_idx+1), 100.*correct/total, correct, total))

    Train_acc = 100.*correct/total
Example 7: eval_model

# Required module: import utils [as alias]
# Or: from utils import progress_bar [as alias]
def eval_model(model, data, params):
    model.eval()
    reference, candidate, source, alignments = [], [], [], []
    count, total_count = 0, len(data['validset'])
    validloader = data['validloader']
    tgt_vocab = data['tgt_vocab']

    for src, tgt, src_len, tgt_len, original_src, original_tgt in validloader:
        if config.use_cuda:
            src = src.cuda()
            src_len = src_len.cuda()
        with torch.no_grad():
            if config.beam_size > 1:
                samples, alignment, weight = model.beam_sample(src, src_len, beam_size=config.beam_size, eval_=True)
            else:
                samples, alignment = model.sample(src, src_len)

        candidate += [tgt_vocab.convertToLabels(s, utils.EOS) for s in samples]
        source += original_src
        reference += original_tgt
        if alignment is not None:
            alignments += [align for align in alignment]

        count += len(original_src)
        utils.progress_bar(count, total_count)

    if config.unk and config.attention != 'None':
        cands = []
        for s, c, align in zip(source, candidate, alignments):
            cand = []
            for word, idx in zip(c, align):
                if word == utils.UNK_WORD and idx < len(s):
                    try:
                        cand.append(s[idx])
                    except:
                        cand.append(word)
                        print("%d %d\n" % (len(s), idx))
                else:
                    cand.append(word)
            cands.append(cand)
            if len(cand) == 0:
                print('Error!')
        candidate = cands

    with codecs.open(params['log_path']+'candidate.txt', 'w+', 'utf-8') as f:
        for i in range(len(candidate)):
            f.write(" ".join(candidate[i])+'\n')

    score = {}
    for metric in config.metrics:
        score[metric] = getattr(utils, metric)(reference, candidate, params['log_path'], params['log'], config)

    return score
Example 8: PrivateTest

# Required module: import utils [as alias]
# Or: from utils import progress_bar [as alias]
def PrivateTest(epoch):
    global PrivateTest_acc
    global best_PrivateTest_acc
    global best_PrivateTest_acc_epoch
    net.eval()
    PrivateTest_loss = 0
    correct = 0
    total = 0

    for batch_idx, (inputs, targets) in enumerate(PrivateTestloader):
        bs, ncrops, c, h, w = np.shape(inputs)
        inputs = inputs.view(-1, c, h, w)
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs = net(inputs)
        outputs_avg = outputs.view(bs, ncrops, -1).mean(1)  # avg over crops
        loss = criterion(outputs_avg, targets)
        PrivateTest_loss += loss.data[0]
        _, predicted = torch.max(outputs_avg.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        utils.progress_bar(batch_idx, len(PrivateTestloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
            % (PrivateTest_loss / (batch_idx + 1), 100. * correct / total, correct, total))

    # Save checkpoint.
    PrivateTest_acc = 100.*correct/total
    if PrivateTest_acc > best_PrivateTest_acc:
        print('Saving..')
        print("best_PrivateTest_acc: %0.3f" % PrivateTest_acc)
        state = {
            'net': net.state_dict() if use_cuda else net,
            'best_PublicTest_acc': best_PublicTest_acc,
            'best_PrivateTest_acc': PrivateTest_acc,
            'best_PublicTest_acc_epoch': best_PublicTest_acc_epoch,
            'best_PrivateTest_acc_epoch': epoch,
        }
        if not os.path.isdir(path):
            os.mkdir(path)
        torch.save(state, os.path.join(path, 'PrivateTest_model.t7'))
        best_PrivateTest_acc = PrivateTest_acc
        best_PrivateTest_acc_epoch = epoch
Example 9: test

# Required module: import utils [as alias]
# Or: from utils import progress_bar [as alias]
def test(epoch):
    global Test_acc
    global best_Test_acc
    global best_Test_acc_epoch
    net.eval()
    PrivateTest_loss = 0
    correct = 0
    total = 0

    for batch_idx, (inputs, targets) in enumerate(testloader):
        bs, ncrops, c, h, w = np.shape(inputs)
        inputs = inputs.view(-1, c, h, w)
        if use_cuda:
            inputs, targets = inputs.cuda(), targets.cuda()
        inputs, targets = Variable(inputs, volatile=True), Variable(targets)
        outputs = net(inputs)
        outputs_avg = outputs.view(bs, ncrops, -1).mean(1)  # avg over crops
        loss = criterion(outputs_avg, targets)
        PrivateTest_loss += loss.data[0]
        _, predicted = torch.max(outputs_avg.data, 1)
        total += targets.size(0)
        correct += predicted.eq(targets.data).cpu().sum()

        utils.progress_bar(batch_idx, len(testloader), 'Loss: %.3f | Acc: %.3f%% (%d/%d)'
            % (PrivateTest_loss / (batch_idx + 1), 100. * correct / total, correct, total))

    # Save checkpoint.
    Test_acc = 100.*correct/total
    if Test_acc > best_Test_acc:
        print('Saving..')
        print("best_Test_acc: %0.3f" % Test_acc)
        state = {
            'net': net.state_dict() if use_cuda else net,
            'best_Test_acc': Test_acc,
            'best_Test_acc_epoch': epoch,
        }
        if not os.path.isdir(opt.dataset + '_' + opt.model):
            os.mkdir(opt.dataset + '_' + opt.model)
        if not os.path.isdir(path):
            os.mkdir(path)
        torch.save(state, os.path.join(path, 'Test_model.t7'))
        best_Test_acc = Test_acc
        best_Test_acc_epoch = epoch