This article collects typical usage examples of the pyprind.prog_bar method in Python. If you have been wondering what pyprind.prog_bar does, how it is used, or what it looks like in real code, the curated examples here may help. You can also explore further usage examples from the pyprind module.
The following presents three code examples of the pyprind.prog_bar method.
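Before the examples, a minimal sketch of the basic pattern may help; the iterable size and sleep time below are placeholder values, not taken from any of the examples:

import time
import pyprind

# prog_bar wraps an iterable, renders a text progress bar (on stderr by
# default), and yields each item unchanged
for item in pyprind.prog_bar(range(50)):
    time.sleep(0.02)  # stand-in for real work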
Example 1: test_generator
# Required module: import pyprind [as alias]
# Or: from pyprind import prog_bar [as alias]
import sys
import time
import pyprind

n, sleeptime = 100, 0.01  # not defined in the snippet; placeholder values

def test_generator():
    for i in pyprind.prog_bar(range(n), stream=sys.stdout):
        time.sleep(sleeptime)
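prog_bar is a generator convenience around pyprind's ProgBar class, so the same loop can be written with a manually advanced bar. A minimal equivalent sketch, reusing the placeholder n and sleeptime:

import sys
import time
import pyprind

n, sleeptime = 100, 0.01  # same placeholders as above
bar = pyprind.ProgBar(n, stream=sys.stdout)
for i in range(n):
    time.sleep(sleeptime)
    bar.update()  # advance the bar by one iteration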
Example 2: __new__
# Required module: import pyprind [as alias]
# Or: from pyprind import prog_bar [as alias]
import sys

def __new__(cls, iterable=None, desc=None, total=None, leave=True,
            backend=None, verbose=True):
    # pick the progress-bar backend: an explicit argument wins, otherwise
    # fall back to the class-level default; verbose=False hides the bar
    if backend is None:
        backend = Progressbar.backend
    if not verbose:
        backend = "hide"
    if backend == "tqdm":
        from tqdm import tqdm
        return tqdm(iterable=iterable, desc=desc, total=total, leave=leave,
                    ascii=True, ncols=80, file=sys.stdout,
                    bar_format="{l_bar}{bar}| {n_fmt}/{total_fmt} [{elapsed"
                               "}<{remaining}{postfix}]")  # remove rate_fmt
    elif backend == "tqdm_notebook":
        from tqdm import tqdm_notebook
        return tqdm_notebook(iterable=iterable, desc=desc, total=total,
                             leave=leave)
    elif backend == "pyprind":
        from pyprind import ProgBar, prog_bar
        ProgBar._adjust_width = lambda self: None  # keep constant width
        if iterable is None:
            # no iterable: return a bar that is advanced manually via update()
            return ProgBar(total, title=desc, stream=1)
        else:
            return prog_bar(iterable, title=desc, stream=1,
                            iterations=total)
    elif backend == "hide":
        # NoProgressbar is a no-op stand-in defined elsewhere in the source
        return NoProgressbar(iterable=iterable)
    else:
        raise NotImplementedError("unknown backend")
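For illustration, a hypothetical call site for this factory might look like the sketch below; Progressbar (with its backend class attribute) and NoProgressbar are defined in the surrounding module, which is not shown here, and the argument values are made up:

Progressbar.backend = "pyprind"  # or "tqdm", "tqdm_notebook", "hide"
for batch in Progressbar(range(100), desc="training", total=100):
    pass  # process one batch here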
Example 3: train
# Required module: import pyprind [as alias]
# Or: from pyprind import prog_bar [as alias]
def train(self):
    memory = ReplayMem(
        obs_dim=self.env.observation_space.flat_dim,
        act_dim=self.env.action_space.flat_dim,
        memory_size=self.memory_size)
    itr = 0
    path_length = 0
    path_return = 0
    end = False
    obs = self.env.reset()
    for epoch in range(self.n_epochs):
        logger.push_prefix("epoch #%d | " % epoch)
        logger.log("Training started")
        for epoch_itr in pyprind.prog_bar(range(self.epoch_length)):
            # run the policy
            if end:
                # reset the environment and strategy when an episode ends
                obs = self.env.reset()
                self.strategy.reset()
                # self.policy.reset()
                self.strategy_path_returns.append(path_return)
                path_length = 0
                path_return = 0
            # note: the action is sampled from the exploration strategy,
            # not the target policy
            act = self.strategy.get_action(obs, self.policy)
            nxt, rwd, end, _ = self.env.step(act)
            path_length += 1
            path_return += rwd
            if not end and path_length >= self.max_path_length:
                end = True
                # only record this horizon-induced terminal transition
                # when the flag is set
                if self.include_horizon_terminal:
                    memory.add_sample(obs, act, rwd, end)
            else:
                memory.add_sample(obs, act, rwd, end)
            obs = nxt
            if memory.size >= self.memory_start_size:
                for update_time in range(self.n_updates_per_sample):
                    batch = memory.get_batch(self.batch_size)
                    self.do_update(itr, batch)
            itr += 1
        logger.log("Training finished")
        if memory.size >= self.memory_start_size:
            self.evaluate(epoch, memory)
        logger.dump_tabular(with_prefix=False)
        logger.pop_prefix()
    # self.env.terminate()
    # self.policy.terminate()
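Here prog_bar over range(self.epoch_length) tracks progress through the epoch while the loop body drives the environment. When there is no convenient iterable to wrap, a manually updated ProgBar achieves the same effect; a minimal sketch with a made-up epoch length:

import pyprind

epoch_length = 1000  # made-up value; the example reads self.epoch_length
bar = pyprind.ProgBar(epoch_length, title="epoch #0", stream=1)  # stream=1: stdout
for step in range(epoch_length):
    # ... environment step, replay-memory write, parameter updates ...
    bar.update()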