This article collects typical usage examples of Python's builtins.print. If you are wondering what exactly builtins.print does, how builtins.print is used, or what real examples of builtins.print look like, the curated code samples below may help. You can also explore further usage examples from the builtins module that this function belongs to.
The following shows 15 code examples of builtins.print, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code samples.
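Most of the examples below share one core trick: keep a reference to the built-in print and rebind builtins.print to a wrapper. A minimal, self-contained sketch of that pattern (all names here are illustrative, not taken from the examples):

import builtins

_builtin_print = builtins.print          # keep a handle on the real print

def filtered_print(*args, **kwargs):
    # stay silent unless the caller passes force=True
    if kwargs.pop('force', False):
        _builtin_print(*args, **kwargs)

builtins.print = filtered_print          # shadow print for the whole process
print('this is swallowed')
print('this still appears', force=True)
builtins.print = _builtin_print          # restore the original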
Example 1: distributed_init
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def distributed_init(args):
    if args.distributed_world_size == 1:
        raise ValueError('Cannot initialize distributed with distributed_world_size=1')
    print('| distributed init (rank {}): {}'.format(
        args.distributed_rank, args.distributed_init_method), flush=True)
    if args.distributed_init_method.startswith('tcp://'):
        torch.distributed.init_process_group(
            backend=args.distributed_backend, init_method=args.distributed_init_method,
            world_size=args.distributed_world_size, rank=args.distributed_rank)
    else:
        torch.distributed.init_process_group(
            backend=args.distributed_backend, init_method=args.distributed_init_method,
            world_size=args.distributed_world_size)
    args.distributed_rank = torch.distributed.get_rank()
    if not is_master(args):
        suppress_output()
    return args.distributed_rank
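is_master and suppress_output are helpers from the same module and are not defined in this snippet (suppress_output appears in Example 12 below). A plausible definition of is_master, assuming the usual convention that rank 0 is the master process, would be:

def is_master(args):
    # assumption: rank 0 is treated as the master process
    return args.distributed_rank == 0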
Example 2: _print_banner
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def _print_banner(self):
    banner = BANNER
    banner_len = len(max(banner.split(os.linesep), key=len))
    author = '{0:^{1}}'.format(f"{framework.Colors.O}[{self._name} v{__version__}, {__author__}]{framework.Colors.N}", banner_len + 8)
    if self._accessible:
        banner = BANNER_SMALL
        author = f"{framework.Colors.O}{self._name}, version {__version__}, by {__author__}{framework.Colors.N}"
    print(banner)
    print(author)
    print('')
    counts = [(len(self._loaded_category[x]), x) for x in self._loaded_category]
    if counts:
        count_len = len(max([self.to_unicode_str(x[0]) for x in counts], key=len))
        for count in sorted(counts, reverse=True):
            cnt = f"[{count[0]}]"
            print(f"{framework.Colors.B}{cnt.ljust(count_len+2)} {count[1].title()} modules{framework.Colors.N}")
            # create dynamic easter egg command based on counts
            setattr(self, f"do_{count[0]}", self._menu_egg)
    else:
        self.alert('No modules enabled/installed.')
    print('')
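The setattr call above registers a do_<count> attribute for each module count, which is what turns the numbers shown in the banner into the hidden commands handled by _menu_egg (Example 3). In a cmd.Cmd-style interpreter, any do_* attribute becomes a command; a small, framework-independent illustration of that mechanism:

import cmd

class Shell(cmd.Cmd):
    prompt = '> '

# dynamically add a command named "42"
setattr(Shell, 'do_42', lambda self, line: print('Really? A menu option? Try again.'))

shell = Shell()
shell.onecmd('42')    # dispatches to do_42 and prints the easter egg line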
Example 3: _menu_egg
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def _menu_egg(self, params):
    eggs = [
        'Really? A menu option? Try again.',
        'You clearly need \'help\'.',
        'That makes no sense to me.',
        '*grunt* *grunt* Nope. I got nothin\'.',
        'Wait for it...',
        'This is not the Social Engineering Toolkit.',
        'Don\'t you think if that worked the numbers would at least be in order?',
        'Reserving that option for the next-NEXT generation of the framework.',
        'You\'ve clearly got the wrong framework. Attempting to start SET...',
        '1980 called. They want their menu driven UI back.',
    ]
    print(random.choice(eggs))
    return

#==================================================
# WORKSPACE METHODS
#==================================================
Example 4: _do_marketplace_search
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def _do_marketplace_search(self, params):
    '''Searches marketplace modules'''
    modules = [m for m in self._module_index]
    if params:
        self.output(f"Searching module index for '{params}'...")
        modules = self._search_module_index(params)
    if modules:
        rows = []
        for module in sorted(modules, key=lambda m: m['path']):
            row = []
            for key in ('path', 'version', 'status', 'last_updated'):
                row.append(module[key])
            row.append('*' if module['dependencies'] else '')
            row.append('*' if module['required_keys'] else '')
            rows.append(row)
        header = ('Path', 'Version', 'Status', 'Updated', 'D', 'K')
        self.table(rows, header=header)
        print(f"{self.spacer}D = Has dependencies. See info for details.")
        print(f"{self.spacer}K = Requires keys. See info for details.{os.linesep}")
    else:
        self.error('No modules found.')
        self._help_marketplace_search()
Example 5: init_distributed_mode
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def init_distributed_mode(args):
    args.distributed = False
    if args.device == 'cuda' and 'WORLD_SIZE' in os.environ:
        args.distributed = True
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.rank = int(os.environ['RANK'])
        torch.cuda.set_device(args.local_rank)
        # args.local_rank, os.environ["RANK"] and os.environ['WORLD_SIZE'] are assigned automatically by the launcher
        print("args.local_rank:{},RANK:{},WORLD_SIZE:{}".format(args.local_rank, os.environ["RANK"],
                                                                os.environ['WORLD_SIZE']))
        print('| distributed init (rank {}): {}'.format(
            args.rank, args.dist_url), flush=True)
        torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                             world_size=args.world_size, rank=args.rank)
        setup_for_distributed(args.rank == 0)
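setup_for_distributed is not shown on this page. In the reference training scripts this kind of code is usually adapted from, it rebinds builtins.print so that only the master process prints unless force=True is passed; treat the following as an assumed, typical implementation rather than the one used here:

def setup_for_distributed(is_master):
    """Disable print() on non-master processes, unless force=True is passed."""
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        force = kwargs.pop('force', False)
        if is_master or force:
            builtin_print(*args, **kwargs)

    __builtin__.print = print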
Example 6: train_one_epoch
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def train_one_epoch(model, criterion, optimizer, data_loader, device, epoch, args):
    epoch_loss = 0.0
    for image, target, input_len, target_len in tqdm(data_loader):
        image = image.to(device)
        # print(target, target_len, input_len)
        outputs = model(image.to(torch.float32))  # [B,N,C]
        outputs = torch.log_softmax(outputs, dim=2)
        outputs = outputs.permute([1, 0, 2])  # [N,B,C]
        loss = criterion(outputs[:], target, input_len, target_len)
        # gradient update
        model.zero_grad()
        loss.backward()
        optimizer.step()
        # accumulate the loss for this epoch
        epoch_loss += loss.item() * image.size(0)
        if np.isnan(loss.item()):
            print(target, input_len, target_len)
    epoch_loss = epoch_loss / len(data_loader.dataset)
    # print the log; weights are saved elsewhere
    print('Epoch: {}/{} loss: {:03f}'.format(epoch + 1, args.epochs, epoch_loss))
    return epoch_loss
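The criterion here is presumably torch.nn.CTCLoss, which is why the model output is converted to log-probabilities and permuted so the time dimension comes first, as CTCLoss expects log-probs of shape (T, B, C). A small shape-only sketch of that call, with made-up dimensions:

import torch

criterion = torch.nn.CTCLoss(blank=0)                       # assumption: class 0 is the blank
log_probs = torch.randn(50, 4, 37).log_softmax(2)           # (T=50, B=4, C=37)
targets = torch.randint(1, 37, (4, 10))                     # (B, max_target_len), no blanks
input_len = torch.full((4,), 50, dtype=torch.long)          # every sequence uses all 50 steps
target_len = torch.randint(1, 11, (4,), dtype=torch.long)   # actual label lengths
loss = criterion(log_probs, targets, input_len, target_len)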
Example 7: init_distributed_mode
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def init_distributed_mode(args):
    if 'RANK' in os.environ and 'WORLD_SIZE' in os.environ:
        args.rank = int(os.environ["RANK"])
        args.world_size = int(os.environ['WORLD_SIZE'])
        args.gpu = int(os.environ['LOCAL_RANK'])
    elif 'SLURM_PROCID' in os.environ:
        args.rank = int(os.environ['SLURM_PROCID'])
        args.gpu = args.rank % torch.cuda.device_count()
    else:
        print('Not using distributed mode')
        args.distributed = False
        return

    args.distributed = True
    torch.cuda.set_device(args.gpu)
    args.dist_backend = 'nccl'
    print('| distributed init (rank {}): {}'.format(
        args.rank, args.dist_url), flush=True)
    torch.distributed.init_process_group(backend=args.dist_backend, init_method=args.dist_url,
                                         world_size=args.world_size, rank=args.rank)
    torch.distributed.barrier()
    setup_for_distributed(args.rank == 0)
Example 8: show_banner
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def show_banner(self):
    banner = open(os.path.join(self.data_path, "banner.txt")).read()
    print(banner)
    # print version
    print(' ' * 15 + __version__)
    print('')
    if self.loaded_category == {}:
        print(
            f'{framework.Colors.B}[0] No Module To Display{framework.Colors.N}')
    else:
        counts = [(self.loaded_category[x], x)
                  for x in self.loaded_category] if self.loaded_category != [] else [0]
        count_len = len(max([str(x[0]) for x in counts], key=len))
        for count in sorted(counts, reverse=True):
            cnt = f'[{count[0]}]'
            mod_name = count[1].title() if '/' in count[1] else count[1]
            print(f'{framework.Colors.B}{cnt.ljust(count_len + 2)} {mod_name} modules{framework.Colors.N}')
    print('')
Example 9: plot_pairwise_transfer_learning
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def plot_pairwise_transfer_learning(root_dir, classifier_name, archive_name,
                                    df_perf, df_transfer, fig, color, title,
                                    new_classifier_name, agg='max', already_calculated=False):
    """Plot the aggregated accuracy with transfer learning against the accuracy without it."""
    print('Drawing', agg, 'accuracy plot with transfer learning vs without transfer learning')
    if agg == 'maximum':
        df_transfer_agg = df_transfer.max(axis=0)
    elif agg == 'minimum':
        df_transfer_agg = df_transfer.min(axis=0)
    elif agg == 'agg':
        # already aggregated
        df_transfer_agg = df_transfer
    elif agg == 'median':
        df_transfer_agg = df_transfer.median(axis=0)
    else:
        df_transfer_agg = df_transfer.loc[agg]
    df_perf = concat_to_df_perf(df_perf, df_perf, df_transfer_agg, classifier_name,
                                new_classifier_name, archive_name, print_win_losses=True,
                                already_calculated=already_calculated)
    plot_pairwise(root_dir, classifier_name, new_classifier_name,
                  res_df=df_perf, title=title, fig=fig, color=color, label=agg)
Example 10: concat_to_df_perf
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def concat_to_df_perf(df_perf, df_to_concat, df_transfer_agg, classifier_name, new_classifier_name,
                      archive_name, print_win_losses=False, already_calculated=False):
    original_acc = df_perf.loc[df_perf['classifier_name'] == classifier_name]['accuracy'].values
    if already_calculated == False:
        # convert the relative improvement (in percent) back to absolute accuracy
        df_transfer_agg[:] = (df_transfer_agg.values * original_acc) / 100 + original_acc
    # win / loss / draw counts
    if print_win_losses == True:
        print(classifier_name, 'vs', new_classifier_name)
        uniq, counts = np.unique(original_acc < df_transfer_agg.values, return_counts=True)
        print('\tWin:', counts[-1])
        uniq, counts = np.unique(original_acc > df_transfer_agg.values, return_counts=True)
        print('\tLosses:', counts[-1])
        uniq, counts = np.unique(original_acc == df_transfer_agg.values, return_counts=True)
        print('\tDraw:', counts[-1])
    # create a dataframe for the transfer-learning results
    df_perf_transfer = pd.DataFrame({'dataset_name': df_transfer_agg.index,
                                     'accuracy': df_transfer_agg.values})
    # add the necessary attributes
    df_perf_transfer['classifier_name'] = new_classifier_name
    df_perf_transfer['archive_name'] = archive_name
    # concatenate with the original classifier's results
    return pd.concat([df_to_concat, df_perf_transfer], sort=False)
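When already_calculated is False, df_transfer_agg holds the relative change in percent rather than an absolute accuracy, and the in-place assignment converts it with new_acc = orig_acc * pct / 100 + orig_acc. A small numeric check of that conversion (values are made up):

import numpy as np
import pandas as pd

original_acc = np.array([0.80])
df_transfer_agg = pd.Series([5.0], index=['some_dataset'])    # +5% relative improvement
df_transfer_agg[:] = (df_transfer_agg.values * original_acc) / 100 + original_acc
print(df_transfer_agg.values)                                 # [0.84]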
Example 11: Print_Function
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def Print_Function():
    """ Print out the source of the current function """
    if off_mode:
        return
    tmp_str = inspect.getsource(inspect.currentframe().f_back)
    if GaLatexPrinter.latex_flg:
        # print '#Code for '+fct_name
        print(r'##\begin{lstlisting}[language=Python,showspaces=false,'
              r'showstringspaces=false,backgroundcolor=\color{gray},frame=single]')
        print(tmp_str)
        print('##\\end{lstlisting}')
        print('#Code Output:')
    else:
        print('\n' + 80 * '*')
        # print '\nCode for '+fct_name
        print(tmp_str)
        print('Code output:\n')
    return
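Because inspect.getsource is given inspect.currentframe().f_back, the source that gets printed is that of whatever function called Print_Function. A usage sketch, assuming Print_Function (and the module globals off_mode and GaLatexPrinter it relies on) has been imported from its defining module:

def demo():
    Print_Function()    # prints the source code of demo() itself
    return 2 + 2

demo()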
Example 12: suppress_output
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def suppress_output():
    """Suppress printing on the current device. Force printing with `force=True`."""
    import builtins as __builtin__
    builtin_print = __builtin__.print

    def print(*args, **kwargs):
        if 'force' in kwargs:
            force = kwargs.pop('force')
            if force:
                builtin_print(*args, **kwargs)

    __builtin__.print = print

    import mlperf_compliance.mlperf_log as mpl

    def empty_func(*args, **kwargs):
        pass

    mpl.transformer_print = empty_func
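After suppress_output() has run, plain print() calls on that process are silently dropped, but the wrapper honours the force keyword, so selected messages can still be emitted (a usage sketch, assuming the mlperf_compliance package is importable):

suppress_output()
print('per-worker chatter')                   # swallowed
print('rank 0 checkpoint saved', force=True)  # still printed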
Example 13: __call__
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def __call__(self, func):
    '''
    #==============================================================
    # Entry point of the class decorator
    #==============================================================
    '''
    orig_func[func.__name__] = func

    @functools.wraps(func)
    def _run_threads(*args, **kw):
        p = []
        for _ in range(self.num):
            # wrap the call in a try/except so an exception cannot prevent the join below
            def _func():
                try:
                    func(*args, **kw)
                except Exception as e:
                    if log_flag._elog:
                        print(traceback.format_exc())
            p.append(Thread(target=_func))
        for i in p: i.start()
        if self.join:
            for i in p: i.join()
    return _run_threads
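The decorator saves the original function in orig_func and returns _run_threads, which fans a single call out across self.num threads and logs any per-thread exception instead of letting it escape. A self-contained sketch of the same fan-out pattern, independent of this class (all names illustrative):

import functools
import traceback
from threading import Thread

def run_in_threads(num, join=True):
    def deco(func):
        @functools.wraps(func)
        def wrapper(*args, **kw):
            def safe_call():
                try:
                    func(*args, **kw)
                except Exception:
                    # log the traceback instead of letting the thread die silently
                    print(traceback.format_exc())
            threads = [Thread(target=safe_call) for _ in range(num)]
            for t in threads:
                t.start()
            if join:
                for t in threads:
                    t.join()
        return wrapper
    return deco

@run_in_threads(3)
def greet(name):
    print('hello,', name)

greet('world')    # runs greet in three threads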
Example 14: show
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def show(self):
    '''
    #==============================================================
    # Print the number of thread-pool groups,
    # and the number of threads in each group.
    #
    # >>> vthread.show()
    # [ MainThread ] threads group number: 3
    # [ MainThread ] gqueue:0, alive threads number:6
    # [ MainThread ] gqueue:1, alive threads number:5
    # [ MainThread ] gqueue:2, alive threads number:2
    # >>>
    #==============================================================
    '''
    l = len(self._pool_func_num)
    print("threads group number: {}".format(l))
    for i, j in self._pool_func_num.items():
        print("gqueue:{}, alive threads number:{}".format(i, j))
Example 15: unpatch_all
# Required import: import builtins [as alias]
# Or: from builtins import print [as alias]
def unpatch_all(can_be_repatch=False):
    '''
    #==============================================================
    # Remove the print patch.
    # :can_be_repatch=False
    #     By design, every decoration patches print once by default.
    #     Making the unpatched print non-repatchable was added so that,
    #     once this function has been called at the top of a script,
    #     later decorations will not patch print again.
    #==============================================================
    '''
    global _new_print, _org_print
    builtins.print = _org_print
    if not can_be_repatch:
        _new_print = builtins.print
    # function