This article collects typical usage examples of the tqdm.tqdm_notebook method in Python. If you are wondering what tqdm.tqdm_notebook does, how to call it, or what real-world usage looks like, the curated method examples below should help. You can also explore further usage examples of the tqdm module that this method belongs to.
The following presents 15 code examples of the tqdm.tqdm_notebook method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
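Before the individual examples, here is a minimal sketch of the basic call pattern, assuming a Jupyter notebook environment (in recent tqdm releases the same bar is also exposed as tqdm.notebook.tqdm, with tqdm_notebook kept as a thin alias):

from tqdm import tqdm_notebook

# Wrap any iterable to get a notebook-friendly progress bar.
for item in tqdm_notebook(range(100), desc='processing', unit='item'):
    pass  # replace with real per-item work

# Manual mode: create the bar with a known total and update it yourself.
pbar = tqdm_notebook(total=1000, desc='downloading', unit='rows')
for chunk in (250, 250, 500):
    pbar.update(chunk)
pbar.close()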
Example 1: copy_model_weights
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def copy_model_weights(src_model, dst_model):
    """
    copy weights from the src keras model to the dst keras model via layer names
    Parameters:
        src_model: source keras model to copy from
        dst_model: destination keras model to copy to
    """
    for layer in tqdm(dst_model.layers):
        try:
            wts = src_model.get_layer(layer.name).get_weights()
            layer.set_weights(wts)
        except:
            print('Could not copy weights of %s' % layer.name)
            continue
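A hypothetical usage sketch (the two small Keras models below are invented for illustration; the function copies weights wherever layer names match and reports the rest):

from tensorflow import keras

def small_model():
    return keras.Sequential([
        keras.layers.Dense(16, activation='relu', input_shape=(8,), name='dense1'),
        keras.layers.Dense(1, name='out'),
    ])

src = small_model()           # pretend this one holds trained weights
dst = small_model()           # same layer names, so every layer is matched
copy_model_weights(src, dst)  # unmatched layers would be reported and skipped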
Example 2: set_representative_sequence
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def set_representative_sequence(self, force_rerun=False):
    """Automatically consolidate loaded sequences (manual, UniProt, or KEGG) and set a single representative sequence.

    Manually set representative sequences override all existing mappings. UniProt mappings override KEGG mappings
    except when KEGG mappings have PDBs associated with them and UniProt doesn't.

    Args:
        force_rerun (bool): Set to True to recheck stored sequences
    """
    # TODO: rethink use of multiple database sources - may lead to inconsistency with genome sources
    successfully_mapped_counter = 0

    for g in tqdm(self.genes):
        repseq = g.protein.set_representative_sequence(force_rerun=force_rerun)
        if repseq:
            if repseq.sequence_file:
                successfully_mapped_counter += 1

    log.info('{}/{}: number of genes with a representative sequence'.format(len(self.genes_with_a_representative_sequence),
                                                                            len(self.genes)))
    log.info('See the "df_representative_sequences" attribute for a summary dataframe.')
Example 3: pdb_downloader_and_metadata
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def pdb_downloader_and_metadata(self, outdir=None, pdb_file_type=None, force_rerun=False):
    """Download ALL mapped experimental structures to each protein's structures directory.

    Args:
        outdir (str): Path to output directory, if GEM-PRO directories were not set or other output directory is desired
        pdb_file_type (str): Type of PDB file to download, if not already set or other format is desired
        force_rerun (bool): If files should be re-downloaded if they already exist
    """
    if not pdb_file_type:
        pdb_file_type = self.pdb_file_type

    counter = 0
    for g in tqdm(self.genes):
        pdbs = g.protein.pdb_downloader_and_metadata(outdir=outdir, pdb_file_type=pdb_file_type, force_rerun=force_rerun)
        if pdbs:
            counter += len(pdbs)

    log.info('Updated PDB metadata dataframe. See the "df_pdb_metadata" attribute for a summary dataframe.')
    log.info('Saved {} structures total'.format(counter))
Example 4: get_freesasa_annotations
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def get_freesasa_annotations(self, include_hetatms=False, representatives_only=True, force_rerun=False):
    """Run freesasa on structures and store calculations.

    Annotations are stored in the protein structure's chain sequence at:
    ``<chain_prop>.seq_record.letter_annotations['*-freesasa']``

    Args:
        include_hetatms (bool): If HETATMs should be included in calculations. Defaults to ``False``.
        representatives_only (bool): If analysis should only be run on the representative structure
        force_rerun (bool): If calculations should be rerun even if an output file exists
    """
    for g in tqdm(self.genes):
        g.protein.get_freesasa_annotations(include_hetatms=include_hetatms,
                                           representative_only=representatives_only,
                                           force_rerun=force_rerun)
Example 5: download_patric_genomes
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def download_patric_genomes(self, ids, force_rerun=False):
    """Download genome files from PATRIC given a list of PATRIC genome IDs and load them as strains.

    Args:
        ids (str, list): PATRIC ID or list of PATRIC IDs
        force_rerun (bool): If genome files should be downloaded again even if they exist
    """
    ids = ssbio.utils.force_list(ids)

    counter = 0
    log.info('Downloading sequences from PATRIC...')
    for patric_id in tqdm(ids):
        f = ssbio.databases.patric.download_coding_sequences(patric_id=patric_id, seqtype='protein',
                                                             outdir=self.sequences_by_organism_dir,
                                                             force_rerun=force_rerun)
        if f:
            self.load_strain(patric_id, f)
            counter += 1
            log.debug('{}: downloaded sequence'.format(patric_id))
        else:
            log.warning('{}: unable to download sequence'.format(patric_id))

    log.info('Created {} new strain GEM-PROs, accessible at "strains" attribute'.format(counter))
Example 6: create_bar
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def create_bar(bar, batch_size, n_iters, n_epochs, drop_last, length):
    """ Create progress bar with desired number of total iterations."""
    if n_iters is not None:
        total = n_iters
    elif n_epochs is None:
        total = sys.maxsize
    elif drop_last:
        total = length // batch_size * n_epochs
    else:
        total = math.ceil(length * n_epochs / batch_size)

    if callable(bar):
        progressbar = bar(total=total)
    elif bar == 'n':
        progressbar = tqdm.tqdm_notebook(total=total)
    else:
        progressbar = tqdm.tqdm(total=total)
    return progressbar
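A hedged usage sketch (the batch settings below are invented; bar='n' selects the notebook flavour, and the computed total here is ceil(1000 * 10 / 32) = 313 batches):

pbar = create_bar(bar='n', batch_size=32, n_iters=None, n_epochs=10,
                  drop_last=False, length=1000)
for _ in range(pbar.total):
    pbar.update(1)   # one tick per processed batch
pbar.close()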
Example 7: _memory_process
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def _memory_process(self, df):
    init_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
    print('Original data occupies {} GB memory.'.format(init_memory))
    df_cols = df.columns

    for col in tqdm_notebook(df_cols):
        try:
            if 'float' in str(df[col].dtypes):
                max_val = df[col].max()
                min_val = df[col].min()
                trans_types = self._get_type(min_val, max_val, 'float')
                if trans_types is not None:
                    df[col] = df[col].astype(trans_types)
            elif 'int' in str(df[col].dtypes):
                max_val = df[col].max()
                min_val = df[col].min()
                trans_types = self._get_type(min_val, max_val, 'int')
                if trans_types is not None:
                    df[col] = df[col].astype(trans_types)
        except:
            print(' Can not do any process for column, {}.'.format(col))

    afterprocess_memory = df.memory_usage().sum() / 1024 ** 2 / 1024
    print('After processing, the data occupies {} GB memory.'.format(afterprocess_memory))
    return df
Developer: WeavingWong | Project: DigiX_HuaWei_Population_Age_Attribution_Predict | Lines: 25 | Source file: predict_output_usage.py
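The same downcasting idea can be sketched without the surrounding class by letting pandas pick the narrowest safe dtype (pd.to_numeric with downcast is standard pandas; the DataFrame below is invented for illustration):

import numpy as np
import pandas as pd
from tqdm import tqdm_notebook

df = pd.DataFrame({'ints': np.arange(1_000_000, dtype='int64'),
                   'floats': np.random.rand(1_000_000)})
print('before: {:.1f} MB'.format(df.memory_usage().sum() / 1024 ** 2))

for col in tqdm_notebook(df.columns):
    kind = 'integer' if 'int' in str(df[col].dtype) else 'float'
    df[col] = pd.to_numeric(df[col], downcast=kind)  # narrowest dtype that still fits the values

print('after: {:.1f} MB'.format(df.memory_usage().sum() / 1024 ** 2))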
Example 8: on_epoch_begin
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def on_epoch_begin(self, net, dataset_train=None, dataset_valid=None, **kwargs):
    # Assume it is a number until proven otherwise.
    batches_per_epoch = self.batches_per_epoch

    if self.batches_per_epoch == 'auto':
        batches_per_epoch = self._get_batches_per_epoch(
            net, dataset_train, dataset_valid
        )
    elif self.batches_per_epoch == 'count':
        if len(net.history) <= 1:
            # No limit is known until the end of the first epoch.
            batches_per_epoch = None
        else:
            batches_per_epoch = len(net.history[-2, 'batches'])

    if self._use_notebook():
        self.pbar_ = tqdm.tqdm_notebook(total=batches_per_epoch, leave=False)
    else:
        self.pbar_ = tqdm.tqdm(total=batches_per_epoch, leave=False)
Example 9: default_progress
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def default_progress(verbose=None, iftop=False):
    '''
    Returns a progress function that can wrap iterators to print
    progress messages, if verbose is True.

    If verbose is False, or if iftop is True and there is already
    a top-level tqdm loop being reported, then a quiet non-printing
    identity function is returned.

    verbose can also be set to a specific progress function rather
    than True, and that function will be used.
    '''
    global default_verbosity
    if verbose is None:
        verbose = default_verbosity
    if not verbose or (iftop and nested_tqdm()) or tqdm is None:
        return lambda x, *args, **kw: x
    if verbose == True:
        return tqdm_notebook if in_notebook() else tqdm_terminal
    return verbose
Example 10: _get_progress_bar
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def _get_progress_bar(self, progress_bar_type):
    """Construct a tqdm progress bar object, if tqdm is installed."""
    if tqdm is None:
        if progress_bar_type is not None:
            warnings.warn(_NO_TQDM_ERROR, UserWarning, stacklevel=3)
        return None

    description = "Downloading"
    unit = "rows"

    try:
        if progress_bar_type == "tqdm":
            return tqdm.tqdm(desc=description, total=self.total_rows, unit=unit)
        elif progress_bar_type == "tqdm_notebook":
            return tqdm.tqdm_notebook(
                desc=description, total=self.total_rows, unit=unit
            )
        elif progress_bar_type == "tqdm_gui":
            return tqdm.tqdm_gui(desc=description, total=self.total_rows, unit=unit)
    except (KeyError, TypeError):
        # Protect ourselves from any tqdm errors. In case of
        # unexpected tqdm behavior, just fall back to showing
        # no progress bar.
        warnings.warn(_NO_TQDM_ERROR, UserWarning, stacklevel=3)
    return None
Example 11: build_db
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def build_db(self, kw_path):
    def extract_verb(item):
        for word in item.split(";"):
            if "#v" in word:
                return word.split("#")[0]

    with open(kw_path) as f:
        for _ in tqdm(range(10211391)):
            line = f.readline()
            e1, r, e2, n2 = line.strip().split("\t")
            if self.rel_set and r not in self.rel_set:
                continue
            concept_id = e1 + "$" + r + "$" + e2
            verb = extract_verb(e1)
            if verb not in self.verb2triple:
                self.verb2triple[verb] = []
            self.verb2triple[verb].append(concept_id)
            match_key = tuple([t.split("#")[0] for t in e1.split(";")])
            if match_key not in self.key2triple:
                self.key2triple[match_key] = []
            self.key2triple[match_key].append(concept_id)
Example 12: get_progress_bar
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def get_progress_bar(module='tqdm'):
    """
    TODO: Write proper docstring
    """
    if module in ['tqdm']:
        try:
            from tqdm import tqdm
        except ImportError:
            def tqdm(x, *args, **kwargs):
                return x
        return tqdm
    elif module in ['tqdm_notebook']:
        try:
            from tqdm import tqdm_notebook as tqdm
        except ImportError:
            def tqdm(x, *args, **kwargs):
                return x
        return tqdm
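A possible usage pattern (hedged sketch; the point is that the returned callable silently degrades to a pass-through identity wrapper when tqdm is not installed):

progress = get_progress_bar(module='tqdm_notebook')
total = 0
for value in progress(range(10000)):
    total += value   # the loop runs identically with or without a visible bar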
Example 13: compute_by_block
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def compute_by_block(dsx):
    """Evaluate a chunked (dask-backed) array one block at a time, with a notebook progress bar."""
    # determine index key for each chunk
    slices = []
    for chunks in dsx.chunks:
        L = [0, ] + list(np.cumsum(chunks))
        slices.append([slice(a, b)
                       for a, b in (zip(L[:-1], L[1:]))])
    indexes = list(product(*slices))

    # allocate memory to receive result
    if isinstance(dsx, xr.DataArray):
        result = xr.zeros_like(dsx).load()
    else:
        result = np.zeros(dsx.shape)

    # evaluate each chunk one at a time
    for index in tqdm_notebook(indexes, leave=False):
        block = dsx.__getitem__(index).compute()
        result.__setitem__(index, block)
    return result
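A hedged usage sketch (the chunked array below is invented; compute_by_block assumes dsx is a dask-backed array or DataArray whose .chunks attribute is populated):

import dask.array as da
import xarray as xr

dsx = xr.DataArray(da.random.random((400, 400), chunks=(100, 100)), dims=('y', 'x'))
result = compute_by_block(dsx)   # evaluates one 100 x 100 block at a time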
Example 14: bering_strait
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def bering_strait(expts=[]):
    """
    Plot Bering Strait transport.

    Parameters
    ----------
    expts : str or list of str
        Experiment name(s).
    """
    plt.figure(figsize=(12, 6))

    if not isinstance(expts, list):
        expts = [expts]

    for expt in tqdm_notebook(expts, leave=False, desc='experiments'):
        transport = cc.diagnostics.bering_strait(expt)
        transport.plot(label=expt)

    IPython.display.clear_output()
    plt.title('Bering Strait Transport')
    plt.xlabel('Time')
    plt.ylabel('Transport (Sv)')
    plt.legend(fontsize=10, loc='best')
Example 15: aabw
# Required import: import tqdm [as alias]
# Or: from tqdm import tqdm_notebook [as alias]
def aabw(expts=[]):
    """
    Plot timeseries of AABW transport measured at 55S.

    Parameters
    ----------
    expts : str or list of str
        Experiment name(s).
    """
    plt.figure(figsize=(12, 6))

    if not isinstance(expts, list):
        expts = [expts]

    for expt in tqdm_notebook(expts, leave=False, desc='experiments'):
        psi_aabw = cc.diagnostics.calc_aabw(expt)
        psi_aabw.plot(label=expt)

    IPython.display.clear_output()
    plt.title('AABW Transport at 40S')
    plt.xlabel('Time')
    plt.ylabel('Transport (Sv)')
    plt.legend(fontsize=10, loc='best')