This article collects typical usage examples of the Python method detectron.utils.c2.UnscopeName. If you are wondering what c2.UnscopeName does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further examples from the module the method lives in, detectron.utils.c2.
Seven code examples of the c2.UnscopeName method are shown below, sorted by popularity by default.
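For orientation: as the docstring in Example 2 notes, UnscopeName maps a GPU-device-scoped blob name to its unscoped form (e.g., 'gpu_0/conv1_w' -> 'conv1_w'). Below is a minimal sketch of that expected behavior; the blob name is illustrative only.

# Minimal sketch; the blob name 'gpu_0/conv1_w' is illustrative only
from detectron.utils import c2 as c2_utils

scoped_name = 'gpu_0/conv1_w'                      # device-scoped name, as produced under a 'gpu_0' name scope
unscoped_name = c2_utils.UnscopeName(scoped_name)
assert unscoped_name == 'conv1_w'                  # the 'gpu_0/' scope prefix is stripped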
Example 1: get_params
# Required import: from detectron.utils import c2 [as alias]
# Or: from detectron.utils.c2 import UnscopeName [as alias]
def get_params(model):
    blobs = {}  # gpu_0 blobs with unscoped_name as key
    all_blobs = {}  # all blobs with scoped name as key
    # Save all parameters
    for param in model.params:
        scoped_name = str(param)
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if 'gpu_0' in scoped_name:
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
        all_blobs[scoped_name] = workspace.FetchBlob(scoped_name)
    for param in model.TrainableParams():
        scoped_name = str(param) + '_momentum'
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if 'gpu_0' in scoped_name:
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
        all_blobs[scoped_name] = workspace.FetchBlob(scoped_name)
    return blobs, all_blobs
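Following the setup in Example 4 below, here is a quick hypothetical look at the two dictionaries this helper returns: blobs uses unscoped keys and holds only the gpu_0 copies, while all_blobs keeps every device-scoped copy.

# Hypothetical usage; 'model' is built as in Example 4 and the blob names are illustrative
blobs, all_blobs = get_params(model)
# blobs:     {'conv1_w': ndarray, 'conv1_w_momentum': ndarray, ...}       (unscoped keys, gpu_0 only)
# all_blobs: {'gpu_0/conv1_w': ndarray, 'gpu_1/conv1_w': ndarray, ...}    (scoped keys, all GPUs)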
Example 2: save_model_to_weights_file
# Required import: from detectron.utils import c2 [as alias]
# Or: from detectron.utils.c2 import UnscopeName [as alias]
def save_model_to_weights_file(weights_file, model):
    """Stash model weights in a dictionary and pickle them to a file. We map
    GPU device scoped names to unscoped names (e.g., 'gpu_0/conv1_w' ->
    'conv1_w').
    """
    logger.info(
        'Saving parameters and momentum to {}'.format(
            os.path.abspath(weights_file)))
    blobs = {}
    # Save all parameters
    for param in model.params:
        scoped_name = str(param)
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save momentum
    for param in model.TrainableParams():
        scoped_name = str(param) + '_momentum'
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save preserved blobs
    for scoped_name in workspace.Blobs():
        if scoped_name.startswith('__preserve__/'):
            unscoped_name = c2_utils.UnscopeName(scoped_name)
            if unscoped_name not in blobs:
                logger.debug(
                    ' {:s} -> {:s} (preserved)'.format(
                        scoped_name, unscoped_name))
                blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    cfg_yaml = envu.yaml_dump(cfg)
    save_object(dict(blobs=blobs, cfg=cfg_yaml), weights_file)
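The saved file is a pickled dict with 'blobs' and 'cfg' keys (see the save_object call above). A hedged sketch of reading it back with plain pickle follows; the file name and the exact loading details (e.g., Python 2 vs 3 string encoding) are assumptions and may differ by Detectron version.

# Sketch only: assumes the checkpoint was written by save_model_to_weights_file above
import pickle

with open('model_final.pkl', 'rb') as f:  # 'model_final.pkl' is a hypothetical file name
    saved = pickle.load(f)                # on Python 3, encoding='latin1' may be required
blobs = saved['blobs']                    # unscoped blob name -> numpy array (weights and momentum)
cfg_yaml = saved['cfg']                   # YAML dump of the config in effect when saving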
Example 3: AddLosses
# Required import: from detectron.utils import c2 [as alias]
# Or: from detectron.utils.c2 import UnscopeName [as alias]
def AddLosses(self, losses):
    if not isinstance(losses, list):
        losses = [losses]
    # Conversion to str allows losses to include BlobReferences
    losses = [c2_utils.UnscopeName(str(l)) for l in losses]
    self.losses = list(set(self.losses + losses))
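A hypothetical call, assuming loss blobs created under the 'gpu_0' name scope: because each entry is passed through str() and UnscopeName, BlobReferences and plain strings both end up as unscoped names in self.losses.

# Hypothetical: the loss blob names are illustrative only
model.AddLosses(['gpu_0/loss_cls', 'gpu_0/loss_bbox'])
# model.losses now holds ['loss_cls', 'loss_bbox'] (order not guaranteed, since a set is used)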
Example 4: test_restore_checkpoint
# Required import: from detectron.utils import c2 [as alias]
# Or: from detectron.utils.c2 import UnscopeName [as alias]
def test_restore_checkpoint():
    # Create Model
    model = model_builder.create(cfg.MODEL.TYPE, train=True)
    add_momentum_init_ops(model)
    init_weights(model)
    # Fill input blobs
    roidb = combined_roidb_for_training(
        cfg.TRAIN.DATASETS, cfg.TRAIN.PROPOSAL_FILES
    )
    model_builder.add_training_inputs(model, roidb=roidb)
    workspace.CreateNet(model.net)
    # Bookkeeping for checkpoint creation
    iter_num = 0
    checkpoints = {}
    output_dir = get_output_dir(cfg.TRAIN.DATASETS, training=True)
    chk_file_path = os.path.join(output_dir, 'model_iter{}.pkl'.format(iter_num))
    checkpoints[iter_num] = chk_file_path
    # Save model weights
    nu.save_model_to_weights_file(checkpoints[iter_num], model)
    orig_gpu_0_params, orig_all_params = get_params(model)
    # Change the model weights
    init_weights(model)
    # Reload the weights in the model
    nu.initialize_gpu_from_weights_file(model, chk_file_path, gpu_id=0)
    nu.broadcast_parameters(model)
    shutil.rmtree(cfg.OUTPUT_DIR)
    _, restored_all_params = get_params(model)
    # Check if all params are loaded correctly
    for scoped_name, blob in orig_all_params.items():
        np.testing.assert_array_equal(blob, restored_all_params[scoped_name])
    # Check if broadcast_parameters works
    for scoped_name, blob in restored_all_params.items():
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        np.testing.assert_array_equal(blob, orig_gpu_0_params[unscoped_name])
Example 5: save_model_to_weights_file
# Required import: from detectron.utils import c2 [as alias]
# Or: from detectron.utils.c2 import UnscopeName [as alias]
def save_model_to_weights_file(weights_file, model):
    """Stash model weights in a dictionary and pickle them to a file. We map
    GPU device scoped names to unscoped names (e.g., 'gpu_0/conv1_w' ->
    'conv1_w').
    """
    logger.info(
        'Saving parameters and momentum to {}'.format(
            os.path.abspath(weights_file)))
    blobs = {}
    # Save all parameters
    for param in model.params:
        scoped_name = str(param)
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save momentum
    for param in model.TrainableParams():
        scoped_name = str(param) + '_momentum'
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Save preserved blobs
    for scoped_name in workspace.Blobs():
        if scoped_name.startswith('__preserve__/'):
            unscoped_name = c2_utils.UnscopeName(scoped_name)
            if unscoped_name not in blobs:
                logger.debug(
                    ' {:s} -> {:s} (preserved)'.format(
                        scoped_name, unscoped_name))
                blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    cfg_yaml = yaml.dump(cfg)
    save_object(dict(blobs=blobs, cfg=cfg_yaml), weights_file)
Example 6: print_net
# Required import: from detectron.utils import c2 [as alias]
# Or: from detectron.utils.c2 import UnscopeName [as alias]
def print_net(model, namescope='gpu_0'):
    """Print the model network."""
    logger.info('Printing model: {}'.format(model.net.Name()))
    op_list = model.net.Proto().op
    for op in op_list:
        input_name = op.input
        # For simplicity: only print the first output
        # Not recommended if there are split layers
        output_name = str(op.output[0])
        op_type = op.type
        op_name = op.name
        if namescope is None or output_name.startswith(namescope):
            # Only print the forward pass network
            if output_name.find('grad') >= 0 or output_name.find('__m') >= 0:
                continue
            try:
                # Under some conditions (e.g., dynamic memory optimization)
                # it is possible that the network frees some blobs when they
                # are no longer needed. Handle this case...
                output_shape = workspace.FetchBlob(output_name).shape
            except BaseException:
                output_shape = '<unknown>'
            first_blob = True
            op_label = op_type + (op_name if op_name == '' else ':' + op_name)
            suffix = ' ------- (op: {})'.format(op_label)
            for j in range(len(input_name)):
                if input_name[j] in model.params:
                    continue
                input_blob = workspace.FetchBlob(input_name[j])
                if isinstance(input_blob, np.ndarray):
                    input_shape = input_blob.shape
                    logger.info('{:28s}: {:20s} => {:28s}: {:20s}{}'.format(
                        c2_utils.UnscopeName(str(input_name[j])),
                        '{}'.format(input_shape),
                        c2_utils.UnscopeName(str(output_name)),
                        '{}'.format(output_shape),
                        suffix))
                    if first_blob:
                        first_blob = False
                        suffix = ' ------|'
    logger.info('End of model: {}'.format(model.net.Name()))
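A hedged usage sketch: print_net fetches blob shapes from the workspace, so it is meant to be called once the net has been created and the relevant blobs exist. The setup here is assumed to follow Example 4.

# Hypothetical usage, assuming 'model' and the workspace are set up as in Example 4
print_net(model)                   # default namescope='gpu_0'; logs each op's input blob/shape => output blob/shape
print_net(model, namescope=None)   # include ops from every name scope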
Example 7: print_net
# Required import: from detectron.utils import c2 [as alias]
# Or: from detectron.utils.c2 import UnscopeName [as alias]
def print_net(model, namescope='gpu_0'):
    """Print the model network."""
    logger.info('Printing model: {}'.format(model.net.Name()))
    op_list = model.net.Proto().op
    for op in op_list:
        input_name = op.input
        # For simplicity: only print the first output
        # Not recommended if there are split layers
        try:
            output_name = str(op.output[0])
        except BaseException:
            output_name = '<nothing>'
        op_type = op.type
        op_name = op.name
        if namescope is None or output_name.startswith(namescope):
            # Only print the forward pass network
            if output_name.find('grad') >= 0 or output_name.find('__m') >= 0:
                continue
            try:
                # Under some conditions (e.g., dynamic memory optimization)
                # it is possible that the network frees some blobs when they
                # are no longer needed. Handle this case...
                output_shape = workspace.FetchBlob(output_name).shape
            except BaseException:
                output_shape = '<unknown>'
            first_blob = True
            op_label = op_type + (op_name if op_name == '' else ':' + op_name)
            suffix = ' ------- (op: {})'.format(op_label)
            for j in range(len(input_name)):
                if input_name[j] in model.params:
                    continue
                input_blob = workspace.FetchBlob(input_name[j])
                if isinstance(input_blob, np.ndarray):
                    input_shape = input_blob.shape
                    logger.info('{:28s}: {:20s} => {:28s}: {:20s}{}'.format(
                        c2_utils.UnscopeName(str(input_name[j])),
                        '{}'.format(input_shape),
                        c2_utils.UnscopeName(str(output_name)),
                        '{}'.format(output_shape),
                        suffix))
                    if first_blob:
                        first_blob = False
                        suffix = ' ------|'
    logger.info('End of model: {}'.format(model.net.Name()))