本文整理汇总了Python中caffe2.python.workspace.Blobs方法的典型用法代码示例。如果您正苦于以下问题:Python workspace.Blobs方法的具体用法?Python workspace.Blobs怎么用?Python workspace.Blobs使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类caffe2.python.workspace
的用法示例。
在下文中一共展示了workspace.Blobs方法的7个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create_blobs_if_not_existed
# 需要导入模块: from caffe2.python import workspace [as 别名]
# 或者: from caffe2.python.workspace import Blobs [as 别名]
def create_blobs_if_not_existed(blob_names):
    """Create every blob in *blob_names* that is not already present in the
    default Caffe2 workspace. Existing blobs are left untouched.
    """
    known = set(workspace.Blobs())
    missing = [name for name in blob_names if name not in known]
    for name in missing:
        workspace.CreateBlob(str(name))
示例2: save_model_to_weights_file
# 需要导入模块: from caffe2.python import workspace [as 别名]
# 或者: from caffe2.python.workspace import Blobs [as 别名]
def save_model_to_weights_file(weights_file, model):
    """Stash model weights in a dictionary and pickle them to a file. We map
    GPU device scoped names to unscoped names (e.g., 'gpu_0/conv1_w' ->
    'conv1_w').
    """
    logger.info(
        'Saving parameters and momentum to {}'.format(
            os.path.abspath(weights_file)))
    blobs = {}
    # Parameters first, then their momentum buffers; first occurrence of an
    # unscoped name wins, so duplicates across GPUs are saved once.
    scoped_names = [str(p) for p in model.params]
    scoped_names += [str(p) + '_momentum' for p in model.TrainableParams()]
    for scoped_name in scoped_names:
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Blobs kept alive under the '__preserve__/' namescope are saved too.
    preserved = [
        name for name in workspace.Blobs()
        if name.startswith('__preserve__/')
    ]
    for scoped_name in preserved:
        unscoped_name = c2_utils.UnscopeName(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(
                ' {:s} -> {:s} (preserved)'.format(
                    scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    cfg_yaml = envu.yaml_dump(cfg)
    save_object(dict(blobs=blobs, cfg=cfg_yaml), weights_file)
示例3: get_ws_blobs
# 需要导入模块: from caffe2.python import workspace [as 别名]
# 或者: from caffe2.python.workspace import Blobs [as 别名]
def get_ws_blobs(blob_names=None):
    """Fetch blobs from the default workspace.

    Args:
        blob_names: iterable of blob names to fetch; if None, every blob
            currently in the workspace is fetched.

    Returns:
        dict mapping each blob name to the value returned by
        ``workspace.FetchBlob``.
    """
    if blob_names is None:
        blob_names = workspace.Blobs()
    # NOTE: the original initialized `blobs = {}` and then unconditionally
    # rebound it with this comprehension — the dead assignment is removed.
    return {name: workspace.FetchBlob(name) for name in blob_names}
示例4: print_all
# 需要导入模块: from caffe2.python import workspace [as 别名]
# 或者: from caffe2.python.workspace import Blobs [as 别名]
def print_all(self):
    """Dump every blob in the default workspace: first the full name list,
    then each name followed by its fetched value (approach 1).

    Approach 2 (per-parameter summaries) is kept below, commented out.
    """
    # approach 1: all — fetch the name list once instead of calling
    # workspace.Blobs() twice; the discarded enumerate index is gone.
    blob_names = workspace.Blobs()
    print(blob_names)
    for name in blob_names:
        print(name)
        print(self.FetchBlobWrapper(name))
    # approach 2: only summary
    # for param in self.model.params:
    #     self.model.Summarize(param, [], to_file=1)
    #     self.model.Summarize(self.model.param_to_grad[param], [], to_file=1)
示例5: save_model_to_weights_file
# 需要导入模块: from caffe2.python import workspace [as 别名]
# 或者: from caffe2.python.workspace import Blobs [as 别名]
def save_model_to_weights_file(weights_file, model):
    """Stash model weights in a dictionary and pickle them to a file. We map
    GPU device scoped names to unscoped names (e.g., 'gpu_0/conv1_w' ->
    'conv1_w').
    """
    logger.info(
        'Saving parameters and momentum to {}'.format(
            os.path.abspath(weights_file)))
    blobs = {}

    def _record(scoped, fmt=' {:s} -> {:s}'):
        # Fetch a scoped blob under its unscoped name; first hit wins so a
        # blob replicated across GPU scopes is stored only once.
        unscoped = c2_utils.UnscopeName(scoped)
        if unscoped not in blobs:
            logger.debug(fmt.format(scoped, unscoped))
            blobs[unscoped] = workspace.FetchBlob(scoped)

    # Save all parameters
    for param in model.params:
        _record(str(param))
    # Save momentum
    for param in model.TrainableParams():
        _record(str(param) + '_momentum')
    # Save preserved blobs
    for scoped_name in workspace.Blobs():
        if scoped_name.startswith('__preserve__/'):
            _record(scoped_name, fmt=' {:s} -> {:s} (preserved)')
    cfg_yaml = yaml.dump(cfg)
    save_object(dict(blobs=blobs, cfg=cfg_yaml), weights_file)
示例6: initialize_gpu_from_weights_file
# 需要导入模块: from caffe2.python import workspace [as 别名]
# 或者: from caffe2.python.workspace import Blobs [as 别名]
def initialize_gpu_from_weights_file(model, weights_file, gpu_id=0):
    """Initialize a network with ops on a specific GPU.

    If you use CUDA_VISIBLE_DEVICES to target specific GPUs, Caffe2 will
    automatically map logical GPU ids (starting from 0) to the physical GPUs
    specified in CUDA_VISIBLE_DEVICES.

    Args:
        model: model whose ``params`` determine which blobs to initialize.
        weights_file: path to a pickled dict of blobs (optionally wrapped
            under a 'blobs' key, with an optional 'cfg' entry).
        gpu_id: logical GPU on which the parameter blobs are fed.
    """
    logger = logging.getLogger(__name__)
    logger.info('Loading weights from: {}'.format(weights_file))
    ws_blobs = workspace.Blobs()
    # BUG FIX: pickle payloads are binary; text mode ('r') makes
    # pickle.load fail on Python 3. Open the file in 'rb' mode.
    with open(weights_file, 'rb') as f:
        src_blobs = pickle.load(f)
    if 'cfg' in src_blobs:
        print('------------cfg exist-----------------------------')
    if 'blobs' in src_blobs:
        # Backwards compat--dictionary used to be only blobs, now they are
        # stored under the 'blobs' key
        src_blobs = src_blobs['blobs']
    # Initialize weights on GPU gpu_id only
    unscoped_param_names = OrderedDict()  # Print these out in model order
    for blob in model.params:
        unscoped_param_names[UnscopeName(str(blob))] = True
    with NamedCudaScope(gpu_id):
        for unscoped_param_name in unscoped_param_names.keys():
            if (unscoped_param_name.find(']_') >= 0 and
                    unscoped_param_name not in src_blobs):
                # Special case for sharing initialization from a pretrained
                # model: If a blob named '_[xyz]_foo' is in model.params and
                # not in the initialization blob dictionary, then load source
                # blob 'foo' into destination blob '_[xyz]_foo'
                src_name = unscoped_param_name[
                    unscoped_param_name.find(']_') + 2:]
            else:
                src_name = unscoped_param_name
            if src_name not in src_blobs:
                logger.info('{:s} not found'.format(src_name))
                continue
            dst_name = core.ScopedName(unscoped_param_name)
            has_momentum = src_name + '_momentum' in src_blobs
            has_momentum_str = ' [+ momentum]' if has_momentum else ''
            logger.debug(
                '{:s}{:} loaded from weights file into {:s}: {}'.format(
                    src_name, has_momentum_str, dst_name,
                    src_blobs[src_name].shape))
            if dst_name in ws_blobs:
                # If the blob is already in the workspace, make sure that it
                # matches the shape of the loaded blob
                ws_blob = workspace.FetchBlob(dst_name)
                # NOTE(review): the message passes src_name where dst_name
                # seems intended — confirm before changing the error text.
                assert ws_blob.shape == src_blobs[src_name].shape, (
                    'Workspace blob {} with shape {} does not match weights '
                    'file shape {}').format(
                        src_name, ws_blob.shape, src_blobs[src_name].shape)
            workspace.FeedBlob(
                dst_name,
                src_blobs[src_name].astype(np.float32, copy=False))
            if has_momentum:
                workspace.FeedBlob(
                    dst_name + '_momentum',
                    src_blobs[src_name + '_momentum'].astype(
                        np.float32, copy=False))
    # We preserve blobs that are in the weights file but not used by the
    # current model. We load these into CPU memory under the '__preserve__/'
    # namescope. These blobs will be stored when saving a model to a weights
    # file. This feature allows for alternating optimization of Faster R-CNN
    # in which blobs unused by one step can still be preserved forward and
    # used to initialize another step.
    for src_name in src_blobs.keys():
        if (src_name not in unscoped_param_names and
                not src_name.endswith('_momentum') and
                src_blobs[src_name] is not None):
            with CpuScope():
                workspace.FeedBlob(
                    '__preserve__/{:s}'.format(src_name),
                    src_blobs[src_name])
            logger.debug(
                '{:s} preserved in workspace (unused)'.format(src_name))
示例7: save_model_to_weights_file
# 需要导入模块: from caffe2.python import workspace [as 别名]
# 或者: from caffe2.python.workspace import Blobs [as 别名]
def save_model_to_weights_file(weights_file, model):
    """Stash model weights in a dictionary and pickle them to a file. We map
    GPU device scoped names to unscoped names (e.g., 'gpu_0/conv1_w' ->
    'conv1_w').
    """
    logger.info(
        'Saving parameters and momentum to {}'.format(
            os.path.abspath(weights_file)))
    blobs = {}
    # Parameters, then momentum buffers; the first scoped occurrence of each
    # unscoped name is the one that gets stored.
    scoped_names = [str(p) for p in model.params]
    scoped_names += [str(p) + '_momentum' for p in model.TrainableParams()]
    for scoped_name in scoped_names:
        unscoped_name = utils.blob.unscope_name(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    # Preserved blobs plus batch-norm running stats (_rm / _riv) are saved in
    # the same way; both are logged with the '(preserved)' suffix.
    ws_names = workspace.Blobs()
    extra = [n for n in ws_names if n.startswith('__preserve__/')]
    extra += [n for n in ws_names
              if n.endswith('_rm') or n.endswith('_riv')]
    for scoped_name in extra:
        unscoped_name = utils.blob.unscope_name(scoped_name)
        if unscoped_name not in blobs:
            logger.debug(
                ' {:s} -> {:s} (preserved)'.format(
                    scoped_name, unscoped_name))
            blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
    cfg_yaml = yaml.dump(cfg)
    robust_pickle_dump(dict(blobs=blobs, cfg=cfg_yaml), weights_file)