This article collects typical usage examples of the Python method caffe2.proto.caffe2_pb2.DeviceOption. If you are wondering what exactly caffe2_pb2.DeviceOption does, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the module it belongs to, caffe2.proto.caffe2_pb2.
Eleven code examples of the caffe2_pb2.DeviceOption method are shown below, sorted by popularity by default.
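Before the examples, here is a minimal sketch (not taken from any of the projects quoted below) of the two common ways to obtain a DeviceOption and of scoping work to it. The blob name "x" and the shape are placeholders, and on older Caffe2 releases the `device_id` field is named `cuda_gpu_id`.

import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace

# Fill the protobuf message directly.
gpu_opt = caffe2_pb2.DeviceOption()
gpu_opt.device_type = caffe2_pb2.CUDA
gpu_opt.device_id = 0  # `cuda_gpu_id` on older Caffe2 releases

# Or use the helper from caffe2.python.core.
cpu_opt = core.DeviceOption(caffe2_pb2.CPU)

# Operators and blobs created inside a DeviceScope inherit the device option.
with core.DeviceScope(cpu_opt):
    workspace.FeedBlob("x", np.zeros((2, 3), dtype=np.float32))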
Example 1: _tf_device
# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import DeviceOption [as alias]
def _tf_device(device_option):
    '''
    Handle the devices.

    Args:
        device_option (caffe2_pb2.DeviceOption): DeviceOption protobuf,
            associated to an operator, that contains information such as
            device_type (optional), cuda_gpu_id (optional), node_name (optional,
            tells which node the operator should execute on). See caffe2.proto
            in caffe2/proto for the full list.

    Returns:
        Formatted string representing device information contained in
        device_option.
    '''
    if not device_option.HasField("device_type"):
        return ""
    if device_option.device_type == caffe2_pb2.CPU or device_option.device_type == caffe2_pb2.MKLDNN:
        return "/cpu:*"
    if device_option.device_type == caffe2_pb2.CUDA:
        return "/gpu:{}".format(device_option.device_id)
    raise Exception("Unhandled device", device_option)
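As a quick check of Example 1, one might build a DeviceOption by hand and look at the string it maps to; the GPU id below is arbitrary and caffe2_pb2 is assumed to be imported as in the comment above.

opt = caffe2_pb2.DeviceOption()
opt.device_type = caffe2_pb2.CUDA
opt.device_id = 1
print(_tf_device(opt))                        # "/gpu:1"
print(_tf_device(caffe2_pb2.DeviceOption()))  # "" -- no device_type set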
Example 2: __init__
# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import DeviceOption [as alias]
def __init__(self, device_option: DeviceOption):
    super(Caffe2Network, self).__init__()
    self.device_option = device_option
    self.train_model = model_helper.ModelHelper(name="train_default_net")
    self.test_model = model_helper.ModelHelper(name="test_default_net", init_params=False)
    self.train_net = self.train_model.net
    self.test_net = self.test_model.net
    self.train_init_net = self.train_model.param_init_net
    self.test_init_net = self.test_model.param_init_net
    self.workspace = workspace
    self.output_dict = {}
    self.param_names = None
    # dict that helps us remember that we already added the gradients to the graph for a given loss
    self.gradients_by_loss = {}
    self.is_cuda = (device_option.device_type == caffe2_pb2.CUDA)
Example 3: create_const_fill_op
# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import DeviceOption [as alias]
def create_const_fill_op(
    name: str,
    blob: Union[np.ndarray, workspace.Int8Tensor],
    device_option: Optional[caffe2_pb2.DeviceOption] = None,
) -> caffe2_pb2.OperatorDef:
    """
    Given a blob object, return the Caffe2 operator that creates this blob
    as constant. Currently support NumPy tensor and Caffe2 Int8Tensor.
    """
    tensor_type = type(blob)
    assert tensor_type in [
        np.ndarray,
        workspace.Int8Tensor,
    ], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format(
        name, type(blob)
    )

    if tensor_type == np.ndarray:
        return _create_const_fill_op_from_numpy(name, blob, device_option)
    elif tensor_type == workspace.Int8Tensor:
        assert device_option is None
        return _create_const_fill_op_from_c2_int8_tensor(name, blob)
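Assuming Example 3 and its private helper _create_const_fill_op_from_numpy are importable from the same module, a usage sketch might look like the following; the blob name and shape are made up.

import numpy as np
from caffe2.proto import caffe2_pb2

cpu = caffe2_pb2.DeviceOption()
cpu.device_type = caffe2_pb2.CPU

op = create_const_fill_op("fc_w", np.random.rand(4, 8).astype(np.float32), device_option=cpu)
print(op.type, list(op.output))  # typically a GivenTensorFill op producing "fc_w"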
Example 4: construct_init_net_from_params
# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import DeviceOption [as alias]
def construct_init_net_from_params(
    params: Dict[str, Any], device_options: Optional[Dict[str, caffe2_pb2.DeviceOption]] = None
) -> caffe2_pb2.NetDef:
    """
    Construct the init_net from params dictionary
    """
    init_net = caffe2_pb2.NetDef()
    device_options = device_options or {}
    for name, blob in params.items():
        if isinstance(blob, str):
            logger.warning(
                "Blob {} with type {} is not supported in generating init net,"
                " skipped.".format(name, type(blob))
            )
            continue
        init_net.op.extend(
            [create_const_fill_op(name, blob, device_option=device_options.get(name, None))]
        )
        init_net.external_output.append(name)
    return init_net
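A sketch of building an init_net from a dict of NumPy arrays, optionally pinning one blob to the GPU. The names and shapes are illustrative, and create_const_fill_op from Example 3 is assumed to be in scope.

import numpy as np
from caffe2.proto import caffe2_pb2

gpu0 = caffe2_pb2.DeviceOption()
gpu0.device_type = caffe2_pb2.CUDA
gpu0.device_id = 0

params = {
    "conv1_w": np.zeros((8, 3, 3, 3), dtype=np.float32),
    "conv1_b": np.zeros((8,), dtype=np.float32),
}
init_net = construct_init_net_from_params(params, device_options={"conv1_w": gpu0})
print(len(init_net.op), list(init_net.external_output))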
Example 5: get_device_option_cpu
# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import DeviceOption [as alias]
def get_device_option_cpu():
    device_option = core.DeviceOption(caffe2_pb2.CPU)
    return device_option
Example 6: get_device_option_cuda
# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import DeviceOption [as alias]
def get_device_option_cuda(gpu_id=0):
    device_option = caffe2_pb2.DeviceOption()
    device_option.device_type = caffe2_pb2.CUDA
    device_option.device_id = gpu_id
    return device_option
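The two helpers from Examples 5 and 6 pair naturally with core.DeviceScope. A minimal sketch, assuming core and workspace are imported and a GPU is available; the blob names are placeholders.

import numpy as np
from caffe2.python import core, workspace

with core.DeviceScope(get_device_option_cuda(gpu_id=0)):
    workspace.FeedBlob("gpu_blob", np.ones((2, 2), dtype=np.float32))

with core.DeviceScope(get_device_option_cpu()):
    workspace.FeedBlob("cpu_blob", np.ones((2, 2), dtype=np.float32))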
Example 7: Caffe2ToOnnx
# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import DeviceOption [as alias]
def Caffe2ToOnnx(init_def, predict_def, data_shape):
    """Transfer a Caffe2 model to ONNX."""
    import onnx
    from caffe2.proto import caffe2_pb2
    from caffe2.python.onnx import frontend
    from caffe2.python import workspace

    old_ws_name = workspace.CurrentWorkspace()
    workspace.SwitchWorkspace("_onnx_porting_", True)
    data_type = onnx.TensorProto.FLOAT
    value_info = {
        str(predict_def.op[0].input[0]): (data_type, data_shape)
    }
    device_opts_cpu = caffe2_pb2.DeviceOption()
    device_opts_cpu.device_type = caffe2_pb2.CPU
    UpdateDeviceOption(device_opts_cpu, init_def)
    UpdateDeviceOption(device_opts_cpu, predict_def)
    onnx_model = frontend.caffe2_net_to_onnx_model(
        predict_def,
        init_def,
        value_info
    )
    onnx.checker.check_model(onnx_model)
    workspace.SwitchWorkspace(old_ws_name)
    return onnx_model
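Example 7 expects fully loaded NetDef protos. A hedged sketch of driving it from the usual init_net.pb / predict_net.pb pair; the file names and input shape are placeholders, and the UpdateDeviceOption helper it calls must come from the same module.

from caffe2.proto import caffe2_pb2

init_def = caffe2_pb2.NetDef()
predict_def = caffe2_pb2.NetDef()
with open("init_net.pb", "rb") as f:
    init_def.ParseFromString(f.read())
with open("predict_net.pb", "rb") as f:
    predict_def.ParseFromString(f.read())

onnx_model = Caffe2ToOnnx(init_def, predict_def, data_shape=(1, 3, 224, 224))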
Example 8: get_device_option_cuda
# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import DeviceOption [as alias]
def get_device_option_cuda(gpu_id=0):
    device_option = caffe2_pb2.DeviceOption()
    device_option.device_type = caffe2_pb2.CUDA
    device_option.cuda_gpu_id = gpu_id
    return device_option
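Examples 6 and 8 differ only in the field name: older caffe2_pb2 protos expose cuda_gpu_id, while later Caffe2 releases renamed the field to device_id. A small, hypothetical shim that works with either proto version:

def set_gpu_id(device_option, gpu_id):
    # Probe the message for whichever GPU-id field this proto version defines.
    if hasattr(device_option, "device_id"):
        device_option.device_id = gpu_id
    else:
        device_option.cuda_gpu_id = gpu_id
    return device_option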
Example 9: __init__
# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import DeviceOption [as alias]
def __init__(self, model: d5.ops.OnnxModel, device_option: DeviceOption,
             events: List[d5.ExecutorEvent] = []):
    super(Caffe2GraphExecutor, self).__init__(Caffe2Network(device_option), events)
    self.device_option = device_option
    self.model = model

    with core.DeviceScope(self.device_option):
        model.accept(Caffe2Visitor(device_option), self.network)
Example 10: get_params_from_init_net
# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import DeviceOption [as alias]
def get_params_from_init_net(
    init_net: caffe2_pb2.NetDef,
) -> [Dict[str, Any], Dict[str, caffe2_pb2.DeviceOption]]:
    """
    Take the output blobs from init_net by running it.

    Outputs:
        params: dict from blob name to numpy array
        device_options: dict from blob name to the device option of its creating op
    """
    # NOTE: this assumes that the device option of a param is determined by its
    # producer op, with the only exception being CopyGPUToCPU, which is a CUDA op
    # but returns a CPU tensor.
    def _get_device_option(producer_op):
        if producer_op.type == "CopyGPUToCPU":
            return caffe2_pb2.DeviceOption()
        else:
            return producer_op.device_option

    with ScopedWS("__get_params_from_init_net__", is_reset=True, is_cleanup=True) as ws:
        ws.RunNetOnce(init_net)
        params = {b: fetch_any_blob(b) for b in init_net.external_output}
    ssa, versions = core.get_ssa(init_net)
    producer_map = get_producer_map(ssa)
    device_options = {
        b: _get_device_option(init_net.op[producer_map[(b, versions[b])][0]])
        for b in init_net.external_output
    }
    return params, device_options
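A hedged sketch of round-tripping with Example 10: read an init_net from disk and recover its parameters together with their device options. The file name is a placeholder, and ScopedWS, fetch_any_blob and get_producer_map from the same module are assumed to be importable.

from caffe2.proto import caffe2_pb2

init_net = caffe2_pb2.NetDef()
with open("model_init.pb", "rb") as f:  # placeholder path
    init_net.ParseFromString(f.read())

params, device_options = get_params_from_init_net(init_net)
for name in init_net.external_output:
    print(name, device_options[name].device_type)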
Example 11: convert_net
# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import DeviceOption [as alias]
def convert_net(args, net, blobs):

    @op_filter()
    def convert_op_name(op):
        if args.device != 'gpu':
            if op.engine != 'DEPTHWISE_3x3':
                op.engine = ''
            op.device_option.CopyFrom(caffe2_pb2.DeviceOption())
        reset_names(op.input)
        reset_names(op.output)
        return [op]

    @op_filter(type="Python", inputs=['rpn_cls_probs', 'rpn_bbox_pred', 'im_info'])
    def convert_gen_proposal(op_in):
        gen_proposals_op, ext_input = convert_gen_proposals(
            op_in, blobs,
            rpn_min_size=float(cfg.TEST.RPN_MIN_SIZE),
            rpn_post_nms_topN=cfg.TEST.RPN_POST_NMS_TOP_N,
            rpn_pre_nms_topN=cfg.TEST.RPN_PRE_NMS_TOP_N,
            rpn_nms_thres=cfg.TEST.RPN_NMS_THRESH,
        )
        net.external_input.extend([ext_input])
        return [gen_proposals_op]

    @op_filter(input_has='rois')
    def convert_rpn_rois(op):
        for j in range(0, len(op.input)):
            if op.input[j] == 'rois':
                print('Converting op {} input name: rois -> rpn_rois:\n{}'.format(
                    op.type, op))
                op.input[j] = 'rpn_rois'
        return [op]

    @op_filter(type_in=['StopGradient', 'Alias'])
    def convert_remove_op(op):
        print('Removing op {}:\n{}'.format(op.type, op))
        return []

    convert_op_in_proto(net, convert_op_name)
    convert_op_in_proto(net, [
        convert_gen_proposal, convert_rpn_rois, convert_remove_op
    ])
    reset_names(net.external_input)
    reset_names(net.external_output)
    reset_blob_names(blobs)