This page collects typical usage examples of the Python attribute caffe2.proto.caffe2_pb2.CPU. If you have been wondering what caffe2_pb2.CPU is for and how to use it, the curated examples below should help; you can also explore the module that defines the attribute, caffe2.proto.caffe2_pb2, for further context.
The following 11 code examples of the caffe2_pb2.CPU attribute are presented below, ordered by popularity by default.
Example 1: _tf_device
# Required import: from caffe2.proto import caffe2_pb2
# Alternatively: from caffe2.proto.caffe2_pb2 import CPU
def _tf_device(device_option):
    '''
    Handle the devices.

    Args:
        device_option (caffe2_pb2.DeviceOption): DeviceOption protobuf,
            associated with an operator, that contains information such as
            device_type (optional), device_id (optional), and node_name
            (optional; tells which node the operator should execute on).
            See caffe2.proto in caffe2/proto for the full list.

    Returns:
        Formatted string representing the device information contained in
        device_option.
    '''
    if not device_option.HasField("device_type"):
        return ""
    if device_option.device_type in (caffe2_pb2.CPU, caffe2_pb2.MKLDNN):
        return "/cpu:*"
    if device_option.device_type == caffe2_pb2.CUDA:
        return "/gpu:{}".format(device_option.device_id)
    raise Exception("Unhandled device", device_option)
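For illustration, a minimal usage sketch (not part of the original source): build a `DeviceOption` by hand and map it to a TensorFlow-style device string.

from caffe2.proto import caffe2_pb2

opt = caffe2_pb2.DeviceOption()
opt.device_type = caffe2_pb2.CUDA
opt.device_id = 0
print(_tf_device(opt))      # -> "/gpu:0"

opt_cpu = caffe2_pb2.DeviceOption()
opt_cpu.device_type = caffe2_pb2.CPU
print(_tf_device(opt_cpu))  # -> "/cpu:*"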
Example 2: UpdateDeviceOption
# Required import: from caffe2.proto import caffe2_pb2
# Alternatively: from caffe2.proto.caffe2_pb2 import CPU
def UpdateDeviceOption(dev_opt, net_def):
    """Update the per-operator device options in net_def to match dev_opt."""
    from caffe2.proto import caffe2_pb2
    # Operators that must fall back to the CPU when the net runs on CUDA.
    gpufallbackop = ['GenerateProposals', 'BoxWithNMSLimit', 'BBoxTransform']
    # Operators that must fall back to the CPU when the net runs on IDEEP.
    ideepfallbackop = []
    for eop in net_def.op:
        if (eop.type in gpufallbackop and dev_opt.device_type == caffe2_pb2.CUDA) or (
                eop.type in ideepfallbackop and dev_opt.device_type == caffe2_pb2.IDEEP):
            eop.device_option.device_type = caffe2_pb2.CPU
        elif eop.device_option.device_type != dev_opt.device_type:
            eop.device_option.device_type = dev_opt.device_type
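A hedged usage sketch (the file name is hypothetical): load a serialized `NetDef` and rewrite its per-op device options for CUDA, letting the NMS-style ops fall back to the CPU.

from caffe2.proto import caffe2_pb2

net_def = caffe2_pb2.NetDef()
with open("predict_net.pb", "rb") as f:  # hypothetical file
    net_def.ParseFromString(f.read())

cuda_opt = caffe2_pb2.DeviceOption()
cuda_opt.device_type = caffe2_pb2.CUDA
UpdateDeviceOption(cuda_opt, net_def)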
Example 3: get_device_option
# Required import: from caffe2.proto import caffe2_pb2
# Alternatively: from caffe2.proto.caffe2_pb2 import CPU
def get_device_option(gpu=None):
    """Construct a `core.DeviceOption` object.

    :param int gpu: Identifier of the GPU to use, or None for CPU.
    :return: Instance of `core.DeviceOption`.
    """
    if gpu is None:
        return core.DeviceOption(caffe2_pb2.CPU)
    assert workspace.has_gpu_support, "Workspace does not support GPUs"
    assert 0 <= gpu < workspace.NumCudaDevices(), \
        "Workspace does not provide this GPU (%d). " \
        "Number of GPUs is %d" % (gpu, workspace.NumCudaDevices())
    return core.DeviceOption(caffe2_pb2.CUDA, gpu)
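A short usage sketch, assuming the same imports (`core`, `workspace`, `caffe2_pb2`) the helper relies on:

cpu_opt = get_device_option()         # DeviceOption for the CPU
# gpu_opt = get_device_option(gpu=0)  # first CUDA device; asserts GPU support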
Example 4: CpuScope
# Required import: from caffe2.proto import caffe2_pb2
# Alternatively: from caffe2.proto.caffe2_pb2 import CPU
@contextlib.contextmanager
def CpuScope():
    """Create a CPU device scope (requires `import contextlib`)."""
    cpu_dev = core.DeviceOption(caffe2_pb2.CPU)
    with core.DeviceScope(cpu_dev):
        yield
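Usage sketch, assuming the `contextlib.contextmanager` decorator shown above: operators created inside the scope inherit the CPU device option.

with CpuScope():
    net = core.Net("cpu_only_net")
    net.ConstantFill([], ["zeros"], shape=[4], value=0.0)  # pinned to CPU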
Example 5: get_device_option_cpu
# Required import: from caffe2.proto import caffe2_pb2
# Alternatively: from caffe2.proto.caffe2_pb2 import CPU
def get_device_option_cpu():
    device_option = core.DeviceOption(caffe2_pb2.CPU)
    return device_option
Example 6: Caffe2ToOnnx
# Required import: from caffe2.proto import caffe2_pb2
# Alternatively: from caffe2.proto.caffe2_pb2 import CPU
def Caffe2ToOnnx(init_def, predict_def, data_shape):
    """Convert a Caffe2 model (init and predict NetDefs) to an ONNX model."""
    import onnx
    from caffe2.proto import caffe2_pb2
    from caffe2.python.onnx import frontend
    from caffe2.python import workspace
    old_ws_name = workspace.CurrentWorkspace()
    workspace.SwitchWorkspace("_onnx_porting_", True)
    data_type = onnx.TensorProto.FLOAT
    value_info = {
        str(predict_def.op[0].input[0]): (data_type, data_shape)
    }
    # Force every operator onto the CPU before exporting.
    device_opts_cpu = caffe2_pb2.DeviceOption()
    device_opts_cpu.device_type = caffe2_pb2.CPU
    UpdateDeviceOption(device_opts_cpu, init_def)
    UpdateDeviceOption(device_opts_cpu, predict_def)
    onnx_model = frontend.caffe2_net_to_onnx_model(
        predict_def,
        init_def,
        value_info
    )
    onnx.checker.check_model(onnx_model)
    workspace.SwitchWorkspace(old_ws_name)
    return onnx_model
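A hedged end-to-end sketch (file paths hypothetical): load the two NetDefs, convert, and serialize the resulting ONNX model.

import onnx
from caffe2.proto import caffe2_pb2

init_def, predict_def = caffe2_pb2.NetDef(), caffe2_pb2.NetDef()
with open("init_net.pb", "rb") as f:     # hypothetical file
    init_def.ParseFromString(f.read())
with open("predict_net.pb", "rb") as f:  # hypothetical file
    predict_def.ParseFromString(f.read())

model = Caffe2ToOnnx(init_def, predict_def, [1, 3, 224, 224])
onnx.save(model, "model.onnx")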
Example 7: _get_device
# Required import: from caffe2.proto import caffe2_pb2
# Alternatively: from caffe2.proto.caffe2_pb2 import CPU
def _get_device(device_option: d5.DeviceType) -> core.DeviceOption:
    device = core.DeviceOption(caffe2_pb2.CPU)
    if device_option.is_gpu():
        device = core.DeviceOption(caffe2_pb2.CUDA)
    return device
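Usage depends on the deep500 `DeviceType` API; a one-line sketch under the assumption that `d5.GPUDevice` exists and exposes `is_gpu()`:

dev = _get_device(d5.GPUDevice())  # -> core.DeviceOption(caffe2_pb2.CUDA)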
Example 8: build_consistent_parameter_server_gradients
# Required import: from caffe2.proto import caffe2_pb2
# Alternatively: from caffe2.proto.caffe2_pb2 import CPU
def build_consistent_parameter_server_gradients(self, network: Caffe2Network, comm_network: CommunicationNetwork):
    _load_custom_dll()
    gradients = network.gradient()
    ptr = comm_network.get_comm_numpy_ptr()
    network.feed_tensor("mpi_comm", ptr, device_option=core.DeviceOption(caffe2_pb2.CPU))
    # Copy GPU data to CPU
    if network.is_cuda:
        for (param_name, grad_name) in gradients:
            grad_name_from = grad_name + "_cpu" if network.is_cuda else grad_name
            with core.DeviceScope(network.device_option):
                # copy on the same device where mpi_comm lives
                network.train_model.EnsureCPUOutput([grad_name], grad_name_from)
    # Invoke MPI
    for (param_name, grad_name) in gradients:
        grad_name_from = grad_name + "_cpu" if network.is_cuda else grad_name
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
            # use the copied tensor as input
            network.train_model.DMpiReduceMean([grad_name_from, "mpi_comm"], grad_name + "_buffer")
            network.train_model.DMpiBroadcast([grad_name + "_buffer", "mpi_comm"], grad_name_from)
    # Copy the communicated tensor back if we are on the GPU
    if network.is_cuda:
        for (param_name, grad_name) in gradients:
            with core.DeviceScope(network.device_option):
                network.train_model.CopyFromCPUInput([grad_name + "_cpu"], grad_name)
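Design note (examples 8 through 10 share this pattern): the custom `DMpi*` operators work on host memory, so GPU-resident gradients are first staged into `*_cpu` blobs with `EnsureCPUOutput`, reduced over MPI inside a CPU `DeviceScope`, and finally copied back to the GPU with `CopyFromCPUInput`.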
Example 9: build_allreduce_gradients
# Required import: from caffe2.proto import caffe2_pb2
# Alternatively: from caffe2.proto.caffe2_pb2 import CPU
def build_allreduce_gradients(self, network: Caffe2Network, comm_network: CommunicationNetwork):
    _load_custom_dll()
    gradients = network.gradient()
    ptr = comm_network.get_comm_numpy_ptr()
    network.feed_tensor("mpi_comm", ptr, device_option=core.DeviceOption(caffe2_pb2.CPU))
    # Copy GPU data to CPU
    if network.is_cuda:
        for (param_name, grad_name) in gradients:
            with core.DeviceScope(network.device_option):
                # copy on the same device where mpi_comm lives
                network.train_model.EnsureCPUOutput([grad_name], grad_name + "_cpu")
    # Invoke MPI
    for (param_name, grad_name) in gradients:
        grad_name_from = grad_name + "_cpu" if network.is_cuda else grad_name
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
            # use the copied tensor as input
            network.train_model.DMpiAllReduceMean([grad_name_from, "mpi_comm"], grad_name_from)
    # Copy the communicated tensor back if we are on the GPU
    if network.is_cuda:
        for (param_name, grad_name) in gradients:
            with core.DeviceScope(network.device_option):
                network.train_model.CopyFromCPUInput([grad_name + "_cpu"], grad_name)
Example 10: build_allreduce_neighbors_gradients
# Required import: from caffe2.proto import caffe2_pb2
# Alternatively: from caffe2.proto.caffe2_pb2 import CPU
def build_allreduce_neighbors_gradients(self, network: Caffe2Network, comm_network: CommunicationNetwork):
    _load_custom_dll()
    gradients = network.gradient()
    ptr = comm_network.get_comm_neighbor_numpy_ptr()
    network.feed_tensor("mpi_comm", ptr, device_option=core.DeviceOption(caffe2_pb2.CPU))
    # Copy GPU data to CPU
    if network.is_cuda:
        for (param_name, grad_name) in gradients:
            with core.DeviceScope(network.device_option):
                # copy on the same device where mpi_comm lives
                network.train_model.EnsureCPUOutput([grad_name], grad_name + "_cpu")
    # Invoke MPI
    for (param_name, grad_name) in gradients:
        grad_name_from = grad_name + "_cpu" if network.is_cuda else grad_name
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CPU)):
            # use the copied tensor as input
            network.train_model.DMpiAllReduceMean([grad_name_from, "mpi_comm"], grad_name_from)
    # Copy the communicated tensor back if we are on the GPU
    if network.is_cuda:
        for (param_name, grad_name) in gradients:
            with core.DeviceScope(network.device_option):
                network.train_model.CopyFromCPUInput([grad_name + "_cpu"], grad_name)
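The only difference from example 9 is the communicator: `get_comm_neighbor_numpy_ptr()` presumably supplies an MPI communicator spanning only neighboring ranks in the topology, so the same all-reduce averages gradients only across those neighbors.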
Example 11: _tf_device
# Required import: from caffe2.proto import caffe2_pb2
# Alternatively: from caffe2.proto.caffe2_pb2 import CPU
def _tf_device(device_option):
    '''Handle the devices.'''
    if not device_option.HasField("device_type"):
        return ""
    if device_option.device_type == caffe2_pb2.CPU:
        return "/cpu:*"
    if device_option.device_type == caffe2_pb2.CUDA:
        return "/gpu:{}".format(device_option.cuda_gpu_id)
    raise Exception("Unhandled device", device_option)