本文整理匯總了Python中caffe2.python.core.DeviceOption方法的典型用法代碼示例。如果您正苦於以下問題:Python core.DeviceOption方法的具體用法?Python core.DeviceOption怎麽用?Python core.DeviceOption使用的例子?那麽,這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在模塊caffe2.python.core的用法示例。
在下文中一共展示了core.DeviceOption方法的15個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。
示例1: BroacastParameters
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def BroacastParameters(model, src_gpu, gpus):
    """Copy every parameter blob scoped to ``src_gpu`` to all GPUs in ``gpus``.

    NOTE(review): the name keeps the original "Broacast" spelling because
    callers reference it by this name.

    Args:
        model: model whose ``params`` list is scanned for blobs to copy.
        src_gpu (int): GPU id whose ``gpu_{src_gpu}`` prefix is replaced in
            each target blob name.
        gpus (iterable of int): destination GPU ids; each target blob is fed
            under a CUDA DeviceOption for that GPU.
    """
    log.info("Broadcasting parameters from gpu {} to gpu: {}".format(
        src_gpu, ','.join([str(g) for g in gpus]))
    )
    for param in model.params:
        # Only broadcast params that are scoped to the first destination GPU.
        if 'gpu_{}'.format(gpus[0]) in str(param):
            # Fetch once per parameter: the source value does not depend on
            # the destination GPU. (The original fetched the same blob again
            # inside the per-GPU loop and discarded the result.)
            blob = workspace.FetchBlob(str(param))
            for i in gpus:
                target_blob_name = str(param).replace(
                    'gpu_{}'.format(src_gpu),
                    'gpu_{}'.format(i)
                )
                log.info('broadcast {} -> {}'.format(
                    str(param), target_blob_name)
                )
                with core.DeviceScope(
                        core.DeviceOption(caffe2_pb2.CUDA, i)):
                    workspace.FeedBlob(target_blob_name, blob)
示例2: _run_test
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def _run_test(self, A, B, check_grad=False):
    """Run SpatialNarrowAs on GPU 0 and compare C against a NumPy reference.

    Optionally gradient-checks the operator when ``check_grad`` is True.
    """
    device = core.DeviceOption(caffe2_pb2.CUDA, 0)
    with core.DeviceScope(device):
        op = core.CreateOperator('SpatialNarrowAs', ['A', 'B'], ['C'])
        for name, value in (('A', A), ('B', B)):
            workspace.FeedBlob(name, value)
        workspace.RunOperatorOnce(op)
        C = workspace.FetchBlob('C')

        if check_grad:
            checker = gradient_checker.GradientChecker(
                stepsize=0.005,
                threshold=0.005,
                device_option=device,
            )
            passed, _, _ = checker.CheckSimple(op, [A, B], 0, [0])
            self.assertTrue(passed, 'Grad check failed')

    # Reference: C is A narrowed to C's own (NCHW) extents.
    n, c, h, w = C.shape
    np.testing.assert_allclose(C, A[:n, :c, :h, :w], rtol=1e-5, atol=1e-08)
示例3: enqueue_blobs
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def enqueue_blobs(
    self,
    gpu_id,
    enqueue_blobs_names,
    blob_values,
):
    """Feed ``blob_values`` under ``gpu_{gpu_id}``'s scope and enqueue them
    onto that GPU's BlobsQueue via SafeEnqueueBlobs."""
    prefix = 'gpu_{}'.format(gpu_id)
    scoped_names = [
        '{}/{}'.format(prefix, name) for name in enqueue_blobs_names
    ]
    device = core.DeviceOption(caffe2_pb2.CUDA, gpu_id)

    for name, value in zip(scoped_names, blob_values):
        workspace.FeedBlob(name, value, device_option=device)

    queue_name = '{}/{}'.format(prefix, self._blobs_queue_name)
    status_blob = queue_name + '_enqueue_status'
    enqueue_op = core.CreateOperator(
        'SafeEnqueueBlobs',
        [queue_name] + scoped_names,
        scoped_names + [status_blob],
        device_option=device,
    )
    workspace.RunOperatorOnce(enqueue_op)
示例4: _run_op_test
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def _run_op_test(self, X, I, check_grad=False):
    """Run BatchPermutation on GPU 0 and verify Y == X[I].

    Optionally gradient-checks the operator when ``check_grad`` is True.
    """
    device = core.DeviceOption(caffe2_pb2.CUDA, 0)
    with core.DeviceScope(device):
        op = core.CreateOperator('BatchPermutation', ['X', 'I'], ['Y'])
        workspace.FeedBlob('X', X)
        workspace.FeedBlob('I', I)
        workspace.RunOperatorOnce(op)
        Y = workspace.FetchBlob('Y')

        if check_grad:
            checker = gradient_checker.GradientChecker(
                stepsize=0.1,
                threshold=0.001,
                device_option=device,
            )
            passed, _, _ = checker.CheckSimple(op, [X, I], 0, [0])
            self.assertTrue(passed, 'Grad check failed')

    np.testing.assert_allclose(Y, X[I], rtol=1e-5, atol=1e-08)
示例5: get_device_option
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def get_device_option(gpu=None):
    """Constructs `core.DeviceOption` object
    :param int gpu: Identifier of GPU to use or None for CPU.
    :return: Instance of `core.DeviceOption`.
    """
    if gpu is None:
        # CPU path: no GPU checks needed.
        return core.DeviceOption(caffe2_pb2.CPU)

    assert workspace.has_gpu_support, "Workspace does not support GPUs"
    num_gpus = workspace.NumCudaDevices()  # hoisted: used twice below
    assert 0 <= gpu < num_gpus,\
        "Workspace does not provide this gpu (%d). "\
        "Number of GPUs is %d" % (gpu, num_gpus)
    return core.DeviceOption(caffe2_pb2.CUDA, gpu)
示例6: main
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def main():
    """Load serialized init/predict nets, feed random input, and benchmark."""
    args = parser.parse_args()
    args.gpu_id = 0

    model = model_helper.ModelHelper(name="le_net", init_params=False)

    # Deserialize the weight-initialization net from init_net.pb.
    init_net_proto = caffe2_pb2.NetDef()
    with open(args.c2_init, "rb") as f:
        init_net_proto.ParseFromString(f.read())
    model.param_init_net = core.Net(init_net_proto)

    # Deserialize the inference net from predict_net.pb the same way.
    predict_net_proto = caffe2_pb2.NetDef()
    with open(args.c2_predict, "rb") as f:
        predict_net_proto.ParseFromString(f.read())
    model.net = core.Net(predict_net_proto)

    # CUDA was tried but its performance was not impressive:
    #   device_opts = core.DeviceOption(caffe2_pb2.PROTO_CUDA, args.gpu_id)
    #   model.net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True)
    #   model.param_init_net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True)

    # Fill the net's first external input with Gaussian noise of the
    # requested batch/image geometry.
    input_blob = model.net.external_inputs[0]
    model.param_init_net.GaussianFill(
        [],
        input_blob.GetUnscopedName(),
        shape=(args.batch_size, 3, args.img_size, args.img_size),
        mean=0.0,
        std=1.0)

    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net, overwrite=True)
    workspace.BenchmarkNet(model.net.Proto().name, 5, 20, True)
示例7: CpuScope
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def CpuScope():
    """Create a CPU device scope.

    NOTE(review): this is a generator that yields once; calling it as
    ``with CpuScope():`` only works if it is wrapped with
    ``@contextlib.contextmanager``. No decorator is visible in this extract —
    presumably it exists at the definition site; confirm it was not lost.

    Yields:
        None, with a CPU DeviceOption active as the current device scope.
    """
    cpu_dev = core.DeviceOption(caffe2_pb2.CPU)
    with core.DeviceScope(cpu_dev):
        yield
示例8: CudaDevice
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def CudaDevice(gpu_id):
    """Create a Cuda device.

    :param int gpu_id: index of the CUDA GPU to target.
    :return: a `core.DeviceOption` for that GPU.
    """
    device = core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
    return device
示例9: get_device_option_cpu
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def get_device_option_cpu():
    """Return a `core.DeviceOption` targeting the CPU."""
    return core.DeviceOption(caffe2_pb2.CPU)
示例10: get_device_option_cuda
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def get_device_option_cuda(gpu_id=0):
    """Return a DeviceOption proto for CUDA on ``gpu_id``.

    Builds the protobuf message directly rather than via ``core``.
    """
    opt = caffe2_pb2.DeviceOption()
    opt.device_type = caffe2_pb2.CUDA
    opt.device_id = gpu_id
    return opt
示例11: test_size_exceptions
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def test_size_exceptions(self):
    """BatchPermutation must raise when the index blob's length (10) does
    not match the data blob's batch size (2)."""
    data = np.random.randn(2, 256, 42, 86).astype(np.float32)
    indices = np.random.permutation(10).astype(np.int32)
    with self.assertRaises(RuntimeError):
        self._run_op_test(data, indices)
# See doc string in _run_speed_test
# def test_perf(self):
# with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
# self._run_speed_test()
示例12: run_net
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def run_net(net):
    """Run ``net`` once and return the 'data' blob from gpu_0's name scope."""
    workspace.RunNetOnce(net)
    device = core.DeviceOption(caffe2_pb2.CUDA, 0)
    with core.NameScope('gpu_{}'.format(0)):
        with core.DeviceScope(device):
            return workspace.FetchBlob(core.ScopedName('data'))
示例13: test_forward_and_gradient
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def test_forward_and_gradient(self):
    """Gradient-check SmoothL1Loss on GPU 0 with random inputs and weights."""
    shape = (128, 4 * 21)
    Y = np.random.randn(*shape).astype(np.float32)
    Y_hat = np.random.randn(*shape).astype(np.float32)

    # Random weights with negative entries zeroed out.
    inside_weights = np.random.randn(*shape).astype(np.float32)
    np.clip(inside_weights, 0, None, out=inside_weights)
    outside_weights = np.random.randn(*shape).astype(np.float32)
    np.clip(outside_weights, 0, None, out=outside_weights)

    scale = np.random.random()
    beta = np.random.random()
    op = core.CreateOperator(
        'SmoothL1Loss', ['Y_hat', 'Y', 'inside_weights', 'outside_weights'],
        ['loss'],
        scale=scale,
        beta=beta
    )

    checker = gradient_checker.GradientChecker(
        stepsize=0.005,
        threshold=0.005,
        device_option=core.DeviceOption(caffe2_pb2.CUDA, 0)
    )
    res, grad, grad_estimated = checker.CheckSimple(
        op, [Y_hat, Y, inside_weights, outside_weights], 0, [0]
    )

    self.assertTrue(
        grad.shape == grad_estimated.shape,
        'Fail check: grad.shape != grad_estimated.shape'
    )
    # To inspect the gradient and estimated gradient, uncomment:
    #   np.set_printoptions(precision=3, suppress=True)
    #   print('grad:'); print(grad)
    #   print('grad_estimated:'); print(grad_estimated)
    self.assertTrue(res)
示例14: create_threads
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def create_threads(self):
    """Create loader worker threads, one BlobsQueue per GPU, the scoped
    enqueue blobs on each GPU, and the enqueuer thread.

    Threads are created but not started here.
    """
    # "worker" threads to construct (partial) minibatches and put them on
    # minibatch queue in CPU memory (limited by queue size).
    self._worker_ids = self.get_worker_ids()
    self._workers = [
        threading.Thread(
            target=self.minibatch_loader,
            name='worker_{}'.format(worker_id),
            args=[worker_id],
        ) for worker_id in self._worker_ids
    ]

    # Create one BlobsQueue per GPU which holds the training data in GPU
    # memory and feeds to the net.
    root_gpu_id = cfg.ROOT_GPU_ID
    for gpu_id in range(root_gpu_id, root_gpu_id + self._num_gpus):
        with core.NameScope('gpu_{}'.format(gpu_id)):
            self.create_blobs_queue(
                queue_name=self._blobs_queue_name,
                num_blobs=len(self._blobs_idx_map),
                capacity=self._gpu_blobs_queue_capacity
            )

    # Launch enqueuer threads.
    blob_names = self._blobs_idx_map.keys()
    enqueue_blobs_names = [
        '{}_{}_enqueue'.format(self._split, blob_name)
        for blob_name in blob_names
    ]
    for gpu_id in range(root_gpu_id, root_gpu_id + self._num_gpus):
        with core.NameScope('gpu_{}'.format(gpu_id)):
            with core.DeviceScope(
                core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
            ):
                # BUGFIX: enqueue_blobs_names is a flat list of strings.
                # The original nested loop (for blob_list in ...: for blob
                # in blob_list:) iterated each name's *characters* and
                # created one blob per character; create one blob per
                # enqueue name instead.
                for blob in enqueue_blobs_names:
                    scoped_blob_name = scope.CurrentNameScope() + blob
                    workspace.CreateBlob(scoped_blob_name)

    self._enqueuer = threading.Thread(
        target=self.enqueue_blobs_thread, args=(0, enqueue_blobs_names)
    )
示例15: get_device_option_cuda
# 需要導入模塊: from caffe2.python import core [as 別名]
# 或者: from caffe2.python.core import DeviceOption [as 別名]
def get_device_option_cuda(gpu_id=0):
    """Return a DeviceOption proto for CUDA on ``gpu_id``.

    Uses the legacy ``cuda_gpu_id`` proto field (older caffe2 schema).
    """
    opt = caffe2_pb2.DeviceOption()
    opt.device_type = caffe2_pb2.CUDA
    opt.cuda_gpu_id = gpu_id
    return opt