This page collects typical usage examples of the Python method caffe2.python.core.DeviceScope. If you are wondering what core.DeviceScope does, how to call it, or what it looks like in real code, the curated examples below should help. You can also explore the other members of the caffe2.python.core module.
The following shows 11 code examples of core.DeviceScope, drawn from open-source projects.
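Before the individual examples, here is a minimal sketch of the pattern they all share: blobs fed and operators created inside a core.DeviceScope pick up the scope's DeviceOption by default. This snippet is not taken from any of the projects below; it assumes a CUDA-enabled Caffe2 build with GPU 0 available.

from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace
import numpy as np

# GPU 0 is an arbitrary choice; use core.DeviceOption(caffe2_pb2.CPU) for the CPU.
gpu_dev = core.DeviceOption(caffe2_pb2.CUDA, 0)
with core.DeviceScope(gpu_dev):
    # FeedBlob and CreateOperator default to the current device scope.
    workspace.FeedBlob('X', np.random.rand(4, 3).astype(np.float32))
    op = core.CreateOperator('Relu', ['X'], ['Y'])
workspace.RunOperatorOnce(op)
Y = workspace.FetchBlob('Y')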
Example 1: BroacastParameters
# Module to import: from caffe2.python import core [as an alias]
# Or: from caffe2.python.core import DeviceScope [as an alias]
def BroacastParameters(model, src_gpu, gpus):
    log.info("Broadcasting parameters from gpu {} to gpu: {}".format(
        src_gpu, ','.join([str(g) for g in gpus]))
    )
    for param in model.params:
        if 'gpu_{}'.format(gpus[0]) in str(param):
            for i in gpus:
                blob = workspace.FetchBlob(str(param))
                target_blob_name = str(param).replace(
                    'gpu_{}'.format(src_gpu),
                    'gpu_{}'.format(i)
                )
                log.info('broadcast {} -> {}'.format(
                    str(param), target_blob_name)
                )
                workspace.FetchBlob(str(param))
                with core.DeviceScope(
                        core.DeviceOption(caffe2_pb2.CUDA, i)):
                    workspace.FeedBlob(target_blob_name, blob)
Example 2: _run_test
# Module to import: from caffe2.python import core [as an alias]
# Or: from caffe2.python.core import DeviceScope [as an alias]
def _run_test(self, A, B, check_grad=False):
    with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
        op = core.CreateOperator('SpatialNarrowAs', ['A', 'B'], ['C'])
        workspace.FeedBlob('A', A)
        workspace.FeedBlob('B', B)
    workspace.RunOperatorOnce(op)
    C = workspace.FetchBlob('C')

    if check_grad:
        gc = gradient_checker.GradientChecker(
            stepsize=0.005,
            threshold=0.005,
            device_option=core.DeviceOption(caffe2_pb2.CUDA, 0)
        )
        res, grad, grad_estimated = gc.CheckSimple(op, [A, B], 0, [0])
        self.assertTrue(res, 'Grad check failed')

    dims = C.shape
    C_ref = A[:dims[0], :dims[1], :dims[2], :dims[3]]
    np.testing.assert_allclose(C, C_ref, rtol=1e-5, atol=1e-08)
Example 3: _run_op_test
# Module to import: from caffe2.python import core [as an alias]
# Or: from caffe2.python.core import DeviceScope [as an alias]
def _run_op_test(self, X, I, check_grad=False):
    with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
        op = core.CreateOperator('BatchPermutation', ['X', 'I'], ['Y'])
        workspace.FeedBlob('X', X)
        workspace.FeedBlob('I', I)
    workspace.RunOperatorOnce(op)
    Y = workspace.FetchBlob('Y')

    if check_grad:
        gc = gradient_checker.GradientChecker(
            stepsize=0.1,
            threshold=0.001,
            device_option=core.DeviceOption(caffe2_pb2.CUDA, 0)
        )
        res, grad, grad_estimated = gc.CheckSimple(op, [X, I], 0, [0])
        self.assertTrue(res, 'Grad check failed')

    Y_ref = X[I]
    np.testing.assert_allclose(Y, Y_ref, rtol=1e-5, atol=1e-08)
Example 4: get_net
# Module to import: from caffe2.python import core [as an alias]
# Or: from caffe2.python.core import DeviceScope [as an alias]
def get_net(data_loader, name):
    logger = logging.getLogger(__name__)
    blob_names = data_loader.get_output_names()
    net = core.Net(name)
    net.type = 'dag'
    for gpu_id in range(cfg.NUM_GPUS):
        with core.NameScope('gpu_{}'.format(gpu_id)):
            with core.DeviceScope(muji.OnGPU(gpu_id)):
                for blob_name in blob_names:
                    blob = core.ScopedName(blob_name)
                    workspace.CreateBlob(blob)
                net.DequeueBlobs(
                    data_loader._blobs_queue_name, blob_names)
    logger.info("Protobuf:\n" + str(net.Proto()))
    return net
Example 5: _update_bn_stats_gpu
# Module to import: from caffe2.python import core [as an alias]
# Or: from caffe2.python.core import DeviceScope [as an alias]
def _update_bn_stats_gpu(self):
    """
    Copy the aggregated BN statistics to every GPU.
    Note: the actual blobs used at test time are "rm" and "riv".
    """
    num_gpus = cfg.NUM_GPUS
    root_gpu_id = cfg.ROOT_GPU_ID
    for i in range(root_gpu_id, root_gpu_id + num_gpus):
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, i)):
            for bn_layer in self._bn_layers:
                workspace.FeedBlob(
                    'gpu_{}/'.format(i) + bn_layer + '_bn_rm',
                    np.array(self._meanX_dict[bn_layer], dtype=np.float32),
                )
                # Note: despite its name, riv is actually the running variance
                # (not the running inverse variance).
                workspace.FeedBlob(
                    'gpu_{}/'.format(i) + bn_layer + '_bn_riv',
                    np.array(self._var_dict[bn_layer], dtype=np.float32),
                )
Example 6: CudaScope
# Module to import: from caffe2.python import core [as an alias]
# Or: from caffe2.python.core import DeviceScope [as an alias]
@contextlib.contextmanager  # requires "import contextlib"; the function yields once
def CudaScope(gpu_id):
    """Create a CUDA device scope for GPU device `gpu_id`."""
    gpu_dev = CudaDevice(gpu_id)
    with core.DeviceScope(gpu_dev):
        yield
Example 7: CpuScope
# Module to import: from caffe2.python import core [as an alias]
# Or: from caffe2.python.core import DeviceScope [as an alias]
@contextlib.contextmanager  # requires "import contextlib"; the function yields once
def CpuScope():
    """Create a CPU device scope."""
    cpu_dev = core.DeviceOption(caffe2_pb2.CPU)
    with core.DeviceScope(cpu_dev):
        yield
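Both CudaScope and CpuScope are generator functions, so once wrapped with contextlib.contextmanager they are used as with-blocks. The snippet below is a hypothetical usage sketch, not taken from the source project; the blob names and data are illustrative, and it assumes CudaDevice(gpu_id) from the same utility module returns a CUDA DeviceOption.

import numpy as np
from caffe2.python import workspace

data_array = np.zeros((2, 3), dtype=np.float32)  # placeholder data for this sketch
with CudaScope(0):   # CudaScope helper from Example 6 above
    workspace.FeedBlob('gpu_0/data', data_array)  # fed on GPU 0
with CpuScope():     # CpuScope helper from Example 7 above
    workspace.FeedBlob('cpu/data', data_array)    # fed on the CPU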
Example 8: test_size_exceptions
# Module to import: from caffe2.python import core [as an alias]
# Or: from caffe2.python.core import DeviceScope [as an alias]
def test_size_exceptions(self):
    A = np.random.randn(2, 256, 42, 86).astype(np.float32)
    I = np.array(np.random.permutation(10), dtype=np.int32)
    with self.assertRaises(RuntimeError):
        self._run_op_test(A, I)

# See doc string in _run_speed_test
# def test_perf(self):
#     with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
#         self._run_speed_test()
Example 9: run_net
# Module to import: from caffe2.python import core [as an alias]
# Or: from caffe2.python.core import DeviceScope [as an alias]
def run_net(net):
    workspace.RunNetOnce(net)
    gpu_dev = core.DeviceOption(caffe2_pb2.CUDA, 0)
    name_scope = 'gpu_{}'.format(0)
    with core.NameScope(name_scope):
        with core.DeviceScope(gpu_dev):
            data = workspace.FetchBlob(core.ScopedName('data'))
            return data
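A small aside of our own (not part of the project above): core.ScopedName simply prefixes the current name scope onto the blob name, which is why the fetch in Example 9 targets the per-GPU blob 'gpu_0/data'.

from caffe2.python import core

with core.NameScope('gpu_0'):
    # name scopes are '/'-separated prefixes, so this resolves to 'gpu_0/data'
    assert core.ScopedName('data') == 'gpu_0/data'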
Example 10: add_training_operators
# Module to import: from caffe2.python import core [as an alias]
# Or: from caffe2.python.core import DeviceScope [as an alias]
def add_training_operators(output_segmentation, model, device_opts):
    with core.DeviceScope(device_opts):
        loss = model.SigmoidCrossEntropyWithLogits(
            [output_segmentation, "gt_segmentation"], 'loss')
        avg_loss = model.AveragedLoss(loss, "avg_loss")
        model.AddGradientOperators([loss])
        opt = optimizer.build_adam(model, base_learning_rate=0.01)
Example 11: train
# Module to import: from caffe2.python import core [as an alias]
# Or: from caffe2.python.core import DeviceScope [as an alias]
def train(INIT_NET, PREDICT_NET, epochs, batch_size, device_opts):
    data, gt_segmentation = get_data(batch_size)
    workspace.FeedBlob("data", data, device_option=device_opts)
    workspace.FeedBlob("gt_segmentation", gt_segmentation, device_option=device_opts)

    train_model = model_helper.ModelHelper(name="train_net", arg_scope={"order": "NHWC"})
    output_segmentation = create_unet_model(train_model, device_opts=device_opts, is_test=0)
    add_training_operators(output_segmentation, train_model, device_opts=device_opts)
    with core.DeviceScope(device_opts):
        brew.add_weight_decay(train_model, 0.001)

    workspace.RunNetOnce(train_model.param_init_net)
    workspace.CreateNet(train_model.net)

    print('\ntraining for {} epochs'.format(epochs))
    for j in range(0, epochs):
        data, gt_segmentation = get_data(batch_size, 4)
        workspace.FeedBlob("data", data, device_option=device_opts)
        workspace.FeedBlob("gt_segmentation", gt_segmentation, device_option=device_opts)
        workspace.RunNet(train_model.net, 1)  # run the training net for one iteration
        print('{}: {}'.format(j, workspace.FetchBlob("avg_loss")))
    print('training done')

    test_model = model_helper.ModelHelper(name="test_net", arg_scope={"order": "NHWC"}, init_params=False)
    create_unet_model(test_model, device_opts=device_opts, is_test=1)
    workspace.RunNetOnce(test_model.param_init_net)
    workspace.CreateNet(test_model.net, overwrite=True)

    print('\nsaving test model')
    save_net(INIT_NET, PREDICT_NET, test_model)