

Python caffe2_pb2.CUDA attribute code examples

This article collects typical usage examples of the caffe2.proto.caffe2_pb2.CUDA attribute in Python. If you have been wondering what exactly caffe2_pb2.CUDA is for, how to use it, or what it looks like in real code, the selected examples below may help. You can also explore further usage examples from caffe2.proto.caffe2_pb2, the module in which this attribute is defined.


Below are 15 code examples of the caffe2_pb2.CUDA attribute, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
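All 15 examples share one basic pattern: construct a core.DeviceOption whose device_type is caffe2_pb2.CUDA for a given GPU id, then create operators and feed blobs under a core.DeviceScope so they run on that GPU. The minimal sketch below illustrates the pattern; it assumes a Caffe2 build with GPU support, and the GPU index 0, the 'Relu' operator, and the blob names are placeholders chosen only for illustration.

import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace

# A DeviceOption whose device_type is caffe2_pb2.CUDA pins work to the given GPU.
device_option = core.DeviceOption(caffe2_pb2.CUDA, 0)  # 0 is a placeholder GPU index

# Operators created and blobs fed inside the DeviceScope inherit this CUDA device option.
with core.DeviceScope(device_option):
    workspace.FeedBlob('data', np.random.randn(4, 3).astype(np.float32))
    op = core.CreateOperator('Relu', ['data'], ['out'])

workspace.RunOperatorOnce(op)
print(workspace.FetchBlob('out'))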

Example 1: BroacastParameters

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
def BroacastParameters(model, src_gpu, gpus):

    log.info("Broadcasting parameters from gpu {} to gpu: {}".format(
        src_gpu, ','.join([str(g) for g in gpus]))
    )

    for param in model.params:
        if 'gpu_{}'.format(gpus[0]) in str(param):
            for i in gpus:
                blob = workspace.FetchBlob(str(param))
                target_blob_name = str(param).replace(
                    'gpu_{}'.format(src_gpu),
                    'gpu_{}'.format(i)
                )
                log.info('broadcast {} -> {}'.format(
                    str(param), target_blob_name)
                )
                workspace.FetchBlob(str(param))
                with core.DeviceScope(
                        core.DeviceOption(caffe2_pb2.CUDA, i)):
                    workspace.FeedBlob(target_blob_name, blob) 
Developer: facebookresearch, Project: VMZ, Lines of code: 23, Source: model_loader.py

Example 2: _run_test

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
def _run_test(self, A, B, check_grad=False):
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
            op = core.CreateOperator('SpatialNarrowAs', ['A', 'B'], ['C'])
            workspace.FeedBlob('A', A)
            workspace.FeedBlob('B', B)
        workspace.RunOperatorOnce(op)
        C = workspace.FetchBlob('C')

        if check_grad:
            gc = gradient_checker.GradientChecker(
                stepsize=0.005,
                threshold=0.005,
                device_option=core.DeviceOption(caffe2_pb2.CUDA, 0)
            )

            res, grad, grad_estimated = gc.CheckSimple(op, [A, B], 0, [0])
            self.assertTrue(res, 'Grad check failed')

        dims = C.shape
        C_ref = A[:dims[0], :dims[1], :dims[2], :dims[3]]
        np.testing.assert_allclose(C, C_ref, rtol=1e-5, atol=1e-08) 
Developer: yihui-he, Project: KL-Loss, Lines of code: 23, Source: test_spatial_narrow_as_op.py

Example 3: _tf_device

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
def _tf_device(device_option):
    '''
    Handle the devices.

    Args:
        device_option (caffe2_pb2.DeviceOption): DeviceOption protobuf,
            associated to an operator, that contains information such as
            device_type (optional), cuda_gpu_id (optional), node_name (optional,
            tells which node the operator should execute on). See caffe2.proto
            in caffe2/proto for the full list.

    Returns:
        Formatted string representing device information contained in
            device_option.
    '''
    if not device_option.HasField("device_type"):
        return ""
    if device_option.device_type == caffe2_pb2.CPU or device_option.device_type == caffe2_pb2.MKLDNN:
        return "/cpu:*"
    if device_option.device_type == caffe2_pb2.CUDA:
        return "/gpu:{}".format(device_option.device_id)
    raise Exception("Unhandled device", device_option) 
Developer: lanpa, Project: tensorboardX, Lines of code: 24, Source: caffe2_graph.py

Example 4: UpdateDeviceOption

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
def UpdateDeviceOption(dev_opt, net_def):
    """update device options in net_def"""
    # net_def.device_option.CopyFrom(dev_opt)
    # gpufallbackop=['GenerateProposals', 'BoxWithNMSLimit', 'BBoxTransform',
    #     'PackedInt8BGRANHWCToNCHWCStylizerPreprocess', 'BRGNCHWCToPackedInt8BGRAStylizerDeprocess']
    gpufallbackop = ['GenerateProposals', 'BoxWithNMSLimit', 'BBoxTransform']
    # gpufallbackop=[]
    ideepfallbackop = []
    from caffe2.proto import caffe2_pb2
    for eop in net_def.op:
        if (eop.type in gpufallbackop and dev_opt.device_type == caffe2_pb2.CUDA) or (
                eop.type in ideepfallbackop and dev_opt.device_type == caffe2_pb2.IDEEP):
            eop.device_option.device_type = caffe2_pb2.CPU
        elif (
                eop.device_option and
                eop.device_option.device_type != dev_opt.device_type
        ):
            eop.device_option.device_type = dev_opt.device_type 
Developer: intel, Project: optimized-models, Lines of code: 20, Source: common_caffe2.py

Example 5: _update_bn_stats_gpu

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
def _update_bn_stats_gpu(self):
        """
        Copy to GPU.
        Note: the actual blobs used at test time are "rm" and "riv"
        """

        num_gpus = cfg.NUM_GPUS
        root_gpu_id = cfg.ROOT_GPU_ID
        for i in range(root_gpu_id, root_gpu_id + num_gpus):
            with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, i)):
                for bn_layer in self._bn_layers:
                    workspace.FeedBlob(
                        'gpu_{}/'.format(i) + bn_layer + '_bn_rm',
                        np.array(self._meanX_dict[bn_layer], dtype=np.float32),
                    )
                    """
                    Note: riv is actually running var (not running inv var)!!!!
                    """
                    workspace.FeedBlob(
                        'gpu_{}/'.format(i) + bn_layer + '_bn_riv',
                        np.array(self._var_dict[bn_layer], dtype=np.float32),
                    ) 
Developer: facebookresearch, Project: video-long-term-feature-banks, Lines of code: 24, Source: bn_helper.py

Example 6: enqueue_blobs

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
def enqueue_blobs(
        self,
        gpu_id,
        enqueue_blobs_names,
        blob_values,
    ):
        enqueue_blobs_names = [
            'gpu_{}/{}'.format(
                gpu_id, enqueue_blob_name
            ) for enqueue_blob_name in enqueue_blobs_names
        ]

        deviceOption = core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
        for (blob_name, blob) in zip(enqueue_blobs_names, blob_values):
            workspace.FeedBlob(blob_name, blob, device_option=deviceOption)

        queue_name = 'gpu_{}/{}'.format(gpu_id, self._blobs_queue_name)
        workspace.RunOperatorOnce(
            core.CreateOperator(
                'SafeEnqueueBlobs',
                [queue_name] + enqueue_blobs_names,
                enqueue_blobs_names + [queue_name + '_enqueue_status'],
                device_option=deviceOption,
            )
        ) 
Developer: facebookresearch, Project: video-long-term-feature-banks, Lines of code: 27, Source: dataloader.py

Example 7: __init__

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
def __init__(self, device_option: DeviceOption):
        super(Caffe2Network, self).__init__()
        self.device_option = device_option

        self.train_model = model_helper.ModelHelper(name="train_default_net")
        self.test_model = model_helper.ModelHelper(name="test_default_net", init_params=False)
        self.train_net = self.train_model.net
        self.test_net = self.test_model.net
        self.train_init_net = self.train_model.param_init_net
        self.test_init_net = self.test_model.param_init_net
        self.workspace = workspace
        self.output_dict = {}
        self.param_names = None
        # dict that helps us remember that we already added the gradients to the graph for a given loss
        self.gradients_by_loss = {}
        self.is_cuda = (device_option.device_type == caffe2_pb2.CUDA) 
Developer: deep500, Project: deep500, Lines of code: 18, Source: caffe2_network.py

Example 8: inference

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
def inference(self, inputs: Dict[str, np.ndarray]) -> Dict[str, np.ndarray]:
        self.network._setup()

        for event in self.events:
            event.before_executor(inputs)
        
        if self.network.device_option.device_type == caffe2_pb2.CUDA:
            self.network.test_model.net.RunAllOnGPU(use_cudnn=True)

        for node_name, value in inputs.items():
            self.network.workspace.FeedBlob(node_name, value, self.device_option)

        self.network.workspace.RunNetOnce(self.network.test_model.net)

        for key in self.network.output_dict.keys():
            self.network.output_dict[key] = self.network.workspace.FetchBlob(key)

        for event in self.events:
            event.after_inference(self.network.output_dict)
            
        return self.network.output_dict 
Developer: deep500, Project: deep500, Lines of code: 23, Source: caffe2_graph_executor.py

Example 9: inference_and_backprop

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
def inference_and_backprop(self, inputs: Dict[str, np.ndarray], y: str = 'loss') -> Dict[str, np.ndarray]:
        self.network._setup()
        self.network.add_loss(y)

        for event in self.events:
            event.before_executor(inputs)
        
        if self.device_option.device_type == caffe2_pb2.CUDA:
            self.network.train_model.param_init_net.RunAllOnGPU(use_cudnn=True)
            self.network.train_model.net.RunAllOnGPU(use_cudnn=True)

        for node_name, value in inputs.items():
            self.network.workspace.FeedBlob(node_name, value, self.device_option)

        self.network.workspace.RunNetOnce(self.network.train_model.net)

        for key in self.network.output_dict.keys():
            self.network.output_dict[key] = self.network.workspace.FetchBlob(key)

        for event in self.events:
            event.after_backprop(self.network.output_dict)
                        
        return self.network.output_dict 
Developer: deep500, Project: deep500, Lines of code: 25, Source: caffe2_graph_executor.py

Example 10: _run_op_test

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
def _run_op_test(self, X, I, check_grad=False):
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
            op = core.CreateOperator('BatchPermutation', ['X', 'I'], ['Y'])
            workspace.FeedBlob('X', X)
            workspace.FeedBlob('I', I)
        workspace.RunOperatorOnce(op)
        Y = workspace.FetchBlob('Y')

        if check_grad:
            gc = gradient_checker.GradientChecker(
                stepsize=0.1,
                threshold=0.001,
                device_option=core.DeviceOption(caffe2_pb2.CUDA, 0)
            )

            res, grad, grad_estimated = gc.CheckSimple(op, [X, I], 0, [0])
            self.assertTrue(res, 'Grad check failed')

        Y_ref = X[I]
        np.testing.assert_allclose(Y, Y_ref, rtol=1e-5, atol=1e-08) 
Developer: ronghanghu, Project: seg_every_thing, Lines of code: 22, Source: test_batch_permutation_op.py

Example 11: NamedCudaScope

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
@contextlib.contextmanager  # present in the source module; requires `import contextlib`
def NamedCudaScope(gpu_id):
    """Creates a GPU name scope and CUDA device scope. This function is provided
    to reduce `with ...` nesting levels."""
    with GpuNameScope(gpu_id):
        with CudaScope(gpu_id):
            yield
Developer: yihui-he, Project: KL-Loss, Lines of code: 8, Source: c2.py

Example 12: CudaScope

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
@contextlib.contextmanager  # present in the source module; requires `import contextlib`
def CudaScope(gpu_id):
    """Create a CUDA device scope for GPU device `gpu_id`."""
    gpu_dev = CudaDevice(gpu_id)
    with core.DeviceScope(gpu_dev):
        yield
Developer: yihui-he, Project: KL-Loss, Lines of code: 7, Source: c2.py

Example 13: CudaDevice

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
def CudaDevice(gpu_id):
    """Create a Cuda device."""
    return core.DeviceOption(caffe2_pb2.CUDA, gpu_id) 
Developer: yihui-he, Project: KL-Loss, Lines of code: 5, Source: c2.py

Example 14: get_device_option_cuda

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
def get_device_option_cuda(gpu_id=0):
    device_option = caffe2_pb2.DeviceOption()
    device_option.device_type = caffe2_pb2.CUDA
    device_option.device_id = gpu_id
    return device_option 
Developer: yihui-he, Project: KL-Loss, Lines of code: 7, Source: model_convert_utils.py

Example 15: test_size_exceptions

# Required import: from caffe2.proto import caffe2_pb2 [as alias]
# Or: from caffe2.proto.caffe2_pb2 import CUDA [as alias]
def test_size_exceptions(self):
        A = np.random.randn(2, 256, 42, 86).astype(np.float32)
        I = np.array(np.random.permutation(10), dtype=np.int32)
        with self.assertRaises(RuntimeError):
            self._run_op_test(A, I)

    # See doc string in _run_speed_test
    # def test_perf(self):
    #     with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
    #         self._run_speed_test() 
Developer: yihui-he, Project: KL-Loss, Lines of code: 12, Source: test_batch_permutation_op.py


Note: The caffe2.proto.caffe2_pb2.CUDA attribute examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by their authors, and copyright of the source code remains with the original authors; please follow each project's License when distributing or using the code. Do not reproduce this article without permission.