

Python core.DeviceOption Method Code Examples

This article collects typical usage examples of the Python method caffe2.python.core.DeviceOption. If you are wondering what core.DeviceOption does, how to call it, or what real-world uses of it look like, the hand-picked examples below should help. You can also explore further usage examples from the containing module, caffe2.python.core.


Fifteen code examples of the core.DeviceOption method are shown below, sorted by popularity by default.
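Before the examples, here is a minimal usage sketch. It is not drawn from any of the projects below; the blob names and shapes are purely illustrative, and the GPU lines assume a CUDA-enabled Caffe2 build. core.DeviceOption constructs a caffe2_pb2.DeviceOption protobuf that tells Caffe2 which device an operator or blob should live on; it can be passed directly via device_option= or made the default for a region of code through core.DeviceScope.

import numpy as np
from caffe2.proto import caffe2_pb2
from caffe2.python import core, workspace

cpu_opt = core.DeviceOption(caffe2_pb2.CPU)      # CPU device
gpu_opt = core.DeviceOption(caffe2_pb2.CUDA, 0)  # CUDA device with id 0 (requires a GPU build)

# Feed a blob onto a specific device explicitly ...
workspace.FeedBlob('x', np.ones((2, 3), dtype=np.float32), device_option=gpu_opt)

# ... or make a device the default for everything created inside a scope.
with core.DeviceScope(cpu_opt):
    workspace.FeedBlob('y', np.zeros((2, 3), dtype=np.float32))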

Example 1: BroacastParameters

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def BroacastParameters(model, src_gpu, gpus):

    log.info("Broadcasting parameters from gpu {} to gpu: {}".format(
        src_gpu, ','.join([str(g) for g in gpus]))
    )

    for param in model.params:
        if 'gpu_{}'.format(gpus[0]) in str(param):
            for i in gpus:
                blob = workspace.FetchBlob(str(param))
                target_blob_name = str(param).replace(
                    'gpu_{}'.format(src_gpu),
                    'gpu_{}'.format(i)
                )
                log.info('broadcast {} -> {}'.format(
                    str(param), target_blob_name)
                )
                workspace.FetchBlob(str(param))
                with core.DeviceScope(
                        core.DeviceOption(caffe2_pb2.CUDA, i)):
                    workspace.FeedBlob(target_blob_name, blob) 
Author: facebookresearch, Project: VMZ, Lines of code: 23, Source file: model_loader.py

Example 2: _run_test

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def _run_test(self, A, B, check_grad=False):
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
            op = core.CreateOperator('SpatialNarrowAs', ['A', 'B'], ['C'])
            workspace.FeedBlob('A', A)
            workspace.FeedBlob('B', B)
        workspace.RunOperatorOnce(op)
        C = workspace.FetchBlob('C')

        if check_grad:
            gc = gradient_checker.GradientChecker(
                stepsize=0.005,
                threshold=0.005,
                device_option=core.DeviceOption(caffe2_pb2.CUDA, 0)
            )

            res, grad, grad_estimated = gc.CheckSimple(op, [A, B], 0, [0])
            self.assertTrue(res, 'Grad check failed')

        dims = C.shape
        C_ref = A[:dims[0], :dims[1], :dims[2], :dims[3]]
        np.testing.assert_allclose(C, C_ref, rtol=1e-5, atol=1e-08) 
Author: yihui-he, Project: KL-Loss, Lines of code: 23, Source file: test_spatial_narrow_as_op.py

Example 3: enqueue_blobs

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def enqueue_blobs(
        self,
        gpu_id,
        enqueue_blobs_names,
        blob_values,
    ):
        enqueue_blobs_names = [
            'gpu_{}/{}'.format(
                gpu_id, enqueue_blob_name
            ) for enqueue_blob_name in enqueue_blobs_names
        ]

        deviceOption = core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
        for (blob_name, blob) in zip(enqueue_blobs_names, blob_values):
            workspace.FeedBlob(blob_name, blob, device_option=deviceOption)

        queue_name = 'gpu_{}/{}'.format(gpu_id, self._blobs_queue_name)
        workspace.RunOperatorOnce(
            core.CreateOperator(
                'SafeEnqueueBlobs',
                [queue_name] + enqueue_blobs_names,
                enqueue_blobs_names + [queue_name + '_enqueue_status'],
                device_option=deviceOption,
            )
        ) 
Author: facebookresearch, Project: video-long-term-feature-banks, Lines of code: 27, Source file: dataloader.py

Example 4: _run_op_test

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def _run_op_test(self, X, I, check_grad=False):
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
            op = core.CreateOperator('BatchPermutation', ['X', 'I'], ['Y'])
            workspace.FeedBlob('X', X)
            workspace.FeedBlob('I', I)
        workspace.RunOperatorOnce(op)
        Y = workspace.FetchBlob('Y')

        if check_grad:
            gc = gradient_checker.GradientChecker(
                stepsize=0.1,
                threshold=0.001,
                device_option=core.DeviceOption(caffe2_pb2.CUDA, 0)
            )

            res, grad, grad_estimated = gc.CheckSimple(op, [X, I], 0, [0])
            self.assertTrue(res, 'Grad check failed')

        Y_ref = X[I]
        np.testing.assert_allclose(Y, Y_ref, rtol=1e-5, atol=1e-08) 
Author: ronghanghu, Project: seg_every_thing, Lines of code: 22, Source file: test_batch_permutation_op.py

Example 5: get_device_option

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def get_device_option(gpu=None):
        """Constructs `core.DeviceOption` object

        :param int gpu: Identifier of GPU to use or None for CPU.
        :return: Instance of `core.DeviceOption`.
        """
        dev_opt = None
        if gpu is None:
            dev_opt = core.DeviceOption(caffe2_pb2.CPU)
        else:
            assert workspace.has_gpu_support, "Workspace does not support GPUs"
            assert gpu >= 0 and gpu < workspace.NumCudaDevices(),\
                   "Workspace does not provide this gpu (%d). "\
                   "Number of GPUs is %d" % (gpu, workspace.NumCudaDevices())
            dev_opt = core.DeviceOption(caffe2_pb2.CUDA, gpu)
        return dev_opt 
Author: HewlettPackard, Project: dlcookbook-dlbs, Lines of code: 18, Source file: model.py

Example 6: main

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def main():
    args = parser.parse_args()
    args.gpu_id = 0

    model = model_helper.ModelHelper(name="le_net", init_params=False)

    # Bring in the init net from init_net.pb
    init_net_proto = caffe2_pb2.NetDef()
    with open(args.c2_init, "rb") as f:
        init_net_proto.ParseFromString(f.read())
    model.param_init_net = core.Net(init_net_proto)  # model.param_init_net.AppendNet(core.Net(init_net_proto)) #

    # bring in the predict net from predict_net.pb
    predict_net_proto = caffe2_pb2.NetDef()
    with open(args.c2_predict, "rb") as f:
        predict_net_proto.ParseFromString(f.read())
    model.net = core.Net(predict_net_proto)  # model.net.AppendNet(core.Net(predict_net_proto))

    # CUDA performance not impressive
    #device_opts = core.DeviceOption(caffe2_pb2.PROTO_CUDA, args.gpu_id)
    #model.net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True)
    #model.param_init_net.RunAllOnGPU(gpu_id=args.gpu_id, use_cudnn=True)

    input_blob = model.net.external_inputs[0]
    model.param_init_net.GaussianFill(
        [],
        input_blob.GetUnscopedName(),
        shape=(args.batch_size, 3, args.img_size, args.img_size),
        mean=0.0,
        std=1.0)
    workspace.RunNetOnce(model.param_init_net)
    workspace.CreateNet(model.net, overwrite=True)
    workspace.BenchmarkNet(model.net.Proto().name, 5, 20, True) 
Author: rwightman, Project: gen-efficientnet-pytorch, Lines of code: 35, Source file: caffe2_benchmark.py

Example 7: CpuScope

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
@contextlib.contextmanager  # restored from the original c2.py (requires `import contextlib`); without it, `with CpuScope():` would not work
def CpuScope():
    """Create a CPU device scope."""
    cpu_dev = core.DeviceOption(caffe2_pb2.CPU)
    with core.DeviceScope(cpu_dev):
        yield
Author: yihui-he, Project: KL-Loss, Lines of code: 7, Source file: c2.py

Example 8: CudaDevice

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def CudaDevice(gpu_id):
    """Create a Cuda device."""
    return core.DeviceOption(caffe2_pb2.CUDA, gpu_id) 
Author: yihui-he, Project: KL-Loss, Lines of code: 5, Source file: c2.py
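A short usage note: the two helpers above compose naturally with core.DeviceScope. The following is a minimal sketch, not taken from the KL-Loss project; it assumes CpuScope and CudaDevice are importable from the project's c2.py (import path assumed), that CpuScope carries its @contextlib.contextmanager decorator as shown in Example 7, and that a CUDA-enabled Caffe2 build with at least one GPU is available.

import numpy as np
from caffe2.python import core, workspace
# from detectron.utils.c2 import CpuScope, CudaDevice  # import path is an assumption; adjust to your checkout

labels = np.zeros((8,), dtype=np.int32)
with CpuScope():                        # blobs fed inside this scope live in CPU memory
    workspace.FeedBlob('labels', labels)

data = np.random.rand(8, 3, 224, 224).astype(np.float32)
with core.DeviceScope(CudaDevice(0)):   # blobs fed inside this scope live on GPU 0
    workspace.FeedBlob('data', data)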

Example 9: get_device_option_cpu

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def get_device_option_cpu():
    device_option = core.DeviceOption(caffe2_pb2.CPU)
    return device_option 
Author: yihui-he, Project: KL-Loss, Lines of code: 5, Source file: model_convert_utils.py

Example 10: get_device_option_cuda

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def get_device_option_cuda(gpu_id=0):
    device_option = caffe2_pb2.DeviceOption()
    device_option.device_type = caffe2_pb2.CUDA
    device_option.device_id = gpu_id
    return device_option 
Author: yihui-he, Project: KL-Loss, Lines of code: 7, Source file: model_convert_utils.py

Example 11: test_size_exceptions

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def test_size_exceptions(self):
        A = np.random.randn(2, 256, 42, 86).astype(np.float32)
        I = np.array(np.random.permutation(10), dtype=np.int32)
        with self.assertRaises(RuntimeError):
            self._run_op_test(A, I)

    # See doc string in _run_speed_test
    # def test_perf(self):
    #     with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, 0)):
    #         self._run_speed_test() 
Author: yihui-he, Project: KL-Loss, Lines of code: 12, Source file: test_batch_permutation_op.py

Example 12: run_net

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def run_net(net):
    workspace.RunNetOnce(net)
    gpu_dev = core.DeviceOption(caffe2_pb2.CUDA, 0)
    name_scope = 'gpu_{}'.format(0)
    with core.NameScope(name_scope):
        with core.DeviceScope(gpu_dev):
            data = workspace.FetchBlob(core.ScopedName('data'))
            return data 
Author: yihui-he, Project: KL-Loss, Lines of code: 10, Source file: test_loader.py

Example 13: test_forward_and_gradient

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def test_forward_and_gradient(self):
        Y = np.random.randn(128, 4 * 21).astype(np.float32)
        Y_hat = np.random.randn(128, 4 * 21).astype(np.float32)
        inside_weights = np.random.randn(128, 4 * 21).astype(np.float32)
        inside_weights[inside_weights < 0] = 0
        outside_weights = np.random.randn(128, 4 * 21).astype(np.float32)
        outside_weights[outside_weights < 0] = 0
        scale = np.random.random()
        beta = np.random.random()

        op = core.CreateOperator(
            'SmoothL1Loss', ['Y_hat', 'Y', 'inside_weights', 'outside_weights'],
            ['loss'],
            scale=scale,
            beta=beta
        )

        gc = gradient_checker.GradientChecker(
            stepsize=0.005,
            threshold=0.005,
            device_option=core.DeviceOption(caffe2_pb2.CUDA, 0)
        )

        res, grad, grad_estimated = gc.CheckSimple(
            op, [Y_hat, Y, inside_weights, outside_weights], 0, [0]
        )

        self.assertTrue(
            grad.shape == grad_estimated.shape,
            'Fail check: grad.shape != grad_estimated.shape'
        )

        # To inspect the gradient and estimated gradient:
        # np.set_printoptions(precision=3, suppress=True)
        # print('grad:')
        # print(grad)
        # print('grad_estimated:')
        # print(grad_estimated)

        self.assertTrue(res) 
Author: yihui-he, Project: KL-Loss, Lines of code: 42, Source file: test_smooth_l1_loss_op.py

Example 14: create_threads

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def create_threads(self):
        # "worker" threads to construct (partial) minibatches and put them on
        # minibatch queue in CPU memory (limited by queue size).
        self._worker_ids = self.get_worker_ids()
        self._workers = [
            threading.Thread(
                target=self.minibatch_loader,
                name='worker_{}'.format(worker_id),
                args=[worker_id],
            ) for worker_id in self._worker_ids
        ]

        # Create one BlobsQueue per GPU which holds the training data in GPU
        # memory and feeds to the net.
        root_gpu_id = cfg.ROOT_GPU_ID
        for gpu_id in range(root_gpu_id, root_gpu_id + self._num_gpus):
            with core.NameScope('gpu_{}'.format(gpu_id)):
                self.create_blobs_queue(
                    queue_name=self._blobs_queue_name,
                    num_blobs=len(self._blobs_idx_map),
                    capacity=self._gpu_blobs_queue_capacity
                )

        # Launch enqueuer threads.
        blob_names = self._blobs_idx_map.keys()
        enqueue_blobs_names = [
            '{}_{}_enqueue'.format(self._split, blob_name)
            for blob_name in blob_names
        ]
        for gpu_id in range(root_gpu_id, root_gpu_id + self._num_gpus):
            with core.NameScope('gpu_{}'.format(gpu_id)):
                with core.DeviceScope(
                    core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
                ):
                    for blob_list in enqueue_blobs_names:
                        for blob in blob_list:
                            scoped_blob_name = scope.CurrentNameScope() + blob
                            workspace.CreateBlob(scoped_blob_name)
        self._enqueuer = threading.Thread(
            target=self.enqueue_blobs_thread, args=(0, enqueue_blobs_names)
        ) 
Author: facebookresearch, Project: video-long-term-feature-banks, Lines of code: 43, Source file: dataloader.py

Example 15: get_device_option_cuda

# Required module import: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import DeviceOption [as alias]
def get_device_option_cuda(gpu_id=0):
    device_option = caffe2_pb2.DeviceOption()
    device_option.device_type = caffe2_pb2.CUDA
    device_option.cuda_gpu_id = gpu_id
    return device_option 
Author: fyangneil, Project: Clustered-Object-Detection-in-Aerial-Image, Lines of code: 7, Source file: model_convert_utils.py
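Examples 10 and 15 build the DeviceOption protobuf by hand and differ only in the field that carries the GPU index: older Caffe2 releases expose it as cuda_gpu_id, while more recent releases renamed it to device_id. The helper below is a hedged compatibility sketch, not taken from either project; get_device_option_cuda_compat is a name introduced here purely for illustration.

from caffe2.proto import caffe2_pb2

def get_device_option_cuda_compat(gpu_id=0):
    """Build a CUDA DeviceOption that works across Caffe2 proto versions."""
    device_option = caffe2_pb2.DeviceOption()
    device_option.device_type = caffe2_pb2.CUDA
    if hasattr(device_option, 'device_id'):    # newer proto field name
        device_option.device_id = gpu_id
    else:                                      # older proto field name
        device_option.cuda_gpu_id = gpu_id
    return device_option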


Note: the caffe2.python.core.DeviceOption examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their authors, and copyright of the source code remains with the original authors; consult each project's license before redistributing or using it. Do not reproduce this article without permission.