This article collects typical usage examples of the Python method caffe2.python.core.NameScope. If you are unsure what core.NameScope does or how to use it, the curated code examples below may help. You can also explore further usage examples from the module caffe2.python.core, where this method is defined.
The following presents 10 code examples of core.NameScope, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
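Before the examples, a minimal sketch of what core.NameScope does (assuming a working Caffe2 installation; the blob name 'data' is illustrative only): it pushes a prefix onto the current name scope, and helpers such as core.ScopedName apply that prefix to blob names.

from caffe2.python import core, workspace
import numpy as np

with core.NameScope('gpu_0'):
    # Inside the scope, ScopedName prepends 'gpu_0/' to the plain name.
    scoped_name = core.ScopedName('data')  # 'gpu_0/data'
    workspace.FeedBlob(scoped_name, np.zeros((1,), dtype=np.float32))

print(workspace.Blobs())  # ['gpu_0/data']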
Example 1: get_net
# Required imports: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import NameScope [as alias]
def get_net(data_loader, name):
    logger = logging.getLogger(__name__)
    blob_names = data_loader.get_output_names()
    net = core.Net(name)
    net.type = 'dag'
    for gpu_id in range(cfg.NUM_GPUS):
        with core.NameScope('gpu_{}'.format(gpu_id)):
            with core.DeviceScope(muji.OnGPU(gpu_id)):
                for blob_name in blob_names:
                    blob = core.ScopedName(blob_name)
                    workspace.CreateBlob(blob)
                net.DequeueBlobs(
                    data_loader._blobs_queue_name, blob_names)
    logger.info("Protobuf:\n" + str(net.Proto()))
    return net
Example 2: close_blobs_queues
# Required imports: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import NameScope [as alias]
def close_blobs_queues(self):
    """Close the BlobsQueue of each GPU."""
    for gpu_id in range(self._num_gpus):
        with core.NameScope('gpu_{}'.format(gpu_id)):
            workspace.RunOperatorOnce(
                core.CreateOperator(
                    'CloseBlobsQueue', [self._blobs_queue_name], []
                )
            )
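For context, the queue closed above is typically created earlier with the CreateBlobsQueue operator under the same per-GPU name scope. A hedged sketch of that create step (queue_name, num_blobs, and capacity are placeholders, not taken from this codebase):

for gpu_id in range(num_gpus):
    with core.NameScope('gpu_{}'.format(gpu_id)):
        # CreateBlobsQueue outputs a queue blob; each record holds
        # num_blobs blobs, and the queue buffers up to `capacity` records.
        workspace.RunOperatorOnce(
            core.CreateOperator(
                'CreateBlobsQueue', [], [queue_name],
                num_blobs=num_blobs,
                capacity=capacity,
            )
        )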
Example 3: GpuNameScope
# Required imports: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import NameScope [as alias]
import contextlib

@contextlib.contextmanager
def GpuNameScope(gpu_id):
    """Create a name scope for GPU device `gpu_id`."""
    with core.NameScope('gpu_{:d}'.format(gpu_id)):
        yield
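A possible usage of this context manager (the blob name is illustrative): because the generator is wrapped with contextlib.contextmanager, it composes like core.NameScope itself.

with GpuNameScope(0):
    # Blobs created here get the 'gpu_0/' prefix.
    workspace.CreateBlob(core.ScopedName('data'))  # creates 'gpu_0/data'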
Example 4: run_net
# Required imports: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import NameScope [as alias]
def run_net(net):
    workspace.RunNetOnce(net)
    gpu_dev = core.DeviceOption(caffe2_pb2.CUDA, 0)
    name_scope = 'gpu_{}'.format(0)
    with core.NameScope(name_scope):
        with core.DeviceScope(gpu_dev):
            data = workspace.FetchBlob(core.ScopedName('data'))
            return data
Example 5: test_simple_cnnmodel
# Required imports: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import NameScope [as alias]
def test_simple_cnnmodel(self):
    model = cnn.CNNModelHelper("NCHW", name="overfeat")
    workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
    workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int64))
    with core.NameScope("conv1"):
        conv1 = model.Conv("data", "conv1", 3, 96, 11, stride=4)
        relu1 = model.Relu(conv1, conv1)
        pool1 = model.MaxPool(relu1, "pool1", kernel=2, stride=2)
    with core.NameScope("classifier"):
        fc = model.FC(pool1, "fc", 4096, 1000)
        pred = model.Softmax(fc, "pred")
        xent = model.LabelCrossEntropy([pred, "label"], "xent")
        loss = model.AveragedLoss(xent, "loss")
    blob_name_tracker = {}
    graph = tb.model_to_graph_def(
        model,
        blob_name_tracker=blob_name_tracker,
        shapes={},
        show_simplified=False,
    )
    compare_proto(graph, self)

# cnn.CNNModelHelper is deprecated, so we also test with
# model_helper.ModelHelper. The model used in this test is taken from the
# Caffe2 MNIST tutorial. Also use show_simplified=False here.
Example 6: test_simple_model
# Required imports: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import NameScope [as alias]
def test_simple_model(self):
    model = model_helper.ModelHelper(name="mnist")
    # how come those inputs don't break the forward pass =.=a
    workspace.FeedBlob("data", np.random.randn(1, 3, 64, 64).astype(np.float32))
    workspace.FeedBlob("label", np.random.randn(1, 1000).astype(np.int64))
    with core.NameScope("conv1"):
        conv1 = brew.conv(model, "data", 'conv1', dim_in=1, dim_out=20, kernel=5)
        # Image size: 24 x 24 -> 12 x 12
        pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
        # Image size: 12 x 12 -> 8 x 8
        conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=100, kernel=5)
        # Image size: 8 x 8 -> 4 x 4
        pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
    with core.NameScope("classifier"):
        # 100 * 4 * 4 is dim_out of the previous layer times the 4 x 4 image size
        fc3 = brew.fc(model, pool2, 'fc3', dim_in=100 * 4 * 4, dim_out=500)
        relu = brew.relu(model, fc3, fc3)
        pred = brew.fc(model, relu, 'pred', 500, 10)
        softmax = brew.softmax(model, pred, 'softmax')
        xent = model.LabelCrossEntropy([softmax, "label"], 'xent')
        # compute the expected loss
        loss = model.AveragedLoss(xent, "loss")
    model.net.RunAllOnMKL()
    model.param_init_net.RunAllOnMKL()
    model.AddGradientOperators([loss], skip=1)
    blob_name_tracker = {}
    graph = tb.model_to_graph_def(
        model,
        blob_name_tracker=blob_name_tracker,
        shapes={},
        show_simplified=False,
    )
    compare_proto(graph, self)
Example 7: broadcast_parameters
# Required imports: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import NameScope [as alias]
def broadcast_parameters(model):
    num_gpus = cfg.NUM_GPUS
    if num_gpus == 1:
        return
    root_gpu_id = cfg.ROOT_GPU_ID
    all_model_params = model.GetAllParams('gpu_{}'.format(root_gpu_id))
    all_params_momentum = []
    if 'test' not in model.net.Name():
        for param in model.GetParams('gpu_{}'.format(root_gpu_id)):
            if param in model.TrainableParams():
                all_params_momentum.append(str(param) + '_momentum')
    all_params = all_model_params + all_params_momentum
    for param in all_params:
        data = workspace.FetchBlob(str(param))
        unscoped_param_name = misc.unscope_name(str(param))
        logger.info('Broadcasting {} to'.format(str(param)))
        for idx in range(root_gpu_id + 1, root_gpu_id + num_gpus):
            with core.NameScope('gpu_{}'.format(idx)):
                with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, idx)):
                    gpu_scoped_name = misc.scoped_name(unscoped_param_name)
                    logger.info(' |-> {}'.format(gpu_scoped_name))
                    workspace.FeedBlob(gpu_scoped_name, data)

# Initialize the model from a file and broadcast the parameters to all GPUs
# if num_gpus > 1.
Example 8: _CorrectMomentum
# Required imports: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import NameScope [as alias]
def _CorrectMomentum(self, correction):
    """The MomentumSGDUpdate op implements the update V as

        V := mu * V + lr * grad,

    where mu is the momentum factor, lr is the learning rate, and grad is
    the stochastic gradient. Since V is not defined independently of the
    learning rate (as it should ideally be), when the learning rate is
    changed we should scale the update history V in order to make it
    compatible in scale with lr * grad.
    """
    # Only log when the correction is substantial, to avoid noisy logging.
    if correction < 0.9 or correction > 1.1:
        logger.info('Scaling update history by {:.6f} (new/old lr)'.format(
            correction))
    root_gpu_id = cfg.ROOT_GPU_ID
    num_gpus = cfg.NUM_GPUS
    for i in range(root_gpu_id, root_gpu_id + num_gpus):
        with core.DeviceScope(core.DeviceOption(caffe2_pb2.CUDA, i)):
            with core.NameScope("gpu_{}".format(i)):
                params = self.GetParams()
                for param in params:
                    if param in self.TrainableParams():
                        op = core.CreateOperator(
                            'Scale', [param + '_momentum'],
                            [param + '_momentum'],
                            scale=correction)
                        workspace.RunOperatorOnce(op)
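To see why the correction factor works, a toy numeric check (all values assumed for illustration): if the learning rate drops from 0.1 to 0.01, correction = new_lr / old_lr = 0.1, so scaling V by 0.1 keeps mu * V commensurate with the new lr * grad term.

old_lr, new_lr = 0.1, 0.01        # assumed values
correction = new_lr / old_lr      # 0.1
V = 2.0                           # update history accumulated under old_lr
V *= correction                   # 0.2, now in scale with new_lr * grad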
Example 9: create_threads
# Required imports: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import NameScope [as alias]
def create_threads(self):
    # "Worker" threads construct (partial) minibatches and put them on a
    # minibatch queue in CPU memory (bounded by the queue size).
    self._worker_ids = self.get_worker_ids()
    self._workers = [
        threading.Thread(
            target=self.minibatch_loader,
            name='worker_{}'.format(worker_id),
            args=[worker_id],
        ) for worker_id in self._worker_ids
    ]
    # Create one BlobsQueue per GPU, which holds the training data in GPU
    # memory and feeds the net.
    root_gpu_id = cfg.ROOT_GPU_ID
    for gpu_id in range(root_gpu_id, root_gpu_id + self._num_gpus):
        with core.NameScope('gpu_{}'.format(gpu_id)):
            self.create_blobs_queue(
                queue_name=self._blobs_queue_name,
                num_blobs=len(self._blobs_idx_map),
                capacity=self._gpu_blobs_queue_capacity
            )
    # Launch enqueuer threads.
    blob_names = self._blobs_idx_map.keys()
    enqueue_blobs_names = [
        '{}_{}_enqueue'.format(self._split, blob_name)
        for blob_name in blob_names
    ]
    for gpu_id in range(root_gpu_id, root_gpu_id + self._num_gpus):
        with core.NameScope('gpu_{}'.format(gpu_id)):
            with core.DeviceScope(
                core.DeviceOption(caffe2_pb2.CUDA, gpu_id)
            ):
                # enqueue_blobs_names is a flat list of blob names, so a
                # single loop creates each scoped enqueue blob.
                for blob in enqueue_blobs_names:
                    scoped_blob_name = scope.CurrentNameScope() + blob
                    workspace.CreateBlob(scoped_blob_name)
    self._enqueuer = threading.Thread(
        target=self.enqueue_blobs_thread, args=(0, enqueue_blobs_names)
    )
Example 10: shutdown_dataloader
# Required imports: from caffe2.python import core [as alias]
# Or: from caffe2.python.core import NameScope [as alias]
def shutdown_dataloader(self):
    self.coordinator.request_stop()
    self.coordinator.wait_for_stop()
    root_gpu_id = cfg.ROOT_GPU_ID
    for idx in range(root_gpu_id, root_gpu_id + self._num_gpus):
        with core.NameScope("gpu_{}".format(idx)):
            self.close_blobs_queue()
    self.join()
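Taken together, Examples 9 and 10 suggest a loader lifecycle roughly like the following hedged sketch. Only names shown in the examples above are used (create_threads, _workers, _enqueuer, shutdown_dataloader); the loader instance itself and the training loop are assumed.

loader.create_threads()
for worker in loader._workers:
    worker.start()
loader._enqueuer.start()
# ... the training loop consumes blobs dequeued from the per-GPU queues ...
loader.shutdown_dataloader()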