This article collects typical usage examples of the Python method caffe.set_device. If you are unsure what caffe.set_device does or how to call it, the curated code samples below may help. You can also explore further usage examples from the caffe module that this method belongs to.
The following shows 15 code examples of caffe.set_device, sorted by popularity by default.
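Before diving into the examples, here is a minimal orientation sketch distilled from the samples below: caffe.set_device(gpu_id) selects which GPU the current process will use and is normally called together with caffe.set_mode_gpu() before any net or solver is created. The gpu_id value is purely illustrative.

import caffe

gpu_id = 0  # illustrative GPU index; -1 in this sketch means "fall back to CPU"
if gpu_id >= 0:
    caffe.set_device(gpu_id)  # choose the GPU before switching to GPU mode
    caffe.set_mode_gpu()
else:
    caffe.set_mode_cpu()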
Example 1: prep_net
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def prep_net(self, gpu_id, prototxt_path='', caffemodel_path=''):
    import caffe
    print('gpu_id = %d, net_path = %s, model_path = %s' % (gpu_id, prototxt_path, caffemodel_path))
    if gpu_id == -1:
        caffe.set_mode_cpu()
    else:
        caffe.set_device(gpu_id)
        caffe.set_mode_gpu()
    self.gpu_id = gpu_id
    self.net = caffe.Net(prototxt_path, caffemodel_path, caffe.TEST)
    self.net_set = True

    # automatically set cluster centers
    pred_ab_shape = self.net.params[self.pred_ab_layer][0].data[...].shape
    if len(pred_ab_shape) == 4 and pred_ab_shape[1] == 313:
        print('Setting ab cluster centers in layer: %s' % self.pred_ab_layer)
        self.net.params[self.pred_ab_layer][0].data[:, :, 0, 0] = self.pts_in_hull.T

    # automatically set upsampling kernel
    for layer in self.net._layer_names:
        if layer[-3:] == '_us':
            print('Setting upsampling layer kernel: %s' % layer)
            self.net.params[layer][0].data[:, 0, :, :] = np.array(((.25, .5, .25, 0),
                                                                   (.5, 1., .5, 0),
                                                                   (.25, .5, .25, 0),
                                                                   (0, 0, 0, 0)))[np.newaxis, :, :]

# ***** Call forward *****
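The trailing '# ***** Call forward *****' marker above is where the source class goes on to run inference with the prepared net. The following is only a hypothetical sketch of that step: the input blob name 'data_l' is an assumption and the real input layout depends on the deploy prototxt; only self.net and self.pred_ab_layer come from the example itself.

def run_forward(self, input_blob):
    # Hypothetical sketch: feed a preprocessed blob and read back the predicted ab output.
    self.net.blobs['data_l'].data[...] = input_blob  # 'data_l' is an assumed blob name
    self.net.forward()
    return self.net.blobs[self.pred_ab_layer].data[0].copy()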
Example 2: __init__
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def __init__(self, model_weights, model_def, threshold=0.5, GPU_MODE=False):
    if GPU_MODE:
        caffe.set_device(0)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    self.net = caffe.Net(model_def,      # defines the structure of the model
                         model_weights,  # contains the trained weights
                         caffe.TEST)     # use test mode (e.g., don't perform dropout)
    self.threshold = threshold
    self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
    self.transformer.set_transpose('data', (2, 0, 1))
    self.transformer.set_mean('data', np.array([127.0, 127.0, 127.0]))  # mean pixel
    self.transformer.set_raw_scale('data', 255)  # the reference model operates on images in [0, 255] instead of [0, 1]
    self.transformer.set_channel_swap('data', (2, 1, 0))  # the reference model has channels in BGR order instead of RGB
    image_resize = 300
    self.net.blobs['data'].reshape(1, 3, image_resize, image_resize)
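For context, a wrapper like Example 2 is typically used as sketched below. This is a hedged sketch, not code from the example: the detect method, the image loading, and the 'detection_out' output blob (the usual SSD deploy output) are assumptions.

def detect(self, image_path):
    # Hypothetical usage sketch for the wrapper above (SSD-style output assumed).
    image = caffe.io.load_image(image_path)            # HxWx3, RGB, values in [0, 1]
    self.net.blobs['data'].data[...] = self.transformer.preprocess('data', image)
    detections = self.net.forward()['detection_out']   # assumed shape: (1, 1, N, 7)
    # each row: [image_id, label, confidence, xmin, ymin, xmax, ymax]
    keep = detections[0, 0, :, 2] >= self.threshold
    return detections[0, 0, keep]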
Example 3: __init__
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def __init__(self, solver_prototxt, output_dir, pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    caffe.set_mode_gpu()
    caffe.set_device(0)
    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print('Loading pretrained model weights from {:s}'.format(pretrained_model))
        self.solver.net.copy_from(pretrained_model)

    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)
Example 4: train
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def train(solver_proto_path, snapshot_solver_path=None, init_weights=None, GPU_ID=0):
    """
    Train the defined net. We did not use this function for our final net
    (the caffe executable was used for multi-GPU training); it was used for prototyping.
    """
    import time
    t0 = time.time()
    caffe.set_mode_gpu()
    caffe.set_device(GPU_ID)
    solver = caffe.get_solver(solver_proto_path)
    if snapshot_solver_path is not None:
        solver.solve(snapshot_solver_path)  # train from a previous solverstate
    else:
        if init_weights is not None:
            solver.net.copy_from(init_weights)  # copy weights from a model without a solverstate
        solver.solve()  # train from scratch
    t1 = time.time()
    print 'Total training time: ', t1 - t0, ' sec'
    model_dir = "calc_" + time.strftime("%d-%m-%Y_%I%M%S")
    moveModel(model_dir=model_dir)  # move all the model files to a directory
    print "Moved model to model/" + model_dir
Example 5: __init__
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def __init__(self, net_proto, net_weights, device_id, input_size=None):
    caffe.set_mode_gpu()
    caffe.set_device(device_id)
    self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

    input_shape = self._net.blobs['data'].data.shape
    if input_size is not None:
        input_shape = input_shape[:2] + input_size

    transformer = caffe.io.Transformer({'data': input_shape})
    if self._net.blobs['data'].data.shape[1] == 3:
        transformer.set_transpose('data', (2, 0, 1))  # move image channels to the outermost dimension
        transformer.set_mean('data', np.array([104, 117, 123]))  # subtract the dataset mean in each channel
    else:
        pass  # non-RGB data does not need the transformer

    self._transformer = transformer
    self._sample_shape = self._net.blobs['data'].data.shape
Example 6: load_nets
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def load_nets(args, cur_gpu):
    # Initialize the solver and feature net. The RNN should be initialized
    # before the CNN, because the CNN's cuDNN conv layers may assume they can
    # use all available memory.
    caffe.set_mode_gpu()
    caffe.set_device(cur_gpu)
    solver = caffe.SGDSolver(args.solver)
    if args.snapshot:
        print "Restoring history from {}".format(args.snapshot)
        solver.restore(args.snapshot)
    rnn = solver.net
    if args.weights:
        rnn.copy_from(args.weights)
    feature_net = caffe.Net(args.feature_net, args.feature_param, caffe.TEST)

    # apply bbox regression normalization to the net weights
    with open(args.bbox_mean, 'rb') as f:
        bbox_means = cPickle.load(f)
    with open(args.bbox_std, 'rb') as f:
        bbox_stds = cPickle.load(f)
    feature_net.params['bbox_pred_vid'][0].data[...] = \
        feature_net.params['bbox_pred_vid'][0].data * bbox_stds[:, np.newaxis]
    feature_net.params['bbox_pred_vid'][1].data[...] = \
        feature_net.params['bbox_pred_vid'][1].data * bbox_stds + bbox_means

    return solver, feature_net, rnn, bbox_means, bbox_stds
Example 7: load_nets
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def load_nets(args, cur_gpu):
    # Initialize the solver and feature net. The RNN should be initialized
    # before the CNN, because the CNN's cuDNN conv layers may assume they can
    # use all available memory.
    caffe.set_mode_gpu()
    caffe.set_device(cur_gpu)
    solver = caffe.SGDSolver(args.solver)
    if args.snapshot:
        print "Restoring history from {}".format(args.snapshot)
        solver.restore(args.snapshot)
    net = solver.net
    if args.weights:
        print "Copying weights from {}".format(args.weights)
        net.copy_from(args.weights)
    return solver, net
Example 8: load_models
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def load_models(args):
    # load the RNN model
    caffe.set_mode_gpu()
    if args.gpus is None:
        caffe.set_device(args.job_id - 1)
    else:
        assert args.job_id <= len(args.gpus)
        caffe.set_device(args.gpus[args.job_id - 1])
    if args.lstm_param != '':
        rnn_net = caffe.Net(args.lstm_def, args.lstm_param, caffe.TEST)
        print 'Loaded RNN network from {:s}.'.format(args.lstm_def)
    else:
        rnn_net = caffe.Net(args.lstm_def, caffe.TEST)
        print 'WARNING: dummy RNN network created.'

    # load the feature model
    feature_net = caffe.Net(args.def_file, args.param, caffe.TEST)
    print 'Loaded feature network from {:s}.'.format(args.def_file)
    return feature_net, rnn_net
Example 9: solve
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def solve(proto, snapshot, gpus, timing, uid, rank):
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(proto)
    if snapshot and len(snapshot) != 0:
        solver.restore(snapshot)

    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()
    if timing and rank == 0:
        time(solver, nccl)  # attach timing callbacks (helper defined elsewhere in the source)
    else:
        solver.add_callback(nccl)

    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    solver.step(solver.param.max_iter)
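Example 9 is written to run as one worker process per GPU, all sharing a single NCCL uid. A launcher roughly along the lines of Caffe's python/train.py is sketched below; the gpus list and the argument order simply mirror the solve() signature above.

from multiprocessing import Process

import caffe

def train_multi_gpu(proto, snapshot, gpus, timing=False):
    # Rough sketch: spawn one solve() worker per GPU, all sharing one NCCL uid.
    uid = caffe.NCCL.new_uid()
    procs = []
    for rank in range(len(gpus)):
        p = Process(target=solve, args=(proto, snapshot, gpus, timing, uid, rank))
        p.daemon = True
        p.start()
        procs.append(p)
    for p in procs:
        p.join()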
Example 10: __init__
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def __init__(self, net_proto, net_weights, device_id, input_size=None):
    caffe.set_mode_gpu()
    caffe.set_device(device_id)
    self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

    input_shape = self._net.blobs['data'].data.shape
    if input_size is not None:
        input_shape = input_shape[:2] + input_size

    transformer = caffe.io.Transformer({'data': input_shape})
    # The transformer setup is disabled in this variant:
    # if self._net.blobs['data'].data.shape[1] == 3:
    #     transformer.set_transpose('data', (2, 0, 1))  # move image channels to the outermost dimension
    #     transformer.set_mean('data', np.array([104, 117, 123]))  # subtract the dataset mean in each channel
    # else:
    #     pass  # non-RGB data does not need the transformer

    self._transformer = transformer
    self._sample_shape = self._net.blobs['data'].data.shape
Example 11: __init__
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def __init__(self, params):
    self.dimension = params['dimension']
    self.dataset = params['dataset']
    self.pooling = params['pooling']

    # Read image lists
    with open(params['query_list'], 'r') as f:
        self.query_names = f.read().splitlines()
    with open(params['frame_list'], 'r') as f:
        self.database_list = f.read().splitlines()

    # Parameters needed
    self.layer = params['layer']
    self.save_db_feats = params['database_feats']

    # Init network
    if params['gpu']:
        caffe.set_mode_gpu()
        caffe.set_device(0)
    else:
        caffe.set_mode_cpu()
    print "Extracting from:", params['net_proto']
    cfg.TEST.HAS_RPN = True  # cfg is the detection config object imported at module level in the source
    self.net = caffe.Net(params['net_proto'], params['net'], caffe.TEST)
Example 12: loadDNN
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def loadDNN(useGpu=False):
    global net, W_in, H_in, H_out, W_out, lm_lab_l_rs
    if useGpu:
        gpu_id = 0
        caffe.set_mode_gpu()
        caffe.set_device(gpu_id)
    net = caffe.Net('colorization_deploy_v0.prototxt', 'colorization_release_v0.caffemodel', caffe.TEST)
    print '\n done loading network! \n'

    (H_in, W_in) = net.blobs['data_l'].data.shape[2:]       # get input shape
    (H_out, W_out) = net.blobs['class8_ab'].data.shape[2:]  # get output shape
    net.blobs['Trecip'].data[...] = 6 / np.log(10)          # 1/T, set annealing temperature

    l_mean = sio.loadmat('ilsvrc_2012_mean.mat')
    lm = np.array(l_mean['mean_data'])
    lm = lm / np.max(lm)
    lm_lab = color.rgb2lab(lm)
    lm_lab_l = lm_lab[:, :, 0]
    lm_lab_l = lm_lab_l - np.mean(np.mean(lm_lab_l)) + 50
    lm_lab_l = Image.fromarray(lm_lab_l)
    lm_lab_l_rs = lm_lab_l.resize((W_in, H_in), Image.ANTIALIAS)
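A colorization call built on the globals set up in Example 12 usually follows the public colorization_v0 demo: resize the L channel to the network input, mean-center it, push it through 'data_l', and read the predicted ab channels from 'class8_ab'. The sketch below is hedged accordingly; the centering constant and the final upsampling/recombination step belong to the surrounding project and may differ.

def colorize(img_rgb):
    # Hedged sketch; assumes the globals (net, W_in, H_in) set by loadDNN above.
    img_lab = color.rgb2lab(img_rgb)                  # convert to Lab
    img_l = img_lab[:, :, 0]                          # keep the lightness channel
    img_l_rs = np.array(Image.fromarray(img_l).resize((W_in, H_in), Image.ANTIALIAS))
    net.blobs['data_l'].data[0, 0, :, :] = img_l_rs - 50  # mean-centering constant is an assumption
    net.forward()
    ab_rs = net.blobs['class8_ab'].data[0].transpose((1, 2, 0))  # H_out x W_out x 2
    return img_l, ab_rs  # caller upsamples ab to the original size and re-stacks it with L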
Example 13: solve
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def solve(proto, gpus, uid, rank, max_iter):
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(proto)
    if rank == 0:
        # solver.restore(_snapshot)
        solver.net.copy_from(_weights)  # _weights is a module-level path in the source
    solver.net.layers[0].get_gpu_id(gpus[rank])  # custom method on the data layer in the source project

    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()
    solver.add_callback(nccl)

    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    for _ in range(max_iter):
        solver.step(1)
Example 14: __init__
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def __init__(self, weights_path, image_net_proto, device_id=-1):
    if device_id >= 0:
        caffe.set_mode_gpu()
        caffe.set_device(device_id)
    else:
        caffe.set_mode_cpu()
    # Set up the image processing net.
    phase = caffe.TEST
    self.image_net = caffe.Net(image_net_proto, weights_path, phase)
    image_data_shape = self.image_net.blobs['data'].data.shape
    self.transformer = caffe.io.Transformer({'data': image_data_shape})
    channel_mean = np.zeros(image_data_shape[1:])
    channel_mean_values = [104, 117, 123]
    assert channel_mean.shape[0] == len(channel_mean_values)
    for channel_index, mean_val in enumerate(channel_mean_values):
        channel_mean[channel_index, ...] = mean_val
    self.transformer.set_mean('data', channel_mean)
    self.transformer.set_channel_swap('data', (2, 1, 0))  # BGR
    self.transformer.set_transpose('data', (2, 0, 1))
Example 15: load_network
# Required import: import caffe [as alias]
# Alternatively: from caffe import set_device [as alias]
def load_network(proto_txt, caffe_model, device):
    if 'gpu' in device:
        caffe.set_mode_gpu()
        device_id = int(device.split('gpu')[-1])
        caffe.set_device(device_id)
    else:
        caffe.set_mode_cpu()
    # load the network
    net = caffe.Net(proto_txt, caffe_model, caffe.TEST)
    # transformer (CAFFE_ROOT and BS are module-level constants in the source project)
    mu = np.load(osp.join(CAFFE_ROOT, 'models', 'ResNet', 'ResNet_mean.npy'))
    mu = mu.mean(1).mean(1)
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_transpose('data', (2, 0, 1))     # move image channels to the outermost dimension
    transformer.set_mean('data', mu)                 # subtract the dataset mean in each channel
    transformer.set_raw_scale('data', 255)           # rescale from [0, 1] to [0, 255]
    transformer.set_channel_swap('data', (2, 1, 0))  # swap channels from RGB to BGR
    # reshape the input
    net.blobs['data'].reshape(BS, 3, 224, 224)
    return net, transformer
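A typical call site for Example 15 is sketched below. The prototxt, caffemodel, and image paths are placeholders, and 'prob' is an assumed output blob name; the actual output layer depends on the ResNet deploy prototxt being loaded.

net, transformer = load_network('ResNet-50-deploy.prototxt', 'ResNet-50-model.caffemodel', 'gpu0')

# Hypothetical usage: preprocess one image and run a forward pass.
image = caffe.io.load_image('example.jpg')               # HxWx3, RGB, values in [0, 1]
net.blobs['data'].data[0, ...] = transformer.preprocess('data', image)
net.forward()
probs = net.blobs['prob'].data[0].copy()                 # 'prob' is an assumed blob name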