This article collects typical usage examples of the Python method caffe.set_mode_gpu. If you are wondering what caffe.set_mode_gpu does, how to call it, or want to see it in real code, the curated examples below may help. You can also explore further usage examples from the caffe module it belongs to.
The following shows 15 code examples of the caffe.set_mode_gpu method, sorted by popularity by default.
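All of the examples share the same basic pattern: select the execution mode (and, in GPU mode, the device) before constructing any Net or Solver. Below is a minimal sketch of that pattern; the prototxt and caffemodel file names are placeholders, not files from any particular project.

import caffe

def init_net(gpu_id=-1, prototxt='deploy.prototxt', weights='model.caffemodel'):
    if gpu_id < 0:
        caffe.set_mode_cpu()        # fall back to CPU
    else:
        caffe.set_device(gpu_id)    # pick the GPU first ...
        caffe.set_mode_gpu()        # ... then switch Caffe to GPU mode
    return caffe.Net(prototxt, weights, caffe.TEST)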
Example 1: prep_net
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
# Also requires a module-level: import numpy as np
def prep_net(self, gpu_id, prototxt_path='', caffemodel_path=''):
    import caffe
    print('gpu_id = %d, net_path = %s, model_path = %s' % (gpu_id, prototxt_path, caffemodel_path))
    if gpu_id == -1:
        caffe.set_mode_cpu()
    else:
        caffe.set_device(gpu_id)
        caffe.set_mode_gpu()
    self.gpu_id = gpu_id
    self.net = caffe.Net(prototxt_path, caffemodel_path, caffe.TEST)
    self.net_set = True

    # automatically set cluster centers
    if len(self.net.params[self.pred_ab_layer][0].data[...].shape) == 4 \
            and self.net.params[self.pred_ab_layer][0].data[...].shape[1] == 313:
        print('Setting ab cluster centers in layer: %s' % self.pred_ab_layer)
        self.net.params[self.pred_ab_layer][0].data[:, :, 0, 0] = self.pts_in_hull.T

    # automatically set upsampling kernel
    for layer in self.net._layer_names:
        if layer[-3:] == '_us':
            print('Setting upsampling layer kernel: %s' % layer)
            self.net.params[layer][0].data[:, 0, :, :] = np.array(((.25, .5, .25, 0),
                                                                   (.5, 1., .5, 0),
                                                                   (.25, .5, .25, 0),
                                                                   (0, 0, 0, 0)))[np.newaxis, :, :]

# ***** Call forward *****
Example 2: __init__
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
# Also requires a module-level: import numpy as np
def __init__(self, model_weights, model_def, threshold=0.5, GPU_MODE=False):
    if GPU_MODE:
        caffe.set_device(0)
        caffe.set_mode_gpu()
    else:
        caffe.set_mode_cpu()
    self.net = caffe.Net(model_def,      # defines the structure of the model
                         model_weights,  # contains the trained weights
                         caffe.TEST)     # use test mode (e.g., don't perform dropout)
    self.threshold = threshold
    self.transformer = caffe.io.Transformer({'data': self.net.blobs['data'].data.shape})
    self.transformer.set_transpose('data', (2, 0, 1))
    self.transformer.set_mean('data', np.array([127.0, 127.0, 127.0]))  # mean pixel
    self.transformer.set_raw_scale('data', 255)  # the reference model operates on images in [0, 255] instead of [0, 1]
    self.transformer.set_channel_swap('data', (2, 1, 0))  # the reference model expects channels in BGR order instead of RGB
    image_resize = 300
    self.net.blobs['data'].reshape(1, 3, image_resize, image_resize)
Example 3: _initialize_caffe
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
# Also requires a module-level: import numpy as np
def _initialize_caffe(deploy_file, input_weight_file, training_mean_pickle, inference_width,
                      inference_height):
    """
    Initializes Caffe to prepare to run some data through the model for inference.
    """
    caffe.set_mode_gpu()
    net = caffe.Net(deploy_file, input_weight_file, caffe.TEST)

    # input preprocessing: 'data' is the name of the input blob == net.inputs[0]
    transformer = caffe.io.Transformer({"data": net.blobs["data"].data.shape})
    # PIL.Image loads the data with the channel last.
    transformer.set_transpose("data", (2, 0, 1))
    # Mean pixel.
    transformer.set_mean("data", np.load(training_mean_pickle).mean(1).mean(1))
    # The reference model operates on images in [0, 255] range instead of [0, 1].
    transformer.set_raw_scale("data", 255)
    # The reference model has channels in BGR order instead of RGB.
    transformer.set_channel_swap("data", (2, 1, 0))
    net.blobs["data"].reshape(1, 3, inference_height, inference_width)
    return (net, transformer)
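Once the net and transformer have been returned, running a single image through the model follows the usual pycaffe pattern. The sketch below is illustrative only: the file names are placeholders and the 224x224 input size is an assumption, not something fixed by the function above.

import numpy as np
import caffe

# Placeholder file names; substitute your own deploy file, weights and mean file.
net, transformer = _initialize_caffe('deploy.prototxt', 'weights.caffemodel',
                                     'training_mean.npy', 224, 224)
image = caffe.io.load_image('example.jpg')              # HxWxC, float values in [0, 1]
net.blobs['data'].data[...] = transformer.preprocess('data', image)
output = net.forward()
scores = output[net.outputs[0]][0]                      # scores for the single input
print('Top class index: %d' % scores.argmax())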
Example 4: __init__
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
# Also requires: from caffe.proto import caffe_pb2; pb2 refers to the google.protobuf package
def __init__(self, solver_prototxt, output_dir,
             pretrained_model=None):
    """Initialize the SolverWrapper."""
    self.output_dir = output_dir

    caffe.set_mode_gpu()
    caffe.set_device(0)

    self.solver = caffe.SGDSolver(solver_prototxt)
    if pretrained_model is not None:
        print('Loading pretrained model weights from {:s}'.format(pretrained_model))
        self.solver.net.copy_from(pretrained_model)

    self.solver_param = caffe_pb2.SolverParameter()
    with open(solver_prototxt, 'rt') as f:
        pb2.text_format.Merge(f.read(), self.solver_param)
Example 5: train
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
def train(solver_proto_path, snapshot_solver_path=None, init_weights=None, GPU_ID=0):
    """
    Train the defined net. We did not use this function for our final net (the caffe
    executable was used for multi-GPU training); it was used for prototyping.
    """
    import time
    t0 = time.time()
    caffe.set_mode_gpu()
    caffe.set_device(GPU_ID)
    solver = caffe.get_solver(solver_proto_path)
    if snapshot_solver_path is not None:
        solver.solve(snapshot_solver_path)  # train from a previous solverstate
    else:
        if init_weights is not None:
            solver.net.copy_from(init_weights)  # copy weights from a model without a solverstate
        solver.solve()  # train from scratch
    t1 = time.time()
    print('Total training time: ', t1 - t0, ' sec')
    model_dir = "calc_" + time.strftime("%d-%m-%Y_%I%M%S")
    moveModel(model_dir=model_dir)  # move all the model files to a directory
    print("Moved model to model/" + model_dir)
Example 6: __init__
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
# Also requires a module-level: import numpy as np
def __init__(self, net_proto, net_weights, device_id, input_size=None):
    caffe.set_mode_gpu()
    caffe.set_device(device_id)
    self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

    input_shape = self._net.blobs['data'].data.shape
    if input_size is not None:
        input_shape = input_shape[:2] + input_size

    transformer = caffe.io.Transformer({'data': input_shape})
    if self._net.blobs['data'].data.shape[1] == 3:
        transformer.set_transpose('data', (2, 0, 1))  # move image channels to the outermost dimension
        transformer.set_mean('data', np.array([104, 117, 123]))  # subtract the dataset mean from each channel
    else:
        pass  # non-RGB data does not need the transformer

    self._transformer = transformer
    self._sample_shape = self._net.blobs['data'].data.shape
Example 7: load_nets
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
# Also requires: import numpy as np, import cPickle (Python 2; use pickle on Python 3)
def load_nets(args, cur_gpu):
    # Initialize the solver and the feature net. The RNN should be initialized
    # before the CNN, because the CNN's cuDNN conv layers may assume they can
    # use all available memory.
    caffe.set_mode_gpu()
    caffe.set_device(cur_gpu)
    solver = caffe.SGDSolver(args.solver)
    if args.snapshot:
        print("Restoring history from {}".format(args.snapshot))
        solver.restore(args.snapshot)
    rnn = solver.net
    if args.weights:
        rnn.copy_from(args.weights)
    feature_net = caffe.Net(args.feature_net, args.feature_param, caffe.TEST)

    # apply bbox regression normalization to the net weights
    with open(args.bbox_mean, 'rb') as f:
        bbox_means = cPickle.load(f)
    with open(args.bbox_std, 'rb') as f:
        bbox_stds = cPickle.load(f)

    feature_net.params['bbox_pred_vid'][0].data[...] = \
        feature_net.params['bbox_pred_vid'][0].data * bbox_stds[:, np.newaxis]
    feature_net.params['bbox_pred_vid'][1].data[...] = \
        feature_net.params['bbox_pred_vid'][1].data * bbox_stds + bbox_means
    return solver, feature_net, rnn, bbox_means, bbox_stds
Example 8: load_nets
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
def load_nets(args, cur_gpu):
    # Initialize the solver and the feature net. The RNN should be initialized
    # before the CNN, because the CNN's cuDNN conv layers may assume they can
    # use all available memory.
    caffe.set_mode_gpu()
    caffe.set_device(cur_gpu)
    solver = caffe.SGDSolver(args.solver)
    if args.snapshot:
        print("Restoring history from {}".format(args.snapshot))
        solver.restore(args.snapshot)
    net = solver.net
    if args.weights:
        print("Copying weights from {}".format(args.weights))
        net.copy_from(args.weights)
    return solver, net
Example 9: load_models
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
def load_models(args):
    # load the RNN model
    caffe.set_mode_gpu()
    if args.gpus is None:
        caffe.set_device(args.job_id - 1)
    else:
        assert args.job_id <= len(args.gpus)
        caffe.set_device(args.gpus[args.job_id - 1])
    if args.lstm_param != '':
        rnn_net = caffe.Net(args.lstm_def, args.lstm_param, caffe.TEST)
        print('Loaded RNN network from {:s}.'.format(args.lstm_def))
    else:
        rnn_net = caffe.Net(args.lstm_def, caffe.TEST)
        print('WARNING: dummy RNN network created.')

    # load the feature model
    feature_net = caffe.Net(args.def_file, args.param, caffe.TEST)
    print('Loaded feature network from {:s}.'.format(args.def_file))
    return feature_net, rnn_net
Example 10: solve
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
def solve(proto, snapshot, gpus, timing, uid, rank):
    caffe.set_mode_gpu()
    caffe.set_device(gpus[rank])
    caffe.set_solver_count(len(gpus))
    caffe.set_solver_rank(rank)
    caffe.set_multiprocess(True)

    solver = caffe.SGDSolver(proto)
    if snapshot and len(snapshot) != 0:
        solver.restore(snapshot)

    nccl = caffe.NCCL(solver, uid)
    nccl.bcast()

    if timing and rank == 0:
        time(solver, nccl)  # "time" here is a timing helper defined elsewhere in the same script
    else:
        solver.add_callback(nccl)

    if solver.param.layer_wise_reduce:
        solver.net.after_backward(nccl)
    solver.step(solver.param.max_iter)
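This function is written to run as one worker process per GPU. The driver sketch below is illustrative and closely follows the multi-GPU training example that ships with Caffe; it assumes a Caffe build with NCCL support (so that caffe.NCCL.new_uid is available) and reuses the solve function above.

from multiprocessing import Process

import caffe

def train_multi_gpu(proto, snapshot, gpus, timing=False):
    uid = caffe.NCCL.new_uid()   # shared NCCL id, passed to every rank for the broadcast
    caffe.init_log()
    procs = []
    for rank in range(len(gpus)):
        p = Process(target=solve, args=(proto, snapshot, gpus, timing, uid, rank))
        p.daemon = True
        p.start()
        procs.append(p)
    for p in procs:
        p.join()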
Example 11: __init__
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
def __init__(self, net_proto, net_weights, device_id, input_size=None):
    caffe.set_mode_gpu()
    caffe.set_device(device_id)
    self._net = caffe.Net(net_proto, net_weights, caffe.TEST)

    input_shape = self._net.blobs['data'].data.shape
    if input_size is not None:
        input_shape = input_shape[:2] + input_size

    transformer = caffe.io.Transformer({'data': input_shape})
    # if self._net.blobs['data'].data.shape[1] == 3:
    #     transformer.set_transpose('data', (2, 0, 1))  # move image channels to the outermost dimension
    #     transformer.set_mean('data', np.array([104, 117, 123]))  # subtract the dataset mean from each channel
    # else:
    #     pass  # non-RGB data does not need the transformer

    self._transformer = transformer
    self._sample_shape = self._net.blobs['data'].data.shape
Example 12: __init__
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
# Also requires: cfg (e.g. from fast_rcnn.config import cfg in py-faster-rcnn based projects)
def __init__(self, params):
    self.dimension = params['dimension']
    self.dataset = params['dataset']
    self.pooling = params['pooling']

    # Read image lists
    with open(params['query_list'], 'r') as f:
        self.query_names = f.read().splitlines()
    with open(params['frame_list'], 'r') as f:
        self.database_list = f.read().splitlines()

    # Parameters needed
    self.layer = params['layer']
    self.save_db_feats = params['database_feats']

    # Init network
    if params['gpu']:
        caffe.set_mode_gpu()
        caffe.set_device(0)
    else:
        caffe.set_mode_cpu()
    print("Extracting from:", params['net_proto'])
    cfg.TEST.HAS_RPN = True
    self.net = caffe.Net(params['net_proto'], params['net'], caffe.TEST)
Example 13: get_net
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
def get_net(caffemodel, deploy_file, use_gpu=True):
    """
    Returns an instance of caffe.Net

    Arguments:
    caffemodel -- path to a .caffemodel file
    deploy_file -- path to a .prototxt file

    Keyword arguments:
    use_gpu -- if True, use the GPU for inference
    """
    if use_gpu:
        caffe.set_mode_gpu()

    # load a new model
    return caffe.Net(deploy_file, caffemodel, caffe.TEST)
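A hypothetical call, with placeholder file names:

net = get_net('model.caffemodel', 'deploy.prototxt', use_gpu=True)
print(net.blobs['data'].data.shape)  # input blob shape as defined by the deploy file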
Example 14: loadDNN
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
# Also requires: import numpy as np, import scipy.io as sio, from skimage import color, from PIL import Image
def loadDNN(useGpu=False):
    global net, W_in, H_in, H_out, W_out, lm_lab_l_rs
    if useGpu:
        gpu_id = 0
        caffe.set_mode_gpu()
        caffe.set_device(gpu_id)
    net = caffe.Net('colorization_deploy_v0.prototxt', 'colorization_release_v0.caffemodel', caffe.TEST)
    print('\n done loading network! \n')

    (H_in, W_in) = net.blobs['data_l'].data.shape[2:]       # get input shape
    (H_out, W_out) = net.blobs['class8_ab'].data.shape[2:]  # get output shape
    net.blobs['Trecip'].data[...] = 6 / np.log(10)          # 1/T, set annealing temperature

    l_mean = sio.loadmat('ilsvrc_2012_mean.mat')
    lm = np.array(l_mean['mean_data'])
    lm = lm / np.max(lm)
    lm_lab = color.rgb2lab(lm)
    lm_lab_l = lm_lab[:, :, 0]
    lm_lab_l = lm_lab_l - np.mean(np.mean(lm_lab_l)) + 50
    lm_lab_l = Image.fromarray(lm_lab_l)
    lm_lab_l_rs = lm_lab_l.resize((W_in, H_in), Image.ANTIALIAS)
Example 15: main_image
# Required import: import caffe [as alias]
# Or: from caffe import set_mode_gpu [as alias]
# Also requires: import numpy as np, import cv2; Predict, GetAdvFuncArgs and adv_attacks are defined elsewhere in the same module
def main_image(args):
    """Adversarial example for an ImageNet classification model."""
    if args.gpu >= 0:
        caffe.set_mode_gpu()
        caffe.set_device(args.gpu)

    net = caffe.Net(args.model_def, args.model_weights, caffe.TEST)
    transformer = caffe.io.Transformer({'data': net.blobs['data'].data.shape})
    transformer.set_mean('data', args.mean)
    transformer.set_transpose('data', (2, 0, 1))
    transformer.set_channel_swap('data', (2, 1, 0))
    transformer.set_raw_scale('data', 255.0)

    image = caffe.io.load_image(args.image)
    image = transformer.preprocess('data', image)

    print("Prediction of original image")
    Predict(net, image, do_top_5=True, label_names=args.label_names)

    adv_func_args = GetAdvFuncArgs(args, net, image)
    adversarial_image_data, added_noise_data = adv_attacks[args.attack_method](**adv_func_args)

    print("Prediction of adversarial image")
    Predict(net, adversarial_image_data, do_top_5=True, label_names=args.label_names)

    adversarial_image = np.squeeze(adversarial_image_data[0, :, :, :])  # CxHxW
    adversarial_image = np.transpose(adversarial_image, [1, 2, 0])      # HxWxC
    cv2.imwrite("adversarial_example.png", adversarial_image + args.mean)
    added_noise = np.transpose(np.squeeze(added_noise_data[0, :, :, :]), [1, 2, 0])
    cv2.imwrite("perturbation.png", added_noise + args.mean)