本文整理汇总了Python中paddle.fluid.CUDAPlace方法的典型用法代码示例。如果您正苦于以下问题:Python fluid.CUDAPlace方法的具体用法?Python fluid.CUDAPlace怎么用?Python fluid.CUDAPlace使用的例子?那么恭喜您,这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类paddle.fluid的用法示例。
在下文中一共展示了fluid.CUDAPlace方法的12个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: set_device
# 需要导入模块: from paddle import fluid [as 别名]
# 或者: from paddle.fluid import CUDAPlace [as 别名]
def set_device(device):
    """Build the fluid execution place matching a device-type string.

    Args:
        device (str): specify device type, 'cpu' or 'gpu'.

    Returns:
        fluid.CUDAPlace or fluid.CPUPlace: Created GPU or CPU place.
    """
    assert isinstance(device, six.string_types) and device.lower() in ['cpu', 'gpu'], \
        "Expected device in ['cpu', 'gpu'], but got {}".format(device)
    # Fall back to CPU when CUDA support was not compiled in, even if 'gpu'
    # was requested.
    if device.lower() == 'gpu' and fluid.is_compiled_with_cuda():
        return fluid.CUDAPlace(ParallelEnv().dev_id)
    return fluid.CPUPlace()
示例2: _set_var
# 需要导入模块: from paddle import fluid [as 别名]
# 或者: from paddle.fluid import CUDAPlace [as 别名]
def _set_var(self, var, ndarray):
    """Copy ``ndarray`` into the tensor backing ``var`` in the global scope.

    The data is written back onto whichever place (CPU, CUDA-pinned, or a
    specific GPU) the tensor already occupies.
    """
    tensor = global_scope().find_var(var.name).get_tensor()
    tensor_place = tensor._place()
    if tensor_place.is_cpu_place():
        target_place = fluid.CPUPlace()
    elif tensor_place.is_cuda_pinned_place():
        target_place = fluid.CUDAPinnedPlace()
    else:
        # GPU tensor: recover the concrete device id via a core.Place copy.
        core_place = fluid.core.Place()
        core_place.set_place(tensor._place())
        target_place = fluid.CUDAPlace(core_place.gpu_device_id())
    tensor.set(ndarray, target_place)
示例3: test_batch
# 需要导入模块: from paddle import fluid [as 别名]
# 或者: from paddle.fluid import CUDAPlace [as 别名]
def test_batch(self, inputs):
    """Run one forward pass in eval mode and return outputs as numpy arrays.

    In multi-trainer GPU runs the per-rank outputs are all-gathered so every
    rank sees the full batch results.
    """
    super(Model, self.model).eval()
    self.mode = 'test'
    variables = [to_variable(item) for item in to_list(inputs)]
    outputs = self.model.forward(*variables)
    gather_needed = self._nranks > 1 and isinstance(self.model._place,
                                                    fluid.CUDAPlace)
    if gather_needed:
        outputs = [_all_gather(out, self._nranks) for out in to_list(outputs)]
    return [to_numpy(out) for out in to_list(outputs)]
示例4: export_deploy_model
# 需要导入模块: from paddle import fluid [as 别名]
# 或者: from paddle.fluid import CUDAPlace [as 别名]
def export_deploy_model(self):
    """Export a resnet18 inference model, reload it with an Executor, and
    check that the reloaded model reproduces the in-memory model's output."""
    model = resnet18()
    inputs = [Input([None, 3, 224, 224], 'float32', name='image')]
    model.prepare(inputs=inputs)

    # Save the inference model into a fresh temp directory.
    self.save_dir = tempfile.mkdtemp()
    if not os.path.exists(self.save_dir):
        os.makedirs(self.save_dir)
    model.save_inference_model(self.save_dir)

    if fluid.is_compiled_with_cuda():
        place = fluid.CUDAPlace(0)
    else:
        place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_program, feed_target_names, fetch_targets = \
        fluid.io.load_inference_model(dirname=self.save_dir, executor=exe)

    # Feed the same random image to both paths and compare numerically.
    tensor_img = np.array(
        np.random.random((1, 3, 224, 224)), dtype=np.float32)
    ori_results = model.test_batch(tensor_img)
    results = exe.run(inference_program,
                      feed={feed_target_names[0]: tensor_img},
                      fetch_list=fetch_targets)
    np.testing.assert_allclose(results, ori_results)
示例5: prepare_distributed_context
# 需要导入模块: from paddle import fluid [as 别名]
# 或者: from paddle.fluid import CUDAPlace [as 别名]
def prepare_distributed_context(place=None):
    """Initialize the NCCL communicator for multi-trainer (data parallel) runs.

    Args:
        place (fluid.CUDAPlace, optional): device to bind the communicator to.
            Defaults to this trainer's GPU. NOTE(review): the single-process
            default is CUDAPlace(0) without checking
            fluid.is_compiled_with_cuda() — confirm CUDA is available.

    Returns:
        ParallelStrategy or None: the populated strategy, or None when fewer
        than two trainers are running (nothing to initialize).

    Raises:
        AssertionError: if ``place`` is not a ``fluid.CUDAPlace``.
    """
    if place is None:
        place = fluid.CUDAPlace(ParallelEnv().dev_id) if ParallelEnv().nranks > 1 \
            else fluid.CUDAPlace(0)

    strategy = ParallelStrategy()
    strategy.nranks = ParallelEnv().nranks
    strategy.local_rank = ParallelEnv().local_rank
    strategy.trainer_endpoints = ParallelEnv().trainer_endpoints
    strategy.current_endpoint = ParallelEnv().current_endpoint

    if strategy.nranks < 2:
        return

    global _parallel_context_initialized

    if not _parallel_context_initialized and isinstance(place,
                                                        fluid.CUDAPlace):

        def _init_context():
            communicator_prog = fluid.Program()
            init_communicator(communicator_prog, strategy.local_rank,
                              strategy.nranks, True, strategy.current_endpoint,
                              strategy.trainer_endpoints)
            exe = fluid.Executor(place)
            exe.run(communicator_prog)

        # The communicator program must run in static-graph mode; leave
        # dygraph temporarily if it is enabled, then restore it.
        if fluid.in_dygraph_mode():
            fluid.disable_dygraph()
            _init_context()
            fluid.enable_dygraph(place)
        else:
            _init_context()

    elif not isinstance(place, fluid.CUDAPlace):
        # BUG FIX: the original wrote ``assert ("Only support CUDAPlace for
        # now.")``, which asserts a non-empty string and therefore never
        # fails. Raise the intended error — but only for non-CUDA places, so
        # a repeat call after successful initialization stays a no-op.
        raise AssertionError("Only support CUDAPlace for now.")

    _parallel_context_initialized = True
    return strategy
示例6: main
# 需要导入模块: from paddle import fluid [as 别名]
# 或者: from paddle.fluid import CUDAPlace [as 别名]
def main(_):
    """Build and train a CIFAR model described by the searched tokens/adjvec
    and dump its reward (accuracy) for the architecture-search controller."""
    image_shape = [3, image_size, image_size]
    files = os.listdir(FLAGS.data_path)
    # IDIOM FIX: `[each_item for each_item in files]` was an identity
    # comprehension; a plain copy is equivalent and clearer.
    names = list(files)
    np.random.shuffle(names)
    # First 9 shards are used for training; the last single shard for testing.
    # NOTE(review): `names[-1]` is a single filename (wrapped in a list when
    # passed to test() below) — confirm the data dir always has 10 shards.
    train_list = names[:9]
    test_list = names[-1]
    tokens, adjvec = utils.load_action(FLAGS.mid)
    model = CIFARModel(tokens, adjvec, image_shape)
    place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    startup = fluid.Program()
    train_prog = fluid.Program()
    test_prog = fluid.Program()
    train_vars = model.build_program(train_prog, startup, True)
    test_vars = model.build_program(test_prog, startup, False)
    exe.run(startup)
    train_accuracy, epoch_id = train(model, FLAGS.early_stop,
                                     train_prog, train_vars, exe, train_list)
    # Early-stopped runs report train accuracy as reward; completed runs
    # report held-out test accuracy.
    if epoch_id < FLAGS.early_stop:
        utils.dump_reward(FLAGS.mid, train_accuracy)
    else:
        test_accuracy = test(model, test_prog, test_vars, exe, [test_list])
        utils.dump_reward(FLAGS.mid, test_accuracy)
示例7: init_env
# 需要导入模块: from paddle import fluid [as 别名]
# 或者: from paddle.fluid import CUDAPlace [as 别名]
def init_env(self):
    """Select the execution place (GPU vs CPU) from ``self.params``, create
    the fluid Executor, and record device/trainer bookkeeping.

    Sets: self.num_trainers, self.trainer_id, self.is_local, self.gpu_id,
    self.dev_count, self.executor, and parallel-executor knobs.

    :return: None
    """
    # multi nodes: single-trainer defaults; distributed setup happens in the
    # prepare_*_env helpers below.
    self.num_trainers = 1
    self.trainer_id = 0
    self.is_local = self.params.get("PADDLE_IS_LOCAL", False)
    # cpu multi
    if self.params["PADDLE_USE_GPU"]:
        # FLAGS_selected_gpus lists this process's GPUs; the first one hosts
        # the executor place.
        gpus = os.getenv('FLAGS_selected_gpus', '0').split(",")
        self.gpu_id = int(gpus[0])
        run_place = fluid.CUDAPlace(int(gpus[0]))
        # Distributed runs only use the GPUs assigned to this trainer;
        # single-node runs use every visible CUDA device.
        if "is_distributed" in self.params and self.params["is_distributed"]:
            self.dev_count = len(gpus)
        else:
            self.dev_count = fluid.core.get_cuda_device_count()
        #logging.debug("gpu count %d" % self.dev_count)
        self.prepare_nccl2_env(self.is_local)
        logging.debug("finish prepare nccl2 env")
    else:
        run_place = fluid.CPUPlace()
        # CPU_NUM env var overrides the detected core count.
        self.dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
        self.prepare_cpumulti_env(self.is_local)
        self.gpu_id = None
        logging.debug("finish prepare cpu multi")
    self.executor = fluid.Executor(run_place)
    # parallel executor relevant config
    self.num_iteration_per_drop_scope = self.params.get("num_iteration_per_drop_scope", 1)
    self.use_fast_executor = self.params.get("use_fast_executor", False)
示例8: evaluate
# 需要导入模块: from paddle import fluid [as 别名]
# 或者: from paddle.fluid import CUDAPlace [as 别名]
def evaluate(logger, args):
    """evaluate a specific model using devset

    Loads the vocab and dev data, rebuilds the reading-comprehension model,
    restores persistable parameters from ``args.load_dir``, and runs
    validation, logging the BLEU/ROUGE result.
    """
    logger.info('Load data_set and vocab...')
    with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
        # NOTE(review): pickle.load — only load vocab files from a trusted
        # source; pickle executes arbitrary code on untrusted input.
        vocab = pickle.load(fin)
    logger.info('vocab size is {} and embed dim is {}'.format(vocab.size(
    ), vocab.embed_dim))
    brc_data = BRCDataset(
        args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.devset)
    logger.info('Converting text into ids...')
    brc_data.convert_to_ids(vocab)
    logger.info('Initialize the model...')
    # build model
    main_program = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_program, startup_prog):
        with fluid.unique_name.guard():
            avg_cost, s_probs, e_probs, match, feed_order = rc_model.rc_model(
                args.hidden_size, vocab, args)
            # initialize parameters
            if not args.use_gpu:
                place = fluid.CPUPlace()
                dev_count = int(
                    os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
            else:
                place = fluid.CUDAPlace(0)
                dev_count = fluid.core.get_cuda_device_count()
            exe = Executor(place)
            # Evaluation requires trained parameters; bail out when absent.
            if args.load_dir:
                logger.info('load from {}'.format(args.load_dir))
                fluid.io.load_persistables(
                    exe, args.load_dir, main_program=main_program)
            else:
                logger.error('No model file to load ...')
                return
            inference_program = main_program.clone(for_test=True)
            eval_loss, bleu_rouge = validation(
                inference_program, avg_cost, s_probs, e_probs, match, feed_order,
                place, dev_count, vocab, brc_data, logger, args)
            logger.info('Dev eval result: {}'.format(bleu_rouge))
            logger.info('Predicted answers are saved to {}'.format(
                os.path.join(args.result_dir)))
示例9: predict
# 需要导入模块: from paddle import fluid [as 别名]
# 或者: from paddle.fluid import CUDAPlace [as 别名]
def predict(logger, args):
    """do inference on the test dataset

    Mirrors ``evaluate`` but reads ``args.testset``: loads vocab and test
    data, rebuilds the model, restores parameters, and runs validation.
    """
    logger.info('Load data_set and vocab...')
    with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
        # NOTE(review): pickle.load — trusted vocab files only.
        vocab = pickle.load(fin)
    logger.info('vocab size is {} and embed dim is {}'.format(vocab.size(
    ), vocab.embed_dim))
    brc_data = BRCDataset(
        args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.testset)
    logger.info('Converting text into ids...')
    brc_data.convert_to_ids(vocab)
    logger.info('Initialize the model...')
    # build model
    main_program = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_program, startup_prog):
        with fluid.unique_name.guard():
            avg_cost, s_probs, e_probs, match, feed_order = rc_model.rc_model(
                args.hidden_size, vocab, args)
            # initialize parameters
            if not args.use_gpu:
                place = fluid.CPUPlace()
                dev_count = int(
                    os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
            else:
                place = fluid.CUDAPlace(0)
                dev_count = fluid.core.get_cuda_device_count()
            exe = Executor(place)
            # Inference requires trained parameters; bail out when absent.
            if args.load_dir:
                logger.info('load from {}'.format(args.load_dir))
                fluid.io.load_persistables(
                    exe, args.load_dir, main_program=main_program)
            else:
                logger.error('No model file to load ...')
                return
            inference_program = main_program.clone(for_test=True)
            eval_loss, bleu_rouge = validation(
                inference_program, avg_cost, s_probs, e_probs, match,
                feed_order, place, dev_count, vocab, brc_data, logger, args)
示例10: infer
# 需要导入模块: from paddle import fluid [as 别名]
# 或者: from paddle.fluid import CUDAPlace [as 别名]
def infer(use_cuda, params_dirname):
    """Score a fixed (user 1, movie 783) pair with a saved recommender model.

    Args:
        use_cuda (bool): run on CUDAPlace(0) when True, else CPUPlace.
        params_dirname (str): directory holding the saved inference model.

    Returns:
        tuple: (user feature vector, movie feature vector, list of movie ids).
    """
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    infer_movie_id = 783
    infer_movie_name = paddle.dataset.movielens.movie_info()[
        infer_movie_id].title
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()
    ids = []
    with fluid.scope_guard(inference_scope):
        [inferencer, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)
        # Use the first data from paddle.dataset.movielens.test() as input
        # Each feed is a level-1 LoD tensor with a single sequence.
        assert feed_target_names[0] == "user_id"
        user_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place)
        assert feed_target_names[1] == "gender_id"
        gender_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place)
        assert feed_target_names[2] == "age_id"
        age_id = fluid.create_lod_tensor([[np.int64(0)]], [[1]], place)
        assert feed_target_names[3] == "job_id"
        job_id = fluid.create_lod_tensor([[np.int64(10)]], [[1]], place)
        assert feed_target_names[4] == "movie_id"
        movie_id = fluid.create_lod_tensor([[np.int64(783)]], [[1]], place)
        assert feed_target_names[5] == "category_id"
        category_id = fluid.create_lod_tensor(
            [np.array([10, 8, 9], dtype='int64')], [[3]], place)
        assert feed_target_names[6] == "movie_title"
        movie_title = fluid.create_lod_tensor(
            [np.array([1069, 4140, 2923, 710, 988], dtype='int64')], [[5]], place)
        ids.append(infer_movie_id)
        # return_numpy=False keeps LoD tensors; convert explicitly below.
        results = exe.run(
            inferencer,
            feed={
                feed_target_names[0]: user_id,
                feed_target_names[1]: gender_id,
                feed_target_names[2]: age_id,
                feed_target_names[3]: job_id,
                feed_target_names[4]: movie_id,
                feed_target_names[5]: category_id,
                feed_target_names[6]: movie_title
            },
            fetch_list=fetch_targets,
            return_numpy=False)
        # fetch order: [predicted rating, user features, movie features]
        # (assumed from the indices used here — confirm against the saved model)
        predict_rating = np.array(results[0])
        usr_features = np.array(results[1])
        mov_features = np.array(results[2])
        print("Predict Rating of user id 1 on movie id 783 is " + str(predict_rating[0][0]))
        print("Actual Rating of user id 1 on movie id 783 is 4.")
    return usr_features[0], mov_features[0], ids
示例11: infer
# 需要导入模块: from paddle import fluid [as 别名]
# 或者: from paddle.fluid import CUDAPlace [as 别名]
def infer(use_cuda, params_dirname, gender, age, job, mov_id=783, category=(10, 8, 9), title=(1069, 4140, 2923, 710, 988)):
    """Compute user/movie feature vectors for one (user, movie) pair.

    Args:
        use_cuda (bool): run on CUDAPlace(0) when True, else CPUPlace.
        params_dirname (str): directory holding the saved inference model.
        gender (int): user gender id.
        age (int): user age-bucket id.
        job (int): user job id.
        mov_id (int): movie id to score.
        category (sequence of int): movie category ids.
        title (sequence of int): word ids of the movie title.

    Returns:
        tuple: (user feature vector, movie feature vector, mov_id).
    """
    # BUG FIX: `category` and `title` used mutable list defaults, which are
    # shared across calls (classic Python pitfall). Tuples are equivalent
    # here — only len() and np.array() are applied to them — and safe.
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        [inferencer, feed_target_names, fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)
        # Each feed is a level-1 LoD tensor with a single sequence.
        # NOTE(review): user_id is hard-coded to 1 regardless of the other
        # user features — confirm this is intended.
        assert feed_target_names[0] == "user_id"
        user_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place)
        assert feed_target_names[1] == "gender_id"
        gender_id = fluid.create_lod_tensor([[np.int64(gender)]], [[1]], place)
        assert feed_target_names[2] == "age_id"
        age_id = fluid.create_lod_tensor([[np.int64(age)]], [[1]], place)
        assert feed_target_names[3] == "job_id"
        job_id = fluid.create_lod_tensor([[np.int64(job)]], [[1]], place)
        assert feed_target_names[4] == "movie_id"
        movie_id = fluid.create_lod_tensor([[np.int64(mov_id)]], [[1]], place)
        assert feed_target_names[5] == "category_id"
        category_id = fluid.create_lod_tensor(
            [np.array(category, dtype='int64')], [[len(category)]], place)  # Animation, Children's, Musical
        assert feed_target_names[6] == "movie_title"
        movie_title = fluid.create_lod_tensor(
            [np.array(title, dtype='int64')], [[len(title)]], place)
        results = exe.run(
            inferencer,
            feed={
                feed_target_names[0]: user_id,
                feed_target_names[1]: gender_id,
                feed_target_names[2]: age_id,
                feed_target_names[3]: job_id,
                feed_target_names[4]: movie_id,
                feed_target_names[5]: category_id,
                feed_target_names[6]: movie_title
            },
            fetch_list=fetch_targets,
            return_numpy=False)
        # results[0] (predicted rating) is unused here; 1 and 2 are the
        # user and movie feature vectors.
        usr_features = np.array(results[1])
        mov_features = np.array(results[2])
    return usr_features[0], mov_features[0], mov_id
示例12: test
# 需要导入模块: from paddle import fluid [as 别名]
# 或者: from paddle.fluid import CUDAPlace [as 别名]
def test(model, args, im_shape):
    """Evaluate ``model`` on the test set, printing top-1/top-5 accuracy and
    dumping the concatenated probability outputs to ``args.dump_path``.

    Args:
        model: network object consumed by build_program.
        args: parsed flags (pretrained_model, report_freq, dump_path, ...).
        im_shape: input image shape passed to build_program.
    """
    test_py_reader, prob, acc_1, acc_5 = build_program(args, False, model,
                                                       im_shape)
    test_prog = fluid.default_main_program().clone(for_test=True)
    place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    exe.run(fluid.default_startup_program())

    # yapf: disable
    if args.pretrained_model:
        def if_exist(var):
            # Only restore variables that have a matching checkpoint file.
            return os.path.exists(os.path.join(args.pretrained_model, var.name))
        fluid.io.load_vars(exe, args.pretrained_model, predicate=if_exist)
    # yapf: enable

    exec_strategy = fluid.ExecutionStrategy()
    exec_strategy.num_threads = 1
    compile_program = fluid.compiler.CompiledProgram(
        test_prog).with_data_parallel(exec_strategy=exec_strategy)

    test_reader = reader.test10(args)
    test_py_reader.decorate_paddle_reader(test_reader)

    test_fetch_list = [prob, acc_1, acc_5]
    # `prob` is rebound from the fetch variable to an accumulator list here.
    prob = []
    top1 = utils.AvgrageMeter()
    top5 = utils.AvgrageMeter()
    test_py_reader.start()
    test_start_time = time.time()
    step_id = 0
    try:
        # py_reader feeds batches until it raises EOFException.
        while True:
            prev_test_start_time = test_start_time
            test_start_time = time.time()
            # NOTE(review): `test_prog` is passed as the second positional
            # argument of exe.run (the `feed` slot) — confirm this is
            # intentional for this fluid version; data comes from the
            # py_reader, so no feed dict should be needed.
            prob_v, acc_1_v, acc_5_v = exe.run(compile_program,
                                               test_prog,
                                               fetch_list=test_fetch_list)
            prob.append(list(np.array(prob_v)))
            # Accuracy meters are weighted by the batch size (first dim).
            top1.update(np.array(acc_1_v), np.array(prob_v).shape[0])
            top5.update(np.array(acc_5_v), np.array(prob_v).shape[0])
            if step_id % args.report_freq == 0:
                print('prob shape:', np.array(prob_v).shape)
                print("Step {}, acc_1 {}, acc_5 {}, time {}".format(
                    step_id,
                    np.array(acc_1_v),
                    np.array(acc_5_v), test_start_time - prev_test_start_time))
            step_id += 1
    except fluid.core.EOFException:
        # End of dataset: stop the reader and persist all probabilities.
        test_py_reader.reset()
    np.concatenate(prob).dump(args.dump_path)
    print("top1 {0}, top5 {1}".format(top1.avg, top5.avg))