This article collects typical usage examples of the Python method paddle.fluid.CPUPlace. If you have been wondering what fluid.CPUPlace does, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples of the containing module, paddle.fluid.
The following presents 15 code examples of fluid.CPUPlace, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
Example 1: load
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def load(self, param_state_pairs, optim_state):
    if self._executor is None:
        executor = fluid.Executor(fluid.CPUPlace())._default_executor
    else:
        executor = self._executor._default_executor

    # restore parameter states
    fluid.core._create_loaded_parameter(
        [param for param, state in param_state_pairs],
        global_scope(), executor)
    for param, state in param_state_pairs:
        self._set_var(param, state)

    # restore optimizer states
    # FIXME what if a different optimizer is used?
    if not self.model._optimizer or not optim_state:
        return
    self._load_optimizer(optim_state, executor)
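A hedged sketch of how this method might be driven. Everything here is assumed: the checkpoint file name and format, and the `adapter` object standing in for whatever instance owns load(); only the pairing of parameters with saved arrays mirrors the loop above.

import numpy as np
from paddle import fluid

state = np.load('checkpoint.npz')  # assumed: one ndarray per parameter, keyed by name
program = fluid.default_main_program()
pairs = [(p, state[p.name]) for p in program.all_parameters()]
adapter.load(pairs, optim_state=None)  # `adapter` is hypothetical; it hosts load()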
Example 2: infer
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def infer(save_dirname=None):
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        [inference_program, feed_target_names, fetch_targets] = (
            fluid.io.load_inference_model(save_dirname, exe))
        test_reader = paddle.batch(paddle.dataset.uci_housing.test(), batch_size=20)
        test_data = six.next(test_reader())
        test_feat = numpy.array(list(map(lambda x: x[0], test_data))).astype("float32")
        test_label = numpy.array(list(map(lambda x: x[1], test_data))).astype("float32")
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: numpy.array(test_feat)},
                          fetch_list=fetch_targets)
        print("infer results: ", results[0])
        print("ground truth: ", test_label)

# Run train and infer.
Example 3: set_device
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def set_device(device):
    """
    Create the fluid place matching the requested device type.

    Args:
        device (str): specify device type, 'cpu' or 'gpu'.

    Returns:
        fluid.CUDAPlace or fluid.CPUPlace: Created GPU or CPU place.
    """
    assert isinstance(device, six.string_types) and device.lower() in ['cpu', 'gpu'], \
        "Expected device in ['cpu', 'gpu'], but got {}".format(device)

    place = fluid.CUDAPlace(ParallelEnv().dev_id) \
        if device.lower() == 'gpu' and fluid.is_compiled_with_cuda() \
        else fluid.CPUPlace()
    return place
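Typical usage is to pick the place once and hand it to an executor. A minimal sketch (the function itself guarantees the CPU fallback on CUDA-less builds):

place = set_device('gpu')  # returns fluid.CPUPlace() if CUDA is unavailable
exe = fluid.Executor(place)
exe.run(fluid.default_startup_program())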
Example 4: test_main
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def test_main(self):
    with fluid.dygraph.guard(fluid.CPUPlace()):
        acc = Accuracy(topk=self.topk, name=self.name)
        for i in range(10):
            label, pred = self.random_pred_label()
            label_var = to_variable(label)
            pred_var = to_variable(pred)
            state = to_list(acc.add_metric_op(pred_var, label_var))
            acc.update(*[s.numpy() for s in state])
            res_m = acc.accumulate()
            res_f = accuracy(pred, label, self.topk)
            assert np.all(np.isclose(np.array(res_m), np.array(res_f), rtol=1e-3)), \
                "Accuracy precision error: {} != {}".format(res_m, res_f)
            acc.reset()
            assert np.sum(acc.total) == 0
            assert np.sum(acc.count) == 0
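The test above exercises the full metric lifecycle. A condensed sketch of that contract, under the same assumptions about the Accuracy/to_list/to_variable API the test imports (states returned by add_metric_op feed update; accumulate reports; reset clears):

with fluid.dygraph.guard(fluid.CPUPlace()):
    acc = Accuracy(topk=(1,))
    pred = np.random.rand(8, 10).astype('float32')      # 8 samples, 10 classes
    label = np.random.randint(0, 10, (8, 1)).astype('int64')
    states = to_list(acc.add_metric_op(to_variable(pred), to_variable(label)))
    acc.update(*[s.numpy() for s in states])  # fold per-batch counts into totals
    print(acc.accumulate())                   # running top-1 accuracy
    acc.reset()                               # zero totals before the next epoch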
Example 5: train
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def train(save_dirname):
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_predict, avg_cost = net(x, y)
    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
    sgd_optimizer.minimize(avg_cost)
    train_reader = paddle.batch(
        paddle.reader.shuffle(paddle.dataset.uci_housing.train(), buf_size=500),
        batch_size=20)
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    def train_loop(main_program):
        feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
        exe.run(fluid.default_startup_program())

        PASS_NUM = 1000
        for pass_id in range(PASS_NUM):
            total_loss_pass = 0
            for data in train_reader():
                avg_loss_value, = exe.run(
                    main_program, feed=feeder.feed(data), fetch_list=[avg_cost])
                total_loss_pass += avg_loss_value
                if avg_loss_value < 5.0:
                    if save_dirname is not None:
                        fluid.io.save_inference_model(
                            save_dirname, ['x'], [y_predict], exe)
                    return
            print("Pass %d, total avg cost = %f" % (pass_id, total_loss_pass))

    train_loop(fluid.default_main_program())

# Infer by using provided test data.
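Examples 2 and 5 appear to come from the same fit-a-line script, and their trailing comments suggest a driver along these lines; the save path below is an assumption, not taken from the source:

if __name__ == '__main__':
    save_dirname = "fit_a_line.inference.model"  # assumed output directory
    train(save_dirname)
    infer(save_dirname)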
Example 6: extract_weights
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def extract_weights(args):
    # add ERNIE to environment
    print('extract weights start'.center(60, '='))
    startup_prog = fluid.Program()
    test_prog = fluid.Program()
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    args.max_seq_len = 512
    args.use_fp16 = False
    args.num_labels = 2
    args.loss_scaling = 1.0
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()
    with fluid.program_guard(test_prog, startup_prog):
        with fluid.unique_name.guard():
            create_model(
                args,
                pyreader_name='train',
                ernie_config=ernie_config)
    fluid.io.load_vars(exe, args.init_pretraining_params, main_program=test_prog, predicate=if_exist)
    state_dict = collections.OrderedDict()
    weight_map = build_weight_map()
    for ernie_name, pytorch_name in weight_map.items():
        fluid_tensor = fluid.global_scope().find_var(ernie_name).get_tensor()
        fluid_array = np.array(fluid_tensor, dtype=np.float32)
        if 'w_0' in ernie_name:
            fluid_array = fluid_array.transpose()
        state_dict[pytorch_name] = fluid_array
        print(f'{ernie_name} -> {pytorch_name} {fluid_array.shape}')
    print('extract weights done!'.center(60, '='))
    return state_dict
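The returned OrderedDict maps PyTorch parameter names to NumPy arrays, so a plausible follow-up converts and saves it for PyTorch. A sketch, assuming torch is installed; the output file name is illustrative:

import torch

state_dict = extract_weights(args)
torch_state = {k: torch.from_numpy(v) for k, v in state_dict.items()}
torch.save(torch_state, 'ernie_pytorch.bin')  # assumed output file name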
Example 7: __init__
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def __init__(self, model_path):
    # load model
    self.place = fluid.CPUPlace()
    self.exe = fluid.Executor(self.place)
    [self.inference_program, self.feed_target_names, self.fetch_targets] = \
        fluid.io.load_inference_model(dirname=model_path, executor=self.exe)

    # load vocabulary
    self.vocabulary = read_vocabulary(os.path.join(model_path, 'data/vocabulary.txt'))
    self.tag = read_vocabulary(os.path.join(model_path, 'data/tags.txt'))

    # prepare tag set decoder
    self.decoder = BILUOSequenceEncoderDecoder()
Example 8: _set_var
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def _set_var(self, var, ndarray):
    t = global_scope().find_var(var.name).get_tensor()
    p = t._place()
    if p.is_cpu_place():
        place = fluid.CPUPlace()
    elif p.is_cuda_pinned_place():
        place = fluid.CUDAPinnedPlace()
    else:
        p = fluid.core.Place()
        p.set_place(t._place())
        place = fluid.CUDAPlace(p.gpu_device_id())
    t.set(ndarray, place)
Example 9: export_deploy_model
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def export_deploy_model(self):
    model = resnet18()
    inputs = [Input([None, 3, 224, 224], 'float32', name='image')]
    model.prepare(inputs=inputs)

    self.save_dir = tempfile.mkdtemp()
    if not os.path.exists(self.save_dir):
        os.makedirs(self.save_dir)
    model.save_inference_model(self.save_dir)

    place = fluid.CPUPlace() if not fluid.is_compiled_with_cuda() \
        else fluid.CUDAPlace(0)
    exe = fluid.Executor(place)
    [inference_program, feed_target_names, fetch_targets] = (
        fluid.io.load_inference_model(
            dirname=self.save_dir, executor=exe))

    tensor_img = np.array(
        np.random.random((1, 3, 224, 224)), dtype=np.float32)
    ori_results = model.test_batch(tensor_img)
    results = exe.run(inference_program,
                      feed={feed_target_names[0]: tensor_img},
                      fetch_list=fetch_targets)
    np.testing.assert_allclose(results, ori_results)
Example 10: set_zero
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def set_zero(var_name):
    param = fluid.global_scope().var(var_name).get_tensor()
    param_array = np.zeros(param._get_dims()).astype("int64")
    param.set(param_array, fluid.CPUPlace())
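Helpers like this are commonly used to zero int64 accumulator variables (e.g. metric counters) between evaluation passes. A hedged usage sketch; the variable names below are hypothetical, any int64 variable living in the global scope would work:

for name in ['num_infer_chunks', 'num_label_chunks', 'num_correct_chunks']:
    set_zero(name)  # hypothetical chunk-eval accumulator names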
Example 11: init_env
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def init_env(self):
    """
    Initialize the run place, executor, and device count.
    """
    # multi nodes
    self.num_trainers = 1
    self.trainer_id = 0
    self.is_local = self.params.get("PADDLE_IS_LOCAL", False)

    if self.params["PADDLE_USE_GPU"]:
        gpus = os.getenv('FLAGS_selected_gpus', '0').split(",")
        self.gpu_id = int(gpus[0])
        run_place = fluid.CUDAPlace(int(gpus[0]))
        if "is_distributed" in self.params and self.params["is_distributed"]:
            self.dev_count = len(gpus)
        else:
            self.dev_count = fluid.core.get_cuda_device_count()
        # logging.debug("gpu count %d" % self.dev_count)
        self.prepare_nccl2_env(self.is_local)
        logging.debug("finish prepare nccl2 env")
    else:
        # cpu multi
        run_place = fluid.CPUPlace()
        self.dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
        self.prepare_cpumulti_env(self.is_local)
        self.gpu_id = None
        logging.debug("finish prepare cpu multi")

    self.executor = fluid.Executor(run_place)

    # parallel executor relevant config
    self.num_iteration_per_drop_scope = self.params.get("num_iteration_per_drop_scope", 1)
    self.use_fast_executor = self.params.get("use_fast_executor", False)
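On the CPU path, the device count comes from the CPU_NUM environment variable, a standard fluid knob. A minimal sketch of pinning it before initialization; the Trainer class name is an assumption standing in for whatever object owns init_env:

import os

os.environ['CPU_NUM'] = '4'   # expose 4 CPU "devices" for data-parallel runs
trainer = Trainer(params)     # hypothetical owner class of init_env
trainer.init_env()
print(trainer.dev_count)      # 4, given the setting above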
Example 12: evaluate
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def evaluate(logger, args):
    """evaluate a specific model using devset"""
    logger.info('Load data_set and vocab...')
    with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
        vocab = pickle.load(fin)
    logger.info('vocab size is {} and embed dim is {}'.format(
        vocab.size(), vocab.embed_dim))
    brc_data = BRCDataset(
        args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.devset)
    logger.info('Converting text into ids...')
    brc_data.convert_to_ids(vocab)
    logger.info('Initialize the model...')

    # build model
    main_program = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_program, startup_prog):
        with fluid.unique_name.guard():
            avg_cost, s_probs, e_probs, match, feed_order = rc_model.rc_model(
                args.hidden_size, vocab, args)

            # initialize parameters
            if not args.use_gpu:
                place = fluid.CPUPlace()
                dev_count = int(
                    os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
            else:
                place = fluid.CUDAPlace(0)
                dev_count = fluid.core.get_cuda_device_count()

            exe = Executor(place)
            if args.load_dir:
                logger.info('load from {}'.format(args.load_dir))
                fluid.io.load_persistables(
                    exe, args.load_dir, main_program=main_program)
            else:
                logger.error('No model file to load ...')
                return

            inference_program = main_program.clone(for_test=True)
            eval_loss, bleu_rouge = validation(
                inference_program, avg_cost, s_probs, e_probs, match, feed_order,
                place, dev_count, vocab, brc_data, logger, args)
            logger.info('Dev eval result: {}'.format(bleu_rouge))
            logger.info('Predicted answers are saved to {}'.format(
                os.path.join(args.result_dir)))
Example 13: predict
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def predict(logger, args):
    """do inference on the test dataset"""
    logger.info('Load data_set and vocab...')
    with open(os.path.join(args.vocab_dir, 'vocab.data'), 'rb') as fin:
        vocab = pickle.load(fin)
    logger.info('vocab size is {} and embed dim is {}'.format(
        vocab.size(), vocab.embed_dim))
    brc_data = BRCDataset(
        args.max_p_num, args.max_p_len, args.max_q_len, dev_files=args.testset)
    logger.info('Converting text into ids...')
    brc_data.convert_to_ids(vocab)
    logger.info('Initialize the model...')

    # build model
    main_program = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_program, startup_prog):
        with fluid.unique_name.guard():
            avg_cost, s_probs, e_probs, match, feed_order = rc_model.rc_model(
                args.hidden_size, vocab, args)

            # initialize parameters
            if not args.use_gpu:
                place = fluid.CPUPlace()
                dev_count = int(
                    os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
            else:
                place = fluid.CUDAPlace(0)
                dev_count = fluid.core.get_cuda_device_count()

            exe = Executor(place)
            if args.load_dir:
                logger.info('load from {}'.format(args.load_dir))
                fluid.io.load_persistables(
                    exe, args.load_dir, main_program=main_program)
            else:
                logger.error('No model file to load ...')
                return

            inference_program = main_program.clone(for_test=True)
            eval_loss, bleu_rouge = validation(
                inference_program, avg_cost, s_probs, e_probs, match,
                feed_order, place, dev_count, vocab, brc_data, logger, args)
Example 14: infer
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def infer(use_cuda, params_dirname):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    infer_movie_id = 783
    infer_movie_name = paddle.dataset.movielens.movie_info()[infer_movie_id].title
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()
    ids = []
    with fluid.scope_guard(inference_scope):
        [inferencer, feed_target_names,
         fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)

        # Use the first data from paddle.dataset.movielens.test() as input
        assert feed_target_names[0] == "user_id"
        user_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place)
        assert feed_target_names[1] == "gender_id"
        gender_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place)
        assert feed_target_names[2] == "age_id"
        age_id = fluid.create_lod_tensor([[np.int64(0)]], [[1]], place)
        assert feed_target_names[3] == "job_id"
        job_id = fluid.create_lod_tensor([[np.int64(10)]], [[1]], place)
        assert feed_target_names[4] == "movie_id"
        movie_id = fluid.create_lod_tensor([[np.int64(783)]], [[1]], place)
        assert feed_target_names[5] == "category_id"
        category_id = fluid.create_lod_tensor(
            [np.array([10, 8, 9], dtype='int64')], [[3]], place)
        assert feed_target_names[6] == "movie_title"
        movie_title = fluid.create_lod_tensor(
            [np.array([1069, 4140, 2923, 710, 988], dtype='int64')], [[5]], place)

        ids.append(infer_movie_id)
        results = exe.run(
            inferencer,
            feed={
                feed_target_names[0]: user_id,
                feed_target_names[1]: gender_id,
                feed_target_names[2]: age_id,
                feed_target_names[3]: job_id,
                feed_target_names[4]: movie_id,
                feed_target_names[5]: category_id,
                feed_target_names[6]: movie_title
            },
            fetch_list=fetch_targets,
            return_numpy=False)

        predict_rating = np.array(results[0])
        usr_features = np.array(results[1])
        mov_features = np.array(results[2])
        print("Predict Rating of user id 1 on movie id 783 is " + str(predict_rating[0][0]))
        print("Actual Rating of user id 1 on movie id 783 is 4.")

    return usr_features[0], mov_features[0], ids
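For reference, each feed above is a level-of-detail (LoD) tensor: the first argument to fluid.create_lod_tensor holds the sequence data and the second holds the sequence lengths. A minimal standalone sketch using the same API:

import numpy as np
from paddle import fluid

place = fluid.CPUPlace()
# One sequence of length 3 holding category ids 10, 8 and 9.
category_id = fluid.create_lod_tensor(
    [np.array([10, 8, 9], dtype='int64')], [[3]], place)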
Example 15: infer
# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import CPUPlace [as alias]
def infer(use_cuda, params_dirname, gender, age, job, mov_id=783,
          category=[10, 8, 9], title=[1069, 4140, 2923, 710, 988]):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        [inferencer, feed_target_names, fetch_targets] = \
            fluid.io.load_inference_model(params_dirname, exe)
        assert feed_target_names[0] == "user_id"
        user_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place)
        assert feed_target_names[1] == "gender_id"
        gender_id = fluid.create_lod_tensor([[np.int64(gender)]], [[1]], place)
        assert feed_target_names[2] == "age_id"
        age_id = fluid.create_lod_tensor([[np.int64(age)]], [[1]], place)
        assert feed_target_names[3] == "job_id"
        job_id = fluid.create_lod_tensor([[np.int64(job)]], [[1]], place)
        assert feed_target_names[4] == "movie_id"
        movie_id = fluid.create_lod_tensor([[np.int64(mov_id)]], [[1]], place)
        assert feed_target_names[5] == "category_id"
        category_id = fluid.create_lod_tensor(
            [np.array(category, dtype='int64')], [[len(category)]], place)  # e.g. Animation, Children's, Musical
        assert feed_target_names[6] == "movie_title"
        movie_title = fluid.create_lod_tensor(
            [np.array(title, dtype='int64')], [[len(title)]], place)

        results = exe.run(
            inferencer,
            feed={
                feed_target_names[0]: user_id,
                feed_target_names[1]: gender_id,
                feed_target_names[2]: age_id,
                feed_target_names[3]: job_id,
                feed_target_names[4]: movie_id,
                feed_target_names[5]: category_id,
                feed_target_names[6]: movie_title
            },
            fetch_list=fetch_targets,
            return_numpy=False)
        # predict_rating = np.array(results[0])
        usr_features = np.array(results[1])
        mov_features = np.array(results[2])

    return usr_features[0], mov_features[0], mov_id
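A hedged invocation of this parameterized variant, with illustrative MovieLens ids matching Example 14 (gender 1, age bucket 0, job 10); the save directory is an assumption:

usr_feat, mov_feat, mov_id = infer(
    use_cuda=False,
    params_dirname="recommender.inference.model",  # assumed save path
    gender=1, age=0, job=10)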