This article collects typical usage examples of the paddle.fluid.global_scope method in Python. If you are wondering what fluid.global_scope does or how to call it in practice, the curated examples below should help. You can also explore further usage examples from the paddle.fluid module, in which the method is defined.
The following shows 7 code examples of fluid.global_scope, sorted by popularity by default.
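Before the examples, here is a minimal sketch of what they all have in common: after the startup program runs, every parameter lives in fluid.global_scope(), and find_var(...).get_tensor() exposes it as a tensor that numpy can read or overwrite. This assumes the static-graph paddle.fluid 1.x API; the weight name 'fc.w_0' is pinned via ParamAttr purely for illustration.

import numpy as np
import paddle.fluid as fluid

main_prog = fluid.Program()
startup_prog = fluid.Program()
with fluid.program_guard(main_prog, startup_prog):
    x = fluid.layers.data(name='x', shape=[4], dtype='float32')
    # pin the weight name so we can look it up in the global scope below
    y = fluid.layers.fc(input=x, size=2,
                        param_attr=fluid.ParamAttr(name='fc.w_0'))

exe = fluid.Executor(fluid.CPUPlace())
exe.run(startup_prog)  # parameters are created and initialized in the global scope

w = fluid.global_scope().find_var('fc.w_0').get_tensor()
print(np.array(w).shape)  # (4, 2)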
Example 1: load

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import global_scope [as alias]
def load(self, param_state_pairs, optim_state):
    if self._executor is None:
        executor = fluid.Executor(fluid.CPUPlace())._default_executor
    else:
        executor = self._executor._default_executor

    # restore parameter states
    fluid.core._create_loaded_parameter(
        [param for param, state in param_state_pairs],
        global_scope(), executor)
    for param, state in param_state_pairs:
        self._set_var(param, state)

    # restore optimizer states
    # FIXME what if a different optimizer is used?
    if not self.model._optimizer or not optim_state:
        return
    self._load_optimizer(optim_state, executor)
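A note on the _create_loaded_parameter call: parameters saved elsewhere do not yet exist in the current global scope, so find_var would return None for them. The (private) fluid.core._create_loaded_parameter helper appears to register each parameter in the scope first, so that the subsequent _set_var calls have a tensor to write into.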
Example 2: cast_fp32_to_fp16

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import global_scope [as alias]
def cast_fp32_to_fp16(exe, main_program):
    """Cast the float32 parameters of main_program to float16 in place."""
    logging.info("Cast parameters to float16 data format.")
    for param in main_program.global_block().all_parameters():
        if not param.name.endswith(".master"):
            # overwrite the parameter with its fp16 bits (stored as uint16)
            param_t = fluid.global_scope().find_var(param.name).get_tensor()
            data = np.array(param_t)
            if param.name.startswith("encoder_layer") \
                    and "layer_norm" not in param.name:
                logging.info(param.name)
                param_t.set(np.float16(data).view(np.uint16), exe.place)
            # keep the fp32 master copy, if one exists, for the optimizer
            master_param_var = fluid.global_scope().find_var(param.name +
                                                             ".master")
            if master_param_var is not None:
                master_param_var.get_tensor().set(data, exe.place)
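The np.float16(data).view(np.uint16) expression is a bit-level reinterpretation, not a second numeric cast: fluid versions of this era stored float16 tensors in uint16 buffers, so the fp16 bit pattern must be handed over unchanged. A self-contained numpy sketch of the round trip:

import numpy as np

data = np.array([1.0, -2.5, 3.14159], dtype=np.float32)
bits = np.float16(data).view(np.uint16)  # reinterpret the fp16 bits as uint16
print(bits.dtype, bits)                  # uint16, raw bit patterns
restored = bits.view(np.float16)         # the reverse view is lossless
print(restored)                          # original values at fp16 precision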
Example 3: extract_weights

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import global_scope [as alias]
def extract_weights(args):
    # build the ERNIE model, load pretrained parameters, then dump them
    print('extract weights start'.center(60, '='))
    startup_prog = fluid.Program()
    test_prog = fluid.Program()
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    args.max_seq_len = 512
    args.use_fp16 = False
    args.num_labels = 2
    args.loss_scaling = 1.0
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()
    with fluid.program_guard(test_prog, startup_prog):
        with fluid.unique_name.guard():
            create_model(
                args,
                pyreader_name='train',
                ernie_config=ernie_config)
    fluid.io.load_vars(exe, args.init_pretraining_params,
                       main_program=test_prog, predicate=if_exist)
    state_dict = collections.OrderedDict()
    weight_map = build_weight_map()
    for ernie_name, pytorch_name in weight_map.items():
        fluid_tensor = fluid.global_scope().find_var(ernie_name).get_tensor()
        fluid_array = np.array(fluid_tensor, dtype=np.float32)
        if 'w_0' in ernie_name:
            # fluid stores FC weights as (in, out); PyTorch expects (out, in)
            fluid_array = fluid_array.transpose()
        state_dict[pytorch_name] = fluid_array
        print(f'{ernie_name} -> {pytorch_name} {fluid_array.shape}')
    print('extract weights done!'.center(60, '='))
    return state_dict
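The returned state_dict maps PyTorch parameter names to plain numpy arrays. A plausible next step, not part of the source, is saving it as a PyTorch checkpoint; the function name and path below are hypothetical:

import collections
import torch

def save_as_pytorch_checkpoint(state_dict, path='ernie_converted.bin'):
    # torch.save expects tensors; from_numpy shares memory with each array
    torch_state = collections.OrderedDict(
        (name, torch.from_numpy(array)) for name, array in state_dict.items())
    torch.save(torch_state, path)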
Example 4: _set_var

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import global_scope [as alias]
def _set_var(self, var, ndarray):
    # write ndarray into the variable's tensor on whatever device it lives
    t = global_scope().find_var(var.name).get_tensor()
    p = t._place()
    if p.is_cpu_place():
        place = fluid.CPUPlace()
    elif p.is_cuda_pinned_place():
        place = fluid.CUDAPinnedPlace()
    else:
        p = fluid.core.Place()
        p.set_place(t._place())
        place = fluid.CUDAPlace(p.gpu_device_id())
    t.set(ndarray, place)
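A note on the place handling: the generic place returned by t._place() exposes predicates like is_cpu_place() but apparently not the GPU device id, so in the CUDA branch the helper copies it into a fluid.core.Place to call gpu_device_id(), then rebuilds the matching Python-side place object that t.set() accepts. The net effect is that the data lands back on exactly the device the tensor already occupied.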
Example 5: _compile_and_initialize

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import global_scope [as alias]
def _compile_and_initialize(self, prog, mode):
    compiled_prog = self._compiled_progs.get(mode, None)
    if compiled_prog is not None:
        return compiled_prog

    assert self.model._place is not None, \
        "device is not set, please call `model.prepare()` first"

    place = self.model._place

    # XXX *ALL WEIGHTS* should be initialized upon model construction
    # even if `forward()` may run different code path for different mode
    # therefore startup program only needs to run once
    if self._executor is None:
        self._executor = fluid.Executor(place)
        # XXX incremental initialization
        uninitialized = []
        for var_py in self._startup_prog.list_vars():
            var = fluid.global_scope().find_var(var_py.name)
            if not var_py.name.startswith('nccl_id') and var and \
                    var.get_tensor()._is_initialized():
                continue
            uninitialized.append(var_py)
        if uninitialized:
            startup_prog = self._startup_prog._prune(uninitialized)
            self._executor.run(startup_prog)

    if self._nranks < 2:
        compiled_prog = fluid.CompiledProgram(prog)
    else:
        compiled_prog = prog

    self._compiled_progs[mode] = compiled_prog
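Two design points worth noting in this example: the startup program is pruned (via the private _prune) down to the variables whose global-scope tensors are not yet initialized, so calling the method for several modes never re-runs initializers over already-trained weights; and under multi-process training (self._nranks >= 2) the program is returned uncompiled, presumably because the distributed code path compiles it elsewhere.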
Example 6: cast_fp32_to_fp16

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import global_scope [as alias]
def cast_fp32_to_fp16(exe, main_program):
    print("Cast parameters to float16 data format.")
    for param in main_program.global_block().all_parameters():
        if not param.name.endswith(".master"):
            param_t = fluid.global_scope().find_var(param.name).get_tensor()
            data = np.array(param_t)
            if param.name.find("layer_norm") == -1:
                # layer_norm parameters are kept in fp32 for numeric stability
                param_t.set(np.float16(data).view(np.uint16), exe.place)
            master_param_var = fluid.global_scope().find_var(param.name +
                                                             ".master")
            if master_param_var is not None:
                master_param_var.get_tensor().set(data, exe.place)
Example 7: set_zero

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import global_scope [as alias]
def set_zero(var_name):
    # fill the named int64 variable in the global scope with zeros
    param = fluid.global_scope().var(var_name).get_tensor()
    param_array = np.zeros(param._get_dims()).astype("int64")
    param.set(param_array, fluid.CPUPlace())
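A function like set_zero is typically used to reset int64 metric accumulators between evaluation passes, so counts from one pass do not leak into the next. A hedged usage sketch; the accumulator names below are hypothetical placeholders:

# hypothetical accumulator names; substitute the ones your metric creates
for acc_name in ['num_infer_chunks', 'num_label_chunks', 'num_correct_chunks']:
    set_zero(acc_name)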