Python fluid.Executor Code Examples

This article collects typical usage examples of paddle.fluid.Executor in Python. If you are wondering what fluid.Executor does, how to call it, or what real-world usage looks like, the curated code examples below should help. You can also explore further usage examples from the paddle.fluid module.


The following presents 15 code examples of fluid.Executor, sorted by popularity by default.
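Before the project-specific examples, here is a minimal, self-contained sketch of the typical fluid.Executor workflow, written for this article rather than taken from any of the projects below: define a program, create an Executor on a place, run the startup program once to initialize parameters, then run the main program with a feed dict and a fetch list. The tiny fully-connected network and the random input are purely illustrative.

import numpy as np
import paddle.fluid as fluid

def minimal_executor_demo():
    main_prog = fluid.Program()
    startup_prog = fluid.Program()
    with fluid.program_guard(main_prog, startup_prog):
        # toy network: a single fully-connected layer over a 4-dim input
        x = fluid.data(name='x', shape=[None, 4], dtype='float32')
        y = fluid.layers.fc(input=x, size=1)

    place = fluid.CPUPlace()  # or fluid.CUDAPlace(0) when CUDA is available
    exe = fluid.Executor(place)
    exe.run(startup_prog)     # run the startup program once to initialize parameters

    feed_x = np.random.random((8, 4)).astype('float32')
    out, = exe.run(main_prog, feed={'x': feed_x}, fetch_list=[y])
    return out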

Example 1: infer

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def infer(save_dirname=None):
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()
    with fluid.scope_guard(inference_scope):
        [inference_program, feed_target_names, fetch_targets] = (
            fluid.io.load_inference_model(save_dirname, exe))
        test_reader = paddle.batch(paddle.dataset.uci_housing.test(), batch_size=20)

        test_data = six.next(test_reader())
        test_feat = numpy.array(list(map(lambda x: x[0], test_data))).astype("float32")
        test_label = numpy.array(list(map(lambda x: x[1], test_data))).astype("float32")

        results = exe.run(inference_program,
                          feed={feed_target_names[0]: numpy.array(test_feat)},
                          fetch_list=fetch_targets)
        print("infer results: ", results[0])
        print("ground truth: ", test_label)


# Run train and infer. 
Author: yeyupiaoling, Project: LearnPaddle2, Lines of code: 23, Source file: test_paddle.py

Example 2: load

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def load(self, param_state_pairs, optim_state):
        if self._executor is None:
            executor = fluid.Executor(fluid.CPUPlace())._default_executor
        else:
            executor = self._executor._default_executor

        # restore parameter states
        fluid.core._create_loaded_parameter(
            [param for param, state in param_state_pairs],
            global_scope(), executor)
        for param, state in param_state_pairs:
            self._set_var(param, state)

        # restore optimizer states
        # FIXME what if a different optimizer is used?
        if not self.model._optimizer or not optim_state:
            return
        self._load_optimizer(optim_state, executor) 
Author: PaddlePaddle, Project: hapi, Lines of code: 20, Source file: model.py

Example 3: train

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def train(save_dirname):
    x = fluid.layers.data(name='x', shape=[13], dtype='float32')
    y = fluid.layers.data(name='y', shape=[1], dtype='float32')
    y_predict, avg_cost = net(x, y)
    sgd_optimizer = fluid.optimizer.SGD(learning_rate=0.001)
    sgd_optimizer.minimize(avg_cost)
    train_reader = paddle.batch(
        paddle.reader.shuffle(paddle.dataset.uci_housing.train(), buf_size=500),
        batch_size=20)
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)

    def train_loop(main_program):
        feeder = fluid.DataFeeder(place=place, feed_list=[x, y])
        exe.run(fluid.default_startup_program())

        PASS_NUM = 1000
        for pass_id in range(PASS_NUM):
            total_loss_pass = 0
            for data in train_reader():
                avg_loss_value, = exe.run(
                    main_program, feed=feeder.feed(data), fetch_list=[avg_cost])
                total_loss_pass += avg_loss_value
                if avg_loss_value < 5.0:
                    if save_dirname is not None:
                        fluid.io.save_inference_model(
                            save_dirname, ['x'], [y_predict], exe)
                    return
            print("Pass %d, total avg cost = %f" % (pass_id, total_loss_pass))

    train_loop(fluid.default_main_program())


# Infer by using provided test data. 
Author: yeyupiaoling, Project: LearnPaddle2, Lines of code: 36, Source file: test_paddle.py

Example 4: extract_weights

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def extract_weights(args):
    # add ERNIE to environment
    print('extract weights start'.center(60, '='))
    startup_prog = fluid.Program()
    test_prog = fluid.Program()
    place = fluid.CPUPlace()
    exe = fluid.Executor(place)
    exe.run(startup_prog)
    args.max_seq_len = 512
    args.use_fp16 = False
    args.num_labels = 2
    args.loss_scaling = 1.0
    ernie_config = ErnieConfig(args.ernie_config_path)
    ernie_config.print_config()
    with fluid.program_guard(test_prog, startup_prog):
        with fluid.unique_name.guard():
            create_model(
                args,
                pyreader_name='train',
                ernie_config=ernie_config)
    fluid.io.load_vars(exe, args.init_pretraining_params, main_program=test_prog, predicate=if_exist)
    state_dict = collections.OrderedDict()
    weight_map = build_weight_map()
    for ernie_name, pytorch_name in weight_map.items():
        fluid_tensor = fluid.global_scope().find_var(ernie_name).get_tensor()
        fluid_array = np.array(fluid_tensor, dtype=np.float32)
        if 'w_0' in ernie_name:
            fluid_array = fluid_array.transpose()
        state_dict[pytorch_name] = fluid_array
        print(f'{ernie_name} -> {pytorch_name} {fluid_array.shape}')
    print('extract weights done!'.center(60, '='))
    return state_dict 
Author: nghuyong, Project: ERNIE-Pytorch, Lines of code: 34, Source file: convert_ernie_to_pytorch.py

Example 5: __init__

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def __init__(self, model_path):
        # load model
        self.place = fluid.CPUPlace()
        self.exe = fluid.Executor(self.place)
        [self.inference_program, self.feed_target_names, self.fetch_targets] = fluid.io.load_inference_model(dirname=model_path, executor=self.exe)

        # load vocabulary
        self.vocabulary = read_vocabulary(os.path.join(model_path, 'data/vocabulary.txt'))
        self.tag = read_vocabulary(os.path.join(model_path, 'data/tags.txt'))

        # prepare tag set decoder
        self.decoder = BILUOSequenceEncoderDecoder() 
Author: howl-anderson, Project: seq2annotation, Lines of code: 14, Source file: paddle_inference.py

Example 6: _compile_and_initialize

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def _compile_and_initialize(self, prog, mode):
        compiled_prog = self._compiled_progs.get(mode, None)
        if compiled_prog is not None:
            return compiled_prog

        assert self.model._place is not None, \
            "device is not set, please call `model.prepare()` first"

        place = self.model._place

        # XXX *ALL WEIGHTS* should be initialized upon model construction
        # even if `forward()` may run different code path for different mode
        # therefore startup program only needs to run once
        if self._executor is None:
            self._executor = fluid.Executor(place)
            # XXX incremental initialization
            uninitialized = []
            for var_py in self._startup_prog.list_vars():
                var = fluid.global_scope().find_var(var_py.name)
                if not var_py.name.startswith('nccl_id') and var and \
                        var.get_tensor()._is_initialized():
                    continue

                uninitialized.append(var_py)
            if uninitialized:
                startup_prog = self._startup_prog._prune(uninitialized)
                self._executor.run(startup_prog)

        if self._nranks < 2:
            compiled_prog = fluid.CompiledProgram(prog)
        else:
            compiled_prog = prog

        self._compiled_progs[mode] = compiled_prog 
Author: PaddlePaddle, Project: hapi, Lines of code: 36, Source file: model.py

Example 7: export_deploy_model

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def export_deploy_model(self):
        model = resnet18()

        inputs = [Input([None, 3, 224, 224], 'float32', name='image')]

        model.prepare(inputs=inputs)

        self.save_dir = tempfile.mkdtemp()
        if not os.path.exists(self.save_dir):
            os.makedirs(self.save_dir)

        model.save_inference_model(self.save_dir)

        place = fluid.CPUPlace() if not fluid.is_compiled_with_cuda(
        ) else fluid.CUDAPlace(0)
        exe = fluid.Executor(place)

        [inference_program, feed_target_names, fetch_targets] = (
            fluid.io.load_inference_model(
                dirname=self.save_dir, executor=exe))
        tensor_img = np.array(
            np.random.random((1, 3, 224, 224)), dtype=np.float32)
        ori_results = model.test_batch(tensor_img)
        results = exe.run(inference_program,
                          feed={feed_target_names[0]: tensor_img},
                          fetch_list=fetch_targets)

        np.testing.assert_allclose(results, ori_results) 
Author: PaddlePaddle, Project: hapi, Lines of code: 30, Source file: test_save_inference_model.py

Example 8: test_main

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def test_main(self):
        main_prog = fluid.Program()
        startup_prog = fluid.Program()
        with fluid.program_guard(main_prog, startup_prog):
            pred = fluid.data(name='pred', shape=[None, self.class_num], dtype='float32')
            label = fluid.data(name='label', shape=[None, 1], dtype='int64')
            acc = Accuracy(topk=self.topk, name=self.name)
            state = acc.add_metric_op(pred, label)

        exe = fluid.Executor(fluid.CPUPlace())
        compiled_main_prog = fluid.CompiledProgram(main_prog)

        for i in range(10):
            label, pred = self.random_pred_label()
            state_ret = exe.run(compiled_main_prog,
                                feed={'pred': pred, 'label': label},
                                fetch_list=[s.name for s in to_list(state)],
                                return_numpy=True)
            acc.update(*state_ret)
            res_m = acc.accumulate()
            res_f = accuracy(pred, label, self.topk)
            assert np.all(np.isclose(np.array(res_m), np.array(res_f), rtol=1e-3)), \
                    "Accuracy precision error: {} != {}".format(res_m, res_f)
            acc.reset()
            assert np.sum(acc.total) == 0
            assert np.sum(acc.count) == 0 
Author: PaddlePaddle, Project: hapi, Lines of code: 28, Source file: test_metrics.py

Example 9: prepare_distributed_context

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def prepare_distributed_context(place=None):
    if place is None:
        place = fluid.CUDAPlace(ParallelEnv().dev_id) if ParallelEnv().nranks > 1 \
            else fluid.CUDAPlace(0)

    strategy = ParallelStrategy()
    strategy.nranks = ParallelEnv().nranks
    strategy.local_rank = ParallelEnv().local_rank
    strategy.trainer_endpoints = ParallelEnv().trainer_endpoints
    strategy.current_endpoint = ParallelEnv().current_endpoint

    if strategy.nranks < 2:
        return

    global _parallel_context_initialized

    if not _parallel_context_initialized and isinstance(place,
                                                        fluid.CUDAPlace):

        def _init_context():
            communicator_prog = fluid.Program()
            init_communicator(communicator_prog, strategy.local_rank,
                              strategy.nranks, True, strategy.current_endpoint,
                              strategy.trainer_endpoints)
            exe = fluid.Executor(place)
            exe.run(communicator_prog)

        if fluid.in_dygraph_mode():
            fluid.disable_dygraph()
            _init_context()
            fluid.enable_dygraph(place)
        else:
            _init_context()

    else:
        assert ("Only support CUDAPlace for now.")

    _parallel_context_initialized = True
    return strategy 
Author: PaddlePaddle, Project: hapi, Lines of code: 41, Source file: distributed.py

Example 10: evaluation

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def evaluation(program, reader, fetch_list, place):
    feed_var_list = [
        program.global_block().var('pixel'), program.global_block().var('label')
    ]
    feeder_test = fluid.DataFeeder(feed_list=feed_var_list, place=place)
    test_exe = fluid.Executor(place)
    losses, accuracies = AverageMeter(), AverageMeter()
    for tid, test_data in enumerate(reader()):
        loss, acc = test_exe.run(program=program,
                                 feed=feeder_test.feed(test_data),
                                 fetch_list=fetch_list)
        losses.update(float(loss), len(test_data))
        accuracies.update(float(acc) * 100, len(test_data))
    return losses.avg, accuracies.avg 
Author: PaddlePaddle, Project: AutoDL, Lines of code: 16, Source file: train_cifar.py

Example 11: main

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def main(_):
    """
        main
    """
    image_shape = [3, image_size, image_size]
    files = os.listdir(FLAGS.data_path)
    names = [each_item for each_item in files]
    np.random.shuffle(names)
    train_list = names[:9]
    test_list = names[-1]
    tokens, adjvec = utils.load_action(FLAGS.mid)

    model = CIFARModel(tokens, adjvec, image_shape)

    place = fluid.CUDAPlace(0)
    exe = fluid.Executor(place)

    startup = fluid.Program()
    train_prog = fluid.Program()
    test_prog = fluid.Program()
    train_vars = model.build_program(train_prog, startup, True)
    test_vars = model.build_program(test_prog, startup, False)
    exe.run(startup)

    train_accuracy, epoch_id = train(model, FLAGS.early_stop,
                                     train_prog, train_vars, exe, train_list)
    if epoch_id < FLAGS.early_stop:
        utils.dump_reward(FLAGS.mid, train_accuracy)
    else:
        test_accuracy = test(model, test_prog, test_vars, exe, [test_list])
        utils.dump_reward(FLAGS.mid, test_accuracy) 
Author: PaddlePaddle, Project: AutoDL, Lines of code: 33, Source file: train.py

Example 12: save_program

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def save_program():
    dataset = fluid.DatasetFactory().create_dataset()
    dataset.set_use_var(sparse_input_ids + [label])
    pipe_command = "python criteo_dataset.py {}".format(sys.argv[1])
    dataset.set_pipe_command(pipe_command)
    dataset.set_batch_size(32)
    dataset.set_thread(10)
    optimizer = fluid.optimizer.SGD(0.0001)
    optimizer.minimize(avg_cost)
    exe = fluid.Executor(fluid.CPUPlace())

    input_folder = "hdfs:"
    output = sp.check_output( "hadoop fs -D hadoop.job.ugi=" + hdfs_ugi
                              + " -D fs.defaultFS=" + hdfs_address +" -ls " + os.path.join(dataset_prefix, current_date_hr) + "/ | awk '{if(NR>1) print $8}'", shell=True)
    train_filelist = ["{}{}".format(input_folder, f) for f in output.decode('ascii').strip().split('\n')]
    train_filelist.remove('hdfs:' + os.path.join(dataset_prefix, current_date_hr, 'donefile'))
    train_filelist = [train_filelist[0]] 
    print(train_filelist)

    exe.run(fluid.default_startup_program())
    print("startup save program done.")
    dataset.set_filelist(train_filelist)
    exe.train_from_dataset(
        program=fluid.default_main_program(),
        dataset=dataset,
        fetch_list=[auc_var],
        fetch_info=["auc"],
        debug=False,)
        #print_period=10000)
    # save model here
    fetch_list = fluid.io.save_inference_model(inference_path, [x.name for x in inference_feed_vars], [predict], exe) 
Author: PaddlePaddle, Project: ElasticCTR, Lines of code: 33, Source file: save_program.py

Example 13: init_env

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def init_env(self):
        """
        :return:
        """
        # multi nodes
        self.num_trainers = 1
        self.trainer_id = 0
        self.is_local = self.params.get("PADDLE_IS_LOCAL", False)
        # cpu multi
        if self.params["PADDLE_USE_GPU"]:
            gpus = os.getenv('FLAGS_selected_gpus', '0').split(",")
            self.gpu_id = int(gpus[0])
            run_place = fluid.CUDAPlace(int(gpus[0]))
            if "is_distributed" in self.params and self.params["is_distributed"]:
                self.dev_count = len(gpus)
            else:
                self.dev_count = fluid.core.get_cuda_device_count()
            #logging.debug("gpu count %d" % self.dev_count)
            self.prepare_nccl2_env(self.is_local)
            logging.debug("finish prepare nccl2 env")
        else:
            run_place = fluid.CPUPlace()
            self.dev_count = int(os.environ.get('CPU_NUM', multiprocessing.cpu_count()))
            self.prepare_cpumulti_env(self.is_local)
            self.gpu_id = None
            logging.debug("finish prepare cpu multi")
        self.executor = fluid.Executor(run_place)

        # parallel executor relevant config
        self.num_iteration_per_drop_scope = self.params.get("num_iteration_per_drop_scope", 1)
        self.use_fast_executor = self.params.get("use_fast_executor", False) 
Author: baidu, Project: Senta, Lines of code: 33, Source file: base_trainer.py

Example 14: infer

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def infer(use_cuda, params_dirname):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()

    infer_movie_id = 783
    infer_movie_name = paddle.dataset.movielens.movie_info()[
        infer_movie_id].title

    exe = fluid.Executor(place)

    inference_scope = fluid.core.Scope()
    ids = []

    with fluid.scope_guard(inference_scope):
        [inferencer, feed_target_names,
            fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)

        # Use the first data from paddle.dataset.movielens.test() as input
        assert feed_target_names[0] == "user_id"
        user_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place)

        assert feed_target_names[1] == "gender_id"
        gender_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place)

        assert feed_target_names[2] == "age_id"
        age_id = fluid.create_lod_tensor([[np.int64(0)]], [[1]], place)

        assert feed_target_names[3] == "job_id"
        job_id = fluid.create_lod_tensor([[np.int64(10)]], [[1]], place)

        assert feed_target_names[4] == "movie_id"
        movie_id = fluid.create_lod_tensor([[np.int64(783)]], [[1]], place)

        assert feed_target_names[5] == "category_id"
        category_id = fluid.create_lod_tensor(
            [np.array([10, 8, 9], dtype='int64')], [[3]], place)

        assert feed_target_names[6] == "movie_title"
        movie_title = fluid.create_lod_tensor(
             [np.array([1069, 4140, 2923, 710, 988], dtype='int64')], [[5]], place)

        ids.append(infer_movie_id)
        results = exe.run(
            inferencer,
            feed={
                feed_target_names[0]: user_id,
                feed_target_names[1]: gender_id,
                feed_target_names[2]: age_id,
                feed_target_names[3]: job_id,
                feed_target_names[4]: movie_id,
                feed_target_names[5]: category_id,
                feed_target_names[6]: movie_title
            },
            fetch_list=fetch_targets,
            return_numpy=False)
        predict_rating = np.array(results[0])
        usr_features = np.array(results[1])
        mov_features = np.array(results[2])
        print("Predict Rating of user id 1 on movie id 783 is " + str(predict_rating[0][0]))
        print("Actual Rating of user id 1 on movie id 783 is 4.")
    return usr_features[0], mov_features[0], ids 
Author: milvus-io, Project: bootcamp, Lines of code: 62, Source file: infer_paddle.py

Example 15: infer

# Required import: from paddle import fluid [as alias]
# Or: from paddle.fluid import Executor [as alias]
def infer(use_cuda, params_dirname, gender, age, job, mov_id=783, category=[10, 8, 9], title=[1069, 4140, 2923, 710, 988]):
    place = fluid.CUDAPlace(0) if use_cuda else fluid.CPUPlace()
    exe = fluid.Executor(place)
    inference_scope = fluid.core.Scope()

    with fluid.scope_guard(inference_scope):
        [inferencer, feed_target_names,fetch_targets] = fluid.io.load_inference_model(params_dirname, exe)

        assert feed_target_names[0] == "user_id"
        user_id = fluid.create_lod_tensor([[np.int64(1)]], [[1]], place)

        assert feed_target_names[1] == "gender_id"
        gender_id = fluid.create_lod_tensor([[np.int64(gender)]], [[1]], place)

        assert feed_target_names[2] == "age_id"
        age_id = fluid.create_lod_tensor([[np.int64(age)]], [[1]], place)

        assert feed_target_names[3] == "job_id"
        job_id = fluid.create_lod_tensor([[np.int64(job)]], [[1]], place)

        assert feed_target_names[4] == "movie_id"
        movie_id = fluid.create_lod_tensor([[np.int64(mov_id)]], [[1]], place)

        assert feed_target_names[5] == "category_id"
        category_id = fluid.create_lod_tensor(
                [np.array(category, dtype='int64')], [[len(category)]], place) # Animation, Children's, Musical

        assert feed_target_names[6] == "movie_title"
        movie_title = fluid.create_lod_tensor(
            [np.array(title, dtype='int64')], [[len(title)]],place)

        results = exe.run(
            inferencer,
            feed={
                feed_target_names[0]: user_id,
                feed_target_names[1]: gender_id,
                feed_target_names[2]: age_id,
                feed_target_names[3]: job_id,
                feed_target_names[4]: movie_id,
                feed_target_names[5]: category_id,
                feed_target_names[6]: movie_title
            },
            fetch_list=fetch_targets,
            return_numpy=False)

        # predict_rating = np.array(results[0])
        usr_features = np.array(results[1])
        mov_features = np.array(results[2])

    return usr_features[0], mov_features[0], mov_id 
Author: milvus-io, Project: bootcamp, Lines of code: 52, Source file: infer_milvus.py


Note: The paddle.fluid.Executor examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please consult the corresponding project's License before distributing or using the code; do not reproduce without permission.