

Python transformers.GPT2Config Usage Examples

This article collects typical usage examples of transformers.GPT2Config in Python. If you are wondering how GPT2Config, the configuration class for GPT-2 models in the transformers library, is used in practice, the curated examples below may help. You can also explore further usage examples from the transformers package.


Seven code examples of transformers.GPT2Config are shown below, sorted by popularity by default.

Example 1: GPT2ConfigCPU

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def GPT2ConfigCPU(
    vocab_size: int = 5000, bos_token_id: int = 0, eos_token_id: int = 0, **kwargs
):
    """
    Returns a GPT-2 config more suitable for training on a regular consumer CPU.
    """

    return GPT2Config(
        vocab_size=vocab_size,
        n_positions=64,
        n_ctx=64,
        n_embd=128,
        n_layer=4,
        n_head=4,
        bos_token_id=bos_token_id,
        eos_token_id=eos_token_id,
        **kwargs,
    ) 
Author: minimaxir | Project: aitextgen | Lines: 20 | Source: utils.py
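
For reference, a minimal usage sketch of the function above (assuming it is importable, e.g. from aitextgen's utils module); instantiating GPT2LMHeadModel here is purely illustrative:

from transformers import GPT2LMHeadModel

# Build the small CPU-friendly config defined above (64-token context, 4 layers, 4 heads, 128-dim embeddings).
config = GPT2ConfigCPU(vocab_size=5000)

# Create an untrained GPT-2 language model from that config.
model = GPT2LMHeadModel(config)
print(model.config.n_layer, model.config.n_embd)  # 4 128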

Example 2: build_gpt2_config

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def build_gpt2_config(
    vocab_size: int = 10000,
    bos_token_id: int = 0,
    eos_token_id: int = 0,
    max_length: int = 1024,
    dropout: float = 0.0,
    **kwargs
):
    """
    Builds a custom GPT-2 config based on a given Transformers config,
    with a few more user-friendly aliases.
    """

    return GPT2Config(
        vocab_size=vocab_size,
        n_positions=max_length,
        n_ctx=max_length,
        resid_pdrop=dropout,
        embd_pdrop=dropout,
        attn_pdrop=dropout,
        summary_first_dropout=dropout,
        bos_token_id=bos_token_id,
        eos_token_id=eos_token_id,
        **kwargs,
    ) 
Author: minimaxir | Project: aitextgen | Lines: 27 | Source: utils.py
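
For illustration, a short sketch of calling this builder with its friendlier aliases (the values are arbitrary); any extra keyword arguments such as n_layer are passed straight through to GPT2Config:

# max_length maps to n_positions/n_ctx; dropout is applied to all four dropout fields.
config = build_gpt2_config(
    vocab_size=10000, max_length=256, dropout=0.1, n_layer=6, n_head=8, n_embd=256
)
print(config.n_positions, config.resid_pdrop)  # 256 0.1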

Example 3: test_TFGPT2

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def test_TFGPT2(self):
        if enable_full_transformer_test:
            from transformers import GPT2Config, TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel
            model_list = [TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel]
        else:
            from transformers import GPT2Config, TFGPT2Model
            model_list = [TFGPT2Model]
        # pretrained_weights = 'gpt2'
        tokenizer_file = 'gpt2_gpt2.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = GPT2Config()
        for model_instance_ in model_list:
            keras.backend.clear_session()
            model = model_instance_(config)
            model._set_inputs(inputs)
            predictions_original = model(inputs)
            predictions = [predictions_original[0]] + list(v_.numpy() for v_ in predictions_original[1])
            onnx_model = keras2onnx.convert_keras(model, model.name)
            self.assertTrue(
                run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                                 atol=1.e-4)) 
Author: onnx | Project: keras-onnx | Lines: 24 | Source: test_transformers.py

Example 4: __init__

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def __init__(self, args, task):
        super().__init__(task.target_dictionary)

        try:
            # Prepend the transformers submodule to the path, so that
            # it's prioritized over other installations. This allows
            # making local changes in the submodule.
            sys.path.insert(
                0, os.path.join(os.path.dirname(__file__), 'transformers', 'src')
            )
            from transformers import GPT2Config, GPT2LMHeadModel
        except ImportError:
            raise ImportError(
                '\n\nPlease install huggingface/transformers with:'
                '\n\n  pip install transformers'
                '\n\nOr to make local edits, install the submodule:'
                '\n\n  git submodule update --init '
                'fairseq/models/huggingface/transformers'
            )

        config = GPT2Config(
            vocab_size=len(task.target_dictionary),
            n_positions=args.max_target_positions + 1,
            n_ctx=args.max_target_positions,
            n_embd=args.embed_dim,
            n_layer=args.num_layers,
            n_head=args.num_attention_heads,
            resid_pdrop=args.dropout,
            embd_pdrop=args.dropout,
            attn_pdrop=args.attention_dropout,
            layer_norm_epsilon=1e-6,
        )
        self.model = GPT2LMHeadModel(config)

        # set zero embedding for padding symbol
        self.pad_idx = task.target_dictionary.pad()
        self.model.transformer.wte.weight.data[self.pad_idx].zero_()
        self.model.transformer.wpe.weight.data[0].zero_() 
Author: pytorch | Project: fairseq | Lines: 40 | Source: hf_gpt2.py
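
The padding-embedding trick at the end of the example can also be reproduced outside fairseq; below is a minimal standalone sketch, where the pad index of 1 is a hypothetical value rather than fairseq's actual dictionary index:

from transformers import GPT2Config, GPT2LMHeadModel

config = GPT2Config(vocab_size=1000, n_positions=128, n_ctx=128, n_embd=64, n_layer=2, n_head=2)
model = GPT2LMHeadModel(config)

pad_idx = 1  # hypothetical padding symbol index
model.transformer.wte.weight.data[pad_idx].zero_()  # zero the token embedding for the padding symbol
model.transformer.wpe.weight.data[0].zero_()        # zero the first position embedding, as in the example above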

Example 5: setUp

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def setUp(self):
        self.model_tester = TFGPT2ModelTest.TFGPT2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37) 
Author: bhoov | Project: exbert | Lines: 5 | Source: test_modeling_tf_gpt2.py

Example 6: test_3layer_gpt2

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def test_3layer_gpt2(self):
        from transformers import GPT2Config, TFGPT2Model, BertTokenizer
        keras2onnx.proto.keras.backend.set_learning_phase(0)
        config = GPT2Config(n_layer=3)
        model = TFGPT2Model(config)
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        inputs = tokenizer.encode_plus(text, add_special_tokens=True, return_tensors='tf')
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files)) 
Author: onnx | Project: keras-onnx | Lines: 13 | Source: test_transformers.py

Example 7: prepare_config_and_inputs

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            mc_token_ids = None
            if self.use_mc_token_ids:
                mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = GPT2Config(
                vocab_size=self.vocab_size,
                n_embd=self.hidden_size,
                n_layer=self.num_hidden_layers,
                n_head=self.num_attention_heads,
                # intermediate_size=self.intermediate_size,
                # hidden_act=self.hidden_act,
                # hidden_dropout_prob=self.hidden_dropout_prob,
                # attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                n_positions=self.max_position_embeddings,
                n_ctx=self.max_position_embeddings
                # type_vocab_size=self.type_vocab_size,
                # initializer_range=self.initializer_range
            )

            head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

            return (
                config,
                input_ids,
                input_mask,
                head_mask,
                token_type_ids,
                mc_token_ids,
                sequence_labels,
                token_labels,
                choice_labels,
            ) 
Author: bhoov | Project: exbert | Lines: 53 | Source: test_modeling_tf_gpt2.py
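
The ids_tensor helper used above comes from the transformers test utilities and simply builds a random integer tensor; a minimal sketch of the TensorFlow variant is shown here only so the snippet can be read standalone (the exact signature and implementation details are assumptions):

import random
import tensorflow as tf

def ids_tensor(shape, vocab_size):
    """Return a random int32 tensor of the given shape with values in [0, vocab_size)."""
    total = 1
    for dim in shape:
        total *= dim
    values = [random.randint(0, vocab_size - 1) for _ in range(total)]
    return tf.constant(values, shape=shape, dtype=tf.int32)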


Note: The transformers.GPT2Config examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective developers; copyright remains with the original authors. Please consult each project's license before distributing or using the code, and do not reproduce this article without permission.