

Python transformers.GPT2Config Code Examples

This article collects typical usage examples of the Python transformers.GPT2Config class. If you are wondering how transformers.GPT2Config is used in practice, the curated code examples below may help. You can also explore further usage examples from the transformers library.


Below are 7 code examples of transformers.GPT2Config, sorted by popularity by default. You can upvote the examples you find useful; your feedback helps the system recommend better Python code examples.

Example 1: GPT2ConfigCPU

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def GPT2ConfigCPU(
    vocab_size: int = 5000, bos_token_id: int = 0, eos_token_id: int = 0, **kwargs
):
    """
    Returns a GPT-2 config more suitable for training on a regular consumer CPU.
    """

    return GPT2Config(
        vocab_size=vocab_size,
        n_positions=64,
        n_ctx=64,
        n_embd=128,
        n_layer=4,
        n_head=4,
        bos_token_id=bos_token_id,
        eos_token_id=eos_token_id,
        **kwargs,
    ) 
Developer ID: minimaxir, Project: aitextgen, Lines of code: 20, Source file: utils.py
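
A minimal usage sketch (not part of the original example; pairing the config with GPT2LMHeadModel here is an assumption) showing how the returned config can drive a small trainable model:

from transformers import GPT2LMHeadModel

config = GPT2ConfigCPU(vocab_size=5000)   # 4 layers, 4 heads, 128-dim embeddings, 64-token context
model = GPT2LMHeadModel(config)           # assumed pairing; any GPT-2 model class accepting GPT2Config works
print(config.n_ctx, config.n_embd)        # 64 128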

Example 2: build_gpt2_config

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def build_gpt2_config(
    vocab_size: int = 10000,
    bos_token_id: int = 0,
    eos_token_id: int = 0,
    max_length: int = 1024,
    dropout: float = 0.0,
    **kwargs
):
    """
    Builds a custom GPT-2 config based on a given Transformers config,
    with a few more user-friendly aliases.
    """

    return GPT2Config(
        vocab_size=vocab_size,
        n_positions=max_length,
        n_ctx=max_length,
        resid_pdrop=dropout,
        embd_pdrop=dropout,
        attn_pdrop=dropout,
        summary_first_dropout=dropout,
        bos_token_id=bos_token_id,
        eos_token_id=eos_token_id,
        **kwargs,
    ) 
Developer ID: minimaxir, Project: aitextgen, Lines of code: 27, Source file: utils.py
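
A short usage sketch (assumed, not taken from the original source) illustrating how the user-friendly aliases map onto the underlying GPT2Config fields:

config = build_gpt2_config(vocab_size=8000, max_length=256, dropout=0.1)
assert config.n_positions == 256    # max_length alias
assert config.resid_pdrop == 0.1    # dropout alias, also applied to embedding and attention dropout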

Example 3: test_TFGPT2

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def test_TFGPT2(self):
        if enable_full_transformer_test:
            from transformers import GPT2Config, TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel
            model_list = [TFGPT2Model, TFGPT2LMHeadModel, TFGPT2DoubleHeadsModel]
        else:
            from transformers import GPT2Config, TFGPT2Model
            model_list = [TFGPT2Model]
        # pretrained_weights = 'gpt2'
        tokenizer_file = 'gpt2_gpt2.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = GPT2Config()
        for model_instance_ in model_list:
            keras.backend.clear_session()
            model = model_instance_(config)
            model._set_inputs(inputs)
            predictions_original = model(inputs)
            predictions = [predictions_original[0]] + list(v_.numpy() for v_ in predictions_original[1])
            onnx_model = keras2onnx.convert_keras(model, model.name)
            self.assertTrue(
                run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                                 atol=1.e-4)) 
Developer ID: onnx, Project: keras-onnx, Lines of code: 24, Source file: test_transformers.py

Example 4: __init__

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def __init__(self, args, task):
        super().__init__(task.target_dictionary)

        try:
            # Prepend the transformers submodule to the path, so that
            # it's prioritized over other installations. This allows
            # making local changes in the submodule.
            sys.path.insert(
                0, os.path.join(os.path.dirname(__file__), 'transformers', 'src')
            )
            from transformers import GPT2Config, GPT2LMHeadModel
        except ImportError:
            raise ImportError(
                '\n\nPlease install huggingface/transformers with:'
                '\n\n  pip install transformers'
                '\n\nOr to make local edits, install the submodule:'
                '\n\n  git submodule update --init '
                'fairseq/models/huggingface/transformers'
            )

        config = GPT2Config(
            vocab_size=len(task.target_dictionary),
            n_positions=args.max_target_positions + 1,
            n_ctx=args.max_target_positions,
            n_embd=args.embed_dim,
            n_layer=args.num_layers,
            n_head=args.num_attention_heads,
            resid_pdrop=args.dropout,
            embd_pdrop=args.dropout,
            attn_pdrop=args.attention_dropout,
            layer_norm_epsilon=1e-6,
        )
        self.model = GPT2LMHeadModel(config)

        # set zero embedding for padding symbol
        self.pad_idx = task.target_dictionary.pad()
        self.model.transformer.wte.weight.data[self.pad_idx].zero_()
        self.model.transformer.wpe.weight.data[0].zero_() 
Developer ID: pytorch, Project: fairseq, Lines of code: 40, Source file: hf_gpt2.py

Example 5: setUp

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def setUp(self):
        self.model_tester = TFGPT2ModelTest.TFGPT2ModelTester(self)
        self.config_tester = ConfigTester(self, config_class=GPT2Config, n_embd=37) 
Developer ID: bhoov, Project: exbert, Lines of code: 5, Source file: test_modeling_tf_gpt2.py

Example 6: test_3layer_gpt2

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def test_3layer_gpt2(self):
        from transformers import GPT2Config, TFGPT2Model, BertTokenizer
        keras2onnx.proto.keras.backend.set_learning_phase(0)
        config = GPT2Config(n_layer=3)
        model = TFGPT2Model(config)
        tokenizer = BertTokenizer.from_pretrained("bert-base-uncased")
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        inputs = tokenizer.encode_plus(text, add_special_tokens=True, return_tensors='tf')
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files)) 
Developer ID: onnx, Project: keras-onnx, Lines of code: 13, Source file: test_transformers.py

Example 7: prepare_config_and_inputs

# Required import: import transformers [as alias]
# Or: from transformers import GPT2Config [as alias]
def prepare_config_and_inputs(self):
            input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

            input_mask = None
            if self.use_input_mask:
                input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

            token_type_ids = None
            if self.use_token_type_ids:
                token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

            mc_token_ids = None
            if self.use_mc_token_ids:
                mc_token_ids = ids_tensor([self.batch_size, self.num_choices], self.seq_length)

            sequence_labels = None
            token_labels = None
            choice_labels = None
            if self.use_labels:
                sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
                token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
                choice_labels = ids_tensor([self.batch_size], self.num_choices)

            config = GPT2Config(
                vocab_size=self.vocab_size,
                n_embd=self.hidden_size,
                n_layer=self.num_hidden_layers,
                n_head=self.num_attention_heads,
                # intermediate_size=self.intermediate_size,
                # hidden_act=self.hidden_act,
                # hidden_dropout_prob=self.hidden_dropout_prob,
                # attention_probs_dropout_prob=self.attention_probs_dropout_prob,
                n_positions=self.max_position_embeddings,
                n_ctx=self.max_position_embeddings
                # type_vocab_size=self.type_vocab_size,
                # initializer_range=self.initializer_range
            )

            head_mask = ids_tensor([self.num_hidden_layers, self.num_attention_heads], 2)

            return (
                config,
                input_ids,
                input_mask,
                head_mask,
                token_type_ids,
                mc_token_ids,
                sequence_labels,
                token_labels,
                choice_labels,
            ) 
Developer ID: bhoov, Project: exbert, Lines of code: 53, Source file: test_modeling_tf_gpt2.py
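
The example above relies on an ids_tensor test helper that is not shown. A minimal sketch of what such a helper might look like for these TensorFlow tests (an assumption for illustration, not the library's actual implementation):

import random
import tensorflow as tf

def ids_tensor(shape, vocab_size):
    # Fill a tensor of the given shape with random ids in [0, vocab_size).
    total = 1
    for dim in shape:
        total *= dim
    values = [random.randint(0, vocab_size - 1) for _ in range(total)]
    return tf.constant(values, shape=shape, dtype=tf.int32)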


Note: the transformers.GPT2Config examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub/MSDocs. The snippets were selected from open-source projects contributed by various developers, and copyright of the source code remains with the original authors. Please follow each project's license when distributing or using the code; do not reproduce without permission.