

Python transformers.RobertaConfig Code Examples

This article collects typical usage examples of the transformers.RobertaConfig class in Python. If you are wondering how transformers.RobertaConfig is used in practice, or what it is for, the curated code examples below may help. You can also explore further usage examples in the transformers library itself.


The following presents 7 code examples of transformers.RobertaConfig, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
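Before the individual examples, a minimal baseline sketch of constructing a RobertaConfig directly may be useful; the default values noted in the comments are those of the 2.x/3.x transformers releases these examples target, and should be read as illustrative.

from transformers import RobertaConfig

# Default construction yields a roberta-base sized configuration
# (hidden_size=768, num_hidden_layers=12, num_attention_heads=12).
config = RobertaConfig()

# Any field can be overridden at construction time, e.g. for a tiny test model.
tiny_config = RobertaConfig(hidden_size=128, num_hidden_layers=2,
                            num_attention_heads=2, intermediate_size=256)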

Example 1: from_exist_config

# Required module: import transformers [as alias]
# Alternatively: from transformers import RobertaConfig [as alias]
@classmethod
def from_exist_config(cls, config, label_smoothing=0.1, max_position_embeddings=None):
    # Copy the fields shared by all BERT-style configs from the source config.
    required_keys = [
        "vocab_size", "hidden_size", "num_hidden_layers", "num_attention_heads",
        "hidden_act", "intermediate_size", "hidden_dropout_prob", "attention_probs_dropout_prob",
        "max_position_embeddings", "type_vocab_size", "initializer_range", "layer_norm_eps"]

    kwargs = {}
    for key in required_keys:
        assert hasattr(config, key)
        kwargs[key] = getattr(config, key)

    kwargs["vocab_size_or_config_json_file"] = kwargs["vocab_size"]
    if isinstance(config, RobertaConfig):
        # RoBERTa has no token-type embeddings and reserves two extra
        # position slots for its padding offset.
        kwargs["type_vocab_size"] = 0
        kwargs["max_position_embeddings"] = kwargs["max_position_embeddings"] - 2

    additional_keys = [
        "source_type_id", "target_type_id"
    ]
    for key in additional_keys:
        if hasattr(config, key):
            kwargs[key] = getattr(config, key)

    # Optionally enlarge the position-embedding table beyond the source config's.
    if max_position_embeddings is not None and max_position_embeddings > config.max_position_embeddings:
        kwargs["max_position_embeddings"] = max_position_embeddings
        # "logger" is the module-level logger defined in the source file.
        logger.info("  **  Change max position embeddings to %d  ** " % max_position_embeddings)

    return cls(label_smoothing=label_smoothing, **kwargs)
Developer: microsoft, Project: unilm, Lines: 30, Source: config.py
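A hypothetical usage sketch of the method above: Seq2SeqConfig is a stand-in name for the unilm config class that defines from_exist_config, and the argument values are illustrative only. For a RobertaConfig input, the method zeroes out type_vocab_size and subtracts the two position slots RoBERTa reserves for its padding offset.

from transformers import RobertaConfig

roberta_config = RobertaConfig.from_pretrained('roberta-base')

# Hypothetical: Seq2SeqConfig stands in for the class defining from_exist_config.
s2s_config = Seq2SeqConfig.from_exist_config(
    roberta_config,
    label_smoothing=0.1,
    max_position_embeddings=768,  # optionally enlarge the position table
)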

Example 2: prepare_config_and_inputs

# Required module: import transformers [as alias]
# Alternatively: from transformers import RobertaConfig [as alias]
def prepare_config_and_inputs(self):
    # Random token ids of shape [batch_size, seq_length].
    input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

    input_mask = None
    if self.use_input_mask:
        input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

    token_type_ids = None
    if self.use_token_type_ids:
        token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

    sequence_labels = None
    token_labels = None
    choice_labels = None
    if self.use_labels:
        sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
        token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
        choice_labels = ids_tensor([self.batch_size], self.num_choices)

    # Build a RobertaConfig from the tester's hyperparameters.
    config = RobertaConfig(
        vocab_size=self.vocab_size,
        hidden_size=self.hidden_size,
        num_hidden_layers=self.num_hidden_layers,
        num_attention_heads=self.num_attention_heads,
        intermediate_size=self.intermediate_size,
        hidden_act=self.hidden_act,
        hidden_dropout_prob=self.hidden_dropout_prob,
        attention_probs_dropout_prob=self.attention_probs_dropout_prob,
        max_position_embeddings=self.max_position_embeddings,
        type_vocab_size=self.type_vocab_size,
        initializer_range=self.initializer_range,
    )

    return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
Developer: bhoov, Project: exbert, Lines: 36, Source: test_modeling_tf_roberta.py
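The ids_tensor helper used above comes from the transformers test utilities rather than the public API; a minimal sketch of its assumed behavior (uniform random token ids) in the TF setting:

import tensorflow as tf

def ids_tensor(shape, vocab_size):
    # Random int32 tensor of token ids in [0, vocab_size),
    # e.g. shape [batch_size, seq_length].
    return tf.random.uniform(shape, minval=0, maxval=vocab_size, dtype=tf.int32)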

Example 3: setUp

# Required module: import transformers [as alias]
# Alternatively: from transformers import RobertaConfig [as alias]
def setUp(self):
    self.model_tester = TFRobertaModelTest.TFRobertaModelTester(self)
    self.config_tester = ConfigTester(self, config_class=RobertaConfig, hidden_size=37)
Developer: bhoov, Project: exbert, Lines: 5, Source: test_modeling_tf_roberta.py

Example 4: test_TFRobertaModel

# Required module: import transformers [as alias]
# Alternatively: from transformers import RobertaConfig [as alias]
def test_TFRobertaModel(self):
    from transformers import RobertaConfig, TFRobertaModel
    keras.backend.clear_session()
    # pretrained_weights = 'roberta-base'
    tokenizer_file = 'roberta_roberta-base.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = RobertaConfig()        # default roberta-base sized config
    model = TFRobertaModel(config)  # randomly initialized weights
    predictions = model.predict(inputs)
    # Convert to ONNX and check the runtimes agree on the outputs.
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
Developer: onnx, Project: keras-onnx, Lines: 14, Source: test_transformers.py
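The same conversion can be sketched outside the test harness. The direct tokenizer download and the output comparison below are assumptions for illustration; the original test instead loads a pickled tokenizer and checks parity with its own run_onnx_runtime helper.

import numpy as np
import keras2onnx
import onnxruntime
from transformers import RobertaConfig, RobertaTokenizer, TFRobertaModel

# Assumption: fetch the tokenizer directly rather than from a pickle file.
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
input_ids = tokenizer.encode('keras2onnx conversion check', return_tensors='tf')

config = RobertaConfig()        # roberta-base sized, randomly initialized
model = TFRobertaModel(config)
keras_output = model.predict(input_ids)[0]

onnx_model = keras2onnx.convert_keras(model, model.name)
sess = onnxruntime.InferenceSession(onnx_model.SerializeToString())
# Look up the graph's input name instead of hard-coding it.
input_name = sess.get_inputs()[0].name
onnx_output = sess.run(None, {input_name: input_ids.numpy()})[0]
np.testing.assert_allclose(keras_output, onnx_output, rtol=1e-2, atol=1e-4)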

Example 5: test_TFRobertaForMaskedLM

# Required module: import transformers [as alias]
# Alternatively: from transformers import RobertaConfig [as alias]
def test_TFRobertaForMaskedLM(self):
    from transformers import RobertaConfig, TFRobertaForMaskedLM
    keras.backend.clear_session()
    # pretrained_weights = 'roberta-base'
    tokenizer_file = 'roberta_roberta-base.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = RobertaConfig()
    model = TFRobertaForMaskedLM(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(
        run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                         atol=1.e-4))
Developer: onnx, Project: keras-onnx, Lines: 16, Source: test_transformers.py
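Note the looser comparison tolerances here (rtol=1.e-2, atol=1.e-4) relative to the plain encoder test above: the masked-LM head projects every position onto the full vocabulary, presumably amplifying small numeric differences between the TF and ONNX runtimes.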

Example 6: test_TFRobertaForSequenceClassification

# Required module: import transformers [as alias]
# Alternatively: from transformers import RobertaConfig [as alias]
def test_TFRobertaForSequenceClassification(self):
    from transformers import RobertaConfig, TFRobertaForSequenceClassification
    keras.backend.clear_session()
    # pretrained_weights = 'roberta-base'
    tokenizer_file = 'roberta_roberta-base.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = RobertaConfig()
    model = TFRobertaForSequenceClassification(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
Developer: onnx, Project: keras-onnx, Lines: 14, Source: test_transformers.py

Example 7: test_TFRobertaForTokenClassification

# Required module: import transformers [as alias]
# Alternatively: from transformers import RobertaConfig [as alias]
def test_TFRobertaForTokenClassification(self):
    from transformers import RobertaConfig, TFRobertaForTokenClassification
    keras.backend.clear_session()
    # pretrained_weights = 'roberta-base'
    tokenizer_file = 'roberta_roberta-base.pickle'
    tokenizer = self._get_tokenzier(tokenizer_file)
    text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
    config = RobertaConfig()
    model = TFRobertaForTokenClassification(config)
    predictions = model.predict(inputs)
    onnx_model = keras2onnx.convert_keras(model, model.name)
    self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files))
Developer: onnx, Project: keras-onnx, Lines: 14, Source: test_transformers.py


Note: The transformers.RobertaConfig examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. For redistribution and use, please refer to the corresponding project's license; do not reproduce without permission.