

Python transformers.RobertaConfig Class Code Examples

This article collects typical usage examples of the transformers.RobertaConfig class in Python. If you are unsure how to use transformers.RobertaConfig, or want to see what it looks like in real code, the curated examples below should help. You can also explore further usage examples from the transformers package.


The following presents 7 code examples of transformers.RobertaConfig, drawn from open-source projects and ordered by popularity by default.
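Before the collected examples, here is a minimal sketch (not taken from any of the projects below) of constructing a RobertaConfig by hand and building an untrained model from it; the hyperparameter values shown are the ones published for roberta-base:

from transformers import RobertaConfig, RobertaModel

config = RobertaConfig(
    vocab_size=50265,             # BPE vocabulary size of roberta-base
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
    max_position_embeddings=514,  # 512 usable positions + 2 reserved for the padding offset
)
model = RobertaModel(config)      # randomly initialized; architecture follows the config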

Example 1: from_exist_config

# Required import: import transformers [as alias]
# Or: from transformers import RobertaConfig [as alias]
def from_exist_config(cls, config, label_smoothing=0.1, max_position_embeddings=None):
        required_keys = [
            "vocab_size", "hidden_size", "num_hidden_layers", "num_attention_heads",
            "hidden_act", "intermediate_size", "hidden_dropout_prob", "attention_probs_dropout_prob",
            "max_position_embeddings", "type_vocab_size", "initializer_range", "layer_norm_eps"]

        kwargs = {}
        for key in required_keys:
            assert hasattr(config, key)
            kwargs[key] = getattr(config, key)

        kwargs["vocab_size_or_config_json_file"] = kwargs["vocab_size"]
        if isinstance(config, RobertaConfig):
            # RoBERTa does not use segment (token type) embeddings, and it
            # reserves two position slots for the padding offset, so both
            # fields are adjusted here.
            kwargs["type_vocab_size"] = 0
            kwargs["max_position_embeddings"] = kwargs["max_position_embeddings"] - 2
        
        additional_keys = [
            "source_type_id", "target_type_id"
        ]
        for key in additional_keys:
            if hasattr(config, key):
                kwargs[key] = getattr(config, key)

        if max_position_embeddings is not None and max_position_embeddings > config.max_position_embeddings:
            kwargs["max_position_embeddings"] = max_position_embeddings
            logger.info("  **  Change max position embeddings to %d  ** " % max_position_embeddings)

        return cls(label_smoothing=label_smoothing, **kwargs) 
Author: microsoft | Project: unilm | Lines: 30 | Source: config.py
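The RobertaConfig branch above reflects a quirk of RoBERTa's position embeddings: the published config reports two more positions than are actually usable, because position ids start after the padding index. A quick check (assuming network access to fetch the roberta-base config):

from transformers import RobertaConfig

cfg = RobertaConfig.from_pretrained("roberta-base")
print(cfg.max_position_embeddings)       # 514 in the published checkpoint
print(cfg.max_position_embeddings - 2)   # 512 usable positions, matching the adjustment above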

Example 2: prepare_config_and_inputs

# Required import: import transformers [as alias]
# Or: from transformers import RobertaConfig [as alias]
def prepare_config_and_inputs(self):
        input_ids = ids_tensor([self.batch_size, self.seq_length], self.vocab_size)

        input_mask = None
        if self.use_input_mask:
            input_mask = ids_tensor([self.batch_size, self.seq_length], vocab_size=2)

        token_type_ids = None
        if self.use_token_type_ids:
            token_type_ids = ids_tensor([self.batch_size, self.seq_length], self.type_vocab_size)

        sequence_labels = None
        token_labels = None
        choice_labels = None
        if self.use_labels:
            sequence_labels = ids_tensor([self.batch_size], self.type_sequence_label_size)
            token_labels = ids_tensor([self.batch_size, self.seq_length], self.num_labels)
            choice_labels = ids_tensor([self.batch_size], self.num_choices)

        config = RobertaConfig(
            vocab_size=self.vocab_size,
            hidden_size=self.hidden_size,
            num_hidden_layers=self.num_hidden_layers,
            num_attention_heads=self.num_attention_heads,
            intermediate_size=self.intermediate_size,
            hidden_act=self.hidden_act,
            hidden_dropout_prob=self.hidden_dropout_prob,
            attention_probs_dropout_prob=self.attention_probs_dropout_prob,
            max_position_embeddings=self.max_position_embeddings,
            type_vocab_size=self.type_vocab_size,
            initializer_range=self.initializer_range,
        )

        return config, input_ids, token_type_ids, input_mask, sequence_labels, token_labels, choice_labels
Author: bhoov | Project: exbert | Lines: 36 | Source: test_modeling_tf_roberta.py
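The ids_tensor helper is defined elsewhere in the transformers test suite. A minimal stand-in with the behavior the example relies on (random integer ids of a given shape, bounded by a vocabulary size) might look like the sketch below; the exact implementation in the real test utilities may differ:

import random
import tensorflow as tf

def ids_tensor(shape, vocab_size):
    # Fill a tensor of the requested shape with random ids in [0, vocab_size).
    total = 1
    for dim in shape:
        total *= dim
    values = [random.randint(0, vocab_size - 1) for _ in range(total)]
    return tf.constant(values, shape=shape, dtype=tf.int32)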

Example 3: setUp

# Required import: import transformers [as alias]
# Or: from transformers import RobertaConfig [as alias]
def setUp(self):
        self.model_tester = TFRobertaModelTest.TFRobertaModelTester(self)
        self.config_tester = ConfigTester(self, config_class=RobertaConfig, hidden_size=37) 
Author: bhoov | Project: exbert | Lines: 5 | Source: test_modeling_tf_roberta.py
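ConfigTester comes from the transformers test suite and exercises the common serialization paths of a config class. A hedged sketch of the kind of round-trip it checks (the real tester's assertions may differ):

import json
from transformers import RobertaConfig

config = RobertaConfig(hidden_size=37)
# dict round-trip should preserve every field
assert RobertaConfig.from_dict(config.to_dict()).hidden_size == 37
# JSON round-trip likewise
assert RobertaConfig.from_dict(json.loads(config.to_json_string())).hidden_size == 37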

Example 4: test_TFRobertaModel

# Required import: import transformers [as alias]
# Or: from transformers import RobertaConfig [as alias]
def test_TFRobertaModel(self):
        from transformers import RobertaConfig, TFRobertaModel
        keras.backend.clear_session()
        # pretrained_weights = 'roberta-base'
        tokenizer_file = 'roberta_roberta-base.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = RobertaConfig()
        model = TFRobertaModel(config)
        predictions = model.predict(inputs)
        # Convert the Keras model to ONNX and verify it reproduces the Keras predictions.
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files)) 
Author: onnx | Project: keras-onnx | Lines: 14 | Source: test_transformers.py
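The commented-out pretrained_weights line suggests this test originally loaded a pretrained checkpoint rather than a freshly initialized config. With network access, the pretrained variant would be:

from transformers import TFRobertaModel

model = TFRobertaModel.from_pretrained("roberta-base")  # downloads and loads the published weights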

Example 5: test_TFRobertaForMaskedLM

# Required import: import transformers [as alias]
# Or: from transformers import RobertaConfig [as alias]
def test_TFRobertaForMaskedLM(self):
        from transformers import RobertaConfig, TFRobertaForMaskedLM
        keras.backend.clear_session()
        # pretrained_weights = 'roberta-base'
        tokenizer_file = 'roberta_roberta-base.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = RobertaConfig()
        model = TFRobertaForMaskedLM(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(
            run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files, rtol=1.e-2,
                             atol=1.e-4)) 
Author: onnx | Project: keras-onnx | Lines: 16 | Source: test_transformers.py

Example 6: test_TFRobertaForSequenceClassification

# Required import: import transformers [as alias]
# Or: from transformers import RobertaConfig [as alias]
def test_TFRobertaForSequenceClassification(self):
        from transformers import RobertaConfig, TFRobertaForSequenceClassification
        keras.backend.clear_session()
        # pretrained_weights = 'roberta-base'
        tokenizer_file = 'roberta_roberta-base.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = RobertaConfig()
        model = TFRobertaForSequenceClassification(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files)) 
Author: onnx | Project: keras-onnx | Lines: 14 | Source: test_transformers.py

Example 7: test_TFRobertaForTokenClassification

# Required import: import transformers [as alias]
# Or: from transformers import RobertaConfig [as alias]
def test_TFRobertaForTokenClassification(self):
        from transformers import RobertaConfig, TFRobertaForTokenClassification
        keras.backend.clear_session()
        # pretrained_weights = 'roberta-base'
        tokenizer_file = 'roberta_roberta-base.pickle'
        tokenizer = self._get_tokenzier(tokenizer_file)
        text, inputs, inputs_onnx = self._prepare_inputs(tokenizer)
        config = RobertaConfig()
        model = TFRobertaForTokenClassification(config)
        predictions = model.predict(inputs)
        onnx_model = keras2onnx.convert_keras(model, model.name)
        self.assertTrue(run_onnx_runtime(onnx_model.graph.name, onnx_model, inputs_onnx, predictions, self.model_files)) 
Author: onnx | Project: keras-onnx | Lines: 14 | Source: test_transformers.py
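The run_onnx_runtime helper used in examples 4 through 7 belongs to the keras-onnx test utilities; conceptually it executes the exported graph with onnxruntime and compares the outputs against the Keras predictions. A simplified stand-alone sketch of that check (the function name, serialization step, and tolerances here are assumptions, not the helper's actual code):

import numpy as np
import onnxruntime

def check_onnx_outputs(onnx_model, inputs_onnx, keras_predictions, rtol=1e-2, atol=1e-4):
    # Serialize the in-memory ONNX model and run it with onnxruntime on CPU.
    session = onnxruntime.InferenceSession(
        onnx_model.SerializeToString(), providers=["CPUExecutionProvider"]
    )
    outputs = session.run(None, inputs_onnx)
    # Compare each ONNX output against the corresponding Keras prediction.
    expected = keras_predictions if isinstance(keras_predictions, (list, tuple)) else [keras_predictions]
    for onnx_out, keras_out in zip(outputs, expected):
        np.testing.assert_allclose(np.asarray(keras_out), onnx_out, rtol=rtol, atol=atol)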


Note: the transformers.RobertaConfig examples above were compiled from open-source projects hosted on GitHub and similar platforms. Copyright in each snippet remains with its original authors; consult the corresponding project's license before using or redistributing the code.