

Python transformer.Transformer Method Code Examples

This article collects typical usage examples of the transformer.Transformer method in Python. If you are wondering how transformer.Transformer is used in practice, or what a working call looks like, the curated code examples below may help. You can also explore other usage examples from the transformer module where this method is defined.


The following presents 11 code examples of the transformer.Transformer method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
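Before the examples, here is a minimal sketch of the two import styles that every example below assumes (each project defines its own Transformer constructor signature, as the examples demonstrate):

import transformer                     # then call transformer.Transformer(...)
from transformer import Transformer    # then call Transformer(...)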

Example 1: _load_models

# Required import: import transformer [as alias]
# Or: from transformer import Transformer [as alias]
def _load_models(self, process_id, sess):
        """
        Loads models and returns them
        """
        logging.debug("Process '%s' - Loading models\n" % (process_id))

        import tensorflow as tf
        models = []
        for i, options in enumerate(self._options):
            with tf.compat.v1.variable_scope("model%d" % i) as scope:
                if options.model_type == "transformer":
                    model = TransformerModel(options)
                else:
                    model = rnn_model.RNNModel(options)
                saver = model_loader.init_or_restore_variables(
                    options, sess, ensemble_scope=scope)
                models.append(model)

        logging.info("NOTE: Length of translations is capped to {}".format(self._options[0].translation_maxlen))
        return models 
Developer: EdinburghNLP, Project: nematus, Lines: 22, Source: server_translator.py

Example 2: InvertOrder

# Required import: import transformer [as alias]
# Or: from transformer import Transformer [as alias]
def InvertOrder(order):
    if order is None:
        return None
    # 'order'[i] maps nat_i -> position of nat_i
    # Inverse: position -> natural idx.  This is the 'true' ordering -- it's how
    # heuristic orders are generated + (less crucially) how Transformer works.
    nin = len(order)
    inv_ordering = [None] * nin
    for natural_idx in range(nin):
        inv_ordering[order[natural_idx]] = natural_idx
    return inv_ordering 
Developer: naru-project, Project: naru, Lines: 13, Source: train_model.py
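As a quick sanity check of the inversion logic above (the values are made up for illustration):

order = [2, 0, 1]          # natural col 0 -> pos 2, col 1 -> pos 0, col 2 -> pos 1
print(InvertOrder(order))  # [1, 2, 0]: pos 0 holds col 1, pos 1 holds col 2, pos 2 holds col 0
assert InvertOrder(InvertOrder(order)) == order  # inverting twice recovers the original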

Example 3: MakeTransformer

# Required import: import transformer [as alias]
# Or: from transformer import Transformer [as alias]
def MakeTransformer(cols_to_train, fixed_ordering, seed=None):
    return transformer.Transformer(
        num_blocks=args.blocks,
        d_model=args.dmodel,
        d_ff=args.dff,
        num_heads=args.heads,
        nin=len(cols_to_train),
        input_bins=[c.DistributionSize() for c in cols_to_train],
        use_positional_embs=True,
        activation=args.transformer_act,
        fixed_ordering=fixed_ordering,
        column_masking=args.column_masking,
        seed=seed,
    ).to(DEVICE) 
Developer: naru-project, Project: naru, Lines: 16, Source: train_model.py

Example 4: InvertOrder

# Required import: import transformer [as alias]
# Or: from transformer import Transformer [as alias]
def InvertOrder(order):
    if order is None:
        return None
    # 'order'[i] maps nat_i -> position of nat_i
    # Inverse: position -> natural idx.  This is the "true" ordering -- it's how
    # heuristic orders are generated + (less crucially) how Transformer works.
    nin = len(order)
    inv_ordering = [None] * nin
    for natural_idx in range(nin):
        inv_ordering[order[natural_idx]] = natural_idx
    return inv_ordering 
Developer: naru-project, Project: naru, Lines: 13, Source: eval_model.py

Example 5: __init__

# Required import: import transformer [as alias]
# Or: from transformer import Transformer [as alias]
def __init__(self, vocabs, inference_layers, embed_dim, ff_embed_dim, num_heads, token_size, rel_size, dropout):
        super(DecodeLayer, self).__init__()
        self.inference_core = Transformer(inference_layers, embed_dim, ff_embed_dim, num_heads, dropout, with_external=True)
        self.token_generator = TokenGenerator(vocabs, embed_dim, token_size, dropout)
        self.dropout = dropout
        self.vocabs = vocabs 
Developer: jcyk, Project: gtos, Lines: 8, Source: decoder.py

Example 6: __init__

# Required import: import transformer [as alias]
# Or: from transformer import Transformer [as alias]
def __init__(self, vocabs, 
                word_char_dim, word_dim,
                concept_char_dim, concept_dim,
                cnn_filters, char2word_dim, char2concept_dim,
                rel_dim, rnn_hidden_size, rnn_num_layers,
                embed_dim, ff_embed_dim, num_heads, dropout,
                snt_layers, graph_layers, inference_layers,
                pretrained_file, device):
        super(Generator, self).__init__()
        self.vocabs = vocabs
        self.concept_encoder = TokenEncoder(vocabs['concept'], vocabs['concept_char'],
                                          concept_char_dim, concept_dim, embed_dim,
                                          cnn_filters, char2concept_dim, dropout, pretrained_file)
        self.relation_encoder = RelationEncoder(vocabs['relation'], rel_dim, embed_dim, rnn_hidden_size, rnn_num_layers, dropout)
        self.token_encoder = TokenEncoder(vocabs['token'], vocabs['token_char'],
                        word_char_dim, word_dim, embed_dim,
                        cnn_filters, char2word_dim, dropout, pretrained_file)

        self.graph_encoder = GraphTransformer(graph_layers, embed_dim, ff_embed_dim, num_heads, dropout)
        self.snt_encoder = Transformer(snt_layers, embed_dim, ff_embed_dim, num_heads, dropout, with_external=True)

        self.embed_dim = embed_dim
        self.embed_scale = math.sqrt(embed_dim)
        self.token_position = SinusoidalPositionalEmbedding(embed_dim, device)
        self.concept_depth = nn.Embedding(32, embed_dim)
        self.token_embed_layer_norm = nn.LayerNorm(embed_dim)
        self.concept_embed_layer_norm = nn.LayerNorm(embed_dim)
        self.self_attn_mask = SelfAttentionMask(device)
        self.decoder = DecodeLayer(vocabs, inference_layers, embed_dim, ff_embed_dim, num_heads, concept_dim, rel_dim, dropout)
        self.dropout = dropout
        self.probe_generator = nn.Linear(embed_dim, embed_dim)
        self.device = device
        self.reset_parameters() 
Developer: jcyk, Project: gtos, Lines: 35, Source: generator.py

Example 7: __init__

# Required import: import transformer [as alias]
# Or: from transformer import Transformer [as alias]
def __init__(self, vocabs, 
                word_char_dim, word_dim,
                concept_char_dim, concept_dim,
                cnn_filters, char2word_dim, char2concept_dim,
                rel_dim, rnn_hidden_size, rnn_num_layers,
                embed_dim, ff_embed_dim, num_heads, dropout,
                snt_layers, graph_layers, inference_layers,
                pretrained_file, device):
        super(Generator, self).__init__()
        self.vocabs = vocabs
        self.concept_encoder = TokenEncoder(vocabs['concept'], vocabs['concept_char'],
                                          concept_char_dim, concept_dim, embed_dim,
                                          cnn_filters, char2concept_dim, dropout, pretrained_file)
        self.relation_encoder = RelationEncoder(vocabs['relation'], rel_dim, embed_dim, rnn_hidden_size, rnn_num_layers, dropout)
        self.token_encoder = TokenEncoder(vocabs['token'], vocabs['token_char'],
                        word_char_dim, word_dim, embed_dim,
                        cnn_filters, char2word_dim, dropout, pretrained_file)

        self.graph_encoder = GraphTransformer(graph_layers, embed_dim, ff_embed_dim, num_heads, dropout)
        self.snt_encoder = Transformer(snt_layers, embed_dim, ff_embed_dim, num_heads, dropout, with_external=True)
        
        self.embed_dim = embed_dim
        self.embed_scale = math.sqrt(embed_dim)
        self.token_position = SinusoidalPositionalEmbedding(embed_dim, device)
        self.concept_depth = nn.Embedding(256, embed_dim)
        self.token_embed_layer_norm = nn.LayerNorm(embed_dim)
        self.concept_embed_layer_norm = nn.LayerNorm(embed_dim)
        self.self_attn_mask = SelfAttentionMask(device)
        self.decoder = DecodeLayer(vocabs, inference_layers, embed_dim, ff_embed_dim, num_heads, concept_dim, rel_dim, dropout)
        self.dropout = dropout
        self.probe_generator = nn.Linear(embed_dim, embed_dim)
        self.device = device
        self.reset_parameters() 
Developer: jcyk, Project: gtos, Lines: 35, Source: generator.py

Example 8: wrangle

# Required import: import transformer [as alias]
# Or: from transformer import Transformer [as alias]
def wrangle(path, out_path):
    """
    An example to show how to use wrangler

    :param path: path to input data file
    :param out_path: path to store normalized data

    """

    spark = SparkSession.builder.getOrCreate()

    data = spark.read.csv(path, header=True, encoding='utf-8')

    functions = [lowercase, trim]

    # hospital cols
    columns = data.columns

    transformer = Transformer(functions, columns)

    data = transformer.transform(data)

    cols_info = list()

    # hospital cols
    for col in data.columns:
        cols_info.append(ColNormInfo(col))

    normalizer = Normalizer(cols_info)

    data = normalizer.normalize(data)

    data.toPandas().to_csv(out_path, index=False, header=True) 
Developer: HoloClean, Project: HoloClean-Legacy-deprecated, Lines: 35, Source: wrangle_data.py
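A hypothetical invocation of the helper above (the file paths are placeholders):

# Reads the CSV with Spark, lowercases and trims every column,
# normalizes the columns, and writes the result to the output path.
wrangle('data/hospital.csv', 'data/hospital_normalized.csv')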

Example 9: Query

# Required import: import transformer [as alias]
# Or: from transformer import Transformer [as alias]
def Query(self, columns, operators, vals):
        # Massages queries into natural order.
        columns, operators, vals = FillInUnqueriedColumns(
            self.table, columns, operators, vals)

        # TODO: we can move these attributes to ctor.
        ordering = None
        if hasattr(self.model, 'orderings'):
            ordering = self.model.orderings[0]
            orderings = self.model.orderings
        elif hasattr(self.model, 'm'):
            # MADE.
            ordering = self.model.m[-1]
            orderings = [self.model.m[-1]]
        else:
            print('****Warning: defaulting to natural order')
            ordering = np.arange(len(columns))
            orderings = [ordering]

        num_orderings = len(orderings)

        # order idx (first/second/... to be sampled) -> x_{natural_idx}.
        inv_ordering = [None] * len(columns)
        for natural_idx in range(len(columns)):
            inv_ordering[ordering[natural_idx]] = natural_idx

        with torch.no_grad():
            inp_buf = self.inp.zero_()
            # Fast (?) path.
            if num_orderings == 1:
                ordering = orderings[0]
                self.OnStart()
                p = self._sample_n(
                    self.num_samples,
                    ordering if isinstance(
                        self.model, transformer.Transformer) else inv_ordering,
                    columns,
                    operators,
                    vals,
                    inp=inp_buf)
                self.OnEnd()
                return np.ceil(p * self.cardinality).astype(dtype=np.int32,
                                                            copy=False)

            # Num orderings > 1.
            ps = []
            self.OnStart()
            for ordering in orderings:
                p_scalar = self._sample_n(self.num_samples // num_orderings,
                                          ordering, columns, operators, vals)
                ps.append(p_scalar)
            self.OnEnd()
            return np.ceil(np.mean(ps) * self.cardinality).astype(
                dtype=np.int32, copy=False) 
Developer: naru-project, Project: naru, Lines: 56, Source: estimators.py
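To make the final estimate concrete, a worked example with made-up numbers: if two orderings yield selectivity estimates of 0.012 and 0.018 over a 1,000,000-row table, the method returns the ceiling of their mean times the cardinality:

import numpy as np

ps = [0.012, 0.018]          # hypothetical per-ordering selectivities
cardinality = 1_000_000
print(np.ceil(np.mean(ps) * cardinality).astype(np.int32))  # 15000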

Example 10: main

# Required import: import transformer [as alias]
# Or: from transformer import Transformer [as alias]
def main():
    # Command line arguments
    args = get_args()
    # Fix seed for consistent sampling
    th.manual_seed(args.seed)
    # data
    vocab, _, _ = load_data()
    # Model
    model = Transformer(
        args.n_layers,
        args.embed_dim,
        args.hidden_dim,
        args.n_heads,
        vocab,
        args.dropout
    )
    if args.cuda:
        model = model.cuda()
    # Load existing model
    model.load_state_dict(th.load(args.model_file, map_location="cpu"))
    # Read from file/stdin
    if args.input_file is not None:
        input_stream = open(args.input_file, "r", encoding="utf-8")
    else:
        input_stream = sys.stdin
    # Write to file/stdout
    if args.output_file is not None:
        output_stream = open(args.output_file, "w", encoding="utf-8")
        # If we're printing to a file, display stats in stdout
        input_stream = tqdm(input_stream)
    else:
        output_stream = sys.stdout
    # Translate
    try:
        for line in input_stream:
            in_words = line.strip().split()
            out_words = translate_sentence(
                model,
                in_words,
                beam_size=args.beam_size,
                search=args.search,
            )
            print(desegment(out_words), file=output_stream)
            output_stream.flush()
    except KeyboardInterrupt:
        pass
    finally:
        input_stream.close()
        output_stream.close() 
Developer: pmichel31415, Project: jsalt-2019-mt-tutorial, Lines: 51, Source: translate.py

Example 11: main

# Required import: import transformer [as alias]
# Or: from transformer import Transformer [as alias]
def main():
    # Command line arguments
    args = get_args()
    # data
    vocab, train_data, valid_data = load_data()
    # Model
    model = Transformer(
        args.n_layers,
        args.embed_dim,
        args.hidden_dim,
        args.n_heads,
        vocab,
        args.dropout
    )
    if args.cuda:
        model = model.cuda()
    # Load existing model
    if os.path.isfile(args.model_file) and not args.overwrite_model:
        model.load_state_dict(th.load(args.model_file))
    # Optimizer
    optim = th.optim.Adam(model.parameters(), lr=args.lr, betas=(0.9, 0.98))
    # Learning rate schedule
    lr_schedule = inverse_sqrt_schedule(2000, args.lr)
    # Dataloader
    train_loader = MTDataLoader(
        train_data,
        max_bsz=args.samples_per_batch,
        max_tokens=args.tokens_per_batch,
        shuffle=True
    )
    valid_loader = MTDataLoader(
        valid_data,
        max_bsz=args.samples_per_batch,
        max_tokens=args.tokens_per_batch,
        shuffle=False
    )
    # Either validate
    if args.validate_only:
        valid_ppl = evaluate_ppl(model, valid_loader)
        print(f"Validation perplexity: {valid_ppl:.2f}")
    else:
        # Train epochs
        best_ppl = 1e12
        for epoch in range(1, args.n_epochs+1):
            print(f"----- Epoch {epoch} -----")
            # Train for one epoch
            model.train()
            train_epoch(model, optim, train_loader,
                        lr_schedule, args.clip_grad)
            # Check dev ppl
            model.eval()
            valid_ppl = evaluate_ppl(model, valid_loader)
            print(f"Validation perplexity: {valid_ppl:.2f}")
            # Early stopping maybe
            if valid_ppl < best_ppl:
                best_ppl = valid_ppl
                print(f"Saving new best model (epoch {epoch} ppl {valid_ppl})")
                th.save(model.state_dict(), args.model_file) 
Developer: pmichel31415, Project: jsalt-2019-mt-tutorial, Lines: 60, Source: training.py
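The training loop above draws learning rates from inverse_sqrt_schedule(2000, args.lr), whose implementation is not shown on this page. As an assumption about its shape (not the tutorial's actual code), a common inverse-square-root schedule with linear warmup can be written as a generator:

import math

def inverse_sqrt_schedule(warmup, lr0):
    # Sketch only: linear warmup for `warmup` steps, then 1/sqrt(step) decay.
    step = 0
    while True:
        step += 1
        yield lr0 * min(step / warmup, math.sqrt(warmup / step))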


Note: the transformer.Transformer method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. For distribution and use, please refer to the corresponding project's license. Do not reproduce this article without permission.