

Python typeguard.check_argument_types Function Code Examples

This article collects typical usage examples of the Python function typeguard.check_argument_types. If you are wondering what check_argument_types does, how to call it, or what real-world usage looks like, the hand-picked code examples below should help.


The following shows 15 code examples of the check_argument_types function, sorted by popularity by default.
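
Before diving into the examples, a minimal sketch of what check_argument_types does may be useful (a hypothetical function, assuming typeguard 2.x, where check_argument_types is available): called inside a function body, it inspects the caller's annotated parameters via the enclosing frame and raises TypeError on a mismatch.

from typing import List

from typeguard import check_argument_types


def mean(values: List[float]) -> float:
    """Return the arithmetic mean of a list of floats."""
    check_argument_types()  # raises TypeError if values is not a List[float]
    return sum(values) / len(values)


mean([1.0, 2.0, 3.0])  # passes the check
mean("oops")           # raises TypeError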

Example 1: __init__

    def __init__(self,
                 name: str,
                 encoders: List[TemporalStateful],
                 vocabulary: Vocabulary,
                 data_id: str,
                 max_output_len: int = None,
                 hidden_dim: int = None,
                 activation: Callable = tf.nn.relu,
                 dropout_keep_prob: float = 1.0,
                 add_start_symbol: bool = False,
                 add_end_symbol: bool = False,
                 reuse: ModelPart = None,
                 save_checkpoint: str = None,
                 load_checkpoint: str = None,
                 initializers: InitializerSpecs = None) -> None:
        check_argument_types()
        ModelPart.__init__(self, name, reuse, save_checkpoint, load_checkpoint,
                           initializers)

        self.encoders = encoders
        self.vocabulary = vocabulary
        self.data_id = data_id
        self.max_output_len = max_output_len
        self.hidden_dim = hidden_dim
        self.activation = activation
        self.dropout_keep_prob = dropout_keep_prob
        self.add_start_symbol = add_start_symbol
        self.add_end_symbol = add_end_symbol
Developer: ufal, Project: neuralmonkey, Lines: 28, Source: sequence_labeler.py

Example 2: beam_search_runner_range

def beam_search_runner_range(
        output_series: str,
        decoder: BeamSearchDecoder,
        max_rank: int = None,
        postprocess: Callable[[List[str]], List[str]] = None) -> List[
            BeamSearchRunner]:
    """Return beam search runners for a range of ranks from 1 to max_rank.

    This means there are max_rank output series, where the n-th series
    contains the n-th best hypothesis from the beam search.

    Args:
        output_series: Prefix of output series.
        decoder: Beam search decoder shared by all runners.
        max_rank: Maximum rank of the hypotheses.
        postprocess: Series-level postprocess applied on output.

    Returns:
        List of beam search runners getting hypotheses with rank from 1 to
        max_rank.
    """
    check_argument_types()

    if max_rank is None:
        max_rank = decoder.beam_size

    if max_rank > decoder.beam_size:
        raise ValueError(
            ("The maximum rank ({}) cannot be "
             "bigger than beam size {}.").format(
                 max_rank, decoder.beam_size))

    return [BeamSearchRunner("{}.rank{:03d}".format(output_series, r),
                             decoder, r, postprocess)
            for r in range(1, max_rank + 1)]
Developer: ufal, Project: neuralmonkey, Lines: 35, Source: beamsearch_runner.py
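
For illustration, the naming scheme above produces one output series per rank; with a hypothetical prefix "bs" and max_rank=3:

["{}.rank{:03d}".format("bs", r) for r in range(1, 3 + 1)]
# -> ['bs.rank001', 'bs.rank002', 'bs.rank003']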

Example 3: __init__

    def __init__(self,
                 name: str,
                 input_sequence: Attendable,
                 hidden_size: int,
                 num_heads: int,
                 output_size: int = None,
                 state_proj_size: int = None,
                 dropout_keep_prob: float = 1.0,
                 reuse: ModelPart = None,
                 save_checkpoint: str = None,
                 load_checkpoint: str = None,
                 initializers: InitializerSpecs = None) -> None:
        """Initialize an instance of the encoder."""
        check_argument_types()
        ModelPart.__init__(self, name, reuse, save_checkpoint, load_checkpoint,
                           initializers)

        self.input_sequence = input_sequence
        self.hidden_size = hidden_size
        self.num_heads = num_heads
        self.output_size = output_size
        self.state_proj_size = state_proj_size
        self.dropout_keep_prob = dropout_keep_prob

        if self.dropout_keep_prob <= 0.0 or self.dropout_keep_prob > 1.0:
            raise ValueError("Dropout keep prob must be inside (0,1].")
Developer: ufal, Project: neuralmonkey, Lines: 26, Source: attentive.py

Example 4: single_tensor

def single_tensor(files: List[str]) -> np.ndarray:
    """Load a single tensor from a numpy file."""
    check_argument_types()
    if len(files) == 1:
        return np.load(files[0])

    return np.concatenate([np.load(f) for f in files], axis=0)
Developer: ufal, Project: neuralmonkey, Lines: 7, Source: numpy_reader.py
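
A hypothetical usage sketch of the reader above (file names invented for illustration, single_tensor imported from the module shown): with more than one file, the arrays are concatenated along the first axis.

import numpy as np

np.save("part1.npy", np.zeros((2, 3)))
np.save("part2.npy", np.ones((2, 3)))

tensor = single_tensor(["part1.npy", "part2.npy"])
print(tensor.shape)  # (4, 3)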

Example 5: __init__

    def __init__(self,
                 decoders: List[Any],
                 decoder_weights: List[ObjectiveWeight] = None,
                 l1_weight: float = 0.,
                 l2_weight: float = 0.,
                 clip_norm: float = None,
                 optimizer: tf.train.Optimizer = None,
                 var_scopes: List[str] = None,
                 var_collection: str = None) -> None:
        check_argument_types()

        if decoder_weights is None:
            decoder_weights = [None for _ in decoders]

        if len(decoder_weights) != len(decoders):
            raise ValueError(
                "decoder_weights (length {}) do not match decoders (length {})"
                .format(len(decoder_weights), len(decoders)))

        objectives = [CostObjective(dec, w)
                      for dec, w in zip(decoders, decoder_weights)]

        GenericTrainer.__init__(
            self,
            objectives=objectives,
            l1_weight=l1_weight,
            l2_weight=l2_weight,
            clip_norm=clip_norm,
            optimizer=optimizer,
            var_scopes=var_scopes,
            var_collection=var_collection)
Developer: ufal, Project: neuralmonkey, Lines: 31, Source: cross_entropy_trainer.py

Example 6: from_file_list

def from_file_list(prefix: str,
                   shape: List[int],
                   suffix: str = "",
                   default_tensor_name: str = "arr_0") -> Callable:
    """Load a list of numpy arrays from a list of .npz numpy files.

    Args:
        prefix: A common prefix for the files in the list.
        shape: The shape of the numpy arrays stored in the referenced files.
        suffix: An optional suffix that will be appended to each path.
        default_tensor_name: Key of the tensors to load from the npz files.

    Returns:
        A generator function that yields the loaded arrays.
    """
    check_argument_types()

    def load(files: List[str]) -> Iterable[np.ndarray]:
        for list_file in files:
            with open(list_file, encoding="utf-8") as f_list:
                for line in f_list:
                    path = os.path.join(prefix, line.rstrip()) + suffix
                    with np.load(path) as npz:
                        arr = npz[default_tensor_name]
                        arr_shape = list(arr.shape)
                        if arr_shape != shape:
                            raise ValueError(
                                "Shapes do not match: expected {}, found {}"
                                .format(shape, arr_shape))
                        yield arr
    return load
Developer: ufal, Project: neuralmonkey, Lines: 31, Source: numpy_reader.py
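
A hypothetical sketch of the file layout the loader above expects (from_file_list imported from the module shown): the list file names one archive per line, each path is resolved as prefix + line + suffix, and the tensor is read from the key default_tensor_name.

import os

import numpy as np

os.makedirs("feats", exist_ok=True)
np.savez("feats/img0.npz", arr_0=np.zeros((7, 7, 512)))

with open("train.txt", "w", encoding="utf-8") as f_out:
    f_out.write("img0\n")

load = from_file_list(prefix="feats", shape=[7, 7, 512], suffix=".npz")
for arr in load(["train.txt"]):
    print(arr.shape)  # (7, 7, 512)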

Example 7: pooling

def pooling(
        prev_layer: tf.Tensor,
        prev_mask: tf.Tensor,
        specification: MaxPoolSpec,
        layer_num: int) -> Tuple[tf.Tensor, tf.Tensor]:
    try:
        check_argument_types()
    except TypeError as err:
        raise ValueError((
            "Specification of a max-pooling layer (number {} in config) "
            'needs to have 4 members: "M", pool size, stride, padding, '
            "was {}").format(layer_num, specification)) from err
    pool_type, pool_size, stride, pad = specification

    if pool_type == "M":
        pool_fn = tf.layers.max_pooling2d
    elif pool_type == "A":
        pool_fn = tf.layers.average_pooling2d
    else:
        raise ValueError(
            ("Unsupported type of pooling: {}, use 'M' for max-pooling or "
             "'A' for average pooling.").format(pool_type))

    if pad not in ["same", "valid"]:
        raise ValueError(
            "Padding must be 'same' or 'valid', was '{}' in layer {}."
            .format(pad, layer_num + 1))

    with tf.variable_scope("layer_{}_max_pool".format(layer_num)):
        # Pass the validated padding mode through to the pooling ops.
        next_layer = pool_fn(prev_layer, pool_size, stride, padding=pad)
        next_mask = tf.layers.max_pooling2d(prev_mask, pool_size, stride,
                                            padding=pad)
    return next_layer, next_mask
Developer: ufal, Project: neuralmonkey, Lines: 32, Source: cnn_encoder.py
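
The try/except around check_argument_types above is a pattern worth noting: typeguard's TypeError is translated into a ValueError whose message points at the offending configuration entry. A minimal standalone sketch of the same idea, using a hypothetical layer specification (assumes typeguard 2.x):

from typing import Tuple

from typeguard import check_argument_types


def parse_layer_spec(spec: Tuple[str, int]) -> Tuple[str, int]:
    """Validate a hypothetical (layer name, layer size) pair."""
    try:
        check_argument_types()
    except TypeError as err:
        raise ValueError(
            "Layer specification needs to be a (name, size) pair, "
            "was {!r}".format(spec)) from err
    return spec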

Example 8: __init__

    def __init__(self,
                 output_series: str,
                 decoder: SupportedDecoder,
                 postprocess: Postprocessor = None) -> None:
        check_argument_types()
        BaseRunner[SupportedDecoder].__init__(self, output_series, decoder)
        self.postprocess = postprocess
Developer: ufal, Project: neuralmonkey, Lines: 7, Source: plain_runner.py

Example 9: mlp_output

def mlp_output(layer_sizes: List[int],
               activation: Callable[[tf.Tensor], tf.Tensor] = tf.tanh,
               dropout_keep_prob: float = 1.0) -> Tuple[OutputProjection, int]:
    """Apply a multilayer perceptron.

    Compute RNN deep output using the multilayer perceptron
    with a specified activation function.
    (Pascanu et al., 2013 [https://arxiv.org/pdf/1312.6026v5.pdf])

    Arguments:
        layer_sizes: A list of sizes of the hidden layers of the MLP.
        dropout_keep_prob: the dropout keep probability
        activation: The activation function to use in each layer.
    """
    check_argument_types()

    def _projection(prev_state, prev_output, ctx_tensors, train_mode):
        mlp_input = tf.concat([prev_state, prev_output] + ctx_tensors, 1)

        return multilayer_projection(mlp_input, layer_sizes,
                                     activation=activation,
                                     dropout_keep_prob=dropout_keep_prob,
                                     train_mode=train_mode,
                                     scope="deep_output_mlp")

    return _projection, layer_sizes[-1]
Developer: ufal, Project: neuralmonkey, Lines: 26, Source: output_projection.py
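
Usage of the factory above might look as follows (hypothetical layer sizes); the second return value is simply the size of the last hidden layer, which the decoder needs in order to build its output matrix.

projection, output_size = mlp_output(layer_sizes=[500, 300],
                                     dropout_keep_prob=0.8)
# output_size == 300, i.e. layer_sizes[-1]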

Example 10: word2vec_vocabulary

def word2vec_vocabulary(w2v: Word2Vec) -> Vocabulary:
    """Return the vocabulary from a word2vec object.

    This is a helper method used from configuration.
    """
    check_argument_types()
    return w2v.vocabulary
Developer: ufal, Project: neuralmonkey, Lines: 7, Source: word2vec.py

Example 11: from_t2t_vocabulary

def from_t2t_vocabulary(path: str,
                        encoding: str = "utf-8") -> "Vocabulary":
    """Load a vocabulary generated during tensor2tensor training.

    Arguments:
        path: The path to the vocabulary file.
        encoding: The encoding of the vocabulary file (defaults to UTF-8).

    Returns:
        The new Vocabulary instance.
    """
    check_argument_types()
    vocabulary = []  # type: List[str]

    with open(path, encoding=encoding) as wordlist:
        for line in wordlist:
            line = line.strip()

            # T2T vocab tends to wrap words in single or double quotes
            if ((line.startswith("'") and line.endswith("'"))
                    or (line.startswith('"') and line.endswith('"'))):
                line = line[1:-1]

            if line in ["<pad>", "<EOS>"]:
                continue

            vocabulary.append(line)

    log("Vocabulary form wordlist loaded, containing {} words"
        .format(len(vocabulary)))
    log_sample(vocabulary)

    return Vocabulary(vocabulary)
Developer: ufal, Project: neuralmonkey, Lines: 33, Source: vocabulary.py
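
A hypothetical wordlist in the tensor2tensor format the loader above handles: one token per line, possibly wrapped in quotes, with the <pad> and <EOS> entries skipped.

with open("vocab.txt", "w", encoding="utf-8") as f_out:
    f_out.write("'<pad>'\n'<EOS>'\n'hello'\n'world'\n")

vocab = from_t2t_vocabulary("vocab.txt")
# vocab now contains "hello" and "world"; <pad> and <EOS> were dropped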

Example 12: linear_encoder_projection

def linear_encoder_projection(dropout_keep_prob: float) -> EncoderProjection:
    """Return a linear encoder projection.

    Return a projection function which applies dropout on the concatenated
    final states of the encoders and returns a linear projection to a tensor
    of size rnn_size.

    Arguments:
        dropout_keep_prob: The dropout keep probability
    """
    check_argument_types()

    def func(train_mode: tf.Tensor,
             rnn_size: int,
             encoders: List[Stateful]) -> tf.Tensor:

        if rnn_size is None:
            raise ValueError(
                "You must supply rnn_size for this type of encoder projection")

        en_concat = concat_encoder_projection(train_mode, None, encoders)

        return dropout(
            tf.layers.dense(en_concat, rnn_size, name="encoders_projection"),
            dropout_keep_prob, train_mode)

    return cast(EncoderProjection, func)
Developer: ufal, Project: neuralmonkey, Lines: 27, Source: encoder_projection.py

Example 13: __init__

    def __init__(self,
                 name: str,
                 input_shape: List[int],
                 data_id: str,
                 projection_dim: int = None,
                 ff_hidden_dim: int = None,
                 reuse: ModelPart = None,
                 save_checkpoint: str = None,
                 load_checkpoint: str = None,
                 initializers: InitializerSpecs = None) -> None:
        """Instantiate SpatialFiller.

        Args:
            name: Name of the model part.
            input_shape: Dimensionality of the input.
            data_id: Name of the data series with numpy objects.
            projection_dim: Optional, dimension of the states projection.
            ff_hidden_dim: Optional, dimension of the feed-forward hidden
                layer; if set, projection_dim must be set as well.
        """
        check_argument_types()
        ModelPart.__init__(
            self, name, reuse, save_checkpoint, load_checkpoint, initializers)

        self.data_id = data_id
        self.input_shape = input_shape
        self.projection_dim = projection_dim
        self.ff_hidden_dim = ff_hidden_dim

        if self.ff_hidden_dim is not None and self.projection_dim is None:
            raise ValueError(
                "projection_dim must be provided when using ff_hidden_dim")

        if len(self.input_shape) != 3:
            raise ValueError("The input shape should have 3 dimensions.")
Developer: ufal, Project: neuralmonkey, Lines: 33, Source: numpy_stateful_filler.py

Example 14: __init__

    def __init__(self, n: int = 4,
                 deduplicate: bool = False,
                 name: str = None,
                 multiple_references_separator: str = None) -> None:
        """Instantiate BLEU evaluator.

        Args:
            n: Longest n-grams considered.
            deduplicate: Flag whether repeated tokens should be treated as one.
            name: Name displayed in the logs and TensorBoard.
            multiple_references_separator: Token that separates multiple
                reference sentences. If ``None``, it assumes the reference is
                one sentence only.
        """
        check_argument_types()

        if name is None:
            name = "BLEU-{}".format(n)
            if deduplicate:
                name += "-dedup"
        super().__init__(name)

        self.n = n
        self.deduplicate = deduplicate
        self.multiple_references_separator = multiple_references_separator
Developer: ufal, Project: neuralmonkey, Lines: 25, Source: bleu.py
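
A hypothetical illustration of the default naming logic above (the class is assumed to be BLEUEvaluator, matching the source file bleu.py):

bleu = BLEUEvaluator()                        # bleu.name == "BLEU-4"
bleu2 = BLEUEvaluator(n=2, deduplicate=True)  # bleu2.name == "BLEU-2-dedup"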

Example 15: __init__

    def __init__(self,
                 name: str,
                 parent_decoder: AutoregressiveDecoder,
                 beam_size: int,
                 max_steps: int,
                 length_normalization: float) -> None:
        """Construct the beam search decoder graph.

        Arguments:
            name: The name for the model part.
            parent_decoder: An autoregressive decoder from which to sample.
            beam_size: The number of hypotheses in the beam.
            max_steps: The maximum number of time steps to perform.
            length_normalization: The alpha parameter from Eq. 14 in the paper.
        """
        check_argument_types()
        ModelPart.__init__(self, name)

        self.parent_decoder = parent_decoder
        self.beam_size = beam_size
        self.length_normalization = length_normalization
        self.max_steps_int = max_steps

        # Create a placeholder for maximum number of steps that is necessary
        # during ensembling, when the decoder is called repetitively with the
        # max_steps attribute set to one.
        self.max_steps = tf.placeholder_with_default(self.max_steps_int, [])

        self._initial_loop_state = None  # type: Optional[BeamSearchLoopState]
Developer: ufal, Project: neuralmonkey, Lines: 29, Source: beam_search_decoder.py
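
The placeholder-with-default trick above is worth a small standalone sketch (TF 1.x API, matching the surrounding code): the graph carries a compile-time default, but a caller, e.g. an ensembling loop, can override the value per session.run call.

import tensorflow as tf

max_steps = tf.placeholder_with_default(50, shape=[])

with tf.Session() as sess:
    print(sess.run(max_steps))                            # 50 (the default)
    print(sess.run(max_steps, feed_dict={max_steps: 1}))  # 1 (overridden)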


Note: The typeguard.check_argument_types examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs; the snippets are taken from open-source projects contributed by various developers. The copyright of the source code remains with the original authors; for distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.