

Python layers.multi_class_target Function Code Examples

This article collects typical usage examples of the Python function tensorflow.contrib.layers.multi_class_target. If you are wondering what multi_class_target does, how to call it, or what its usage looks like in practice, the curated code examples below may help.


The following presents 10 code examples of the multi_class_target function, ordered by popularity; a minimal usage sketch of the function is shown first.
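
All of the examples share the same basic pattern: build a target column with multi_class_target and hand it to a tf.contrib.learn estimator, which derives its loss and evaluation metrics from it. The sketch below illustrates that pattern; it assumes TensorFlow 1.x with the (now removed) tf.contrib modules available, and the input_fn and feature values are placeholders for illustration, loosely mirroring Example 1's setup rather than copying any cited project.

import tensorflow as tf
from tensorflow.contrib import layers

def _input_fn():
    # Placeholder input_fn: a single example with a 4-dimensional feature
    # and an integer class label in [0, 3).
    features = {"feature": tf.constant([[0.1, 0.2, 0.3, 0.4]])}
    labels = tf.constant([[1]])
    return features, labels

feature_columns = [tf.contrib.layers.real_valued_column("feature", dimension=4)]

# multi_class_target describes a 3-way classification head. Estimators in
# tf.contrib.learn accept it as their `target_column` argument (see the
# examples below) and build the softmax cross-entropy loss from it.
target_column = layers.multi_class_target(n_classes=3)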

Example 1: testDNNModel

    def testDNNModel(self):
        """Tests multi-class classification using matrix data as input."""
        cont_features = [tf.contrib.layers.real_valued_column("feature", dimension=4)]

        target_column = layers.multi_class_target(n_classes=3)
        classifier = DNNEstimator(target_column, feature_columns=cont_features, hidden_units=[3, 3])

        classifier.fit(input_fn=_iris_input_fn, steps=1000)
        classifier.evaluate(input_fn=_iris_input_fn, steps=100)
Developer ID: pronobis, Project: tensorflow, Lines of code: 9, Source file: composable_model_test.py

Example 2: single_value_rnn_classifier

def single_value_rnn_classifier(num_classes,
                                num_units,
                                sequence_feature_columns,
                                context_feature_columns=None,
                                cell_type='basic_rnn',
                                cell_dtype=dtypes.float32,
                                num_rnn_layers=1,
                                optimizer_type='SGD',
                                learning_rate=0.1,
                                momentum=None,
                                gradient_clipping_norm=10.0,
                                model_dir=None,
                                config=None):
  """Creates a RNN `Estimator` that predicts single labels.

  Args:
    num_classes: the number of classes for categorization.
    num_units: the size of the RNN cells.
    sequence_feature_columns: An iterable containing all the feature columns
      describing sequence features. All items in the set should be instances
      of classes derived from `FeatureColumn`.
    context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
      steps. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    cell_type: subclass of `RNNCell` or one of 'basic_rnn,' 'lstm' or 'gru'.
    cell_dtype: the dtype of the state and output for the given `cell_type`.
    num_rnn_layers: number of RNN layers.
    optimizer_type: the type of optimizer to use. Either a subclass of
      `Optimizer` or a string.
    learning_rate: learning rate.
    momentum: momentum value. Only used if `optimizer_type` is 'Momentum'.
    gradient_clipping_norm: parameter used for gradient clipping. If `None`,
      then no clipping is performed.
    model_dir: The directory in which to save and restore the model graph,
      parameters, etc.
    config: A `RunConfig` instance.
  Returns:
    An initialized instance of `_SingleValueRNNEstimator`.
  """
  optimizer = _get_optimizer(optimizer_type, learning_rate, momentum)
  cell = _get_rnn_cell(cell_type, num_units, num_rnn_layers)
  target_column = layers.multi_class_target(n_classes=num_classes)
  return _SingleValueRNNEstimator(cell,
                                  target_column,
                                  optimizer,
                                  sequence_feature_columns,
                                  context_feature_columns,
                                  model_dir,
                                  config,
                                  gradient_clipping_norm,
                                  dtype=cell_dtype)
Developer ID: 821760408-sp, Project: tensorflow, Lines of code: 52, Source file: dynamic_rnn_estimator.py
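
For orientation, a hypothetical call to the factory in Example 2 might look as follows. The sequence feature column "measurements", the hyperparameter values, the model_dir path, and train_input_fn are illustrative assumptions, not code from the cited project; internally the factory builds its target column via layers.multi_class_target(n_classes=num_classes), as shown above.

# Hypothetical usage sketch of single_value_rnn_classifier (all names below
# are placeholders chosen for illustration).
seq_columns = [tf.contrib.layers.real_valued_column("measurements", dimension=8)]
estimator = single_value_rnn_classifier(
    num_classes=2,                        # binary classification
    num_units=16,                         # RNN cell size
    sequence_feature_columns=seq_columns,
    cell_type='lstm',
    learning_rate=0.05,
    model_dir='/tmp/single_value_rnn')    # placeholder directory
# estimator.fit(input_fn=train_input_fn, steps=1000)  # train_input_fn assumed to exist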

Example 3: testLinearModel

  def testLinearModel(self):
    """Tests that loss goes down with training."""

    def input_fn():
      return {
          'age': tf.constant([1]),
          'language': tf.SparseTensor(values=['english'],
                                      indices=[[0, 0]],
                                      shape=[1, 1])
      }, tf.constant([[1]])

    language = tf.contrib.layers.sparse_column_with_hash_bucket('language', 100)
    age = tf.contrib.layers.real_valued_column('age')

    target_column = layers.multi_class_target(n_classes=2)
    classifier = LinearEstimator(target_column,
                                 feature_columns=[age, language])

    classifier.fit(input_fn=input_fn, steps=1000)
    loss1 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    classifier.fit(input_fn=input_fn, steps=2000)
    loss2 = classifier.evaluate(input_fn=input_fn, steps=1)['loss']
    self.assertLess(loss2, loss1)
    self.assertLess(loss2, 0.01)
Developer ID: 2020zyc, Project: tensorflow, Lines of code: 24, Source file: composable_model_test.py

Example 4: testJointLinearModel

    def testJointLinearModel(self):
        """Tests that loss goes down with training."""

        def input_fn():
            return (
                {
                    "age": tf.SparseTensor(values=["1"], indices=[[0, 0]], shape=[1, 1]),
                    "language": tf.SparseTensor(values=["english"], indices=[[0, 0]], shape=[1, 1]),
                },
                tf.constant([[1]]),
            )

        language = tf.contrib.layers.sparse_column_with_hash_bucket("language", 100)
        age = tf.contrib.layers.sparse_column_with_hash_bucket("age", 2)

        target_column = layers.multi_class_target(n_classes=2)
        classifier = JointLinearEstimator(target_column, feature_columns=[age, language])

        classifier.fit(input_fn=input_fn, steps=1000)
        loss1 = classifier.evaluate(input_fn=input_fn, steps=1)["loss"]
        classifier.fit(input_fn=input_fn, steps=2000)
        loss2 = classifier.evaluate(input_fn=input_fn, steps=1)["loss"]
        self.assertLess(loss2, loss1)
        self.assertLess(loss2, 0.01)
Developer ID: pronobis, Project: tensorflow, Lines of code: 24, Source file: composable_model_test.py

Example 5: __init__

    def __init__(
        self,
        model_dir=None,
        n_classes=2,
        weight_column_name=None,
        linear_feature_columns=None,
        linear_optimizer=None,
        dnn_feature_columns=None,
        dnn_optimizer=None,
        dnn_hidden_units=None,
        dnn_activation_fn=nn.relu,
        dnn_dropout=None,
        gradient_clip_norm=None,
        enable_centered_bias=True,
        config=None,
    ):
        """Constructs a DNNLinearCombinedClassifier instance.

    Args:
      model_dir: Directory to save model parameters, graph, etc. This can also
        be used to load checkpoints from the directory into an estimator to
        continue training a previously saved model.
      n_classes: number of target classes. Default is binary classification.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training.
        It will be multiplied by the loss of the example.
      linear_feature_columns: An iterable containing all the feature columns
        used by linear part of the model. All items in the set must be
        instances of classes derived from `FeatureColumn`.
      linear_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the linear part of the model. If `None`, will use a FTRL optimizer.
      dnn_feature_columns: An iterable containing all the feature columns used
        by deep part of the model. All items in the set must be instances of
        classes derived from `FeatureColumn`.
      dnn_optimizer: An instance of `tf.Optimizer` used to apply gradients to
        the deep part of the model. If `None`, will use an Adagrad optimizer.
      dnn_hidden_units: List of hidden units per layer. All layers are fully
        connected.
      dnn_activation_fn: Activation function applied to each layer. If `None`,
        will use `tf.nn.relu`.
      dnn_dropout: When not None, the probability we will drop out
        a given coordinate.
      gradient_clip_norm: A float > 0. If provided, gradients are clipped
        to their global norm with this clipping ratio. See
        tf.clip_by_global_norm for more details.
      enable_centered_bias: A bool. If True, estimator will learn a centered
        bias variable for each class. Rest of the model structure learns the
        residual after centered bias.
      config: RunConfig object to configure the runtime settings.

    Raises:
      ValueError: If `n_classes` < 2.
      ValueError: If both `linear_feature_columns` and `dnn_feature_columns`
        are empty at the same time.
    """

        if n_classes < 2:
            raise ValueError("n_classes should be greater than 1. Given: {}".format(n_classes))
        target_column = layers.multi_class_target(n_classes=n_classes, weight_column_name=weight_column_name)
        super(DNNLinearCombinedClassifier, self).__init__(
            model_dir=model_dir,
            linear_feature_columns=linear_feature_columns,
            linear_optimizer=linear_optimizer,
            dnn_feature_columns=dnn_feature_columns,
            dnn_optimizer=dnn_optimizer,
            dnn_hidden_units=dnn_hidden_units,
            dnn_activation_fn=dnn_activation_fn,
            dnn_dropout=dnn_dropout,
            gradient_clip_norm=gradient_clip_norm,
            enable_centered_bias=enable_centered_bias,
            target_column=target_column,
            config=config,
        )
Developer ID: jinniahn, Project: tensorflow, Lines of code: 73, Source file: dnn_linear_combined.py
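
For comparison, a rough sketch of how this constructor is typically invoked is shown below. The feature columns (reusing the "language" and "age" names from Examples 3 and 4), the embedding dimension, hidden units, model_dir path, and train_input_fn are assumptions for illustration only; note that the multi_class_target column is created inside the constructor from n_classes and weight_column_name.

# Hypothetical wide-and-deep configuration (placeholder feature columns).
language = tf.contrib.layers.sparse_column_with_hash_bucket("language", 100)
age = tf.contrib.layers.real_valued_column("age")
classifier = DNNLinearCombinedClassifier(
    n_classes=3,
    linear_feature_columns=[language],
    dnn_feature_columns=[tf.contrib.layers.embedding_column(language, dimension=8),
                         age],
    dnn_hidden_units=[16, 8],
    model_dir="/tmp/wide_deep")           # placeholder directory
# classifier.fit(input_fn=train_input_fn, steps=1000)  # train_input_fn assumed to exist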

Example 6: multi_value_rnn_classifier

def multi_value_rnn_classifier(num_classes,
                               num_units,
                               num_unroll,
                               batch_size,
                               input_key_column_name,
                               sequence_feature_columns,
                               context_feature_columns=None,
                               num_rnn_layers=1,
                               optimizer_type='SGD',
                               learning_rate=0.1,
                               predict_probabilities=False,
                               momentum=None,
                               gradient_clipping_norm=5.0,
                               input_keep_probability=None,
                               output_keep_probability=None,
                               model_dir=None,
                               config=None,
                               feature_engineering_fn=None,
                               num_threads=3,
                               queue_capacity=1000):
  """Creates a RNN `Estimator` that predicts sequences of labels.

  Args:
    num_classes: The number of classes for categorization.
    num_units: The size of the RNN cells.
    num_unroll: Python integer, how many time steps to unroll at a time.
      The input sequences of length `k` are then split into `k / num_unroll`
      many segments.
    batch_size: Python integer, the size of the minibatch.
    input_key_column_name: Python string, the name of the feature column
      containing a string scalar `Tensor` that serves as a unique key to
      identify input sequence across minibatches.
    sequence_feature_columns: An iterable containing all the feature columns
      describing sequence features. All items in the set should be instances
      of classes derived from `FeatureColumn`.
    context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
      steps. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    num_rnn_layers: Number of RNN layers.
    optimizer_type: The type of optimizer to use. Either a subclass of
      `Optimizer`, an instance of an `Optimizer` or a string. Strings must be
      one of 'Adagrad', 'Momentum' or 'SGD'.
    learning_rate: Learning rate. This argument has no effect if `optimizer`
      is an instance of an `Optimizer`.
    predict_probabilities: A boolean indicating whether to predict probabilities
      for all classes.
    momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
    gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
      then no clipping is performed.
    input_keep_probability: Probability to keep inputs to `cell`. If `None`,
      no dropout is applied.
    output_keep_probability: Probability to keep outputs of `cell`. If `None`,
      no dropout is applied.
    model_dir: The directory in which to save and restore the model graph,
      parameters, etc.
    config: A `RunConfig` instance.
    feature_engineering_fn: Takes features and labels which are the output of
      `input_fn` and returns features and labels which will be fed into
      `model_fn`. Please check `model_fn` for a definition of features and
      labels.
    num_threads: The Python integer number of threads enqueuing input examples
      into a queue. Defaults to 3.
    queue_capacity: The max capacity of the queue in number of examples.
      Needs to be at least `batch_size`. Defaults to 1000. When iterating
      over the same input example multiple times reusing their keys the
      `queue_capacity` must be smaller than the number of examples.
  Returns:
    An initialized `Estimator`.
  """
  cell = lstm_cell(num_units, num_rnn_layers)
  target_column = layers.multi_class_target(n_classes=num_classes)
  if optimizer_type == 'Momentum':
    optimizer_type = momentum_opt.MomentumOptimizer(learning_rate, momentum)
  rnn_model_fn = _get_rnn_model_fn(
      cell=cell,
      target_column=target_column,
      problem_type=ProblemType.CLASSIFICATION,
      optimizer=optimizer_type,
      num_unroll=num_unroll,
      num_layers=num_rnn_layers,
      num_threads=num_threads,
      queue_capacity=queue_capacity,
      batch_size=batch_size,
      input_key_column_name=input_key_column_name,
      sequence_feature_columns=sequence_feature_columns,
      context_feature_columns=context_feature_columns,
      predict_probabilities=predict_probabilities,
      learning_rate=learning_rate,
      gradient_clipping_norm=gradient_clipping_norm,
      input_keep_probability=input_keep_probability,
      output_keep_probability=output_keep_probability,
      name='MultiValueRnnClassifier')

  return estimator.Estimator(
      model_fn=rnn_model_fn,
      model_dir=model_dir,
      config=config,
      feature_engineering_fn=feature_engineering_fn)
Developer ID: ivankreso, Project: tensorflow, Lines of code: 99, Source file: state_saving_rnn_estimator.py

Example 7: __init__

  def __init__(self,
               problem_type,
               num_units,
               num_unroll,
               batch_size,
               sequence_feature_columns,
               context_feature_columns=None,
               num_classes=None,
               num_rnn_layers=1,
               optimizer_type='SGD',
               learning_rate=0.1,
               predict_probabilities=False,
               momentum=None,
               gradient_clipping_norm=5.0,
               dropout_keep_probabilities=None,
               model_dir=None,
               config=None,
               feature_engineering_fn=None,
               num_threads=3,
               queue_capacity=1000,
               seed=None):
    """Initializes a StateSavingRnnEstimator.

    Args:
      problem_type: `ProblemType.CLASSIFICATION` or
        `ProblemType.LINEAR_REGRESSION`.
      num_units: The size of the RNN cells.
      num_unroll: Python integer, how many time steps to unroll at a time.
        The input sequences of length `k` are then split into `k / num_unroll`
        many segments.
      batch_size: Python integer, the size of the minibatch.
      sequence_feature_columns: An iterable containing all the feature columns
        describing sequence features. All items in the set should be instances
        of classes derived from `FeatureColumn`.
      context_feature_columns: An iterable containing all the feature columns
        describing context features, i.e., features that apply across all time
        steps. All items in the set should be instances of classes derived from
        `FeatureColumn`.
      num_classes: The number of classes for categorization. Used only and
        required if `problem_type` is `ProblemType.CLASSIFICATION`
      num_rnn_layers: Number of RNN layers.
      optimizer_type: The type of optimizer to use. Either a subclass of
        `Optimizer`, an instance of an `Optimizer` or a string. Strings must be
        one of 'Adagrad', 'Adam', 'Ftrl', 'Momentum', 'RMSProp', or 'SGD'.
      learning_rate: Learning rate. This argument has no effect if `optimizer`
        is an instance of an `Optimizer`.
      predict_probabilities: A boolean indicating whether to predict
        probabilities for all classes. Used only if `problem_type` is
        `ProblemType.CLASSIFICATION`.
      momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
      gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
        then no clipping is performed.
      dropout_keep_probabilities: a list of dropout keep probabilities or
        `None`. If given a list, it must have length `num_rnn_layers + 1`.
      model_dir: The directory in which to save and restore the model graph,
        parameters, etc.
      config: A `RunConfig` instance.
      feature_engineering_fn: Takes features and labels which are the output of
        `input_fn` and returns features and labels which will be fed into
        `model_fn`. Please check `model_fn` for a definition of features and
        labels.
      num_threads: The Python integer number of threads enqueuing input examples
        into a queue. Defaults to 3.
      queue_capacity: The max capacity of the queue in number of examples.
        Needs to be at least `batch_size`. Defaults to 1000. When iterating
        over the same input example multiple times reusing their keys the
        `queue_capacity` must be smaller than the number of examples.
      seed: Fixes the random seed used for generating input keys by the SQSS.

    Raises:
      ValueError: `problem_type` is not one of
        `ProblemType.LINEAR_REGRESSION` or `ProblemType.CLASSIFICATION`.
      ValueError: `problem_type` is `ProblemType.CLASSIFICATION` but
        `num_classes` is not specified.
    """

    name = 'MultiValueStateSavingRNN'
    if problem_type == constants.ProblemType.LINEAR_REGRESSION:
      name += 'Regressor'
      target_column = layers.regression_target()
    elif problem_type == constants.ProblemType.CLASSIFICATION:
      if not num_classes:
        raise ValueError('For CLASSIFICATION problem_type, num_classes must be '
                         'specified.')
      target_column = layers.multi_class_target(n_classes=num_classes)
      name += 'Classifier'
    else:
      raise ValueError(
          'problem_type must be either ProblemType.LINEAR_REGRESSION '
          'or ProblemType.CLASSIFICATION; got {}'.format(
              problem_type))

    if optimizer_type == 'Momentum':
      optimizer_type = momentum_opt.MomentumOptimizer(learning_rate, momentum)

    rnn_model_fn = _get_rnn_model_fn(
        target_column=target_column,
        problem_type=problem_type,
        optimizer=optimizer_type,
        num_unroll=num_unroll,
#......... (the rest of the code is omitted) .........
Developer ID: Immexxx, Project: tensorflow, Lines of code: 101, Source file: state_saving_rnn_estimator.py

Example 8: single_value_rnn_classifier

def single_value_rnn_classifier(num_classes,
                                num_units,
                                sequence_feature_columns,
                                context_feature_columns=None,
                                cell_type='basic_rnn',
                                num_rnn_layers=1,
                                optimizer_type='SGD',
                                learning_rate=0.1,
                                predict_probabilities=False,
                                momentum=None,
                                gradient_clipping_norm=10.0,
                                input_keep_probability=None,
                                output_keep_probability=None,
                                model_dir=None,
                                config=None,
                                params=None,
                                feature_engineering_fn=None):
  """Creates a RNN `Estimator` that predicts single labels.

  Args:
    num_classes: The number of classes for categorization.
    num_units: The size of the RNN cells.
    sequence_feature_columns: An iterable containing all the feature columns
      describing sequence features. All items in the set should be instances
      of classes derived from `FeatureColumn`.
    context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
      steps. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    cell_type: A subclass of `RNNCell`, an instance of an `RNNCell` or one of
      'basic_rnn,' 'lstm' or 'gru'.
    num_rnn_layers: Number of RNN layers.
    optimizer_type: The type of optimizer to use. Either a subclass of
      `Optimizer`, an instance of an `Optimizer` or a string. Strings must be
      one of 'Adagrad', 'Momentum' or 'SGD'.
    learning_rate: Learning rate.
    predict_probabilities: A boolean indicating whether to predict probabilities
      for all classes.
    momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
    gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
      then no clipping is performed.
    input_keep_probability: Probability to keep inputs to `cell`. If `None`,
      no dropout is applied.
    output_keep_probability: Probability to keep outputs of `cell`. If `None`,
      no dropout is applied.
    model_dir: The directory in which to save and restore the model graph,
      parameters, etc.
    config: A `RunConfig` instance.
    params: `dict` of hyperparameters. Passed through to `Estimator`.
    feature_engineering_fn: Takes features and labels which are the output of
      `input_fn` and returns features and labels which will be fed into
      `model_fn`. Please check `model_fn` for a definition of features and
      labels.
  Returns:
    An initialized `Estimator`.
  """
  cell = _to_rnn_cell(cell_type, num_units, num_rnn_layers)
  target_column = layers.multi_class_target(n_classes=num_classes)
  if optimizer_type == 'Momentum':
    optimizer_type = momentum_opt.MomentumOptimizer(learning_rate, momentum)
  dynamic_rnn_model_fn = _get_dynamic_rnn_model_fn(
      cell=cell,
      target_column=target_column,
      problem_type=ProblemType.CLASSIFICATION,
      prediction_type=PredictionType.SINGLE_VALUE,
      optimizer=optimizer_type,
      sequence_feature_columns=sequence_feature_columns,
      context_feature_columns=context_feature_columns,
      predict_probabilities=predict_probabilities,
      learning_rate=learning_rate,
      gradient_clipping_norm=gradient_clipping_norm,
      input_keep_probability=input_keep_probability,
      output_keep_probability=output_keep_probability,
      name='SingleValueRnnClassifier')

  return estimator.Estimator(model_fn=dynamic_rnn_model_fn,
                             model_dir=model_dir,
                             config=config,
                             params=params,
                             feature_engineering_fn=feature_engineering_fn)
Developer ID: HKUST-SING, Project: tensorflow, Lines of code: 80, Source file: dynamic_rnn_estimator.py

Example 9: single_value_rnn_classifier

def single_value_rnn_classifier(
    num_classes,
    num_units,
    sequence_feature_columns,
    context_feature_columns=None,
    cell_type="basic_rnn",
    num_rnn_layers=1,
    optimizer_type="SGD",
    learning_rate=0.1,
    predict_probabilities=False,
    momentum=None,
    gradient_clipping_norm=5.0,
    input_keep_probability=None,
    output_keep_probability=None,
    model_dir=None,
    config=None,
    feature_engineering_fn=None,
):
    """Creates a RNN `Estimator` that predicts single labels.

  The input function passed to this `Estimator` optionally contains the key
  `RNNKeys.SEQUENCE_LENGTH_KEY`. The value corresponding to
  `RNNKeys.SEQUENCE_LENGTH_KEY` must be a vector of size `batch_size` where entry
  `n` corresponds to the length of the `n`th sequence in the batch. The sequence
  length feature is required for batches of varying sizes. It will be used to
  calculate loss and evaluation metrics.

  In order to specify an initial state, the input function must include keys
  `STATE_PREFIX_i` for all `0 <= i < n` where `n` is the number of nested
  elements in `cell.state_size`. The input function must contain values for all
  state components or none of them. If none are included, then the default
  (zero) state is used as an initial state. See the documentation for
  `dict_to_state_tuple` and `state_tuple_to_dict` for further details.

  The `predict()` method of the `Estimator` returns a dictionary with keys
  `RNNKeys.PREDICTIONS_KEY` and `STATE_PREFIX_i` for `0 <= i < n` where `n` is
  the number of nested elements in `cell.state_size`. The value keyed by
  `RNNKeys.PREDICTIONS_KEY` has shape `[batch_size, padded_length]`.  Here,
  `padded_length` is the largest value in the `RNNKeys.SEQUENCE_LENGTH` `Tensor`
  passed as input. Entry `[i, j]` is the prediction associated with sequence `i`
  and time step `j`.

  If `predict_probabilities` is set to true, the `dict` returned by `predict()`
  contains an additional key `RNNKeys.PROBABILITIES_KEY`. The associated array
  has shape `[batch_size, num_classes]` where entry `[i, j]`
  is the probability assigned to class `j` in sequence `i` of the batch.

  Args:
    num_classes: The number of classes for categorization.
    num_units: The size of the RNN cells. This argument has no effect
      if `cell_type` is an instance of `RNNCell`.
    sequence_feature_columns: An iterable containing all the feature columns
      describing sequence features. All items in the set should be instances
      of classes derived from `FeatureColumn`.
    context_feature_columns: An iterable containing all the feature columns
      describing context features, i.e., features that apply across all time
      steps. All items in the set should be instances of classes derived from
      `FeatureColumn`.
    cell_type: A subclass of `RNNCell`, an instance of an `RNNCell` or one of
      'basic_rnn,' 'lstm' or 'gru'.
    num_rnn_layers: Number of RNN layers. Leave this at its default value 1
      if passing a `cell_type` that is already a MultiRNNCell.
    optimizer_type: The type of optimizer to use. Either a subclass of
      `Optimizer`, an instance of an `Optimizer` or a string. Strings must be
      one of 'Adagrad', 'Momentum' or 'SGD'.
    learning_rate: Learning rate. This argument has no effect if `optimizer`
      is an instance of an `Optimizer`.
    predict_probabilities: A boolean indicating whether to predict probabilities
      for all classes.
    momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
    gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
      then no clipping is performed.
    input_keep_probability: Probability to keep inputs to `cell`. If `None`,
      no dropout is applied.
    output_keep_probability: Probability to keep outputs of `cell`. If `None`,
      no dropout is applied.
    model_dir: The directory in which to save and restore the model graph,
      parameters, etc.
    config: A `RunConfig` instance.
    feature_engineering_fn: Takes features and labels which are the output of
      `input_fn` and returns features and labels which will be fed into
      `model_fn`. Please check `model_fn` for a definition of features and
      labels.
  Returns:
    An initialized `Estimator`.
  """
    cell = _to_rnn_cell(cell_type, num_units, num_rnn_layers)
    target_column = layers.multi_class_target(n_classes=num_classes)
    if optimizer_type == "Momentum":
        optimizer_type = momentum_opt.MomentumOptimizer(learning_rate, momentum)
    dynamic_rnn_model_fn = _get_dynamic_rnn_model_fn(
        cell=cell,
        target_column=target_column,
        problem_type=ProblemType.CLASSIFICATION,
        prediction_type=PredictionType.SINGLE_VALUE,
        optimizer=optimizer_type,
        sequence_feature_columns=sequence_feature_columns,
        context_feature_columns=context_feature_columns,
        predict_probabilities=predict_probabilities,
        learning_rate=learning_rate,
#......... (the rest of the code is omitted) .........
Developer ID: kdavis-mozilla, Project: tensorflow, Lines of code: 101, Source file: dynamic_rnn_estimator.py

Example 10: __init__


#......... (part of the code before this point is omitted) .........
    `True`, it will also include the key `PredictionKey.PROBABILITIES`.

    Args:
      problem_type: whether the `Estimator` is intended for a regression or
        classification problem. Value must be one of
        `ProblemType.CLASSIFICATION` or `ProblemType.LINEAR_REGRESSION`.
      prediction_type: whether the `Estimator` should return a value for each
        step in the sequence, or just a single value for the final time step.
        Must be one of `PredictionType.SINGLE_VALUE` or
        `PredictionType.MULTIPLE_VALUE`.
      sequence_feature_columns: An iterable containing all the feature columns
        describing sequence features. All items in the iterable should be
        instances of classes derived from `FeatureColumn`.
      context_feature_columns: An iterable containing all the feature columns
        describing context features, i.e., features that apply across all time
        steps. All items in the set should be instances of classes derived from
        `FeatureColumn`.
      num_classes: the number of classes for a classification problem. Only
        used when `problem_type=ProblemType.CLASSIFICATION`.
      num_units: A list of integers indicating the number of units in the
        `RNNCell`s in each layer.
      cell_type: A subclass of `RNNCell` or one of 'basic_rnn,' 'lstm' or 'gru'.
      optimizer: The type of optimizer to use. Either a subclass of
        `Optimizer`, an instance of an `Optimizer`, a callback that returns an
        optimizer, or a string. Strings must be one of 'Adagrad', 'Adam',
        'Ftrl', 'Momentum', 'RMSProp' or 'SGD'. See `layers.optimize_loss` for
        more details.
      learning_rate: Learning rate. This argument has no effect if `optimizer`
        is an instance of an `Optimizer`.
      predict_probabilities: A boolean indicating whether to predict
        probabilities for all classes. Used only if `problem_type` is
        `ProblemType.CLASSIFICATION`
      momentum: Momentum value. Only used if `optimizer_type` is 'Momentum'.
      gradient_clipping_norm: Parameter used for gradient clipping. If `None`,
        then no clipping is performed.
      dropout_keep_probabilities: a list of dropout probabilities or `None`.
        If a list is given, it must have length `len(num_units) + 1`. If
        `None`, then no dropout is applied.
      model_dir: The directory in which to save and restore the model graph,
        parameters, etc.
      feature_engineering_fn: Takes features and labels which are the output of
        `input_fn` and returns features and labels which will be fed into
        `model_fn`. Please check `model_fn` for a definition of features and
        labels.
      config: A `RunConfig` instance.

    Raises:
      ValueError: `problem_type` is not one of
        `ProblemType.LINEAR_REGRESSION` or `ProblemType.CLASSIFICATION`.
      ValueError: `problem_type` is `ProblemType.CLASSIFICATION` but
        `num_classes` is not specified.
      ValueError: `prediction_type` is not one of
        `PredictionType.MULTIPLE_VALUE` or `PredictionType.SINGLE_VALUE`.
    """
    if prediction_type == rnn_common.PredictionType.MULTIPLE_VALUE:
      name = 'MultiValueDynamicRNN'
    elif prediction_type == rnn_common.PredictionType.SINGLE_VALUE:
      name = 'SingleValueDynamicRNN'
    else:
      raise ValueError(
          'prediction_type must be one of PredictionType.MULTIPLE_VALUE or '
          'PredictionType.SINGLE_VALUE; got {}'.format(prediction_type))

    if problem_type == constants.ProblemType.LINEAR_REGRESSION:
      name += 'Regressor'
      target_column = layers.regression_target()
    elif problem_type == constants.ProblemType.CLASSIFICATION:
      if not num_classes:
        raise ValueError('For CLASSIFICATION problem_type, num_classes must be '
                         'specified.')
      target_column = layers.multi_class_target(n_classes=num_classes)
      name += 'Classifier'
    else:
      raise ValueError(
          'problem_type must be either ProblemType.LINEAR_REGRESSION '
          'or ProblemType.CLASSIFICATION; got {}'.format(
              problem_type))

    if optimizer == 'Momentum':
      optimizer = momentum_opt.MomentumOptimizer(learning_rate, momentum)
    dynamic_rnn_model_fn = _get_dynamic_rnn_model_fn(
        cell_type=cell_type,
        num_units=num_units,
        target_column=target_column,
        problem_type=problem_type,
        prediction_type=prediction_type,
        optimizer=optimizer,
        sequence_feature_columns=sequence_feature_columns,
        context_feature_columns=context_feature_columns,
        predict_probabilities=predict_probabilities,
        learning_rate=learning_rate,
        gradient_clipping_norm=gradient_clipping_norm,
        dropout_keep_probabilities=dropout_keep_probabilities,
        name=name)

    super(DynamicRnnEstimator, self).__init__(
        model_fn=dynamic_rnn_model_fn,
        model_dir=model_dir,
        config=config,
        feature_engineering_fn=feature_engineering_fn)
Developer ID: sandeepgupta2k4, Project: tensorflow, Lines of code: 101, Source file: dynamic_rnn_estimator.py


Note: The tensorflow.contrib.layers.multi_class_target function examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective developers; copyright of the source code belongs to the original authors. Please consult the corresponding project's license before distributing or using the code, and do not reproduce this article without permission.