

Python tensorflow.Dataset Method Code Examples

This article collects typical usage examples of the tensorflow.Dataset method (the tf.data.Dataset API) in Python. If you are unsure how tensorflow.Dataset is used in practice, the curated code examples below may help. You can also explore further usage examples from the tensorflow package in which this method is defined.


A total of 15 code examples of the tensorflow.Dataset method are shown below, sorted by popularity by default.
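
Before diving into the examples, here is a minimal, self-contained sketch of the underlying tf.data.Dataset API, assuming TensorFlow 2.x (2.4+ for tf.data.AUTOTUNE); the tensor values and pipeline steps are purely illustrative:

import tensorflow as tf

# Build a dataset from in-memory tensors (values are illustrative).
features = tf.constant([[1.0, 2.0], [3.0, 4.0], [5.0, 6.0]])
labels = tf.constant([0, 1, 0])
ds = tf.data.Dataset.from_tensor_slices((features, labels))

# A typical input pipeline: shuffle, map a transformation, batch, prefetch.
ds = (ds.shuffle(buffer_size=3)
        .map(lambda x, y: (x / 10.0, y))
        .batch(2)
        .prefetch(tf.data.AUTOTUNE))

# In TF 2.x a Dataset is directly iterable in eager mode.
for batch_x, batch_y in ds:
    print(batch_x.shape, batch_y.numpy())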

Example 1: transform

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def transform(self, fn, **args):
    """Construct a new dataset by applying a transformation to every sample in this dataset.

    The argument is a function that can be called as follows:

    >>> newx, newy, neww = fn(x, y, w)

    It might be called only once with the whole dataset, or multiple
    times with different subsets of the data.  Each time it is called,
    it should transform the samples and return the transformed data.

    Parameters
    ----------
    fn: function
      A function to apply to each sample in the dataset

    Returns
    -------
    a newly constructed Dataset object
    """
    raise NotImplementedError() 
Developer: deepchem, Project: deepchem, Lines of code: 23, Source file: datasets.py
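
The method above is abstract and only defines the contract. Below is a minimal sketch of how an in-memory subclass might implement it; the InMemoryDataset name and constructor are hypothetical, assuming samples are stored as NumPy arrays X, y, w, ids and that fn follows the (x, y, w) signature from the docstring:

import numpy as np

class InMemoryDataset:  # hypothetical stand-in for a concrete Dataset subclass
  def __init__(self, X, y, w, ids):
    self.X, self.y, self.w, self.ids = X, y, w, ids

  def transform(self, fn, **args):
    # Call fn once on the whole dataset and wrap the transformed arrays
    # in a new object, leaving this dataset unchanged.
    newx, newy, neww = fn(self.X, self.y, self.w)
    return InMemoryDataset(newx, newy, neww, self.ids)

# Usage: scale features while keeping labels and weights untouched.
ds = InMemoryDataset(np.arange(6.0).reshape(3, 2), np.zeros(3), np.ones(3), np.arange(3))
scaled = ds.transform(lambda x, y, w: (x / x.max(), y, w))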

Example 2: make_pytorch_dataset

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def make_pytorch_dataset(self, epochs=1, deterministic=False):
    """Create a torch.utils.data.IterableDataset that iterates over the data in this Dataset.

    Each value returned by the Dataset's iterator is a tuple of (X, y,
    w, id) for one sample.

    Parameters
    ----------
    epochs: int
      the number of times to iterate over the Dataset
    deterministic: bool
      if True, the data is produced in order.  If False, a different
      random permutation of the data is used for each epoch.

    Returns
    -------
    `torch.utils.data.IterableDataset` that iterates over the data in
    this dataset.
    """
    raise NotImplementedError() 
Developer: deepchem, Project: deepchem, Lines of code: 22, Source file: datasets.py
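
Again the method is abstract. A rough sketch of what a concrete implementation could look like, assuming the samples live in NumPy arrays and PyTorch is installed; the class name and fields are illustrative, not DeepChem's actual implementation:

import numpy as np
import torch

class NumpyIterable(torch.utils.data.IterableDataset):  # illustrative only
  def __init__(self, X, y, w, ids, epochs=1, deterministic=False):
    super().__init__()
    self.X, self.y, self.w, self.ids = X, y, w, ids
    self.epochs = epochs
    self.deterministic = deterministic

  def __iter__(self):
    n = len(self.X)
    for _ in range(self.epochs):
      # Fixed order when deterministic, a fresh permutation per epoch otherwise.
      order = np.arange(n) if self.deterministic else np.random.permutation(n)
      for i in order:
        yield self.X[i], self.y[i], self.w[i], self.ids[i]  # (X, y, w, id) per sample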

Example 3: dataset_to_stream

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def dataset_to_stream(dataset, input_name, n_chunks=0, append_targets=False):
  """Takes a tf.Dataset and creates a numpy stream of ready batches."""
  for example in backend.dataset_as_numpy(dataset):
    inp, out = example[0][input_name], example[1]
    # Some accelerators don't handle uint8 well, cast to int.
    if inp.dtype == np.uint8:
      inp = inp.astype(np.int32)
    if out.dtype == np.uint8:
      out = out.astype(np.int32)
    if len(out.shape) > 1 and out.shape[-1] == 1:
      out = np.squeeze(out, axis=-1)
    if n_chunks > 0:
      inp = tuple(np.split(inp, n_chunks, axis=1))
      out = tuple(np.split(out, n_chunks, axis=1))
    if append_targets:
      inp = (inp, out)
    yield inp, out 
Developer: yyht, Project: BERT, Lines of code: 19, Source file: inputs.py
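
The helper above relies on backend.dataset_as_numpy from its own repository. In plain TensorFlow 2.x the same idea (a NumPy stream with a uint8 cast and an optional label squeeze) can be sketched with Dataset.as_numpy_iterator(); the toy dataset below is illustrative:

import numpy as np
import tensorflow as tf

# Toy dataset of dict features plus uint8 labels, batched like a real input pipeline.
ds = tf.data.Dataset.from_tensor_slices(
    ({'image': tf.zeros([4, 2, 2], dtype=tf.uint8)},
     tf.constant([[0], [1], [0], [1]], dtype=tf.uint8))).batch(2)

def numpy_stream(dataset, input_name):
  for features, out in dataset.as_numpy_iterator():
    inp = features[input_name]
    # Cast uint8 arrays to int32 and drop a trailing label axis of size 1.
    if inp.dtype == np.uint8:
      inp = inp.astype(np.int32)
    if out.dtype == np.uint8:
      out = out.astype(np.int32)
    if out.ndim > 1 and out.shape[-1] == 1:
      out = np.squeeze(out, axis=-1)
    yield inp, out

for inp, out in numpy_stream(ds, 'image'):
  print(inp.shape, out)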

Example 4: __dataset_generator

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def __dataset_generator(self, hops, scale_terms):
        """
        Generates a set of triplets and associated scaling terms by:
            1. Sampling for each node a set of nodes from each of its neighborhoods
            2. Forming all implied pairwise constraints

        Uses tf.Dataset API to perform the sampling in a separate thread for increased speed.

        Parameters
        ----------
        hops : dict
            A dictionary where each 1, 2, ... K, neighborhoods are saved as sparse matrices
        scale_terms : dict
            The appropriate up-scaling terms to ensure unbiased estimates for each neighbourhood
        Returns
        -------
        None. The sampled triplets and their scale terms are stored in
        self.triplets and self.scale_terms.
        """
        def gen():
            while True:
                yield to_triplets(sample_all_hops(hops), scale_terms)

        dataset = tf.data.Dataset.from_generator(gen, (tf.int32, tf.float32), ([None, 3], [None]))
        self.triplets, self.scale_terms = dataset.prefetch(1).make_one_shot_iterator().get_next() 
Developer: abojchevski, Project: graph2gauss, Lines of code: 25, Source file: model.py
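
make_one_shot_iterator() is TF 1.x API. Under TF 2.x (2.4+ for output_signature), roughly the same generator-backed pipeline could be written as below; gen() just emits dummy triplets in place of to_triplets(sample_all_hops(hops), scale_terms):

import numpy as np
import tensorflow as tf

def gen():
  while True:
    # Dummy stand-in for to_triplets(sample_all_hops(hops), scale_terms).
    yield np.zeros([8, 3], dtype=np.int32), np.ones([8], dtype=np.float32)

dataset = tf.data.Dataset.from_generator(
    gen,
    output_signature=(tf.TensorSpec([None, 3], tf.int32),
                      tf.TensorSpec([None], tf.float32)))

# In eager mode the prefetched dataset is iterated directly instead of
# going through a one-shot iterator.
triplets, scale_terms = next(iter(dataset.prefetch(1)))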

Example 5: test_with_tf_datasets

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def test_with_tf_datasets(self):

    @computations.tf_computation(computation_types.SequenceType(tf.int64))
    def consume(ds):
      return ds.reduce(np.int64(0), lambda x, y: x + y)

    self.assertEqual(str(consume.type_signature), '(int64* -> int64)')

    @computations.tf_computation
    def produce():
      return tf.data.Dataset.range(10)

    self.assertEqual(str(produce.type_signature), '( -> int64*)')

    self.assertEqual(consume(produce()), 45)

  # TODO(b/131363314): The reference executor should support generating and
  # returning infinite datasets 
Developer: tensorflow, Project: federated, Lines of code: 20, Source file: computations_test.py

Example 6: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def __init__(self, model_creator, data_creator, config=None,
                 verbose=False):
        """Initializes the runner.

        Args:
            model_creator (dict -> Model): see tf_trainer.py.
            data_creator (dict -> tf.Dataset, tf.Dataset): see tf_trainer.py.
            config (dict): see tf_trainer.py.
            verbose (bool): Outputs training data if true.
        """

        self.model_creator = model_creator
        self.data_creator = data_creator
        self.config = {} if config is None else config
        self.epoch = 0
        self.verbose = verbose 
Developer: ray-project, Project: ray, Lines of code: 18, Source file: tf_runner.py

Example 7: to_dataframe

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def to_dataframe(self):
    """Construct a pandas DataFrame containing the data from this Dataset.

    Returns
    -------
    pandas dataframe. If there is only a single feature per datapoint,
    will have column "X" else will have columns "X1,X2,..." for
    features.  If there is only a single label per datapoint, will
    have column "y" else will have columns "y1,y2,..." for labels. If
    there is only a single weight per datapoint will have column "w"
    else will have columns "w1,w2,...". Will have column "ids" for
    identifiers.
    """
    X = self.X
    y = self.y
    w = self.w
    ids = self.ids
    if len(X.shape) == 1 or X.shape[1] == 1:
      columns = ['X']
    else:
      columns = [f'X{i+1}' for i in range(X.shape[1])]
    X_df = pd.DataFrame(X, columns=columns)
    if len(y.shape) == 1 or y.shape[1] == 1:
      columns = ['y']
    else:
      columns = [f'y{i+1}' for i in range(y.shape[1])]
    y_df = pd.DataFrame(y, columns=columns)
    if len(w.shape) == 1 or w.shape[1] == 1:
      columns = ['w']
    else:
      columns = [f'w{i+1}' for i in range(w.shape[1])]
    w_df = pd.DataFrame(w, columns=columns)
    ids_df = pd.DataFrame(ids, columns=['ids'])
    return pd.concat([X_df, y_df, w_df, ids_df], axis=1, sort=False) 
Developer: deepchem, Project: deepchem, Lines of code: 36, Source file: datasets.py
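
For context, a short usage sketch, assuming the deepchem package is installed and that dc.data.NumpyDataset exposes this to_dataframe() method; the array values are illustrative:

import numpy as np
import deepchem as dc

X = np.random.rand(4, 3)               # 3 features per datapoint
y = np.random.randint(0, 2, (4, 1))    # single label per datapoint
dataset = dc.data.NumpyDataset(X, y)

df = dataset.to_dataframe()
print(df.columns.tolist())             # expected: ['X1', 'X2', 'X3', 'y', 'w', 'ids']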

Example 8: __init__

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def __init__(self, datasets=None):
    """Initialize this `Databag`.

    Parameters
    ----------
    datasets: dict, optional
      A dictionary mapping keys to `Dataset` objects.
    """
    if datasets is None:
      self.datasets = dict()
    else:
      self.datasets = datasets 
Developer: deepchem, Project: deepchem, Lines of code: 14, Source file: datasets.py

Example 9: test_unknown_shapes

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def test_unknown_shapes():
    """Apply _check_interpolation_correctness() for a few sizes and check
    for tf.Dataset compatibility."""
    shapes_to_try = [[3, 4, 5, 6], [1, 2, 2, 1]]
    for shape in shapes_to_try:
        _check_interpolation_correctness(shape, "float32", "float32", True) 
Developer: tensorflow, Project: addons, Lines of code: 8, Source file: dense_image_warp_test.py

Example 10: build_client_update_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def build_client_update_fn(model_fn, optimizer_fn, client_update_tf,
                           tf_dataset_type, model_weights_type):
  """Builds a `tff.tf_computation` in the presense of malicious clients.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.Model`.
    optimizer_fn: A no-arg function that returns a
      `tf.keras.optimizers.Optimizer`.
    client_update_tf: A `tf.function` that computes the `ClientOutput`.
    tf_dataset_type: type_signature of dataset.
    model_weights_type: type_signature of model weights.

  Returns:
    A `tff.tf_computation` for local model optimization with type signature:
    '@tff.tf_computation(tf_dataset_type, tf_dataset_type,
                      tf.bool, model_weights_type)'
  """

  @tff.tf_computation(tf_dataset_type, tf_dataset_type, tf.bool,
                      model_weights_type)
  def client_delta_tf(benign_dataset, malicious_dataset, client_type,
                      initial_model_weights):
    """Performs client local model optimization.

    Args:
      benign_dataset: A `tf.data.Dataset` of benign examples.
      malicious_dataset: A `tf.data.Dataset` of malicious examples.
      client_type: A `tf.bool` indicating whether the client is malicious.
      initial_model_weights: A `tff.learning.Model.weights` from server.

    Returns:
      A 'ClientOutput`.
    """
    # Create variables here in the graph context, before calling the tf.function
    # below.
    model = model_fn()
    optimizer = optimizer_fn()
    return client_update_tf(model, optimizer, benign_dataset, malicious_dataset,
                            client_type, initial_model_weights)

  return client_delta_tf 
Developer: tensorflow, Project: federated, Lines of code: 43, Source file: attacked_fedavg.py

Example 11: build_client_update_fn

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def build_client_update_fn(model_fn, client_optimizer_fn, tf_dataset_type,
                           model_weights_type):
  """Builds a `tff.tf_computation` for local model optimization.

  Args:
    model_fn: A no-arg function that returns a `tff.learning.TrainableModel`.
    client_optimizer_fn: A no-arg function that returns a
      `tf.keras.optimizers.Optimizer`.
    tf_dataset_type: type_signature of dataset.
    model_weights_type: type_signature of model weights.

  Returns:
    A `tff.tf_computation` for local model optimization.
  """

  @tff.tf_computation(tf_dataset_type, model_weights_type)
  def client_delta_tf(tf_dataset, initial_model_weights):
    """Performs client local model optimization.

    Args:
      tf_dataset: a `tf.data.Dataset` that provides training examples.
      initial_model_weights: a `model_utils.ModelWeights` containing the
        starting weights.

    Returns:
      A `ClientOutput`.
    """
    model = model_fn()
    optimizer = client_optimizer_fn()
    return client_update(model, optimizer, tf_dataset, initial_model_weights)

  return client_delta_tf 
Developer: tensorflow, Project: federated, Lines of code: 34, Source file: flars_fedavg.py

Example 12: test_get_size_info

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def test_get_size_info(self, num_clients):

    @computations.federated_computation(
        type_factory.at_clients(computation_types.SequenceType(tf.float32)),
        type_factory.at_server(tf.float32))
    def comp(temperatures, threshold):
      client_data = [temperatures, intrinsics.federated_broadcast(threshold)]
      result_map = intrinsics.federated_map(
          count_over, intrinsics.federated_zip(client_data))
      count_map = intrinsics.federated_map(count_total, temperatures)
      return intrinsics.federated_mean(result_map, count_map)

    factory = executor_stacks.sizing_executor_factory(num_clients=num_clients)
    default_executor.set_default_executor(factory)

    to_float = lambda x: tf.cast(x, tf.float32)
    temperatures = [tf.data.Dataset.range(10).map(to_float)] * num_clients
    threshold = 15.0
    comp(temperatures, threshold)

    # Each client receives a tf.float32 and uploads two tf.float32 values.
    expected_broadcast_bits = num_clients * 32
    expected_aggregate_bits = expected_broadcast_bits * 2
    expected = ({
        (('CLIENTS', num_clients),): [[1, tf.float32]] * num_clients
    }, {
        (('CLIENTS', num_clients),): [[1, tf.float32]] * num_clients * 2
    }, [expected_broadcast_bits], [expected_aggregate_bits])

    self.assertEqual(expected, factory.get_size_info()) 
Developer: tensorflow, Project: federated, Lines of code: 32, Source file: computations_test.py

Example 13: test_consume_infinite_tf_dataset

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def test_consume_infinite_tf_dataset(self):

    @computations.tf_computation(computation_types.SequenceType(tf.int64))
    def consume(ds):
      # Consume the first 10 elements of the dataset.
      return ds.take(10).reduce(np.int64(0), lambda x, y: x + y)

    self.assertEqual(consume(tf.data.Dataset.range(10).repeat()), 45)

  # TODO(b/131363314): The reference executor should support generating and
  # returning infinite datasets 
Developer: tensorflow, Project: federated, Lines of code: 13, Source file: computations_test.py
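
Outside of TFF, the same take-then-reduce pattern works directly on an eager tf.data.Dataset; a minimal sketch:

import numpy as np
import tensorflow as tf

infinite = tf.data.Dataset.range(10).repeat()              # 0..9, 0..9, ...
total = infinite.take(10).reduce(np.int64(0), lambda x, y: x + y)
print(int(total))                                          # 45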

Example 14: test_produce_and_consume_infinite_tf_dataset

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def test_produce_and_consume_infinite_tf_dataset(self):

    @computations.tf_computation(computation_types.SequenceType(tf.int64))
    def consume(ds):
      # Consume the first 10 elements of the dataset.
      return ds.take(10).reduce(np.int64(0), lambda x, y: x + y)

    @computations.tf_computation
    def produce():
      # Produce an infinite dataset.
      return tf.data.Dataset.range(10).repeat()

    self.assertEqual(consume(produce()), 45) 
Developer: tensorflow, Project: federated, Lines of code: 15, Source file: computations_test.py

Example 15: test_with_sequence_of_pairs

# Required import: import tensorflow [as alias]
# Or: from tensorflow import Dataset [as alias]
def test_with_sequence_of_pairs(self):
    pairs = tf.data.Dataset.from_tensor_slices(
        (list(range(5)), list(range(5, 10))))

    @computations.tf_computation
    def process_pairs(ds):
      return ds.reduce(0, lambda state, pair: state + pair[0] + pair[1])

    self.assertEqual(process_pairs(pairs), 45) 
Developer: tensorflow, Project: federated, Lines of code: 11, Source file: computations_test.py


Note: The tensorflow.Dataset examples in this article were compiled by 纯净天空 (vimsky) from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to each project's license. Do not reproduce without permission.