Python tensorflow_datasets.load Method Code Examples

This article collects typical usage examples of the Python method tensorflow_datasets.load. If you are wondering how exactly to use tensorflow_datasets.load, or what it is good for, the curated code examples below may help. You can also explore other usage examples from the tensorflow_datasets package.


The following presents 15 code examples of the tensorflow_datasets.load method, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
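Before the examples, here is a minimal, self-contained sketch of the canonical tfds.load call, for orientation (the dataset name 'mnist' and the printed field are illustrative choices, not taken from the examples below):

import tensorflow_datasets as tfds

# Load the train split together with its metadata object; as_supervised=True
# yields (image, label) tuples instead of feature dictionaries.
ds, info = tfds.load('mnist', split='train', with_info=True, as_supervised=True)
print(info.features['label'].num_classes)  # e.g. 10 for MNIST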

Example 1: load

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def load(data_set_name, **kwargs):
    """
    :param data_set_name: data set name--call tfds.list_builders() for options
    :return:
        train_ds: TensorFlow Dataset object for the training data
        test_ds: TensorFlow Dataset object for the testing data
        info: data set info object
    """
    # get data and its info
    ds, info = tfds.load(name=data_set_name, split=tfds.Split.ALL, with_info=True)

    # configure the data sets
    if 'train' in info.splits:
        train_ds = configure_data_set(ds=ds, info=info, is_training=True, **kwargs)
    else:
        train_ds = None
    if 'test' in info.splits:
        test_ds = configure_data_set(ds=ds, info=info, is_training=False, **kwargs)
    else:
        test_ds = None

    return train_ds, test_ds, info 
Developer: astirn, Project: IIC, Lines: 24, Source: data.py

Example 2: build_dataset

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
from typing import Tuple

def build_dataset(
    shape: Tuple[int, int],
    name: str = "mnist",
    train_batch_size: int = 32,
    valid_batch_size: int = 32
    ):

    dataset = {}
    builder = tfds.builder(name)
    dataset["num_train"] = builder.info.splits['train'].num_examples
    dataset["num_test"] = builder.info.splits['test'].num_examples

    [ds_train, ds_test], info = tfds.load(name=name, split=["train", "test"], with_info=True)
    dataset["num_classes"] = info.features["label"].num_classes
    dataset["channels"] = ds_train.output_shapes["image"][-1].value

    ds_train = ds_train.shuffle(1024).repeat()
    ds_train = ds_train.map(lambda data: _parse_function(data, shape, dataset["num_classes"], dataset["channels"]))
    dataset["train"] = ds_train.batch(train_batch_size)

    ds_test = ds_test.shuffle(1024).repeat()
    ds_test = ds_test.map(lambda data: _parse_function(data, shape, dataset["num_classes"], dataset["channels"]))
    dataset["test"] = ds_test.batch(valid_batch_size)

    return dataset 
Developer: Bisonai, Project: mobilenetv3-tensorflow, Lines: 27, Source: datasets.py

Example 3: _build

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def _build(self):
    dataset = tfds.load(name=self._dataset_name, split=self._mode)
    minibatch = dataset.map(parse).repeat()

    if self._shuffle:
      minibatch = minibatch.shuffle(self._batch_size*100)
    minibatch = minibatch.batch(
        self._batch_size).make_one_shot_iterator().get_next()
    minibatch['sentiment'].set_shape([self._batch_size])
    minibatch['sentence'] = tf.SparseTensor(
        indices=minibatch['sentence'].indices,
        values=minibatch['sentence'].values,
        dense_shape=[self._batch_size, minibatch['sentence'].dense_shape[1]])
    # minibatch['sentence'] is a sparse tensor with dense shape
    # [batch_size, seq_length]; num_tokens below has shape [batch_size].
    return Dataset(
        tokens=minibatch['sentence'],
        num_tokens=self.get_row_lengths(minibatch['sentence']),
        sentiment=minibatch['sentiment'],
    ) 
Developer: deepmind, Project: interval-bound-propagation, Lines: 22, Source: robust_model.py

Example 4: load_data_planetoid

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def load_data_planetoid(name, path, splits_path=None, row_normalize=False,
                        data_container_class=PlanetoidDataset):
  """Load Planetoid data."""
  if splits_path is None:
    # Load from file in Planetoid format.
    (adj, features, _, _, _, train_mask, val_mask, test_mask,
     labels) = load_from_planetoid_files(name, path)
  else:
    # Otherwise load from a path where we saved a pickle with random splits.
    logging.info('Loading from splits path: %s', splits_path)
    with open(splits_path, 'rb') as splits_file:
      (adj, features, _, _, _, train_mask, val_mask, test_mask,
       labels) = pickle.load(splits_file)

  return data_container_class.build_from_adjacency_matrix(
      name,
      adj,
      features,
      train_mask,
      val_mask,
      test_mask,
      labels,
      row_normalize=row_normalize) 
Developer: tensorflow, Project: neural-structured-learning, Lines: 24, Source: loaders.py

Example 5: get_train_examples

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def get_train_examples(self, data_dir, filename=None):
        """
        Returns the training examples from the data directory.

        Args:
            data_dir: Directory containing the data files used for training and evaluating.
            filename: None by default, specify this if the training file has a different name than the original one
                which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.

        """
        if data_dir is None:
            data_dir = ""

        if self.train_file is None:
            raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")

        with open(
            os.path.join(data_dir, self.train_file if filename is None else filename), "r", encoding="utf-8"
        ) as reader:
            input_data = json.load(reader)["data"]
        return self._create_examples(input_data, "train") 
Developer: bhoov, Project: exbert, Lines: 23, Source: squad.py

Example 6: get_dev_examples

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def get_dev_examples(self, data_dir, filename=None):
        """
        Returns the evaluation example from the data directory.

        Args:
            data_dir: Directory containing the data files used for training and evaluating.
            filename: None by default, specify this if the evaluation file has a different name than the original one
                which is `train-v1.1.json` and `train-v2.0.json` for squad versions 1.1 and 2.0 respectively.
        """
        if data_dir is None:
            data_dir = ""

        if self.dev_file is None:
            raise ValueError("SquadProcessor should be instantiated via SquadV1Processor or SquadV2Processor")

        with open(
            os.path.join(data_dir, self.dev_file if filename is None else filename), "r", encoding="utf-8"
        ) as reader:
            input_data = json.load(reader)["data"]
        return self._create_examples(input_data, "dev") 
Developer: bhoov, Project: exbert, Lines: 22, Source: squad.py

Example 7: prepare_glue_examples

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def prepare_glue_examples(tokenizer, task_name='mrpc', split_name='train'):
    processor = glue_processors[task_name]()

    def tf_mrpc_to_pytorch(d):
        for ex in d:
            ex = processor.get_example_from_tensor_dict(ex)
            #        ex = processor.tfds_map(ex)
            yield ex

    tf_data = tensorflow_datasets.load(f"glue/{task_name}")[split_name]
    examples = tf_mrpc_to_pytorch(tf_data)
    features = glue_convert_examples_to_features(examples,
                                                 tokenizer,
                                                 max_length=128,
                                                 task=task_name)

    all_input_ids = torch.tensor([f.input_ids for f in features], dtype=torch.long)
    all_attention_mask = torch.tensor([f.attention_mask for f in features], dtype=torch.long)
    all_token_type_ids = torch.tensor([f.token_type_ids for f in features], dtype=torch.long)
    all_labels = torch.tensor([f.label for f in features], dtype=torch.long)

    dataset = TensorDataset(all_input_ids, all_attention_mask, all_token_type_ids, all_labels)
    return dataset 
Developer: paperswithcode, Project: axcell, Lines: 25, Source: transfo_experiment.py

Example 8: _load_dataset

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def _load_dataset(self, split):
    """Loads the underlying dataset split from disk.

    Args:
      split: Name of the split to load.

    Returns:
      A `tf.data.Dataset` object yielding a tuple of image and label tensors.
    """
    if FLAGS.data_fake_dataset:
      return self._make_fake_dataset(split)
    ds = tfds.load(
        self._tfds_name,
        split=split,
        data_dir=FLAGS.tfds_data_dir,
        as_dataset_kwargs={"shuffle_files": False})
    ds = self._replace_labels(split, ds)
    ds = ds.map(self._parse_fn)
    # tf.contrib is TF1-only; in TF2 use tf.data.AUTOTUNE instead.
    return ds.prefetch(tf.contrib.data.AUTOTUNE)
Developer: google, Project: compare_gan, Lines: 21, Source: datasets.py

Example 9: ld_mnist

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def ld_mnist():
  """Load training and test data."""

  def convert_types(image, label):
    image = tf.cast(image, tf.float32)
    image /= 255
    return image, label

  dataset, info = tfds.load('mnist', 
                            data_dir='gs://tfds-data/datasets', 
                            with_info=True,
                            as_supervised=True)
  mnist_train, mnist_test = dataset['train'], dataset['test']
  mnist_train = mnist_train.map(convert_types).shuffle(10000).batch(128)
  mnist_test = mnist_test.map(convert_types).batch(128)
  return EasyDict(train=mnist_train, test=mnist_test) 
Developer: tensorflow, Project: cleverhans, Lines: 18, Source: mnist_tutorial.py

Example 10: get_test_iterator

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def get_test_iterator(self, tmp_dir):
    path = generator_utils.maybe_download(
        tmp_dir, os.path.basename(DATA_URL), DATA_URL)
    with tf.io.gfile.GFile(path, "rb") as fp:
      mnist_test = np.load(fp)
    mnist_test = np.transpose(mnist_test, (1, 0, 2, 3))
    mnist_test = np.expand_dims(mnist_test, axis=-1)
    mnist_test = tf.data.Dataset.from_tensor_slices(mnist_test)
    return mnist_test.make_initializable_iterator() 
Developer: tensorflow, Project: tensor2tensor, Lines: 11, Source: moving_mnist.py

Example 11: get_train_iterator

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def get_train_iterator(self):
    mnist_ds = tfds.load("mnist:3.*.*", split=tfds.Split.TRAIN,
                         as_supervised=True)
    mnist_ds = mnist_ds.repeat()
    moving_mnist_ds = mnist_ds.map(self.map_fn).batch(2)
    moving_mnist_ds = moving_mnist_ds.map(lambda x: tf.reduce_max(x, axis=0))
    return moving_mnist_ds.make_initializable_iterator() 
Developer: tensorflow, Project: tensor2tensor, Lines: 9, Source: moving_mnist.py

Example 12: train_and_eval_dataset

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def train_and_eval_dataset(dataset_name, data_dir):
  """Return train and evaluation datasets, feature info and supervised keys.

  Args:
    dataset_name: a string, the name of the dataset; if it starts with "v1_"
      then we'll search T2T Problem registry for it, otherwise we assume it
      is a dataset from TFDS and load it from there.
    data_dir: directory where the data is located.

  Returns:
    a 4-tuple consisting of:
     * the train tf.data.Dataset
     * the eval tf.data.Dataset
     * information about features: a python dictionary with feature names
         as keys and an object as value that provides .shape and .num_classes.
     * supervised_keys: information about which features are the input and
         which are the target, i.e., a pair of lists with input and target
         feature names.
  """
  if dataset_name.startswith("v1_"):
    return _train_and_eval_dataset_v1(dataset_name[3:], data_dir)
  dataset_builder = tfds.builder(dataset_name, data_dir=data_dir)
  info = dataset_builder.info
  splits = dataset_builder.info.splits
  if tfds.Split.TRAIN not in splits:
    raise ValueError("To train we require a train split in the dataset.")
  if tfds.Split.VALIDATION not in splits and "test" not in splits:
    raise ValueError("We require a validation or test split in the dataset.")
  eval_split = tfds.Split.VALIDATION
  if tfds.Split.VALIDATION not in splits:
    eval_split = tfds.Split.TEST
  train, valid = tfds.load(
      name=dataset_name, split=[tfds.Split.TRAIN, eval_split])
  keys = None
  if info.supervised_keys:
    keys = ([info.supervised_keys[0]], [info.supervised_keys[1]])
  return train, valid, info.features, keys 
Developer: yyht, Project: BERT, Lines: 38, Source: t2t.py

Example 13: get_train_iterator

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def get_train_iterator(self):
    mnist_ds = tfds.load("mnist", split=tfds.Split.TRAIN, as_supervised=True)
    mnist_ds = mnist_ds.repeat()
    moving_mnist_ds = mnist_ds.map(self.map_fn).batch(2)
    moving_mnist_ds = moving_mnist_ds.map(lambda x: tf.reduce_max(x, axis=0))
    return moving_mnist_ds.make_initializable_iterator() 
Developer: yyht, Project: BERT, Lines: 8, Source: moving_mnist.py

Example 14: load

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def load(*args, **kwargs):
    return tfds.load('shapenet', *args, **kwargs) 
Developer: tensorflow, Project: graphics, Lines: 4, Source: shapenet.py

Example 15: load

# Required import: import tensorflow_datasets [as alias]
# Or: from tensorflow_datasets import load [as alias]
def load(*args, **kwargs):
    return tfds.load('model_net40', *args, **kwargs) 
Developer: tensorflow, Project: graphics, Lines: 4, Source: __init__.py


Note: The tensorflow_datasets.load method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by many developers, and copyright remains with the original authors. Please refer to each project's license before distributing or using the code, and do not reproduce this article without permission.