

Python base.Datasets Method Code Examples

This article collects typical usage examples of the Python method tensorflow.contrib.learn.python.learn.datasets.base.Datasets. If you are wondering how base.Datasets is used in practice, or what real code that calls it looks like, the curated examples below should help. You can also explore further usage examples of the module it belongs to, tensorflow.contrib.learn.python.learn.datasets.base.


Below are 8 code examples of the base.Datasets method, ordered by popularity by default.
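For orientation, base.Datasets is nothing more than a namedtuple that bundles the three conventional splits. The minimal sketch below mirrors the definition in the contrib module (the placeholder values are purely illustrative) and shows how the return values of the examples are typically consumed.

import collections

# Minimal sketch of the contrib definition: Datasets simply groups the
# train / validation / test splits under attribute names.
Datasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])

# Placeholder values; the examples below store DataSet objects in these slots
# and leave unused splits as None.
splits = Datasets(train=[1, 2, 3], validation=None, test=[4, 5])
print(splits.train)       # [1, 2, 3]
print(splits.validation)  # None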

Example 1: load_dbpedia

# Required import: from tensorflow.contrib.learn.python.learn.datasets import base [as alias]
# Or: from tensorflow.contrib.learn.python.learn.datasets.base import Datasets [as alias]
def load_dbpedia(size='small', test_with_fake_data=False):
  """Get DBpedia datasets from CSV files."""
  if not test_with_fake_data:
    data_dir = os.path.join(os.getenv('TF_EXP_BASE_DIR', ''), 'dbpedia_data')
    maybe_download_dbpedia(data_dir)

    train_path = os.path.join(data_dir, 'dbpedia_csv', 'train.csv')
    test_path = os.path.join(data_dir, 'dbpedia_csv', 'test.csv')

    if size == 'small':
      # Reduce the size of original data by a factor of 1000.
      base.shrink_csv(train_path, 1000)
      base.shrink_csv(test_path, 1000)
      train_path = train_path.replace('train.csv', 'train_small.csv')
      test_path = test_path.replace('test.csv', 'test_small.csv')
  else:
    module_path = os.path.dirname(__file__)
    train_path = os.path.join(module_path, 'data', 'text_train.csv')
    test_path = os.path.join(module_path, 'data', 'text_test.csv')

  train = base.load_csv_without_header(
      train_path, target_dtype=np.int32, features_dtype=np.str, target_column=0)
  test = base.load_csv_without_header(
      test_path, target_dtype=np.int32, features_dtype=np.str, target_column=0)

  return base.Datasets(train=train, validation=None, test=test) 
Developer: ryfeus, Project: lambda-packs, Lines of code: 28, Source: text_datasets.py
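A hedged usage sketch for this example: base.load_csv_without_header returns Dataset namedtuples, so the features and labels of each split are reached through .data and .target. The call below is hypothetical and requires either network access for the download or an existing copy under TF_EXP_BASE_DIR.

# Hypothetical usage of load_dbpedia defined above.
dbpedia = load_dbpedia(size='small')
x_train, y_train = dbpedia.train.data, dbpedia.train.target
x_test, y_test = dbpedia.test.data, dbpedia.test.target
print(len(x_train), len(x_test))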

Example 2: load_dataset

# Required import: from tensorflow.contrib.learn.python.learn.datasets import base [as alias]
# Or: from tensorflow.contrib.learn.python.learn.datasets.base import Datasets [as alias]
def load_dataset(path=None, img_h=28, img_w=28):

    print("\n***** Load dataset *****")

    split_data(path=path)

    dirlist = path_to_dirlist(path=PACK_PATH+"/train")
    if(len(dirlist) > 0):
        train_datas, train_labels, classes = dirlist_to_dataset(path="./train", dirlist=dirlist)

    dirlist = path_to_dirlist(path=PACK_PATH+"/test")
    if(len(dirlist) > 0):
        test_datas, test_labels, classes = dirlist_to_dataset(path="./test", dirlist=dirlist)

    dirlist = path_to_dirlist(path=PACK_PATH+"/valid")
    if(len(dirlist) > 0):
        valid_datas, valid_labels, classes = dirlist_to_dataset(path="./valid", dirlist=dirlist)

    train = DataSet(who_am_i="train", datas=train_datas, labels=train_labels, class_len=classes, height=img_h, width=img_w)
    test = DataSet(who_am_i="test", datas=test_datas, labels=test_labels, class_len=classes, height=img_h, width=img_w)
    validation = DataSet(who_am_i="valid", datas=valid_datas, labels=valid_labels, class_len=classes, height=img_h, width=img_w)

    num_train = train.amount
    num_test = test.amount
    print(" Num of Train images : "+str(num_train))
    print(" Num of Test images  : "+str(num_test))

    return base.Datasets(train=train, test=test, validation=validation), classes, min(num_train, num_test) 
Developer: YeongHyeon, Project: CNN_Own_Dataset, Lines of code: 30, Source: constructor.py

Example 3: dataset_constructor

# Required import: from tensorflow.contrib.learn.python.learn.datasets import base [as alias]
# Or: from tensorflow.contrib.learn.python.learn.datasets.base import Datasets [as alias]
def dataset_constructor():

    f = open(PACK_PATH+"/dataset/format.txt", 'r')
    class_len = int(f.readline())
    data_len = int(f.readline())
    height = int(f.readline())
    width = int(f.readline())
    chennel = int(f.readline())
    f.close()

    train = DataSet(who_am_i="train", class_len=class_len, data_len=data_len, height=height, width=width, chennel=chennel)
    test = DataSet(who_am_i="test", class_len=class_len, data_len=data_len, height=height, width=width, chennel=chennel)
    valid = DataSet(who_am_i="valid", class_len=class_len, data_len=data_len, height=height, width=width, chennel=chennel)

    return base.Datasets(train=train, test=test, validation=valid) 
Developer: YeongHyeon, Project: R-CNN_LIGHT, Lines of code: 17, Source: constructor.py
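dataset_constructor above expects PACK_PATH+"/dataset/format.txt" to hold five integers, one per line, in the order the successive readline() calls consume them: class_len, data_len, height, width, chennel. A hypothetical file written from Python, with placeholder values for a 10-class, 28x28, single-channel dataset:

# Hypothetical: create a format.txt compatible with dataset_constructor().
with open("format.txt", "w") as f:
    f.write("\n".join(["10", "60000", "28", "28", "1"]) + "\n")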

Example 4: read_data_sets

# Required import: from tensorflow.contrib.learn.python.learn.datasets import base [as alias]
# Or: from tensorflow.contrib.learn.python.learn.datasets.base import Datasets [as alias]
def read_data_sets(train_dir,
                   fake_data=False,
                   one_hot=False,
                   dtype=dtypes.float32,
                   reshape=True,
                   validation_size=5000):
  if fake_data:

    def fake():
      return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)

    train = fake()
    validation = fake()
    test = fake()
    return base.Datasets(train=train, validation=validation, test=test)


  # Binary mode is required for numpy.load on a file object.
  with open(os.path.join(train_dir, 'small_chairs.npy'), 'rb') as f:
    train_images = numpy.load(f)
    train_labels = numpy.zeros(len(train_images))

  train = DataSet(train_images, train_labels, dtype=dtype, reshape=reshape)
  validation = None
  test = None

  return base.Datasets(train=train, validation=validation, test=test) 
Developer: whyjay, Project: memoryGAN, Lines of code: 28, Source: chair.py

Example 5: read_data_sets

# Required import: from tensorflow.contrib.learn.python.learn.datasets import base [as alias]
# Or: from tensorflow.contrib.learn.python.learn.datasets.base import Datasets [as alias]
def read_data_sets(train_dir,
                   fake_data=False,
                   one_hot=False,
                   dtype=dtypes.float32):
  if fake_data:

    def fake():
      return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)

    train = fake()
    validation = fake()
    test = fake()
    return base.Datasets(train=train, validation=validation, test=test)

  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'
  VALIDATION_SIZE = 5000

  local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
                                   SOURCE_URL + TRAIN_IMAGES)
  train_images = extract_images(local_file)

  local_file = base.maybe_download(TRAIN_LABELS, train_dir,
                                   SOURCE_URL + TRAIN_LABELS)
  train_labels = extract_labels(local_file, one_hot=one_hot)

  local_file = base.maybe_download(TEST_IMAGES, train_dir,
                                   SOURCE_URL + TEST_IMAGES)
  test_images = extract_images(local_file)

  local_file = base.maybe_download(TEST_LABELS, train_dir,
                                   SOURCE_URL + TEST_LABELS)
  test_labels = extract_labels(local_file, one_hot=one_hot)

  validation_images = train_images[:VALIDATION_SIZE]
  validation_labels = train_labels[:VALIDATION_SIZE]
  train_images = train_images[VALIDATION_SIZE:]
  train_labels = train_labels[VALIDATION_SIZE:]

  train = DataSet(train_images, train_labels, start_id=0, dtype=dtype)
  validation = DataSet(validation_images,
                       validation_labels,
                       start_id=len(train_images),
                       dtype=dtype)
  test = DataSet(test_images,
                 test_labels,
                 start_id=(len(train_images) + len(validation_images)),
                 dtype=dtype)

  return base.Datasets(train=train, validation=validation, test=test) 
Developer: GoogleCloudPlatform, Project: cloudml-samples, Lines of code: 54, Source: input_data.py

Example 6: read_data_sets

# Required import: from tensorflow.contrib.learn.python.learn.datasets import base [as alias]
# Or: from tensorflow.contrib.learn.python.learn.datasets.base import Datasets [as alias]
def read_data_sets(train_dir,
                   fake_data=False,
                   one_hot=False,
                   dtype=dtypes.float32,
                   reshape=True,
                   validation_size=5000):
  if fake_data:

    def fake():
      return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)

    train = fake()
    validation = fake()
    test = fake()
    return base.Datasets(train=train, validation=validation, test=test)

  TRAIN_IMAGES = 'train-images-idx3-ubyte.gz'
  TRAIN_LABELS = 'train-labels-idx1-ubyte.gz'
  TEST_IMAGES = 't10k-images-idx3-ubyte.gz'
  TEST_LABELS = 't10k-labels-idx1-ubyte.gz'

  local_file = base.maybe_download(TRAIN_IMAGES, train_dir,
                                   SOURCE_URL + TRAIN_IMAGES)
  with open(local_file, 'rb') as f:
    train_images = extract_images(f)

  local_file = base.maybe_download(TRAIN_LABELS, train_dir,
                                   SOURCE_URL + TRAIN_LABELS)
  with open(local_file, 'rb') as f:
    train_labels = extract_labels(f, one_hot=one_hot)

  local_file = base.maybe_download(TEST_IMAGES, train_dir,
                                   SOURCE_URL + TEST_IMAGES)
  with open(local_file, 'rb') as f:
    test_images = extract_images(f)

  local_file = base.maybe_download(TEST_LABELS, train_dir,
                                   SOURCE_URL + TEST_LABELS)
  with open(local_file, 'rb') as f:
    test_labels = extract_labels(f, one_hot=one_hot)

  if not 0 <= validation_size <= len(train_images):
    raise ValueError(
        'Validation size should be between 0 and {}. Received: {}.'
        .format(len(train_images), validation_size))

  validation_images = train_images[:validation_size]
  validation_labels = train_labels[:validation_size]
  train_images = train_images[validation_size:]
  train_labels = train_labels[validation_size:]

  train = DataSet(train_images, train_labels, dtype=dtype, reshape=reshape)
  validation = DataSet(validation_images,
                       validation_labels,
                       dtype=dtype,
                       reshape=reshape)
  test = DataSet(test_images, test_labels, dtype=dtype, reshape=reshape)

  return base.Datasets(train=train, validation=validation, test=test) 
Developer: abhisuri97, Project: auto-alt-text-lambda-api, Lines of code: 61, Source: mnist.py
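A hedged usage sketch, assuming the accompanying DataSet class matches the standard TensorFlow MNIST helper (with images, labels, num_examples, and next_batch); the directory and batch size are placeholders.

# Hypothetical usage: download MNIST into a scratch directory and draw a batch.
mnist = read_data_sets('/tmp/mnist_data', one_hot=True)
print(mnist.train.num_examples, mnist.validation.num_examples, mnist.test.num_examples)
batch_images, batch_labels = mnist.train.next_batch(100)
print(batch_images.shape, batch_labels.shape)  # e.g. (100, 784) (100, 10)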

Example 7: load_stock_data

# Required import: from tensorflow.contrib.learn.python.learn.datasets import base [as alias]
# Or: from tensorflow.contrib.learn.python.learn.datasets.base import Datasets [as alias]
def load_stock_data(path, moving_window=128, columns=5, train_test_ratio=4.0):
  # process a single file's data into usable arrays
  def process_data(data):
    stock_set = np.zeros([0,moving_window,columns])
    label_set = np.zeros([0,2])
    for idx in range(data.shape[0] - (moving_window + 5)):
      stock_set = np.concatenate((stock_set, np.expand_dims(data[range(idx,idx+(moving_window)),:], axis=0)), axis=0)

      if data[idx+(moving_window+5),3] > data[idx+(moving_window),3]:
        lbl = [[1.0, 0.0]]
      else:
        lbl = [[0.0, 1.0]]
      label_set = np.concatenate((label_set, lbl), axis=0)
      # label_set = np.concatenate((label_set, np.array([data[idx+(moving_window+5),3] - data[idx+(moving_window),3]])))
    # print(stock_set.shape, label_set.shape)
    return stock_set, label_set

  # read a directory of data
  stocks_set = np.zeros([0,moving_window,columns])
  labels_set = np.zeros([0,2])
  for dir_item in os.listdir(path):
    dir_item_path = os.path.join(path, dir_item)
    if os.path.isfile(dir_item_path):
      print(dir_item_path)
      ss, ls = process_data(load_csv(dir_item_path))
      stocks_set = np.concatenate((stocks_set, ss), axis=0)
      labels_set = np.concatenate((labels_set, ls), axis=0)

  # shuffling the data
  perm = np.arange(labels_set.shape[0])
  np.random.shuffle(perm)
  stocks_set = stocks_set[perm]
  labels_set = labels_set[perm]

  # normalize the data
  stocks_set_ = np.zeros(stocks_set.shape)
  for i in range(len(stocks_set)):
    min = stocks_set[i].min(axis=0)
    max = stocks_set[i].max(axis=0)
    stocks_set_[i] = (stocks_set[i] - min) / (max - min)
  stocks_set = stocks_set_
  # labels_set = np.transpose(labels_set)

  # selecting 1/5 for testing, and 4/5 for training
  train_test_idx = int((1.0 / (train_test_ratio + 1.0)) * labels_set.shape[0])
  train_stocks = stocks_set[train_test_idx:,:,:]
  train_labels = labels_set[train_test_idx:]
  test_stocks = stocks_set[:train_test_idx,:,:]
  test_labels = labels_set[:train_test_idx]

  train = DataSet(train_stocks, train_labels)
  test = DataSet(test_stocks, test_labels)

  return base.Datasets(train=train, validation=None, test=test)

# db = load_stock_data("data/short/")
# images, labels = db.train.next_batch(10)
# print(images.shape, labels.shape)
# print(images, labels) 
Developer: philipxjm, Project: Deep-Convolution-Stock-Technical-Analysis, Lines of code: 61, Source: loader.py

Example 8: read_data_sets

# Required import: from tensorflow.contrib.learn.python.learn.datasets import base [as alias]
# Or: from tensorflow.contrib.learn.python.learn.datasets.base import Datasets [as alias]
def read_data_sets(train_dir,
                   fake_data=False,
                   one_hot=False,
                   dtype=dtypes.float32,
                   reshape=True,
                   validation_size=0):
  if fake_data:

    def fake():
      return DataSet([], [], fake_data=True, one_hot=one_hot, dtype=dtype)

    train = fake()
    validation = fake()
    test = fake()
    return base.Datasets(train=train, validation=validation, test=test)


  gz_file_name = 'cifar-10-python.tar.gz'

  local_file = base.maybe_download(gz_file_name, train_dir,
                                   SOURCE_URL + gz_file_name)

  train_images = []
  train_labels = []
  for i in range(1, 6):
    # Binary mode is required for numpy.load; this also assumes the downloaded
    # archive has already been extracted into train_dir.
    with open(os.path.join(train_dir, 'cifar-10-batches-py', 'data_batch_%d'%i), 'rb') as f:
      batch = numpy.load(f)
      tmp_images = batch['data'].reshape([-1, 3, 32, 32])
      train_images.append(tmp_images.transpose([0, 2, 3, 1]))
      train_labels += batch['labels']
  train_images = numpy.concatenate(train_images)
  train_labels = numpy.array(train_labels)

  if not 0 <= validation_size <= len(train_images):
    raise ValueError(
        'Validation size should be between 0 and {}. Received: {}.'
        .format(len(train_images), validation_size))

  validation_images = train_images[:validation_size]
  validation_labels = train_labels[:validation_size]
  train_images = train_images[validation_size:]
  train_labels = train_labels[validation_size:]

  train = DataSet(train_images, train_labels, dtype=dtype, reshape=reshape)
  validation = DataSet(validation_images,
                       validation_labels,
                       dtype=dtype,
                       reshape=reshape)
  #test = DataSet(test_images, test_labels, dtype=dtype, reshape=reshape)
  test = None

  return base.Datasets(train=train, validation=validation, test=test) 
Developer: whyjay, Project: memoryGAN, Lines of code: 54, Source: cifar10.py
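Note that the data_batch_N files are Python pickles shipped inside cifar-10-python.tar.gz, and base.maybe_download only fetches the archive without extracting it. Under Python 3, a more robust way to read one batch is the standard pickle route sketched below; this is a hedged alternative to the numpy.load call above, and load_cifar_batch is a hypothetical helper name.

import pickle

# Hypothetical helper: read one CIFAR-10 batch file that has already been
# extracted from cifar-10-python.tar.gz into train_dir.
def load_cifar_batch(path):
    with open(path, 'rb') as f:
        batch = pickle.load(f, encoding='latin1')  # keys include 'data' and 'labels'
    images = batch['data'].reshape([-1, 3, 32, 32]).transpose([0, 2, 3, 1])
    return images, batch['labels']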


Note: The tensorflow.contrib.learn.python.learn.datasets.base.Datasets examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their authors, and copyright remains with the original authors; please consult each project's license before distributing or using the code. Do not reproduce this article without permission.