

Python config.DATA_DIR attribute code examples

This article collects typical usage examples of the config.DATA_DIR attribute in Python. If you are unsure what config.DATA_DIR is for, how to use it, or what it looks like in real code, the curated examples below may help. You can also explore further usage examples from the config module that this attribute belongs to.


The following presents 15 code examples of the config.DATA_DIR attribute, sorted by popularity by default.
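
In these examples, config refers to a project-local config.py module rather than a third-party package, and DATA_DIR is simply a module-level attribute pointing at the project's data directory. For reference, a minimal config.py might look like the sketch below; DATA_DIR and the attribute names used in Examples 1 and 4 come from the examples themselves, while the concrete values are illustrative placeholders.

import os

# Root directory for all datasets; allow overriding via an environment variable.
DATA_DIR = os.environ.get(
    "DATA_DIR", os.path.join(os.path.dirname(os.path.abspath(__file__)), "data"))

# Dataset files referenced in Examples 1 and 4 (file names are placeholders).
MNIST_BINARIZED = "binarized_mnist.pkl"
MNIST_FLOAT = "mnist_float.npy"
OMNIGLOT = "omniglot.mat"

How DATA_DIR is consumed differs from project to project: most of the examples below treat it as a string and combine it with os.path.join or string concatenation, while a few treat it as a pathlib.Path (see the note after Example 6).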

Example 1: read_MNIST

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def read_MNIST(binarize=False):
  """Reads in MNIST images.

  Args:
    binarize: whether to use the fixed binarization

  Returns:
    x_train: 50k training images
    x_valid: 10k validation images
    x_test: 10k test images

  """
  with gfile.FastGFile(os.path.join(config.DATA_DIR, config.MNIST_BINARIZED), 'r') as f:
    (x_train, _), (x_valid, _), (x_test, _) = pickle.load(f)

  if not binarize:
    with gfile.FastGFile(os.path.join(config.DATA_DIR, config.MNIST_FLOAT), 'r') as f:
      x_train = np.load(f).reshape(-1, 784)

  return x_train, x_valid, x_test 
Developer: rky0930, Project: yolo_v2, Lines of code: 22, Source file: datasets.py

Example 2: experiment

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def experiment():
    logger.configure(log_directory=config.DATA_DIR, prefix=EXP_PREFIX, color='green')

    # 1) EUROSTOXX
    dataset = datasets.EuroStoxx50()

    result_df = run_benchmark_train_test_fit_cv_ml(dataset, model_dict, n_train_valid_splits=3, shuffle_splits=False, seed=22)

    # 2) NYC Taxi
    for n_samples in [10000]:
        dataset = datasets.NCYTaxiDropoffPredict(n_samples=n_samples)

    df = run_benchmark_train_test_fit_cv_ml(dataset, model_dict, n_train_valid_splits=3, shuffle_splits=True, seed=22)

    result_df = pd.concat([result_df, df], ignore_index=True)

    # 3) UCI
    for dataset_class in [datasets.BostonHousing, datasets.Conrete, datasets.Energy]:
        dataset = dataset_class()
        df = run_benchmark_train_test_fit_cv_ml(dataset, model_dict, n_train_valid_splits=3, shuffle_splits=True, seed=22)
        result_df = pd.concat([result_df, df], ignore_index=True)

    logger.log('\n', str(result_df)) 
Developer: freelunchtheorem, Project: Conditional_Density_Estimation, Lines of code: 25, Source file: benchmark_empirical_kde.py

Example 3: main

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def main():
    args = parse_args()
    path = os.path.abspath(args.PATH)

    if path.endswith(".manifest.template"):
        if not os.path.isfile(path):
            sys.exit("Cannot find file %r" % path)
        manifest_templates = [path]
    else:
        manifest_templates = get_manifest_templates(path)

    for manifest_template in manifest_templates:
        manifest = manifest_template[:-9]
        with open(manifest_template) as f_template:
            with open(manifest, "w+") as f_manifest:
                for line in f_template:
                    line = line.replace("$(DATA_DIR)", DATA_DIR)
                    line = line.replace("$(CONFIG_DIR)", CONFIG_DIR)
                    line = line.replace("$(RUNTIME)", RUNTIME)
                    line = line.replace("$(PYTHON_VERSION)", PYTHON_VERSION)
                    line = line.replace("$(LIBPROTOBUF_VERSION)", LIBPROTOBUF_VERSION)
                    line = line.replace("$(TESTS_DIR)", TESTS_DIR)
                    f_manifest.write(line) 
Developer: adombeck, Project: python-sgx, Lines of code: 25, Source file: create_manifest.py

Example 4: read_omniglot

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def read_omniglot(binarize=False):
  """Reads in Omniglot images.

  Args:
    binarize: whether to use the fixed binarization

  Returns:
    x_train: training images
    x_valid: validation images
    x_test: test images

  """
  n_validation=1345

  def reshape_data(data):
    return data.reshape((-1, 28, 28)).reshape((-1, 28*28), order='fortran')

  omni_raw = scipy.io.loadmat(os.path.join(config.DATA_DIR, config.OMNIGLOT))

  train_data = reshape_data(omni_raw['data'].T.astype('float32'))
  test_data = reshape_data(omni_raw['testdata'].T.astype('float32'))

  # Binarize the data with a fixed seed
  if binarize:
    np.random.seed(5)
    train_data = (np.random.rand(*train_data.shape) < train_data).astype(float)
    test_data = (np.random.rand(*test_data.shape) < test_data).astype(float)

  shuffle_seed = 123
  permutation = np.random.RandomState(seed=shuffle_seed).permutation(train_data.shape[0])
  train_data = train_data[permutation]

  x_train = train_data[:-n_validation]
  x_valid = train_data[-n_validation:]
  x_test = test_data

  return x_train, x_valid, x_test 
Developer: rky0930, Project: yolo_v2, Lines of code: 39, Source file: datasets.py

Example 5: get_dataset_mean_std

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def get_dataset_mean_std():
    all_sub_dirs = []
    for split in config.SPLITS:
        if 'test' not in split:
            for cat in config.CATEGORIES:
                all_sub_dirs.append(os.path.join(config.DATA_DIR, split, 'Images', cat))
    all_image_nums = 0
    #print(all_sub_dirs)
    means = [0., 0., 0.]
    stds = [0., 0., 0.]
    for dirs in all_sub_dirs:
        all_images = tf.gfile.Glob(os.path.join(dirs, '*.jpg'))
        for image in all_images:
            np_image = imread(image, mode='RGB')
            if len(np_image.shape) < 3 or np_image.shape[-1] != 3:
                continue
            all_image_nums += 1

            means[0] += np.mean(np_image[:, :, 0]) / 10000.
            means[1] += np.mean(np_image[:, :, 1]) / 10000.
            means[2] += np.mean(np_image[:, :, 2]) / 10000.

            stds[0] += np.std(np_image[:, :, 0]) / 10000.
            stds[1] += np.std(np_image[:, :, 1]) / 10000.
            stds[2] += np.std(np_image[:, :, 2]) / 10000.

        print([_*10000./all_image_nums for _ in means])
        print([_*10000./all_image_nums for _ in stds])
    print([_*10000./all_image_nums for _ in means])
    print([_*10000./all_image_nums for _ in stds])
    print(all_image_nums) 
Developer: HiKapok, Project: tf.fashionAI, Lines of code: 33, Source file: get_dataset_mean_std.py

Example 6: __init__

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def __init__(self):
        image_folder = config.DATA_DIR / 'BSR/BSDS500/data/images'
        self.image_files = list(map(str, image_folder.glob('*/*.jpg'))) 
Developer: jvanvugt, Project: pytorch-domain-adaptation, Lines of code: 5, Source file: data.py
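
Note that this example, together with Examples 7 and 15 below from the same project, composes sub-paths with the / operator, which only works if config.DATA_DIR is a pathlib.Path rather than a plain string. A minimal sketch of that variant (the directory layout comes from the examples; everything else is illustrative):

from pathlib import Path

# DATA_DIR as a pathlib.Path, so sub-paths can be composed with the / operator.
DATA_DIR = Path(__file__).resolve().parent / "data"

image_folder = DATA_DIR / 'BSR/BSDS500/data/images'  # as used in Example 6
mnist_root = DATA_DIR / 'mnist'                      # as used in Examples 7 and 15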

Example 7: create_dataloaders

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def create_dataloaders(batch_size):
    dataset = MNIST(config.DATA_DIR/'mnist', train=True, download=True,
                    transform=Compose([GrayscaleToRgb(), ToTensor()]))
    shuffled_indices = np.random.permutation(len(dataset))
    train_idx = shuffled_indices[:int(0.8*len(dataset))]
    val_idx = shuffled_indices[int(0.8*len(dataset)):]

    train_loader = DataLoader(dataset, batch_size=batch_size, drop_last=True,
                              sampler=SubsetRandomSampler(train_idx),
                              num_workers=1, pin_memory=True)
    val_loader = DataLoader(dataset, batch_size=batch_size, drop_last=False,
                            sampler=SubsetRandomSampler(val_idx),
                            num_workers=1, pin_memory=True)
    return train_loader, val_loader 
Developer: jvanvugt, Project: pytorch-domain-adaptation, Lines of code: 16, Source file: train_source.py

Example 8: test_store_load_configrunner_pipeline

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def test_store_load_configrunner_pipeline(self):

    logger.configure(log_directory=config.DATA_DIR, prefix=EXP_PREFIX)
    test_dir = os.path.join(logger.log_directory, logger.prefix)
    if os.path.exists(test_dir):
      shutil.rmtree(test_dir)


    keys_of_interest = ['task_name', 'estimator', 'simulator', 'n_observations', 'center_sampling_method', 'x_noise_std', 'y_noise_std',
                        'ndim_x', 'ndim_y', 'n_centers', "n_mc_samples", "n_x_cond", 'mean_est', 'cov_est', 'mean_sim', 'cov_sim',
                        'kl_divergence', 'hellinger_distance', 'js_divergence', 'x_cond', 'random_seed', "mean_sim", "cov_sim",
                        "mean_abs_diff", "cov_abs_diff", "VaR_sim", "VaR_est", "VaR_abs_diff", "CVaR_sim", "CVaR_est", "CVaR_abs_diff",
                        "time_to_fit"]


    conf_est, conf_sim, observations = question1()
    conf_runner = ConfigRunner(EXP_PREFIX, conf_est, conf_sim, observations=observations, keys_of_interest=keys_of_interest,
                               n_mc_samples=1 * 10 ** 2, n_x_cond=5, n_seeds=5)

    conf_runner.configs = random.sample(conf_runner.configs, NUM_CONFIGS_TO_TEST)

    conf_runner.run_configurations(dump_models=True, multiprocessing=False)
    results_from_pkl_file = dict({logger.load_pkl(RESULTS_FILE)})

    """ check if model dumps have all been created """
    dump_dir = os.path.join(logger.log_directory, logger.prefix, 'model_dumps')
    model_dumps_list = os.listdir(dump_dir) # get list of all model files
    model_dumps_list_no_suffix = [os.path.splitext(entry)[0] for entry in model_dumps_list] # remove suffix

    for conf in conf_runner.configs:
      self.assertTrue(conf['task_name'] in model_dumps_list_no_suffix)


    """ check if model dumps can be used successfully"""
    for model_dump_i in model_dumps_list:
      #tf.reset_default_graph()
      with tf.Session(graph=tf.Graph()):
        model = logger.load_pkl("model_dumps/"+model_dump_i)
        self.assertTrue(model)
        if model.ndim_x == 1 and model.ndim_y == 1:
          self.assertTrue(model.plot3d(show=False)) 
Developer: freelunchtheorem, Project: Conditional_Density_Estimation, Lines of code: 43, Source file: unittests_configrunner.py

Example 9: experiment

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def experiment():
    logger.configure(log_directory=config.DATA_DIR, prefix=EXP_PREFIX, color='green')

    # 1) EUROSTOXX
    dataset = datasets.EuroStoxx50()

    result_df = run_benchmark_train_test_fit_cv(dataset, model_dict, n_train_valid_splits=3, n_eval_seeds=5, shuffle_splits=False,
                                    n_folds=5, seed=22)

    # 2) NYC Taxi
    for n_samples in [10000]:
        dataset = datasets.NCYTaxiDropoffPredict(n_samples=n_samples)

    df = run_benchmark_train_test_fit_cv(dataset, model_dict, n_train_valid_splits=3, n_eval_seeds=5, shuffle_splits=True,
                                    n_folds=5, seed=22,  n_jobs_inner=-1, n_jobc_outer=2)
    result_df = pd.concat([result_df, df], ignore_index=True)


    # 3) UCI
    result_df = None
    for dataset_class in [datasets.BostonHousing, datasets.Conrete, datasets.Energy]:
        dataset = dataset_class()
        df = run_benchmark_train_test_fit_cv(dataset, model_dict, n_train_valid_splits=1, n_eval_seeds=5,
                                             shuffle_splits=True, n_folds=5, seed=22, n_jobs_inner=-1,
                                             n_jobc_outer=2)
        result_df = pd.concat([result_df, df], ignore_index=True)

    logger.log('\n', str(result_df)) 
Developer: freelunchtheorem, Project: Conditional_Density_Estimation, Lines of code: 30, Source file: benchmark_empirical.py

Example 10: experiment

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def experiment():
    logger.configure(log_directory=config.DATA_DIR, prefix=EXP_PREFIX, color='green')

    # 1) EUROSTOXX
    dataset = datasets.EuroStoxx50()

    result_df = run_benchmark_train_test_fit_cv(dataset, model_dict, n_train_valid_splits=3, n_eval_seeds=5, shuffle_splits=False,
                                    n_folds=5, seed=22, n_jobs_inner=-1, n_jobc_outer=3)

    # 2) NYC Taxi
    for n_samples in [10000]:
        dataset = datasets.NCYTaxiDropoffPredict(n_samples=n_samples)

    df = run_benchmark_train_test_fit_cv(dataset, model_dict, n_train_valid_splits=3, n_eval_seeds=5, shuffle_splits=True,
                                    n_folds=5, seed=22, n_jobs_inner=-1, n_jobc_outer=3)
    result_df = pd.concat([result_df, df], ignore_index=True)

    # 3) UCI
    for dataset_class in [datasets.BostonHousing, datasets.Conrete, datasets.Energy]:
        dataset = dataset_class()
        df = run_benchmark_train_test_fit_cv(dataset, model_dict, n_train_valid_splits=3, n_eval_seeds=5,
                                        shuffle_splits=True, n_folds=5, seed=22, n_jobs_inner=-1, n_jobc_outer=3)
        result_df = pd.concat([result_df, df], ignore_index=True)

    logger.log('\n', str(result_df))
    logger.log('\n', result_df.to_latex()) 
Developer: freelunchtheorem, Project: Conditional_Density_Estimation, Lines of code: 28, Source file: regularization_empirical.py

Example 11: get_train_valid_test_data

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def get_train_valid_test_data(augmentation=False):
    # load data
    Q = load_question(params)
    dfTrain = load_train()
    dfTest = load_test()
    # train_features = load_feat("train")
    # test_features = load_feat("test")
    # params["num_features"] = train_features.shape[1]

    # load split
    with open(config.SPLIT_FILE, "rb") as f:
        train_idx, valid_idx = pkl.load(f)

    # validation
    if augmentation:
        dfDev = pd.read_csv(config.DATA_DIR + "/" + "dev_aug.csv")
        dfDev = downsample(dfDev)
        params["use_features"] = False
        params["augmentation_decay_steps"] = 50000
        params["decay_steps"] = 50000
        X_dev = get_model_data(dfDev, None, params)
    else:
        X_dev = get_model_data(dfTrain.loc[train_idx], None, params)
    X_valid = get_model_data(dfTrain.loc[valid_idx], None, params)

    # submit
    if augmentation:
        dfTrain = pd.read_csv(config.DATA_DIR + "/" + "train_aug.csv")
        dfTrain = downsample(dfTrain)
        params["use_features"] = False
        params["augmentation_decay_steps"] = 50000
        params["decay_steps"] = 50000
        X_train = get_model_data(dfTrain, None, params)
    else:
        X_train = get_model_data(dfTrain, None, params)
    X_test = get_model_data(dfTest, None, params)

    return X_dev, X_valid, X_train, X_test, Q 
Developer: ChenglongChen, Project: tensorflow-DSMM, Lines of code: 40, Source file: main.py

Example 12: load_task2

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def load_task2(dataset):
    data_file = os.path.join(DATA_DIR, "task2/us_{}.text".format(dataset))
    label_file = os.path.join(DATA_DIR, "task2/us_{}.labels".format(dataset))

    X = []
    y = []
    with open(data_file, 'r', encoding="utf-8") as dfile, \
            open(label_file, 'r', encoding="utf-8") as lfile:
        for tweet, label in zip(dfile, lfile):
            X.append(tweet.rstrip())
            y.append(int(label.rstrip()))

    return X, y 
Developer: cbaziotis, Project: ntua-slp-semeval2018, Lines of code: 15, Source file: task2.py

Example 13: load_data

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def load_data(DATA_NAME):
    
    print('loading', DATA_NAME, 'data ...')
    myTrans = pd.read_csv(DATA_DIR + DATA_NAME + ".data.csv", encoding = 'latin1')
    myTrans['PID'] = myTrans['PID'].apply(lambda x : list(set(eval(x))))
    myItem = pd.read_csv(DATA_DIR + DATA_NAME + ".meta.csv", encoding = 'latin1')
    n_item = len(myItem)
    n_user = myTrans['UID'].max() + 1
    print('done!')
    print('interactions about', n_item, 'products and', n_user, 'users are loaded')
    return myTrans, myItem, n_item, n_user 
Developer: MengtingWan, Project: grocery, Lines of code: 13, Source file: main.py
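
Examples 11 and 13 build file paths by plain string concatenation, which assumes config.DATA_DIR is a string and, in Example 13, that it already ends with a path separator. A small sketch of the equivalent os.path.join form, which drops the trailing-separator assumption (the DATA_DIR and DATA_NAME values here are illustrative):

import os

DATA_DIR = "/path/to/data"   # illustrative; normally taken from config
DATA_NAME = "groceries"      # illustrative dataset name; Example 13 receives it as an argument

# Same files as DATA_DIR + DATA_NAME + ".data.csv" / ".meta.csv" in Example 13,
# but correct whether or not DATA_DIR carries a trailing separator.
data_csv = os.path.join(DATA_DIR, DATA_NAME + ".data.csv")
meta_csv = os.path.join(DATA_DIR, DATA_NAME + ".meta.csv")
print(data_csv, meta_csv)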

Example 14: convert_test

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def convert_test(output_dir, splits=config.SPLITS):

    class_hist = {'blouse': 0,
                 'dress': 0,
                 'outwear': 0,
                 'skirt': 0,
                 'trousers': 0}

    for cat in config.CATEGORIES:
        total_examples = 0
        # TODO: create tfrecorder writer here
        sys.stdout.write('\nprocessing category: {}...'.format(cat))
        sys.stdout.flush()
        file_idx = 0
        record_idx = 0
        tf_filename = os.path.join(output_dir, '%s_%04d.tfrecord' % (cat, file_idx))
        tfrecord_writer = tf.python_io.TFRecordWriter(tf_filename)
        this_key_map = keymap_factory[cat]

        for split in splits:
            if 'train' in split: continue
            sys.stdout.write('\nprocessing split: {}...\n'.format(split))
            sys.stdout.flush()
            split_path = os.path.join(config.DATA_DIR, split)
            anna_file = os.path.join(split_path, 'test.csv')
            anna_pd = pd.read_csv(anna_file)
            this_nums = len(anna_pd.index)
            total_examples += this_nums
            for index, row in anna_pd.iterrows():
                sys.stdout.write('\r>> Converting image %d/%d' % (index+1, this_nums))
                sys.stdout.flush()
                category = row['image_category']
                if not (cat in category): continue
                class_hist[category] += 1
                image_file = row['image_id']
                full_file_path = os.path.join(split_path, image_file)
                #print(len(all_columns_name))
                class_id = config.category2ind[category]

                _test_add_to_tfrecord(tfrecord_writer, full_file_path, image_file, class_id)
                record_idx += 1
                if record_idx > SAMPLES_PER_FILES:
                    record_idx = 0
                    file_idx += 1
                    tf_filename = os.path.join(output_dir, '%s_%04d.tfrecord' % (cat, file_idx))
                    tfrecord_writer.flush()
                    tfrecord_writer.close()
                    tfrecord_writer = tf.python_io.TFRecordWriter(tf_filename)
    print('\nFinished converting the whole test dataset!')
    print(class_hist, total_examples)
    return class_hist, total_examples 
Developer: HiKapok, Project: tf.fashionAI, Lines of code: 53, Source file: convert_tfrecords.py

Example 15: main

# Required module: import config [as alias]
# Or: from config import DATA_DIR [as alias]
def main(args):
    model = Net().to(device)
    model.load_state_dict(torch.load(args.MODEL_FILE))
    feature_extractor = model.feature_extractor
    clf = model.classifier

    discriminator = nn.Sequential(
        GradientReversal(),
        nn.Linear(320, 50),
        nn.ReLU(),
        nn.Linear(50, 20),
        nn.ReLU(),
        nn.Linear(20, 1)
    ).to(device)

    half_batch = args.batch_size // 2
    source_dataset = MNIST(config.DATA_DIR/'mnist', train=True, download=True,
                          transform=Compose([GrayscaleToRgb(), ToTensor()]))
    source_loader = DataLoader(source_dataset, batch_size=half_batch,
                               shuffle=True, num_workers=1, pin_memory=True)
    
    target_dataset = MNISTM(train=False)
    target_loader = DataLoader(target_dataset, batch_size=half_batch,
                               shuffle=True, num_workers=1, pin_memory=True)

    optim = torch.optim.Adam(list(discriminator.parameters()) + list(model.parameters()))

    for epoch in range(1, args.epochs+1):
        batches = zip(source_loader, target_loader)
        n_batches = min(len(source_loader), len(target_loader))

        total_domain_loss = total_label_accuracy = 0
        for (source_x, source_labels), (target_x, _) in tqdm(batches, leave=False, total=n_batches):
                x = torch.cat([source_x, target_x])
                x = x.to(device)
                domain_y = torch.cat([torch.ones(source_x.shape[0]),
                                      torch.zeros(target_x.shape[0])])
                domain_y = domain_y.to(device)
                label_y = source_labels.to(device)

                features = feature_extractor(x).view(x.shape[0], -1)
                domain_preds = discriminator(features).squeeze()
                label_preds = clf(features[:source_x.shape[0]])
                
                domain_loss = F.binary_cross_entropy_with_logits(domain_preds, domain_y)
                label_loss = F.cross_entropy(label_preds, label_y)
                loss = domain_loss + label_loss

                optim.zero_grad()
                loss.backward()
                optim.step()

                total_domain_loss += domain_loss.item()
                total_label_accuracy += (label_preds.max(1)[1] == label_y).float().mean().item()

        mean_loss = total_domain_loss / n_batches
        mean_accuracy = total_label_accuracy / n_batches
        tqdm.write(f'EPOCH {epoch:03d}: domain_loss={mean_loss:.4f}, '
                   f'source_accuracy={mean_accuracy:.4f}')

        torch.save(model.state_dict(), 'trained_models/revgrad.pt') 
Developer: jvanvugt, Project: pytorch-domain-adaptation, Lines of code: 63, Source file: revgrad.py


Note: The config.DATA_DIR attribute examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and any distribution or use should follow the license of the corresponding project. Do not reproduce this article without permission.