

Python hickle.load Method Code Examples

This article collects typical usage examples of the hickle.load method in Python. If you are wondering how exactly hickle.load is used, how to call it, or what real code that uses it looks like, the curated examples below should help. You can also explore further usage examples from the hickle package to which the method belongs.


The following presents 15 code examples of the hickle.load method, sorted by popularity by default.
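
Before going into the examples, here is a minimal sketch of the round trip that all of them rely on: hickle.dump serializes a Python object to an HDF5 file, and hickle.load reads it back. This sketch is not taken from any of the projects below; the file name demo.hkl and the sample data are made up for illustration.

import numpy as np
import hickle as hkl

# Write a dict of NumPy arrays to an HDF5 file.
data = {'mat': np.random.rand(10, 4), 'y': np.arange(10)}
hkl.dump(data, 'demo.hkl', mode='w')

# Read it back; the loaded object mirrors the original structure.
loaded = hkl.load('demo.hkl')
print(loaded['mat'].shape, loaded['y'].dtype)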

Example 1: data_split

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def data_split(inputfile):
    data = hkl.load(inputfile)
    X = data['mat']
    X_kspec = data['kmer']
    y = data['y']
    rs = ShuffleSplit(len(y), n_iter=1,random_state = 1)
    X_kspec = X_kspec.reshape((X_kspec.shape[0],1024,4))
    X = np.concatenate((X,X_kspec), axis = 1)
    X = X[:,np.newaxis]
    X = X.transpose((0,1,3,2))
    for train_idx, test_idx in rs:
        X_train = X[train_idx,:]
        y_train = y[train_idx]
        X_test = X[test_idx,:]
        y_test = y[test_idx]
    X_train = X_train.astype('float32')
    y_train = y_train.astype('int32')
    X_test = X_test.astype('float32')
    y_test = y_test.astype('int32')
    return [X_train, y_train, X_test, y_test]

#define the network architecture 
Developer ID: kimmo1019, Project: Deopen, Lines of code: 24, Source file: Deopen_classification.py

Example 2: load_inference_data

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def load_inference_data(data_path='./data'):
    start_t = time.time()
    data = {}
    data['features'] = hickle.load(os.path.join(data_path, 'inference.features.hkl'))
    with open(os.path.join(data_path, 'inference.file.names.pkl'), 'rb') as f:
        data['file_names'] = pickle.load(f)
    with open(os.path.join(data_path, 'inference.image.idxs.pkl'), 'rb') as f:
        data['image_idxs'] = pickle.load(f)

    for k, v in data.iteritems():
        if type(v) == np.ndarray:
            print k, type(v), v.shape, v.dtype
        else:
            print k, type(v), len(v)
    end_t = time.time()
    print "Elapse time: %.2f" % (end_t - start_t)
    return data 
Developer ID: weili-ict, Project: SelfCriticalSequenceTraining-tensorflow, Lines of code: 22, Source file: utils.py

Example 3: __init__

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def __init__(self, load=None, **kwargs):
    if load is None:
      args = {}
    else:
      args = util.load_params(load, 'train')
    
    util.pp.pprint(args)
    Default.__init__(self, **args)

    if self.init:
      self.learner.init()
      self.learner.save()
    else:
      self.learner.restore()
    
    print("Loading experiences from", self.data)
    
    start_time = time.time()
    self.experiences = hickle.load(self.data)
    print("Loaded experiences in %d seconds." % (time.time() - start_time))
    if 'initial' not in self.experiences:
      self.experiences['initial'] = [] 
Developer ID: vladfi1, Project: phillip, Lines of code: 24, Source file: train_model.py

Example 4: load_dataset

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def load_dataset(prefix, suffix):
    filename = os.path.join(prefix, 'train_fc7_{}.hkl'.format(suffix))
    X_train = hkl.load(filename).astype(np.float32)
    filename = os.path.join(prefix, 'train_conf.hkl')
    y_train = hkl.load(filename).astype(np.uint8)

    filename = os.path.join(prefix, 'val_fc7_{}.hkl'.format(suffix))
    X_val = hkl.load(filename).astype(np.float32)
    filename = os.path.join(prefix, 'val_conf.hkl')
    y_val = hkl.load(filename).astype(np.uint8)

    filename = os.path.join(prefix, 'train_priors.hkl')
    priors = hkl.load(filename).astype(np.float32).flatten()
    # We just return all the arrays in order, as expected in main().
    # (It doesn't matter how we do this as long as we can read them again.)
    return priors, X_train, y_train, X_val, y_val


# ############################# Batch iterator ################################
# This is just a simple helper function iterating over training data in
# mini-batches of a particular size, optionally in random order. It assumes
# data is available as numpy arrays. 
Developer ID: escorciav, Project: deep-action-proposals, Lines of code: 24, Source file: learning.py
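
The mini-batch helper that the comment at the end of Example 4 refers to is not part of the snippet above. Below is a minimal sketch of such an iterator, following the common Lasagne-style iterate_minibatches pattern; the name and signature here are illustrative assumptions, not necessarily those used in the project.

import numpy as np

def iterate_minibatches(inputs, targets, batchsize, shuffle=False):
    # Yield successive (inputs, targets) batches of size batchsize,
    # optionally visiting the data in a random order.
    assert len(inputs) == len(targets)
    indices = np.arange(len(inputs))
    if shuffle:
        np.random.shuffle(indices)
    for start in range(0, len(inputs) - batchsize + 1, batchsize):
        excerpt = indices[start:start + batchsize]
        yield inputs[excerpt], targets[excerpt]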

Example 5: input_parser

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def input_parser():
    description = 'Apply PCA over features'
    p = argparse.ArgumentParser(description=description)
    h_dsname = ('HDF5-file with features where to apply transformation')
    p.add_argument('dsfile', help=h_dsname)
    p.add_argument('pcafile', help='HDF5-file with PCA results')
    p.add_argument('-o', '--outputfile', default=None,
                   help='Fullpath name for output-file')
    g = p.add_mutually_exclusive_group()
    g.add_argument('-e', '--energy', default=0.9, type=float,
                   help='Minimum energy of eigenvalues')
    g.add_argument('-k', '--k', default=None, type=int,
                   help='Number of components to select')
    h_pcasrc = 'Dict with keys (S, U, x_mean) pointing variables of pcafile'
    p.add_argument('-ps', '--pca_src', default=PCA_SOURCE, help=h_pcasrc,
                   type=json.load)
    p.add_argument('-ds', '--ds_src', default=DS_SOURCE,
                   help='source of hdf5-file with features')
    p.add_argument('-v', '--verbose', action='store_true',
                   help='verbosity level')
    p.add_argument('-vl', '--vloop', default=100, type=int,
                   help='Control frequency of verbose level inside loops')
    return p 
Developer ID: escorciav, Project: deep-action-proposals, Lines of code: 25, Source file: dim_reduction.py

Example 6: _load_ld_info_

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def _load_ld_info_(local_ld_dict_file, verbose=True, compressed=True, use_hickle=False):
    t0 = time.time()
    if use_hickle:
        f = h5py.File(local_ld_dict_file, 'r')
        ld_dict = hickle.load(f)
        f.close()
    else:
        if compressed:
            f = gzip.open(local_ld_dict_file, 'r')
        else:
            f = open(local_ld_dict_file, 'r')            
        ld_dict = pickle.load(f)
        f.close()
    t1 = time.time()
    t = (t1 - t0)
    if verbose:
        print('\nIt took %d minutes and %0.2f seconds to load LD information from disk.' % (t / 60, t % 60))
    return ld_dict 
Developer ID: bvilhjal, Project: ldpred, Lines of code: 20, Source file: ld.py

Example 7: __call__

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def __call__(self, nn, train_history):
        val_acc[self.iteration] = train_history[-1]['valid_accuracy']
        params.append(nn.get_all_params_values())
        
#load the best parameters before training 
Developer ID: kimmo1019, Project: Deopen, Lines of code: 7, Source file: Deopen_classification.py

Example 8: __call__

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def __call__(self, nn, train_history):
        val_loss[self.iteration] = train_history[-1]['valid_loss']
        params.append(nn.get_all_params_values())

        
#load the best parameters before training 
Developer ID: kimmo1019, Project: Deopen, Lines of code: 8, Source file: Deopen_regression.py

Example 9: data_split

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def data_split(inputfile,reads_count):
    data = hkl.load(inputfile)
    reads_count= hkl.load(reads_count)
    X = data['mat']
    X_kspec = data['kmer']
    reads_count = np.array(reads_count)
    y = np.mean(reads_count, axis = 1)
    y = np.log(y+1e-3)
    rs = ShuffleSplit(len(y), n_iter=1,random_state = 1)
    X_kspec = X_kspec.reshape((X_kspec.shape[0],1024,4))
    X = np.concatenate((X,X_kspec), axis = 1)
    X = X[:,np.newaxis]
    X = X.transpose((0,1,3,2))
    for train_idx, test_idx in rs:
        X_train = X[train_idx,:]
        y_train = y[train_idx]
        X_test = X[test_idx,:]
        y_test = y[test_idx]
    X_train = X_train.astype('float32')
    y_train = y_train.astype('float32')
    X_test = X_test.astype('float32')
    y_test = y_test.astype('float32')
    print 'Data preparation done!'
    return [X_train, y_train, X_test, y_test]


#define the network architecture 
Developer ID: kimmo1019, Project: Deopen, Lines of code: 29, Source file: Deopen_regression.py

Example 10: wideresnet50

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def wideresnet50(pooling):
    dir_models = os.path.join(expanduser("~"), '.torch/wideresnet')
    path_hkl = os.path.join(dir_models, 'wideresnet50.hkl')
    # download the weights first if they are not cached locally
    if not os.path.isfile(path_hkl):
        os.system('mkdir -p ' + dir_models)
        os.system('wget {} -O {}'.format(model_urls['wideresnet50'], path_hkl))
    params = hkl.load(path_hkl)
    # convert numpy arrays to torch Variables
    for k, v in sorted(params.items()):
        print(k, v.shape)
        params[k] = Variable(torch.from_numpy(v), requires_grad=True)
    f = define_model(params)
    model = WideResNet(pooling)
    return model 
Developer ID: alexandonian, Project: pretorched-x, Lines of code: 17, Source file: wideresnet.py

Example 11: load_coco_data

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def load_coco_data(data_path='./data', split='train'):

    start_t = time.time()
    data = {}
    # use validation data to debug
    if split == "debug":
        split = 'val'
        with open(os.path.join(os.path.join(data_path, 'train'), 'word_to_idx.pkl'), 'rb') as f:
            data['word_to_idx'] = pickle.load(f)
    data_path = os.path.join(data_path, split)
    data['features'] = hickle.load(os.path.join(data_path, '%s.features.hkl' % split))
    with open(os.path.join(data_path, '%s.file.names.pkl' % split), 'rb') as f:
        data['file_names'] = pickle.load(f)
    with open(os.path.join(data_path, '%s.captions.pkl' % split), 'rb') as f:
        data['captions'] = pickle.load(f)
    with open(os.path.join(data_path, '%s.image.idxs.pkl' % split), 'rb') as f:
        data['image_idxs'] = pickle.load(f)

    if split == 'train':
        with open(os.path.join(data_path, 'word_to_idx.pkl'), 'rb') as f:
            data['word_to_idx'] = pickle.load(f)

    for k, v in data.iteritems():
        if type(v) == np.ndarray:
            print k, type(v), v.shape, v.dtype
        else:
            print k, type(v), len(v)
    end_t = time.time()
    print "Elapse time: %.2f" % (end_t - start_t)
    return data 
Developer ID: weili-ict, Project: SelfCriticalSequenceTraining-tensorflow, Lines of code: 32, Source file: utils.py

Example 12: load_pickle

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def load_pickle(path):
    with open(path, 'rb') as f:
        file = pickle.load(f)
        print ('Loaded %s..' % path)
        return file 
Developer ID: weili-ict, Project: SelfCriticalSequenceTraining-tensorflow, Lines of code: 7, Source file: utils.py

Example 13: deserialize_from_file_json

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def deserialize_from_file_json(path):
    f = open(path, 'r')
    obj = json.load(f)
    f.close()
    return obj 
Developer ID: memray, Project: seq2seq-keyphrase, Lines of code: 7, Source file: build_dataset.py

Example 14: deserialize_from_file_hdf5

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def deserialize_from_file_hdf5(path):
    f = open(path, 'r')
    obj = hickle.load(f)
    f.close()
    return obj 
Developer ID: memray, Project: seq2seq-keyphrase, Lines of code: 7, Source file: build_dataset.py

Example 15: deserialize_from_file

# Required module: import hickle [as alias]
# Or: from hickle import load [as alias]
def deserialize_from_file(path):
    f = open(path, 'rb')
    obj = pickle.load(f)
    f.close()
    return obj 
Developer ID: memray, Project: seq2seq-keyphrase, Lines of code: 7, Source file: build_dataset.py


Note: The hickle.load examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are drawn from open-source projects contributed by various developers; the source code is copyrighted by its original authors, and its distribution and use should follow the corresponding project's license. Please do not reproduce without permission.