

Python cPickle.load Function Code Examples

This article collects typical usage examples of the load function from the Python module six.moves.cPickle. If you are wondering what load does, how to call it, or how it is used in real projects, the curated examples below should help.


The following 15 code examples of the load function are ordered by popularity. Each one is excerpted from a real open-source project, with author and source attribution given after the code.
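Before looking at the project examples, here is a minimal, self-contained sketch of the basic round trip with six.moves.cPickle; the file name and payload are made up for illustration. On Python 3, six.moves.cPickle is simply an alias for the built-in pickle module, which is why the encoding argument seen in several examples below exists only on Python 3.

import os
import sys
import tempfile

from six.moves import cPickle

# Any picklable object works; this payload is just for the demo.
payload = {'chars': ['a', 'b', 'c'], 'vocab': {'a': 0, 'b': 1, 'c': 2}}
path = os.path.join(tempfile.gettempdir(), 'demo.pkl')

# Always open pickle files in binary mode ('wb'/'rb') so the same
# code behaves identically on Python 2 and Python 3.
with open(path, 'wb') as f:
    cPickle.dump(payload, f, protocol=2)  # protocol 2 is readable by both

with open(path, 'rb') as f:
    if sys.version_info[0] >= 3:
        # Python 3 can decode str objects pickled by Python 2 as latin-1.
        data = cPickle.load(f, encoding='latin1')
    else:
        data = cPickle.load(f)  # Python 2's cPickle has no encoding kwarg

assert data == payload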

Example 1: sample

def sample(args):
    with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
        chars, vocab = cPickle.load(f)
    model = Model(saved_args, True)
    with tf.Session() as sess:
        tf.initialize_all_variables().run()
        saver = tf.train.Saver(tf.all_variables())
        ckpt = tf.train.get_checkpoint_state(args.save_dir)
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
            ts = model.sample(sess, chars, vocab, args.n, args.prime, args.sample)
            print("Sampled Output\n")
            print(ts)
            print("Converting Text to Speech")
            tts = gTTS(text=ts, lang='en-uk')
            tts.save("ts.mp3")
            audio = MP3("ts.mp3")
            audio_length = audio.info.length
            print("Speaker is Getting Ready")
            mixer.init()
            mixer.music.load('ts.mp3')
            mixer.music.play()
            time.sleep(audio_length+5)
Author: vyraun, Project: char-rnn-tensorflow, Lines: 25, Source: sample.py

Example 2: Init

  def Init(self):
    TFunctionApprox.Init(self)
    L= self.Locate
    if self.Params['data_x'] is not None:
      self.DataX= pickle.load(open(L(self.Params['data_x']), 'rb'))
    if self.Params['data_y'] is not None:
      self.DataY= pickle.load(open(L(self.Params['data_y']), 'rb'))

    self.C= []
    self.Closests= []
    self.CDists= []  #Distance to the closest point

    if self.Params['C'] is not None:
      self.C= copy.deepcopy(self.Params['C'])
    if self.Params['Closests'] is not None:
      self.Closests= copy.deepcopy(self.Params['Closests'])
    if self.Params['CDists'] is not None:
      self.CDists= copy.deepcopy(self.Params['CDists'])

    if self.Options['kernel']=='l2g':  #L2 norm Gaussian
      self.kernel= Gaussian
      self.dist= Dist
    elif self.Options['kernel']=='maxg':  #Max norm Gaussian
      self.kernel= GaussianM
      self.dist= DistM
    else:
      raise Exception('Undefined kernel type:',self.Options['kernel'])

    self.lazy_copy= True  #Assign True when DataX or DataY is updated.
    self.CheckPredictability()
Author: akihikoy, Project: lfd_trick, Lines: 30, Source: base_ml_lwr2.py

Example 3: __init__

 def __init__(self, path, random_seed, fold):
     np.random.seed(random_seed)
     self.path = path
     self.linkfile = path + 'allPostLinkMap.pickle'
     # self.edgelistfile = path + 'edgelist.txt'
     self.labelfile = path + 'allPostLabelMap.pickle'
     self.authorfile = path + 'allPostAuthorMap.pickle'
     self.authorattrifile = path + 'allAuthorAttrisProc.pickle'
     self.authorlinkfile = path + 'allAuthorLinks.pickle'
     self.textfile = path + 'allUserTextSkip.pickle2'
     self.foldfile = path + 'allFolds.pickle'
     self.threadfile = path + 'allThreadPost.pickle'
     self.embfile = path + 'node.emb'
     self.fold = fold
     self.nodes_infor = []
     self.node_map = {} 
     with open(self.textfile, 'rb') as fin:
         allTextEmbed = pickle.load(fin, encoding='latin1')
         self.allTextMap = pickle.load(fin, encoding='latin1')
     self.node_count = len(self.allTextMap)
     for i in range(self.node_count):
         self.add_node(i)
     self.read_label()
     self.read_text()
     self.read_link()
     self.label_count = len(self.label_map)
     # print('label count:', self.label_count)
     self.construct_data()
Author: BillMcGrady, Project: StancePrediction, Lines: 29, Source: DataLoader4Forum.py

Example 4: sample

def sample(args):
    with open(os.path.join(args.save_dir, 'config.pkl'), 'rb') as f:
        saved_args = cPickle.load(f)
    with open(os.path.join(args.save_dir, 'chars_vocab.pkl'), 'rb') as f:
        chars, vocab = cPickle.load(f)
    model = Model(saved_args, True)
    val_loss_file = args.save_dir + '/val_loss.json'
    with tf.Session() as sess:
        saver = tf.train.Saver(tf.all_variables())
        if os.path.exists(val_loss_file):
            with open(val_loss_file, "r") as text_file:
                text = text_file.read()
                loss_json = json.loads(text)
                losses = sorted(loss_json.keys(), key=float)
                loss = losses[0]
                model_checkpoint_path =  loss_json[loss]['checkpoint_path']
                #print(model_checkpoint_path)
                saver.restore(sess, model_checkpoint_path)
                result = model.sample(sess, chars, vocab, args.n, args.prime, args.sample_rule, args.temperature)
                print(result)
                output = "/data/output/"+ str(int(time.time())) + ".txt"
                with open(output, "w") as text_file:
                    text_file.write(result)
                print(output)
Author: jtoy, Project: word-rnn-tf, Lines: 25, Source: sample.py

Example 5: load

 def load(cls, fn, compress=True, *args, **kwargs):
     if compress and not fn.strip().lower().endswith('.gz'):
         fn = fn + '.gz'
     assert os.path.isfile(fn), 'File %s does not exist.' % (fn,)
     if compress:
         return pickle.load(gzip.open(fn, 'rb'))
     return pickle.load(open(fn, 'rb'))
Author: chrisspen, Project: weka, Lines: 7, Source: classifiers.py

Example 6: _parse_file

    def _parse_file(cls, path, pickle=False):
        """parse a .chain file into a list of the type [(L{Chain}, arr, arr, arr) ...]

        :param fname: name of the file"""

        fname = path
        if fname.endswith(".gz"):
            fname = path[:-3]

        if fname.endswith('.pkl'):
            #you asked for the pickled file. I'll give it to you
            log.debug("loading pickled file %s ..." % fname)
            return cPickle.load(open(fname, 'rb'))
        elif os.path.isfile("%s.pkl" % fname):
            #there is a cached version I can give to you
            log.info("loading pickled file %s.pkl ..." % fname)
            if os.stat(path).st_mtime > os.stat("%s.pkl" % fname).st_mtime:
                log.critical("*** pickled file %s.pkl is not up to date ***" % (path))
            return cPickle.load( open("%s.pkl" % fname) )

        data = fastLoadChain(path, cls._strfactory)
        if pickle and not os.path.isfile('%s.pkl' % fname):
            log.info("pckling to %s.pkl" % (fname))
            with open('%s.pkl' % fname, 'wb') as fd:
                cPickle.dump(data, fd)
        return data
Author: BUBioinoformaticsHub, Project: bx-python, Lines: 26, Source: epo.py

Example 7: _pickle_load

def _pickle_load(f):
    if sys.version_info > (3, ):
        # python3
        return pickle.load(f, encoding='latin-1')
    else:
        # python2
        return pickle.load(f)
Author: KotaroSetoyama, Project: chainer, Lines: 7, Source: cifar.py

Example 8: read_pickle_from_file

def read_pickle_from_file(filename):
  with tf.gfile.Open(filename, 'rb') as f:
    if sys.version_info >= (3, 0):
      data_dict = pickle.load(f, encoding='bytes')
    else:
      data_dict = pickle.load(f)
  return data_dict
Author: ALISCIFP, Project: models, Lines: 7, Source: generate_cifar10_tfrecords.py

Example 9: test_read_backward_compatibility

def test_read_backward_compatibility():
    """Test backwards compatibility with a pickled file that's created with Python 2.7.3,
    Numpy 1.7.1_ahl2 and Pandas 0.14.1
    """
    fname = path.join(path.dirname(__file__), "data", "test-data.pkl")

    # For newer versions; verify that unpickling fails when using cPickle
    if PANDAS_VERSION >= LooseVersion("0.16.1"):
        if sys.version_info[0] >= 3:
            with pytest.raises(UnicodeDecodeError), open(fname) as fh:
                cPickle.load(fh)
        else:
            with pytest.raises(TypeError), open(fname) as fh:
                cPickle.load(fh)

    # Verify that PickleStore() uses a backwards compatible unpickler.
    store = PickleStore()

    with open(fname, 'rb') as fh:
        # PickleStore compresses data with lz4
        version = {'blob': compressHC(fh.read())}
    df = store.read(sentinel.arctic_lib, version, sentinel.symbol)

    expected = pd.DataFrame(range(4), pd.date_range(start="20150101", periods=4))
    assert (df == expected).all().all()
Author: cozmacib, Project: arctic, Lines: 25, Source: test_pickle_store.py

Example 10: creator

    def creator(path):
        archive_path = download.cached_download(url)

        train_x = numpy.empty((5, 10000, 3072), dtype=numpy.uint8)
        train_y = numpy.empty((5, 10000), dtype=numpy.uint8)
        test_y = numpy.empty(10000, dtype=numpy.uint8)

        dir_name = '{}-batches-py'.format(name)

        with tarfile.open(archive_path, 'r:gz') as archive:
            # training set
            for i in range(5):
                file_name = '{}/data_batch_{}'.format(dir_name, i + 1)
                d = pickle.load(archive.extractfile(file_name))
                train_x[i] = d['data']
                train_y[i] = d['labels']

            # test set
            file_name = '{}/test_batch'.format(dir_name)
            d = pickle.load(archive.extractfile(file_name))
            test_x = d['data']
            test_y[...] = d['labels']  # copy to array

        train_x = train_x.reshape(50000, 3072)
        train_y = train_y.reshape(50000)

        numpy.savez_compressed(path, train_x=train_x, train_y=train_y,
                               test_x=test_x, test_y=test_y)
        return {'train_x': train_x, 'train_y': train_y,
                'test_x': test_x, 'test_y': test_y}
Author: sarikayamehmet, Project: chainer, Lines: 30, Source: cifar.py

Example 11: load_batch

def load_batch(fpath, label_key='labels'):
    """Internal utility for parsing CIFAR data.

    # Arguments
        fpath: path the file to parse.
        label_key: key for label data in the retrieve
            dictionary.

    # Returns
        A tuple `(data, labels)`.
    """
    f = open(fpath, 'rb')
    if sys.version_info < (3,):
        d = cPickle.load(f)
    else:
        d = cPickle.load(f, encoding='bytes')
        # decode utf8
        d_decoded = {}
        for k, v in d.items():
            d_decoded[k.decode('utf8')] = v
        d = d_decoded
    f.close()
    data = d['data']
    labels = d[label_key]

    data = data.reshape(data.shape[0], 3, 32, 32)
    return data, labels
Author: 5ke, Project: keras, Lines: 27, Source: cifar.py

Example 12: __init__

    def __init__(self, experiment_name):
        self.engine = experiment.Experiment.get_engine(
            experiment_name, "sqlite"
        )
        SQLAlchemySession.configure(bind=self.engine)
        self.session = SQLAlchemySession()

        self.hdf5_file = h5py.File(
            os.path.join(experiment_name, "phenotypes.hdf5"),
            "r"
        )

        self.config = os.path.join(experiment_name, "configuration.yaml")
        if not os.path.isfile(self.config):
            self.config = None

        # Experiment info.
        filename = os.path.join(experiment_name, "experiment_info.pkl")
        with open(filename, "rb") as f:
            self.info = pickle.load(f)

        # Task info.
        self.task_info = {}
        path = os.path.join(experiment_name, "tasks")
        for task_dir in os.listdir(path):
            info_path = os.path.join(path, task_dir, "task_info.pkl")
            if os.path.isfile(info_path):
                with open(info_path, "rb") as f:
                    self.task_info[task_dir] = pickle.load(f)

        # Correlation matrix.
        filename = os.path.join(experiment_name, "phen_correlation_matrix.npy")
        self.correlation_matrix = np.load(filename)
Author: legaultmarc, Project: forward, Lines: 33, Source: backend.py

Example 13: main

def main():
    # Reading the configuration from stdin
    classifier = pickle.load(sys.stdin)
    info = pickle.load(sys.stdin)
    assert isinstance(classifier, tmva.TMVAClassifier) or isinstance(classifier, tmva.TMVARegressor)
    assert isinstance(info, tmva._AdditionalInformation)
    tmva_process(classifier, info)
Author: 0x0all, Project: rep, Lines: 7, Source: _tmvaFactory.py

Example 14: get_data

def get_data():
  """Get data in form suitable for episodic training.

  Returns:
    Train and test data as dictionaries mapping
    label to list of examples.
  """
  with tf.gfile.GFile(DATA_FILE_FORMAT % 'train', 'rb') as f:
    processed_train_data = pickle.load(f)
  with tf.gfile.GFile(DATA_FILE_FORMAT % 'test', 'rb') as f:
    processed_test_data = pickle.load(f)

  train_data = {}
  test_data = {}

  for data, processed_data in zip([train_data, test_data],
                                  [processed_train_data, processed_test_data]):
    for image, label in zip(processed_data['images'],
                            processed_data['labels']):
      if label not in data:
        data[label] = []
      data[label].append(image.reshape([-1]).astype('float32'))

  intersection = set(train_data.keys()) & set(test_data.keys())
  assert not intersection, 'Train and test data intersect.'
  ok_num_examples = [len(ll) == 20 for _, ll in train_data.items()]
  assert all(ok_num_examples), 'Bad number of examples in train data.'
  ok_num_examples = [len(ll) == 20 for _, ll in test_data.items()]
  assert all(ok_num_examples), 'Bad number of examples in test data.'

  logging.info('Number of labels in train data: %d.', len(train_data))
  logging.info('Number of labels in test data: %d.', len(test_data))

  return train_data, test_data
Author: ALISCIFP, Project: models, Lines: 34, Source: data_utils.py

Example 15: store_and_or_load_data

def store_and_or_load_data(dataset_info, outputdir):
    if dataset_info.endswith('.pkl'):
        save_path = dataset_info
    else:
        dataset = os.path.basename(dataset_info)
        data_dir = os.path.dirname(dataset_info)
        save_path = os.path.join(outputdir, dataset + '_Manager.pkl')

    if not os.path.exists(save_path):
        lock = lockfile.LockFile(save_path)
        while not lock.i_am_locking():
            try:
                lock.acquire(timeout=60)  # wait up to 60 seconds
            except lockfile.LockTimeout:
                lock.break_lock()
                lock.acquire()
        print('I locked', lock.path)
        # It is not yet sure, whether the file already exists
        try:
            if not os.path.exists(save_path):
                D = CompetitionDataManager(dataset, data_dir,
                                           verbose=True,
                                           encode_labels=True)
                fh = open(save_path, 'wb')
                pickle.dump(D, fh, -1)
                fh.close()
            else:
                D = pickle.load(open(save_path, 'rb'))
        except Exception:
            raise
        finally:
            lock.release()
    else:
        D = pickle.load(open(save_path, 'rb'))
    return D
Author: WarmongeR1, Project: auto-sklearn, Lines: 35, Source: base_interface.py


Note: The six.moves.cPickle.load examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective developers, and copyright remains with the original authors. Consult each project's license before redistributing or reusing the code; do not reproduce without permission.