

Python diskcache.Cache Code Examples

This article collects typical usage examples of Python's diskcache.Cache. If you are unsure what diskcache.Cache does, how to use it, or simply want concrete examples, the curated code samples below should help. You can also explore further usage examples from the diskcache package.


The following presents 15 code examples of diskcache.Cache, ordered by popularity by default.
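
Before turning to the examples, here is a minimal sketch of typical diskcache.Cache usage (the directory path is only a placeholder): open a cache directory, store and read values through the mapping interface, optionally set an expiration, and close the cache when done.

import diskcache

# Open (or create) a cache backed by the given directory; the path is illustrative.
cache = diskcache.Cache('/tmp/example_cache')

cache['answer'] = 42           # store a value via the mapping interface
print(cache.get('answer'))     # read it back; get() returns None on a miss
print('answer' in cache)       # membership test

# set() additionally accepts an expiration time in seconds
cache.set('token', 'abc123', expire=60)

cache.close()                  # or use the cache as a context manager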

Example 1: _load_file_cache

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def _load_file_cache(filename,
                     database_format,
                     encoding,
                     frame_id_mask,
                     strict,
                     cache_dir):
    with open(filename, 'rb') as fin:
        key = fin.read()

    cache = diskcache.Cache(cache_dir)

    try:
        return cache[key]
    except KeyError:
        with fopen(filename, 'r', encoding=encoding) as fin:
            database = load(fin,
                            database_format,
                            frame_id_mask,
                            strict)
        cache[key] = database

        return database 
Developer: eerimoq, Project: cantools, Lines: 24, Source: __init__.py
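
Example 1 keys the cache on the raw bytes of the file, so editing the file naturally invalidates the entry. When keying on the function arguments is acceptable instead, the same look-up-or-parse pattern can be written with diskcache's memoize decorator; a hedged sketch, where load_database stands in for the real parsing step (it is not cantools' API):

import diskcache

cache = diskcache.Cache('/tmp/dbc_cache')  # illustrative cache directory

@cache.memoize()
def load_database(filename, encoding='utf-8'):
    # Placeholder for the expensive parse in _load_file_cache; memoize keys on
    # the arguments, not the file contents, so it will not notice edited files,
    # which is exactly why the original keys on the file bytes instead.
    with open(filename, 'r', encoding=encoding) as fin:
        return fin.read()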

Example 2: feature_extract

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def feature_extract(table_name, filepath):
    names = []
    feats = []
    cache = Cache(default_cache_dir)
    # Count the lines up front so progress can be reported through the shared cache.
    with open(filepath, 'r') as f:
        total = len(f.readlines())
    cache['total'] = total
    current = 0
    with open(filepath, 'r') as f:
        for line in f:
            current += 1
            cache['current'] = current
            line = line.strip()
            line = line.split()
            line = line[0]
            try:
                vec = smiles_to_vec(line)
                feats.append(vec)
                names.append(line.encode())
            except Exception:
                continue
            print("extracting feature from smi No. %d, %d molecules in total" % (current, total))
    return feats, names 
Developer: milvus-io, Project: bootcamp, Lines: 24, Source: encode.py

Example 3: do_train

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def do_train(table_name, database_path):
    if not table_name:
        table_name = DEFAULT_TABLE
    cache = Cache(default_cache_dir)
    try:
        vectors, names = feature_extract(database_path, VGGNet())
        index_client = milvus_client()
        # delete_table(index_client, table_name=table_name)
        # time.sleep(1)
        status, ok = has_table(index_client, table_name)
        if not ok:
            print("create table.")
            create_table(index_client, table_name=table_name)
        print("insert into:", table_name)
        status, ids = insert_vectors(index_client, table_name, vectors)
        create_index(index_client, table_name)
        for i in range(len(names)):
            # cache[names[i]] = ids[i]
            cache[ids[i]] = names[i]
        print("Train finished")
        return "Train finished"
    except Exception as e:
        logging.error(e)
        return "Error with {}".format(e) 
Developer: milvus-io, Project: bootcamp, Lines: 26, Source: train.py

Example 4: test_cache_merfish

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def test_cache_merfish(tmpdir, name, expected, config, monkeypatch):

    cache_enabled = (0 != config["slicedimage"]["caching"].get("size_limit", None))
    if cache_enabled:
        config["slicedimage"]["caching"]["directory"] = str(tmpdir / "caching")

    with monkeypatch.context() as mc:
        setup_config(config, tmpdir, mc)

        # Run 1
        data.MERFISH(use_test_data=True).fov().get_image("primary")

        # Run 2
        if cache_enabled:
            data.MERFISH(use_test_data=True).fov().get_image("primary")

        # Check constraints
        if cache_enabled:
            # Enforce smallest size
            cache = Cache(str(tmpdir / "caching"))
            cache.cull()

        cache_size = get_size(tmpdir / "caching")
        min, max = expected
        assert (min <= cache_size) and (cache_size <= max) 
Developer: spacetx, Project: starfish, Lines: 27, Source: test_config.py
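
cull() evicts expired and excess items until the cache fits within its size limit. The limit itself, along with the eviction behaviour, can be set when the cache is opened; a small sketch with illustrative numbers and directory:

from diskcache import Cache

# size_limit is in bytes; cull_limit caps how many items a single write
# may evict as a side effect.
cache = Cache('/tmp/limited_cache', size_limit=10**9, cull_limit=10)

cache['key'] = b'value'
cache.cull()            # enforce the size limit explicitly
print(cache.volume())   # estimated on-disk size of the cache, in bytes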

Example 5: __init__

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def __init__(
            self, _psql_host='localhost', _psql_dbname='data',
            _psql_user='', _psql_password='', _cache_path='cache'
    ):
        super().__init__()
        self.register_type = RegisterSymbol

        self.psql_host: str = _psql_host
        self.psql_dbname: str = _psql_dbname
        self.psql_user: str = _psql_user
        self.psql_password: str = _psql_password

        self.table_key: str = None

        self.cache: Cache = Cache(_cache_path)
        self.market_key: str = 'crypto_{}_{}'

        self._psql_con: psycopg2.extensions.connection = None
        self._psql_cur: psycopg2.extensions.cursor = None

        self.columns: typing.List[str] = [] 
Developer: ppaanngggg, Project: ParadoxTrading, Lines: 23, Source: FetchBase.py

Example 6: _get_cached

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def _get_cached(path):
    # 1/ memory cache
    if path in JUMBO_FIELDS_MEMORY_CACHE:
        return JUMBO_FIELDS_MEMORY_CACHE[path]

    # 2/ disk cache
    if SIMPLEFLOW_ENABLE_DISK_CACHE:
        try:
            # NB: this cache may also be triggered on activity workers, where it's not that
            # useful. The performance hit should be minimal. To be improved later.
            # NB2: cache has to be lazily instantiated here, cache objects do not survive forks,
            # see DiskCache docs.
            cache = Cache(constants.CACHE_DIR)
            # generate a dedicated cache key because this cache may be shared with other
            # features of simpleflow at some point
            cache_key = "jumbo_fields/" + path.split("/")[-1]
            if cache_key in cache:
                logger.debug("diskcache: getting key={} from cache_dir={}".format(cache_key, constants.CACHE_DIR))
                return cache[cache_key]
        except OperationalError:
            logger.warning("diskcache: got an OperationalError, skipping cache usage")

    # nothing to return, but better be explicit here
    return 
Developer: botify-labs, Project: simpleflow, Lines: 26, Source: format.py
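
Example 6 shows only the read path. A matching write path would store the value under the same lazily created cache and the same OperationalError guard; the sketch below is an assumption about how such a helper could look, not simpleflow's actual code, and the one-day expiration is an arbitrary choice.

from sqlite3 import OperationalError

from diskcache import Cache

CACHE_DIR = '/tmp/simpleflow_cache'  # stand-in for constants.CACHE_DIR

def _put_cached(path, value):
    try:
        cache = Cache(CACHE_DIR)  # instantiate lazily, as on the read side
        cache_key = "jumbo_fields/" + path.split("/")[-1]
        cache.set(cache_key, value, expire=24 * 3600)
    except OperationalError:
        pass  # degrade gracefully, mirroring the read path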

Example 7: _compile_files_cache

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def _compile_files_cache(filenames,
                         codec,
                         any_defined_by_choices,
                         encoding,
                         cache_dir,
                         numeric_enums):
    key = [codec.encode('ascii')]

    if isinstance(filenames, str):
        filenames = [filenames]

    for filename in filenames:
        with open(filename, 'rb') as fin:
            key.append(fin.read())

    key = b''.join(key)
    cache = diskcache.Cache(cache_dir)

    try:
        return cache[key]
    except KeyError:
        compiled = compile_dict(parse_files(filenames, encoding),
                                codec,
                                any_defined_by_choices,
                                numeric_enums)
        cache[key] = compiled

        return compiled 
Developer: eerimoq, Project: asn1tools, Lines: 30, Source: compiler.py
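
Neither Example 1 nor Example 7 closes the cache it opens; diskcache.Cache also works as a context manager, which closes the underlying SQLite connection when the block exits. A sketch of the same look-up-or-compute flow in that style (compute is a stand-in for the parse or compile step):

import diskcache

def lookup_or_compute(key, compute, cache_dir):
    # compute is a zero-argument callable standing in for load()/compile_dict()
    with diskcache.Cache(cache_dir) as cache:
        try:
            return cache[key]
        except KeyError:
            value = compute()
            cache[key] = value
            return value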

Example 8: batch_generator

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def batch_generator(self, dataset: List[Tuple[QASetting, List[Answer]]], batch_size: int, is_eval: bool) \
            -> Iterable[Mapping[TensorPort, np.ndarray]]:
        """Preprocesses all instances, batches & shuffles them and generates batches in dicts."""
        logger.info("OnlineInputModule pre-processes data on-the-fly in first epoch and caches results for subsequent "
                    "epochs! That means, first epoch might be slower.")
        # only cache training data on file
        use_cache = not is_eval and self.shared_resources.config.get('file_cache', False)
        if use_cache:
            cache_dir = os.path.join(os.environ.get('JACK_TEMP', tempfile.gettempdir()), 'cache')
            db = dc.Cache(cache_dir)
            db.reset('cull_limit', 0)
            logger.info("Caching temporary preprocessed data in %s. You can change cache dir using the"
                        " JACK_TEMP environment variable which defaults to /tmp/jack." % cache_dir)
        else:
            db = dict()
        preprocessed = set()
        def make_generator():
            running_idx = 0
            for i, batch in enumerate(self._batch_questions(dataset, batch_size, is_eval)):
                questions, answers = zip(*batch)
                if any(q.id not in preprocessed for q in questions):
                    annots = self.preprocess(questions, answers)
                    if questions[0].id is None:  # make sure there is an id, if not we set it here
                        for q in questions:
                            if q.id is None:
                                q.id = running_idx
                                running_idx += 1
                    for q, a in zip(questions, annots):
                        preprocessed.add(q.id)
                        db[q.id] = a
                else:
                    annots = [db[q.id] for q in questions]

                yield self.create_batch(annots, is_eval, True)

        return GeneratorWithRestart(make_generator) 
Developer: uclnlp, Project: jack, Lines: 38, Source: input_module.py
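
The db.reset('cull_limit', 0) call above disables automatic eviction on writes, so every preprocessed annotation stays on disk for the whole run. The same setting can also be passed when the cache is created; a short sketch with an illustrative directory and key:

import diskcache as dc

# Equivalent to creating the Cache and then calling reset('cull_limit', 0):
# no items are evicted as a side effect of set()/__setitem__.
db = dc.Cache('/tmp/jack_cache', cull_limit=0)
db['q-0'] = {'tokens': ['what', 'is', 'diskcache']}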

Example 9: thread_status_api

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def thread_status_api():
    cache = Cache(default_cache_dir)
    return "current: {}, total: {}".format(cache['current'], cache['total']) 
Developer: milvus-io, Project: bootcamp, Lines: 5, Source: app.py
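
Because the cache lives in an on-disk SQLite database, this Flask endpoint can read the 'current' and 'total' keys that the long-running feature_extract in Example 2 writes from another process. A defensive variant (an adaptation, not the project's code) uses get() with defaults so the endpoint does not raise KeyError before extraction has started:

def thread_status_api():
    cache = Cache(default_cache_dir)
    # get() returns the default instead of raising before the keys exist
    return "current: {}, total: {}".format(cache.get('current', 0), cache.get('total', 0))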

Example 10: do_load

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def do_load(table_name, database_path):
    if not table_name:
        table_name = DEFAULT_TABLE
    cache = Cache(default_cache_dir)
    try:
        vectors, names = feature_extract(table_name, database_path)
        print("start connetc to milvus")
        index_client = milvus_client()
        status, ok = has_table(index_client, table_name)
        if not ok:
            print("create table.")
            create_table(index_client, table_name=table_name)
        print("insert into:", table_name)

        # status, ids = insert_vectors(index_client, table_name, vectors)
        total_ids = []
        ids_lens = 0
        # insert the vectors in batches of 100000
        while ids_lens < len(vectors):
            try:
                status, ids = insert_vectors(index_client, table_name, vectors[ids_lens:ids_lens + 100000])
            except Exception:
                status, ids = insert_vectors(index_client, table_name, vectors[ids_lens:len(vectors)])
            ids_lens += 100000
            total_ids += ids
            print("ids:", len(ids))

        create_index(index_client, table_name)
        for i in range(len(names)):
            cache[total_ids[i]] = names[i]
        print("FP finished")
        return "FP finished"
    except Exception as e:
        logging.error(e)
        return "Error with {}".format(e) 
Developer: milvus-io, Project: bootcamp, Lines: 36, Source: load.py

Example 11: query_smi_from_ids

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def query_smi_from_ids(vids):
    res = []
    cache = Cache(default_cache_dir)
    print("cache:",cache)
    for i in vids:
        if i in cache:
            res.append(cache[i])
    return res 
Developer: milvus-io, Project: bootcamp, Lines: 10, Source: search.py

Example 12: query_name_from_ids

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def query_name_from_ids(vids):
    res = []
    cache = Cache(default_cache_dir)
    for i in vids:
        if i in cache:
            res.append(cache[i])
    return res 
Developer: milvus-io, Project: bootcamp, Lines: 9, Source: search.py

Example 13: __init__

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def __init__(self, cache_duration=3600, cache_root='/tmp/cortex/tor_project'):
        self.session = requests.Session()
        self.cache_duration = cache_duration
        if self.cache_duration > 0:
            self.cache = Cache(cache_root)
        self.url = 'http://torstatus.blutmagie.de/query_export.php/Tor_query_EXPORT.csv' 
Developer: TheHive-Project, Project: Cortex-Analyzers, Lines: 8, Source: tor_blutmagie.py

Example 14: __init__

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def __init__(self, ttl=86400, cache_duration=3600,
                 cache_root='/tmp/cortex/tor_project'):
        self.session = requests.Session()
        self.delta = None
        self.cache = None
        if ttl > 0:
            self.delta = timedelta(seconds=ttl)
        if cache_duration > 0:
            self.cache = Cache(cache_root)
            self.cache_duration = cache_duration
        self.url = 'https://check.torproject.org/exit-addresses' 
Developer: TheHive-Project, Project: Cortex-Analyzers, Lines: 13, Source: tor_project.py
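
Both analyzers store a cache_duration but the constructors above never apply it; the usual pattern (an assumption about the surrounding code, which is not shown here) is to pass it as the expire argument when the downloaded data is written, so entries drop out automatically once they are stale:

# Hypothetical helper methods illustrating how cache_duration is typically applied.
def _store(self, key, value):
    if self.cache is not None:
        self.cache.set(key, value, expire=self.cache_duration)

def _fetch(self, key):
    if self.cache is not None:
        return self.cache.get(key)  # returns None once the entry has expired
    return None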

Example 15: get_cache

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def get_cache(self):
        if self.cache is None:
            self.cache_path.parent.mkdir(parents=True, exist_ok=True)
            self.cache = dc.Cache(self.cache_path)
        return self.cache 
Developer: paperswithcode, Project: axcell, Lines: 7, Source: references.py


Note: The diskcache.Cache examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective developers, and copyright of the source code remains with the original authors. Please refer to the License of the corresponding project before distributing or using the code; do not reproduce without permission.