Python diskcache.Cache Code Examples

This article collects typical usage examples of diskcache.Cache in Python. If you are wondering what diskcache.Cache is for, how to call it, or what real-world usage looks like, the hand-picked code examples below may help. You can also explore further usage examples from the diskcache package itself.


The following presents 15 code examples of diskcache.Cache, taken from open-source projects and ordered by popularity by default.
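
Before diving into the project-specific examples, here is a minimal sketch of the basic diskcache.Cache workflow: a directory-backed, dict-like key/value store with optional expiration. The directory name is only a placeholder.

import diskcache

# Open (or create) a cache backed by an on-disk directory.
cache = diskcache.Cache('/tmp/example_cache')

# Dict-like reads and writes.
cache['answer'] = 42
print(cache['answer'])        # 42
print('answer' in cache)      # True

# set() additionally accepts an expiration time in seconds.
cache.set('token', 'abc123', expire=60)

# get() returns a default instead of raising KeyError.
print(cache.get('missing', default='fallback'))

# Close the cache when done, or use it as a context manager.
cache.close()

with diskcache.Cache('/tmp/example_cache') as cache:
    print(cache['answer'])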

Example 1: _load_file_cache

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def _load_file_cache(filename,
                     database_format,
                     encoding,
                     frame_id_mask,
                     strict,
                     cache_dir):
    with open(filename, 'rb') as fin:
        key = fin.read()

    cache = diskcache.Cache(cache_dir)

    try:
        return cache[key]
    except KeyError:
        with fopen(filename, 'r', encoding=encoding) as fin:
            database = load(fin,
                            database_format,
                            frame_id_mask,
                            strict)
        cache[key] = database

        return database 
Author: eerimoq, Project: cantools, Lines of code: 24, Source: __init__.py
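
The example above keys the cache on the raw bytes of the file, so editing the file automatically invalidates its cached entry. Note that the Cache is never explicitly closed here; a small variation, sketched below under the same assumptions (cantools' fopen and load helpers), uses the cache as a context manager so the underlying store is closed deterministically.

def _load_file_cache(filename, database_format, encoding,
                     frame_id_mask, strict, cache_dir):
    with open(filename, 'rb') as fin:
        key = fin.read()

    # diskcache.Cache supports the context-manager protocol and closes
    # its on-disk store when the block exits.
    with diskcache.Cache(cache_dir) as cache:
        try:
            return cache[key]
        except KeyError:
            with fopen(filename, 'r', encoding=encoding) as fin:
                database = load(fin, database_format, frame_id_mask, strict)
            cache[key] = database
            return database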

Example 2: feature_extract

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def feature_extract(table_name, filepath):
    names = []
    feats = []
    cache = Cache(default_cache_dir)
    # Count the lines first so progress can be reported through the cache.
    with open(filepath, 'r') as f:
        total = len(f.readlines())
    cache['total'] = total
    current = 0
    with open(filepath, 'r') as f:
        for line in f:
            current += 1
            cache['current'] = current
            line = line.strip()
            line = line.split()
            line = line[0]
            try:
                vec = smiles_to_vec(line)
                feats.append(vec)
                names.append(line.encode())
            except Exception:
                continue
            print("extracting feature from smi No. %d, %d molecules in total" % (current, total))
    return feats, names 
Author: milvus-io, Project: bootcamp, Lines of code: 24, Source: encode.py

Example 3: do_train

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def do_train(table_name, database_path):
    if not table_name:
        table_name = DEFAULT_TABLE
    cache = Cache(default_cache_dir)
    try:
        vectors, names = feature_extract(database_path, VGGNet())
        index_client = milvus_client()
        # delete_table(index_client, table_name=table_name)
        # time.sleep(1)
        status, ok = has_table(index_client, table_name)
        if not ok:
            print("create table.")
            create_table(index_client, table_name=table_name)
        print("insert into:", table_name)
        status, ids = insert_vectors(index_client, table_name, vectors)
        create_index(index_client, table_name)
        for i in range(len(names)):
            # cache[names[i]] = ids[i]
            cache[ids[i]] = names[i]
        print("Train finished")
        return "Train finished"
    except Exception as e:
        logging.error(e)
        return "Error with {}".format(e) 
Author: milvus-io, Project: bootcamp, Lines of code: 26, Source: train.py

Example 4: test_cache_merfish

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def test_cache_merfish(tmpdir, name, expected, config, monkeypatch):

    cache_enabled = (0 != config["slicedimage"]["caching"].get("size_limit", None))
    if cache_enabled:
        config["slicedimage"]["caching"]["directory"] = str(tmpdir / "caching")

    with monkeypatch.context() as mc:
        setup_config(config, tmpdir, mc)

        # Run 1
        data.MERFISH(use_test_data=True).fov().get_image("primary")

        # Run 2
        if cache_enabled:
            data.MERFISH(use_test_data=True).fov().get_image("primary")

        # Check constraints
        if cache_enabled:
            # Enforce smallest size
            cache = Cache(str(tmpdir / "caching"))
            cache.cull()

        cache_size = get_size(tmpdir / "caching")
        min, max = expected
        assert (min <= cache_size) and (cache_size <= max) 
Author: spacetx, Project: starfish, Lines of code: 27, Source: test_config.py
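
The size_limit used in the test above is a regular Cache setting: it can be passed to the constructor, and cull() removes expired entries and then evicts items until the on-disk volume fits within the limit again. A minimal illustration with placeholder values:

from diskcache import Cache

# Allow roughly 1 MB of cached data.
cache = Cache('/tmp/limited_cache', size_limit=1024 * 1024)

for i in range(10000):
    cache[i] = b'x' * 1024      # write ~10 MB in total

# Force a full cull instead of relying on the incremental culling
# that happens during writes.
cache.cull()
print(cache.volume(), 'bytes used, limit is', cache.size_limit)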

Example 5: __init__

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def __init__(
            self, _psql_host='localhost', _psql_dbname='data',
            _psql_user='', _psql_password='', _cache_path='cache'
    ):
        super().__init__()
        self.register_type = RegisterSymbol

        self.psql_host: str = _psql_host
        self.psql_dbname: str = _psql_dbname
        self.psql_user: str = _psql_user
        self.psql_password: str = _psql_password

        self.table_key: str = None

        self.cache: Cache = Cache(_cache_path)
        self.market_key: str = 'crypto_{}_{}'

        self._psql_con: psycopg2.extensions.connection = None
        self._psql_cur: psycopg2.extensions.cursor = None

        self.columns: typing.List[str] = [] 
Author: ppaanngggg, Project: ParadoxTrading, Lines of code: 23, Source: FetchBase.py

Example 6: _get_cached

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def _get_cached(path):
    # 1/ memory cache
    if path in JUMBO_FIELDS_MEMORY_CACHE:
        return JUMBO_FIELDS_MEMORY_CACHE[path]

    # 2/ disk cache
    if SIMPLEFLOW_ENABLE_DISK_CACHE:
        try:
            # NB: this cache may also be triggered on activity workers, where it's not that
            # useful. The performance hit should be minimal. To be improved later.
            # NB2: cache has to be lazily instantiated here, cache objects do not survive forks,
            # see DiskCache docs.
            cache = Cache(constants.CACHE_DIR)
            # generate a dedicated cache key because this cache may be shared with other
            # features of simpleflow at some point
            cache_key = "jumbo_fields/" + path.split("/")[-1]
            if cache_key in cache:
                logger.debug("diskcache: getting key={} from cache_dir={}".format(cache_key, constants.CACHE_DIR))
                return cache[cache_key]
        except OperationalError:
            logger.warning("diskcache: got an OperationalError, skipping cache usage")

    # nothing to return, but better be explicit here
    return 
Author: botify-labs, Project: simpleflow, Lines of code: 26, Source: format.py
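
Only the read path is shown above; the corresponding write path lives elsewhere in simpleflow. A hypothetical sketch of what storing a jumbo field under the same key scheme could look like (the _put_cached name and the one-day expiration are illustrative, and SIMPLEFLOW_ENABLE_DISK_CACHE and constants are assumed to be the same module-level names used in the snippet above):

def _put_cached(path, value):
    if not SIMPLEFLOW_ENABLE_DISK_CACHE:
        return
    # Instantiate lazily here as well: cache objects do not survive forks.
    cache = Cache(constants.CACHE_DIR)
    cache_key = "jumbo_fields/" + path.split("/")[-1]
    # Illustrative expiration of one day.
    cache.set(cache_key, value, expire=24 * 3600)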

Example 7: _compile_files_cache

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def _compile_files_cache(filenames,
                         codec,
                         any_defined_by_choices,
                         encoding,
                         cache_dir,
                         numeric_enums):
    key = [codec.encode('ascii')]

    if isinstance(filenames, str):
        filenames = [filenames]

    for filename in filenames:
        with open(filename, 'rb') as fin:
            key.append(fin.read())

    key = b''.join(key)
    cache = diskcache.Cache(cache_dir)

    try:
        return cache[key]
    except KeyError:
        compiled = compile_dict(parse_files(filenames, encoding),
                                codec,
                                any_defined_by_choices,
                                numeric_enums)
        cache[key] = compiled

        return compiled 
Author: eerimoq, Project: asn1tools, Lines of code: 30, Source: compiler.py

Example 8: batch_generator

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def batch_generator(self, dataset: List[Tuple[QASetting, List[Answer]]], batch_size: int, is_eval: bool) \
            -> Iterable[Mapping[TensorPort, np.ndarray]]:
        """Preprocesses all instances, batches & shuffles them and generates batches in dicts."""
        logger.info("OnlineInputModule pre-processes data on-the-fly in first epoch and caches results for subsequent "
                    "epochs! That means, first epoch might be slower.")
        # only cache training data on file
        use_cache = not is_eval and self.shared_resources.config.get('file_cache', False)
        if use_cache:
            cache_dir = os.path.join(os.environ.get('JACK_TEMP', tempfile.gettempdir()), 'cache')
            db = dc.Cache(cache_dir)
            db.reset('cull_limit', 0)
            logger.info("Caching temporary preprocessed data in %s. You can change cache dir using the"
                        " JACK_TEMP environment variable which defaults to /tmp/jack." % cache_dir)
        else:
            db = dict()
        preprocessed = set()

        def make_generator():
            running_idx = 0
            for i, batch in enumerate(self._batch_questions(dataset, batch_size, is_eval)):
                questions, answers = zip(*batch)
                if any(q.id not in preprocessed for q in questions):
                    annots = self.preprocess(questions, answers)
                    if questions[0].id is None:  # make sure there is an id, if not we set it here
                        for q in questions:
                            if q.id is None:
                                q.id = running_idx
                                running_idx += 1
                    for q, a in zip(questions, annots):
                        preprocessed.add(q.id)
                        db[q.id] = a
                else:
                    annots = [db[q.id] for q in questions]

                yield self.create_batch(annots, is_eval, True)

        return GeneratorWithRestart(make_generator) 
Author: uclnlp, Project: jack, Lines of code: 38, Source: input_module.py
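
The db.reset('cull_limit', 0) call above changes a Cache setting at runtime: with cull_limit set to 0, diskcache never evicts entries automatically during writes, so cached annotations are kept until the directory is cleared. The same setting can also be passed to the constructor; a short sketch with a placeholder directory:

import diskcache as dc

# Either configure the setting up front ...
db = dc.Cache('/tmp/jack_cache', cull_limit=0)

# ... or change it afterwards, as the example above does.
db = dc.Cache('/tmp/jack_cache')
db.reset('cull_limit', 0)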

Example 9: thread_status_api

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def thread_status_api():
    cache = Cache(default_cache_dir)
    return "current: {}, total: {}".format(cache['current'], cache['total']) 
Author: milvus-io, Project: bootcamp, Lines of code: 5, Source: app.py
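
Because the endpoint indexes the cache directly, it raises KeyError when no extraction has been started yet. A more defensive variant, sketched here as an assumption rather than the project's actual code, reads the progress counters with get() and defaults:

def thread_status_api():
    cache = Cache(default_cache_dir)
    # Fall back to 0 before the first feature-extraction run.
    current = cache.get('current', default=0)
    total = cache.get('total', default=0)
    return "current: {}, total: {}".format(current, total)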

Example 10: do_load

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def do_load(table_name, database_path):
    if not table_name:
        table_name = DEFAULT_TABLE
    cache = Cache(default_cache_dir)
    try:
        vectors, names = feature_extract(table_name, database_path)
        print("start connetc to milvus")
        index_client = milvus_client()
        status, ok = has_table(index_client, table_name)
        if not ok:
            print("create table.")
            create_table(index_client, table_name=table_name)
        print("insert into:", table_name)

        # status, ids = insert_vectors(index_client, table_name, vectors)
        total_ids = []
        ids_lens = 0
        while ids_lens < len(vectors):
            try:
                status, ids = insert_vectors(index_client, table_name, vectors[ids_lens:ids_lens + 100000])
            except Exception:
                status, ids = insert_vectors(index_client, table_name, vectors[ids_lens:len(vectors)])
            ids_lens += 100000
            total_ids += ids
            print("ids:",len(ids))

        create_index(index_client, table_name)
        for i in range(len(names)):
            cache[total_ids[i]] = names[i]
        print("FP finished")
        return "FP finished"
    except Exception as e:
        logging.error(e)
        return "Error with {}".format(e) 
Author: milvus-io, Project: bootcamp, Lines of code: 36, Source: load.py

Example 11: query_smi_from_ids

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def query_smi_from_ids(vids):
    res = []
    cache = Cache(default_cache_dir)
    print("cache:",cache)
    for i in vids:
        if i in cache:
            res.append(cache[i])
    return res 
Author: milvus-io, Project: bootcamp, Lines of code: 10, Source: search.py

Example 12: query_name_from_ids

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def query_name_from_ids(vids):
    res = []
    cache = Cache(default_cache_dir)
    for i in vids:
        if i in cache:
            res.append(cache[i])
    return res 
Author: milvus-io, Project: bootcamp, Lines of code: 9, Source: search.py

Example 13: __init__

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def __init__(self, cache_duration=3600, cache_root='/tmp/cortex/tor_project'):
        self.session = requests.Session()
        self.cache_duration = cache_duration
        if self.cache_duration > 0:
            self.cache = Cache(cache_root)
        self.url = 'http://torstatus.blutmagie.de/query_export.php/Tor_query_EXPORT.csv' 
Author: TheHive-Project, Project: Cortex-Analyzers, Lines of code: 8, Source: tor_blutmagie.py

Example 14: __init__

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def __init__(self, ttl=86400, cache_duration=3600,
                 cache_root='/tmp/cortex/tor_project'):
        self.session = requests.Session()
        self.delta = None
        self.cache = None
        if ttl > 0:
            self.delta = timedelta(seconds=ttl)
        if cache_duration > 0:
            self.cache = Cache(cache_root)
            self.cache_duration = cache_duration
        self.url = 'https://check.torproject.org/exit-addresses' 
Author: TheHive-Project, Project: Cortex-Analyzers, Lines of code: 13, Source: tor_project.py
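
In both Tor analyzers above, cache_duration is only stored in the constructor; applying it with diskcache would typically mean passing it as the expire argument when an entry is written. A hypothetical sketch (the method name, cache key, and response parsing are illustrative, not the analyzer's actual code):

def _get_exit_nodes(self):
    # Serve the cached list while it is still fresh.
    if self.cache is not None and 'exit_nodes' in self.cache:
        return self.cache['exit_nodes']

    response = self.session.get(self.url)
    nodes = response.text.splitlines()

    if self.cache is not None:
        # Entries expire after cache_duration seconds, forcing a re-download.
        self.cache.set('exit_nodes', nodes, expire=self.cache_duration)
    return nodes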

Example 15: get_cache

# Required import: import diskcache [as alias]
# Or: from diskcache import Cache [as alias]
def get_cache(self):
        if self.cache is None:
            self.cache_path.parent.mkdir(parents=True, exist_ok=True)
            self.cache = dc.Cache(self.cache_path)
        return self.cache 
Author: paperswithcode, Project: axcell, Lines of code: 7, Source: references.py


Note: The diskcache.Cache examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are selected from open-source projects contributed by their respective authors; copyright of the source code remains with the original authors, and distribution or use should follow the corresponding project's license. Please do not reproduce without permission.