

Python six.MAXSIZE attribute code examples

This article collects typical usage examples of the six.MAXSIZE attribute from the Python six library. If you are wondering what six.MAXSIZE is for, how to use it, or simply want to see it in real code, the curated examples below should help. You can also explore further usage examples from the six module.


The sections below show 15 code examples of the six.MAXSIZE attribute, ordered roughly by popularity.
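Before diving in, here is a minimal, standalone sketch (not taken from any of the projects below) of what six.MAXSIZE is: a very large integer, at least 2**31 - 1, that the six compatibility library exposes on both Python 2 and Python 3. It is most often used as an "effectively unlimited" default or as a sentinel for a running minimum.

# Minimal illustration of six.MAXSIZE (not from the projects below).
import six

print(six.MAXSIZE)                   # e.g. 9223372036854775807 on 64-bit CPython 3
print(six.MAXSIZE >= 2 ** 31 - 1)    # True on every platform six supports

# Typical sentinel usage: start a running minimum at "infinity".
best = six.MAXSIZE
for value in (42, 7, 19):
    best = min(best, value)
print(best)                          # 7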

Example 1: scan

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def scan(self, tokens, max_matches=six.MAXSIZE, overlap=False):
        """"""
        if not self.streamlined:
            self.streamline()
        matches = 0
        i = 0
        length = len(tokens)
        while i < length and matches < max_matches:
            try:
                results, next_i = self.parse(tokens, i)
            except ParseException:
                # No match at this position; advance one token and retry.
                i += 1
            else:
                if next_i > i:
                    matches += 1
                    if len(results) == 1:
                        results = results[0]
                    yield results, i, next_i
                    if overlap:
                        i += 1
                    else:
                        i = next_i
                else:
                    i += 1 
Developer: mcs07, Project: ChemDataExtractor, Lines: 26, Source: elements.py

Example 2: __init__

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def __init__(self, model, **kwargs):
        super().__init__()
        if type(model) is dict:
            model = create_model_for('tagger', **model)
        self.grad_accum = int(kwargs.get('grad_accum', 1))
        self.gpus = int(kwargs.get('gpus', 1))
        # By default support IOB1/IOB2
        self.span_type = kwargs.get('span_type', 'iob')
        self.verbose = kwargs.get('verbose', False)

        logger.info('Setting span type %s', self.span_type)
        self.model = model
        self.idx2label = revlut(self.model.labels)
        self.clip = float(kwargs.get('clip', 5))
        self.optimizer = OptimizerManager(self.model, **kwargs)
        if self.gpus > 1:
            logger.info("Trainer for PyTorch tagger currently doesnt support multiple GPUs.  Setting to 1")
            self.gpus = 1
        if self.gpus > 0 and self.model.gpu:
            self.model = model.cuda()
        else:
            logger.warning("Requested training on CPU.  This will be slow.")

        self.nsteps = kwargs.get('nsteps', six.MAXSIZE) 
Developer: dpressel, Project: mead-baseline, Lines: 26, Source: train.py
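The `nsteps = kwargs.get('nsteps', six.MAXSIZE)` line is a pattern that recurs in several trainers below: with MAXSIZE as the default, a "report every nsteps steps" check effectively never fires unless the caller supplies a real interval. A standalone sketch of that idea, using illustrative names that are not part of mead-baseline:

# Illustrative sketch: six.MAXSIZE as an "effectively never" reporting interval.
import six

def run_training(num_steps, nsteps=six.MAXSIZE):
    for step in range(1, num_steps + 1):
        # ... one training step would go here ...
        if step % nsteps == 0:       # never true with the MAXSIZE default
            print("reporting at step", step)

run_training(10)                     # prints nothing
run_training(10, nsteps=5)           # reports at steps 5 and 10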

Example 3: length_dist

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def length_dist(synset_1, synset_2):
    """
    Return a measure of the length of the shortest path in the semantic
    ontology (Wordnet in our case as well as the paper's) between two
    synsets.
    """
    l_dist = six.MAXSIZE
    if synset_1 is None or synset_2 is None:
        return 0.0
    if synset_1 == synset_2:
        # if synset_1 and synset_2 are the same synset return 0
        l_dist = 0.0
    else:
        wset_1 = set([str(x.name()) for x in synset_1.lemmas()])
        wset_2 = set([str(x.name()) for x in synset_2.lemmas()])
        if len(wset_1.intersection(wset_2)) > 0:
            # if synset_1 != synset_2 but there is word overlap, return 1.0
            l_dist = 1.0
        else:
            # just compute the shortest path between the two
            l_dist = synset_1.shortest_path_distance(synset_2)
            if l_dist is None:
                l_dist = 0.0
    # normalize path length to the range [0,1]
    return math.exp(-ALPHA * l_dist) 
Developer: rgtjf, Project: Semantic-Texual-Similarity-Toolkits, Lines: 27, Source: short_sentence_similarity.py
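A rough usage sketch for length_dist, assuming NLTK and its WordNet corpus are installed and that the module defines the decay constant ALPHA (the paper uses 0.2); this snippet is illustrative and not part of short_sentence_similarity.py:

# Illustrative usage (assumes nltk + the wordnet corpus, and ALPHA defined above).
from nltk.corpus import wordnet as wn

dog = wn.synset('dog.n.01')
cat = wn.synset('cat.n.01')
print(length_dist(dog, dog))   # 1.0: identical synsets give exp(-ALPHA * 0)
print(length_dist(dog, cat))   # < 1.0: decays with the shortest WordNet path length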

Example 4: __init__

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def __init__(self, alignment):
        self.alignment = alignment
        self.node_list = [MemoryNode(six.MAXSIZE)]
        self.max_allocation = 0 
Developer: NervanaSystems, Project: ngraph-python, Lines: 6, Source: memlayout.py

Example 5: allocate_best_fit

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def allocate_best_fit(self, size):
        size = MemoryManager.align(size, self.alignment)
        best_node = None
        best_offset = None
        best_delta = six.MAXSIZE
        offset = 0
        for i, node in enumerate(self.node_list):
            delta = node.size - size
            if node.is_free and delta >= 0:
                if not best_node or delta < best_delta:
                    best_i = i
                    best_node = node
                    best_offset = offset
                    best_delta = delta
            offset += node.size

        if not best_node:
            raise RuntimeError("Bad Allocation")
        else:
            if best_delta == 0:
                best_node.is_free = False
            else:
                self.node_list[best_i].size -= size
                self.node_list.insert(best_i, MemoryNode(size, is_free=False))

        self.max_allocation = max(self.max_allocation, best_offset + size)
        return best_offset 
Developer: NervanaSystems, Project: ngraph-python, Lines: 29, Source: memlayout.py
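In allocate_best_fit, six.MAXSIZE plays the role of "plus infinity" for best_delta, so the first free node that is large enough always beats the initial value. The following self-contained sketch shows the same sentinel pattern without depending on the MemoryManager/MemoryNode classes above:

# Standalone sketch of the best-fit sentinel pattern used above.
import six

def best_fit(free_block_sizes, request):
    """Return the index of the smallest free block that fits, or None."""
    best_i, best_delta = None, six.MAXSIZE    # "infinitely bad" starting fit
    for i, size in enumerate(free_block_sizes):
        delta = size - request
        if 0 <= delta < best_delta:
            best_i, best_delta = i, delta
    return best_i

print(best_fit([64, 16, 32], 20))   # 2 -- the 32-unit block wastes the least space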

Example 6: list_more

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def list_more(fn, offset, size, batch_size, *args):
    """list all data using the fn
    """
    if size < 0:
        expected_total_size = six.MAXSIZE
    else:
        expected_total_size = size
        batch_size = min(size, batch_size)

    response = None
    total_count_got = 0
    while True:
        ret = fn(*args, offset=offset, size=batch_size)
        if response is None:
            response = ret
        else:
            response.merge(ret)

        count = ret.get_count()
        total = ret.get_total()
        offset += count
        total_count_got += count
        batch_size = min(batch_size, expected_total_size - total_count_got)

        if count == 0 or offset >= total or total_count_got >= expected_total_size:
            break

    return response 
Developer: aliyun, Project: aliyun-log-python-sdk, Lines: 30, Source: logclient_operator.py
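When the caller passes size < 0, six.MAXSIZE stands in for "no upper bound", so only the server-reported total (or an empty page) terminates the loop. Below is a toy, self-contained sketch of that pagination idea; the fn/response objects used above belong to the aliyun-log-python-sdk and are not reproduced here:

# Toy pagination sketch: six.MAXSIZE as "fetch everything".
import six

DATA = list(range(23))

def fetch(offset, size):
    return DATA[offset:offset + size]

def fetch_all(offset=0, size=-1, batch_size=10):
    expected = six.MAXSIZE if size < 0 else size
    out = []
    while len(out) < expected:
        batch = fetch(offset, min(batch_size, expected - len(out)))
        if not batch:
            break
        out.extend(batch)
        offset += len(batch)
    return out

print(len(fetch_all()))           # 23 -- everything
print(len(fetch_all(size=5)))     # 5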

Example 7: query_more

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def query_more(fn, offset, size, batch_size, *args):
    """list all data using the fn
    """
    if size < 0:
        expected_total_size = six.MAXSIZE
    else:
        expected_total_size = size
        batch_size = min(size, batch_size)

    response = None
    total_count_got = 0
    while True:
        ret = fn(*args, offset=offset, size=batch_size)

        if response is None:
            response = ret
        else:
            response.merge(ret)

        # if the response is incomplete, stop paging
        if not ret.is_completed():
            break

        count = ret.get_count()
        offset += count
        total_count_got += count
        batch_size = min(batch_size, expected_total_size - total_count_got)
        if count == 0 or total_count_got >= expected_total_size:
            break

    return response 
Developer: aliyun, Project: aliyun-log-python-sdk, Lines: 34, Source: logclient_operator.py

Example 8: read_geonames_csv

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def read_geonames_csv():
    print("Downloading geoname data from: " + GEONAMES_ZIP_URL)
    try:
        url = request.urlopen(GEONAMES_ZIP_URL)
    except URLError:
        print("If you are operating behind a firewall, try setting the HTTP_PROXY/HTTPS_PROXY environment variables.")
        raise
    zipfile = ZipFile(BytesIO(url.read()))
    print("Download complete")
    # Loading geonames data may cause errors without setting csv.field_size_limit:
    if sys.platform == "win32":
        max_c_long_on_windows = (2 ** 31) - 1  # field_size_limit needs an int that fits a C long
        csv.field_size_limit(max_c_long_on_windows)
    else:
        csv.field_size_limit(sys.maxint if six.PY2 else six.MAXSIZE)
    with zipfile.open('allCountries.txt') as f:
        reader = unicodecsv.DictReader(f,
                                       fieldnames=[
                                           k for k, v in geonames_field_mappings],
                                       encoding='utf-8',
                                       delimiter='\t',
                                       quoting=csv.QUOTE_NONE)
        for d in reader:
            d['population'] = parse_number(d['population'], 0)
            d['latitude'] = parse_number(d['latitude'], 0)
            d['longitude'] = parse_number(d['longitude'], 0)
            if len(d['alternatenames']) > 0:
                d['alternatenames'] = d['alternatenames'].split(',')
            else:
                d['alternatenames'] = []
            yield d 
Developer: ecohealthalliance, Project: EpiTator, Lines: 33, Source: import_geonames.py
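The csv.field_size_limit branch above exists because on Windows the limit must fit in a C long, so passing sys.maxsize (or six.MAXSIZE) on 64-bit Windows raises OverflowError. A small portable sketch of the same workaround, independent of the geonames code:

# Portable sketch: raise csv's field size limit as far as the platform allows.
import csv
import sys

def raise_csv_field_limit():
    limit = sys.maxsize
    while True:
        try:
            csv.field_size_limit(limit)
            return limit
        except OverflowError:
            limit //= 2   # back off until the value fits the platform's C long

print(raise_csv_field_limit())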

Example 9: __init__

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def __init__(self, model, **kwargs):

        super().__init__()
        if type(model) is dict:
            model = create_model_for('classify', **model)
        self.clip = float(kwargs.get('clip', 5))
        self.labels = model.labels
        self.gpus = int(kwargs.get('gpus', 1))
        if self.gpus == -1:
            self.gpus = len(os.getenv('CUDA_VISIBLE_DEVICES', os.getenv('NV_GPU', '0')).split(','))

        self.optimizer = OptimizerManager(model, **kwargs)
        self.model = model
        if self.gpus > 0 and self.model.gpu:
            self.crit = model.create_loss().cuda()
            if self.gpus > 1:
                self.model = torch.nn.DataParallel(model).cuda()
            else:
                self.model.cuda()
        else:
            logger.warning("Requested training on CPU.  This will be slow.")
            self.crit = model.create_loss()
            self.model = model
        self.nsteps = kwargs.get('nsteps', six.MAXSIZE) 
Developer: dpressel, Project: mead-baseline, Lines: 28, Source: train.py

Example 10: get_metric_cmp

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def get_metric_cmp(metric, user_cmp=None, less_than_metrics=LESS_THAN_METRICS):
    if user_cmp is not None:
        return _try_user_cmp(user_cmp)
    if metric in less_than_metrics:
        return lt, six.MAXSIZE
    return gt, -six.MAXSIZE - 1 
Developer: dpressel, Project: mead-baseline, Lines: 8, Source: utils.py

Example 11: _try_user_cmp

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def _try_user_cmp(user_cmp):
    user_cmp = user_cmp.lower()
    if user_cmp in {"lt", "less", "less than", "<", "less_than"}:
        return lt, six.MAXSIZE
    if user_cmp in {"le", "lte", "<="}:
        return le, six.MAXSIZE
    if user_cmp in {"ge", "gte", ">="}:
        return ge, -six.MAXSIZE - 1
    return gt, -six.MAXSIZE - 1 
Developer: dpressel, Project: mead-baseline, Lines: 11, Source: utils.py
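Examples 10 and 11 pair a comparison operator with the matching "worst possible" starting score: six.MAXSIZE for lower-is-better metrics and -six.MAXSIZE - 1 for higher-is-better ones, so the first real score always replaces the initial best. A standalone sketch of that pattern (the metric_cmp helper below is hypothetical and only mirrors get_metric_cmp above):

# Standalone sketch of the comparator-plus-sentinel pattern from get_metric_cmp.
from operator import gt, lt
import six

def metric_cmp(metric, less_than_metrics=('loss', 'perplexity')):
    if metric in less_than_metrics:
        return lt, six.MAXSIZE           # lower is better; start from "+infinity"
    return gt, -six.MAXSIZE - 1          # higher is better; start from "-infinity"

cmp_fn, best = metric_cmp('f1')
for score in (0.71, 0.68, 0.74):
    if cmp_fn(score, best):
        best = score
print(best)   # 0.74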

Example 12: __init__

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def __init__(self, model_params, **kwargs):
        """Create a Trainer, and give it the parameters needed to instantiate the model

        :param model_params: The model parameters
        :param kwargs: See below

        :Keyword Arguments:

          * *nsteps* (`int`) -- If we should report every n-steps, this should be passed
          * *ema_decay* (`float`) -- If we are doing an exponential moving average, what decay to use
          * *clip* (`int`) -- If we are doing gradient clipping, what value to use
          * *optim* (`str`) -- The name of the optimizer we are using
          * *lr* (`float`) -- The learning rate we are using
          * *mom* (`float`) -- If we are using SGD, what value to use for momentum
          * *beta1* (`float`) -- Adam-specific hyper-param, defaults to `0.9`
          * *beta2* (`float`) -- Adam-specific hyper-param, defaults to `0.999`
          * *epsilon* (`float`) -- Adam-specific hyper-param, defaults to `1e-8`

        """
        super().__init__()
        if type(model_params) is dict:
            self.model = create_model_for('tagger', **model_params)
        else:
            self.model = model_params
        self.sess = self.model.sess
        self.loss = self.model.create_loss()
        span_type = kwargs.get('span_type', 'iob')
        verbose = kwargs.get('verbose', False)
        self.evaluator = TaggerEvaluatorTf(self.model, span_type, verbose)
        self.global_step, self.train_op = optimizer(self.loss, colocate_gradients_with_ops=True, variables=self.model.trainable_variables, **kwargs)
        self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
        tables = tf.compat.v1.tables_initializer()
        self.model.sess.run(tables)
        init = tf.compat.v1.global_variables_initializer()
        self.model.sess.run(init)
        saver = tf.compat.v1.train.Saver()
        self.model.save_using(saver)
        checkpoint = kwargs.get('checkpoint')
        if checkpoint is not None:
            skip_blocks = kwargs.get('blocks_to_skip', ['OptimizeLoss'])
            reload_checkpoint(self.model.sess, checkpoint, skip_blocks) 
Developer: dpressel, Project: mead-baseline, Lines: 43, Source: utils.py

Example 13: __init__

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def __init__(self, model_params, **kwargs):
        """Create a Trainer, and give it the parameters needed to instantiate the model

        :param model_params: The model parameters
        :param kwargs: See below

        :Keyword Arguments:

          * *nsteps* (`int`) -- If we should report every n-steps, this should be passed
          * *ema_decay* (`float`) -- If we are doing an exponential moving average, what decay to use
          * *clip* (`int`) -- If we are doing gradient clipping, what value to use
          * *optim* (`str`) -- The name of the optimizer we are using
          * *lr* (`float`) -- The learning rate we are using
          * *mom* (`float`) -- If we are using SGD, what value to use for momentum
          * *beta1* (`float`) -- Adam-specific hyper-param, defaults to `0.9`
          * *beta2* (`float`) -- Adam-specific hyper-param, defaults to `0.999`
          * *epsilon* (`float`) -- Adam-specific hyper-param, defaults to `1e-8`

        """
        super().__init__()

        self.gpus = int(kwargs.get('gpus', 1))
        if type(model_params) is dict:
            self.model = create_model_for('classify', **model_params)
        else:
            self.model = model_params

        self.optimizer = EagerOptimizer(loss, **kwargs)
        self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
        self._checkpoint = tf.train.Checkpoint(optimizer=self.optimizer.optimizer, model=self.model)
        checkpoint_dir = '{}-{}'.format("./tf-classify", os.getpid())

        self.checkpoint_manager = tf.train.CheckpointManager(self._checkpoint,
                                                             directory=checkpoint_dir,
                                                             max_to_keep=5)
        devices = ['/device:GPU:{}'.format(i) for i in range(self.gpus)]
        self.strategy = tf.distribute.MirroredStrategy(devices) 
Developer: dpressel, Project: mead-baseline, Lines: 39, Source: distributed.py

Example 14: __init__

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def __init__(self, model_params, **kwargs):
        """Create a Trainer, and give it the parameters needed to instantiate the model

        :param model_params: The model parameters
        :param kwargs: See below

        :Keyword Arguments:

          * *nsteps* (`int`) -- If we should report every n-steps, this should be passed
          * *ema_decay* (`float`) -- If we are doing an exponential moving average, what decay to use
          * *clip* (`int`) -- If we are doing gradient clipping, what value to use
          * *optim* (`str`) -- The name of the optimizer we are using
          * *lr* (`float`) -- The learning rate we are using
          * *mom* (`float`) -- If we are using SGD, what value to use for momentum
          * *beta1* (`float`) -- Adam-specific hyper-param, defaults to `0.9`
          * *beta2* (`float`) -- Adam-specific hyper-param, defaults to `0.999`
          * *epsilon* (`float`) -- Adam-specific hyper-param, defaults to `1e-8`

        """
        super().__init__()

        if type(model_params) is dict:
            self.model = create_model_for('classify', **model_params)
        else:
            self.model = model_params

        self.optimizer = EagerOptimizer(loss, **kwargs)
        self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
        self._checkpoint = tf.train.Checkpoint(optimizer=self.optimizer.optimizer, model=self.model)
        checkpoint_dir = '{}-{}'.format("./tf-classify", os.getpid())

        self.checkpoint_manager = tf.train.CheckpointManager(self._checkpoint,
                                                             directory=checkpoint_dir,
                                                             max_to_keep=5) 
Developer: dpressel, Project: mead-baseline, Lines: 36, Source: eager.py

Example 15: test_composite_calls_rest

# Required import: import six [as alias]
# Or: from six import MAXSIZE [as alias]
def test_composite_calls_rest():
    warmup_steps = np.random.randint(50, 101)
    warm = MagicMock()
    warm.warmup_steps = warmup_steps
    rest = MagicMock()
    lr = CompositeLRScheduler(warm=warm, rest=rest)
    step = np.random.randint(warmup_steps + 1, six.MAXSIZE)
    _ = lr(step)
    warm.assert_not_called()
    rest.assert_called_once_with(step - warmup_steps) 
Developer: dpressel, Project: mead-baseline, Lines: 12, Source: test_decay.py


Note: The six.MAXSIZE attribute examples in this article were compiled by 純淨天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The code snippets are taken from open-source projects contributed by their respective authors, who retain copyright; consult each project's license before distributing or reusing the code, and do not republish without permission.