This article collects typical usage examples of the Python six.MAXSIZE attribute. If you have been wondering what six.MAXSIZE is for, how it is used, or what real code that relies on it looks like, the curated examples below may help. You can also explore further usage examples from the six module that provides this attribute.
The following shows 15 code examples of the six.MAXSIZE attribute, sorted by popularity by default.
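For context before the examples: six.MAXSIZE is six's portable name for the largest size a container or index can have (the maximum value of a C Py_ssize_t). On Python 3 it is simply sys.maxsize, i.e. 2**63 - 1 on 64-bit builds; on Python 2 six computed an equivalent value. A quick check:

import sys
import six

# On Python 3, six.MAXSIZE is just sys.maxsize (2**63 - 1 on 64-bit builds).
assert six.MAXSIZE == sys.maxsize
print(six.MAXSIZE)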
Example 1: scan
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def scan(self, tokens, max_matches=six.MAXSIZE, overlap=False):
    """Scan through tokens, yielding (results, start, end) for each match."""
    if not self.streamlined:
        self.streamline()
    matches = 0
    i = 0
    length = len(tokens)
    while i < length and matches < max_matches:
        try:
            results, next_i = self.parse(tokens, i)
        except ParseException as err:
            i += 1
        else:
            if next_i > i:
                matches += 1
                if len(results) == 1:
                    results = results[0]
                yield results, i, next_i
                if overlap:
                    i += 1
                else:
                    i = next_i
            else:
                i += 1
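In this scanner, max_matches=six.MAXSIZE simply means "no limit": with the default, the matches < max_matches condition never becomes the reason the loop stops. The same sentinel idiom works anywhere an optional cap is wanted; the following standalone sketch (a hypothetical helper, not part of the library above) shows the pattern:

import six

def take_while_matching(predicate, items, max_matches=six.MAXSIZE):
    """Yield items satisfying predicate, up to max_matches (effectively unbounded by default)."""
    matches = 0
    for item in items:
        if matches >= max_matches:
            break
        if predicate(item):
            matches += 1
            yield item

print(list(take_while_matching(lambda x: x % 2 == 0, range(10))))     # [0, 2, 4, 6, 8]
print(list(take_while_matching(lambda x: x % 2 == 0, range(10), 2)))  # [0, 2]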
Example 2: __init__
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def __init__(self, model, **kwargs):
    super().__init__()
    if type(model) is dict:
        model = create_model_for('tagger', **model)
    self.grad_accum = int(kwargs.get('grad_accum', 1))
    self.gpus = int(kwargs.get('gpus', 1))
    # By default support IOB1/IOB2
    self.span_type = kwargs.get('span_type', 'iob')
    self.verbose = kwargs.get('verbose', False)
    logger.info('Setting span type %s', self.span_type)
    self.model = model
    self.idx2label = revlut(self.model.labels)
    self.clip = float(kwargs.get('clip', 5))
    self.optimizer = OptimizerManager(self.model, **kwargs)
    if self.gpus > 1:
        logger.info("Trainer for PyTorch tagger currently doesn't support multiple GPUs. Setting to 1.")
        self.gpus = 1
    if self.gpus > 0 and self.model.gpu:
        self.model = model.cuda()
    else:
        logger.warning("Requested training on CPU. This will be slow.")
    self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
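This trainer, like several others below, defaults nsteps to six.MAXSIZE. Judging from these snippets, nsteps controls how often intermediate metrics are reported, so the default effectively turns per-step reporting off. A minimal sketch of that kind of check (a hypothetical helper, not taken from the library):

import six

def should_report(step, nsteps=six.MAXSIZE):
    # With the default, step % nsteps is never 0 for any realistic step count,
    # so intermediate reporting stays disabled unless nsteps is set explicitly.
    return step > 0 and step % nsteps == 0

print(should_report(500, nsteps=100))  # True: report every 100 steps
print(should_report(500))              # False: the MAXSIZE default never fires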
Example 3: length_dist
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def length_dist(synset_1, synset_2):
    """
    Return a measure of the length of the shortest path in the semantic
    ontology (WordNet in our case, as in the paper) between two synsets.
    """
    l_dist = six.MAXSIZE
    if synset_1 is None or synset_2 is None:
        return 0.0
    if synset_1 == synset_2:
        # synset_1 and synset_2 are the same synset: path length 0
        l_dist = 0.0
    else:
        wset_1 = set([str(x.name()) for x in synset_1.lemmas()])
        wset_2 = set([str(x.name()) for x in synset_2.lemmas()])
        if len(wset_1.intersection(wset_2)) > 0:
            # synset_1 != synset_2 but there is word overlap: path length 1.0
            l_dist = 1.0
        else:
            # just compute the shortest path between the two
            l_dist = synset_1.shortest_path_distance(synset_2)
            if l_dist is None:
                l_dist = 0.0
    # normalize path length to the range [0, 1]
    return math.exp(-ALPHA * l_dist)
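Here six.MAXSIZE is only an "infinite distance" placeholder for l_dist; every branch overwrites it before the decay exp(-ALPHA * l_dist) maps the path length into [0, 1]. A hypothetical usage sketch, assuming NLTK's WordNet corpus is installed and that ALPHA is the decay constant defined elsewhere in the module (0.2 is a common choice in this sentence-similarity formulation):

import math
from nltk.corpus import wordnet as wn  # requires: python -m nltk.downloader wordnet

ALPHA = 0.2  # assumed value; the original module defines its own constant

dog = wn.synset('dog.n.01')
cat = wn.synset('cat.n.01')
print(length_dist(dog, dog))  # 1.0: identical synsets give a path length of 0
print(length_dist(dog, cat))  # < 1.0: decays with the WordNet path length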
Example 4: __init__
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def __init__(self, alignment):
    self.alignment = alignment
    self.node_list = [MemoryNode(six.MAXSIZE)]
    self.max_allocation = 0
Example 5: allocate_best_fit
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def allocate_best_fit(self, size):
    size = MemoryManager.align(size, self.alignment)
    best_node = None
    best_offset = None
    best_delta = six.MAXSIZE
    offset = 0
    for i, node in enumerate(self.node_list):
        delta = node.size - size
        if node.is_free and delta >= 0:
            if not best_node or delta < best_delta:
                best_i = i
                best_node = node
                best_offset = offset
                best_delta = delta
        offset += node.size
    if not best_node:
        raise RuntimeError("Bad Allocation")
    else:
        if best_delta == 0:
            best_node.is_free = False
        else:
            self.node_list[best_i].size -= size
            self.node_list.insert(best_i, MemoryNode(size, is_free=False))
    self.max_allocation = max(self.max_allocation, best_offset + size)
    return best_offset
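Examples 4 and 5 belong to the same best-fit memory manager, but MemoryNode and MemoryManager.align are not shown. A minimal sketch of those missing pieces, under the assumption that a node only tracks a size and a free flag and that align rounds a size up to the next alignment boundary:

import six

class MemoryNode(object):
    """One contiguous region of the simulated address space (assumed shape)."""
    def __init__(self, size, is_free=True):
        self.size = size
        self.is_free = is_free

def align(size, alignment):
    """Round size up to the next multiple of alignment."""
    return ((size + alignment - 1) // alignment) * alignment

# The manager starts with a single free node covering the whole (conceptually
# unbounded) address space, which is why its initial size is six.MAXSIZE.
print(align(100, 64))                   # 128
print(MemoryNode(six.MAXSIZE).is_free)  # True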
Example 6: list_more
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def list_more(fn, offset, size, batch_size, *args):
    """List all data by calling fn repeatedly until everything is fetched."""
    if size < 0:
        expected_total_size = six.MAXSIZE
    else:
        expected_total_size = size
        batch_size = min(size, batch_size)
    response = None
    total_count_got = 0
    while True:
        ret = fn(*args, offset=offset, size=batch_size)
        if response is None:
            response = ret
        else:
            response.merge(ret)
        count = ret.get_count()
        total = ret.get_total()
        offset += count
        total_count_got += count
        batch_size = min(batch_size, expected_total_size - total_count_got)
        if count == 0 or offset >= total or total_count_got >= expected_total_size:
            break
    return response
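A negative size means "fetch everything", and six.MAXSIZE stands in for that unbounded expectation so the batching arithmetic still works. A hypothetical response object and fetch function that exercise the loop:

class FakeResponse(object):
    """Implements the merge/get_count/get_total interface that list_more expects."""
    def __init__(self, rows, total):
        self.rows, self.total = rows, total
    def merge(self, other):
        self.rows.extend(other.rows)
    def get_count(self):
        return len(self.rows)
    def get_total(self):
        return self.total

DATA = list(range(10))

def fetch(offset=0, size=0):
    return FakeResponse(DATA[offset:offset + size], total=len(DATA))

merged = list_more(fetch, offset=0, size=-1, batch_size=4)
print(merged.get_count())  # 10: size=-1 walks the whole dataset in batches of 4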
Example 7: query_more
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def query_more(fn, offset, size, batch_size, *args):
    """Query all data by calling fn repeatedly until everything is fetched."""
    if size < 0:
        expected_total_size = six.MAXSIZE
    else:
        expected_total_size = size
        batch_size = min(size, batch_size)
    response = None
    total_count_got = 0
    complete = False
    while True:
        ret = fn(*args, offset=offset, size=batch_size)
        if response is None:
            response = ret
        else:
            response.merge(ret)
        # if the query is incomplete, exit
        if not ret.is_completed():
            break
        count = ret.get_count()
        offset += count
        total_count_got += count
        batch_size = min(batch_size, expected_total_size - total_count_got)
        if count == 0 or total_count_got >= expected_total_size:
            break
    return response
Example 8: read_geonames_csv
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def read_geonames_csv():
    print("Downloading geoname data from: " + GEONAMES_ZIP_URL)
    try:
        url = request.urlopen(GEONAMES_ZIP_URL)
    except URLError:
        print("If you are operating behind a firewall, try setting the HTTP_PROXY/HTTPS_PROXY environment variables.")
        raise
    zipfile = ZipFile(BytesIO(url.read()))
    print("Download complete")
    # Loading geonames data may cause errors without setting csv.field_size_limit:
    if sys.platform == "win32":
        max_c_long_on_windows = (2 ** 32 // 2) - 1
        csv.field_size_limit(max_c_long_on_windows)
    else:
        csv.field_size_limit(sys.maxint if six.PY2 else six.MAXSIZE)
    with zipfile.open('allCountries.txt') as f:
        reader = unicodecsv.DictReader(f,
                                       fieldnames=[
                                           k for k, v in geonames_field_mappings],
                                       encoding='utf-8',
                                       delimiter='\t',
                                       quoting=csv.QUOTE_NONE)
        for d in reader:
            d['population'] = parse_number(d['population'], 0)
            d['latitude'] = parse_number(d['latitude'], 0)
            d['longitude'] = parse_number(d['longitude'], 0)
            if len(d['alternatenames']) > 0:
                d['alternatenames'] = d['alternatenames'].split(',')
            else:
                d['alternatenames'] = []
            yield d
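The MAXSIZE use here is csv.field_size_limit(sys.maxint if six.PY2 else six.MAXSIZE): geonames rows carry very long alternatenames fields, and the default csv field limit (128 KB) would reject them. The separate win32 branch exists because csv.field_size_limit rejects values that do not fit in a C long. A common standalone variant (a sketch, not the function above) just halves the limit until the platform accepts it:

import csv
import six

def set_max_csv_field_size():
    """Raise the csv field size limit as far as this platform allows."""
    limit = six.MAXSIZE
    while True:
        try:
            csv.field_size_limit(limit)
            return limit
        except OverflowError:
            # Where a C long is 32-bit (e.g. Windows), six.MAXSIZE overflows;
            # keep halving until the value fits.
            limit //= 2

print(set_max_csv_field_size())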
Example 9: __init__
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def __init__(self, model, **kwargs):
    super().__init__()
    if type(model) is dict:
        model = create_model_for('classify', **model)
    self.clip = float(kwargs.get('clip', 5))
    self.labels = model.labels
    self.gpus = int(kwargs.get('gpus', 1))
    if self.gpus == -1:
        self.gpus = len(os.getenv('CUDA_VISIBLE_DEVICES', os.getenv('NV_GPU', '0')).split(','))
    self.optimizer = OptimizerManager(model, **kwargs)
    self.model = model
    if self.gpus > 0 and self.model.gpu:
        self.crit = model.create_loss().cuda()
        if self.gpus > 1:
            self.model = torch.nn.DataParallel(model).cuda()
        else:
            self.model.cuda()
    else:
        logger.warning("Requested training on CPU. This will be slow.")
        self.crit = model.create_loss()
        self.model = model
    self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
Example 10: get_metric_cmp
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def get_metric_cmp(metric, user_cmp=None, less_than_metrics=LESS_THAN_METRICS):
    if user_cmp is not None:
        return _try_user_cmp(user_cmp)
    if metric in less_than_metrics:
        return lt, six.MAXSIZE
    return gt, -six.MAXSIZE - 1
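six.MAXSIZE and -six.MAXSIZE - 1 act here as "worst possible score" seeds: lower-is-better metrics start the comparison from the largest integer, higher-is-better metrics from its negative counterpart, so the first real score always wins. A hypothetical usage sketch, assuming lt/gt come from the operator module and that 'f1' is not listed in LESS_THAN_METRICS:

cmp_fn, best_score = get_metric_cmp('f1')  # -> (gt, -six.MAXSIZE - 1)
for score in [0.71, 0.74, 0.73]:
    if cmp_fn(score, best_score):
        best_score = score  # keep the best epoch seen so far
print(best_score)  # 0.74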
Example 11: _try_user_cmp
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def _try_user_cmp(user_cmp):
    user_cmp = user_cmp.lower()
    if user_cmp in {"lt", "less", "less than", "<", "less_than"}:
        return lt, six.MAXSIZE
    if user_cmp in {"le", "lte", "<="}:
        return le, six.MAXSIZE
    if user_cmp in {"ge", "gte", ">="}:
        return ge, -six.MAXSIZE - 1
    return gt, -six.MAXSIZE - 1
Example 12: __init__
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def __init__(self, model_params, **kwargs):
    """Create a Trainer, and give it the parameters needed to instantiate the model

    :param model_params: The model parameters
    :param kwargs: See below

    :Keyword Arguments:
        * *nsteps* (`int`) -- If we should report every n-steps, this should be passed
        * *ema_decay* (`float`) -- If we are doing an exponential moving average, what decay to use
        * *clip* (`int`) -- If we are doing gradient clipping, what value to use
        * *optim* (`str`) -- The name of the optimizer we are using
        * *lr* (`float`) -- The learning rate we are using
        * *mom* (`float`) -- If we are using SGD, what value to use for momentum
        * *beta1* (`float`) -- Adam-specific hyper-param, defaults to `0.9`
        * *beta2* (`float`) -- Adam-specific hyper-param, defaults to `0.999`
        * *epsilon* (`float`) -- Adam-specific hyper-param, defaults to `1e-8`
    """
    super().__init__()
    if type(model_params) is dict:
        self.model = create_model_for('tagger', **model_params)
    else:
        self.model = model_params
    self.sess = self.model.sess
    self.loss = self.model.create_loss()
    span_type = kwargs.get('span_type', 'iob')
    verbose = kwargs.get('verbose', False)
    self.evaluator = TaggerEvaluatorTf(self.model, span_type, verbose)
    self.global_step, self.train_op = optimizer(self.loss, colocate_gradients_with_ops=True, variables=self.model.trainable_variables, **kwargs)
    self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
    tables = tf.compat.v1.tables_initializer()
    self.model.sess.run(tables)
    init = tf.compat.v1.global_variables_initializer()
    self.model.sess.run(init)
    saver = tf.compat.v1.train.Saver()
    self.model.save_using(saver)
    checkpoint = kwargs.get('checkpoint')
    if checkpoint is not None:
        skip_blocks = kwargs.get('blocks_to_skip', ['OptimizeLoss'])
        reload_checkpoint(self.model.sess, checkpoint, skip_blocks)
Example 13: __init__
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def __init__(self, model_params, **kwargs):
    """Create a Trainer, and give it the parameters needed to instantiate the model

    :param model_params: The model parameters
    :param kwargs: See below

    :Keyword Arguments:
        * *nsteps* (`int`) -- If we should report every n-steps, this should be passed
        * *ema_decay* (`float`) -- If we are doing an exponential moving average, what decay to use
        * *clip* (`int`) -- If we are doing gradient clipping, what value to use
        * *optim* (`str`) -- The name of the optimizer we are using
        * *lr* (`float`) -- The learning rate we are using
        * *mom* (`float`) -- If we are using SGD, what value to use for momentum
        * *beta1* (`float`) -- Adam-specific hyper-param, defaults to `0.9`
        * *beta2* (`float`) -- Adam-specific hyper-param, defaults to `0.999`
        * *epsilon* (`float`) -- Adam-specific hyper-param, defaults to `1e-8`
    """
    super().__init__()
    self.gpus = int(kwargs.get('gpus', 1))
    if type(model_params) is dict:
        self.model = create_model_for('classify', **model_params)
    else:
        self.model = model_params
    self.optimizer = EagerOptimizer(loss, **kwargs)
    self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
    self._checkpoint = tf.train.Checkpoint(optimizer=self.optimizer.optimizer, model=self.model)
    checkpoint_dir = '{}-{}'.format("./tf-classify", os.getpid())
    self.checkpoint_manager = tf.train.CheckpointManager(self._checkpoint,
                                                         directory=checkpoint_dir,
                                                         max_to_keep=5)
    devices = ['/device:GPU:{}'.format(i) for i in range(self.gpus)]
    self.strategy = tf.distribute.MirroredStrategy(devices)
Example 14: __init__
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def __init__(self, model_params, **kwargs):
    """Create a Trainer, and give it the parameters needed to instantiate the model

    :param model_params: The model parameters
    :param kwargs: See below

    :Keyword Arguments:
        * *nsteps* (`int`) -- If we should report every n-steps, this should be passed
        * *ema_decay* (`float`) -- If we are doing an exponential moving average, what decay to use
        * *clip* (`int`) -- If we are doing gradient clipping, what value to use
        * *optim* (`str`) -- The name of the optimizer we are using
        * *lr* (`float`) -- The learning rate we are using
        * *mom* (`float`) -- If we are using SGD, what value to use for momentum
        * *beta1* (`float`) -- Adam-specific hyper-param, defaults to `0.9`
        * *beta2* (`float`) -- Adam-specific hyper-param, defaults to `0.999`
        * *epsilon* (`float`) -- Adam-specific hyper-param, defaults to `1e-8`
    """
    super().__init__()
    if type(model_params) is dict:
        self.model = create_model_for('classify', **model_params)
    else:
        self.model = model_params
    self.optimizer = EagerOptimizer(loss, **kwargs)
    self.nsteps = kwargs.get('nsteps', six.MAXSIZE)
    self._checkpoint = tf.train.Checkpoint(optimizer=self.optimizer.optimizer, model=self.model)
    checkpoint_dir = '{}-{}'.format("./tf-classify", os.getpid())
    self.checkpoint_manager = tf.train.CheckpointManager(self._checkpoint,
                                                         directory=checkpoint_dir,
                                                         max_to_keep=5)
Example 15: test_composite_calls_rest
# Required imports: import six [as alias]
# Or alternatively: from six import MAXSIZE [as alias]
def test_composite_calls_rest():
    warmup_steps = np.random.randint(50, 101)
    warm = MagicMock()
    warm.warmup_steps = warmup_steps
    rest = MagicMock()
    lr = CompositeLRScheduler(warm=warm, rest=rest)
    step = np.random.randint(warmup_steps + 1, six.MAXSIZE)
    _ = lr(step)
    warm.assert_not_called()
    rest.assert_called_once_with(step - warmup_steps)