This article collects typical usage examples of the moves.range method from Python's six.moves module. If you are wondering what moves.range does, how to call it, or what real code using it looks like, the curated examples below should help. You can also explore further usage examples of the containing module, six.moves.
Fifteen code examples of moves.range are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the site recommend better Python code examples.
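For orientation, here is a minimal sketch of what the import gives you: on Python 2, six.moves.range maps to the lazy xrange, while on Python 3 it is simply the built-in range, so the loop below behaves identically on both.

# Minimal sketch: six.moves.range is xrange on Python 2 and the built-in range on Python 3.
from six.moves import range

squares = []
for i in range(5):          # lazy iteration on both Python versions
    squares.append(i * i)
print(squares)              # [0, 1, 4, 9, 16]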
Example 1: draw_bounding_boxes
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def draw_bounding_boxes(image, gt_boxes, im_info):
    num_boxes = gt_boxes.shape[0]
    gt_boxes_new = gt_boxes.copy()
    gt_boxes_new[:, :4] = np.round(gt_boxes_new[:, :4].copy() / im_info[2])
    disp_image = Image.fromarray(np.uint8(image[0]))

    for i in range(num_boxes):
        this_class = int(gt_boxes_new[i, 4])
        disp_image = _draw_single_box(disp_image,
                                      gt_boxes_new[i, 0],
                                      gt_boxes_new[i, 1],
                                      gt_boxes_new[i, 2],
                                      gt_boxes_new[i, 3],
                                      'N%02d-C%02d' % (i, this_class),
                                      FONT,
                                      color=STANDARD_COLORS[this_class % NUM_COLORS])

    image[0, :] = np.array(disp_image)
    return image
Author: Sunarker | Project: Collaborative-Learning-for-Weakly-Supervised-Object-Detection | Lines: 21 | Source: visualization.py
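The helper above relies on module-level names that are not shown on this page (_draw_single_box, FONT, STANDARD_COLORS, NUM_COLORS). As a rough standalone sketch of the same idea, drawing one labelled box per range iteration with Pillow, something like the following works; the array shape, box coordinates and colour are illustrative assumptions, not the repository's values.

# Standalone sketch (not the repository's _draw_single_box): labelled boxes with Pillow.
import numpy as np
from PIL import Image, ImageDraw
from six.moves import range

image = np.zeros((64, 64, 3), dtype=np.uint8)          # dummy black image
boxes = np.array([[5, 5, 30, 30], [20, 20, 55, 55]])   # x0, y0, x1, y1 per row

disp = Image.fromarray(image)
draw = ImageDraw.Draw(disp)
for i in range(boxes.shape[0]):
    x0, y0, x1, y1 = boxes[i]
    draw.rectangle([int(x0), int(y0), int(x1), int(y1)], outline='red')
    draw.text((int(x0), int(y0)), 'N%02d' % i, fill='red')
image = np.array(disp)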
Example 2: __init__
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def __init__(self, get_df, num_prefetch, num_thread):
    """
    Args:
        get_df ( -> DataFlow): a callable which returns a DataFlow.
            Each thread will call this function to get the DataFlow to use.
            Therefore do not return the same DataFlow object for each call,
            unless your dataflow is stateless.
        num_prefetch (int): size of the queue
        num_thread (int): number of threads
    """
    assert num_thread > 0, num_thread
    assert num_prefetch > 0, num_prefetch
    self.num_thread = num_thread
    self.queue = queue.Queue(maxsize=num_prefetch)
    self.threads = [
        MultiThreadRunner._Worker(get_df, self.queue)
        for _ in range(num_thread)]
    try:
        self._size = self.__len__()
    except NotImplementedError:
        self._size = -1
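MultiThreadRunner._Worker is internal to tensorpack and not reproduced on this page. A simplified, self-contained sketch of the same prefetching pattern, a pool of threads created with range(num_thread) that push items into a bounded queue.Queue, might look like the following; the worker body is an assumption for illustration, not tensorpack's code.

# Simplified sketch of the thread-prefetch pattern (not tensorpack's implementation).
import threading
from six.moves import queue, range

def start_prefetch(make_iterator, num_prefetch=16, num_thread=2):
    q = queue.Queue(maxsize=num_prefetch)        # bounded buffer

    def worker():
        for item in make_iterator():             # each thread gets its own iterator
            q.put(item)

    threads = [threading.Thread(target=worker) for _ in range(num_thread)]
    for t in threads:
        t.daemon = True
        t.start()
    return q

q = start_prefetch(lambda: iter(range(100)))
print(q.get())                                   # first prefetched item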
Example 3: _get_ngrams
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def _get_ngrams(segment, max_order):
    """Extracts all n-grams up to a given maximum order from an input segment.

    Args:
        segment: text segment from which n-grams will be extracted.
        max_order: maximum length in tokens of the n-grams returned by this
            method.

    Returns:
        The Counter containing all n-grams up to max_order in segment
        with a count of how many times each n-gram occurred.
    """
    ngram_counts = collections.Counter()
    for order in range(1, max_order + 1):
        for i in range(0, len(segment) - order + 1):
            ngram = tuple(segment[i:i + order])
            ngram_counts[ngram] += 1
    return ngram_counts
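A quick usage check, assuming the _get_ngrams definition above is in scope (it also needs import collections and from six.moves import range):

# Usage sketch for the function above.
counts = _get_ngrams(['the', 'cat', 'sat'], max_order=2)
print(counts[('the',)])        # 1
print(counts[('the', 'cat')])  # 1
print(counts[('cat', 'sat')])  # 1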
Example 4: deBruijn
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def deBruijn(n, k):
    """
    An implementation of the FKM algorithm for generating the de Bruijn
    sequence containing all k-ary strings of length n, as described in
    "Combinatorial Generation" by Frank Ruskey.
    """
    a = [0] * (n + 1)

    def gen(t, p):
        if t > n:
            for v in a[1:p + 1]:
                yield v
        else:
            a[t] = a[t - p]
            for v in gen(t + 1, p):
                yield v
            for j in range(a[t - p] + 1, k):
                a[t] = j
                for v in gen(t + 1, t):
                    yield v

    return gen(1, 1)
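deBruijn returns a generator that yields k**n digits; joining them gives the (cyclic) sequence. A usage sketch, assuming the definition above:

# Usage sketch: the binary de Bruijn sequence of order 3 has 2**3 = 8 digits.
seq = ''.join(str(d) for d in deBruijn(3, 2))
print(seq)        # e.g. '00010111'; every 3-bit string occurs exactly once cyclically
print(len(seq))   # 8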
Example 5: caesar_app
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def caesar_app(parser, cmd, args):  # pragma: no cover
    """
    Caesar crypt a value with a key.
    """
    parser.add_argument('shift', type=int, help='the shift to apply')
    parser.add_argument('value', help='the value to caesar crypt, read from stdin if omitted', nargs='?')
    parser.add_argument(
        '-s', '--shift-range',
        dest='shift_ranges',
        action='append',
        help='specify a character range to shift (defaults to a-z, A-Z)'
    )
    args = parser.parse_args(args)

    if not args.shift_ranges:
        args.shift_ranges = ['az', 'AZ']

    return caesar(args.shift, pwnypack.main.string_value_or_stdin(args.value), args.shift_ranges)
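caesar_app is only the argparse front end; the actual shifting is done by pwnypack's caesar function, which is not reproduced here. A rough standalone sketch of a letter-range Caesar shift (not pwnypack's implementation) could look like this:

# Standalone sketch of a Caesar shift over a-z and A-Z (not pwnypack's caesar()).
def caesar_shift(shift, text):
    out = []
    for ch in text:
        for lo, hi in (('a', 'z'), ('A', 'Z')):
            if lo <= ch <= hi:
                span = ord(hi) - ord(lo) + 1
                ch = chr(ord(lo) + (ord(ch) - ord(lo) + shift) % span)
                break
        out.append(ch)
    return ''.join(out)

print(caesar_shift(13, 'Hello, world!'))   # Uryyb, jbeyq!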
Example 6: main
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def main():
    context_dimension = 5
    action_storage = MemoryActionStorage()
    action_storage.add([Action(i) for i in range(5)])

    # Regret Analysis
    n_rounds = 10000
    context, desired_actions = simulation.simulate_data(
        n_rounds, context_dimension, action_storage, random_state=1)

    policy = UCB1(MemoryHistoryStorage(), MemoryModelStorage(),
                  action_storage)

    for t in range(n_rounds):
        history_id, recommendation = policy.get_action(context[t])
        action_id = recommendation.action.id
        if desired_actions[t] != action_id:
            policy.reward(history_id, {action_id: 0})
        else:
            policy.reward(history_id, {action_id: 1})

    policy.plot_avg_regret()
    plt.show()
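The example above depends on the striatum bandit library (UCB1, MemoryActionStorage, simulation, and so on). For readers without it, a minimal standalone UCB1 loop over Bernoulli arms, using only numpy and range, is sketched below; the arm probabilities are made-up values and this is not striatum's API.

# Minimal standalone UCB1 sketch (not striatum's API); arm probabilities are illustrative.
import numpy as np
from six.moves import range

probs = [0.2, 0.5, 0.7]                  # assumed Bernoulli reward probabilities
n_arms, n_rounds = len(probs), 2000
pulls = np.zeros(n_arms)
total = np.zeros(n_arms)
rng = np.random.RandomState(1)

for t in range(1, n_rounds + 1):
    if t <= n_arms:                      # play every arm once first
        arm = t - 1
    else:
        ucb = total / pulls + np.sqrt(2 * np.log(t) / pulls)
        arm = int(np.argmax(ucb))
    reward = rng.binomial(1, probs[arm])
    pulls[arm] += 1
    total[arm] += reward

print(pulls)                             # most pulls should go to the 0.7 arm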
Example 7: calculate_cum_reward
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def calculate_cum_reward(policy):
    """Calculate cumulative reward with respect to time.

    Parameters
    ----------
    policy: bandit object
        The bandit algorithm you want to evaluate.

    Returns
    -------
    cum_reward: dict
        The dict stores {history_id: cumulative reward}.
    cum_n_actions: dict
        The dict stores
        {history_id: cumulative number of recommended actions}.
    """
    cum_reward = {-1: 0.0}
    cum_n_actions = {-1: 0}

    for i in range(policy.history_storage.n_histories):
        reward = policy.history_storage.get_history(i).rewards
        cum_n_actions[i] = cum_n_actions[i - 1] + len(reward)
        cum_reward[i] = cum_reward[i - 1] + sum(six.viewvalues(reward))

    return cum_reward, cum_n_actions
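The six-specific piece here is six.viewvalues, which returns a view of a dict's values on both Python 2 and 3; the running totals are keyed by history id, with -1 as the seed entry. A tiny illustration of the same accumulation over made-up per-round reward dicts:

# Sketch of the accumulation pattern with made-up per-round reward dicts.
import six
from six.moves import range

rewards_per_round = [{0: 1.0}, {1: 0.0}, {2: 1.0}]
cum_reward = {-1: 0.0}
for i in range(len(rewards_per_round)):
    cum_reward[i] = cum_reward[i - 1] + sum(six.viewvalues(rewards_per_round[i]))
print(cum_reward)   # {-1: 0.0, 0: 1.0, 1: 1.0, 2: 2.0}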
Example 8: get_subword_for_word
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def get_subword_for_word(word, n1=3, n2=6, include_self=False):
    """only extract the prefix and suffix"""
    z = []
    if len(word) >= n1:
        word = "*" + word + "*"
        l = len(word)
        n1 = min(n1, l)
        n2 = min(n2, l)
        # bind method outside of loop to reduce overhead
        # https://github.com/scikit-learn/scikit-learn/blob/a24c8b46/sklearn/feature_extraction/text.py#L144
        z_append = z.append
        if include_self:
            z_append(word)
        for k in range(n1, n2 + 1):
            z_append(word[:k])
            z_append(word[-k:])
    return z
    # 564 µs ± 14.9 µs per loop (mean ± std. dev. of 7 runs, 1000 loops each)
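A usage check, assuming the definition above: the word is wrapped in * markers and prefixes/suffixes of lengths 3 through 6 are collected.

# Usage sketch for the function above.
print(get_subword_for_word('where'))
# ['*wh', 're*', '*whe', 'ere*', '*wher', 'here*', '*where', 'where*']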
Example 9: add_voice
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def add_voice(self):
    new_voice = Voice({})  # creates new voice with no name and empty tree of corpora
    texts = os.listdir('texts')
    add_another_corpus = ''
    while add_another_corpus != 'n':
        for i in range(len(texts)):
            print("%s %s" % (i + 1, texts[i]))
        choice = input('Enter the number of the corpus you want to load:\n')
        corpus_name = texts[int(choice) - 1]
        path = 'texts/%s' % corpus_name
        f = open(path, 'r')
        text = f.read()
        corpus_weight_prompt = 'Enter the weight for %s:\n' % corpus_name
        corpus_weight = float(input(corpus_weight_prompt))
        new_voice.add_corpus(Corpus(text, corpus_name), corpus_weight)
        texts.remove(corpus_name)
        add_another_corpus = input('Add another corpus to this voice? y/n\n')
    voicename = input('Name this voice:\n')
    new_voice.name = voicename
    new_voice.normalize_weights()
    self.voices[voicename] = new_voice

# asks user to specify a transcript and number of characters, and makes separate voices for that number of
# the most represented characters in the transcript
Example 10: load_voices_from_transcript
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def load_voices_from_transcript(self):
    transcripts = os.listdir('texts/transcripts')
    for i in range(len(transcripts)):
        print("%s %s" % (i + 1, transcripts[i]))
    choice = input('Enter the number of the transcript you want to load:\n')
    transcript_name = transcripts[int(choice) - 1]
    number = int(input('Enter the number of voices to load:\n'))
    for charname, size in self.biggest_characters(transcript_name, number):
        print(charname)
        path = 'texts/transcripts/%s/%s' % (transcript_name, charname)
        source_text = open(path).read()
        corpus_name = charname
        weighted_corpora = {}
        weighted_corpora[charname] = [Corpus(source_text, corpus_name), 1]
        self.voices[charname] = Voice(weighted_corpora, charname)

# retrieves a list of the top 20 largest character text files in a transcript folder
Example 11: register_options_provider
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def register_options_provider(self, provider, own_group=True):
    """register an options provider"""
    assert provider.priority <= 0, "provider's priority can't be >= 0"
    for i in range(len(self.options_providers)):
        if provider.priority > self.options_providers[i].priority:
            self.options_providers.insert(i, provider)
            break
    else:
        self.options_providers.append(provider)
    non_group_spec_options = [option for option in provider.options
                              if 'group' not in option[1]]
    groups = getattr(provider, 'option_groups', ())
    if own_group and non_group_spec_options:
        self.add_option_group(provider.name.upper(), provider.__doc__,
                              non_group_spec_options, provider)
    else:
        for opt, optdict in non_group_spec_options:
            self.add_optik_option(provider, self.cmdline_parser, opt, optdict)
    for gname, gdoc in groups:
        gname = gname.upper()
        goptions = [option for option in provider.options
                    if option[1].get('group', '').upper() == gname]
        self.add_option_group(gname, gdoc, goptions, provider)
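The noteworthy detail above is the for ... else construct: the else branch runs only when the loop finishes without hitting break, i.e. when no existing provider has a lower priority, so the new provider is appended at the end. A standalone sketch of that priority-insert pattern (the data is made up):

# Standalone sketch of the priority-insert pattern used above (made-up data).
from six.moves import range

providers = [{'name': 'a', 'priority': 0}, {'name': 'b', 'priority': -2}]
new = {'name': 'c', 'priority': -1}

for i in range(len(providers)):
    if new['priority'] > providers[i]['priority']:
        providers.insert(i, new)
        break
else:                                    # no break: lowest priority seen so far
    providers.append(new)

print([p['name'] for p in providers])    # ['a', 'c', 'b']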
Example 12: main
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def main(unused_argv):
    generate.init_modules(FLAGS.train_split)

    output_dir = os.path.expanduser(FLAGS.output_dir)
    if os.path.exists(output_dir):
        logging.fatal('output dir %s already exists', output_dir)
    logging.info('Writing to %s', output_dir)
    os.makedirs(output_dir)

    for regime, flat_modules in six.iteritems(generate.filtered_modules):
        regime_dir = os.path.join(output_dir, regime)
        os.mkdir(regime_dir)
        per_module = generate.counts[regime]
        for module_name, module in six.iteritems(flat_modules):
            path = os.path.join(regime_dir, module_name + '.txt')
            with open(path, 'w') as text_file:
                for _ in range(per_module):
                    problem, _ = generate.sample_from_module(module)
                    text_file.write(str(problem.question) + '\n')
                    text_file.write(str(problem.answer) + '\n')
            logging.info('Written %s', path)
Example 13: _sample_integrand
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def _sample_integrand(coefficients, derivative_order, derivative_axis, entropy):
    """Integrates `coefficients` and adds sampled "constant" terms."""
    coefficients = np.asarray(coefficients)

    # Integrate (with zero for constant terms).
    integrand = coefficients
    for _ in range(derivative_order):
        integrand = polynomials.integrate(integrand, derivative_axis)

    # Add on sampled constant terms.
    constant_degrees = np.array(integrand.shape) - 1
    constant_degrees[derivative_axis] = derivative_order - 1
    extra_coeffs = polynomials.sample_coefficients(constant_degrees, entropy)
    pad_amount = coefficients.shape[derivative_axis]
    pad = [(0, pad_amount if i == derivative_axis else 0)
           for i in range(coefficients.ndim)]
    extra_coeffs = np.pad(extra_coeffs, pad, 'constant', constant_values=0)
    return integrand + extra_coeffs
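polynomials.integrate and polynomials.sample_coefficients come from the mathematics_dataset project and are not shown on this page. The reusable trick is building a per-axis pad specification with range(ndim) so that np.pad widens only the chosen axis; a small standalone illustration:

# Standalone illustration of padding a single axis (not the project's polynomials module).
import numpy as np
from six.moves import range

a = np.ones((2, 3))
axis, pad_amount = 1, 2
pad = [(0, pad_amount if i == axis else 0) for i in range(a.ndim)]
print(np.pad(a, pad, 'constant', constant_values=0).shape)   # (2, 5)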
Example 14: _unique_values
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def _unique_values(entropy, only_integers=False, count=None):
    """Generates unique values."""
    if count is None:
        count = random.randint(*_sort_count_range(entropy))

    if only_integers:
        sampler = functools.partial(number.integer, signed=True)
    else:
        sampler = integer_or_rational_or_decimal

    for _ in range(1000):
        entropies = entropy * np.random.dirichlet(np.ones(count))
        entropies = np.maximum(1, entropies)
        values = [sampler(ent) for ent in entropies]
        if len(sympy.FiniteSet(*values)) == len(values):
            return values

    raise ValueError('Could not generate {} unique values with entropy={}'
                     .format(count, entropy))
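The rejection loop above splits the entropy budget across count values with a Dirichlet draw and retries (up to 1000 times) until sympy.FiniteSet reports no duplicates. On its own, the entropy-splitting step looks like this:

# Illustration of the entropy split used above.
import numpy as np

entropy, count = 10.0, 4
entropies = entropy * np.random.dirichlet(np.ones(count))
entropies = np.maximum(1, entropies)   # every value gets at least entropy 1
print(entropies, entropies.sum())      # sums to about 10 (slightly more after the floor)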
Example 15: _sequence_event
# Required import: from six import moves [as alias]
# Or: from six.moves import range [as alias]
def _sequence_event(values, length, verb):
    """Returns sequence (finite product) event.

    Args:
        values: List of values to sample from.
        length: Length of the sequence to generate.
        verb: Verb in infinitive form.

    Returns:
        Instance of `probability.FiniteProductEvent`, together with a text
        description.
    """
    del verb  # unused
    samples = [random.choice(values) for _ in range(length)]
    events = [probability.DiscreteEvent([sample]) for sample in samples]
    event = probability.FiniteProductEvent(events)
    sequence = ''.join(str(sample) for sample in samples)
    event_description = 'sequence {sequence}'.format(sequence=sequence)
    return event, event_description
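probability.DiscreteEvent and probability.FiniteProductEvent belong to the mathematics_dataset project; stripped of those, the sampling step itself is just repeated random.choice, for example:

# Sketch of the sampling step only (the probability.* event classes are project-specific).
import random
from six.moves import range

values, length = ['H', 'T'], 3
samples = [random.choice(values) for _ in range(length)]
print('sequence {}'.format(''.join(samples)))   # e.g. sequence HTH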