This article collects typical usage examples of the Python function six.moves.xrange. If you have been wondering what exactly xrange does, how to use it, or what real-world code that calls it looks like, the curated examples below should help.
The following 15 code examples of the xrange function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
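Before the examples, a minimal sketch of the import itself: six.moves.xrange resolves to the built-in xrange on Python 2 and to range on Python 3, so code written against it runs unchanged on both.

from six.moves import xrange

for i in xrange(3):
    print(i)  # prints 0, 1, 2 on both Python 2 and Python 3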
Example 1: next_batch
def next_batch(self):
    enc_batch = np.zeros((self._batch_size, self._enc_timesteps), dtype=np.int32)
    enc_input_lens = np.zeros(self._batch_size, dtype=np.int32)
    dec_batch = np.zeros((self._batch_size, self._dec_timesteps), dtype=np.int32)
    dec_output_lens = np.zeros(self._batch_size, dtype=np.int32)
    target_batch = np.zeros((self._batch_size, self._dec_timesteps), dtype=np.int32)
    loss_weights = np.zeros((self._batch_size, self._dec_timesteps), dtype=np.float32)
    origin_articles = ['None'] * self._batch_size
    origin_abstracts = ['None'] * self._batch_size

    buckets = self._bucket_input_queue.get()
    for i in xrange(self._batch_size):
        (enc_inputs, dec_inputs, targets, enc_input_len, dec_output_len,
         article, abstract) = buckets[i]
        origin_articles[i] = article
        origin_abstracts[i] = abstract
        enc_input_lens[i] = enc_input_len
        dec_output_lens[i] = dec_output_len
        enc_batch[i, :] = enc_inputs[:]
        dec_batch[i, :] = dec_inputs[:]
        target_batch[i, :] = targets[:]
        for j in xrange(dec_output_len):
            loss_weights[i][j] = 1
    return (
        enc_batch, dec_batch, target_batch, enc_input_lens, dec_output_lens,
        loss_weights, origin_articles, origin_abstracts
    )
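One note on the inner loop: it marks the first dec_output_len positions of row i as real (non-padding) targets. With NumPy the same thing can be done in a single slice assignment; a sketch of the equivalent (not part of the original code):

# Equivalent to the inner `for j in xrange(dec_output_len)` loop above:
loss_weights[i, :dec_output_len] = 1.0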
Example 2: get_run_op
def get_run_op():
    # Create an optimizer that performs gradient descent.
    # opt = tf.train.GradientDescentOptimizer(learning_rate=0.01)
    slice_size = FLAGS.batch_size // FLAGS.num_cuts  # integer division so slice_size stays an int on Python 3
    print('Slice size:{}'.format(slice_size))
    data = None
    label = None
    last_fc = [tf.no_op()]
    with tf.device('/gpu:0'):
        data = tf.get_variable(
            name='data',
            shape=[slice_size, FLAGS.hidden_size],
            trainable=False)
        '''
        label = tf.get_variable(
            name='label',
            shape=[slice_size, FLAGS.hidden_size],
            trainable=False)
        with tf.variable_scope('fc_in'):
            weight_in = tf.zeros([1000, FLAGS.hidden_size])
            for k in xrange(FLAGS.num_cuts):
                with tf.control_dependencies([last_fc[-1]]):
                    last_fc.append(tf.matmul(data[k+1], weight_in))
        '''
        for i in xrange(FLAGS.num_cuts):
            last_fc.append(data)
    for i in xrange(FLAGS.num_layers):
        dev = '/gpu:%d' % (i * FLAGS.num_gpus // FLAGS.num_layers)
        with tf.device(dev), scopes.arg_scope([variables.variable], device=dev):
            tmp_fc = [tf.no_op()]
            with tf.variable_scope('fc%d' % i):
                w = tf.get_variable(
                    name='w',
                    shape=[FLAGS.hidden_size, FLAGS.hidden_size],
                    trainable=True)
                for k in xrange(FLAGS.num_cuts):
                    with tf.control_dependencies([tmp_fc[-1]]):
                        tmp_fc.append(tf.matmul(last_fc[k+1], w))
            last_fc = tmp_fc
        if i == FLAGS.num_layers - 1:
            with tf.control_dependencies(last_fc):
                train_op = tf.no_op()
    '''
    with tf.device('/gpu:%d' % (FLAGS.num_gpus - 1)):
        tmp_fc = [tf.no_op()]
        with tf.variable_scope('fc_out'):
            weight_out = tf.zeros([FLAGS.hidden_size, 1000])
            for k in xrange(FLAGS.num_cuts):
                with tf.control_dependencies([tmp_fc[-1]]):
                    tmp_fc.append(tf.matmul(last_fc[k+1], weight_out))
        last_fc = tmp_fc
    loss = tf.nn.softmax_cross_entropy_with_logits(last_fc, labels, name='xentropy')
    grads = opt.compute_gradients(loss)
    apply_gradient_op = opt.apply_gradients(grads)
    train_op = tf.group(apply_gradient_op)
    '''
    init_op = tf.initialize_all_variables()
    return init_op, train_op
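The pattern to notice above is the chained tf.control_dependencies: each sliced matmul waits on the previous list entry, which serializes the slices within a layer while letting successive layers overlap across GPUs. A stripped-down sketch of just that chaining (TF1-style graph mode; the shapes here are arbitrary illustration values):

import tensorflow as tf
from six.moves import xrange

ops = [tf.no_op()]
x = tf.ones([4, 4])
w = tf.ones([4, 4])
for _ in xrange(3):
    with tf.control_dependencies([ops[-1]]):
        # This matmul is not scheduled until the previous list entry has run.
        ops.append(tf.matmul(x, w))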
Example 3: _shapes
def _shapes(tensor_list_list, shapes, enqueue_many):
    """Calculate and merge the shapes of incoming tensors.

    Args:
        tensor_list_list: List of tensor lists.
        shapes: List of shape tuples corresponding to tensors within the lists.
        enqueue_many: Boolean describing whether shapes will be enqueued as
            batches or individual entries.

    Returns:
        A list of shapes aggregating shape inference info from `tensor_list_list`,
        or returning `shapes` if it is not `None`.

    Raises:
        ValueError: If any of the inferred shapes in `tensor_list_list` lack a
            well defined rank.
    """
    if shapes is None:
        len0 = len(tensor_list_list[0])
        for tl in tensor_list_list:
            for i in xrange(len0):
                if tl[i].get_shape().ndims is None:
                    raise ValueError("Cannot infer Tensor's rank: %s" % tl[i])
        shapes = [_merge_shapes(
            [tl[i].get_shape().as_list() for tl in tensor_list_list], enqueue_many)
            for i in xrange(len0)]
    return shapes
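_merge_shapes itself is not part of this snippet. Roughly, it merges the per-tensor shapes across all lists into one compatible shape, dropping the leading batch dimension when enqueue_many is set; the sketch below is an assumption about its behavior, not the verbatim TensorFlow source:

from tensorflow.python.framework import tensor_shape

def _merge_shapes(shape_list, enqueue_many):
    shape_list = [tensor_shape.as_shape(s) for s in shape_list]
    if enqueue_many:
        # Drop the leading batch dimension; merge only per-element shapes.
        shape_list = [s.with_rank_at_least(1)[1:] for s in shape_list]
    merged_shape = shape_list[0]
    for s in shape_list[1:]:
        merged_shape = merged_shape.merge_with(s)
    return merged_shape.as_list()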
Example 4: write_to_buffer
def write_to_buffer(self, buffer, colors=None):
    if self.mode == MODE_NUMBER:
        for i in xrange(0, len(self.data), 3):
            chars = self.data[i:i + 3]
            bit_length = NUMBER_LENGTH[len(chars)]
            color = self._getColor(i, colors)
            buffer.put(int(chars), bit_length, color)
    elif self.mode == MODE_ALPHA_NUM:
        for i in xrange(0, len(self.data), 2):
            chars = self.data[i:i + 2]
            color = self._getColor(i, colors)
            if len(chars) > 1:
                buffer.put(
                    ALPHA_NUM.find(chars[0]) * 45 +
                    ALPHA_NUM.find(chars[1]), 11, color)
            else:
                buffer.put(ALPHA_NUM.find(chars), 6, color)
    else:
        if six.PY3:
            # Iterating a bytestring in Python 3 returns an integer,
            # no need to ord().
            data = self.data
        else:
            data = [ord(c) for c in self.data]
        for i, c in enumerate(data):
            color = self._getColor(i, colors)
            buffer.put(c, 8, color)
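The `* 45` in the alphanumeric branch comes from the QR specification: each character maps to a value in 0-44, so a pair packs into a single base-45 number that always fits in 11 bits (the maximum is 44 * 45 + 44 = 2024 < 2048). A worked example, assuming the spec's standard character table:

ALPHA_NUM = '0123456789ABCDEFGHIJKLMNOPQRSTUVWXYZ $%*+-./:'

# 'A' -> 10 and 'B' -> 11, so the pair 'AB' is emitted as one 11-bit value:
value = ALPHA_NUM.find('A') * 45 + ALPHA_NUM.find('B')  # 10 * 45 + 11 == 461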
Example 5: eval
def eval(self):
    """Evaluate analogy questions and reports accuracy."""
    # How many questions we get right at precision@1.
    correct = 0
    total = self._analogy_questions.shape[0]
    start = 0
    while start < total:
        limit = start + 2500
        sub = self._analogy_questions[start:limit, :]
        idx = self._predict(sub)
        start = limit
        for question in xrange(sub.shape[0]):
            for j in xrange(4):
                if idx[question, j] == sub[question, 3]:
                    # Bingo! We predicted correctly. E.g., [italy, rome, france, paris].
                    correct += 1
                    break
                elif idx[question, j] in sub[question, :3]:
                    # We need to skip words already in the question.
                    continue
                else:
                    # The correct label is not the precision@1 answer.
                    break
    print()
    print("Eval %4d/%d accuracy = %4.1f%%" % (correct, total,
                                              correct * 100.0 / total))
Example 6: show_topics
def show_topics(self, topics=10, topn=10, log=False, formatted=True):
    shown = []
    if topics < 0:
        topics = len(self.data)
    topics = min(topics, len(self.data))
    for k in xrange(topics):
        lambdak = list(self.data[k, :])
        lambdak = lambdak / sum(lambdak)
        temp = zip(lambdak, xrange(len(lambdak)))
        temp = sorted(temp, key=lambda x: x[0], reverse=True)
        topic_terms = self.show_topic_terms(temp, topn)
        if formatted:
            topic = self.format_topic(k, topic_terms)
            # assuming we only output formatted topics
            if log:
                logger.info(topic)
        else:
            topic = (k, topic_terms)
        shown.append(topic)
    return shown
Example 7: testParallelDequeueUpToRandomPartition
def testParallelDequeueUpToRandomPartition(self):
    with self.test_session() as sess:
        dequeue_sizes = [random.randint(50, 150) for _ in xrange(10)]
        total_elements = sum(dequeue_sizes)
        q = tf.RandomShuffleQueue(total_elements, 0, tf.float32, shapes=())
        elems = [10.0 * x for x in xrange(total_elements)]
        enqueue_op = q.enqueue_many((elems,))
        dequeue_ops = [q.dequeue_up_to(size) for size in dequeue_sizes]

        enqueue_op.run()

        # Dequeue random number of items in parallel on 10 threads.
        dequeued_elems = []

        def dequeue(dequeue_op):
            dequeued_elems.extend(sess.run(dequeue_op))

        threads = []
        for dequeue_op in dequeue_ops:
            threads.append(self.checkedThread(target=dequeue, args=(dequeue_op,)))
        for thread in threads:
            thread.start()
        for thread in threads:
            thread.join()
        self.assertItemsEqual(elems, dequeued_elems)
Example 8: xfun
def xfun(n, d=None):
    """ Create a QTT-representation of the 0:(prod(n)-1) vector.

    Call examples:
        tt.xfun(2, 5)  # create 2 x 2 x 2 x 2 x 2 TT-vector
        tt.xfun(3)  # create [0, 1, 2] one-dimensional TT-vector
        tt.xfun([3, 5, 7], 2)  # create 3 x 5 x 7 x 3 x 5 x 7 TT-vector
    """
    if isinstance(n, six.integer_types):
        n = [n]
    if d is None:
        n0 = _np.asanyarray(n, dtype=_np.int32)
    else:
        n0 = _np.array(n * d, dtype=_np.int32)
    d = n0.size
    if d == 1:
        return _vector.vector.from_list(
            [_np.reshape(_np.arange(n0[0]), (1, n0[0], 1))])
    cr = []
    cur_core = _np.ones((1, n0[0], 2))
    cur_core[0, :, 0] = _np.arange(n0[0])
    cr.append(cur_core)
    ni = float(n0[0])
    for i in xrange(1, d - 1):
        cur_core = _np.zeros((2, n0[i], 2))
        for j in xrange(n0[i]):
            cur_core[:, j, :] = _np.eye(2)
        cur_core[1, :, 0] = ni * _np.arange(n0[i])
        ni *= n0[i]
        cr.append(cur_core)
    cur_core = _np.ones((2, n0[d - 1], 1))
    cur_core[1, :, 0] = ni * _np.arange(n0[d - 1])
    cr.append(cur_core)
    return _vector.vector.from_list(cr)
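A quick way to check what xfun builds, assuming ttpy's vector exposes full() to densify the TT-representation (and its Fortran-order index convention):

import numpy as np
import tt  # the ttpy package

v = tt.xfun(2, 3)  # QTT-vector with mode sizes 2 x 2 x 2
dense = v.full().flatten(order='F')
print(np.allclose(dense, np.arange(8)))  # expect True: [0, 1, ..., 7]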
Example 9: delta
def delta(n, d=None, center=0):
    """ Create TT-vector for delta-function :math:`\\delta(x - x_0)`. """
    if isinstance(n, six.integer_types):
        n = [n]
    if d is None:
        n0 = _np.asanyarray(n, dtype=_np.int32)
    else:
        n0 = _np.array(n * d, dtype=_np.int32)
    d = n0.size

    if center < 0:
        cind = [0] * d
    else:
        cind = []
        for i in xrange(d):
            cind.append(center % n0[i])
            center //= n0[i]
        if center > 0:
            cind = [0] * d

    cr = []
    for i in xrange(d):
        cur_core = _np.zeros((1, n0[i], 1))
        cur_core[0, cind[i], 0] = 1
        cr.append(cur_core)
    return _vector.vector.from_list(cr)
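The else-branch above is just a mixed-radix decomposition of center into one index per mode, after which each core carries a one-hot slice. Traced by hand for n0 = [2, 2, 2] and center = 5: 5 % 2 = 1 (center becomes 2), then 2 % 2 = 0 (center becomes 1), then 1 % 2 = 1 (center becomes 0), so cind == [1, 0, 1] and the dense vector is one-hot at flat index 1 + 0*2 + 1*4 = 5.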
Example 10: test_neville2d
def test_neville2d(self):
    funcx = numpy.sin
    funcy = numpy.exp
    nrow = 10
    ncol = 10
    tol = 1.0e-4
    # TODO: As with test_neville; can this not be simplified with
    # vectorized code
    x = numpy.zeros((nrow, ))
    y = numpy.zeros((ncol, ))
    fval = numpy.empty((nrow, ncol))
    row_tmp = numpy.pi / nrow
    # col_tmp = 1.0 / float(ncol)
    for row in xrange(nrow):
        x[row] = (row + 1.0) * row_tmp
        for col in xrange(ncol):
            y[col] = (col + 1.0) / float(ncol)
            fval[row][col] = funcx(x[row]) * funcy(y[col])

    for row in xrange(ncol):
        xx = (-0.1 + (row + 1.0) / float(nrow)) * numpy.pi
        for col in xrange(4):
            yy = -0.1 + (col + 1.0) / float(ncol)
            answer = funcx(xx) * funcy(yy)
            val = utils.neville2d(xx, yy, x, y, fval)
            self.assertTrue(utils.Knuth_close(answer, val, tol))
Example 11: filter
def filter(self, im):
    falloff = self.falloff
    extent = self.extent

    def length(start, end):
        start_x, start_y = start
        end_x, end_y = end
        dist_x = end_x - start_x
        dist_y = end_y - start_y
        return math.sqrt((dist_x ** 2) + (dist_y ** 2))

    def light_falloff(radius, outside):
        return ((radius / outside) ** falloff) * extent

    im = im.convert('RGBA')
    w, h = im.size
    center = w / 2, h / 2
    outside = length(center, (0, 0))

    data = []
    for y in xrange(h):
        for x in xrange(w):
            radius = length(center, (x, y))
            factor = light_falloff(radius, outside)
            data.append(factor)

    alpha_im = Image.new('L', im.size)
    alpha_im.putdata(data)
    overlay_im = Image.new('L', im.size, 'black')
    return Image.composite(overlay_im, im, alpha_im)
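The nested xrange loops compute a radial alpha mask one pixel at a time. A vectorized sketch of the same mask with NumPy (assuming NumPy is available alongside PIL; the function name is chosen for illustration):

import numpy as np

def vignette_mask(w, h, falloff, extent):
    cx, cy = w / 2, h / 2
    outside = np.hypot(cx, cy)  # distance from the center to the (0, 0) corner
    xs, ys = np.meshgrid(np.arange(w), np.arange(h))
    radius = np.hypot(xs - cx, ys - cy)
    # Same per-pixel factor as light_falloff, in row-major order like the loops.
    return (radius / outside) ** falloff * extent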
Example 12: SoftmaxEval
def SoftmaxEval(self, sess, model, num_steps):
    """Evaluate a model in softmax mode.

    Adds char, word recall and sequence error rate events to the sw summary
    writer, and returns them as well.

    TODO(rays) Add LogisticEval.

    Args:
        sess: A TensorFlow Session.
        model: The model to run in the session. Requires a VGSLImageModel or any
            other class that has a using_ctc attribute and a RunAStep(sess) method
            that returns a softmax result with corresponding labels.
        num_steps: Number of steps to evaluate for.

    Returns:
        ErrorRates named tuple.

    Raises:
        ValueError: If an unsupported number of dimensions is used.
    """
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    # Run the requested number of evaluation steps, gathering the outputs of the
    # softmax and the true labels of the evaluation examples.
    total_label_counts = ec.ErrorCounts(0, 0, 0, 0)
    total_word_counts = ec.ErrorCounts(0, 0, 0, 0)
    sequence_errors = 0
    for _ in xrange(num_steps):
        softmax_result, labels = model.RunAStep(sess)
        # Collapse softmax to same shape as labels.
        predictions = softmax_result.argmax(axis=-1)
        # Exclude batch from num_dims.
        num_dims = len(predictions.shape) - 1
        batch_size = predictions.shape[0]
        null_label = softmax_result.shape[-1] - 1
        for b in xrange(batch_size):
            if num_dims == 2:
                # TODO(rays) Support 2-d data.
                raise ValueError('2-d label data not supported yet!')
            else:
                if num_dims == 1:
                    pred_batch = predictions[b, :]
                    labels_batch = labels[b, :]
                else:
                    pred_batch = [predictions[b]]
                    labels_batch = [labels[b]]
                text = self.StringFromCTC(pred_batch, model.using_ctc, null_label)
                truth = self.StringFromCTC(labels_batch, False, null_label)
                # Note that recall_errs is false negatives (fn) aka drops/deletions.
                # Actual recall would be 1-fn/truth_words.
                # Likewise precision_errs is false positives (fp) aka adds/insertions.
                # Actual precision would be 1-fp/ocr_words.
                total_word_counts = ec.AddErrors(total_word_counts,
                                                 ec.CountWordErrors(text, truth))
                total_label_counts = ec.AddErrors(total_label_counts,
                                                  ec.CountErrors(text, truth))
                if text != truth:
                    sequence_errors += 1
    coord.request_stop()
    coord.join(threads)
    return ec.ComputeErrorRates(total_label_counts, total_word_counts,
                                sequence_errors, num_steps * batch_size)
Example 13: testParams
def testParams(self):
    """Tests that the params work as intended."""
    num_classes = 2
    with self.test_session() as sess:
        # Experiment 1. Update weights only.
        data = constant_op.constant(self.data, dtype=dtypes.float32)
        gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
                                        [[3.0, 3.0], [0.0, 0.0]], 'w')
        training_ops = gmm_tool.training_ops()
        variables.global_variables_initializer().run()
        sess.run(gmm_tool.init_ops())
        for _ in xrange(self.iterations):
            sess.run(training_ops)

        # Only the probability to each class is updated.
        alphas = sess.run(gmm_tool.alphas())
        self.assertGreater(alphas[1], 0.6)
        means = sess.run(gmm_tool.clusters())
        np.testing.assert_almost_equal(
            np.expand_dims([[3.0, 3.0], [0.0, 0.0]], 1), means)
        covs = sess.run(gmm_tool.covariances())
        np.testing.assert_almost_equal(covs[0], covs[1])

        # Experiment 2. Update means and covariances.
        gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
                                        [[3.0, 3.0], [0.0, 0.0]], 'mc')
        training_ops = gmm_tool.training_ops()
        variables.global_variables_initializer().run()
        sess.run(gmm_tool.init_ops())
        for _ in xrange(self.iterations):
            sess.run(training_ops)
        alphas = sess.run(gmm_tool.alphas())
        self.assertAlmostEqual(alphas[0], alphas[1])
        means = sess.run(gmm_tool.clusters())
        np.testing.assert_almost_equal(
            np.expand_dims([[2.0, 2.0], [-1.0, -1.0]], 1), means, decimal=1)
        covs = sess.run(gmm_tool.covariances())
        np.testing.assert_almost_equal(
            [[0.371111, -0.0050774], [-0.0050774, 0.8651744]], covs[0], decimal=4)
        np.testing.assert_almost_equal(
            [[0.146976, 0.0259463], [0.0259463, 0.2543971]], covs[1], decimal=4)

        # Experiment 3. Update covariances only.
        gmm_tool = gmm_ops.GmmAlgorithm([data], num_classes,
                                        [[-1.0, -1.0], [1.0, 1.0]], 'c')
        training_ops = gmm_tool.training_ops()
        variables.global_variables_initializer().run()
        sess.run(gmm_tool.init_ops())
        for _ in xrange(self.iterations):
            sess.run(training_ops)
        alphas = sess.run(gmm_tool.alphas())
        self.assertAlmostEqual(alphas[0], alphas[1])
        means = sess.run(gmm_tool.clusters())
        np.testing.assert_almost_equal(
            np.expand_dims([[-1.0, -1.0], [1.0, 1.0]], 1), means)
        covs = sess.run(gmm_tool.covariances())
        np.testing.assert_almost_equal(
            [[0.1299582, 0.0435872], [0.0435872, 0.2558578]], covs[0], decimal=5)
        np.testing.assert_almost_equal(
            [[3.195385, 2.6989155], [2.6989155, 3.3881593]], covs[1], decimal=5)
Example 14: add_lines
def add_lines(self, levels, colors, linewidths, erase=True):
    '''
    Draw lines on the colorbar.

    *colors* and *linewidths* must be scalars or
    sequences the same length as *levels*.

    Set *erase* to False to add lines without first
    removing any previously added lines.
    '''
    y = self._locate(levels)
    igood = (y < 1.001) & (y > -0.001)
    y = y[igood]
    if cbook.iterable(colors):
        colors = np.asarray(colors)[igood]
    if cbook.iterable(linewidths):
        linewidths = np.asarray(linewidths)[igood]
    N = len(y)
    x = np.array([0.0, 1.0])
    X, Y = np.meshgrid(x, y)
    if self.orientation == 'vertical':
        xy = [list(zip(X[i], Y[i])) for i in xrange(N)]
    else:
        xy = [list(zip(Y[i], X[i])) for i in xrange(N)]
    col = collections.LineCollection(xy, linewidths=linewidths)

    if erase and self.lines:
        for lc in self.lines:
            lc.remove()
        self.lines = []
    self.lines.append(col)
    col.set_color(colors)
    self.ax.add_collection(col)
    self.stale = True
Example 15: parse_atoms
def parse_atoms(self, tokens, command, min_size, max_size=None):
    """
    Parses a sequence of N atoms (min_size <= N <= max_size) consuming
    the tokens.
    """
    if max_size is None:
        max_size = min_size

    res = []
    current = None
    for _ in xrange(min_size):
        current = next(tokens)
        if current == ")":
            raise SyntaxError("Expected at least %d arguments in %s command." %
                              (min_size, command))
        if current == "(":
            raise SyntaxError("Unexpected token '(' in %s command." % command)
        res.append(current)
    for _ in xrange(min_size, max_size + 1):
        current = next(tokens)
        if current == ")":
            return res
        if current == "(":
            raise SyntaxError("Unexpected token '(' in %s command." % command)
        res.append(current)

    raise SyntaxError(
        "Unexpected token '%s' in %s command. Expected at most %d arguments." %
        (current, command, max_size))
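A usage sketch (the parser instance, command name, and token stream are made up for illustration):

tokens = iter(["x", "y", ")"])
# Accept between 1 and 3 atoms; the closing ')' read in the second loop
# ends the sequence early, so this returns ["x", "y"].
atoms = parser.parse_atoms(tokens, "assert", min_size=1, max_size=3)
assert atoms == ["x", "y"]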