本文整理汇总了Python中future.moves.itertools.zip_longest方法的典型用法代码示例。如果您正苦于以下问题:Python itertools.zip_longest方法的具体用法?Python itertools.zip_longest怎么用?Python itertools.zip_longest使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在模块future.moves.itertools的用法示例。
在下文中一共展示了itertools.zip_longest方法的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: result
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def result(self):
    """Encode the accessibility results into the configured output mode.

    :return: The output string — JSON or CSV depending on ``self.output``.
    :raises ValueError: If ``self.output`` is not a recognised mode.
    """
    if self.output == 'json':
        payload = {"Accessible": self._accessible,
                   "Not Accessible": self._not_accessible}
        return dumps(payload, indent=4)
    if self.output == 'csv':
        # csv.writer needs a text buffer on Py3 but a byte buffer on Py2.
        buf = StringIO() if sys.version_info >= (3, 0) else BytesIO()
        csv_writer = writer(buf)
        columns = [self._accessible, self._not_accessible]
        # zip_longest pads the shorter column with None, which the csv
        # writer renders as an empty field.
        for row in zip_longest(*columns):
            csv_writer.writerow(row)
        body = str(buf.getvalue()).strip('\r\n')
        return 'Accessible,Not Accessible\n' + body
    raise ValueError("Unknown output mode")
示例2: permute_by_iteration
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def permute_by_iteration(specs):
    """Interleave iterations across specs grouped by workload.

    Runs the first iteration for all benchmarks first, before proceeding to
    the next iteration, i.e. A1, B1, C1, A2, B2, C2... rather than
    A1, A2, B1, B2, C1, C2...

    If multiple sections were specified in the agenda, this will run all
    sections for the first global spec first, followed by all sections for
    the second spec, etc.

    e.g. given sections X and Y, and global specs A and B, with 2 iterations,
    this will run

    X.A1, Y.A1, X.B1, Y.B1, X.A2, Y.A2, X.B2, Y.B2
    """
    grouped = [list(group) for _, group in groupby(specs, lambda s: s.workload_id)]
    # One list of (spec, iteration_number) tuples per spec.
    per_spec = [
        [(spec, run) for run in range(1, spec.iterations + 1)]
        for spec in chain.from_iterable(grouped)
    ]
    # zip_longest pads the shorter iteration lists with None; drop the pads.
    for entry in chain.from_iterable(zip_longest(*per_spec)):
        if entry is not None:
            yield entry
示例3: permute_by_section
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def permute_by_section(specs):
    """
    Runs the first iteration for all benchmarks first, before proceeding to
    the next iteration, i.e. A1, B1, C1, A2, B2, C2... instead of
    A1, A2, B1, B2, C1, C2...

    If multiple sections were specified in the agenda, this will run all
    specs for the first section followed by all specs for the second
    section, etc.

    e.g. given sections X and Y, and global specs A and B, with 2 iterations,
    this will run

    X.A1, X.B1, Y.A1, Y.B1, X.A2, X.B2, Y.A2, Y.B2
    """
    groups = [list(g) for _, g in groupby(specs, lambda s: s.section_id)]
    all_tuples = []
    for spec in chain(*groups):
        # One (spec, iteration_number) tuple per requested iteration.
        all_tuples.append([(spec, i + 1)
                           for i in range(spec.iterations)])
    # zip_longest interleaves the per-spec iteration lists, padding the
    # shorter ones with None; the pads are filtered out below.
    for t in chain(*list(map(list, zip_longest(*all_tuples)))):
        if t is not None:
            yield t
示例4: test_itertools_zip_longest
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def test_itertools_zip_longest(self):
"""
Tests whether itertools.zip_longest is available.
"""
from itertools import zip_longest
a = (1, 2)
b = [2, 4, 6]
self.assertEqual(list(zip_longest(a, b)),
[(1, 2), (2, 4), (None, 6)])
示例5: test_install_aliases
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def test_install_aliases(self):
    """
    Does the install_aliases() interface monkey-patch urllib etc. successfully?
    """
    from future.standard_library import remove_hooks, install_aliases
    # Start from a clean slate so install_aliases() is what provides the
    # aliases exercised by the imports below.
    remove_hooks()
    install_aliases()
    from collections import Counter, OrderedDict   # backported to Py2.6
    from collections import UserDict, UserList, UserString
    # Requires Python dbm support:
    # import dbm
    # import dbm.dumb
    # import dbm.gnu
    # import dbm.ndbm
    from itertools import filterfalse, zip_longest
    from subprocess import check_output    # backported to Py2.6
    from subprocess import getoutput, getstatusoutput
    from sys import intern
    # test_support may not be available (e.g. on Anaconda Py2.6):
    # import test.support
    import urllib.error
    import urllib.parse
    import urllib.request
    import urllib.response
    import urllib.robotparser
    # With the aliases installed, the Py3-style urllib.request namespace
    # must exist and expose urlopen even on Py2.
    self.assertTrue('urlopen' in dir(urllib.request))
示例6: grouper
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def grouper(iterable, n, fillvalue=None):
    """Collect data into fixed-length chunks or blocks.

    grouper('ABCDEFG', 3, 'x') --> ABC DEF Gxx
    """
    # The same iterator repeated n times advances in lockstep, so
    # zip_longest slices the input into n-sized groups, padding the last
    # group with fillvalue.
    slots = [iter(iterable)] * n
    return zip_longest(*slots, fillvalue=fillvalue)
示例7: all_equal
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def all_equal(iter1, iter2):
    """Return ``True`` if all elements in ``iter1`` and ``iter2`` are equal."""
    # Fast path: direct comparison for scalars, tuples or lists.
    try:
        if iter1 == iter2:
            return True
    except ValueError:
        # Raised by NumPy when an array comparison is used as a bool.
        pass

    # Special case for None.
    if iter1 is None and iter2 is None:
        return True

    # Fall back to direct comparison when either argument is not iterable.
    try:
        elems1, elems2 = iter(iter1), iter(iter2)
    except TypeError:
        try:
            return iter1 == iter2
        except ValueError:
            # Raised by NumPy when comparing arrays.
            return False

    # Sentinel that marks exhaustion of the shorter sequence.
    end = object()
    for first, second in zip_longest(elems1, elems2, fillvalue=end):
        # Different lengths -> not equal.
        if first is end or second is end:
            return False
        if not all_equal(first, second):
            return False
    return True
示例8: all_almost_equal
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def all_almost_equal(iter1, iter2, ndigits=None):
    """Return ``True`` if all elements in ``iter1`` and ``iter2`` are almost equal."""
    # Fast path: identity or direct equality.
    try:
        if iter1 is iter2 or iter1 == iter2:
            return True
    except ValueError:
        # Raised by NumPy when an array comparison is used as a bool.
        pass

    if iter1 is None and iter2 is None:
        return True

    if hasattr(iter1, '__array__') and hasattr(iter2, '__array__'):
        # Only get default ndigits if comparing arrays; keep `None`
        # otherwise for recursive calls.
        if ndigits is None:
            ndigits = _ndigits(iter1, iter2, None)
        return all_almost_equal_array(iter1, iter2, ndigits)

    # Non-iterable leaves are compared numerically with a tolerance.
    try:
        elems1, elems2 = iter(iter1), iter(iter2)
    except TypeError:
        if ndigits is None:
            ndigits = _ndigits(iter1, iter2, None)
        tol = 10 ** -ndigits
        return np.isclose(iter1, iter2, atol=tol, rtol=tol, equal_nan=True)

    # Sentinel that marks exhaustion of the shorter sequence.
    end = object()
    for first, second in zip_longest(elems1, elems2, fillvalue=end):
        # Sequences of different lengths are not (almost) equal.
        if first is end or second is end:
            return False
        if not all_almost_equal(first, second, ndigits):
            return False
    return True
示例9: diff_sysfs_dirs
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def diff_sysfs_dirs(before, after, result):  # pylint: disable=R0914
    """Write token-level diffs between two sysfs directory snapshots.

    Walks ``before``, pairs each regular file with its counterpart under
    ``after``, and writes a diff for each pair to the same relative path
    under ``result``.  Lines are split into alternating word/non-word
    tokens and diffed token by token via ``diff_tokens``.

    :param before: root directory of the "before" snapshot.
    :param after: root directory of the "after" snapshot.
    :param result: root directory under which diff files are written.
    """
    before_files = []
    for root, _, files in os.walk(before):
        before_files.extend([os.path.join(root, f) for f in files])
    # Keep only regular files (drops directories listed via symlinks etc.).
    before_files = list(filter(os.path.isfile, before_files))
    files = [os.path.relpath(f, before) for f in before_files]
    after_files = [os.path.join(after, f) for f in files]
    diff_files = [os.path.join(result, f) for f in files]
    for bfile, afile, dfile in zip(before_files, after_files, diff_files):
        if not os.path.isfile(afile):
            logger.debug('sysfs_diff: {} does not exist or is not a file'.format(afile))
            continue
        with open(bfile) as bfh, open(afile) as afh:  # pylint: disable=C0321
            # NOTE(review): _f presumably prepares the destination path
            # (e.g. creates parent directories) -- confirm its contract.
            with open(_f(dfile), 'w') as dfh:
                # zip_longest pads the shorter file with None, letting us
                # detect when `after` has fewer lines than `before`.
                for i, (bline, aline) in enumerate(zip_longest(bfh, afh), 1):
                    if aline is None:
                        logger.debug('Lines missing from {}'.format(afile))
                        break
                    # Split into alternating word/non-word chunks; the
                    # capturing group keeps the separators in the lists.
                    bchunks = re.split(r'(\W+)', bline)
                    achunks = re.split(r'(\W+)', aline)
                    if len(bchunks) != len(achunks):
                        logger.debug('Token length mismatch in {} on line {}'.format(bfile, i))
                        dfh.write('xxx ' + bline)
                        continue
                    if ((len([c for c in bchunks if c.strip()]) == len([c for c in achunks if c.strip()]) == 2) and
                            (bchunks[0] == achunks[0])):
                        # if there are only two columns and the first column is the
                        # same, assume it's a "header" column and do not diff it.
                        dchunks = [bchunks[0]] + [diff_tokens(b, a) for b, a in zip(bchunks[1:], achunks[1:])]
                    else:
                        dchunks = [diff_tokens(b, a) for b, a in zip(bchunks, achunks)]
                    dfh.write(''.join(dchunks))
示例10: _iterable_grouper
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def _iterable_grouper(iterable, chunk_size):
"""
Group the iterable into chunks of up to chunk_size items
"""
args = [iter(iterable)] * chunk_size
for group in itertools.zip_longest(*args):
group = tuple(item for item in group if item is not None)
yield group
示例11: paint
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def paint(self, text, x, y, colour=7, attr=0, bg=0, transparent=False,
          colour_map=None):
    """
    Paint multi-colour text at the defined location.

    :param text: The (single line) text to be printed.
    :param x: The column (x coord) for the start of the text.
    :param y: The line (y coord) for the start of the text.
    :param colour: The default colour of the text to be displayed.
    :param attr: The default cell attribute of the text to be displayed.
    :param bg: The default background colour of the text to be displayed.
    :param transparent: Whether to print spaces or not, thus giving a
        transparent effect.
    :param colour_map: Colour/attribute list for multi-colour text.

    The colours and attributes are the COLOUR_xxx and A_yyy constants
    defined in the Screen class.
    colour_map is a list of tuples (foreground, attribute, background) that
    must be the same length as the passed in text (or None if no mapping is
    required).
    """
    if colour_map is None:
        # No per-character colours: delegate to the plain printer.
        self.print_at(text, x, y, colour, attr, bg, transparent)
    else:
        # `current` accumulates a run of characters sharing one colour;
        # `offset` is where the run starts, `next_offset` tracks the
        # display width consumed so far.
        offset = next_offset = 0
        current = ""
        # Walk text and colour_map in lockstep; zip_longest pads the
        # shorter one with None (a None map entry keeps current colours).
        for c, m in zip_longest(str(text), colour_map):
            if m:
                # Colour change: flush the accumulated run first...
                if len(current) > 0:
                    self.print_at(current, x + offset, y, colour, attr, bg, transparent)
                    offset = next_offset
                    current = ""
                # ...then pick up whichever of foreground/attribute/background
                # this map entry overrides (None fields leave values as-is).
                if len(m) > 0 and m[0] is not None:
                    colour = m[0]
                if len(m) > 1 and m[1] is not None:
                    attr = m[1]
                if len(m) > 2 and m[2] is not None:
                    bg = m[2]
            if c:
                current += c
                # Wide (non-Latin) characters occupy more than one cell.
                next_offset += wcwidth(c) if ord(c) >= 256 else 1
        # Flush the final run.
        if len(current) > 0:
            self.print_at(current, x + offset, y, colour, attr, bg, transparent)
示例12: test_future_moves
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def test_future_moves(self):
    """
    Ensure everything is available from the future.moves interface that we
    claim and expect. (Issue #104).
    """
    from future.moves.collections import Counter, OrderedDict   # backported to Py2.6
    from future.moves.collections import UserDict, UserList, UserString
    from future.moves import configparser
    from future.moves import copyreg
    from future.moves.itertools import filterfalse, zip_longest
    from future.moves import html
    import future.moves.html.entities
    import future.moves.html.parser
    from future.moves import http
    import future.moves.http.client
    import future.moves.http.cookies
    import future.moves.http.cookiejar
    import future.moves.http.server
    from future.moves import queue
    from future.moves import socketserver
    from future.moves.subprocess import check_output  # even on Py2.6
    from future.moves.subprocess import getoutput, getstatusoutput
    from future.moves.sys import intern
    from future.moves import urllib
    import future.moves.urllib.error
    import future.moves.urllib.parse
    import future.moves.urllib.request
    import future.moves.urllib.response
    import future.moves.urllib.robotparser
    try:
        # Is _winreg available on Py2? If so, ensure future.moves._winreg is available too:
        import _winreg
    except ImportError:
        # Not on Windows Py2 (or on Py3, where the module is `winreg`):
        # nothing further to check.
        pass
    else:
        from future.moves import winreg
    from future.moves import xmlrpc
    import future.moves.xmlrpc.client
    import future.moves.xmlrpc.server
    from future.moves import _dummy_thread
    from future.moves import _markupbase
    from future.moves import _thread
示例13: extract_blocks
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def extract_blocks(img, blksz, stpsz=None):
    """Extract blocks from an ndarray signal into an ndarray.

    Parameters
    ----------
    img : ndarray or tuple of ndarrays
        nd array of images, or tuple of images
    blksz : tuple
        tuple of block sizes; blocks are taken starting from the first
        index of img
    stpsz : tuple, optional (default None, corresponds to steps of 1)
        tuple of step sizes between neighboring blocks

    Returns
    -------
    blks : ndarray
        image blocks
    """
    # See http://stackoverflow.com/questions/16774148 and
    # sklearn.feature_extraction.image.extract_patches_2d
    if isinstance(img, tuple):
        img = np.stack(img, axis=-1)
    if stpsz is None:
        stpsz = (1,) * len(blksz)
    shape = img.shape
    # Number of whole blocks that fit along each dimension; trailing image
    # dimensions not covered by blksz/stpsz default to size/step 1.
    nblocks = tuple(
        int(np.floor((dim - blk) / step) + 1)
        for dim, blk, step in zip_longest(shape, blksz, stpsz, fillvalue=1)
    )
    # Byte strides between neighbouring blocks along each dimension.
    blkstrides = tuple(
        s * step for s, step in zip_longest(img.strides, stpsz, fillvalue=1)
    )
    # Zero-copy view with axes (block interior dims) + (block index dims).
    view = np.lib.stride_tricks.as_strided(
        img, blksz + nblocks, img.strides[:len(blksz)] + blkstrides
    )
    # Flatten the block-index axes into a single trailing axis.
    return np.reshape(view, blksz + (-1,))
示例14: average_blocks
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def average_blocks(blks, imgsz, stpsz=None):
    """Average blocks together from an ndarray to reconstruct ndarray signal.

    Parameters
    ----------
    blks : ndarray
        Array of blocks of a signal
    imgsz : tuple
        Tuple of the signal size
    stpsz : tuple, optional (default None, corresponds to steps of 1)
        Tuple of step sizes between neighboring blocks

    Returns
    -------
    imgs : ndarray
        Reconstructed signal, unknown pixels are returned as np.nan
    """
    blksz = blks.shape[:-1]
    if stpsz is None:
        stpsz = (1,) * len(blksz)
    # Number of blocks along each dimension of the image; trailing image
    # dimensions not covered by blksz/stpsz default to size/step 1.
    numblocks = tuple(
        int(np.floor((dim - blk) / step) + 1)
        for dim, blk, step in zip_longest(imgsz, blksz, stpsz, fillvalue=1)
    )
    blks = np.reshape(blks, blksz + numblocks)
    # Accumulate block contributions and a per-pixel visit count, then
    # divide to average overlapping blocks.
    acc = np.zeros(imgsz, dtype=blks.dtype)
    count = np.zeros(imgsz, dtype=blks.dtype)
    for pos in np.ndindex(numblocks):
        sel = tuple(slice(p * step, p * step + blk)
                    for p, blk, step in zip(pos, blksz, stpsz))
        acc[sel + pos[len(blksz):]] += blks[(Ellipsis,) + pos]
        count[sel + pos[len(blksz):]] += blks.dtype.type(1)
    # Pixels never covered by any block have count 0 and become NaN.
    return np.where(count > 0, (acc / count).astype(blks.dtype), np.nan)
示例15: combine_blocks
# 需要导入模块: from future.moves import itertools [as 别名]
# 或者: from future.moves.itertools import zip_longest [as 别名]
def combine_blocks(blks, imgsz, stpsz=None, fn=np.median):
    """Combine blocks from an ndarray to reconstruct ndarray signal.

    Parameters
    ----------
    blks : ndarray
        Array of blocks of a signal
    imgsz : tuple
        Tuple of the signal size
    stpsz : tuple, optional (default None, corresponds to steps of 1)
        Tuple of step sizes between neighboring blocks
    fn : function, optional (default np.median)
        Function used to resolve multivalued cells

    Returns
    -------
    imgs : ndarray
        Reconstructed signal, unknown pixels are returned as np.nan
    """
    def append_to(cell, value):
        # Element-wise helper: push one block value onto a cell's list.
        cell.append(value)

    vec_append = np.vectorize(append_to, otypes=[np.object_])

    blksz = blks.shape[:-1]
    if stpsz is None:
        stpsz = (1,) * len(blksz)
    # Number of blocks along each dimension of the image; trailing image
    # dimensions not covered by blksz/stpsz default to size/step 1.
    numblocks = tuple(
        int(np.floor((dim - blk) / step) + 1)
        for dim, blk, step in zip_longest(imgsz, blksz, stpsz, fillvalue=1)
    )
    blks = np.reshape(blks, blksz + numblocks)
    # Object array where every cell holds its own distinct empty list
    # (fill([]) alone would share one list; frompyfunc(list, ...) copies).
    cells = np.empty(imgsz, dtype=np.object_)
    cells.fill([])
    cells = np.frompyfunc(list, 1, 1)(cells)
    # Append each block's values to every cell it covers.
    for pos in np.ndindex(numblocks):
        sel = tuple(slice(p * step, p * step + blk)
                    for p, blk, step in zip_longest(pos, blksz, stpsz, fillvalue=1))
        vec_append(cells[sel].squeeze(), blks[(Ellipsis, ) + pos].squeeze())
    # Collapse each cell's list of candidate values with fn.
    return np.vectorize(fn, otypes=[blks.dtype])(cells)