This article collects typical usage examples of the Python method six.moves.reduce. If you are wondering what exactly moves.reduce does, how to call it, or what real code using it looks like, the curated method examples below may help. You can also explore further usage examples from the six.moves module it belongs to.
The following shows 15 code examples of the moves.reduce method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
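For orientation, here is a minimal, hypothetical sketch (not taken from any of the projects below): six.moves.reduce resolves to the built-in reduce on Python 2 and to functools.reduce on Python 3, so it folds an iterable into a single accumulated value.

from six.moves import reduce

# Fold a list into a running sum; the third argument is the initial accumulator.
total = reduce(lambda acc, x: acc + x, [1, 2, 3, 4], 0)
print(total)  # 10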
Example 1: osf_crawl
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def osf_crawl(k, *pths, **kw):
    '''
    osf_crawl(k) crawls the osf repository k and returns a lazy nested map structure of the
    repository's files. Folders have values that are maps of their contents while files have
    values that are their download links.
    osf_crawl(k1, k2, ...) is equivalent to osf_crawl(posixpath.join(k1, k2...)).

    The optional named argument base (default: 'osfstorage') may be specified to search in a
    non-standard storage position in the OSF URL; e.g. in the github storage.
    '''
    from six.moves import reduce
    base = kw.pop('base', 'osfstorage')
    root = kw.pop('root', None)
    if len(kw) > 0: raise ValueError('Unknown optional parameters: %s' % (list(kw.keys()),))
    if k.lower().startswith('osf:'): k = k[4:]
    k = k.lstrip('/')
    pths = [p.lstrip('/') for p in (k.split('/') + list(pths))]
    (bpth, pths) = (pths[0].strip('/'), [p for p in pths[1:] if p != ''])
    if root is None: root = _osf_tree(bpth, base=base)
    return reduce(lambda m, k: m[k], pths, root)
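A minimal sketch of the final reduce call above, assuming a hypothetical nested map in place of a real OSF tree (the keys and link strings are made up):

from six.moves import reduce

tree = {'data': {'raw': '<download-link-1>', 'README.txt': '<download-link-2>'}}
# Successive indexing: tree['data']['README.txt']
print(reduce(lambda m, k: m[k], ['data', 'README.txt'], tree))  # <download-link-2>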
Example 2: build_gemm_call
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def build_gemm_call(self):
    return reduce(str.__add__, (
        self.declare_NS,
        self.check_xyz_rank2,
        self.setup_z_Nz_Sz,
        self.check_xyz_double_or_float,
        self.check_ab_double_or_float,
        self.check_dims,
        self.check_strides,
        self.encode_strides_in_unit,
        self.compute_strides,
        self.begin_switch_typenum,
        self.case_float,
        self.case_float_ab_constants,
        self.case_float_gemm,
        self.case_double,
        self.case_double_ab_constants,
        self.case_double_gemm,
        self.end_switch_typenum), '')
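The call above folds a tuple of code fragments into a single string. A self-contained sketch of the same reduce(str.__add__, ..., '') pattern, using made-up fragments:

from six.moves import reduce

fragments = ('declare;\n', 'check_dims;\n', 'run_gemm;\n')
# Equivalent to ''.join(fragments), built by repeated string addition.
print(reduce(str.__add__, fragments, ''))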
Example 3: values_eq_approx_high_tol
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def values_eq_approx_high_tol(a, b):
    """
    This function is needed so that DebugMode does not raise spurious
    errors caused by rounding error.
    This can happen because we reduce over the two last dimensions, so the
    absolute error can grow when the number of elements we reduce over is
    significant.
    """
    assert a.ndim == 4
    atol = None
    if a.shape[-1] * a.shape[-2] > 100:
        # For float32 the default atol is 1e-5
        atol = 3e-5
    return CudaNdarrayType.values_eq_approx(a, b, atol=atol)
Example 4: get_gateway_address
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def get_gateway_address(self):
    """
    Perform several sets of commands to obtain the gateway address:
    * `adb getprop dhcp.wlan0.gateway`
    * `adb shell netcfg | grep wlan0`

    Returns:
        None if no gateway address has been found, otherwise the gateway address
    """
    ip2int = lambda ip: reduce(lambda a, b: (a << 8) + b, map(int, ip.split('.')), 0)
    int2ip = lambda n: '.'.join([str(n >> (i << 3) & 0xFF) for i in range(0, 4)[::-1]])
    try:
        res = self.shell('getprop dhcp.wlan0.gateway')
    except AdbShellError:
        res = ''
    matcher = IP_PATTERN.search(res)
    if matcher:
        return matcher.group(0)
    ip = self.get_ip_address()
    if not ip:
        return None
    mask_len = self._get_subnet_mask_len()
    gateway = (ip2int(ip) & (((1 << mask_len) - 1) << (32 - mask_len))) + 1
    return int2ip(gateway)
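A standalone sketch of the ip2int/int2ip helpers above (the address is an arbitrary example); the reduce call shifts the accumulator left by 8 bits and adds each octet:

from six.moves import reduce

ip2int = lambda ip: reduce(lambda a, b: (a << 8) + b, map(int, ip.split('.')), 0)
int2ip = lambda n: '.'.join([str(n >> (i << 3) & 0xFF) for i in range(0, 4)[::-1]])

print(ip2int('192.168.1.17'))  # 3232235793
print(int2ip(3232235793))      # 192.168.1.17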
Example 5: __mul__
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def __mul__(self, other):
    """
    Outer product of two cycles (`itertools.product`) or integer
    multiplication.

    Parameters
    ----------
    other : Cycler or int
        The second Cycler or integer
    """
    if isinstance(other, Cycler):
        return Cycler(self, other, product)
    elif isinstance(other, int):
        trans = self.by_key()
        return reduce(add, (_cycler(k, v * other)
                            for k, v in six.iteritems(trans)))
    else:
        return NotImplemented
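Examples 5 and 6 both rely on reduce(add, ...) summing objects that define __add__. A minimal, hypothetical sketch of that pattern using plain lists instead of cyclers:

from operator import add
from six.moves import reduce

parts = ([k] * 2 for k in ('a', 'b', 'c'))
print(reduce(add, parts))  # ['a', 'a', 'b', 'b', 'c', 'c']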
Example 6: simplify
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def simplify(self):
    """Simplify the Cycler

    Returned as a composition using only sums (no multiplications)

    Returns
    -------
    simple : Cycler
        An equivalent cycler using only summation"""
    # TODO: sort out if it is worth the effort to make sure this is
    # balanced.  Currently it is
    # (((a + b) + c) + d) vs
    # ((a + b) + (c + d))
    # I would believe that there are some performance implications
    trans = self.by_key()
    return reduce(add, (_cycler(k, v) for k, v in six.iteritems(trans)))
Example 7: _parse_reduction
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def _parse_reduction(self, operation, inames, red_exprs,
                     allow_simultaneous=False):
    if isinstance(inames, p.Variable):
        inames = (inames,)

    if not isinstance(inames, tuple):
        raise TypeError("iname argument to reduce() must be a symbol "
                        "or a tuple of symbols")

    processed_inames = []
    for iname in inames:
        if not isinstance(iname, p.Variable):
            raise TypeError("iname argument to reduce() must be a symbol "
                            "or a tuple of symbols")

        processed_inames.append(iname.name)

    if len(red_exprs) == 1:
        red_exprs = red_exprs[0]

    return Reduction(operation, tuple(processed_inames), red_exprs,
                     allow_simultaneous=allow_simultaneous)
Example 8: _handle_license_list
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def _handle_license_list(self, lics_set, cls=None):
    """
    Return a license representing a `cls` object (LicenseConjunction
    or LicenseDisjunction) from a list of license resources or None.
    """
    licenses = []
    for _, _, lics_member in self.graph.triples(
            (lics_set, self.spdx_namespace['member'], None)):
        try:
            licenses.append(self.handle_lics(lics_member))
        except CardinalityError:
            self.value_error('LICS_LIST_MEMBER', lics_member)
            break

    if len(licenses) > 1:
        return reduce(lambda a, b: cls(a, b), licenses)
    else:
        self.value_error('PKG_CONC_LIST', '')
        return
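A hedged sketch of how reduce(lambda a, b: cls(a, b), licenses) left-folds a flat list into a nested binary object; Conjunction below is a made-up stand-in for LicenseConjunction:

from six.moves import reduce

class Conjunction(object):  # hypothetical stand-in for LicenseConjunction
    def __init__(self, a, b):
        self.a, self.b = a, b
    def __repr__(self):
        return '({0} AND {1})'.format(self.a, self.b)

print(reduce(lambda a, b: Conjunction(a, b), ['MIT', 'BSD-3-Clause', 'Apache-2.0']))
# ((MIT AND BSD-3-Clause) AND Apache-2.0)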
Example 9: crt
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def crt(ak, nk):
    """
    Chinese Remainder Theorem implementation
    using Gauss's proof and its generalization to gcd(n1, n2) != 1.
    Requires len(ak) == len(nk).

    Original: https://gist.github.com/elliptic-shiho/901d223135965308a5f9ff0cf99dd7c8
    Explanation: http://elliptic-shiho.hatenablog.com/entry/2016/04/03/020117

    Args:
        ak: Numbers [a1, a2, ..., ak]
        nk: Moduli [n1, n2, ..., nk]
    """
    from six.moves import reduce
    assert len(ak) == len(nk)
    N = reduce(lambda x, y: x * y, nk, 1)
    l = lcm(*nk)
    s = 0
    for n, a in zip(nk, ak):
        m = N // n
        g, x, y = egcd(m, n)
        s += (m // g) * x * a
        s %= l
    return s
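Inside crt(), reduce only computes the product of the moduli; a small self-contained sketch of that step with arbitrary moduli:

from six.moves import reduce

nk = [3, 5, 7]
N = reduce(lambda x, y: x * y, nk, 1)
print(N)  # 105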
Example 10: get_queryset
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def get_queryset(self):
    if not (self.request.user.is_authenticated and self.request.user.is_staff):
        raise Http404

    queryset = Plan.objects.exclude(enabled=False)
    if self.q:
        queryset = queryset.annotate(
            name_provider__name__company=Concat(
                F("name"), Value(" "), F("provider__name"), Value(" "), F("provider__company")
            )
        )
        terms = self.q.split()
        query = reduce(
            operator.and_,
            (Q(name_provider__name__company__icontains=term) for term in terms)
        )
        queryset = queryset.filter(query)
    return queryset
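The reduce(operator.and_, ...) call AND-combines one Django Q object per search term. The same folding pattern, sketched without Django using plain containment tests (the sample strings are made up):

import operator
from six.moves import reduce

terms = 'acme basic'.split()
name = 'ACME Basic Plan (Provider Co.)'
# One boolean test per term, AND-folded together, mirroring the Q-object reduction above.
print(reduce(operator.and_, (term.lower() in name.lower() for term in terms)))  # True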
Example 11: get_common_course_modes
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def get_common_course_modes(course_runs):
    """
    Fake implementation returning common course modes.

    Arguments:
        course_run_ids (Iterable[str]): Target course run IDs.

    Returns:
        set: course modes found in all given course runs
    """
    course_run_modes = [
        set(seat.get("type") for seat in course_run.get("seats"))
        for course_run in FAKE_COURSE_RUNS_RESPONSE
        if course_run.get("key") in course_runs
    ]

    return six_reduce(lambda left, right: left & right, course_run_modes)
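A quick sketch of the set-intersection fold above, with made-up seat types:

from six.moves import reduce as six_reduce

course_run_modes = [{'audit', 'verified'}, {'verified', 'professional'}, {'verified', 'audit'}]
print(six_reduce(lambda left, right: left & right, course_run_modes))  # {'verified'}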
Example 12: discriminator
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def discriminator(self, name, image, label, feat_id):
    if name in self.reuse.keys():
        reuse = self.reuse[name]
    else:
        self.reuse[name] = True
        reuse = False

    X1 = image / 127.5 - 1
    label_concat_list = [tf.tile(tf.expand_dims(tf.expand_dims(tf.expand_dims(label[:, j], -1), -1), -1),
                                 [1, self.height, self.width, 1]) for j in range(self.n_feat)]
    X2 = tf.concat(label_concat_list, -1)
    X = tf.concat([X1, X2], -1)

    with tf.variable_scope(name, reuse=reuse) as scope:
        X = self.make_conv('conv1', X, shape=[4, 4, 3 + self.n_feat, 128], strides=[1, 2, 2, 1])
        X = self.leakyRelu(X, 0.2)
        # print(name, X.get_shape())
        X = self.make_conv_bn('conv2', X, shape=[4, 4, 128, 256], strides=[1, 2, 2, 1])
        X = self.leakyRelu(X, 0.2)
        # print(name, X.get_shape())
        X = self.make_conv_bn('conv3', X, shape=[4, 4, 256, 512], strides=[1, 2, 2, 1])
        X = self.leakyRelu(X, 0.2)
        # print(name, X.get_shape())
        X = self.make_conv_bn('conv4', X, shape=[4, 4, 512, 512], strides=[1, 2, 2, 1])
        X = self.leakyRelu(X, 0.2)
        # print(name, X.get_shape())

        flat_dim = reduce(lambda x, y: x * y, X.get_shape().as_list()[1:])
        X = tf.reshape(X, [-1, flat_dim])
        X = self.make_fc('fct', X, self.n_feat)
        # X = tf.nn.sigmoid(X)
    return X[:, feat_id]
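The flat_dim line above multiplies the non-batch dimensions of the tensor shape. A minimal sketch with an assumed shape:

from six.moves import reduce

shape = [None, 8, 8, 512]  # hypothetical NHWC shape after the conv stack
flat_dim = reduce(lambda x, y: x * y, shape[1:])
print(flat_dim)  # 32768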
Example 13: apply
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def apply(self, fgraph):
    op = self.local_opt.op_key()
    if isinstance(op, (list, tuple)):
        q = reduce(list.__iadd__, map(fgraph.get_nodes, op))
    else:
        q = list(fgraph.get_nodes(op))

    def importer(node):
        if node is not current_node:
            if node.op == op:
                q.append(node)

    def pruner(node):
        if node is not current_node and node.op == op:
            try:
                q.remove(node)
            except ValueError:
                pass

    u = self.attach_updater(fgraph, importer, pruner)
    try:
        while q:
            node = q.pop()
            current_node = node
            self.process_node(fgraph, node)
    except Exception:
        self.detach_updater(fgraph, u)
        raise
    self.detach_updater(fgraph, u)
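reduce(list.__iadd__, ...) here concatenates several node lists into one (examples 14 and 15 use the same trick). A tiny self-contained sketch:

from six.moves import reduce

lists = [[1, 2], [3], [4, 5]]
# Each step extends the accumulator in place and returns it.
print(reduce(list.__iadd__, lists, []))  # [1, 2, 3, 4, 5]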
Example 14: check_chain
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def check_chain(r, *chain):
    """
    WRITEME
    """
    if isinstance(r, graph.Apply):
        r = r.outputs[0]
    return _check_chain(r, reduce(list.__iadd__, ([x, 0] for x in chain)))
Example 15: backtrack
# Required import: from six import moves [as alias]
# Or: from six.moves import reduce [as alias]
def backtrack(self, node, tasks):
    candidates = self.fetch_tracks(node.op)
    tracks = []

    def filter(node, depth):
        new_candidates = []
        for candidate in candidates:
            track, i, lopt = candidate
            if i < depth:
                pass
            elif track[i - depth] in (None, node.op):
                if i == depth:
                    tasks[node].append(lopt)
                else:
                    tracks.append(candidate)
            else:
                new_candidates.append(candidate)
        return new_candidates

    depth = 0
    nodes = [node]
    while candidates:
        for node in nodes:
            candidates = list(filter(node, depth))
        depth += 1
        _nodes = nodes
        nodes = reduce(list.__iadd__,
                       [reduce(list.__iadd__,
                               [[n for n, i in out.clients
                                 if not isinstance(n, string_types)]
                                for out in node.outputs],
                               []) for node in nodes],
                       [])
        candidates = tracks
        tracks = []