This article collects typical usage examples of the functools.reduce method in Python. If you are wondering what exactly functools.reduce does, how to use it, or what real code that uses it looks like, the curated examples below may help. You can also explore further usage examples from the functools module that this method belongs to.
The following presents 15 code examples of functools.reduce, sorted by popularity by default.
Example 1: begin_read_samples

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def begin_read_samples(self):
    if self.cache:
        return

    self.input.begin_read_samples()

    # copy meta
    if self.output:
        self.output.meta = self.input.meta

    self.multipliers = {}
    self.rngs = {}

    def _mul(split):
        return reduce(operator.mul, map(lambda op: _get_multiplier(split, op), self.ops), 1)

    for split in SPLITS:
        self.multipliers[split] = _mul(split)
        self.cache[split] = self._calculate_num_samples(split)
        self.rngs[split] = self.cache[split] * [None]

    self.input.end_read_samples()
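The pattern to note is reduce(operator.mul, ..., 1): it folds a product over the per-op multipliers, and the initial value 1 makes an empty op list yield a neutral multiplier. A minimal, self-contained sketch of that pattern, using a hypothetical list in place of _get_multiplier(split, op) over self.ops:

from functools import reduce
import operator

# hypothetical per-op multipliers standing in for _get_multiplier(split, op) over self.ops
op_multipliers = [2, 3, 4]

# fold the product over the list; the initial value 1 keeps an empty op list safe
total = reduce(operator.mul, op_multipliers, 1)
print(total)  # 24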
Example 2: _apply_window

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def _apply_window(da, dims, window_type='hanning'):
    """Creating windows in dimensions dims."""
    if window_type not in ['hanning']:
        raise NotImplementedError("Only hanning window is supported for now.")

    numpy_win_func = getattr(np, window_type)

    if da.chunks:
        def dask_win_func(n):
            return dsar.from_delayed(
                delayed(numpy_win_func, pure=True)(n),
                (n,), float)
        win_func = dask_win_func
    else:
        win_func = numpy_win_func

    windows = [xr.DataArray(win_func(len(da[d])),
                            dims=da[d].dims, coords=da[d].coords) for d in dims]

    return da * reduce(operator.mul, windows[::-1])
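Here reduce(operator.mul, windows[::-1]) multiplies the per-dimension window arrays into one taper, relying on broadcasting. A minimal sketch of the same idea with plain NumPy (the window shapes are hypothetical):

import operator
from functools import reduce
import numpy as np

# hypothetical per-dimension Hanning windows, shaped so they broadcast against each other
win_y = np.hanning(4)[:, None]   # shape (4, 1)
win_x = np.hanning(5)[None, :]   # shape (1, 5)

# reduce multiplies the windows into a single 2-D taper, as in the final line above
taper = reduce(operator.mul, [win_y, win_x])
print(taper.shape)  # (4, 5)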
Example 3: simplify_source

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def simplify_source(s):
    s = map(lambda x: x.replace(',(1, 1),(0, 0),1,1,bias=True),#Conv2d',')'), s)
    s = map(lambda x: x.replace(',(0, 0),1,1,bias=True),#Conv2d',')'), s)
    s = map(lambda x: x.replace(',1,1,bias=True),#Conv2d',')'), s)
    s = map(lambda x: x.replace(',bias=True),#Conv2d',')'), s)
    s = map(lambda x: x.replace('),#Conv2d',')'), s)
    s = map(lambda x: x.replace(',1e-05,0.1,True),#BatchNorm2d',')'), s)
    s = map(lambda x: x.replace('),#BatchNorm2d',')'), s)
    s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#MaxPool2d',')'), s)
    s = map(lambda x: x.replace(',ceil_mode=False),#MaxPool2d',')'), s)
    s = map(lambda x: x.replace('),#MaxPool2d',')'), s)
    s = map(lambda x: x.replace(',(0, 0),ceil_mode=False),#AvgPool2d',')'), s)
    s = map(lambda x: x.replace(',ceil_mode=False),#AvgPool2d',')'), s)
    s = map(lambda x: x.replace(',bias=True)),#Linear',')), # Linear'), s)
    s = map(lambda x: x.replace(')),#Linear',')), # Linear'), s)
    s = map(lambda x: '{},\n'.format(x), s)
    s = map(lambda x: x[1:], s)
    s = reduce(lambda x, y: x + y, s)
    return s
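The final reduce(lambda x, y: x + y, s) simply concatenates all processed lines into one string. A small sketch with hypothetical lines, also showing the idiomatic ''.join equivalent:

from functools import reduce

# hypothetical source lines after the replace passes above
lines = ['Conv2d(3, 64, (3, 3)),\n', 'ReLU(),\n', 'MaxPool2d((2, 2)),\n']

# reduce concatenates pairwise: ((lines[0] + lines[1]) + lines[2])
joined = reduce(lambda x, y: x + y, lines)

# ''.join is the idiomatic, linear-time equivalent
assert joined == ''.join(lines)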
Example 4: row_norms

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def row_norms(ii, f=Ellipsis, squared=False):
    '''
    row_norms(ii) yields a potential function h(x) that calculates the vector norms of the rows of
      the matrix formed by [x[i] for i in ii] (ii is a matrix of parameter indices).
    row_norms(ii, f) yields a potential function h(x) equivalent to compose(row_norms(ii), f).
    '''
    try:
        (n, m) = ii
        # matrix shape given
        ii = np.reshape(np.arange(n*m), (n, m))
    except Exception: ii = np.asarray(ii)
    f = to_potential(f)
    if is_const_potential(f):
        q = flattest(f.c)
        q = np.sum([q[i]**2 for i in ii.T], axis=0)
        return PotentialConstant(q if squared else np.sqrt(q))
    F = reduce(lambda a, b: a + b, [part(Ellipsis, col)**2 for col in ii.T])
    F = compose(F, f)
    if not squared: F = sqrt(F)
    return F
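The reduce here sums a list of squared column terms to build the squared row norms. A minimal numeric sketch of the same accumulation with a hypothetical parameter matrix:

from functools import reduce
import numpy as np

# hypothetical 3x2 parameter matrix; each row norm is sqrt(col0**2 + col1**2)
x = np.array([[3.0, 4.0],
              [6.0, 8.0],
              [5.0, 12.0]])

# sum the squared columns pairwise, mirroring the reduce over squared parts above
sq = reduce(lambda a, b: a + b, [x[:, j] ** 2 for j in range(x.shape[1])])
print(np.sqrt(sq))  # [ 5. 10. 13.]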
Example 5: col_norms

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def col_norms(ii, f=Ellipsis, squared=False):
    '''
    col_norms(ii) yields a potential function h(x) that calculates the vector norms of the columns
      of the matrix formed by [x[i] for i in ii] (ii is a matrix of parameter indices).
    col_norms(ii, f) yields a potential function h(x) equivalent to compose(col_norms(ii), f).
    '''
    try:
        (n, m) = ii
        # matrix shape given
        ii = np.reshape(np.arange(n*m), (n, m))
    except Exception: ii = np.asarray(ii)
    f = to_potential(f)
    if is_const_potential(f):
        q = flattest(f.c)
        q = np.sum([q[i]**2 for i in ii], axis=0)
        return PotentialConstant(q if squared else np.sqrt(q))
    F = reduce(lambda a, b: a + b, [part(Ellipsis, col)**2 for col in ii])
    F = compose(F, f)
    if not squared: F = sqrt(F)
    return F
Example 6: cplus

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def cplus(*args):
    '''
    cplus(a, b...) returns the sum of all the values as a numpy array object. Like numpy's add
      function or a+b syntax, cplus will thread over the latest dimension possible.
    Additionally, cplus works correctly with sparse arrays.
    '''
    n = len(args)
    if n == 0: return np.asarray(0)
    elif n == 1: return np.asarray(args[0])
    elif n > 2: return reduce(plus, args)
    (a, b) = args
    if sps.issparse(a):
        if not sps.issparse(b):
            b = np.asarray(b)
            if len(b.shape) == 0: b = np.reshape(b, (1, 1))
    elif sps.issparse(b):
        a = np.asarray(a)
        if len(a.shape) == 0: a = np.reshape(a, (1, 1))
    else:
        a = np.asarray(a)
        b = np.asarray(b)
    return a + b
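cplus defines the two-argument case explicitly and handles more arguments by folding a binary addition over them with reduce. A minimal sketch of that dispatch pattern with a hypothetical binary operation:

from functools import reduce

def add2(a, b):
    # hypothetical binary operation; the n-ary wrapper folds it over all arguments
    return a + b

def addn(*args):
    if not args:
        return 0
    return reduce(add2, args)

print(addn(1, 2, 3, 4))  # 10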
Example 7: ctimes

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def ctimes(*args):
    '''
    ctimes(a, b...) returns the product of all the values as a numpy array object. Like numpy's
      multiply function or a*b syntax, ctimes will thread over the latest dimension possible; thus
      if a.shape is (4,2) and b.shape is 2, ctimes(a,b) is equivalent to a * b.
    Unlike numpy's multiply function, ctimes works with sparse matrices and will reify them.
    '''
    n = len(args)
    if n == 0: return np.asarray(0)
    elif n == 1: return np.asarray(args[0])
    elif n > 2: return reduce(ctimes, args)  # fold the pairwise product over all arguments
    (a, b) = args
    if sps.issparse(a): return a.multiply(b)
    elif sps.issparse(b): return b.multiply(a)
    else: return np.asarray(a) * b
Example 8: _column_type

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def _column_type(strings, has_invisible=True):
    """The least generic type all column values are convertible to.

    >>> _column_type(["1", "2"]) is _int_type
    True
    >>> _column_type(["1", "2.3"]) is _float_type
    True
    >>> _column_type(["1", "2.3", "four"]) is _text_type
    True
    >>> _column_type(["four", '\u043f\u044f\u0442\u044c']) is _text_type
    True
    >>> _column_type([None, "brux"]) is _text_type
    True
    >>> _column_type([1, 2, None]) is _int_type
    True
    >>> import datetime as dt
    >>> _column_type([dt.datetime(1991,2,19), dt.time(17,35)]) is _text_type
    True
    """
    types = [_type(s, has_invisible) for s in strings]
    return reduce(_more_generic, types, int)
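reduce folds a "pick the more generic type" function over the per-cell types, starting from int. A minimal sketch with a hypothetical widening order (tabulate's own _more_generic and type constants are not reproduced here):

from functools import reduce

# hypothetical widening order; tabulate's _more_generic is built on a similar idea
_rank = {int: 0, float: 1, str: 2}

def more_generic(a, b):
    return a if _rank[a] >= _rank[b] else b

cell_types = [int, float, int]
print(reduce(more_generic, cell_types, int))  # <class 'float'>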
Example 9: feed_forward_categorical_fun

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def feed_forward_categorical_fun(action_space, config, observations):
    """Feed-forward categorical."""
    if not isinstance(action_space, gym.spaces.Discrete):
        raise ValueError("Expecting discrete action space.")
    flat_observations = tf.reshape(observations, [
        tf.shape(observations)[0], tf.shape(observations)[1],
        functools.reduce(operator.mul, observations.shape.as_list()[2:], 1)])
    with tf.variable_scope("network_parameters"):
        with tf.variable_scope("policy"):
            x = flat_observations
            for size in config.policy_layers:
                x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
            logits = tf.contrib.layers.fully_connected(x, action_space.n,
                                                       activation_fn=None)
        with tf.variable_scope("value"):
            x = flat_observations
            for size in config.value_layers:
                x = tf.contrib.layers.fully_connected(x, size, tf.nn.relu)
            value = tf.contrib.layers.fully_connected(x, 1, None)[..., 0]
    policy = tf.contrib.distributions.Categorical(logits=logits)
    return NetworkOutput(policy, value, lambda a: a)
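functools.reduce(operator.mul, shape[2:], 1) computes the product of the trailing dimensions so the observations can be reshaped to [batch, time, features]. A NumPy-only sketch of that flattening, with a hypothetical observation shape:

import functools
import operator
import numpy as np

# hypothetical observation batch: [batch, time, height, width, channels]
obs = np.zeros((8, 16, 32, 32, 3))

# product of the trailing dims; the initial 1 handles inputs that are already flat
flat_dim = functools.reduce(operator.mul, obs.shape[2:], 1)
flat_obs = obs.reshape(obs.shape[0], obs.shape[1], flat_dim)
print(flat_obs.shape)  # (8, 16, 3072)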
Example 10: dense_bitwise_categorical_fun

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def dense_bitwise_categorical_fun(action_space, config, observations):
    """Dense network with bitwise input and categorical output."""
    del config
    obs_shape = common_layers.shape_list(observations)
    x = tf.reshape(observations, [-1] + obs_shape[2:])
    with tf.variable_scope("network_parameters"):
        with tf.variable_scope("dense_bitwise"):
            x = discretization.int_to_bit_embed(x, 8, 32)
            flat_x = tf.reshape(
                x, [obs_shape[0], obs_shape[1],
                    functools.reduce(operator.mul, x.shape.as_list()[1:], 1)])
            x = tf.contrib.layers.fully_connected(flat_x, 256, tf.nn.relu)
            x = tf.contrib.layers.fully_connected(flat_x, 128, tf.nn.relu)
            logits = tf.contrib.layers.fully_connected(x, action_space.n,
                                                       activation_fn=None)
            value = tf.contrib.layers.fully_connected(
                x, 1, activation_fn=None)[..., 0]
    policy = tf.contrib.distributions.Categorical(logits=logits)
    return NetworkOutput(policy, value, lambda a: a)
Example 11: gather_indices_2d

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def gather_indices_2d(x, block_shape, block_stride):
    """Getting gather indices."""
    # making an identity matrix kernel
    kernel = tf.eye(block_shape[0] * block_shape[1])
    kernel = reshape_range(kernel, 0, 1, [block_shape[0], block_shape[1], 1])
    # making indices [1, h, w, 1] to apply convs
    x_shape = common_layers.shape_list(x)
    indices = tf.range(x_shape[2] * x_shape[3])
    indices = tf.reshape(indices, [1, x_shape[2], x_shape[3], 1])
    indices = tf.nn.conv2d(
        tf.cast(indices, tf.float32),
        kernel,
        strides=[1, block_stride[0], block_stride[1], 1],
        padding="VALID")
    # making indices [num_blocks, dim] to gather
    dims = common_layers.shape_list(indices)[:3]
    if all([isinstance(dim, int) for dim in dims]):
        num_blocks = functools.reduce(operator.mul, dims, 1)
    else:
        num_blocks = tf.reduce_prod(dims)
    indices = tf.reshape(indices, [num_blocks, -1])
    return tf.cast(indices, tf.int32)
Example 12: setup_critic_optimizer

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def setup_critic_optimizer(self):
    logger.info('setting up critic optimizer')
    normalized_critic_target_tf = tf.clip_by_value(normalize(self.critic_target, self.ret_rms), self.return_range[0], self.return_range[1])
    self.critic_loss = tf.reduce_mean(tf.square(self.normalized_critic_tf - normalized_critic_target_tf))
    if self.critic_l2_reg > 0.:
        critic_reg_vars = [var for var in self.critic.trainable_vars if 'kernel' in var.name and 'output' not in var.name]
        for var in critic_reg_vars:
            logger.info(' regularizing: {}'.format(var.name))
        logger.info(' applying l2 regularization with {}'.format(self.critic_l2_reg))
        critic_reg = tc.layers.apply_regularization(
            tc.layers.l2_regularizer(self.critic_l2_reg),
            weights_list=critic_reg_vars
        )
        self.critic_loss += critic_reg
    critic_shapes = [var.get_shape().as_list() for var in self.critic.trainable_vars]
    critic_nb_params = sum([reduce(lambda x, y: x * y, shape) for shape in critic_shapes])
    logger.info(' critic shapes: {}'.format(critic_shapes))
    logger.info(' critic params: {}'.format(critic_nb_params))
    self.critic_grads = U.flatgrad(self.critic_loss, self.critic.trainable_vars, clip_norm=self.clip_norm)
    self.critic_optimizer = MpiAdam(var_list=self.critic.trainable_vars,
                                    beta1=0.9, beta2=0.999, epsilon=1e-08)
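The parameter count multiplies each variable's shape together with reduce and then sums over variables. A minimal sketch with hypothetical critic shapes:

from functools import reduce

# hypothetical trainable-variable shapes for a small critic network
critic_shapes = [[64, 64], [64], [64, 1], [1]]

# elements per variable via reduce, then a plain sum over variables
# (a scalar variable with shape [] would need reduce(..., shape, 1))
nb_params = sum(reduce(lambda x, y: x * y, shape) for shape in critic_shapes)
print(nb_params)  # 4225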
Example 13: normalize

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def normalize(md):
    '''Normalize anchors.'''
    def on_match(link):
        desc = link.group(1)
        old = link.group(2)
        href = (link.group(2)
                .lower()
                .replace('%20', '-')
                .replace(" ", "-")
                .replace("~", "")
                .replace(".", ""))
        old, new = f'[{desc}]({old})', f'[{desc}]({href})'
        print(old, new)
        return old, new

    replacers = set((on_match(x) for x in re.finditer(r'\[([^\]\[]*)\]\((#[^\)]*)\)', md)))
    return ft.reduce(lambda md, x: md.replace(x[0], x[1]), replacers, md)
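ft.reduce threads the document through a chain of str.replace calls, one per (old, new) anchor pair, with the original markdown as the initial accumulator. A minimal sketch with a hypothetical replacement pair:

import functools as ft

md = "See [My Section](#My Section) and again [My Section](#My Section)."
# hypothetical (old, new) pairs, like the ones built by on_match above
replacers = {("[My Section](#My Section)", "[My Section](#my-section)")}

# each step rewrites the running document; md is the initial accumulator
print(ft.reduce(lambda text, pair: text.replace(pair[0], pair[1]), replacers, md))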
Example 14: extract

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def extract(iid, bindir):
    print('Extracting binaries......')
    query = '''select filename from object_to_image where iid=''' + iid + ''' and score>0 and (mime='application/x-executable; charset=binary' or mime='application/x-object; charset=binary' or mime='application/x-sharedlib; charset=binary') order by score DESC;'''
    wanted = dbquery(query)
    wanted = reduce((lambda a, b: a + b), wanted)
    wanted = map((lambda a: '.' + a), wanted)
    wanted = reduce((lambda a, b: a + ' ' + b), wanted)
    cmd = 'tar xf ' + bindir + '/../../../../images/' + iid + '.tar.gz -C ' + bindir + ' ' + wanted
    subprocess.run([cmd], shell=True)

    print('Extracting library links......')
    query = '''select filename from object_to_image where iid=''' + iid + ''' and regular_file='f';'''
    wanted = dbquery(query)
    wanted = reduce((lambda a, b: a + b), wanted)
    wanted = filter((lambda a: 'lib' in a), wanted)
    wanted = map((lambda a: '.' + a), wanted)
    wanted = reduce((lambda a, b: a + ' ' + b), wanted)
    cmd = 'tar xf ' + bindir + '/../../../../images/' + iid + '.tar.gz -C ' + bindir + ' ' + wanted
    subprocess.run([cmd], shell=True)
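reduce appears twice here: first to flatten the one-column query rows into a single sequence, then to join the filenames with spaces (where ' '.join is the usual idiom). A minimal sketch with hypothetical rows in place of dbquery results:

from functools import reduce

# hypothetical rows as a database query might return them: one tuple per row
rows = [('/bin/busybox',), ('/lib/libc.so',), ('/sbin/init',)]

# first reduce flattens the rows into a single tuple of filenames
names = reduce(lambda a, b: a + b, rows)

# second reduce joins with spaces; ' '.join(...) is the idiomatic equivalent
args = reduce(lambda a, b: a + ' ' + b, ('.' + n for n in names))
print(args)  # ./bin/busybox ./lib/libc.so ./sbin/init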
Example 15: _get_r

# Required import: import functools [as alias]
# Or: from functools import reduce [as alias]
def _get_r(s, snesc):
    # R^dag \tilde{S} R = S
    # R = S^{-1/2} [S^{-1/2}\tilde{S}S^{-1/2}]^{-1/2} S^{1/2}
    w, v = numpy.linalg.eigh(s)
    idx = w > 1e-14
    v = v[:, idx]
    w_sqrt = numpy.sqrt(w[idx])
    w_invsqrt = 1 / w_sqrt

    # eigenvectors of S as the new basis
    snesc = reduce(numpy.dot, (v.conj().T, snesc, v))
    r_mid = numpy.einsum('i,ij,j->ij', w_invsqrt, snesc, w_invsqrt)
    w1, v1 = numpy.linalg.eigh(r_mid)
    idx1 = w1 > 1e-14
    v1 = v1[:, idx1]
    r_mid = numpy.dot(v1/numpy.sqrt(w1[idx1]), v1.conj().T)
    r = numpy.einsum('i,ij,j->ij', w_invsqrt, r_mid, w_sqrt)

    # Back transform to AO basis
    r = reduce(numpy.dot, (v, r, v.conj().T))
    return r
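reduce(numpy.dot, (a, b, c)) is a compact way to write the chained matrix product a @ b @ c, used here for the basis transforms. A minimal sketch with hypothetical small matrices:

from functools import reduce
import numpy

# hypothetical small matrices standing in for v and snesc in the transform above
v = numpy.array([[1.0, 0.0], [0.0, 2.0]])
s = numpy.array([[2.0, 1.0], [1.0, 3.0]])

# reduce(numpy.dot, (a, b, c)) multiplies left to right: (a @ b) @ c
out = reduce(numpy.dot, (v.conj().T, s, v))
assert numpy.allclose(out, v.conj().T @ s @ v)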