This article collects typical usage examples of the Python method bottleneck.nanmax. If you are unsure what bottleneck.nanmax does or how to use it, the curated code examples below may help. You can also explore further usage examples of the bottleneck module itself.
The following presents 10 code examples of bottleneck.nanmax, sorted by popularity by default.
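Before the examples, a minimal sketch of the behaviour they all rely on: bottleneck.nanmax returns the maximum of an array while ignoring NaN values, whereas the plain maximum propagates them. The array values below are illustrative only.

import numpy as np
import bottleneck as bn

a = np.array([1.0, np.nan, 3.0])
print(np.max(a))     # nan  -- the plain max propagates NaN
print(bn.nanmax(a))  # 3.0  -- NaN entries are ignored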
Example 1: quickMinMax
# Module to import: import bottleneck [as alias]
# Or: from bottleneck import nanmax [as alias]
def quickMinMax(self, data):
    """
    Estimate the min/max values of *data* by subsampling.
    Returns [(min, max), ...] with one item per channel
    """
    while data.size > 1e6:
        ax = np.argmax(data.shape)
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(None, None, 2)
        data = data[tuple(sl)]  # index with a tuple; indexing with a list is no longer supported by NumPy
    cax = self.axes['c']
    if cax is None:
        return [(float(nanmin(data)), float(nanmax(data)))]
    else:
        return [(float(nanmin(data.take(i, axis=cax))),
                 float(nanmax(data.take(i, axis=cax)))) for i in range(data.shape[-1])]
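As a complement to Example 1, here is a standalone sketch of the same subsampling idea outside the class and without a channel axis; quick_min_max, max_size and the random test image are illustrative names and values, not part of the original code.

import numpy as np
import bottleneck as bn

def quick_min_max(data, max_size=1e6):
    # Halve the largest axis until the array is small enough to scan cheaply.
    while data.size > max_size:
        ax = np.argmax(data.shape)
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(None, None, 2)
        data = data[tuple(sl)]
    return float(bn.nanmin(data)), float(bn.nanmax(data))

img = np.random.rand(4096, 4096)
print(quick_min_max(img))  # roughly (0.0, 1.0), estimated from a subsample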
Example 2: reduce_to_array
# Module to import: import bottleneck [as alias]
# Or: from bottleneck import nanmax [as alias]
def reduce_to_array(self, reduce_func_nb, *args, **kwargs):
    """See `vectorbt.tseries.nb.reduce_to_array_nb`.

    `**kwargs` will be passed to `vectorbt.tseries.common.TSArrayWrapper.wrap_reduced`.

    Example:
        ```python-repl
        >>> min_max_nb = njit(lambda col, a: np.array([np.nanmin(a), np.nanmax(a)]))
        >>> print(df.vbt.tseries.reduce_to_array(min_max_nb, index=['min', 'max']))
               a    b    c
        min  1.0  1.0  1.0
        max  5.0  5.0  3.0
        ```"""
    checks.assert_numba_func(reduce_func_nb)
    result = nb.reduce_to_array_nb(self.to_2d_array(), reduce_func_nb, *args)
    return self.wrap_reduced(result, **kwargs)
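For comparison, a sketch of the same column-wise [min, max] reduction done directly with bottleneck rather than a Numba-compiled reducer; the small DataFrame here is made up to mirror the docstring output above.

import numpy as np
import pandas as pd
import bottleneck as bn

df = pd.DataFrame({'a': [1.0, 5.0], 'b': [1.0, 5.0], 'c': [1.0, 3.0]})
arr = df.to_numpy()
result = pd.DataFrame(np.vstack([bn.nanmin(arr, axis=0), bn.nanmax(arr, axis=0)]),
                      index=['min', 'max'], columns=df.columns)
print(result)  # same min/max table as in the docstring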
Example 3: get_probabilities
# Module to import: import bottleneck [as alias]
# Or: from bottleneck import nanmax [as alias]
def get_probabilities(self, store=True):
    """
    Get the probabilities associated with each feature. This technique
    uses the max across probabilities to form the global probabilities.
    This method should be called after fitting the SP.

    @param store: If True, the probabilities are stored internally. Set to
    False to reduce memory.

    @return: Return the probabilities.
    """
    # Get the probabilities
    prob = np.zeros(self.ninputs)
    for i in xrange(self.ninputs):
        # Find all of the potential synapses for this input
        valid = self.syn_map == i

        # Find the max permanence across each of the potential synapses
        try:
            prob[i] = bn.nanmax(self.p[valid])
        except ValueError:
            prob[i] = 0.  # Occurs for missing connections

    # Store the probabilities
    if store: self.prob = prob

    return prob
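A minimal sketch of the pattern used above, assuming made-up permanence values: take the maximum permanence over a boolean-masked selection, and fall back to 0 when the selection is empty, because bn.nanmax (like np.nanmax) raises ValueError on zero-length input.

import numpy as np
import bottleneck as bn

p = np.array([0.2, np.nan, 0.7, 0.4])   # hypothetical permanences
syn_map = np.array([0, 0, 1, 3])        # hypothetical input index per synapse
prob = np.zeros(4)
for i in range(4):
    try:
        prob[i] = bn.nanmax(p[syn_map == i])
    except ValueError:
        prob[i] = 0.0                   # no synapses map to input i
print(prob)                             # [0.2 0.7 0.  0.4]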
Example 4: reconstruct_input
# Module to import: import bottleneck [as alias]
# Or: from bottleneck import nanmax [as alias]
def reconstruct_input(self, x=None):
    """
    Reconstruct the original input using only the stored permanences and
    the set of active columns. The maximization of probabilities approach
    is used. This method must be called after fitting the SP.

    @param x: The set of active columns or None if the SP was never fitted.
    """
    # Check input
    if x is None: x = self.column_activations
    if x is None: return None

    # Reshape x if needed
    ravel = False
    if len(x.shape) == 1:
        ravel = True
        x = x.reshape(1, x.shape[0])

    # Get the input mapping
    imap = [np.where(self.syn_map == i) for i in xrange(self.ninputs)]

    # Get the reconstruction
    x2 = np.zeros((x.shape[0], self.ninputs))
    for i, xi in enumerate(x):
        # Mask off permanences not relevant to this input
        y = self.p * xi.reshape(self.ncolumns, 1)

        # Remap permanences to input domain
        for j in xrange(self.ninputs):
            # Get the max probability across the current input space
            try:
                x2[i][j] = bn.nanmax(y[imap[j]])
            except ValueError:
                x2[i][j] = 0.  # Occurs for missing connections

            # Threshold back to {0, 1}
            x2[i][j] = 1 if x2[i][j] >= self.syn_th else 0

    return x2 if not ravel else x2.ravel()
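A tiny illustration of the final step above: once the per-input maxima are known, comparing them against the synapse threshold (syn_th, assumed to be 0.5 here) maps them back to a binary input vector.

import numpy as np

maxima = np.array([0.1, 0.6, 0.0, 0.9])  # hypothetical per-input maxima
syn_th = 0.5
print((maxima >= syn_th).astype(int))    # [0 1 0 1]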
Example 5: max
# Module to import: import bottleneck [as alias]
# Or: from bottleneck import nanmax [as alias]
def max(self, **kwargs):
    """Return max of non-NaN elements."""
    return self.wrap_reduced(nanmax(self.to_2d_array(), axis=0), **kwargs)
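What wrap_reduced receives here is a per-column maximum computed over axis 0 with NaNs ignored; the 2x3 array below is illustrative.

import numpy as np
import bottleneck as bn

arr = np.array([[1.0, np.nan, 2.0],
                [5.0, 3.0, np.nan]])
print(bn.nanmax(arr, axis=0))  # [5. 3. 2.]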
Example 6: quickMinMax
# Module to import: import bottleneck [as alias]
# Or: from bottleneck import nanmax [as alias]
def quickMinMax(self, data):
    """
    Estimate the min/max values of *data* by subsampling.
    """
    while data.size > 1e6:
        ax = np.argmax(data.shape)
        sl = [slice(None)] * data.ndim
        sl[ax] = slice(None, None, 2)
        data = data[tuple(sl)]  # index with a tuple; indexing with a list is no longer supported by NumPy
    return nanmin(data), nanmax(data)
Example 7: to8bit
# Module to import: import bottleneck [as alias]
# Or: from bottleneck import nanmax [as alias]
def to8bit(array):
    """Convert an array to 8 bit."""
    return (old_div((255. * array), nanmax(array))).astype('uint8')
Example 8: to16bit
# Module to import: import bottleneck [as alias]
# Or: from bottleneck import nanmax [as alias]
def to16bit(array):
    """Convert an array to 16 bit."""
    return (old_div((65535. * array), nanmax(array))).astype('uint16')
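Examples 7 and 8 rely on old_div from the future/past compatibility library. Below is a Python 3-only sketch of the same normalization with a made-up helper name (to_uint) and explicit NaN handling before the integer cast, since casting NaN to an integer type is undefined.

import numpy as np
import bottleneck as bn

def to_uint(array, dtype='uint8'):
    scale = np.iinfo(np.dtype(dtype)).max    # 255 for uint8, 65535 for uint16
    out = scale * array / bn.nanmax(array)   # scale so the max maps to full range
    return np.nan_to_num(out).astype(dtype)  # NaN -> 0 before the cast

print(to_uint(np.array([0.0, 0.5, 1.0])))            # [  0 127 255]
print(to_uint(np.array([0.0, 0.5, 1.0]), 'uint16'))  # [    0 32767 65535]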
Example 9: _phase3
# Module to import: import bottleneck [as alias]
# Or: from bottleneck import nanmax [as alias]
def _phase3(self):
    """
    Normal phase 3, but with tracking of the boost changes. Double-commented
    lines are new.
    """
    # Update permanences
    self.p = np.clip(self.p + (self.c_pupdate * self.y[:, 0:1] *
                               self.x[self.syn_map] - self.pdec * self.y[:, 0:1]), 0, 1)

    if self.disable_boost is False:
        # Update the boosting mechanisms
        if self.global_inhibition:
            min_dc = np.zeros(self.ncolumns)
            min_dc.fill(self.c_mdc * bn.nanmax(self.active_dc))
        else:
            min_dc = self.c_mdc * bn.nanmax(self.neighbors * self.active_dc, 1)

        ## Save pre-overlap boost info
        boost = list(self.boost)

        # Update boost
        self._update_active_duty_cycle()
        self._update_boost(min_dc)
        self._update_overlap_duty_cycle()

        ## Write out overlap boost changes
        with open(os.path.join(self.out_path, 'overlap_boost.csv'), 'ab') as f:
            writer = csv.writer(f)
            writer.writerow([self.iter, bn.nanmean(boost != self.boost)])

        # Boost permanences
        mask = self.overlap_dc < min_dc
        mask.resize(self.ncolumns, 1)
        self.p = np.clip(self.p + self.c_sboost * mask, 0, 1)

        ## Write out permanence boost info
        with open(os.path.join(self.out_path, 'permanence_boost.csv'), 'ab') \
                as f:
            writer = csv.writer(f)
            writer.writerow([self.iter, bn.nanmean(mask)])

    # Trim synapses
    if self.trim is not False:
        self.p[self.p < self.trim] = 0
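A sketch of the global-inhibition branch above, with made-up numbers: the minimum duty cycle is a fixed fraction (c_mdc) of the best column's active duty cycle, taken with bn.nanmax so untracked (NaN) columns are ignored.

import numpy as np
import bottleneck as bn

c_mdc = 0.01
active_dc = np.array([0.02, 0.10, np.nan, 0.07])
min_dc = np.full(active_dc.shape[0], c_mdc * bn.nanmax(active_dc))
print(min_dc)  # [0.001 0.001 0.001 0.001]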
Example 10: _phase3
# Module to import: import bottleneck [as alias]
# Or: from bottleneck import nanmax [as alias]
def _phase3(self):
    """
    Execute phase 3 of the SP region. This phase is used to conduct
    learning.

    Note - This should only be called after phase 2 has been called.
    """
    # Notes:
    # 1. logical_not is faster than invert
    # 2. Multiplication is faster than bitwise_and which is faster than
    #    logical_not
    # 3. Slightly different format than original definition
    #    (in the comment) to get even more speed benefits
    """
    x = self.x[self.syn_map]
    self.p = np.clip(self.p + self.y[:, 0:1] * (x * self.pinc -
        np.logical_not(x) * self.pdec), 0, 1)
    """
    self.p = np.clip(self.p + (self.c_pupdate * self.y[:, 0:1] *
                               self.x[self.syn_map] - self.pdec * self.y[:, 0:1]), 0, 1)

    if self.disable_boost is False:
        # Update the boosting mechanisms
        if self.global_inhibition:
            min_dc = np.zeros(self.ncolumns)
            min_dc.fill(self.c_mdc * bn.nanmax(self.active_dc))
        else:
            min_dc = self.c_mdc * bn.nanmax(self.neighbors *
                                            self.active_dc, 1)
        self._update_active_duty_cycle()
        self._update_boost(min_dc)
        self._update_overlap_duty_cycle()

        # Boost permanences
        mask = self.overlap_dc < min_dc
        mask.resize(self.ncolumns, 1)
        self.p = np.clip(self.p + self.c_sboost * mask, 0, 1)

    # Trim synapses
    if self.trim is not False:
        self.p[self.p < self.trim] = 0
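And a companion sketch of the local-inhibition branch, with a hypothetical 3-column adjacency mask: each column's minimum duty cycle comes from the best active duty cycle among its neighbors, via a row-wise bn.nanmax.

import numpy as np
import bottleneck as bn

c_mdc = 0.01
active_dc = np.array([0.10, 0.02, 0.07])
neighbors = np.array([[1, 1, 0],          # hypothetical adjacency mask
                      [1, 1, 1],
                      [0, 1, 1]], dtype=float)
min_dc = c_mdc * bn.nanmax(neighbors * active_dc, axis=1)
print(min_dc)  # [0.001  0.001  0.0007]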