本文整理汇总了Python中numpy.clip函数的典型用法代码示例。如果您正苦于以下问题:Python clip函数的具体用法?Python clip怎么用?Python clip使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了clip函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: lowess
def lowess(x, y, f=2. / 3., iter=3):
    """lowess(x, y, f=2./3., iter=3) -> yest

    Lowess smoother: robust locally weighted regression.

    The lowess function fits a nonparametric regression curve to a
    scatterplot.  The arrays x and y contain an equal number of elements;
    each pair (x[i], y[i]) defines a data point in the scatterplot.  The
    function returns the estimated (smooth) values of y.

    The smoothing span is given by f.  A larger value for f will result in
    a smoother curve.  The number of robustifying iterations is given by
    iter.  The function will run faster with a smaller number of iterations.

    x and y should be numpy float arrays of equal length.  The return value
    is also a numpy float array of that length.

    e.g.
    >>> import numpy
    >>> x = numpy.array([4, 4, 7, 7, 8, 9, 10, 10, 10, 11, 11, 12, 12, 12,
    ...                  12, 13, 13, 13, 13, 14, 14, 14, 14, 15, 15, 15, 16, 16,
    ...                  17, 17, 17, 18, 18, 18, 18, 19, 19, 19, 20, 20, 20, 20,
    ...                  20, 22, 23, 24, 24, 24, 24, 25], float)
    >>> y = numpy.array([2, 10, 4, 22, 16, 10, 18, 26, 34, 17, 28, 14, 20, 24,
    ...                  28, 26, 34, 34, 46, 26, 36, 60, 80, 20, 26, 54, 32, 40,
    ...                  32, 40, 50, 42, 56, 76, 84, 36, 46, 68, 32, 48, 52, 56,
    ...                  64, 66, 54, 70, 92, 93, 120, 85], float)
    >>> result = lowess(x, y)
    >>> len(result)
    50
    >>> print("[%0.2f, ..., %0.2f]" % (result[0], result[-1]))
    [4.85, ..., 84.98]
    """
    n = len(x)
    r = int(numpy.ceil(f * n))
    # h[i] is the distance from x[i] to its r-th nearest neighbour: the
    # local bandwidth of the weighting kernel around point i.
    h = [numpy.sort(numpy.abs(x - x[i]))[r] for i in range(n)]
    # Tricube weights; clip so that points beyond the bandwidth get 0.
    w = numpy.clip(numpy.abs((x[:, None] - x[None, :]) / h), 0.0, 1.0)
    w = (1.0 - w ** 3) ** 3
    yest = numpy.zeros(n)
    delta = numpy.ones(n)  # robustifying weights, updated each iteration
    for _ in range(iter):
        for i in range(n):
            weights = delta * w[:, i]
            weights_mul_x = weights * x
            # Weighted linear least squares: solve the 2x2 normal equations
            # [A11 A12; A21 A22] [beta1; beta2] = [b1; b2] by Cramer's rule.
            b1 = numpy.dot(weights, y)
            b2 = numpy.dot(weights_mul_x, y)
            A11 = numpy.sum(weights)
            A12 = numpy.sum(weights_mul_x)
            A21 = A12
            A22 = numpy.dot(weights_mul_x, x)
            determinant = A11 * A22 - A12 * A21
            beta1 = (A22 * b1 - A12 * b2) / determinant
            beta2 = (A11 * b2 - A21 * b1) / determinant
            yest[i] = beta1 + beta2 * x[i]
        residuals = y - yest
        s = numpy.median(numpy.abs(residuals))
        if s == 0:
            # Perfect fit: the robustifying weights would be 0/0 (NaN), and
            # no further reweighting can change the estimate.  Stop early.
            break
        # Bisquare weights: down-weight points with large residuals.
        delta = numpy.clip(residuals / (6.0 * s), -1.0, 1.0)
        delta = (1.0 - delta ** 2) ** 2
    return yest
示例2: mouseMoveEvent
def mouseMoveEvent(self, ev):
    """Qt mouse-move handler for this graphics view.

    Emits ``sigSceneMouseMoved`` with the scene-space cursor position and,
    while a drag is in progress, either scales the view (right button) or
    pans it (left/middle button), emitting ``sigRangeChanged`` after each
    change.
    """
    # First event of a drag: seed the last-position tracker so the first
    # delta is zero instead of a jump from an undefined origin.
    if self.lastMousePos is None:
        self.lastMousePos = Point(ev.pos())
    delta = Point(ev.pos() - self.lastMousePos)
    self.lastMousePos = Point(ev.pos())

    # Let the base class run its own move handling before ours.
    QtGui.QGraphicsView.mouseMoveEvent(self, ev)
    if not self.mouseEnabled:
        return
    self.sigSceneMouseMoved.emit(self.mapToScene(ev.pos()))
    if self.clickAccepted:  ## Ignore event if an item in the scene has already claimed it.
        return

    if ev.buttons() == QtCore.Qt.RightButton:
        # Right drag = zoom.  Clamp the per-event delta to +/-50 px so one
        # wild event cannot produce an extreme scale factor; the y axis is
        # negated so dragging up zooms in.
        delta = Point(np.clip(delta[0], -50, 50), np.clip(-delta[1], -50, 50))
        scale = 1.01 ** delta  # exponential zoom: 1% per pixel of drag
        self.scale(scale[0], scale[1], center=self.mapToScene(self.mousePressPos))
        self.sigRangeChanged.emit(self, self.range)
    elif ev.buttons() in [QtCore.Qt.MidButton, QtCore.Qt.LeftButton]:  ## Allow panning by left or mid button.
        # Convert the pixel delta to scene units and translate opposite to
        # the drag so the content follows the cursor.
        px = self.pixelSize()
        tr = -delta * px
        self.translate(tr[0], tr[1])
        self.sigRangeChanged.emit(self, self.range)
示例3: _compute_normalized_data
def _compute_normalized_data(self, data_array):
    """
    Apply `data_func`, then linearly scale to the unit interval, and
    then apply `unit_func`.

    Parameters
    ----------
    data_array : array
        Raw data values to normalize.  Returned array has dtype float32
        (unless `unit_func` changes it).
    """
    # FIXME: Deal with nans?
    if self._dirty:
        self._recalculate()
    if self.data_func is not None:
        # When a transform is applied, the normalization bounds are the
        # transformed ones (presumably cached by _recalculate — confirm).
        data_array = self.data_func(data_array)
        low, high = self.transformed_bounds
    else:
        low, high = self.range.low, self.range.high
    range_diff = high - low

    # Linearly transform the values to the unit interval.
    if range_diff == 0.0 or isinf(range_diff):
        # Handle null range, or infinite range (which can happen during
        # initialization before range is connected to a data source).
        # Map everything to the midpoint of the unit interval.
        norm_data = 0.5*ones_like(data_array)
    else:
        norm_data = empty(data_array.shape, dtype='float32')
        norm_data[:] = data_array
        norm_data -= low
        norm_data /= range_diff
        # In-place clip: values outside [low, high] saturate at 0 or 1.
        clip(norm_data, 0.0, 1.0, norm_data)
    if self.unit_func is not None:
        norm_data = self.unit_func(norm_data)
    return norm_data
示例4: numpy_run
def numpy_run(self):
    """Forward propagation from batch on CPU only.

    Runs the base class forward pass, then applies the "strict RELU"
    activation in place: output values are clamped to [0, 1e30], i.e.
    negatives become 0 (the huge upper bound is effectively +inf).
    """
    super(All2AllStrictRELU, self).numpy_run()
    # map_write: presumably syncs the output buffer to host memory for
    # in-place modification — confirm against the Vector/Unit API.
    self.output.map_write()
    mem = self.output.mem
    # Third positional argument is `out`, so the clip is in place.
    numpy.clip(mem, 0.0, 1.0e30, mem)
示例5: isotonic_regression
def isotonic_regression(y, sample_weight=None, y_min=None, y_max=None,
                        increasing=True):
    """Solve the isotonic regression model::

        min sum w[i] (y[i] - y_[i]) ** 2

        subject to y_min = y_[1] <= y_[2] ... <= y_[n] = y_max

    where:
        - y[i] are inputs (real numbers)
        - y_[i] are fitted
        - w[i] are optional strictly positive weights (default to 1.0)

    Read more in the :ref:`User Guide <isotonic>`.

    Parameters
    ----------
    y : iterable of floating-point values
        The data.

    sample_weight : iterable of floating-point values, optional, default: None
        Weights on each point of the regression.
        If None, weight is set to 1 (equal weights).

    y_min : optional, default: None
        If not None, set the lowest value of the fit to y_min.

    y_max : optional, default: None
        If not None, set the highest value of the fit to y_max.

    increasing : boolean, optional, default: True
        Whether to compute ``y_`` is increasing (if set to True) or decreasing
        (if set to False)

    Returns
    -------
    y_ : list of floating-point values
        Isotonic fit of y.

    References
    ----------
    "Active set algorithms for isotonic regression; A unifying framework"
    by Michael J. Best and Nilotpal Chakravarti, section 3.
    """
    # A decreasing fit is an increasing fit on the reversed sequence; the
    # same slice applied twice restores the original order on the way out.
    direction = np.s_[:] if increasing else np.s_[::-1]

    y = np.array(y[direction], dtype=np.float64)
    if sample_weight is None:
        sample_weight = np.ones(len(y), dtype=np.float64)
    else:
        sample_weight = np.array(sample_weight[direction], dtype=np.float64)

    # Pool-adjacent-violators solver; modifies `y` in place.
    _inplace_contiguous_isotonic_regression(y, sample_weight)

    if not (y_min is None and y_max is None):
        # Older versions of np.clip don't accept None as a bound, so
        # substitute infinities for any missing bound.
        lower = -np.inf if y_min is None else y_min
        upper = np.inf if y_max is None else y_max
        np.clip(y, lower, upper, y)
    return y[direction]
示例6: lossFun
def lossFun(inputs, targets, hprev):
    """
    inputs,targets are both list of integers.
    hprev is Hx1 array of initial hidden state
    returns the loss, gradients on model parameters, and last hidden state

    Reads the module-level RNN parameters Wxh, Whh, Why, bh, by and the
    constant vocab_size; none of them are modified.
    """
    xs, hs, ys, ps = {}, {}, {}, {}
    hs[-1] = np.copy(hprev)
    loss = 0
    # forward pass
    for t in range(len(inputs)):
        xs[t] = np.zeros((vocab_size, 1))  # encode in 1-of-k representation
        xs[t][inputs[t]] = 1
        hs[t] = np.tanh(np.dot(Wxh, xs[t]) + np.dot(Whh, hs[t - 1]) + bh)  # hidden state
        ys[t] = np.dot(Why, hs[t]) + by  # unnormalized log probabilities for next chars
        ps[t] = np.exp(ys[t]) / np.sum(np.exp(ys[t]))  # probabilities for next chars
        loss += -np.log(ps[t][targets[t], 0])  # softmax (cross-entropy loss)
    # backward pass: compute gradients going backwards
    dWxh, dWhh, dWhy = np.zeros_like(Wxh), np.zeros_like(Whh), np.zeros_like(Why)
    dbh, dby = np.zeros_like(bh), np.zeros_like(by)
    dhnext = np.zeros_like(hs[0])
    for t in reversed(range(len(inputs))):
        dy = np.copy(ps[t])
        dy[targets[t]] -= 1  # backprop into y
        dWhy += np.dot(dy, hs[t].T)
        dby += dy
        dh = np.dot(Why.T, dy) + dhnext  # backprop into h
        dhraw = (1 - hs[t] * hs[t]) * dh  # backprop through tanh nonlinearity
        dbh += dhraw
        dWxh += np.dot(dhraw, xs[t].T)
        dWhh += np.dot(dhraw, hs[t - 1].T)
        dhnext = np.dot(Whh.T, dhraw)
    for dparam in [dWxh, dWhh, dWhy, dbh, dby]:
        np.clip(dparam, -5, 5, out=dparam)  # clip to mitigate exploding gradients
    return loss, dWxh, dWhh, dWhy, dbh, dby, hs[len(inputs)-1]
示例7: _raisePermanenceToThreshold
def _raisePermanenceToThreshold(self, perm, mask):
    """
    This method ensures that each column has enough connections to input bits
    to allow it to become active. Since a column must have at least
    'self._stimulusThreshold' overlaps in order to be considered during the
    inhibition phase, columns without such minimal number of connections, even
    if all the input bits they are connected to turn on, have no chance of
    obtaining the minimum threshold. For such columns, the permanence values
    are increased until the minimum number of connections are formed.

    Parameters:
    ----------------------------
    @param perm: An array of permanence values for a column. The array is
                 "dense", i.e. it contains an entry for each input bit, even
                 if the permanence value is 0.  Modified in place.
    @param mask: the indices of the columns whose permanences need to be
                 raised.
    """
    # If even raising every masked permanence could not produce enough
    # connections, the loop below would never terminate — fail loudly.
    if len(mask) < self._stimulusThreshold:
        raise Exception("This is likely due to a " +
                        "value of stimulusThreshold that is too large relative " +
                        "to the input size. [len(mask) < self._stimulusThreshold]")

    # Clamp into the legal permanence range before raising.
    numpy.clip(perm, self._synPermMin, self._synPermMax, out=perm)
    while True:
        numConnected = numpy.nonzero(perm > self._synPermConnected)[0].size
        if numConnected >= self._stimulusThreshold:
            return
        # NOTE(review): the increment is not re-clipped inside the loop, so
        # masked permanences may temporarily exceed _synPermMax — confirm
        # whether callers re-clip afterwards.
        perm[mask] += self._synPermBelowStimulusInc
示例8: K
def K(self, X, X2=None,alpha=None,variance=None):
    """
    Computes the covariance matrix cov(X[i,:],X2[j,:]).

    Args:
        X: Matrix where each row is a point.
        X2: Matrix where each row is a point.
        alpha: It's the scaled alpha.
        Variance: Sigma hyperparameter.

    Returns the RBF kernel matrix variance * exp(-0.5 * squared distance)
    between the alpha-scaled rows of X and X2 (X2 defaults to X).
    """
    if alpha is None:
        alpha = self.alpha
    if variance is None:
        variance = self.variance

    # Scale the inputs once, then compute pairwise squared distances via
    # the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b.
    left = X * alpha / self.scaleAlpha
    right = left if X2 is None else X2 * alpha / self.scaleAlpha

    left_sq = np.sum(np.square(left), 1)
    right_sq = np.sum(np.square(right), 1)
    sqdist = left_sq[:, None] + right_sq[None, :] - 2. * np.dot(left, right.T)
    # Round-off can make tiny distances negative; clamp before exponentiating.
    sqdist = np.clip(sqdist, 0, np.inf)
    return variance * np.exp(-0.5 * sqdist)
示例9: test_special_sparse_dot
def test_special_sparse_dot():
    # Test the function that computes np.dot(W, H), only where X is non zero.
    n_samples, n_features, n_components = 10, 5, 3
    rng = np.random.mtrand.RandomState(42)

    # Non-negative dense X and its sparse counterpart.
    X = rng.randn(n_samples, n_features)
    np.clip(X, 0, None, out=X)
    X_csr = sp.csr_matrix(X)

    W = np.abs(rng.randn(n_samples, n_components))
    H = np.abs(rng.randn(n_components, n_features))

    WH_safe = nmf._special_sparse_dot(W, H, X_csr)
    WH = nmf._special_sparse_dot(W, H, X)

    # Both results must agree at every stored (nonzero) position of X_csr.
    rows, cols = X_csr.nonzero()
    sparse_vals = np.asarray(WH_safe[rows, cols]).ravel()
    assert_array_almost_equal(sparse_vals, WH[rows, cols], decimal=10)

    # WH_safe must share X_csr's exact sparsity structure.
    assert_array_equal(WH_safe.indices, X_csr.indices)
    assert_array_equal(WH_safe.indptr, X_csr.indptr)
    assert_array_equal(WH_safe.shape, X_csr.shape)
示例10: _clipToSafeRange
def _clipToSafeRange(min_, max_, isLog):
    """Return (min_, max_) clamped to the float32-safe interval.

    A log-scaled axis additionally requires strictly positive values, so
    its lower limit is the smallest positive float32 instead of the most
    negative safe value.
    """
    lowerLimit = FLOAT32_MINPOS if isLog else FLOAT32_SAFE_MIN
    clippedMin = numpy.clip(min_, lowerLimit, FLOAT32_SAFE_MAX)
    clippedMax = numpy.clip(max_, lowerLimit, FLOAT32_SAFE_MAX)
    assert clippedMin < clippedMax
    return clippedMin, clippedMax
示例11: combine_images
def combine_images(imgs, alphas):
    """Combine multiple rgb images in one rgb image.

    Each image is weighted by the matching entry of ``alphas``; the
    weighted sum is clipped to [0, 255] and returned as a uint8 array
    with the shape of ``imgs[0]``.
    """
    image_f = numpy.zeros(imgs[0].shape, dtype='float')
    # Accumulate in float to avoid uint8 overflow during the weighted sum.
    for img, alpha in zip(imgs, alphas):
        image_f += alpha * img
    # In-place clip into the valid 8-bit range before the dtype conversion.
    numpy.clip(image_f, 0., 255., image_f)
    return numpy.array(image_f, dtype='uint8')
示例12: _compute_disk_overlap
def _compute_disk_overlap(d, r1, r2):
"""
Compute surface overlap between two disks of radii ``r1`` and ``r2``,
with centers separated by a distance ``d``.
Parameters
----------
d : float
Distance between centers.
r1 : float
Radius of the first disk.
r2 : float
Radius of the second disk.
Returns
-------
vol: float
Volume of the overlap between the two disks.
"""
ratio1 = (d ** 2 + r1 ** 2 - r2 ** 2) / (2 * d * r1)
ratio1 = np.clip(ratio1, -1, 1)
acos1 = math.acos(ratio1)
ratio2 = (d ** 2 + r2 ** 2 - r1 ** 2) / (2 * d * r2)
ratio2 = np.clip(ratio2, -1, 1)
acos2 = math.acos(ratio2)
a = -d + r2 + r1
b = d - r2 + r1
c = d + r2 - r1
d = d + r2 + r1
area = (r1 ** 2 * acos1 + r2 ** 2 * acos2 -
0.5 * sqrt(abs(a * b * c * d)))
return area / (math.pi * (min(r1, r2) ** 2))
示例13: interpgrid
def interpgrid(a, xi, yi):
    """Fast 2D, linear interpolation on an integer grid.

    ``xi``/``yi`` may be arrays (vectorized path) or scalars; the value of
    ``a`` is bilinearly interpolated at (xi, yi).  In the scalar path a
    masked result raises TerminateTrajectory.
    """
    Ny, Nx = np.shape(a)
    if isinstance(xi, np.ndarray):
        # np.int was removed in NumPy 1.24; the builtin int truncates the
        # same way for the non-negative grid coordinates used here.
        x = xi.astype(int)
        y = yi.astype(int)
        # Check that xn, yn don't exceed max index
        xn = np.clip(x + 1, 0, Nx - 1)
        yn = np.clip(y + 1, 0, Ny - 1)
    else:
        x = int(xi)
        y = int(yi)
        # conditional is faster than clipping for integers
        if x == (Nx - 2):
            xn = x
        else:
            xn = x + 1
        if y == (Ny - 2):
            yn = y
        else:
            yn = y + 1

    # Four surrounding grid values, blended by the fractional offsets.
    a00 = a[y, x]
    a01 = a[y, xn]
    a10 = a[yn, x]
    a11 = a[yn, xn]
    xt = xi - x
    yt = yi - y
    a0 = a00 * (1 - xt) + a01 * xt
    a1 = a10 * (1 - xt) + a11 * xt
    ai = a0 * (1 - yt) + a1 * yt

    if not isinstance(xi, np.ndarray):
        if np.ma.is_masked(ai):
            raise TerminateTrajectory
    return ai
示例14: rmsprop_one_step
def rmsprop_one_step(self, param_name, index, grad_args, decay = 0.9, momentum = 0, learning_rate_adapt = 0.05,
                     learning_rate_min = 1e-6, learning_rate_max = 10):
    """Perform one RMSProp update (with Nesterov momentum) on one parameter.

    RMSPROP: Tieleman, T. and Hinton, G. (2012), Lecture 6.5 - rmsprop,
    COURSERA: Neural Networks for Machine Learning.
    Implementation based on
    https://github.com/BRML/climin/blob/master/climin/rmsprop.py

    Side effects: updates self.wrt[param_name] (the shared parameter),
    self.moving_mean_squared, self.learning_rates and self.param_updates.
    """
    # Nesterov momentum: first make a step according to the momentum, then
    # calculate the gradient at the new point.
    step1 = self.param_updates[param_name] * momentum
    self.wrt[param_name].set_value(self.wrt[param_name].get_value() + step1)
    grad = self.get_grad(*grad_args)

    # Exponential moving average of the squared gradient.
    self.moving_mean_squared[param_name] = (decay * self.moving_mean_squared[param_name] + (1 - decay) * grad ** 2)
    step2 = self.learning_rates[param_name] * grad / (self.moving_mean_squared[param_name] + 1e-8)**0.5

    # DEBUG: clamp the hyperparameter steps only.
    # BUG FIX: the original test was `param_name == 'lhyp' or 'ls'`, which
    # is always truthy ('ls' is a non-empty string), so EVERY parameter was
    # being clipped.  Clip only the two intended parameters.
    if param_name in ('lhyp', 'ls'):
        step2 = np.clip(step2, -0.1, 0.1)

    self.wrt[param_name].set_value(self.wrt[param_name].get_value() + step2)
    #self.params[param_name] += step2
    step = step1 + step2

    # Step rate adaption. If the current step and the momentum agree, we
    # slightly increase the step rate for that dimension.
    if learning_rate_adapt:
        # This code might look weird, but it makes it work with both numpy
        # and gnumpy.
        step_non_negative = step > 0
        step_before_non_negative = self.param_updates[param_name] > 0
        agree = (step_non_negative == step_before_non_negative) * 1.  # yields 0 or 1
        adapt = 1 + agree * learning_rate_adapt * 2 - learning_rate_adapt
        self.learning_rates[param_name] *= adapt
        self.learning_rates[param_name] = np.clip(self.learning_rates[param_name], learning_rate_min, learning_rate_max)

    self.param_updates[param_name] = step
示例15: updateParticles
def updateParticles(self):
    """Advance the particle filter by one time step.

    Each particle row is [x, y, w, h, vx, vy, weight].  Positions are
    advanced by the velocities, Gaussian noise is added to size and
    velocity, and every column is clipped into its valid range.
    """
    # Update positions with velocity
    self.particles[:, 0:2] += self.particles[:, 4:6]
    # Add noise to w,h (skipped when the size sigma is effectively zero)
    if self.SIGMA_size > 0.0001:
        self.particles[:, 2:4] += random.normal(0, self.SIGMA_size, (self.particles.shape[0], 2))
    # Add noise to velocities and clip
    self.particles[:, 4:6] += random.normal(
        0, self.SIGMA_velocity, (self.particles.shape[0], 2))
    # Per-column bounds, broadcast across all particle rows by np.clip.
    lb = [0, 0, 1, 1, -MAX_velocity, -MAX_velocity, 0]
    ub = [self.bounds[1],
          self.bounds[0],
          self.bounds[1],
          self.bounds[0],
          MAX_velocity,
          MAX_velocity,
          1]
    np.clip(self.particles, lb, ub, self.particles)
    # Sanity check: the clip above should make this unreachable.
    # (Converted from a Python 2 print statement so the module parses on
    # Python 3 as well.)
    if np.max(self.particles[:, 0]) > self.bounds[1]:
        print("Not clipped")
    self.iterations += 1