This article collects typical usage examples of Python's numpy.common_type function. If you have been struggling with questions such as: what exactly does common_type do, how is it called, and what does real-world usage look like, then congratulations: the curated code examples below may help.
Shown below are 15 code examples of common_type, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
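Before the project examples, here is a minimal, self-contained sketch of what numpy.common_type computes (standard NumPy behavior; the printed results assume a recent release): it inspects the dtypes of its array arguments and returns the smallest floating-point or complex scalar type that can represent all of them, with integer input treated as requiring double precision.

import numpy as np

# Integer arrays are promoted to double precision.
print(np.common_type(np.arange(3, dtype=np.int32)))        # <class 'numpy.float64'>
# Pure float32 input stays single precision.
print(np.common_type(np.arange(3, dtype=np.float32)))      # <class 'numpy.float32'>
# Mixing float32 with complex64 yields complex64.
print(np.common_type(np.arange(3, dtype=np.float32),
                     np.zeros(2, dtype=np.complex64)))     # <class 'numpy.complex64'>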
Example 1: test_df_arith_2d_array_collike_broadcasts
def test_df_arith_2d_array_collike_broadcasts(self,
                                              all_arithmetic_operators):
    # GH#23000
    opname = all_arithmetic_operators

    arr = np.arange(6).reshape(3, 2)
    df = pd.DataFrame(arr, columns=[True, False], index=['A', 'B', 'C'])

    collike = arr[:, [1]]  # shape --> (nrows, 1)
    assert collike.shape == (df.shape[0], 1)

    exvals = {True: getattr(df[True], opname)(collike.squeeze()),
              False: getattr(df[False], opname)(collike.squeeze())}

    dtype = None
    if opname in ['__rmod__', '__rfloordiv__']:
        # Series ops may return mixed int/float dtypes in cases where
        # DataFrame op will return all-float. So we upcast `expected`
        dtype = np.common_type(*[x.values for x in exvals.values()])

    expected = pd.DataFrame(exvals, columns=df.columns, index=df.index,
                            dtype=dtype)

    result = getattr(df, opname)(collike)
    tm.assert_frame_equal(result, expected)
Example 2: minmax_normalize
def minmax_normalize(samples, out=None):
    """Min-max normalization of a function evaluated on the unit sphere.

    Normalizes samples to ``(samples - min(samples)) / (max(samples) -
    min(samples))`` for each unit sphere.

    Parameters
    ----------
    samples : ndarray (..., N)
        N samples on a unit sphere for each point, stored along the last axis
        of the array.
    out : ndarray (..., N), optional
        An array to store the normalized samples.

    Returns
    -------
    out : ndarray, (..., N)
        Normalized samples.

    """
    if out is None:
        dtype = np.common_type(np.empty(0, 'float32'), samples)
        out = np.array(samples, dtype=dtype, copy=True)
    else:
        out[:] = samples

    sample_mins = np.min(samples, -1)[..., None]
    sample_maxes = np.max(samples, -1)[..., None]
    out -= sample_mins
    out /= (sample_maxes - sample_mins)
    return out
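A quick usage sketch (assuming the minmax_normalize function above is in scope): the np.empty(0, 'float32') sentinel ensures integer input is promoted to a floating dtype before the in-place division, rather than being truncated.

import numpy as np

samples = np.array([[1, 2, 3, 4]], dtype=np.int64)
# common_type(float32 sentinel, int64 samples) -> float64
print(np.common_type(np.empty(0, 'float32'), samples))  # <class 'numpy.float64'>
print(minmax_normalize(samples))  # [[0.         0.33333333 0.66666667 1.        ]]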
Example 3: poly_outer_product
def poly_outer_product(left, right):
    left, right = numpy.asarray(left), numpy.asarray(right)
    nleft, nright = left.ndim - 1, right.ndim - 1
    pshape = (left.shape[1:] if not nright
              else right.shape[1:] if not nleft
              else (max(left.shape[1:]) + max(right.shape[1:]) - 1,) * (nleft + nright))
    outer = numpy.zeros((left.shape[0], right.shape[0], *pshape),
                        dtype=numpy.common_type(left, right))
    a = slice(None)
    outer[(a, a, *map(slice, left.shape[1:] + right.shape[1:]))] = \
        left[(a, None) + (a,)*nleft + (None,)*nright] * right[(None, a) + (None,)*nleft + (a,)*nright]
    return types.frozenarray(outer.reshape(left.shape[0] * right.shape[0], *pshape), copy=False)
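poly_outer_product depends on nutils (types.frozenarray), so it is not runnable stand-alone, but the dtype logic can be shown in isolation. A sketch with hypothetical coefficient arrays, illustrating how common_type picks a dtype wide enough for the product:

import numpy

left = numpy.ones((2, 3))                          # float64 coefficients
right = numpy.ones((2, 3), dtype=numpy.complex64)  # single-precision complex
# float64 precision combined with a complex kind -> complex128
print(numpy.common_type(left, right))              # <class 'numpy.complex128'>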
Example 4: _get_shared_type_and_fill_value
def _get_shared_type_and_fill_value(data1, data2, fill1=None, fill2=None):
    """
    Figure out a shared type that can be used when adding or subtracting
    the two data sets given (accounting for possible overflow).
    Also returns a fill value that can be used.
    """
    # figure out the shared type
    type_to_return = data1.dtype
    changed_type = False
    if data1.dtype is not data2.dtype:
        type_to_return = np.common_type(data1, data2)
        changed_type = True

    # make sure we're using a type that can represent negative values
    if type_to_return in DiffInfoObject.POSITIVE_UPCASTS:
        type_to_return = DiffInfoObject.POSITIVE_UPCASTS[type_to_return]
        changed_type = True

    # upcast the type if we think we'll need more space for subtracting
    if type_to_return in DiffInfoObject.DATATYPE_UPCASTS:
        type_to_return = DiffInfoObject.DATATYPE_UPCASTS[type_to_return]
        changed_type = True

    if changed_type:
        LOG.debug('To prevent overflow, difference data will be upcast from ('
                  + str(data1.dtype) + '/' + str(data2.dtype) + ') to: '
                  + str(type_to_return))

    # figure out the fill value
    fill_value_to_return = None

    # if both of the old fill values exist and are the same, use them
    if (fill1 is not None) and (fill1 == fill2):
        fill_value_to_return = fill1
        if changed_type:
            fill_value_to_return = type_to_return(fill_value_to_return)
    else:
        # if we're looking at float or complex data, use a nan
        # (np.float/np.complex/np.int aliases were removed in NumPy 1.24;
        # the abstract dtypes are used here instead)
        if (np.issubdtype(type_to_return, np.floating) or
                np.issubdtype(type_to_return, np.complexfloating)):
            fill_value_to_return = np.nan
        # if we're looking at signed int data, use the minimum value
        elif np.issubdtype(type_to_return, np.signedinteger):
            fill_value_to_return = np.iinfo(type_to_return).min
        # if we're looking at unsigned data, use the maximum value
        elif ((type_to_return is np.uint8) or
              (type_to_return is np.uint16) or
              (type_to_return is np.uint32) or
              (type_to_return is np.uint64)):
            fill_value_to_return = np.iinfo(type_to_return).max

    return type_to_return, fill_value_to_return
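The "upcast before differencing" idea hinges on the fact that common_type treats any integer input as needing double precision. A minimal sketch in plain NumPy, independent of the DiffInfoObject machinery above:

import numpy as np

d1 = np.zeros(4, dtype=np.int16)
d2 = np.zeros(4, dtype=np.float32)
# Integer arrays count as double precision, so the result is float64,
# which comfortably holds any int16/float32 difference.
print(np.common_type(d1, d2))   # <class 'numpy.float64'>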
Example 5: _normalize_scalar_dtype
def _normalize_scalar_dtype(s, arrs):
    # cast python scalars to an appropriate numpy dtype
    if isinstance(s, (int, float, complex)):
        ndarrs = [_a for _a in arrs if hasattr(_a, 'dtype')]
        flt_arrs = [_a for _a in ndarrs if _a.dtype.kind in 'fc']
        int_arrs = [_a for _a in ndarrs if _a.dtype.kind in 'i']
        if flt_arrs and isinstance(s, (int, float, complex)):
            s = np.asarray(s).astype(np.common_type(*flt_arrs))
        elif int_arrs and isinstance(s, (int, )):
            s = np.asarray(s).astype(max([_a.dtype for _a in int_arrs]))
    return s
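Usage sketch (assuming _normalize_scalar_dtype as defined above): a Python scalar mixed with float32 arrays is cast down to float32, so later arithmetic does not silently upcast everything to float64.

import numpy as np

arrs = [np.zeros(3, dtype=np.float32), np.zeros(3, dtype=np.int16)]
s = _normalize_scalar_dtype(2.5, arrs)
print(s.dtype)   # float32 -- the common type of the float/complex arrays only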
Example 6: as_series
def as_series(alist, trim=True):
    """Return arguments as a list of 1-d arrays.

    The returned arrays will always have dtype double, complex double, or
    object.

    Parameters
    ----------
    [a1, a2, ...] : list of array_like
        The arrays must have no more than one dimension when converted.
    trim : boolean
        When True, trailing zeros are removed from the inputs.
        When False, the inputs are passed through intact.

    Returns
    -------
    [a1, a2, ...] : list of 1-d arrays
        A copy of the input data as a list of 1-d arrays.

    Raises
    ------
    ValueError
        Raised when an input can not be converted to a 1-d array or the
        resulting array is empty.

    """
    arrays = [np.array(a, ndmin=1, copy=False) for a in alist]
    if min([a.size for a in arrays]) == 0:
        raise ValueError("Coefficient array is empty")
    if max([a.ndim for a in arrays]) > 1:
        raise ValueError("Coefficient array is not 1-d")
    if trim:
        arrays = [trimseq(a) for a in arrays]

    if any([a.dtype == np.dtype(object) for a in arrays]):
        ret = []
        for a in arrays:
            if a.dtype != np.dtype(object):
                tmp = np.empty(len(a), dtype=np.dtype(object))
                tmp[:] = a[:]
                ret.append(tmp)
            else:
                ret.append(a.copy())
    else:
        try:
            dtype = np.common_type(*arrays)
        except Exception:
            raise ValueError("Coefficient arrays have no common type")
        ret = [np.array(a, copy=True, dtype=dtype) for a in arrays]
    return ret
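Usage sketch (as_series and its trimseq helper live in numpy.polynomial.polyutils, so the public function can be imported directly): mixed integer and complex coefficient arrays come back as a uniform complex-double list.

import numpy as np
from numpy.polynomial.polyutils import as_series

a = np.array([1, 2, 3])       # integer -> double
b = np.array([1 + 0j])        # complex double
out = as_series([a, b])
print([c.dtype for c in out])   # [dtype('complex128'), dtype('complex128')]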
Example 7: nulp_diff
def nulp_diff(x, y, dtype=None):
    """For each item in x and y, return the number of representable floating
    point values between them.

    Parameters
    ----------
    x : array_like
        first input array
    y : array_like
        second input array
    dtype : dtype, optional
        Data type to convert x and y to before comparing.

    Returns
    -------
    nulp : array_like
        number of representable floating point numbers between each item in x
        and y.

    Examples
    --------
    # By definition, epsilon is the smallest number such that 1 + eps != 1,
    # so there should be exactly one ULP between 1 and 1 + eps
    >>> nulp_diff(1, 1 + np.finfo(np.float64).eps)
    1.0
    """
    import numpy as np

    if dtype:
        x = np.array(x, dtype=dtype)
        y = np.array(y, dtype=dtype)
    else:
        x = np.array(x)
        y = np.array(y)

    t = np.common_type(x, y)
    if np.iscomplexobj(x) or np.iscomplexobj(y):
        raise NotImplementedError("_nulp not implemented for complex array")

    x = np.array(x, dtype=t)
    y = np.array(y, dtype=t)

    if not x.shape == y.shape:
        raise ValueError("x and y do not have the same shape: %s - %s" %
                         (x.shape, y.shape))

    def _diff(rx, ry, vdt):
        diff = np.array(rx - ry, dtype=vdt)
        return np.abs(diff)

    rx = integer_repr(x)
    ry = integer_repr(y)
    return _diff(rx, ry, t)
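nulp_diff and its integer_repr helper live in NumPy's testing utilities; the public entry point built on the same logic is numpy.testing.assert_array_max_ulp, which can be used directly:

import numpy as np
from numpy.testing import assert_array_max_ulp

eps = np.finfo(np.float64).eps
# 1 and 1 + eps are exactly one representable double apart.
assert_array_max_ulp(np.float64(1.0), np.float64(1.0) + eps, maxulp=1)
print("1 and 1+eps differ by at most 1 ULP")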
Example 8: matrixmultiply
def matrixmultiply(a, b):
    # assumes the test module's namespace: newaxis, zeros and common_type
    # come from numpy, assert_ from numpy.testing
    if len(b.shape) == 1:
        b_is_vector = True
        b = b[:, newaxis]
    else:
        b_is_vector = False
    assert_(a.shape[1] == b.shape[0])
    c = zeros((a.shape[0], b.shape[1]), common_type(a, b))
    for i in range(a.shape[0]):
        for j in range(b.shape[1]):
            s = 0
            for k in range(a.shape[1]):
                s += a[i, k] * b[k, j]
            c[i, j] = s
    if b_is_vector:
        c = c.reshape((a.shape[0],))
    return c
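Usage sketch for the reference implementation above (hypothetical inputs; paste after the definition in one script so the imports cover the names it assumes): mixed single/double inputs produce a double result, mirroring np.dot.

import numpy as np
from numpy import newaxis, zeros, common_type
from numpy.testing import assert_

a = np.arange(6, dtype=np.float32).reshape(2, 3)
b = np.ones(3, dtype=np.float64)
c = matrixmultiply(a, b)    # requires matrixmultiply from above
print(c.dtype, c)           # float64 [ 3. 12.]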
Example 9: dot_generalized
def dot_generalized(a, b):
    # assumes: import itertools; import numpy as np;
    # from numpy import asarray, dot
    a = asarray(a)
    if a.ndim >= 3:
        if a.ndim == b.ndim:
            # matrix x matrix
            new_shape = a.shape[:-1] + b.shape[-1:]
        elif a.ndim == b.ndim + 1:
            # matrix x vector
            new_shape = a.shape[:-1]
        else:
            raise ValueError("Not implemented...")
        r = np.empty(new_shape, dtype=np.common_type(a, b))
        for c in itertools.product(*map(range, a.shape[:-2])):
            r[c] = dot(a[c], b[c])
        return r
    else:
        return dot(a, b)
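Usage sketch (dot_generalized as above; a and b are hypothetical random stacks): the result buffer is allocated with the common type before the per-slice dot loop.

import itertools
import numpy as np
from numpy import asarray, dot

a = np.random.rand(4, 2, 3)                     # float64 stack
b = np.random.rand(4, 3, 5).astype(np.float32)  # float32 stack
r = dot_generalized(a, b)                       # requires the definition above
print(r.shape, r.dtype)                         # (4, 2, 5) float64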
Example 10: vq
def vq(obs, code_book):
    """ Vector Quantization: assign feature sets to codes in a code book.

    Vector quantization determines which code in the code book best represents
    an observation of a target. The features of each observation are compared
    to each code in the book, and assigned the one closest to it. The
    observations are contained in the obs array. These features should be
    "whitened," or normalized by the standard deviation of all the features
    before being quantized. The code book can be created using the kmeans
    algorithm or something similar.

    :Parameters:
        obs : ndarray
            Each row of the array is an observation. The columns are the
            "features" seen during each observation. The features must be
            whitened first using the whiten function or something equivalent.
        code_book : ndarray
            The code book is usually generated using the kmeans algorithm.
            Each row of the array holds a different code, and the columns are
            the features of the code.

            ::

                            #   f0   f1   f2   f3
                code_book = [[  1.,  2.,  3.,  4.],  #c0
                             [  1.,  2.,  3.,  4.],  #c1
                             [  1.,  2.,  3.,  4.]]  #c2

    :Returns:
        code : ndarray
            If obs is a NxM array, then a length N array is returned that holds
            the selected code book index for each observation.
        dist : ndarray
            The distortion (distance) between the observation and its nearest
            code.

    Notes
    -----
    This currently forces 32-bit math precision for speed. Anyone know
    of a situation where this undermines the accuracy of the algorithm?

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.cluster.vq import vq
    >>> code_book = array([[1., 1., 1.],
    ...                    [2., 2., 2.]])
    >>> features = array([[1.9, 2.3, 1.7],
    ...                   [1.5, 2.5, 2.2],
    ...                   [0.8, 0.6, 1.7]])
    >>> vq(features, code_book)
    (array([1, 1, 0], 'i'), array([0.43588989, 0.73484692, 0.83066239]))

    """
    try:
        # Python 2 era implicit relative import of the C extension;
        # under Python 3 this would be written "from . import _vq"
        import _vq
        ct = common_type(obs, code_book)
        c_obs = obs.astype(ct)
        c_code_book = code_book.astype(ct)
        if ct is single:
            results = _vq.vq(c_obs, c_code_book)
        elif ct is double:
            results = _vq.vq(c_obs, c_code_book)
        else:
            results = py_vq(obs, code_book)
    except ImportError:
        results = py_vq(obs, code_book)
    return results
Example 11: diags
def diags(diagonals, offsets, shape=None, format=None, dtype=None):
    """
    Note: copied from scipy.sparse.construct

    Construct a sparse matrix from diagonals.

    .. versionadded:: 0.11

    Parameters
    ----------
    diagonals : sequence of array_like
        Sequence of arrays containing the matrix diagonals,
        corresponding to `offsets`.
    offsets : sequence of int
        Diagonals to set:
          - k = 0  the main diagonal
          - k > 0  the k-th upper diagonal
          - k < 0  the k-th lower diagonal
    shape : tuple of int, optional
        Shape of the result. If omitted, a square matrix large enough
        to contain the diagonals is returned.
    format : {"dia", "csr", "csc", "lil", ...}, optional
        Matrix format of the result. By default (format=None) an
        appropriate sparse matrix format is returned. This choice is
        subject to change.
    dtype : dtype, optional
        Data type of the matrix.

    See Also
    --------
    spdiags : construct matrix from diagonals

    Notes
    -----
    This function differs from `spdiags` in the way it handles
    off-diagonals.

    The result from `diags` is the sparse equivalent of::

        np.diag(diagonals[0], offsets[0])
        + ...
        + np.diag(diagonals[k], offsets[k])

    Repeated diagonal offsets are disallowed.

    Examples
    --------
    >>> diagonals = [[1, 2, 3, 4], [1, 2, 3], [1, 2]]
    >>> diags(diagonals, [0, -1, 2]).todense()
    matrix([[1., 0., 1., 0.],
            [1., 2., 0., 2.],
            [0., 2., 3., 0.],
            [0., 0., 3., 4.]])

    Broadcasting of scalars is supported (but shape needs to be
    specified):

    >>> diags([1, -2, 1], [-1, 0, 1], shape=(4, 4)).todense()
    matrix([[-2.,  1.,  0.,  0.],
            [ 1., -2.,  1.,  0.],
            [ 0.,  1., -2.,  1.],
            [ 0.,  0.,  1., -2.]])

    If only one diagonal is wanted (as in `numpy.diag`), the following
    works as well:

    >>> diags([1, 2, 3], 1).todense()
    matrix([[ 0.,  1.,  0.,  0.],
            [ 0.,  0.,  2.,  0.],
            [ 0.,  0.,  0.,  3.],
            [ 0.,  0.,  0.,  0.]])
    """
    # if offsets is not a sequence, assume that there's only one diagonal
    try:
        iter(offsets)
    except TypeError:
        # now check that there's actually only one diagonal
        try:
            iter(diagonals[0])
        except TypeError:
            diagonals = [np.atleast_1d(diagonals)]
        else:
            raise ValueError("Different number of diagonals and offsets.")
    else:
        diagonals = list(map(np.atleast_1d, diagonals))
        offsets = np.atleast_1d(offsets)

    # Basic check
    if len(diagonals) != len(offsets):
        raise ValueError("Different number of diagonals and offsets.")

    # Determine shape, if omitted
    if shape is None:
        m = len(diagonals[0]) + abs(int(offsets[0]))
        shape = (m, m)

    # Determine data type, if omitted
    if dtype is None:
        dtype = np.common_type(*diagonals)
    # ... (the rest of this function is omitted in the source listing) ...
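The dtype-inference step can be tried on its own; a minimal sketch with hypothetical diagonals:

import numpy as np

diagonals = [np.array([1, 2, 3], dtype=np.int64), np.array([4.0, 5.0])]
# One integer and one float diagonal -> the matrix is built as float64.
print(np.common_type(*diagonals))   # <class 'numpy.float64'>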
Example 12: vq
def vq(obs, code_book):
    """
    Assign codes from a code book to observations.

    Assigns a code from a code book to each observation. Each
    observation vector in the 'M' by 'N' `obs` array is compared with the
    centroids in the code book and assigned the code of the closest
    centroid.

    The features in `obs` should have unit variance, which can be
    achieved by passing them through the whiten function. The code
    book can be created with the k-means algorithm or a different
    encoding algorithm.

    Parameters
    ----------
    obs : ndarray
        Each row of the 'N' x 'M' array is an observation. The columns are
        the "features" seen during each observation. The features must be
        whitened first using the whiten function or something equivalent.
    code_book : ndarray
        The code book is usually generated using the k-means algorithm.
        Each row of the array holds a different code, and the columns are
        the features of the code.

        >>> #              f0   f1   f2   f3
        >>> code_book = [
        ...             [  1.,  2.,  3.,  4.],  #c0
        ...             [  1.,  2.,  3.,  4.],  #c1
        ...             [  1.,  2.,  3.,  4.]]  #c2

    Returns
    -------
    code : ndarray
        A length N array holding the code book index for each observation.
    dist : ndarray
        The distortion (distance) between the observation and its nearest
        code.

    Notes
    -----
    This currently forces 32-bit math precision for speed. Anyone know
    of a situation where this undermines the accuracy of the algorithm?

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.cluster.vq import vq
    >>> code_book = array([[1., 1., 1.],
    ...                    [2., 2., 2.]])
    >>> features = array([[1.9, 2.3, 1.7],
    ...                   [1.5, 2.5, 2.2],
    ...                   [0.8, 0.6, 1.7]])
    >>> vq(features, code_book)
    (array([1, 1, 0], 'i'), array([0.43588989, 0.73484692, 0.83066239]))

    """
    try:
        from . import _vq
        ct = common_type(obs, code_book)
        c_obs = obs.astype(ct)
        c_code_book = code_book.astype(ct)
        if ct is single:
            results = _vq.vq(c_obs, c_code_book)
        elif ct is double:
            results = _vq.vq(c_obs, c_code_book)
        else:
            results = py_vq(obs, code_book)
    except ImportError:
        results = py_vq(obs, code_book)
    return results
Example 13: prepare_for_fortran
def prepare_for_fortran(overwrite, *args):
    """Convert arrays to Fortran format.

    This function takes a number of array objects in `args` and converts them
    to a format that can be directly passed to a Fortran function (Fortran
    contiguous NumPy array). If the arrays have different data types, the
    converted arrays are cast to a common compatible data type (one of NumPy's
    `float32`, `float64`, `complex64`, `complex128` data types).

    If `overwrite` is ``False``, a NumPy array that would already be in the
    correct format (Fortran contiguous, right data type) is nevertheless
    copied. (Hence, overwrite = True does not imply that acting on the
    converted array in the return values will overwrite the original array in
    all cases -- it only does so if the original array was already in the
    correct format; all other conversions require copying. In fact, that's the
    same behavior as in SciPy, it's just not explicitly stated there.)

    If an argument is ``None``, it is just passed through and not used to
    determine the proper data type.

    `prepare_for_fortran` returns a character indicating the proper
    data type in LAPACK style ('s', 'd', 'c', 'z') and a list of
    properly converted arrays.
    """
    # Make sure we have NumPy arrays
    mats = [None] * len(args)
    for i in range(len(args)):
        if args[i] is not None:
            arr = np.asanyarray(args[i])
            if not np.issubdtype(arr.dtype, np.number):
                raise ValueError("Argument cannot be interpreted "
                                 "as a numeric array")
            mats[i] = (arr, arr is not args[i] or overwrite)
        else:
            mats[i] = (None, True)

    # First figure out common dtype
    # Note: The return type of common_type is guaranteed to be a floating
    # point kind.
    dtype = np.common_type(*[arr for arr, ovwrt in mats if arr is not None])

    if dtype == np.float32:
        lapacktype = 's'
    elif dtype == np.float64:
        lapacktype = 'd'
    elif dtype == np.complex64:
        lapacktype = 'c'
    elif dtype == np.complex128:
        lapacktype = 'z'
    else:
        raise AssertionError("Unexpected data type from common_type")

    ret = [lapacktype]
    for npmat, ovwrt in mats:
        # Now make sure that the array is contiguous, and copy if necessary.
        if npmat is not None:
            if npmat.ndim == 2:
                if not npmat.flags["F_CONTIGUOUS"]:
                    npmat = np.asfortranarray(npmat, dtype=dtype)
                elif npmat.dtype != dtype:
                    npmat = npmat.astype(dtype)
                elif not ovwrt:
                    # ugly here: copy always makes a C-ordered array, and
                    # there is no way to tell it to make a Fortran-ordered one
                    npmat = np.asfortranarray(npmat.copy())
            elif npmat.ndim == 1:
                if not npmat.flags["C_CONTIGUOUS"]:
                    npmat = np.ascontiguousarray(npmat, dtype=dtype)
                elif npmat.dtype != dtype:
                    npmat = npmat.astype(dtype)
                elif not ovwrt:
                    npmat = np.asfortranarray(npmat.copy())
            else:
                raise ValueError("Dimensionality of array is not 1 or 2")

        ret.append(npmat)

    return tuple(ret)
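Usage sketch (assuming prepare_for_fortran as defined above): a float32 matrix mixed with a complex64 vector yields LAPACK type 'c' and Fortran-contiguous complex64 output.

import numpy as np

a = np.eye(3, dtype=np.float32)        # C-contiguous float32 matrix
v = np.zeros(3, dtype=np.complex64)    # complex64 vector
lapacktype, a_f, v_f = prepare_for_fortran(False, a, v)
print(lapacktype)                            # c
print(a_f.dtype, a_f.flags['F_CONTIGUOUS'])  # complex64 True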
Example 14: arma_acovf
def arma_acovf(ar, ma, nobs=10, sigma2=1, dtype=None):
    """
    Theoretical autocovariance function of an ARMA process

    Parameters
    ----------
    ar : array_like, 1d
        coefficients of the autoregressive lag polynomial, including zero lag
    ma : array_like, 1d
        coefficients of the moving-average lag polynomial, including zero lag
    nobs : int
        number of terms (lags plus zero lag) to include in returned acovf
    sigma2 : float
        Variance of the innovation term.
    dtype : dtype, optional
        Data type of the returned array; inferred from the inputs if omitted.

    Returns
    -------
    acovf : array
        autocovariance of the ARMA process given by ar, ma

    See Also
    --------
    arma_acf
    acovf

    References
    ----------
    Brockwell, Peter J., and Richard A. Davis. 2009.
    Time Series: Theory and Methods. 2nd ed. 1991.
    New York, NY: Springer.
    """
    if dtype is None:
        dtype = np.common_type(np.array(ar), np.array(ma), np.array(sigma2))

    p = len(ar) - 1
    q = len(ma) - 1
    m = max(p, q) + 1

    if sigma2.real < 0:
        raise ValueError('Must have positive innovation variance.')

    # Short-circuit for trivial corner-case
    if p == q == 0:
        out = np.zeros(nobs, dtype=dtype)
        out[0] = sigma2
        return out

    # Get the moving average representation coefficients that we need
    ma_coeffs = arma2ma(ar, ma, lags=m)

    # Solve for the first m autocovariances via the linear system
    # described by (BD, eq. 3.3.8)
    A = np.zeros((m, m), dtype=dtype)
    b = np.zeros((m, 1), dtype=dtype)
    # We need a zero-right-padded version of ar params
    tmp_ar = np.zeros(m, dtype=dtype)
    tmp_ar[:p + 1] = ar
    for k in range(m):
        A[k, :(k + 1)] = tmp_ar[:(k + 1)][::-1]
        A[k, 1:m - k] += tmp_ar[(k + 1):m]
        b[k] = sigma2 * np.dot(ma[k:q + 1], ma_coeffs[:max((q + 1 - k), 0)])
    acovf = np.zeros(max(nobs, m), dtype=dtype)
    acovf[:m] = np.linalg.solve(A, b)[:, 0]

    # Iteratively apply (BD, eq. 3.3.9) to solve for remaining autocovariances
    if nobs > m:
        zi = signal.lfiltic([1], ar, acovf[:m][::-1])
        acovf[m:] = signal.lfilter([1], ar, np.zeros(nobs - m, dtype=dtype),
                                   zi=zi)[0]

    return acovf[:nobs]
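The dtype inference mirrors the inputs; a minimal sketch in plain NumPy (no statsmodels needed) of the line that picks it:

import numpy as np

ar = np.array([1.0, -0.5])    # AR polynomial, zero lag included
ma = np.array([1.0, 0.4])     # MA polynomial, zero lag included
sigma2 = 1 + 0j               # a complex variance forces a complex acovf
print(np.common_type(np.array(ar), np.array(ma), np.array(sigma2)))
# <class 'numpy.complex128'>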
Example 15: vq
def vq(obs, code_book, check_finite=True):
    """
    Assign codes from a code book to observations.

    Assigns a code from a code book to each observation. Each
    observation vector in the 'M' by 'N' `obs` array is compared with the
    centroids in the code book and assigned the code of the closest
    centroid.

    The features in `obs` should have unit variance, which can be
    achieved by passing them through the whiten function. The code
    book can be created with the k-means algorithm or a different
    encoding algorithm.

    Parameters
    ----------
    obs : ndarray
        Each row of the 'M' x 'N' array is an observation. The columns are
        the "features" seen during each observation. The features must be
        whitened first using the whiten function or something equivalent.
    code_book : ndarray
        The code book is usually generated using the k-means algorithm.
        Each row of the array holds a different code, and the columns are
        the features of the code.

        >>> #              f0   f1   f2   f3
        >>> code_book = [
        ...             [  1.,  2.,  3.,  4.],  #c0
        ...             [  1.,  2.,  3.,  4.],  #c1
        ...             [  1.,  2.,  3.,  4.]]  #c2
    check_finite : bool, optional
        Whether to check that the input matrices contain only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.
        Default: True

    Returns
    -------
    code : ndarray
        A length M array holding the code book index for each observation.
    dist : ndarray
        The distortion (distance) between the observation and its nearest
        code.

    Examples
    --------
    >>> from numpy import array
    >>> from scipy.cluster.vq import vq
    >>> code_book = array([[1., 1., 1.],
    ...                    [2., 2., 2.]])
    >>> features = array([[1.9, 2.3, 1.7],
    ...                   [1.5, 2.5, 2.2],
    ...                   [0.8, 0.6, 1.7]])
    >>> vq(features, code_book)
    (array([1, 1, 0], 'i'), array([0.43588989, 0.73484692, 0.83066239]))

    """
    obs = _asarray_validated(obs, check_finite=check_finite)
    code_book = _asarray_validated(code_book, check_finite=check_finite)
    ct = common_type(obs, code_book)

    c_obs = obs.astype(ct, copy=False)

    if code_book.dtype != ct:
        c_code_book = code_book.astype(ct)
    else:
        c_code_book = code_book

    if ct in (single, double):
        results = _vq.vq(c_obs, c_code_book)
    else:
        results = py_vq(obs, code_book)
    return results
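A closing usage sketch against the real SciPy API: integer observations are silently upcast via common_type before being handed to the C kernel.

import numpy as np
from scipy.cluster.vq import vq

obs = np.array([[1, 2], [3, 4]])             # integer observations
code_book = np.array([[1., 2.], [3., 4.]])   # float64 code book
codes, dist = vq(obs, code_book)             # obs upcast to float64 internally
print(codes, dist)                           # [0 1] [0. 0.]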