This article collects typical usage examples of Python's numpy.min_scalar_type function, gathered from real projects. If you are unsure what min_scalar_type does, or how and where to use it, the curated code examples below should help.
The sections that follow present 15 code examples of min_scalar_type, sorted by popularity.
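As a quick orientation before the examples: numpy.min_scalar_type(value) returns the smallest NumPy dtype that can hold value without overflow, demoting floats where possible and passing non-numeric scalars through. A minimal sketch of typical return values:

import numpy as np

np.min_scalar_type(255)      # dtype('uint8')   -- smallest unsigned int that fits
np.min_scalar_type(256)      # dtype('uint16')
np.min_scalar_type(-1)       # dtype('int8')    -- negative values need a signed kind
np.min_scalar_type(3.1)      # dtype('float16') -- floats are demoted when possible
np.min_scalar_type('hello')  # dtype('<U5')     -- non-numeric scalars pass through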
Example 1: _convert_value

def _convert_value(self, value):
    """Convert a string into a numpy object (scalar or array).

    The value is most of the time a string, but it can also be a Python
    object, e.g. when it comes from the TIFF decoder.
    """
    if isinstance(value, list):
        # convert to a numpy array
        return numpy.array(value)
    if isinstance(value, dict):
        # convert to a numpy associative array
        key_dtype = numpy.min_scalar_type(list(value.keys()))
        value_dtype = numpy.min_scalar_type(list(value.values()))
        associative_type = [('key', key_dtype), ('value', value_dtype)]
        assert key_dtype.kind != "O" and value_dtype.kind != "O"
        return numpy.array(list(value.items()), dtype=associative_type)
    if isinstance(value, numbers.Number):
        dtype = numpy.min_scalar_type(value)
        assert dtype.kind != "O"
        return dtype.type(value)
    if isinstance(value, six.binary_type):
        try:
            value = value.decode('utf-8')
        except UnicodeDecodeError:
            return numpy.void(value)
    if " " in value:
        result = self._convert_list(value)
    else:
        result = self._convert_scalar_value(value)
    return result
Example 2: shuffle_group

def shuffle_group(df, col, stage, k, npartitions):
    """Split a dataframe into groups.

    Each row's group is determined by its final partition and by the
    current stage of the shuffle.
    """
    if col == '_partitions':
        ind = df[col]
    else:
        ind = hash_pandas_object(df[col], index=False)
    c = ind._values
    typ = np.min_scalar_type(npartitions * 2)
    # unbox each scalar into the smallest NumPy dtype that holds its value
    npartitions, k, stage = [np.array(x, dtype=np.min_scalar_type(x))[()]
                             for x in [npartitions, k, stage]]
    c = np.mod(c, npartitions).astype(typ, copy=False)
    c = np.floor_divide(c, k ** stage, out=c)
    c = np.mod(c, k, out=c)
    indexer, locations = groupsort_indexer(c.astype(np.int64), k)
    df2 = df.take(indexer)
    locations = locations.cumsum()
    parts = [df2.iloc[a:b] for a, b in zip(locations[:-1], locations[1:])]
    return dict(zip(range(k), parts))
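The dtype juggling above keeps the intermediate array c small; the factor of 2 in min_scalar_type(npartitions * 2) appears to leave headroom above the largest partition id. A sketch of the selection it performs, with made-up sizes:

import numpy as np

npartitions = 300
typ = np.min_scalar_type(npartitions * 2)                # uint16: 600 does not fit in uint8
c = np.array([1234567, 42, 987654321], dtype=np.uint64)  # hash values
c = np.mod(c, npartitions).astype(typ, copy=False)       # every value now < npartitions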
Example 3: __init__

def __init__(self, Np):
    if type(Np) is not int:
        raise ValueError("expecting integer for Np")
    self._Np = Np
    self._Ns = Np + 1
    self._dtype = _np.min_scalar_type(-self.Ns)
    self._basis = _np.arange(self.Ns, dtype=_np.min_scalar_type(self.Ns))
    self._operators = ("available operators for ho_basis:" +
                       "\n\tI: identity " +
                       "\n\t+: raising operator" +
                       "\n\t-: lowering operator" +
                       "\n\tn: number operator")
Example 4: histograma

def histograma(imagen):
    ajuste = 0
    minimo_valor = np.min(imagen)
    if minimo_valor < 0:
        ajuste = minimo_valor
    rango = np.max(imagen).astype(np.int64) - minimo_valor
    # smallest dtype able to hold both the value range and the (negative) offset
    ajuste_dtype = np.promote_types(np.min_scalar_type(rango),
                                    np.min_scalar_type(minimo_valor))
    if imagen.dtype != ajuste_dtype:
        imagen = imagen.astype(ajuste_dtype)
    imagen = imagen - ajuste                # shift so the lowest value is 0
    hist = np.bincount(imagen.ravel())      # bincount requires non-negative ints
    valores_centrales = np.arange(len(hist)) + ajuste
    idx = np.nonzero(hist)[0][0]            # drop leading empty bins
    return hist[idx:], valores_centrales[idx:]
Example 5: _offset_array

def _offset_array(arr, low_boundary, high_boundary):
    """Offset the array so that its lowest value is 0 if any value is negative."""
    if low_boundary < 0:
        offset = low_boundary
        dyn_range = high_boundary - low_boundary
        # get smallest dtype that can hold both minimum and offset maximum
        offset_dtype = np.promote_types(np.min_scalar_type(dyn_range),
                                        np.min_scalar_type(low_boundary))
        if arr.dtype != offset_dtype:
            # prevent overflow errors when offsetting
            arr = arr.astype(offset_dtype)
        arr = arr - offset
    else:
        offset = 0
    return arr, offset
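min_scalar_type alone would not be safe here: the dynamic range wants an unsigned dtype while the negative boundary wants a signed one, so the two candidates are promoted. A sketch of the selection for hypothetical boundaries:

import numpy as np

np.min_scalar_type(300)               # dtype('uint16') -- the dynamic range
np.min_scalar_type(-5)                # dtype('int8')   -- the negative offset
np.promote_types(np.uint16, np.int8)  # dtype('int32')  -- holds both safely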
Example 6: __getitem__

def __getitem__(self, key):
    """
    Look up a cell in the map, but clip to the edges.

    For instance, `map[-1, -1] == map[0, 0]`, unlike in a normal np array
    where it would be `map[map.shape[0]-1, map.shape[1]-1]`.

    Note that this only applies to pairwise integer indexing. Indexing
    with boolean masks or slice objects uses the normal indexing rules.
    """
    if not isinstance(key, tuple):
        # probably a mask
        return self.grid[key]
    if len(key) != 2:
        # row, column, or just wrong
        return self.grid[key]
    if any(np.min_scalar_type(i) == np.bool_ for i in key):
        # partial mask
        return self.grid[key]
    if any(isinstance(i, slice) for i in key):
        # normal slicing
        return self.grid[key]
    keys = np.ravel_multi_index(key, dims=self.grid.shape, mode='clip')
    # workaround for https://github.com/numpy/numpy/pull/7586
    if keys.ndim == 0:
        return self.grid.take(keys[np.newaxis])[0]
    else:
        return self.grid.take(keys)
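The clipping itself is done by np.ravel_multi_index with mode='clip', which maps out-of-range indices to the nearest valid edge before the flat lookup; a minimal sketch:

import numpy as np

grid = np.arange(12).reshape(3, 4)
np.ravel_multi_index((-1, -1), dims=grid.shape, mode='clip')           # 0, i.e. grid[0, 0]
grid.take(np.ravel_multi_index((5, 5), dims=grid.shape, mode='clip'))  # 11, i.e. grid[2, 3]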
Example 7: full_cumsum

def full_cumsum(data, axis=None, dtype=None):
    """
    A version of `numpy.cumsum` that includes the sum of the empty slice (zero). This
    makes it satisfy the invariant::

        cumsum(a)[i] == sum(a[:i])

    which is a useful property for simplifying the formula of a moving average. The
    result is one entry longer than *data* along *axis*.
    """
    # All we need to do is construct a result array with the appropriate type and
    # dimensions, then feed a slice of it to cumsum, leaving the rest at zero.
    shape = list(data.shape)
    if axis is None:
        shape[0] += 1
    else:
        shape[axis] += 1
    # Mimic cumsum's behavior with the dtype argument: use the original data type or
    # the system's native word, whichever has the greater width. (This prevents us from
    # attempting a cumulative sum using an 8-bit integer, for instance.)
    if dtype is None:
        dtype = np.promote_types(data.dtype, np.min_scalar_type(-sys.maxint))  # sys.maxint exists only in Python 2
    out = np.zeros(shape, dtype)
    s = axis_slice(axis)
    np.cumsum(data, axis, dtype, out[s[1:]])
    return out
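The invariant is easy to check with plain NumPy, sketched here without the axis_slice helper (which is assumed to come from the same module):

import numpy as np

a = np.array([3, 1, 4, 1, 5], dtype=np.uint8)
out = np.zeros(len(a) + 1, dtype=np.promote_types(a.dtype, np.int64))
np.cumsum(a, dtype=out.dtype, out=out[1:])   # out[0] stays 0, the empty sum
assert all(out[i] == a[:i].sum() for i in range(len(a) + 1))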
Example 8: go

def go(self):
    pi = self.progress.indicator
    pi.operation = 'Initializing'
    with pi:
        self.duration = self.kinetics_file['durations'][self.iter_start-1:self.iter_stop-1]
        # Only select transition events from the specified istate to fstate
        mask = (self.duration['istate'] == self.istate) & (self.duration['fstate'] == self.fstate)
        self.duration_dsspec = DurationDataset(self.kinetics_file['durations']['duration'], mask, self.iter_start)
        self.wt_dsspec = DurationDataset(self.kinetics_file['durations']['weight'], mask, self.iter_start)
        self.output_file = h5py.File(self.output_filename, 'w')
        h5io.stamp_creator_data(self.output_file)
        # Construct bin boundaries
        self.construct_bins(self.parse_binspec(self.binspec))
        for idim, (binbounds, midpoints) in enumerate(izip(self.binbounds, self.midpoints)):
            self.output_file['binbounds_{}'.format(idim)] = binbounds
            self.output_file['midpoints_{}'.format(idim)] = midpoints
        # construct histogram
        self.construct_histogram()
        # Record iteration range
        iter_range = numpy.arange(self.iter_start, self.iter_stop, 1, dtype=(numpy.min_scalar_type(self.iter_stop)))
        self.output_file['n_iter'] = iter_range
        self.output_file['histograms'].attrs['iter_start'] = self.iter_start
        self.output_file['histograms'].attrs['iter_stop'] = self.iter_stop
        self.output_file.close()
Example 9: da_sub

def da_sub(daa, dab):
    """
    Subtract two DataArrays as cleverly as possible:
      * keep the metadata of the first DA in the result
      * ensure the result has the right type so that no underflows happen
    returns (DataArray): the result of daa - dab
    """
    rt = numpy.result_type(daa, dab)  # dtype of the result of daa - dab
    dt = None  # default is to let numpy decide
    if rt.kind == "f":
        # float should always be fine
        pass
    elif rt.kind in "iub":
        # underflow can happen (especially if unsigned)
        # find the worst-case value (could be improved, but would be longer)
        worst_val = int(daa.min()) - int(dab.max())
        dt = numpy.result_type(rt, numpy.min_scalar_type(worst_val))
    else:
        # subtracting such data is suspicious, but try anyway
        logging.warning("Subtraction on data of type %s unsupported", rt.name)
    res = numpy.subtract(daa, dab, dtype=dt)  # metadata is copied from daa
    logging.debug("type = %s, %s", res.dtype.name, daa.dtype.name)
    return res
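Why the worst-case probe is needed: unsigned subtraction wraps around instead of going negative. A minimal demonstration of the dtype it picks:

import numpy as np

a = np.array([10], dtype=np.uint8)
b = np.array([20], dtype=np.uint8)
a - b                                    # array([246], dtype=uint8): underflow
worst = int(a.min()) - int(b.max())      # -10
np.result_type(np.uint8, np.min_scalar_type(worst))  # dtype('int16'), safe for a - b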
Example 10: __init__

def __init__(self, func, nbins, args=None, kwargs=None):
    self.func = func
    self.args = args or ()
    self.kwargs = kwargs or {}
    self.nbins = nbins
    self.index_dtype = numpy.min_scalar_type(self.nbins)
    self.labels = ['{!r} bin {:d}'.format(func, ibin) for ibin in xrange(nbins)]
Example 11: count_over_time_interval

def count_over_time_interval(
        time,
        values,
        time_interval,
        ignore_nodata,
        nodata=None):

    def aggregate_ignore_nodata(values):
        # every time step counts: the count equals the length of the time axis
        return (numpy.ones(values.shape[1:], dtype=numpy.uint8) *
                values.shape[0]).tolist()

    def aggregate_dont_ignore_nodata(values):
        return numpy.sum(values != nodata, 0)

    aggregate = {
        mds.constants.IGNORE_NODATA: aggregate_ignore_nodata,
        mds.constants.DONT_IGNORE_NODATA: aggregate_dont_ignore_nodata
    }

    result_time, result_values = aggregate_over_time_interval(time, values,
        TIME_POINT_TO_ID_BY_TIME_INTERVAL[time_interval],
        aggregate[ignore_nodata])

    # store each per-interval count in the smallest dtype that holds its maximum
    return result_time, [numpy.array(result_values[i], numpy.min_scalar_type(
        numpy.max(result_values[i]))) for i in xrange(len(result_values))]
Example 12: normalize_compare_value

def normalize_compare_value(self, other):
    other_dtype = np.min_scalar_type(other)
    if other_dtype.kind in 'biuf':
        # bool, signed/unsigned int or float: promote against our own dtype
        other_dtype = np.promote_types(self.dtype, other_dtype)
        ary = utils.scalar_broadcast_to(other, shape=len(self),
                                        dtype=other_dtype)
        return self.replace(data=Buffer(ary), dtype=ary.dtype)
    else:
        raise TypeError('cannot broadcast {}'.format(type(other)))
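The kind check is what routes the dispatch; for reference, the dtype.kind letters min_scalar_type can produce here (a quick sketch):

import numpy as np

np.min_scalar_type(3).kind      # 'u' -- unsigned integer
np.min_scalar_type(-3).kind     # 'i' -- signed integer
np.min_scalar_type(3.0).kind    # 'f' -- float
np.min_scalar_type('x').kind    # 'U' -- not in 'biuf', so it raises TypeError above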
Example 13: iter_range

def iter_range(self, iter_start=None, iter_stop=None, iter_step=None, dtype=None):
    '''Return a sequence for the given iteration numbers and stride, filling
    in missing values from those stored on ``self``. The smallest data type capable of
    holding ``iter_stop`` is used unless otherwise specified with the ``dtype``
    argument.'''
    iter_start = self.iter_start if iter_start is None else iter_start
    iter_stop = self.iter_stop if iter_stop is None else iter_stop
    iter_step = self.iter_step if iter_step is None else iter_step
    return numpy.arange(iter_start, iter_stop, iter_step, dtype=(dtype or numpy.min_scalar_type(iter_stop)))
Example 14: shuffle_group

def shuffle_group(df, col, stage, k, npartitions):
    # a variant of Example 2: same staged-shuffle logic, pandas-internal indexer
    if col == '_partitions':
        ind = df[col]
    else:
        ind = hash_pandas_object(df[col], index=False)
    c = ind._values
    typ = np.min_scalar_type(npartitions * 2)
    c = c.astype(typ)
    npartitions, k, stage = [np.array(x, dtype=np.min_scalar_type(x))[()]
                             for x in [npartitions, k, stage]]
    c = np.mod(c, npartitions, out=c)
    c = np.floor_divide(c, k ** stage, out=c)
    c = np.mod(c, k, out=c)
    indexer, locations = pd.algos.groupsort_indexer(c.astype(np.int64), k)
    df2 = df.take(indexer)
    locations = locations.cumsum()
    parts = [df2.iloc[a:b] for a, b in zip(locations[:-1], locations[1:])]
    return dict(zip(range(k), parts))
Example 15: create

def create(predict_fn, word_representations,
           batch_size, window_size, vocabulary_size,
           result_callback):
    assert result_callback is not None
    # vocabulary ids run from 0 to vocabulary_size - 1
    instance_dtype = np.min_scalar_type(vocabulary_size - 1)
    logging.info('Instance elements will be stored using %s.', instance_dtype)
    batcher = WordBatcher(
        predict_fn,
        batch_size, window_size, instance_dtype,
        result_callback)
    return batcher
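A sketch of the memory saving this buys, with a made-up vocabulary size (the array shape is also hypothetical):

import numpy as np

vocabulary_size = 50000
instance_dtype = np.min_scalar_type(vocabulary_size - 1)   # dtype('uint16')
ids = np.zeros((1024, 11), dtype=instance_dtype)           # 2 bytes per id instead of 8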