This article collects typical usage examples of the `__builtin__.min` function in Python. If you are wondering what `__builtin__.min` is for, how to call it, or what real-world code that uses it looks like, the curated examples below should help. They also illustrate, more generally, how the `__builtin__` module that hosts this function is used.
Six code examples of `__builtin__.min` are presented below, sorted by popularity by default.
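As a quick orientation before the examples: in Python 2, a module that defines (or imports) its own `min` can still reach the interpreter's built-in version through the `__builtin__` module, and that is the pattern every example below relies on. A minimal, self-contained sketch of the idea (Python 2 only; the toy shadowing function is purely illustrative):

import __builtin__

def min(x, axis=None):
    # A module-level min that shadows the built-in, in the spirit of the
    # examples below (the axis argument is ignored in this toy version).
    return __builtin__.min(x)

print min([4, 1, 7])          # the shadowing function defined above -> 1
print __builtin__.min(4, 1)   # the real built-in, still reachable   -> 1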
Example 1: tile
# Required import: import __builtin__ [as alias]
# Or: from __builtin__ import min [as alias]
def tile(a, reps):
    if type(reps) in _numberTypes: reps = (reps,)
    reps = tuple(reps)  # for generator expressions
    if type(a) in _numberTypes:
        ret = empty(reps)
        ret._base.assign(a)
        return ret
    a = as_garray(a)
    if len(reps) > a.ndim: a = a._add_axes(len(reps))
    if len(reps) < a.ndim: reps = _extend_shape(reps, a.ndim)  # now len(reps)==a.ndim
    retShape = tuple([a.shape[i] * reps[i] for i in tuple(xrange(len(reps)))])
    if _prodT(retShape)==0: return zeros(retShape)
    if _prodT(reps)==1: return a
    for i in range(a.ndim-1):  # merge replication requests on adjacent axes, for efficiency.
        if reps[i]!=1 and reps[i+1]!=1 and a.shape[i]==1:
            return (a.reshape(_deleteT2(a.shape, i))
                     .tile(reps[:i] + (_prodT(reps[i:i+2]),) + reps[i+2:])
                     .reshape(map(operator.mul, a.shape, reps)))
    def dataIDone(nextA, i):
        return nextA.reshape(_modifyT(a.shape, i, a.shape[i]*reps[i])).tile(_modifyT(reps, i, 1))
    if reps[0]!=1:  # replicating rows is easy and efficient: just repeat the data a number of times.
        temp = empty((reps[0], a.size))  # shape doesn't matter because dataIDone changes it
        tempCm = temp._base_shaped(1)
        if reps[0]>=1:
            _cm_row_slice_read(tempCm, 0, 1).assign(a._base_as_row())
            nCopiesDone = 1
            while nCopiesDone < reps[0]:
                nNow = __builtin__.min(nCopiesDone, reps[0]-nCopiesDone)
                _cm_row_slice_read(tempCm, nCopiesDone, nCopiesDone + nNow).assign(_cm_row_slice_read(tempCm, 0, nNow))
                nCopiesDone += nNow
        return dataIDone(temp, 0)
    # The general case is repeating a subset (as opposed to the whole array) n times, before moving on to the next subset.
    # Using a transpose with the right shape, the subsets can become columns. Those can be lengthened because that is
    # replicating rows; a second transpose makes them now-lengthened subsets again.
    axis = __builtin__.min(i for i in range(a.ndim) if reps[i]!=1)
    return dataIDone(a.reshape_2d(axis).T.tile((reps[axis], 1)).T, axis)
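The row-replication branch above fills `temp` by repeated doubling: one copy is seeded, each pass duplicates everything copied so far, and `__builtin__.min(nCopiesDone, reps[0]-nCopiesDone)` caps the final pass so the total never overshoots `reps[0]`. The following is a small pure-Python sketch of the same bookkeeping, using plain lists instead of cudamat buffers (`replicate_by_doubling` is an illustrative name, not part of the original code):

import __builtin__

def replicate_by_doubling(row, n_copies):
    # Same counting logic as the while-loop in tile(), but on plain lists.
    out = [list(row)]                                  # seed: one copy done
    done = 1
    while done < n_copies:
        now = __builtin__.min(done, n_copies - done)   # never copy past n_copies
        out.extend(out[:now])                          # duplicate an already-finished block
        done += now
    return out

print len(replicate_by_doubling([1, 2, 3], 5))   # 5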
Example 2: min
# Required import: import __builtin__ [as alias]
# Or: from __builtin__ import min [as alias]
def min(x, axis=None):
    """ On numpy arrays this returns a numpy array; on garrays and other array-likes this returns a garray. """
    return _reductor__base(x, axis, garray.min, numpy.min)
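The surrounding `garray` and `numpy` names suggest this wrapper is gnumpy's module-level `min`, with `_reductor__base` dispatching to `garray.min` on the GPU and `numpy.min` on the CPU. Assuming that identification is right (and that gnumpy with its cudamat or simulation backend is installed), a usage example would look roughly like this:

import numpy, gnumpy

a_np = numpy.array([[3., 1.], [2., 4.]])
a_g = gnumpy.garray(a_np)

print type(gnumpy.min(a_np, 0))   # a numpy array comes back as numpy.ndarray
print type(gnumpy.min(a_g, 0))    # a garray comes back as gnumpy.garray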
Example 3: all
# Required import: import __builtin__ [as alias]
# Or: from __builtin__ import min [as alias]
def all(self, axis=None): return ( True if self.size==0 else (self.as_bool()).min())
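The one-liner works because "all elements are true" is equivalent to "the minimum of the boolean view is true", with the empty array defined as all-true. The same identity is easy to check with plain numpy:

import numpy

x = numpy.array([[1.0, 2.0], [3.0, 0.0]])
print bool((x != 0).min()) == bool(numpy.all(x))   # True: all() is min() over booleans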
Example 4: newmin
# Required import: import __builtin__ [as alias]
# Or: from __builtin__ import min [as alias]
def newmin(*args, **kwargs):
    return new_min_max(_builtin_min, *args, **kwargs)
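`newmin` captures the Python 2 built-in as `_builtin_min` and routes it through `new_min_max`, a compatibility wrapper of the kind found in Python 3 backport layers. The wrapper below is a rough, hypothetical sketch of what such a layer typically adds (notably Python 3's `default=` keyword for empty iterables); it is not the actual backport code:

import __builtin__

_builtin_min = __builtin__.min   # capture the real built-in before any shadowing

def new_min_max(op, *args, **kwargs):
    # Hypothetical sketch: emulate the Python 3 `default=` keyword on top of
    # the captured Python 2 built-in. Real backports do more validation.
    if 'default' in kwargs and len(args) == 1:
        items = list(args[0])
        if not items:
            return kwargs['default']
        key = kwargs.get('key')
        return op(items, key=key) if key is not None else op(items)
    return op(*args, **kwargs)

def newmin(*args, **kwargs):
    return new_min_max(_builtin_min, *args, **kwargs)

print newmin([], default=0)   # 0
print newmin([3, 1, 2])       # 1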
Example 5: _process_arguments
# Required import: import __builtin__ [as alias]
# Or: from __builtin__ import min [as alias]
def _process_arguments(self, args, keywords):
    kwdict = dict(keywords)
    argdict = {}
    nargs = min(len(args), len(self._argnames))
    for iarg in range(nargs):
        argdict[self._argnames[iarg]] = args[iarg]
    if nargs < len(args):
        if self._varpos is None:
            msg = "macro '{0}' called with too many positional arguments "\
                  "(expected: {1}, received: {2})"\
                  .format(self._name, len(self._argnames), len(args))
            raise FyppFatalError(msg, self._fname, self._spans[0])
        else:
            argdict[self._varpos] = list(args[nargs:])
    elif self._varpos is not None:
        argdict[self._varpos] = []
    for argname in self._argnames[:nargs]:
        if argname in kwdict:
            msg = "got multiple values for argument '{0}'".format(argname)
            raise FyppFatalError(msg, self._fname, self._spans[0])
    if nargs < len(self._argnames):
        for argname in self._argnames[nargs:]:
            if argname in kwdict:
                argdict[argname] = kwdict.pop(argname)
            elif argname in self._defaults:
                argdict[argname] = self._defaults[argname]
            else:
                msg = "macro '{0}' called without mandatory positional "\
                      "argument '{1}'".format(self._name, argname)
                raise FyppFatalError(msg, self._fname, self._spans[0])
    if kwdict and self._varkw is None:
        kwstr = "', '".join(kwdict.keys())
        msg = "macro '{0}' called with unknown keyword argument(s) '{1}'"\
              .format(self._name, kwstr)
        raise FyppFatalError(msg, self._fname, self._spans[0])
    if self._varkw is not None:
        argdict[self._varkw] = kwdict
    return argdict
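Here the built-in `min` (reachable as `__builtin__.min` wherever it is shadowed) caps how many positional arguments bind to declared parameter names; anything beyond that either spills into the macro's `*varpos` parameter or raises an error. A tiny standalone illustration of that capping step, using made-up values rather than fypp's actual objects:

argnames = ['x', 'y']        # declared macro parameters
args = (1, 2, 3, 4)          # values supplied by the caller

nargs = min(len(args), len(argnames))       # -> 2
bound = dict(zip(argnames, args[:nargs]))   # {'x': 1, 'y': 2}
overflow = list(args[nargs:])               # [3, 4], destined for *varpos if one is declared
print bound, overflow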
Example 6: _reduction__base
# Required import: import __builtin__ [as alias]
# Or: from __builtin__ import min [as alias]
def _reduction__base(self, operatorName, axis):
    if axis==None: return self.ravel()._reduction__base(operatorName, 0).item()
    if not type(axis) in _numberTypes: raise TypeError('the value %s is not appropriate for the "axis" parameter.' % str(axis))
    if axis < -self.ndim or axis>=self.ndim: raise ValueError('axis (%d) out of bounds for an array with %d axes.' % (axis, self.ndim))
    axis = int(axis) % self.ndim
    if self.size==0:
        retShape = _deleteT2(self.shape, axis)
        if operatorName=='sum': return zeros(retShape)
        elif operatorName=='max': return tile(-inf, retShape)
        else: assert False
    if operatorName=='max' and axis==0 and cudamatHas('maxAxis0'):  # my own fast implementation
        ret = empty(self.shape[1:])
        _ctInt = _cudamat.ct.c_int
        nThreadsPerBlock = 32
        gridX, gridY = ((ret.size+nThreadsPerBlock-1)//nThreadsPerBlock), 1
        while gridX>65535: gridY*=2; gridX = (gridX+1)//2
        _cudamat._cudamat.maxAxis0.restype = _ctypes.c_int
        assert 0==_cudamat._cudamat.maxAxis0(_ctInt(gridX), _ctInt(gridY), _ctInt(nThreadsPerBlock), self._base.p_mat, ret._base.p_mat, _ctInt(self.shape[0]), _ctInt(ret.size))
        return ret
    if axis==0 and operatorName=='max':  # max over rows is not yet supported in cudamat
        return self.reshape_2d(1).T.max(1).reshape(self.shape[1:])
    if axis==0 and self.ndim==1 and self.size>5000 and operatorName=='sum':  # optimization. apparently, cudamat is not maximally efficient.
        n = int(numpy.sqrt(self.size-1))
        return self[:n*n].reshape((n, n))._reduction__base(operatorName, 0)._reduction__base(operatorName, 0) + self[n*n:]._reduction__base(operatorName, 0)
    if operatorName=='sum':
        chunkSize = 1024*256  # sum over longer dimensions fails in cudamat
        nChunks = (self.shape[axis] + chunkSize-1) // chunkSize
        if nChunks>1:
            return reduceAdd( self[(slice(None),) * axis + (slice(chunkI*chunkSize, __builtin__.min(self.shape[axis], (chunkI+1)*chunkSize)),)]._reduction__base(operatorName, axis)
                              for chunkI in range(nChunks))
    if operatorName=='max' and self.isnan().any2():  # cudamat bug workaround
        return garray(self.asarray().max(axis))
    operatorInCm = {'sum': _cmType.sum, 'max': _cmType.max}[operatorName]
    if axis==0: return _check_number_types(garray(operatorInCm(self._base_shaped(1), 1, _new_cm(_prodT(self.shape[1:]))), self.shape[1:], None))
    if axis==self.ndim-1:
        if self.ndim!=2: return self.reshape_2d(-1)._reduction__base(operatorName, 1).reshape(self.shape[:-1])
        if self.ndim==2:
            chunkSize = 2**16-1
            nChunks = (len(self) + chunkSize-1) // chunkSize
            if nChunks>1:  # cudamat chokes on big arrays, so break it in pieces for cudamat
                chunks = tuple([ self[chunkI*chunkSize : __builtin__.min((chunkI+1)*chunkSize, len(self))]
                                 for chunkI in range(nChunks)])
                return concatenate([ chunk._reduction__base(operatorName, 1) for chunk in chunks])
            else:  # small array
                return _check_number_types(garray(operatorInCm(self._base_shaped(1), 0, _new_cm((len(self), 1))), (len(self),), None))
    return self.transpose_simple(axis)._reduction__base(operatorName, 0).transpose_simple(-axis)
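Both chunked branches above use `__builtin__.min` for the same purpose: clamping the end index of the final chunk so it never runs past the array. That boundary arithmetic is easy to isolate (`chunk_bounds` below is an illustrative helper, not part of the original module):

import __builtin__

def chunk_bounds(length, chunk_size):
    # Same arithmetic as the chunked sum/max paths above: the built-in min
    # clamps the last chunk to the overall length.
    n_chunks = (length + chunk_size - 1) // chunk_size
    return [(i * chunk_size, __builtin__.min((i + 1) * chunk_size, length))
            for i in range(n_chunks)]

print chunk_bounds(10, 4)   # [(0, 4), (4, 8), (8, 10)]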