This article collects typical usage examples of the numpy.float_ function in Python. If you have been wondering what exactly float_ does, how to call it, or what real-world uses look like, the curated examples below may help.
Fifteen code examples of the float_ function are shown, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
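Before the examples, a one-line orientation: numpy.float_ is an alias for numpy.float64, NumPy's double-precision scalar type, which also subclasses Python's built-in float. (The alias was removed in NumPy 2.0, where np.float64 should be used directly.) A minimal sketch:

import numpy as np

x = np.float_("3.14")        # parses to a float64 scalar
print(type(x))               # <class 'numpy.float64'>
print(isinstance(x, float))  # True: np.float64 subclasses Python's float

arr = np.float_([1, 2, 3])   # applied to a sequence it returns a float64 array
print(arr.dtype)             # float64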
Example 1: assertDataAlmostEqual
def assertDataAlmostEqual(self, data, reference_filename, **kwargs):
    reference_path = self.get_result_path(reference_filename)
    if self._check_reference_file(reference_path):
        kwargs.setdefault('err_msg', 'Reference file %s' % reference_path)
        with open(reference_path, 'r') as reference_file:
            stats = json.load(reference_file)
        self.assertEqual(stats.get('shape', []), list(data.shape))
        self.assertEqual(stats.get('masked', False), ma.is_masked(data))
        nstats = np.array((stats.get('mean', 0.), stats.get('std', 0.),
                           stats.get('max', 0.), stats.get('min', 0.)),
                          dtype=np.float_)
        if math.isnan(stats.get('mean', 0.)):
            self.assertTrue(math.isnan(data.mean()))
        else:
            data_stats = np.array((data.mean(), data.std(),
                                   data.max(), data.min()),
                                  dtype=np.float_)
            self.assertArrayAllClose(nstats, data_stats, **kwargs)
    else:
        self._ensure_folder(reference_path)
        stats = collections.OrderedDict([
            ('std', np.float_(data.std())),
            ('min', np.float_(data.min())),
            ('max', np.float_(data.max())),
            ('shape', data.shape),
            ('masked', ma.is_masked(data)),
            ('mean', np.float_(data.mean()))])
        with open(reference_path, 'w') as reference_file:
            reference_file.write(json.dumps(stats))
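One detail worth noting here: the statistics are wrapped in np.float_ before being dumped to JSON. Because np.float64 subclasses the built-in float, the standard json module can serialize it directly, whereas a np.float32 would raise a TypeError. A small sketch:

import json
import numpy as np

print(json.dumps({'mean': np.float_(1.5)}))  # works: float64 is a float subclass
# json.dumps({'mean': np.float32(1.5)})      # would raise TypeError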
Example 2: SmartGen
def SmartGen(device_id, k, device_user, generate_device_user_likelihood):
    # AllCookie and HandleCookie are module-level dicts (built elsewhere
    # in the script) mapping a user to that user's cookies.
    user_candidate = []
    candidates_n = estimation = candidates_l = 0.0
    users = device_user.get(device_id)
    likelihood = []       # the original bound likelihood and estimation_list
    estimation_list = []  # to the same list object, so appends leaked across
    for user in users:
        likelihood.append(generate_device_user_likelihood.get(user))
    order = np.argsort(likelihood)[::-1]  # user indices by descending likelihood
    for k in range(1, len(users)):  # note: this shadows the k parameter
        for i in order[:k]:
            user = users[i]
            user_candidate.append(user)
            candidates_n += len(AllCookie.get(user))
            candidates_l += generate_device_user_likelihood.get(user)
        for i in order[:k]:
            user = users[i]
            precision = np.float_(len(AllCookie.get(user))) / np.float_(candidates_n)
            estimation += (generate_device_user_likelihood.get(user) * 1.0 / candidates_l) * \
                ((1.25 * precision) / (0.25 * precision + 1.0))
        estimation_list.append(estimation)
    # choose the candidate-set size with the highest estimated score
    # (the original computed ~np.argsort(estimation_list), which yields an
    # index array rather than a usable k)
    k_final = np.argmax(estimation_list) + 1
    cookie_final = []
    for i in order[:k_final]:
        user = users[i]
        cookie_candidates = HandleCookie.get(user)
        for cookie in cookie_candidates:
            cookie_final.append(cookie)
    return cookie_final
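A hedged usage sketch with toy data (all names and values below are hypothetical; in the original competition script AllCookie and HandleCookie are built while the data is loaded):

import numpy as np

AllCookie = {'u1': ['c1', 'c2'], 'u2': ['c3'], 'u3': ['c4', 'c5', 'c6']}
HandleCookie = AllCookie
device_user = {'d1': ['u1', 'u2', 'u3']}
likelihoods = {'u1': 0.7, 'u2': 0.2, 'u3': 0.1}

print(SmartGen('d1', 2, device_user, likelihoods))  # ['c1', 'c2', 'c3']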
Example 3: compute_edf_distance
def compute_edf_distance(support1, support2):
    # Build common bin edges from both samples, padded with +/- infinity.
    bin_edges = numpy.empty((support1.shape[0] + support2.shape[0] + 2,))
    bin_edges[0] = -numpy.inf
    bin_edges[-1] = numpy.inf
    bin_edges[1:1 + support1.shape[0]] = support1
    bin_edges[1 + support1.shape[0]:1 + support1.shape[0] + support2.shape[0]] = support2
    bin_edges = numpy.unique(bin_edges)  # unique also sorts
    bin_counts1_i, bins1 = numpy.histogram(support1, bin_edges)
    bin_counts2_i, bins2 = numpy.histogram(support2, bin_edges)
    bin_counts1 = numpy.float_(bin_counts1_i)
    bin_counts2 = numpy.float_(bin_counts2_i)
    # Normalized cumulative sums are the two empirical CDFs on the shared grid.
    sum_counts1 = numpy.cumsum(bin_counts1) / numpy.sum(bin_counts1)
    sum_counts2 = numpy.cumsum(bin_counts2) / numpy.sum(bin_counts2)
    # The distance is the maximum absolute difference between the EDFs.
    delta = numpy.abs(sum_counts1 - sum_counts2)
    dist = numpy.max(delta)
    return dist
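What this computes is the two-sample Kolmogorov-Smirnov statistic (the supremum distance between two empirical distribution functions), so a quick sanity check against SciPy is possible; a sketch, assuming the function above is in scope:

import numpy
from scipy import stats

rng = numpy.random.default_rng(0)
a = rng.normal(0.0, 1.0, 500)
b = rng.normal(0.5, 1.0, 500)

print(compute_edf_distance(a, b))      # EDF distance computed above
print(stats.ks_2samp(a, b).statistic)  # should agree closely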
Example 4: __init__
def __init__(self, eps_par=numpy.float_(0.),
             mask_var=numpy.float_(1), xlabel=None, ylabel=None):
    super(MyEstimator, self).__init__()
    # these are the (hyper)parameters of the estimator
    self.eps_par = eps_par
    self.mask_var = mask_var
    self.catastrophe = None
    self.dm = None
    self.max_distance = None
    self.mask_scale = None
    self.outlier_cut = 0.95
    self.optimize_frac = 0.1
    self.xlabel = xlabel
    self.ylabel = ylabel
    self.zmin = 0.6
    self.zmax = 1.6
    self.oiimin = 6e-17
Example 5: Init
def Init(self):
    # boundary and domain conditions
    self.lat = io.read_PETSc_vec(self.config["-Metos3DBoundaryConditionInputDirectory"][0] + self.config["-Metos3DLatitudeFileFormat"][0])
    dz = io.read_PETSc_vec(self.config["-Metos3DDomainConditionInputDirectory"][0] + self.config["-Metos3DLayerHeightFileFormat"][0])
    z = io.read_PETSc_vec(self.config["-Metos3DDomainConditionInputDirectory"][0] + self.config["-Metos3DLayerDepthFileFormat"][0])
    self.lsm = io.read_PETSc_mat(self.config["-Metos3DProfileInputDirectory"][0] + self.config["-Metos3DProfileMaskFile"][0])
    self.fice = np.zeros((self.profiles, np.int_(self.config["-Metos3DIceCoverCount"][0])), dtype=np.float_)
    for i in range(np.int_(self.config["-Metos3DIceCoverCount"][0])):
        self.fice[:, i] = io.read_PETSc_vec(self.config["-Metos3DBoundaryConditionInputDirectory"][0] + (self.config["-Metos3DIceCoverFileFormat"][0] % i))
    self.bc = np.zeros(2, dtype=np.float_)
    self.dc = np.zeros((self.ny, 2), dtype=np.float_)
    self.dc[:, 0] = z
    self.dc[:, 1] = dz
    self.u = np.array(self.config["-Metos3DParameterValue"], dtype=np.float_)
    self.dt = np.float_(self.config["-Metos3DTimeStep"][0])
    self.nspinup = np.int_(self.config["-Metos3DSpinupCount"][0])
    self.ntimestep = np.int_(self.config["-Metos3DTimeStepCount"][0])
    self.matrixCount = np.int_(self.config["-Metos3DMatrixCount"][0])
    # POD and DEIM bases for the two tracers N and DOP
    self.U_PODN = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'N/' + self.config["-Metos3DMatrixPODFileFormat"][0])
    self.U_PODDOP = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'DOP/' + self.config["-Metos3DMatrixPODFileFormat"][0])
    self.U_DEIMN = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'N/' + self.config["-Metos3DMatrixDEIMFileFormat"][0])
    self.U_DEIMDOP = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'DOP/' + self.config["-Metos3DMatrixDEIMFileFormat"][0])
    self.DEIM_IndicesN = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'N/' + self.config["-Metos3DDEIMIndicesFileFormat"][0])
    self.DEIM_IndicesDOP = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'DOP/' + self.config["-Metos3DDEIMIndicesFileFormat"][0])
    self.AN = np.ndarray(shape=(self.matrixCount, self.U_PODN.shape[1], self.U_PODN.shape[1]), dtype=np.float_, order='C')
    self.ADOP = np.ndarray(shape=(self.matrixCount, self.U_PODDOP.shape[1], self.U_PODDOP.shape[1]), dtype=np.float_, order='C')
    for i in range(0, self.matrixCount):
        self.AN[i] = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'N/' + self.config["-Metos3DMatrixReducedFileFormat"][0] % i)
        self.ADOP[i] = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'DOP/' + self.config["-Metos3DMatrixReducedFileFormat"][0] % i)
    self.PN = np.ndarray(shape=(self.matrixCount, self.U_PODN.shape[1], self.U_DEIMN.shape[1]), dtype=np.float_, order='C')
    self.PDOP = np.ndarray(shape=(self.matrixCount, self.U_PODDOP.shape[1], self.U_DEIMDOP.shape[1]), dtype=np.float_, order='C')
    for i in range(0, self.matrixCount):
        self.PN[i] = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'N/' + self.config["-Metos3DMatrixReducedDEINFileFormat"][0] % i)
        self.PDOP[i] = np.load(self.config["-Metos3DMatrixInputDirectory"][0] + 'DOP/' + self.config["-Metos3DMatrixReducedDEINFileFormat"][0] % i)
    # precompute the interpolation indices and weights for one model year
    [self.interpolation_a, self.interpolation_b, self.interpolation_j, self.interpolation_k] = util.linearinterpolation(2880, 12, 0.0003472222222222)
    self.yN = np.ones(self.ny, dtype=np.float_) * np.float_(self.config["-Metos3DTracerInitValue"])[0]
    self.yDOP = np.ones(self.ny, dtype=np.float_) * np.float_(self.config["-Metos3DTracerInitValue"])[1]
    self.y_redN = np.dot(self.U_PODN.T, self.yN)
    self.y_redDOP = np.dot(self.U_PODDOP.T, self.yDOP)
    self.qN = np.zeros(self.DEIM_IndicesN.shape[0], dtype=np.float_)
    self.qDOP = np.zeros(self.DEIM_IndicesDOP.shape[0], dtype=np.float_)
    self.J, self.PJ = util.generateIndicesForNonlinearFunction(self.lsm, self.profiles, self.ny)
    self.out_pathN = self.config["-Metos3DTracerOutputDirectory"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][1] + self.config["-Metos3DTracerOutputFile"][0]
    self.out_pathDOP = self.config["-Metos3DTracerOutputDirectory"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][1] + self.config["-Metos3DTracerOutputFile"][1]
    self.monitor_path = self.config["-Metos3DTracerMointorDirectory"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][0] + self.config["-Metos3DSpinupMonitorFileFormatPrefix"][1] + self.config["-Metos3DTracerOutputFile"][0]
Example 6: rotatev_aroundz
def rotatev_aroundz(p, a):
    # rotate vector p by angle a (radians) around the z axis
    x = p[0]
    y = p[1]
    z = p[2]
    cs = np.float_(np.cos(a))
    cn = np.float_(np.sin(a))
    return np.float_(np.array([cs*x - cn*y, cn*x + cs*y, z]))
Example 7: rotatev_aroundx
def rotatev_aroundx(p, a):
    # rotate vector p by angle a (radians) around the x axis
    x = p[0]
    y = p[1]
    z = p[2]
    cs = np.float_(np.cos(a))
    cn = np.float_(np.sin(a))
    return np.float_(np.array([x, cs*y - cn*z, cn*y + cs*z]))
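A quick numeric check of both helpers (a sketch; np is numpy and the angle is in radians): rotating the unit x vector a quarter turn around z should give the unit y vector, and rotating the unit y vector a quarter turn around x should give the unit z vector.

import numpy as np

print(np.round(rotatev_aroundz(np.array([1.0, 0.0, 0.0]), np.pi / 2), 12))  # [0. 1. 0.]
print(np.round(rotatev_aroundx(np.array([0.0, 1.0, 0.0]), np.pi / 2), 12))  # [0. 0. 1.]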
Example 8: loadDevices
def loadDevices(trainfile, DictHandle, DictDevice, DictDevType, DictDevOs,
                DictCountry, DictAnnC1, DictAnnC2):
    # First pass: count the data rows so the feature matrix can be preallocated.
    NumRows = 0
    with open(trainfile, newline='') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',')
        next(spamreader)  # skip the header row
        for row in spamreader:
            NumRows = NumRows + 1
    XDevices = np.zeros((NumRows, 11))
    # Second pass: map categorical columns through the lookup dicts and parse
    # numeric columns with np.float_.  (The original used the Python 2 idioms
    # open(trainfile, 'rb') and spamreader.next().)
    NumRows = 0
    with open(trainfile, newline='') as csvfile:
        spamreader = csv.reader(csvfile, delimiter=',')
        next(spamreader)
        for row in spamreader:
            XDevices[NumRows, 0] = DictHandle[row[0]]
            XDevices[NumRows, 1] = DictDevice[row[1]]
            XDevices[NumRows, 2] = DictDevType[row[2]]
            XDevices[NumRows, 3] = DictDevOs[row[3]]
            XDevices[NumRows, 4] = DictCountry[row[4]]
            XDevices[NumRows, 5] = np.float_(row[5])
            XDevices[NumRows, 6] = DictAnnC1[row[6]]
            XDevices[NumRows, 7] = DictAnnC2[row[7]]
            XDevices[NumRows, 8] = np.float_(row[8])
            XDevices[NumRows, 9] = np.float_(row[9])
            XDevices[NumRows, 10] = np.float_(row[10])
            NumRows = NumRows + 1
    return XDevices
Example 9: pval_KalZtest
def pval_KalZtest(n1, N1, n2, N2):
    """Compute the p-value using the Kal Z-test for count data.

    Computes the p-value using the Z-test published in
    Kal et al., 1999, Mol Biol Cell 10:1859:

        Z = (p1 - p2) / sqrt( p0 * (1 - p0) * (1/N1 + 1/N2) )

    where p1 = n1/N1, p2 = n2/N2, and p0 = (n1+n2)/(N1+N2).
    You reject if |Z| > Z_{a/2}, where a is the significance level;
    here we return the p-value itself.
    """
    if n1 == 0 and n2 == 0:
        return 1.0
    n1 = np.float_(n1)
    N1 = np.float_(N1)
    n2 = np.float_(n2)
    N2 = np.float_(N2)
    p0 = (n1 + n2) / (N1 + N2)
    p1 = n1 / N1
    p2 = n2 / N2
    Z = (p1 - p2) / np.sqrt(p0 * (1 - p0) * ((1 / N1) + (1 / N2)))
    pval = 2 * sp.stats.norm.cdf(-1 * abs(Z))
    return pval
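A worked call (assuming import numpy as np and import scipy.stats, with sp bound to scipy): for 5 hits in 100 trials versus 20 in 100, p0 = 25/200 = 0.125, Z is about -3.21, and the two-sided p-value is about 0.0013.

import numpy as np
import scipy as sp
import scipy.stats

print(pval_KalZtest(5, 100, 20, 100))  # ~0.0013: the two proportions differ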
Example 10: matrix_mul
def matrix_mul(X1, X2, shard_size=5000):
    """Calculate a matrix product for large matrices.

    X1 and X2 are sliced into shards of shard_size rows (columns),
    multiplied piecewise, and the pieces are concatenated back into
    the full result.
    """
    X1 = np.float_(X1)
    X2 = np.float_(X2)
    X1_shape = X1.shape
    X2_shape = X2.shape
    assert X1_shape[1] == X2_shape[0]
    X1_iter = X1_shape[0] // shard_size + 1
    X2_iter = X2_shape[1] // shard_size + 1
    all_result = np.zeros((1,))
    for X1_id in range(X1_iter):
        result = np.zeros((1,))
        for X2_id in range(X2_iter):
            # multiply one row shard of X1 with one column shard of X2
            partial_result = np.matmul(
                X1[X1_id * shard_size:min((X1_id + 1) * shard_size, X1_shape[0]), :],
                X2[:, X2_id * shard_size:min((X2_id + 1) * shard_size, X2_shape[1])])
            # concatenate the column shards together
            if result.size == 1:
                result = partial_result
            else:
                result = np.concatenate((result, partial_result), axis=1)
            del partial_result
        # stack the completed row shard onto the full result
        if all_result.size == 1:
            all_result = result
        else:
            all_result = np.concatenate((all_result, result), axis=0)
        del result
    return all_result
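A quick correctness check against plain np.matmul (a sketch; the small shard_size just forces several shards on toy-sized inputs):

import numpy as np

rng = np.random.default_rng(0)
A = rng.standard_normal((300, 200))
B = rng.standard_normal((200, 400))

C = matrix_mul(A, B, shard_size=128)
print(C.shape)                          # (300, 400)
print(np.allclose(C, np.matmul(A, B)))  # True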
Example 11: linearinterpolation
def linearinterpolation(nstep, ndata, dt):
    """Compute weights and indices for linear interpolation between
    transport matrices.

    Parameters
    ----------
    nstep : int
        Number of time steps.
    ndata : int
        Number of matrices.
    dt : float
        Length of one time step (fraction of a year).

    Returns
    -------
    alpha, beta : array
        Coefficients for the interpolation.
    jalpha, jbeta : array
        Indices of the two matrices to interpolate between.
    """
    t = np.zeros(nstep, dtype=np.float_)
    for i in range(nstep):
        t[i] = np.fmod(0 + i * dt, 1.0)
    # (the original preallocated alpha and beta with np.array(nstep, ...),
    # which creates a 0-d array; both are overwritten below anyway)
    w = t * ndata + 0.5
    beta = np.float_(np.fmod(w, 1.0))
    alpha = np.float_(1.0 - beta)
    jalpha = np.fmod(np.floor(w) + ndata - 1.0, ndata).astype(int)
    jbeta = np.fmod(np.floor(w), ndata).astype(int)
    return alpha, beta, jalpha, jbeta
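A quick check mirroring the call in Example 5 (2880 time steps per year, 12 monthly matrices): the two weights must sum to one at every step, and the two matrix indices must be consecutive modulo 12.

import numpy as np

alpha, beta, ja, jb = linearinterpolation(2880, 12, 1.0 / 2880)
print(np.allclose(alpha + beta, 1.0))  # True
print(np.all((ja + 1) % 12 == jb))     # True: adjacent matrices are blended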
Example 12: readSnap
def readSnap(self, f):
    # Read one snapshot from a text dump file (the fixed line layout matches
    # a LAMMPS-style dump: timestep on line 1, atom count on line 3, box
    # bounds on lines 5-7, column names on line 8, then one line per atom).
    snap = Snap()
    snap.time = 0
    snap.box = []
    snap.atoms = []
    snap.natoms = 0
    for i, line in enumerate(f):
        if i > 8 and i < 8 + snap.natoms:
            snap.atoms.append(line.split())
        elif i == 3:
            snap.natoms = int(line.split()[0])
        elif i == 5 or i == 6 or i == 7:
            snap.box.append(np.float_(line.split()))
        elif i == 4:
            if len(line.split()) == 3:
                snap.boundary = []
            else:
                snap.boundary = line.split()[3:]
        elif i == 8:
            snap.descriptor = line.split()[2:]
        elif i == 1:
            snap.time = int(line.split()[0])
        elif i == 8 + snap.natoms:
            snap.atoms.append(line.split())
            break
    snap.atoms = np.float_(snap.atoms)  # convert the string table to a float array
    snap.box = np.array(snap.box)
    return snap
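A hedged usage sketch (the file name is hypothetical, and reader stands for an instance of the class this method belongs to; since the method consumes an open file object, consecutive snapshots can be read with repeated calls):

with open('dump.lammpstrj') as f:  # hypothetical dump file
    snap = reader.readSnap(f)
    print(snap.time, snap.natoms)
    print(snap.box)                # box bounds, one row per dimension
    print(snap.atoms.shape)        # (natoms, number of per-atom columns)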
Example 13: slidingMax
def slidingMax(x, y, dx):
    # Requires scipy.weave (Python 2 only):
    #   from scipy import weave
    #   from scipy.weave import converters
    x = np.float_(x)
    y = np.float_(y)
    LX = len(x)
    ymax = np.ones(LX) * y.min()
    # Inlined C: for each x(i), scan the window [x(i)-dx/2, x(i)+dx/2] and
    # keep the largest y, remembering where the previous window started so
    # the scan does not restart from the beginning each time.
    code = """
    int i, j, j0, inloop;
    j0 = 1;
    for (i=0; i<LX; i++){
        j = j0;
        inloop = 0;
        while ((x(j) <= x(i)+dx/2) && (j < LX)) {
            if ((x(j) >= x(i)-dx/2) && (x(j) <= x(i)+dx/2)) {
                if (y(j) > ymax(i)) {
                    ymax(i) = y(j);
                }
                inloop = 1;
            }
            if (inloop == 0) {
                j0 = j; // memorize where we started before
            }
            j++;
        }
    }
    """
    err = weave.inline(code,
                       ['x', 'y', 'dx', 'LX', 'ymax'],
                       type_converters=converters.blitz,
                       compiler='gcc')
    return ymax
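scipy.weave exists only for Python 2, so as a reference here is a pure-NumPy sketch of the same sliding-window maximum (my own equivalent, not the original author's code):

import numpy as np

def sliding_max_numpy(x, y, dx):
    """For each x[i], the maximum of y over all points within dx/2 of x[i]."""
    x = np.float_(x)
    y = np.float_(y)
    ymax = np.empty_like(y)
    for i, xi in enumerate(x):
        mask = np.abs(x - xi) <= dx / 2
        ymax[i] = y[mask].max()  # the window always contains point i itself
    return ymax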
Example 14: check_numpy_scalar_argument_return_generic
def check_numpy_scalar_argument_return_generic(self):
    f = PyCFunction('foo')
    f += Variable('a1', numpy.int_, 'in, out')
    f += Variable('a2', numpy.float_, 'in, out')
    f += Variable('a3', numpy.complex_, 'in, out')
    foo = f.build()
    args = 2, 1.2, 1+2j
    # numpy.complex was an alias of the builtin complex (removed in newer NumPy)
    results = numpy.int_(2), numpy.float_(1.2), numpy.complex(1+2j)
    assert_equal(foo(*args), results)
    args = [2], [1.2], [1+2j]
    assert_equal(foo(*args), results)
    args = [2], [1.2], [1, 2]
    assert_equal(foo(*args), results)
    f = PyCFunction('foo')
    f += Variable('a1', 'npy_int', 'in, out')
    f += Variable('a2', 'npy_float', 'in, out')
    f += Variable('a3', 'npy_complex', 'in, out')
    foo = f.build()
    args = 2, 1.2, 1+2j
    results = numpy.int_(2), numpy.float_(1.2), numpy.complex(1+2j)
    assert_equal(foo(*args), results)
    args = [2], [1.2], [1+2j]
    assert_equal(foo(*args), results)
    args = [2], [1.2], [1, 2]
    assert_equal(foo(*args), results)
Example 15: from_edf
def from_edf(fname, compression=None, below_water=False, lon=None, lat=None):
    """
    DataFrame constructor to open XBT EDF ASCII format.

    Examples
    --------
    >>> from ctd import DataFrame
    >>> cast = DataFrame.from_edf('../test/data/XBT.EDF.gz',
    ...                           compression='gzip')
    >>> fig, ax = cast['temperature'].plot()
    >>> ax.axis([20, 24, 19, 0])
    >>> ax.grid(True)
    """
    f = read_file(fname, compression=compression)
    header, names = [], []
    for k, line in enumerate(f.readlines()):
        line = line.strip()
        if line.startswith("Serial Number"):
            serial = line.strip().split(":")[1].strip()
        elif line.startswith("Latitude"):
            hemisphere = line[-1]
            lat = line.strip(hemisphere).split(":")[1].strip()
            lat = np.float_(lat.split())  # degrees and decimal minutes
            if hemisphere == "S":
                lat = -(lat[0] + lat[1] / 60.0)
            elif hemisphere == "N":
                lat = lat[0] + lat[1] / 60.0
            else:
                raise ValueError("Latitude not recognized.")
        elif line.startswith("Longitude"):
            hemisphere = line[-1]
            lon = line.strip(hemisphere).split(":")[1].strip()
            lon = np.float_(lon.split())
            if hemisphere == "W":
                lon = -(lon[0] + lon[1] / 60.0)
            elif hemisphere == "E":
                lon = lon[0] + lon[1] / 60.0
            else:
                raise ValueError("Longitude not recognized.")
        else:
            header.append(line)
            if line.startswith("Field"):
                col, unit = [l.strip().lower() for l in line.split(":")]
                names.append(unit.split()[0])
        if line == "// Data":
            skiprows = k + 1
            break
    f.seek(0)
    cast = read_table(f, header=None, index_col=None, names=names,
                      dtype=float, skiprows=skiprows, delim_whitespace=True)
    f.close()
    cast.set_index("depth", drop=True, inplace=True)
    cast.index.name = "Depth [m]"
    name = basename(fname)[1]
    if below_water:
        cast = remove_above_water(cast)
    return CTD(cast, longitude=lon, latitude=lat, serial=serial,
               name=name, header=header)