This article collects typical usage examples of Python's numpy.nansum function. If you have been wondering what exactly nansum does, how to use it, or where to find usage examples, the hand-picked code samples below may help.
Shown below are 15 code examples of the nansum function, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
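Before the examples, a one-line refresher: np.nansum sums array elements while treating NaN as zero. A minimal sketch (values are illustrative):

import numpy as np

a = np.array([1.0, np.nan, 2.0])
print(np.sum(a))     # nan - a single NaN poisons the ordinary sum
print(np.nansum(a))  # 3.0 - NaNs are treated as zero
print(np.nansum(np.array([np.nan])))  # 0.0 - an all-NaN input sums to zero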
Example 1: clean_weights
import numpy as np

def clean_weights(weights, must_haves=None, fraction=0.5):
    # Replace NaN/zero weights that must be present, then renormalise.
    if must_haves is None:
        must_haves = [True] * len(weights)
    if not any(must_haves):
        return [0.0] * len(weights)
    needs_replacing = [(np.isnan(x) or x == 0.0) and must_haves[i]
                       for (i, x) in enumerate(weights)]
    keep_empty = [(np.isnan(x) or x == 0.0) and not must_haves[i]
                  for (i, x) in enumerate(weights)]
    no_replacement_needed = [(not keep_empty[i]) and (not needs_replacing[i])
                             for (i, x) in enumerate(weights)]
    if not any(needs_replacing):
        return weights
    missing_weights = sum(needs_replacing)
    total_for_missing_weights = fraction * missing_weights / (
        float(np.nansum(no_replacement_needed) + np.nansum(missing_weights)))
    adjustment_on_rest = (1.0 - total_for_missing_weights)
    each_missing_weight = total_for_missing_weights / missing_weights

    def _good_weight(value, idx, needs_replacing, keep_empty,
                     each_missing_weight, adjustment_on_rest):
        if needs_replacing[idx]:
            return each_missing_weight
        if keep_empty[idx]:
            return 0.0
        else:
            return value * adjustment_on_rest

    weights = [_good_weight(value, idx, needs_replacing, keep_empty,
                            each_missing_weight, adjustment_on_rest)
               for (idx, value) in enumerate(weights)]
    xsum = sum(weights)
    weights = [x / xsum for x in weights]
    return weights
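A quick hypothetical call: the NaN slot receives half of one equal share (the default fraction=0.5), the surviving weights absorb the rest, and the result is renormalised to sum to 1:

import numpy as np

weights = [0.4, np.nan, 0.6]  # hypothetical portfolio weights
print(clean_weights(weights, must_haves=[True, True, True]))
# -> [0.333..., 0.166..., 0.5]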
Example 2: logprob2dslope
import numpy as np

def logprob2dslope(p, x, y, x_err, y_err):
    m, scatter = p[0], p[1]
    if scatter < 0.0:
        return -np.inf
    sigma = (scatter + y_err**2 + m**2 * x_err**2)
    lp = -0.5 * np.nansum((y - m*x)**2 / sigma) - 0.5 * np.nansum(np.log(sigma))
    return lp
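The function is a Gaussian log-likelihood for a zero-intercept line y = m·x, where each point's variance combines the intrinsic scatter term with both measurement errors. A hypothetical call:

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0])  # hypothetical data
y = np.array([0.1, 2.2, 3.9, 6.1])
x_err = np.full(4, 0.1)
y_err = np.full(4, 0.2)
print(logprob2dslope([2.0, 0.05], x, y, x_err, y_err))  # higher is more likely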
Example 3: test_nansum_with_boolean
import numpy as np

def test_nansum_with_boolean(self):
    # gh-2978: nansum on a boolean array must not raise
    a = np.zeros(2, dtype=bool)
    try:
        np.nansum(a)
    except Exception:
        raise AssertionError()
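For context on gh-2978: np.nansum on a boolean array now simply counts the True entries instead of raising:

import numpy as np

mask = np.array([True, False, True])
print(np.nansum(mask))  # 2 - booleans sum as 0/1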
Example 4: exp1computeLL
import numpy as np

def exp1computeLL(self, dat, f):
    # D, getProb and data2a are defined at module level in the source project
    T = 20
    # initialize
    q = np.zeros(T+1); q[0] = self.q0
    u = np.zeros(T+1); u[0] = self.u0
    a = np.zeros((T+1, D)); self.f = []
    p = np.zeros((T+1, D))
    a[0, :] = np.ones(10) / 3.0
    a[0, -1] = np.nan
    a[0, :3] *= q[0]
    a[0, 3:6] *= (1 - q[0]) * u[0]
    a[0, 6:] *= (1 - q[0]) * (1 - u[0])
    phase = 0
    LL = 0
    #print a[0, :]
    for t in range(T):
        if t > 10:
            phase = 1
        else:
            phase = 0
        p[t, :] = getProb(a[t, :], self.d)
        m = data2a[dat[t], :, phase]
        w = np.power(a[t, :], self.m)
        loglik = np.nansum(np.log(np.maximum(0.001, p[t, m == f[t]])))
        if f[t] == 1:
            s = m * w
            a[t+1, :] = self.g * s / np.nansum(s) + (1 - self.g) * a[t, :]
        else:
            s = (1 - m) * w
            a[t+1, :] = self.h * s / np.nansum(s) + (1 - self.h) * a[t, :]
        #print t, dat[t], f[t], np.nansum(p[t, m == f[t]]), loglik
        #print 'm= ', m
        #print 'p= ', p
        LL += loglik
    return LL
Example 5: crunchy3
import numpy as np

def crunchy3(offset, eta, sec, sigma=None):
    # weight_function3 is defined elsewhere in the source module
    powers = []
    powers_norm = []
    y_axis = sec.get_y_axis()
    x_axis = sec.get_x_axis()
    px_y = np.absolute(y_axis[1] - y_axis[0])
    px_x = np.absolute(x_axis[1] - x_axis[0])
    if sigma is None:
        sigma = [px_y, px_x]
    if sigma[0] < px_y:
        sigma = [px_y, sigma[1]]
    if sigma[1] < px_x:
        sigma = [sigma[0], px_x]
    for yi in range(len(y_axis)):
        y = y_axis[yi]
        for xi in range(len(x_axis)):
            x = x_axis[xi]
            y_eff = y + eta * offset ** 2
            x_eff = x - offset
            this_weight = weight_function3(eta, y, x, y_eff, x_eff, sigma)
            if this_weight is None:
                powers.append(None)
                powers_norm.append(None)
            else:
                variance = 1 / this_weight
                powers.append(sec.get([yi, xi]) / variance)
                powers_norm.append(1 / variance)
    p = np.nansum(list(filter(None, powers)))
    pn = np.nansum(list(filter(None, powers_norm)))
    return offset, p / pn
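The closing pair of nansum calls forms an inverse-variance weighted mean, sum(x_i / sigma_i^2) / sum(1 / sigma_i^2). A minimal standalone sketch with made-up numbers:

import numpy as np

x = np.array([10.0, 12.0, np.nan])  # hypothetical measurements
var = np.array([1.0, 4.0, 2.0])     # hypothetical variances
mean = np.nansum(x / var) / np.nansum(np.isfinite(x) / var)  # -> 10.4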
Example 6: Q_factor
import numpy as np

def Q_factor(A, B):
    """Compute the "overlap" between the images A and B."""
    A_norm = np.nansum(A ** 2) ** 0.5
    B_norm = np.nansum(B ** 2) ** 0.5
    values = (A / A_norm) * (B / B_norm)
    return np.nansum(values)
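Q_factor is a normalised inner product (a cosine similarity between the flattened images), so an image compared with itself scores 1 even with NaN pixels. A quick hypothetical check:

import numpy as np

A = np.array([[1.0, 2.0], [np.nan, 3.0]])  # hypothetical image with a bad pixel
print(Q_factor(A, A))  # 1.0 - NaN entries are ignored by nansum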
Example 7: bic
import numpy as np

def bic(em_fit_result_dict, LL_all):
    '''
    Compute the Bayesian Information Criterion score
    '''
    # Number of parameters:
    # - mixt_target: Tnum
    # - mixt_random: Tnum
    # - mixt_nontargets: Tnum
    # - alpha: 1
    # - beta: 1
    # First, the log-likelihood term
    bic_tot = -2. * np.nansum(LL_all[np.isfinite(LL_all)])
    # Then count alpha and beta appropriately
    K = 2
    bic_tot += K * np.log(np.nansum(np.isfinite(LL_all)))
    # Finally, the mixture proportions per condition
    for T_i, T in enumerate(em_fit_result_dict['T_space']):
        K = 2 + int(T > 1)
        bic_tot += K * np.log(np.nansum(np.isfinite(LL_all[T_i])))
    return bic_tot
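For reference, the score assembled above is the standard BIC recipe, BIC = -2·ln L + k·ln n, applied once per parameter group. A toy illustration with hypothetical numbers:

import numpy as np

LL_all = np.array([-10.2, -9.8, np.nan, -11.1])  # hypothetical log-likelihoods
k = 2                                            # hypothetical parameter count
n = np.nansum(np.isfinite(LL_all))               # observations = finite entries
bic = -2. * np.nansum(LL_all[np.isfinite(LL_all)]) + k * np.log(n)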
Example 8: nansum
import numpy as np

def nansum(self, axis=None, dtype=None, out=None):
    # Sum the magnitudes, ignoring NaNs; propagate uncertainties in quadrature.
    # UncertainQuantity is defined in the surrounding package.
    return UncertainQuantity(
        np.nansum(self.magnitude, axis, dtype, out),
        self.dimensionality,
        (np.nansum(self.uncertainty.magnitude**2, axis))**0.5,
        copy=False
    )
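The third constructor argument propagates uncertainty in quadrature: for a sum of independent quantities, sigma_total = sqrt(sum(sigma_i^2)). The same step in isolation:

import numpy as np

sigmas = np.array([0.1, 0.2, np.nan])      # hypothetical per-element uncertainties
sigma_total = np.nansum(sigmas**2) ** 0.5  # ~0.224; NaN entries drop out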
Example 9: likelihood
import numpy as np

def likelihood(self, a=None, b=None, s=None):
    r"""
    \sum_{i,j} w_{ij} [y_{ij} s_j (a_i + b_j)
                       - \log(1 + \exp(s_j (a_i + b_j)))]
    """
    if (a is None) and (b is None) and (s is None):
        a = np.array(list(self.a_est.values()))
        b = np.array(list(self.b_est.values()))
        if self.model == '2PL':  # '==', not 'is': identity checks on strings are unreliable
            s = np.array(list(self.s_est.values()))
    c = a[self.obser['index_user']] + b[self.obser['index_item']]
    if (self.model == '2PL') and (s is not None):
        c = s[self.obser['index_item']] * c
    pos = self.data[self.response].values > 0
    # account for weights
    w = 1.0
    if self.wts is not None:
        w = _fc(self.data[self.wts])
        first_term = np.nansum(w[pos] * c[pos])
    else:
        first_term = np.nansum(c[pos])
    second_term = np.nansum(w * np.log(1 + np.exp(c)))
    return (first_term - second_term -
            self.alpha * np.sum(a*a) - self.alpha * np.sum(b*b))
Example 10: effrad
import numpy as np

def effrad(CL, inst, bindist='lin'):
    """Return the effective radius for a given instrument over the entire
    cloud period. The radius is in the instrument's units (usually
    micrometres); note that if the instrument's size bins are diameters,
    the result is an effective diameter.
    example: CloudObj.effrad(inst='FSSP96', bindist='lin')
    bindist is 'lin' if the bins are linearly spaced (FSSPs) and 'log' if
    they are logarithmically spaced (PCASP)."""
    # Formula from https://en.wikipedia.org/wiki/Cloud_drop_effective_radius
    # (latest access Oct 2013).
    [pos, sd] = [[i, sd] for i, sd in enumerate(CL.sd)
                 if sd["Distname"].lower() == inst.lower()][0]
    # building the dr (dradius) vector
    R = sd["bins"]
    t = len(R)
    b = np.zeros([t])
    h = np.zeros([t])
    if bindist == 'lin':
        for i in range(1, t):
            b[i] = (R[i-1] + R[i]) / 2.
        for i in range(0, t-1):
            h[i] = (R[i+1] + R[i]) / 2.
        b[0] = R[0] - (R[1] - h[0])
        h[t-1] = R[t-1] + (b[t-1] - R[t-2])
        dR = h - b
    elif bindist == 'log':
        for i in range(1, t):
            b[i] = 10**((np.log10(R[i-1]) + np.log10(R[i])) / 2.)
        for i in range(0, t-1):
            h[i] = 10**((np.log10(R[i+1]) + np.log10(R[i])) / 2.)
        b[0] = 10**(np.log10(R[0]) + (np.log10(R[1]) - np.log10(h[1])))
        h[t-1] = 10**(np.log10(R[t-1]) - (np.log10(b[t-2]) - np.log10(R[t-2])))
        dR = h - b
    else:
        print("[effrad] bindist option entry is neither 'lin' nor 'log'.")
    # calculating the effective radius
    ER = (np.nansum((sd["bins"]**3 * dR) * sd["data"].transpose(), axis=1)
          / np.nansum((sd["bins"]**2 * dR) * sd["data"].transpose(), axis=1))
    return ER
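The weighting above is the textbook effective radius: the ratio of the third to the second moment of the size distribution, r_e = sum(r^3 n(r) dr) / sum(r^2 n(r) dr). A minimal standalone sketch with a made-up distribution:

import numpy as np

r = np.array([1.0, 2.0, 5.0, 10.0])    # hypothetical bin-centre radii
dr = np.array([1.0, 2.0, 4.0, 6.0])    # hypothetical bin widths
n = np.array([100., 50., np.nan, 2.])  # hypothetical counts (NaN = missing bin)
r_eff = np.nansum(r**3 * dr * n) / np.nansum(r**2 * dr * n)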
Example 11: LinearSolveAll
import os
import numpy as np
import matplotlib.pyplot as plt

def LinearSolveAll():
    # Nearest2D is defined elsewhere in the source project
    Dir = os.getcwd()
    DataDir = Dir + '/DataFormatted/'
    Locations = np.genfromtxt(DataDir + 'SeismicLocations.csv')
    Locations[:, 0] = Locations[:, 0] - 360
    Density = np.genfromtxt(DataDir + 'DenseAll.csv')
    Qs = np.genfromtxt(DataDir + 'LongLatSurfaceHeat.csv', skip_header=1, delimiter=',')
    Qm = np.genfromtxt(DataDir + 'MantleHeat.txt', skip_header=1, delimiter=',')
    QsInterp = Nearest2D(Qs[:, 0:2], Qs[:, 2])
    QmInterp = Nearest2D(Qm[:, 0:2], Qm[:, 2])
    Avocado = 6.022e23  # Avogadro's number: mol-to-atoms conversion
    qs = QsInterp(Locations[:, 0:2]) / 1000
    qm = QmInterp(Locations[:, 0:2]) / 1000
    # Density[Density > 3.1] = np.nan
    Fels = (3 - Density) / 0.3
    Fels[Density < 2.7] = 1
    Fels[Density > 3] = 0
    years = 365.24 * 24 * 60 * 60  # years-to-seconds conversion
    Depth = np.genfromtxt(DataDir + 'Depth.csv')
    dz = (Depth[1] - Depth[0]) * 1000
    UContentU = 2.8e-6 / 238                # upper-crust uranium, mol/g
    ThContentU = UContentU * 3.8 / 232      # upper-crust thorium, mol/g
    K40ContentU = 2 * 120e-6 * 3.4e-2 / 94  # upper-crust 40K, mol/g
    UContentL = 0.2e-6 / 238                # lower crust: mol/g of each cell
    ThContentL = 1.2e-6 / 232
    K40ContentL = 2 * 120e-6 * 0.6e-2 / 94
    alpha238 = 7.41e-12  # Joules/decay
    alpha235 = 7.24e-12  # Joules/decay
    alpha232 = 6.24e-12  # Joules/decay
    beta = 1.14e-13      # Joules/decay
    LamU238 = np.log(2) / (4.468 * 1e9)  # decay rate of 238U, 1/years
    LamTh232 = np.log(2) / (1.405e10)    # decay rate of 232Th, 1/years
    LamU235 = np.log(2) / (703800000)    # decay rate of 235U, 1/years
    LamK40 = np.log(2) / 1.248e9         # decay rate of 40K, 1/years
    UraniumHeatL = (alpha238 * Avocado * UContentL * LamU238 / years
                    + alpha235 * Avocado * UContentL * LamU235 / years / 137.88)
    ThoriumHeatL = alpha232 * Avocado * ThContentL * LamTh232 / years
    KHeatL = beta * Avocado * K40ContentL * LamK40 / years
    TotalHeatL = UraniumHeatL + ThoriumHeatL + KHeatL  # W/gram
    UraniumHeatU = (alpha238 * Avocado * UContentU * LamU238 / years
                    + alpha235 * Avocado * UContentU * LamU235 / years / 137.88)
    ThoriumHeatU = alpha232 * Avocado * ThContentU * LamTh232 / years
    KHeatU = beta * Avocado * K40ContentU * LamK40 / years
    qc = qs - qm
    FluxL = np.nansum((1 - Fels) * TotalHeatL * dz * Density * 1e6, 0)
    TotalHeatU = (qc - FluxL) / np.nansum(Fels * Density * 1e6 * dz, 0)
    print(TotalHeatL)
    print(dz)
    plt.close('all')
    return qc * 1e3  # return in W/g
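Each isotope's heat term above follows one pattern: (energy per decay) × (atoms per gram) × (decay rate per year) / (seconds per year) gives watts per gram. The same computation for 238U alone, reusing the example's own constants:

import numpy as np

E_alpha = 7.41e-12                 # J per 238U decay (alpha238 above)
N_A = 6.022e23                     # Avogadro's number, atoms/mol
n_U238 = 2.8e-6 / 238              # mol of 238U per gram (UContentU above)
lam = np.log(2) / 4.468e9          # 238U decay constant, 1/years
sec_per_year = 365.24 * 24 * 3600
H_U238 = E_alpha * N_A * n_U238 * lam / sec_per_year  # W per gram of rock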
Example 12: calculate_avg
import numpy as np

def calculate_avg():  # DONE
    # nb_rows, weights, data_ratios and args are globals of the source script
    global data_ratios_avg
    global data_ratios_std
    # remove nan values of the weights
    weights_nan = np.zeros((nb_rows, 1))
    weights_nan_sq = np.zeros((nb_rows, 1))
    nb_files = np.ones((nb_rows, 1)) * len(args.xvgfilenames)
    tmp_weights_nan = np.zeros((nb_rows, len(args.xvgfilenames)))
    for r in range(0, nb_rows):
        tmp_weights_nan[r, :] = weights
        for f_index in range(0, len(args.xvgfilenames)):
            if np.isnan(data_ratios[r, f_index]):
                tmp_weights_nan[r, f_index] = 0
                nb_files[r, 0] -= 1
    weights_nan[:, 0] = np.nansum(tmp_weights_nan, axis=1)
    weights_nan_sq[:, 0] = np.nansum(tmp_weights_nan**2, axis=1)
    weights_nan[weights_nan == 0] = 1
    # avg
    data_ratios_avg = np.zeros((nb_rows, 1))
    # np.nanmean replaces scipy.stats.nanmean, which was removed from modern SciPy
    data_ratios_avg[:, 0] = np.nanmean(data_ratios * weights * nb_files / weights_nan, axis=1)
    # std
    tmp_std = np.zeros((nb_rows, 1))
    tmp_std[:, 0] = np.nansum(weights * (data_ratios - data_ratios_avg[:, 0:1])**2, axis=1)
    tmp_div = np.copy((weights_nan)**2 - weights_nan_sq)
    tmp_div[tmp_div == 0] = 1
    data_ratios_std = np.sqrt(weights_nan / tmp_div * tmp_std)
    return
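The (sum w)^2 - sum(w^2) denominator built in tmp_div is the usual bias correction for a weighted sample variance with reliability weights. In isolation, with toy values:

import numpy as np

x = np.array([1.0, 2.0, 4.0])  # hypothetical samples
w = np.array([1.0, 2.0, 1.0])  # hypothetical weights
mean = np.sum(w * x) / np.sum(w)
V1, V2 = np.sum(w), np.sum(w**2)
var = V1 / (V1**2 - V2) * np.sum(w * (x - mean)**2)  # unbiased weighted variance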
Example 13: orientation_numpy
import numpy as np

def orientation_numpy(normals, weights):
    # expmap comes from the surrounding project
    # Project the normals against the plane
    dx, dy, dz = np.rollaxis(normals, 2)
    # Use the quadruple angle formula to push everything around the
    # circle 4 times faster, like doing mod(x, pi/2)
    qz = 4 * dz * dx * dx * dx - 4 * dz * dz * dz * dx
    qx = dx * dx * dx * dx - 6 * dx * dx * dz * dz + dz * dz * dz * dz
    # Build the weights using a threshold, finding the normals lying on
    # the XZ plane
    d = 0.3
    global cx, qqx, qqz
    cx = np.max((1.0 - dy * dy / (d * d), 0 * dy), 0)
    w = weights * cx
    qqx = np.nansum(w * qx) / w.sum()
    qqz = np.nansum(w * qz) / w.sum()
    angle = np.arctan2(qqz, qqx) / 4
    q0 = np.array([np.cos(angle), 0, np.sin(angle)])
    q0 /= np.sqrt(np.dot(q0, q0))
    q2 = np.cross(q0, np.array([0, 1, 0]))
    # Build an output matrix out of the components
    mat = np.vstack((q0, np.array([0, 1, 0]), q2))
    axes = expmap.rot2axis(mat)
    return axes
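The qx/qz expressions are exactly the quadruple-angle identities: with dx = cos(theta) and dz = sin(theta), qx = cos(4*theta) and qz = sin(4*theta), so arctan2(qqz, qqx)/4 recovers the dominant orientation modulo pi/2. A quick numeric check (theta chosen arbitrarily):

import numpy as np

theta = 0.3  # hypothetical angle
dx, dz = np.cos(theta), np.sin(theta)
qx = dx**4 - 6 * dx**2 * dz**2 + dz**4
qz = 4 * dz * dx**3 - 4 * dz**3 * dx
assert np.isclose(qx, np.cos(4 * theta))
assert np.isclose(qz, np.sin(4 * theta))
assert np.isclose(np.arctan2(qz, qx) / 4, theta)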
Example 14: integrate
import numpy
import pandas as pd

def integrate(self, frequencies=None,
              radius=2.7, nooffset=False,
              azel='az'):
    """
    Given a radius, calculate the beam integral inside the radius and
    also the total integral
    """
    if frequencies is None:
        frequencies = self.cfg['synth']['freq']
    lisdic = []
    for i, freq in enumerate(frequencies):
        if freq in self.cfg['synth']['freq']:
            dic = {}
            dic['frequency'] = freq
            if azel in ('az', 'el'):
                find = self.cfg['synth']['freq'].index(freq) * 2 + 1
            else:
                find = self.cfg['synth']['freq'].index(freq) * 2 + 2
            if not nooffset:
                ydata = numpy.sqrt(self.data[:, find]**2 - self.offset**2)
            else:
                ydata = self.data[:, find]
            if azel in ('az', 'el'):
                xdata = self.data[:, 0]
            else:
                xdata = numpy.sqrt(self.data[:, 0]**2 + self.data[:, 1]**2)
                ind = numpy.where(self.data[:, 0] < 0)
                xdata[ind] = -xdata[ind]
            idx = numpy.where(numpy.abs(xdata) <= radius)
            dic['inner'] = numpy.nansum(ydata[idx])
            dic['all'] = numpy.nansum(ydata)
            lisdic.append(dic)
            print(freq, dic['inner'], dic['all'])
    return pd.DataFrame(lisdic)
Example 15: reportCreate
import copy
import numpy as np
from numpy import nanmean  # the source likely pulled nanmean from scipy.stats

def reportCreate(data, paramDict):
    report = copy.deepcopy(paramDict)
    # Order all Mod first, then all Org
    setKeys = sorted(data["DataSets"].keys())
    bestRes = ""
    start = 0
    end = len(setKeys)
    middle = end // 2
    i = start
    while i < end // 2:
        # Calculate score
        modBs = np.array(data["DataSets"][setKeys[i]])
        obsBs = np.array(data["DataSets"][setKeys[middle]])
        modBsmean = nanmean(modBs)
        obsBsmean = nanmean(obsBs)
        obsBsMinModBs = obsBs - modBs
        obsBsMinMean = obsBs - obsBsmean
        SSres = np.nansum(obsBsMinModBs ** 2)
        SStot = np.nansum(obsBsMinMean ** 2)
        ResNorm = SSres ** 0.5
        if i == 0:
            bestRes = copy.copy(ResNorm)
        report[setKeys[i] + "_RN"] = ResNorm  # Norm of residuals
        i = i + 1
        middle = middle + 1
    return report, bestRes
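SSres and SStot here are the standard residual and total sums of squares; the report stores the residual norm sqrt(SSres), and the same pieces would give R^2 = 1 - SSres/SStot. A toy check with hypothetical data:

import numpy as np

obs = np.array([1.0, 2.0, np.nan, 4.0])  # hypothetical observations
mod = np.array([1.1, 1.8, 2.9, 4.2])     # hypothetical model values
SSres = np.nansum((obs - mod)**2)
SStot = np.nansum((obs - np.nanmean(obs))**2)
res_norm = SSres ** 0.5        # the "_RN" value stored in the report
r_squared = 1 - SSres / SStot  # coefficient of determination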