This article collects typical usage examples of the Python function numpy.nanvar. If you are wondering what nanvar does, how to call it, and how it is used in practice, the curated examples below should help.
The following 15 code examples of nanvar are shown, ordered by popularity.
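Before the examples, a quick reminder of the semantics: np.nanvar computes the variance of an array while ignoring NaN entries, whereas plain np.var returns nan as soon as one entry is missing. A minimal sketch:

import numpy as np

a = np.array([1.0, 2.0, np.nan, 4.0])
print(np.var(a))             # nan: the missing value poisons the result
print(np.nanvar(a))          # 1.5555...: population variance of [1.0, 2.0, 4.0]
print(np.nanvar(a, ddof=1))  # 2.3333...: sample (unbiased) variance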
Example 1: plot_profile_TKE_wind

def plot_profile_TKE_wind(synth):
    fig, ax = plt.subplots()
    colors = get_colors(synth)
    c = 0
    for key, value in synth.items():  # .iteritems() is Python 2 only
        for v in value:
            scase = str(key).zfill(2)
            sleg = str(v).zfill(2)
            synthfile = base_dir + 'c' + scase + '/leg' + sleg + '.cdf'
            U = read_synth(synthfile, 'F2U')
            V = read_synth(synthfile, 'F2V')
            Z = read_synth(synthfile, 'z')
            x = []
            y = []
            for n, z in enumerate(Z[1:15]):
                u = U[:, :, n + 1]
                v = V[:, :, n + 1]
                u_var = np.nanvar(u)
                v_var = np.nanvar(v)
                TKE = (u_var + v_var) / 2.
                x.append(TKE)
                y.append(z)
            label = 'Case: ' + scase + ' Leg: ' + sleg
            ax.plot(x, y, '-', label=label, color=colors[c])
            ax.set_ylim([0, 4])
            ax.set_xlabel('TKE [m2 s^-2]')
            ax.set_ylabel('Altitude MSL [km]')
            c += 1
    plt.suptitle('Spatial TKE at P3 synth levels')
    plt.draw()
    plt.legend()
Example 2: calc_stresses

def calc_stresses(self, beamvel, beamAng):
    """
    Calculate the stresses from the difference in the beam variances.

    Reference: Stacey, Monismith and Burau (1999), JGR 104,
    "Measurements of Reynolds stress profiles in unstratified
    tidal flow".
    """
    fac = 4 * np.sin(self['config']['beam_angle'] * deg2rad) * \
        np.cos(self['config']['beam_angle'] * deg2rad)
    # Note: Stacey defines the beams incorrectly for Workhorse ADCPs.
    # According to the Workhorse coordinate-transformation
    # documentation, the instrument's:
    #   x-axis points from beam 1 to 2, and
    #   y-axis points from beam 4 to 3.
    # Therefore:
    stress = ((np.nanvar(self.reshape(beamvel[0]), axis=-1) -
               np.nanvar(self.reshape(beamvel[1]), axis=-1)) + 1j *
              (np.nanvar(self.reshape(beamvel[2]), axis=-1) -
               np.nanvar(self.reshape(beamvel[3]), axis=-1))
              ) / fac
    if self.config.orientation == 'up':
        # This comes about because, when the ADCP is 'up', the u
        # and w velocities need to be multiplied by -1 (equivalent
        # to adding pi to the roll). See the coordinate-transformation
        # documentation for more info.
        #
        # The uw (real) component has two minus signs, but the vw (imag)
        # component only has one, therefore:
        stress.imag *= -1
    stress *= rotate.inst2earth_heading(self)
    if self.props['coord_sys'] == 'principal':
        stress *= np.exp(-1j * self.props['principal_angle'])
    return stress.real, stress.imag
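The core of this method is the variance-difference identity from Stacey et al. (1999): for an opposing beam pair inclined at angle theta to the vertical, var(b1) - var(b2) = 4 sin(theta) cos(theta) * uw, so dividing by fac recovers the Reynolds stress. A minimal standalone sketch with synthetic beam velocities (the beam angle, noise levels, and sign convention here are illustrative assumptions, not taken from the original code):

import numpy as np

theta = np.deg2rad(20.0)                   # assumed beam angle
rng = np.random.default_rng(0)
u = rng.normal(0, 0.10, 100000)            # horizontal velocity fluctuations
w = 0.3 * u + rng.normal(0, 0.05, 100000)  # correlated vertical fluctuations
# Along-beam velocities of an opposing beam pair:
b1 = u * np.sin(theta) + w * np.cos(theta)
b2 = -u * np.sin(theta) + w * np.cos(theta)
fac = 4 * np.sin(theta) * np.cos(theta)
uw_est = (np.nanvar(b1) - np.nanvar(b2)) / fac
print(uw_est, np.mean(u * w))              # the two estimates should agree closely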
Example 3: plot_profile_variance

def plot_profile_variance(dbz, vvel, ht, ax, case, ncases):
    dbz_variance = []
    vvel_variance = []
    count_gates = []
    global ti
    global n
    global colors
    if n == 0:
        # colors = sns.color_palette('hls', ncases)
        colors = sns.color_palette('Paired', ncases)
    for i in range(len(ht)):
        dbz_variance.append(np.nanvar(dbz[i, :]))
        vvel_variance.append(np.nanvar(vvel[i, :]))
        count_gates.append(vvel[i, :].size - np.sum(np.isnan(vvel[i, :])))
    inid = datetime(*(reqdates[case]['ini'] + [0, 0]))
    endd = datetime(*(reqdates[case]['end'] + [0, 0]))
    ti.append('\nCase ' + case + ': ' + inid.strftime('%Y-%b %dT%H:%M') +
              endd.strftime(' - %dT%H:%M UTC'))
    if n < 7:
        marker = 'None'
        # marker = 'o'
    else:
        marker = 'o'
    dbzv = [0, 180]
    vvelv = [0, 6]
    if np.any(ax):
        ax[0].plot(dbz_variance, ht, marker=marker, color=colors[n])
        ax[1].plot(vvel_variance, ht, marker=marker, color=colors[n])
        ax[2].plot(count_gates, ht, marker=marker, color=colors[n],
                   label='case ' + case)
        n += 1
    else:
        fig, ax = plt.subplots(1, 3, sharey=True, figsize=(12, 8))
        ax[0].plot(dbz_variance, ht, color=colors[n])
        ax[1].plot(vvel_variance, ht, color=colors[n])
        ax[2].plot(count_gates, ht, color=colors[n], label='case ' + case)
        ax[0].set_ylabel('Height MSL [km]')
        ax[0].set_xlabel('Reflectivity [dBZ^2]')
        ax[1].set_xlabel('Vertical velocity [m2 s^-2]')
        ax[2].set_xlabel('Count good gates')
        ax[0].set_xlim(dbzv)
        ax[1].set_xlim(vvelv)
        n += 1
        return ax
    if n == ncases and ncases == 4:
        plt.suptitle('SPROF time variance' + ''.join(ti))
        plt.subplots_adjust(top=0.85, left=0.05, right=0.95, wspace=0.05)
        ax[2].legend(loc='lower left')
    elif n == ncases and ncases > 4:
        plt.suptitle('SPROF time variance')
        plt.subplots_adjust(top=0.9, left=0.05, right=0.95, wspace=0.06)
        ax[2].legend()
    plt.draw()
Example 4: test_nanvar

def test_nanvar(self):
    tgt = np.var(self.mat)
    for mat in self.integer_arrays():
        assert_equal(np.nanvar(mat), tgt)

    tgt = np.var(self.mat, ddof=1)
    for mat in self.integer_arrays():
        assert_equal(np.nanvar(mat, ddof=1), tgt)
Example 5: test_nanvar

def test_nanvar(eng):
    original = arange(24).reshape((2, 3, 4)).astype(float64)
    data = fromlist(list(original), engine=eng)
    assert allclose(data.nanvar().shape, (1, 3, 4))
    assert allclose(data.nanvar().toarray(), nanvar(original, axis=0))

    original[0, 2, 3] = nan
    original[1, 0, 2] = nan
    original[1, 2, 2] = nan
    data = fromlist(list(original), engine=eng)
    assert allclose(data.nanvar().shape, (1, 3, 4))
    assert allclose(data.nanvar().toarray(), nanvar(original, axis=0))
Example 6: bayes_precision

def bayes_precision(x, y, distribution='normal', posterior_width=0.08,
                    num_iters=25000, inference='sampling'):
    """ Bayes precision computation.

    :param x: sample of a treatment group
    :type  x: pd.Series or list (array-like)
    :param y: sample of a control group
    :type  y: pd.Series or list (array-like)
    :param distribution: name of the KPI distribution model, which assumes a
        Stan model file with the same name exists
    :type  distribution: str
    :param posterior_width: the stopping criterion, threshold of the posterior width
    :type  posterior_width: float
    :param num_iters: number of iterations of Bayes sampling
    :type  num_iters: int
    :param inference: sampling or variational inference method for approximating the posterior
    :type  inference: str

    :return: results of type EarlyStoppingTestStatistics (without p-value and stat. power)
    :rtype:  EarlyStoppingTestStatistics
    """
    # Note: the format arguments must match the placeholder order
    # (procedure, treatment size, control size, distribution).
    logger.info("Started running bayes precision with {} procedure, treatment group of size {}, "
                "control group of size {}, {} distribution.".format(inference, len(x), len(y), distribution))

    traces, n_x, n_y, mu_x, mu_y = _bayes_sampling(x, y, distribution=distribution,
                                                   num_iters=num_iters, inference=inference)
    trace_normalized_effect_size = get_trace_normalized_effect_size(distribution, traces)
    trace_absolute_effect_size = traces['delta']

    credible_mass = 0.95
    left_out = 1.0 - credible_mass
    p1 = round(left_out / 2.0, 5)
    p2 = round(1.0 - left_out / 2.0, 5)
    credible_interval_delta = HDI_from_MCMC(trace_absolute_effect_size, credible_mass)
    credible_interval_delta_normalized = HDI_from_MCMC(trace_normalized_effect_size, credible_mass)

    stop = credible_interval_delta_normalized[1] - credible_interval_delta_normalized[0] < posterior_width

    treatment_statistics = SampleStatistics(int(n_x), float(mu_x), float(np.nanvar(x)))
    control_statistics = SampleStatistics(int(n_y), float(mu_y), float(np.nanvar(y)))
    variant_statistics = BaseTestStatistics(control_statistics, treatment_statistics)

    logger.info("Finished running bayes precision with {} procedure, treatment group of size {}, "
                "control group of size {}, {} distribution.".format(inference, len(x), len(y), distribution))

    return EarlyStoppingTestStatistics(variant_statistics.control_statistics,
                                       variant_statistics.treatment_statistics,
                                       float(mu_x - mu_y),
                                       dict([(p * 100, v) for p, v in zip([p1, p2], credible_interval_delta)]),
                                       None, None, stop)
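A minimal sketch of the stopping rule applied above, assuming a synthetic posterior trace and using a central credible interval via np.percentile as a stand-in for HDI_from_MCMC (a simplification: the true HDI is the narrowest interval containing the given mass):

import numpy as np

rng = np.random.default_rng(1)
trace = rng.normal(0.0, 0.02, size=25000)  # made-up normalized effect-size samples
credible_mass = 0.95
left_out = 1.0 - credible_mass
lo, hi = np.percentile(trace, [100 * left_out / 2.0, 100 * (1.0 - left_out / 2.0)])
stop = (hi - lo) < 0.08  # posterior narrow enough -> stop the experiment early
print(round(lo, 4), round(hi, 4), stop)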
Example 7: test_nanvar

def test_nanvar(eng):
    arr = array([arange(8), arange(8)]).astype(float64)
    data = fromarray(arr, engine=eng)
    val = data.nanvar().toarray()
    expected = nanvar(data.toarray(), axis=0)
    assert allclose(val, expected)
    assert str(val.dtype) == 'float64'

    arr[0, 4] = nan
    arr[1, 3] = nan
    arr[1, 4] = nan
    data = fromarray(arr, engine=eng)
    val = data.nanvar().toarray()
    expected = nanvar(data.toarray(), axis=0)
    assert allclose(val, expected, equal_nan=True)
    assert str(val.dtype) == 'float64'
Example 8: test_GWAS

def test_GWAS(self):
    Y = np.genfromtxt(self._liverPhenos)
    # Loading npdump and first 1000 snps for speed
    K = np.load(self._liverKinshipMatrix)
    snps = np.load(self._liver1000SNPFile).T
    # Variances across the rows, ignoring NaN; used to check which SNPs
    # were not polymorphic across the given individuals.
    vars = np.nanvar(snps, axis=0)
    TS, PS = lmm.GWAS(Y, snps, K, REML=True, refit=True)
    # SNPs that are not polymorphic (in the given individuals being tested)
    # will have variance 0; this check ensures that only those SNPs have a
    # return value of NaN.
    for i in range(len(PS)):
        self.assertTrue(not math.isnan(PS[i]) or vars[i] == 0,
                        "NaN found in results corresponding to polymorphic SNP")
    results = np.array([TS, PS])
    ansKey = np.load(self._liverTestFile)
    # These results include np.nan values, so allclose cannot be used. The
    # results are similar from run to run but do vary, so we can only check
    # for similarity to a precision of about 1e-06.
    for i in range(results.shape[0]):
        for j in range(results.shape[1]):
            a = results[i, j]
            b = ansKey[i, j]
            self.assertTrue((np.isnan(a) and np.isnan(b)) or abs(a - b) < 1e-06,
                            "Mismatch on values: " + str(a) + " and " + str(b))
Example 9: c

def c(self, P, h, bw):
    """Calculate the sill (the variance of the data values); for h > 0,
    return the covariance at lag h as sill minus the semivariance."""
    c = np.nanvar(P[:, 2])
    if h == 0:
        return c
    return c - self.semivarh(P, h, bw)
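For context: the sill here is simply the sample variance of the measured values, and under second-order stationarity the covariance at lag h is C(h) = sill - gamma(h), which is exactly what the h > 0 branch returns. A standalone sketch with made-up points and a hypothetical semivar helper standing in for self.semivarh (P holds x, y, value columns, as the indexing P[:, 2] suggests):

import numpy as np
from scipy.spatial.distance import pdist, squareform

rng = np.random.default_rng(4)
P = np.column_stack([rng.uniform(0, 10, (100, 2)),  # x, y coordinates
                     rng.normal(5, 2, 100)])        # measured value

def semivar(P, h, bw):
    # Average 0.5 * (z_i - z_j)^2 over pairs separated by h +/- bw.
    d = squareform(pdist(P[:, :2]))
    i, j = np.where((d >= h - bw) & (d <= h + bw))
    return 0.5 * np.mean((P[i, 2] - P[j, 2]) ** 2)

sill = np.nanvar(P[:, 2])
print(sill, sill - semivar(P, 2.0, 0.5))  # covariance estimate C(h) = sill - gamma(h)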
Example 10: compute

def compute(self, today, assets, out, close):
    # get returns dataset
    returns = ((close - np.roll(close, 1, axis=0)) / np.roll(close, 1, axis=0))[1:]
    # get index of the benchmark asset (sid 8554)
    benchmark_index = np.where(assets == 8554)[0][0]
    # get returns of benchmark
    benchmark_returns = returns[:, benchmark_index]
    # prepare X matrix (x_is - x_bar)
    X = benchmark_returns
    X_bar = np.nanmean(X)
    X_vector = X - X_bar
    X_matrix = np.tile(X_vector, (len(returns.T), 1)).T
    # prepare Y matrix (y_is - y_bar); note: demean the returns, not the
    # close prices
    Y_bar = np.nanmean(returns, axis=0)
    Y_bars = np.tile(Y_bar, (len(returns), 1))
    Y_matrix = returns - Y_bars
    # prepare variance of X
    X_var = np.nanvar(X)
    # multiply X matrix and Y matrix and sum (dot product),
    # then divide by the variance of X;
    # this gives the MLE of beta
    out[:] = (np.sum(X_matrix * Y_matrix, axis=0) / X_var) / len(returns)
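As a sanity check on the closed form above (beta_i = cov(X, Y_i) / var(X)), here is a self-contained sketch on synthetic prices; the shapes, seed, and the choice of column 0 as benchmark are illustrative assumptions:

import numpy as np

rng = np.random.default_rng(2)
close = 100 * np.cumprod(1 + rng.normal(0, 0.01, (253, 3)), axis=0)  # fake prices
returns = np.diff(close, axis=0) / close[:-1]
X = returns[:, 0]                       # treat column 0 as the benchmark
X_c = X - np.nanmean(X)
Y_c = returns - np.nanmean(returns, axis=0)
beta = (X_c[:, None] * Y_c).sum(axis=0) / (np.nanvar(X) * len(returns))
# Cross-check one asset against the OLS slope from np.polyfit:
print(beta[1], np.polyfit(X, returns[:, 1], 1)[0])  # should match closely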
Example 11: cal_stats

def cal_stats(in_fc, col_names):
    """Calculate stats for an array of double types, with nodata (nan, None)
    :  in the columns.
    :Requires:
    :---------
    :  in_fc - input featureclass or table
    :  col_names - the columns... numeric (floating point, double)
    :
    :Notes:
    :------  see the args tuple for examples of nan functions
    :  np.nansum(b, axis=0)   # by column
    :  np.nansum(b, axis=1)   # by row
    :  c_nan = np.count_nonzero(~np.isnan(b), axis=0)  # count nan if needed
    """
    a = arcpy.da.FeatureClassToNumPyArray(in_fc, col_names)  # or "*" for all fields
    b = a.view(np.float64).reshape(len(a), -1)  # np.float64: np.float is deprecated
    if len(a.shape) == 1:
        ax = 0
    else:
        ax = [1, 0][True]  # ax = [1, 0][colwise], with colwise=True
    mask = np.isnan(b)
    cnt = np.sum(~mask, axis=ax, dtype=np.intp, keepdims=False)
    n_sum = np.nansum(b, axis=0)
    n_mean = np.nanmean(b, axis=0)
    n_var = np.nanvar(b, axis=0)
    n_std = np.nanstd(b, axis=0)
    sk, kurt = skew_kurt(b, avg=n_mean, var_x=n_var, std_x=n_std,
                         col=True, mom='both')
    args = (col_names, cnt, n_sum, np.nanmin(b, axis=0), np.nanmax(b, axis=0),
            np.nanmedian(b, axis=0), n_mean, n_std, n_var, sk, kurt)
    return col_names, args
Example 12: computeFisherScore

def computeFisherScore(data, class_ass, nb_classes):
    '''
    The Fisher score assigns a rank to each feature, with the goal of finding
    the subset of features such that, in the data space they span, the
    distances between data points in different classes are as large as
    possible and the distances between data points in the same class are as
    small as possible.

    Input
        - data: matrix of inputs, size N x M, where N is the number of trials
          and M is the number of features
        - class_ass: array of class assignments, size 1 x N, where N is the
          number of trials
        - nb_classes: number of classes
    Output
        - Fscores: array of scores, size 1 x M, one for each feature
    '''
    num_trials, num_features = data.shape
    within_class_mean = np.zeros([nb_classes, num_features])  # mean of each feature within each class
    within_class_var = np.zeros([nb_classes, num_features])   # variance of each feature within each class
    num_points_within_class = np.zeros([1, nb_classes])       # number of points within each class

    for i in range(nb_classes):
        in_class = np.ravel(np.nonzero(class_ass == i))
        num_points_within_class[0, i] = len(in_class)
        class_data = data[in_class, :]  # extract trials assigned to this class
        within_class_mean[i, :] = np.nanmean(class_data, axis=0)  # length equals M, the number of features
        within_class_var[i, :] = np.nanvar(class_data, axis=0)

    between_class_mean = np.asmatrix(np.mean(within_class_mean, axis=0))
    between_class_mean = np.dot(np.ones([nb_classes, 1]), between_class_mean)
    Fscores = np.dot(num_points_within_class, np.square(within_class_mean - between_class_mean)) / \
        np.dot(num_points_within_class, within_class_var)
    return Fscores
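A usage sketch with toy data (the class structure is made up): feature 0 separates the two classes while feature 1 is pure noise, so feature 0 should receive a much higher Fisher score:

import numpy as np

rng = np.random.default_rng(3)
n = 50
data = np.vstack([
    np.column_stack([rng.normal(0.0, 0.3, n), rng.normal(0, 1, n)]),  # class 0
    np.column_stack([rng.normal(2.0, 0.3, n), rng.normal(0, 1, n)]),  # class 1
])
labels = np.repeat([0, 1], n)
print(computeFisherScore(data, labels, 2))  # e.g. roughly [[11, 0]]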
Example 13: _fit_model

def _fit_model(self, fcol, dis):
    """Determine the best fit for one feature column, given a distribution name.

    Parameters
    ----------
    fcol: feature column, array
    dis: distribution name, string

    Returns
    -------
    function: fit model with feature as argument
    """
    if dis == 'ratio':
        # note: scipy.stats.itemfreq is deprecated and removed in newer SciPy;
        # np.unique(fcol, return_counts=True) is the modern replacement
        itfreq = itemfreq(fcol)
        uniqueVars = itfreq[:, 0]
        freq = itfreq[:, 1]
        rat = freq / sum(freq)
        rat = dict(zip(uniqueVars, rat.T))
        func = lambda x: self.funcs[dis](x, rat)
    if dis == 'poisson':
        lamb = np.nanmean(fcol, axis=0)
        func = lambda x: self.funcs[dis](x, lamb)
    if dis == 'norm':
        sigma = np.nanvar(fcol, axis=0)
        theta = np.nanmean(fcol, axis=0)
        func = lambda x: self.funcs[dis](x, sigma, theta)
    return np.vectorize(func)
Example 14: fit_cols

def fit_cols(self, attributes, x, n_vals):
    """
    Return `EuclideanColumnsModel` with stored means and variances
    for normalization and imputation.
    """
    def nowarn(msg, cat, *args, **kwargs):
        if cat is RuntimeWarning and (
                msg == "Mean of empty slice"
                or msg == "Degrees of freedom <= 0 for slice"):
            if self.normalize:
                raise ValueError("some columns have no defined values")
        else:
            orig_warn(msg, cat, *args, **kwargs)

    self.check_no_discrete(n_vals)
    # catch_warnings resets the registry for "once", while avoiding this
    # warning would be annoying and slow, hence the patching
    orig_warn = warnings.warn
    with patch("warnings.warn", new=nowarn):
        means = np.nanmean(x, axis=0)
        vars = np.nanvar(x, axis=0)
    if self.normalize and not vars.all():
        raise ValueError("some columns are constant")
    return EuclideanColumnsModel(
        attributes, self.impute, self.normalize, means, vars)
Example 15: autocorrelation_hourly

def autocorrelation_hourly(data):
    from matplotlib.pyplot import plot, xlabel, ylabel, show
    from numpy import nanmean, nanvar, multiply, arange

    # We choose 7 days, plus or minus 6 hours, as the possible periodicity
    # in traffic.
    START_PERIOD = 7 * 24 - 6
    END_PERIOD = 7 * 24 + 6
    V = replace_placeholder(data, value=nanmean(data))
    # We don't take the variance of entries that we replaced with nanmean.
    sigma2 = nanvar(data)
    autocorr_dict = {period: 0 for period in range(START_PERIOD, END_PERIOD + 1)}
    Deviations = V - nanmean(V, axis=0)
    for period in range(START_PERIOD, END_PERIOD + 1):
        autocorr = nanmean([multiply(Deviations[t], Deviations[t + period])
                            for t in range(len(V) - period)]) / sigma2
        autocorr_dict[period] = autocorr
        print(period)
    # Peaks in the plot correspond to high autocorrelation, i.e. a strong
    # periodicity trend.
    plot(arange(START_PERIOD, END_PERIOD + 1),
         [autocorr_dict[period] for period in range(START_PERIOD, END_PERIOD + 1)],
         'o-')
    ylabel('Average autocorrelation over full links')
    xlabel('Assumed period of data (in hours)')
    show()
    # legend(bbox_to_anchor=(1.35, 0.95))
    return None
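The same idea in a self-contained sketch: a sine with a 24-sample period gives a normalized lagged autocorrelation near 1 at lag 24 (the data and lags here are made up, standing in for the traffic matrix and replace_placeholder above):

import numpy as np

t = np.arange(24 * 60)
x = np.sin(2 * np.pi * t / 24) + np.random.default_rng(5).normal(0, 0.2, t.size)
dev = x - np.nanmean(x)
sigma2 = np.nanvar(x)
for lag in (12, 24, 36, 48):
    r = np.nanmean(dev[:-lag] * dev[lag:]) / sigma2
    print(lag, round(r, 3))  # r peaks near lags 24 and 48, dips near 12 and 36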