This page collects typical usage examples of the Python function model.lorenz96. If you are wondering how to call lorenz96 in practice, what its arguments are, or what working code that uses it looks like, the selected examples below may help.
Thirteen code examples of the lorenz96 function are shown below, sorted by popularity by default.
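The examples only show call sites of the form mod.lorenz96(x, force, dt) (or Mnl.lorenz96); the model module itself is not reproduced on this page. For orientation, here is a minimal sketch of what such a function might look like, assuming a single fourth-order Runge-Kutta step of the standard Lorenz-96 equations, with the supplied force vector treated as an additive perturbation applied after the step. The helper name lorenz96_tendency and the default forcing F=8.17 (the value set in Examples 4-6) are assumptions, not the repository's actual implementation.

import numpy as np

def lorenz96_tendency(x, F=8.17):
    # Lorenz-96 right-hand side: dx_i/dt = (x_{i+1} - x_{i-2})*x_{i-1} - x_i + F, cyclic indices
    return (np.roll(x, -1) - np.roll(x, 2)) * np.roll(x, 1) - x + F

def lorenz96(x, force, dt, F=8.17):
    # One RK4 step of length dt; 'force' is added afterwards as model noise / nudging (assumed)
    k1 = lorenz96_tendency(x, F)
    k2 = lorenz96_tendency(x + 0.5*dt*k1, F)
    k3 = lorenz96_tendency(x + 0.5*dt*k2, F)
    k4 = lorenz96_tendency(x + dt*k3, F)
    return x + dt/6.0*(k1 + 2*k2 + 2*k3 + k4) + force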
Example 1: sir
def sir(initial, Usir, MO, NM, scov_bg, scov_model, H, y, tobs, pinv_obs, F, dt):
    # Sizes
    Nsir = len(Usir[:, 0, 0])       # ensemble size
    N = len(Usir[0, :, 0])          # state dimension
    J = len(Usir[0, 0, :]) - 1      # number of model time steps
    weights = np.zeros(Nsir)
    eff = np.zeros(NM)
    mcn = 0                         # measurement counter
    for t in range(J):
        for m in range(Nsir):
            if (t == 0):
                #random = np.random.randn(N)
                #force = np.dot(scov_bg,random)
                #Usir[m,:,t] = initial + force
                Usir[m, :, t] = initial[m, :]
            # propagate each member with additive model error
            random = np.random.randn(N)
            force = np.dot(scov_model, random)
            Usir[m, :, t+1] = Mnl.lorenz96(Usir[m, :, t], force, dt)
        if (t+1 == tobs[mcn]):
            print(tobs[mcn])
            sum = 0
            for m in range(Nsir):
                # negative log-weight from the observation misfit
                diff = y[:, mcn] - np.dot(H, Usir[m, :, tobs[mcn]])
                hulp = np.dot(pinv_obs, diff)
                weights[m] = 1./2*np.dot(np.transpose(diff), hulp)
                sum = sum + weights[m]*weights[m]
            mcn = mcn + 1
            Usir[:, :, t+1], eff = resample(Usir[:, :, t+1], weights)
    return Usir, eff
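Example 1 calls a resample helper that is not shown on this page. Purely as an illustration, a minimal systematic-resampling sketch is given below; it assumes (the source does not confirm this) that weights holds the accumulated negative log-weights and that the second return value is the effective sample size.

def resample(U, weights):
    # U: ensemble at one time, shape (Nens, N); weights: negative log-weights
    Nens = U.shape[0]
    w = np.exp(-(weights - weights.min()))   # back to (unnormalised) likelihood weights
    w = w / w.sum()
    neff = 1.0 / np.sum(w**2)                # effective sample size
    # systematic resampling
    positions = (np.arange(Nens) + np.random.rand()) / Nens
    idx = np.searchsorted(np.cumsum(w), positions)
    idx = np.minimum(idx, Nens - 1)          # guard against round-off
    return U[idx, :], neff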
Example 2: range
x = np.zeros([D, fc+1])
x[:, 0] = xtrue[:, 0] + dx0
#x[:,0] = xtrue[:,0]
print('x[:,0]', x[:, 0])
nTD = N + (M-1)*nTau
#t = np.zeros([1,nTD])
datat = dt*np.arange(N+1)          # model/observation times
for j in range(fc):                # try nTD
    force = np.zeros(D)
    #force = np.random.rand(D)-0.5 # with uniform random forcing, 10 or 20 variables overflow by time step 2
    xtrue[:, j+1] = mod.lorenz96(xtrue[:, j], force, dt)
xtrue[:, 1] = xtrue[:, 0]          # work around the 0-based start index
x[:, 1] = x[:, 0]                  # work around the 0-based start index
print('truth created')
################################## Creating the observations ###################################
y = np.zeros([L, N])
#### No noise for y (ok for seed=37)
#y = np.dot(h,xtrue[:,:N])
#print 'y', y.shape
#### Good noise values for y (for seed=37)
y = np.dot(h, xtrue[:, :N]) + np.random.uniform(0, 1.2680e-04, N) - 6.34e-05
#y = np.dot(h,xtrue[:,:N]) + np.random.uniform(0,1.2680e-04,N)-9.34e-05 #(not zero-mean, but the offset is almost negligible)
#y = np.dot(h,xtrue[:,:N]) + np.random.uniform(0,1.8680e-04,N)-9.34e-05
Example 3: range
max_pinv_rank = Nens
################### Creating truth ###################################
xtrue = np.zeros([D, N+1])
#***xtrue[:,0] = np.random.rand(D)
#print 'xtrue[:,0]', xtrue[:,0]
####### Start by spinning the model up ###########
xtest = np.zeros([D, 1001])
xtest[:, 0] = np.random.rand(D)
#print '1st rand', xtest[:,0]
for j in range(1000):
    force = np.zeros(D)
    xtest[:, j+1] = mod.lorenz96(xtest[:, j], force, dt)
xtrue[:, 0] = xtest[:, 1000]
print('xtrue[:,0]', xtrue[:, 0])
## Plot xtrue to understand the influence of the initial conditions ##
#plt.figure(1).suptitle('xtrue for seed='+str(r)+'')
#plt.plot(xtrue[:,0],'g-')
#plt.show()
dx0 = np.random.rand(D) - 0.5
#print 'dx0', dx0
##dx0 = np.random.rand(D)
x = np.zeros([D, N+1])
x[:, 0] = xtrue[:, 0] + dx0
Example 4: range
# Creating the truth run
xhulp = np.zeros([N, 2000])
xtrue = np.zeros([N, J+1])
force = np.zeros(N)
#spin up
F = 8.17
xhulp[:, 0] = F                      ## change here to vary the forcing parameter (cf. Table 1)
pert = 0.05
pospert = int(np.ceil(N/2.0)) - 1    # index must be an integer
xhulp[pospert, 0] = F + pert
spinup = 1999
for j in range(spinup):
    force = np.zeros(N)
    xhulp[:, j+1] = mod.lorenz96(xhulp[:, j], force, dt)   # one column (all N variables) per step
xtrue[:, 0] = xhulp[:, spinup]
#print 'xtrue', xtrue
for j in range(J):
    #random = np.random.randn(N)
    #force = np.dot(scov_model,random)
    force = np.zeros(N)
    xtrue[:, j+1] = mod.lorenz96(xtrue[:, j], force, dt)
print('truth created')
print(xtrue.shape)
# Adding the forcing F as a 21st state variable
forc = np.zeros([1, J+1])
print(forc.shape)
forc[:, :] = F
print(forc)
xtrue = np.append(xtrue, forc, axis=0)
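Examples 4 and 8 append the forcing parameter F to the state as a 21st variable, the usual state-augmentation trick for estimating F alongside the state. How the augmented state is stepped forward is not shown in these excerpts; a minimal sketch, building on the assumed lorenz96 above and using a persistence model for the parameter, could look like this.

def lorenz96_augmented(xa, force, dt):
    # Assumption: the last entry of the augmented state is the forcing F,
    # kept constant (persistence) while the first N entries evolve.
    x, F = xa[:-1], xa[-1]
    x_new = lorenz96(x, force, dt, F=F)   # the sketch lorenz96 defined above
    return np.append(x_new, F)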
Example 5: range
#spin up
F = 8.17
######xhulp[:,0] = F
######pert = 0.05
######pospert = np.ceil(N/2.0)-1
######xhulp[pospert,0] = F+pert
######spinup=1999
######for j in range(spinup):
######    force = np.zeros(N)
######    xhulp[:,j+1] = mod.lorenz96(xhulp[:,j],force,dt)
######xtrue[:,0] = xhulp[:,spinup]
xtrue[:, 0] = np.random.rand(N)
for j in range(J):
    force = np.zeros(N)
    xtrue[:, j+1] = mod.lorenz96(xtrue[:, j], force, dt)
print('truth created')
print(xtrue.shape)
# Creating the observations
#if (observations == 1):
NM = J//ns           # number of measurement times (integer division)
# Select an observation operator
if obsgrid == 1:
    # Option 1: observe all variables
    observed_vars = range(N)
elif obsgrid == 2:
    # Option 2: observe every other variable
    observed_vars = range(0, N, 2)
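Examples 5, 6, and 12 only pick observed_vars; the observation operator built from it is not shown in these excerpts. A minimal sketch, mirroring the construction used in Example 11 (where the matrix is called h and the state dimension is D), would be:

L = len(observed_vars)            # number of observed components
H = np.zeros([L, N])
for i in range(L):
    H[i, observed_vars[i]] = 1.0  # each row selects one observed state variable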
Example 6: range
# Creating the truth run
xhulp = np.zeros([N, 2000])
xtrue = np.zeros([N, J+1])
force = np.zeros(N)
#spin up
F = 8.17
xhulp[:, 0] = F
pert = 0.05
pospert = int(np.ceil(N/2.0)) - 1    # index must be an integer
xhulp[pospert, 0] = F + pert
spinup = 1999
for j in range(spinup):
    force = np.zeros(N)
    xhulp[:, j+1] = mod.lorenz96(xhulp[:, j], force, dt)
xtrue[:, 0] = xhulp[:, spinup]
for j in range(J):
    force = np.zeros(N)
    xtrue[:, j+1] = mod.lorenz96(xtrue[:, j], force, dt)
print('truth created')
print(xtrue.shape)
# Creating the observations
#if (observations == 1):
NM = J//ns           # number of measurement times (integer division)
# Select an observation operator
Example 7: range
dx0 = np.random.rand(D) - 0.5   #Changed to randn! It ran for both 10 and 20 variables
##dx0 = np.random.rand(D)
#print 'dx0', dx0
x = np.zeros([D, N+1])
x[:, 0] = xtrue[:, 0] + dx0
#x[:,0] = xtrue[:,0]
print('x[:,0]', x[:, 0])
nTD = N + (M-1)*nTau
#t = np.zeros([1,nTD])
datat = dt*np.arange(N+1)       # model/observation times
for j in range(N):              # try nTD
    force = np.zeros(D)
    #force = np.random.rand(D)-0.5 # with uniform random forcing, 10 or 20 variables overflow by time step 2
    xtrue[:, j+1] = mod.lorenz96(xtrue[:, j], force, dt)
xtrue[:, 1] = xtrue[:, 0]       # work around the 0-based start index
x[:, 1] = x[:, 0]               # work around the 0-based start index
print('truth created')
y = np.zeros([L, N+1])
### No noise for y (ok for seed=37)
#y = np.dot(h,xtrue)
### Good noise values for y (for seed=37)
### (the noise is centred on zero)
#y = np.dot(h,xtrue) + np.random.uniform(0,1.2680e-04,N+1)-6.34e-05
#y = np.dot(h,xtrue) + np.random.uniform(0,1.2680e-04,N+1)-9.34e-05 #(not zero-mean)
#y = np.dot(h,xtrue) + np.random.uniform(0,1.8680e-04,N+1)-9.34e-05
### Noise that runs perfectly until time step 1500 (for seed=37)
Example 8: range
######xhulp[:,0] = F ## change here to vary the forcing parameter (cf. Table 1)
######pert = 0.05
######pospert = np.ceil(N/2.0)-1
######xhulp[pospert,0] = F+pert
######spinup=1999
######for j in range(spinup):
######    force = np.zeros(N)
######    xhulp[:,j+1] = mod.lorenz96(xhulp[:,j],force,dt)  # one column (all N variables) per step
######xtrue[:,0] = xhulp[:,spinup]
xtrue[:, 0] = np.random.rand(N)
print('xtrue', xtrue)
for j in range(J):
    #random = np.random.randn(N)
    #force = np.dot(scov_model,random)
    force = np.zeros(N)
    xtrue[:, j+1] = mod.lorenz96(xtrue[:, j], force, dt)
print('truth created')
######print xtrue.shape
# Adding the forcing F as a 21st state variable
######forc = np.zeros([1,J+1])
######print forc.shape
######forc[:,:] = F
######print forc
######xtrue = np.append(xtrue,forc, axis=0)
#####print 'New xtrue', xtrue.shape
#print xtrue[0,1]
# Creating the observations
#if (observations == 1):
#####NM = J*tau  # number of measurement times (increasing DM is equivalent to increasing the number of measurements)
Example 9: ewpf
def ewpf(initial, Uew, MO, NM, scov_bg, scov_model, cov_model, pinv_model, H, y, tobs, cov_obs, pinv_obs, nudgeFac, F, dt):
    # Sizes
    New = len(Uew[:, 0, 0])        # ensemble size
    N = len(Uew[0, :, 0])          # state dimension
    J = len(Uew[0, 0, :]) - 1      # number of model time steps
    ns = J//NM                     # steps between observations (integer division)
    # Factors
    retain = 5
    epsilon = 0.001/New
    gamma_u = 1e-2
    gamma_n = 1e-5
    # Arrays
    weights = np.zeros(New)
    extraWeight = np.zeros(New)    # additional weights due to the mixture density
    wpp = np.zeros(New)            # accumulated nudging weights
    c = np.zeros(New)              # maximum weights per member
    eff = np.zeros(NM)
    # Matrices needed
    HQHT = np.dot(H, np.dot(cov_model, np.transpose(H)))
    pinv_QN = np.linalg.pinv(HQHT + cov_obs)
    kgain = np.dot(cov_model, np.dot(np.transpose(H), pinv_QN))
    mcn = 0
    for t in range(J):
        for m in range(New):
            if (t == 0):
                #random = np.random.randn(N)
                #force = np.dot(scov_bg,random)
                #Uew[m,:,t] = initial + force
                Uew[m, :, t] = initial[m, :]
            if (t+1 == tobs[mcn]):
                force = 0.
                Uew[m, :, t+1] = Mnl.lorenz96(Uew[m, :, t], force, dt)
            else:
                # nudge towards the next observation
                tau = (t - mcn*ns)/float(ns)
                diff = y[:, mcn] - np.dot(H, Uew[m, :, t])
                help1 = np.dot(pinv_obs, diff)
                help2 = np.dot(np.transpose(H), help1)
                help3 = np.dot(cov_model, help2)
                nudge = nudgeFac*tau*help3
                # random model error
                random = np.random.randn(N)
                force = np.dot(scov_model, random)
                nudge = force + nudge
                Uew[m, :, t+1] = Mnl.lorenz96(Uew[m, :, t], nudge, dt)
                weight = np.dot(nudge, np.dot(pinv_model, nudge)) - np.dot(force, np.dot(pinv_model, force))
                wpp[m] = wpp[m] + 0.5*weight
        #print wpp
        if (t+1 == tobs[mcn]):
            print(tobs[mcn])
            # Calculate weights for the best fit to the observations for each member
            for m in range(New):
                diff = y[:, mcn] - np.dot(H, Uew[m, :, tobs[mcn]])
                c[m] = wpp[m] + 0.5*np.dot(diff, np.dot(pinv_QN, diff))
            ccsort = np.sort(c)
            #print ccsort
            cc = ccsort[(retain*New)//10 - 1]     # keep members whose weight is below the retain/10 quantile
            oldU = np.copy(Uew[:, :, tobs[mcn]])
            for m in range(New):
                if (c[m] <= cc):
                    diff = y[:, mcn] - np.dot(H, Uew[m, :, tobs[mcn]])
                    help = np.dot(H, np.dot(kgain, diff))
                    aaa = 0.5*np.dot(diff, np.dot(pinv_obs, help))
                    bbb = 0.5*np.dot(diff, np.dot(pinv_obs, diff)) - cc + wpp[m]
                    alpha = 1 - np.sqrt(1. - bbb/aaa + 0.00000001)
                    Uew[m, :, tobs[mcn]] = Uew[m, :, tobs[mcn]] + alpha*np.dot(kgain, diff)
                # Add random error from the mixture density
                u = np.random.rand(1)
                factor = epsilon/(1 - epsilon)*(2/np.pi)**(N/2)*(gamma_u**N/gamma_n)
                if (u < epsilon):
                    random = gamma_n*np.random.randn(N)
                    #print 'random', random
                    addRandom = np.dot(scov_model, random)
                    extraWeight[m] = 1./(factor*np.exp(-(1./(2*gamma_n**2))*np.dot(random, random)))
                else:
                    random = 2*gamma_u**(1./2)*(np.random.rand(N) - 0.5)
                    #print 'random', random
#......... (the rest of this code is omitted) .........
Example 10: range
max_pinv_rank = M
################### Creating truth ###################################
xtrue = np.zeros([D, N+1])
#xtrue[:,0] = np.random.rand(D) # i.e. uniform on [0,1]
#print 'xtrue[:,0]', xtrue[:,0]
##dx0 = np.random.rand(D)
# Start by spinning the model up.
xtest = np.zeros([D, 1001])
xtest[:, 0] = np.random.rand(D)
for j in range(1000):
    force = np.zeros(D)
    xtest[:, j+1] = mod.lorenz96(xtest[:, j], force, dt)
xtrue[:, 0] = xtest[:, 1000]
#x[:,0] = xtrue[:,0]
x = np.zeros([D, N+1])
dx0 = np.random.rand(D) - 0.5
x[:, 0] = xtrue[:, 0] + dx0
#nTD = N + (M-1)*nTau # total time steps needed
#t = np.zeros([1,nTD])
datat = dt*np.arange(N+1)       # model/observation times
for j in range(N):              # try nTD
    force = np.zeros(D)
    #force = np.random.rand(D)-0.5 # with uniform random forcing, 10 or 20 variables overflow by time step 2
    xtrue[:, j+1] = mod.lorenz96(xtrue[:, j], force, dt)
Example 11: Pmatrix
#......... (part of the code omitted here) .........
##################### Seeding for 20 variables #######################
r = 37   #for x[:,0] = xtrue[:,0]
np.random.seed(r)
#################### Constructing h (obs operator) ##################
observed_vars = range(1)
L = len(observed_vars)
h = np.zeros([L, D])
for i in range(L):
    h[i, observed_vars[i]] = 1.0
xx = np.zeros([D, 1])
Jac = np.zeros([D, D])
for i in range(D):
    Jac[i, i] = 1.
#Jac0 = np.copy(Jac)
run = 1
################### Main loop ##########################################
for n in range(1, run+1):
    xx = x
    #Jac = Jac0
    P = {}
    P['00'] = Jac0
    for s in range(1, M):
        idxs = s
        for m in range(1, M):
            ii = idxs - m
            iid = idxs + 1
            id1 = str(0)+str(idxs)
            id2 = str(0)+str(idxs-1)
            id21 = str(m-1)+str(idxs)
            id3 = str(iid-1)+str(ii)
            id4 = str(iid-2)+str(ii)
            id5 = str(iid-1)+str(iid-1)
            id6 = str(iid-2)+str(iid-2)
            if ii >= 0:
                Jac3 = P[id4]
                # Calculating the first row of Ps
                if m == 1:
                    Jac2 = P[id2]
                    #########################
                    Jac2 = np.transpose(Jac2)
                    #########################
                    # Calculating all elements in the upper part of the diagonal
                    Jacsize = D**2
                    Jacv2 = Jac2.reshape(Jacsize)
                    Jacvec2 = Jacv2.reshape(Jacsize, 1)
                    Jac2 = mod.rk4_J3(Jacvec2, D, xx, dt)
                    Jac2 = np.transpose(Jac2)
                    P[id1] = Jac2
                if m > 1:
                    # Calculating all elements in the upper part of the diagonal
                    Jacsize = D**2
                    Jacv2 = Jac2.reshape(Jacsize)
                    Jacvec2 = Jacv2.reshape(Jacsize, 1)
                    Jac2 = mod.rk4_J3(Jacvec2, D, xx, dt)
                    P[id21] = Jac2
                # Calculating all elements in the lower part of the diagonal
                Jacv3 = Jac3.reshape(Jacsize)
                Jacvec3 = Jacv3.reshape(Jacsize, 1)
                Jac3 = mod.rk4_J3(Jacvec3, D, xx, dt)
                P[id3] = Jac3
                # Calculating all elements in the diagonal
                Jacv4 = Jac2.reshape(Jacsize)
                Jacvec4 = Jacv4.reshape(Jacsize, 1)
                Jac4 = mod.rk4_J3(Jacvec4, D, xx, dt)
                P[id5] = Jac4
            if m == (M-1):
                random = np.zeros(D)
                xx = mod.lorenz96(xx, random, dt)
return P
Example 12: range
######xhulp[:,0] = F
######pert = 0.05
######pospert = np.ceil(N/2.0)-1
######xhulp[pospert,0] = F+pert
######spinup=1999
######for j in range(spinup):
######    force = np.zeros(N)
######    xhulp[:,j+1] = mod.lorenz96(xhulp[:,j],force,dt)
######xtrue[:,0] = xhulp[:,spinup]
xtrue[:, 0] = np.random.rand(N)
#####print 'xtrue', xtrue
for j in range(J):
    #random = np.random.randn(N)
    #force = np.dot(scov_model,random)
    force = np.zeros(N)
    xtrue[:, j+1] = mod.lorenz96(xtrue[:, j], force, dt)
print('truth created')
print(xtrue.shape)
# Creating the observations
#if (observations == 1):
NM = J//ns           # number of measurement times (integer division)
# Select an observation operator
if obsgrid == 1:
    # Option 1: observe all variables
    observed_vars = range(N)
elif obsgrid == 2:
    # Option 2: observe every other variable
    observed_vars = range(0, N, 2)
Example 13: range
xtrue = np.zeros([D, N+1])
#xtrue[:,0] = np.random.rand(D) #Changed to randn! It ran for both 10 and 20 variables
####### Start by spinning the model up ###########
xtest = np.zeros([D, 1001])
xtest[:, 0] = np.random.rand(D)
#print '1st rand', xtest[:,0]
for j in range(1000):
    force = np.zeros(D)
    ##force = Q*np.ones(D)               ########## ADDING Q NOISE ##############
    ###force = np.random.normal(0,Q,D)   ########## ADDING NORMAL NOISE WITH VARIANCE=Q ##############
    ####force = np.random.normal(0,Q)    ########## ADDING NORMAL NOISE WITH VARIANCE=Q ##############
    xtest[:, j+1] = mod.lorenz96(xtest[:, j], force, dt)
xtrue[:, 0] = xtest[:, 1000]
print('xtrue[:,0]', xtrue[:, 0])
dx0 = np.random.rand(D) - 0.5   #Changed to randn! It ran for both 10 and 20 variables
##dx0 = np.random.rand(D)
x = np.zeros([D, N+1])
z = np.zeros([D, N+1])
w = np.zeros([D, N+1])
k = np.zeros([D, N+1])
x[:, 0] = xtrue[:, 0]   #+ dx0
z[:, 0] = x[:, 0]