This article collects typical usage examples of the nsum function as used with NumPy in Python. If you are wondering what nsum does, how to use it, or what it looks like in real code, the hand-picked code examples below may help.
A total of 15 code examples of the nsum function are shown below, sorted by popularity by default.
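As a quick orientation: NumPy has no function literally named nsum. Judging from how the examples below use it (see also the comment in Example 7), nsum is NumPy's sum imported under an alias; the following minimal illustration rests on that assumption:

from numpy import sum as nsum

nsum([1, 2, 3])                    # 6
nsum([[1, 2], [3, 4]], axis=0)     # array([4, 6]), column-wise sum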
Example 1: grow
def grow(self):
    """Grow the population to carrying capacity.

    The final population size is determined based on the proportion of
    producers present. This population is determined by drawing from a
    multinomial with the probability of each genotype proportional to its
    abundance times its fitness.
    """
    if self.is_empty():
        return

    if not self.diluted:
        return

    landscape = self.metapopulation.fitness_landscape

    final_size = self.capacity_min + \
        (self.capacity_max - self.capacity_min) * self.prop_producers()

    grow_probs = self.abundances * (landscape / nsum(landscape))

    if nsum(grow_probs) > 0:
        norm_grow_probs = grow_probs / nsum(grow_probs)
        self.abundances = multinomial(final_size, norm_grow_probs, 1)[0]
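The core pattern in grow is normalizing abundance-times-fitness weights into probabilities and drawing the next generation from a multinomial. A standalone sketch with made-up numbers (none of the values below come from the class above):

from numpy import array, sum as nsum
from numpy.random import multinomial

abundances = array([50, 30, 20])          # genotype counts (made up)
landscape = array([1.0, 1.2, 0.8])        # genotype fitness values (made up)

grow_probs = abundances * (landscape / nsum(landscape))
norm_grow_probs = grow_probs / nsum(grow_probs)              # now sums to 1
next_generation = multinomial(200, norm_grow_probs, 1)[0]    # carrying capacity 200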
Example 2: get_pos_norm
def get_pos_norm(pos):
    """Translate positions so that the centroid is at the origin and return
    the mean norm of the translated positions."""
    pos = Positions.create(pos)
    assert len(pos) == 1
    n = len(pos[0])
    p = zeros((n, 2))
    for i, node in enumerate(pos[0]):
        p[i, :] = pos[0][node]
    centroid = p.sum(axis=0) / n
    p -= tile(centroid, (n, 1))
    p_norm = nsum(sqrt(nsum(square(p), axis=1))) / n
    return p_norm
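The nested call nsum(sqrt(nsum(square(p), axis=1))) / n is the mean Euclidean norm of the rows of p. A minimal sketch on a plain (n, 2) array, without the Positions wrapper (coordinates made up):

from numpy import array, sqrt, square, sum as nsum

p = array([[0.0, 0.0], [2.0, 0.0], [0.0, 2.0]])
p = p - p.mean(axis=0)                                    # move centroid to the origin
mean_norm = nsum(sqrt(nsum(square(p), axis=1))) / len(p)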
Example 3: __init__
def __init__(self, hamiltonian, beta, mu, verbose=False):
    self.mu = mu
    CanonicalEnsemble.__init__(self, hamiltonian, beta, verbose)
    c = AnnihilationOperator(self.singleParticleBasis)
    # Particle-number operator scaled by the chemical potential mu
    muMatrix = mu * nsum([c[orb].H.dot(c[orb]) for orb in self.orderedSingleParticleStates], axis=0)
    self.hamiltonian.matrix = self.hamiltonian.matrix - muMatrix
    self.filling = None
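Examples 3, 11 and 13 all use nsum(list_of_matrices, axis=0) to collapse a list of equally shaped operator matrices into a single matrix. A minimal sketch with plain 2x2 arrays (values made up):

from numpy import array, sum as nsum

terms = [array([[0, 1], [1, 0]]), array([[2, 0], [0, 2]])]
H = nsum(terms, axis=0)    # elementwise sum of the stacked terms: [[2, 1], [1, 2]]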
Example 4: RHS
def RHS(self, N, t):
    dNdt = zeros_like(N)

    if self.gamma is not None and self.betadxi is not None:
        # Death breakup term
        dNdt[1:] -= N[1:] * self.gamma[1:]
        # Birth breakup term
        dNdt[:-1] += self.nu * dot(
            self.betadxi[:-1, 1:], N[1:] * self.gamma[1:])

    if self.Q is not None:
        Cd = zeros_like(dNdt)
        for i in arange(self.number_of_classes // 2):  # integer division for Python 3
            ind = slice(i, self.number_of_classes - i - 1)
            Cb = self.Q[i, ind] * N[i] * N[ind]
            # Death coalescence terms
            Cd[i] += nsum(Cb)
            Cd[(i + 1):(self.number_of_classes - i - 1)] += Cb[1:]
            # Birth coalescence term
            Cb[0] = 0.5 * Cb[0]
            dNdt[(2 * i + 1):] += Cb
        dNdt -= Cd

    if self.theta is not None:
        dNdt += (self.n0 * self.A0 - N / self.theta)

    # print('Time = {0:g}'.format(t))
    return dNdt
Example 5: sig_q_e_LHS
def sig_q_e_LHS(e):
    '''Summation / integration over the random domain'''
    q_e_grid = q(e, T_la[:, None, None, None, None], T_xi[None, :, None, None, None],
                 T_E[None, None, :, None, None], T_th[None, None, None, :, None],
                 T_A[None, None, None, None, :])
    q_dG_grid = q_e_grid ** 2 / n_int ** n_k
    return sqrt(nsum(q_dG_grid) - mu_q_e_LHS(e) ** 2)
Example 6: sig_q_e
def sig_q_e(e):
    '''Summation / integration over the random domain'''
    q_e_grid = q(e, Theta_la[:, None, None, None, None], Theta_xi[None, :, None, None, None],
                 Theta_E[None, None, :, None, None], Theta_th[None, None, None, :, None],
                 Theta_A[None, None, None, None, :])
    q_dG_grid = q_e_grid ** 2 * dG_grid
    return sqrt(nsum(q_dG_grid) - mu_q_e(e) ** 2)
Example 7: mu_q_e
def mu_q_e(e):
    '''Summation / integration over the random domain'''
    q_e_grid = q(e, Theta_la[:, None, None, None, None], Theta_xi[None, :, None, None, None],
                 Theta_E[None, None, :, None, None], Theta_th[None, None, None, :, None],
                 Theta_A[None, None, None, None, :])
    q_dG_grid = q_e_grid * dG_grid  # element-by-element product of two (m, m) arrays
    return nsum(q_dG_grid)  # nsum has been imported at line 3 from numpy
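Examples 5-7 build an outer grid over several 1-D parameter samples via None (newaxis) indexing, evaluate a response function on it, and reduce it with nsum to get the mean and standard deviation of the response. A reduced two-parameter sketch (the response function q and all sample values here are made up):

from numpy import linspace, sqrt, sum as nsum

n_int = 10
la = linspace(0.9, 1.1, n_int)               # sampled random parameter 1
xi = linspace(0.01, 0.02, n_int)             # sampled random parameter 2
dG = 1.0 / n_int ** 2                        # equal weight per grid cell

def q(e, la, xi):                            # hypothetical response function
    return la * e * (e > xi)

q_grid = q(0.05, la[:, None], xi[None, :])   # broadcasts to an (n_int, n_int) grid
mu = nsum(q_grid * dG)                       # mean response
sig = sqrt(nsum(q_grid ** 2 * dG) - mu ** 2) # standard deviation of the response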
Example 8: __init__
def __init__(self, blocksizes, all_real=True):
    Blocks.__init__(self, blocksizes)
    for size in blocksizes:
        if all_real:
            self.datablocks.append(asmatrix(zeros([size, size])))
        else:
            self.datablocks.append(asmatrix(zeros([size, size], dtype=complex)))
    # The full block-diagonal matrix is square with dimension sum(blocksizes)
    self.shape = [nsum(blocksizes)] * 2
Example 9: likelihood_func_deriv
def likelihood_func_deriv(theta, N, X, B, z_A, z_B, mutation):
    # Derivative of the likelihood function with respect to theta
    p_A, p_B, f_A, f_B = p_read(theta, N, X, B, mutation)
    p_A_deriv = B * (1.0 - B) * ((1 - theta + B * theta) ** (-2.0))
    p_B_deriv = ((1.0 - B * theta) * (-B) - (B - B * theta) * (-B)) / ((1.0 - B * theta) ** 2)
    f_A_deriv = nchoosek(N, X) * X * p_A_deriv * ((1.0 - p_A) ** (N - X)) \
        + nchoosek(N, X) * (p_A ** X) * (N - X) * (-p_A_deriv)
    f_B_deriv = nchoosek(N, X) * X * p_B_deriv * ((1.0 - p_B) ** (N - X)) \
        + nchoosek(N, X) * (p_B ** X) * (N - X) * (-p_B_deriv)
    l_deriv = -1.0 * nsum(1.0 / (z_A * f_A + z_B * f_B) * (z_A * f_A_deriv + z_B * f_B_deriv))
    return l_deriv
Example 10: clean_correlation_matrix
def clean_correlation_matrix(evals, evecs, max_eig, phylogeny=True):
    """Clean the correlation matrix of noise, with an option to remove the
    largest eigenvector to clean the matrix of phylogeny.

    Arguments:
    evals -- Eigenvalues of the correlation matrix.
    evecs -- Eigenvectors of the correlation matrix.
    max_eig -- Theoretical maximum eigenvalue of a random matrix; anything
               below it is ignored.
    """
    # Rebuild the matrix as the sum of eigenvalue-weighted outer products
    return real(nsum([x * outer(y, y) for x, y in zip(evals, evecs)], axis=0))
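The return line relies on the spectral decomposition: a symmetric matrix equals the nsum of eigenvalue * outer(eigenvector, eigenvector) over its eigenpairs. A minimal self-contained check (the 3x3 matrix is made up):

from numpy import array, outer, allclose, sum as nsum
from numpy.linalg import eigh

C = array([[1.0, 0.3, 0.1],
           [0.3, 1.0, 0.2],
           [0.1, 0.2, 1.0]])
evals, evecs = eigh(C)                 # columns of evecs are the eigenvectors
C_rebuilt = nsum([lam * outer(v, v) for lam, v in zip(evals, evecs.T)], axis=0)
assert allclose(C, C_rebuilt)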
Example 11: setHubbardMatrix
def setHubbardMatrix(t, u, spins, orbitals, siteSpaceTransformation=None):  # TODO rm siteSTrafo
    spins = range(len(spins))
    c = AnnihilationOperator([spins, orbitals])
    no = len(orbitals)
    ns = len(spins)
    spininds = range(len(spins))
    uMatrix = zeros([no, no, no, no, ns, ns])
    for i, j, k, l, s1, s2 in product(orbitals, orbitals, orbitals, orbitals, spins, spins):
        if i == k and j == l and i == j and s1 != s2:
            uMatrix[i, j, k, l, s1, s2] = u * .5
    if siteSpaceTransformation is not None:
        p = array(siteSpaceTransformation)
        t = p.transpose().dot(t).dot(p)
        temp = uMatrix.copy()
        # Rotate the interaction tensor into the transformed site space
        for i, j, k, l, s1, s2 in product(orbitals, orbitals, orbitals, orbitals, spins, spins):
            uMatrix[i, j, k, l, s1, s2] = nsum([p[i, m] * p[j, n] * temp[m, n, o, q, s1, s2] * p.transpose()[o, l] * p.transpose()[q, k]
                                                for m, n, o, q in product(orbitals, orbitals, orbitals, orbitals)], axis=0)
    # Hopping terms and interaction terms of the Hamiltonian
    ht = [t[i, j] * c[s, i].H.dot(c[s, j])
          for s in spins for i, j in product(orbitals, orbitals) if t[i, j] != 0]
    hu = [uMatrix[i, j, k, l, s1, s2] * c[s1, i].H.dot(c[s2, j].H).dot(c[s2, l]).dot(c[s1, k])
          for i, j, k, l in product(orbitals, orbitals, orbitals, orbitals)
          for s1, s2 in product(spins, spins)
          if s1 != s2 and uMatrix[i, j, k, l, s1, s2] != 0]
    return nsum(ht + hu, axis=0)
Example 12: getFockspaceNr
def getFockspaceNr(self, occupationOfSingleParticleStates=None, singleParticleStateNr=None, singleParticleState=None):
    """Fock state number."""
    if occupationOfSingleParticleStates is not None:
        # Binary encoding: an occupied single-particle state i contributes 2**i
        return nsum([int(occ) * 2**i for i, occ in enumerate(occupationOfSingleParticleStates)])
    elif singleParticleStateNr is not None:
        return 2**singleParticleStateNr
    elif singleParticleState is not None:
        return 2**self.getSingleParticleStateNr(*singleParticleState)
    else:
        assert False, 'Need parameter.'
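A tiny worked example of that binary encoding (the occupation vector is made up): states 0 and 2 occupied give 1*2**0 + 0*2**1 + 1*2**2 = 5.

from numpy import sum as nsum

occupation = [1, 0, 1]
fock_nr = nsum([int(occ) * 2**i for i, occ in enumerate(occupation)])   # 5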
Example 13: setMu
def setMu(self, mu):
    c = AnnihilationOperator(self.singleParticleBasis)
    nMatrix = nsum([c[orb].H.dot(c[orb]) for orb in self.orderedSingleParticleStates], axis=0)
    # Remove the old chemical-potential term before applying the new one
    self.hamiltonian.matrix = self.hamiltonian.matrix + self.mu * nMatrix
    self.mu = mu
    self.hamiltonian.matrix = self.hamiltonian.matrix - mu * nMatrix
    # Cached results are no longer valid
    self.energyEigenvalues = None
    self.energyEigenstates = None
    self.partitionFunction = None
    self.occupation = dict()
    report('Chemical potential set to ' + str(mu), self.verbose)
Example 14: __getitem__
def __getitem__(self, spState):
    """The single-particle state is given by a tuple of quantum numbers.
    Returns a scipy.sparse.coo_matrix."""
    instates = list()
    outstates = list()
    spStateOR = self.getOccupationRep(singleParticleState=spState)
    for fockStateNr in range(self.fockspaceSize):
        instateOR = self.getOccupationRep(fockStateNr)
        if instateOR[self.orderedSingleParticleStates.index(spState)] == '1':
            instates.append(fockStateNr)
            outstates.append(self.getFockspaceNr(annihilateOccRep(spStateOR, instateOR)))
    # Fermionic sign: (-1) to the number of occupied states preceding spState
    signs = [(-1)**nsum([1 for k in range(self.getSingleParticleStateNr(*spState))
                         if self.getOccupationRep(fockStateNr)[k] == '1'])
             for fockStateNr in instates]
    return coo_matrix((signs, (outstates, instates)), [self.fockspaceSize] * 2)
Example 15: __init__
def __init__(
        self, number_of_classes, t, dxi, N0=None, xi0=None,
        beta=None, gamma=None, Q=None,
        theta=None, n0=None, A0=None):
    self.number_of_classes = number_of_classes
    if xi0 is None:
        self.xi0 = dxi
    else:
        self.xi0 = xi0
    self.n0 = n0
    self.theta = theta
    # Uniform grid
    self.xi = self.xi0 + dxi * arange(self.number_of_classes)
    if N0 is None:
        N0 = zeros_like(self.xi)
    else:
        N0 = array([
            quad(N0, self.xi[i] - dxi / 2., self.xi[i] + dxi / 2.)[0]
            for i in range(number_of_classes)])
    self.nu = 2.0  # Binary breakup
    # Kernels setup
    if gamma is not None:
        self.gamma = gamma(self.xi)
        self.betadxi = zeros(
            (self.number_of_classes, self.number_of_classes))
        for i in range(1, len(self.xi)):
            for j in range(i):
                self.betadxi[j, i] = beta(self.xi[j], self.xi[i])
            # Normalize each column of the daughter-size distribution
            self.betadxi[:, i] = \
                self.betadxi[:, i] / nsum(self.betadxi[:, i])
    else:
        self.gamma = None
        self.betadxi = None
    if Q is not None:
        self.Q = zeros((self.number_of_classes, self.number_of_classes))
        for i in range(len(self.xi)):
            for j in range(len(self.xi)):
                self.Q[i, j] = Q(self.xi[i], self.xi[j])
    else:
        self.Q = None
    if A0 is None:
        self.A0 = None
    else:
        self.A0 = array([
            quad(A0, self.xi[i] - dxi / 2., self.xi[i] + dxi / 2.)[0]
            for i in range(number_of_classes)])
    # Solve procedure
    self.N = odeint(lambda NN, t: self.RHS(NN, t), N0, t)
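Examples 4 and 15 together form a breakup/coalescence population-balance solver. A hedged usage sketch follows; the class name and kernel functions below are hypothetical, only the constructor signature comes from the example above:

from numpy import linspace

t = linspace(0.0, 10.0, 100)
solver = PopulationBalanceSolver(            # hypothetical name for the class above
    number_of_classes=50, t=t, dxi=0.1,
    N0=lambda x: 1.0 if x < 1.0 else 0.0,    # initial number-density function (made up)
    gamma=lambda x: x,                       # breakup frequency kernel (made up)
    beta=lambda xj, xi: 2.0 / xi)            # daughter-size distribution (made up)
# solver.N[k] then holds the class populations at time t[k]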