本文整理汇总了Python中random.normalvariate函数的典型用法代码示例。如果您正苦于以下问题:Python normalvariate函数的具体用法?Python normalvariate怎么用?Python normalvariate使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了normalvariate函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: generate_messy_data
def generate_messy_data():
    """Draw two 500-point Gaussian clusters and return them separately and combined.

    Returns a tuple ``(data_1, data_2, combined)`` where each cluster is a
    list of ``(x, y)`` samples drawn from the module-level MESSY_* mean/std
    constants.
    """
    def _sample_cluster(mean_x, std_x, mean_y, std_y, count=500):
        # Draw x before y for every point, matching the original sampling order.
        cluster = []
        for _ in range(count):
            px = random.normalvariate(mean_x, std_x)
            py = random.normalvariate(mean_y, std_y)
            cluster.append((px, py))
        return cluster

    data_1 = _sample_cluster(MESSY_MEAN_X_1, MESSY_STD_X_1,
                             MESSY_MEAN_Y_1, MESSY_STD_Y_1)
    data_2 = _sample_cluster(MESSY_MEAN_X_2, MESSY_STD_X_2,
                             MESSY_MEAN_Y_2, MESSY_STD_Y_2)
    return data_1, data_2, data_1 + data_2
示例2: test_003_test_message_with_noise
def test_003_test_message_with_noise(self):
    """Round-trip a random packet through the strider encoder/decoder with noise.

    Encodes a 6180-byte message, adds complex Gaussian noise of total power
    N0 (split evenly between the real and imaginary components), then checks
    that the decoder recovers the message exactly.
    """
    NUM_GENERATED_PASSES = 5
    N0 = 0.06  # total noise power per complex symbol
    # get strider decoder
    dec = rf.codes.strider.StriderFactory.createDecoder(1530)
    # get turbo encoder
    enc = rf.codes.strider.StriderFactory.createEncoder(1530)
    for i in xrange(2):
        # get random message
        message = numpy.random.bytes(6179)
        message += (chr(numpy.random.randint(0,4) & 0x3))
        # encode
        enc.setPacket(message)
        encoderOutput = rf.vector_csymbol()
        enc.encode(3840*NUM_GENERATED_PASSES, encoderOutput)
        # Add noise; sigma is the per-component std dev so I^2 + Q^2 power = N0
        sigma = math.sqrt(N0 / 2.0)
        # NOTE(review): this inner loop reuses the outer loop variable ``i`` —
        # harmless here (the outer value is not read again) but confusing.
        for i in xrange(encoderOutput.size()):
            encoderOutput[i] += random.normalvariate(0,sigma)
            encoderOutput[i] += 1j * random.normalvariate(0,sigma)
        # attempt to decode
        dec.reset()
        dec.add(encoderOutput,N0)
        res = dec.decode()
        self.assertEqual(len(res.packet), len(message))
        self.assertEqual(res.packet, message)
示例3: test_te_local_values
def test_te_local_values():
    """Test local TE estimation."""
    n = 1000
    cov = 0.4
    # Correlated source; drawn first so the RNG stream matches the original.
    source = [rn.normalvariate(0, 1) for _ in range(n)]
    noise = [rn.normalvariate(0, 1) for _ in range(n - 1)]
    # Target mixes the lagged source with fresh noise; the leading 0 is the
    # unmodelled first sample.
    target = [0]
    for past, eps in zip(source[:n - 1], noise):
        target.append(cov * past + (1 - cov) * eps)
    analysis_opts = {
        "kraskov_k": 4,
        "normalise": "false",
        "theiler_t": 0,
        "noise_level": 1e-8,
        "local_values": True,
        "tau_target": 1,
        "tau_source": 1,
        "source_target_delay": 1,
        "history_target": 1,
        "history_source": 1,
    }
    te_est = Estimator_te("jidt_kraskov")
    te_res = te_est.estimate(np.array(source), np.array(target), analysis_opts)
    assert te_res.shape[0] == n, "Local TE estimator did not return an array."
示例4: makeMeasAccToPlan_lognorm
def makeMeasAccToPlan_lognorm(func, expplan:list, b:list, c:dict, Ve=None, n=1, outfilename="", listOfOutvars=None):
    """Run the model over an experiment plan, adding log-normal noise to outputs.

    :param func: vector model function, called as ``func(x, b, c)``
    :param expplan: experiment plan — a list of x vectors
    :param b: coefficient vector b
    :param c: coefficient dict c
    :param Ve: output covariance matrix (np.array); its diagonal supplies the
        per-component log-space variances of the perturbation
    :param n: sample size of y (not used here; kept for interface compatibility)
    :param outfilename: output file name (not used here; kept for interface compatibility)
    :param listOfOutvars: list of exported variables (not used here)
    :return: list of measurements, each a dict ``{'x': ..., 'y': ...}``
    """
    res = []
    for x in expplan:
        y = func(x, b, c)
        if y is None:
            # The model rejected this point — leave it out of the measurements.
            continue
        # Perturb the outputs log-normally when a usable covariance is given.
        if Ve is not None and np.linalg.det(Ve) > 10e-15:
            ydisps = np.diag(Ve)
            for k in range(len(y)):
                sd = math.sqrt(ydisps[k])
                if y[k] < 0:
                    # Negative outputs: perturb the magnitude, keep the sign.
                    y[k] = -1 * math.exp(random.normalvariate(math.log(math.fabs(y[k])), sd))
                else:
                    y[k] = math.exp(random.normalvariate(math.log(y[k]), sd))
        res.append({'x': x, 'y': y})
    return res
示例5: generate_hypothesis
def generate_hypothesis(x_mean, y_mean, th_mean, x_sdev, y_sdev, th_sdev):
    """Draw a Gaussian (x, y, theta) pose hypothesis.

    Each component is sampled independently from a normal distribution with
    the corresponding mean and standard deviation; returned as a 3-tuple.
    """
    sample = []
    # Draw x, then y, then theta — the same order as the argument pairs.
    for mean, sdev in ((x_mean, x_sdev), (y_mean, y_sdev), (th_mean, th_sdev)):
        sample.append(random.normalvariate(mean, sdev))
    return tuple(sample)
示例6: test_cmi_no_c_estimator_ocl
def test_cmi_no_c_estimator_ocl():
    """Test CMI estimation without a conditional variable.

    The estimator should fall back to MI estimation and provide the
    correct result.
    """
    n = 4001  # must be odd: one sample is lost when shifting the signals
    cov = 0.4
    # Correlated source drawn first, then the independent noise, so the RNG
    # stream matches the original implementation.
    source_1 = [rn.normalvariate(0, 1) for _ in range(n)]
    noise = [rn.normalvariate(0, 1) for _ in range(n)]
    target = [cov * s + (1 - cov) * e for s, e in zip(source_1, noise)]
    # Cast everything to numpy so the idtxl estimator understands it.
    source_1 = np.expand_dims(np.array(source_1), axis=1)
    target = np.expand_dims(np.array(target), axis=1)
    # The estimate is itself a random variable (the data are random), so it
    # will only be of the expected order of magnitude, with large variance
    # around the analytic value.
    # opts = {'kraskov_k': 4, 'normalise': True, 'nchunkspergpu': 2}
    opts = {'kraskov_k': 4, 'normalise': True}
    n_chunks = 2
    calculator_name = 'opencl_kraskov'
    est = Estimator_cmi(calculator_name)
    res_1 = est.estimate(var1=source_1[1:], var2=target[1:],
                         conditional=None, n_chunks=n_chunks, opts=opts)
    # Analytic MI of two Gaussians with this mixing coefficient.
    expected_res = math.log(1 / (1 - math.pow(cov, 2)))
    print('Example 1: TE result for second chunk is {0:.4f} nats;'
          ' expected to be close to {1:.4f} nats for these correlated'
          ' Gaussians.'.format(res_1[0], expected_res))
    assert res_1[0] != res_1[1], ('CMI results for chunk 1 and 2 are'
                                  'identical, this is unlikely for random'
                                  'data.')
示例7: _determine_personality_feature
def _determine_personality_feature(self, feature_type):
    """Determine a value for a Big Five personality trait."""
    config = self.person.game.config
    # Short-circuit keeps the RNG untouched when there is no biological mother.
    inherits_trait = (
        self.person.biological_mother and
        random.random() < config.big_five_heritability_chance[feature_type]
    )
    if inherits_trait:
        # Take after one parent, with some variance around their value.
        takes_after = random.choice(
            [self.person.biological_father, self.person.biological_mother]
        )
        parent_value = self._get_a_persons_feature_of_type(
            person=takes_after, feature_type=feature_type
        )
        value = random.normalvariate(
            parent_value, config.big_five_inheritance_sd[feature_type]
        )
    else:
        takes_after = None
        # Generate from the population distribution.
        value = random.normalvariate(
            config.big_five_mean[feature_type], config.big_five_sd[feature_type]
        )
    # Clamp to the configured personality range.
    if value < config.big_five_floor:
        value = config.big_five_floor
    elif value > config.big_five_cap:
        value = config.big_five_cap
    return Feature(value=value, inherited_from=takes_after)
示例8: main
def main(argv=None):
    """Print n labeled noisy linear samples for two classes A and B.

    Parses ``--npts``/``-n`` to set the sample count, then for x sweeping
    (-1, 1) prints tab-separated ``label  x  y`` lines where
    y_A ~ 2*x + 0.1 + N(0, 0.5) and y_B ~ 4*x - 0.1 + N(0, 0.5); roughly
    1 in 10001 points gets an extra +1000 outlier offset.

    NOTE(review): Python 2 code; the handler for the outer ``try`` (presumably
    ``except Usage``) is missing from this excerpt, so the excerpt as shown is
    syntactically incomplete.
    """
    n = 1000000
    if argv is None:
        argv = sys.argv
    try:
        try:
            opts, args = getopt.getopt(argv[1:], "hn:", ["help","npts="])
        except getopt.error, msg:
            # Re-raise option errors through the script's Usage exception.
            raise Usage(msg)
        for o, a in opts:
            if o in ("-h","--help"):
                print "Usage: data.py --npts=Npts"
                return 2
            if o in ("-n","--npts"):
                n = int(a)
            else:
                raise Usage()
        # y_A ~ 2*x + 0.1 + N(0,0.5)
        # y_B ~ 4*x - 0.1 + N(0,0.5)
        # x ~ (-1,1)
        labels = ['A','B']
        bcoeffs = [ 2 , 4 ]
        acoeffs = [+.1,-.1]
        x = -1 + 1./n  # start half a step inside the interval
        for i in range(n):
            which = random.randint(0,1)  # pick class A or B uniformly
            noise = random.randint(0,10000)
            if noise == 1:
                # Rare outlier: same linear model shifted by +1000.
                print labels[which],"\t",x,"\t",random.normalvariate(0,0.5)+ \
                    1000.+bcoeffs[which]*x+acoeffs[which]
            else:
                print labels[which],"\t",x,"\t",random.normalvariate(0,0.5)+ \
                    bcoeffs[which]*x+acoeffs[which]
            x = x + 2./n
示例9: animate
def animate(self, graph):
    """Advance this node one animation step within the force-directed graph.

    New nodes start at the viewport center and fade in; ungrabbed nodes are
    repelled from other nodes and the viewport edges, plus random thermal
    jitter scaled by ``graph.temperature``.
    """
    # If we're new, start in the middle
    if self.position is None:
        self.position = Numeric.array(graph.viewport.size, Numeric.Float)/2
    # Slowly fade in
    if self.opacity < 1:
        self.opacity += 0.01
    if not self.isGrabbed:
        # Forces don't apply if we're being grabbed
        # Stay away from other nodes
        for other in graph.nodes:
            # NOTE(review): truthiness of an array is ambiguous; this
            # presumably means ``other.position is not None`` — confirm.
            if other.position:
                repulsionForce(self, other, 80)
        # Stay away from the viewport edges; max(..., 1) avoids division
        # blow-up at or beyond the edge.
        edgeStrength = 200
        for axis in (0,1):
            self.position[axis] += edgeStrength / max(self.position[axis], 1)
            self.position[axis] -= edgeStrength / max(graph.viewport.size[axis] - self.position[axis], 1)
        # Random wandering
        self.position += (random.normalvariate(0, graph.temperature),
                          random.normalvariate(0, graph.temperature))
示例10: main
def main():
    """Write 'input.root' with nominal/up/down histograms and two data histos.

    Creates a two-bin nominal spectrum with +/-20% systematic variations,
    a matching pseudo-data TH1F, and a 2D standard-normal TH2F with 10k
    entries. (Python 2: uses ``xrange``.)
    """
    f = ROOT.TFile.Open('input.root','recreate')
    nom = ROOT.TH1F('nominal_histo','nominal_histo',2,0,2)
    up = ROOT.TH1F('syst_up','syst_up',2,0,2)
    dn = ROOT.TH1F('syst_down','syst_down',2,0,2)
    nom.SetBinContent(1,10)
    nom.SetBinContent(2,20)
    nom.Sumw2(0)  # drop the stored sum-of-weights (no per-bin errors)
    nom.Write()
    up.SetBinContent(1,12)  # +20% of nominal in each bin
    up.SetBinContent(2,24)
    up.Sumw2(0)
    up.Write()
    dn.SetBinContent(1,8)   # -20% of nominal in each bin
    dn.SetBinContent(2,16)
    dn.Sumw2(0)
    dn.Write()
    data1 = ROOT.TH1F('data1','data1',2,0,2)
    data1.SetBinContent(1,10.2)
    data1.SetBinContent(2,19.7)
    data1.Sumw2(0)
    data1.Write()
    # 2D standard-normal scatter, 10k entries.
    data2 = ROOT.TH2F('data2','data2',6,-3,3,6,-3,3)
    for i in xrange(10000):
        data2.Fill(random.normalvariate(0,1),random.normalvariate(0,1))
    data2.Sumw2(0)
    data2.Write()
    f.Close()
示例11: testFuc
def testFuc():
    """Exercise OnlineCluster on eight random 2D Gaussian blobs and plot them."""
    import random
    import time
    import pylab
    plot=True
    points=[]
    # create eight random 2D gaussian clusters of 100 points each
    for i in range(8):
        x=random.random()*3
        y=random.random()*3
        c=[scipy.array((x+random.normalvariate(0,0.1), y+random.normalvariate(0,0.1))) for j in range(100)]
        points+=c
    # NOTE(review): indentation was lost in this listing; the scatter call is
    # placed after the generation loop (plotting the full set once) — confirm.
    if plot: pylab.scatter([x[0] for x in points], [x[1] for x in points])
    random.shuffle(points)
    n=len(points)
    start=time.time()
    # the value of N is generally quite forgiving, i.e.
    # giving 6 will still only find the 3 clusters.
    # around 10 it will start finding more
    c=OnlineCluster(8)
    # Feed points one at a time to simulate online arrival.
    while len(points)>0:
        c.onlineCluster(points.pop())
    clusters=c.clusters
    #print ("I clustered %d points in %.2f seconds and found %d clusters."%(n, time.time()-start, len(clusters)))
    if plot:
        # Mark the discovered cluster centers in red.
        cx=[x.center[0] for x in clusters]
        cy=[y.center[1] for y in clusters]
        pylab.plot(cx,cy,"ro")
        pylab.draw()
        pylab.show()
示例12: placement
def placement(self, mode):
    """Set this node's (x, y) position according to *mode*.

    Parameters are read from ``self.info``:
      * "manual":  [x, y]; a single value mirrors x into y; empty/None -> (0, 0)
      * "random":  [size_x, size_y, offset_x, offset_y] — uniform placement
      * "cluster": [center_x, center_y, spread_x, spread_y] — Gaussian placement
    """
    if mode == "manual":
        if self.info is None or len(self.info) == 0:
            self.x = 0
            self.y = 0
        else:
            self.x = self.info[0]
            # BUG FIX: was ``len(self.info) >= 1``, which is always true in
            # this branch and raised IndexError for a one-element info list;
            # the else (y = x) was unreachable.
            if len(self.info) >= 2:
                self.y = self.info[1]
            else:
                self.y = self.x
    if mode == "random":
        # requires 4 values: size_x, size_y, offset_x, offset_y
        size_x, size_y = self.info[0], self.info[1]
        offset_x, offset_y = self.info[2], self.info[3]
        self.x = random.random()*size_x + offset_x
        self.y = random.random()*size_y + offset_y
    if mode == "cluster":
        # requires 4 values: center_x, center_y, spread_x, spread_y
        center_x, center_y = self.info[0], self.info[1]
        spread_x, spread_y = self.info[2], self.info[3]
        self.x = random.normalvariate(center_x, spread_x)
        self.y = random.normalvariate(center_y, spread_y)
示例13: generate_candidate
def generate_candidate(self, mu, sigma):
    """Draw a proposal value for the Metropolis-Hastings sampler.

    "independent": propose directly from N(mu, sigma).
    "random_walk": propose a Gaussian step away from the chain's most
    recent sample.

    Raises ValueError for an unknown ``self.method`` (previously the
    function fell through and hit an UnboundLocalError at ``return``).
    """
    if self.method == "independent":
        return random.normalvariate(mu, sigma)  # proposed move
    if self.method == "random_walk":
        # BUG FIX: the original read ``self.chain[i]`` with ``i`` undefined
        # in this scope (NameError); a random walk steps from the latest
        # chain sample.
        return self.chain[-1] + random.normalvariate(mu, sigma)  # proposed move
    raise ValueError("unknown proposal method: %r" % (self.method,))
示例14: random_clusters
def random_clusters(imagedir, category, make_faces=False):
    """Creates a test mockup of random clusters from a folder of images
    Returns:
        clusters: a list of clusters that can be JSONified and passed to the
        html renderer
    """
    image_extensions = {'jpg', 'png', 'jpeg', 'gif', 'ico'}
    stems = []
    for fname in sorted(os.listdir(imagedir)):
        stem, ext = os.path.splitext(fname)
        if ext[1:] in image_extensions:
            stems.append(stem)
    local_images = [make_image(stem, category, make_faces) for stem in stems]
    clusters = []
    # Cluster count ~ N(6, 2), but never fewer than 2.
    n_clusters = max(int(random.normalvariate(6,2)), 2)
    # TODO add cluster children to simulate HAC
    for _ in range(n_clusters):
        n_images = random.randrange(4,7)
        n_size = random.randrange(40,60)
        # Draw in the same order as the original dict literal so the RNG
        # stream is identical.
        all_images = random.sample(local_images, n_size)
        sample_images = random.sample(local_images, n_images)
        std = random.normalvariate(10.0,2.0)
        position = (random.random(), random.random())
        clusters.append({'all_images': all_images,
                         'sample_images': sample_images,
                         'std': std,
                         'position': position,
                         'size': n_size,
                         'children': []})
    return clusters
示例15: run_ens
def run_ens(xi, yi, zi, ens_num, stepCnt, dt):
    """Integrate ``ens_num`` Lorenz trajectories from a perturbed start point.

    Each ensemble member starts at (xi, yi, zi) plus independent N(0, 0.5)
    noise per coordinate and is stepped ``stepCnt`` times with forward Euler
    at step size ``dt``. Returns three lists (x, y, z) holding one sample
    array per member.
    """
    x, y, z = [], [], []
    for _ in range(ens_num):
        xs = np.empty((stepCnt + 1,))
        ys = np.empty((stepCnt + 1,))
        zs = np.empty((stepCnt + 1,))
        # Perturbed initial condition; draw order is x, then y, then z.
        mean = 0
        sdev = .5
        xs[0] = xi + random.normalvariate(mean, sdev)
        ys[0] = yi + random.normalvariate(mean, sdev)
        zs[0] = zi + random.normalvariate(mean, sdev)
        # Forward-Euler integration of the Lorenz system.
        for step in range(stepCnt):
            x_dot, y_dot, z_dot = lorenz(xs[step], ys[step], zs[step])
            xs[step + 1] = xs[step] + (x_dot * dt)
            ys[step + 1] = ys[step] + (y_dot * dt)
            zs[step + 1] = zs[step] + (z_dot * dt)
        x.append(xs)
        y.append(ys)
        z.append(zs)
    return x, y, z