本文整理汇总了Python中random.Random.normalvariate方法的典型用法代码示例。如果您正苦于以下问题:Python Random.normalvariate方法的具体用法?Python Random.normalvariate怎么用?Python Random.normalvariate使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类random.Random
的用法示例。
在下文中一共展示了Random.normalvariate方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: create_board
# 需要导入模块: from random import Random [as 别名]
# 或者: from random.Random import normalvariate [as 别名]
def create_board(fill_rate=0.3, board_size=None):
    """Create a (board_size + 2) x (board_size + 2) grid of 0/1 cells.

    The outermost ring of cells is always 0 (a sentinel border).  Each
    interior cell is filled (1) when a fresh N(0.5, 0.2) sample falls
    below *fill_rate*, so larger fill rates yield denser boards.

    Args:
        fill_rate: threshold applied to the normal sample (default 0.3).
        board_size: edge length of the playable interior; defaults to the
            module-level BOARD_SIZE for backward compatibility.

    Returns:
        Nested lists of ints (0 or 1), indexed as board[y][x].
    """
    size = BOARD_SIZE if board_size is None else board_size
    rand = Random()
    return [
        [
            # x/y of 0 are falsy and size + 1 is the far border: both force 0.
            int(x and x != size + 1 and y and y != size + 1
                and rand.normalvariate(0.5, 0.2) < fill_rate)
            for x in range(0, size + 2)
        ]
        for y in range(0, size + 2)
    ]
示例2: TestResponsiveness
# 需要导入模块: from random import Random [as 别名]
# 或者: from random.Random import normalvariate [as 别名]
class TestResponsiveness(unittest.TestCase):
def createDBServer(self, file, port):
    """Fork a child process that serves *file* over ZEO on *port*.

    In the parent this returns the child's pid; the child enters
    TestZEOServer.start() and does not return from this call.
    """
    child_pid = os.fork()
    if not child_pid:
        # Child: run the ZEO server (start() is expected to block).
        server = TestZEOServer(port, file)
        server.start()
    else:
        # Parent: hand the pid back to the caller.
        return child_pid
def setUp(self):
# Test fixture: copy a pristine collaboration DB into place, start a ZEO
# server on it, open a DB request, optionally wipe existing bookings, then
# populate a random subset of events with test bookings.
self.random = Random()
self._loggingSetup()
self.falseSession = FalseSession()
self._log("Start of test. Current pid is: " + str(os.getpid()))
self._log("Initiating setUp of test")
self._log('')
try:
if makeCopy:
self._log("Copying " + testingDbfile + " to " + collaborationDbFile + " ...")
# Remove any stale copy (and its ZODB .index/.tmp side files) first.
# NOTE(review): bare except silently ignores all removal errors.
try:
os.remove(collaborationDbFile)
os.remove(collaborationDbFile + '.index')
os.remove(collaborationDbFile + '.tmp')
except:
pass
shutil.copy(testingDbfile, collaborationDbFile)
# The .index and .tmp files are best-effort: log and continue if missing.
try:
shutil.copy(testingDbfile + '.index', collaborationDbFile + '.index')
except:
self._log("problem copying " + testingDbfile + '.index')
try:
shutil.copy(testingDbfile + '.tmp', collaborationDbFile + '.tmp')
except:
self._log("problem copying " + testingDbfile + '.tmp')
self._log("copy finished.")
self._log('')
# Fork a ZEO server child serving the copied DB; keep its pid for later.
self._log("Starting the ZEO server...")
self.zeoServer = self.createDBServer(collaborationDbFile, zeoPort)
self._log("zodb server started on pid: " + str(self.zeoServer) + " .")
self._log('')
self._log("Creating a CustomDBMgr on port " + str(zeoPort))
self.cdbmgr = DBMgr.getInstance(hostname="localhost", port=zeoPort)
self._log("Starting a request ...")
self.cdbmgr.startRequest()
self._log("Request started successfully.")
self._log('')
if doCleanUp:
self._log('Cleaning the DB of bookings and recreating the indexes')
DeleteAllBookingsAction(FalseAction()).call()
self._log('Cleanup succesfull')
self._log("We start populating DB with bookings...")
#ConferenceHolder()._getIdx() is an OOBTree
size = lastConfId - firstConfId
self._log("Populating among aproximately " + str(size) + " events, from " + str(firstConfId) + " to " + str(lastConfId) + ", with aproximately " + str(startingBookings) + " bookings")
self._log("Initial size of 'all' index: " + str(IndexesHolder().getById("Collaboration").getAllBookingsIndex().getCount()))
self.validConfIds = []
self.confsWithBookings = []
populated = 0
added = 0
# Pick each event with probability chance; halved so that the expected
# booking total (events * chance * mean-of-3) lands near startingBookings.
chance = (float(startingBookings) / float(size)) / 2.0
ch = ConferenceHolder()
for confId in xrange(firstConfId, lastConfId + 1):
confId = str(confId)
if self.random.random() < chance:
try:
conf = ch.getById(confId)
self.validConfIds.append(confId)
except MaKaCError:
# Id does not correspond to an existing event; skip it.
continue
i = 0
# Bookings per event ~ N(3, 3), truncated to int and clamped at >= 0.
bookingsForThisConf = max(int(self.random.normalvariate(3, 3)),0)
added += bookingsForThisConf
while i < bookingsForThisConf:
conf.getCSBookingManager().createTestBooking(bookingParams = {'startDate': self._randomDate(startDate, endDate)})
i += 1
populated += 1
self.confsWithBookings.append(confId)
# Progress report every 100 populated events.
if populated % 100 == 0:
self._log(str(populated) + ' events populated. Index size: ' + str (IndexesHolder().getById("Collaboration").getAllBookingsIndex().getCount()))
self._log("Populating finished. " + str(populated) + " events populated with " + str(added) + " bookings")
self._log("Size of 'all' index is now: " + str(IndexesHolder().getById("Collaboration").getAllBookingsIndex().getCount()))
#......... (remainder of this example omitted) .........
示例3: create_dqs_data
# 需要导入模块: from random import Random [as 别名]
# 或者: from random.Random import normalvariate [as 别名]
def create_dqs_data():
"""Generates the set of input data files for this study. This method
supersedes the other create_* methods below, which are retained for
reference."""
# calculate seasonal and climatological means
obs_rainfall_vals = get_rain_values(model_loc + "/inputs_gujarat/weather/original/rainfall.dat")
obs_rainfall_climate_mean = obs_rainfall_vals.mean()
obs_rainfall_climate_std = obs_rainfall_vals.std()
obs_yield_climate_mean = obs_yield.mean()
obs_yield_climate_std = obs_yield.std()
obs_temp_season_mean = [] # line_num : seasonal_mean
obs_temp_season_std = [] # line_num : seasonal_std
obs_temp_avg_svals = array([]) # array of all temp values from the season we are interested in
for i in range(obs_min_temp_data.shape[0]): # tmin and tmax files have same structure
tmin_svals = obs_min_temp_data[i, 5:9] # June -> September (inclusive)
tmax_svals = obs_max_temp_data[i, 5:9]
# Daily-average temperature as the midpoint of min and max.
tavg_svals = (tmin_svals + tmax_svals) / 2.0
obs_temp_season_mean.append(tavg_svals.mean())
obs_temp_season_std.append(tavg_svals.std())
obs_temp_avg_svals = append(obs_temp_avg_svals, tavg_svals)
obs_temp_climate_mean = obs_temp_avg_svals.mean()
obs_temp_climate_std = obs_temp_avg_svals.std()
# create directories
mkdir(model_loc + "/dqs_data")
for var_name, bias_type in dqs_data_config.iteritems():
mkdir(model_loc + "/dqs_data/" + var_name)
for b in bias_type:
mkdir(model_loc + "/dqs_data/" + var_name + "/" + b)
# One deterministic RNG per seed so each output file set is reproducible.
for s in dqs_seeds:
r = Random(s)
for p in percent_std_dev:
# -- daily uncorrelated precip --
# Each qualifying row packs fixed-width 5-char {3.1}f values; parse
# and rewrite it in 5-character slices, perturbing each value
# independently with noise scaled by p% of the climatological std.
out_file = open(model_loc + "/dqs_data/prec/day/p-" + str(p) + "_s-" + str(s) + ".dat", "w")
for row in obs_rain_data:
if len(row) in (51, 81): # there are 10 or 16 {3.1}f values
new_row = ""
index = 0
while index < (len(row) - 1): # there is a trailing '\n'
orig_value = float(row[index : (index + 5)])
out_std_dev = p / 100.0 * obs_rainfall_climate_std # was: * orig_value
new_value = r.normalvariate(orig_value, out_std_dev)
# Clamp to the representable {3.1}f range: [0.0, 999.9].
if new_value < 0:
new_value = 0.0
elif new_value >= 1000: # column format is {3.1}f
new_value = 999.9
new_row += "{0:5.1f}".format(new_value)
index += 5
new_row += "\n"
out_file.write(new_row)
else:
# Non-data row (header etc.): copy through unchanged.
out_file.write(row)
out_file.close()
# -- seasonally biased precip --
# Apply one additive offset per rain year, drawn from noise scaled by
# p% of that year's std, shifting every value in the year uniformly.
out_file = open(model_loc + "/dqs_data/prec/season/p-" + str(p) + "_s-" + str(s) + ".dat", "w")
for rain_year in range(1966, 1990):
obs_rainfall_year_mean = get_rain_year_values(rain_year).mean()
obs_rainfall_year_std = get_rain_year_values(rain_year).std()
out_std_dev = p / 100.0 * obs_rainfall_year_std # was: * obs_rainfall_year_mean
diff_val = r.normalvariate(obs_rainfall_year_mean, out_std_dev) - obs_rainfall_year_mean
out_file.write(" BLOCK-NO 25 DAILY RF OF " + str(rain_year) + "\n")
# obs_rain_layout maps rain_year -> (first_row, last_row) inclusive.
for row in obs_rain_data[obs_rain_layout[rain_year][0] : (obs_rain_layout[rain_year][1] + 1)]:
new_row = ""
index = 0
while index < (len(row) - 1): # there is a trailing '\n'
orig_value = float(row[index : (index + 5)])
new_value = orig_value + diff_val
if new_value < 0:
new_value = 0.0
elif new_value >= 1000.0: # column format is {3.1}f
new_value = 999.9
new_row += "{0:5.1f}".format(new_value)
index += 5
new_row += "\n"
out_file.write(new_row)
out_file.write("\n\n")
out_file.close()
# -- climatologically biased precip --
# Single additive offset for the whole record, drawn once from noise
# scaled by p% of the climatological std.
out_file = open(model_loc + "/dqs_data/prec/climate/p-" + str(p) + "_s-" + str(s) + ".dat", "w")
out_std_dev = p / 100.0 * obs_rainfall_climate_std # was: obs_rainfall_climate_mean
diff_val = r.normalvariate(obs_rainfall_climate_mean, out_std_dev) - obs_rainfall_climate_mean
for row in obs_rain_data:
if len(row) in (51, 81): # there are 10 or 16 {3.1}f values
new_row = ""
index = 0
while index < (len(row) - 1): # there is a trailing '\n'
orig_value = float(row[index : (index + 5)])
new_value = orig_value + diff_val
if new_value < 0:
new_value = 0.0
elif new_value >= 1000.0: # column format is {3.1}f
new_value = 999.9
new_row += "{0:5.1f}".format(new_value)
index += 5
new_row += "\n"
out_file.write(new_row)
#......... (remainder of this example omitted) .........
示例4: getExitRate
# 需要导入模块: from random import Random [as 别名]
# 或者: from random.Random import normalvariate [as 别名]
def getExitRate(timeOfDay):
    """Return the visitor exit rate for the given time of day.

    Rates step up over the day: 0 before tenAM, 1 before noon, 3 before
    ninePM, and 10 from ninePM onwards.

    Bug fix: the original ended with `if timeOfDay > ninePM: return 10`,
    so a timeOfDay exactly equal to ninePM matched no branch and the
    function fell through returning None.  The last branch is now
    unconditional, so every time of day maps to a rate.
    """
    if timeOfDay < tenAM:
        return 0
    if timeOfDay < noon:
        return 1
    if timeOfDay < ninePM:
        return 3
    # ninePM or later: peak exit rate.
    return 10
# Mean and spread of how long an agent takes to decide
# (presumably seconds — TODO confirm against the simulation's time base).
decisionMakingTime = 120
decisionMakingStdDeviation = 80
#WARNING: this will be done every 5 seconds
# Pre-sample a pool of ten decision durations drawn from
# N(decisionMakingTime, decisionMakingStdDeviation), truncated to int and
# clamped to a minimum of 1.
__decisionTimes = [
    max(1, int(rng.normalvariate(decisionMakingTime, decisionMakingStdDeviation)))
    for _ in xrange(10)
]
# Cursor into __decisionTimes, advanced by getNextDecisionDuration().
__decisionTimesIndex = 0
def getNextDecisionDuration():
    """Return the next pre-sampled decision duration.

    Cycles round-robin through the module-level __decisionTimes pool.
    The cursor is advanced *before* reading, so the first call returns
    the second pooled value (matching the original behavior).
    """
    global __decisionTimesIndex
    __decisionTimesIndex = (__decisionTimesIndex + 1) % len(__decisionTimes)
    return __decisionTimes[__decisionTimesIndex]