This page collects typical usage examples of joblib.Parallel in Python, focusing on code that calls index() on its results. Strictly speaking, Parallel has no index method of its own: Parallel(...) returns an ordinary list, so the index() seen below is the list method (and, in Example 1, the pandas DataFrame.index attribute). If you have been wondering how to use this pattern in practice, the curated code examples here may help; they also show further usage of the joblib.Parallel class itself.
Three code examples are presented below, sorted by popularity by default.
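A minimal, self-contained sketch of the shared pattern first (the square function and its inputs are invented here purely for illustration):

from joblib import Parallel, delayed

def square(x):
    # Hypothetical work function, standing in for the query/fitness functions below.
    return x * x

if __name__ == "__main__":
    inputs = [3, 1, 4, 1, 5]
    # Parallel returns a plain list, in the same order as the inputs.
    results = Parallel(n_jobs=2)(delayed(square)(x) for x in inputs)
    # list.index() then locates a particular result, e.g. the smallest one,
    # exactly as the PSO examples below do with fitness.index(min(fitness)).
    best = results.index(min(results))
    print(results, best)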
Example 1: summary
# Required import: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import index [as alias]
def summary(self, count=32, out_table=None, prange=None, pjob=4):
    if self.ndims > 0:
        if_exists = "replace"
        if out_table is None:
            out_table = "%s_%s_summary" % (self.table, self.name)
        out_table = out_table.replace("[", "_").replace("]", "_")

        def query(i):
            # Build a MADlib sketch query (distinct count + top-value histogram) for column i.
            self.logger.info("Processing column %d (of %d)" % (i, self.shape[0]))
            query = self[i].data.alias(name="col")
            q1 = sa.select([sa.text("madlib.fmsketch_dcount(col) as count"),
                            sa.text("madlib.mfvsketch_top_histogram(col, %s) as top" % count)]).select_from(query)
            return [q1, i]

        if prange is None:
            prange = range(1, self.shape[0] + 1)
        queries = [query(i) for i in prange]
        # Run one query per column in parallel; the result is an ordinary list of DataFrames.
        dfs = Parallel(n_jobs=pjob)(delayed(process_query)(q) for q in queries)
        dfs = pd.concat(dfs)
        dfs.index = prange
        dfs["table"] = self.table
        dfs["column"] = self.name
        return dfs
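Example 1 relies on a process_query helper that is not shown on this page. As a rough sketch of what it plausibly does, assuming a SQLAlchemy engine is available (the connection string and the exact return shape are assumptions, not part of the original code):

import pandas as pd
import sqlalchemy as sa

# Assumed connection; the original code does not show how the engine is created.
engine = sa.create_engine("postgresql:///mydb")

def process_query(args):
    # Hypothetical helper matching the call above: run the MADlib sketch query
    # built by query(i) and return a small DataFrame, so that pd.concat(dfs)
    # in summary() yields one row per column, re-indexed afterwards by prange.
    q1, i = args
    return pd.read_sql(q1, con=engine)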
Example 2: pso_horns
# Required import: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import index [as alias]
def pso_horns():
    windrose = open('horns_rev_windrose.dat', 'r')
    windrose_angle = []
    windrose_speed = []
    windrose_frequency = []
    for line in windrose:
        columns = line.split()
        windrose_angle.append(float(columns[0]))
        windrose_speed.append(float(columns[1]))
        windrose_frequency.append(float(columns[2]))
    windrose.close()
    data = open('ref3_layout.dat', 'w', 1)
    data2 = open('ref3_random_layout.dat', 'w', 1)
    data3 = open('ref3_best_global_fitness.dat', 'w', 1)
    allfit = open('ref3_all_fitnesses.dat', 'w', 1)
    # pso1: first run. Np 40 (max recommended). Ainslie model combination 30 from MCDA. 20 000 iterations to see what happens. 12.91 = 87.09% eff.
    # pso2: fewer particles, down to 25. I changed the inertia and acceleration coefficients, multiplying them by 0.1 (written as e-1) for smaller movements and less chaos. It didn't work, I think because of the e-1: everything moves too slowly, so nothing changes. 13.46 = 86.54% eff.
    # pso3: changed e-1 to decimal notation for the acceleration coefficients. 5 particles only, to test.
    # pso4: n_iter = 80, np = 25 (number of particles in swarm). 2000 function calls. 8 hrs.
    # pso6: same as pso4, with more iterations (160).
    # pso8: includes one particle set to the original Horns Rev layout. 10 particles, to test.
    # ref1: reflecting boundaries, 10 particles. Changed velocity and particle updates to one operation per dimension. 138 seconds per iteration.
    # ref2: 10 particles; changed the windrose to 30-degree sectors for speed. 3.5 seconds per iteration. 200 iterations.
    # ref3: same as ref2, 2000 iterations.
    n_iter = 2000
    np = 10  # Number of particles in swarm
    nt = 80
    diam = 80.0
    particles = array([[[0.0, 0.0] for x in range(nt)] for x in range(np)])
    # if random() < 0.5:
    #     sign1 = 1.0
    # else:
    #     sign1 = - 1.0
    # if random() < 0.5:
    #     sign2 = 1.0
    # else:
    #     sign2 = - 1.0
    vel = array([[[0.0, 0.0] for x in range(nt)] for x in range(np)])
    best_own_fitness = [100.0 for x in range(np)]
    best_global_fitness = 100.0

    def create():
        # Sample a random turbine position inside the Horns Rev site boundary.
        k = random()
        l = random()
        xt = 5457.0 * l
        if xt <= 412.0:
            yt = k * 3907.0 + (1.0 - k) * (- 3907.0 / 412.0 * xt + 3907.0)
        elif xt <= 5040.0:
            yt = k * 3907.0
        else:
            yt = k * (3907.0 / 417.0 * (- xt + 5457.0))
        return xt, yt

    ## Produce starting positions. Includes boundaries of Horns Rev.
    for n in range(np):
        for tur in range(nt):
            particles[n][tur] = create()
    # layout = open('horns_rev.dat', 'r')
    # ll = 0
    # horns = array([[0, 0] for gf in range(nt)])
    # for line in layout:
    #     columns = line.split()
    #     horns[ll] = [float(columns[0]) - 423974.0, float(columns[1]) - 6147543.0]
    #     ll += 1
    # layout.close()
    # particles[np - 1] = horns
    fitness = Parallel(n_jobs=-1)(delayed(fit)(particles[i], windrose_angle, windrose_speed, windrose_frequency) for i in range(np))
    # fitness = Parallel(n_jobs=-1)(delayed(fit)(particles[i], nt) for i in range(np))
    best_layout = copy.deepcopy(particles[fitness.index(min(fitness))])
    best_local = copy.deepcopy(particles)
    for ite in range(n_iter):
        start_time2 = time.time()
        for p in range(np):  # Can be parallelised in the future.
            # Coefficients follow "Solving Constrained Nonlinear Optimization Problems with Particle Swarm Optimization" by Xiaohui Hu and Russell Eberhart (1.49445 learning coefficients).
            for t in range(nt):
                for coord in range(2):
                    vel[p][t][coord] = 0.72984 * vel[p][t][coord] + 1.49617 * random() * (best_local[p][t][coord] - particles[p][t][coord]) + 1.49617 * random() * (best_layout[t][coord] - particles[p][t][coord])
                    particles[p][t][coord] += vel[p][t][coord]
                j = 1.0
                w = 1.0
                # Reflect the y-coordinate back inside the 0..3907 m band.
                while particles[p][t][1] > 3907.0 or particles[p][t][1] < 0.0:
                    if particles[p][t][1] > 3907.0:
                        particles[p][t][1] = 3907.0 * 2.0 - particles[p][t][1]
                        j = random()
                    elif particles[p][t][1] < 0.0:
                        particles[p][t][1] = - particles[p][t][1]
                        j = random()
                # Reflect the x-coordinate off the slanted site edges.
                while particles[p][t][1] < - 3907.0 / 412.0 * (particles[p][t][0] + 10.) + 3907.0 or particles[p][t][1] > 3907.0 / 417.0 * (- particles[p][t][0] + 5457.0 + 10.):
                    if particles[p][t][1] < - 3907.0 / 412.0 * (particles[p][t][0] + 10.) + 3907.0:
                        particles[p][t][0] = 2.0 * (412.0 / 3907.0) * (3907.0 - particles[p][t][1]) - particles[p][t][0]
                        w = random()
                    elif particles[p][t][1] > 3907.0 / 417.0 * (- particles[p][t][0] + 5457.0 + 10.):
# ......... part of the code omitted here .........
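The listing is cut off inside the boundary-repair loop, and the omitted tail is not reproduced here. As a hedged sketch only, a PSO iteration of this shape typically closes by re-evaluating the swarm and updating the personal and global bests, which is also where the list returned by Parallel meets index() again. The snippet below reuses only names already defined in the example and should not be read as the missing code:

# Sketch of a typical end of the iteration, not the omitted original code.
fitness = Parallel(n_jobs=-1)(delayed(fit)(particles[i], windrose_angle, windrose_speed, windrose_frequency) for i in range(np))
for p in range(np):
    if fitness[p] < best_own_fitness[p]:  # update each particle's personal best
        best_own_fitness[p] = fitness[p]
        best_local[p] = copy.deepcopy(particles[p])
if min(fitness) < best_global_fitness:  # update the swarm-wide best layout
    best_global_fitness = min(fitness)
    best_layout = copy.deepcopy(particles[fitness.index(min(fitness))])
data3.write('{0:d} {1:f}\n'.format(ite, best_global_fitness))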
Example 3: pso_horns
# Required import: from joblib import Parallel [as alias]
# Or: from joblib.Parallel import index [as alias]
def pso_horns():
    time4 = time.time()
    windrose = open('horns_rev_windrose2.dat', 'r')
    windrose_angle = []
    windrose_speed = []
    windrose_frequency = []
    for line in windrose:
        columns = line.split()
        windrose_angle.append(float(columns[0]))
        windrose_speed.append(float(columns[1]))
        windrose_frequency.append(float(columns[2]))
    windrose.close()
    data = open('try6_layout_jensen.dat', 'w')
    data2 = open('try6_random_layout_jensen.dat', 'w')
    data3 = open('try6_best_global_fitness_jensen.dat', 'w', 1)
    allfit = open('try6_all_fitnesses.dat', 'w', 1)
    np = 10  # Number of particles in swarm
    nt = 80
    diam = 80.0
    particles = array([[[0.0, 0.0] for x in range(nt)] for x in range(np)])
    vel = array([[[0.0, 0.0] for x in range(nt)] for x in range(np)])
    best_own_fitness = [100.0 for x in range(np)]

    def create():
        # Sample a random turbine position inside the Horns Rev site boundary.
        kk = random()
        l = random()
        xt = 5457.0 * l
        if xt <= 412.0:
            yt = kk * 3907.0 + (1.0 - kk) * (- 3907.0 / 412.0 * (xt + 10.0) + 3907.0)
        elif xt <= 5040.0:
            yt = kk * 3907.0
        else:
            yt = kk * (3907.0 / 417.0 * (- xt + 5457.0 + 10.0))
        return xt, yt

    ## Produce starting positions. Includes boundaries of Horns Rev.
    for n in range(np):
        for tur in range(nt):
            particles[n][tur] = create()
    # layout = open('horns_rev.dat', 'r')
    # ll = 0
    # horns = array([[0, 0] for gf in range(nt)])
    # for line in layout:
    #     columns = line.split()
    #     horns[ll] = [float(columns[0]) - 423974.0, float(columns[1]) - 6147543.0]
    #     ll += 1
    # layout.close()
    # particles[np - 1] = deepcopy(horns)
    best_local = array([[[0.0, 0.0] for x in range(nt)] for x in range(np)])
    # Fitness evaluation is skipped if a turbine is out of bounds, following Bratton & Kennedy (2007) on PSO.
    # More 'repair' methods for particles out of bounds are described in Helwig's 2010 PhD thesis.
    # Chu (2011) shows that the reflecting boundary method is better than random or absorbing boundaries. TODO: implement.
    fitness = Parallel(n_jobs=-1)(delayed(efficiency)(particles[i], windrose_angle, windrose_speed, windrose_frequency, nt) for i in range(np))
    print(fitness)
    for fg in range(np):
        allfit.write('{0:f}\n'.format(fitness[fg]))
    allfit.write('\n')
    for p in range(np):
        best_own_fitness[p] = deepcopy(fitness[p])
        best_local[p] = deepcopy(particles[p])
    best_global_fitness = min(fitness)
    print(best_global_fitness)
    print(best_own_fitness)
    best_layout = deepcopy(particles[fitness.index(min(fitness))])
    print(best_local[5][54][1] - particles[5][54][1])
    # for i in range(nt):
    #     data.write('{2:d} {0:f} {1:f}\n'.format(best_layout[i][0], best_layout[i][1], i))
    # data.write('\n')
    # Velocity limiting to 10% to start with, for convergence, and then increase speed.
    k = 1.0
    for ite in range(200):
        start_time2 = time.time()
        particles = Parallel(n_jobs=-1)(delayed(movement)(vel[i], best_local[i], particles[i], best_layout, k, nt) for i in range(np))
        # Find the minimum distance between turbines; if two are closer than two rotor diameters, randomise one of them.
        for b in range(np):
            pp = 0
            while pp == 0:
                pp = 1
                for i in range(nt):
                    for j in range(nt):
                        if i != j and distance(particles[b][i][0], particles[b][i][1], particles[b][j][0], particles[b][j][1]) < 2.0 * diam:
                            particles[b][j] = create()
                            pp = 0
        # Fitness evaluation is skipped if a turbine is out of bounds, following Bratton & Kennedy (2007) on PSO.
        # More 'repair' methods for particles out of bounds are described in Helwig's 2010 PhD thesis.
        # Chu (2011) shows that the reflecting boundary method is better than random or absorbing boundaries. TODO: implement.
        fitness = Parallel(n_jobs=-1)(delayed(efficiency)(particles[i], windrose_angle, windrose_speed, windrose_frequency, nt) for i in range(np))
# ......... part of the code omitted here .........
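Example 3 moves the particles through a movement helper (again parallelised with joblib) whose body is not shown here. Below is a hedged sketch of what such a helper could look like, inferred from the call movement(vel[i], best_local[i], particles[i], best_layout, k, nt), the coefficients used in Example 2, and the comment about velocity limiting; the 10% cap and its scaling by k are assumptions:

from random import random

def movement(vel_i, best_local_i, particles_i, best_layout, k, nt):
    # Hypothetical sketch, not the original implementation.
    vmax = k * 0.1 * 5457.0  # assumed cap: k times 10% of the site's x-extent
    for t in range(nt):
        for coord in range(2):
            v = (0.72984 * vel_i[t][coord]
                 + 1.49617 * random() * (best_local_i[t][coord] - particles_i[t][coord])
                 + 1.49617 * random() * (best_layout[t][coord] - particles_i[t][coord]))
            vel_i[t][coord] = max(-vmax, min(vmax, v))
            particles_i[t][coord] += vel_i[t][coord]
    # The caller keeps only the returned positions; with a process-based backend,
    # in-place changes to vel_i would not propagate back to the parent's vel array.
    return particles_i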