本文整理汇总了Python中multiprocessing.Manager.pop方法的典型用法代码示例。如果您正苦于以下问题：Python Manager.pop方法的具体用法？Python Manager.pop怎么用？Python Manager.pop使用的例子？那么恭喜您，这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类multiprocessing.Manager
的用法示例。
在下文中一共展示了Manager.pop方法的4个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: __init__
# 需要导入模块: from multiprocessing import Manager [as 别名]
# 或者: from multiprocessing.Manager import pop [as 别名]
class Datefacet:
    """Stamp every document in a Couchbase bucket with one of four dates.

    The dates are distributed over the documents with approximate weights
    65% / 20% / 10% / 5% of ``dsize``, tracked in a process-shared dict so
    multiple worker processes can cooperate.
    """

    def __init__(self):
        from couchbase.n1ql import N1QLQuery
        from multiprocessing import Manager, Lock
        # NOTE(review): `Bucket` and `itertools` must be imported elsewhere
        # in this module — confirm against the full file.
        self.cb = Bucket('couchbase://172.23.123.38/bucket-1')
        self.row_iter = self.cb.n1ql_query(N1QLQuery('select meta().id from `bucket-1`'))
        self.lock = Lock()
        self.dsize = 1000000
        # Process-shared mapping of date -> remaining document quota.
        self.dateiter = Manager().dict({key: None for key in ['2013-10-17', '2013-11-17', '2014-02-09', '2015-11-26']})
        self.dateiter['2013-10-17'] = .65 * self.dsize
        self.dateiter['2013-11-17'] = .2 * self.dsize
        self.dateiter['2014-02-09'] = .1 * self.dsize
        self.dateiter['2015-11-26'] = .05 * self.dsize
        # Round-robin iterator over the dates that still have quota left.
        self.cycledates = itertools.cycle(self.dateiter.keys())

    def createdateset(self):
        """Assign a date to each document returned by the N1QL query.

        Day 1 should have approximately 65% of the documents,
        Day 2 approximately 20%, Day 3 approximately 10%,
        Day 4 approximately 5%. Dates are formatted like 2010-07-27.
        """
        for resultid in self.row_iter:
            val = self.cb.get(resultid["id"]).value
            self.lock.acquire()
            try:
                # FIX: use the Python 3 built-in next(); the iterator method
                # .next() only existed in Python 2, and this block already
                # uses the print() function.
                tmpdate = next(self.cycledates)
                val["date"] = tmpdate
                self.cb.set(resultid["id"], val)
                # Critical section: decrement the quota and, once a date is
                # exhausted, rebuild the cycle without it.
                self.dateiter[tmpdate] -= 1
                if self.dateiter[tmpdate] == 0:
                    self.dateiter.pop(tmpdate, None)
                    self.cycledates = itertools.cycle(self.dateiter.keys())
            finally:
                # FIX: release in a finally block so an exception inside the
                # critical section cannot leave the lock held forever.
                self.lock.release()
        print(self.dateiter)

    def run(self):
        """Run createdateset in a worker process pool."""
        import concurrent.futures
        with concurrent.futures.ProcessPoolExecutor(max_workers=10) as executor:
            # FIX: submit the callable itself. The original wrote
            # submit(self.createdateset()), which ran the whole job
            # synchronously in this process and submitted its None result.
            executor.submit(self.createdateset)
示例2: __init__
# 需要导入模块: from multiprocessing import Manager [as 别名]
# 或者: from multiprocessing.Manager import pop [as 别名]
class Datefacet:
    """Stamp documents keyed by hex sequence numbers with one of four dates.

    The dates are distributed over ``dsize`` documents with approximate
    weights 65% / 20% / 10% / 5%, tracked in a process-shared dict.
    """

    def __init__(self):
        from multiprocessing import Manager, Lock
        # NOTE(review): `Bucket` and `itertools` must be imported elsewhere
        # in this module — confirm against the full file.
        self.cb = Bucket('couchbase://172.23.99.211/bucket-1', password="password")
        self.lock = Lock()
        self.dsize = 1000000
        # Process-shared mapping of date -> remaining document quota.
        self.dateiter = Manager().dict({key: None for key in ['2013-10-17', '2013-11-17', '2014-02-09', '2015-11-26']})
        self.dateiter['2013-10-17'] = .65 * self.dsize
        self.dateiter['2013-11-17'] = .2 * self.dsize
        self.dateiter['2014-02-09'] = .1 * self.dsize
        self.dateiter['2015-11-26'] = .05 * self.dsize
        # Round-robin iterator over the dates that still have quota left.
        self.cycledates = itertools.cycle(self.dateiter.keys())

    def createdateset(self):
        """Assign a date to each of the dsize documents (keys are hex ids).

        Day 1 should have approximately 65% of the documents,
        Day 2 approximately 20%, Day 3 approximately 10%,
        Day 4 approximately 5%. Dates are formatted like 2010-07-27.
        """
        for resultid in range(0, self.dsize):
            key = hex(resultid)[2:]
            val = self.cb.get(key).value
            self.lock.acquire()
            try:
                tmpdate = next(self.cycledates)
                val["date"] = tmpdate
                self.cb.set(key, val)
                # Critical section: decrement the quota and, once a date is
                # exhausted, rebuild the cycle without it.
                self.dateiter[tmpdate] -= 1
                if self.dateiter[tmpdate] == 0:
                    self.dateiter.pop(tmpdate, None)
                    self.cycledates = itertools.cycle(self.dateiter.keys())
            finally:
                # FIX: release in a finally block so an exception inside the
                # critical section cannot leave the lock held forever.
                self.lock.release()

    def run(self):
        """Run createdateset in a worker process pool."""
        # FIX: import locally (as the sibling example does) — the original
        # used concurrent.futures here with no visible import.
        import concurrent.futures
        with concurrent.futures.ProcessPoolExecutor(max_workers=10) as executor:
            # FIX: submit the callable itself. The original wrote
            # submit(self.createdateset()), which ran the whole job
            # synchronously in this process and submitted its None result.
            executor.submit(self.createdateset)
示例3: main
# 需要导入模块: from multiprocessing import Manager [as 别名]
# 或者: from multiprocessing.Manager import pop [as 别名]
def main():
    """Run an extended Kalman filter over positions streamed from a GPS reader.

    A GPS worker pushes coordinates onto a process-shared stack; a counting
    semaphore (gps_n) signals availability and a binary semaphore (gps_s)
    guards the stack. Ten measurements are pulled and filtered.
    """
    gps_n = Semaphore(0)            # counts coordinates available on the stack
    gps_s = Semaphore(1)            # mutual exclusion for the shared stack
    gps_coords_stack = Manager().list()
    gps = GPS(gps_coords_stack, gps_n, gps_s)
    gps.start()
    # Get the first position to seed the filter state.
    z = gps.getPosition()
    dt = 0.05
    range_std = 5.  # measurement noise std-dev, in meters
    # 2 state variables: the sensor provides position as (x, y), so the
    # single position value lives in 2 dimensions; 1 measurement, no control.
    filterk = ExtendedKalmanFilter(2, 1, 0)
    # Insert first position as the initial state.
    filterk.x = array(z)
    # Linearized state-transition matrix (first-order Taylor expansion).
    filterk.F = eye(2) + array([[0, 1], [0, 0]]) * dt
    # Measurement noise covariance.
    filterk.R = np.diag([range_std**2])
    # Process noise modeled as discrete white noise.
    filterk.Q[0:2, 0:2] = Q_discrete_white_noise(2, dt=dt, var=0.1)
    # FIX: removed `filterk.Q[2, 2] = 0.1` — the filter was constructed with
    # dim_x=2, so Q is 2x2 and index [2, 2] is out of bounds; the 0:2 slice
    # above already fills the whole matrix.
    # Initial state covariance.
    filterk.P *= 50
    # FIX: accumulate estimates in a list. The original called
    # np.append(xs, filterk.x) on an undefined `xs` and discarded the
    # returned array (np.append does not mutate in place).
    xs = []
    for i in range(10):
        # Pull a value from the GPS stack.
        gps_n.acquire()
        gps_s.acquire()
        result = gps_coords_stack.pop()
        gps_s.release()
        # Put the new z value in; the measurement may need reformatting,
        # otherwise pass longitude and latitude as an array [x, y].
        filterk.predict_update(array(result), HJacobian_at, hx)
        # Record the predicted state.
        xs.append(filterk.x)
        print(filterk.x)
示例4: wadi
# 需要导入模块: from multiprocessing import Manager [as 别名]
# 或者: from multiprocessing.Manager import pop [as 别名]
class wadi():
    """Fuzzing-harness driver: supervises a test-case server process and a
    debugger process, and persists crash reports plus the test cases that
    led up to them.

    NOTE(review): this block is Python 2 (print statements, str bytes).
    run_harness, run_debugger, timeout_debug, Process, Timer, Manager,
    hashlib, os and sys must be provided elsewhere in the module.
    """
    def __init__(self, args=None):
        # Optional configuration; when absent, nothing is initialized here.
        if args:
            self.args = args
        else:
            pass
    def writeTestCases(self,tcases,msg):
        """Write a crash report and the last 10 test cases to a directory
        named after the MD5 digest of the crash code + address.

        msg is a 3-element sequence: (crash text, code, address);
        tcases is the shared list of generated test cases.
        """
        self.msg = msg[0]
        self.code = msg[1]
        self.add = msg[2]
        self.testcases = tcases
        # Hash code+address so identical crashes map to the same folder.
        self.hash = hashlib.md5()
        self.b = self.code+self.add
        self.hash.update(self.b)
        self.dgst = self.hash.hexdigest()
        self.path = "./"+self.dgst
        if os.path.exists(self.path):
            # A folder with this digest already exists: duplicate crash.
            print "[*] Duplicate Crash: %s" % self.dgst
        else:
            os.makedirs(self.path)
            f = open(self.path + "/" +self.dgst+".crash","w+b")
            f.write(self.msg)
            f.close()
            print "[*] Written Crash file to: %s" % self.dgst+".crash"
            # Persist the 10 most recent test cases (popped newest-first
            # off the shared list).
            for i in range(10):
                self.tcase = self.testcases.pop()
                f2 = open(self.path+"/"+self.dgst+"_"+str(i)+".html","w+b")
                f2.write(self.tcase)
                f2.close()
                print "[*] Written testcases to %s" % self.path+"/"+self.dgst+str(i)+".html"
            print "[*] Last TestCase Folder '%s'" % self.dgst
    def close(self):
        # Terminate the whole driver process.
        sys.exit()
    def run(self):
        """Main supervision loop: keep the harness server and the debugger
        child alive, and harvest crashes reported through the shared queue."""
        # Process-shared lists: crash messages and generated test cases.
        self.queue = Manager().list()
        self.tcases = Manager().list()
        self.server_pid = None
        self.debugger_pid = None
        self.init = 0
        while True:
            if not self.server_pid:
                # Start the harness server that feeds test cases.
                self.server_process = Process(target=run_harness, args=(self.tcases,))
                self.server_process.start()
                self.server_pid = self.server_process.pid
                print '[*] Running Server Process %s ' % (self.server_pid,)
                #self.server_pid =
            if not self.debugger_pid:
                # Start the debugger that watches the target and reports
                # crashes through self.queue.
                self.debugger_process = Process(target=run_debugger,args=(self.queue,))
                self.debugger_process.start()
                self.debugger_pid = self.debugger_process.pid
                # Watchdog: after 120s, timeout_debug is invoked on the
                # debugger process (presumably to kill it — confirm).
                timer = Timer(120.0,timeout_debug,(self.debugger_process,))
                timer.daemon = True
                timer.start()
            if not self.debugger_process.is_alive():
                # Debugger exited: either a crash was queued or the
                # watchdog fired.
                print "[*] Debugger Process %s exited" % self.debugger_pid
                timer.cancel()
                self.lenq = len(self.queue)
                self.lentc = len(self.tcases)
                if self.lenq:
                    self.msg = self.queue.pop()
                    #self.msg = self.queue.get()
                    print "[*] Wooops Crash !!!!"
                    print "[*] %s" % self.msg[0]
                else:
                    print "[*] No Crashes"
                #if not self.tcases.empty():
                if self.lentc and self.lenq:
                    #self.tc = self.tcases.get()
                    self.writeTestCases(self.tcases, self.msg)
                else:
                    print "[*] No TestCases"
                # Clear the pid so a fresh debugger is spawned on the
                # next loop iteration.
                self.debugger_pid = None
            else:
                pass