This article collects typical usage examples of the java.util.Collections.synchronizedList method as called from Python (Jython). If you have been wondering exactly how Collections.synchronizedList is used from Python, how to call it, or where to find examples of it, the hand-picked code samples below may help. You can also look further into usage examples for the enclosing class, java.util.Collections.
Four code examples of Collections.synchronizedList are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
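As a quick orientation before the examples: Collections.synchronizedList wraps an existing java.util.List so that individual calls such as add and get are synchronized, while compound operations like iteration still have to be guarded by the caller (per the java.util.Collections Javadoc). A minimal sketch, assuming a Jython 2.x interpreter running on a JVM; this snippet is not taken from the examples below:

from java.util import ArrayList, Collections

plainList = ArrayList()                              # an ordinary, non-thread-safe list
safeList = Collections.synchronizedList(plainList)   # synchronized view backed by the same list
safeList.add("first")                                # single calls like add/get are synchronized
print safeList.get(0)                                # prints: first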
Example 1: test_super_methods_merged
# Required module: from java.util import Collections [as alias]
# Alternatively: from java.util.Collections import synchronizedList [as alias]
from java.util import ArrayList, Collections   # ArrayList is needed in addition to Collections

def test_super_methods_merged(self):
    '''Checks that all signatures on a class' methods are found, not just the first for a name
    Bug #628315'''
    synchList = Collections.synchronizedList(ArrayList())
    synchList.add("a string")
    self.assertEquals("a string", synchList.remove(0))
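The test above hinges on java.util.List exposing two overloads of remove: remove(int index), which returns the removed element, and remove(Object o), which returns a boolean. The bug it guards against (#628315) was the runtime only registering the first signature it saw for a name. A small sketch of the overload dispatch, assuming Jython 2.x (not part of the original test):

from java.util import ArrayList, Collections

synchList = Collections.synchronizedList(ArrayList())
synchList.add("a string")
print synchList.remove(0)            # remove(int index)  -> "a string"
synchList.add("a string")
print synchList.remove("a string")   # remove(Object o)   -> True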
Example 2: run
# Required module: from java.util import Collections [as alias]
# Alternatively: from java.util.Collections import synchronizedList [as alias]
# Module-level imports used below; JobSpec, happy.dfs, and _log come from the enclosing module and are not shown here.
import threading
import time
import sets
from java.util import ArrayList, Collections, HashSet

def run(self):
    # sanity check:
    if len(self.sources) == 0: raise Exception("No sources defined")
    if len(self.sinks) == 0: raise Exception("No sinks defined")
    # create a plan:
    specs = []
    pipemap = {}
    for sink in self.sinks:
        spec = JobSpec(self._jobid(), self.workpath)
        spec.outputpath = sink.sinkpath
        spec.outputformat = sink.outputformat
        spec.outputJson = sink.json
        spec.compressoutput = sink.compressoutput
        spec.compressiontype = sink.compressiontype
        specs.append(spec)
        if len(sink.sources) != 1: raise Exception("Sinks can only have one source: " + str(sink))
        self._walkPipe(spec, sink.sources[0], specs, pipemap)
    # sort out paths for jobs:
    self._configureJobs(specs)
    # run jobs:
    _log.info("Working directory is " + self.workpath)
    _log.info(str(len(specs)) + " job(s) found from " + str(len(self.pipes)) + " pipe action(s)")
    happy.dfs.delete(self.workpath)
    jobsDone = Collections.synchronizedSet(HashSet())
    jobResults = Collections.synchronizedList(ArrayList())
    jobsStarted = sets.Set()
    while jobsDone.size() < len(specs):
        # only keep 3 jobs in flight:
        for spec in specs:
            id = spec.id
            if id not in jobsStarted:
                parentIds = [parent.id for parent in spec.parents]
                if jobsDone.containsAll(parentIds):
                    thread = threading.Thread(name="Cloud Job " + str(id), target=self._runJob, args=(spec.getJob(), id, jobsDone, jobResults))
                    thread.setDaemon(True)
                    thread.start()
                    jobsStarted.add(id)
                    if len(jobsStarted) - jobsDone.size() >= 3: break
        time.sleep(1)
    # compile results:
    results = {}
    for result in jobResults:
        for key, value in result.iteritems():
            results.setdefault(key, []).extend(value)
    # check for errors:
    if self.hasErrors():
        totalErrors = sum(results["happy.cloud.dataerrors"])
        _log.error("*** " + str(totalErrors) + " DataException errors were caught during this run, look in " + \
            self.workpath + "/errors to see details ***")
    return results
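Stripped of the job-planning details, the concurrency pattern in run() is: worker threads append their results to a Collections.synchronizedList and mark themselves finished in a Collections.synchronizedSet, while the main thread polls the set. A minimal, self-contained sketch of that pattern; the worker function and job ids here are placeholders, not part of the framework above:

import threading
import time
from java.util import ArrayList, Collections, HashSet

jobsDone = Collections.synchronizedSet(HashSet())       # ids of jobs that have finished
jobResults = Collections.synchronizedList(ArrayList())  # one result dict per job

def runJob(jobId):
    # placeholder for the real work each job would do
    jobResults.add({"items": [jobId]})   # add() on the wrapper is thread-safe
    jobsDone.add(jobId)

jobIds = range(4)
for jobId in jobIds:
    t = threading.Thread(name="Job " + str(jobId), target=runJob, args=(jobId,))
    t.setDaemon(True)
    t.start()

while jobsDone.size() < len(jobIds):     # poll the synchronized set, as run() does
    time.sleep(1)
print "collected " + str(jobResults.size()) + " result(s)"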
Example 3: createWalkableZones
# Required module: from java.util import Collections [as alias]
# Alternatively: from java.util.Collections import synchronizedList [as alias]
from java.util import Collections   # MeshMaker comes from the enclosing module and is not shown here

def createWalkableZones():
    MeshMaker.ledgeTiles = MeshMaker.getLedgeTiles()
    if len(MeshMaker.ledgeTiles) == 0:
        raise ValueError('No ledge tiles found on this map!')
    MeshMaker.ledgeTiles = MeshMaker.tileListSorter(MeshMaker.ledgeTiles)
    ledgeTiles2 = Collections.synchronizedList(MeshMaker.ledgeTiles)
    MeshMaker.walkableTileList = MeshMaker.splitWalkableZones(ledgeTiles2)
    MeshMaker.walkableTilesMap = {}
    for tl in MeshMaker.walkableTileList:
        s = MeshMaker.makeWalkShape(tl[0], tl[-1])
        MeshMaker.walkableZones.append(s)
        MeshMaker.walkableTilesMap[s] = tl
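One caveat worth noting for this example: wrapping the ledge tiles with synchronizedList makes individual list calls thread-safe, but iterating the wrapper from several threads still requires either synchronizing on it or taking a snapshot first (this is standard java.util.Collections behaviour, not something specific to MeshMaker). A sketch with placeholder tile data:

from java.util import ArrayList, Collections

tiles = ArrayList()
tiles.add("tile-1")
tiles.add("tile-2")
ledgeTiles = Collections.synchronizedList(tiles)

# A copy made through the wrapper goes through its synchronized toArray(),
# so the snapshot can be walked freely by any thread without extra locking.
snapshot = ArrayList(ledgeTiles)
for tile in snapshot:
    print tile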
Example 4: complete
# Required module: from java.util import Collections [as alias]
# Alternatively: from java.util.Collections import synchronizedList [as alias]
outfile.write('"'+self.formatter.print(timestamp) + '","'+str(total)+'","'+str(throughput)+'"')
outfile.flush()
i = i + 1
def complete(self):
self._exit = True
# Main
threadCount = 2
if len(sys.argv) > 1:
threadCount = int(sys.argv[1])
iterations = 3000000
main = Thread.currentThread()
print "Sleeping for 5 seconds"
main.sleep(5000)
resultList = Collections.synchronizedList(ArrayList())
start = System.currentTimeMillis()
threadList = ArrayList()
i = 0
ai = AtomicInteger(0)
while i < threadCount:
threadName = "Pooled Thread " + str(i)
t = FibThread(name=threadName, i=iterations, r=resultList)
t.start()
threadList.add(t)
i = i + 1
resultThread = ResultThread(r=resultList, pool=threadList)
resultThread.start()
print str(threadCount) + " Threads started ..."
for t in threadList:
t.join()
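Since FibThread and ResultThread are not shown on this page, the following is purely an assumption about their shape: a hypothetical stand-in for FibThread that would fit the keyword arguments used in the main block (name, i, r) and append its result into the shared synchronizedList. It is a sketch, not the example's real implementation:

import threading

class FibThread(threading.Thread):
    """Hypothetical stand-in; the real FibThread from the example is not shown here."""

    def __init__(self, name, i, r):
        threading.Thread.__init__(self, name=name)
        self.iterations = i
        self.results = r             # the Collections.synchronizedList shared with ResultThread

    def run(self):
        a, b = 0, 1
        for _ in xrange(self.iterations):
            a, b = b, a + b
        self.results.add((self.getName(), a))   # add() on the wrapper is thread-safe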