

Python Sliding.solution Method Code Examples

This article collects typical usage examples of the Python method Sliding.solution from open-source code. If you are wondering what Sliding.solution does, how to call it, or what it looks like in practice, the curated examples below should help. You can also explore other usage examples from the Sliding module.


Fifteen code examples of the Sliding.solution method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code samples.
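Before the examples, here is a minimal usage sketch of the method itself as the examples below use it: Sliding.solution(width, height) returns the solved board configuration from which BFS exploration starts. The return value shown is an assumption inferred from a comment in example 9, which gives ('A', 'B', 'C', '-') as the 2x2 solution board.

# Minimal usage sketch; the return value is inferred from the comment in
# example 9 ("(0, ('A', 'B', 'C', '-'))"), not from the module's docs.
import Sliding

sol = Sliding.solution(2, 2)
print(sol)  # expected: ('A', 'B', 'C', '-') for a 2x2 puzzle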

Example 1: solve_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    sol = Sliding.solution(WIDTH, HEIGHT)

    """ YOUR CODE HERE """
    sol = Sliding.board_to_hash(WIDTH, HEIGHT, sol)
    new_visited = [(sol, level)]
    
    new_visited = sc.parallelize(new_visited)
    num = 1

    # while there are still (k, v) pairs at the current level
    while num:
        # use += (an RDD union) because the mapping function does not retain
        # board sets that are not at the global level
        new_visited += new_visited.flatMap(bfs_map)
        if level % 4 == 3: # only reduce and filter every fourth iteration, for performance
            new_visited = new_visited.reduceByKey(bfs_reduce)
            new_visited = new_visited.partitionBy(PARTITION_COUNT)
            num = new_visited.filter(filter_func).count() # number of elements at the current level
        level += 1
        # print("Level " + str(level))  # for debugging

    """ YOUR OUTPUT CODE HERE """
    new_visited.coalesce(slaves).saveAsTextFile(output)

    sc.stop()
Author: rtadinada | Project: GamesmanSpark | Lines: 35 | Source: SlidingBfsSpark.py
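Example 1 relies on helpers (bfs_map, bfs_reduce, filter_func) that the listing does not include. Below is a hedged sketch of what they plausibly look like, inferred purely from how they are called; Sliding.children is a hypothetical helper name for generating the boards reachable in one slide, while board_to_hash and hash_to_board do appear elsewhere in this article.

# Hedged sketches of the helpers example 1 assumes; signatures are inferred
# from usage, and Sliding.children is a hypothetical API.
import Sliding

def bfs_map(pair):
    """Expand a (board_hash, lvl) pair into its children at lvl + 1."""
    board_hash, lvl = pair
    if lvl != level:  # only expand the current frontier
        return []
    board = Sliding.hash_to_board(WIDTH, HEIGHT, board_hash)
    return [(Sliding.board_to_hash(WIDTH, HEIGHT, child), lvl + 1)
            for child in Sliding.children(WIDTH, HEIGHT, board)]  # hypothetical helper

def bfs_reduce(a, b):
    """Keep the earliest BFS level at which a board was reached."""
    return min(a, b)

def filter_func(pair):
    """True for pairs discovered at the current global level."""
    return pair[1] == level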

Example 2: solve_sliding_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)


    """ YOUR MAP REDUCE PROCESSING CODE HERE """


    """ YOUR OUTPUT CODE HERE """

    sc.stop()
Author: warlck | Project: cs61c | Lines: 32 | Source: SlidingBfsSpark(new).py
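Example 2 is the unmodified assignment skeleton. For reference, here is a minimal hedged way to fill in the two marked sections, following the fixed-point pattern the other examples in this article use (and reusing the bfs_map/bfs_reduce sketches after example 1); this is not the original author's code.

# A minimal hedged completion of the skeleton, not the original author's code.
rdd = sc.parallelize([(sol, level)])
prev_count, count = 0, rdd.count()
while prev_count != count:  # stop once BFS finds no new boards
    rdd = rdd.flatMap(bfs_map).reduceByKey(bfs_reduce)
    level += 1
    prev_count, count = count, rdd.count()
for board, lvl in sorted(rdd.collect(), key=lambda kv: kv[1]):
    output(str(lvl) + " " + str(board))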

Example 3: solve_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """
    sol = Sliding.board_to_hash(WIDTH, HEIGHT, Sliding.solution(WIDTH, HEIGHT))    
    RDD = sc.parallelize([(sol,level)]) 
    count = RDD.count()
    RDD_count = 0
    search = True
    k = 1
    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    while search:
        if k % 3 == 0:
            RDD = RDD.flatMap(bfs_map).partitionBy(PARTITION_COUNT).reduceByKey(bfs_reduce) # PARTITION_COUNT is 16
        else:
            RDD = RDD.flatMap(bfs_map).reduceByKey(bfs_reduce)
        if k % 2 == 0:
            RDD_count = RDD.count() 
            if RDD_count == count: 
                search = False
            count = RDD_count
        k = k + 1
        level = level + 1
    """ YOUR OUTPUT CODE HERE """
    RDD = RDD.map(swap_map)
    RDD.coalesce(slaves).saveAsTextFile(output)
    # Alternatively, collect and write through the output function:
    # outputLst = RDD.collect()
    # for elem in outputLst:
    #     output(str(elem[0]) + " " + str(elem[1])) # output the elements
    sc.stop()
Author: shauryakalsi | Project: CS61C | Lines: 37 | Source: SlidingBfsSpark.py
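Example 3 maps swap_map over the RDD before saving, but the function is not shown in the listing. By analogy with the inline lambda in example 6 (lambda a: (a[1], a[0])), it presumably just swaps key and value so the BFS level comes first in the output; the body below is that assumption.

# Hedged sketch of swap_map, inferred from example 6's equivalent lambda.
def swap_map(pair):
    board_hash, lvl = pair
    return (lvl, board_hash)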

Example 4: solve_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """
    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    solution=Sliding.solution(WIDTH, HEIGHT)
    sol = Sliding.board_to_hash(WIDTH, HEIGHT, solution)
    data = sc.parallelize([(sol,level),])
    counter = 0
    curLen = 1 
    while counter < curLen:
        level += 1
        data = data.flatMap(bfs_flat_map)

        if level % 12 == 0:
            data = data.partitionBy(PARTITION_COUNT)
        data = data.reduceByKey(bfs_reduce)
        if level % 6 == 0:
            counter = curLen
            curLen = data.count()

    """ YOUR OUTPUT CODE HERE """
    data.coalesce(slaves).saveAsTextFile(output)
    sc.stop()
Author: VictoriaSnow | Project: CS-Projects | Lines: 33 | Source: SlidingBfsSpark.py

Example 5: solve_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """
    NUM_WORKERS = slaves

    sol = Sliding.solution(WIDTH, HEIGHT)
    """ MAP REDUCE PROCESSING CODE HERE """
    level_pos = sc.parallelize((make_state(level, sol),))
    prev_size, size = 0, 1

    while prev_size != size:
        level += 1
        if level % 10 == 0:
            level_pos = level_pos.partitionBy(PARTITION_COUNT)
        level_pos = level_pos.flatMap(bfs_flat_map).reduceByKey(bfs_reduce)
        prev_size = size
        size = level_pos.count()

    """ OUTPUT CODE HERE """
    level_pos = level_pos.map(unhash_board)
    level_pos.coalesce(NUM_WORKERS).saveAsTextFile(output)

    sc.stop()
Author: hansongcal | Project: CS61C | Lines: 31 | Source: SlidingBfsSpark.py
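Example 5 uses two helpers the listing omits: make_state, which builds the initial state pair, and unhash_board, which converts hashes back to boards for readable output. Plausible sketches follow; the names come from the example, but the bodies are assumptions based on how the other examples build and unpack their (hash, level) pairs.

# Hedged sketches; names come from example 5, bodies are assumptions.
import Sliding

def make_state(lvl, board):
    """Build the initial (board_hash, level) pair."""
    return (Sliding.board_to_hash(WIDTH, HEIGHT, board), lvl)

def unhash_board(pair):
    """Convert a (board_hash, level) pair back to (board, level)."""
    board_hash, lvl = pair
    return (Sliding.hash_to_board(WIDTH, HEIGHT, board_hash), lvl)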

Example 6: solve_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """
    sol_board = Sliding.solution(WIDTH, HEIGHT)
    sol = Sliding.board_to_hash(WIDTH, HEIGHT, sol_board)
    all_sols = sc.parallelize([(sol, level)]) #create an RDD 
    before_count = 1
    k = 0 #counter for iterations of partitionBy
    c = 0 #counter for iterations of count()
    while True:
        level += 1
        all_sols = all_sols.flatMap(bfs_map)
        if k % 4 == 0: # every 4 iterations, use partitionBy
            all_sols = all_sols.partitionBy(PARTITION_COUNT)
        all_sols = all_sols.reduceByKey(bfs_reduce)
        if c % 2 == 0: # every 2 iterations, use count()
            after_count = all_sols.count()
            if before_count == after_count:
                break
            before_count = after_count
        k += 1
        c += 1

    """ YOUR OUTPUT CODE HERE """
    all_sols = all_sols.map(lambda a: (a[1], a[0])).sortByKey()
    all_sols.coalesce(slaves).saveAsTextFile(output)
    sc.stop()
Author: poywoo | Project: 61cproj2-2 | Lines: 35 | Source: SlidingBfsSpark.py

Example 7: solve_sliding_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
	
    myRDD = sc.parallelize([(sol, level)])
    counter = myRDD.count()
    k = 0
    comp = 0
    repar = 0
    while k <= (math.sqrt(WIDTH * HEIGHT) - 1) * math.log(math.factorial(WIDTH * HEIGHT), 2):
        myRDD = myRDD.flatMap(sol_map)
        if repar % 8 == 0:
            myRDD = myRDD.partitionBy(6)
        myRDD = myRDD.reduceByKey(bfs_reduce)
        repar += 1
        level += 1
        k += 1
    k = 0

    while True:
        myRDD = myRDD.flatMap(sol_map)
        myRDD = myRDD.reduceByKey(bfs_reduce)
        if k % 3 == 0:
            comp = myRDD.count()
            if comp == counter:
                break
            else: 
                counter = comp
        level += 1
        k += 1

    myRDD = myRDD.map(bfs_map).collect()
    result = ""
    for each in myRDD:
        result += str(each) + "\n"
    output(result)
    sc.stop()
Author: ShihengJiang666 | Project: proj2-MapReduce-sliding-matrix | Lines: 59 | Source: SlidingBfsSpark.py
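Examples 7 and 15 first loop up to the bound (sqrt(W*H) - 1) * log2((W*H)!) before switching to the more expensive count()-based termination check; my reading is that this bound under-approximates the BFS depth, so early counts can be skipped. A quick sanity check of its size:

# Sanity check of the iteration bound used in examples 7 and 15.
import math

def iteration_bound(width, height):
    n = width * height
    return (math.sqrt(n) - 1) * math.log(math.factorial(n), 2)

print(iteration_bound(2, 2))  # ~4.6 for a 2x2 puzzle
print(iteration_bound(3, 3))  # ~36.9 for a 3x3 puzzle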

Example 8: solve_sliding_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level, prev_len, PARTITION_COUNT

    # Initialize global constants
    HEIGHT = height
    WIDTH = width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)
    level_nodes = sc.parallelize([(Sliding.board_to_hash(WIDTH, HEIGHT, sol), 0)])

    PARTITION_COUNT = 16
    prev_len = 0
    count = 0
    while True:
        level_nodes = level_nodes.flatMap(bfs_map).reduceByKey(bfs_reduce)
        next_len = level_nodes.count()
        if next_len == prev_len:
            break
        prev_len = next_len

        count += 1
        if count == 10:
            count = 0
            level_nodes = level_nodes.partitionBy(PARTITION_COUNT)

    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    # level = []
    # def add_to_string(obj):
    #     output(str(obj))
    level_nodes = level_nodes.map(lambda x : (x[1], x[0]))
    output_string = ""
    for l in level_nodes.sortByKey(True).collect():
        output_string += str(l) + "\n"
    output(output_string)
    # level_nodes.sortByKey(True).coalesce(1).saveAsTextFile("output")
    # level_nodes.foreach(add_to_string)


    """ YOUR OUTPUT CODE HERE """
    sc.stop()
Author: rtadinada | Project: cs61c-proj2-1 | Lines: 56 | Source: SlidingBfsSpark.py

Example 9: solve_sliding_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_sliding_puzzle(master, output, height, width, slaves):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job
    
    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)


    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    #cores = multiprocessing.cpu_count() #OPTIMIZATION, gives cpu count for this machine, for partitionBy
    constant = 8
    sol_hash = Sliding.board_to_hash(WIDTH, HEIGHT, sol) # the initial hash
    lst = sc.parallelize([(sol_hash, level)]).partitionBy(PARTITION_COUNT) #creates initial RDD of [(hash, level)]
    #lst = sc.parallelize([(sol, level)]).partitionBy(PARTITION_COUNT) #this creates the initial (K, V) RDD comprised of: (0, ('A', 'B', 'C', '-'))
    
    lst = lst.flatMap(bfs_map).reduceByKey(bfs_reduce)
    level += 1 # so that repartition doesn't run immediately when level = 0
    while (True): #continually loop
        if (level % constant == 0):
            new_lst = lst.flatMap(bfs_map).repartition(PARTITION_COUNT).reduceByKey(bfs_reduce)
            #new_lst is going to be lst + the new children in lst
            if (new_lst.count() == lst.count()):
                break
        else:
            new_lst = lst.flatMap(bfs_map).reduceByKey(bfs_reduce)
        lst = new_lst #set lst to equal the new list + non-duplicate children
        level += 1 # increment level
    
    """ YOUR OUTPUT CODE HERE """
    lst.coalesce(slaves).saveAsTextFile(output) # write lst to the output path

    
    # toPrint = "" #set the empty string
    # for pair in lst.collect():
    #     toPrint += (str(pair[1]) + " " + str(pair[0]) + "\n") #get the elements to add to the string
    # output(toPrint) #write the string

    sc.stop()
Author: leoleblanc | Project: CS61C | Lines: 56 | Source: SlidingBfsSpark.py

Example 10: main

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def main():
    parser = argparse.ArgumentParser(
            description="Returns back the entire solution graph.")
    parser.add_argument("-H", "--height", type=int, default=2,
            help="height of the puzzle")
    parser.add_argument("-W", "--width", type=int, default=2,
            help="width of the puzzle")
    args = parser.parse_args()

    p = Sliding.solution(args.width, args.height)
    slidingBfsSolver(p, args.width, args.height)
Author: Jananiravichandran | Project: cs61c | Lines: 13 | Source: SlidingBfsReference.py
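Example 10 is a driver for a single-machine reference solver, slidingBfsSolver, which the listing does not include. Here is a hedged sketch of what such a reference might look like: a plain level-by-level BFS from the solved board that prints each position with its depth. Sliding.children is again a hypothetical helper name; this is not the original reference code.

# Hedged sketch of a single-machine BFS reference; not the original code.
import Sliding

def slidingBfsSolver(puzzle, width, height):
    levels = {puzzle: 0}       # board -> BFS level
    frontier = [puzzle]
    while frontier:
        next_frontier = []
        for board in frontier:
            for child in Sliding.children(width, height, board):  # hypothetical API
                if child not in levels:
                    levels[child] = levels[board] + 1
                    next_frontier.append(child)
        frontier = next_frontier
    for board, lvl in sorted(levels.items(), key=lambda kv: kv[1]):
        print(lvl, board)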

Example 11: solve_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """
    sol = Sliding.solution(WIDTH, HEIGHT)
    rdd = sc.parallelize([(sol, level)])

    prev_count = 0
    count = rdd.count()

    k = 0
    i = 0
    # hashID = Sliding.board_to_hash(WIDTH, HEIGHT, sol) # board (obj) to hash (int); done after the loop instead
    while prev_count < count:
        rdd = rdd.flatMap(bfs_map)
        if k % 4 == 0:
            rdd = rdd.partitionBy(16, partitionHash)
        rdd = rdd.reduceByKey(bfs_reduce)
        level += 1
        if i % 4 == 0:
            prev_count = count
            count = rdd.count()

        k += 1
        i += 1
    # Output: convert the solution board to its hash and save the RDD across
    # the workers. Note that the code hard-codes 6 workers here, overriding
    # the slaves parameter.
    hashID = Sliding.board_to_hash(WIDTH, HEIGHT, sol) # board (obj) to hash (int)
    slaves = 6
    rdd.coalesce(slaves).saveAsTextFile(output)
    sc.stop()
Author: warlck | Project: cs61c | Lines: 53 | Source: SlidingBfsSpark.py

Example 12: solve_sliding_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. 
    sc = SparkContext(master, "python")


    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT= height
    WIDTH= width
    level = 0 

    sol = Sliding.solution(WIDTH, HEIGHT)

    # Create a list of just the solution
    solList = []
    solList.append((sol, 0))
    levelList = sc.parallelize(solList)
    counter = 0

    # Continue until all positions have been found.
    while level != -1:
        level += 1
        counter += 1
        levelList = levelList.flatMap(bfs_flat_map) \
                             .reduceByKey(bfs_reduce)

        # Checks if any positions were added
        newList = levelList.filter(lambda x: x[1] == level)
        if newList.count() == 0:
            level = -1

        # Repartitions every 32 steps
        if counter % 32 == 0:
            levelList = levelList.partitionBy(16)

    arr = levelList.collect()

    for elem in arr:
        finalStr = str(elem[1]) + " " + str(elem[0])
        output(finalStr)

    sc.stop()
Author: kimfairhurst | Project: MapReduce | Lines: 52 | Source: SlidingBfsSpark.py

Example 13: solve_sliding_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)


    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    rdd = sc.parallelize([(sol, level)])

    prev_count = 0
    count = rdd.count()
    # while rdd.filter(lambda x: x[1] == level).count() != 0: 
    k = 0
    i = 0
    while prev_count < count:
        rdd = rdd.flatMap(bfs_map)
        if k % 4 == 0:
            rdd = rdd.partitionBy(16, partitionHash)
        rdd = rdd.reduceByKey(bfs_reduce)
        level += 1
        if i % 4 == 0:
            prev_count = count
            count = rdd.count()
        k += 1
        i += 1

    """ YOUR OUTPUT CODE HERE """
    positions = rdd.collect()
    positions = sorted(positions, key=lambda kv: kv[1]) #sort k, v pairs by level
    for pos in positions:
        output(str(pos[1]) + " " + str(pos[0]))
    sc.stop()
Author: warlck | Project: cs61c | Lines: 52 | Source: SlidingBfsSpark.py
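Examples 11 and 13 pass a partitionHash function to partitionBy. PySpark's partitionBy accepts any callable that maps a key to an int there, so the simplest plausible version (an assumption, since the listing omits it) is:

# Hedged sketch of partitionHash; any key -> int function works here.
def partitionHash(key):
    return hash(key)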

Example 14: solve_sliding_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_sliding_puzzle(master, output, height, width):
    """
    Solves a sliding puzzle of the provided height and width.
     master: specifies master url for the spark context
     output: function that accepts string to write to the output file
     height: height of puzzle
     width: width of puzzle
    """
    # Set up the spark context. Use this to create your RDD
    sc = SparkContext(master, "python")

    # Global constants that will be shared across all map and reduce instances.
    # You can also reference these in any helper functions you write.
    global HEIGHT, WIDTH, level

    # Initialize global constants
    HEIGHT=height
    WIDTH=width
    level = 0 # this "constant" will change, but it remains constant for every MapReduce job

    # The solution configuration for this sliding puzzle. You will begin exploring the tree from this node
    sol = Sliding.solution(WIDTH, HEIGHT)


    """ YOUR MAP REDUCE PROCESSING CODE HERE """
    # parallelize 
    job = sc.parallelize([(sol, level)])
    old_result = 1
    # loop until no more children
    while True:
        if level % 8 == 0:
            job = job.partitionBy(16)
        # do the map reduce
        curr_job = job.flatMap(press_map).map(bfs_map).reduceByKey(bfs_reduce)
        # check if no new children were found
        num = curr_job.count()
        if num == old_result:
            break
        old_result = num
        job = curr_job
        level += 1


    """ YOUR OUTPUT CODE HERE """
    sorts = sorted(curr_job.collect(), key=lambda l: l[1])
    for item in sorts:
        output(str(item[1]) + " " + str(item[0]))
    sc.stop()
Author: lukelev07 | Project: proj2-spark | Lines: 50 | Source: SlidingBfsSpark.py

Example 15: solve_puzzle

# Required import: import Sliding [as alias]
# Or: from Sliding import solution [as alias]
def solve_puzzle(master, output, height, width, slaves):
    global HEIGHT, WIDTH, level
    HEIGHT=height
    WIDTH=width
    level = 0

    sc = SparkContext(master, "python")

    """ YOUR CODE HERE """
    sol = Sliding.board_to_hash(WIDTH, HEIGHT, Sliding.solution(WIDTH, HEIGHT))
    RDD = sc.parallelize([(sol, level)])
    counter = RDD.count()
    k, comp, data = 0, 0, 0
    repar = 0
    bound = (math.sqrt(WIDTH * HEIGHT)-1) * math.log(math.factorial(WIDTH * HEIGHT),2)
     
    # running mapreduce under lower bound
    while k <= bound:
        RDD = RDD.flatMap(bfs_map)
        if repar % 8 == 0:
            RDD = RDD.partitionBy(PARTITION_COUNT, hash)
        RDD = RDD.reduceByKey(bfs_reduce)
        level += 1
        k += 1
        repar += 1
    k = 0
    repar = 0
    # running mapreduce until the number of elements in RDD stops increasing
    while True:
        RDD = RDD.flatMap(bfs_map)
        if repar % 8 == 0:
            RDD = RDD.partitionBy(PARTITION_COUNT, hash)
        RDD = RDD.reduceByKey(bfs_reduce)
        if k % 3 == 0:
            comp = RDD.count()
            if comp == counter:
                break
            else: 
                counter = comp
        level += 1
        k += 1
        repar += 1
    # output code
    RDD = RDD.map(revert_back)
    RDD.coalesce(6).saveAsTextFile(output) # note: hard-codes 6 output partitions rather than using slaves
    sc.stop()
Author: ShihengJiang666 | Project: proj2-2CodeToRunOnAmazonServer | Lines: 48 | Source: SlidingBfsSpark.py
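Example 15 maps revert_back over the final RDD before saving. The function is not shown; by analogy with unhash_board in example 5, it presumably converts each (hash, level) pair back into a readable (board, level) pair, as sketched below.

# Hedged sketch of revert_back, by analogy with unhash_board in example 5.
import Sliding

def revert_back(pair):
    board_hash, lvl = pair
    return (Sliding.hash_to_board(WIDTH, HEIGHT, board_hash), lvl)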


Note: The Sliding.solution method examples in this article were compiled by 纯净天空 from GitHub, MSDocs, and other open-source code and documentation platforms. The snippets were selected from open-source projects contributed by the programming community; copyright in the source code remains with the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.