

Python Manager.items Method Code Examples

This article collects typical usage examples of the Manager.items method from Python's multiprocessing module. If you are wondering exactly how Python's Manager.items is used in practice, or looking for working examples of calling it, the hand-picked code samples below may help. You can also explore further usage examples of multiprocessing.Manager, the class this method belongs to.


The following presents 8 code examples of the Manager.items method, sorted by popularity by default.
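Before the examples, here is a minimal sketch of the pattern most of them share: a Manager().dict() proxy is passed to worker processes, each worker writes into it, and the parent reads the shared contents back with items(). The names worker and shared below are illustrative only and are not taken from any of the examples.

from multiprocessing import Manager, Process

def worker(shared, n):
    # each worker process writes its own result into the shared DictProxy
    shared[n] = n * n

if __name__ == '__main__':
    shared = Manager().dict()
    processes = [Process(target=worker, args=(shared, n)) for n in range(4)]
    for p in processes:
        p.start()
    for p in processes:
        p.join()
    # items() hands back the proxy's current contents; dict() turns that
    # into an ordinary snapshot the parent process can keep
    print(dict(shared.items()))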

Example 1: wait_for

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import items [as alias]
    def wait_for(self, key, value):
        d = Manager().dict()
        d[key] = None
        v = Manager().Value('s', ' ')
        e = Event()

        p_state = Process(target=self.state_refresher, args=(self, d, e))
        p_input = Process(target=self.input_waiter, args=(self, v, e))
        p_state.start()
        p_input.start()

        while v.value != 'exit' and dict(d.items())[key] != value:
            e.wait()
            e.clear()

        self.state = d['state']
        self.current = d['current']
        self.enemy = d['enemy']
        self.battlefield = d['battlefield']

        p_state.terminate()
        p_input.terminate()
        curses.endwin()
        p_state.join()
        p_input.join()

        return True if dict(d.items())[key] == value else False
Developer ID: Scanters, Project: pyttleship, Lines of code: 29, Source file: pyttleship.py

Example 2: research_keywords

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import items [as alias]
def research_keywords(something_unknown, model, websites_to_scan=10, keyword_count=25, attempts=0, google_api_key=GOOGLE_API_KEY):
	# searches for something unknown on Google to find related websites and returns a ranked list of keywords from across all sites
	maximum_number_of_google_search_attempts = 3
	if attempts < maximum_number_of_google_search_attempts:
		all_keywords = Manager().dict()
		engine = Google(license=google_api_key, throttle=1.0, language="en")
		try:
			processes = []
			for page in range(int(websites_to_scan/10)):
				for website in engine.search(something_unknown, start=page+1, count=10, type=SEARCH, cached=False):
					web_mining_process = Process(target=extract_keywords, args=(website.url, model, all_keywords))
					processes.append(web_mining_process)
					web_mining_process.start()
				[process.join() for process in processes]
		except HTTP403Forbidden:
			print "\nToday's maximum number of free searches from Google shared by this API across all words2map users has expired.\nPlease get your own key at https://code.google.com/apis/console\n\nFrom that site, simply:\n1. In the API Manager Overview, find \"Custom Search API\" and enable it\n2. Copy your new API key from \"Credentials\"\n3. Paste it in words2map.py in the global variable \"GOOGLE_API_KEY\"\n"
			sys.exit(1)
		except (URLError, URLTimeout, HTTPError, SSLError):
			print "\nUnable to reach Google Search for {}, trying one more time".format(something_unknown)
			return research_keywords(something_unknown, model, attempts=attempts+1)
		all_keywords = sorted(all_keywords.items(), key=itemgetter(1), reverse=True)
		print "\nKeywords about {} to combine vectors for:".format(something_unknown)
		top_keywords = []
		for i in range(keyword_count):
			try:
				keyword, score = all_keywords[i]
				top_keywords.append(all_keywords[i])
				print "{} {}".format(round(score, 3), unidecode(keyword).replace("_", " "))
			except IndexError:
				break
		return top_keywords
	else:
		print "After a few tries, it seems that Google is not returning results for us. If you haven't done so already, please try adding your own API key at https://code.google.com/apis/console\n\nFrom that site, simply:\n1. In the API Manager Overview, find \"Custom Search API\" and enable it\n2. Copy your new API key from \"Credentials\"\n3. Paste it in words2map.py in the global variable \"GOOGLE_API_KEY\"\n"
		sys.exit(1)
Developer ID: overlap-ai, Project: words2map, Lines of code: 36, Source file: words2map.py
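One detail in the retry path above: the recursive call only forwards something_unknown, model, and attempts, so any caller-supplied websites_to_scan, keyword_count, or google_api_key silently reverts to its default value on the retry. If that is not intended, the retry can forward the original arguments explicitly, for example:

return research_keywords(something_unknown, model,
                         websites_to_scan=websites_to_scan,
                         keyword_count=keyword_count,
                         attempts=attempts + 1,
                         google_api_key=google_api_key)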

Example 3: get

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import items [as alias]
    def get(self, wiki_id):
        """
        Averages sentiment for entities across all documents
        :param wiki_id: the id of the wiki
        :type wiki_id: str|int
        :return: response
        :rtype: dict
        """
        global USE_MULTIPROCESSING, MP_NUM_CORES

        page_doc_response = document_access.ListDocIdsService().get(wiki_id)
        if page_doc_response['status'] != 200:
            return page_doc_response

        if USE_MULTIPROCESSING:
            d = Manager().dict()

            Pool(processes=MP_NUM_CORES).map(self.__class__.update_dict_with_document_entity_sentiment(),
                                             [(d, doc_id) for doc_id in page_doc_response[wiki_id]])
            entity_sentiment = dict(d.items())
        else:
            entity_sentiment = {}
            dss = self._document_entity_sentiment_service()
            counter = 0
            total = len(page_doc_response[wiki_id])
            for doc_id in page_doc_response[wiki_id]:
                sent_response = dss.get_value(doc_id, {})
                avg_phrase_sentiment = sent_response.get('averagePhraseSentiment', {})
                print "%d / %d (%d keys)" % (counter, total, len(avg_phrase_sentiment.keys()))
                counter += 1
                for key in avg_phrase_sentiment:
                    entity_sentiment[key] = entity_sentiment.get(key, []) + [avg_phrase_sentiment[key]]

        return {'status': 200,
                wiki_id: dict([(key, numpy.mean([i+1 for i in entity_sentiment[key]])+1) for key in entity_sentiment])}
Developer ID: tristaneuan, Project: nlp_services, Lines of code: 37, Source file: sentiment.py
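A caution about the multiprocessing branch above: the trailing parentheses in self.__class__.update_dict_with_document_entity_sentiment() call the method immediately rather than handing it to Pool.map. Unless that method is a factory returning the actual mapper (which the snippet does not show), the intended call is presumably the method object itself:

Pool(processes=MP_NUM_CORES).map(self.__class__.update_dict_with_document_entity_sentiment,
                                 [(d, doc_id) for doc_id in page_doc_response[wiki_id]])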

Example 4: links_finder

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import items [as alias]
def links_finder(urls):
    """Parses http links from URLs specified in list.
    urls: list with target URLs
    return: dictionary with http links
    """
    d = Manager().dict()
    processes = []
    for url in urls:
        p = Process(target=parser, args=(url, d))
        p.start()
        processes.append(p)
    map(lambda p: p.join(), processes)
    return dict(d.items())
Developer ID: dmitry-moroz, Project: python_examples, Lines of code: 15, Source file: t3_2.py
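A portability note on the example above: map(lambda p: p.join(), processes) relies on Python 2's eager map. Under Python 3, map returns a lazy iterator that is never consumed here, so the child processes would not actually be joined; a version-independent equivalent is a plain loop:

for p in processes:
    p.join()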

Example 5: index_reads

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import items [as alias]
def index_reads(num_threads, reads, k):
    idx = Manager().dict()
    args = (reads,k,idx,)
    run_map(num_threads, len(reads) / num_threads, map_index, args)
    return {key: val for key, val in idx.items()}
Developer ID: twareproj, Project: tware, Lines of code: 7, Source file: vc.py
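As with the other Python 2 examples on this page, note that len(reads) / num_threads is integer division under Python 2; under Python 3 it produces a float, so a direct port would use len(reads) // num_threads.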

Example 6: Manager

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import items [as alias]
'''
Created on Feb 26, 2014

@author: jimhorng
'''

import json
from multiprocessing import Manager

d_proxy = Manager().dict()
d_proxy['test1'] = 123
d_proxy['test2'] = {'foo' : 123}

print type(d_proxy)
print type(d_proxy.items())
print d_proxy['test2']
print type(d_proxy['test2'])

print json.dumps(d_proxy['test2'])
print json.dumps(d_proxy.items())

if __name__ == '__main__':
    pass
Developer ID: jimhorng, Project: python-test, Lines of code: 25, Source file: json_test.py
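The point this small script illustrates: d_proxy itself is a DictProxy rather than a plain dict, which json.dumps would reject directly, while individual values such as d_proxy['test2'] and the plain list returned by d_proxy.items() (under Python 2, which this script targets) are ordinary Python objects that serialize without trouble.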

Example 7: learning

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import items [as alias]

#......... part of the code is omitted here .........
                add_to_list(left, pre_parsed_data, i, has_hyponym)
            if has_hyponym:
                left.reverse()
                return 'left', left[(MAX_SEARCH_RANGE - has_hyponym[-1]):]

            return None

        def add_rule(rule):
            if len(rule[1]) >= 3:
                rts = []

                count = 0
                for element in rule[1]:
                    if element[1] == 'NPs' or element[1] == 'NP':
                        if count == 0 or count == len(rule[1]) - 1:
                            rts.append(element[1])
                        else:
                            if element[1] == 'NP':
                                rts.append(element[0])
                            else:
                                for node in element[1]:
                                    rts.append(node[0])
                    else:
                        rts.append(element[0])

                    count += 1

                if rule[0] == 'left':
                    rts[len(rule[1]) - 1] = 'HYPERNYM'
                else:
                    rts[0] = 'HYPERNYM'

                rts_str = ' | '.join(rts)

                if rts_str in rules:
                    rules[rts_str] += 1
                else:
                    rules[rts_str] = 1

        while not file_list.empty():
            input_file = file_list.get()

            tagged_data = pickle.load(open(input_file, 'rb'))

            pre_parsed_data = rule_parser(tagged_data)

            hypernym_positions = find_hypernyms(pre_parsed_data, h_dict.keys())

            for hypernym in hypernym_positions:
                rule = find_pattern(pre_parsed_data, hypernym, h_dict)

                if rule:
                    add_rule(rule)

            percentage = 100.0 - ((float(file_list.qsize()) / float(file_count)) * 100.0)
            sys.stdout.write("\rProgress: {0:.2f}%".format(percentage))
            sys.stdout.flush()

            file_list.task_done()

    def blacklist_filter(rule):
        result = True
        number_blacklist = []

        for i in range(100):
            number_blacklist.append(num2words(i).encode('ascii'))

        for word in (WORD_BLACKLIST + number_blacklist):
            if word in rule:
                result = False

        return result

    h_dict = pickle.load(open(input_dict, 'rb'))

    rules = Manager().dict()
    file_list = JoinableQueue()
    sys.stdout.write("\rProgress: 0.00%")

    for root, subFolders, files in os.walk(os.path.join(os.path.dirname(__file__), 'corpus', 'tagged')):
        for current_file in files:
            if current_file.endswith(".pickle"):
                file_list.put(os.path.join(root, current_file))

    file_count = file_list.qsize()

    for pid in range(8):
        process = Process(target=worker, args=())
        process.daemon = True
        process.start()

    file_list.join()
    print('')

    # filter the rules and save them
    filtered_rules = {k: v for k, v in rules.items() if v >= THRESHOLD and blacklist_filter(k)}
    rule_list = sorted(filtered_rules.keys(), key=filtered_rules.get)
    rule_list.reverse()

    pickle.dump(rule_list, open(output_rules, 'wb+'), 2)
Developer ID: tbraun89, Project: research-textmining, Lines of code: 104, Source file: grammars.py
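The essential multiprocessing skeleton of this example, reduced to a self-contained sketch (the worker body and names below are illustrative, not the original logic): worker processes drain a JoinableQueue of file names, write results into a shared Manager dict, and the parent waits on file_list.join() before reading everything back with items().

from multiprocessing import JoinableQueue, Manager, Process

def worker(file_list, rules):
    # illustrative stand-in for the real worker: take names off the queue
    # and record one result per name in the shared dict
    while not file_list.empty():
        name = file_list.get()
        rules[name] = len(name)
        file_list.task_done()

if __name__ == '__main__':
    rules = Manager().dict()
    file_list = JoinableQueue()
    for name in ['a.pickle', 'b.pickle', 'c.pickle']:
        file_list.put(name)

    for _ in range(2):
        process = Process(target=worker, args=(file_list, rules))
        # daemon workers that end up blocked in get() after the queue drains
        # are simply killed when the main process exits
        process.daemon = True
        process.start()

    # join() returns once task_done() has been called for every queued item
    file_list.join()
    print(dict(rules.items()))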

Example 8: get_dockerstats_metrics

# Required import: from multiprocessing import Manager [as alias]
# Or: from multiprocessing.Manager import items [as alias]
def get_dockerstats_metrics(endpoint, docker_client):
    """
    Pull performance metrics from docker remote api
    """

    interval = 60
    machine_stats = Manager().dict()
    processes = []
    payload = {}

    # Set up processes to collect stats from the docker daemon for each container
    for container in docker_client.containers():
        processes.append(Process(target=get_stats_from_daemon, 
            args=(docker_client.stats(container['Id']),container['Id'],
                  container['Names'], machine_stats, interval)))
    for p in processes:
        p.start()
        p.join()

    for key, value in machine_stats.items():

        # Determine if one of the aliases matches (is something we want to collect metrics for)
        container_name = None
        for name in value['aliases']:
            if match_container_name(name.replace('/',''), value):
                container_name = name.replace('/','')
                break

        # Skip this if the container didn't match
        if container_name == None:
            continue

        # Compute the timestamp, using the first second in this series
        ts = int(dateutil.parser.parse(value['stats'][0]['read']).strftime('%s'))

        # Run through all the stat entries for this container
        stats = value['stats']
        stats_len = len(stats)  # Should always be 60

        # Initialize min/max/total variables for memory, cpu
        total_memory = 0
        min_memory = None
        max_memory = None
        total_load = 0
        min_load = None
        max_load = None

        # Compute min, max and average for all non-cumulative stats
        for stat in stats:

            # Grab the memory usage stats
            memory = stat['memory_stats']
            memory_kb = memory['usage']/1024.0
            total_memory, min_memory, max_memory = total_min_max(memory_kb, total_memory, min_memory, max_memory)

            # Get the CPU load. The load value is always 0?
            cpu = stat['cpu_stats']
            cpu_load = cpu['cpu_usage']['total_usage']
            total_load, min_load, max_load = total_min_max(cpu_load, total_load, min_load, max_load)

        # Compute first/last values of cumulative counters
        first = stats[0]  # First item in this series
        last = stats[stats_len-1]  # Last item in this series
        system_cpu = last['cpu_stats']['system_cpu_usage'] - first['cpu_stats']['system_cpu_usage'] # total system cpu usage
        
        cpu_usage = {
                   'total': last['cpu_stats']['cpu_usage']['total_usage'] - first['cpu_stats']['cpu_usage']['total_usage'], 
                    }
        memory_usage = {
                    'total': total_memory,
                    'ave': total_memory/stats_len,
                    'max': max_memory,
                    'min': min_memory
                    }
        network_usage = {
                    'tx_kb': (last['network']['tx_bytes'] - first['network']['tx_bytes'])/1024.0,
                    'rx_kb': (last['network']['rx_bytes'] - first['network']['rx_bytes'])/1024.0,
                    'tx_packets': last['network']['tx_packets'] - first['network']['tx_packets'],
                    'rx_packets': last['network']['rx_packets'] - first['network']['rx_packets'],
                    'tx_errors': last['network']['tx_errors'] - first['network']['tx_errors'],
                    'rx_errors': last['network']['rx_errors'] - first['network']['rx_errors'],
                    'tx_dropped': last['network']['tx_dropped'] - first['network']['tx_dropped'],
                    'rx_dropped': last['network']['rx_dropped'] - first['network']['rx_dropped']
                    }
        payload[container_name] = [format_data('cpu.usage', container_name, ts, cpu_usage),
                                   format_data('memory.usage', container_name, ts, memory_usage),
                                   format_data('network.usage', container_name, ts, network_usage)]
    send(endpoint, payload)
Developer ID: catalyzeio, Project: docker-container-metrics, Lines of code: 90, Source file: sender.py
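One scheduling detail worth noting in the example above: because p.start() and p.join() are called inside the same loop, the per-container collectors run one after another, each blocking for its full interval. If concurrent collection is what is intended (an assumption, not something the original states), the usual variant starts every process first and joins them afterwards:

    for p in processes:
        p.start()
    for p in processes:
        p.join()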


Note: The multiprocessing.Manager.items examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by many developers, and copyright of the source code remains with the original authors. Please consult each project's license before distributing or using the code; do not reproduce without permission.