本文整理汇总了Python中multiprocessing.managers.SyncManager.register方法的典型用法代码示例。如果您正苦于以下问题:Python SyncManager.register方法的具体用法?Python SyncManager.register怎么用?Python SyncManager.register使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类 multiprocessing.managers.SyncManager 的用法示例。
在下文中一共展示了SyncManager.register方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: dconnect
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
def dconnect(self):
    """
    Attempts to connect to the DFS on the host/port for which the Node was
    initialized. If no connection can be made, the Node keeps retrying once
    a second until a connection can be established. Once connected, the
    remote methods requested will be registered.
    """
    # Remove any cached connection: BaseProxy keeps thread-local storage that
    # caches the connection per address; reusing it with a brand-new manager
    # causes "broken pipe" errors, so evict the stale entry first.
    if self.dfs in BaseProxy._address_to_local:
        if hasattr(BaseProxy._address_to_local[self.dfs][0], 'connection'):
            del BaseProxy._address_to_local[self.dfs][0].connection
    # Client-side registration (name only, no callable) of the DFS handler.
    SyncManager.register("get_nodes")
    print "connecting to dfs", self.dfs
    # Retry loop: keep trying while this Node is alive; a successful
    # connect() breaks out, any socket-level failure sleeps and retries.
    while self.alive:
        try:
            self.impd= SyncManager(address= self.dfs, authkey= self.dauthkey)
            self.impd.connect()
            print "connected to dfs", self.dfs
            break
        except (EOFError, IOError, SocketError) as e:
            print "could not connect ...trying again", str(e)
            sleep(1)
示例2: __init__
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
def __init__(self, queue, taskId=None, s3=None, taskDir=None):
    """Creates an instance of Task.

    :param queue: <host>:<port>:<security key> of queue instance
    :param taskId: optional guid representing this instance of Task; all
       jobs forked with the current instance assume this taskId. If omitted,
       a fresh uuid is generated per instance. Optionally the developer may
       pass in a taskId that is meaningful to them.
    :param s3: <access key>:<secret key>:<bucket> of the s3 resource to use
       when the s3 transport is specified during .forkTask()
    :param taskDir: output directory to write task results to
    :returns: instance of Task
    """
    # BUGFIX: the default used to be taskId=str(uuid1()), which is evaluated
    # once at function-definition time, so every Task created without an
    # explicit id silently shared the same "unique" id (and the "== None"
    # guard below was dead code). Defaulting to None and generating the uuid
    # per call restores the documented behavior.
    self.taskId = str(uuid1()) if taskId is None else taskId
    self.s3 = s3
    self.subTaskId = 0
    self.subTasks = dict()
    self.sleep = 0.1          # polling interval in seconds
    self.taskDir = taskDir
    self.lock = Lock()
    # Queue endpoint is encoded as "<host>:<port>:<authkey>".
    (qHost, qPort, qKey) = queue.split(":")
    # Client-side registration of the remote methods served by the queue.
    SyncManager.register("getPipeline")
    SyncManager.register("getStore")
    SyncManager.register("getFileContents")
    SyncManager.register("setFileContents")
    SyncManager.register("deleteFile")
    self.queue = SyncManager(address=(qHost, int(qPort)), authkey=qKey)
    self.queue.connect()
    self.pipeline = self.queue.getPipeline()
    self.store = self.queue.getStore()
    super(Task, self).__init__()
示例3: get_binary_matrix_from_service
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
def get_binary_matrix_from_service(q):
    """Return the binary matrix for query *q*, lazily starting a local
    SyncManager service on first use.

    The first call spawns a child process serving get_matrix on
    localhost:50000 (with a callable closing over a private matrix cache);
    every call then proxies get_matrix(q) through the connected manager.
    """
    global matrix_service
    # BUGFIX: identity comparison with None (is None), not "== None".
    if matrix_service is None:
        matrices = dict()
        matrix_service = SyncManager(address=("localhost", 50000), authkey="")
        # Server-side registration: the callable runs in the server process.
        SyncManager.register("get_matrix", lambda q: get_matrix(q, matrices))
        Process(target=lambda: matrix_service.get_server().serve_forever()).start()
        # Re-register client-side (name only) so connect() exposes the proxy.
        SyncManager.register("get_matrix")
        # NOTE(review): no wait/retry here — if the server process has not
        # bound the socket yet, connect() can fail. Confirm the race is
        # acceptable or add a retry loop.
        matrix_service.connect()
    return matrix_service.get_matrix(q)
示例4: Queue
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
class Queue(Daemon):
"""
Creates remote methods for creating/deleteing streams,
and accessing the Streams priority queue, data-store and
properties.
"""
def __init__(self, address, authkey, logdir= curdir, piddir= curdir):
self.streams= {}
self.address= address
self.manager= SyncManager(address= self.address, authkey= authkey)
self.manager.register("create_stream", callable= self.create_stream)
self.manager.register("delete_stream", callable= self.delete_stream)
self.manager.register("get_streams", callable= lambda: self.streams, proxytype= DictProxy)
self.manager.register("get_store", callable= lambda id: self.streams[id].store, proxytype= DictProxy)
self.manager.register("get_queue", callable= lambda id: self.streams[id].queue, proxytype= PriorityQueue)
self.manager.register("get_properties", callable= lambda id: self.streams[id].properties, proxytype= DictProxy)
super(Queue, self).__init__(
pidfile= path.join(piddir, self.__class__.__name__ + ".pid"),
stdout= path.join(logdir, self.__class__.__name__ + ".out"),
stderr= path.join(logdir, self.__class__.__name__ + ".err"),
stdin= path.join(logdir, self.__class__.__name__ + ".in")
)
def create_stream(self, **properties):
"""
Creates stream and returns a unique stream identifier
to the caller. If an identifier was passed in by the caller
that identifier will be used to create the stream. If the
stream already exists then now new stream is created and the
existing stream is reused.
"""
stream= Stream(**properties)
try:
stream= self.streams[stream.id]
except KeyError:
self.streams[stream.id]= stream
print "created stream", stream.id, properties
def delete_stream(self, id):
"""Deletes the stream of the given identifier."""
del self.streams[id]
print "deleting stream", id
def run(self):
"""Starts up the Queue server and starts listing for requests."""
server= self.manager.get_server()
print "running"
server.serve_forever()
示例5: connect
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
def connect(self):
    """Connect to the Queue manager and cache its pipeline/store proxies."""
    # Client-side registration of the remote methods we intend to call.
    for remote_name in ('getPipeline', 'getStore'):
        SyncManager.register(remote_name)
    self.qInstance = self.opts.qInstance
    # The queue endpoint arrives as "<host>:<port>:<authkey>".
    host, port, key = self.opts.queue.split(':')
    self.qHost, self.qPort, self.qKey = host, port, key
    manager = SyncManager(address=(host, int(port)), authkey=key)
    manager.connect()
    self.pipeline = manager.getPipeline()
    self.store = manager.getStore()
示例6: connect
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
def connect(self):
    """
    Attempts to connect to the Queue on the host/port for which the DFS was
    initialized. If no connection can be made, the DFS keeps retrying once a
    second until a connection can be established. Once connected, the remote
    methods requested will be registered.
    """
    # Remove any cached connection: BaseProxy keeps thread-local storage that
    # caches the connection per address; reusing it with a brand-new manager
    # causes "broken pipe" errors, so evict the stale entry first.
    if self.queue in BaseProxy._address_to_local:
        del BaseProxy._address_to_local[self.queue][0].connection
    # Client-side registration (name only) of the queue's remote handlers.
    SyncManager.register("get_streams")
    SyncManager.register("get_queue")
    SyncManager.register("get_store")
    SyncManager.register("get_properties")
    print "connecting to queue", self.queue
    # Retry until connected or this instance is shut down (self.alive False).
    while self.alive:
        try:
            self.impq= SyncManager(address= self.queue, authkey= self.qauthkey)
            self.impq.connect()
            break
        except (EOFError, IOError, SocketError) as e:
            print "could not connect ...trying again", str(e)
            sleep(1)
示例7: connect
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
def connect(self):
    """Connect to the Queue (and optionally the DFS) and cache their proxies.

    NOTE(review): getPipeline/getStore are not registered here before being
    called — presumably registered earlier in the process; verify against
    the caller.
    """
    # Queue endpoint is encoded as "<host>:<port>:<authkey>".
    (qHost, qPort, qKey)= self.opts.queue.split(':')
    self.queue= SyncManager(address= (qHost, int(qPort)), authkey= qKey)
    self.queue.connect()
    self.pipeline= self.queue.getPipeline()
    self.store= self.queue.getStore()
    # Register with the DFS only when an endpoint was supplied.
    self.dfs = None
    self.instances= dict()
    # BUGFIX: identity comparison with None (PEP 8) instead of "!= None".
    if self.opts.dfs is not None:
        SyncManager.register('getInstances')
        (dHost, dPort, dKey)= self.opts.dfs.split(':')
        self.dfs= SyncManager(address= (dHost, int(dPort)), authkey= dKey)
        self.dfs.connect()
        self.instances= self.dfs.getInstances()
示例8: connect
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
def connect(self):
    """Methods connects to queue

    Args:
        none

    Returns:
        void

    Raises:
        error: ValueError when the queue was not created as a client
    """
    # Guard: only client-type queues may connect to a remote manager.
    if self.__type != queue.QUEUE_TYPE_CLIENT:
        raise ValueError(
            'This operation cannot be done on this queue type')
    # NOTE(review): this fresh local Queue is registered as the callable for
    # 'get_queue', but connect() never serves it — for a connecting client
    # only the name registration should matter (compare create(), which does
    # start a server). Presumably harmless dead state; confirm and consider
    # registering the name only.
    q = Queue()
    SyncManager.register('get_queue', callable=lambda: q)
    self.__manager = SyncManager(self.__address, self.__authkey)
    self.__manager.connect()
示例9: create
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
def create(self):
    """Methods creates queue server

    Args:
        none

    Returns:
        void

    Raises:
        error: ValueError
    """
    # Guard: only server-type queues may start a manager server.
    if self.__type != queue.QUEUE_TYPE_SERVER:
        raise ValueError(
            'This operation cannot be done on this queue type')
    shared_queue = Queue()

    def _serve_queue():
        # Callable executed server-side for each 'get_queue' request.
        return shared_queue

    SyncManager.register('get_queue', callable=_serve_queue)
    self.__manager = SyncManager(self.__address, self.__authkey)
    self.__manager.start()
示例10: Manager
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
import sys
import os
import multiprocessing
from multiprocessing.managers import SyncManager
sys.path.append(os.path.realpath('.'))
sys.path.append(os.path.realpath('../'))
from helpers.instrumentsmanager import *
# Build the shared instruments manager at import time so the lambda below can
# close over it. Manager presumably comes from the star import of
# helpers.instrumentsmanager above — TODO confirm.
print "Initializing instruments..."
MyManager = Manager()
# The temperature instrument — read once as a smoke test before serving.
temperature = MyManager.getInstrument("temperature")
print temperature.temperature()
if __name__ == "__main__":
    # Serve the instruments manager over a SyncManager on localhost:50000.
    # NOTE(review): authkey='abc' is a str; this is fine in Python 2 (print
    # statements above), but Python 3 requires bytes.
    manager = SyncManager(address=('localhost', 50000), authkey='abc')
    manager.register('get_manager', callable=lambda:MyManager)
    server = manager.get_server()
    server.serve_forever()
示例11: Tracer
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
import argparse
from collections import deque
import multiprocessing
from multiprocessing.managers import SyncManager
from processing import processingProcess
# For debugging
# Trigger with: Tracer()()
# From: http://stackoverflow.com/a/35773311
from IPython.core.debugger import Tracer
#
# Allow working with a deque between threads
# http://stackoverflow.com/a/27345949
#
SyncManager.register('deque', deque)
#
# Work with data and commands
#
class NetworkData:
def __init__(self, data, commands, cond):
self.data = data
self.commands = commands
self.commandCondition = cond
# Add data/commands
def addData(self, d):
self.data.append(d)
def addCommand(self, c):
示例12: DFS
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
class DFS(Daemon):
"""
The Dynamic Frequency Scaler is responsible for
starting up instances and bootstrapping them with Node
to start processing jobs. DFS will gather update statistics
from all the Nodes that are registered with it and spin up/down
instances as needed. A configurable "billing period" can be set
so DFS will make the most effective use of the instance for the
duration of the billing cycle. Idle Nodes will be terminated
when the end of the billing cycle is reached.
"""
billing_period= 3600
shutdown_period= billing_period - 600
idle_time= 300
seconds_per_day= 86400
def __init__(self, address, authkey, queue, qauthkey, mnon, mpps, ec2= None, bootstrap= None, deploykey= None, logdir= curdir, piddir= curdir):
    """
    Initializes the available remote methods and the daemon I/O streams,
    then establishes the connection to the Queue.

    :param address: (host, port) this DFS manager listens on
    :param authkey: secret presented by Nodes connecting to this DFS
    :param queue: (host, port) of the Queue to connect to
    :param qauthkey: secret for the Queue connection
    :param mnon: stored as-is; presumably max number of nodes — TODO confirm
    :param mpps: stored as-is; presumably max processes per node — TODO confirm
    :param ec2: optional comma-separated EC2 spec:
       access_key,security_key,ami_id,security_group,key_name,instance_type
    :param bootstrap: path to the bootstrap template file (see get_bootstrap)
    :param deploykey: path to the deploy key file (see get_bootstrap)
    :param logdir: directory for the daemon's stdout/stderr/stdin files
    :param piddir: directory for the daemon's pid file
    """
    super(DFS, self).__init__(
        pidfile= path.join(piddir, self.__class__.__name__ + ".pid"),
        stdout= path.join(logdir, self.__class__.__name__ + ".out"),
        stderr= path.join(logdir, self.__class__.__name__ + ".err"),
        stdin= path.join(logdir, self.__class__.__name__ + ".in")
    )
    # The DFS identifies itself by its own IP address.
    self.id= getipaddress()
    self.address= address
    self.authkey= authkey
    self.queue= queue
    self.qauthkey= qauthkey
    self.mnon= mnon
    self.mpps= mpps
    self.bootstrap= bootstrap
    self.deploykey= deploykey
    self.nodes= {}                      # registry of nodes, served via get_nodes
    self.ec2= ec2
    if self.ec2 != None:
        # Split the comma-separated spec, then replace self.ec2 with a live
        # EC2 connection built from the extracted credentials.
        (self.access_key, self.security_key, self.ami_id, self.security_group, self.key_name, self.instance_type)= self.ec2.split(',')
        self.ec2= EC2Connection(self.access_key, self.security_key)
        print "Connected to EC2", self.ec2
    self.alive= True
    self.manager= SyncManager(address= self.address, authkey= self.authkey)
    # Server-side registration: Nodes call get_nodes to read the registry.
    self.manager.register("get_nodes", callable= lambda: self.nodes, proxytype= DictProxy)
    self.connect()
def connect(self):
    """
    Attempts to connect to the Queue on the host/port for which the DFS was
    initialized. If no connection can be made, the DFS keeps retrying once a
    second until a connection can be established. Once connected, the remote
    methods requested will be registered.
    """
    # Remove any cached connection: BaseProxy keeps thread-local storage that
    # caches the connection per address; reusing it with a brand-new manager
    # causes "broken pipe" errors, so evict the stale entry first.
    if self.queue in BaseProxy._address_to_local:
        del BaseProxy._address_to_local[self.queue][0].connection
    # Client-side registration (name only) of the queue's remote handlers.
    SyncManager.register("get_streams")
    SyncManager.register("get_queue")
    SyncManager.register("get_store")
    SyncManager.register("get_properties")
    print "connecting to queue", self.queue
    # Retry until connected or this daemon is shut down (self.alive False).
    while self.alive:
        try:
            self.impq= SyncManager(address= self.queue, authkey= self.qauthkey)
            self.impq.connect()
            break
        except (EOFError, IOError, SocketError) as e:
            print "could not connect ...trying again", str(e)
            sleep(1)
def get_bootstrap(self):
fh= open(self.bootstrap, "r")
template= Template(fh.read())
fh.close()
fh= open(self.deploykey, "r")
deploykey= fh.read()
fh.close()
#.........这里部分代码省略.........
示例13: Impetus
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
class Impetus(object):
"""
Multi-threaded library for interfacing with the Impetus system.
Hides threading considerations from the client. Determines callback
methods through introspection if callbacks are not explicitly stated.
Decorators are provided for the client to indicate methods which run on
the remote nodes and local process methods which consume the results.
Creates a single stream per instance. The client can created additional
streams through the Queue's remote methods via the "impq" handler.
"""
statuses= ("forked", "processed")
def __init__(self, address, authkey, taskdir= "tasks", id= None, **properties):
    """Creates a stream and retrieves the stream's priority queue and data-store.

    :param address: (host, port) of the Queue manager
    :param authkey: secret for the Queue connection
    :param taskdir: base directory; this stream's results live in taskdir/<id>
    :param id: optional stream identifier; a fresh uuid when omitted/falsy
    :param properties: extra stream properties forwarded to create_stream
    """
    self.id= id if id else str(uuid1())
    self.ipaddress= getipaddress()
    self.address= address
    self.taskdir= path.join(taskdir, self.id)
    self.properties= properties
    # Client-side registration of the Queue's remote methods, then connect.
    self.impq= SyncManager(address= self.address, authkey= authkey)
    self.impq.register("get_streams")
    self.impq.register("create_stream")
    self.impq.register("delete_stream")
    self.impq.register("get_store")
    self.impq.register("get_queue")
    self.impq.connect()
    self.jobs= []
    # Create our stream on the queue and grab its store/queue proxies.
    self.impq.create_stream(id= self.id, ipaddress= self.ipaddress, **properties)
    self.store= self.impq.get_store(id= self.id)
    self.queue= self.impq.get_queue(id= self.id)
    self.alive= True
    self._current_thread= None
    self._lock= Lock()
    self.threads= []
    self.errors= {}
    self.ready= {}
    self._progress= {}
    try:
        makedirs(self.taskdir)
    except OSError:
        # BUGFIX: the original bare "except: pass" swallowed everything
        # (including KeyboardInterrupt/SystemExit); only the expected
        # "directory already exists"-style OSError is tolerated now.
        pass
def __del__(self):
    """Deletes the stream that was created during initialization.

    NOTE(review): performs a remote call from a finalizer — if __init__
    failed before self.impq was set, or the connection is already gone,
    this raises during garbage collection. Confirm an explicit close()
    method would not be safer.
    """
    self.impq.delete_stream(self.id)
@staticmethod
def node(func):
    """
    Marks *func* as a method that runs on remote nodes. All such methods
    must be staticmethods, since the context in which they were defined
    cannot be serialized.
    """
    # Identity decorator: the marker itself adds no behavior.
    return func
@staticmethod
def startup(target):
    """
    Sets up the startup method for the object to run as a thread.

    Wraps *target* and stamps the wrapper with the module-global
    _declaration_order counter (then increments it) — presumably so the
    framework can run the wrapped methods in declaration order; confirm
    against the thread-dispatch code.
    """
    def _process(self):
        target(self)
    global _declaration_order
    _process.order= _declaration_order
    _declaration_order+= 1
    return _process
@staticmethod
def shutdown(target):
    """
    Sets up the shutdown method to be executed after all threads have been
    terminated. The ready and errors parameters will contain a dict of
    file-handles pointing to the results files
    (ie, ../tasks/<task_id>/<method>.ok, .err)
    for each @process method.
    """
    def _shutdown(self):
        # Forward the collected results/errors/progress to the user's hook.
        target(self, self.ready, self.errors, self._progress)
    global _declaration_order
    _shutdown.order= _declaration_order
    # NOTE(review): unlike startup(), the counter is not incremented here —
    # presumably intentional since shutdown always runs last; confirm.
    return _shutdown
@staticmethod
def process(target):
"""
#.........这里部分代码省略.........
示例14: SyncManager
# 需要导入模块: from multiprocessing.managers import SyncManager [as 别名]
# 或者: from multiprocessing.managers.SyncManager import register [as 别名]
import sys
import os
import multiprocessing
from multiprocessing.managers import SyncManager
sys.path.append(os.path.realpath('.'))
sys.path.append(os.path.realpath('../'))
if __name__ == "__main__":
manager = SyncManager(address=('localhost', 50000), authkey='abc')
manager.register('get_manager')
manager.connect()
m = manager.get_manager()
m.
temp = m.instrument("temperature")
print temp
print temp.temperature()