本文整理汇总了Python中gevent.coros.BoundedSemaphore.acquire方法的典型用法代码示例。如果您正苦于以下问题:Python BoundedSemaphore.acquire方法的具体用法?Python BoundedSemaphore.acquire怎么用?Python BoundedSemaphore.acquire使用的例子?那么恭喜您, 这里精选的方法代码示例或许可以为您提供帮助。您也可以进一步了解该方法所在类gevent.coros.BoundedSemaphore
的用法示例。
在下文中一共展示了BoundedSemaphore.acquire方法的14个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: LiberateProtocol
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
class LiberateProtocol(protocols.BaseProtocol):
    """Connection protocol: frames the incoming byte stream into packets
    and writes framed responses back to the client.

    NOTE(review): the scrape lost the original indentation; it was
    reconstructed here following standard 4-space convention.
    """

    # Per-connection receive buffer (class default; replaced by an
    # instance attribute on the first append in dataReceived).
    buff = ""

    def connectionMade(self):
        """Handle a newly established connection: register it and create
        the per-connection write semaphore."""
        address = self.transport.getAddress()
        logger.info('Client %d login in.[%s,%d]' % (self.transport.sessionno,
                                                    address[0], address[1]))
        self.factory.connmanager.addConnection(self)
        self.factory.doConnectionMade(self)
        # One-slot semaphore serialises writes from concurrent greenlets.
        self.sem = BoundedSemaphore(1)

    def connectionLost(self, reason):
        """Handle connection teardown and deregister from the manager.

        @param reason: reason the connection was closed (logged only)
        """
        logger.info('Client %d login out(%s).' % (self.transport.sessionno,
                                                  reason))
        self.factory.doConnectionLost(self)
        self.factory.connmanager.dropConnectionByID(self.transport.sessionno)

    def safeToWriteData(self, data, command):
        """Greenlet-safe write of a response packet to the client.

        @param data: str payload to send; None means "no response"
        @param command: command id used by the factory to frame the packet
        """
        if data is None:
            return
        senddata = self.factory.produceResult(data, command)
        self.sem.acquire()
        try:
            self.transport.sendall(senddata)
        finally:
            # FIX: release even if sendall raises; previously an exception
            # here leaked the semaphore and deadlocked every later write
            # on this connection.
            self.sem.release()

    def dataReceived(self, data):
        """Accumulate incoming bytes and dispatch every complete packet.

        @param data: str raw bytes received from the client
        """
        length = self.factory.dataprotocl.getHeadlength()  # header size
        self.buff += data
        while len(self.buff) >= length:
            unpackdata = self.factory.dataprotocl.unpack(self.buff[:length])
            if not unpackdata.get('result'):
                logger.info('illegal data package --')
                self.connectionLost('illegal data package')
                break
            command = unpackdata.get('command')
            rlength = unpackdata.get('length')
            request = self.buff[length:length + rlength]
            if len(request) < rlength:
                # Incomplete body: keep buffering until more bytes arrive.
                logger.info('some data lose')
                break
            self.buff = self.buff[length + rlength:]
            response = self.factory.doDataReceived(self, command, request)
            self.safeToWriteData(response, command)
示例2: locks
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
class Lock:
    """ UNIX-specific exclusive file locks (released when the process ends).
    Based on
    http://blog.vmfarms.com/2011/03/cross-process-locking-and.html,
    adapted for context managers (the 'with' statement).
    Modified to be gevent-safe! Locks held by a given Greenlet may not be
    taken by other Greenlets until released, _as long as you only create one
    Lock object per lockfile_. THIS IS VERY IMPORTANT. *Make sure* that you're
    not creating multiple locks on the same file from the same process,
    otherwise you'll bypass the gevent lock!
    Parameters
    ----------
    f : file or str
        File handle or filename to use as the lock.
    block : bool
        Whether to block or throw IOError if the lock is grabbed multiple
        times.
    """
    TIMEOUT = 60

    def __init__(self, f, block=True):
        if isinstance(f, file):
            self.filename = f.name
            # FIX: reopen by *name* -- the original passed the file object
            # itself to open(), which raises TypeError on Python 2.
            self.handle = f if not f.closed else open(f.name, 'w')
        else:
            self.filename = f
            mkdirp(os.path.dirname(f))
            self.handle = open(f, 'w')
        if block:
            self.lock_op = fcntl.LOCK_EX
        else:
            # Non-blocking: flock raises IOError if already held elsewhere.
            self.lock_op = fcntl.LOCK_EX | fcntl.LOCK_NB
        self.block = block
        self.gevent_lock = BoundedSemaphore(1)

    def acquire(self):
        """Take the greenlet-level lock first (serialises greenlets in this
        process), then the kernel-level flock (serialises processes)."""
        got_gevent_lock = self.gevent_lock.acquire(blocking=self.block)
        if not got_gevent_lock:
            raise IOError("cannot acquire gevent lock")
        fcntl.flock(self.handle, self.lock_op)

    def release(self):
        """Release in reverse order: kernel flock, then greenlet lock."""
        fcntl.flock(self.handle, fcntl.LOCK_UN)
        self.gevent_lock.release()

    def locked(self):
        """Return True if the (greenlet-level) lock is currently held."""
        return self.gevent_lock.locked()

    def __enter__(self):
        self.acquire()
        return self

    def __exit__(self, type, value, traceback):
        self.release()

    def __del__(self):
        # Best-effort cleanup; the OS drops the flock when the handle closes.
        self.handle.close()
示例3: UVEServer
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
class UVEServer(object):
# NOTE(review): scraped snippet -- the original indentation was lost and
# the tail of run() is omitted by the scrape, so the code is left
# byte-identical and only annotated.
def __init__(self, redis_uve_server, logger, redis_password=None):
# redis_uve_server: (host, port) of the local redis holding UVE state.
self._local_redis_uve = redis_uve_server
self._redis_uve_list = []
self._logger = logger
# One-slot semaphore serialising run() against other redis users.
self._sem = BoundedSemaphore(1)
self._redis = None
self._redis_password = redis_password
if self._local_redis_uve:
# UVE state lives in redis db=1.
self._redis = redis.StrictRedis(self._local_redis_uve[0],
self._local_redis_uve[1],
password=self._redis_password,
db=1)
self._uve_reverse_map = {}
for h,m in UVE_MAP.iteritems():
self._uve_reverse_map[m] = h
#end __init__
def update_redis_uve_list(self, redis_uve_list):
self._redis_uve_list = redis_uve_list
# end update_redis_uve_list
def fill_redis_uve_info(self, redis_uve_info):
# Report connectivity of the local redis into the supplied info object.
redis_uve_info.ip = self._local_redis_uve[0]
redis_uve_info.port = self._local_redis_uve[1]
try:
self._redis.ping()
except redis.exceptions.ConnectionError:
redis_uve_info.status = 'DisConnected'
else:
redis_uve_info.status = 'Connected'
#end fill_redis_uve_info
@staticmethod
def merge_previous(state, key, typ, attr, prevdict):
# Merge prevdict into the 'previous' aggregate under
# state[key][typ][attr]; returns a deep-copied state (input is only
# partially shielded from mutation -- the extend() below touches the
# copied structure, not the original).
print "%s New val is %s" % (attr, prevdict)
nstate = copy.deepcopy(state)
if UVEServer._is_agg_item(prevdict):
count = int(state[key][typ][attr]['previous']['#text'])
count += int(prevdict['#text'])
nstate[key][typ][attr]['previous']['#text'] = str(count)
if UVEServer._is_agg_list(prevdict):
sname = ParallelAggregator.get_list_name(
state[key][typ][attr]['previous'])
count = len(prevdict['list'][sname]) + \
len(state[key][typ][attr]['previous']['list'][sname])
nstate[key][typ][attr]['previous']['list'][sname].extend(
prevdict['list'][sname])
nstate[key][typ][attr]['previous']['list']['@size'] = \
str(count)
tstate = {}
tstate[typ] = {}
tstate[typ][attr] = copy.deepcopy(
nstate[key][typ][attr]['previous'])
nstate[key][typ][attr]['previous'] =\
ParallelAggregator.consolidate_list(tstate, typ, attr)
print "%s Merged val is %s"\
% (attr, nstate[key][typ][attr]['previous'])
return nstate
def run(self):
# Consumer loop: block on the "DELETED" redis list and fold deleted
# UVE state into the PREVIOUS:<key>:<type> hash while holding _sem.
lck = False
while True:
try:
k, value = self._redis.brpop("DELETED")
self._sem.acquire()
lck = True
self._logger.debug("%s del received for " % value)
# value is of the format:
# DEL:<key>:<src>:<node-type>:<module>:<instance-id>:<message-type>:<seqno>
info = value.rsplit(":", 6)
key = info[0].split(":", 1)[1]
typ = info[5]
existing = self._redis.hgetall("PREVIOUS:" + key + ":" + typ)
tstate = {}
tstate[key] = {}
tstate[key][typ] = {}
state = UVEServer.convert_previous(existing, tstate, key, typ)
for attr, hval in self._redis.hgetall(value).iteritems():
snhdict = xmltodict.parse(hval)
if UVEServer._is_agg_list(snhdict[attr]):
if snhdict[attr]['list']['@size'] == "0":
continue
if snhdict[attr]['list']['@size'] == "1":
sname = ParallelAggregator.get_list_name(
snhdict[attr])
# Normalise single-element lists to real Python lists.
if not isinstance(
snhdict[attr]['list'][sname], list):
snhdict[attr]['list'][sname] = \
[snhdict[attr]['list'][sname]]
if (attr not in state[key][typ]):
#.........这里部分代码省略.........
示例4: DiscoveryZkClient
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
#.........这里部分代码省略.........
# NOTE(review): fragment of DiscoveryZkClient -- the class header and the
# start of this generator method were omitted by the scrape; code kept
# byte-identical, comments only.
services = self._zk.get_children('/services/%s' % (service_type))
for service_id in services:
data, stat = self._zk.get(
'/services/%s/%s' % (service_type, service_id))
entry = json.loads(data)
yield(entry)
def subscriber_entries(self):
# Yields (client_id, service_type) for every subscriber znode.
service_types = self._zk.get_children('/clients')
for service_type in service_types:
subscribers = self._zk.get_children('/clients/%s' % (service_type))
for client_id in subscribers:
data, stat = self._zk.get(
'/clients/%s/%s' % (service_type, client_id))
# NOTE(review): cl_entry is parsed but never used -- the yield
# below returns only the ids.
cl_entry = json.loads(data)
yield((client_id, service_type))
# end
def update_service(self, service_type, service_id, data):
# Upsert the JSON blob for a service instance znode.
path = '/services/%s/%s' % (service_type, service_id)
self.create_node(path, value=json.dumps(data), makepath=True)
# end
def insert_service(self, service_type, service_id, data):
# ensure election path for service type exists
path = '/election/%s' % (service_type)
self.create_node(path)
# preclude duplicate service entry
sid_set = set()
# prevent background task from deleting node under our nose
self._zk_sem.acquire()
# NOTE(review): if get_children/get below raises, the semaphore is
# never released -- consider try/finally here.
seq_list = self._zk.get_children(path)
for sequence in seq_list:
sid, stat = self._zk.get(
'/election/%s/%s' % (service_type, sequence))
sid_set.add(sid)
self._zk_sem.release()
if not service_id in sid_set:
path = '/election/%s/node-' % (service_type)
pp = self._zk.create(
path, service_id, makepath=True, sequence=True)
# Extract the zookeeper-assigned sequence suffix from the new path.
pat = path + "(?P<id>.*$)"
mch = re.match(pat, pp)
seq = mch.group('id')
data['sequence'] = seq
self.syslog('ST %s, SID %s not found! Added with sequence %s' %
(service_type, service_id, seq))
self.update_service(service_type, service_id, data)
# end insert_service
def delete_service(self, service_type, service_id):
# Refuse to delete while the service still has subscribers.
if self.lookup_subscribers(service_type, service_id):
return
path = '/services/%s/%s' %(service_type, service_id)
self._zk.delete(path)
# purge in-memory cache - ideally we are not supposed to know about this
self._ds.delete_pub_data(service_id)
# delete service node if all services gone
path = '/services/%s' %(service_type)
示例5: ZookeeperClient
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
class ZookeeperClient(object):
# NOTE(review): scraped snippet -- indentation lost; create_node() is
# truncated at the end of this example. Code kept byte-identical.
def __init__(self, module, server_list):
# logging
logger = logging.getLogger(module)
logger.setLevel(logging.INFO)
try:
handler = logging.handlers.RotatingFileHandler('/var/log/contrail/' + module + '-zk.log', maxBytes=10*1024*1024, backupCount=5)
except IOError:
print "Cannot open log file in /var/log/contrail/"
else:
log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
handler.setFormatter(log_format)
logger.addHandler(handler)
self._zk_client = \
kazoo.client.KazooClient(
server_list,
handler=kazoo.handlers.gevent.SequentialGeventHandler(),
logger=logger)
self._logger = logger
self._election = None
# One-slot semaphore: only one greenlet restarts the client at a time.
self._zk_sem = BoundedSemaphore(1)
self.connect()
# end __init__
# reconnect
def reconnect(self):
self._zk_sem.acquire()
self.syslog("restart: acquired lock; state %s " % self._zk_client.state)
# initiate restart if our state is suspended or lost
if self._zk_client.state != "CONNECTED":
self.syslog("restart: starting ...")
try:
self._zk_client.stop()
self._zk_client.close()
self._zk_client.start()
self.syslog("restart: done")
except gevent.event.Timeout as e:
self.syslog("restart: timeout!")
except Exception as e:
self.syslog('restart: exception %s' % str(e))
# NOTE(review): this second handler is unreachable (Exception is
# already caught above) and shadows the builtin str -- dead code.
except Exception as str:
self.syslog('restart: str exception %s' % str)
self._zk_sem.release()
# start
def connect(self):
# Retry until the initial zookeeper session is established.
while True:
try:
self._zk_client.start()
break
except gevent.event.Timeout as e:
self.syslog(
'Failed to connect with Zookeeper -will retry in a second')
gevent.sleep(1)
# Zookeeper is also throwing exception due to delay in master election
except Exception as e:
self.syslog('%s -will retry in a second' % (str(e)))
gevent.sleep(1)
self.syslog('Connected to ZooKeeper!')
# end
def syslog(self, msg):
# Logs through the rotating-file logger; name kept for history.
if not self._logger:
return
self._logger.info(msg)
# end syslog
def _zk_listener(self, state):
# Cancel any in-flight election when the session reconnects.
if state == "CONNECTED":
self._election.cancel()
# end
def _zk_election_callback(self, func, *args, **kwargs):
self._zk_client.remove_listener(self._zk_listener)
func(*args, **kwargs)
# Exit if running master encounters error or exception
exit(1)
# end
def master_election(self, path, identifier, func, *args, **kwargs):
# Blocks forever: re-runs the election loop each time it returns.
self._zk_client.add_listener(self._zk_listener)
while True:
self._election = self._zk_client.Election(path, identifier)
self._election.run(self._zk_election_callback, func, *args, **kwargs)
# end master_election
def create_node(self, path, value=None):
try:
if value is None:
value = uuid.uuid4()
self._zk_client.create(path, str(value), makepath=True)
except (kazoo.exceptions.SessionExpiredError,
kazoo.exceptions.ConnectionLoss):
# Session died: reconnect and retry the create once more.
self.reconnect()
return self.create_node(path, value)
except kazoo.exceptions.NodeExistsError:
current_value = self.read_node(path)
#.........这里部分代码省略.........
示例6: UVEServer
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
class UVEServer(object):
# NOTE(review): near-duplicate of the UVEServer in 示例3, without redis
# password support; run() is again truncated by the scrape. Code kept
# byte-identical, comments only.
def __init__(self, redis_uve_server, logger):
self._local_redis_uve = redis_uve_server
self._redis_uve_list = []
self._logger = logger
# Serialises run() against other users of the redis connection.
self._sem = BoundedSemaphore(1)
self._redis = None
if self._local_redis_uve:
self._redis = redis.StrictRedis(self._local_redis_uve[0],
self._local_redis_uve[1], db=1)
#end __init__
def update_redis_uve_list(self, redis_uve_list):
self._redis_uve_list = redis_uve_list
# end update_redis_uve_list
def fill_redis_uve_info(self, redis_uve_info):
# Report connectivity of the local redis into the supplied info object.
redis_uve_info.ip = self._local_redis_uve[0]
redis_uve_info.port = self._local_redis_uve[1]
try:
self._redis.ping()
except redis.exceptions.ConnectionError:
redis_uve_info.status = 'DisConnected'
else:
redis_uve_info.status = 'Connected'
#end fill_redis_uve_info
@staticmethod
def merge_previous(state, key, typ, attr, prevdict):
# Merge prevdict into the 'previous' aggregate; returns a deep-copied
# state.
print "%s New val is %s" % (attr, prevdict)
nstate = copy.deepcopy(state)
if UVEServer._is_agg_item(prevdict):
count = int(state[key][typ][attr]['previous']['#text'])
count += int(prevdict['#text'])
nstate[key][typ][attr]['previous']['#text'] = str(count)
if UVEServer._is_agg_list(prevdict):
sname = ParallelAggregator.get_list_name(
state[key][typ][attr]['previous'])
count = len(prevdict['list'][sname]) + \
len(state[key][typ][attr]['previous']['list'][sname])
nstate[key][typ][attr]['previous']['list'][sname].extend(
prevdict['list'][sname])
nstate[key][typ][attr]['previous']['list']['@size'] = \
str(count)
tstate = {}
tstate[typ] = {}
tstate[typ][attr] = copy.deepcopy(
nstate[key][typ][attr]['previous'])
nstate[key][typ][attr]['previous'] =\
ParallelAggregator.consolidate_list(tstate, typ, attr)
print "%s Merged val is %s"\
% (attr, nstate[key][typ][attr]['previous'])
return nstate
def run(self):
# Consumer loop over the "DELETED" redis list; see 示例3 counterpart.
lck = False
while True:
try:
k, value = self._redis.brpop("DELETED")
self._sem.acquire()
lck = True
self._logger.debug("%s del received for " % value)
# value is of the format:
# DEL:<key>:<src>:<node-type>:<module>:<instance-id>:<message-type>:<seqno>
info = value.rsplit(":", 6)
key = info[0].split(":", 1)[1]
typ = info[5]
existing = self._redis.hgetall("PREVIOUS:" + key + ":" + typ)
tstate = {}
tstate[key] = {}
tstate[key][typ] = {}
state = UVEServer.convert_previous(existing, tstate, key, typ)
for attr, hval in self._redis.hgetall(value).iteritems():
snhdict = xmltodict.parse(hval)
if UVEServer._is_agg_list(snhdict[attr]):
if snhdict[attr]['list']['@size'] == "0":
continue
if snhdict[attr]['list']['@size'] == "1":
sname = ParallelAggregator.get_list_name(
snhdict[attr])
# Normalise single-element lists to real Python lists.
if not isinstance(
snhdict[attr]['list'][sname], list):
snhdict[attr]['list'][sname] = \
[snhdict[attr]['list'][sname]]
if (attr not in state[key][typ]):
# There is no existing entry for the UVE
vstr = json.dumps(snhdict[attr])
else:
# There is an existing entry
# Merge the new entry with the existing one
state = UVEServer.merge_previous(
state, key, typ, attr, snhdict[attr])
#.........这里部分代码省略.........
示例7: QueueManager
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
class QueueManager(object):
def __init__(self, clear_interval=DEFAULT_CLEAR_INTERVAL):
self.__updates = {}
self.__updates_lock = BoundedSemaphore()
# Start clearing daemon thread.
spawn(self._daemon_clear, interval=clear_interval)
def _load(self, queue_id):
"""Load and return queue update tracker for queue_id."""
self.__updates_lock.acquire()
# Hit.
if queue_id in self.__updates:
self.__updates_lock.release()
return self.__updates[queue_id].fresh()
# Miss.
self.__updates[queue_id] = QueueUpdate(queue_id)
self.__updates_lock.release()
return self.__updates[queue_id]
def _clear(self):
"""Clear the in-memory update tracking dictionary"""
self.__updates_lock.acquire()
print 'Clearing'
# Make sure anyone currently waiting reloads.
for queue_id in self.__updates:
self.__updates[queue_id].event.set()
self.__updates[queue_id].event.clear()
self.__updates = {}
print 'Clear'
self.__updates_lock.release()
def _daemon_clear(self, interval):
"""Clear the update tracking dictionary every interval seconds."""
while True:
sleep(interval)
self._clear()
def edit(self, user, queue_id, room_idlist, draw):
# Put together the list of Room objects.
rooms = []
print 'edit', room_idlist
for roomid in room_idlist:
room = Room.objects.get(pk=roomid)
if (not room) or not draw in room.building.draw.all():
return {'error':'bad room/draw'}
rooms.append(room)
update = self._load(queue_id)
# Clear out the old list.
queue = update.queue
queue.queuetoroom_set.all().delete()
# Put in new relationships
for i in range(0, len(rooms)):
qtr = QueueToRoom(queue=queue, room=rooms[i], ranking=i)
qtr.save()
# Store the update information on the queue.
queue.version += 1
queue.update_kind = Queue.EDIT
queue.update_user = user
queue.save()
# Notify others of the update.
update.event.set()
update.event.clear()
# Assemble and return response.
room_list = []
for room in rooms:
room_list.append({'id':room.id, 'number':room.number,
'building':room.building.name})
return {'rooms':room_list}
def check(self, user, queue_id, last_version):
update = self._load(queue_id)
print user, update.queue, last_version
if last_version == update.queue.version:
print 'going to wait'
print update.queue.version
update.event.wait()
update = self._load(queue_id)
print 'past wait'
queueToRooms = QueueToRoom.objects.filter(queue=update.queue).order_by('ranking')
if not queueToRooms:
return {'id':update.queue.version, 'rooms':[]}
room_list = []
if update.queue.update_user:
netid = update.queue.update_user.netid
else:
netid = ''
for qtr in queueToRooms:
room_list.append({'id':qtr.room.id, 'number':qtr.room.number,
'building':qtr.room.building.name})
return {'id':update.queue.version,
'kind':Queue.UPDATE_KINDS[update.queue.update_kind][1],
'netid':netid,
'rooms':room_list}
示例8: DiscoveryZkClient
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
class DiscoveryZkClient(object):
# NOTE(review): scraped snippet -- indentation lost; the example is cut
# off after start_background_tasks(). Code kept byte-identical.
def __init__(self, discServer, zk_srv_ip='127.0.0.1',
zk_srv_port='2181', reset_config=False):
self._reset_config = reset_config
self._service_id_to_type = {}
self._ds = discServer
# One-slot semaphore guarding restart() against concurrent greenlets.
self._zk_sem = BoundedSemaphore(1)
self._election = None
self._restarting = False
# zk_srv_ip may be a comma-separated list of zookeeper hosts.
zk_endpts = []
for ip in zk_srv_ip.split(','):
zk_endpts.append('%s:%s' %(ip, zk_srv_port))
# logging
logger = logging.getLogger('discovery-service')
logger.setLevel(logging.WARNING)
handler = logging.handlers.RotatingFileHandler('/var/log/contrail/discovery_zk.log', maxBytes=1024*1024, backupCount=10)
log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
handler.setFormatter(log_format)
logger.addHandler(handler)
self._zk = kazoo.client.KazooClient(
hosts=','.join(zk_endpts),
handler=kazoo.handlers.gevent.SequentialGeventHandler(),
logger=logger)
self._logger = logger
# connect
self.connect()
if reset_config:
self.delete_node("/services", recursive=True)
self.delete_node("/clients", recursive=True)
self.delete_node("/election", recursive=True)
# create default paths
self.create_node("/services")
self.create_node("/clients")
self.create_node("/election")
self._debug = {
'subscription_expires': 0,
'oos_delete': 0,
'db_excepts': 0,
}
# end __init__
# Discovery server used for syslog, cleanup etc
def set_ds(self, discServer):
self._ds = discServer
# end set_ds
def is_restarting(self):
return self._restarting
# end is_restarting
# restart
def restart(self):
self._zk_sem.acquire()
self._restarting = True
self.syslog("restart: acquired lock; state %s " % self._zk.state)
# initiate restart if our state is suspended or lost
if self._zk.state != "CONNECTED":
self.syslog("restart: starting ...")
try:
self._zk.stop()
self._zk.close()
self._zk.start()
self.syslog("restart: done")
except:
# Bare except is deliberate here: any failure is logged and the
# restart flag/semaphore are still cleared below.
e = sys.exc_info()[0]
self.syslog('restart: exception %s' % str(e))
self._restarting = False
self._zk_sem.release()
# start
def connect(self):
# Retry until the initial zookeeper session is established.
while True:
try:
self._zk.start()
break
except gevent.event.Timeout as e:
self.syslog(
'Failed to connect with Zookeeper -will retry in a second')
gevent.sleep(1)
# Zookeeper is also throwing exception due to delay in master election
except Exception as e:
self.syslog('%s -will retry in a second' % (str(e)))
gevent.sleep(1)
self.syslog('Connected to ZooKeeper!')
# end
def start_background_tasks(self):
# spawn loop to expire subscriptions
gevent.Greenlet.spawn(self.inuse_loop)
# spawn loop to expire services
gevent.Greenlet.spawn(self.service_oos_loop)
#.........这里部分代码省略.........
示例9: CrawlSettings
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
class Crawler:
# NOTE(review): scraped snippet -- indentation lost; decr_count() is
# truncated at the end. Code kept byte-identical, comments only.
# Class-level default; __init__ replaces it with an instance attribute.
settings = CrawlSettings()
def __init__(self, spider_class, settings):
# NOTE(review): mutable default argument ({}) -- harmless here because
# `default` is never mutated, but a known Python pitfall.
def get(value, default={}):
try:
return getattr(settings, value)
except AttributeError:
return default
self.settings = CrawlSettings(get('CRAWL'))
Request.settings = RequestSettings(get('REQUEST'))
spider_settings = SpiderSettings(get('SPIDER'))
spider = spider_class(spider_settings)
log = LogSettings(get('LOGFORMATTERS'), get('LOGHANDLERS'),
get('LOGGERS'))
spider.logger = log.getLogger(spider.name)
self.logger = log.getLogger(spider.name)
# load() sets self.stats, which is then shared with Request.
self.load(spider)
Request.stats = self.stats
def load(self, spider):
# Build all redis-backed shared structures for this crawl.
redis_args = dict(host=self.settings.REDIS_URL,
port=self.settings.REDIS_PORT,
db=self.settings.REDIS_DB)
if hasattr(self.settings, 'NAMESPACE'):
redis_args['namespace'] = self.settings.NAMESPACE
else:
redis_args['namespace'] = spider.name
self.url_set = redisds.Set('urlset', **redis_args)
self.url_queue = redisds.Queue('urlqueue', serializer=Pickle(),
**redis_args)
self.runner = redisds.Lock("runner:%s" % uuid4().hex, **redis_args)
self.runners = redisds.Dict("runner:*", **redis_args)
self.stats = redisds.Dict("stats:*", **redis_args)
# Local semaphore guarding running_count updates.
self.lock = BoundedSemaphore(1)
self.running_count = 0
self.allowed_urls_regex = self.get_regex(spider.allowed_domains)
self.spider = spider
self.start()
def get_regex(self, domains):
# If no domains are given, fall back to a generic host pattern.
default = (r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?')
domain_regex = r'(%s)' % '|'.join(domains) if len(domains) else default
url_regex = r'^https?://%s(?:/?|[/?]\S+)$' % domain_regex
regex = re.compile(url_regex, re.IGNORECASE)
return regex
def current_time(self):
# ISO-8601 timestamp in the configured timezone.
tz = timezone(self.settings.TIME_ZONE)
return datetime.now(tz).isoformat()
def start(self):
# Seed the queue with the spider's start requests (unless resuming).
if not self.settings.RESUME and self.completed():
self.url_queue.clear()
self.url_set.clear()
if self.url_queue.empty():
self.stats.clear()
if isinstance(self.spider.start, list):
requests = self.spider.start
else:
requests = [self.spider.start]
for request in requests:
if isinstance(request, str):
request = Request(request)
if request.callback is None:
request.callback = "parse"
self.insert(request)
self.stats['status'] = "running"
self.stats['start_time'] = self.current_time()
def clear(self, finished):
# Release this runner's lock and record final crawl status/stats.
self.runner.release()
if finished:
self.stats['status'] = 'finished'
self.url_queue.clear()
self.url_set.clear()
elif self.completed():
self.stats['end_time'] = self.current_time()
self.stats['status'] = 'stopped'
stats = dict(self.stats)
stats['runners'] = len(self.runners)
self.logger.info("%s", str(stats))
def completed(self):
# Crawl is complete when no runner holds a lock.
return len(self.runners) == 0
def inc_count(self):
# First active greenlet takes the shared runner lock.
self.lock.acquire()
if self.running_count == 0:
self.runner.acquire()
self.running_count += 1
self.lock.release()
def decr_count(self):
self.lock.acquire()
self.running_count -= 1
if self.running_count == 0:
#.........这里部分代码省略.........
示例10: UVEServer
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
class UVEServer(object):
# NOTE(review): sentinel-based variant of UVEServer -- indentation lost,
# merge_previous() truncated by the scrape. Code kept byte-identical.
def __init__(self, local_ip, local_port, redis_sentinel_client, service):
self._redis_sentinel_client = redis_sentinel_client
self._local_ip = local_ip
self._local_port = int(local_port)
self._service = service
self._uve_server_task = None
self._redis = None
self._redis_master_info = None
self._master_last_updated = None
self._num_mastership_changes = 0
# Serialises master switch-over against the running server task.
self._sem = BoundedSemaphore(1)
if redis_sentinel_client is not None:
self._redis_master_info = \
redis_sentinel_client.get_redis_master(service)
if self._redis_master_info is not None:
self._num_mastership_changes += 1
self._master_last_updated = UTCTimestampUsec()
self._redis = redis.StrictRedis(self._redis_master_info[0],
self._redis_master_info[1],
db=0)
self._uve_server_task = gevent.spawn(self.run)
#end __init__
def set_redis_master(self, redis_master):
# Switch to a new redis master: kill the old server task and respawn.
if self._redis_master_info != redis_master:
try:
# NOTE(review): acquire() inside the try means the finally's
# release() would run even if acquire itself failed -- confirm
# this is intended before relying on it.
self._sem.acquire()
if self._redis_master_info is not None:
gevent.kill(self._uve_server_task)
self._redis_master_info = redis_master
self._num_mastership_changes += 1
self._master_last_updated = UTCTimestampUsec()
self._redis = redis.StrictRedis(self._redis_master_info[0],
self._redis_master_info[1],
db=0)
self._uve_server_task = gevent.spawn(self.run)
except Exception as e:
print "Failed to set_redis_master: %s" % e
raise
finally:
self._sem.release()
#end set_redis_master
def reset_redis_master(self):
# Drop the current master and stop the server task.
if self._redis_master_info is not None:
try:
self._sem.acquire()
self._redis_master_info = None
gevent.kill(self._uve_server_task)
self._redis = None
except Exception as e:
print "Failed to reset_redis_master: %s" % e
raise
finally:
self._sem.release()
#end reset_redis_master
def fill_redis_uve_master_info(self, uve_master_info):
# Report current master address, connectivity and mastership stats.
if self._redis_master_info is not None:
uve_master_info.ip = self._redis_master_info[0]
uve_master_info.port = int(self._redis_master_info[1])
try:
self._redis.ping()
except redis.exceptions.ConnectionError:
uve_master_info.status = 'DisConnected'
else:
uve_master_info.status = 'Connected'
uve_master_info.master_last_updated = self._master_last_updated
uve_master_info.num_of_mastership_changes = self._num_mastership_changes
#end fill_redis_uve_master_info
@staticmethod
def merge_previous(state, key, typ, attr, prevdict):
# Merge prevdict into the 'previous' aggregate; returns a deep copy.
print "%s New val is %s" % (attr, prevdict)
nstate = copy.deepcopy(state)
if UVEServer._is_agg_item(prevdict):
count = int(state[key][typ][attr]['previous']['#text'])
count += int(prevdict['#text'])
nstate[key][typ][attr]['previous']['#text'] = str(count)
if UVEServer._is_agg_list(prevdict):
sname = ParallelAggregator.get_list_name(
state[key][typ][attr]['previous'])
count = len(prevdict['list'][sname]) + \
len(state[key][typ][attr]['previous']['list'][sname])
nstate[key][typ][attr]['previous']['list'][sname].extend(
prevdict['list'][sname])
nstate[key][typ][attr]['previous']['list']['@size'] = \
str(count)
tstate = {}
tstate[typ] = {}
tstate[typ][attr] = copy.deepcopy(
nstate[key][typ][attr]['previous'])
nstate[key][typ][attr]['previous'] =\
ParallelAggregator.consolidate_list(tstate, typ, attr)
#.........这里部分代码省略.........
示例11: functions
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
#.........这里部分代码省略.........
# NOTE(review): fragment of a bot class -- the class header and the start
# of _load_api() were omitted by the scrape. Code kept byte-identical.
if prev_location:
position = prev_location
self.api.set_position(*position)
# retry login every 30 seconds if any errors
self.log.info('Starting Login process...')
login = False
while not login:
login = self.api.login(self.config.auth_service, self.config.username, self.config.get_password())
if not login:
logger.error('Login error, retrying Login in 30 seconds')
self.sleep(30)
self.log.info('Login successful')
self._heartbeat(login, True)
return True
def reload_api(self, prev_location=None):
# Drop the old API client and build a fresh one at the same position.
self.api = None
return self._load_api(prev_location)
'''
Blocking lock
- only locks if current thread (greenlet) doesn't own the lock
- persist=True will ensure the lock will not be released until the user
explicitly sets self.persist_lock=False.
'''
def thread_lock(self, persist=False):
# Returns False when the calling greenlet already owns the lock,
# True once the lock has been (re)acquired.
if self.sem.locked():
if self.locker == id(gevent.getcurrent()):
self.log.debug("Locker is -- %s. No need to re-lock", id(gevent.getcurrent()))
return False
else:
self.log.debug("Already locked by %s. Greenlet %s will wait...", self.locker, id(gevent.getcurrent()))
self.sem.acquire()
self.persist_lock = persist
self.locker = id(gevent.getcurrent())
self.log.debug("%s acquired lock (persist=%s)!", self.locker, persist)
return True
'''
Releases the lock if needed and the user didn't persist it
'''
def thread_release(self):
# Only the owning greenlet may release, and only when not persisted.
if self.sem.locked() and self.locker == id(gevent.getcurrent()) and not self.persist_lock:
self.log.debug("%s is now releasing lock", id(gevent.getcurrent()))
self.sem.release()
def _callback(self, gt):
# Greenlet link callback: log normal completion, otherwise restart
# the main loop from the last known position.
try:
if not gt.exception:
result = gt.value
logger.info('Thread finished with result: %s', result)
except KeyboardInterrupt:
return
logger.exception('Error in main loop %s, restarting at location: %s',
gt.exception, self.get_position())
# restart after sleep
self.sleep(30)
self.reload_config()
self.reload_api(self.get_position())
self.start()
def start(self):
# Spawn the main loop in its own greenlet.
self.thread = gevent.spawn(self._main_loop)
示例12: ConnectionPool
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
class ConnectionPool(object):
    """
    Generic TCP connection pool offering:

      * a configurable number of pooled connections
      * automatic replacement of connections that break mid-use
      * an optional application-level keepalive ping

    NOTE(review): the scrape lost the original indentation; it was
    reconstructed here following standard 4-space convention.
    """

    # Interval between the greenlets that populate the pool at startup.
    SPAWN_FREQUENCY = 0.1

    def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
        self.size = size
        self.conn = deque()
        self.lock = BoundedSemaphore(size)
        self.keepalive = keepalive
        # Exceptions list must be in tuple form to be caught properly.
        self.exc_classes = tuple(exc_classes)
        # Py2/py3 compatibility (http://stackoverflow.com/a/31136897/357578).
        try:
            counting = xrange
        except NameError:
            counting = range
        # Every slot starts out "taken"; _addOne releases one slot per
        # connection as it comes up.
        for _ in counting(size):
            self.lock.acquire()
        for slot in counting(size):
            gevent.spawn_later(self.SPAWN_FREQUENCY * slot, self._addOne)
        if self.keepalive:
            gevent.spawn(self._keepalive_periodic)

    def _new_connection(self):
        """
        Estabilish a new connection (to be implemented in subclasses).
        """
        raise NotImplementedError

    def _keepalive(self, c):
        """
        Implement actual application-level keepalive (to be
        reimplemented in subclasses).
        :raise: socket.error if the connection has been closed or is broken.
        """
        raise NotImplementedError()

    def _keepalive_periodic(self):
        # Spread the pings evenly across the keepalive period.
        interval = float(self.keepalive) / self.size
        while 1:
            try:
                with self.get() as conn:
                    self._keepalive(conn)
            except self.exc_classes:
                # Nothing to do, the pool will generate a new connection later
                pass
            gevent.sleep(interval)

    def _addOne(self):
        # Keep trying with exponential backoff until a connection is made,
        # then hand it to the pool and open one semaphore slot.
        backoff = 0.1
        while 1:
            conn = self._new_connection()
            if conn:
                break
            gevent.sleep(backoff)
            if backoff < 400:
                backoff *= 2
        self.conn.append(conn)
        self.lock.release()

    @contextmanager
    def get(self):
        """
        Get a connection from the pool, to make and receive traffic.
        If the connection fails for any reason (socket.error), it is dropped
        and a new one is scheduled. Please use @retry as a way to automatically
        retry whatever operation you were performing.
        """
        self.lock.acquire()
        try:
            conn = self.conn.popleft()
            yield conn
        except self.exc_classes:
            # The current connection has failed, drop it and create a new one
            # (_addOne will release the semaphore slot when it succeeds).
            gevent.spawn_later(1, self._addOne)
            raise
        except:
            # Unrelated error: the connection itself is fine, return it.
            self.conn.append(conn)
            self.lock.release()
            raise
        else:
            # NOTE: cannot use finally because MUST NOT reuse the connection
            # if it failed (socket.error)
            self.conn.append(conn)
            self.lock.release()
示例13: DiscoveryServer
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
#.........这里部分代码省略.........
# NOTE(review): fragment of a subscribe handler -- the method header and
# the surrounding class were omitted by the scrape. Code kept
# byte-identical, comments only.
cl_entry = {
'instances': count,
'remote': bottle.request.environ.get('REMOTE_ADDR'),
'client_type': client_type,
}
self.create_sub_data(client_id, service_type)
self._db_conn.insert_client_data(service_type, client_id, cl_entry)
self.syslog('subscribe: service type=%s, client=%s:%s, ttl=%d, asked=%d' \
%(service_type, client_type, client_id, ttl, count))
sdata = self.get_sub_data(client_id, service_type)
sdata['ttl_expires'] += 1
# check existing subscriptions
subs = self._db_conn.lookup_subscription(service_type, client_id)
if subs:
for service_id, result in subs:
entry = self._db_conn.lookup_service(service_type, service_id = service_id)
if self.service_expired(entry):
#self.syslog('skipping expired service %s, info %s' %(service_id, entry['info']))
continue
self._db_conn.insert_client(service_type, service_id, client_id, result, ttl)
#self.syslog(' refresh subscrition for service %s' %(service_id))
r.append(result)
assigned_sid.add(service_id)
count -= 1
# Request fully satisfied from cached subscriptions: respond early.
if count == 0:
response = {'ttl': ttl, service_type: r}
if ctype == 'application/xml':
response = xmltodict.unparse({'response':response})
return response
# acquire lock to update use count and TS
self._sem.acquire()
# NOTE(review): only the early-return path below releases _sem within
# this fragment; presumably the omitted tail releases it on the other
# paths -- confirm against the full source.
# lookup publishers of the service
pubs = self._db_conn.lookup_service(service_type)
if not pubs:
# force client to come back soon if service expectation is not met
if len(r) < reqcnt:
ttl_short = self.get_service_config(service_type, 'ttl_short')
if ttl_short:
ttl = self.get_ttl_short(client_id, service_type, ttl_short)
self._debug['ttl_short'] += 1
#self.syslog(' sending short ttl %d to %s' %(ttl, client_id))
response = {'ttl': ttl, service_type: r}
if ctype == 'application/xml':
response = xmltodict.unparse({'response':response})
self._sem.release()
return response
# eliminate inactive services
pubs_active = [item for item in pubs if not self.service_expired(item)]
#self.syslog(' Found %s publishers, %d active, need %d' %(len(pubs), len(pubs_active), count))
# find least loaded instances
pubs = self.service_list(service_type, pubs_active)
# prepare response - send all if count 0
for index in range(min(count, len(pubs)) if count else len(pubs)):
entry = pubs[index]
# skip duplicates - could happen if some publishers have quit and
# we have already picked up others from cached information above
if entry['service_id'] in assigned_sid:
示例14: UVEServer
# 需要导入模块: from gevent.coros import BoundedSemaphore [as 别名]
# 或者: from gevent.coros.BoundedSemaphore import acquire [as 别名]
class UVEServer(object):
def __init__(self, redis_uve_server, logger, redis_password=None):
    """Initialize UVE server state.

    When *redis_uve_server* (an (ip, port) endpoint) is given, a
    StrictRedis client to db 1 is opened immediately; otherwise
    ``self._redis`` stays None.
    """
    self._local_redis_uve = redis_uve_server
    self._redis_uve_map = {}
    self._logger = logger
    # Serializes critical sections between greenlets.
    self._sem = BoundedSemaphore(1)
    self._redis_password = redis_password
    self._redis = None
    if self._local_redis_uve:
        host = self._local_redis_uve[0]
        port = self._local_redis_uve[1]
        self._redis = redis.StrictRedis(host, port,
                                        password=self._redis_password,
                                        db=1)
    # Invert UVE_MAP so table names can be mapped back to UVE types.
    self._uve_reverse_map = dict(
        (table, uve_type) for uve_type, table in UVE_MAP.iteritems())
#end __init__
def update_redis_uve_list(self, redis_uve_list):
    """Reconcile the (ip, port) -> StrictRedis map with a new endpoint list.

    Clients for endpoints that disappeared are dropped; previously unseen
    endpoints get a fresh client. Surviving entries keep their existing
    client object.
    """
    wanted = set((elem[0], elem[1]) for elem in redis_uve_list)
    # Remove clients for redis instances that are gone.  Snapshot the
    # keys first because the map is mutated while walking it.
    stale = [key for key in self._redis_uve_map.keys()
             if key not in wanted]
    for key in stale:
        del self._redis_uve_map[key]
    # Open clients for redis instances we have not seen before.
    for endpoint in wanted:
        if endpoint not in self._redis_uve_map:
            (r_ip, r_port) = endpoint
            self._redis_uve_map[endpoint] = redis.StrictRedis(
                r_ip, r_port, password=self._redis_password, db=1)
# end update_redis_uve_list
def fill_redis_uve_info(self, redis_uve_info):
    """Populate *redis_uve_info* with the local redis endpoint and its
    current reachability status ('Connected' / 'DisConnected')."""
    redis_uve_info.ip = self._local_redis_uve[0]
    redis_uve_info.port = self._local_redis_uve[1]
    # A ping determines whether the server is reachable right now.
    try:
        self._redis.ping()
    except redis.exceptions.ConnectionError:
        redis_uve_info.status = 'DisConnected'
    else:
        redis_uve_info.status = 'Connected'
#end fill_redis_uve_info
@staticmethod
def merge_previous(state, key, typ, attr, prevdict):
    """Fold *prevdict* (an aggregate carried over from an earlier
    snapshot) into the 'previous' slot of state[key][typ][attr] and
    return the updated state.  The input *state* itself is left
    untouched (all mutation happens on a deep copy).
    """
    print "%s New val is %s" % (attr, prevdict)
    nstate = copy.deepcopy(state)
    # Aggregated counter items carry their count in '#text': add the
    # carried-over count to the existing one.
    if UVEServer._is_agg_item(prevdict):
        count = int(state[key][typ][attr]['previous']['#text'])
        count += int(prevdict['#text'])
        nstate[key][typ][attr]['previous']['#text'] = str(count)
    # Aggregated lists: concatenate the carried-over elements, fix up
    # '@size', then re-consolidate the combined list.
    if UVEServer._is_agg_list(prevdict):
        sname = ParallelAggregator.get_list_name(
            state[key][typ][attr]['previous'])
        count = len(prevdict['list'][sname]) + \
            len(state[key][typ][attr]['previous']['list'][sname])
        nstate[key][typ][attr]['previous']['list'][sname].extend(
            prevdict['list'][sname])
        nstate[key][typ][attr]['previous']['list']['@size'] = \
            str(count)
        # Wrap the merged value in the {typ: {attr: ...}} shape that
        # ParallelAggregator.consolidate_list expects.
        tstate = {}
        tstate[typ] = {}
        tstate[typ][attr] = copy.deepcopy(
            nstate[key][typ][attr]['previous'])
        nstate[key][typ][attr]['previous'] =\
            ParallelAggregator.consolidate_list(tstate, typ, attr)
    print "%s Merged val is %s"\
        % (attr, nstate[key][typ][attr]['previous'])
    return nstate
def run(self):
lck = False
while True:
try:
k, value = self._redis.brpop("DELETED")
self._sem.acquire()
lck = True
self._logger.debug("%s del received for " % value)
# value is of the format:
# DEL:<key>:<src>:<node-type>:<module>:<instance-id>:<message-type>:<seqno>
self._redis.delete(value)
except redis.exceptions.ResponseError:
#send redis connection down msg. Coule be bcos of authentication
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'UVE', status = ConnectionStatus.DOWN,
message = 'UVE result : Connection Error',
server_addrs = ['%s:%d' % (self._local_redis_uve[0],
self._local_redis_uve[1])])
sys.exit()
except redis.exceptions.ConnectionError:
if lck:
#.........这里部分代码省略.........