This article collects typical usage examples of the Python method gevent.coros.BoundedSemaphore.release. If you are wondering what BoundedSemaphore.release does, how to call it, or what real-world uses of it look like, the curated examples below should help. You can also look further into the containing class, gevent.coros.BoundedSemaphore, for more usage examples.
The following shows 14 code examples of the BoundedSemaphore.release method, sorted by popularity by default.
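Before diving into the extracted examples, here is a minimal, self-contained sketch of the acquire/release pattern they all build on. It assumes only that gevent is installed; gevent.coros is the legacy module path, and newer gevent releases expose the same class as gevent.lock.BoundedSemaphore.

import gevent
from gevent.lock import BoundedSemaphore   # legacy code: from gevent.coros import BoundedSemaphore

sem = BoundedSemaphore(1)   # at most one greenlet may hold it at a time

def worker(name):
    sem.acquire()           # blocks this greenlet until the semaphore is free
    try:
        print('%s is inside the critical section' % name)
        gevent.sleep(0.1)   # stand-in for real work done while holding the lock
    finally:
        sem.release()       # always release, even if the work raises

gevent.joinall([gevent.spawn(worker, n) for n in ('a', 'b', 'c')])

The "bounded" part is what release() enforces: releasing more often than the semaphore was acquired raises an error instead of silently growing the counter, which is the mistake several of the examples below guard against with try/finally blocks or explicit flags.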
Example 1: locks
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
class Lock:
""" UNIX-specific exclusive file locks (released when the process ends).
Based on
http://blog.vmfarms.com/2011/03/cross-process-locking-and.html,
adapted for context managers (the 'with' statement).
Modified to be gevent-safe! Locks held by a given Greenlet may not be
taken by other Greenlets until released, _as long as you only create one
Lock object per lockfile_. THIS IS VERY IMPORTANT. *Make sure* that you're
not creating multiple locks on the same file from the same process,
otherwise you'll bypass the gevent lock!
Parameters
----------
f : file or str
File handle or filename to use as the lock.
block : bool
Whether to block or throw IOError if the lock is grabbed multiple
times.
"""
TIMEOUT = 60
def __init__(self, f, block=True):
if isinstance(f, file):
self.filename = f.name
self.handle = f if not f.closed else open(f.name, 'w')
else:
self.filename = f
mkdirp(os.path.dirname(f))
self.handle = open(f, 'w')
if block:
self.lock_op = fcntl.LOCK_EX
else:
self.lock_op = fcntl.LOCK_EX | fcntl.LOCK_NB
self.block = block
self.gevent_lock = BoundedSemaphore(1)
def acquire(self):
got_gevent_lock = self.gevent_lock.acquire(blocking=self.block)
if not got_gevent_lock:
raise IOError("cannot acquire gevent lock")
fcntl.flock(self.handle, self.lock_op)
def release(self):
fcntl.flock(self.handle, fcntl.LOCK_UN)
self.gevent_lock.release()
def locked(self):
return self.gevent_lock.locked()
def __enter__(self):
self.acquire()
return self
def __exit__(self, type, value, traceback):
self.release()
def __del__(self):
self.handle.close()
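A hedged usage sketch of the Lock class above; the lock path and the body of the critical section are invented for illustration, and mkdirp is assumed to be the usual "create missing directories" helper from the original module.

# Illustrative only: the path and update_shared_state() are hypothetical.
lock = Lock('/tmp/myapp/update.lock', block=False)   # create ONE Lock per lockfile
try:
    with lock:                     # acquire() on enter, release() on exit
        update_shared_state()      # hypothetical critical section
except IOError:
    print('lock is already held by another greenlet or process')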
Example 2: LiberateProtocol
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
class LiberateProtocol(protocols.BaseProtocol):
"""协议"""
buff = ""
def connectionMade(self):
"""连接建立处理
"""
address = self.transport.getAddress()
logger.info('Client %d logged in. [%s,%d]' % (self.transport.sessionno,
address[0], address[1]))
self.factory.connmanager.addConnection(self)
self.factory.doConnectionMade(self)
self.sem = BoundedSemaphore(1)
def connectionLost(self, reason):
"""连接断开处理
"""
logger.info('Client %d login out(%s).' % (self.transport.sessionno,
reason))
self.factory.doConnectionLost(self)
self.factory.connmanager.dropConnectionByID(self.transport.sessionno)
def safeToWriteData(self, data, command):
"""线程安全的向客户端发送数据
@param data: str 要向客户端写的数据
"""
if data is None:
return
senddata = self.factory.produceResult(data, command)
self.sem.acquire()
self.transport.sendall(senddata)
self.sem.release()
def dataReceived(self, data):
"""数据到达处理
@param data: str 客户端传送过来的数据
"""
length = self.factory.dataprotocl.getHeadlength() # 获取协议头的长度
self.buff += data
while len(self.buff) >= length:
unpackdata = self.factory.dataprotocl.unpack(self.buff[:length])
if not unpackdata.get('result'):
logger.info('illegal data packet')
self.connectionLost('illegal data packet')
break
command = unpackdata.get('command')
rlength = unpackdata.get('length')
request = self.buff[length:length + rlength]
if len(request) < rlength:
logger.info('some data lost')
break
self.buff = self.buff[length + rlength:]
response = self.factory.doDataReceived(self, command, request)
# if not response:
# continue
self.safeToWriteData(response, command)
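One detail worth noting in the example above: if transport.sendall() raises, the semaphore is never released and every later call to safeToWriteData() blocks forever. A hedged variant of the same method (all names taken from the example, not from any real framework API) releases in a finally block:

def safeToWriteData(self, data, command):
    """Same semantics as above, but the semaphore is released even on error."""
    if data is None:
        return
    senddata = self.factory.produceResult(data, command)
    self.sem.acquire()
    try:
        self.transport.sendall(senddata)
    finally:
        self.sem.release()    # guaranteed release, even if sendall() raises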
Example 3: UVEServer
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
#......... part of the code is omitted here .........
sname = ParallelAggregator.get_list_name(
snhdict[attr])
if not isinstance(
snhdict[attr]['list'][sname], list):
snhdict[attr]['list'][sname] = \
[snhdict[attr]['list'][sname]]
if (attr not in state[key][typ]):
# There is no existing entry for the UVE
vstr = json.dumps(snhdict[attr])
else:
# There is an existing entry
# Merge the new entry with the existing one
state = UVEServer.merge_previous(
state, key, typ, attr, snhdict[attr])
vstr = json.dumps(state[key][typ][attr]['previous'])
# Store the merged result back in the database
self._redis.sadd("PUVES:" + typ, key)
self._redis.sadd("PTYPES:" + key, typ)
self._redis.hset("PREVIOUS:" + key + ":" + typ, attr, vstr)
self._redis.delete(value)
except redis.exceptions.ResponseError:
# send redis connection down msg. Could be because of authentication
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'UVE', status = ConnectionStatus.DOWN,
message = 'UVE result : Connection Error',
server_addrs = ['%s:%d' % (self._local_redis_uve[0],
self._local_redis_uve[1])])
sys.exit()
except redis.exceptions.ConnectionError:
if lck:
self._sem.release()
lck = False
gevent.sleep(5)
else:
if lck:
self._sem.release()
lck = False
self._logger.debug("Deleted %s" % value)
self._logger.debug("UVE %s Type %s" % (key, typ))
@staticmethod
def _is_agg_item(attr):
if attr['@type'] in ['i8', 'i16', 'i32', 'i64', 'byte',
'u8', 'u16', 'u32', 'u64']:
if '@aggtype' in attr:
if attr['@aggtype'] == "counter":
return True
return False
@staticmethod
def _is_agg_list(attr):
if attr['@type'] in ['list']:
if '@aggtype' in attr:
if attr['@aggtype'] == "append":
return True
return False
@staticmethod
def convert_previous(existing, state, key, typ, afilter=None):
# Take the existing delete record, and load it into the state dict
for attr, hval in existing.iteritems():
hdict = json.loads(hval)
Example 4: DiscoveryZkClient
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
#......... part of the code is omitted here .........
def subscriber_entries(self):
service_types = self._zk.get_children('/clients')
for service_type in service_types:
subscribers = self._zk.get_children('/clients/%s' % (service_type))
for client_id in subscribers:
data, stat = self._zk.get(
'/clients/%s/%s' % (service_type, client_id))
cl_entry = json.loads(data)
yield((client_id, service_type))
# end
def update_service(self, service_type, service_id, data):
path = '/services/%s/%s' % (service_type, service_id)
self.create_node(path, value=json.dumps(data), makepath=True)
# end
def insert_service(self, service_type, service_id, data):
# ensure election path for service type exists
path = '/election/%s' % (service_type)
self.create_node(path)
# preclude duplicate service entry
sid_set = set()
# prevent background task from deleting node under our nose
self._zk_sem.acquire()
seq_list = self._zk.get_children(path)
for sequence in seq_list:
sid, stat = self._zk.get(
'/election/%s/%s' % (service_type, sequence))
sid_set.add(sid)
self._zk_sem.release()
if service_id not in sid_set:
path = '/election/%s/node-' % (service_type)
pp = self._zk.create(
path, service_id, makepath=True, sequence=True)
pat = path + "(?P<id>.*$)"
mch = re.match(pat, pp)
seq = mch.group('id')
data['sequence'] = seq
self.syslog('ST %s, SID %s not found! Added with sequence %s' %
(service_type, service_id, seq))
self.update_service(service_type, service_id, data)
# end insert_service
def delete_service(self, service_type, service_id):
if self.lookup_subscribers(service_type, service_id):
return
path = '/services/%s/%s' %(service_type, service_id)
self._zk.delete(path)
# purge in-memory cache - ideally we are not supposed to know about this
self._ds.delete_pub_data(service_id)
# delete service node if all services gone
path = '/services/%s' %(service_type)
if self._zk.get_children(path):
return
self._zk.delete(path)
#end delete_service
def lookup_service(self, service_type, service_id=None):
Example 5: ZookeeperClient
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
class ZookeeperClient(object):
def __init__(self, module, server_list):
# logging
logger = logging.getLogger(module)
logger.setLevel(logging.INFO)
try:
handler = logging.handlers.RotatingFileHandler('/var/log/contrail/' + module + '-zk.log', maxBytes=10*1024*1024, backupCount=5)
except IOError:
print "Cannot open log file in /var/log/contrail/"
else:
log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
handler.setFormatter(log_format)
logger.addHandler(handler)
self._zk_client = \
kazoo.client.KazooClient(
server_list,
handler=kazoo.handlers.gevent.SequentialGeventHandler(),
logger=logger)
self._logger = logger
self._election = None
self._zk_sem = BoundedSemaphore(1)
self.connect()
# end __init__
# reconnect
def reconnect(self):
self._zk_sem.acquire()
self.syslog("restart: acquired lock; state %s " % self._zk_client.state)
# initiate restart if our state is suspended or lost
if self._zk_client.state != "CONNECTED":
self.syslog("restart: starting ...")
try:
self._zk_client.stop()
self._zk_client.close()
self._zk_client.start()
self.syslog("restart: done")
except gevent.event.Timeout as e:
self.syslog("restart: timeout!")
except Exception as e:
self.syslog('restart: exception %s' % str(e))
except Exception as str:
self.syslog('restart: str exception %s' % str)
self._zk_sem.release()
# start
def connect(self):
while True:
try:
self._zk_client.start()
break
except gevent.event.Timeout as e:
self.syslog(
'Failed to connect with Zookeeper -will retry in a second')
gevent.sleep(1)
# Zookeeper may also throw an exception due to a delay in master election
except Exception as e:
self.syslog('%s -will retry in a second' % (str(e)))
gevent.sleep(1)
self.syslog('Connected to ZooKeeper!')
# end
def syslog(self, msg):
if not self._logger:
return
self._logger.info(msg)
# end syslog
def _zk_listener(self, state):
if state == "CONNECTED":
self._election.cancel()
# end
def _zk_election_callback(self, func, *args, **kwargs):
self._zk_client.remove_listener(self._zk_listener)
func(*args, **kwargs)
# Exit if running master encounters error or exception
exit(1)
# end
def master_election(self, path, identifier, func, *args, **kwargs):
self._zk_client.add_listener(self._zk_listener)
while True:
self._election = self._zk_client.Election(path, identifier)
self._election.run(self._zk_election_callback, func, *args, **kwargs)
# end master_election
def create_node(self, path, value=None):
try:
if value is None:
value = uuid.uuid4()
self._zk_client.create(path, str(value), makepath=True)
except (kazoo.exceptions.SessionExpiredError,
kazoo.exceptions.ConnectionLoss):
self.reconnect()
return self.create_node(path, value)
except kazoo.exceptions.NodeExistsError:
current_value = self.read_node(path)
#......... part of the code is omitted here .........
Example 6: UVEServer
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
#......... part of the code is omitted here .........
for attr, hval in self._redis.hgetall(value).iteritems():
snhdict = xmltodict.parse(hval)
if UVEServer._is_agg_list(snhdict[attr]):
if snhdict[attr]['list']['@size'] == "0":
continue
if snhdict[attr]['list']['@size'] == "1":
sname = ParallelAggregator.get_list_name(
snhdict[attr])
if not isinstance(
snhdict[attr]['list'][sname], list):
snhdict[attr]['list'][sname] = \
[snhdict[attr]['list'][sname]]
if (attr not in state[key][typ]):
# There is no existing entry for the UVE
vstr = json.dumps(snhdict[attr])
else:
# There is an existing entry
# Merge the new entry with the existing one
state = UVEServer.merge_previous(
state, key, typ, attr, snhdict[attr])
vstr = json.dumps(state[key][typ][attr]['previous'])
# Store the merged result back in the database
self._redis.sadd("PUVES:" + typ, key)
self._redis.sadd("PTYPES:" + key, typ)
self._redis.hset("PREVIOUS:" + key + ":" + typ, attr, vstr)
self._redis.delete(value)
except redis.exceptions.ConnectionError:
if lck:
self._sem.release()
lck = False
gevent.sleep(5)
else:
if lck:
self._sem.release()
lck = False
self._logger.debug("Deleted %s" % value)
self._logger.debug("UVE %s Type %s" % (key, typ))
@staticmethod
def _is_agg_item(attr):
if attr['@type'] in ['i8', 'i16', 'i32', 'i64', 'byte',
'u8', 'u16', 'u32', 'u64']:
if '@aggtype' in attr:
if attr['@aggtype'] == "counter":
return True
return False
@staticmethod
def _is_agg_list(attr):
if attr['@type'] in ['list']:
if '@aggtype' in attr:
if attr['@aggtype'] == "append":
return True
return False
@staticmethod
def convert_previous(existing, state, key, typ, afilter=None):
# Take the existing delete record, and load it into the state dict
for attr, hval in existing.iteritems():
hdict = json.loads(hval)
Example 7: QueueManager
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
class QueueManager(object):
def __init__(self, clear_interval=DEFAULT_CLEAR_INTERVAL):
self.__updates = {}
self.__updates_lock = BoundedSemaphore()
# Start clearing daemon thread.
spawn(self._daemon_clear, interval=clear_interval)
def _load(self, queue_id):
"""Load and return queue update tracker for queue_id."""
self.__updates_lock.acquire()
# Hit.
if queue_id in self.__updates:
self.__updates_lock.release()
return self.__updates[queue_id].fresh()
# Miss.
self.__updates[queue_id] = QueueUpdate(queue_id)
self.__updates_lock.release()
return self.__updates[queue_id]
def _clear(self):
"""Clear the in-memory update tracking dictionary"""
self.__updates_lock.acquire()
print 'Clearing'
# Make sure anyone currently waiting reloads.
for queue_id in self.__updates:
self.__updates[queue_id].event.set()
self.__updates[queue_id].event.clear()
self.__updates = {}
print 'Clear'
self.__updates_lock.release()
def _daemon_clear(self, interval):
"""Clear the update tracking dictionary every interval seconds."""
while True:
sleep(interval)
self._clear()
def edit(self, user, queue_id, room_idlist, draw):
# Put together the list of Room objects.
rooms = []
print 'edit', room_idlist
for roomid in room_idlist:
room = Room.objects.get(pk=roomid)
if (not room) or draw not in room.building.draw.all():
return {'error':'bad room/draw'}
rooms.append(room)
update = self._load(queue_id)
# Clear out the old list.
queue = update.queue
queue.queuetoroom_set.all().delete()
# Put in new relationships
for i in range(0, len(rooms)):
qtr = QueueToRoom(queue=queue, room=rooms[i], ranking=i)
qtr.save()
# Store the update information on the queue.
queue.version += 1
queue.update_kind = Queue.EDIT
queue.update_user = user
queue.save()
# Notify others of the update.
update.event.set()
update.event.clear()
# Assemble and return response.
room_list = []
for room in rooms:
room_list.append({'id':room.id, 'number':room.number,
'building':room.building.name})
return {'rooms':room_list}
def check(self, user, queue_id, last_version):
update = self._load(queue_id)
print user, update.queue, last_version
if last_version == update.queue.version:
print 'going to wait'
print update.queue.version
update.event.wait()
update = self._load(queue_id)
print 'past wait'
queueToRooms = QueueToRoom.objects.filter(queue=update.queue).order_by('ranking')
if not queueToRooms:
return {'id':update.queue.version, 'rooms':[]}
room_list = []
if update.queue.update_user:
netid = update.queue.update_user.netid
else:
netid = ''
for qtr in queueToRooms:
room_list.append({'id':qtr.room.id, 'number':qtr.room.number,
'building':qtr.room.building.name})
return {'id':update.queue.version,
'kind':Queue.UPDATE_KINDS[update.queue.update_kind][1],
'netid':netid,
'rooms':room_list}
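gevent semaphores also work as context managers, so the lock bookkeeping in _load() above can be written so the semaphore cannot leak on an early return. A hedged rewrite of that one method, assuming the surrounding QueueManager and QueueUpdate classes from the example:

def _load(self, queue_id):
    """Load and return the queue update tracker for queue_id (context-manager variant)."""
    with self.__updates_lock:          # acquire() on enter, release() on exit
        if queue_id in self.__updates:                     # hit
            return self.__updates[queue_id].fresh()
        self.__updates[queue_id] = QueueUpdate(queue_id)   # miss
        return self.__updates[queue_id]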
Example 8: DiscoveryZkClient
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
class DiscoveryZkClient(object):
def __init__(self, discServer, zk_srv_ip='127.0.0.1',
zk_srv_port='2181', reset_config=False):
self._reset_config = reset_config
self._service_id_to_type = {}
self._ds = discServer
self._zk_sem = BoundedSemaphore(1)
self._election = None
self._restarting = False
zk_endpts = []
for ip in zk_srv_ip.split(','):
zk_endpts.append('%s:%s' %(ip, zk_srv_port))
# logging
logger = logging.getLogger('discovery-service')
logger.setLevel(logging.WARNING)
handler = logging.handlers.RotatingFileHandler('/var/log/contrail/discovery_zk.log', maxBytes=1024*1024, backupCount=10)
log_format = logging.Formatter('%(asctime)s [%(name)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
handler.setFormatter(log_format)
logger.addHandler(handler)
self._zk = kazoo.client.KazooClient(
hosts=','.join(zk_endpts),
handler=kazoo.handlers.gevent.SequentialGeventHandler(),
logger=logger)
self._logger = logger
# connect
self.connect()
if reset_config:
self.delete_node("/services", recursive=True)
self.delete_node("/clients", recursive=True)
self.delete_node("/election", recursive=True)
# create default paths
self.create_node("/services")
self.create_node("/clients")
self.create_node("/election")
self._debug = {
'subscription_expires': 0,
'oos_delete': 0,
'db_excepts': 0,
}
# end __init__
# Discovery server used for syslog, cleanup etc
def set_ds(self, discServer):
self._ds = discServer
# end set_ds
def is_restarting(self):
return self._restarting
# end is_restarting
# restart
def restart(self):
self._zk_sem.acquire()
self._restarting = True
self.syslog("restart: acquired lock; state %s " % self._zk.state)
# initiate restart if our state is suspended or lost
if self._zk.state != "CONNECTED":
self.syslog("restart: starting ...")
try:
self._zk.stop()
self._zk.close()
self._zk.start()
self.syslog("restart: done")
except:
e = sys.exc_info()[0]
self.syslog('restart: exception %s' % str(e))
self._restarting = False
self._zk_sem.release()
# start
def connect(self):
while True:
try:
self._zk.start()
break
except gevent.event.Timeout as e:
self.syslog(
'Failed to connect with Zookeeper -will retry in a second')
gevent.sleep(1)
# Zookeeper may also throw an exception due to a delay in master election
except Exception as e:
self.syslog('%s -will retry in a second' % (str(e)))
gevent.sleep(1)
self.syslog('Connected to ZooKeeper!')
# end
def start_background_tasks(self):
# spawn loop to expire subscriptions
gevent.Greenlet.spawn(self.inuse_loop)
# spawn loop to expire services
gevent.Greenlet.spawn(self.service_oos_loop)
#......... part of the code is omitted here .........
Example 9: CrawlSettings
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
class Crawler:
settings = CrawlSettings()
def __init__(self, spider_class, settings):
def get(value, default={}):
try:
return getattr(settings, value)
except AttributeError:
return default
self.settings = CrawlSettings(get('CRAWL'))
Request.settings = RequestSettings(get('REQUEST'))
spider_settings = SpiderSettings(get('SPIDER'))
spider = spider_class(spider_settings)
log = LogSettings(get('LOGFORMATTERS'), get('LOGHANDLERS'),
get('LOGGERS'))
spider.logger = log.getLogger(spider.name)
self.logger = log.getLogger(spider.name)
self.load(spider)
Request.stats = self.stats
def load(self, spider):
redis_args = dict(host=self.settings.REDIS_URL,
port=self.settings.REDIS_PORT,
db=self.settings.REDIS_DB)
if hasattr(self.settings, 'NAMESPACE'):
redis_args['namespace'] = self.settings.NAMESPACE
else:
redis_args['namespace'] = spider.name
self.url_set = redisds.Set('urlset', **redis_args)
self.url_queue = redisds.Queue('urlqueue', serializer=Pickle(),
**redis_args)
self.runner = redisds.Lock("runner:%s" % uuid4().hex, **redis_args)
self.runners = redisds.Dict("runner:*", **redis_args)
self.stats = redisds.Dict("stats:*", **redis_args)
self.lock = BoundedSemaphore(1)
self.running_count = 0
self.allowed_urls_regex = self.get_regex(spider.allowed_domains)
self.spider = spider
self.start()
def get_regex(self, domains):
default = (r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+[A-Z]{2,6}\.?|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
r'(?::\d+)?')
domain_regex = r'(%s)' % '|'.join(domains) if len(domains) else default
url_regex = r'^https?://%s(?:/?|[/?]\S+)$' % domain_regex
regex = re.compile(url_regex, re.IGNORECASE)
return regex
def current_time(self):
tz = timezone(self.settings.TIME_ZONE)
return datetime.now(tz).isoformat()
def start(self):
if not self.settings.RESUME and self.completed():
self.url_queue.clear()
self.url_set.clear()
if self.url_queue.empty():
self.stats.clear()
if isinstance(self.spider.start, list):
requests = self.spider.start
else:
requests = [self.spider.start]
for request in requests:
if isinstance(request, str):
request = Request(request)
if request.callback is None:
request.callback = "parse"
self.insert(request)
self.stats['status'] = "running"
self.stats['start_time'] = self.current_time()
def clear(self, finished):
self.runner.release()
if finished:
self.stats['status'] = 'finished'
self.url_queue.clear()
self.url_set.clear()
elif self.completed():
self.stats['end_time'] = self.current_time()
self.stats['status'] = 'stopped'
stats = dict(self.stats)
stats['runners'] = len(self.runners)
self.logger.info("%s", str(stats))
def completed(self):
return len(self.runners) == 0
def inc_count(self):
self.lock.acquire()
if self.running_count == 0:
self.runner.acquire()
self.running_count += 1
self.lock.release()
def decr_count(self):
self.lock.acquire()
self.running_count -= 1
if self.running_count == 0:
#......... part of the code is omitted here .........
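The same context-manager form applies to the reference counting in inc_count()/decr_count() above. decr_count() is cut off in the excerpt, so the tail of this hedged rewrite (handing the shared runner lock back when the count reaches zero) is an assumption inferred from inc_count() and clear(), not code from the original project:

def inc_count(self):
    with self.lock:                    # BoundedSemaphore used as a context manager
        if self.running_count == 0:
            self.runner.acquire()      # first active greenlet takes the shared runner lock
        self.running_count += 1

def decr_count(self):
    with self.lock:
        self.running_count -= 1
        if self.running_count == 0:
            self.runner.release()      # assumed: last greenlet returns the runner lock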
Example 10: UVEServer
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
class UVEServer(object):
def __init__(self, local_ip, local_port, redis_sentinel_client, service):
self._redis_sentinel_client = redis_sentinel_client
self._local_ip = local_ip
self._local_port = int(local_port)
self._service = service
self._uve_server_task = None
self._redis = None
self._redis_master_info = None
self._master_last_updated = None
self._num_mastership_changes = 0
self._sem = BoundedSemaphore(1)
if redis_sentinel_client is not None:
self._redis_master_info = \
redis_sentinel_client.get_redis_master(service)
if self._redis_master_info is not None:
self._num_mastership_changes += 1
self._master_last_updated = UTCTimestampUsec()
self._redis = redis.StrictRedis(self._redis_master_info[0],
self._redis_master_info[1],
db=0)
self._uve_server_task = gevent.spawn(self.run)
#end __init__
def set_redis_master(self, redis_master):
if self._redis_master_info != redis_master:
try:
self._sem.acquire()
if self._redis_master_info is not None:
gevent.kill(self._uve_server_task)
self._redis_master_info = redis_master
self._num_mastership_changes += 1
self._master_last_updated = UTCTimestampUsec()
self._redis = redis.StrictRedis(self._redis_master_info[0],
self._redis_master_info[1],
db=0)
self._uve_server_task = gevent.spawn(self.run)
except Exception as e:
print "Failed to set_redis_master: %s" % e
raise
finally:
self._sem.release()
#end set_redis_master
def reset_redis_master(self):
if self._redis_master_info is not None:
try:
self._sem.acquire()
self._redis_master_info = None
gevent.kill(self._uve_server_task)
self._redis = None
except Exception as e:
print "Failed to reset_redis_master: %s" % e
raise
finally:
self._sem.release()
#end reset_redis_master
def fill_redis_uve_master_info(self, uve_master_info):
if self._redis_master_info is not None:
uve_master_info.ip = self._redis_master_info[0]
uve_master_info.port = int(self._redis_master_info[1])
try:
self._redis.ping()
except redis.exceptions.ConnectionError:
uve_master_info.status = 'DisConnected'
else:
uve_master_info.status = 'Connected'
uve_master_info.master_last_updated = self._master_last_updated
uve_master_info.num_of_mastership_changes = self._num_mastership_changes
#end fill_redis_uve_master_info
@staticmethod
def merge_previous(state, key, typ, attr, prevdict):
print "%s New val is %s" % (attr, prevdict)
nstate = copy.deepcopy(state)
if UVEServer._is_agg_item(prevdict):
count = int(state[key][typ][attr]['previous']['#text'])
count += int(prevdict['#text'])
nstate[key][typ][attr]['previous']['#text'] = str(count)
if UVEServer._is_agg_list(prevdict):
sname = ParallelAggregator.get_list_name(
state[key][typ][attr]['previous'])
count = len(prevdict['list'][sname]) + \
len(state[key][typ][attr]['previous']['list'][sname])
nstate[key][typ][attr]['previous']['list'][sname].extend(
prevdict['list'][sname])
nstate[key][typ][attr]['previous']['list']['@size'] = \
str(count)
tstate = {}
tstate[typ] = {}
tstate[typ][attr] = copy.deepcopy(
nstate[key][typ][attr]['previous'])
nstate[key][typ][attr]['previous'] =\
ParallelAggregator.consolidate_list(tstate, typ, attr)
#......... part of the code is omitted here .........
Example 11: functions
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
class Poketrainer:
""" Public functions (without _**) are callable by the webservice! """
def __init__(self, args):
self.thread = None
self.socket = None
self.cli_args = args
self.force_debug = args['debug']
self.log = logging.getLogger(__name__)
# timers, counters and triggers
self.pokemon_caught = 0
self._error_counter = 0
self._error_threshold = 10
self.start_time = time()
self.exp_start = None
self._heartbeat_number = 1  # setting this back to one because we parse a full heartbeat during login!
self._heartbeat_frequency = 3 # 1 = always
self._full_heartbeat_frequency = 15 # 10 = as before (every 10th heartbeat)
self._farm_mode_triggered = False
# objects, order is important!
self.config = None
self._load_config()
self._open_socket()
self.player = Player({})
self.player_stats = PlayerStats({})
self.inventory = Inventory(self, [])
self.fort_walker = FortWalker(self)
self.map_objects = MapObjects(self)
self.poke_catcher = PokeCatcher(self)
self.incubate = Incubate(self)
self.evolve = Evolve(self)
self.release = Release(self)
self.sniper = Sniper(self)
self._origPosF = (0, 0, 0)
self.api = None
self._load_api()
# config values that might be changed during runtime
self.step_size = self.config.step_size
self.should_catch_pokemon = self.config.should_catch_pokemon
# threading / locking
self.sem = BoundedSemaphore(1) # gevent
self.persist_lock = False
self.locker = None
def sleep(self, t):
# eventlet.sleep(t * self.config.sleep_mult)
gevent.sleep(t * self.config.sleep_mult)
def _open_socket(self):
desc_file = os.path.join(os.path.dirname(os.path.dirname(os.path.realpath(__file__))), ".listeners")
s = socket.socket()
s.bind(("", 0)) # let the kernel find a free port
sock_port = s.getsockname()[1]
s.close()
data = {}
if os.path.isfile(desc_file):
with open(desc_file, 'r+') as f:
data = f.read()
if PY2:
data = json.loads(data.encode() if len(data) > 0 else '{}')
else:
data = json.loads(data if len(data) > 0 else '{}')
data[self.config.username] = sock_port
with open(desc_file, "w+") as f:
f.write(json.dumps(data, indent=2))
s = zerorpc.Server(self)
s.bind("tcp://127.0.0.1:%i" % sock_port) # the free port should still be the same
self.socket = gevent.spawn(s.run)
# zerorpc requires gevent, thus we would need a solution for eventlets
# self.socket = self.thread_pool.spawn(wsgi.server, eventlet.listen(('127.0.0.1', sock_port)), self)
# self.socket = self.thread_pool.spawn(eventlet.serve, eventlet.listen(('127.0.0.1', sock_port)), self)
# alternative: GreenRPCService
def _load_config(self):
if self.config is None:
config_file = "config.json"
# If config file exists, load variables from json
load = {}
if os.path.isfile(config_file):
with open(config_file) as data:
load.update(json.load(data))
defaults = load.get('defaults', {})
config = load.get('accounts', [])[self.cli_args['config_index']]
if self.cli_args['debug'] or config.get('debug', False):
logging.getLogger("requests").setLevel(logging.DEBUG)
logging.getLogger("pgoapi").setLevel(logging.DEBUG)
#......... part of the code is omitted here .........
Example 12: ConnectionPool
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
class ConnectionPool(object):
"""
Generic TCP connection pool, with the following features:
* Configurable pool size
* Auto-reconnection when a broken socket is detected
* Optional periodic keepalive
"""
# Frequency at which the pool is populated at startup
SPAWN_FREQUENCY = 0.1
def __init__(self, size, exc_classes=DEFAULT_EXC_CLASSES, keepalive=None):
self.size = size
self.conn = deque()
self.lock = BoundedSemaphore(size)
self.keepalive = keepalive
# Exceptions list must be in tuple form to be caught properly
self.exc_classes = tuple(exc_classes)
# http://stackoverflow.com/a/31136897/357578
try:
xrange
except NameError:
xrange = range
for i in xrange(size):
self.lock.acquire()
for i in xrange(size):
gevent.spawn_later(self.SPAWN_FREQUENCY*i, self._addOne)
if self.keepalive:
gevent.spawn(self._keepalive_periodic)
def _new_connection(self):
"""
Establish a new connection (to be implemented in subclasses).
"""
raise NotImplementedError
def _keepalive(self, c):
"""
Implement actual application-level keepalive (to be
reimplemented in subclasses).
:raise: socket.error if the connection has been closed or is broken.
"""
raise NotImplementedError()
def _keepalive_periodic(self):
delay = float(self.keepalive) / self.size
while 1:
try:
with self.get() as c:
self._keepalive(c)
except self.exc_classes:
# Nothing to do, the pool will generate a new connection later
pass
gevent.sleep(delay)
def _addOne(self):
stime = 0.1
while 1:
c = self._new_connection()
if c:
break
gevent.sleep(stime)
if stime < 400:
stime *= 2
self.conn.append(c)
self.lock.release()
@contextmanager
def get(self):
"""
Get a connection from the pool, to make and receive traffic.
If the connection fails for any reason (socket.error), it is dropped
and a new one is scheduled. Please use @retry as a way to automatically
retry whatever operation you were performing.
"""
self.lock.acquire()
try:
c = self.conn.popleft()
yield c
except self.exc_classes:
# The current connection has failed, drop it and create a new one
gevent.spawn_later(1, self._addOne)
raise
except:
self.conn.append(c)
self.lock.release()
raise
else:
# NOTE: cannot use finally because MUST NOT reuse the connection
# if it failed (socket.error)
self.conn.append(c)
self.lock.release()
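To make the pool above concrete, here is a hedged sketch of a minimal subclass. The host, port, and newline "ping" protocol are invented for illustration; only _new_connection() and _keepalive() need to be supplied, and the ConnectionPool class is assumed to be importable from the example's module.

from gevent import socket   # gevent's cooperative socket module

class EchoPool(ConnectionPool):
    """Illustrative pool of TCP connections to a hypothetical echo service."""
    def __init__(self, host, port, size=5, keepalive=30):
        self._addr = (host, port)
        ConnectionPool.__init__(self, size, keepalive=keepalive)

    def _new_connection(self):
        try:
            return socket.create_connection(self._addr)
        except socket.error:
            return None              # _addOne() will retry with backoff

    def _keepalive(self, c):
        c.sendall(b'ping\n')         # raises socket.error if the peer is gone
        c.recv(16)

pool = EchoPool('127.0.0.1', 7777)   # address is illustrative
with pool.get() as conn:             # borrow a connection, returned on exit
    conn.sendall(b'hello\n')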
Example 13: DiscoveryServer
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
#......... part of the code is omitted here .........
for service_id, result in subs:
entry = self._db_conn.lookup_service(service_type, service_id = service_id)
if self.service_expired(entry):
#self.syslog('skipping expired service %s, info %s' %(service_id, entry['info']))
continue
self._db_conn.insert_client(service_type, service_id, client_id, result, ttl)
#self.syslog(' refresh subscrition for service %s' %(service_id))
r.append(result)
assigned_sid.add(service_id)
count -= 1
if count == 0:
response = {'ttl': ttl, service_type: r}
if ctype == 'application/xml':
response = xmltodict.unparse({'response':response})
return response
# acquire lock to update use count and TS
self._sem.acquire()
# lookup publishers of the service
pubs = self._db_conn.lookup_service(service_type)
if not pubs:
# force client to come back soon if service expectation is not met
if len(r) < reqcnt:
ttl_short = self.get_service_config(service_type, 'ttl_short')
if ttl_short:
ttl = self.get_ttl_short(client_id, service_type, ttl_short)
self._debug['ttl_short'] += 1
#self.syslog(' sending short ttl %d to %s' %(ttl, client_id))
response = {'ttl': ttl, service_type: r}
if ctype == 'application/xml':
response = xmltodict.unparse({'response':response})
self._sem.release()
return response
# eliminate inactive services
pubs_active = [item for item in pubs if not self.service_expired(item)]
#self.syslog(' Found %s publishers, %d active, need %d' %(len(pubs), len(pubs_active), count))
# find least loaded instances
pubs = self.service_list(service_type, pubs_active)
# prepare response - send all if count 0
for index in range(min(count, len(pubs)) if count else len(pubs)):
entry = pubs[index]
# skip duplicates - could happen if some publishers have quit and
# we have already picked up others from cached information above
if entry['service_id'] in assigned_sid:
continue
assigned_sid.add(entry['service_id'])
result = entry['info']
r.append(result)
self.syslog(' assign service=%s, info=%s' %(entry['service_id'], json.dumps(result)))
# don't update pubsub data if we are sending entire list
if count == 0:
continue
# create client entry
self._db_conn.insert_client(service_type, entry['service_id'], client_id, result, ttl)
# update publisher entry
Example 14: UVEServer
# Required import: from gevent.coros import BoundedSemaphore [as alias]
# Or: from gevent.coros.BoundedSemaphore import release [as alias]
#......... part of the code is omitted here .........
tstate = {}
tstate[typ] = {}
tstate[typ][attr] = copy.deepcopy(
nstate[key][typ][attr]['previous'])
nstate[key][typ][attr]['previous'] =\
ParallelAggregator.consolidate_list(tstate, typ, attr)
print "%s Merged val is %s"\
% (attr, nstate[key][typ][attr]['previous'])
return nstate
def run(self):
lck = False
while True:
try:
k, value = self._redis.brpop("DELETED")
self._sem.acquire()
lck = True
self._logger.debug("%s del received for " % value)
# value is of the format:
# DEL:<key>:<src>:<node-type>:<module>:<instance-id>:<message-type>:<seqno>
self._redis.delete(value)
except redis.exceptions.ResponseError:
# send redis connection down msg. Could be because of authentication
ConnectionState.update(conn_type = ConnectionType.REDIS,
name = 'UVE', status = ConnectionStatus.DOWN,
message = 'UVE result : Connection Error',
server_addrs = ['%s:%d' % (self._local_redis_uve[0],
self._local_redis_uve[1])])
sys.exit()
except redis.exceptions.ConnectionError:
if lck:
self._sem.release()
lck = False
gevent.sleep(5)
else:
if lck:
self._sem.release()
lck = False
self._logger.debug("Deleted %s" % value)
self._logger.debug("UVE %s Type %s" % (key, typ))
@staticmethod
def _is_agg_item(attr):
if attr['@type'] in ['i8', 'i16', 'i32', 'i64', 'byte',
'u8', 'u16', 'u32', 'u64']:
if '@aggtype' in attr:
if attr['@aggtype'] == "counter":
return True
return False
@staticmethod
def _is_agg_list(attr):
if attr['@type'] in ['list']:
if '@aggtype' in attr:
if attr['@aggtype'] == "append":
return True
return False
def get_part(self, part):
uves = {}
for r_inst in self._redis_uve_map.keys():
try:
(r_ip,r_port) = r_inst
if not self._redis_uve_map[r_inst]: