This article collects typical usage examples of the Service class in Python. If you have been wondering how exactly to use the Python Service class, how to call it, or what real-world uses of it look like, the hand-picked class code examples here may help.
A total of 15 code examples of the Service class are shown below, sorted by popularity by default.
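Most of the examples below share the same basic shape: subclass Service, initialize the logger for the service's ServiceCoord, chain up to Service.__init__, and register periodic callbacks with add_timeout. The following minimal sketch of that pattern is pieced together from the examples themselves (the class name MyService and the callback _periodic_check are hypothetical; the imports follow the cms.async layout the examples use):

from cms.async import ServiceCoord
from cms.async.AsyncLibrary import Service, logger

class MyService(Service):
    def __init__(self, shard):
        # Same boilerplate as Examples 1, 3, 4, 5, 8, 9, 13 and 15.
        logger.initialize(ServiceCoord("MyService", shard))
        Service.__init__(self, shard, custom_logger=logger)
        # Run _periodic_check every 5 seconds, starting immediately.
        self.add_timeout(self._periodic_check, None, 5.0, immediately=True)

    def _periodic_check(self):
        logger.info("still alive")
        # Assumption: in this framework a timeout stays armed as long
        # as its callback returns a true value.
        return True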
Example 1: __init__
def __init__(self, shard):
    logger.initialize(ServiceCoord("Worker", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.file_cacher = FileCacher(self)
    self.work_lock = threading.Lock()
    self.ignore_job = False
Example 2: __init__
def __init__(self):
    """Create a MasterI object."""
    Service.__init__(self)
    # CurrentZones and CurrentIdWork describe the current job.
    self.CurrentZones, self.CurrentIdWork = [], -1
    # CurrentImage is the final image for the current job.
    self.CurrentImage = None
    # RenderAgents holds the agents specialized in rendering:
    # (key, value) == (agent name, proxy to the agent)
    self.RenderAgents = {}
    # BenchmarkValue is the mean benchmark execution time on the MASYRO platform.
    self.BenchmarkValue = 1
    # Biddings holds the agents' current bids.
    self.Biddings = []
    self.InitialBidding = 1
    # NumberOfPieces counts the pieces of the current job that have arrived.
    self.NumberOfPieces = 0
    # CurrentBiddingTime is the time spent on the successive bidding rounds.
    self.CurrentBiddingTime = 0
    # Proxies to other services.
    self.Repository, self.Blackboard = None, None
    # Master log.
    self.Log = ""
    # Final timings for the job.
    self.FinalTimes = ""
Example 3: __init__
def __init__(self, shard, contest_id):
    logger.initialize(ServiceCoord("EvaluationService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.contest_id = contest_id
    self.queue = JobQueue()
    self.pool = WorkerPool(self)
    self.scoring_service = self.connect_to(
        ServiceCoord("ScoringService", 0))
    for i in xrange(get_service_shards("Worker")):
        worker = ServiceCoord("Worker", i)
        self.pool.add_worker(worker)
    self.add_timeout(self.dispatch_jobs, None,
                     EvaluationService.CHECK_DISPATCH_TIME,
                     immediately=True)
    self.add_timeout(self.check_workers_timeout, None,
                     EvaluationService.WORKER_TIMEOUT_CHECK_TIME,
                     immediately=False)
    self.add_timeout(self.check_workers_connection, None,
                     EvaluationService.WORKER_CONNECTION_CHECK_TIME,
                     immediately=False)
    self.add_timeout(self.search_jobs_not_done, None,
                     EvaluationService.JOBS_NOT_DONE_CHECK_TIME,
                     immediately=True)
Example 4: __init__
def __init__(self, shard, contest_id=None):
    """If contest_id is not None, we assume the user wants the
    autorestart feature.
    """
    logger.initialize(ServiceCoord("ResourceService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.contest_id = contest_id
    # _local_store holds one entry per sampling time (int epoch).
    self._local_store = []
    # Floating-point epoch used for precise percentage measurements.
    self._last_saved_time = time.time()
    # Starting point for CPU times.
    self._prev_cpu_times = self._get_cpu_times()
    # Sorted list of the ServiceCoords running on the same machine.
    self._local_services = self._find_local_services()
    # Dict mapping each service to a bool marking whether we will restart it.
    self._will_restart = dict(
        (service, None if self.contest_id is None else True)
        for service in self._local_services)
    # Process found to be associated with each ServiceCoord.
    self._procs = dict((service, None)
                       for service in self._local_services)
    # Previous CPU times for each service.
    self._services_prev_cpu_times = dict(
        (service, (0.0, 0.0)) for service in self._local_services)
    # Start finding processes and their CPU times.
    self._store_resources(store=False)
    self.add_timeout(self._store_resources, None, 5)
    if self.contest_id is not None:
        self._launched_processes = set([])
        self.add_timeout(self._restart_services, None, 5, immediately=True)
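The helper _get_cpu_times is not shown in this example. A plausible minimal sketch, assuming the psutil library (which the real service may or may not use), would be:

import psutil

def _get_cpu_times(self):
    # Hypothetical sketch: snapshot machine-wide CPU times so that later
    # samples can be diffed against this one to compute usage percentages.
    times = psutil.cpu_times()
    return {"user": times.user, "system": times.system, "idle": times.idle}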
Example 5: __init__
def __init__(self, shard):
    logger.initialize(ServiceCoord("Worker", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.file_cacher = FileCacher(self)
    self.task_type = None
    self.work_lock = threading.Lock()
    self.session = None
Example 6: addCustomService
def addCustomService(self):
    service = Service()
    service.id = 'custom-%i' % random.randrange(1, 999999)
    service.name = self.tr('New Service, edit me')
    service.description = self.tr('Enter a short, concise description here')
    self.sources[QString('custom.xml')].services.append(service)
    self.sources[QString('custom.xml')].writeBack()
    self.readSources()
    self.populateCustomList(service.id)
    self.synchronizeLineEdits()  # does not trigger automatically
Example 7: __init__
def __init__(self):
    """Create a BlackboardI object."""
    Service.__init__(self)
    # (key, value) == ((IdWork, IdZone), Register)
    self.Registers = {}
    # AnalysisTime is the time, in seconds, spent analyzing the most recent scene.
    self.AnalysisTime = 0
    # EstimatedRenderTime is the time, in seconds, spent estimating the work units.
    self.EstimatedRenderTime = 0
Example 8: __init__
def __init__(self, shard, contest_id):
    logger.initialize(ServiceCoord("ScoringService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.contest_id = contest_id
    self.scorers = {}
    self._initialize_scorers()
    # If for some reason (SS switched off for a while, or a broken
    # connection with ES) submissions have been left without a
    # score, this is the set where you want to put their ids. Note
    # that the sets are non-empty if and only if there is a live
    # timeout for the method "score_old_submission".
    self.submission_ids_to_score = set([])
    self.submission_ids_to_token = set([])
    self.scoring_old_submission = False
    # We need to load every submission at start, but we don't want
    # to invalidate every score, so that we can simply load the
    # score-less submissions. So we keep a set of the submissions
    # we have analyzed (for scoring and for tokens).
    self.submission_ids_scored = set()
    self.submission_ids_tokened = set()
    # Initialize the ranking web servers we need to send data to.
    self.rankings = []
    for i in xrange(len(config.rankings_address)):
        address = config.rankings_address[i]
        username = config.rankings_username[i]
        password = config.rankings_password[i]
        self.rankings.append((address[0],  # HTTP / HTTPS
                              "%s:%d" % tuple(address[1:]),
                              get_authorization(username, password)))
    self.initialize_queue = set()
    self.submission_queue = dict()
    self.subchange_queue = dict()
    self.operation_queue_lock = threading.Lock()
    for ranking in self.rankings:
        self.initialize_queue.add(ranking)
    self.log_bridge = LogBridge()
    thread = threading.Thread(target=self.dispath_operations_thread,
                              args=(self.log_bridge,))
    thread.daemon = True
    thread.start()
    self.add_timeout(self.search_jobs_not_done, None,
                     ScoringService.JOBS_NOT_DONE_CHECK_TIME,
                     immediately=True)
    self.add_timeout(self.forward_logs, None,
                     ScoringService.FORWARD_LOG_TIME,
                     immediately=True)
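Note the design choice in this variant: all ranking operations are funneled through a single dispatch thread, and setting thread.daemon = True ensures that thread cannot keep the process alive once the service itself shuts down.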
Example 9: __init__
def __init__(self, shard, contest_id):
    logger.initialize(ServiceCoord("ScoringService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.contest_id = contest_id
    # Initialize the scorers, the ScoreType objects holding all
    # submissions for a given task and deciding scores.
    self.scorers = {}
    with SessionGen(commit=False) as session:
        contest = session.query(Contest).\
            filter_by(id=contest_id).first()
        logger.info("Loaded contest %s" % contest.name)
        contest.create_empty_ranking_view(timestamp=contest.start)
        for task in contest.tasks:
            self.scorers[task.id] = get_score_type(task=task)
        session.commit()
    # If for some reason (SS switched off for a while, or a broken
    # connection with ES) submissions have been left without a
    # score, this is the list where you want to put their ids. Note
    # that the list is non-empty if and only if there is a live
    # timeout for the method "score_old_submission".
    self.submission_ids_to_score = []
    self.submission_ids_to_token = []
    # We need to load every submission at start, but we don't want
    # to invalidate every score, so that we can simply load the
    # score-less submissions. So we keep a set of the submissions
    # we have analyzed (for scoring and for tokens).
    self.submission_ids_scored = set()
    self.submission_ids_tokened = set()
    # Initialize the ranking web servers we need to send data to.
    self.rankings = []
    for i in xrange(len(config.rankings_address)):
        address = config.rankings_address[i]
        username = config.rankings_username[i]
        password = config.rankings_password[i]
        auth = get_authorization(username, password)
        self.rankings.append(("%s:%d" % tuple(address), auth))
    self.operation_queue = []
    for ranking in self.rankings:
        self.operation_queue.append((self.initialize, [ranking]))
    self.add_timeout(self.dispatch_operations, None,
                     ScoringService.CHECK_DISPATCH_TIME,
                     immediately=True)
    self.add_timeout(self.search_jobs_not_done, None,
                     ScoringService.JOBS_NOT_DONE_CHECK_TIME,
                     immediately=True)
Example 10: __init__
def __init__(self, shard, custom_logger=None):
    Service.__init__(self, shard, custom_logger)
    global logger
    from cms.async.AsyncLibrary import logger as _logger
    logger = _logger
    self.start = 0
    self.total_time = 0
    self.allright = 0
    self.current = -1
    self.ongoing = False
    self.failed = False
    self.retry = False
    self.add_timeout(self.test, None, 0.2, immediately=True)
    self.initialized = False
Example 11: loadAuxiliaryData
def loadAuxiliaryData():
    # Service.logger.debug("Loading auxiliary data for terminology extraction system...")
    global ngramFilePath, adskUnwordsRoot
    global ngrams, nowords
    # ngrams = codecs.open(ngramFilePath, "r", "utf-8").read()
    conn = Service.connectToDB()
    cursor = conn.cursor()
    cursor.execute("select LangCode3Ltr from TargetLanguages")
    langs = cursor.fetchall()
    conn.close()
    for lang in langs:
        if __debug_on__:
            Service.logger.debug("\t\tReading nGram file " + ngramFilePath + "." + lang[0].upper() + ".bz2...")
        ngrams[lang[0]] = bz2.BZ2File(ngramFilePath + "." + lang[0].upper() + ".bz2", "r").read()
    # Load Autodesk-related lists:
    # - ngram list (from Ventzi, including only the ngrams without counts)
    # - NeXLT product names (------ there is an N/A in it???)
    # - NeXLT language list
    # - city names from http://www.geodatasource.com/ and http://www.maxmind.com/en/worldcities
    # - words which should not be harvested (unwords and general words)
    # - Autodesk trademarks
    # - company names
    # Define nowords as the filter.
    nowords = set().union(
        preplists(adskUnwordsRoot + "/general_words.txt"),
        preplists(adskUnwordsRoot + "/un_words.txt"),
        preplists(adskUnwordsRoot + "/autodesk_trademarks.txt"),
        preplists(adskUnwordsRoot + "/company_names.txt"),
        preplists(adskUnwordsRoot + "/cities_regions.txt"))
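The preplists helper is not defined in this example. A minimal sketch consistent with its use above, assuming it loads a UTF-8 word list with one term per line, might be:

import codecs

def preplists(path):
    # Hypothetical helper: read a UTF-8 word list, one entry per line,
    # and return it as a set of lowercased, stripped terms.
    with codecs.open(path, "r", "utf-8") as handle:
        return set(line.strip().lower() for line in handle if line.strip())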
Example 12: __init__
def __init__(self, listen_port, handlers, parameters, shard=0,
             custom_logger=None, listen_address=""):
    Service.__init__(self, shard, custom_logger)
    global logger
    from cms.async.AsyncLibrary import logger as _logger
    logger = _logger
    # This ensures that when the server autoreloads because its source is
    # modified, the socket is closed correctly.
    # In the development branch of Tornado, you can add a hook before
    # the server reloads.
    try:
        if parameters["debug"]:
            fcntl.fcntl(self.server.socket,
                        fcntl.F_SETFD, fcntl.FD_CLOEXEC)
    except KeyError:
        pass
    self.__responses = {}
    # TODO: why are the following two lines needed?
    self._RPCRequestHandler__responses = self.__responses
    self._RPCAnswerHandler__responses = self.__responses
    handlers += [(r"/rpc_request/([a-zA-Z0-9_-]+)/"
                  r"([0-9]+)/([a-zA-Z0-9_-]+)",
                  RPCRequestHandler),
                 (r"/rpc_answer", RPCAnswerHandler),
                 (r"/sync_rpc_request/([a-zA-Z0-9_-]+)/"
                  r"([0-9]+)/([a-zA-Z0-9_-]+)",
                  SyncRPCRequestHandler)]
    self.application = tornado.web.Application(handlers, **parameters)
    # xheaders=True means that Tornado uses the content of the
    # header X-Real-IP as the request IP. This means that if it is
    # behind a proxy, it can see the real IP the request is coming
    # from. But, to use it, we need to be sure we can trust it
    # (i.e., if we are not behind a proxy that sets that header,
    # we must not use it).
    self.application.service = self
    http_server = tornado.httpserver.HTTPServer(
        self.application, xheaders=parameters.get("is_proxy_used", True))
    http_server.listen(listen_port, address=listen_address)
    self.instance = tornado.ioloop.IOLoop.instance()
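The fcntl call above sets FD_CLOEXEC on the listening socket, so when Tornado's debug autoreload re-execs the process, the old file descriptor is not inherited by the new image and the port can be re-bound cleanly.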
Example 13: __init__
def __init__(self, shard, contest_id):
    logger.initialize(ServiceCoord("ScoringService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    self.contest_id = contest_id
    self.scorers = {}
    self._initialize_scorers()
    # If for some reason (SS switched off for a while, or a broken
    # connection with ES) submissions have been left without a
    # score, this is the list where you want to put their ids. Note
    # that the list is non-empty if and only if there is a live
    # timeout for the method "score_old_submission".
    self.submission_ids_to_score = []
    self.submission_ids_to_token = []
    # We need to load every submission at start, but we don't want
    # to invalidate every score, so that we can simply load the
    # score-less submissions. So we keep a set of the submissions
    # we have analyzed (for scoring and for tokens).
    self.submission_ids_scored = set()
    self.submission_ids_tokened = set()
    # Initialize the ranking web servers we need to send data to.
    self.rankings = []
    for i in xrange(len(config.rankings_address)):
        address = config.rankings_address[i]
        username = config.rankings_username[i]
        password = config.rankings_password[i]
        auth = get_authorization(username, password)
        self.rankings.append(("%s:%d" % tuple(address), auth))
    self.operation_queue = []
    for ranking in self.rankings:
        self.operation_queue.append((self.initialize, [ranking]))
    self.add_timeout(self.dispatch_operations, None,
                     ScoringService.CHECK_DISPATCH_TIME,
                     immediately=True)
    self.add_timeout(self.search_jobs_not_done, None,
                     ScoringService.JOBS_NOT_DONE_CHECK_TIME,
                     immediately=True)
Example 14: threadProcess
def threadProcess(self, client, clientaddr):
    recv = client.recv(int(Var.IDF_SOCKET_BUFFER))
    try:
        service = Service()
        while len(recv):
            # Process the command that was sent over.
            print recv
            ret = service.process(recv)
            # Send the result back over the socket.
            client.send(ret + "\n")
            time.sleep(0.1)
            recv = client.recv(int(Var.IDF_SOCKET_BUFFER))
            #print recv
            #function.log('socket recv data', 'data/data-'+clientaddr[0]+'.log').info(recv)
            #function.log('socket send data', 'data/data-'+clientaddr[0]+'.log').info(ret)
        function.log('socket data', 'data/data-' + clientaddr[0] + '.log').info("socket close\n")
    except Exception, ex:
        print "Socket threadProcess function error:\n"
        print ex
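A hypothetical client for the handler above (the address, port and command string are assumptions; Var.IDF_SOCKET_BUFFER comes from the example itself):

import socket

client = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
client.connect(("127.0.0.1", 9000))
client.send("status")
# Read the newline-terminated reply produced by threadProcess.
print client.recv(int(Var.IDF_SOCKET_BUFFER))
client.close()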
Example 15: __init__
def __init__(self, shard):
    logger.initialize(ServiceCoord("LogService", shard))
    Service.__init__(self, shard, custom_logger=logger)
    log_dir = os.path.join(config.log_dir, "cms")
    if not mkdir(config.log_dir) or \
            not mkdir(log_dir):
        logger.error("Cannot create necessary directories.")
        self.exit()
        return
    log_filename = "%d.log" % int(time.time())
    self._log_file = codecs.open(os.path.join(log_dir, log_filename),
                                 "w", "utf-8")
    try:
        os.remove(os.path.join(log_dir, "last.log"))
    except OSError:
        pass
    os.symlink(log_filename,
               os.path.join(log_dir, "last.log"))
    self._last_messages = []
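The remove-then-symlink idiom at the end is needed because os.symlink raises OSError (EEXIST) if last.log already exists; deleting it first, and ignoring the OSError raised when it was never created, keeps last.log pointing at the newest log file.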