本文整理汇总了Python中twisted.internet.defer.returnD函数的典型用法代码示例。如果您正苦于以下问题:Python returnD函数的具体用法?Python returnD怎么用?Python returnD使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了returnD函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: SingleMongo
def SingleMongo(coll, method, *args, **kwargs):
    """Run a single MongoDB collection method over a one-shot connection.

    Opens a connection, authenticates, proxies `method` on collection
    `coll`, then always disconnects and returns the result via returnD.

    NOTE(review): written as an @inlineCallbacks coroutine (it yields
    Deferreds and exits through returnD); the decorator is not visible
    in this snippet -- confirm it is applied where this is defined.
    """
    conn = MongoConnection(MONGODB['HOST'], MONGODB['PORT'])
    try:
        db = conn[MONGODB['DATABASE']]
        yield db.authenticate(MONGODB['USER'], MONGODB['PSWD'])
        # Dynamic dispatch lets any collection method (find, insert, ...)
        # be proxied through this single helper.
        res = yield getattr(db[coll], method)(*args, **kwargs)
    finally:
        # Always release the connection -- the original leaked it whenever
        # authentication or the query itself raised.
        conn.disconnect()
    returnD(res)
示例2: __run__
def __run__(self, coll, method, *args, **kwargs):
    """Execute `method` on collection `coll`, retrying on failure.

    Lazily (re)connects and authenticates, then runs the requested
    collection method. On any exception it retries up to self.retries
    times after closing the connection; once retries are exhausted it
    makes one last attempt through a fresh `Mongo` call flagged with
    lasttry=True, whose own failure is swallowed (result stays []).
    The result is delivered through returnD.
    """
    attempts_left = self.retries
    result = []
    lasttry = False
    # Internal flag set by the recursive "hard retry" below; it must be
    # stripped before kwargs are forwarded to the driver.
    if 'lasttry' in kwargs:
        lasttry = True
        del kwargs['lasttry']
    while True:
        try:
            self.coll = coll
            self.method = method
            if not self.conn and not self.db:
                # `status` tags the current phase ("Connec"/"Authentica"/
                # "Communica") so error logs read "Connecting", etc.
                status = "Connec"
                self.conn = yield MongoConnection(MONGODB['HOST'], MONGODB['PORT'], reconnect=False)
                self.db = self.conn[MONGODB['DATABASE']]
                status = "Authentica"
                yield self.db.authenticate(MONGODB['USER'], MONGODB['PSWD'])
            status = "Communica"
            result = yield getattr(self.db[coll], method)(*args, **kwargs)
        except Exception as e:
            if not lasttry:
                if attempts_left > 0:
                    attempts_left -= 1
                    if DEBUG:
                        self.logerr("%sting" % status, "Retry #%d" % (self.retries-attempts_left))
                    # Drop the possibly-broken connection and loop again.
                    yield self.close(silent=True)
                    continue
                if DEBUG:
                    self.logerr("%sting" % status, "HARD RETRY %s %s" % (type(e), str(e)))
                # Retries exhausted: delegate once to a brand new Mongo
                # wrapper, marked so it will not recurse again.
                result = yield Mongo(coll, method, *args, lasttry=True, **kwargs)
        # Reached on success or after the hard retry; returnD raises to
        # exit both the generator and the while loop.
        yield self.close()
        returnD(result)
示例3: get_queue
def get_queue(self, corpus, specs=None, **kwargs):
    """Fetch queue entries for `corpus` matching `specs`.

    Results are sorted by ascending timestamp unless a sort is given.
    When called with limit=1 the single matching document is returned
    instead of a one-element list. Delivered through returnD.
    """
    if specs is None:
        # Avoid the shared mutable default-argument pitfall of the
        # original `specs={}` signature (backward-compatible: an
        # omitted argument still means "match everything").
        specs = {}
    if "sort" not in kwargs:
        kwargs["sort"] = sortasc('timestamp')
    res = yield self.queue(corpus).find(specs, **kwargs)
    if res and "limit" in kwargs and kwargs["limit"] == 1:
        res = res[0]
    returnD(res)
示例4: save_WEs_query
def save_WEs_query(self, corpus, ids, query_options):
    """Persist a snapshot of a web-entities query result.

    Stores the matched ids, their count and the options used, and
    returns the new document's id as a string via returnD.
    """
    document = {
        "webentities": ids,
        "total": len(ids),
        "query": query_options
    }
    inserted = yield self.queries(corpus).insert_one(document)
    returnD(str(inserted.inserted_id))
示例5: save_WEs_query
def save_WEs_query(self, corpus, ids, query_options):
    """Persist a snapshot of a web-entities query result.

    Legacy pymongo variant: uses insert(..., safe=True) for an
    acknowledged write and returns the new document id as a string
    via returnD.
    """
    document = {
        "webentities": ids,
        "total": len(ids),
        "query": query_options
    }
    object_id = yield self.queries(corpus).insert(document, safe=True)
    returnD(str(object_id))
示例6: list_jobs
def list_jobs(self, corpus, specs=None, **kwargs):
    """List crawl jobs of `corpus` matching `specs`.

    Default ordering is by crawling status, then indexing status, then
    creation date. When called with limit=1 the single matching job is
    returned instead of a one-element list. Delivered through returnD.
    """
    if specs is None:
        # Avoid the shared mutable default-argument pitfall of the
        # original `specs={}` signature (backward-compatible: an
        # omitted argument still means "match everything").
        specs = {}
    if "sort" not in kwargs:
        kwargs["sort"] = sortasc("crawling_status") + sortasc("indexing_status") + sortasc("created_at")
    jobs = yield self.jobs(corpus).find(specs, **kwargs)
    if jobs and "limit" in kwargs and kwargs["limit"] == 1:
        jobs = jobs[0]
    returnD(jobs)
示例7: start_stream
def start_stream(self, conf):
    """Build the list of Twitter queries to follow and start streaming.

    Reads the channel's tweet-search queries from the feeds collection,
    filters out those the streaming API cannot handle, resolves matching
    user accounts, launches the stream follower in a thread pool, and
    starts a 1-second LoopingCall flushing collected tweets.
    Returns True via returnD (False if the timeout guard refuses to run).
    """
    if not self.fact.__init_timeout__():
        returnD(False)
    queries = yield self.fact.db['feeds'].find({'database': 'tweets', 'channel': self.fact.channel}, fields=['query'])
    track = []   # queries accepted for the streaming `track` parameter
    skip = []    # queries rejected as unprocessable by the stream
    k = 0
    for query in queries:
        q = str(query['query'].encode('utf-8')).lower()
        # Queries starting with @ should return only tweets from the
        # corresponding user; the stream does not know how to handle
        # this, so skip them silently.
        if self.re_twitter_account.match(q):
            continue
        # Operators (OR, negation, quotes) and extreme lengths are not
        # supported by the streaming track parameter either.
        elif " OR " in q or " -" in q or '"' in q or len(q) > 60 or len(q) < 6:
            skip.append(q)
            continue
        track.append(q)
        k += 1
        # Twitter caps the number of tracked terms; stay under ~400.
        if k > 395:
            break
    if self.fact.twuser not in track:
        track.append(self.fact.twuser)
    if len(skip):
        self.log("Skipping unprocessable queries for streaming: « %s »" % " » | « ".join(skip), hint=True)
    self.log("Start search streaming for: « %s »" % " » | « ".join(track), hint=True)
    conn = Microblog("twitter", conf, bearer_token=self.fact.twitter_token)
    # Try to find user accounts matching the queries so the stream can
    # also follow them directly.
    users, self.fact.ircclient.twitter['users'] = conn.lookup_users(track, self.fact.ircclient.twitter['users'])
    # Run the blocking stream consumer off the reactor thread.
    deferToThreadPool(reactor, self.threadpool, self.follow_stream, conf, users.values(), track)
    self.depiler = LoopingCall(self.flush_tweets)
    self.depiler.start(1)
    returnD(True)
示例8: depile
def depile(self):
    """Pop one queued crawl job and schedule it on ScrapyD.

    Does nothing when the queue is empty or ScrapyD already has pending
    jobs. On scheduling failure the job stays queued with a refreshed
    timestamp so it is retried later; on success the job is recorded in
    the database and removed from the in-memory queue.
    """
    if self.queue is None:
        yield self.init_queue()
    if not len(self.queue):
        returnD(None)
    status = yield self.get_scrapyd_status()
    # Never pile on top of jobs ScrapyD has not started yet.
    if status["pending"] > 0:
        returnD(None)
    # Add some random wait to allow possible concurrent Hyphe instances
    # to compete for ScrapyD's empty slots.
    yield deferredSleep(1./randint(4,20))
    # Order jobs by corpus with fewer currently running crawls, then by
    # age (the "runningcount.timestamp" float sorts on both at once).
    ordered = sorted(self.queue.items(), key=lambda x: \
      float("%s.%s" % (status.get(x[1]["corpus"], 0), x[1]["timestamp"])))
    job_id, job = ordered[0]
    res = yield self.send_scrapy_query('schedule', job["crawl_arguments"])
    ts = now_ts()
    if is_error(res):
        logger.msg("WARNING: error sending job %s to ScrapyD: %s" % (job, res))
        self.queue[job_id]['timestamp'] = ts # let it retry a bit later
    else:
        yield self.db.update_job(job["corpus"], job_id, res['jobid'], ts)
        yield self.db.add_log(job["corpus"], job_id, "CRAWL_SCHEDULED", ts)
        del(self.queue[job_id])
示例9: get_WEs
def get_WEs(self, corpus, query=None):
    """Return the web entities of a corpus, optionally filtered.

    A list of integer ids is converted into a Mongo "$in" filter on
    _id; any other truthy value is passed to find() as-is. With no
    query, every entity is returned. Delivered through returnD.
    """
    if query:
        if isinstance(query, list) and isinstance(query[0], int):
            # Shorthand: a plain list of ids means "these entities".
            query = {"_id": {"$in": query}}
        res = yield self.WEs(corpus).find(query)
    else:
        res = yield self.WEs(corpus).find()
    returnD(res)
示例10: stop_corpus
def stop_corpus(self, name, quiet=False):
    """Stop the Traph of corpus `name`.

    Returns False via returnD when the corpus is already stopped,
    True otherwise (after stopping it if it is currently loaded).
    """
    already_stopped = self.stopped_corpus(name)
    if already_stopped:
        if config["DEBUG"]:
            self.log(name, "Traph already stopped", quiet=quiet)
        returnD(False)
    # Only corpora actually loaded in memory need an explicit stop.
    if name in self.corpora:
        yield self.corpora[name].stop()
    returnD(True)
示例11: list_logs
def list_logs(self, corpus, job, **kwargs):
    """List log entries of one job (or several, if `job` is a list).

    Defaults: ascending timestamp order, projecting only the timestamp
    and log fields. Results delivered through returnD.
    """
    if "sort" not in kwargs:
        kwargs["sort"] = sortasc('timestamp')
    if "projection" not in kwargs:
        kwargs["projection"] = ['timestamp', 'log']
    # isinstance instead of `type(job) == list` (idiomatic, and also
    # accepts list subclasses).
    if isinstance(job, list):
        job = {"$in": job}
    res = yield self.logs(corpus).find({"_job": job}, **kwargs)
    returnD(res)
示例12: run_twitter_search
def run_twitter_search(self):
    """Run the channel's Twitter search queries in a random order.

    Fetches the query feeds, shuffles their processing order, resolves
    the search urls and hands them to the protocol. Marks the task
    stopped when done; returns False via returnD if the timeout guard
    refuses to run.
    """
    if not self.__init_timeout__():
        returnD(False)
    queries = yield self.db['feeds'].find({'database': 'tweets', 'channel': self.channel})
    # list() is required for Python 3, where range() is an immutable
    # lazy sequence that random.shuffle cannot modify in place
    # (harmless and equivalent on Python 2).
    randorder = list(range(len(queries)))
    shuffle(randorder)
    urls = yield getFeeds(self.db, self.channel, 'tweets', randorder=randorder)
    yield self.protocol.start_twitter_search(urls, randorder=randorder)
    self.status = "stopped"
示例13: add_job
def add_job(self, args, corpus, webentity_id):
    """Persist a new crawl job, mirror it in the local queue and log it.

    Returns the new job's id via returnD.
    """
    timestamp = now_ts()
    job_id = yield self.db.add_job(corpus, webentity_id, args, timestamp)
    # Keep an in-memory copy for the scheduler's depile loop.
    self.queue[job_id] = {
        "corpus": corpus,
        "timestamp": timestamp,
        "crawl_arguments": args
    }
    yield self.db.add_log(corpus, job_id, "CRAWL_ADDED", timestamp)
    returnD(job_id)
示例14: list_logs
def list_logs(self, corpus, job, **kwargs):
    """List log entries of one job (or several, if `job` is a list).

    Legacy pymongo variant: uses `filter`/`fields` keywords and forces
    safe=True for acknowledged reads. Defaults to ascending timestamp
    order, returning only the timestamp and log fields via returnD.
    """
    if "filter" not in kwargs:
        kwargs["filter"] = sortasc('timestamp')
    if "fields" not in kwargs:
        kwargs["fields"] = ['timestamp', 'log']
    kwargs["safe"] = True
    # isinstance instead of `type(job) == list` (idiomatic, and also
    # accepts list subclasses).
    if isinstance(job, list):
        job = {"$in": job}
    res = yield self.logs(corpus).find({"_job": job}, **kwargs)
    returnD(res)
示例15: run_rss_feeds
def run_rss_feeds(self):
    """Fetch every configured RSS feed url in sequence.

    Uses self.feeds when set, otherwise resolves the urls from the
    database. Sleeps a few seconds between feeds to spread the load and
    refreshes the watchdog timeout before each fetch. Marks the task
    stopped when done; returns False via returnD if the timeout guard
    refuses to run.
    """
    if not self.__init_timeout__():
        returnD(False)
    urls = self.feeds
    if not urls:
        urls = yield getFeeds(self.db, self.channel, self.name, add_url=self.tweets_search_page)
    # (Removed the dead local counter `ct` -- it was never used.)
    for url in urls:
        # Random pause between feeds; on Python 2 the `/100` is integer
        # division, giving 3..8 second waits -- presumably py2 code,
        # TODO confirm before porting.
        yield deferredSleep(3 + int(random()*500)/100)
        self.update_timeout(extra=10)
        yield self.protocol.start(url)
    self.status = "stopped"