This article collects typical usage examples of the Python function taurus.metric_collectors.collectorsdb.engineFactory. If you are wondering what engineFactory does, how to call it, or what real-world uses of it look like, the curated examples below may help.
The sections that follow present 15 code examples of the engineFactory function, sorted by popularity by default.
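Across these examples, engineFactory() returns a process-wide singleton SQLAlchemy engine, and it is used in two recurring ways: direct one-off execution via engineFactory().execute(...) and transactional work via the engineFactory().begin() context manager. The minimal sketch below is not taken from the collectorsdb sources; the function names are illustrative, and the only reused schema name (schema.xigniteSecurity) comes from Example 8.

from taurus.metric_collectors import collectorsdb
from taurus.metric_collectors.collectorsdb import schema


def directExecuteSketch():
  # One-off statement: the singleton engine draws a connection from its pool,
  # runs the statement, and returns the connection automatically.
  result = collectorsdb.engineFactory().execute("select 1")
  return result.scalar()


def transactionalExecuteSketch(symbol):
  # Multiple related statements: begin() opens a transaction that commits on
  # normal exit from the with-block and rolls back if an exception escapes.
  with collectorsdb.engineFactory().begin() as conn:
    conn.execute(
      schema.xigniteSecurity.delete()
      .where(schema.xigniteSecurity.c.symbol == symbol))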
Example 1: updateLastEmittedNonMetricSequence
def updateLastEmittedNonMetricSequence(key, seq):
  """ Update the last emitted non-metric sequence value in the database for the
  Tweet Volume metrics

  :param str key: caller's key in schema.emittedNonMetricTracker
  :param int seq: sequence of last successfully-emitted non-metric
  """
  update = schema.emittedNonMetricTracker.update(  # pylint: disable=E1120
    ).values(
      last_seq=seq
    ).where(
      (schema.emittedNonMetricTracker.c.key == key)
    )

  result = collectorsdb.engineFactory().execute(update)

  # If update didn't find the key, then insert
  #
  # NOTE: sqlalchemy doesn't support "ON DUPLICATE KEY UPDATE" in its syntactic
  # sugar; see https://bitbucket.org/zzzeek/sqlalchemy/issue/960
  if result.rowcount == 0:
    # The row didn't exist, so create it
    collectorsdb.engineFactory().execute(
      schema.emittedNonMetricTracker.insert()  # pylint: disable=E1120
      .values(key=key, last_seq=seq))
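The update-then-insert sequence in Example 1 exists because, per the NOTE above, SQLAlchemy offered no syntactic sugar for MySQL's INSERT ... ON DUPLICATE KEY UPDATE at the time. A rough raw-SQL equivalent is sketched below; the table and column names are assumptions inferred from the schema attribute names, not verified against the actual collectorsdb schema.

import sqlalchemy

from taurus.metric_collectors import collectorsdb


def upsertLastSeqSketch(key, seq):
  # Hypothetical single-statement upsert; assumes a MySQL table named
  # emitted_non_metric_tracker with columns `key` and last_seq.
  upsert = sqlalchemy.text(
    "INSERT INTO emitted_non_metric_tracker (`key`, last_seq) "
    "VALUES (:key, :seq) "
    "ON DUPLICATE KEY UPDATE last_seq = :seq")
  collectorsdb.engineFactory().execute(upsert, key=key, seq=seq)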
Example 2: establishLastEmittedSampleDatetime
def establishLastEmittedSampleDatetime(key, aggSec):
  """ Query UTC timestamp of the last emitted sample batch; if one hasn't been
  saved yet, then synthesize one, using negative aggregation period offset
  from current time

  :param str key: caller's key in schema.emittedSampleTracker
  :param int aggSec: aggregation period in seconds

  :returns: (possibly synthesized) UTC timestamp of the last
    successfully-emitted sample batch
  :rtype: datetime.datetime
  """
  lastEmittedTimestamp = queryLastEmittedSampleDatetime(key)
  if lastEmittedTimestamp is not None:
    return lastEmittedTimestamp

  # Start at the present to avoid re-sending metric data that we may have
  # already sent to Taurus.
  lastEmittedTimestamp = (datetime.utcnow().replace(microsecond=0) -
                          timedelta(seconds=aggSec))
  collectorsdb.engineFactory().execute(
    schema.emittedSampleTracker.insert(
    ).prefix_with("IGNORE", dialect="mysql"
    ).values(key=key,
             sample_ts=lastEmittedTimestamp))

  # Query again after saving to account for mysql's loss of accuracy
  return queryLastEmittedSampleDatetime(key)
Example 3: _flagUnknownSymbolAsReported
def _flagUnknownSymbolAsReported(symbol):
  """
  Flag unknown company symbol as reported in database

  :param str symbol: symbol of the company's security (e.g., "AAPL")
  """
  ins = (schema.companySymbolFailures.insert()
         .prefix_with("IGNORE", dialect="mysql")
         .values(symbol=symbol))

  collectorsdb.engineFactory().execute(ins)

  g_log.debug("Saved unknown company symbol=%s", symbol)
Example 4: testEngineFactorySingletonPattern
def testEngineFactorySingletonPattern(self):
  # Call collectorsdb.engineFactory()
  engine = collectorsdb.engineFactory()

  # Call collectorsdb.engineFactory() again and assert singleton
  engine2 = collectorsdb.engineFactory()
  self.assertIs(engine2, engine)

  # Call collectorsdb.engineFactory() in different process, assert raises
  # AssertionError
  with self.assertRaises(AssertionError):
    multiprocessing.Pool(processes=1).apply(collectorsdb.engineFactory)
Example 5: testEngineFactorySingletonPattern
def testEngineFactorySingletonPattern(self):
  # Call collectorsdb.engineFactory()
  engine = collectorsdb.engineFactory()

  # Call collectorsdb.engineFactory() again and assert singleton
  engine2 = collectorsdb.engineFactory()
  self.assertIs(engine2, engine)

  # Call collectorsdb.engineFactory() in different process, assert new
  # instance
  originalEngineId = id(engine)
  engine3 = multiprocessing.Pool(processes=1).apply(_forkedEngineId)
  self.assertNotEqual(id(engine3), originalEngineId)
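Examples 4 and 5 appear to come from different revisions of the same test: the first expects engineFactory() to refuse to run in a forked child process (AssertionError), while the second expects the child process to build its own engine, reported back via a _forkedEngineId helper that is not shown on this page. A minimal, hypothetical per-process singleton matching the second behavior might look like the sketch below; the DSN and all names are illustrative and not the actual collectorsdb implementation.

import os

import sqlalchemy

_engine = None
_ownerPid = None


def engineFactorySketch(dsn="mysql://user:password@localhost/taurus_collectors"):
  # Reuse the engine within the creating process, but build a fresh one if
  # called from a different (e.g., forked) process, since SQLAlchemy engines
  # and their connection pools should not be shared across fork boundaries.
  global _engine, _ownerPid
  if _engine is None or _ownerPid != os.getpid():
    _engine = sqlalchemy.create_engine(dsn)
    _ownerPid = os.getpid()
  return _engine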
Example 6: _saveScreenNameFailure
def _saveScreenNameFailure(unmappedScreenName):
  """
  Save unmapped twitter handle in database

  :param unmappedScreenName: the twitter handle that is not valid anymore
  :type unmappedScreenName: string
  """
  ins = (collectorsdb.schema.twitterHandleFailures.insert()
         .prefix_with('IGNORE', dialect="mysql")
         .values(handle=unmappedScreenName))

  collectorsdb.engineFactory().execute(ins)

  g_log.info("Saved unmapped twitter handle; handle=%s", unmappedScreenName)
Example 7: updateLastEmittedSampleDatetime
def updateLastEmittedSampleDatetime(key, sampleDatetime):
  """ Update the last emitted sample timestamp value in the database for the
  News Volume metrics

  :param str key: caller's key in schema.emittedSampleTracker
  :param datetime sampleDatetime: UTC datetime of last successfully-emitted
    sample batch
  """
  update = (
    schema.emittedSampleTracker.update()  # pylint: disable=E1120
    .values(sample_ts=sampleDatetime)
    .where((schema.emittedSampleTracker.c.key == key))
  )

  collectorsdb.engineFactory().execute(update)
Example 8: _deleteSecurity
def _deleteSecurity(symbol):
  """Delete security from xignite_security table"""
  with collectorsdb.engineFactory().begin() as conn:
    conn.execute(
      schema.xigniteSecurity  # pylint: disable=E1120
      .delete()
      .where(schema.xigniteSecurity.c.symbol == symbol))
Example 9: main
def main():
  parser = argparse.ArgumentParser()
  parser.add_argument("--symbol", required=True)
  args = parser.parse_args()

  expectedAnswer = "Yes-%s" % (random.randint(1, 30),)

  with collectorsdb.engineFactory().begin() as conn:
    answer = raw_input(
      "Attention! You are about to reset the emitted status for the \"{}\""
      " stock symbol at {}.\n"
      "\n"
      "To back out immediately without making any changes, feel free to type "
      "anything but \"{}\" in the prompt below, and press return.\n"
      "\n"
      "Are you sure you want to continue? ".format(args.symbol,
                                                   str(conn.engine),
                                                   str(expectedAnswer)))

    if answer.strip() != expectedAnswer:
      print "Aborting - Wise choice, my friend. Bye."
      return 1

    deleteFromEmitted(conn, schema.emittedStockPrice, args.symbol)
    deleteFromEmitted(conn, schema.emittedStockVolume, args.symbol)
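The deleteFromEmitted helper called at the end of Example 9 is not shown on this page. A plausible sketch, assuming each emitted_* tracking table exposes a symbol column, is below; the function body is an illustration, not the actual helper.

def deleteFromEmittedSketch(conn, table, symbol):
  # Hypothetical version of the deleteFromEmitted helper used in Example 9:
  # remove all tracking rows for the given stock symbol from one of the
  # emitted_* tables, using the caller's open transaction.
  result = conn.execute(table.delete().where(table.c.symbol == symbol))
  print "Deleted %d rows from %s for symbol=%s" % (
    result.rowcount, table, symbol)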
Example 10: _purgeStaleDeletionRecords
def _purgeStaleDeletionRecords(limit):
  """ Delete stale rows in schema.twitterDeletion table

  :param limit: max records to purge per call

  :returns: a sequence of tweet_uid's of deleted schema.twitterDeletion rows
  """
  twitterDeletionSchema = collectorsdb.schema.twitterDeletion

  # NOTE: we first query the row id's to delete, so we can return them for
  # accountability and debugging
  rowsToDeleteSel = sqlalchemy.select(
    [twitterDeletionSchema.c.tweet_uid]).where(
      twitterDeletionSchema.c.created_at <
      sqlalchemy.func.date_sub(
        sqlalchemy.func.current_timestamp(),
        sqlalchemy.text("INTERVAL %i DAY" % (_DELETION_ROW_EXPIRY_DAYS,)))
    ).limit(limit)

  numDeleted = 0

  with collectorsdb.engineFactory().begin() as conn:
    rowIdsToDelete = tuple(
      str(row[0]) for row in conn.execute(rowsToDeleteSel).fetchall()
    )

    if rowIdsToDelete:
      deletion = twitterDeletionSchema.delete().where(
        twitterDeletionSchema.c.tweet_uid.in_(rowIdsToDelete))

      numDeleted = conn.execute(deletion).rowcount

  if len(rowIdsToDelete) != numDeleted:
    g_log.error("Expected to delete %d tweet deletion request rows, but "
                "actually deleted %d rows", len(rowIdsToDelete), numDeleted)

  return rowIdsToDelete
Example 11: _queryNewsVolumes
def _queryNewsVolumes(aggStartDatetime, aggStopDatetime):
  """ Query the database for the counts of security releases+headlines for each
  company that were detected during the specified time window.

  :param aggStartDatetime: inclusive start of aggregation interval as
    UTC datetime
  :param aggStopDatetime: non-inclusive upper bound of aggregation interval as
    UTC datetime

  :returns: a sparse sequence of two-tuples: (symbol, count); companies that
    have no detected news in the given aggregation period will be absent from
    the result.
  """
  headlineSel = sql.select(
    [schema.xigniteSecurityHeadline.c.symbol.label("symbol")]
    ).where(
      (schema.xigniteSecurityHeadline.c.discovered_at >= aggStartDatetime) &
      (schema.xigniteSecurityHeadline.c.discovered_at < aggStopDatetime))

  releaseSel = sql.select(
    [schema.xigniteSecurityRelease.c.symbol]
    ).where(
      (schema.xigniteSecurityRelease.c.discovered_at >= aggStartDatetime) &
      (schema.xigniteSecurityRelease.c.discovered_at < aggStopDatetime))

  allNewsUnion = sql.union_all(headlineSel, releaseSel)

  aggSel = sql.select(
    ["symbol", sql.func.count("symbol").label("sym_count")]
    ).select_from(allNewsUnion.alias("union_of_tables")
    ).group_by("symbol")

  return collectorsdb.engineFactory().execute(aggSel).fetchall()
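For reference, the expression built in Example 11 compiles to roughly the following SQL. This is an approximate rendering; the physical table names are assumptions derived from the schema attribute names.

# Approximate SQL produced by the SQLAlchemy expression in Example 11
# (table names are assumptions, not verified against the schema module):
_APPROX_NEWS_VOLUME_SQL = """
SELECT symbol, COUNT(symbol) AS sym_count
FROM (
  SELECT symbol FROM xignite_security_headline
  WHERE discovered_at >= :agg_start AND discovered_at < :agg_stop
  UNION ALL
  SELECT symbol FROM xignite_security_release
  WHERE discovered_at >= :agg_start AND discovered_at < :agg_stop
) AS union_of_tables
GROUP BY symbol
"""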
Example 12: _purgeTweetsSlatedForDeletion
def _purgeTweetsSlatedForDeletion(limit):
  """ Purge tweets that are slated for deletion as indicated by entries in the
  schema.twitterDeletion table

  :param limit: max records to purge per call

  :returns: a sequence of id's of deleted tweets
  """
  twitterTweetsSchema = collectorsdb.schema.twitterTweets
  twitterDeletionSchema = collectorsdb.schema.twitterDeletion

  # NOTE: we first query the row id's to delete, so we can return them for
  # accountability and debugging
  rowsToDeleteSel = sqlalchemy.select([twitterTweetsSchema.c.uid]).where(
    twitterTweetsSchema.c.uid.in_(
      sqlalchemy.select([twitterDeletionSchema.c.tweet_uid]))).limit(limit)

  numDeleted = 0

  with collectorsdb.engineFactory().begin() as conn:
    rowIdsToDelete = tuple(
      str(row[0]) for row in conn.execute(rowsToDeleteSel).fetchall()
    )

    if rowIdsToDelete:
      tweetDeletion = twitterTweetsSchema.delete().where(
        twitterTweetsSchema.c.uid.in_(rowIdsToDelete))

      numDeleted = conn.execute(tweetDeletion).rowcount

  if len(rowIdsToDelete) != numDeleted:
    g_log.error("Expected to delete %d tweets, but actually deleted %d tweets",
                len(rowIdsToDelete), numDeleted)

  return rowIdsToDelete
Example 13: main
def main():
  """
  NOTE: main also serves as entry point for "console script" generated by setup
  """
  logging_support.LoggingSupport().initTool()

  try:
    options = _parseArgs()

    days = options["days"]

    g_log.info("Purging records from table=%s older than numDays=%s",
               collectorsdb.schema.twitterTweets, days)

    twitterTweetsSchema = collectorsdb.schema.twitterTweets

    query = twitterTweetsSchema.delete().where(
      twitterTweetsSchema.c.created_at <
      sqlalchemy.func.date_sub(
        sqlalchemy.func.utc_timestamp(),
        sqlalchemy.text("INTERVAL %i DAY" % (days,)))
    )

    with collectorsdb.engineFactory().begin() as conn:
      result = conn.execute(query)

    g_log.info("Purged numRows=%s from table=%s",
               result.rowcount, collectorsdb.schema.twitterTweets)
  except SystemExit as e:
    if e.code != 0:
      g_log.exception("Failed!")
      raise
  except Exception:
    g_log.exception("Failed!")
    raise
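The _parseArgs helper called in Example 13 is not shown here. A minimal sketch that would satisfy the options["days"] lookup is below; the flag name and default value are assumptions for illustration only.

import argparse


def _parseArgsSketch():
  # Hypothetical argument parser matching the options["days"] access in
  # Example 13; the default of 90 days is illustrative only.
  parser = argparse.ArgumentParser(
    description="Purge old rows from the twitter tweets table")
  parser.add_argument("--days", type=int, default=90,
                      help="delete tweets older than this many days")
  args = parser.parse_args()
  return {"days": args.days}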
Example 14: testEmittedSampleDatetime
def testEmittedSampleDatetime(self):
  key = "bogus-test-key"

  # Establish initial sample datetime
  result = metric_utils.establishLastEmittedSampleDatetime(key, 300)

  # Cleanup
  self.addCleanup(collectorsdb.engineFactory().execute,
                  schema.emittedSampleTracker.delete().where(
                    (schema.emittedSampleTracker.c.key == key)))

  self.assertIsInstance(result, datetime)

  # Update latest emitted sample datetime to now
  now = datetime.utcnow().replace(microsecond=0)
  metric_utils.updateLastEmittedSampleDatetime(key, now)

  # Verify that it was updated
  lastEmittedSample = metric_utils.queryLastEmittedSampleDatetime(key)

  self.assertEqual(now, lastEmittedSample)
  self.assertLess(result, lastEmittedSample)
Example 15: testTransientErrorRetryDecorator
def testTransientErrorRetryDecorator(self):
  # Setup proxy. We'll patch config later, so we need to cache the values
  # so that the original proxy may be restarted with the original params
  config = collectorsdb.CollectorsDbConfig()

  originalHost = config.get("repository", "host")
  originalPort = config.getint("repository", "port")

  def _startProxy():
    p = startProxy(originalHost, originalPort, 6033)
    p.next()
    return p

  proxy = _startProxy()
  self.addCleanup(proxy.send, "kill")

  # Patch collectorsdb config with local proxy
  with ConfigAttributePatch(
      config.CONFIG_NAME,
      config.baseConfigDir,
      (("repository", "host", "127.0.0.1"),
       ("repository", "port", "6033"))):

    # Force refresh of engine singleton
    collectorsdb.resetEngineSingleton()
    engine = collectorsdb.engineFactory()

    # First, make sure valid query returns expected results
    res = collectorsdb.retryOnTransientErrors(engine.execute)("select 1")
    self.assertEqual(res.scalar(), 1)

    @collectorsdb.retryOnTransientErrors
    def _killProxyTryRestartProxyAndTryAgain(n=[]):
      if not n:
        # Kill the proxy on first attempt
        proxy.send("kill")
        proxy.next()
        try:
          engine.execute("select 1")
          self.fail("Proxy did not terminate as expected...")
        except sqlalchemy.exc.OperationalError:
          pass
        n.append(None)
      elif len(n) == 1:
        # Restore proxy in second attempt
        newProxy = _startProxy()
        self.addCleanup(newProxy.send, "kill")
        n.append(None)

      res = engine.execute("select 2")

      return res

    # Try again w/ retry decorator
    result = _killProxyTryRestartProxyAndTryAgain()

    # Verify that the expected value is eventually returned
    self.assertEqual(result.scalar(), 2)
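collectorsdb.retryOnTransientErrors itself is not shown on this page. The sketch below is a simplified stand-in for what the test exercises: retrying a callable when the database raises an OperationalError, with a short pause between attempts. The retry count and delay are illustrative assumptions, not the actual implementation.

import functools
import time

import sqlalchemy.exc


def retryOnTransientErrorsSketch(func):
  # Retry the wrapped callable a handful of times when the database raises an
  # OperationalError (e.g., connection refused or lost mid-query), sleeping
  # briefly between attempts; re-raise the last error if all attempts fail.
  @functools.wraps(func)
  def wrapper(*args, **kwargs):
    lastError = None
    for _attempt in range(10):
      try:
        return func(*args, **kwargs)
      except sqlalchemy.exc.OperationalError as e:
        lastError = e
        time.sleep(0.5)
    raise lastError
  return wrapper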