本文整理汇总了Python中whisper.create函数的典型用法代码示例。如果您正苦于以下问题:Python create函数的具体用法?Python create怎么用?Python create使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了create函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: benchmark_create_update_fetch
def benchmark_create_update_fetch():
path, archive_list, tear_down = set_up_create()
# start timer
start_time = time.clock()
for i in range(100):
whisper.create(path, archive_list)
seconds_ago = 3500
current_value = 0.5
increment = 0.2
now = time.time()
# file_update closes the file so we have to reopen every time
for i in range(seconds_ago):
whisper.update(path, current_value, now - seconds_ago + i)
current_value += increment
from_time = now - seconds_ago
until_time = from_time + 1000
whisper.fetch(path, from_time, until_time)
tear_down()
# end timer
end_time = time.clock()
elapsed_time = end_time - start_time
print "Executed 100 iterations in %ss (%i ns/op)" % (elapsed_time, (elapsed_time * 1000 * 1000 * 1000) / 100)
示例2: test_setAggregation
def test_setAggregation(self):
    """Create a db, change aggregation, xFilesFactor, then use info() to validate"""
    retention = [(1, 60), (60, 60)]
    # create a new db with a valid configuration
    whisper.create(self.db, retention)
    # exercise every AggregationMethod against a spread of xFilesFactors
    for method in whisper.aggregationMethods:
        for factor in (0.0, 0.2, 0.4, 0.7, 0.75, 1.0):
            # snapshot before touching anything
            before = whisper.info(self.db)
            # call without the optional xFilesFactor argument
            whisper.setAggregationMethod(self.db, method)
            after = whisper.info(self.db)
            # the xFilesFactor must be untouched by the two-arg call...
            self.assertEqual(before['xFilesFactor'], after['xFilesFactor'])
            # ...while the aggregation method must have been applied
            self.assertEqual(method, after['aggregationMethod'])
            # now pass the optional xFilesFactor explicitly
            whisper.setAggregationMethod(self.db, method, factor)
            final = whisper.info(self.db)
            # round-trip the factor through a 32-bit float, otherwise
            # AssertionError: 0.20000000298023224 != 0.2
            expected_factor = struct.unpack("!f", struct.pack("!f", factor))[0]
            self.assertEqual(final['xFilesFactor'], expected_factor)
            # re-check the aggregation method since we've been poking at
            # packed on-disk values with seek()
            self.assertEqual(method, final['aggregationMethod'])
    self._removedb()
示例3: test_render_view
def test_render_view(self):
    """Render an unknown then a populated target in JSON and PNG formats."""
    url = reverse('graphite.render.views.renderView')
    # unknown target rendered as JSON comes back as an empty list
    response = self.client.get(url, {'target': 'test', 'format': 'json'})
    self.assertEqual(json.loads(response.content), [])
    for header in ('Expires', 'Last-Modified', 'Cache-Control'):
        self.assertTrue(response.has_header(header))
    # without an explicit format the view falls back to a PNG image
    response = self.client.get(url, {'target': 'test'})
    self.assertEqual(response['Content-Type'], 'image/png')
    for header in ('Expires', 'Last-Modified', 'Cache-Control'):
        self.assertTrue(response.has_header(header))
    self.addCleanup(self.wipe_whisper)
    # seed a whisper db with three points ending at "now"
    whisper.create(self.db, [(1, 60)])
    now = int(time.time())
    for offset, value in ((2, 0.5), (1, 0.4), (0, 0.6)):
        whisper.update(self.db, value, now - offset)
    response = self.client.get(url, {'target': 'test', 'format': 'json'})
    payload = json.loads(response.content)
    tail = payload[0]['datapoints'][-4:]
    self.assertEqual(
        tail, [[None, now - 3], [0.5, now - 2], [0.4, now - 1], [0.6, now]])
示例4: test_update_many_excess
def test_update_many_excess(self):
    """update_many with more points than the archive holds keeps only the newest."""
    # start from an empty db
    wsp = "test_update_many_excess.wsp"
    self.addCleanup(self._remove, wsp)
    archive_len = 3
    archive_step = 1
    whisper.create(wsp, [(archive_step, archive_len)])
    # build one more point than the archive can hold
    overflow = 1
    total_points = archive_len + overflow
    test_now = int(time.time())
    first_ts = test_now - total_points + archive_step
    samples = [(first_ts + offset, random.random() * 10)
               for offset in range(total_points)]
    # write the oversized batch
    whisper.update_many(wsp, samples, now=test_now)
    # only the most recent input points (those at the tail) survive
    time_info = whisper.fetch(wsp, 0, now=test_now)[0]
    expected = (samples[-archive_len][0],
                samples[-1][0] + archive_step,  # untilInterval = newest + step
                archive_step)
    self.assertEqual(time_info, expected)
示例5: _populate_data
def _populate_data(self):
    """Create self.db and backfill it with self._test_data, newest point at now."""
    self.db = os.path.join(settings.WHISPER_DIR, 'test.wsp')
    whisper.create(self.db, [(1, 60)])
    now = int(time.time())
    # last element of _test_data lands at `now`, earlier ones step backwards
    offset = 0
    for value in reversed(self._test_data):
        whisper.update(self.db, value, now - offset)
        offset += 1
    self.ts = now
示例6: test_file_fetch_edge_cases
def test_file_fetch_edge_cases(self):
    """
    Test some of the edge cases in file_fetch() that should return
    None or raise an exception
    """
    whisper.create(self.filename, [(1, 60)])
    with open(self.filename, 'rb') as fh:
        # a from-time after the until-time must raise InvalidTimeInterval
        template = "Invalid time interval: from time '{0}' is after until time '{1}'"
        until_ts = 0
        from_ts = int(time.time()) + 100
        expected_exc = whisper.InvalidTimeInterval(template.format(from_ts, until_ts))
        with AssertRaisesException(expected_exc):
            whisper.file_fetch(fh, fromTime=from_ts, untilTime=until_ts)
        # a window entirely in the future (fromTime > now) yields None
        self.assertIsNone(
            whisper.file_fetch(fh, fromTime=int(time.time()) + 100,
                               untilTime=int(time.time()) + 200),
        )
        # a window entirely beyond maxRetention also yields None
        headers = whisper.info(self.filename)
        ancient = int(time.time()) - headers['maxRetention'] - 200
        self.assertIsNone(
            whisper.file_fetch(fh, fromTime=ancient - 1, untilTime=ancient),
        )
        # an untilTime past `now` gets clamped down to `now`
        now = int(time.time())
        self.assertEqual(
            whisper.file_fetch(fh, fromTime=now, untilTime=now + 200, now=now),
            ((now + 1, now + 2, 1), [None]),
        )
示例7: createWhisperFile
def createWhisperFile(metric, dbFilePath, dbFileExists):
    # Create the on-disk whisper database for `metric` at `dbFilePath`,
    # unless `dbFileExists` says it is already there.
    # Returns False on creation failure, otherwise returns None implicitly.
    # NOTE: uses Python 2 `except Exception, e` syntax — this module is Py2-only.
    if not dbFileExists:
        archiveConfig = None
        xFilesFactor, aggregationMethod = None, None
        # first storage schema whose pattern matches the metric supplies
        # the archive layout
        for schema in schemas:
            if schema.matches(metric):
                log.creates('new metric %s matched schema %s' % (metric, schema.name))
                archiveConfig = [archive.getTuple() for archive in schema.archives]
                break
        # first matching aggregation schema supplies xFilesFactor and
        # the aggregation method (both stay None when nothing matches)
        for schema in agg_schemas:
            if schema.matches(metric):
                log.creates('new metric %s matched aggregation schema %s' % (metric, schema.name))
                xFilesFactor, aggregationMethod = schema.archives
                break
        if not archiveConfig:
            raise Exception("No storage schema matched the metric '%s', check your storage-schemas.conf file." % metric)
        dbDir = dirname(dbFilePath)
        try:
            os.makedirs(dbDir)
        except OSError as e:
            # EEXIST just means the directory is already there; anything
            # else is logged but creation is still attempted below
            if e.errno != errno.EEXIST:
                log.err("%s" % e)
        log.creates("creating database file %s (archive=%s xff=%s agg=%s)" %
                    (dbFilePath, archiveConfig, xFilesFactor, aggregationMethod))
        try:
            whisper.create(dbFilePath, archiveConfig, xFilesFactor, aggregationMethod, settings.WHISPER_SPARSE_CREATE, settings.WHISPER_FALLOCATE_CREATE)
            instrumentation.increment('creates')
        except Exception, e:
            log.err("Error creating %s: %s" % (dbFilePath, e))
            return False
示例8: test_fetch
def test_fetch(self):
    """fetch info from database """
    # a db that doesn't exist must raise
    with self.assertRaises(Exception):
        whisper.fetch("this_db_does_not_exist", 0)
    # archives: second, minute, hour, day
    retention = [(1, 60), (60, 60), (3600, 24), (86400, 365)]
    whisper.create(self.db, retention)
    # a reversed (from > until) time range is invalid
    with self.assertRaises(whisper.InvalidTimeInterval):
        whisper.fetch(self.db, time.time(), time.time() - 6000)
    time_info, values = whisper.fetch(self.db, 0)
    coarsest_step, coarsest_points = retention[-1]
    # the returned window spans the whole coarsest archive
    self.assertEqual(time_info[1] - time_info[0],
                     coarsest_step * coarsest_points)
    # one value slot per point in that archive
    self.assertEqual(len(values), coarsest_points)
    # and the step matches its precision
    self.assertEqual(time_info[2], coarsest_step)
    self._removedb()
示例9: test_single_metric
def test_single_metric(self):
    """Round-trip one whisper metric through the importer and verify it."""
    xfilesfactor = 0.5
    aggregation_method = "last"
    # These retentions are such that every other point is present in both
    # archives. Test validates that duplicate points get inserted only once.
    retentions = [(1, 10), (2, 10)]
    high_precision_duration = retentions[0][0] * retentions[0][1]
    low_precision_duration = retentions[1][0] * retentions[1][1]
    now = int(time.time())
    time_from, time_to = now - low_precision_duration, now
    points = [(float(t), float(now-t)) for t in xrange(time_from, time_to)]
    metric_name = "test_metric"
    metric_path = os_path.join(self.tempdir, metric_name + ".wsp")
    whisper.create(metric_path, retentions, xfilesfactor, aggregation_method)
    whisper.update_many(metric_path, points)
    self._call_main()
    metric = self.accessor.get_metric(metric_name)
    self.assertTrue(metric)
    # BUG FIX: the original asserted metric.name == metric.name (always true,
    # since `metric` had been rebound to the fetched object); compare the
    # fetched name against the name we created instead.
    self.assertEqual(metric.name, metric_name)
    self.assertEqual(metric.aggregator.carbon_name, aggregation_method)
    self.assertEqual(metric.carbon_xfilesfactor, xfilesfactor)
    self.assertEqual(metric.retention.as_string, "10*1s:10*2s")
    points_again = list(self.accessor.fetch_points(
        metric, time_from, time_to, metric.retention[0]))
    # only the high-precision tail of the input should come back
    self.assertEqual(points[-high_precision_duration:], points_again)
示例10: create_db
def create_db(self, metric):
    # Create a whisper database for `metric` using the first matching
    # storage schema (archive layout) and aggregation schema (xFilesFactor,
    # aggregation method).
    # NOTE(review): `dbFilePath` is never assigned in this method and is not
    # a parameter — presumably it resolves to an enclosing/global name, or
    # this is a latent NameError; confirm against the caller.
    archiveConfig = None
    xFilesFactor, aggregationMethod = None, None
    # first matching storage schema wins
    for schema in schemas:
        if schema.matches(metric):
            log.creates('new metric %s matched schema %s' % (metric, schema.name))
            archiveConfig = [archive.getTuple() for archive in schema.archives]
            break
    # first matching aggregation schema wins; both values stay None otherwise
    for schema in agg_schemas:
        if schema.matches(metric):
            log.creates('new metric %s matched aggregation schema %s' % (metric, schema.name))
            xFilesFactor, aggregationMethod = schema.archives
            break
    if not archiveConfig:
        raise Exception("No storage schema matched the metric '%s', check your storage-schemas.conf file." % metric)
    dbDir = dirname(dbFilePath)
    # NOTE(review): shells out to mkdir; os.makedirs(dbDir, 0755) would avoid
    # quoting problems with unusual directory names
    os.system("mkdir -p -m 755 '%s'" % dbDir)
    log.creates("creating database file %s (archive=%s xff=%s agg=%s)" %
                (dbFilePath, archiveConfig, xFilesFactor, aggregationMethod))
    whisper.create(dbFilePath, archiveConfig, xFilesFactor, aggregationMethod, settings.WHISPER_SPARSE_CREATE)
    os.chmod(dbFilePath, 0755)  # Python 2 octal literal
示例11: test_normal
def test_normal(self):
    """info() works with header caching enabled; flag is reset afterwards."""
    whisper.create(self.filename, [(1, 60), (60, 60)])
    whisper.CACHE_HEADERS = True
    # second call exercises the cached-header path
    for _ in range(2):
        whisper.info(self.filename)
    whisper.CACHE_HEADERS = False
示例12: test_fetch
def test_fetch(self):
    """
    fetch info from database
    """
    # assertRaises(IOError) is used directly rather than AssertRaisesException
    # because of a super obscure python2.6 bug: inside a context manager the
    # 2nd argument of __exit__ arrives as a tuple wrapping the IOError.
    # Minimal reproducer: http://git.io/cKz30g
    with self.assertRaises(IOError):
        # a db that doesn't exist
        whisper.fetch("this_db_does_not_exist", 0)
    # archives: second, minute, hour, day
    retention = [(1, 60), (60, 60), (3600, 24), (86400, 365)]
    whisper.create(self.filename, retention)
    # a reversed (from > until) range must raise with the exact message
    now = int(time.time())
    past = now - 6000
    msg = "Invalid time interval: from time '{0}' is after until time '{1}'"
    with AssertRaisesException(whisper.InvalidTimeInterval(msg.format(now, past))):
        whisper.fetch(self.filename, now, past)
    time_info, values = whisper.fetch(self.filename, 0)
    coarsest_step, coarsest_points = retention[-1]
    # the window spans the whole coarsest archive
    self.assertEqual(time_info[1] - time_info[0],
                     coarsest_step * coarsest_points)
    # one slot per point of that archive
    self.assertEqual(len(values), coarsest_points)
    # and the step matches its precision
    self.assertEqual(time_info[2], coarsest_step)
示例13: _create_dbs
def _create_dbs(self):
    """Create two fixture whisper files (nested paths) under WHISPER_DIR."""
    fixtures = (
        ('test', 'foo.wsp'),
        ('test', 'bar', 'baz.wsp'),
    )
    for parts in fixtures:
        path = os.path.join(WHISPER_DIR, *parts)
        os.makedirs(os.path.dirname(path))
        whisper.create(path, [(1, 60)])
示例14: test_00_create_empty_whisper
def test_00_create_empty_whisper(self):
    """
    Create a whisper file with one archive of 5 points where each point
    covers 60 second, and default xFilesFactor and aggregationMethod.
    """
    self.assertFalse(os.path.exists(FILENAME))
    whisper.create(FILENAME, [(SECONDS_PER_POINT, NUMBER_OF_POINTS)])
    self.assertTrue(os.path.exists(FILENAME))
示例15: record_metering_data
def record_metering_data(self, data):
    """Record one ceilometer sample.

    Writes the counter volume into a per-resource/per-meter whisper file
    (creating it on first sight) and registers the resource and meter in
    the sqlite catalog if they are not already present.
    """
    record = copy.deepcopy(data)
    # truncate to whole minutes — whisper stores one point per interval
    timestamp = record["timestamp"].replace(second=0, microsecond=0)
    timestamp = int((timestamp - datetime.datetime(1970, 1, 1)).total_seconds())
    value = float(record["counter_volume"])
    record_path = (
        env_variables["whisper_path"] + data["resource_id"] + "_" + data["counter_name"].replace(".", "_") + ".wsp"
    )
    if not os.path.isfile(record_path):
        # NOTE(review): `archieve_list` (sic) must be defined at module
        # scope — confirm; the misspelling is preserved to match it.
        whisper.create(record_path, archieve_list)
    whisper.update(record_path, value, timestamp)
    # add resource & meter to sqlite db
    conn = sqlite3.connect(env_variables["sql_db_path"])
    # BUG FIX: close the connection even when an execute/commit raises;
    # previously an exception leaked the sqlite handle.
    try:
        c = conn.cursor()
        c.execute("select count(*) from resources where resource_id=?", (data["resource_id"],))
        r = c.fetchone()[0]
        if r == 0:
            c.execute(
                "insert into resources (resource_id, user_id, project_id, source_id, resource_metadata)"
                + "values (?,?,?,?,?)",
                (
                    data["resource_id"],
                    data["user_id"],
                    data["project_id"],
                    data["source"],
                    json.dumps(data["resource_metadata"]),
                ),
            )
        c.execute(
            "select count(*) from meters where name=? and resource_id=?", (data["counter_name"], data["resource_id"])
        )
        r = c.fetchone()[0]
        if r == 0:
            c.execute(
                "insert into meters (name, type, unit, resource_id, project_id, user_id, source)"
                + "values (?,?,?,?,?,?,?)",
                (
                    data["counter_name"],
                    data["counter_type"],
                    data["counter_unit"],
                    data["resource_id"],
                    data["project_id"],
                    data["user_id"],
                    data["source"],
                ),
            )
        conn.commit()
    finally:
        conn.close()