This page collects typical usage examples of the Python function util.run_benchmark. If you are unsure what run_benchmark does or how to call it, the code examples selected below should help.
The following shows 15 code examples of the run_benchmark function, sorted by popularity by default.
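All of the examples below appear to come from standalone benchmark scripts in the style of the old CPython/Unladen Swallow benchmark suite, where a shared util module supplies option handling and result reporting. That module is not reproduced on this page. If you want to run a snippet on its own, the following minimal stand-in mirrors the call pattern used below; it is a hypothetical sketch only, and the flag names and reporting logic are assumptions (the real helper also supports profiling, geometric-mean output, and passing a timer to benchmark functions that expect one).

# Hypothetical stand-in for the suite's util module (sketch only; the real
# helper does more: profiling, geometric-mean reporting, timer injection).
import optparse


def add_standard_options_to(parser):
    # Assumed flag names; the real suite may use different option names.
    parser.add_option("-n", "--num_runs", type="int", default=5,
                      dest="num_runs",
                      help="Number of times to run the benchmark.")


def run_benchmark(options, num_runs, bench_func, *args):
    # bench_func is expected to return a list of per-iteration timings.
    # Benchmark functions written as bench_func(count, timer) would need a
    # timer (e.g. timeit.default_timer) passed in explicitly with this sketch.
    times = bench_func(num_runs, *args)
    for t in times:
        print(t)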
Example 1: run
def run(num_runs=1, geo_mean=True):
    # Get all our IO over with early.
    data_dir = os.path.join(os.path.dirname(__file__), "data")
    spec_filename = os.path.join(data_dir, "html5lib_spec.html")
    with open(spec_filename) as spec_fh:
        spec_data = io.StringIO(spec_fh.read())

    util.run_benchmark(geo_mean, num_runs, test_html5lib, spec_data)
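Note that this example (like Example 14 below) uses an older calling convention in which the first argument to run_benchmark is a geo_mean flag rather than an options object. A hypothetical driver for it could be as simple as:

# Hypothetical driver; test_html5lib is defined elsewhere in the benchmark
# and is assumed to accept (num_runs, spec_data) and return a list of timings.
if __name__ == "__main__":
    run(num_runs=5, geo_mean=False)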
Example 2: entry_point
def entry_point(argv):
    import optparse
    import util

    def parse_depths(option, opt_str, value, parser):
        parser.values.depths = [v for v in value.split(',') if v]

    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the garbage collector benchmark")
    util.add_standard_options_to(parser)
    parser.add_option('--threads', default=0, action="store",
                      help="provide number of threads (default 1)")
    parser.add_option('--depths', default=DEFAULT_DEPTHS, type="string",
                      action="callback", callback=parse_depths,
                      help='tree depths')
    parser.add_option('--debug', default=False, action='store_true',
                      help="enable debugging")
    options, args = parser.parse_args(argv)
    util.run_benchmark(options, options.num_runs, main,
                       options.depths, options.threads, options.debug)
Example 3: test_list_unpacking
def test_list_unpacking(iterations, timer):
    x = list(range(10))
    return do_unpacking(iterations, timer, x)


def test_all(iterations, timer):
    tuple_data = test_tuple_unpacking(iterations, timer)
    list_data = test_list_unpacking(iterations, timer)
    return [x + y for (x, y) in zip(tuple_data, list_data)]


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options] [test]",
        description=("Test the performance of sequence unpacking."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    tests = {"tuple": test_tuple_unpacking, "list": test_list_unpacking}
    if len(args) > 1:
        parser.error("Can only specify one test")
    elif len(args) == 1:
        func = tests.get(args[0])
        if func is None:
            parser.error("Invalid test name")
        util.run_benchmark(options, options.num_runs, func)
    else:
        util.run_benchmark(options, options.num_runs, test_all)
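do_unpacking and test_tuple_unpacking are defined earlier in the same benchmark file and are not reproduced on this page. As a rough idea of the shape of do_unpacking, here is a hypothetical sketch; the real version repeats the unpacking statement many more times to amplify the measured cost:

def do_unpacking(iterations, timer, to_unpack):
    # Hypothetical sketch: time repeated 10-way unpacking of `to_unpack`.
    times = []
    for _ in range(iterations):
        t0 = timer()
        for _ in range(1000):
            a, b, c, d, e, f, g, h, i, j = to_unpack
        t1 = timer()
        times.append(t1 - t0)
    return times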
Example 4: test_mako
% endfor
</tr>
% endfor
</table>
""")


def test_mako(count, timer):
    table = [xrange(150) for _ in xrange(150)]

    # Warm up Mako.
    MAKO_TMPL.render(table=table)
    MAKO_TMPL.render(table=table)

    times = []
    for _ in xrange(count):
        t0 = timer()
        MAKO_TMPL.render(table=table)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of Mako templates."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()
    util.run_benchmark(options, options.num_runs, test_mako)
Example 5: RuntimeError
if not args:
    bench_func = bench_parse
elif args[0] not in benchmarks:
    raise RuntimeError("invalid benchmark requested")
else:
    bench_func = globals()['bench_%s' % args[0]]

if options.no_accelerator and sys.version_info >= (3, 3):
    # prevent C accelerator from being used in 3.3
    sys.modules['_elementtree'] = None
    import xml.etree.ElementTree as et
    if et.SubElement.__module__ != 'xml.etree.ElementTree':
        raise RuntimeError("Unexpected C accelerator for ElementTree")

try:
    from importlib import import_module
except ImportError:
    def import_module(module_name):
        __import__(module_name)
        return sys.modules[module_name]

try:
    etree_module = import_module(options.etree_module)
except ImportError:
    if options.etree_module != default_etmodule:
        raise
    etree_module = import_module(fallback_etmodule)

util.run_benchmark(options, options.num_runs,
                   run_etree_benchmark, etree_module, bench_func)
Example 6: test_richards
# Python imports
import optparse
import time

# Local imports
import richards
import util


def test_richards(iterations):
    # Warm-up
    r = richards.Richards()
    r.run(iterations=2)

    times = []
    for _ in xrange(iterations):
        t0 = time.time()
        r.run(iterations=1)
        t1 = time.time()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the Richards benchmark")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()
    util.run_benchmark(options, options.num_runs, test_richards)
Example 7: time
    # train it with some patterns
    n.train(pat, 5000)
    # test it
    #n.test(pat)


def time(fn, *args):
    import time, traceback
    begin = time.time()
    result = fn(*args)
    end = time.time()
    return result, end - begin


def test_bpnn(iterations):
    times = []
    for _ in range(iterations):
        result, t = time(demo)
        times.append(t)
    return times


main = test_bpnn

if __name__ == "__main__":
    import optparse
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of a neural network."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()
    util.run_benchmark(options, options.num_runs, test_bpnn)
Example 8: runtest
import bigtable
# bummer, timeit module is stupid
from bigtable import test_python_cstringio, test_spitfire_o4, test_spitfire


def runtest(n, benchmark):
    times = []
    for i in range(n):
        sys.stdout = StringIO()
        bigtable.run([benchmark], 100)
        times.append(float(sys.stdout.getvalue().split(" ")[-2]))
        sys.stdout = sys.__stdout__
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description="Test the performance of the spitfire benchmark"
    )
    parser.add_option(
        "--benchmark",
        type="choice",
        choices=["python_cstringio", "spitfire_o4"],
        default="spitfire_o4",
        help="choose between cstringio and spitfire_o4",
    )
    util.add_standard_options_to(parser)
    options, args = parser.parse_args(sys.argv)
    util.run_benchmark(options, options.num_runs, runtest, options.benchmark)
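The snippet above captures bigtable's printed timing by swapping sys.stdout by hand. On Python 3 the same capture can be written more defensively with contextlib.redirect_stdout; this is an alternative sketch, not part of the original benchmark:

import io
from contextlib import redirect_stdout

def run_once(benchmark):
    # Capture everything bigtable.run() prints, then pull out the timing,
    # which is the second-to-last whitespace-separated token.
    buf = io.StringIO()
    with redirect_stdout(buf):
        bigtable.run([benchmark], 100)
    return float(buf.getvalue().split(" ")[-2])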
Example 9: test_regex_effbot
    re.search(regexs[id], string_tables[n][id])
    re.search(regexs[id], string_tables[n][id])


def test_regex_effbot(iterations):
    sizes = init_benchmarks()

    # Warm up.
    for size in sizes:
        run_benchmarks(size)

    times = []
    for i in xrange(iterations):
        t0 = time.time()
        for size in sizes:
            run_benchmarks(size)
        t1 = time.time()
        times.append(t1 - t0)
    return times


if __name__ == '__main__':
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of regexps using Fredrik Lundh's "
                     "benchmarks."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()
    util.run_benchmark(options, options.num_runs, test_regex_effbot)
Example 10: test_regex_compile
        bm_regex_v8.test_regex_v8(1)
    finally:
        re.compile = real_compile
        re.search = real_search
        re.sub = real_sub
    return regexes


def test_regex_compile(count):
    re._cache = EmptyCache()
    regexes = capture_regexes()

    times = []
    for _ in xrange(count):
        t0 = time.time()
        for regex, flags in regexes:
            re.compile(regex, flags)
        t1 = time.time()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test regex compilation performance"))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()
    util.run_benchmark(options, options.num_runs, test_regex_compile)
Example 11: test_django
<tr>{% for col in row %}<td>{{ col|escape }}</td>{% endfor %}</tr>
{% endfor %}
</table>
""")


def test_django(count, timer):
    table = [xrange(150) for _ in xrange(150)]
    context = Context({"table": table})

    # Warm up Django.
    DJANGO_TMPL.render(context)
    DJANGO_TMPL.render(context)

    times = []
    for _ in xrange(count):
        t0 = timer()
        data = DJANGO_TMPL.render(context)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of Django templates."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()
    util.run_benchmark(options, options.num_runs, test_django)
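The snippet assumes DJANGO_TMPL and the Context/Template imports were set up earlier in the file; the top of the template is not reproduced here. If you want to run something similar against a recent Django version outside a project, template rendering needs an engine configured first. A hypothetical standalone setup might look like this:

# Hypothetical setup for a recent Django; the original benchmark predates
# the need to configure a template engine explicitly.
import django
from django.conf import settings

settings.configure(TEMPLATES=[
    {"BACKEND": "django.template.backends.django.DjangoTemplates"},
])
django.setup()

from django.template import Context, Template

# Placeholder template; the benchmark's full table template is not shown here.
DJANGO_TMPL = Template("<table>{% for row in table %}...{% endfor %}</table>")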
Example 12: make_http_server
    host, port = make_http_server(loop, make_application())
    url = "http://%s:%s/" % (host, port)
    times = []

    @coroutine
    def main():
        client = AsyncHTTPClient()
        for i in xrange(count):
            t0 = timer()
            futures = [client.fetch(url) for j in xrange(CONCURRENCY)]
            for fut in futures:
                resp = yield fut
                buf = resp.buffer
                buf.seek(0, 2)
                assert buf.tell() == len(CHUNK) * NCHUNKS
            t1 = timer()
            times.append(t1 - t0)

    loop.run_sync(main)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options]",
        description=("Test the performance of HTTP requests with Tornado."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()
    util.run_benchmark(options, options.num_runs, test_tornado)
Example 13: timer
        for thread in threads:
            thread.join()
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [options] benchmark_name",
        description="Test the performance of Python's threads.")
    parser.add_option("--num_threads", action="store", type="int", default=2,
                      dest="num_threads", help="Number of threads to test.")
    parser.add_option("--check_interval", action="store", type="int",
                      default=sys.getcheckinterval(),
                      dest="check_interval",
                      help="Value to pass to sys.setcheckinterval().")
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    if len(args) != 1:
        parser.error("incorrect number of arguments")
    bm_name = args[0].lower()
    func = globals().get("test_" + bm_name)
    if not func:
        parser.error("unknown benchmark: %s" % bm_name)
    sys.setcheckinterval(options.check_interval)
    util.run_benchmark(options, options.num_runs, func, options.num_threads)
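sys.getcheckinterval() and sys.setcheckinterval() exist only on older Pythons: they were deprecated in Python 3.2 and removed in Python 3.9. If you adapt this script to Python 3, the closest knob is the GIL switch interval, for example:

import sys

# Python 3 counterpart of setcheckinterval: the thread switch interval,
# expressed in seconds rather than a bytecode-instruction count.
sys.setswitchinterval(0.005)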
Example 14: run
def run(geo_mean, num_runs):
    return util.run_benchmark(geo_mean, num_runs, test_regex_effbot)
Example 15: timer
        json.loads(json_dict_group)
        json.loads(json_dict_group)
        json.loads(json_dict_group)
        json.loads(json_dict_group)
        json.loads(json_dict_group)
        json.loads(json_dict_group)
        t1 = timer()
        times.append(t1 - t0)
    return times


if __name__ == "__main__":
    parser = optparse.OptionParser(
        usage="%prog [json_dump|json_load] [options]",
        description=("Test the performance of JSON (de)serializing."))
    util.add_standard_options_to(parser)
    options, args = parser.parse_args()

    benchmarks = ["json_dump", "json_load"]
    for bench_name in benchmarks:
        if bench_name in args:
            benchmark = globals()["test_" + bench_name]
            break
    else:
        raise RuntimeError("Need to specify one of %s" % benchmarks)

    num_obj_copies = 8000
    import json
    util.run_benchmark(options, num_obj_copies, benchmark, json, options)