This article collects typical usage examples of the `timeit.time` function in Python. If you are wondering what exactly the Python `time` function does, how to call it, or what real uses look like, the hand-picked code examples below may help.
A total of 15 code examples of the `time` function are shown below, sorted by popularity by default.
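Note that every snippet below calls `time()` as a bare function. The surrounding modules are not shown in these excerpts, but a common pattern (an assumption here, not something the excerpts themselves confirm) is to bind `timeit`'s portable high-resolution clock to the name `time`, as in this minimal sketch:

    # Minimal sketch, assuming the snippets bind timeit's clock like this:
    from timeit import default_timer as time

    start = time()                    # read the high-resolution clock
    total = sum(range(1_000_000))     # the work being measured
    print("elapsed: %.6f s" % (time() - start))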
Example 1: main
def main():
    N = 1e8 // 2
    print("Data size", N)

    targets = ["cpu", "parallel"]
    # run just one target if it is specified on the command line
    for t in targets:
        if t in sys.argv[1:]:
            targets = [t]
            break

    for target in targets:
        print("== Target", target)
        vect_discriminant = vectorize(["f4(f4, f4, f4)", "f8(f8, f8, f8)"], target=target)(discriminant)

        A, B, C = generate_input(N, dtype=np.float32)
        D = np.empty(A.shape, dtype=A.dtype)

        ts = time()
        D = vect_discriminant(A, B, C)
        te = time()

        total_time = te - ts
        print("Execution time %.4f" % total_time)
        print("Throughput %.4f" % (N / total_time))

        if "-verify" in sys.argv[1:]:
            check_answer(D, A, B, C)
Example 2: run_div_test
def run_div_test(fld, exact, title='', show=False, ignore_inexact=False):
    t0 = time()
    result_numexpr = viscid.div(fld, preferred="numexpr", only=False)
    t1 = time()
    logger.info("numexpr magnitude runtime: %g", t1 - t0)

    result_diff = viscid.diff(result_numexpr, exact)['x=1:-1, y=1:-1, z=1:-1']
    if not ignore_inexact and not (result_diff.data < 5e-5).all():
        logger.warning("numexpr result is far from the exact result")
    logger.info("min/max(abs(numexpr - exact)): %g / %g",
                np.min(result_diff.data), np.max(result_diff.data))

    planes = ["y=0j", "z=0j"]
    nrows = 2
    ncols = len(planes)
    _, axes = plt.subplots(nrows, ncols, squeeze=False)

    for i, p in enumerate(planes):
        vlt.plot(result_numexpr, p, ax=axes[0, i], show=False)
        vlt.plot(result_diff, p, ax=axes[1, i], show=False)

    plt.suptitle(title)
    vlt.auto_adjust_subplots(subplot_params=dict(top=0.9))

    plt.savefig(next_plot_fname(__file__))
    if show:
        vlt.mplshow()
Example 3: timeit
def timeit(f, *args, **kwargs):
    t0 = time()
    ret = f(*args, **kwargs)
    t1 = time()
    print("Took {0:.03g} secs.".format(t1 - t0))
    return ret
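A hypothetical call to this helper could look like the following; the data and the timed function are made up for illustration, only `timeit` itself comes from the example above:

    # Hypothetical usage of the timeit helper above: time np.sort and keep its result.
    # Assumes numpy has been imported as np, as in the other examples on this page.
    data = np.random.random(1_000_000)
    sorted_data = timeit(np.sort, data)   # prints "Took ... secs." and returns the sorted array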
Example 4: test_func
def test_func(self):
    A = np.array(np.random.random((n, n)), dtype=np.float32)
    B = np.array(np.random.random((n, n)), dtype=np.float32)
    C = np.empty_like(A)

    print("N = %d x %d" % (n, n))

    s = time()
    stream = cuda.stream()
    with stream.auto_synchronize():
        dA = cuda.to_device(A, stream)
        dB = cuda.to_device(B, stream)
        dC = cuda.to_device(C, stream)
        cu_square_matrix_mul[(bpg, bpg), (tpb, tpb), stream](dA, dB, dC)
        dC.copy_to_host(C, stream)
    e = time()
    tcuda = e - s

    # Host compute
    Amat = np.matrix(A)
    Bmat = np.matrix(B)

    s = time()
    Cans = Amat * Bmat
    e = time()
    tcpu = e - s

    print('cpu: %f' % tcpu)
    print('cuda: %f' % tcuda)
    print('cuda speedup: %.2fx' % (tcpu / tcuda))

    # Check result
    self.assertTrue(np.allclose(C, Cans))
Example 5: func2
def func2(num):
    s = time()
    num = sp.sympify(num)
    res = num.is_Symbol
    e = time()
    print(e - s)
    return res
Example 6: _bin_data
def _bin_data(self, X, rng, is_training_data):
    """Bin data X.

    If is_training_data, then set the bin_mapper_ attribute.
    Else, the binned data is converted to a C-contiguous array.
    """
    description = 'training' if is_training_data else 'validation'
    if self.verbose:
        print("Binning {:.3f} GB of {} data: ".format(
            X.nbytes / 1e9, description), end="", flush=True)
    tic = time()
    if is_training_data:
        X_binned = self.bin_mapper_.fit_transform(X)  # F-aligned array
    else:
        X_binned = self.bin_mapper_.transform(X)  # F-aligned array
        # We convert the array to C-contiguous since predicting is faster
        # with this layout (training is faster on F-arrays though)
        X_binned = np.ascontiguousarray(X_binned)
    toc = time()
    if self.verbose:
        duration = toc - tic
        print("{:.3f} s".format(duration))

    return X_binned
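The layout comment above can be checked directly with NumPy array flags; a small standalone sketch, independent of the class this method belongs to:

    # Quick check of the F-aligned vs. C-contiguous point made in the comment above.
    import numpy as np

    X = np.asfortranarray(np.random.random((1000, 10)))      # column-major ("F-aligned") array
    print(X.flags['F_CONTIGUOUS'], X.flags['C_CONTIGUOUS'])   # True False
    Xc = np.ascontiguousarray(X)                              # row-major copy, faster to scan row-wise
    print(Xc.flags['C_CONTIGUOUS'])                           # True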
Example 7: main
def main():
    targets = ['cpu', 'parallel']
    # run just one target if it is specified on the command line
    for t in targets:
        if t in sys.argv[1:]:
            targets = [t]
            break

    for target in targets:
        print('== Target', target)
        vect_sum = vectorize(['f4(f4, f4)', 'f8(f8, f8)'],
                             target=target)(sum)

        A = np.random.random(N).astype(np.float32)
        B = np.random.random(N).astype(np.float32)

        assert A.shape == B.shape
        assert A.dtype == B.dtype
        assert len(A.shape) == 1

        D = np.empty(A.shape, dtype=A.dtype)

        print('Data size', N)

        ts = time()
        D = vect_sum(A, B)
        te = time()

        total_time = (te - ts)

        print('Execution time %.4f' % total_time)
        print('Throughput %.4f' % (N / total_time))

        if '-verify' in sys.argv[1:]:
            check_answer(D, A, B)
Example 8: test
def test(ty):
    print("Test %s" % ty)
    data = np.array(np.random.random(int(1e6 + 1)), dtype=ty)

    ts = time()
    stream = cuda.stream()
    device_data = cuda.to_device(data, stream)
    dresult = cuda_ufunc(device_data, device_data, stream=stream)
    result = dresult.copy_to_host()
    stream.synchronize()
    tnumba = time() - ts

    ts = time()
    gold = np_ufunc(data, data)
    tnumpy = time() - ts

    print("Numpy time: %fs" % tnumpy)
    print("Numba time: %fs" % tnumba)

    if tnumba < tnumpy:
        print("Numba is FASTER by %fx" % (tnumpy / tnumba))
    else:
        print("Numba is SLOWER by %fx" % (tnumba / tnumpy))

    self.assertTrue(np.allclose(gold, result), (gold, result))
Example 9: timeit
def timeit(message, display=True):
    """Context to time an execution."""
    start = time()
    yield
    if not display:
        return
    print("{}: {:.3f} s".format(message, time() - start))
Example 10: run
def run(self):
    print('Running part 5')
    filename = './' + self.out_dir + '/time.txt'
    with open(filename, 'w') as text_file:
        t0 = time()
        self.nn_pca_cluster_wine()
        text_file.write('nn_pca_wine: %0.3f seconds\n' % (time() - t0))

        t0 = time()
        self.nn_ica_cluster_wine()
        text_file.write('nn_ica_wine: %0.3f seconds\n' % (time() - t0))

        t0 = time()
        self.nn_rp_cluster_wine()
        text_file.write('nn_rp_wine: %0.3f seconds\n' % (time() - t0))

        t0 = time()
        self.nn_lda_cluster_wine()
        text_file.write('nn_lda_wine: %0.3f seconds\n' % (time() - t0))

        t0 = time()
        self.nn_wine_orig()
        text_file.write('nn_wine_orig: %0.3f seconds\n' % (time() - t0))
Example 11: test_gufunc
def test_gufunc(self):

    @guvectorize([void(float32[:, :], float32[:, :], float32[:, :])],
                 '(m,n),(n,p)->(m,p)',
                 target='cuda')
    def matmulcore(A, B, C):
        m, n = A.shape
        n, p = B.shape
        for i in range(m):
            for j in range(p):
                C[i, j] = 0
                for k in range(n):
                    C[i, j] += A[i, k] * B[k, j]

    gufunc = matmulcore
    gufunc.max_blocksize = 512

    matrix_ct = 1001  # an odd number to test thread/block division in CUDA
    A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2, 4)
    B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4, 5)

    ts = time()
    C = gufunc(A, B)
    tcuda = time() - ts

    ts = time()
    Gold = ut.matrix_multiply(A, B)
    tcpu = time() - ts

    non_stream_speedups.append(tcpu / tcuda)

    self.assertTrue(np.allclose(C, Gold))
Example 12: wrapper
def wrapper(self, *args, **kwargs):
    # Open link
    if self.link is None:
        self.open()
    # Init time
    start = time()
    no_control = self.callback_timeout >= self.instrument_timeout
    # Loop over timeouts
    while True:
        try:
            # Run the function
            result = func(self, *args, **kwargs)
        except Vxi11Exception as exc:
            # Time control
            no_timeout = exc.err != ERR_IO_TIMEOUT
            expired = time() > start + self.instrument_timeout
            # Reraise exception
            if no_control or no_timeout or expired:
                raise
            # Callback with exc
            if self.callback:
                self.callback(exc)
        else:
            # Callback without exc
            if self.callback:
                self.callback(None)
            # Return
            return result
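This `wrapper` is the inner function of a retry decorator: `func`, `Vxi11Exception`, `ERR_IO_TIMEOUT`, and the `self.*` attributes all come from the enclosing decorator and instrument class, which are not shown. A minimal, generic sketch of that kind of decorator, with hypothetical names that are not the vxi11 library's actual API:

    # Minimal sketch of a deadline-based retry decorator (hypothetical names).
    import functools
    from timeit import default_timer as time

    def retry_until_deadline(timeout_s, retryable_exc=Exception):
        """Retry the wrapped call on retryable_exc until timeout_s seconds elapse."""
        def decorator(func):
            @functools.wraps(func)
            def wrapper(*args, **kwargs):
                start = time()
                while True:
                    try:
                        return func(*args, **kwargs)
                    except retryable_exc:
                        if time() > start + timeout_s:
                            raise  # deadline expired: re-raise the last error
            return wrapper
        return decorator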
Example 13: main
def main():
    N = 1e+8 // 2
    print('Data size', N)

    targets = ['cpu', 'parallel']
    # run just one target if it is specified on the command line
    for t in targets:
        if t in sys.argv[1:]:
            targets = [t]
            break

    for target in targets:
        print('== Target', target)
        vect_discriminant = vectorize([f4(f4, f4, f4), f8(f8, f8, f8)],
                                      target=target)(discriminant)

        A, B, C = generate_input(N, dtype=np.float32)
        D = np.empty(A.shape, dtype=A.dtype)

        ts = time()
        D = vect_discriminant(A, B, C)
        te = time()

        total_time = (te - ts)

        print('Execution time %.4f' % total_time)
        print('Throughput %.4f' % (N / total_time))

        if '-verify' in sys.argv[1:]:
            check_answer(D, A, B, C)
Example 14: test_gufunc_stream
def test_gufunc_stream(self):
    #cuda.driver.flush_pending_free()
    matrix_ct = 1001  # an odd number to test thread/block division in CUDA
    A = np.arange(matrix_ct * 2 * 4, dtype=np.float32).reshape(matrix_ct, 2, 4)
    B = np.arange(matrix_ct * 4 * 5, dtype=np.float32).reshape(matrix_ct, 4, 5)

    ts = time()
    stream = cuda.stream()
    dA = cuda.to_device(A, stream)
    dB = cuda.to_device(B, stream)

    dC = cuda.device_array(shape=(1001, 2, 5), dtype=A.dtype, stream=stream)
    dC = gufunc(dA, dB, out=dC, stream=stream)
    C = dC.copy_to_host(stream=stream)
    stream.synchronize()

    tcuda = time() - ts

    ts = time()
    Gold = ut.matrix_multiply(A, B)
    tcpu = time() - ts

    stream_speedups.append(tcpu / tcuda)

    self.assertTrue(np.allclose(C, Gold))
Example 15: test_func
def test_func(self):
    np.random.seed(42)
    A = np.array(np.random.random((n, n)), dtype=np.float32)
    B = np.array(np.random.random((n, n)), dtype=np.float32)
    C = np.empty_like(A)

    s = time()
    stream = cuda.stream()
    with stream.auto_synchronize():
        dA = cuda.to_device(A, stream)
        dB = cuda.to_device(B, stream)
        dC = cuda.to_device(C, stream)
        cu_square_matrix_mul[(bpg, bpg), (tpb, tpb), stream](dA, dB, dC)
        dC.copy_to_host(C, stream)
    e = time()
    tcuda = e - s

    # Host compute
    s = time()
    Cans = np.dot(A, B)
    e = time()
    tcpu = e - s

    # Check result
    np.testing.assert_allclose(C, Cans, rtol=1e-5)
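All of the examples above measure a single call by subtracting two clock readings. For very short operations, the `timeit` module these snippets draw on also provides `Timer.repeat`, which runs the statement many times and is usually less noisy; a small sketch for comparison:

    # Averaging many runs with timeit.Timer instead of a single time() delta.
    import timeit

    t = timeit.Timer("sum(range(1000))")
    per_call = min(t.repeat(repeat=5, number=10_000)) / 10_000
    print("best per-call time: %.3g s" % per_call)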