本文整理汇总了Python中sympy.core.cache.clear_cache函数的典型用法代码示例。如果您正苦于以下问题:Python clear_cache函数的具体用法?Python clear_cache怎么用?Python clear_cache使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了clear_cache函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: write_in_chunks
def write_in_chunks(lines, mainfile, deffile, name, chunk_size):
    """Emit the code lines from `lines` as chunk-sized C helper functions.

    Each chunk of `chunk_size` lines is wrapped in a `void` function written
    to `deffile`, and a call to that function is appended to `mainfile`.
    When the iterator holds no more than chunk_size+1 lines, everything is
    written straight into `mainfile` instead.  SymPy's cache is cleared at
    the end to release the memory the code generation accumulated.
    """
    funcname = "definitions_" + name

    # Peek at the first chunk_size+1 lines to decide whether chunking pays off.
    first_chunk = []
    try:
        for _ in range(chunk_size + 1):
            first_chunk.append(next(lines))
    except StopIteration:
        # Small input: write it directly, no helper functions needed.
        for line in first_chunk:
            mainfile.write(line)
    else:
        # Re-attach the consumed prefix and emit one helper per chunk.
        lines = chain(first_chunk, lines)
        while True:
            mainfile.write(funcname + "();\n")
            deffile.write("void " + funcname + "(void){\n")
            try:
                for _ in range(chunk_size):
                    deffile.write(next(lines))
            except StopIteration:
                break
            finally:
                # Always close the helper's body, even for the final
                # (possibly partial) chunk.
                deffile.write("}\n")
            funcname = count_up(funcname)

    clear_cache()
示例2: alt_enumerate
def alt_enumerate(self, cross_sections=None):
    """Build the generating function for this set from its cross-sections.

    Only works when the set is a generating set for sortables and every
    member of the top layer has the same (maximal) length.
    """
    top_length = max(len(s) for s in self)
    top_layer = PegPermSet([s for s in self if len(s) == top_length])
    (gf, cross_sections) = top_layer.alt_cross_sections()
    gc.collect()
    print('\tDone computing cross_sections. There are',len(cross_sections),'cross sections.')
    print('Starting to compute generating function for uncleans.')

    done = 0
    total = len(cross_sections)
    stamp = time.time()
    for clean_perm in cross_sections.keys():
        # Periodic housekeeping: keep the symbolic expression small and
        # keep SymPy's cache from ballooning over long runs.
        if done % 10000 == 0 and done > 0:
            gf = gf.simplify()
        if done % 50000 == 0 and done > 0:
            clear_cache()
        if done % 10000 == 0 and done > 0:
            print('\t\t',done,'of',total,'\ttook',(time.time()-stamp),'seconds.')
            stamp = time.time()
        gf += clean_perm.csgf(cross_sections[clean_perm])
        done += 1

    print('\tDone!')
    return gf.simplify()
示例3: main
def main(results):
    """Drip-feed data from every source through every test, storing p-values.

    Cycles until every (test, file) pair has run dry.  Relies on the
    module-level `testparams`, `files`, `sprinkler` and `remfails`.
    """
    holders = []
    for (test, dropsize) in testparams:
        holders.append((test, sprinkler(dropsize, files)))

    exhausted = 0
    while exhausted != len(testparams) * len(files):
        for (test, datasrc) in holders:
            dead = []
            for (fname, Drip) in datasrc.items():
                block = Drip.drip()
                if block == []:
                    # This source has run dry for this test.
                    dead.append((fname, datasrc))
                    exhausted += 1
                    continue
                print('Testing', fname, 'with', test.__name__, 'drip no.', Drip.dripno)
                if results.hasresult(Drip.dripno, fname, test.__name__):
                    continue
                # Retry on MemoryError: clearing SymPy's cache frees enough
                # memory to attempt the same block again.
                while True:
                    try:
                        pvalue = test(block)
                        break
                    except MemoryError:
                        clear_cache()
                try:
                    for i, pval in enumerate(pvalue):
                        results.store(Drip.dripno, fname, test.__name__, i, pval)
                except TypeError:
                    # A single scalar p-value rather than an iterable.
                    results.store(Drip.dripno, fname, test.__name__, 1, pvalue)
            remfails(dead)
示例4: compute_psi_stats
def compute_psi_stats(self):
    """Symbolically compute the psi statistics of the kernel.

    psi1 is the expectation of the kernel under independent Gaussian input
    distributions N(x_i | mu_i, S_i); psi2 is the expectation of
    k(x, z) * k(x, z') with a second copy z' of the inducing inputs.
    Results are stored on ``self._sp_psi1`` / ``self._sp_psi2``.
    (psi0 is not computed here.)

    Fix: the original used Python-2 ``print`` statements, which are a
    SyntaxError on Python 3, and passed a ``zip`` iterator to ``subs``,
    which needs a concrete list on Python 3.
    """
    # One Gaussian density per input dimension.
    mus = [sp.var("mu_%i" % i, real=True) for i in range(self.input_dim)]
    Ss = [sp.var("S_%i" % i, positive=True) for i in range(self.input_dim)]
    normals = [
        (2 * sp.pi * Si) ** (-0.5) * sp.exp(-0.5 * (xi - mui) ** 2 / Si)
        for xi, mui, Si in zip(self._sp_x, mus, Ss)
    ]

    # psi1: integrate the kernel against each input density in turn.
    # self._sp_psi0 = ??
    self._sp_psi1 = self._sp_k
    for i in range(self.input_dim):
        print("perfoming integrals %i of %i" % (i + 1, 2 * self.input_dim))
        sys.stdout.flush()
        self._sp_psi1 *= normals[i]
        self._sp_psi1 = sp.integrate(self._sp_psi1, (self._sp_x[i], -sp.oo, sp.oo))
        # Symbolic integration bloats SymPy's cache; clear after each one.
        clear_cache()
    self._sp_psi1 = self._sp_psi1.simplify()

    # and here's psi2 (eek!) -- needs a primed copy of the z symbols.
    zprime = [sp.Symbol("zp%i" % i) for i in range(self.input_dim)]
    self._sp_psi2 = self._sp_k.copy() * self._sp_k.copy().subs(list(zip(self._sp_z, zprime)))
    for i in range(self.input_dim):
        print("perfoming integrals %i of %i" % (self.input_dim + i + 1, 2 * self.input_dim))
        sys.stdout.flush()
        self._sp_psi2 *= normals[i]
        self._sp_psi2 = sp.integrate(self._sp_psi2, (self._sp_x[i], -sp.oo, sp.oo))
        clear_cache()
    self._sp_psi2 = self._sp_psi2.simplify()
示例5: __init__
def __init__(self):
    """Create an empty model.

    SymPy's cache is cleared first: without this, creating several models
    in a row misbehaves because sympy() will not reevaluate functions and
    the series accessor does not get created.  The cache is process-global,
    so be careful when using these models from multiple threads.
    """
    clear_cache()

    # Core model state, all insertion-ordered.
    self.variables = collections.OrderedDict()
    self.parameters = collections.OrderedDict()
    self.solutions = []
    self.equations = []
    self._private_parameters = collections.OrderedDict()
    self._local_context = {}
    self._var_default = None
    self._param_default = None
    self._need_function_update = True
    _add_functions(self._local_context)

    # State used to lambdify the expressions, filled in later.
    self._arg_list = None
    self._private_funcs = None

    # Available solver implementations, keyed by name.
    self._solvers = {
        'newton-raphson': NewtonRaphsonSolver(self),
        'gauss-seidel': GaussSeidelSolver(self),
        'broyden': BroydenSolver(self),
    }
示例6: test_dielectric
def test_dielectric(ctx_getter, qbx_order, op_class, mode, visualize=False):
    """Check that the dielectric solver converges at (roughly) qbx_order."""
    import logging
    logging.basicConfig(level=logging.INFO)

    from pytools.convergence import EOCRecorder
    from sympy.core.cache import clear_cache

    context = ctx_getter()
    queue = cl.CommandQueue(context)
    recorder = EOCRecorder()

    for nelements in (30, 50, 70):
        # prevent sympy cache 'splosion between refinement levels
        clear_cache()

        errors = run_dielectric_test(
            context, queue,
            nelements=nelements, qbx_order=qbx_order,
            op_class=op_class, mode=mode,
            visualize=visualize)

        recorder.add_data_point(1/nelements, la.norm(list(errors), np.inf))

    print(recorder)
    assert recorder.order_estimate() > qbx_order - 0.5
示例7: test_issue_7688
def test_issue_7688():
    """Subclassing UndefinedFunction still works after clearing the cache."""
    from sympy.core.function import Function, UndefinedFunction

    base = Function('f')  # actually an UndefinedFunction
    clear_cache()

    class A(UndefinedFunction):
        pass

    # An instance of the subclass must still be of the same (meta)type.
    assert isinstance(A('f'), type(base))
示例8: main
def main(n, bench):
    """Run the benchmark named `bench` n times and return per-run timings.

    Looks up a module-level function called ``bench_<bench>`` and times
    each call, starting every run from a cold SymPy cache.
    """
    func = globals()['bench_' + bench]
    timings = []
    for _ in range(n):
        clear_cache()  # cold cache for every run
        start = time.time()
        func()
        timings.append(time.time() - start)
    return timings
示例9: _lambdify
def _lambdify(self):
    """Turn each symbolic solution into a numeric callable.

    Returns ``(vars, lambda_list)``: the argument symbols (every range's
    first element except the first range) and one lambdified callable per
    symbolic solution.  Also appends the call signature to __call__'s
    docstring.
    """
    # The first entry of self._ranges is skipped; the rest supply the
    # argument symbols, one per range.
    arg_syms = [rng[0] for rng in self._ranges[1:]]
    callables = [lambdify(arg_syms, sym_sol) for sym_sol in self.sym_sols]

    # Advertise the call signature on the instance's __call__ docstring.
    signature = ','.join(str(sym) for sym in arg_syms)
    self.__call__.__func__.__doc__ += ('Function signature is f('
                                       + signature + ')\n')

    clear_cache()  # lambdify leaves a lot behind in SymPy's cache
    return arg_syms, callables
示例10: test_Basic_keep_sign
def test_Basic_keep_sign():
    """Mul factoring honours the global Basic.keep_sign flag."""
    Basic.keep_sign = True
    assert Mul(x - 1, x + 1) == (x - 1)*(x + 1)
    assert (1/(x - 1)).as_coeff_terms()[0] == +1

    # Without clearing the cache, the keep_sign=True results would leak
    # into the keep_sign=False assertions below.
    clear_cache()
    Basic.keep_sign = False

    assert Mul(x - 1, x + 1) == -(1 - x)*(1 + x)
    assert (1/(x - 1)).as_coeff_terms()[0] == -1
示例11: test_file
def test_file(self, filename):
    """Run all doctests found in `filename`, reporting pass/fail per test.

    Results go through self._reporter; stdout is captured per test so
    failure output can be shown.  (Python 2 code: uses StringIO.StringIO.)
    """
    clear_cache()

    import unittest
    from StringIO import StringIO

    # Convert the file path into a dotted module name relative to the root.
    rel_name = filename[len(self._root_dir)+1:]
    module = rel_name.replace(os.sep, '.')[:-3]
    setup_pprint()
    try:
        module = pdoctest._normalize_module(module)
        tests = SymPyDocTestFinder().find(module)
    except:
        # Import/collection failed -- report and bail out for this file.
        self._reporter.import_error(filename, sys.exc_info())
        return

    tests = [test for test in tests if len(test.examples) > 0]
    # By default (except for python 2.4 in which it was broken) tests
    # are sorted by alphabetical order by function name. We sort by line number
    # so one can edit the file sequentially from bottom to top...HOWEVER
    # if there are decorated functions, their line numbers will be too large
    # and for now one must just search for these by text and function name.
    tests.sort(key=lambda x: -x.lineno)

    if not tests:
        return
    self._reporter.entering_filename(filename, len(tests))
    for test in tests:
        assert len(test.examples) != 0
        runner = SymPyDocTestRunner(optionflags=pdoctest.ELLIPSIS |
                                    pdoctest.NORMALIZE_WHITESPACE)

        # Capture everything the doctest prints.
        saved_stdout = sys.stdout
        captured = StringIO()
        sys.stdout = captured

        # If the testing is normal, the doctests get importing magic to
        # provide the global namespace. If not normal (the default) then
        # then must run on their own; all imports must be explicit within
        # a function's docstring. Once imported that import will be
        # available to the rest of the tests in a given function's
        # docstring (unless clear_globs=True below).
        if not self._normal:
            test.globs = {}
            # if this is uncommented then all the test would get is what
            # comes by default with a "from sympy import *"
            #exec('from sympy import *') in test.globs
        try:
            failures, attempted = runner.run(test, out=captured.write,
                                             clear_globs=False)
        finally:
            sys.stdout = saved_stdout
        if failures > 0:
            self._reporter.doctest_fail(test.name, captured.getvalue())
        else:
            self._reporter.test_pass()
    self._reporter.leaving_filename()
示例12: test_issue_7687
def test_issue_7687():
    """Function matching gives the same result before and after a cache clear."""
    from sympy.core.function import Function
    from sympy.abc import x

    f = Function('f')(x)
    ff = Function('f')(x)
    match_with_cache = ff.matches(f)
    assert isinstance(f, type(ff))

    # Rebuilding f's twin after clearing the cache must yield the same
    # class and the same match result as the cached one did.
    clear_cache()
    ff = Function('f')(x)
    assert isinstance(f, type(ff))
    assert match_with_cache == ff.matches(f)
示例13: bench_sympy
def bench_sympy(loops, func):
    """Time `loops` calls of `func`, returning total elapsed seconds.

    Fix: the original used ``xrange``, which does not exist on Python 3
    (NameError); ``range`` behaves identically here.
    """
    timer = perf.perf_counter
    dt = 0
    for _ in range(loops):
        # Don't benchmark clear_cache(), exclude it of the benchmark
        clear_cache()
        t0 = timer()
        func()
        dt += (timer() - t0)
    return dt
示例14: test_pow_eval_subs_no_cache
def test_pow_eval_subs_no_cache():
    """Regression test: pull request 9376 fixed subs on 1/sqrt(x**2)."""
    from sympy.core.cache import clear_cache

    expr = 1/sqrt(x**2)
    # The bug only appeared with the cache turned off; clearing the cache
    # is the closest approximation to running without one.
    clear_cache()
    # Before PR 9376 this incorrectly returned 1/sqrt(x**2).
    assert expr.subs(sqrt(x**2), y) == 1/y
示例15: __call__
def __call__(self, *args):
    """Evaluate the integrand at *args, cross-checking both backends.

    Calls the lambdified and the ctypes implementations, prints their
    difference, and returns the lambdified result.

    Fixes: Python-2 ``print`` statements (SyntaxError on py3); ``raise
    Error(...)`` referenced an undefined name and would have crashed with
    a NameError instead of the intended exception; the ``pdb.set_trace()``
    after the raise was unreachable.  The remaining breakpoint was a debug
    leftover and has been removed.
    """
    if len(args) != len(self.sympy_variables):
        print('args = ', args)
        print('sympy_vars = ', self.sympy_variables)
        # `Error` was undefined here; raise a real exception type instead.
        raise ValueError('invalid argument list given in call to Integrand!')
    out = self.lambdified(*args)
    out1 = self.ctypesified(len(args), tuple(args))
    # Difference between the two backends -- should be (near) zero.
    print(out - out1)
    clear_cache()
    return out