This article collects typical usage examples of the doctest.ELLIPSIS attribute in Python. If you have been wondering what doctest.ELLIPSIS does, or how and where to use it, the curated examples below may help; you can also explore the other members of its containing module, doctest.
The 15 code examples below demonstrate the doctest.ELLIPSIS attribute, sorted by popularity by default. Upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
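Before the collected examples, a minimal self-contained sketch of what the flag actually does may help (the function name make_id is a hypothetical stand-in): with doctest.ELLIPSIS enabled, a literal ... in the expected output matches any run of characters in the actual output, which is how doctests cope with values that vary between runs, such as memory addresses in reprs.

import doctest

def make_id():
    """Return an object whose repr differs on every run.

    >>> make_id()
    <object object at 0x...>
    """
    return object()

if __name__ == '__main__':
    # Without ELLIPSIS this doctest would fail, because the memory
    # address in the repr changes between runs; with the flag, the
    # literal '...' matches the variable part.
    doctest.testmod(optionflags=doctest.ELLIPSIS)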
Example 1: test_basic_functions
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def test_basic_functions(self):
    import code
    import doctest
    import sys

    db = pg_simple.PgSimple(self.pool)
    if sys.argv.count('--interact'):
        db.log = sys.stdout
        code.interact(local=locals())
    else:
        try:
            # Set up tables
            self._drop_tables(db)
            self._create_tables(db, fill=True)
            # Run tests
            doctest.testmod(optionflags=doctest.ELLIPSIS)
        finally:
            # Drop tables
            self._drop_tables(db)
    self.assertEqual(True, True)
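A related pattern worth knowing alongside Example 1: instead of passing optionflags to testmod, ELLIPSIS can be switched on for a single example with an inline directive. A minimal sketch (the surrounding function is illustrative only):

import doctest

def sample():
    """
    >>> list(range(20))  # doctest: +ELLIPSIS
    [0, 1, 2, ..., 19]
    """

if __name__ == '__main__':
    doctest.testmod()  # no global optionflags needed here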
Example 2: _test
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def _test():
    import os
    import sys
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SparkSession
    import pyspark.sql.session

    os.chdir(os.environ["SPARK_HOME"])

    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        sys.exit(-1)
Example 3: _test
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def _test():
    import os
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SparkSession
    import pyspark.sql.session

    os.chdir(os.environ["SPARK_HOME"])

    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
Example 4: test
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def test():
    import doctest
    print("----------------------------------------------------------")
    print("expr")
    print("----------------------------------------------------------")
    doctest.testmod(expr, verbose=True, raise_on_error=False, optionflags=doctest.ELLIPSIS)
    expr.TestExpr().run()
    print("----------------------------------------------------------")
    print("algebraic")
    print("----------------------------------------------------------")
    doctest.testmod(algebraic, verbose=True, raise_on_error=True, optionflags=doctest.ELLIPSIS)
    algebraic.TestAlgebraic().run()
    print("----------------------------------------------------------")
    print("brain")
    print("----------------------------------------------------------")
    doctest.testmod(brain, verbose=True, raise_on_error=True, optionflags=doctest.ELLIPSIS)
    TestBrain().run()
Example 5: makeTest
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def makeTest(self, obj, parent):
    """Look for doctests in the given object, which will be a
    function, method or class.
    """
    # print 'Plugin analyzing:', obj, parent  # dbg
    # always use whitespace and ellipsis options
    optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
    doctests = self.finder.find(obj, module=getmodule(parent))
    if doctests:
        for test in doctests:
            if len(test.examples) == 0:
                continue
            yield DocTestCase(test, obj=obj,
                              optionflags=optionflags,
                              checker=self.checker)
Example 6: load_tests
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def load_tests(loader, tests, ignore):
    env = os.environ.copy()
    env['CR8_NO_TQDM'] = 'True'
    node.start()
    assert node.http_host, "http_url must be available"
    tests.addTests(doctest.DocFileSuite(
        os.path.join('..', 'README.rst'),
        globs={
            'sh': functools.partial(
                subprocess.run,
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE,
                stderr=subprocess.STDOUT,
                timeout=60,
                shell=True,
                env=env
            )
        },
        optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS,
        setUp=setup,
        tearDown=teardown,
        parser=Parser()
    ))
    return tests
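For readers who want the DocFileSuite pattern of Example 6 without the cr8/CrateDB scaffolding, a stripped-down sketch follows; the file name is an assumed placeholder:

import doctest
import unittest

def load_tests(loader, tests, ignore):
    # Run the interactive examples embedded in a prose file, with the
    # same two flags the example above combines.
    tests.addTests(doctest.DocFileSuite(
        'README.rst',  # resolved relative to this test module by default
        optionflags=doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS,
    ))
    return tests

if __name__ == '__main__':
    unittest.main()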
Example 7: _test
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def _test():
    import sys
    import doctest
    from pyspark.sql import SparkSession
    import pyspark.ml.image

    globs = pyspark.ml.image.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("ml.image tests")\
        .getOrCreate()
    globs['spark'] = spark

    (failure_count, test_count) = doctest.testmod(
        pyspark.ml.image, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    spark.stop()
    if failure_count:
        sys.exit(-1)
Example 8: _test
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def _test():
    import sys
    import doctest
    from pyspark.sql import SparkSession

    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples:
    spark = SparkSession.builder\
        .master("local[2]")\
        .appName("mllib.util tests")\
        .getOrCreate()
    globs['spark'] = spark
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        sys.exit(-1)
Example 9: _test
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def _test():
    import sys
    import doctest
    from pyspark.sql import SparkSession
    import pyspark.mllib.fpm

    globs = pyspark.mllib.fpm.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.fpm tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext

    import tempfile
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        sys.exit(-1)
Example 10: _test
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def _test():
    import sys
    import doctest
    import numpy
    from pyspark.sql import SparkSession
    try:
        # NumPy 1.14+ changed its string format.
        numpy.set_printoptions(legacy='1.13')
    except TypeError:
        pass
    globs = globals().copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("mllib.stat.statistics tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        sys.exit(-1)
Example 11: _test
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def _test():
    import sys
    import doctest
    from pyspark.sql import Row, SparkSession
    import pyspark.sql.functions

    globs = pyspark.sql.functions.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.functions tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    globs['df'] = spark.createDataFrame([Row(name='Alice', age=2), Row(name='Bob', age=5)])
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.functions, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    spark.stop()
    if failure_count:
        sys.exit(-1)
Example 12: _test
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def _test():
    import sys
    import doctest
    from pyspark.sql import SparkSession
    import pyspark.sql.udf

    globs = pyspark.sql.udf.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.udf tests")\
        .getOrCreate()
    globs['spark'] = spark
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.udf, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    spark.stop()
    if failure_count:
        sys.exit(-1)
Example 13: _test
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def _test():
    import sys
    import doctest
    from pyspark.sql import SparkSession
    from pyspark.sql.types import StructType, StructField, IntegerType, StringType
    import pyspark.sql.column

    globs = pyspark.sql.column.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.column tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['spark'] = spark
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.column, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    spark.stop()
    if failure_count:
        sys.exit(-1)
Example 14: load_tests
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def load_tests(loader, tests, ignore):
    """Many docstrings contain doctests. Instead of using a separate doctest
    runner, we use doctest's unittest API."""
    account = searchconsole.authenticate(
        client_config='auth/client_secrets.json',
        credentials='auth/credentials.dat'
    )
    globs = {
        'account': account,
        'webproperty': account[webproperty_uri],
        'www_webproperty_com': webproperty_uri,
        'query': account[webproperty_uri].query
    }
    kwargs = {
        'globs': globs,
        'optionflags': doctest.ELLIPSIS
    }
    tests.addTests(doctest.DocTestSuite(searchconsole.auth, **kwargs))
    tests.addTests(doctest.DocTestSuite(searchconsole.account, **kwargs))
    tests.addTests(doctest.DocTestSuite(searchconsole.query, **kwargs))
    return tests
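The same load_tests protocol works without any external service; as a hedged sketch, with mypackage.mymodule standing in for any module whose docstrings contain doctests:

import doctest

import mypackage.mymodule  # hypothetical module with doctests in its docstrings

def load_tests(loader, tests, ignore):
    # Inject shared fixtures into every doctest's namespace via globs,
    # and relax output matching with ELLIPSIS, mirroring Example 14.
    tests.addTests(doctest.DocTestSuite(
        mypackage.mymodule,
        globs={'answer': 42},
        optionflags=doctest.ELLIPSIS,
    ))
    return tests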
Example 15: test_describe_non_ascii_bytes
# Module to import: import doctest [as alias]
# Or: from doctest import ELLIPSIS [as alias]
def test_describe_non_ascii_bytes(self):
    """Even with bytestrings, the mismatch should be coercible to unicode.

    DocTestMatches is intended for text, but the Python 2 str type also
    permits arbitrary binary inputs. This is a slightly bogus thing to do,
    and under Python 3 using bytes objects will reasonably raise an error.
    """
    header = _b("\x89PNG\r\n\x1a\n...")
    if str_is_unicode:
        self.assertRaises(TypeError,
                          DocTestMatches, header, doctest.ELLIPSIS)
        return
    matcher = DocTestMatches(header, doctest.ELLIPSIS)
    mismatch = matcher.match(_b("GIF89a\1\0\1\0\0\0\0;"))
    # Must be treatable as unicode text; the exact output matters less.
    self.assertTrue(unicode(mismatch.describe()))
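For context, DocTestMatches is a matcher from the testtools library; its intended (text) use with ELLIPSIS looks roughly like this sketch, where the pattern and sample strings are made up for illustration:

import doctest
from testtools.matchers import DocTestMatches

# '...' acts as a wildcard because doctest.ELLIPSIS is passed as flags.
matcher = DocTestMatches("status: ... ok\n", doctest.ELLIPSIS)

# match() returns None on success and a Mismatch object on failure.
assert matcher.match("status: 200 ok\n") is None
assert matcher.match("status: 500 error\n") is not None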