The following 43 code examples, extracted from open-source Python projects, illustrate how to use doctest.NORMALIZE_WHITESPACE.
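Before the project snippets, here is a minimal, self-contained sketch of what the flag does (the spaced function below is illustrative, not taken from any of these projects): with NORMALIZE_WHITESPACE enabled, any run of whitespace in the expected output matches any run of whitespace in the actual output, so reflowed or re-aligned output does not fail the doctest.

import doctest

def spaced():
    """
    >>> print("a", "b", "c")
    a    b    c
    """

# Without NORMALIZE_WHITESPACE this example fails: print() separates the
# arguments with single spaces, while the expected output uses wider gaps.
# With the flag, the differing whitespace runs compare equal.
if __name__ == "__main__":
    doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)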
def main(args):
    """
    Parses command line arguments and does the work of the program.
    "args" specifies the program arguments, with args[0] being the executable
    name. The return value should be used as the program's exit code.
    """
    if len(args) == 2 and args[1] == "--test":
        # Run the tests
        return doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)

    options = parse_args(args)  # This holds the nicely-parsed options object

    # Actually do the work. We structure it like this so we can use it as a
    # script or a module.
    run(options)
def _test():
    import doctest
    from pyspark.sql import SparkSession
    # Types used below; imported here so the snippet is self-contained.
    from pyspark.sql.types import StructType, StructField, IntegerType, StringType
    import pyspark.sql.column
    globs = pyspark.sql.column.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.column tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.column, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    spark.stop()
    if failure_count:
        exit(-1)
def _test():
    import doctest
    from pyspark.sql import Row, SparkSession
    import pyspark.sql.functions
    globs = pyspark.sql.functions.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.functions tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    globs['df'] = sc.parallelize([Row(name='Alice', age=2), Row(name='Bob', age=5)]).toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.functions, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    spark.stop()
    if failure_count:
        exit(-1)
def _test():
    import os
    import doctest
    from pyspark.sql import SparkSession
    import pyspark.sql.catalog
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.catalog.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.catalog tests")\
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    globs['spark'] = spark
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.catalog, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    spark.stop()
    if failure_count:
        exit(-1)
def _test():
    import os
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SparkSession  # SparkSession is used below
    import pyspark.sql.session
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
def makeTest(self, obj, parent):
    """Look for doctests in the given object, which will be a
    function, method or class.
    """
    #print 'Plugin analyzing:', obj, parent  # dbg
    # always use whitespace and ellipsis options
    optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS

    doctests = self.finder.find(obj, module=getmodule(parent))
    if doctests:
        for test in doctests:
            if len(test.examples) == 0:
                continue

            yield DocTestCase(test, obj=obj,
                              optionflags=optionflags,
                              checker=self.checker)
def suite(package):
    """Assemble test suite for doctests in package (recursively)"""
    from importlib import import_module
    for module in find_modules(package.__file__):
        try:
            module = import_module(module)
            yield DocTestSuite(module,
                               globs=Context(module.__dict__.copy()),
                               optionflags=ELLIPSIS | NORMALIZE_WHITESPACE)
        except ValueError:
            pass  # No doctests in module
        except ImportError:
            import warnings
            warnings.warn('Unimportable module: {}'.format(module))

    # Add documentation tests
    yield DocFileSuite(path.normpath(path.join(path.dirname(__file__),
                                               '..', '..', '..', 'doc', 'scripting.rst')),
                       module_relative=False,
                       globs=Context(module.__dict__.copy()),
                       optionflags=ELLIPSIS | NORMALIZE_WHITESPACE
                       )
def _test():
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import SQLContext
    # Types used below; imported here so the snippet is self-contained.
    from pyspark.sql.types import StructType, StructField, IntegerType, StringType
    import pyspark.sql.column
    globs = pyspark.sql.column.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.column, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
def _test():
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext
    import pyspark.sql.functions
    globs = pyspark.sql.functions.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['df'] = sc.parallelize([Row(name='Alice', age=2), Row(name='Bob', age=5)]).toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.functions, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
def test_print_whats_next(self):
    profile = {
        "name": factory.make_name("profile"),
        "url": factory.make_name("url"),
    }
    stdout = self.patch(sys, "stdout", StringIO())
    cli.cmd_login.print_whats_next(profile)
    expected = dedent("""\
        You are now logged in to the MAAS server at %(url)s
        with the profile name '%(name)s'.
        For help with the available commands, try:
          maas %(name)s --help
        """) % profile
    observed = stdout.getvalue()
    flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
    self.assertThat(observed, DocTestMatches(expected, flags))
def _get_flag_lookup():
    import doctest
    return dict(DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
                DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
                NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
                ELLIPSIS=doctest.ELLIPSIS,
                IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
                COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
                ALLOW_UNICODE=_get_allow_unicode_flag(),
                ALLOW_BYTES=_get_allow_bytes_flag(),
                )
def main(args):
    """
    Parses command line arguments and does the work of the program.
    "args" specifies the program arguments, with args[0] being the executable
    name. The return value should be used as the program's exit code.
    """
    if len(args) == 2 and args[1] == "--test":
        # Run the tests
        return doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)

    options = parse_args(args)  # This holds the nicely-parsed options object

    # Make a root job
    root_job = Job.wrapJobFn(copy_everything, options,
                             cores=1, memory="1G", disk="4G")

    # Run it and see how many jobs fail
    failed_jobs = Job.Runner.startToil(root_job, options)

    if failed_jobs > 0:
        raise Exception("{} jobs failed!".format(failed_jobs))

    print("All jobs completed successfully")
    return 0
def _test():
    import doctest
    from pyspark.sql import Row, SparkSession
    # Types used below; imported here so the snippet is self-contained.
    from pyspark.sql.types import StructType, StructField, IntegerType, StringType
    import pyspark.sql.group
    globs = pyspark.sql.group.__dict__.copy()
    spark = SparkSession.builder\
        .master("local[4]")\
        .appName("sql.group tests")\
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
                                   Row(name='Bob', age=5, height=85)]).toDF()
    globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
                                   Row(course="Java", year=2012, earnings=20000),
                                   Row(course="dotNET", year=2012, earnings=5000),
                                   Row(course="dotNET", year=2013, earnings=48000),
                                   Row(course="Java", year=2013, earnings=30000)]).toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.group, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    spark.stop()
    if failure_count:
        exit(-1)
def _test():
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext, SparkSession
    # Types used below; imported here so the snippet is self-contained.
    from pyspark.sql.types import StructType, StructField, IntegerType, StringType
    import pyspark.sql.dataframe
    from pyspark.sql.functions import from_unixtime
    globs = pyspark.sql.dataframe.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['spark'] = SparkSession(sc)
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    globs['df2'] = sc.parallelize([Row(name='Tom', height=80),
                                   Row(name='Bob', height=85)]).toDF()
    globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
                                   Row(name='Bob', age=5)]).toDF()
    globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
                                   Row(name='Bob', age=5, height=None),
                                   Row(name='Tom', age=None, height=None),
                                   Row(name=None, age=None, height=None)]).toDF()
    globs['sdf'] = sc.parallelize([Row(name='Tom', time=1479441846),
                                   Row(name='Bob', time=1479442946)]).toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.dataframe, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
def _test():
    import os
    import doctest
    import tempfile
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext
    import pyspark.sql.context
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.context.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['tempfile'] = tempfile
    globs['os'] = os
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")]
    )
    globs['df'] = rdd.toDF()
    jsonStrings = [
        '{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
        '{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
        '"field6":[{"field7": "row2"}]}',
        '{"field1" : null, "field2": "row3", '
        '"field3":{"field4":33, "field5": []}}'
    ]
    globs['jsonStrings'] = jsonStrings
    globs['json'] = sc.parallelize(jsonStrings)
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.context, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
def _test():
    import doctest
    import os
    import tempfile
    import py4j.protocol  # for the Py4JError check below
    from pyspark.sql import Row, SparkSession, SQLContext
    # Types used below; imported here so the snippet is self-contained.
    from pyspark.sql.types import StructType, StructField, StringType
    import pyspark.sql.streaming
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.streaming.__dict__.copy()
    try:
        spark = SparkSession.builder.getOrCreate()
    except py4j.protocol.Py4JError:
        spark = SparkSession(sc)
    globs['tempfile'] = tempfile
    globs['os'] = os
    globs['spark'] = spark
    globs['sqlContext'] = SQLContext.getOrCreate(spark.sparkContext)
    globs['sdf'] = \
        spark.readStream.format('text').load('python/test_support/sql/streaming')
    globs['sdf_schema'] = StructType([StructField("data", StringType(), False)])
    globs['df'] = \
        globs['spark'].readStream.format('text').load('python/test_support/sql/streaming')
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.streaming, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    globs['spark'].stop()
    if failure_count:
        exit(-1)
def loadTestsFromModule(self, module):
    #print '*** ipdoctest - lTM',module  # dbg

    if not self.matches(module.__name__):
        log.debug("Doctest doesn't want module %s", module)
        return

    tests = self.finder.find(module, globs=self.globs,
                             extraglobs=self.extraglobs)
    if not tests:
        return

    # always use whitespace and ellipsis options
    optionflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS

    tests.sort()
    module_file = module.__file__
    if module_file[-4:] in ('.pyc', '.pyo'):
        module_file = module_file[:-1]
    for test in tests:
        if not test.examples:
            continue
        if not test.filename:
            test.filename = module_file

        yield DocTestCase(test,
                          optionflags=optionflags,
                          checker=self.checker)
def test():
    import doctest
    # Pass the flag via optionflags; reassigning doctest.NORMALIZE_WHITESPACE
    # would only clobber the module constant without changing how examples
    # are compared.
    doctest.testfile("README.txt", verbose=1,
                     optionflags=doctest.NORMALIZE_WHITESPACE)
def suite():
    from genshi.input import HTML
    from genshi.core import Markup
    from genshi.builder import tag
    suite = unittest.TestSuite()
    for test in (SelectTest, InvertTest, EndTest, EmptyTest, RemoveTest,
                 UnwrapText, WrapTest, FilterTest, MapTest, SubstituteTest,
                 RenameTest, ReplaceTest, BeforeTest, AfterTest, PrependTest,
                 AppendTest, AttrTest, CopyTest, CutTest):
        suite.addTest(unittest.makeSuite(test, 'test'))
    suite.addTest(doctest.DocTestSuite(
        genshi.filters.transform, optionflags=doctest.NORMALIZE_WHITESPACE,
        extraglobs={'HTML': HTML, 'tag': tag, 'Markup': Markup}))
    return suite
def _test():
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext
    # Types used below; imported here so the snippet is self-contained.
    from pyspark.sql.types import StructType, StructField, IntegerType, StringType
    import pyspark.sql.group
    globs = pyspark.sql.group.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    globs['df3'] = sc.parallelize([Row(name='Alice', age=2, height=80),
                                   Row(name='Bob', age=5, height=85)]).toDF()
    globs['df4'] = sc.parallelize([Row(course="dotNET", year=2012, earnings=10000),
                                   Row(course="Java", year=2012, earnings=20000),
                                   Row(course="dotNET", year=2012, earnings=5000),
                                   Row(course="dotNET", year=2013, earnings=48000),
                                   Row(course="Java", year=2013, earnings=30000)]).toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.group, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
def _test():
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext
    # Types used below; imported here so the snippet is self-contained.
    from pyspark.sql.types import StructType, StructField, IntegerType, StringType
    import pyspark.sql.dataframe
    globs = pyspark.sql.dataframe.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')])\
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    globs['df2'] = sc.parallelize([Row(name='Tom', height=80),
                                   Row(name='Bob', height=85)]).toDF()
    globs['df3'] = sc.parallelize([Row(name='Alice', age=2),
                                   Row(name='Bob', age=5)]).toDF()
    globs['df4'] = sc.parallelize([Row(name='Alice', age=10, height=80),
                                   Row(name='Bob', age=5, height=None),
                                   Row(name='Tom', age=None, height=None),
                                   Row(name=None, age=None, height=None)]).toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.dataframe, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE | doctest.REPORT_NDIFF)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
def _test():
    import os
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row, SQLContext
    import pyspark.sql.context
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.context.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    globs['sqlContext'] = SQLContext(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")]
    )
    globs['df'] = rdd.toDF()
    jsonStrings = [
        '{"field1": 1, "field2": "row1", "field3":{"field4":11}}',
        '{"field1" : 2, "field3":{"field4":22, "field5": [10, 11]},'
        '"field6":[{"field7": "row2"}]}',
        '{"field1" : null, "field2": "row3", '
        '"field3":{"field4":33, "field5": []}}'
    ]
    globs['jsonStrings'] = jsonStrings
    globs['json'] = sc.parallelize(jsonStrings)
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.context, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        exit(-1)
def test_suite(*args):
    return doctest.DocTestSuite(optionflags=(doctest.NORMALIZE_WHITESPACE |
                                             doctest.ELLIPSIS |
                                             doctest.REPORT_ONLY_FIRST_FAILURE |
                                             doctest.REPORT_NDIFF
                                             ))
def test_doctests(module_name):
    _, test_count = doctest.testmod(
        import_module(module_name),
        report=True,
        verbose=True,
        raise_on_error=True,
        optionflags=doctest.NORMALIZE_WHITESPACE,
    )
    assert test_count > 0
def _get_flag_lookup():
    import doctest
    return dict(DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
                DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
                NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
                ELLIPSIS=doctest.ELLIPSIS,
                IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
                COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
                ALLOW_UNICODE=_get_allow_unicode_flag())
def additional_tests():  # for setup.py
    return doctest.DocTestSuite(optionflags=doctest.NORMALIZE_WHITESPACE,
                                checker=Checker())
def assertDocTestMatches(self, expected, observed):
    return self.assertThat(observed, DocTestMatches(
        dedent(expected), doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE))
def run_func_docstring(tester, test_func, globs=None, verbose=False, compileflags=None,
                       optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE):
    """
    Similar to doctest.run_docstring_examples, but takes a single function/bound
    method, extracts its singular docstring (no looking for subobjects with tests),
    runs it, and most importantly raises an exception if the test doesn't pass.

    tester should be an instance of dtest.Tester
    test_func should be a function/bound method whose docstring is to be tested
    """
    name = test_func.__name__

    if globs is None:
        globs = build_doc_context(tester, name)

    # dumb function that remembers values that it is called with
    # the DocTestRunner.run function called below accepts a callable for logging
    # and this is a hacky but easy way to capture the nicely formatted value for reporting
    def test_output_capturer(content):
        if not hasattr(test_output_capturer, 'content'):
            test_output_capturer.content = ''

        test_output_capturer.content += content

    test = doctest.DocTestParser().get_doctest(inspect.getdoc(test_func), globs, name, None, None)
    runner = doctest.DocTestRunner(verbose=verbose, optionflags=optionflags)
    runner.run(test, out=test_output_capturer, compileflags=compileflags)

    failed, attempted = runner.summarize()

    if failed > 0:
        raise RuntimeError("Doctest failed! Captured output:\n{}".format(test_output_capturer.content))

    if failed + attempted == 0:
        raise RuntimeError("No tests were run!")
def setup_optionflags(self):
    if 'optionflags' not in self._kw:
        self._kw['optionflags'] = (
            doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
def setup_optionflags(self):
    if 'optionflags' not in self._kw:
        self._kw['optionflags'] = (
            doctest.ELLIPSIS | doctest.REPORT_NDIFF |
            doctest.NORMALIZE_WHITESPACE)
def load_tests(loader, tests, ignore):
    import ibex

    doctest_flags = doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE

    tests.addTests(doctest.DocTestSuite(__import__('ibex'), optionflags=doctest_flags))
    for mod_name in dir(ibex):
        try:
            mod = __import__('ibex.' + mod_name)
        except ImportError:
            continue
        tests.addTests(doctest.DocTestSuite(mod, optionflags=doctest_flags))

    f_name = os.path.join(_this_dir, '../ibex/sklearn/__init__.py')
    tests.addTests(doctest.DocFileSuite(f_name, module_relative=False, optionflags=doctest_flags))

    f_name = os.path.join(_this_dir, '../ibex/xgboost/__init__.py')
    tests.addTests(doctest.DocFileSuite(f_name, module_relative=False, optionflags=doctest_flags))

    f_name = os.path.join(_this_dir, '../ibex/tensorflow/contrib/keras/wrappers/scikit_learn/__init__.py')
    tests.addTests(doctest.DocFileSuite(f_name, module_relative=False, optionflags=doctest_flags))

    doc_f_names = list(glob(os.path.join(_this_dir, '../docs/source/*.rst')))
    doc_f_names += [os.path.join(_this_dir, '../README.rst')]
    tests.addTests(
        doctest.DocFileSuite(*doc_f_names, module_relative=False, optionflags=doctest_flags))

    doc_f_names = list(glob(os.path.join(_this_dir, '../docs/build/text/*.txt')))
    tests.addTests(
        doctest.DocFileSuite(*doc_f_names, module_relative=False, optionflags=doctest_flags))

    return tests
def make_suite():  # pragma: no cover
    from calmjs.parse.lexers import es5 as es5lexer
    from calmjs.parse import walkers
    from calmjs.parse import sourcemap

    def open(p, flag='r'):
        result = StringIO(examples[p] if flag == 'r' else '')
        result.name = p
        return result

    parser = doctest.DocTestParser()
    optflags = doctest.NORMALIZE_WHITESPACE | doctest.ELLIPSIS
    dist = get_distribution('calmjs.parse')
    if dist:
        if dist.has_metadata('PKG-INFO'):
            pkgdesc = dist.get_metadata('PKG-INFO').replace('\r', '')
        elif dist.has_metadata('METADATA'):
            pkgdesc = dist.get_metadata('METADATA').replace('\r', '')
        else:
            pkgdesc = ''
    pkgdesc_tests = [
        t for t in parser.parse(pkgdesc) if isinstance(t, doctest.Example)]

    test_loader = unittest.TestLoader()
    test_suite = test_loader.discover(
        'calmjs.parse.tests', pattern='test_*.py',
        top_level_dir=dirname(__file__)
    )
    test_suite.addTest(doctest.DocTestSuite(es5lexer, optionflags=optflags))
    test_suite.addTest(doctest.DocTestSuite(walkers, optionflags=optflags))
    test_suite.addTest(doctest.DocTestSuite(sourcemap, optionflags=optflags))
    test_suite.addTest(doctest.DocTestCase(
        # skipping all the error case tests which should all be in the
        # troubleshooting section at the end; bump the index whenever
        # more failure examples are added.
        # also note that line number is unknown, as PKG_INFO has headers
        # and also the counter is somehow inaccurate in this case.
        doctest.DocTest(pkgdesc_tests[:-1], {
            'open': open}, 'PKG_INFO', 'README.rst', None, pkgdesc),
        optionflags=optflags,
    ))

    return test_suite
def __init__(self):
    """
    Uses :meth:`unittest.TestSuite.addTests` to add
    :obj:`doctest.DocFileSuite` and :obj:`doctest.DocTestSuite` tests.
    """
    readme_file_name = \
        os.path.realpath(
            os.path.join(os.path.dirname(__file__), "..", "README.rst")
        )
    examples_rst_file_name = \
        os.path.realpath(
            os.path.join(
                os.path.dirname(__file__),
                "..", "docs", "source", "examples", "index.rst"
            )
        )
    suite = _unittest.TestSuite()
    if os.path.exists(readme_file_name):
        suite.addTests(
            _doctest.DocFileSuite(
                readme_file_name,
                module_relative=False,
                optionflags=_doctest.NORMALIZE_WHITESPACE
            )
        )
    if os.path.exists(examples_rst_file_name):
        suite.addTests(
            _doctest.DocFileSuite(
                examples_rst_file_name,
                module_relative=False,
                optionflags=_doctest.NORMALIZE_WHITESPACE
            )
        )
    suite.addTests(
        _doctest.DocTestSuite(
            _array_split,
            optionflags=_doctest.NORMALIZE_WHITESPACE
        )
    )
    suite.addTests(
        _doctest.DocTestSuite(
            _split,
            optionflags=_doctest.NORMALIZE_WHITESPACE
        )
    )
    _unittest.TestSuite.__init__(self, suite)