The following 50 code examples, extracted from open-source Python projects, demonstrate how to use doctest.testmod().
def _check_docs(self, module):
    """Run every doctest in *module*, surfacing the first problem found.

    When ``self._skip`` is set, the check is skipped entirely and a warning
    is written to the real stdout (pdbpp interferes with doctest capture).
    """
    if self._skip:
        # Write straight to __stdout__ so that nose's capture does not
        # swallow the warning.
        warning = ("Warning: Skipping doctests for %s because "
                   "pdbpp is installed." % module.__name__)
        print(warning, file=sys.__stdout__)
        return
    try:
        doctest.testmod(
            module,
            verbose=True,
            raise_on_error=True,
            optionflags=self.flags,
        )
    except doctest.UnexpectedException as exc:
        # Re-raise the example's own exception rather than the wrapper.
        raise exc.exc_info[1]
    except doctest.DocTestFailure as exc:
        print("Got:")
        print(exc.got)
        raise
def _test(): testfiles = [arg for arg in sys.argv[1:] if arg and arg[0] != '-'] if not testfiles: name = os.path.basename(sys.argv[0]) if '__loader__' in globals(): # python -m name, _ = os.path.splitext(name) print("usage: {0} [-v] file ...".format(name)) return 2 for filename in testfiles: if filename.endswith(".py"): # It is a module -- insert its dir into sys.path and try to # import it. If it is part of a package, that possibly # won't work because of package imports. dirname, filename = os.path.split(filename) sys.path.insert(0, dirname) m = __import__(filename[:-3]) del sys.path[0] failures, _ = testmod(m) else: failures, _ = testfile(filename, module_relative=False) if failures: return 1 return 0
def __init__(self, verbose=False, parser=DocTestParser(), recurse=True,
             _namefilter=None, exclude_empty=True):
    """Create a new doctest finder.

    `parser` is a class or factory producing DocTest-compatible objects;
    its signature should match the DocTest constructor.  With `recurse`
    false, `find` examines only the given object, not contained objects.
    With `exclude_empty` false, `find` also yields tests for objects whose
    docstrings are empty.
    """
    self._verbose = verbose
    self._parser = parser
    self._recurse = recurse
    self._exclude_empty = exclude_empty
    # Undocumented; kept only for temporary backward compatibility with
    # testmod's deprecated `isprivate` machinery.
    self._namefilter = _namefilter
def test_subclassing():
    """End-to-end check of DynamoDBManager against a local DynamoDB.

    Provisions the project table, runs the ``client`` module's doctests
    against it, and tears the table down afterwards.
    """
    table_name = 'project_data'
    session_args = {
        'aws_access_key_id': "access-key-id-of-your-choice",
        'aws_secret_access_key': "secret-key-of-your-choice",
    }
    resource_args = {
        'endpoint_url': 'http://localhost:8000/',
        'region_name': 'us-east-1',
    }
    manager = DynamoDBManager(
        table_name,
        session_args=session_args,
        resource_args=resource_args)
    # Start from a clean slate; ignore "does not exist" errors.
    manager.delete_table(table_name, raise_on_err=False)
    manager.create_archive_table(table_name, raise_on_err=False)
    failures, _ = doctest.testmod(client, report=True)
    assert failures == 0
    manager.delete_table(table_name)
def main(args):
    """Program entry point.

    *args* are the program arguments, with ``args[0]`` being the executable
    name.  The return value is meant to be used as the process exit status.
    Passing ``--test`` as the sole argument runs this module's doctests
    instead of the normal work.
    """
    if len(args) == 2 and args[1] == "--test":
        # Self-test mode: report the doctest results as the exit value.
        return doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE)
    # Nicely-parsed options object.
    options = parse_args(args)
    # Structured this way so the file works both as a script and a module.
    run(options)
def _test():
    """Run the doctests in pyspark.sql.column on a local[4] Spark session."""
    import sys
    import doctest
    from pyspark.sql import SparkSession
    # These type constructors were referenced without any visible import;
    # bring them into scope explicitly so the driver is self-contained.
    from pyspark.sql.types import IntegerType, StringType, StructField, StructType
    import pyspark.sql.column
    globs = pyspark.sql.column.__dict__.copy()
    spark = SparkSession.builder \
        .master("local[4]") \
        .appName("sql.column tests") \
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    # Small two-row frame shared by the examples in the module docstrings.
    globs['df'] = sc.parallelize([(2, 'Alice'), (5, 'Bob')]) \
        .toDF(StructType([StructField('age', IntegerType()),
                          StructField('name', StringType())]))
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.column, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE
        | doctest.REPORT_NDIFF)
    spark.stop()
    if failure_count:
        # sys.exit is the supported API; the site-injected exit() may be
        # absent (e.g. under `python -S`).
        sys.exit(-1)
def _test():
    """Run the doctests in pyspark.sql.functions on a local[4] Spark session."""
    import sys
    import doctest
    from pyspark.sql import Row, SparkSession
    import pyspark.sql.functions
    globs = pyspark.sql.functions.__dict__.copy()
    spark = SparkSession.builder \
        .master("local[4]") \
        .appName("sql.functions tests") \
        .getOrCreate()
    sc = spark.sparkContext
    globs['sc'] = sc
    globs['spark'] = spark
    # Two-row demo frame referenced by the module's examples.
    globs['df'] = sc.parallelize([Row(name='Alice', age=2),
                                  Row(name='Bob', age=5)]).toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.functions, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    spark.stop()
    if failure_count:
        # sys.exit is the supported API; the site-injected exit() may be absent.
        sys.exit(-1)
def _test():
    """Run pyspark.sql.conf doctests from $SPARK_HOME on a local session."""
    import os
    import sys
    import doctest
    from pyspark.sql.session import SparkSession
    import pyspark.sql.conf
    # Examples resolve paths relative to the Spark installation.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.conf.__dict__.copy()
    spark = SparkSession.builder \
        .master("local[4]") \
        .appName("sql.conf tests") \
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    globs['spark'] = spark
    (failure_count, test_count) = doctest.testmod(pyspark.sql.conf, globs=globs)
    spark.stop()
    if failure_count:
        # sys.exit is the supported API; the site-injected exit() may be absent.
        sys.exit(-1)
def _test():
    """Run pyspark.sql.catalog doctests from $SPARK_HOME on a local session."""
    import os
    import sys
    import doctest
    from pyspark.sql import SparkSession
    import pyspark.sql.catalog
    # Examples resolve paths relative to the Spark installation.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.catalog.__dict__.copy()
    spark = SparkSession.builder \
        .master("local[4]") \
        .appName("sql.catalog tests") \
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    globs['spark'] = spark
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.catalog, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    spark.stop()
    if failure_count:
        # sys.exit is the supported API; the site-injected exit() may be absent.
        sys.exit(-1)
def _test():
    """Run pyspark.sql.session doctests on a fresh local SparkContext."""
    import os
    import sys
    import doctest
    from pyspark.context import SparkContext
    from pyspark.sql import Row
    import pyspark.sql.session
    # Examples resolve paths relative to the Spark installation.
    os.chdir(os.environ["SPARK_HOME"])
    globs = pyspark.sql.session.__dict__.copy()
    sc = SparkContext('local[4]', 'PythonTest')
    globs['sc'] = sc
    # SparkSession is defined in this module (pyspark/sql/session.py).
    globs['spark'] = SparkSession(sc)
    globs['rdd'] = rdd = sc.parallelize(
        [Row(field1=1, field2="row1"),
         Row(field1=2, field2="row2"),
         Row(field1=3, field2="row3")])
    globs['df'] = rdd.toDF()
    (failure_count, test_count) = doctest.testmod(
        pyspark.sql.session, globs=globs,
        optionflags=doctest.ELLIPSIS | doctest.NORMALIZE_WHITESPACE)
    globs['sc'].stop()
    if failure_count:
        # sys.exit is the supported API; the site-injected exit() may be absent.
        sys.exit(-1)
def _test():
    """Run this module's doctests (pyspark.mllib.fpm) with a temp dir in scope."""
    import sys
    import doctest
    from pyspark.sql import SparkSession
    import pyspark.mllib.fpm
    globs = pyspark.mllib.fpm.__dict__.copy()
    spark = SparkSession.builder \
        .master("local[4]") \
        .appName("mllib.fpm tests") \
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    import tempfile
    temp_path = tempfile.mkdtemp()
    globs['temp_path'] = temp_path
    try:
        # No module argument: testmod() runs the doctests of __main__,
        # i.e. this file when executed as a script.
        (failure_count, test_count) = doctest.testmod(
            globs=globs, optionflags=doctest.ELLIPSIS)
        spark.stop()
    finally:
        from shutil import rmtree
        try:
            rmtree(temp_path)
        except OSError:
            pass
    if failure_count:
        # sys.exit is the supported API; the site-injected exit() may be absent.
        sys.exit(-1)
def _test():
    """Run this module's doctests (pyspark.mllib.linalg.distributed)."""
    import sys
    import doctest
    from pyspark.sql import SparkSession
    from pyspark.mllib.linalg import Matrices
    import pyspark.mllib.linalg.distributed
    globs = pyspark.mllib.linalg.distributed.__dict__.copy()
    spark = SparkSession.builder \
        .master("local[2]") \
        .appName("mllib.linalg.distributed tests") \
        .getOrCreate()
    globs['sc'] = spark.sparkContext
    globs['Matrices'] = Matrices
    # No module argument: testmod() runs the doctests of __main__.
    (failure_count, test_count) = doctest.testmod(
        globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        # sys.exit is the supported API; the site-injected exit() may be absent.
        sys.exit(-1)
def _test():
    """Run this module's doctests (pyspark.mllib.util) on a local[2] session."""
    import sys
    import doctest
    from pyspark.sql import SparkSession
    globs = globals().copy()
    # The small batch size here ensures that we see multiple batches,
    # even in these small test examples.
    spark = SparkSession.builder \
        .master("local[2]") \
        .appName("mllib.util tests") \
        .getOrCreate()
    globs['spark'] = spark
    globs['sc'] = spark.sparkContext
    (failure_count, test_count) = doctest.testmod(
        globs=globs, optionflags=doctest.ELLIPSIS)
    spark.stop()
    if failure_count:
        # sys.exit is the supported API; the site-injected exit() may be absent.
        sys.exit(-1)
def test():
    """Run this module's doctests and exit the process: 0 on success, 1 otherwise."""
    import doctest
    result = doctest.testmod(optionflags=doctest.IGNORE_EXCEPTION_DETAIL)
    # Clamp the failure count so the exit status stays a conventional 0/1.
    sys.exit(min(1, result.failed))
def _test():
    # Run this module's own doctests; returns doctest.TestResults(failed, attempted).
    import doctest
    return doctest.testmod()
def _test():
    """Run the doctests embedded in the difflib module; return the TestResults."""
    import doctest
    import difflib
    return doctest.testmod(difflib)
def _test():
    # Run the doctests embedded in the Cookie module.
    # NOTE(review): Cookie is a Python-2 module (renamed http.cookies in
    # Python 3), so this example only runs under Python 2.
    import doctest, Cookie
    return doctest.testmod(Cookie)
def __init__(self, checker=None, verbose=None, optionflags=0):
    """Create a new test runner.

    `checker` is the OutputChecker used to compare expected and actual
    example output; a default one is created when omitted.  `verbose`
    prints everything when true and only failures when false; when None,
    it defaults to whether '-v' appears in sys.argv.  `optionflags`
    controls output comparison and failure display; see the testmod
    documentation for details.
    """
    if verbose is None:
        verbose = '-v' in sys.argv
    self._checker = checker or OutputChecker()
    self._verbose = verbose
    self.optionflags = optionflags
    self.original_optionflags = optionflags
    # Running totals over every example this runner has executed.
    self.tries = 0
    self.failures = 0
    self._name2ft = {}
    # Fake stdout that captures what the examples print.
    self._fakeout = _SpoofOut()

#/////////////////////////////////////////////////////////////////
# Reporting methods
#/////////////////////////////////////////////////////////////////
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """Run the doctest examples found in *f*'s docstring, using *globs*
    as the global namespace.

    `name` appears in failure messages.  With `verbose` true, output is
    produced even when every example passes.  `compileflags` are the
    compiler flags used when running the examples; when omitted they
    default to the future-import flags implied by *globs*.  `optionflags`
    is interpreted as for testmod().
    """
    finder = DocTestFinder(verbose=verbose, recurse=False)
    runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    # recurse=False: only f itself is searched; contained objects are skipped.
    for test in finder.find(f, name, globs=globs):
        runner.run(test, compileflags=compileflags)

######################################################################
## 7. Tester
######################################################################
# Provided only for backwards compatibility; not actually used anywhere.
def run_tests(modules, verbose=None):
    """Run tests for a list of modules; then summarize results.

    For each module, ``<module>.txt`` is split into doctest text (appended
    to the module docstring so testmod() picks it up) and demo statements
    (executed directly in the module's namespace).
    """
    for module in modules:
        tests, demos = split_extra_tests(module.__name__ + ".txt")
        if tests:
            if '__doc__' not in dir(module):
                module.__doc__ = ''
            module.__doc__ += '\n' + tests + '\n'
        doctest.testmod(module, report=0, verbose=verbose)
        if demos:
            for stmt in re.findall(">>> (.*)", demos):
                # The call form exec(stmt, ns) is valid in both Python 2
                # and 3, unlike the Python-2-only `exec stmt in ns`
                # statement used previously (a SyntaxError on Python 3).
                exec(stmt, module.__dict__)
    doctest.master.summarize()
def report_failure(self, out, test, example, got):
    # Fail fast: raise instead of recording the failure, so the caller
    # sees the first failing example immediately.
    raise DocTestFailure(test, example, got)

######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.

# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.
def _test():
    # Run this module's doctests; results are reported on stdout.
    import doctest
    doctest.testmod()
def _test():
    # Run the doctests embedded in the stripxml module.
    import doctest, stripxml
    doctest.testmod(stripxml)
def test_local():
    """The doctests in the ``local`` example module must pass cleanly."""
    n_failed, _n_run = doctest.testmod(local, report=True)
    assert n_failed == 0
def test_ondisk():
    """The doctests in the ``ondisk`` example module must pass cleanly.

    Silently skipped when the optional dependencies are unavailable.
    """
    if not has_special_dependencies:
        return
    n_failed, _n_run = doctest.testmod(ondisk, report=True)
    assert n_failed == 0
def test_s3():
    """The doctests in the ``s3`` example module must pass cleanly."""
    n_failed, _n_run = doctest.testmod(s3, report=True)
    assert n_failed == 0
def test_caching():
    """The doctests in the ``caching`` example module must pass cleanly."""
    n_failed, _n_run = doctest.testmod(caching, report=True)
    assert n_failed == 0
def test_docs_pythonapi_creating_archives(example_snippet_working_dirs):
    """Doctests in the ``pythonapi_creating_archives`` docs module must pass."""
    n_failed, _n_run = doctest.testmod(pythonapi_creating_archives, report=True)
    assert n_failed == 0
def test_docs_pythonapi_tagging(example_snippet_working_dirs):
    """Doctests in the ``pythonapi_tagging`` docs module must pass."""
    n_failed, _n_run = doctest.testmod(pythonapi_tagging, report=True)
    assert n_failed == 0
def test_docs_pythonapi_dependencies(example_snippet_working_dirs):
    """Doctests in the ``pythonapi_dependencies`` docs module must pass."""
    n_failed, _n_run = doctest.testmod(pythonapi_dependencies, report=True)
    assert n_failed == 0
def test_docs_pythonapi_io(example_snippet_working_dirs):
    """Doctests in the ``pythonapi_io`` docs module must pass.

    Silently skipped when the optional dependencies are unavailable.
    """
    if not has_special_dependencies:
        return
    n_failed, _n_run = doctest.testmod(pythonapi_io, report=True)
    assert n_failed == 0
def test_docs_pythonapi_metadata(example_snippet_working_dirs):
    """Doctests in the ``pythonapi_metadata`` docs module must pass."""
    n_failed, _n_run = doctest.testmod(pythonapi_metadata, report=True)
    assert n_failed == 0
def test_docs_pythonapi_finding_archives(example_snippet_working_dirs):
    """Doctests in the ``pythonapi_finding_archives`` docs module must pass."""
    n_failed, _n_run = doctest.testmod(pythonapi_finding_archives, report=True)
    assert n_failed == 0
def _test():
    """Make the parent directory importable, then run pytz's doctests."""
    import doctest
    import os
    import sys
    # The pytz package lives one directory up from this test script.
    sys.path.insert(0, os.pardir)
    import pytz
    return doctest.testmod(pytz)
def test():
    # Run this module's doctests; failures are printed to stdout.
    import doctest
    doctest.testmod()
def testModule(name):
    """Import the module called *name* and run its doctests.

    Exits the process with status 1 if any doctest fails; otherwise prints
    an end-of-test marker.
    """
    import sys
    # Parenthesized print calls are valid in both Python 2 and 3, unlike
    # the bare Python-2 print statements used previously.
    print("--- Test module %s" % name)
    module = importModule(name)
    failure, nb_test = testmod(module)
    if failure:
        # sys.exit is the supported API; the site-injected exit() may be absent.
        sys.exit(1)
    print("--- End of test")