private static List collectWrappedRunners(Class[] classes) throws InitializationError {
    final List runners = new ArrayList();
    final List nonSuiteClasses = new ArrayList();
    final SuiteMethodBuilder suiteMethodBuilder = new SuiteMethodBuilder();
    for (int i = 0, length = classes.length; i < length; i++) {
        Class aClass = classes[i];
        if (suiteMethodBuilder.hasSuiteMethod(aClass)) {
            try {
                runners.add(new ClassAwareSuiteMethod(aClass));
            } catch (Throwable throwable) {
                runners.add(new ErrorReportingRunner(aClass, throwable));
            }
        } else {
            nonSuiteClasses.add(aClass);
        }
    }
    runners.addAll(new AllDefaultPossibilitiesBuilder(false)
            .runners(null, (Class[]) nonSuiteClasses.toArray(new Class[nonSuiteClasses.size()])));
    return runners;
}
private Request constructLeafRequest(List<Description> leaves) {
    final List<Runner> runners = new ArrayList<Runner>();
    for (Description each : leaves) {
        runners.add(buildRunner(each));
    }
    return new Request() {
        @Override
        public Runner getRunner() {
            try {
                return new Suite((Class<?>) null, runners) {
                };
            } catch (InitializationError e) {
                return new ErrorReportingRunner(null, e);
            }
        }
    };
}
/**
 * Creates a {@link Request}.
 *
 * @param computer {@link Computer} to be used.
 */
public Request createRequest(Computer computer) {
    if (parserErrors.isEmpty()) {
        return Request
                .classes(computer, classes.toArray(new Class<?>[classes.size()]))
                .filterWith(filter);
    } else {
        return new Request() {
            @Override
            public Runner getRunner() {
                return new ErrorReportingRunner(
                        JUnitCommandLineParseResult.class,
                        new InitializationError(parserErrors));
            }
        };
    }
}
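When a request like the one above is actually run, the ErrorReportingRunner turns each wrapped exception into an ordinary test failure. Below is a minimal, self-contained sketch of that behaviour; the class name ErrorRequestSketch and the "parser error" messages are invented for illustration.

import java.util.Arrays;

import org.junit.internal.runners.ErrorReportingRunner;
import org.junit.runner.JUnitCore;
import org.junit.runner.Request;
import org.junit.runner.Result;
import org.junit.runner.notification.Failure;
import org.junit.runners.model.InitializationError;

public class ErrorRequestSketch {
    public static void main(String[] args) {
        // Wrap two hypothetical "parser errors" the same way createRequest does.
        InitializationError errors = new InitializationError(Arrays.<Throwable>asList(
                new IllegalArgumentException("unknown option: --bogus"),
                new IllegalArgumentException("could not load class Foo")));
        Request request = Request.runner(
                new ErrorReportingRunner(ErrorRequestSketch.class, errors));

        Result result = new JUnitCore().run(request);
        // One failure is reported per wrapped cause, so this prints 2.
        System.out.println(result.getFailureCount());
        for (Failure failure : result.getFailures()) {
            System.out.println(failure.getMessage());
        }
    }
}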
@Override
public List<TestUnit> findTestUnits(final Class<?> clazz) {
    final Runner runner = AdaptedJUnitTestUnit.createRunner(clazz);
    if ((runner == null) || runner.getClass().isAssignableFrom(ErrorReportingRunner.class)) {
        return Collections.emptyList();
    }
    if (isParameterizedTest(runner)) {
        return handleParameterizedTest(clazz, runner.getDescription());
    }
    return Collections.emptyList();
}
@Test
public void testNoMatching() throws Exception {
    // MethodFilterTest contains no method named "testRunTestClass"
    Request request = Request.classes(MethodFilterTest.class);
    request = request.filterWith(new MethodFilter(
            Collections.singletonList("testRunTestClass")
    ));
    assertTrue(request.getRunner() instanceof ErrorReportingRunner);
}
@Override
public Runner getRunner() {
    try {
        Runner runner = fRequest.getRunner();
        fFilter.apply(runner);
        return runner;
    } catch (NoTestsRemainException e) {
        return new ErrorReportingRunner(Filter.class, new Exception(String
                .format("No tests found matching %s from %s", fFilter
                        .describe(), fRequest.toString())));
    }
}
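A quick way to see that message in practice is to filter a request down to a method that does not exist. The sketch below only assumes JUnit 4 on the classpath; FilterMismatchSketch, SomeTest and the method name "missing" are invented names.

import org.junit.Test;
import org.junit.runner.Description;
import org.junit.runner.JUnitCore;
import org.junit.runner.Request;
import org.junit.runner.Result;

public class FilterMismatchSketch {

    public static class SomeTest {
        @Test
        public void works() {
        }
    }

    public static void main(String[] args) {
        // Filter for a test method that SomeTest does not declare.
        Request filtered = Request.aClass(SomeTest.class)
                .filterWith(Description.createTestDescription(SomeTest.class, "missing"));

        Result result = new JUnitCore().run(filtered);
        // The filter removes every test, so the runner is replaced by an
        // ErrorReportingRunner and the mismatch shows up as a single failure
        // whose message starts with "No tests found matching ...".
        System.out.println(result.getFailureCount()); // 1
        System.out.println(result.getFailures().get(0).getMessage());
    }
}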
/**
 * Always returns a runner, even if it is just one that prints an error instead of running tests.
 *
 * @param testClass class to be run
 * @return a Runner
 */
public Runner safeRunnerForClass(Class<?> testClass) {
    try {
        return runnerForClass(testClass);
    } catch (Throwable e) {
        return new ErrorReportingRunner(testClass, e);
    }
}
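That contract matters mostly when a class cannot be turned into a valid runner at all. Here is a small sketch under that assumption; BrokenTest is a deliberately invalid test class made up for the example, and a concrete builder from org.junit.internal.builders (the same one used in the first snippet above) supplies safeRunnerForClass.

import org.junit.internal.builders.AllDefaultPossibilitiesBuilder;
import org.junit.internal.runners.ErrorReportingRunner;
import org.junit.runner.Runner;

public class SafeRunnerSketch {

    // No @Test methods, so the default JUnit 4 runner cannot be initialized.
    public static class BrokenTest {
    }

    public static void main(String[] args) {
        AllDefaultPossibilitiesBuilder builder = new AllDefaultPossibilitiesBuilder(true);
        Runner runner = builder.safeRunnerForClass(BrokenTest.class);
        // safeRunnerForClass never throws; the InitializationError is captured
        // and handed back as an ErrorReportingRunner instead.
        System.out.println(runner instanceof ErrorReportingRunner); // true
    }
}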
private void checkForErrorRunner(final Runner runner) {
    if (runner instanceof ErrorReportingRunner) {
        LOG.warning("JUnit error for class " + this.clazz + " : " + runner.getDescription());
    }
}
private boolean isNotARunnableTest(final Runner runner, final String className) {
    try {
        return (runner == null)
                || runner.getClass().isAssignableFrom(ErrorReportingRunner.class)
                || isParameterizedTest(runner)
                || isAJUnitThreeErrorOrWarning(runner)
                || isJUnitThreeSuiteMethodNotForOwnClass(runner, className);
    } catch (RuntimeException ex) {
        // some runners (looking at you spock) can throw a runtime exception
        // when the getDescription method is called
        return true;
    }
}
/**
 * Updates the given test class.
 *
 * @param testClass the test class to update
 */
public void updateTestClass(Class<?> testClass) {
    // Create a runner
    Runner runner = Request.aClass(testClass).getRunner();
    if (runner instanceof ErrorReportingRunner) {
        /*
         * TODO This is a very dirty instanceof check, and we know it.
         *
         * We need this because JUnit does not let us know about validation
         * errors without actually running the test. Even the description of
         * an ErrorReportingRunner does not give this away.
         *
         * The alternative would be to add a field to the Description class
         * indicating that it describes an invalid test. Such a change would
         * break a lot of things, and it doesn't seem worthwhile to modify
         * JUnit for this one small thing.
         */
        return;
    }
    if (runner == null) return;

    // Collect test method names
    Set<String> testNames = new HashSet<String>();
    for (Description description : runner.getDescription().getChildren()) {
        // Only consider atomic tests
        if (description.isTest()) {
            testNames.add(description.getMethodName());
        }
    }

    // Update database
    updateTests(testClass.getName(), testNames);
}
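The same instanceof idea can be captured in a tiny standalone helper. The sketch below is not part of the code above; TestClassCheck and isRunnableTestClass are made-up names.

import org.junit.internal.runners.ErrorReportingRunner;
import org.junit.runner.Request;
import org.junit.runner.Runner;

public final class TestClassCheck {

    private TestClassCheck() {
    }

    /** Returns true if JUnit can build a real runner for the class. */
    public static boolean isRunnableTestClass(Class<?> testClass) {
        Runner runner = Request.aClass(testClass).getRunner();
        // Validation problems are reported via an ErrorReportingRunner rather
        // than an exception, so an instanceof check is the only way to spot
        // them without actually running the tests.
        return runner != null && !(runner instanceof ErrorReportingRunner);
    }
}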
/**
 * Not used within JUnit. Clients should simply instantiate ErrorReportingRunner themselves.
 */
@Deprecated
public static Request errorReport(Class<?> klass, Throwable cause) {
    return runner(new ErrorReportingRunner(klass, cause));
}
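Per the deprecation note, the non-deprecated equivalent is to construct the runner directly and wrap it with Request.runner; here klass and cause stand for whatever class and exception should be reported.

Request request = Request.runner(new ErrorReportingRunner(klass, cause));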
@Test(expected = NullPointerException.class)
public void cannotCreateWithNullClass() {
    new ErrorReportingRunner(null, new RuntimeException());
}
@SuppressWarnings({"ThrowableInstanceNeverThrown"}) private static Request createErrorReportingRequestForFilterError(Filter filter) { ErrorReportingRunner runner = new ErrorReportingRunner(Filter.class, new Exception( String.format("No tests found matching %s", filter.describe()))); return Request.runner(runner); }