
prism-auto: tweak test statistics (count non-convergence, ...)

We now count non-convergence (i.e., the error message contains 'did not converge') as a sub-type of failed tests.

Additionally, skipped export runs and skipped duplicate runs are now counted as sub-types of skipped tests.
Branch: master
Author: Joachim Klein, 8 years ago
Parent commit: e55d9ca6e3

1 changed file: prism/etc/scripts/prism-auto (30 changed lines)

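Both changes follow the same bookkeeping pattern: a sub-type result increments its own counter in addition to its parent category, so the parent stays the total and the sub-type is a breakdown of it. A minimal sketch of the idea (simplified; incrementTestStat is only referenced, not defined, in this diff, and countFailure is a made-up name for illustration):

    # sub-type counters live in the same dict as their parent categories
    testStats = dict(SUCCESS=0, FAILURE=0, FAIL_NONCONVERGE=0,
                     SKIPPED=0, SKIPPED_EXPORT=0, SKIPPED_DUPLICATE=0)

    def incrementTestStat(stat):
        # assumed implementation, consistent with how the script uses it
        testStats[stat] += 1

    def countFailure(msg):
        # a non-converging run bumps both counters: FAILURE remains the
        # total, FAIL_NONCONVERGE the breakdown printed beneath it
        if 'did not converge' in msg:
            incrementTestStat('FAIL_NONCONVERGE')
        incrementTestStat('FAILURE')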

@@ -21,7 +21,7 @@ from threading import Timer
 #==================================================================================================
 # statistics about test results
-testStats = dict(SUCCESS = 0, FAILURE = 0, SKIPPED = 0, UNSUPPORTED = 0, WARNING = 0, DDWARNING = 0, DUPLICATE = 0, TIMEOUT = 0)
+testStats = dict(SUCCESS = 0, FAILURE = 0, FAIL_NONCONVERGE = 0, SKIPPED = 0, SKIPPED_EXPORT = 0, SKIPPED_DUPLICATE = 0, UNSUPPORTED = 0, WARNING = 0, DDWARNING = 0, TIMEOUT = 0)
 # colour coding for test results
 # for colour values, see https://en.wikipedia.org/wiki/ANSI_escape_code#Colors
@@ -487,7 +487,8 @@ def runPrism(args, dir=""):
     if options.skipDuplicates:
         canonicalArgs = canonicaliseArgs(args)
         if (tuple(canonicalArgs) in alreadyRan):
-            incrementTestStat('DUPLICATE')
+            incrementTestStat('SKIPPED')
+            incrementTestStat('SKIPPED_DUPLICATE')
             return
         alreadyRan.add(tuple(canonicalArgs))
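For the duplicate case, the pattern means a repeated run now contributes to the Skipped total as well as to its own breakdown line; previously DUPLICATE was a separate top-level counter. A hypothetical helper mirroring the guard above (recordRunOrSkip is not in the script; canonicaliseArgs is assumed to normalise argument order and paths so equivalent runs compare equal):

    alreadyRan = set()

    def recordRunOrSkip(args):
        key = tuple(canonicaliseArgs(args))
        if key in alreadyRan:
            incrementTestStat('SKIPPED')            # umbrella category
            incrementTestStat('SKIPPED_DUPLICATE')  # breakdown in the summary
            return False                            # caller should skip this run
        alreadyRan.add(key)
        return True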
@@ -657,21 +658,28 @@ def incrementTestStat(stat):
 def printTestStatistics():
     if options.test and not options.echo:
         print('\nTest results:')
-        printColoured('SUCCESS', '  Success:     ' + str(testStats['SUCCESS']))
-        printColoured('WARNING', '  Warnings:    ' + str(testStats['WARNING'])
+        printColoured('SUCCESS', '  Success:           ' + str(testStats['SUCCESS']))
+        printColoured('WARNING', '  Warnings:          ' + str(testStats['WARNING'])
             + (' (use -w to show)' if (testStats['WARNING']>0 and not options.showWarnings and not options.verboseTest) else ''))
         if (options.ddWarnings):
-            printColoured('WARNING', '  DD-Warnings: ' + str(testStats['DDWARNING']))
-        printColoured('FAILURE', '  Failure:     ' + str(testStats['FAILURE']))
-        printColoured('UNSUPPORTED', '  Unsupported: ' + str(testStats['UNSUPPORTED']))
-        printColoured('SKIPPED', '  Skipped:     ' + str(testStats['SKIPPED']))
+            printColoured('WARNING', '  DD-Warnings:       ' + str(testStats['DDWARNING']))
+        printColoured('FAILURE', '  Failure:           ' + str(testStats['FAILURE']))
+        if testStats['FAIL_NONCONVERGE']:
+            printColoured('FAILURE', '  - Non-convergence: ' + str(testStats['FAIL_NONCONVERGE']))
+        printColoured('UNSUPPORTED', '  Unsupported:       ' + str(testStats['UNSUPPORTED']))
+        printColoured('SKIPPED', '  Skipped:           ' + str(testStats['SKIPPED']))
         if options.skipDuplicates:
-            printColoured('SKIPPED', '  Skipped dup.: ' + str(testStats['DUPLICATE']) + ' (due to --skip-duplicate-runs)')
+            printColoured('SKIPPED', '  - Duplicates:      ' + str(testStats['SKIPPED_DUPLICATE']) + ' (due to --skip-duplicate-runs)')
+        if options.skipExportRuns:
+            printColoured('SKIPPED', '  - Export:          ' + str(testStats['SKIPPED_EXPORT']) + ' (due to --skip-export-runs)')
         if options.timeout is not None:
-            printColoured('FAILURE', '  Timeouts:    ' + str(testStats['TIMEOUT']))
+            printColoured('FAILURE', '  Timeouts:          ' + str(testStats['TIMEOUT']))

 def countTestResult(msg):
     if 'Error:' in msg or 'FAIL' in msg:
+        if 'did not converge' in msg:
+            incrementTestStat('FAIL_NONCONVERGE')
         incrementTestStat('FAILURE')
     elif options.ddWarnings and re.match('Warning: CUDD reports .* non-zero references', msg):
         incrementTestStat('WARNING')
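For illustration, a summary printed by the reworked printTestStatistics might look as follows (all counts invented; the indented breakdown lines appear only when the corresponding option is set or the counter is non-zero, and Timeouts only when a timeout was configured):

    Test results:
      Success:           142
      Warnings:          3 (use -w to show)
      Failure:           5
      - Non-convergence: 2
      Unsupported:       1
      Skipped:           12
      - Duplicates:      8 (due to --skip-duplicate-runs)
      - Export:          4 (due to --skip-export-runs)
      Timeouts:          1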
@@ -742,6 +750,8 @@ def benchmark(file, args, dir=""):
     args = expandFilenames(args, dir)
     if options.skipExportRuns and hasExportSwitches(args):
+        incrementTestStat('SKIPPED')
+        incrementTestStat('SKIPPED_EXPORT')
         return
     # Determine which out files apply to this benchmark from the -export switches (if required)
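hasExportSwitches is only referenced in this hunk; a plausible sketch of such a check (hypothetical — the real helper may recognise a different set of spellings) would flag any PRISM export switch in the argument list:

    def hasExportSwitches(args):
        # hypothetical: treat switches such as '-exportresults' or
        # '-exporttrans' as marking an export run
        return any(a.startswith('-export') for a in args)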
