|
|
|
@@ -172,8 +172,10 @@ def getArgsListsFromFile(file):
 
 # Create a valid name for a log file based on a list of benchmark arguments
 
-def createLogFileName(args):
+def createLogFileName(args, dir=""):
 	logFile = '.'.join(args)
+	if len(dir) > 0:
+		logFile = re.sub(dir, '', logFile)
 	logFile = re.sub('/', '_', logFile)
 	logFile = re.sub('[^a-zA-Z0-9_, \.]', '', logFile)
 	logFile = re.sub('[ ]+', '.', logFile)
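For reference, a standalone sketch of the patched name mangling, using a hypothetical argument list (the trailing return is assumed from the call sites; it lies outside this hunk):

import re

def createLogFileName(args, dir=""):
	logFile = '.'.join(args)
	if len(dir) > 0:
		logFile = re.sub(dir, '', logFile)  # strip the benchmark directory prefix
	logFile = re.sub('/', '_', logFile)     # flatten remaining path separators
	logFile = re.sub('[^a-zA-Z0-9_, \.]', '', logFile)  # drop unsafe characters
	logFile = re.sub('[ ]+', '.', logFile)  # collapse runs of spaces to dots
	return logFile  # assumed; not shown in the hunk

# e.g. createLogFileName(['examples/dice/dice.pm', '-const', 'N=6'], 'examples/dice')
# gives '_dice.pm.const.N6' rather than the old 'examples_dice_dice.pm.const.N6'

Note that re.sub treats dir as a regular expression, so a directory path containing metacharacters would match incorrectly; re.escape(dir) would be the safer pattern here.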
@@ -197,7 +199,7 @@ def walk(dir, meth):
 
 # Run PRISM with a given list of command-line args
 
-def runPrism(args):
+def runPrism(args, dir=""):
 	if options.test:
 		args.append("-test")
 	if options.echo:
@@ -208,7 +210,7 @@ def runPrism(args):
 	prismArgs += options.extraArgs.split(' ');
 	print ' '.join(prismArgs)
 	if options.logDir:
-		logFile = os.path.join(options.logDir, createLogFileName(args))
+		logFile = os.path.join(options.logDir, createLogFileName(args, dir))
 		f = open(logFile, 'w')
 		exitCode = subprocess.Popen(prismArgs, stdout=f).wait()
 		#exitCode = subprocess.Popen(prismArgs, cwd=dir, stdout=f).wait()
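Here the new dir parameter is only threaded through to the log-file name; the cwd=dir variant stays commented out. A minimal sketch of the redirect-to-log pattern, with invented paths and assuming the logs directory already exists:

import os, subprocess

prismArgs = ['prism', 'examples/dice/dice.pm', 'examples/dice/dice.pctl']
logFile = os.path.join('logs', '_dice.pm._dice.pctl')  # as createLogFileName would produce
f = open(logFile, 'w')
exitCode = subprocess.Popen(prismArgs, stdout=f).wait()  # block until PRISM exits
f.close()

Enabling the commented-out cwd=dir line would additionally run PRISM from the benchmark directory, so relative paths inside models would resolve there.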
@@ -233,15 +235,15 @@ def runPrism(args):
 
 # Run a benchmark, specified by a list of command-line args,
 # possibly iterating over further lists of args from a "bm" file
 
-def benchmark(args):
+def benchmark(args, dir=""):
 	# Loop through benchmark options, if required
 	if options.bmFile and os.path.isfile(os.path.join(options.bmFile)):
 		argsLists = getArgsListsFromFile(options.bmFile)
 		for bmArgs in argsLists:
-			runPrism(args + bmArgs)
+			runPrism(args + bmArgs, dir)
 	# If none, just use existing args
 	else:
-		runPrism(args)
+		runPrism(args, dir)
 
 # Execute benchmarking based on (possibly recursive) processing of a directory
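To illustrate the fan-out with the new parameter, a self-contained Python 2 sketch with a stubbed runPrism and an invented argument-list table standing in for the "bm" file:

def runPrism(args, dir=""):
	print ' '.join(args) + ' (dir=' + dir + ')'  # stub: just echo what would run

def benchmark(args, dir="", argsLists=None):
	if argsLists:  # stand-in for getArgsListsFromFile(options.bmFile)
		for bmArgs in argsLists:
			runPrism(args + bmArgs, dir)
	else:
		runPrism(args, dir)

benchmark(['dice.pm', 'dice.pctl'], 'examples/dice',
	[['-const', 'N=6'], ['-const', 'N=12']])
# echoes two runs, each carrying dir='examples/dice' into the log-file name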
@@ -257,13 +259,13 @@ def benchmarkDir(dir):
 	for modelFile in modelFiles:
 		# Build mode: just build
 		if options.build:
-			benchmark(modelFile + args)
+			benchmark(modelFile + args, dir)
 		# Otherwise, find properties
 		else:
 			if options.matching: propertiesFiles = getMatchingPropertiesInDir(dir, modelFile[0])
 			else: propertiesFiles = getPropertiesInDir(dir)
 			for propertiesFile in propertiesFiles:
-				benchmark(modelFile + propertiesFile)
+				benchmark(modelFile + propertiesFile, dir)
 
 # Execute benchmarking based on a single file (model, property, list)
 
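getMatchingPropertiesInDir is defined outside this diff; as a hedged guess at its pairing rule (properties files sharing the model's base name), an illustrative stand-in:

import os

def getMatchingPropertiesInDir(dir, modelFile):
	# Guessed behaviour only: the real helper lives elsewhere in the script.
	base = os.path.splitext(os.path.basename(modelFile))[0]
	return [[os.path.join(dir, f)] for f in os.listdir(dir)
		if f.startswith(base) and f.endswith('.pctl')]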