#!/usr/bin/env python
#
#  Copyright (c) 2006, Sean Reifschneider, tummy.com, ltd.
#  All Rights Reserved.

import time, sys, pickle, os, optparse, re

verbose = 4

#################################
def skipTest(nameList, testName):
    '''Return True if testName should be skipped, based on the regexes in
    nameList.  Plain regexes select tests to run, regexes prefixed with "!"
    select tests to skip, and the first matching pattern wins.'''
    returnValue = False
    for rx in nameList:
        if rx[0] != '!':
            #  positive pattern: skip by default, run on a match
            returnValue = True
            if re.match(rx, testName): return(False)
        else:
            #  negative pattern: run by default, skip on a match
            returnValue = False
            if re.match(rx[1:], testName): return(True)
    return(returnValue)
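#  Illustrative examples (the test names are made up; real names come from
#  the Tests package, with the leading "Tests." stripped off):
#      skipTest(['String'], 'StringTests.ConcatTest')   -> False  (run)
#      skipTest(['String'], 'DictTests.LookupTest')     -> True   (skip)
#      skipTest(['!String'], 'StringTests.ConcatTest')  -> True   (skip)
#      skipTest(['!String'], 'DictTests.LookupTest')    -> False  (run)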


######################
def rusageTimer(self):
    '''Return the user CPU time consumed by this process (via getrusage);
    the unused self argument suggests it was meant to replace a test
    instance's wall-clock timer.  Not used elsewhere in this script.'''
    import resource
    return(resource.getrusage(resource.RUSAGE_SELF)[0])


#########################
def getEnvironmentInfo():
    '''Parse sys.version into version, build, and environment (compiler)
    fields, and add the local hostname.'''
    reg = re.compile(r'^(?P<version>\S+)\s+\((?P<build>[^\)]+)\).*\s*\['
            r'(?P<environment>[^\]]+)\]', re.M)
    m = reg.match(sys.version)
    if not m: raise ValueError, 'Could not parse sys.version'
    results = m.groupdict()
    results['host'] = os.uname()[1]
    return(results)
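#  For example, on one CPython 2.4 build this might return something like
#  (values are illustrative):
#      {'version': '2.4.3', 'build': '#1, Oct  1 2006, 12:00:00',
#       'environment': 'GCC 4.1.1', 'host': 'myhost.example.com'}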


########################
def shortifyTestName(s):
    'Reduce a fully-qualified test name to its last two components.'
    #if s[:6] == 'Tests.': s = s[6:]
    s = '.'.join(s.split('.')[-2:])
    return(s)
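#  For example: shortifyTestName('Tests.StringTests.ConcatTest') returns
#  'StringTests.ConcatTest' (again, the test name here is illustrative).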


###########################################################
def compareResults(testResults, verbose, compareAgainst):
    '''Print a per-test and overall comparison of testResults against
    compareAgainst.  Both arguments are dictionaries of the form built below:
    {'environment': ..., 'results': ...}.  Negative percentages mean the
    testResults run was faster than the compareAgainst run.'''
    #  display which environments are being compared
    print ('Comparing %(version)s (%(build)s)'
            % compareAgainst['environment'])
    print ('       to %(version)s (%(build)s)'
            % testResults['environment'])
    print ('Comparing [%(environment)s] on %(host)s'
            % compareAgainst['environment'])
    print ('       to [%(environment)s] on %(host)s'
            % testResults['environment'])
    print

    #  build a sorted list of (shortName, (moduleName, className),
    #  [rounds, passTimes]) entries
    testList = map(lambda x: ( shortifyTestName(str(x[0][1])), x[0], x[1] ),
            testResults['results'].items())
    testList.sort()

    #  print individual results
    overallDiff = 0.0
    overallSpeedups = 0.0
    overallSlowdowns = 0.0
    for testSource in testList:
        compareData = compareAgainst['results'][testSource[1]]
        testCompare = [ None, None, [compareData[0], compareData[1]] ]
        sourceBest = min(testSource[2][1])
        compareBest = min(testCompare[2][1])

        #  normalize: scale the source round count to what it would have
        #  completed in the comparison run's best pass time, so the two
        #  round counts are directly comparable
        normalizationFactor = compareBest / sourceBest
        sourceLapsNormalized = testSource[2][0] * normalizationFactor

        #  percentage difference; negative means the source run was faster
        difference = 100.0 - ((sourceLapsNormalized / testCompare[2][0])
                * 100.0)
        differenceStr = '%7.1f' % difference
        if differenceStr.strip() == '-0.0': differenceStr = '    0.0'
        differenceShorter = float(differenceStr.strip())
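        #  Worked example with made-up numbers: if the source run did 1000
        #  rounds with a best pass of 0.50s and the comparison run did 1000
        #  rounds with a best pass of 0.45s, then normalizationFactor is
        #  0.45 / 0.50 = 0.9, sourceLapsNormalized is 900, and difference is
        #  100 - (900 / 1000) * 100 = 10.0% (the source run is slower).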

        #  debugging
        if verbose > 2:
            print 'Source Laps: ', testSource[2][0]
            print 'Source Laps Normalized: ', sourceLapsNormalized
            print 'Compare Laps: ', testCompare[2][0]
            print 'Source Best:', sourceBest
            print 'Compare Best:', compareBest
            print 'Normalization Factor:', normalizationFactor
            print 'Difference:', difference
            print 'Difference Str:', differenceStr
            print 'Difference Shorter:', differenceShorter

        overallDiff = overallDiff + differenceShorter
        if differenceShorter > 0:
            overallSlowdowns = overallSlowdowns + differenceShorter
        if differenceShorter < 0:
            overallSpeedups = overallSpeedups + differenceShorter

        print '%-63s -> %s%%' % ( testSource[0], differenceStr )

    print '=' * 78
    print '%68s %5.1f%%' % ( 'Overall difference:', overallDiff )
    print '%68s %5.1f%%' % ( 'Total speedups:', overallSpeedups )
    print '%68s %5.1f%%' % ( 'Total slowdowns:', overallSlowdowns )
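#  A report printed by compareResults looks roughly like this (names and
#  numbers are made up, and the columns are compressed here):
#      StringTests.ConcatTest                  ->    -3.2%
#      DictTests.LookupTest                    ->     1.5%
#      ====================================================
#                            Overall difference:  -1.7%
#                                Total speedups:  -3.2%
#                               Total slowdowns:   1.5%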


##################################
parser = optparse.OptionParser()
parser.add_option('-s', dest = 'compareSrcFileLoad',
        type = 'str', help = 'Compare with results stored in this file.',
        metavar = 'FILE_NAME')
parser.add_option('-d', dest = 'compareDestFileLoad',
        type = 'str',
        help = 'Instead of running tests, load data from this file',
        metavar = 'FILE_NAME')
parser.add_option('-w', dest = 'compareDestFileSave',
        type = 'str', help = 'Write test results out to this file',
        metavar = 'FILE_NAME')
def testNameCallback(option, opt, value, parser):
    #  accumulate each -t value into options.testNames
    parser.values.testNames.append(value)
parser.add_option('-t', dest = 'testNames',
        action = 'callback', callback = testNameCallback, nargs = 1,
        type = 'string', default = [],
        help = 'May be specified once or more to select tests to run.  '
                'Prefix regex with ! to specify tests not to run.',
        metavar = 'TEST_REGEX')
parser.add_option('-v', '--verbose', dest = 'verbose', action = 'count',
        default = 0, help = 'Increase verbosity level.')
parser.add_option('-l', '--list-tests', dest = 'listTests', action = 'count',
        default = 0, help = 'List available tests.')
options, args = parser.parse_args()
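#  Example invocations (the script and test names here are illustrative):
#      ./bench.py -w before.pickle        run all tests, save the results
#      ./bench.py -s before.pickle        run again, compare to the saved run
#      ./bench.py -t StringTests          run only tests under StringTests
#      ./bench.py -t '!StringTests'       run everything except StringTests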

#  list tests
if options.listTests:
    import Tests
    for moduleName in Tests.testModules:
        exec('from Tests import %s' % moduleName)
        module = eval(moduleName)
        for testClass in module.__dict__.values():
            if (not hasattr(testClass, 'is_a_test')
                    or 'TestHelpers' in str(testClass)):
                continue
            print shortifyTestName(str(testClass))
    sys.exit(0)

if options.compareDestFileLoad:
    #  load results from a file
    testData = pickle.load(open(options.compareDestFileLoad, 'r'))
else:
    #  run tests locally
    import Tests
    testResults = {}
    for moduleName in Tests.testModules:
        if options.verbose >= 3: print moduleName
        exec('from Tests import %s' % moduleName)
        module = eval(moduleName)
        for testClass in module.__dict__.values():
            if (not hasattr(testClass, 'is_a_test')
                    or 'TestHelpers' in str(testClass)):
                continue

            #  skip based on -t option (strip the leading 'Tests.' from the
            #  class name before matching)
            if skipTest(options.testNames, str(testClass)[6:]): continue

            #  set up test
            if options.verbose >= 1: print 'Test:', moduleName, testClass
            test = testClass()
            test.timer = time.time
            test.runtimeTarget = 0.5
            test.runtimeAccuracyTarget = 0.025
            test.requiredStablePasses = 10
            test.passSleepTime = 0.1
            test.verbose = options.verbose

            #  run test
            if options.verbose >= 2: print 'Calibrating...'
            test.cowlibrate()
            if options.verbose >= 3:
                print 'Calibrated to %d rounds' % test.rounds

            if options.verbose >= 2: print 'Running tests...'
            #  collect five timed passes; passResults is
            #  [calibrated rounds, [elapsed time of each pass]]
            first = None
            passResults = [test.rounds, []]
            while len(passResults[1]) < 5:
                latest = test.run()
                passResults[1].append(latest)
                if first is None: first = latest
                if options.verbose >= 3:
                    #  speed of each pass relative to the first, as a percent
                    print '   %3.2f' % ((first / latest) * 100)
            testResults[( moduleName, str(testClass) )] = passResults

    environment = getEnvironmentInfo()
    #  'results' maps (moduleName, className) to [rounds, [per-pass times]]
    testData = { 'environment' : environment, 'results' : testResults }

    #  save results to a file
    if options.compareDestFileSave:
        pickle.dump(testData, open(options.compareDestFileSave, 'w'))

#  deal with results
if options.compareSrcFileLoad:
    compareResults(testData, options.verbose,
            pickle.load(open(options.compareSrcFileLoad, 'r')))

sys.exit(0)
