[asterisk-commits] gtjoseph: testsuite/asterisk/trunk r5956 - in /asterisk/trunk: ./ contrib/scr...

SVN commits to the Asterisk project asterisk-commits at lists.digium.com
Wed Nov 19 12:03:41 CST 2014


Author: gtjoseph
Date: Wed Nov 19 12:03:36 2014
New Revision: 5956

URL: http://svnview.digium.com/svn/testsuite?view=rev&rev=5956
Log:
testsuite: Update pretty_print and add --timeout to runtests.py

runtests.py:
* Changed to re-open stdout in single line buffered mode.
* Changed to print the total number of tests that will be run before actually
  running any tests.  This allows pretty_print to tell you where in the
  test run you are.
* Changed to add a '--timeout n' parameter.  If a test fails to output anything
  within n seconds, the test is aborted.  The default is inf so without the
  parameter the behavior is the same as today.  If the test does time out,
  the status printed will be 'timedout' instead of 'passed' or 'failed'.

pretty-print:
* Changed back to a filter.
  Run with ./runtests.py <options> | contrib/scripts/pretty_print
* Added time current test has been running.
* Added total run time to final summary.
* Added 'timedout' to the possible statuses.


Modified:
    asterisk/trunk/contrib/scripts/pretty_print
    asterisk/trunk/runtests.py

Modified: asterisk/trunk/contrib/scripts/pretty_print
URL: http://svnview.digium.com/svn/testsuite/asterisk/trunk/contrib/scripts/pretty_print?view=diff&rev=5956&r1=5955&r2=5956
==============================================================================
--- asterisk/trunk/contrib/scripts/pretty_print (original)
+++ asterisk/trunk/contrib/scripts/pretty_print Wed Nov 19 12:03:36 2014
@@ -1,4 +1,4 @@
-#!/bin/bash
+#!/usr/bin/env bash
 #
 # Copyright (C) 2014, Fairview 5 Engineering, LLC
 # George Joseph <george.joseph at fairview5.com>
@@ -6,34 +6,74 @@
 # This program is free software, distributed under the terms of
 # the GNU General Public License Version 2.
 #
+
 if [ -t 0 ] ; then
 	echo "pretty_print is a filter and needs the output of runtests.py piped to it."
-	echo "Try python -u ./runtests.py <options> | ./pretty_print"
-	echo "The 'python -u' is needed to use unbuffered output mode otherwise you'll only see output in big chunks."
+	echo "Try ./runtests.py <options> | $0"
 	exit 1
 fi
 
+declare -ix runnable=0
+declare -ix total=0
 declare -ix passed=0
 declare -ix failed=0
+declare -ix timedout=0
 declare -ix tests=0
+declare -ix maxelapsed=0
 declare -a failures
+declare -a timeouts
+declare -ix test_timeout=0
+YELLOW='\033[01;33m'
 GREEN='\033[01;32m'
 RED='\033[01;31m'
 NORM='\033[m'
 
-col=$(( $(tput cols) - 28 ))
+col=$(( $(tput cols) - 36 ))
 
-trap break INT
+counter() {
+	s=1
+	status=Running
+	while(true) ; do
+		sleep 1
+		if [ $test_timeout -gt 0 -a $s -gt $(( ($test_timeout / 4) * 3 )) ] ; then
+			printf "${YELLOW}[%12s %3ss ]${NORM}" "Hung in" $(( $test_timeout - s ))
+		else
+			printf "[%12s %3ss ]" "Running for" $s
+		fi
+		(( s++ ))
+		tput cub 20
+	done
+}
+
+echo -e -n "Calculating...\r"
+
+trap 'kill $countpid &>/dev/null' INT ERR
 while read line ; do
-	if [[ $line =~ ^Running\ tests\ for.* ]] ; then
-		echo $line
-		printf "%-*.*s %4s/${GREEN}%4s${NORM}/${RED}%4s${NORM} ${COLOR} [ %s ]${NORM}\n" $col $col "Test" "Run" "Pass" "Fail" "Status"
+	if [[ $line =~ ^Running.tests.for.(Asterisk.*) ]] ; then
+		version=${BASH_REMATCH[1]}
+		starttime=$SECONDS
 	fi
-	if [[ $line =~ ^Test.*(tests[^\']+)\',.*(passed|failed)$ ]] ; then
+
+	if [[ $line =~ ^Tests.to.run:.([0-9]+),[\ ]+Maximum.test.inactivity.time:.([0-9-]+) ]] ; then
+		runnable=${BASH_REMATCH[1]}
+		export test_timeout=${BASH_REMATCH[2]}
+		echo ${line/-1/unlimited}
+		printf "[%-*.*s ][%-11s][%s][${GREEN}%4s${NORM}][${RED}%4s${NORM}]\n" $col $col "Test" "   Test" "Status" "Pass" "Fail"
+	fi
+
+	if [[ $line =~ ^--\>.Running.test.\'(.+)\'.\.\.\. ]] ; then
+		(( tests++ ))
+		printf "[%-*.*s ][%4d of %3d]" $col $col ${BASH_REMATCH[1]#*/} $tests $runnable
+		st=$SECONDS
+		counter &
+		disown $!
+		countpid=$!
+	fi
+
+	if [[ $line =~ ^Test.*tests/([^\']+)\',.*(passed|failed|timedout)$ ]] ; then
 		test=${BASH_REMATCH[1]}
 		status=${BASH_REMATCH[2]}
-		col=$(( $(tput cols) - 28 ))
-		(( tests++ ))
+		col=$(( $(tput cols) - 36 ))
 		if [[ $status = passed ]] ; then
 			(( passed++ ))
 			COLOR=${GREEN}
@@ -45,12 +85,34 @@
 			label=Failed
 			failures+=("FAILED: $test")
 		fi
-		printf "%-*.*s %4d/${GREEN}%4d${NORM}/${RED}%4d${NORM} ${COLOR} [ %s ]${NORM}\n" $col $col $test $tests $passed $failed $label
+		if [[ $status = timedout ]] ; then
+			(( failed++ ))
+			(( timedout++ ))
+			COLOR=${RED}
+			label=Hung!!
+			timeouts+=("TIMEDOUT: $test")
+		fi
+		kill $countpid
+		et=$(( $SECONDS - $st ))
+		[[ $et -gt $maxelapsed ]] && maxelapsed=$et
+		printf "[${COLOR}%s${NORM}][${GREEN}%4d${NORM}][${RED}%4d${NORM}]\n" $label $passed $failed
 	fi
 done
-trap - INT
-echo -e "\tTests: $tests\t\t${GREEN}Passed: $passed\t\t${RED}Failed: $failed${NORM}"
+
+trap - INT ERR
+
+kill $countpid &>/dev/null
+
 for fail in "${failures[@]}" ; do
 	echo -e "${RED}$fail${NORM}"
 done
+for to in "${timeouts[@]}" ; do
+	echo -e "${YELLOW}$to${NORM}"
+done
 
+elapsed=$(( $SECONDS - $starttime ))
+time="$(( $elapsed / 60 ))m $(( $elapsed % 60 ))s"
+echo -e "Tests: $runnable   ${GREEN}Passed: $passed   ${RED}Failed: $failed   TimedOut: $timedout${NORM}   Time: $time\tLongest test: ${maxelapsed}s"
+
+
+

Modified: asterisk/trunk/runtests.py
URL: http://svnview.digium.com/svn/testsuite/asterisk/trunk/runtests.py?view=diff&rev=5956&r1=5955&r2=5956
==============================================================================
--- asterisk/trunk/runtests.py (original)
+++ asterisk/trunk/runtests.py Wed Nov 19 12:03:36 2014
@@ -18,6 +18,13 @@
 import shutil
 import xml.dom
 import random
+import select
+
+# Re-open stdout so it's line buffered.
+# This allows timely processing of piped output.
+newfno = os.dup(sys.stdout.fileno())
+os.close(sys.stdout.fileno())
+sys.stdout = os.fdopen(newfno, 'w', 1)
 
 sys.path.append("lib/python")
 
@@ -30,7 +37,7 @@
 
 
 class TestRun:
-    def __init__(self, test_name, ast_version, options, global_config=None):
+    def __init__(self, test_name, ast_version, options, global_config=None, timeout=-1):
         self.can_run = False
         self.did_run = False
         self.time = 0.0
@@ -41,6 +48,7 @@
         self.failure_message = ""
         self.__check_can_run(ast_version)
         self.stdout = ""
+        self.timeout = timeout
 
         assert self.test_name.startswith('tests/')
         self.test_relpath = self.test_name[6:]
@@ -64,9 +72,20 @@
             p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
                                  stderr=subprocess.STDOUT)
             self.pid = p.pid
+
+            poll = select.poll()
+            poll.register(p.stdout, select.POLLIN)
+
+            timedout = False
             try:
-                for l in p.stdout.readlines():
-                    print l,
+                while(True):
+                    if not poll.poll(self.timeout):
+                        timedout = True
+                        p.terminate()
+                    l = p.stdout.readline()
+                    if not l:
+                        break
+                    print l
                     self.stdout += l
             except IOError:
                 pass
@@ -95,7 +114,7 @@
 
             if not self.passed:
                 self._archive_logs()
-            print 'Test %s %s\n' % (cmd, 'passed' if self.passed else 'failed')
+            print 'Test %s %s\n' % (cmd, 'timedout' if timedout else 'passed' if self.passed else 'failed')
 
         else:
             print "FAILED TO EXECUTE %s, it must exist and be executable" % cmd
@@ -289,7 +308,7 @@
                                     for test in self.options.tests)):
                         continue
 
-                    tests.append(TestRun(path, ast_version, self.options, self.global_config))
+                    tests.append(TestRun(path, ast_version, self.options, self.global_config, self.options.timeout))
                 elif val == "dir":
                     tests += self._parse_test_yaml(path, ast_version)
 
@@ -346,6 +365,16 @@
 
     def run(self):
         test_suite_dir = os.getcwd()
+        i = 0
+        for t in self.tests:
+            if t.can_run is False:
+                continue
+            if self.global_config != None:
+                for excluded in self.global_config.excluded_tests:
+                    if excluded in t.test_name:
+                        continue
+            i += 1
+        print "Tests to run: %d,  Maximum test inactivity time: %d sec." % (i, (self.options.timeout / 1000))
 
         for t in self.tests:
             if t.can_run is False:
@@ -497,12 +526,18 @@
     parser.add_option("-n", "--dry-run", action="store_true",
             dest="dry_run", default=False,
             help="Only show which tests would be run.")
+    parser.add_option("--timeout", metavar='int', type=int,
+            dest="timeout", default=-1,
+            help="Abort test after n seconds of no output.")
     parser.add_option("-V", "--valgrind", action="store_true",
             dest="valgrind", default=False,
             help="Run Asterisk under Valgrind")
     (options, args) = parser.parse_args(argv)
 
     ast_version = AsteriskVersion(options.version)
+
+    if options.timeout > 0:
+        options.timeout *= 1000
 
     # Ensure that there's a trailing '/' in the tests specified with -t
     for i, test in enumerate(options.tests):




More information about the asterisk-commits mailing list