authorStephen Warren <swarren@nvidia.com>2016-02-10 13:47:37 -0700
committerTom Rini <trini@konsulko.com>2016-02-15 20:58:28 +0000
commit1326022c2edf4210f5726fb6a46ebbbb2926230f (patch)
treeb833313c928807062e4f4418bf45e4b4bf69adc6 /test/py
parent1235c791827ce6040d4e0103cb6d84a150a84d3c (diff)
test/py: print summary in test order
Use lists rather than sets to record the status of tests. This causes the
test summary in the HTML file to be generated in the same order as the
tests are (or would have been) run. This makes it easier to locate the
first failed test. The log for this test might have interesting first
clues re: interaction with the environment (e.g. hardware flashing,
serial console, ...) and may help tracking down external issues.

Signed-off-by: Stephen Warren <swarren@nvidia.com>
Acked-by: Simon Glass <sjg@chromium.org>
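A minimal standalone sketch (not part of the patch) of why the change has the
described effect: a Python set iterates in an unspecified order, while a list
iterates in insertion order, so appending test names to lists keeps the
summary in the order the tests ran. The test names below are invented for
illustration only.

    # Illustration: recording names in a set loses run order, a list keeps it.
    collected = ["test_version", "test_sleep", "test_md", "test_env_get"]

    as_set = set()
    as_list = []
    for name in collected:
        as_set.add(name)       # iteration order is arbitrary
        as_list.append(name)   # preserves collection/run order

    print(list(as_set))   # order unspecified, may differ between runs
    print(as_list)        # ['test_version', 'test_sleep', 'test_md', 'test_env_get']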
Diffstat (limited to 'test/py')
-rw-r--r--   test/py/conftest.py   16
1 file changed, 8 insertions, 8 deletions
diff --git a/test/py/conftest.py b/test/py/conftest.py
index 1ec096e..449f98b 100644
--- a/test/py/conftest.py
+++ b/test/py/conftest.py
@@ -312,12 +312,12 @@ def u_boot_console(request):
return console
anchors = {}
-tests_not_run = set()
-tests_failed = set()
-tests_xpassed = set()
-tests_xfailed = set()
-tests_skipped = set()
-tests_passed = set()
+tests_not_run = []
+tests_failed = []
+tests_xpassed = []
+tests_xfailed = []
+tests_skipped = []
+tests_passed = []
def pytest_itemcollected(item):
"""pytest hook: Called once for each test found during collection.
@@ -332,7 +332,7 @@ def pytest_itemcollected(item):
Nothing.
"""
- tests_not_run.add(item.name)
+ tests_not_run.append(item.name)
def cleanup():
"""Clean up all global state.
@@ -493,7 +493,7 @@ def pytest_runtest_protocol(item, nextitem):
if failure_cleanup:
console.drain_console()
- test_list.add(item.name)
+ test_list.append(item.name)
tests_not_run.remove(item.name)
try:
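The diff is truncated above, but the ordering consequence can be sketched
independently. Below is a hypothetical summary writer, not U-Boot's actual
logging API; the function name, the log argument, and the sample data are all
invented for illustration. It shows how iterating the now-ordered lists yields
a summary in run order, so the first failing test appears first.

    import sys

    # Stand-ins for the module-level lists from the patch; in conftest.py
    # they are filled in by the pytest hooks as each test is collected/run.
    tests_failed = ["test_sleep"]
    tests_skipped = ["test_md"]
    tests_not_run = []
    tests_passed = ["test_version", "test_env_get"]

    def write_summary(log):
        # Because these are lists (not sets), iteration follows the order in
        # which names were appended, i.e. collection/run order.
        for title, names in (
            ("Tests failed", tests_failed),
            ("Tests skipped", tests_skipped),
            ("Tests not run", tests_not_run),
            ("Tests passed", tests_passed),
        ):
            if names:
                log.write("%s: %s\n" % (title, ", ".join(names)))

    write_summary(sys.stdout)  # e.g. a plain-text dump instead of the HTML log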