no-dot-config-targets := clean clobber mrproper distclean \
help %docs check% coccicheck \
- ubootversion backup tests check qcheck tcheck pylint \
- pylint_err
+ ubootversion backup tests check pcheck qcheck tcheck \
+ pylint pylint_err
config-targets := 0
mixed-targets := 0
@echo 'Test targets:'
@echo ''
@echo ' check - Run all automated tests that use sandbox'
+ @echo ' pcheck - Run quick automated tests in parallel'
@echo ' qcheck - Run quick automated tests that use sandbox'
@echo ' tcheck - Run quick automated tests on tools'
@echo ' pylint - Run pylint on all Python files'
tests check:
$(srctree)/test/run
+pcheck:
+ $(srctree)/test/run parallel
+
qcheck:
$(srctree)/test/run quick
Running tests in parallel
~~~~~~~~~~~~~~~~~~~~~~~~~
-Note: This does not fully work yet and is documented only so you can try to
-fix the problems.
+Note: Not all tests can run in parallel at present, so the usual approach is
+to just run those that can.
First install support for parallel tests::
+ sudo apt install python3-pytest-xdist
+
+or::
+
pip3 install pytest-xdist
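+
+To check that the plugin is present, you can run the equivalent of the check
+that test/run performs (it just tries to import the module)::
+
+    python3 -c 'import xdist'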
-Then build sandbox in a suitable build directory. It is not possible to use
-the --build flag with xdist.
-
-Finally, run the tests in parallel using the -n flag::
-
-   # build sandbox first, in a suitable build directory. It is not possible
-   # to use the --build flag with -n
-   test/py/test.py -B sandbox --build-dir /tmp/b/sandbox -q -k 'not slow' -n32
+Then run the tests in parallel using the -n flag::
+
+    test/py/test.py -B sandbox --build --build-dir /tmp/b/sandbox -q -k \
+        'not slow and not bootstd and not spi_flash' -n16
+You can also use `make pcheck` to run all tests in parallel. This uses a maximum
+of 16 threads, since the setup time is significant and there are under 1000
+tests.
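+
+To reproduce roughly what `make pcheck` does by hand, a sketch (assuming a
+bash-compatible shell and using the same 16-thread cap as test/run) is::
+
+    jobs="$(($(nproc) > 16 ? 16 : $(nproc)))"
+    test/py/test.py -B sandbox --build --build-dir /tmp/b/sandbox -q -k \
+        'not slow and not bootstd and not spi_flash' -n${jobs}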
-At least the following non-slow tests are known to fail:
-
-- test_fit_ecdsa
-- test_bind_unbind_with_uclass
-- ut_dm_spi_flash
-- test_gpt_rename_partition
-- test_gpt_swap_partitions
-- test_pinmux_status
-- test_sqfs_load
+Note that the `test-log.html` output does not work correctly at present with
+parallel testing. All the threads write to it at once, so it is garbled.
+Note that the `tools/` tests still run each tool's tests one after the other,
+although within that, they do run in parallel. So for example, the buildman
+tests run in parallel, then the binman tests run in parallel. There would be a
+significant advantage to running them all in parallel together, but that would
+require a large amount of refactoring, e.g. with more use of pytest fixtures.
+The code-coverage tests are omitted since they cannot run in parallel due to a
+Python limitation.
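+
+A single tool's tests can still be run on their own, e.g. for binman (this is
+the same command that test/run uses, minus the -T code-coverage flag)::
+
+    ./tools/binman/binman test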
Testing under a debugger
make tcheck
+You can also run a selection of tests in parallel with::
+
+ make pcheck
+
All of the above use the test/run script with a parameter to select which tests
-are run.
+are run. See :doc:`py_testing` for more information.
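+
+For example, `make pcheck` just runs::
+
+    test/run parallel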
Sandbox
}
# Select test attributes
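+# Mark expression used for the sandbox_flattree unit tests; narrowed further
+# below for quick and parallel runs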
+ut_mark_expr=test_ut
if [ "$1" = "quick" ]; then
mark_expr="not slow"
+ ut_mark_expr="test_ut and not slow"
skip=--skip-net-tests
fi
[ "$1" == "tools" ] && tools_only=y
+if [ "$1" = "parallel" ]; then
+ if ! echo 'import xdist' | python3 2>/dev/null; then
+ echo "Please install python3-pytest-xdist - see doc/develop/py_testing.rst"
+ exit 1
+ fi
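+ # Use at most 16 threads, since the setup time is significant and there
+ # are under 1000 tests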
+ jobs="$(($(nproc) > 16 ? 16 : $(nproc)))"
+ para="-n${jobs} -q"
+ prompt="Building and..."
+ skip=--skip-net-tests
+ mark_expr="not slow and not bootstd and not spi_flash"
+ ut_mark_expr="test_ut and not slow and not bootstd and not spi_flash"
+ echo "Note: test log is garbled with parallel tests"
+fi
+
failures=0
if [ -z "$tools_only" ]; then
# Run all tests that the standard sandbox build can support
- run_test "sandbox" ./test/py/test.py --bd sandbox --build \
+ echo "${prompt}"
+ run_test "sandbox" ./test/py/test.py --bd sandbox --build ${para} \
-k "${mark_expr}"
fi
# Run tests which require sandbox_spl
-run_test "sandbox_spl" ./test/py/test.py --bd sandbox_spl --build \
+echo "${prompt}"
+run_test "sandbox_spl" ./test/py/test.py --bd sandbox_spl --build ${para} \
-k 'test_ofplatdata or test_handoff or test_spl'
# Run the same tests with sandbox_noinst (i.e. without OF_PLATDATA_INST)
-run_test "sandbox_spl" ./test/py/test.py --bd sandbox_noinst --build \
+echo "${prompt}"
+run_test "sandbox_noinst" ./test/py/test.py --bd sandbox_noinst --build ${para} \
-k 'test_ofplatdata or test_handoff or test_spl'
if [ -z "$tools_only" ]; then
# Run tests with the sandbox_flattree build, which does not enable
# CONFIG_OF_LIVE for the live device tree, so we can check that functionality
# is the same. The standard sandbox build (above) uses CONFIG_OF_LIVE.
+ echo "${prompt}"
run_test "sandbox_flattree" ./test/py/test.py --bd sandbox_flattree \
- --build -k test_ut
+ ${para} --build -k "${ut_mark_expr}"
fi
# Set up a path to dtc (device-tree compiler) and libfdt.py, a library it contains
# This needs you to set up Python test coverage tools.
# To enable Python test coverage on Debian-type distributions (e.g. Ubuntu):
# $ sudo apt-get install python3-pytest python3-coverage
-export PATH=$PATH:${TOOLS_DIR}
-run_test "binman code coverage" ./tools/binman/binman test -T
-run_test "dtoc code coverage" ./tools/dtoc/dtoc -T
-run_test "fdt code coverage" ./tools/dtoc/test_fdt -T
+
+# Code-coverage tests cannot run in parallel, so skip them in that case
+if [ -z "${para}" ]; then
+ export PATH=$PATH:${TOOLS_DIR}
+ run_test "binman code coverage" ./tools/binman/binman test -T
+ run_test "dtoc code coverage" ./tools/dtoc/dtoc -T
+ run_test "fdt code coverage" ./tools/dtoc/test_fdt -T
+fi
if [ $failures == 0 ]; then
echo "Tests passed!"