Merge branch 'as/test-tweaks'
Output from the tests is coloured using "green is okay, yellow is questionable, red is bad and blue is informative" scheme.

* as/test-tweaks:
  tests: paint unexpectedly fixed known breakages in bold red
  tests: test the test framework more thoroughly
  tests: refactor mechanics of testing in a sub test-lib
  tests: change info messages from yellow/brown to cyan
  tests: paint skipped tests in blue
  tests: paint known breakages in yellow
  tests: test number comes first in 'not ok $count - $message'
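The colour names map directly onto the outcomes in the patch titles above: green for passing tests, yellow for known breakages, blue for skipped tests, cyan for informational messages, and bold red for known breakages that unexpectedly pass. As a rough sketch of what such a mapping can look like in a test harness (the say_color helper name and the exact tput calls are illustrative assumptions, not text taken from this merge):

	# Sketch only: outcome-to-colour mapping as described in the commit
	# message; the helper name and tput sequences are assumptions.
	say_color () {
		case "$1" in
		pass)	tput setaf 2 ;;			# green: okay
		warn)	tput setaf 3 ;;			# yellow: questionable (known breakages)
		error)	tput bold; tput setaf 1 ;;	# bold red: bad (e.g. unexpectedly fixed breakages)
		skip)	tput setaf 4 ;;			# blue: skipped tests
		info)	tput setaf 6 ;;			# cyan: informative messages
		esac
		shift
		printf '%s' "$*"
		tput sgr0				# reset attributes after the message
		echo
	}

A caller would then emit, for example, say_color info "# passed all 3 test(s)" to print a summary line in cyan.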
1 changed file: t/t0000-basic.sh (212 lines changed)
@@ -45,39 +45,176 @@ test_expect_failure 'pretend we have a known breakage' '
 	false
 '

-test_expect_success 'pretend we have fixed a known breakage (run in sub test-lib)' "
-	mkdir passing-todo &&
-	(cd passing-todo &&
-	cat >passing-todo.sh <<-EOF &&
-	#!$SHELL_PATH
+run_sub_test_lib_test () {
+	name="$1" descr="$2" # stdin is the body of the test code
+	mkdir "$name" &&
+	(
+		cd "$name" &&
+		cat >"$name.sh" <<-EOF &&
+		#!$SHELL_PATH

-	test_description='A passing TODO test
+		test_description='$descr (run in sub test-lib)

-	This is run in a sub test-lib so that we do not get incorrect
-	passing metrics
-	'
+		This is run in a sub test-lib so that we do not get incorrect
+		passing metrics
+		'

-	# Point to the t/test-lib.sh, which isn't in ../ as usual
-	TEST_DIRECTORY=\"$TEST_DIRECTORY\"
-	. \"\$TEST_DIRECTORY\"/test-lib.sh
+		# Point to the t/test-lib.sh, which isn't in ../ as usual
+		. "\$TEST_DIRECTORY"/test-lib.sh
+		EOF
+		cat >>"$name.sh" &&
+		chmod +x "$name.sh" &&
+		export TEST_DIRECTORY &&
+		./"$name.sh" >out 2>err
+	)
+}

-	test_expect_failure 'pretend we have fixed a known breakage' '
-		:
-	'
+check_sub_test_lib_test () {
+	name="$1" # stdin is the expected output from the test
+	(
+		cd "$name" &&
+		! test -s err &&
+		sed -e 's/^> //' -e 's/Z$//' >expect &&
+		test_cmp expect out
+	)
+}

+test_expect_success 'pretend we have a fully passing test suite' "
+	run_sub_test_lib_test full-pass '3 passing tests' <<-\\EOF &&
+	for i in 1 2 3
+	do
+		test_expect_success \"passing test #\$i\" 'true'
+	done
 	test_done
 	EOF
-	chmod +x passing-todo.sh &&
-	./passing-todo.sh >out 2>err &&
-	! test -s err &&
-	sed -e 's/^> //' >expect <<-\\EOF &&
-	> ok 1 - pretend we have fixed a known breakage # TODO known breakage
-	> # fixed 1 known breakage(s)
-	> # passed all 1 test(s)
+	check_sub_test_lib_test full-pass <<-\\EOF
+	> ok 1 - passing test #1
+	> ok 2 - passing test #2
+	> ok 3 - passing test #3
+	> # passed all 3 test(s)
+	> 1..3
+	EOF
+"

+test_expect_success 'pretend we have a partially passing test suite' "
+	test_must_fail run_sub_test_lib_test \
+		partial-pass '2/3 tests passing' <<-\\EOF &&
+	test_expect_success 'passing test #1' 'true'
+	test_expect_success 'failing test #2' 'false'
+	test_expect_success 'passing test #3' 'true'
+	test_done
+	EOF
+	check_sub_test_lib_test partial-pass <<-\\EOF
+	> ok 1 - passing test #1
+	> not ok 2 - failing test #2
+	> # false
+	> ok 3 - passing test #3
+	> # failed 1 among 3 test(s)
+	> 1..3
+	EOF
+"

+test_expect_success 'pretend we have a known breakage' "
+	run_sub_test_lib_test failing-todo 'A failing TODO test' <<-\\EOF &&
+	test_expect_success 'passing test' 'true'
+	test_expect_failure 'pretend we have a known breakage' 'false'
+	test_done
+	EOF
+	check_sub_test_lib_test failing-todo <<-\\EOF
+	> ok 1 - passing test
+	> not ok 2 - pretend we have a known breakage # TODO known breakage
+	> # still have 1 known breakage(s)
+	> # passed all remaining 1 test(s)
+	> 1..2
+	EOF
+"

+test_expect_success 'pretend we have fixed a known breakage' "
+	run_sub_test_lib_test passing-todo 'A passing TODO test' <<-\\EOF &&
+	test_expect_failure 'pretend we have fixed a known breakage' 'true'
+	test_done
+	EOF
+	check_sub_test_lib_test passing-todo <<-\\EOF
+	> ok 1 - pretend we have fixed a known breakage # TODO known breakage vanished
+	> # 1 known breakage(s) vanished; please update test(s)
 	> 1..1
 	EOF
-	test_cmp expect out)
 "

+test_expect_success 'pretend we have fixed one of two known breakages (run in sub test-lib)' "
+	run_sub_test_lib_test partially-passing-todos \
+		'2 TODO tests, one passing' <<-\\EOF &&
+	test_expect_failure 'pretend we have a known breakage' 'false'
+	test_expect_success 'pretend we have a passing test' 'true'
+	test_expect_failure 'pretend we have fixed another known breakage' 'true'
+	test_done
+	EOF
+	check_sub_test_lib_test partially-passing-todos <<-\\EOF
+	> not ok 1 - pretend we have a known breakage # TODO known breakage
+	> ok 2 - pretend we have a passing test
+	> ok 3 - pretend we have fixed another known breakage # TODO known breakage vanished
+	> # 1 known breakage(s) vanished; please update test(s)
+	> # still have 1 known breakage(s)
+	> # passed all remaining 1 test(s)
+	> 1..3
+	EOF
+"

+test_expect_success 'pretend we have a pass, fail, and known breakage' "
+	test_must_fail run_sub_test_lib_test \
+		mixed-results1 'mixed results #1' <<-\\EOF &&
+	test_expect_success 'passing test' 'true'
+	test_expect_success 'failing test' 'false'
+	test_expect_failure 'pretend we have a known breakage' 'false'
+	test_done
+	EOF
+	check_sub_test_lib_test mixed-results1 <<-\\EOF
+	> ok 1 - passing test
+	> not ok 2 - failing test
+	> # false
+	> not ok 3 - pretend we have a known breakage # TODO known breakage
+	> # still have 1 known breakage(s)
+	> # failed 1 among remaining 2 test(s)
+	> 1..3
+	EOF
+"

+test_expect_success 'pretend we have a mix of all possible results' "
+	test_must_fail run_sub_test_lib_test \
+		mixed-results2 'mixed results #2' <<-\\EOF &&
+	test_expect_success 'passing test' 'true'
+	test_expect_success 'passing test' 'true'
+	test_expect_success 'passing test' 'true'
+	test_expect_success 'passing test' 'true'
+	test_expect_success 'failing test' 'false'
+	test_expect_success 'failing test' 'false'
+	test_expect_success 'failing test' 'false'
+	test_expect_failure 'pretend we have a known breakage' 'false'
+	test_expect_failure 'pretend we have a known breakage' 'false'
+	test_expect_failure 'pretend we have fixed a known breakage' 'true'
+	test_done
+	EOF
+	check_sub_test_lib_test mixed-results2 <<-\\EOF
+	> ok 1 - passing test
+	> ok 2 - passing test
+	> ok 3 - passing test
+	> ok 4 - passing test
+	> not ok 5 - failing test
+	> # false
+	> not ok 6 - failing test
+	> # false
+	> not ok 7 - failing test
+	> # false
+	> not ok 8 - pretend we have a known breakage # TODO known breakage
+	> not ok 9 - pretend we have a known breakage # TODO known breakage
+	> ok 10 - pretend we have fixed a known breakage # TODO known breakage vanished
+	> # 1 known breakage(s) vanished; please update test(s)
+	> # still have 2 known breakage(s)
+	> # failed 3 among remaining 7 test(s)
+	> 1..10
+	EOF
+"

 test_set_prereq HAVEIT
 haveit=no
 test_expect_success HAVEIT 'test runs if prerequisite is satisfied' '
@@ -159,19 +296,8 @@ then
 fi

 test_expect_success 'tests clean up even on failures' "
-	mkdir failing-cleanup &&
-	(
-	cd failing-cleanup &&
-
-	cat >failing-cleanup.sh <<-EOF &&
-	#!$SHELL_PATH
-
-	test_description='Failing tests with cleanup commands'
-
-	# Point to the t/test-lib.sh, which isn't in ../ as usual
-	TEST_DIRECTORY=\"$TEST_DIRECTORY\"
-	. \"\$TEST_DIRECTORY\"/test-lib.sh
-
+	test_must_fail run_sub_test_lib_test \
+		failing-cleanup 'Failing tests with cleanup commands' <<-\\EOF &&
 	test_expect_success 'tests clean up even after a failure' '
 		touch clean-after-failure &&
 		test_when_finished rm clean-after-failure &&
@@ -181,29 +307,21 @@ test_expect_success 'tests clean up even on failures' "
 		test_when_finished \"(exit 2)\"
 	'
 	test_done
-
 	EOF
-
-	chmod +x failing-cleanup.sh &&
-	test_must_fail ./failing-cleanup.sh >out 2>err &&
-	! test -s err &&
-	! test -f \"trash directory.failing-cleanup/clean-after-failure\" &&
-	sed -e 's/Z$//' -e 's/^> //' >expect <<-\\EOF &&
-	> not ok - 1 tests clean up even after a failure
+	check_sub_test_lib_test failing-cleanup <<-\\EOF
+	> not ok 1 - tests clean up even after a failure
 	> # Z
 	> # touch clean-after-failure &&
 	> # test_when_finished rm clean-after-failure &&
 	> # (exit 1)
 	> # Z
-	> not ok - 2 failure to clean up causes the test to fail
+	> not ok 2 - failure to clean up causes the test to fail
 	> # Z
 	> # test_when_finished \"(exit 2)\"
 	> # Z
 	> # failed 2 among 2 test(s)
 	> 1..2
 	EOF
-	test_cmp expect out
-	)
 "

 ################################################################
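Taken together, the two helpers added in this diff give the script a compact pattern for testing the test framework itself: feed run_sub_test_lib_test a small test script on stdin, then feed check_sub_test_lib_test the literal output you expect, writing each expected line with a leading "> " (stripped by its sed invocation) and marking otherwise-invisible trailing whitespace with a final "Z". A hedged sketch of how a further caller could use the pair (the sub-test name 'two-pass' and its body are invented for illustration and follow the 'full-pass' example above):

	# Hypothetical additional test using the helpers from this diff; the
	# expected TAP lines mirror the 'full-pass' case for two passing tests.
	test_expect_success 'pretend we have two passing tests' "
		run_sub_test_lib_test two-pass '2 passing tests' <<-\\EOF &&
		test_expect_success 'passing test #1' 'true'
		test_expect_success 'passing test #2' 'true'
		test_done
		EOF
		check_sub_test_lib_test two-pass <<-\\EOF
		> ok 1 - passing test #1
		> ok 2 - passing test #2
		> # passed all 2 test(s)
		> 1..2
		EOF
	"

As in the examples above, a sub-test that is expected to fail overall would be wrapped in test_must_fail instead.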