Name | Routines Executed | Total Routines | % | Lines Executed | Total Lines | % | Lines Unexecuted |
/home/matt/eu/rds/include/std/unittest.e | 12 | 17 | 70.59% | 88 | 154 | 57.14% | 66 |
Routine | Lines Executed | Total Lines | % | Lines Unexecuted |
test_report() | 10 | 23 | 43.48% | 13 |
assert() | 0 | 9 | 0.00% | 9 |
test_failed() | 14 | 22 | 63.64% | 8 |
add_log() | 3 | 8 | 37.50% | 5 |
test_crash() | 0 | 4 | 0.00% | 4 |
set_accumulate_summary() | 0 | 3 | 0.00% | 3 |
set_wait_on_summary() | 0 | 3 | 0.00% | 3 |
test_fail() | 0 | 3 | 0.00% | 3 |
test_passed() | 6 | 7 | 85.71% | 1 |
record_result() | 6 | 6 | 100.00% | 0 |
set_test_abort() | 4 | 4 | 100.00% | 0 |
set_test_verbosity() | 3 | 3 | 100.00% | 0 |
test_equal() | 8 | 8 | 100.00% | 0 |
test_false() | 3 | 3 | 100.00% | 0 |
test_not_equal() | 9 | 9 | 100.00% | 0 |
test_pass() | 3 | 3 | 100.00% | 0 |
test_true() | 3 | 3 | 100.00% | 0 |
# | Executed | Source |
1 | -- (c) Copyright - See License.txt | |
2 | -- | |
3 | namespace unittest | |
4 | ||
5 | --**** | |
6 | -- == Unit Testing Framework | |
7 | -- | |
8 | -- < | |
9 | -- | |
10 | -- === Background | |
11 | -- Unit testing is the process of assuring that the smallest programming units | |
12 | -- are actually delivering functionality that complies with their specification. | |
13 | -- The units in question are usually individual routines rather than whole programs | |
14 | -- or applications. | |
15 | -- | |
16 | -- The theory is that if the components of a system are working correctly, then | |
17 | -- there is a high probability that a system using those components can be made | |
18 | -- to work correctly. | |
19 | -- | |
20 | -- In Euphoria terms, this framework provides the tools to make testing and reporting on | |
21 | -- functions and procedures easy and standardized. It gives us a simple way to | |
22 | -- write a test case and to report on the findings.\\ | |
23 | -- Example~: | |
24 | -- | |
25 | -- include std/unittest.e | |
26 | -- | |
27 | -- test_equal( "Power function test #1", 4, power(2, 2)) | |
28 | -- test_equal( "Power function test #2", 4, power(16, 0.5)) | |
29 | -- | |
30 | -- test_report() | |
31 | -- | |
32 | -- | |
33 | -- Name your test file using the special naming convention ##t_NAME.e## and then simply run | |
34 | -- ##eutest## in that directory. | |
35 | -- | |
36 | -- {{{ | |
37 | -- C:\Euphoria> eutest | |
38 | -- t_math.e: | |
39 | -- failed: Bad math, expected: 100 but got: 8 | |
40 | -- 2 tests run, 1 passed, 1 failed, 50.0% success | |
41 | -- | |
42 | -- Test failure summary: | |
43 | -- FAIL: t_math.e | |
44 | -- | |
45 | -- 2 file(s) run 1 file(s) failed, 50.0% success | |
46 | -- }}} | |
47 | -- | |
48 | -- In this example, we use the ##test_equal## function to record the result of | |
49 | -- a test. The first parameter is the name of the test, which can be anything | |
50 | -- and is displayed if the test fails. The second parameter is the expected | |
51 | -- result ~-- what we expect the function being tested to return. The third | |
52 | -- parameter is the actual result returned by the function being tested. This | |
53 | -- is usually written as a call to the function itself. | |
54 | -- | |
55 | -- It is typical to provide as many test cases as would be required to give us | |
56 | -- confidence that the function is being truly exercised. This includes calling | |
57 | -- it with typical values and edge-case or exceptional values. It is also useful | |
58 | -- to test the function's error handling by calling it with bad parameters (a sketch follows this introduction). | |
59 | -- | |
60 | -- When a test fails, the framework displays a message, showing the test's name, | |
61 | -- the expected result and the actual result. You can configure the framework to | |
62 | -- display each test run, regardless of whether it fails or not. | |
63 | -- | |
64 | -- After running a series of tests, you can get a summary displayed by calling | |
65 | -- the ##test_report##() procedure. To get a better feel for unit testing, have | |
66 | -- a look at the provided test cases for the standard library in the //tests// | |
67 | -- directory. | |
68 | -- | |
69 | -- When included in your program, unittest.e sets a crash handler to log a crash | |
70 | -- as a failure. | |
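The edge-case advice above can be made concrete. Below is a minimal sketch of a hypothetical test file (##t_abs.e## and the chosen values are illustrative only); ##abs## is the standard library routine from ##std/math.e##:

{{{
-- t_abs.e -- exercising abs() at typical values and edge cases
include std/unittest.e
include std/math.e

test_equal("abs of positive", 5, abs(5))        -- typical value
test_equal("abs of negative", 5, abs(-5))       -- typical value
test_equal("abs of zero", 0, abs(0))            -- boundary value
test_equal("abs of fraction", 0.5, abs(-0.5))   -- non-integer input

test_report()
}}}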
71 | ||
72 | --**** | |
73 | -- === Constants | |
74 | -- | |
75 | ||
76 | include std/io.e | |
77 | include std/pretty.e | |
78 | include std/search.e | |
79 | include std/filesys.e | |
80 | include std/math.e | |
81 | include std/types.e | |
82 | include std/error.e | |
83 | -- | |
84 | -- Public Variables | |
85 | -- | |
86 | ||
87 | -- Verbosity values | |
88 | public enum | |
89 | 101 | TEST_QUIET = 0, |
90 | 101 | TEST_SHOW_FAILED_ONLY, |
91 | 101 | TEST_SHOW_ALL |
92 | ||
93 | -- | |
94 | -- Private variables | |
95 | -- | |
96 | ||
97 | 101 | atom time_start = time(), time_test = time() |
98 | ||
99 | 101 | integer test_count = 0, tests_passed = 0, tests_failed = 0 |
100 | sequence filename | |
101 | 101 | integer verbose = TEST_SHOW_FAILED_ONLY |
102 | ||
103 | 101 | integer abort_on_fail = 0 |
104 | 101 | integer wait_on_summary = 0 |
105 | 101 | integer accumulate_on_summary = 0 |
106 | 101 | integer logging = 0, log_fh = 0 |
107 | ||
108 | -- | |
109 | -- Private utility functions | |
110 | -- | |
111 | ||
112 | 3403 | procedure add_log(object data)
113 | 3403 | if log_fh = 0 then |
114 | 3403 | return |
115 | end if | |
116 | ||
117 | 0 | puts(log_fh, "entry = ") |
118 | 0 | pretty_print(log_fh, data, {2, 2, 1, 78, "%d", "%.15g"}) |
119 | 0 | puts(log_fh, "\n") |
120 | 0 | flush(log_fh) |
121 | 0 | end procedure |
122 | ||
123 | 6 | procedure test_failed(sequence name, object a, object b, object TF = 0)
124 | 6 | if verbose >= TEST_SHOW_FAILED_ONLY then |
125 | 6 | printf(2, " failed: %s, expected: ", {name}) |
126 | 6 | if TF then |
127 | 0 | if not equal(a,0) then |
128 | 0 | puts(2, "TRUE") |
129 | else | |
130 | 0 | puts(2, "FALSE") |
131 | end if | |
132 | else | |
133 | 6 | pretty_print(2, a, {2,2,1,78,"%d", "%.15g"}) |
134 | end if | |
135 | 6 | puts(2, " but got: ") |
136 | 6 | if TF and integer(b) then |
137 | 0 | if not equal(b,0) then |
138 | 0 | puts(2, "TRUE") |
139 | else | |
140 | 0 | puts(2, "FALSE") |
141 | end if | |
142 | else | |
143 | 6 | pretty_print(2, b, {2,2,1,78,"%d", "%.15g"}) |
144 | end if | |
145 | 6 | puts(2, "\n") |
146 | end if | |
147 | ||
148 | 6 | tests_failed += 1 |
149 | ||
150 | 6 | add_log({ "failed", name, a, b, time() - time_test }) |
151 | 6 | time_test = time() |
152 | ||
153 | 6 | if abort_on_fail then |
154 | 0 | puts(2, "Abort On Fail set.\n") |
155 | 0 | abort(2) |
156 | end if | |
157 | 6 | end procedure |
158 | ||
159 | 3311 | procedure test_passed(sequence name)
160 | 3311 | if verbose >= TEST_SHOW_ALL then |
161 | 0 | printf(2, " passed: %s\n", {name}) |
162 | end if | |
163 | ||
164 | 3311 | tests_passed += 1 |
165 | ||
166 | 3311 | add_log({ "passed", name, time() - time_test }) |
167 | 3311 | time_test = time() |
168 | 3311 | end procedure |
169 | ||
170 | -- | |
171 | -- Global Testing Functions | |
172 | -- | |
173 | ||
174 | --**** | |
175 | -- === Setup Routines | |
176 | ||
177 | --** | |
178 | -- Set the amount of information that is displayed about passed and failed tests. | |
179 | -- | |
180 | -- Parameters: | |
181 | -- # ##verbosity## : an atom which takes predefined values for verbosity levels. | |
182 | -- | |
183 | -- Comments: | |
184 | -- The following values are allowable for ##verbosity##: | |
185 | -- * ##TEST_QUIET## ~-- 0, | |
186 | -- * ##TEST_SHOW_FAILED_ONLY## ~-- 1 | |
187 | -- * ##TEST_SHOW_ALL## ~-- 2 | |
188 | -- | |
189 | -- However, anything less than ##TEST_SHOW_FAILED_ONLY## is treated as ##TEST_QUIET##, and everything | |
190 | -- above ##TEST_SHOW_ALL## is treated as ##TEST_SHOW_ALL##. | |
191 | -- | |
192 | -- * At the lowest verbosity level, only the score is shown, i.e. the ratio of passed tests to total tests. | |
193 | -- * At the medium level, in addition, failed tests display their name, the expected outcome and | |
194 | -- the outcome they got. This is the initial setting. | |
195 | -- * At the highest level of verbosity, each test is reported as passed or failed. | |
196 | -- | |
197 | -- If a file crashes when it should not, this event is reported no matter the verbosity level. | |
198 | -- | |
199 | -- The command line switch "-failed" causes verbosity to be set to medium at startup. The | |
200 | -- command line switch "-all" causes verbosity to be set to high at startup. | |
201 | -- | |
202 | -- See Also: | |
203 | -- [[:test_report]] | |
204 | ||
205 | 1 | public procedure set_test_verbosity(atom verbosity)
206 | 1 | verbose = verbosity |
207 | 1 | end procedure |
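A minimal sketch of the effect of raising verbosity; with ##TEST_SHOW_ALL##, even passing tests print a line, and the summary is always shown:

{{{
include std/unittest.e

set_test_verbosity(TEST_SHOW_ALL)       -- report every test, passed or failed
test_equal("always reported", 4, 2 + 2)
test_report()  -- prints " passed: always reported" plus the summary line
}}}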
208 | ||
209 | --** | |
210 | -- Request the test report to pause before exiting. | |
211 | -- | |
212 | -- Parameters: | |
213 | -- # ##to_wait## : an integer, zero not to wait, nonzero to wait. | |
214 | -- | |
215 | -- Comments: | |
216 | -- Depending on the environment, the test results may disappear before they can be read if | |
217 | -- ##set_wait_on_summary(1)## was not called beforehand; waiting is not the default. The command | |
218 | -- line switch "-wait" performs this call. | |
219 | -- | |
220 | -- See Also: | |
221 | -- [[:test_report]] | |
222 | ||
223 | 0 | public procedure set_wait_on_summary(integer to_wait)
224 | 0 | wait_on_summary = to_wait |
225 | 0 | end procedure |
226 | ||
227 | --** | |
228 | -- Request the test report to save run stats in "unittest.dat" before exiting. | |
229 | -- | |
230 | -- Parameters: | |
231 | -- # ##accumulate## : an integer, zero not to accumulate, nonzero to accumulate. | |
232 | -- | |
233 | -- Comments: | |
234 | -- The file "unittest.dat" is appended to with {t,f,"name"}\\ | |
235 | -- : where | |
236 | -- :: //t// is the total number of tests run | |
237 | -- :: //f// is the total number of tests that failed, and //name// is the name of the test file | |
238 | -- | |
239 | ||
240 | 0 | public procedure set_accumulate_summary(integer accumulate)
241 | 0 | accumulate_on_summary = accumulate |
242 | 0 | end procedure |
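A sketch of accumulation in use; note that, as the ##test_report## code further below shows, the appended record also carries the test file's name:

{{{
include std/unittest.e

set_accumulate_summary(1)
test_pass("recorded in unittest.dat")
test_report()
-- appends a line such as {1,0,"t_example.e"} to unittest.dat
}}}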
243 | ||
244 | --** | |
245 | -- Set behavior on test failure, and return previous value. | |
246 | -- | |
247 | -- Parameters: | |
248 | -- # ##abort_test## : an integer, the new value for this setting. | |
249 | -- | |
250 | -- Returns: | |
251 | -- An **integer**, the previous value for the setting. | |
252 | -- | |
253 | -- Comments: | |
254 | -- By default, the tests go on even if a test fails. | |
255 | ||
256 | 2 | public function set_test_abort(integer abort_test)
257 | 2 | integer tmp = abort_on_fail |
258 | 2 | abort_on_fail = abort_test |
259 | ||
260 | 2 | return tmp |
261 | end function | |
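Since the previous setting is returned, a test file can make failures fatal for a critical stretch and then restore the old behavior; a sketch:

{{{
include std/unittest.e

integer prev = set_test_abort(1)            -- abort the whole run on any failure
test_true("precondition holds", 1 + 1 = 2)  -- if this failed, the run would stop here
prev = set_test_abort(prev)                 -- restore the earlier setting
}}}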
262 | ||
263 | --**** | |
264 | -- === Reporting | |
265 | ||
266 | --** | |
267 | -- Output test report | |
268 | -- | |
269 | -- Comments: | |
270 | -- | |
271 | -- The report components are described in the comments section for [[:set_test_verbosity]]. Everything | |
272 | -- prints on the standard error device. | |
273 | -- | |
274 | -- See Also: | |
275 | -- [[:set_test_verbosity]] | |
276 | ||
277 | 86 | public procedure test_report()
278 | atom score | |
279 | integer fh | |
280 | sequence fname | |
281 | ||
282 | 86 | if tests_failed > 0 or verbose >= TEST_SHOW_ALL then |
283 | 2 | if test_count = 0 then |
284 | 0 | score = 100 |
285 | else | |
286 | 2 | score = (tests_passed / test_count) * 100 |
287 | end if | |
288 | ||
289 | 2 | printf(2, " %d tests run, %d passed, %d failed, %.1f%% success\n", |
290 | {test_count, tests_passed, tests_failed, score}) | |
291 | end if | |
292 | ||
293 | 86 | if accumulate_on_summary then |
294 | 0 | fname = command_line() |
295 | 0 | if equal(fname[1], fname[2]) then |
296 | 0 | fname = fname[1] |
297 | else | |
298 | 0 | fname = fname[2] |
299 | end if | |
300 | 0 | fh = open("unittest.dat", "a") |
301 | 0 | printf(fh, "{%d,%d,\"%s\"}\n", {test_count, tests_failed, fname}) |
302 | 0 | close(fh) |
303 | end if | |
304 | ||
305 | 86 | add_log({ "summary", test_count, tests_failed, tests_passed, time() - time_start }) |
306 | ||
307 | 86 | if log_fh != 0 then |
308 | 0 | close( log_fh ) |
309 | 0 | log_fh = 0 |
310 | end if | |
311 | ||
312 | 86 | if match("t_c_", filename) = 1 then |
313 | 0 | puts(2, " test should have failed but was a success\n") |
314 | 0 | abort(0) |
315 | else | |
316 | 86 | abort(tests_failed > 0) |
317 | end if | |
318 | 0 | end procedure |
319 | ||
320 | 3317 | procedure record_result(integer success, sequence name, object a, object b, object TF = 0)
321 | 3317 | test_count += 1 |
322 | ||
323 | 3317 | if success then |
324 | 3311 | test_passed(name) |
325 | else | |
326 | 6 | test_failed(name, a, b, TF) |
327 | end if | |
328 | 3317 | end procedure |
329 | ||
330 | --**** | |
331 | -- === Tests | |
332 | -- | |
333 | ||
334 | --** | |
335 | -- Records whether a test passes by comparing two values. | |
336 | -- | |
337 | -- Parameters: | |
338 | -- # ##name## : a string, the name of the test | |
339 | -- # ##expected## : an object, the expected outcome of some action | |
340 | -- # ##outcome## : an object, some actual value that should equal the reference ##expected##. | |
341 | -- | |
342 | -- Comments: | |
343 | -- | |
344 | -- * For floating point numbers, a fuzz of 1e-9 is used to assess equality. | |
345 | -- | |
346 | -- A test is recorded as passed if equality holds between ##expected## and ##outcome##. The latter | |
347 | -- is typically a function call, or a variable that was set by some prior action. | |
348 | -- | |
349 | -- While ##expected## and ##outcome## are processed symmetrically, they are not recorded | |
350 | -- symmetrically, so be careful to pass ##expected## before ##outcome## for better test failure | |
351 | -- reports. | |
352 | -- | |
353 | -- See Also: | |
354 | -- [[:test_not_equal]], [[:test_true]], [[:test_false]], [[:test_pass]], [[:test_fail]] | |
355 | ||
356 | 2508 | public procedure test_equal(sequence name, object expected, object outcome)
357 | integer success | |
358 | ||
359 | 2508 | if equal(expected, outcome ) then |
360 | -- for inf and -inf simple values | |
361 | 2471 | success = 1 |
362 | 37 | elsif equal(0*expected, 0*outcome) then |
363 | -- for complicated sequence values | |
364 | 36 | success = max(abs(expected-outcome)) < 1e-9 |
365 | else | |
366 | 1 | success = 0 |
367 | end if | |
368 | ||
369 | 2508 | record_result(success, name, expected, outcome) |
370 | 2508 | end procedure |
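The 1e-9 fuzz means values that differ only by floating point rounding still compare equal, and the body above applies the same tolerance element-wise to sequences of matching shape; a sketch:

{{{
include std/unittest.e

-- 0.1 + 0.2 differs from 0.3 by about 5.6e-17, well inside the fuzz
test_equal("rounding tolerated", 0.3, 0.1 + 0.2)

-- the fuzz also covers sequences compared element by element
test_equal("sequence fuzz", {0.3, 1.0}, {0.1 + 0.2, 1.0})

test_report()
}}}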
371 | ||
372 | --** | |
373 | -- Records whether a test passes by comparing two values. | |
374 | -- | |
375 | -- Parameters: | |
376 | -- # ##name## : a string, the name of the test | |
377 | -- # ##expected## : an object, the expected outcome of some action | |
378 | -- # ##outcome## : an object, some actual value that should equal the reference ##expected##. | |
379 | -- | |
380 | -- Comments: | |
381 | -- * For atoms, a fuzz of 1e-9 is used to assess equality. | |
382 | -- * For sequences, no such fuzz is implemented. | |
383 | -- | |
384 | -- A test is recorded as passed if equality does not hold between ##expected## and ##outcome##. The | |
385 | -- latter is typically a function call, or a variable that was set by some prior action. | |
386 | -- | |
387 | -- See Also: | |
388 | -- [[:test_equal]], [[:test_true]], [[:test_false]], [[:test_pass]], [[:test_fail]] | |
389 | ||
390 | 36 | public procedure test_not_equal(sequence name, object a, object b)
391 | integer success | |
392 | 36 | if sequence(a) or sequence(b) then |
393 | 10 | success = not equal(a,b) |
394 | else | |
395 | 26 | if a > b then |
396 | 1 | success = ((a-b) >= 1e-9) |
397 | else | |
398 | 25 | success = ((b-a) >= 1e-9) |
399 | end if | |
400 | end if | |
401 | 36 | a = "anything but '" & pretty_sprint( a, {2,2,1,78,"%d", "%.15g"}) & "'" |
402 | 36 | record_result(success, name, a, b) |
403 | 36 | end procedure |
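A sketch of the atom/sequence asymmetry described above:

{{{
include std/unittest.e

-- atoms: passes only when the values differ by at least 1e-9
test_not_equal("clearly apart", 1.0, 1.001)

-- sequences: compared exactly, so any difference passes
test_not_equal("one element differs", {1, 2, 3}, {1, 2, 4})

test_report()
}}}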
404 | ||
405 | --** | |
406 | -- Records whether a test passes. | |
407 | -- | |
408 | -- Parameters: | |
409 | -- # ##name## : a string, the name of the test | |
410 | -- # ##outcome## : an object, some actual value that should not be zero. | |
411 | -- | |
412 | -- Comments: | |
413 | -- This assumes an expected value different from 0. No fuzz is applied when checking whether an | |
414 | -- atom is zero or not. Use [[:test_equal]]() instead in this case. | |
415 | -- | |
416 | -- See Also: | |
417 | -- [[:test_equal]], [[:test_not_equal]], [[:test_false]], [[:test_pass]], [[:test_fail]] | |
418 | ||
419 | 408 | public procedure test_true(sequence name, object outcome)
420 | 408 | record_result(not equal(outcome,0), name, 1, outcome, 1 ) |
421 | 408 | end procedure |
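The outcome argument is typically a boolean expression built around the routine under test; a sketch using the ##find## and ##length## builtins:

{{{
include std/unittest.e

test_true("find locates a member", find(3, {1, 2, 3}) != 0)
test_true("length is positive", length("abc") > 0)
test_report()
}}}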
422 | ||
423 | --** | |
424 | -- Records whether a test passes. If it fails, the program also fails. | |
425 | -- | |
426 | -- Parameters: | |
427 | -- # ##name## : a string, the name of the test | |
428 | -- # ##outcome## : an object, some actual value that should not be zero. | |
429 | -- | |
430 | -- Comments: | |
431 | -- This is identical to ##test_true()## except that if the test fails, the | |
432 | -- program will also be forced to fail at this point. | |
433 | -- | |
434 | -- See Also: | |
435 | -- [[:test_equal]], [[:test_not_equal]], [[:test_false]], [[:test_pass]], [[:test_fail]] | |
436 | ||
437 | 0 | public procedure assert(object name, object outcome)
438 | 0 | if sequence(name) then |
439 | 0 | test_true(name, outcome) |
440 | 0 | if equal(outcome,0) then |
441 | 0 | crash(name) |
442 | end if | |
443 | else | |
444 | 0 | test_true(outcome, name) |
445 | 0 | if equal(name,0) then |
446 | 0 | crash(outcome) |
447 | end if | |
448 | end if | |
449 | 0 | end procedure |
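##assert## suits preconditions that later tests depend on, since a failure both records the test and stops the run via ##crash##; a sketch:

{{{
include std/unittest.e

sequence data = {10, 20, 30}
assert("data must not be empty", length(data) > 0)
-- safe: the assert above would have crashed on an empty sequence
test_equal("first element", 10, data[1])
test_report()
}}}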
450 | ||
451 | --** | |
452 | -- Records whether a test passes by comparing two values. | |
453 | -- | |
454 | -- Parameters: | |
455 | -- # ##name## : a string, the name of the test | |
456 | -- # ##outcome## : an object, some actual value that should be zero | |
457 | -- | |
458 | -- Comments: | |
459 | -- This assumes an expected value of 0. No fuzz is applied when checking whether an atom is zero | |
460 | -- or not. Use [[:test_equal]]() instead in this case. | |
461 | -- | |
462 | -- See Also: | |
463 | -- [[:test_equal]], [[:test_not_equal]], [[:test_true]], [[:test_pass]], [[:test_fail]] | |
464 | ||
465 | 305 | public procedure test_false(sequence name, object outcome)
466 | 305 | record_result(equal(outcome, 0), name, 0, outcome, 1) |
467 | 305 | end procedure |
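A sketch mirroring ##test_true##, again using builtins:

{{{
include std/unittest.e

test_false("absent element is not found", find(9, {1, 2, 3}))
test_false("empty string has zero length", length(""))
test_report()
}}}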
468 | ||
469 | --** | |
470 | -- Records that a test failed. | |
471 | -- | |
472 | -- Parameters: | |
473 | -- # ##name## : a string, the name of the test | |
474 | -- | |
475 | -- See Also: | |
476 | -- [[:test_equal]], [[:test_not_equal]],[[:test_true]], [[:test_false]], [[:test_pass]] | |
477 | ||
478 | 0 | public procedure test_fail(sequence name)
479 | 0 | record_result(0, name, 1, 0, 1) |
480 | 0 | end procedure |
481 | ||
482 | --** | |
483 | -- Records that a test passed. | |
484 | -- | |
485 | -- Parameters: | |
486 | -- # ##name## : a string, the name of the test | |
487 | -- | |
488 | -- See Also: | |
489 | -- [[:test_equal]], [[:test_not_equal]],[[:test_true]], [[:test_false]], [[:test_fail]] | |
490 | ||
491 | 60 | public procedure test_pass(sequence name)
492 | 60 | record_result(1, name, 1, 1, 1) |
493 | 60 | end procedure |
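##test_pass## and ##test_fail## suit checks that do not reduce to a single comparison, such as verifying that control reached a particular branch; a sketch:

{{{
include std/unittest.e

object outcome = 1   -- stands in for some condition verified by hand
if outcome then
    test_pass("reached the expected branch")
else
    test_fail("reached the expected branch")
end if
test_report()
}}}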
494 | ||
495 | 101 | sequence cmd = command_line() |
496 | 101 | filename = cmd[2] |
497 | ||
498 | ||
499 | -- strip off path information | |
500 | 101 | while find( SLASH, filename ) do |
501 | 0 | filename = filename[find( SLASH, filename )+1..$] |
502 | 0 | end while |
503 | ||
504 | 101 | for i = 3 to length(cmd) do |
505 | 0 | if equal(cmd[i], "-all") then |
506 | 0 | set_test_verbosity(TEST_SHOW_ALL) |
507 | 0 | elsif equal(cmd[i], "-failed") then |
508 | 0 | set_test_verbosity(TEST_SHOW_FAILED_ONLY) |
509 | 0 | elsif equal(cmd[i], "-wait") then |
510 | 0 | set_wait_on_summary(1) |
511 | 0 | elsif begins(cmd[i], "-accumulate") then |
512 | 0 | set_accumulate_summary(1) |
513 | 0 | elsif equal(cmd[i], "-log") then |
514 | 0 | log_fh = open("unittest.log", "a") |
515 | 0 | if log_fh = -1 then |
516 | 0 | puts(2,"Cannot open unittest.log for append.\n") |
517 | 0 | abort(1) |
518 | end if | |
519 | 0 | add_log({"file", filename}) |
520 | end if | |
521 | 0 | end for |
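As the loop above shows, these switches are read from the test file's own command line, so they can also be supplied when running a single test file directly (path and file name are illustrative):

{{{
C:\Euphoria> eui t_math.e -all -log
}}}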
522 | ||
523 | 101 | ifdef not CRASH then |
524 | ||
525 | include std/error.e | |
526 | ||
527 | 0 | function test_crash(object o)
528 | 0 | test_fail( "unittesting crashed" ) |
529 | 0 | test_report() |
530 | 0 | return 0 |
531 | end function | |
532 | 87 | crash_routine( routine_id( "test_crash" ) ) |
533 | ||
534 | end ifdef |