#
# RTEMS Tools Project (http://www.rtems.org/)
# Copyright 2013-2014 Chris Johns (chrisj@rtems.org)
# All rights reserved.
#
# This file is part of the RTEMS Tools package in 'rtems-tools'.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#

#
# RTEMS Testing Reports
#

from __future__ import print_function

import datetime
import os
import threading

from rtemstoolkit import error
from rtemstoolkit import log
from rtemstoolkit import path

#
# Maybe this should be a configuration.
#
test_fail_excludes = [
    'minimum'
]

class report(object):
    '''RTEMS Testing report.'''

    def __init__(self, total):
        self.lock = threading.Lock()
        self.total = total
        self.total_len = len(str(total))
        self.passed = 0
        self.failed = 0
        self.user_input = 0
        self.expected_fail = 0
        self.indeterminate = 0
        self.benchmark = 0
        self.timeouts = 0
        self.invalids = 0
        self.wrong_version = 0
        self.wrong_build = 0
        self.wrong_tools = 0
        self.results = {}      # per test result records, keyed by executable name
        self.config = {}       # version/build/tools recorded from the first test to report them
        self.name_max_len = 0

    def __str__(self):
        msg = 'Passed:        %*d%s' % (self.total_len, self.passed, os.linesep)
        msg += 'Failed:        %*d%s' % (self.total_len, self.failed, os.linesep)
        msg += 'User Input:    %*d%s' % (self.total_len, self.user_input, os.linesep)
        msg += 'Expected Fail: %*d%s' % (self.total_len, self.expected_fail, os.linesep)
        msg += 'Indeterminate: %*d%s' % (self.total_len, self.indeterminate, os.linesep)
        msg += 'Benchmark:     %*d%s' % (self.total_len, self.benchmark, os.linesep)
        msg += 'Timeout:       %*d%s' % (self.total_len, self.timeouts, os.linesep)
        msg += 'Invalid:       %*d%s' % (self.total_len, self.invalids, os.linesep)
        msg += 'Wrong Version: %*d%s' % (self.total_len, self.wrong_version, os.linesep)
        msg += 'Wrong Build:   %*d%s' % (self.total_len, self.wrong_build, os.linesep)
        msg += 'Wrong Tools:   %*d%s' % (self.total_len, self.wrong_tools, os.linesep)
        return msg

    def start(self, index, total, name, executable, bsp_arch, bsp, show_header):
        header = '[%*d/%*d] p:%-*d f:%-*d u:%-*d e:%-*d I:%-*d B:%-*d ' \
                 't:%-*d i:%-*d W:%-*d | %s/%s: %s' % \
                 (len(str(total)), index,
                  len(str(total)), total,
                  len(str(total)), self.passed,
                  len(str(total)), self.failed,
                  len(str(total)), self.user_input,
                  len(str(total)), self.expected_fail,
                  len(str(total)), self.indeterminate,
                  len(str(total)), self.benchmark,
                  len(str(total)), self.timeouts,
                  len(str(total)), self.invalids,
                  len(str(total)), self.wrong_version + self.wrong_build + self.wrong_tools,
                  bsp_arch,
                  bsp,
                  path.basename(name))
        self.lock.acquire()
        if name in self.results:
            self.lock.release()
            raise error.general('duplicate test: %s' % (name))
        self.results[name] = { 'index': index,
                               'bsp': bsp,
                               'bsp_arch': bsp_arch,
                               'exe': name,
                               'start': datetime.datetime.now(),
                               'end': None,
                               'result': None,
                               'output': None,
                               'header': header }
        self.lock.release()
        if show_header:
            log.notice(header, stdout_only = True)

    def end(self, name, output, output_prefix):
        start = False
        end = False
        state = None
        version = None
        build = None
        tools = None
        timeout = False
        prefixed_output = []
        #
        # Scan the captured output for the '*** ' banners printed by the RTEMS
        # test harness and collect the test's version, state, build and tools
        # settings.
        #
        for line in output:
            if line[0] == output_prefix:
                if line[1].startswith('*** '):
                    banner = line[1][4:]
                    if banner.startswith('BEGIN OF '):
                        start = True
                    elif banner.startswith('END OF '):
                        end = True
                    elif banner.startswith('TIMEOUT TIMEOUT'):
                        timeout = True
                    elif banner.startswith('TEST VERSION:'):
                        version = banner[13:].strip()
                    elif banner.startswith('TEST STATE:'):
                        state = banner[11:].strip()
                    elif banner.startswith('TEST BUILD:'):
                        build = ','.join(banner[11:].strip().split(' '))
                    elif banner.startswith('TEST TOOLS:'):
                        tools = banner[11:].strip()
                prefixed_output += [line[0] + ' ' + line[1]]
        self.lock.acquire()
        try:
            if name not in self.results:
                raise error.general('test report missing: %s' % (name))
            if self.results[name]['end'] is not None:
                raise error.general('test already finished: %s' % (name))
            self.results[name]['end'] = datetime.datetime.now()
            if state is not None and state not in ['BENCHMARK',
                                                   'EXPECTED_FAIL',
                                                   'INDETERMINATE',
                                                   'USER_INPUT']:
                if version:
                    if 'version' not in self.config:
                        self.config['version'] = version
                    else:
                        if version != self.config['version']:
                            state = 'WRONG-VERSION'
                if build:
                    if 'build' not in self.config:
                        self.config['build'] = build
                    else:
                        if build != self.config['build']:
                            state = 'WRONG-BUILD'
                if tools:
                    if 'tools' not in self.config:
                        self.config['tools'] = tools
                    else:
                        if tools != self.config['tools']:
                            state = 'WRONG-TOOLS'
            if state is None or state == 'EXPECTED-PASS':
                if start and end:
                    if state is None or state == 'EXPECTED-PASS':
                        status = 'passed'
                        self.passed += 1
                elif timeout:
                    status = 'timeout'
                    self.timeouts += 1
                elif start:
                    if not end:
                        status = 'failed'
                        self.failed += 1
                else:
                    exe_name = path.basename(name).split('.')[0]
                    if exe_name in test_fail_excludes:
                        status = 'passed'
                        self.passed += 1
                    else:
                        status = 'invalid'
                        self.invalids += 1
            else:
                if state == 'EXPECTED_FAIL':
                    if start and end:
                        status = 'passed'
                        self.passed += 1
                    else:
                        status = 'expected-fail'
                        self.expected_fail += 1
                elif state == 'USER_INPUT':
                    status = 'user-input'
                    self.user_input += 1
                elif state == 'INDETERMINATE':
                    if start and end:
                        status = 'passed'
                        self.passed += 1
                    else:
                        status = 'indeterminate'
                        self.indeterminate += 1
                elif state == 'BENCHMARK':
                    status = 'benchmark'
                    self.benchmark += 1
                elif state == 'WRONG-VERSION':
                    status = 'wrong-version'
                    self.wrong_version += 1
                elif state == 'WRONG-BUILD':
                    status = 'wrong-build'
                    self.wrong_build += 1
                elif state == 'WRONG-TOOLS':
                    status = 'wrong-tools'
                    self.wrong_tools += 1
                else:
                    raise error.general('invalid test state: %s: %s' % (name, state))
            self.results[name]['result'] = status
            self.results[name]['output'] = prefixed_output
            if self.name_max_len < len(path.basename(name)):
                self.name_max_len = len(path.basename(name))
        finally:
            self.lock.release()
        return status

    def log(self, name, mode):
        status_fails = ['failed', 'timeout', 'invalid',
                        'wrong-version', 'wrong-build', 'wrong-tools']
        if mode != 'none':
            self.lock.acquire()
            if name not in self.results:
                self.lock.release()
                raise error.general('test report missing: %s' % (name))
            exe = path.basename(self.results[name]['exe'])
            result = self.results[name]['result']
            time = self.results[name]['end'] - self.results[name]['start']
            failed = result in status_fails
            result = 'Result: %-10s Time: %s %s' % (result, str(time), exe)
            header = self.results[name]['header']
            if mode == 'all' or failed:
                output = self.results[name]['output']
            else:
                output = None
            self.lock.release()
            if header:
                log.output(header)
            if output:
                log.output(result)
                log.output(output)

    def get_config(self, config, not_found = None):
        if config in self.config:
            return self.config[config]
        return not_found

    def score_card(self, mode = 'full'):
        if mode == 'short':
            wrongs = self.wrong_version + self.wrong_build + self.wrong_tools
            return 'Passed:%d Failed:%d Timeout:%d Invalid:%d Wrong:%d' % \
                (self.passed, self.failed, self.timeouts, self.invalids, wrongs)
        elif mode == 'full':
            l = []
            l += ['Passed:        %*d' % (self.total_len, self.passed)]
            l += ['Failed:        %*d' % (self.total_len, self.failed)]
            l += ['User Input:    %*d' % (self.total_len, self.user_input)]
            l += ['Expected Fail: %*d' % (self.total_len, self.expected_fail)]
            l += ['Indeterminate: %*d' % (self.total_len, self.indeterminate)]
            l += ['Benchmark:     %*d' % (self.total_len, self.benchmark)]
            l += ['Timeout:       %*d' % (self.total_len, self.timeouts)]
            l += ['Invalid:       %*d' % (self.total_len, self.invalids)]
            l += ['Wrong Version: %*d' % (self.total_len, self.wrong_version)]
            l += ['Wrong Build:   %*d' % (self.total_len, self.wrong_build)]
            l += ['Wrong Tools:   %*d' % (self.total_len, self.wrong_tools)]
            l += ['---------------%s' % ('-' * self.total_len)]
            l += ['Total:         %*d' % (self.total_len, self.total)]
            return os.linesep.join(l)
        raise error.general('invalid score card mode: %s' % (mode))

    def failures(self):
        def show_state(results, state, max_len):
            l = []
            for name in results:
                if results[name]['result'] == state:
                    l += [' %s' % (path.basename(name))]
            return l
        l = []
        if self.failed:
            l += ['Failures:']
            l += show_state(self.results, 'failed', self.name_max_len)
        if self.user_input:
            l += ['User Input:']
            l += show_state(self.results, 'user-input', self.name_max_len)
        if self.expected_fail:
            l += ['Expected Fail:']
            l += show_state(self.results, 'expected-fail', self.name_max_len)
        if self.indeterminate:
            l += ['Indeterminate:']
            l += show_state(self.results, 'indeterminate', self.name_max_len)
        if self.benchmark:
            l += ['Benchmark:']
            l += show_state(self.results, 'benchmark', self.name_max_len)
        if self.timeouts:
            l += ['Timeouts:']
            l += show_state(self.results, 'timeout', self.name_max_len)
        if self.invalids:
            l += ['Invalid:']
            l += show_state(self.results, 'invalid', self.name_max_len)
        if self.wrong_version:
            l += ['Wrong Version:']
            l += show_state(self.results, 'wrong-version', self.name_max_len)
        if self.wrong_build:
            l += ['Wrong Build:']
            l += show_state(self.results, 'wrong-build', self.name_max_len)
        if self.wrong_tools:
            l += ['Wrong Tools:']
            l += show_state(self.results, 'wrong-tools', self.name_max_len)
        return os.linesep.join(l)

    def summary(self):
        log.output()
        log.notice(self.score_card())
        log.output(self.failures())
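
#
# A minimal, self-contained sketch of how this reporter is driven. The real
# rtems-test driver wires start()/end() into the BSP executor; the test name,
# captured output lines and BSP values below are made up for illustration,
# and the sketch assumes the rtemstoolkit log falls back to stdout when no
# log file has been configured.
#
if __name__ == '__main__':
    r = report(1)
    r.start(1, 1, 'hello.exe', 'hello.exe', 'sparc', 'erc32', True)
    captured = [('] ', '*** BEGIN OF TEST HELLO ***'),
                ('] ', 'Hello World'),
                ('] ', '*** END OF TEST HELLO ***')]
    r.end('hello.exe', captured, '] ')
    r.log('hello.exe', 'all')
    r.summary()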