#
# RTEMS Tools Project (http://www.rtems.org/)
# Copyright 2013-2014 Chris Johns (chrisj@rtems.org)
# All rights reserved.
#
# This file is part of the RTEMS Tools package in 'rtems-tools'.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#

#
# RTEMS Testing Reports
#

import datetime
import os
import threading

from rtemstoolkit import error
from rtemstoolkit import log
from rtemstoolkit import path

#
# Maybe this should be a configuration.
#
# Executables whose base name appears here are reported as passed even when
# their output carries no clean begin/end test banners.
test_fail_excludes = [
    'minimum'
]
49 | |
---|
class report(object):
    '''RTEMS Testing report.

    Collects per-test results, possibly from multiple threads, and prints
    progress headers, per-test logs and a final summary.
    '''

    def __init__(self, total):
        '''total is the number of tests that will be run.'''
        # Tests can start and finish on different threads; the lock guards
        # the counters and the results table.
        self.lock = threading.Lock()
        self.total = total
        # Field width so counts align when formatted with '%*d'.
        self.total_len = len(str(total))
        self.passed = 0
        self.user_input = 0
        self.failed = 0
        self.expected_fail = 0
        self.indeterminate = 0
        self.benchmark = 0
        self.timeouts = 0
        self.invalids = 0
        # Per-test records keyed by test name; see start() for the layout.
        self.results = {}
        # Longest test base name seen so far; usable for summary alignment.
        self.name_max_len = 0

    def __str__(self):
        '''Return the result counters as a multi-line string.'''
        # Fix: the original read 'self.self.indeterminate' and
        # 'self.self.benchmark', which raised AttributeError.
        msg = 'Passed: %*d%s' % (self.total_len, self.passed, os.linesep)
        msg += 'Failed: %*d%s' % (self.total_len, self.failed, os.linesep)
        msg += 'User Input: %*d%s' % (self.total_len, self.user_input, os.linesep)
        msg += 'Expected Fail: %*d%s' % (self.total_len, self.expected_fail, os.linesep)
        msg += 'Indeterminate: %*d%s' % (self.total_len, self.indeterminate, os.linesep)
        msg += 'Benchmark: %*d%s' % (self.total_len, self.benchmark, os.linesep)
        msg += 'Timeout: %*d%s' % (self.total_len, self.timeouts, os.linesep)
        msg += 'Invalid: %*d%s' % (self.total_len, self.invalids, os.linesep)
        return msg

    def start(self, index, total, name, executable, bsp_arch, bsp):
        '''Register a test as started and print a one-line progress header.

        Raises error.general if a test with the same name was already
        started.
        '''
        header = '[%*d/%*d] p:%-*d f:%-*d u:%-*d e:%-*d I:%-*d B:%-*d t:%-*d i:%-*d | %s/%s: %s' % \
                 (len(str(total)), index,
                  len(str(total)), total,
                  len(str(total)), self.passed,
                  len(str(total)), self.failed,
                  len(str(total)), self.user_input,
                  len(str(total)), self.expected_fail,
                  len(str(total)), self.indeterminate,
                  len(str(total)), self.benchmark,
                  len(str(total)), self.timeouts,
                  len(str(total)), self.invalids,
                  bsp_arch,
                  bsp,
                  path.basename(executable))
        self.lock.acquire()
        if name in self.results:
            # Always release before raising so the lock is not leaked.
            self.lock.release()
            raise error.general('duplicate test: %s' % (name))
        self.results[name] = { 'index': index,
                               'bsp': bsp,
                               'bsp_arch': bsp_arch,
                               'exe': executable,
                               'start': datetime.datetime.now(),
                               'end': None,
                               'result': None,
                               'output': None,
                               'header': header }
        self.lock.release()
        log.notice(header, stdout_only = True)

    def end(self, name, output):
        '''Record the end of a test, classify it and update the counters.

        output is a sequence of (prefix, text) pairs; console output lines
        carry the ']' prefix and are scanned for the '*** ' test banners.
        Returns the status string, e.g. 'passed', 'failed', 'timeout'.
        '''
        start = False
        end = False
        state = None
        timeout = False
        prefixed_output = []
        for line in output:
            if line[0] == ']':
                if line[1].startswith('*** '):
                    # Banners: '*** BEGIN OF ...', '*** END OF ...',
                    # '*** TEST STATE: <state>', '*** TIMEOUT TIMEOUT'.
                    if line[1][4:].startswith('BEGIN OF '):
                        start = True
                    if line[1][4:].startswith('END OF '):
                        end = True
                    if line[1][4:].startswith('TEST STATE:'):
                        state = line[1][15:].strip()
                    if line[1][4:].startswith('TIMEOUT TIMEOUT'):
                        timeout = True
            prefixed_output += [line[0] + ' ' + line[1]]
        self.lock.acquire()
        if name not in self.results:
            self.lock.release()
            raise error.general('test report missing: %s' % (name))
        if self.results[name]['end'] is not None:
            self.lock.release()
            raise error.general('test already finished: %s' % (name))
        self.results[name]['end'] = datetime.datetime.now()
        if state is None:
            # No explicit test-state banner; classify from the begin/end
            # markers. (The redundant nested 'state is None' re-check in the
            # original has been removed.)
            if start and end:
                status = 'passed'
                self.passed += 1
            elif timeout:
                status = 'timeout'
                self.timeouts += 1
            elif start:
                if not end:
                    status = 'failed'
                    self.failed += 1
            else:
                # No begin banner at all; some executables never emit one
                # and are excluded from being counted as failures.
                exe_name = name.split('.')[0]
                if exe_name in test_fail_excludes:
                    status = 'passed'
                    self.passed += 1
                else:
                    status = 'invalid'
                    self.invalids += 1
        else:
            if state == 'EXPECTED_FAIL':
                if start and end:
                    status = 'passed'
                    self.passed += 1
                else:
                    status = 'expected-fail'
                    self.expected_fail += 1
            elif state == 'USER_INPUT':
                status = 'user-input'
                self.user_input += 1
            elif state == 'INDETERMINATE':
                if start and end:
                    status = 'passed'
                    self.passed += 1
                else:
                    status = 'indeterminate'
                    self.indeterminate += 1
            elif state == 'BENCHMARK':
                status = 'benchmark'
                self.benchmark += 1
            else:
                # Fix: release the lock before raising; the original leaked
                # the lock on this path while all other error paths release.
                self.lock.release()
                raise error.general('invalid test state: %s: %s' % (name, state))
        self.results[name]['result'] = status
        self.results[name]['output'] = prefixed_output
        if self.name_max_len < len(path.basename(name)):
            self.name_max_len = len(path.basename(name))
        self.lock.release()
        return status

    def log(self, name, mode):
        '''Log a finished test's header and, depending on mode, its output.

        mode selects verbosity; 'none' logs nothing, 'all' logs output for
        every test, otherwise output is logged only for failed, timed-out
        or invalid tests.
        '''
        if mode != 'none':
            self.lock.acquire()
            if name not in self.results:
                self.lock.release()
                raise error.general('test report missing: %s' % (name))
            exe = path.basename(self.results[name]['exe'])
            result = self.results[name]['result']
            time = self.results[name]['end'] - self.results[name]['start']
            failed = result in ['failed', 'timeout', 'invalid']
            result = 'Result: %-10s Time: %s %s' % (result, str(time), exe)
            # The original re-tested mode != 'none' here; the outer guard
            # already ensures it, so the redundant check has been removed.
            header = self.results[name]['header']
            if mode == 'all' or failed:
                output = self.results[name]['output']
            else:
                output = None
            self.lock.release()
            if header:
                log.output(header)
            if output:
                log.output(result)
                log.output(output)

    def summary(self):
        '''Print the final counter summary and list the tests in each
        non-passed category.'''
        def show_state(results, state, max_len):
            # List the tests whose recorded result matches state.
            # max_len is currently unused; kept for interface stability.
            for name in results:
                if results[name]['result'] == state:
                    log.output(' %s' % (path.basename(name)))
        log.output()
        log.notice('Passed: %*d' % (self.total_len, self.passed))
        log.notice('Failed: %*d' % (self.total_len, self.failed))
        log.notice('User Input: %*d' % (self.total_len, self.user_input))
        log.notice('Expected Fail: %*d' % (self.total_len, self.expected_fail))
        log.notice('Indeterminate: %*d' % (self.total_len, self.indeterminate))
        log.notice('Benchmark: %*d' % (self.total_len, self.benchmark))
        log.notice('Timeout: %*d' % (self.total_len, self.timeouts))
        log.notice('Invalid: %*d' % (self.total_len, self.invalids))
        log.output('---------------%s' % ('-' * self.total_len))
        log.notice('Total: %*d' % (self.total_len, self.total))
        log.output()
        if self.failed:
            log.output('Failures:')
            show_state(self.results, 'failed', self.name_max_len)
        if self.user_input:
            log.output('User Input:')
            show_state(self.results, 'user-input', self.name_max_len)
        if self.expected_fail:
            log.output('Expected Fail:')
            show_state(self.results, 'expected-fail', self.name_max_len)
        if self.indeterminate:
            log.output('Indeterminate:')
            show_state(self.results, 'indeterminate', self.name_max_len)
        if self.benchmark:
            log.output('Benchmark:')
            show_state(self.results, 'benchmark', self.name_max_len)
        if self.timeouts:
            log.output('Timeouts:')
            show_state(self.results, 'timeout', self.name_max_len)
        if self.invalids:
            log.output('Invalid:')
            show_state(self.results, 'invalid', self.name_max_len)