#
# RTEMS Tools Project (http://www.rtems.org/)
# Copyright 2013-2014 Chris Johns (chrisj@rtems.org)
# All rights reserved.
#
# This file is part of the RTEMS Tools package in 'rtems-tools'.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#

#
# RTEMS Testing Reports
#

import datetime
import os
import threading

from rtemstoolkit import error
from rtemstoolkit import log
from rtemstoolkit import path

43 | class report(object): |
---|
44 | '''RTEMS Testing report.''' |
---|
45 | |
---|
46 | def __init__(self, total): |
---|
47 | self.lock = threading.Lock() |
---|
48 | self.total = total |
---|
49 | self.total_len = len(str(total)) |
---|
50 | self.passed = 0 |
---|
51 | self.failed = 0 |
---|
52 | self.timeouts = 0 |
---|
53 | self.invalids = 0 |
---|
54 | self.invalid_tests = 0 |
---|
55 | self.results = {} |
---|
56 | self.name_max_len = 0 |
---|
57 | |
---|
58 | def __str__(self): |
---|
59 | msg = 'Passed: %*d%s' % (self.total_len, self.passed, os.linesep) |
---|
60 | msg += 'Failed: %*d%s' % (self.total_len, self.failed, os.linesep) |
---|
61 | msg += 'Timeouts: %*d%s' % (self.total_len, self.timeouts, os.linesep) |
---|
62 | msg += 'Invalid: %*d%s' % (self.total_len, self.invalids, os.linesep) |
---|
63 | return msg |
---|
64 | |
---|
65 | def set_invalid_tests(self, invalid_tests): |
---|
66 | self.invalid_tests = invalid_tests |
---|
67 | |
---|
68 | def start(self, index, total, name, executable, bsp_arch, bsp): |
---|
69 | header = '[%*d/%*d] p:%-*d f:%-*d t:%-*d i:%-*d | %s/%s: %s' % \ |
---|
70 | (len(str(total)), index, |
---|
71 | len(str(total)), total, |
---|
72 | len(str(total)), self.passed, |
---|
73 | len(str(total)), self.failed, |
---|
74 | len(str(total)), self.timeouts, |
---|
75 | len(str(total)), self.invalids, |
---|
76 | bsp_arch, |
---|
77 | bsp, |
---|
78 | path.basename(executable)) |
---|
79 | self.lock.acquire() |
---|
80 | if name in self.results: |
---|
81 | self.lock.release() |
---|
82 | raise error.general('duplicate test: %s' % (name)) |
---|
83 | self.results[name] = { 'index': index, |
---|
84 | 'bsp': bsp, |
---|
85 | 'bsp_arch': bsp_arch, |
---|
86 | 'exe': executable, |
---|
87 | 'start': datetime.datetime.now(), |
---|
88 | 'end': None, |
---|
89 | 'result': None, |
---|
90 | 'output': None, |
---|
91 | 'header': header } |
---|
92 | |
---|
93 | self.lock.release() |
---|
94 | log.notice(header, stdout_only = True) |
---|
95 | |
---|
96 | def end(self, name, output): |
---|
97 | start = False |
---|
98 | end = False |
---|
99 | timeout = False |
---|
100 | prefixed_output = [] |
---|
101 | for line in output: |
---|
102 | if line[0] == ']': |
---|
103 | if line[1].startswith('*** '): |
---|
104 | if line[1][4:].startswith('END OF '): |
---|
105 | end = True |
---|
106 | if line[1][4:].startswith('TIMEOUT TIMEOUT'): |
---|
107 | timeout = True |
---|
108 | else: |
---|
109 | start = True |
---|
110 | prefixed_output += [line[0] + ' ' + line[1]] |
---|
111 | self.lock.acquire() |
---|
112 | if name not in self.results: |
---|
113 | self.lock.release() |
---|
114 | raise error.general('test report missing: %s' % (name)) |
---|
115 | if self.results[name]['end'] is not None: |
---|
116 | self.lock.release() |
---|
117 | raise error.general('test already finished: %s' % (name)) |
---|
118 | self.results[name]['end'] = datetime.datetime.now() |
---|
119 | if start and end: |
---|
120 | status = 'passed' |
---|
121 | self.passed += 1 |
---|
122 | elif timeout: |
---|
123 | status = 'timeout' |
---|
124 | self.timeouts += 1 |
---|
125 | elif start: |
---|
126 | if not end: |
---|
127 | status = 'failed' |
---|
128 | self.failed += 1 |
---|
129 | else: |
---|
130 | if self.invalid_tests and path.basename(name) in self.invalid_tests: |
---|
131 | status = 'passed' |
---|
132 | self.passed += 1 |
---|
133 | else: |
---|
134 | status = 'invalid' |
---|
135 | self.invalids += 1 |
---|
136 | self.results[name]['result'] = status |
---|
137 | self.results[name]['output'] = prefixed_output |
---|
138 | if self.name_max_len < len(path.basename(name)): |
---|
139 | self.name_max_len = len(path.basename(name)) |
---|
140 | self.lock.release() |
---|
141 | |
---|
142 | def log(self, name, mode): |
---|
143 | if mode != 'none': |
---|
144 | self.lock.acquire() |
---|
145 | if name not in self.results: |
---|
146 | self.lock.release() |
---|
147 | raise error.general('test report missing: %s' % (name)) |
---|
148 | exe = path.basename(self.results[name]['exe']) |
---|
149 | result = self.results[name]['result'] |
---|
150 | time = self.results[name]['end'] - self.results[name]['start'] |
---|
151 | if mode != 'none': |
---|
152 | header = self.results[name]['header'] |
---|
153 | if mode == 'all' or result != 'passed': |
---|
154 | output = self.results[name]['output'] |
---|
155 | else: |
---|
156 | output = None |
---|
157 | self.lock.release() |
---|
158 | if header: |
---|
159 | log.output(header) |
---|
160 | if output: |
---|
161 | log.output(output) |
---|
162 | log.output('Result: %-10s Time: %s %s' % (result, str(time), exe)) |
---|
163 | |
---|
164 | def summary(self): |
---|
165 | def show_state(results, state, max_len): |
---|
166 | for name in results: |
---|
167 | if results[name]['result'] == state: |
---|
168 | log.output(' %s' % (path.basename(name))) |
---|
169 | log.output() |
---|
170 | log.notice('Passed: %*d' % (self.total_len, self.passed)) |
---|
171 | log.notice('Failed: %*d' % (self.total_len, self.failed)) |
---|
172 | log.notice('Timeouts: %*d' % (self.total_len, self.timeouts)) |
---|
173 | log.notice('Invalid: %*d' % (self.total_len, self.invalids)) |
---|
174 | log.output('----------%s' % ('-' * self.total_len)) |
---|
175 | log.notice('Total: %*d' % (self.total_len, self.total)) |
---|
176 | log.output() |
---|
177 | if self.failed: |
---|
178 | log.output('Failures:') |
---|
179 | show_state(self.results, 'failed', self.name_max_len) |
---|
180 | if self.timeouts: |
---|
181 | log.output('Timeouts:') |
---|
182 | show_state(self.results, 'timeout', self.name_max_len) |
---|
183 | if self.invalids: |
---|
184 | log.output('Invalid:') |
---|
185 | show_state(self.results, 'invalid', self.name_max_len) |
---|