#!/usr/bin/python

# $Id: run-tests.py 4325 2009-01-27 14:02:16Z potyra $ 

# Run the test suite.
#
# Copyright (C) 2008-2009 FAUmachine Team <info@faumachine.org>.
# This program is free software. You can redistribute it and/or modify it
# under the terms of the GNU General Public License, either version 2 of
# the License, or (at your option) any later version. See COPYING.


import os
import subprocess
import time
import re
import sys

# ANSI escape sequences for colored terminal output
COL_RED="\033[31m\033[1m"
COL_GREEN="\033[32m\033[1m"
COL_YELLOW="\033[33m\033[1m"
COL_NORM="\033[0m"

class file_handling:
	@classmethod
	def setup_filehandling(cls, srcdir, builddir):
		cls._srcdir = srcdir
		cls._builddir = builddir

	@classmethod
	def get_src_name(cls, filename, prefix="."):
		return "%s/%s/%s" % (cls._srcdir, prefix, filename)

	@classmethod
	def get_output_name(cls, filename, prefix="."):
		dir = "%s/%s" % (cls._builddir, prefix)
		if not os.path.isdir(dir):
			print "Creating directory %s" % dir
			os.mkdir(dir)
		return "%s/%s" % (dir, filename)

	@classmethod
	def get_compiler(cls):
		return "../fauhdlc"

	@classmethod
	def get_intepreter(cls):
		return "../interpreter/fauhdli"
				

class eval_helper:
	""" helper for string parsing """
	@classmethod
	def eval_opt_list(cls, s):
		""" evaluate an option list and return a dictionary 

		eval_opt_list will take a string, and find all NAME="option" 
		entries.
		The entries are stored in a dictionary with NAME as the key 
		and the option (without quotes) as the value. This dictionary 
		is returned.
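
		For example (illustrative entry), the line
		    tc_counter OPTS="counter.vhdl" QUIET="True"
		yields {"OPTS": "counter.vhdl", "QUIET": "True"}; the leading
		test name has no ="..." part and is therefore not matched.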
		"""
		if not hasattr(cls, "preg"):
			reg = "(?P<name>[A-Za-z0-9_]*)=\"(?P<entry>[^\"]*)\""
			cls.preg = re.compile(reg)

		ret = {}
		i = cls.preg.finditer(s)
		for match in i:
			ret[match.group("name")] = match.group("entry")

		return ret

class InterpreteTC:
	""" class to interprete an intermediate code file """
	def __init__(self, icpath, entity="work:test_bench", debug=False):
		self._icpath = icpath
		self._entity = entity
		self._debug = debug
		self._runtime = -1
		# -1: not run, 0: success, 1: failure
		self._status = -1
		self._exit_status = -1

	def _get_file_name(self, suffix):
		return "%s.%s" % (self._icpath, suffix)
	
	@classmethod
	def canUseValgrind(cls):
		if hasattr(cls, "useValgrind"):
			return cls.useValgrind
		# detect whether valgrind is in $PATH (result cached on the class)
		p = os.getenv("PATH")
		pathes = [x + "/valgrind" for x in p.split(":")]
		vgs = [os.path.isfile(x) for x in pathes]
		cls.useValgrind = (True in vgs)
		return cls.useValgrind

	def execute(self):
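		# Assemble the interpreter command line, run under valgrind
		# when it is available in $PATH:
		#   [valgrind] <fauhdli> -s <entity> [-d] <ic file>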
		fout = file(self._get_file_name("std.out"), "w")
		ferr = file(self._get_file_name("err.out"), "w")

		if InterpreteTC.canUseValgrind():
			cmd = ["valgrind"]
		else:
			cmd = []
		cmd.append(file_handling.get_intepreter())
		cmd.append("-s")
		cmd.append(self._entity)
		if self._debug:
			cmd.append("-d")
		cmd.append(self._icpath)

		sys.stdout.write(".")
		sys.stdout.flush()
		t1 = time.time()
		p = subprocess.Popen(cmd, shell=False, stdout=fout, \
					stderr=ferr)
		self._exit_status = p.wait()
		t2 = time.time()

		self._runtime = t2 - t1
		fout.close()
		ferr.close()

	def _handle_err_out(self):
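		# Classify the finished interpreter run: stderr is scanned
		# for valgrind memory-error reports, while stdout must contain
		# a ": simulation finished" line and no "FAILURE" output for
		# the run to count as a success.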
		ferr = file(self._get_file_name("err.out"), "r")
		fout = file(self._get_file_name("std.out"), "r")
		lines = ferr.readlines()
		ferr.close()
		sim_fin_seen = False
		have_critical_errs = False
		have_mem_errors = False
		critical_errs = ""
		
		for l in lines:
			if "Conditional jump or move depends on" in l:
				have_mem_errors = True

		lines = fout.readlines()
		fout.close()

		for l in lines:
			if ": simulation finished" in l:
				sim_fin_seen = True
			if "FAILURE" in l:
				have_critical_errs = True
				critical_errs += l

		if self._exit_status != 0:
			# interpreter failed
			self._report = "%s interpreter exited with %d%s\n" % \
				(COL_RED, self._exit_status, COL_NORM)
			self._report += "".join(lines[-7:])
			self._status = 1
			return

		if have_critical_errs:
			# interpreter succeeded, but critical errors present
			self._report = "%sinterpreter critical errors:%s\n" % \
				(COL_RED, COL_NORM)
			self._report += critical_errs
			self._status = 1
			return

		if not sim_fin_seen:
			# no "simulation finished" message seen on stdout
			self._report = "%sinterpreter did not finish%s\n" % \
				(COL_RED, COL_NORM)
			# append the last 7 lines of stdout
			self._report += "".join(lines[-7:])
			self._status = 1
			return

		if have_mem_errors:
			# valgrind detected memory errors
			self._report = "%sinterpreter has memory errors%s\n" \
				% (COL_RED, COL_NORM)
			self._status = 1
			return

		# all good
		self._report = "%ssucceeded%s" % (COL_GREEN, COL_NORM)
		self._status = 0

	def get_status(self):
		assert self._exit_status != -1
		if self._status == -1:
			self._handle_err_out()
		assert self._status != -1
		return self._status

	def __str__(self):
		assert self._exit_status != -1
		assert self._status != -1
		return self._report

class TestCase:
	""" class representing one test case """
	def __init__(self, entry, subdir):
		self._name = ""
		self._compiler_opts = ["--freestanding"]
		self._exit_status = -1
		# status of the result:
		# -1: no result yet
		#  0: expected result occurred (test succeeded)
		#  1: expected result did not occur (test failed)
		#  2: expected exit status, but the expected error messages
		#     were missing (warning)
		self._status = -1
		self._subdir = subdir
		self._err_out = []
		# (line, msg) tuples of errors that are expected but not 
		# present in actual_errors
		self._missing_errors = []
		# expected exit status
		self._expected_result = 0
		self._parse(entry)
		self._interprete = False

	def _parse(self, entry):
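		# A tests.list entry has the form (illustrative):
		#   <testname> [OPTS="<extra compiler options and .vhdl files>"]
		# Options ending in ".vhdl" are resolved relative to the
		# suite's source subdirectory; all other options are passed
		# through to the compiler unchanged.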
		arr = entry.split(" ", 1)
		self._name = arr.pop(0).strip()

		if len(arr) == 0:
			return

		d = eval_helper.eval_opt_list(arr[0])

		opts = []
		if d.has_key("OPTS"):
			opts += d["OPTS"].split(" ")

		for o in opts:
			# VHDL file name? prefix with subdir
			if o[-5:].lower() == ".vhdl":
				src = file_handling.get_src_name(\
							o, self._subdir)
				self._compiler_opts.append(src)
			else:
				self._compiler_opts.append(o)

	def _get_src_file_name(self, suffix):
		fn = "%s.%s" % (self._name, suffix)
		return file_handling.get_src_name(fn, self._subdir)

	def _get_out_file_name(self, suffix):
		fn = "%s.%s" % (self._name, suffix)
		return file_handling.get_output_name(fn, self._subdir)

	def execute(self, compiler, common_opts, output_ic=False):
		""" execute the test """
		fout = file(self._get_out_file_name("std.out"), "w")
		ferr = file(self._get_out_file_name("err.out"), "w")

		cmd = [compiler]
		if len(common_opts) > 0:
			cmd.append(common_opts)
		cmd = cmd + self._compiler_opts
		cmd.append(self._get_src_file_name("vhdl"))
		
		if output_ic:
			cmd += ["-o", self._get_out_file_name("ic")]

		t1 = time.time()
		p = subprocess.Popen(cmd, shell=False, stdout=fout, \
					stderr=ferr)
		self._exit_status = p.wait()
		t2 = time.time()

		self._runtime = t2 - t1
		fout.close()
		ferr.close()

	def _store_err_out(self):
		f = file(self._get_out_file_name("err.out"), "r")
		self._err_out = f.readlines()
		f.close()

	@staticmethod
	def _flatten_err(line):
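		# Parse a compiler diagnostic of the form
		#   ERROR> <file>:<line>:<message>
		# into a (line, message) tuple, or return None if the line
		# does not parse; line[7:] drops the "ERROR> " prefix.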
		txt = line[7:]
		fn = ""
		line = ""
		msg = ""

		try:
			(fn, line_t, msg) = txt.split(":", 2)
			line = int(line_t)
			msg = msg.strip()
		except:
			return None
		
		return (line, msg)


	def _check_errors(self):
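		# Match the errors announced via @ERROR@ annotations in the
		# VHDL source against the "ERROR>" diagnostics the compiler
		# actually emitted (by line number and case-insensitive
		# message substring); announced errors that never appeared
		# are collected in self._missing_errors.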
		expected = self._check_vhdl()
		if len(expected) == 0:
			return

		# errors are expected: want 3 as exit status
		self._expected_result = 3
		self._missing_errors = []

		actual_errs = [self._flatten_err(e) for e in self._err_out \
				if e[:6] == "ERROR>"]
		actual_errs = [e for e in actual_errs if e is not None]

		for line, msg in expected:
			corr = [ a for a in actual_errs \
				if (a[0] == line) \
				and msg.lower() in a[1].lower()]

			if len(corr) == 0:
				self._missing_errors.append((line, msg))

	def _check_vhdl(self):
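		# Scan the VHDL source for expected-error annotations of the
		# form "@ERROR@ <message fragment>" (typically placed in a
		# VHDL comment) and record (line number, fragment) for each.
		# A source mentioning "test_bench" is additionally marked for
		# interpretation of the generated intermediate code.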
		lno = 0
		f = file(self._get_src_file_name("vhdl"), "r")
		errs = []

		for l in f.readlines():
			lno = lno + 1
			if "@ERROR@" in l:
				parts = l.split("@ERROR@")
				desc = parts[1].strip()
				errs.append((lno, desc))

			if "test_bench" in l.lower():
				self._interprete = True
		f.close()
		return errs

	def _get_err_out(self, num_lines=10):
		assertions = [l for l in self._err_out if "Assertion" in l]
		# assertion failure present? return it.
		if len(assertions) != 0:
			return "\n" + assertions[-1]

		# otherwise return the last num_lines lines of stderr
		s = "\nLast %d lines of stderr:\n\n" % num_lines
		s += "".join(self._err_out[-num_lines:])
		s += "\n"
		return s

	def _get_error_report(self):
		s = "\nExpected errors missing from fauhdlc:\n"
		mes = [ "Line %d: %s" % (e[0], e[1]) for e in self._missing_errors ]
		s += "\n".join(mes)
		s += self._get_err_out(7)
		return s

	def _eval_interprete(self):
		assert self._interprete
		tc = InterpreteTC(self._get_out_file_name("ic"))
		tc.execute()
		self._status = tc.get_status()
		return str(tc)

	def _get_result(self):
		# TODO has side effects right now
		self._store_err_out()
		self._check_errors()

		if self._expected_result == 0:
			# no errors expected
			if self._exit_status == 0:
				self._status = 0
				if self._interprete:
					return self._eval_interprete()
				return "%ssucceeded%s" % (COL_GREEN, COL_NORM)

			self._status = 1
			return "%sfailed%s%s" % \
				(COL_RED, COL_NORM, self._get_err_out())

		# errors expected
		if self._expected_result == self._exit_status:
			if len(self._missing_errors) == 0:
				self._status = 0
				return "%sfailed as expected%s" % \
					(COL_GREEN, COL_NORM)
			else:
				self._status = 2
				return "%sfailed but errors don't match%s%s"\
					% (COL_YELLOW, COL_NORM, \
					self._get_error_report())

		self._status = 1
		return "%sunexpected result%s (exit code: %d)%s" \
			%(COL_RED, COL_NORM, self._exit_status, \
			  self._get_err_out())

	def __str__(self):
		""" provide output by converting to string. """

		if self._exit_status == -1:
			return "%s %swas not yet executed%s." % \
				(self._name, COL_YELLOW, COL_NORM)

		s = "%s: " % self._name
		if len(self._name) < 20:
			s += " " * (20 - len(self._name))

		s += self._get_result()
		return s

	def get_status(self):
		return self._status

class QuietTestCase(TestCase):
	""" same as test-case, but don't output anything but the status """
	def __init__(self, entry, subdir):
		# manually call parent c'tor
		TestCase.__init__(self, entry, subdir)
	
	def _get_err_out(self, num_lines=10):
		return ""
	
	def _get_error_report(self):
		return ""

	def _get_result(self):
		return TestCase._get_result(self) + \
			" (verbose output disabled) "


class TestSuite:
	""" parses a tests.list file, evaluating all test-cases in there """

	def __init__(self, listdir):
		self._listfile = \
			file_handling.get_src_name("tests.list", listdir)
		self._testcases = []
		self._subdir = listdir
	
	def parse(self):
		f = file(self._listfile, "r")

		lines = f.readlines()
		f.close()
		# filter out comments
		lines = [ l for l in lines if l[0] != '#' ]
		desc_line = lines.pop(0)

		self._parse_desc(desc_line)
		for l in lines:
			self._parse_test_case(l)

	def _parse_desc(self, d):
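		# The first non-comment line of tests.list describes the
		# whole suite, e.g. (illustrative values):
		#   DESC="icode generation" COMMON_OPTS="<options>" OUTPUT="True"
		# OUTPUT="True" makes every test emit an intermediate-code
		# file, which the interpreter stage then consumes.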
		opts = eval_helper.eval_opt_list(d)
		self._description = "Description unset!"
		self._common_opts = ""
		self._output_ic = False

		if opts.has_key("DESC"):
			self._description = opts["DESC"]

		if opts.has_key("COMMON_OPTS"):
			self._common_opts = opts["COMMON_OPTS"]

		if opts.has_key("OUTPUT"):
			if opts["OUTPUT"] == 'True':
				self._output_ic = True

	def _parse_test_case(self, s):
		d = eval_helper.eval_opt_list(s)
		tc = None

		if d.has_key("QUIET") and (d["QUIET"] == "True"):
			tc = QuietTestCase(s, self._subdir)
		else:
			tc = TestCase(s, self._subdir)
			
		self._testcases.append(tc)

	def execute(self):	
		""" execute all test cases """
		t1 = time.time()

		for tc in self._testcases:
			tc.execute(\
				file_handling.get_compiler(), \
				self._common_opts, \
				self._output_ic)

		t2 = time.time()
		self._runtime = t2 - t1

	def get_status(self):
		""" get_status -> (#succ, #failed, #warn) """
		succ, failed, warn = 0, 0, 0

		for tc in self._testcases:
			res = tc.get_status()
			if res == 0:
				succ += 1
			elif res == 1:
				failed += 1
			elif res == 2:
				warn += 1
			else:
				assert False
		return (succ, failed, warn)

	def __str__(self):
		s =  "Executing tests for %s\n" % self._description
		s += "  common options: %s\n" % self._common_opts
		s += "--------------------------------------------\n"
		for tc in self._testcases:
			s += "%s\n" % tc
		s += "--------------------------------------------\n"
		s += "Summary:\n"
		s += "  Execution took %0.02f seconds.\n" % self._runtime
		s += "============================================\n"
		return s



class AllTest:
	def __init__(self, testfiles):
		self._suites = []

		for td in testfiles:
			ts = TestSuite(td)
			ts.parse()
			self._suites.append(ts)
	
	def execute(self):
		t1 = time.time()
		for ts in self._suites:
			ts.execute()
		t2 = time.time()
		self._runtime = t2 - t1
	
	def __str__(self):
		s = ""
		succ, failed, warn = 0, 0, 0

		for ts in self._suites:
			s += "%s\n" % ts
			st, ft, wt = ts.get_status()
			succ += st
			failed += ft
			warn += wt
		
		s += "TOTAL SUMMARY:\n"
		s += "%d successfuls tests, %d failures, %d warnings\n" %\
			(succ, failed, warn)
		s += "Compile time for all tests: %0.02f\n" % self._runtime
		return s


if __name__ == "__main__":
	srcdir = os.getenv("srcdir", ".")
	file_handling.setup_filehandling(srcdir, ".")
	
	suites = ("symboltable", \
			"types", \
			"icode", \
			"error_tests")
	alltest = AllTest(suites)
	alltest.execute()
	print "\n" + str(alltest)