summary refs log tree commit diff
diff options
context:
space:
mode:
-rw-r--r--  src/type_check.ml          3
-rwxr-xr-x  test/builtins/run_tests.py  28
-rwxr-xr-x  test/c/run_tests.py         30
-rw-r--r--  test/sailtest.py            62
4 files changed, 79 insertions, 44 deletions
diff --git a/src/type_check.ml b/src/type_check.ml
index 47275594..8359dac2 100644
--- a/src/type_check.ml
+++ b/src/type_check.ml
@@ -2321,8 +2321,7 @@ let rec check_exp env (E_aux (exp_aux, (l, ())) as exp : unit exp) (Typ_aux (typ
let inferred_exp = infer_funapp l env f xs (Some typ) in
type_coercion env inferred_exp typ
| E_if (cond, then_branch, else_branch), _ ->
- (* let cond' = crule check_exp env cond (mk_typ (Typ_id (mk_id "bool"))) in *)
- let cond' = irule infer_exp env cond in
+ let cond' = try irule infer_exp env cond with Type_error _ -> crule check_exp env cond bool_typ in
begin match destruct_exist (typ_of cond') with
| Some (kopts, nc, Typ_aux (Typ_app (ab, [A_aux (A_bool flow, _)]), _)) when string_of_id ab = "atom_bool" ->
let env = add_existential l kopts nc env in
diff --git a/test/builtins/run_tests.py b/test/builtins/run_tests.py
index e1f2927e..b20d4224 100755
--- a/test/builtins/run_tests.py
+++ b/test/builtins/run_tests.py
@@ -13,9 +13,10 @@ from sailtest import *
def test_c_builtins(name, sail_opts):
banner('Testing builtins: {} Sail options: {}'.format(name, sail_opts))
- tests = {}
- for filename in os.listdir('.'):
- if re.match('.+\.sail', filename):
+ results = Results(name)
+ for filenames in chunks(os.listdir('.'), parallel()):
+ tests = {}
+ for filename in filenames:
basename = os.path.splitext(os.path.basename(filename))[0]
tests[filename] = os.fork()
if tests[filename] == 0:
@@ -26,13 +27,16 @@ def test_c_builtins(name, sail_opts):
step('rm {}'.format(basename))
print '{} {}{}{}'.format(filename, color.PASS, 'ok', color.END)
sys.exit()
+ results.collect(tests)
+ return results.finish()
return collect_results(name, tests)
def test_ocaml_builtins(name, sail_opts):
banner('Testing builtins: {} Sail options: {}'.format(name, sail_opts))
- tests = {}
- for filename in os.listdir('.'):
- if re.match('.+\.sail', filename):
+ results = Results(name)
+ for filenames in chunks(os.listdir('.'), parallel()):
+ tests = {}
+ for filename in filenames:
basename = os.path.splitext(os.path.basename(filename))[0]
tests[filename] = os.fork()
if tests[filename] == 0:
@@ -42,13 +46,16 @@ def test_ocaml_builtins(name, sail_opts):
step('rm {}'.format(basename))
print '{} {}{}{}'.format(filename, color.PASS, 'ok', color.END)
sys.exit()
+ results.collect(tests)
+ return results.finish()
return collect_results(name, tests)
def test_lem_builtins(name):
banner('Testing builtins: {}'.format(name))
- tests = {}
- for filename in os.listdir('.'):
- if re.match('.+\.sail', filename):
+ results = Results(name)
+ for filenames in chunks(os.listdir('.'), parallel()):
+ tests = {}
+ for filename in filenames:
basename = os.path.splitext(os.path.basename(filename))[0]
tests[filename] = os.fork()
if tests[filename] == 0:
@@ -76,7 +83,8 @@ def test_lem_builtins(name):
print '{} {}{}{}'.format(filename, color.PASS, 'ok', color.END)
sys.exit()
- return collect_results(name, tests)
+ results.collect(tests)
+ return results.finish()
xml = '<testsuites>\n'
diff --git a/test/c/run_tests.py b/test/c/run_tests.py
index 268763ad..4f221636 100755
--- a/test/c/run_tests.py
+++ b/test/c/run_tests.py
@@ -13,9 +13,10 @@ from sailtest import *
def test_c(name, c_opts, sail_opts, valgrind):
banner('Testing {} with C options: {} Sail options: {} valgrind: {}'.format(name, c_opts, sail_opts, valgrind))
- tests = {}
- for filename in os.listdir('.'):
- if re.match('.+\.sail', filename):
+ results = Results(name)
+ for filenames in chunks(os.listdir('.'), parallel()):
+ tests = {}
+ for filename in filenames:
basename = os.path.splitext(os.path.basename(filename))[0]
tests[filename] = os.fork()
if tests[filename] == 0:
@@ -27,13 +28,15 @@ def test_c(name, c_opts, sail_opts, valgrind):
step("valgrind --leak-check=full --track-origins=yes --errors-for-leak-kinds=all --error-exitcode=1 ./{}".format(basename))
print '{} {}{}{}'.format(filename, color.PASS, 'ok', color.END)
sys.exit()
- return collect_results(name, tests)
+ results.collect(tests)
+ return results.finish()
def test_interpreter(name):
banner('Testing {}'.format(name))
- tests = {}
- for filename in os.listdir('.'):
- if re.match('.+\.sail', filename):
+ results = Results(name)
+ for filenames in chunks(os.listdir('.'), parallel()):
+ tests = {}
+ for filename in filenames:
basename = os.path.splitext(os.path.basename(filename))[0]
tests[filename] = os.fork()
if tests[filename] == 0:
@@ -41,13 +44,15 @@ def test_interpreter(name):
step('diff {}.iresult {}.expect'.format(basename, basename))
print '{} {}{}{}'.format(filename, color.PASS, 'ok', color.END)
sys.exit()
- return collect_results(name, tests)
+ results.collect(tests)
+ return results.finish()
def test_ocaml(name):
banner('Testing {}'.format(name))
- tests = {}
- for filename in os.listdir('.'):
- if re.match('.+\.sail', filename):
+ results = Results(name)
+ for filenames in chunks(os.listdir('.'), parallel()):
+ tests = {}
+ for filename in filenames:
basename = os.path.splitext(os.path.basename(filename))[0]
tests[filename] = os.fork()
if tests[filename] == 0:
@@ -56,7 +61,8 @@ def test_ocaml(name):
step('diff {}.oresult {}.expect'.format(basename, basename))
print '{} {}{}{}'.format(filename, color.PASS, 'ok', color.END)
sys.exit()
- return collect_results(name, tests)
+ results.collect(tests)
+ return results.finish()
xml = '<testsuites>\n'
diff --git a/test/sailtest.py b/test/sailtest.py
index 612aba2b..32d09aa9 100644
--- a/test/sailtest.py
+++ b/test/sailtest.py
@@ -11,6 +11,24 @@ class color:
FAIL = '\033[91m'
END = '\033[0m'
+def parallel():
+ try:
+ return int(os.environ['TEST_PAR'])
+ except Exception, e:
+ print("Running 4 tests in parallel. Set TEST_PAR to configure")
+ return 4
+
+def chunks(filenames, cores):
+ ys = []
+ chunk = []
+ for filename in filenames:
+ if re.match('.+\.sail', filename):
+ chunk.append(filename)
+ if len(chunk) >= cores:
+ ys.append(list(chunk))
+ chunk = []
+ ys.append(list(chunk))
+ return ys
def step(string):
p = subprocess.Popen(string, shell=True, stderr=subprocess.PIPE, stdout=subprocess.PIPE)
@@ -29,23 +47,27 @@ def banner(string):
print string
print '-' * len(string)
-def collect_results(name, tests):
- passes = 0
- failures = 0
- xml = ""
-
- for test in tests:
- _, status = os.waitpid(tests[test], 0)
- if status != 0:
- failures += 1
- xml += ' <testcase name="{}">\n <error message="fail">fail</error>\n </testcase>\n'.format(test)
- else:
- passes += 1
- xml += ' <testcase name="{}"/>\n'.format(test)
-
- print '{}{} passes and {} failures{}'.format(color.NOTICE, passes, failures, color.END)
-
- time = datetime.datetime.utcnow()
- suite = ' <testsuite name="{}" tests="{}" failures="{}" timestamp="{}">\n{} </testsuite>\n'
- xml = suite.format(name, passes + failures, failures, time, xml)
- return xml
+class Results:
+ def __init__(self, name):
+ self.passes = 0
+ self.failures = 0
+ self.xml = ""
+ self.name = name
+
+ def collect(self, tests):
+ for test in tests:
+ _, status = os.waitpid(tests[test], 0)
+ if status != 0:
+ self.failures += 1
+ self.xml += ' <testcase name="{}">\n <error message="fail">fail</error>\n </testcase>\n'.format(test)
+ else:
+ self.passes += 1
+ self.xml += ' <testcase name="{}"/>\n'.format(test)
+
+ def finish(self):
+ print '{}{} passes and {} failures{}'.format(color.NOTICE, self.passes, self.failures, color.END)
+
+ time = datetime.datetime.utcnow()
+ suite = ' <testsuite name="{}" tests="{}" failures="{}" timestamp="{}">\n{} </testsuite>\n'
+ self.xml = suite.format(self.name, self.passes + self.failures, self.failures, time, self.xml)
+ return self.xml