summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
authorThomas Bauereiss2018-05-11 12:04:10 +0100
committerThomas Bauereiss2018-05-11 12:04:10 +0100
commitff18bac6654a73cedf32a45ee406fe3e74ae3efd (patch)
treeed940ea575c93d741c84cd24cd3e029d0a590b81
parent823fe1d82e753add2d54ba010689a81af027ba6d (diff)
parentdb3b6d21c18f4ac516c2554db6890274d2b8292c (diff)
Merge branch 'sail2' into cheri-mono
In order to use up-to-date sequential CHERI model for test suite
-rw-r--r--aarch64/Makefile15
-rw-r--r--aarch64/ROOT4
-rw-r--r--aarch64/aarch64_extras.lem4
-rw-r--r--aarch64/full.sail14
-rw-r--r--aarch64/full/decode.sail3479
-rw-r--r--aarch64/full/spec.sail19305
-rw-r--r--aarch64/no_vector/spec.sail2
-rw-r--r--cheri/Makefile65
-rw-r--r--cheri/ROOT4
-rw-r--r--cheri/cheri_insts.sail94
-rw-r--r--cheri/cheri_prelude_256.sail20
-rw-r--r--cheri/cheri_prelude_common.sail45
-rw-r--r--doc/examples/overload.sail10
-rw-r--r--doc/examples/zeros.sail5
-rw-r--r--doc/tutorial.tex307
-rw-r--r--doc/types.tex256
-rw-r--r--etc/loc.mk13
-rw-r--r--lib/arith.sail2
-rw-r--r--lib/elf.sail2
-rw-r--r--lib/vector_dec.sail2
-rw-r--r--mips/Makefile3
-rw-r--r--mips/main.sail1
-rw-r--r--mips/mips_extras.lem10
-rw-r--r--mips/mips_prelude.sail9
-rw-r--r--mips/prelude.sail17
-rw-r--r--riscv/Makefile4
-rw-r--r--riscv/main.sail1
-rw-r--r--riscv/prelude.sail10
-rw-r--r--riscv/riscv.sail13
-rw-r--r--riscv/riscv_step.sail9
-rw-r--r--riscv/riscv_sys.sail10
-rw-r--r--riscv/riscv_types.sail1
-rw-r--r--riscv/riscv_vmem.sail52
-rw-r--r--src/c_backend.ml2
-rw-r--r--src/elf_loader.ml8
-rw-r--r--src/gen_lib/prompt.lem6
-rw-r--r--src/gen_lib/sail_operators.lem40
-rw-r--r--src/gen_lib/sail_operators_bitlists.lem33
-rw-r--r--src/gen_lib/sail_operators_mwords.lem33
-rw-r--r--src/gen_lib/sail_values.lem1
-rw-r--r--src/gen_lib/state_monad.lem13
-rw-r--r--src/latex.ml11
-rw-r--r--src/pretty_print_lem.ml86
-rw-r--r--src/rewrites.ml53
-rw-r--r--src/state.ml131
-rw-r--r--src/type_check.mli5
-rwxr-xr-xtest/arm/run_tests.sh2
-rw-r--r--test/builtins/get_slice_int.sail36
-rwxr-xr-xtest/builtins/run_tests.sh48
-rw-r--r--test/builtins/set_slice_bits.sail2
-rw-r--r--test/builtins/signed.sail120
-rw-r--r--test/builtins/test_extras.lem22
-rw-r--r--test/builtins/unsigned6.sail392
-rw-r--r--test/isabelle/Aarch64_code.thy61
-rw-r--r--test/isabelle/Cheri_code.thy62
-rw-r--r--test/isabelle/Makefile27
-rw-r--r--test/isabelle/ROOT9
-rw-r--r--test/isabelle/elf_loader.ml126
-rw-r--r--test/isabelle/run_aarch64.ml93
-rw-r--r--test/isabelle/run_cheri.ml92
-rwxr-xr-xtest/isabelle/run_tests.sh90
-rw-r--r--test/typecheck/pass/simple_record_access.sail1
62 files changed, 24753 insertions, 640 deletions
diff --git a/aarch64/Makefile b/aarch64/Makefile
new file mode 100644
index 00000000..0ef6b0f0
--- /dev/null
+++ b/aarch64/Makefile
@@ -0,0 +1,15 @@
+THIS_MAKEFILE := $(realpath $(lastword $(MAKEFILE_LIST)))
+SAIL_DIR:=$(realpath $(dir $(THIS_MAKEFILE))..)
+export SAIL_DIR
+SAIL_LIB_DIR:=$(SAIL_DIR)/lib
+SAIL:=$(SAIL_DIR)/sail
+
+aarch64.lem: no_vector.sail
+ $(SAIL) $^ -o aarch64 -lem -lem_lib Aarch64_extras -memo_z3 -undefined_gen -no_lexp_bounds_check
+aarch64_types.lem: aarch64.lem
+
+Aarch64.thy: aarch64_extras.lem aarch64_types.lem aarch64.lem
+ lem -isa -outdir . -lib $(SAIL_DIR)/src/gen_lib -lib $(SAIL_DIR)/src/lem_interp $^
+
+LOC_FILES:=prelude.sail full/spec.sail decode_start.sail full/decode.sail decode_end.sail main.sail
+include ../etc/loc.mk
diff --git a/aarch64/ROOT b/aarch64/ROOT
new file mode 100644
index 00000000..113e8e70
--- /dev/null
+++ b/aarch64/ROOT
@@ -0,0 +1,4 @@
+session "Sail-AArch64" = "Sail" +
+ options [document = false]
+ theories
+ Aarch64_lemmas
diff --git a/aarch64/aarch64_extras.lem b/aarch64/aarch64_extras.lem
index e823dbfe..ab67f506 100644
--- a/aarch64/aarch64_extras.lem
+++ b/aarch64/aarch64_extras.lem
@@ -119,3 +119,7 @@ val read_ram : forall 'rv 'e.
integer -> integer -> list bitU -> list bitU -> monad 'rv (list bitU) 'e
let read_ram addrsize size hexRAM address =
read_mem Read_plain address size
+
+val elf_entry : unit -> integer
+let elf_entry () = 0
+declare ocaml target_rep function elf_entry = `Elf_loader.elf_entry`
diff --git a/aarch64/full.sail b/aarch64/full.sail
new file mode 100644
index 00000000..b05b1829
--- /dev/null
+++ b/aarch64/full.sail
@@ -0,0 +1,14 @@
+
+// Prelude
+$include "prelude.sail"
+
+// Specification
+$include "full/spec.sail"
+
+// Decoder
+$include "decode_start.sail"
+$include "full/decode.sail"
+$include "decode_end.sail"
+
+// Top level loop
+$include "main.sail"
diff --git a/aarch64/full/decode.sail b/aarch64/full/decode.sail
new file mode 100644
index 00000000..41ec5f16
--- /dev/null
+++ b/aarch64/full/decode.sail
@@ -0,0 +1,3479 @@
+function clause decode (0b011111110 @ _ : bits(7) @ 0b010101 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_leftinsert_sisd_decode(U, immh, immb, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b1011110 @ _ : bits(7) @ 0b010101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_leftinsert_simd_decode(Q, U, immh, immb, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b001001 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_sub_int_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b100011 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_cmp_bitwise_sisd_decode(U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b100011 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_cmp_bitwise_simd_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011100 @ _ : bits(1) @ 0b100001011010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_float_narrow_decode(Q, U, sz, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b0110 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ o1 : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_maxmin_single_decode(Q, U, size, Rm, o1, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b001011 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_sub_saturating_sisd_decode(U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b001011 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_sub_saturating_simd_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b10011011 @ _ : bits(1) @ 0b10 @ _ : bits(5) @ 0b0 @ _ : bits(15) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op54 : bits(2) = op_code[30 .. 29];
+ U : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ o0 : bits(1) = [op_code[15]];
+ Ra : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_arithmetic_mul_widening_64128hi_decode(sf, op54, U, Rm, o0, Ra, Rn, Rd)
+}
+
+function clause decode (0b01111111 @ _ : bits(8) @ 0b11 @ _ : bits(1) @ 0b1 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ S : bits(1) = [op_code[13]];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mulacc_high_sisd_decode(U, size, L, M, Rm, S, H, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b101111 @ _ : bits(8) @ 0b11 @ _ : bits(1) @ 0b1 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ S : bits(1) = [op_code[13]];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mulacc_high_simd_decode(Q, U, size, L, M, Rm, S, H, Rn, Rd)
+}
+
+function clause decode (0b01011110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b110100 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_disparate_mul_double_sisd_decode(U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b110100 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_disparate_mul_double_simd_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b1010001 @ _ : bits(23) as op_code) = {
+ opc : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ L : bits(1) = [op_code[22]];
+ imm7 : bits(7) = op_code[21 .. 15];
+ Rt2 : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_pair_general_postidx_aarch64_memory_pair_general_postidx__decode(opc, V, L, imm7, Rt2, Rn, Rt)
+}
+
+function clause decode (_ : bits(2) @ 0b1010011 @ _ : bits(23) as op_code) = {
+ opc : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ L : bits(1) = [op_code[22]];
+ imm7 : bits(7) = op_code[21 .. 15];
+ Rt2 : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_pair_general_preidx_aarch64_memory_pair_general_postidx__decode(opc, V, L, imm7, Rt2, Rn, Rt)
+}
+
+function clause decode (_ : bits(2) @ 0b1010010 @ _ : bits(23) as op_code) = {
+ opc : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ L : bits(1) = [op_code[22]];
+ imm7 : bits(7) = op_code[21 .. 15];
+ Rt2 : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_pair_general_offset_aarch64_memory_pair_general_postidx__decode(opc, V, L, imm7, Rt2, Rn, Rt)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(1) @ 0b10 @ _ : bits(5) @ 0b000011 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_fp16_fused_decode(Q, U, a, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b110011 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ op : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_fp_fused_decode(Q, U, op, sz, Rm, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b100000101110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_diffneg_int_sisd_decode(U, size, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b100000101110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_diffneg_int_simd_decode(Q, U, size, Rn, Rd)
+}
+
+function clause decode (0b11001110011 @ _ : bits(5) @ 0b100011 @ _ : bits(10) as op_code) = {
+ Rm : bits(5) = op_code[20 .. 16];
+ O : bits(1) = [op_code[14]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha3_rax1_decode(Rm, O, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b000001 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_add_halving_truncating_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(1) @ 0b10 @ _ : bits(5) @ 0b0010 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ E : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ ac : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_cmp_fp16_sisd_decode(U, E, Rm, ac, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b1110 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ E : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ ac : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_cmp_fp_sisd_decode(U, E, sz, Rm, ac, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(1) @ 0b10 @ _ : bits(5) @ 0b0010 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ E : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ ac : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_cmp_fp16_simd_decode(Q, U, E, Rm, ac, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b1110 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ E : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ ac : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_cmp_fp_simd_decode(Q, U, E, sz, Rm, ac, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(2) @ 0b0 @ _ : bits(5) @ 0b0 @ _ : bits(1) @ 0b1110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ op : bits(1) = [op_code[14]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_transfer_vector_permute_zip_decode(Q, size, Rm, op, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01111 @ _ : bits(8) @ 0b1110 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_dotp_decode(Q, U, size, L, M, Rm, H, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b010 @ _ : bits(2) @ 0b1 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ R : bits(1) = [op_code[12]];
+ S : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_shift_sisd_decode(U, size, Rm, R, S, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b010 @ _ : bits(2) @ 0b1 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ R : bits(1) = [op_code[12]];
+ S : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_shift_simd_decode(Q, U, size, Rm, R, S, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b111100 @ _ : bits(2) @ 0b1 @ _ : bits(9) @ 0b10 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ opc : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ option_name : bits(3) = op_code[15 .. 13];
+ S : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_single_simdfp_register_aarch64_memory_single_simdfp_register__decode(size, V, opc, Rm, option_name, S, Rn, Rt)
+}
+
+function clause decode (0b00011111 @ _ : bits(24) as op_code) = {
+ M : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ o1 : bits(1) = [op_code[21]];
+ Rm : bits(5) = op_code[20 .. 16];
+ o0 : bits(1) = [op_code[15]];
+ Ra : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ float_arithmetic_mul_addsub_decode(M, S, typ, o1, Rm, o0, Ra, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b1111001111001110110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_fp16_conv_int_sisd_decode(U, a, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b111100 @ _ : bits(1) @ 0b100001110110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_float_conv_int_sisd_decode(U, sz, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b0111001111001110110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_fp16_conv_int_simd_decode(Q, U, a, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b011100 @ _ : bits(1) @ 0b100001110110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_float_conv_int_simd_decode(Q, U, sz, Rn, Rd)
+}
+
+function clause decode (0b11001110001 @ _ : bits(5) @ 0b0 @ _ : bits(15) as op_code) = {
+ Op0 : bits(2) = op_code[22 .. 21];
+ Rm : bits(5) = op_code[20 .. 16];
+ Ra : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha3_bcax_decode(Op0, Rm, Ra, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(2) @ 0b0 @ _ : bits(5) @ 0b0 @ _ : bits(1) @ 0b1010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ op : bits(1) = [op_code[14]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_transfer_vector_permute_transpose_decode(Q, size, Rm, op, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b111000 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b0 @ _ : bits(3) @ 0b00 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ A : bits(1) = [op_code[23]];
+ R : bits(1) = [op_code[22]];
+ Rs : bits(5) = op_code[20 .. 16];
+ o3 : bits(1) = [op_code[15]];
+ opc : bits(3) = op_code[14 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_atomicops_ld_decode(size, V, A, R, Rs, o3, opc, Rn, Rt)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b111110 @ _ : bits(7) @ 0b00 @ _ : bits(2) @ 0b01 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ o1 : bits(1) = [op_code[13]];
+ o0 : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_right_sisd_decode(U, immh, immb, o1, o0, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b011110 @ _ : bits(7) @ 0b00 @ _ : bits(2) @ 0b01 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ o1 : bits(1) = [op_code[13]];
+ o0 : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_right_simd_decode(Q, U, immh, immb, o1, o0, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(1) @ 0b1111001101 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ o2 : bits(1) = [op_code[23]];
+ o1 : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_fp16_conv_float_bulk_sisd_decode(U, o2, o1, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b100001101 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ o2 : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ o1 : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_float_conv_float_bulk_sisd_decode(U, o2, sz, o1, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(1) @ 0b1111001101 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ o2 : bits(1) = [op_code[23]];
+ o1 : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_fp16_conv_float_bulk_simd_decode(Q, U, o2, o1, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b100001101 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ o2 : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ o1 : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_float_conv_float_bulk_simd_decode(Q, U, o2, sz, o1, Rn, Rd)
+}
+
+function clause decode (0b01011110000 @ _ : bits(5) @ 0b011000 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha3op_sha256sched1_decode(size, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b10111011111001111110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_sqrtfp16_decode(Q, U, a, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b1011101 @ _ : bits(1) @ 0b100001111110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_sqrt_decode(Q, U, sz, Rn, Rd)
+}
+
+function clause decode (0b01011110110 @ _ : bits(5) @ 0b001111 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_rsqrtsfp16_sisd_decode(U, a, Rm, Rn, Rd)
+}
+
+function clause decode (0b010111101 @ _ : bits(1) @ 0b1 @ _ : bits(5) @ 0b111111 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_rsqrts_sisd_decode(U, sz, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110110 @ _ : bits(5) @ 0b001111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_rsqrtsfp16_simd_decode(Q, U, a, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011101 @ _ : bits(1) @ 0b1 @ _ : bits(5) @ 0b111111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_rsqrts_simd_decode(Q, U, sz, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b00111100000 @ _ : bits(3) @ 0b111111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ op : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[18]];
+ b : bits(1) = [op_code[17]];
+ c : bits(1) = [op_code[16]];
+ cmode : bits(4) = op_code[15 .. 12];
+ o2 : bits(1) = [op_code[11]];
+ d : bits(1) = [op_code[9]];
+ e : bits(1) = [op_code[8]];
+ f : bits(1) = [op_code[7]];
+ g : bits(1) = [op_code[6]];
+ h : bits(1) = [op_code[5]];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_fp16_movi_decode(Q, op, a, b, c, cmode, o2, d, e, f, g, h, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b0111100000 @ _ : bits(7) @ 0b01 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ op : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[18]];
+ b : bits(1) = [op_code[17]];
+ c : bits(1) = [op_code[16]];
+ cmode : bits(4) = op_code[15 .. 12];
+ o2 : bits(1) = [op_code[11]];
+ d : bits(1) = [op_code[9]];
+ e : bits(1) = [op_code[8]];
+ f : bits(1) = [op_code[7]];
+ g : bits(1) = [op_code[6]];
+ h : bits(1) = [op_code[5]];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_logical_decode(Q, op, a, b, c, cmode, o2, d, e, f, g, h, Rd)
+}
+
+function clause decode (0b01011111 @ _ : bits(8) @ 0b1011 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mul_double_sisd_decode(U, size, L, M, Rm, H, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001111 @ _ : bits(8) @ 0b1011 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mul_double_simd_decode(Q, U, size, L, M, Rm, H, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b1011101 @ _ : bits(1) @ 0b100001110010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_sqrtest_int_decode(Q, U, sz, Rn, Rd)
+}
+
+function clause decode (0b110110101100000101000 @ _ : bits(11) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ opcode2 : bits(5) = op_code[20 .. 16];
+ D : bits(1) = [op_code[10]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_pac_strip_dp_1src_decode(sf, S, opcode2, D, Rn, Rd)
+}
+
+function clause decode (0b11010101000000110010000011111111 as op_code) = {
+ L : bits(1) = [op_code[21]];
+ op0 : bits(2) = op_code[20 .. 19];
+ op1 : bits(3) = op_code[18 .. 16];
+ CRn : bits(4) = op_code[15 .. 12];
+ CRm : bits(4) = op_code[11 .. 8];
+ op2 : bits(3) = op_code[7 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ integer_pac_strip_hint_decode(L, op0, op1, CRn, CRm, op2, Rt)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(1) @ 0b10 @ _ : bits(5) @ 0b000001 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Op3 : bits(3) = op_code[13 .. 11];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_maxmin_fp16_2008_decode(Q, U, a, Rm, Op3, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b110001 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ o1 : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_maxmin_fp_2008_decode(Q, U, o1, sz, Rm, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b111110 @ _ : bits(7) @ 0b111111 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_conv_float_sisd_decode(U, immh, immb, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b011110 @ _ : bits(7) @ 0b111111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_conv_float_simd_decode(Q, U, immh, immb, Rn, Rd)
+}
+
+/* Generated A64 decode clauses: PACDA (pointer authentication, 1-source),
+   FP widening unary vector op, and a system/monitors instruction. Each
+   clause matches a 32-bit encoding and slices named fields from `op_code`
+   for the family decoder. NOTE(review): fields whose bit positions fall
+   inside the fixed pattern literals decode to constants -- an artifact of
+   generated code, not a bug. */
+function clause decode (0b110110101100000100 @ _ : bits(1) @ 0b010 @ _ : bits(10) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ opcode2 : bits(5) = op_code[20 .. 16];
+ Z : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_pac_pacda_dp_1src_decode(sf, S, opcode2, Z, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011100 @ _ : bits(1) @ 0b100001011110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_float_widen_decode(Q, U, sz, Rn, Rd)
+}
+
+/* Only CRm (bits 11..8) is variable here; the other extracted fields lie in
+   the fixed pattern bits. */
+function clause decode (0b11010101000000110011 @ _ : bits(4) @ 0b01011111 as op_code) = {
+ L : bits(1) = [op_code[21]];
+ op0 : bits(2) = op_code[20 .. 19];
+ op1 : bits(3) = op_code[18 .. 16];
+ CRn : bits(4) = op_code[15 .. 12];
+ CRm : bits(4) = op_code[11 .. 8];
+ op2 : bits(3) = op_code[7 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ system_monitors_decode(L, op0, op1, CRn, CRm, op2, Rt)
+}
+
+/* Generated A64 decode clauses: UMOV (vector-to-GP unsigned move), wide
+   add/sub, by-element FP multiply-accumulate (FP16/FP x scalar/vector),
+   and single-register load/store with signed immediate offset. Each clause
+   matches a 32-bit encoding and slices named fields from `op_code`. */
+function clause decode (0b0 @ _ : bits(1) @ 0b001110000 @ _ : bits(5) @ 0b001111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ op : bits(1) = [op_code[29]];
+ imm5 : bits(5) = op_code[20 .. 16];
+ imm4 : bits(4) = op_code[14 .. 11];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_transfer_integer_move_unsigned_decode(Q, op, imm5, imm4, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b00 @ _ : bits(1) @ 0b100 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ o1 : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_disparate_addsub_wide_decode(Q, U, size, Rm, o1, Rn, Rd)
+}
+
+/* By-element mul-acc: Rm is only 4 bits; M (bit 20) extends the register
+   index in the family decoder. */
+function clause decode (0b0101111100 @ _ : bits(6) @ 0b0 @ _ : bits(1) @ 0b01 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ o2 : bits(1) = [op_code[14]];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mulacc_fp16_sisd_decode(U, size, L, M, Rm, o2, H, Rn, Rd)
+}
+
+function clause decode (0b010111111 @ _ : bits(7) @ 0b0 @ _ : bits(1) @ 0b01 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ o2 : bits(1) = [op_code[14]];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mulacc_fp_sisd_decode(U, sz, L, M, Rm, o2, H, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b00111100 @ _ : bits(6) @ 0b0 @ _ : bits(1) @ 0b01 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ o2 : bits(1) = [op_code[14]];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mulacc_fp16_simd_decode(Q, U, size, L, M, Rm, o2, H, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011111 @ _ : bits(7) @ 0b0 @ _ : bits(1) @ 0b01 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ o2 : bits(1) = [op_code[14]];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mulacc_fp_simd_decode(Q, U, sz, L, M, Rm, o2, H, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b111000 @ _ : bits(2) @ 0b0 @ _ : bits(9) @ 0b00 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ opc : bits(2) = op_code[23 .. 22];
+ imm9 : bits(9) = op_code[20 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_single_general_immediate_signed_offset_normal_aarch64_memory_single_general_immediate_signed_offset_normal__decode(size, V, opc, imm9, Rn, Rt)
+}
+
+/* Generated A64 decode clauses: saturating add (scalar/vector), STNP/LDNP
+   pair no-allocate, scalar FP multiply, by-element integer multiply,
+   shift-left-long, SM3TT2B crypto, and EXT (vector extract). Each clause
+   matches a 32-bit encoding and slices named fields from `op_code`. */
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b000011 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_add_saturating_sisd_decode(U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b000011 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_add_saturating_simd_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b1010000 @ _ : bits(23) as op_code) = {
+ opc : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ L : bits(1) = [op_code[22]];
+ imm7 : bits(7) = op_code[21 .. 15];
+ Rt2 : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_pair_general_noalloc_aarch64_memory_pair_general_noalloc__decode(opc, V, L, imm7, Rt2, Rn, Rt)
+}
+
+function clause decode (0b00011110 @ _ : bits(2) @ 0b1 @ _ : bits(6) @ 0b00010 @ _ : bits(10) as op_code) = {
+ M : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ op : bits(1) = [op_code[15]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ float_arithmetic_mul_product_decode(M, S, typ, Rm, op, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001111 @ _ : bits(8) @ 0b1000 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mul_int_decode(Q, U, size, L, M, Rm, H, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b011110 @ _ : bits(7) @ 0b101001 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_leftlong_decode(Q, U, immh, immb, Rn, Rd)
+}
+
+function clause decode (0b11001110010 @ _ : bits(5) @ 0b10 @ _ : bits(2) @ 0b11 @ _ : bits(10) as op_code) = {
+ Rm : bits(5) = op_code[20 .. 16];
+ imm2 : bits(2) = op_code[13 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sm3_sm3tt2b_decode(Rm, imm2, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b101110000 @ _ : bits(5) @ 0b0 @ _ : bits(4) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ op2 : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ imm4 : bits(4) = op_code[14 .. 11];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_transfer_vector_extract_decode(Q, op2, Rm, imm4, Rn, Rd)
+}
+
+/* Generated A64 decode clauses: saturating extract-narrow (scalar/vector),
+   PACGA, SM3PARTW2, FP reciprocal estimate (FP16/FP x scalar/vector), and
+   compare-and-swap (CAS*). Each clause matches a 32-bit encoding and
+   slices named fields from `op_code` for the family decoder. */
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b100001010010 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_extract_sat_sisd_decode(U, size, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b100001010010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_extract_sat_simd_decode(Q, U, size, Rn, Rd)
+}
+
+function clause decode (0b10011010110 @ _ : bits(5) @ 0b001100 @ _ : bits(10) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op : bits(1) = [op_code[30]];
+ S : bits(1) = [op_code[29]];
+ Rm : bits(5) = op_code[20 .. 16];
+ opcode2 : bits(6) = op_code[15 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_pac_pacga_dp_2src_decode(sf, op, S, Rm, opcode2, Rn, Rd)
+}
+
+function clause decode (0b11001110011 @ _ : bits(5) @ 0b110001 @ _ : bits(10) as op_code) = {
+ Rm : bits(5) = op_code[20 .. 16];
+ O : bits(1) = [op_code[14]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sm3_sm3partw2_decode(Rm, O, Rn, Rd)
+}
+
+function clause decode (0b0101111011111001110110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_recip_fp16_sisd_decode(U, size, Rn, Rd)
+}
+
+function clause decode (0b010111101 @ _ : bits(1) @ 0b100001110110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_recip_float_sisd_decode(U, sz, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b00111011111001110110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_recip_fp16_simd_decode(Q, U, a, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011101 @ _ : bits(1) @ 0b100001110110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_recip_float_simd_decode(Q, U, sz, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b0010001 @ _ : bits(1) @ 0b1 @ _ : bits(6) @ 0b11111 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ o2 : bits(1) = [op_code[23]];
+ L : bits(1) = [op_code[22]];
+ o1 : bits(1) = [op_code[21]];
+ Rs : bits(5) = op_code[20 .. 16];
+ o0 : bits(1) = [op_code[15]];
+ Rt2 : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_atomicops_cas_single_decode(size, o2, L, o1, Rs, o0, Rt2, Rn, Rt)
+}
+
+/* Generated A64 decode clauses: FP max/min (1985 variant, FP16/FP),
+   SHA512H2, NOT/MVN vector, MSR/MRS (CPSR-class system registers), FP
+   compare-less-than-zero (FP16/FP x scalar/vector), conditional branch
+   B.cond, vector RBIT, and HVC. Each clause matches a 32-bit encoding and
+   slices named fields from `op_code` for the family decoder. */
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(1) @ 0b10 @ _ : bits(5) @ 0b001101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ o1 : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_maxmin_fp16_1985_decode(Q, U, o1, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b111101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ o1 : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_maxmin_fp_1985_decode(Q, U, o1, sz, Rm, Rn, Rd)
+}
+
+function clause decode (0b11001110011 @ _ : bits(5) @ 0b100001 @ _ : bits(10) as op_code) = {
+ Rm : bits(5) = op_code[20 .. 16];
+ O : bits(1) = [op_code[14]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha512_sha512h2_decode(Rm, O, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b10111000100000010110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_not_decode(Q, U, size, Rn, Rd)
+}
+
+function clause decode (0b1101010100000 @ _ : bits(3) @ 0b0100 @ _ : bits(7) @ 0b11111 as op_code) = {
+ L : bits(1) = [op_code[21]];
+ op0 : bits(2) = op_code[20 .. 19];
+ op1 : bits(3) = op_code[18 .. 16];
+ CRn : bits(4) = op_code[15 .. 12];
+ CRm : bits(4) = op_code[11 .. 8];
+ op2 : bits(3) = op_code[7 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ system_register_cpsr_decode(L, op0, op1, CRn, CRm, op2, Rt)
+}
+
+function clause decode (0b0101111011111000111010 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_cmp_fp16_lessthan_sisd_decode(U, a, Rn, Rd)
+}
+
+function clause decode (0b010111101 @ _ : bits(1) @ 0b100000111010 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_cmp_float_lessthan_sisd_decode(U, sz, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b00111011111000111010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_cmp_fp16_lessthan_simd_decode(Q, U, a, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011101 @ _ : bits(1) @ 0b100000111010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_cmp_float_lessthan_simd_decode(Q, U, sz, Rn, Rd)
+}
+
+function clause decode (0b01010100 @ _ : bits(19) @ 0b0 @ _ : bits(4) as op_code) = {
+ o1 : bits(1) = [op_code[24]];
+ imm19 : bits(19) = op_code[23 .. 5];
+ o0 : bits(1) = [op_code[4]];
+ cond : bits(4) = op_code[3 .. 0];
+ branch_conditional_cond_decode(o1, imm19, o0, cond)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b10111001100000010110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_rbit_decode(Q, U, size, Rn, Rd)
+}
+
+function clause decode (0b11010100000 @ _ : bits(16) @ 0b00010 as op_code) = {
+ opc : bits(3) = op_code[23 .. 21];
+ imm16 : bits(16) = op_code[20 .. 5];
+ op2 : bits(3) = op_code[4 .. 2];
+ LL : bits(2) = op_code[1 .. 0];
+ system_exceptions_runtime_hvc_decode(opc, imm16, op2, LL)
+}
+
+/* Generated A64 decode clauses: load/store exclusive pair, AUTDB, integer
+   RBIT, load/store unsigned-immediate, SHA256 hash, AUTIA (register and
+   HINT-space forms), long add-across-vector, and SVC. Each clause matches
+   a 32-bit encoding and slices named fields from `op_code`. */
+function clause decode (0b1 @ _ : bits(1) @ 0b0010000 @ _ : bits(1) @ 0b1 @ _ : bits(21) as op_code) = {
+ sz : bits(1) = [op_code[30]];
+ o2 : bits(1) = [op_code[23]];
+ L : bits(1) = [op_code[22]];
+ o1 : bits(1) = [op_code[21]];
+ Rs : bits(5) = op_code[20 .. 16];
+ o0 : bits(1) = [op_code[15]];
+ Rt2 : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_exclusive_pair_decode(sz, o2, L, o1, Rs, o0, Rt2, Rn, Rt)
+}
+
+function clause decode (0b110110101100000100 @ _ : bits(1) @ 0b111 @ _ : bits(10) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ opcode2 : bits(5) = op_code[20 .. 16];
+ Z : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_pac_autdb_dp_1src_decode(sf, S, opcode2, Z, Rn, Rd)
+}
+
+function clause decode (_ : bits(1) @ 0b101101011000000000000 @ _ : bits(10) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ opcode2 : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_arithmetic_rbit_decode(sf, S, opcode2, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b111001 @ _ : bits(24) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ opc : bits(2) = op_code[23 .. 22];
+ imm12 : bits(12) = op_code[21 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_single_general_immediate_unsigned_aarch64_memory_single_general_immediate_unsigned__decode(size, V, opc, imm12, Rn, Rt)
+}
+
+function clause decode (0b01011110000 @ _ : bits(5) @ 0b010 @ _ : bits(1) @ 0b00 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ P : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha3op_sha256hash_decode(size, Rm, P, Rn, Rd)
+}
+
+function clause decode (0b110110101100000100 @ _ : bits(1) @ 0b100 @ _ : bits(10) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ opcode2 : bits(5) = op_code[20 .. 16];
+ Z : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_pac_autia_dp_1src_decode(sf, S, opcode2, Z, Rn, Rd)
+}
+
+/* HINT-space AUTIA*: CRm/op2 (bits 11..5) select the variant; remaining
+   extracted fields sit in fixed pattern bits. */
+function clause decode (0b11010101000000110010 @ _ : bits(7) @ 0b11111 as op_code) = {
+ L : bits(1) = [op_code[21]];
+ op0 : bits(2) = op_code[20 .. 19];
+ op1 : bits(3) = op_code[18 .. 16];
+ CRn : bits(4) = op_code[15 .. 12];
+ CRm : bits(4) = op_code[11 .. 8];
+ op2 : bits(3) = op_code[7 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ integer_pac_autia_hint_decode(L, op0, op1, CRn, CRm, op2, Rt)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b110000001110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_addlong_decode(Q, U, size, Rn, Rd)
+}
+
+function clause decode (0b11010100000 @ _ : bits(16) @ 0b00001 as op_code) = {
+ opc : bits(3) = op_code[23 .. 21];
+ imm16 : bits(16) = op_code[20 .. 5];
+ op2 : bits(3) = op_code[4 .. 2];
+ LL : bits(2) = op_code[1 .. 0];
+ system_exceptions_runtime_svc_decode(opc, imm16, op2, LL)
+}
+
+/* Generated A64 decode clauses: unconditional branch via register
+   (BR/BLR/RET and PAC variants), SM3SS1, halving-rounding add, saturating
+   doubling multiply (scalar/vector), SHA512SU0, and TBL/TBX. Each clause
+   matches a 32-bit encoding and slices named fields from `op_code`. */
+function clause decode (0b1101011 @ _ : bits(1) @ 0b0 @ _ : bits(2) @ 0b111110000 @ _ : bits(12) as op_code) = {
+ Z : bits(1) = [op_code[24]];
+ opc : bits(1) = [op_code[23]];
+ op : bits(2) = op_code[22 .. 21];
+ op2 : bits(5) = op_code[20 .. 16];
+ op3 : bits(4) = op_code[15 .. 12];
+ A : bits(1) = [op_code[11]];
+ M : bits(1) = [op_code[10]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rm : bits(5) = op_code[4 .. 0];
+ branch_unconditional_register_decode(Z, opc, op, op2, op3, A, M, Rn, Rm)
+}
+
+function clause decode (0b11001110010 @ _ : bits(5) @ 0b0 @ _ : bits(15) as op_code) = {
+ Op0 : bits(2) = op_code[22 .. 21];
+ Rm : bits(5) = op_code[20 .. 16];
+ Ra : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sm3_sm3ss1_decode(Op0, Rm, Ra, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b000101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_add_halving_rounding_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b101101 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_int_doubling_sisd_decode(U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b101101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_int_doubling_simd_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b1100111011000000100000 @ _ : bits(10) as op_code) = {
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha512_sha512su0_decode(Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110000 @ _ : bits(5) @ 0b0 @ _ : bits(3) @ 0b00 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ op2 : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ len : bits(2) = op_code[14 .. 13];
+ op : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_transfer_vector_table_decode(Q, op2, Rm, len, op, Rn, Rd)
+}
+
+/* Generated A64 decode clauses: wrapping add/sub (scalar/vector), AES
+   round, integer compare-with-zero (scalar/vector), pairwise wrapping add,
+   FMOV (immediate), narrowing add/sub, and TBZ/TBNZ. Each clause matches a
+   32-bit encoding and slices named fields from `op_code`. */
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b100001 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_add_wrapping_single_sisd_decode(U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b100001 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_add_wrapping_single_simd_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b0100111000101000010 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[23 .. 22];
+ D : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_aes_round_decode(size, D, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b100000100 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ op : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_cmp_int_bulk_sisd_decode(U, size, op, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b100000100 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ op : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_cmp_int_bulk_simd_decode(Q, U, size, op, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b101111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_add_wrapping_pair_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b00011110 @ _ : bits(2) @ 0b1 @ _ : bits(8) @ 0b10000000 @ _ : bits(5) as op_code) = {
+ M : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ imm8 : bits(8) = op_code[20 .. 13];
+ imm5 : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ float_move_fp_imm_decode(M, S, typ, imm8, imm5, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b01 @ _ : bits(1) @ 0b000 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ o1 : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_disparate_addsub_narrow_decode(Q, U, size, Rm, o1, Rn, Rd)
+}
+
+function clause decode (_ : bits(1) @ 0b011011 @ _ : bits(25) as op_code) = {
+ b5 : bits(1) = [op_code[31]];
+ op : bits(1) = [op_code[24]];
+ b40 : bits(5) = op_code[23 .. 19];
+ imm14 : bits(14) = op_code[18 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ branch_conditional_test_decode(b5, op, b40, imm14, Rt)
+}
+
+/* Generated A64 decode clauses: FP absolute/negate unary (FP16/FP),
+   polynomial multiply, single-register load/store with post/pre-index and
+   unsigned immediate, by-element integer mul-acc, vector shift unary,
+   bitwise-select/EOR, and SM4E. Each clause matches a 32-bit encoding and
+   slices named fields from `op_code`. */
+function clause decode (0b0 @ _ : bits(2) @ 0b0111011111000111110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_diffneg_fp16_decode(Q, U, a, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b011101 @ _ : bits(1) @ 0b100000111110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_diffneg_float_decode(Q, U, sz, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b111000 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_disparate_mul_poly_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b111000 @ _ : bits(2) @ 0b0 @ _ : bits(9) @ 0b01 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ opc : bits(2) = op_code[23 .. 22];
+ imm9 : bits(9) = op_code[20 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_single_general_immediate_signed_postidx_aarch64_memory_single_general_immediate_signed_postidx__decode(size, V, opc, imm9, Rn, Rt)
+}
+
+function clause decode (_ : bits(2) @ 0b111000 @ _ : bits(2) @ 0b0 @ _ : bits(9) @ 0b11 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ opc : bits(2) = op_code[23 .. 22];
+ imm9 : bits(9) = op_code[20 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_single_general_immediate_signed_preidx_aarch64_memory_single_general_immediate_signed_postidx__decode(size, V, opc, imm9, Rn, Rt)
+}
+
+function clause decode (_ : bits(2) @ 0b111001 @ _ : bits(24) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ opc : bits(2) = op_code[23 .. 22];
+ imm12 : bits(12) = op_code[21 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_single_general_immediate_unsigned_aarch64_memory_single_general_immediate_signed_postidx__decode(size, V, opc, imm12, Rn, Rt)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b101111 @ _ : bits(8) @ 0b0 @ _ : bits(1) @ 0b00 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ o2 : bits(1) = [op_code[14]];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mulacc_int_decode(Q, U, size, L, M, Rm, o2, H, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b101110 @ _ : bits(2) @ 0b100001001110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_shift_decode(Q, U, size, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b101110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b000111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ opc2 : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_logical_bsleor_decode(Q, U, opc2, Rm, Rn, Rd)
+}
+
+function clause decode (0b1100111011000000100001 @ _ : bits(10) as op_code) = {
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sm4_sm4enc_decode(Rn, Rd)
+}
+
+/* Generated A64 decode clauses: load literal, SMOV (vector-to-GP signed
+   move), SIMD&FP register-pair load/store (post-index, pre-index, offset),
+   and CNT (population count). Each clause matches a 32-bit encoding and
+   slices named fields from `op_code`. */
+function clause decode (_ : bits(2) @ 0b011000 @ _ : bits(24) as op_code) = {
+ opc : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ imm19 : bits(19) = op_code[23 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_literal_general_decode(opc, V, imm19, Rt)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110000 @ _ : bits(5) @ 0b001011 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ op : bits(1) = [op_code[29]];
+ imm5 : bits(5) = op_code[20 .. 16];
+ imm4 : bits(4) = op_code[14 .. 11];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_transfer_integer_move_signed_decode(Q, op, imm5, imm4, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b1011001 @ _ : bits(23) as op_code) = {
+ opc : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ L : bits(1) = [op_code[22]];
+ imm7 : bits(7) = op_code[21 .. 15];
+ Rt2 : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_pair_simdfp_postidx_aarch64_memory_pair_simdfp_postidx__decode(opc, V, L, imm7, Rt2, Rn, Rt)
+}
+
+function clause decode (_ : bits(2) @ 0b1011011 @ _ : bits(23) as op_code) = {
+ opc : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ L : bits(1) = [op_code[22]];
+ imm7 : bits(7) = op_code[21 .. 15];
+ Rt2 : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_pair_simdfp_preidx_aarch64_memory_pair_simdfp_postidx__decode(opc, V, L, imm7, Rt2, Rn, Rt)
+}
+
+function clause decode (_ : bits(2) @ 0b1011010 @ _ : bits(23) as op_code) = {
+ opc : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ L : bits(1) = [op_code[22]];
+ imm7 : bits(7) = op_code[21 .. 15];
+ Rt2 : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_pair_simdfp_offset_aarch64_memory_pair_simdfp_postidx__decode(opc, V, L, imm7, Rt2, Rn, Rt)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(2) @ 0b100000010110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_cnt_decode(Q, U, size, Rn, Rd)
+}
+
+/* Generated A64 decode clauses: SHA1SU0, CBZ/CBNZ, DUP (from general
+   register), FP add (FP16/FP), SM4EKEY, and float-to-int convert with
+   round-to-nearest-tie-away (FP16/FP x scalar/vector). Each clause matches
+   a 32-bit encoding and slices named fields from `op_code`. */
+function clause decode (0b01011110000 @ _ : bits(5) @ 0b001100 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha3op_sha1sched0_decode(size, Rm, Rn, Rd)
+}
+
+function clause decode (_ : bits(1) @ 0b011010 @ _ : bits(25) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op : bits(1) = [op_code[24]];
+ imm19 : bits(19) = op_code[23 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ branch_conditional_compare_decode(sf, op, imm19, Rt)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110000 @ _ : bits(5) @ 0b000011 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ op : bits(1) = [op_code[29]];
+ imm5 : bits(5) = op_code[20 .. 16];
+ imm4 : bits(4) = op_code[14 .. 11];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_transfer_integer_dup_decode(Q, op, imm5, imm4, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110010 @ _ : bits(5) @ 0b000101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_add_fp16_decode(Q, U, a, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b011100 @ _ : bits(1) @ 0b1 @ _ : bits(5) @ 0b110101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_add_fp_decode(Q, U, sz, Rm, Rn, Rd)
+}
+
+function clause decode (0b11001110011 @ _ : bits(5) @ 0b110010 @ _ : bits(10) as op_code) = {
+ Rm : bits(5) = op_code[20 .. 16];
+ O : bits(1) = [op_code[14]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sm4_sm4enckey_decode(Rm, O, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b1111001111001110010 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_fp16_conv_float_tieaway_sisd_decode(U, size, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b111100 @ _ : bits(1) @ 0b100001110010 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_float_conv_float_tieaway_sisd_decode(U, sz, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b0111001111001110010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_fp16_conv_float_tieaway_simd_decode(Q, U, a, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b011100 @ _ : bits(1) @ 0b100001110010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_float_conv_float_tieaway_simd_decode(Q, U, sz, Rn, Rd)
+}
+function clause decode (_ : bits(2) @ 0b111100 @ _ : bits(2) @ 0b0 @ _ : bits(9) @ 0b00 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ opc : bits(2) = op_code[23 .. 22];
+ imm9 : bits(9) = op_code[20 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_single_simdfp_immediate_signed_offset_normal_aarch64_memory_single_simdfp_immediate_signed_offset_normal__decode(size, V, opc, imm9, Rn, Rt)
+}
+
+function clause decode (0b110110101100000100 @ _ : bits(1) @ 0b001 @ _ : bits(10) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ opcode2 : bits(5) = op_code[20 .. 16];
+ Z : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_pac_pacib_dp_1src_decode(sf, S, opcode2, Z, Rn, Rd)
+}
+
+function clause decode (0b11010101000000110010 @ _ : bits(7) @ 0b11111 as op_code) = {
+ L : bits(1) = [op_code[21]];
+ op0 : bits(2) = op_code[20 .. 19];
+ op1 : bits(3) = op_code[18 .. 16];
+ CRn : bits(4) = op_code[15 .. 12];
+ CRm : bits(4) = op_code[11 .. 8];
+ op2 : bits(3) = op_code[7 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ integer_pac_pacib_hint_decode(L, op0, op1, CRn, CRm, op2, Rt)
+}
+
+function clause decode (0b01011110 @ _ : bits(2) @ 0b110000110010 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ o1 : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_fp16maxnm_sisd_decode(U, o1, sz, Rn, Rd)
+}
+
+function clause decode (0b01111110 @ _ : bits(2) @ 0b110000110010 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ o1 : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_fpmaxnm_sisd_decode(U, o1, sz, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b111100 @ _ : bits(2) @ 0b0 @ _ : bits(9) @ 0b01 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ opc : bits(2) = op_code[23 .. 22];
+ imm9 : bits(9) = op_code[20 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_single_simdfp_immediate_signed_postidx_aarch64_memory_single_simdfp_immediate_signed_postidx__decode(size, V, opc, imm9, Rn, Rt)
+}
+
+function clause decode (_ : bits(2) @ 0b111100 @ _ : bits(2) @ 0b0 @ _ : bits(9) @ 0b11 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ opc : bits(2) = op_code[23 .. 22];
+ imm9 : bits(9) = op_code[20 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_single_simdfp_immediate_signed_preidx_aarch64_memory_single_simdfp_immediate_signed_postidx__decode(size, V, opc, imm9, Rn, Rt)
+}
+
+function clause decode (_ : bits(2) @ 0b111101 @ _ : bits(24) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ opc : bits(2) = op_code[23 .. 22];
+ imm12 : bits(12) = op_code[21 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_single_simdfp_immediate_unsigned_aarch64_memory_single_simdfp_immediate_signed_postidx__decode(size, V, opc, imm12, Rn, Rt)
+}
+
+function clause decode (0b01011110010 @ _ : bits(5) @ 0b000111 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_fp16_extended_sisd_decode(U, a, Rm, Rn, Rd)
+}
+
+function clause decode (0b010111100 @ _ : bits(1) @ 0b1 @ _ : bits(5) @ 0b110111 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_fp_extended_sisd_decode(U, sz, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110010 @ _ : bits(5) @ 0b000111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_fp16_extended_simd_decode(Q, U, a, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011100 @ _ : bits(1) @ 0b1 @ _ : bits(5) @ 0b110111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_fp_extended_simd_decode(Q, U, sz, Rm, Rn, Rd)
+}
+
+function clause decode (0b01011110000 @ _ : bits(5) @ 0b001000 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha3op_sha1hash_majority_decode(size, Rm, Rn, Rd)
+}
+
+function clause decode (0b11001110011 @ _ : bits(5) @ 0b110000 @ _ : bits(10) as op_code) = {
+ Rm : bits(5) = op_code[20 .. 16];
+ O : bits(1) = [op_code[14]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sm3_sm3partw1_decode(Rm, O, Rn, Rd)
+}
+
+function clause decode (0b010111110 @ _ : bits(7) @ 0b010101 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_left_sisd_decode(U, immh, immb, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011110 @ _ : bits(7) @ 0b010101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_left_simd_decode(Q, U, immh, immb, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011101 @ _ : bits(1) @ 0b100001110010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_recip_int_decode(Q, U, sz, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b0 @ _ : bits(5) @ 0b100101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_int_dotp_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b11001110010 @ _ : bits(5) @ 0b10 @ _ : bits(2) @ 0b01 @ _ : bits(10) as op_code) = {
+ Rm : bits(5) = op_code[20 .. 16];
+ imm2 : bits(2) = op_code[13 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sm3_sm3tt1b_decode(Rm, imm2, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011110 @ _ : bits(7) @ 0b1000 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ op : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_rightnarrow_logical_decode(Q, U, immh, immb, op, Rn, Rd)
+}
+
+function clause decode (0b0101111000101000001010 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha2op_sha256sched0_decode(size, Rn, Rd)
+}
+
+function clause decode (0b01111110110 @ _ : bits(5) @ 0b000101 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_sub_fp16_sisd_decode(U, a, Rm, Rn, Rd)
+}
+
+function clause decode (0b011111101 @ _ : bits(1) @ 0b1 @ _ : bits(5) @ 0b110101 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_sub_fp_sisd_decode(U, sz, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110110 @ _ : bits(5) @ 0b000101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_sub_fp16_simd_decode(Q, U, a, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b011101 @ _ : bits(1) @ 0b1 @ _ : bits(5) @ 0b110101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_sub_fp_simd_decode(Q, U, sz, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011000 @ _ : bits(1) @ 0b000000 @ _ : bits(16) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ L : bits(1) = [op_code[22]];
+ opcode : bits(4) = op_code[15 .. 12];
+ size : bits(2) = op_code[11 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_vector_multiple_nowb_aarch64_memory_vector_multiple_nowb__decode(Q, L, opcode, size, Rn, Rt)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011001 @ _ : bits(1) @ 0b0 @ _ : bits(21) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ L : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ opcode : bits(4) = op_code[15 .. 12];
+ size : bits(2) = op_code[11 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_vector_multiple_postinc_aarch64_memory_vector_multiple_nowb__decode(Q, L, Rm, opcode, size, Rn, Rt)
+}
+
+function clause decode (0b01011111 @ _ : bits(8) @ 0b110 @ _ : bits(2) @ 0b0 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ op : bits(1) = [op_code[12]];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mul_high_sisd_decode(U, size, L, M, Rm, op, H, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001111 @ _ : bits(8) @ 0b110 @ _ : bits(2) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ op : bits(1) = [op_code[12]];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mul_high_simd_decode(Q, U, size, L, M, Rm, op, H, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b0111 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ ac : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_diff_decode(Q, U, size, Rm, ac, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b1111100 @ _ : bits(6) @ 0b1001 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mul_fp16_sisd_decode(U, size, L, M, Rm, H, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b111111 @ _ : bits(7) @ 0b1001 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mul_fp_sisd_decode(U, sz, L, M, Rm, H, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b0111100 @ _ : bits(6) @ 0b1001 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mul_fp16_simd_decode(Q, U, size, L, M, Rm, H, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b011111 @ _ : bits(7) @ 0b1001 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mul_fp_simd_decode(Q, U, sz, L, M, Rm, H, Rn, Rd)
+}
+
+function clause decode (0b01011110 @ _ : bits(2) @ 0b110001101110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_add_sisd_decode(U, size, Rn, Rd)
+}
+
+function clause decode (0b11010110100111110000 @ _ : bits(12) as op_code) = {
+ opc : bits(4) = op_code[24 .. 21];
+ op2 : bits(5) = op_code[20 .. 16];
+ op3 : bits(4) = op_code[15 .. 12];
+ A : bits(1) = [op_code[11]];
+ M : bits(1) = [op_code[10]];
+ Rn : bits(5) = op_code[9 .. 5];
+ op4 : bits(5) = op_code[4 .. 0];
+ branch_unconditional_eret_decode(opc, op2, op3, A, M, Rn, op4)
+}
+
+function clause decode (0b0100111000101000011 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[23 .. 22];
+ D : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_aes_mix_decode(size, D, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b110000 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_disparate_mul_product_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b111110 @ _ : bits(7) @ 0b111001 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_conv_int_sisd_decode(U, immh, immb, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b011110 @ _ : bits(7) @ 0b111001 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_conv_int_simd_decode(Q, U, immh, immb, Rn, Rd)
+}
+
+function clause decode (0b00011110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b01 @ _ : bits(2) @ 0b10 @ _ : bits(10) as op_code) = {
+ M : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ op : bits(2) = op_code[13 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ float_arithmetic_maxmin_decode(M, S, typ, Rm, op, Rn, Rd)
+}
+
+function clause decode (0b01111110 @ _ : bits(2) @ 0b100001001010 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_extract_sqxtun_sisd_decode(U, size, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b101110 @ _ : bits(2) @ 0b100001001010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_extract_sqxtun_simd_decode(Q, U, size, Rn, Rd)
+}
+
+function clause decode (0b010111100 @ _ : bits(1) @ 0b110000110110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_fp16add_sisd_decode(U, sz, Rn, Rd)
+}
+
+function clause decode (0b011111100 @ _ : bits(1) @ 0b110000110110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_fpadd_sisd_decode(U, sz, Rn, Rd)
+}
+
+function clause decode (0b00011110 @ _ : bits(2) @ 0b10000 @ _ : bits(2) @ 0b10000 @ _ : bits(10) as op_code) = {
+ M : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ opc : bits(2) = op_code[16 .. 15];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ float_arithmetic_unary_decode(M, S, typ, opc, Rn, Rd)
+}
+
+function clause decode (0b11010100000 @ _ : bits(16) @ 0b00011 as op_code) = {
+ opc : bits(3) = op_code[23 .. 21];
+ imm16 : bits(16) = op_code[20 .. 5];
+ op2 : bits(3) = op_code[4 .. 2];
+ LL : bits(2) = op_code[1 .. 0];
+ system_exceptions_runtime_smc_decode(opc, imm16, op2, LL)
+}
+
+function clause decode (_ : bits(2) @ 0b0010001 @ _ : bits(1) @ 0b0 @ _ : bits(21) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ o2 : bits(1) = [op_code[23]];
+ L : bits(1) = [op_code[22]];
+ o1 : bits(1) = [op_code[21]];
+ Rs : bits(5) = op_code[20 .. 16];
+ o0 : bits(1) = [op_code[15]];
+ Rt2 : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_ordered_decode(size, o2, L, o1, Rs, o0, Rt2, Rn, Rt)
+}
+
+function clause decode (0b011111100 @ _ : bits(1) @ 0b100001011010 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_float_xtn_sisd_decode(U, sz, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b1011100 @ _ : bits(1) @ 0b100001011010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_float_xtn_simd_decode(Q, U, sz, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b111000 @ _ : bits(2) @ 0b1 @ _ : bits(9) @ 0b10 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ opc : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ option_name : bits(3) = op_code[15 .. 13];
+ S : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_single_general_register_aarch64_memory_single_general_register__decode(size, V, opc, Rm, option_name, S, Rn, Rt)
+}
+
+function clause decode (0b11001110100 @ _ : bits(21) as op_code) = {
+ Rm : bits(5) = op_code[20 .. 16];
+ imm6 : bits(6) = op_code[15 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha3_xar_decode(Rm, imm6, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b1011000 @ _ : bits(23) as op_code) = {
+ opc : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ L : bits(1) = [op_code[22]];
+ imm7 : bits(7) = op_code[21 .. 15];
+ Rt2 : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_pair_simdfp_noalloc_aarch64_memory_pair_simdfp_noalloc__decode(opc, V, L, imm7, Rt2, Rn, Rt)
+}
+
+function clause decode (0b11010100001 @ _ : bits(16) @ 0b00000 as op_code) = {
+ opc : bits(3) = op_code[23 .. 21];
+ imm16 : bits(16) = op_code[20 .. 5];
+ op2 : bits(3) = op_code[4 .. 2];
+ LL : bits(2) = op_code[1 .. 0];
+ system_exceptions_debug_breakpoint_decode(opc, imm16, op2, LL)
+}
+
+function clause decode (_ : bits(1) @ 0b0011011000 @ _ : bits(21) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op54 : bits(2) = op_code[30 .. 29];
+ op31 : bits(3) = op_code[23 .. 21];
+ Rm : bits(5) = op_code[20 .. 16];
+ o0 : bits(1) = [op_code[15]];
+ Ra : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_arithmetic_mul_uniform_addsub_decode(sf, op54, op31, Rm, o0, Ra, Rn, Rd)
+}
+
+function clause decode (0b01011110010 @ _ : bits(5) @ 0b001111 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_recpsfp16_sisd_decode(U, a, Rm, Rn, Rd)
+}
+
+function clause decode (0b010111100 @ _ : bits(1) @ 0b1 @ _ : bits(5) @ 0b111111 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_recps_sisd_decode(U, sz, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110010 @ _ : bits(5) @ 0b001111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_recpsfp16_simd_decode(Q, U, a, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011100 @ _ : bits(1) @ 0b1 @ _ : bits(5) @ 0b111111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_recps_simd_decode(Q, U, sz, Rm, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b011100 @ _ : bits(24) as op_code) = {
+ opc : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ imm19 : bits(19) = op_code[23 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_literal_simdfp_decode(opc, V, imm19, Rt)
+}
+
+function clause decode (0b1101010100 @ _ : bits(1) @ 0b01 @ _ : bits(19) as op_code) = {
+ L : bits(1) = [op_code[21]];
+ op0 : bits(2) = op_code[20 .. 19];
+ op1 : bits(3) = op_code[18 .. 16];
+ CRn : bits(4) = op_code[15 .. 12];
+ CRm : bits(4) = op_code[11 .. 8];
+ op2 : bits(3) = op_code[7 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ system_sysops_decode(L, op0, op1, CRn, CRm, op2, Rt)
+}
+
+function clause decode (_ : bits(3) @ 0b01011 @ _ : bits(2) @ 0b0 @ _ : bits(21) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op : bits(1) = [op_code[30]];
+ S : bits(1) = [op_code[29]];
+ shift : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ imm6 : bits(6) = op_code[15 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_arithmetic_addsub_shiftedreg_decode(sf, op, S, shift, Rm, imm6, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(1) @ 0b1111001100 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ o2 : bits(1) = [op_code[23]];
+ o1 : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_fp16_round_decode(Q, U, o2, o1, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b100001100 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ o2 : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ o1 : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_float_round_decode(Q, U, o2, sz, o1, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b101110010 @ _ : bits(5) @ 0b001111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_divfp16_decode(Q, U, a, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b1011100 @ _ : bits(1) @ 0b1 @ _ : bits(5) @ 0b111111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_div_decode(Q, U, sz, Rm, Rn, Rd)
+}
+
+function clause decode (_ : bits(3) @ 0b100100 @ _ : bits(23) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ opc : bits(2) = op_code[30 .. 29];
+ N : bits(1) = [op_code[22]];
+ immr : bits(6) = op_code[21 .. 16];
+ imms : bits(6) = op_code[15 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_logical_immediate_decode(sf, opc, N, immr, imms, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b111010010 @ _ : bits(9) @ 0b00 @ _ : bits(5) @ 0b0 @ _ : bits(4) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op : bits(1) = [op_code[30]];
+ S : bits(1) = [op_code[29]];
+ Rm : bits(5) = op_code[20 .. 16];
+ cond : bits(4) = op_code[15 .. 12];
+ o2 : bits(1) = [op_code[10]];
+ Rn : bits(5) = op_code[9 .. 5];
+ o3 : bits(1) = [op_code[4]];
+ nzcv : bits(4) = op_code[3 .. 0];
+ integer_conditional_compare_register_decode(sf, op, S, Rm, cond, o2, Rn, o3, nzcv)
+}
+
+function clause decode (_ : bits(3) @ 0b100110 @ _ : bits(23) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ opc : bits(2) = op_code[30 .. 29];
+ N : bits(1) = [op_code[22]];
+ immr : bits(6) = op_code[21 .. 16];
+ imms : bits(6) = op_code[15 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_bitfield_decode(sf, opc, N, immr, imms, Rn, Rd)
+}
+
+function clause decode (0b1101010100 @ _ : bits(1) @ 0b1 @ _ : bits(20) as op_code) = {
+ L : bits(1) = [op_code[21]];
+ o0 : bits(1) = [op_code[19]];
+ op1 : bits(3) = op_code[18 .. 16];
+ CRn : bits(4) = op_code[15 .. 12];
+ CRm : bits(4) = op_code[11 .. 8];
+ op2 : bits(3) = op_code[7 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ system_register_system_decode(L, o0, op1, CRn, CRm, op2, Rt)
+}
+
+function clause decode (0b11001110010 @ _ : bits(5) @ 0b10 @ _ : bits(2) @ 0b10 @ _ : bits(10) as op_code) = {
+ Rm : bits(5) = op_code[20 .. 16];
+ imm2 : bits(2) = op_code[13 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sm3_sm3tt2a_decode(Rm, imm2, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b111010010 @ _ : bits(9) @ 0b10 @ _ : bits(5) @ 0b0 @ _ : bits(4) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op : bits(1) = [op_code[30]];
+ S : bits(1) = [op_code[29]];
+ imm5 : bits(5) = op_code[20 .. 16];
+ cond : bits(4) = op_code[15 .. 12];
+ o2 : bits(1) = [op_code[10]];
+ Rn : bits(5) = op_code[9 .. 5];
+ o3 : bits(1) = [op_code[4]];
+ nzcv : bits(4) = op_code[3 .. 0];
+ integer_conditional_compare_immediate_decode(sf, op, S, imm5, cond, o2, Rn, o3, nzcv)
+}
+
+/* Auto-generated AArch64 decode clauses.  Each clause pattern-matches a
+   fixed opcode bit layout against the 32-bit instruction word (bound to
+   op_code), slices out the named instruction fields, and forwards them to
+   the corresponding *_decode helper defined elsewhere.  Note that some
+   fields (e.g. M below) are re-extracted from bit positions the pattern
+   already fixes to a constant; this mirrors the ARM spec field layout
+   rather than being a decoding decision. */
+function clause decode (0b00011110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b000110 @ _ : bits(10) as op_code) = {
+ M : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ float_arithmetic_div_decode(M, S, typ, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1000000 @ _ : bits(1) @ 0b1010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ op : bits(1) = [op_code[14]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_add_pairwise_decode(Q, U, size, op, Rn, Rd)
+}
+
+function clause decode (0b01011110000 @ _ : bits(5) @ 0b000001 @ _ : bits(10) as op_code) = {
+ op : bits(1) = [op_code[29]];
+ imm5 : bits(5) = op_code[20 .. 16];
+ imm4 : bits(4) = op_code[14 .. 11];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_transfer_vector_cpydup_sisd_decode(op, imm5, imm4, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110000 @ _ : bits(5) @ 0b000001 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ op : bits(1) = [op_code[29]];
+ imm5 : bits(5) = op_code[20 .. 16];
+ imm4 : bits(4) = op_code[14 .. 11];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_transfer_vector_cpydup_simd_decode(Q, op, imm5, imm4, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b11000 @ _ : bits(1) @ 0b101010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ op : bits(1) = [op_code[16]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_intmax_decode(Q, U, size, op, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b111000 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b100000 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ A : bits(1) = [op_code[23]];
+ R : bits(1) = [op_code[22]];
+ Rs : bits(5) = op_code[20 .. 16];
+ o3 : bits(1) = [op_code[15]];
+ opc : bits(3) = op_code[14 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_atomicops_swp_decode(size, V, A, R, Rs, o3, opc, Rn, Rt)
+}
+
+/* Decode clauses: fixed opcode pattern match, field extraction, dispatch to
+   the named *_decode helper (sqrt-estimate variants, SHA512H, element
+   reverse, ADD/SUB immediate). */
+function clause decode (0b0111111011111001110110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_sqrtest_fp16_sisd_decode(U, a, Rn, Rd)
+}
+
+function clause decode (0b011111101 @ _ : bits(1) @ 0b100001110110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_sqrtest_float_sisd_decode(U, sz, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b10111011111001110110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_sqrtest_fp16_simd_decode(Q, U, a, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b1011101 @ _ : bits(1) @ 0b100001110110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_sqrtest_float_simd_decode(Q, U, sz, Rn, Rd)
+}
+
+function clause decode (0b11001110011 @ _ : bits(5) @ 0b100000 @ _ : bits(10) as op_code) = {
+ Rm : bits(5) = op_code[20 .. 16];
+ O : bits(1) = [op_code[14]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha512_sha512h_decode(Rm, O, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b100000000 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ o0 : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_rev_decode(Q, U, size, o0, Rn, Rd)
+}
+
+function clause decode (_ : bits(3) @ 0b10001 @ _ : bits(24) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op : bits(1) = [op_code[30]];
+ S : bits(1) = [op_code[29]];
+ shift : bits(2) = op_code[23 .. 22];
+ imm12 : bits(12) = op_code[21 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_arithmetic_addsub_immediate_decode(sf, op, S, shift, imm12, Rn, Rd)
+}
+
+/* Decode clauses: fixed opcode pattern match, field extraction, dispatch to
+   the named *_decode helper (add/sub long, unzip permute, unconditional
+   branch, complex FP add, AUTDA, SHA1 hash, element insert, FP16 max). */
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b00 @ _ : bits(1) @ 0b000 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ o1 : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_disparate_addsub_long_decode(Q, U, size, Rm, o1, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(2) @ 0b0 @ _ : bits(5) @ 0b0 @ _ : bits(1) @ 0b0110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ op : bits(1) = [op_code[14]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_transfer_vector_permute_unzip_decode(Q, size, Rm, op, Rn, Rd)
+}
+
+function clause decode (_ : bits(1) @ 0b00101 @ _ : bits(26) as op_code) = {
+ op : bits(1) = [op_code[31]];
+ imm26 : bits(26) = op_code[25 .. 0];
+ branch_unconditional_immediate_decode(op, imm26)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b101110 @ _ : bits(2) @ 0b0 @ _ : bits(5) @ 0b111 @ _ : bits(1) @ 0b01 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ rot : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_add_fp_complex_decode(Q, U, size, Rm, rot, Rn, Rd)
+}
+
+function clause decode (0b110110101100000100 @ _ : bits(1) @ 0b110 @ _ : bits(10) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ opcode2 : bits(5) = op_code[20 .. 16];
+ Z : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_pac_autda_dp_1src_decode(sf, S, opcode2, Z, Rn, Rd)
+}
+
+function clause decode (0b0101111000101000000010 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha2op_sha1hash_decode(size, Rn, Rd)
+}
+
+function clause decode (0b01001110000 @ _ : bits(5) @ 0b000111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ op : bits(1) = [op_code[29]];
+ imm5 : bits(5) = op_code[20 .. 16];
+ imm4 : bits(4) = op_code[14 .. 11];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_transfer_integer_insert_decode(Q, op, imm5, imm4, Rn, Rd)
+}
+
+function clause decode (0b01011110 @ _ : bits(2) @ 0b110000111110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ o1 : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_fp16max_sisd_decode(U, o1, sz, Rn, Rd)
+}
+
+/* Decode clauses: fixed opcode pattern match, field extraction, dispatch to
+   the named *_decode helper (FP max reduce, integer compare, CNT, atomic
+   store ops, integer multiply product, system barriers). */
+function clause decode (0b01111110 @ _ : bits(2) @ 0b110000111110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ o1 : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_fpmax_sisd_decode(U, o1, sz, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b0011 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ eq : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_cmp_int_sisd_decode(U, size, Rm, eq, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b0011 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ eq : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_cmp_int_simd_decode(Q, U, size, Rm, eq, Rn, Rd)
+}
+
+function clause decode (_ : bits(1) @ 0b10110101100000000010 @ _ : bits(11) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ opcode2 : bits(5) = op_code[20 .. 16];
+ op : bits(1) = [op_code[10]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_arithmetic_cnt_decode(sf, S, opcode2, op, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b111 @ _ : bits(1) @ 0b00 @ _ : bits(2) @ 0b1 @ _ : bits(9) @ 0b00 @ _ : bits(5) @ 0b11111 as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ A : bits(1) = [op_code[23]];
+ R : bits(1) = [op_code[22]];
+ Rs : bits(5) = op_code[20 .. 16];
+ o3 : bits(1) = [op_code[15]];
+ opc : bits(3) = op_code[14 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_atomicops_st_decode(size, V, A, R, Rs, o3, opc, Rn, Rt)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b100111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_int_product_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b11010101000000110011 @ _ : bits(4) @ 0b1 @ _ : bits(2) @ 0b11111 as op_code) = {
+ L : bits(1) = [op_code[21]];
+ op0 : bits(2) = op_code[20 .. 19];
+ op1 : bits(3) = op_code[18 .. 16];
+ CRn : bits(4) = op_code[15 .. 12];
+ CRm : bits(4) = op_code[11 .. 8];
+ opc : bits(2) = op_code[6 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ system_barriers_decode(L, op0, op1, CRn, CRm, opc, Rt)
+}
+
+/* Decode clauses: fixed opcode pattern match, field extraction, dispatch to
+   the named *_decode helper (EOR3, right-shift insert, saturating
+   diff/negate, CAS pair, move wide, DRET). */
+function clause decode (0b11001110000 @ _ : bits(5) @ 0b0 @ _ : bits(15) as op_code) = {
+ Op0 : bits(2) = op_code[22 .. 21];
+ Rm : bits(5) = op_code[20 .. 16];
+ Ra : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha3_eor3_decode(Op0, Rm, Ra, Rn, Rd)
+}
+
+function clause decode (0b011111110 @ _ : bits(7) @ 0b010001 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_rightinsert_sisd_decode(U, immh, immb, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b1011110 @ _ : bits(7) @ 0b010001 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_rightinsert_simd_decode(Q, U, immh, immb, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b100000011110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_diffneg_sat_sisd_decode(U, size, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b100000011110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_diffneg_sat_simd_decode(Q, U, size, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0010000 @ _ : bits(1) @ 0b1 @ _ : bits(21) as op_code) = {
+ sz : bits(1) = [op_code[30]];
+ o2 : bits(1) = [op_code[23]];
+ L : bits(1) = [op_code[22]];
+ o1 : bits(1) = [op_code[21]];
+ Rs : bits(5) = op_code[20 .. 16];
+ o0 : bits(1) = [op_code[15]];
+ Rt2 : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_atomicops_cas_pair_decode(sz, o2, L, o1, Rs, o0, Rt2, Rn, Rt)
+}
+
+function clause decode (_ : bits(3) @ 0b100101 @ _ : bits(23) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ opc : bits(2) = op_code[30 .. 29];
+ hw : bits(2) = op_code[22 .. 21];
+ imm16 : bits(16) = op_code[20 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_insext_insert_movewide_decode(sf, opc, hw, imm16, Rd)
+}
+
+/* Fully-fixed 32-bit pattern: every extracted field below is a constant. */
+function clause decode (0b11010110101111110000001111100000 as op_code) = {
+ opc : bits(4) = op_code[24 .. 21];
+ op2 : bits(5) = op_code[20 .. 16];
+ op3 : bits(6) = op_code[15 .. 10];
+ Rt : bits(5) = op_code[9 .. 5];
+ op4 : bits(5) = op_code[4 .. 0];
+ branch_unconditional_dret_decode(opc, op2, op3, Rt, op4)
+}
+
+/* Decode clauses: fixed opcode pattern match, field extraction, dispatch to
+   the named *_decode helper (FP select, debug exception, double mul-acc by
+   element, right-shift narrow, SHA1 choose, bit-reverse). */
+function clause decode (0b00011110 @ _ : bits(2) @ 0b1 @ _ : bits(9) @ 0b11 @ _ : bits(10) as op_code) = {
+ M : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ cond : bits(4) = op_code[15 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ float_move_fp_select_decode(M, S, typ, Rm, cond, Rn, Rd)
+}
+
+function clause decode (0b11010100101 @ _ : bits(16) @ 0b000 @ _ : bits(2) as op_code) = {
+ opc : bits(3) = op_code[23 .. 21];
+ imm16 : bits(16) = op_code[20 .. 5];
+ op2 : bits(3) = op_code[4 .. 2];
+ LL : bits(2) = op_code[1 .. 0];
+ system_exceptions_debug_exception_decode(opc, imm16, op2, LL)
+}
+
+function clause decode (0b01011111 @ _ : bits(8) @ 0b0 @ _ : bits(1) @ 0b11 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ o2 : bits(1) = [op_code[14]];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mulacc_double_sisd_decode(U, size, L, M, Rm, o2, H, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001111 @ _ : bits(8) @ 0b0 @ _ : bits(1) @ 0b11 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ o2 : bits(1) = [op_code[14]];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mulacc_double_simd_decode(Q, U, size, L, M, Rm, o2, H, Rn, Rd)
+}
+
+function clause decode (0b01 @ _ : bits(1) @ 0b111110 @ _ : bits(7) @ 0b1001 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ op : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_rightnarrow_uniform_sisd_decode(U, immh, immb, op, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b011110 @ _ : bits(7) @ 0b1001 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ op : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_rightnarrow_uniform_simd_decode(Q, U, immh, immb, op, Rn, Rd)
+}
+
+function clause decode (0b01011110000 @ _ : bits(5) @ 0b000000 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha3op_sha1hash_choose_decode(size, Rm, Rn, Rd)
+}
+
+function clause decode (_ : bits(1) @ 0b1011010110000000000 @ _ : bits(12) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ opcode2 : bits(5) = op_code[20 .. 16];
+ opc : bits(2) = op_code[11 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_arithmetic_rev_decode(sf, S, opcode2, opc, Rn, Rd)
+}
+
+/* Decode clauses: fixed opcode pattern match, field extraction, dispatch to
+   the named *_decode helper (FP16/FP product, logical AND/ORR, FP convert,
+   saturating doubling mul-accumulate, SM3TT1A, complex FP multiply). */
+function clause decode (0b0 @ _ : bits(1) @ 0b101110010 @ _ : bits(5) @ 0b000111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_fp16_product_decode(Q, U, a, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b1011100 @ _ : bits(1) @ 0b1 @ _ : bits(5) @ 0b110111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_fp_product_decode(Q, U, sz, Rm, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b000111 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_logical_andorr_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (0b00011110 @ _ : bits(2) @ 0b10001 @ _ : bits(2) @ 0b10000 @ _ : bits(10) as op_code) = {
+ M : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ opc : bits(2) = op_code[16 .. 15];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ float_convert_fp_decode(M, S, typ, opc, Rn, Rd)
+}
+
+function clause decode (0b01111110 @ _ : bits(2) @ 0b0 @ _ : bits(5) @ 0b1000 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ S : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_int_doubling_accum_sisd_decode(U, size, Rm, S, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b101110 @ _ : bits(2) @ 0b0 @ _ : bits(5) @ 0b1000 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ S : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_int_doubling_accum_simd_decode(Q, U, size, Rm, S, Rn, Rd)
+}
+
+function clause decode (0b11001110010 @ _ : bits(5) @ 0b10 @ _ : bits(2) @ 0b00 @ _ : bits(10) as op_code) = {
+ Rm : bits(5) = op_code[20 .. 16];
+ imm2 : bits(2) = op_code[13 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sm3_sm3tt1a_decode(Rm, imm2, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b10 @ _ : bits(1) @ 0b000 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ o1 : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_disparate_mul_accum_decode(Q, U, size, Rm, o1, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b101110 @ _ : bits(2) @ 0b0 @ _ : bits(5) @ 0b110 @ _ : bits(2) @ 0b1 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ rot : bits(2) = op_code[12 .. 11];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_fp_complex_decode(Q, U, size, Rm, rot, Rn, Rd)
+}
+
+/* Decode clauses: fixed opcode pattern match, field extraction, dispatch to
+   the named *_decode helper (PACIA, complex mul-acc by element, doubling
+   mul-acc disparate, add reduce, FP<->int convert). */
+function clause decode (0b110110101100000100 @ _ : bits(1) @ 0b000 @ _ : bits(10) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ opcode2 : bits(5) = op_code[20 .. 16];
+ Z : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_pac_pacia_dp_1src_decode(sf, S, opcode2, Z, Rn, Rd)
+}
+
+/* NOTE(review): this pattern is byte-identical to the later clause that
+   dispatches to integer_pac_autib_hint_decode; presumably the CRm/op2
+   fields are discriminated inside the helpers or by clause order — verify
+   against the generator / ARM hint-space encoding. */
+function clause decode (0b11010101000000110010 @ _ : bits(7) @ 0b11111 as op_code) = {
+ L : bits(1) = [op_code[21]];
+ op0 : bits(2) = op_code[20 .. 19];
+ op1 : bits(3) = op_code[18 .. 16];
+ CRn : bits(4) = op_code[15 .. 12];
+ CRm : bits(4) = op_code[11 .. 8];
+ op2 : bits(3) = op_code[7 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ integer_pac_pacia_hint_decode(L, op0, op1, CRn, CRm, op2, Rt)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b101111 @ _ : bits(8) @ 0b0 @ _ : bits(2) @ 0b1 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ rot : bits(2) = op_code[14 .. 13];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mulacc_complex_decode(Q, U, size, L, M, Rm, rot, H, Rn, Rd)
+}
+
+function clause decode (0b01011110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b10 @ _ : bits(1) @ 0b100 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ o1 : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_disparate_mul_dmacc_sisd_decode(U, size, Rm, o1, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b10 @ _ : bits(1) @ 0b100 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ o1 : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_disparate_mul_dmacc_simd_decode(Q, U, size, Rm, o1, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(2) @ 0b110001101110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_add_simd_decode(Q, U, size, Rn, Rd)
+}
+
+function clause decode (_ : bits(1) @ 0b0011110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b000000 @ _ : bits(10) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ rmode : bits(2) = op_code[20 .. 19];
+ opcode : bits(3) = op_code[18 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ float_convert_int_decode(sf, S, typ, rmode, opcode, Rn, Rd)
+}
+
+/* Decode clauses: fixed opcode pattern match, field extraction, dispatch to
+   the named *_decode helper (absolute difference, conditional select,
+   LDAPR/RCpc, FRECPX, extended-register add/sub, SHA1 parity, ADR/ADRP). */
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b01 @ _ : bits(1) @ 0b100 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ op : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_disparate_diff_decode(Q, U, size, Rm, op, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b011010100 @ _ : bits(9) @ 0b0 @ _ : bits(11) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op : bits(1) = [op_code[30]];
+ S : bits(1) = [op_code[29]];
+ Rm : bits(5) = op_code[20 .. 16];
+ cond : bits(4) = op_code[15 .. 12];
+ o2 : bits(1) = [op_code[10]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_conditional_select_decode(sf, op, S, Rm, cond, o2, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b111000101 @ _ : bits(5) @ 0b110000 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ A : bits(1) = [op_code[23]];
+ R : bits(1) = [op_code[22]];
+ Rs : bits(5) = op_code[20 .. 16];
+ o3 : bits(1) = [op_code[15]];
+ opc : bits(3) = op_code[14 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_orderedrcpc_decode(size, V, A, R, Rs, o3, opc, Rn, Rt)
+}
+
+function clause decode (0b0101111011111001111110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_frecpxfp16_decode(U, a, Rn, Rd)
+}
+
+function clause decode (0b010111101 @ _ : bits(1) @ 0b100001111110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_special_frecpx_decode(U, sz, Rn, Rd)
+}
+
+function clause decode (_ : bits(3) @ 0b01011001 @ _ : bits(21) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op : bits(1) = [op_code[30]];
+ S : bits(1) = [op_code[29]];
+ opt : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ option_name : bits(3) = op_code[15 .. 13];
+ imm3 : bits(3) = op_code[12 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_arithmetic_addsub_extendedreg_decode(sf, op, S, opt, Rm, option_name, imm3, Rn, Rd)
+}
+
+function clause decode (0b01011110000 @ _ : bits(5) @ 0b000100 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha3op_sha1hash_parity_decode(size, Rm, Rn, Rd)
+}
+
+function clause decode (_ : bits(3) @ 0b10000 @ _ : bits(24) as op_code) = {
+ op : bits(1) = [op_code[31]];
+ immlo : bits(2) = op_code[30 .. 29];
+ immhi : bits(19) = op_code[23 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_arithmetic_address_pcrel_decode(op, immlo, immhi, Rd)
+}
+
+/* Decode clauses: fixed opcode pattern match, field extraction, dispatch to
+   the named *_decode helper (vector single-structure loads/stores, integer
+   mul-accumulate, PAC load, FP16/FP maxnm reduce, AUTIB). */
+function clause decode (0b0 @ _ : bits(1) @ 0b0011010 @ _ : bits(2) @ 0b00000 @ _ : bits(16) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ L : bits(1) = [op_code[22]];
+ R : bits(1) = [op_code[21]];
+ opcode : bits(3) = op_code[15 .. 13];
+ S : bits(1) = [op_code[12]];
+ size : bits(2) = op_code[11 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_vector_single_nowb_aarch64_memory_vector_single_nowb__decode(Q, L, R, opcode, S, size, Rn, Rt)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b0011011 @ _ : bits(23) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ L : bits(1) = [op_code[22]];
+ R : bits(1) = [op_code[21]];
+ Rm : bits(5) = op_code[20 .. 16];
+ opcode : bits(3) = op_code[15 .. 13];
+ S : bits(1) = [op_code[12]];
+ size : bits(2) = op_code[11 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_vector_single_postinc_aarch64_memory_vector_single_nowb__decode(Q, L, R, Rm, opcode, S, size, Rn, Rt)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b100101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_mul_int_accum_decode(Q, U, size, Rm, Rn, Rd)
+}
+
+function clause decode (_ : bits(2) @ 0b111000 @ _ : bits(2) @ 0b1 @ _ : bits(10) @ 0b1 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ M : bits(1) = [op_code[23]];
+ S : bits(1) = [op_code[22]];
+ imm9 : bits(9) = op_code[20 .. 12];
+ W : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_single_general_immediate_signed_pac_decode(size, V, M, S, imm9, W, Rn, Rt)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(1) @ 0b0110000110010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ o1 : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_fp16maxnm_simd_decode(Q, U, o1, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b101110 @ _ : bits(2) @ 0b110000110010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ o1 : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_fpmaxnm_simd_decode(Q, U, o1, sz, Rn, Rd)
+}
+
+function clause decode (0b110110101100000100 @ _ : bits(1) @ 0b101 @ _ : bits(10) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ opcode2 : bits(5) = op_code[20 .. 16];
+ Z : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_pac_autib_dp_1src_decode(sf, S, opcode2, Z, Rn, Rd)
+}
+
+/* NOTE(review): this pattern is byte-identical to the earlier clause that
+   dispatches to integer_pac_pacia_hint_decode; presumably the CRm/op2
+   fields are discriminated inside the helpers or by clause order — verify
+   against the generator / ARM hint-space encoding. */
+function clause decode (0b11010101000000110010 @ _ : bits(7) @ 0b11111 as op_code) = {
+ L : bits(1) = [op_code[21]];
+ op0 : bits(2) = op_code[20 .. 19];
+ op1 : bits(3) = op_code[18 .. 16];
+ CRn : bits(4) = op_code[15 .. 12];
+ CRm : bits(4) = op_code[11 .. 8];
+ op2 : bits(3) = op_code[7 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ integer_pac_autib_hint_decode(L, op0, op1, CRn, CRm, op2, Rt)
+}
+
+/* Decode clauses: fixed opcode pattern match, field extraction, dispatch to
+   the named *_decode helper (SHA512SU1, mul long by element, conditional FP
+   compare, exclusives, fixed-point convert, FP round, shifted-reg logic). */
+function clause decode (0b11001110011 @ _ : bits(5) @ 0b100010 @ _ : bits(10) as op_code) = {
+ Rm : bits(5) = op_code[20 .. 16];
+ O : bits(1) = [op_code[14]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha512_sha512su1_decode(Rm, O, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01111 @ _ : bits(8) @ 0b1010 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mul_long_decode(Q, U, size, L, M, Rm, H, Rn, Rd)
+}
+
+function clause decode (0b00011110 @ _ : bits(2) @ 0b1 @ _ : bits(9) @ 0b01 @ _ : bits(10) as op_code) = {
+ M : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ cond : bits(4) = op_code[15 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ op : bits(1) = [op_code[4]];
+ nzcv : bits(4) = op_code[3 .. 0];
+ float_compare_cond_decode(M, S, typ, Rm, cond, Rn, op, nzcv)
+}
+
+function clause decode (_ : bits(2) @ 0b0010000 @ _ : bits(1) @ 0b0 @ _ : bits(21) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ o2 : bits(1) = [op_code[23]];
+ L : bits(1) = [op_code[22]];
+ o1 : bits(1) = [op_code[21]];
+ Rs : bits(5) = op_code[20 .. 16];
+ o0 : bits(1) = [op_code[15]];
+ Rt2 : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_exclusive_single_decode(size, o2, L, o1, Rs, o0, Rt2, Rn, Rt)
+}
+
+function clause decode (_ : bits(1) @ 0b0011110 @ _ : bits(2) @ 0b0 @ _ : bits(21) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ rmode : bits(2) = op_code[20 .. 19];
+ opcode : bits(3) = op_code[18 .. 16];
+ scale : bits(6) = op_code[15 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ float_convert_fix_decode(sf, S, typ, rmode, opcode, scale, Rn, Rd)
+}
+
+function clause decode (0b00011110 @ _ : bits(2) @ 0b1001 @ _ : bits(3) @ 0b10000 @ _ : bits(10) as op_code) = {
+ M : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ rmode : bits(3) = op_code[17 .. 15];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ float_arithmetic_round_decode(M, S, typ, rmode, Rn, Rd)
+}
+
+function clause decode (_ : bits(3) @ 0b01010 @ _ : bits(24) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ opc : bits(2) = op_code[30 .. 29];
+ shift : bits(2) = op_code[23 .. 22];
+ N : bits(1) = [op_code[21]];
+ Rm : bits(5) = op_code[20 .. 16];
+ imm6 : bits(6) = op_code[15 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_logical_shiftedreg_decode(sf, opc, shift, N, Rm, imm6, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b100000010010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_clsz_decode(Q, U, size, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(2) @ 0b01111 @ _ : bits(8) @ 0b0 @ _ : bits(1) @ 0b10 @ _ : bits(1) @ 0b0 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ L : bits(1) = [op_code[21]];
+ M : bits(1) = [op_code[20]];
+ Rm : bits(4) = op_code[19 .. 16];
+ o2 : bits(1) = [op_code[14]];
+ H : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_element_mulacc_long_decode(Q, U, size, L, M, Rm, o2, H, Rn, Rd)
+}
+
+function clause decode (0b011111110 @ _ : bits(7) @ 0b1000 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ op : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_rightnarrow_nonuniform_sisd_decode(U, immh, immb, op, Rn, Rd)
+}
+
+function clause decode (0b0 @ _ : bits(1) @ 0b1011110 @ _ : bits(7) @ 0b1000 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ op : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_rightnarrow_nonuniform_simd_decode(Q, U, immh, immb, op, Rn, Rd)
+}
+
+function clause decode (0b01101110000 @ _ : bits(5) @ 0b0 @ _ : bits(4) @ 0b1 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ op : bits(1) = [op_code[29]];
+ imm5 : bits(5) = op_code[20 .. 16];
+ imm4 : bits(4) = op_code[14 .. 11];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_transfer_vector_insert_decode(Q, op, imm5, imm4, Rn, Rd)
+}
+
+/* A64 decoder clauses (continued): CRC, vector narrowing extract,
+ * vector modified-immediate logical, saturating left shifts, add/sub with
+ * carry, FP compare, and compare-less-than-zero encodings.  Each clause
+ * matches a fixed/wildcard opcode pattern and dispatches to *_decode. */
+
+/* CRC computation group -> integer_crc_decode. */
+function clause decode (_ : bits(1) @ 0b0011010110 @ _ : bits(5) @ 0b010 @ _ : bits(13) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op : bits(1) = [op_code[30]];
+ S : bits(1) = [op_code[29]];
+ Rm : bits(5) = op_code[20 .. 16];
+ opcode2 : bits(3) = op_code[15 .. 13];
+ C : bits(1) = [op_code[12]];
+ sz : bits(2) = op_code[11 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_crc_decode(sf, op, S, Rm, opcode2, C, sz, Rn, Rd)
+}
+
+/* Vector non-saturating narrowing extract group. */
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(2) @ 0b100001001010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_extract_nosat_decode(Q, U, size, Rn, Rd)
+}
+
+/* Vector modified-immediate logical group; the immediate is scattered
+ * across the single bits a..h plus cmode. */
+function clause decode (0b0 @ _ : bits(2) @ 0b0111100000 @ _ : bits(7) @ 0b01 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ op : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[18]];
+ b : bits(1) = [op_code[17]];
+ c : bits(1) = [op_code[16]];
+ cmode : bits(4) = op_code[15 .. 12];
+ o2 : bits(1) = [op_code[11]];
+ d : bits(1) = [op_code[9]];
+ e : bits(1) = [op_code[8]];
+ f : bits(1) = [op_code[7]];
+ g : bits(1) = [op_code[6]];
+ h : bits(1) = [op_code[5]];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_logical_decode(Q, op, a, b, c, cmode, o2, d, e, f, g, h, Rd)
+}
+
+/* Scalar (SISD) saturating left shift by immediate. */
+function clause decode (0b01 @ _ : bits(1) @ 0b111110 @ _ : bits(7) @ 0b011 @ _ : bits(1) @ 0b01 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ op : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_leftsat_sisd_decode(U, immh, immb, op, Rn, Rd)
+}
+
+/* Vector (SIMD) variant of the saturating left shift above; adds Q. */
+function clause decode (0b0 @ _ : bits(2) @ 0b011110 @ _ : bits(7) @ 0b011 @ _ : bits(1) @ 0b01 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ immh : bits(4) = op_code[22 .. 19];
+ immb : bits(3) = op_code[18 .. 16];
+ op : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_shift_leftsat_simd_decode(Q, U, immh, immb, op, Rn, Rd)
+}
+
+/* Add/subtract with carry group -> integer_arithmetic_addsub_carry_decode. */
+function clause decode (_ : bits(3) @ 0b11010000 @ _ : bits(5) @ 0b000000 @ _ : bits(10) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op : bits(1) = [op_code[30]];
+ S : bits(1) = [op_code[29]];
+ Rm : bits(5) = op_code[20 .. 16];
+ opcode2 : bits(6) = op_code[15 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_arithmetic_addsub_carry_decode(sf, op, S, Rm, opcode2, Rn, Rd)
+}
+
+/* FP unconditional compare group; opc (not Rd) occupies bits 4..3. */
+function clause decode (0b00011110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b001000 @ _ : bits(7) @ 0b000 as op_code) = {
+ M : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ op : bits(2) = op_code[15 .. 14];
+ Rn : bits(5) = op_code[9 .. 5];
+ opc : bits(2) = op_code[4 .. 3];
+ float_compare_uncond_decode(M, S, typ, Rm, op, Rn, opc)
+}
+
+/* Scalar (SISD) integer compare-less-than-zero group. */
+function clause decode (0b01011110 @ _ : bits(2) @ 0b100000101010 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_cmp_int_lessthan_sisd_decode(U, size, Rn, Rd)
+}
+
+/* Vector (SIMD) variant of the compare-less-than-zero group; adds Q. */
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(2) @ 0b100000101010 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_cmp_int_lessthan_simd_decode(Q, U, size, Rn, Rd)
+}
+
+/* A64 decoder clauses (continued): FP compare-against-zero (fp16 and
+ * single/double, scalar and vector), SHA1 schedule update, saturating
+ * accumulate, system hints, pairwise max/min, variable shifts, and PACDB. */
+
+/* Scalar (SISD) fp16 compare-against-zero group. */
+function clause decode (0b01 @ _ : bits(1) @ 0b1111011111000110 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ op : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_cmp_fp16_bulk_sisd_decode(U, a, op, Rn, Rd)
+}
+
+/* Scalar (SISD) single/double FP compare-against-zero group; sz selects size. */
+function clause decode (0b01 @ _ : bits(1) @ 0b111101 @ _ : bits(1) @ 0b100000110 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ op : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_cmp_float_bulk_sisd_decode(U, sz, op, Rn, Rd)
+}
+
+/* Vector (SIMD) fp16 compare-against-zero group; adds Q. */
+function clause decode (0b0 @ _ : bits(2) @ 0b0111011111000110 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ op : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_cmp_fp16_bulk_simd_decode(Q, U, a, op, Rn, Rd)
+}
+
+/* Vector (SIMD) single/double FP compare-against-zero group; adds Q. */
+function clause decode (0b0 @ _ : bits(2) @ 0b011101 @ _ : bits(1) @ 0b100000110 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ op : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_cmp_float_bulk_simd_decode(Q, U, sz, op, Rn, Rd)
+}
+
+/* Crypto two-register SHA1 schedule update group. */
+function clause decode (0b0101111000101000000110 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_crypto_sha2op_sha1sched1_decode(size, Rn, Rd)
+}
+
+/* Scalar (SISD) unary saturating accumulate group. */
+function clause decode (0b01 @ _ : bits(1) @ 0b11110 @ _ : bits(2) @ 0b100000001110 @ _ : bits(10) as op_code) = {
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_add_saturating_sisd_decode(U, size, Rn, Rd)
+}
+
+/* Vector (SIMD) unary saturating accumulate group; adds Q. */
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b100000001110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_unary_add_saturating_simd_decode(Q, U, size, Rn, Rd)
+}
+
+/* System hint space (Rt fixed to 0b11111 by the pattern) -> system_hints_decode. */
+function clause decode (0b11010101000000110010 @ _ : bits(7) @ 0b11111 as op_code) = {
+ L : bits(1) = [op_code[21]];
+ op0 : bits(2) = op_code[20 .. 19];
+ op1 : bits(3) = op_code[18 .. 16];
+ CRn : bits(4) = op_code[15 .. 12];
+ CRm : bits(4) = op_code[11 .. 8];
+ op2 : bits(3) = op_code[7 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ system_hints_decode(L, op0, op1, CRn, CRm, op2, Rt)
+}
+
+/* Vector pairwise max/min group. */
+function clause decode (0b0 @ _ : bits(2) @ 0b01110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b1010 @ _ : bits(1) @ 0b1 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ size : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ o1 : bits(1) = [op_code[11]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_maxmin_pair_decode(Q, U, size, Rm, o1, Rn, Rd)
+}
+
+/* Variable (register-specified) shift group -> integer_shift_variable_decode. */
+function clause decode (_ : bits(1) @ 0b0011010110 @ _ : bits(5) @ 0b0010 @ _ : bits(12) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op : bits(1) = [op_code[30]];
+ S : bits(1) = [op_code[29]];
+ Rm : bits(5) = op_code[20 .. 16];
+ opcode2 : bits(4) = op_code[15 .. 12];
+ op2 : bits(2) = op_code[11 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_shift_variable_decode(sf, op, S, Rm, opcode2, op2, Rn, Rd)
+}
+
+/* Pointer-authentication PACDB (one-source data-processing) group. */
+function clause decode (0b110110101100000100 @ _ : bits(1) @ 0b011 @ _ : bits(10) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ opcode2 : bits(5) = op_code[20 .. 16];
+ Z : bits(1) = [op_code[13]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_pac_pacdb_dp_1src_decode(sf, S, opcode2, Z, Rn, Rd)
+}
+
+/* A64 decoder clauses (continued): unprivileged load/store, FP subtract-style
+ * vector ops, scalar FP add/sub, FP max/min reductions, bitfield extract,
+ * debug halt, widening multiply, and divide encodings. */
+
+/* Unprivileged load/store, signed immediate offset. */
+function clause decode (_ : bits(2) @ 0b111000 @ _ : bits(2) @ 0b0 @ _ : bits(9) @ 0b10 @ _ : bits(10) as op_code) = {
+ size : bits(2) = op_code[31 .. 30];
+ V : bits(1) = [op_code[26]];
+ opc : bits(2) = op_code[23 .. 22];
+ imm9 : bits(9) = op_code[20 .. 12];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rt : bits(5) = op_code[4 .. 0];
+ memory_single_general_immediate_signed_offset_unpriv_aarch64_memory_single_general_immediate_signed_offset_unpriv__decode(size, V, opc, imm9, Rn, Rt)
+}
+
+/* Vector fp16 subtract-style binary uniform group. */
+function clause decode (0b0 @ _ : bits(2) @ 0b01110110 @ _ : bits(5) @ 0b000101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ a : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_sub_fp16_simd_decode(Q, U, a, Rm, Rn, Rd)
+}
+
+/* Vector single/double FP subtract-style binary uniform group; sz selects size. */
+function clause decode (0b0 @ _ : bits(2) @ 0b011101 @ _ : bits(1) @ 0b1 @ _ : bits(5) @ 0b110101 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ sz : bits(1) = [op_code[22]];
+ Rm : bits(5) = op_code[20 .. 16];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_arithmetic_binary_uniform_sub_fp_simd_decode(Q, U, sz, Rm, Rn, Rd)
+}
+
+/* Scalar FP add/subtract group -> float_arithmetic_addsub_decode. */
+function clause decode (0b00011110 @ _ : bits(2) @ 0b1 @ _ : bits(5) @ 0b001 @ _ : bits(1) @ 0b10 @ _ : bits(10) as op_code) = {
+ M : bits(1) = [op_code[31]];
+ S : bits(1) = [op_code[29]];
+ typ : bits(2) = op_code[23 .. 22];
+ Rm : bits(5) = op_code[20 .. 16];
+ op : bits(1) = [op_code[12]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ float_arithmetic_addsub_decode(M, S, typ, Rm, op, Rn, Rd)
+}
+
+/* Vector fp16 max/min across-lanes reduction group. */
+function clause decode (0b0 @ _ : bits(1) @ 0b001110 @ _ : bits(1) @ 0b0110000111110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ o1 : bits(1) = [op_code[23]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_fp16max_simd_decode(Q, U, o1, Rn, Rd)
+}
+
+/* Vector single/double FP max/min across-lanes reduction group. */
+function clause decode (0b0 @ _ : bits(1) @ 0b101110 @ _ : bits(2) @ 0b110000111110 @ _ : bits(10) as op_code) = {
+ Q : bits(1) = [op_code[30]];
+ U : bits(1) = [op_code[29]];
+ o1 : bits(1) = [op_code[23]];
+ sz : bits(1) = [op_code[22]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ vector_reduce_fpmax_simd_decode(Q, U, o1, sz, Rn, Rd)
+}
+
+/* Extract-from-register-pair (immediate) group -> integer_insext_extract_immediate_decode. */
+function clause decode (_ : bits(1) @ 0b00100111 @ _ : bits(1) @ 0b0 @ _ : bits(21) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op21 : bits(2) = op_code[30 .. 29];
+ N : bits(1) = [op_code[22]];
+ o0 : bits(1) = [op_code[21]];
+ Rm : bits(5) = op_code[20 .. 16];
+ imms : bits(6) = op_code[15 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_insext_extract_immediate_decode(sf, op21, N, o0, Rm, imms, Rn, Rd)
+}
+
+/* Debug halt exception-generation group (imm16 payload). */
+function clause decode (0b11010100010 @ _ : bits(16) @ 0b00000 as op_code) = {
+ opc : bits(3) = op_code[23 .. 21];
+ imm16 : bits(16) = op_code[20 .. 5];
+ op2 : bits(3) = op_code[4 .. 2];
+ LL : bits(2) = op_code[1 .. 0];
+ system_exceptions_debug_halt_decode(opc, imm16, op2, LL)
+}
+
+/* Widening 32x32->64 multiply(-accumulate) group; Ra is the accumulator. */
+function clause decode (0b10011011 @ _ : bits(1) @ 0b01 @ _ : bits(21) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op54 : bits(2) = op_code[30 .. 29];
+ U : bits(1) = [op_code[23]];
+ Rm : bits(5) = op_code[20 .. 16];
+ o0 : bits(1) = [op_code[15]];
+ Ra : bits(5) = op_code[14 .. 10];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_arithmetic_mul_widening_3264_decode(sf, op54, U, Rm, o0, Ra, Rn, Rd)
+}
+
+/* Divide group -> integer_arithmetic_div_decode. */
+function clause decode (_ : bits(1) @ 0b0011010110 @ _ : bits(5) @ 0b00001 @ _ : bits(11) as op_code) = {
+ sf : bits(1) = [op_code[31]];
+ op : bits(1) = [op_code[30]];
+ S : bits(1) = [op_code[29]];
+ Rm : bits(5) = op_code[20 .. 16];
+ opcode2 : bits(5) = op_code[15 .. 11];
+ o1 : bits(1) = [op_code[10]];
+ Rn : bits(5) = op_code[9 .. 5];
+ Rd : bits(5) = op_code[4 .. 0];
+ integer_arithmetic_div_decode(sf, op, S, Rm, opcode2, o1, Rn, Rd)
+}
diff --git a/aarch64/full/spec.sail b/aarch64/full/spec.sail
new file mode 100644
index 00000000..f468302b
--- /dev/null
+++ b/aarch64/full/spec.sail
@@ -0,0 +1,19305 @@
+/* Base ASL-style scalar enumerations used throughout the model. */
+enum boolean = {FALSE, TRUE}
+
+enum signal = {LOW, HIGH}
+
+/* Internal return codes: why the generated model stopped executing an
+ * instruction (normal completion, UNDEFINED, UNPREDICTABLE, etc.). */
+enum __RetCode = {
+ __RC_OK,
+ __RC_UNDEFINED,
+ __RC_UNPREDICTABLE,
+ __RC_SEE,
+ __RC_IMPLEMENTATION_DEFINED,
+ __RC_SUBARCHITECTURE_DEFINED,
+ __RC_EXCEPTION_TAKEN,
+ __RC_ASSERT_FAILED,
+ __RC_UNMATCHED_CASE
+}
+
+/* Raw bit-vector aliases for system register contents; no field structure
+ * is imposed at the type level. */
+type CPACRType = bits(32)
+
+type CNTKCTLType = bits(32)
+
+type ESRType = bits(32)
+
+type FPCRType = bits(32)
+
+type MAIRType = bits(64)
+
+type SCRType = bits(32)
+
+type SCTLRType = bits(32)
+
+/* Operation selector for FP<->integer convert and move instructions. */
+enum FPConvOp = {
+ FPConvOp_CVT_FtoI,
+ FPConvOp_CVT_ItoF,
+ FPConvOp_MOV_FtoI,
+ FPConvOp_MOV_ItoF,
+ FPConvOp_CVT_FtoI_JS
+}
+
+/* Exception classes recorded in an ExceptionRecord (see struct below). */
+enum Exception = {
+ Exception_Uncategorized,
+ Exception_WFxTrap,
+ Exception_CP15RTTrap,
+ Exception_CP15RRTTrap,
+ Exception_CP14RTTrap,
+ Exception_CP14DTTrap,
+ Exception_AdvSIMDFPAccessTrap,
+ Exception_FPIDTrap,
+ Exception_PACTrap,
+ Exception_CP14RRTTrap,
+ Exception_IllegalState,
+ Exception_SupervisorCall,
+ Exception_HypervisorCall,
+ Exception_MonitorCall,
+ Exception_SystemRegisterTrap,
+ Exception_ERetTrap,
+ Exception_InstructionAbort,
+ Exception_PCAlignment,
+ Exception_DataAbort,
+ Exception_SPAlignment,
+ Exception_FPTrappedException,
+ Exception_SError,
+ Exception_Breakpoint,
+ Exception_SoftwareStep,
+ Exception_Watchpoint,
+ Exception_SoftwareBreakpoint,
+ Exception_VectorCatch,
+ Exception_IRQ,
+ Exception_FIQ
+}
+
+/* Supported architecture versions (v8.0 through v8.3). */
+enum ArchVersion = {ARMv8p0, ARMv8p1, ARMv8p2, ARMv8p3}
+
+/* Identifiers for CONSTRAINED UNPREDICTABLE situations; each is resolved
+ * to one of the Constraint values below. */
+enum Unpredictable = {
+ Unpredictable_WBOVERLAPLD,
+ Unpredictable_WBOVERLAPST,
+ Unpredictable_LDPOVERLAP,
+ Unpredictable_BASEOVERLAP,
+ Unpredictable_DATAOVERLAP,
+ Unpredictable_DEVPAGE2,
+ Unpredictable_INSTRDEVICE,
+ Unpredictable_RESCPACR,
+ Unpredictable_RESMAIR,
+ Unpredictable_RESTEXCB,
+ Unpredictable_RESPRRR,
+ Unpredictable_RESDACR,
+ Unpredictable_RESVTCRS,
+ Unpredictable_RESTnSZ,
+ Unpredictable_OORTnSZ,
+ Unpredictable_LARGEIPA,
+ Unpredictable_ESRCONDPASS,
+ Unpredictable_ILZEROIT,
+ Unpredictable_ILZEROT,
+ Unpredictable_BPVECTORCATCHPRI,
+ Unpredictable_VCMATCHHALF,
+ Unpredictable_VCMATCHDAPA,
+ Unpredictable_WPMASKANDBAS,
+ Unpredictable_WPBASCONTIGUOUS,
+ Unpredictable_RESWPMASK,
+ Unpredictable_WPMASKEDBITS,
+ Unpredictable_RESBPWPCTRL,
+ Unpredictable_BPNOTIMPL,
+ Unpredictable_RESBPTYPE,
+ Unpredictable_BPNOTCTXCMP,
+ Unpredictable_BPMATCHHALF,
+ Unpredictable_BPMISMATCHHALF,
+ Unpredictable_RESTARTALIGNPC,
+ Unpredictable_RESTARTZEROUPPERPC,
+ Unpredictable_ZEROUPPER,
+ Unpredictable_ERETZEROUPPERPC,
+ Unpredictable_A32FORCEALIGNPC,
+ Unpredictable_SMD,
+ Unpredictable_AFUPDATE,
+ Unpredictable_IESBinDebug,
+ Unpredictable_ZEROPMSEVFR,
+ Unpredictable_NOOPTYPES,
+ Unpredictable_ZEROMINLATENCY,
+ Unpredictable_CLEARERRITEZERO,
+ Unpredictable_TBD
+}
+
+/* Possible resolutions of a CONSTRAINED UNPREDICTABLE situation. */
+enum Constraint = {
+ Constraint_NONE,
+ Constraint_UNKNOWN,
+ Constraint_UNDEF,
+ Constraint_UNDEFEL0,
+ Constraint_NOP,
+ Constraint_TRUE,
+ Constraint_FALSE,
+ Constraint_DISABLED,
+ Constraint_UNCOND,
+ Constraint_COND,
+ Constraint_ADDITIONAL_DECODE,
+ Constraint_WBSUPPRESS,
+ Constraint_FAULT,
+ Constraint_FORCE,
+ Constraint_FORCENOSLCHECK
+}
+
+/* Current instruction set: A64, A32, or T32 (Thumb). */
+enum InstrSet = {InstrSet_A64, InstrSet_A32, InstrSet_T32}
+
+/* Processor state (PSTATE): condition flags N/Z/C/V, interrupt masks
+ * D/A/I/F, EL and stack-pointer select, plus AArch32-only fields
+ * (GE, IT, J, T, E, M).  Backs the PSTATE register declared later. */
+struct ProcState = {
+ N : bits(1),
+ Z : bits(1),
+ C : bits(1),
+ V : bits(1),
+ D : bits(1),
+ A : bits(1),
+ I : bits(1),
+ F : bits(1),
+ PAN : bits(1),
+ UAO : bits(1),
+ SS : bits(1),
+ IL : bits(1),
+ EL : bits(2),
+ nRW : bits(1),
+ SP : bits(1),
+ Q : bits(1),
+ GE : bits(4),
+ IT : bits(8),
+ J : bits(1),
+ T : bits(1),
+ E : bits(1),
+ M : bits(5)
+}
+
+/* Classification of control-flow transfers (used e.g. by Hint_Branch). */
+enum BranchType = {
+ BranchType_CALL,
+ BranchType_ERET,
+ BranchType_DBGEXIT,
+ BranchType_RET,
+ BranchType_JMP,
+ BranchType_EXCEPTION,
+ BranchType_UNKNOWN
+}
+
+/* Pending-exception description: class, 25-bit syndrome, faulting virtual
+ * address, and (when ipavalid) the intermediate physical address. */
+struct ExceptionRecord = {
+ typ : Exception,
+ syndrome : bits(25),
+ vaddress : bits(64),
+ ipavalid : bool,
+ ipaddress : bits(52)
+}
+
+/* Memory-system fault categories. */
+enum Fault = {
+ Fault_None,
+ Fault_AccessFlag,
+ Fault_Alignment,
+ Fault_Background,
+ Fault_Domain,
+ Fault_Permission,
+ Fault_Translation,
+ Fault_AddressSize,
+ Fault_SyncExternal,
+ Fault_SyncExternalOnWalk,
+ Fault_SyncParity,
+ Fault_SyncParityOnWalk,
+ Fault_AsyncParity,
+ Fault_AsyncExternal,
+ Fault_Debug,
+ Fault_TLBConflict,
+ Fault_Lockdown,
+ Fault_Exclusive,
+ Fault_ICacheMaint
+}
+
+/* Memory access type: influences ordering, translation and fault reporting. */
+enum AccType = {
+ AccType_NORMAL,
+ AccType_VEC,
+ AccType_STREAM,
+ AccType_VECSTREAM,
+ AccType_ATOMIC,
+ AccType_ATOMICRW,
+ AccType_ORDERED,
+ AccType_ORDEREDRW,
+ AccType_LIMITEDORDERED,
+ AccType_UNPRIV,
+ AccType_IFETCH,
+ AccType_PTW,
+ AccType_DC,
+ AccType_IC,
+ AccType_DCZVA,
+ AccType_AT
+}
+
+/* Full description of a memory fault: category, access type, IPA,
+ * stage-2-walk flag, direction, translation level, and syndrome details. */
+struct FaultRecord = {
+ typ : Fault,
+ acctype : AccType,
+ ipaddress : bits(52),
+ s2fs1walk : bool,
+ write : bool,
+ level : int,
+ extflag : bits(1),
+ secondstage : bool,
+ domain : bits(4),
+ errortype : bits(2),
+ debugmoe : bits(4)
+}
+
+/* Shareability domain a barrier applies to. */
+enum MBReqDomain = {
+ MBReqDomain_Nonshareable,
+ MBReqDomain_InnerShareable,
+ MBReqDomain_OuterShareable,
+ MBReqDomain_FullSystem
+}
+
+/* Access kinds ordered by a barrier. */
+enum MBReqTypes = {MBReqTypes_Reads, MBReqTypes_Writes, MBReqTypes_All}
+
+enum MemType = {MemType_Normal, MemType_Device}
+
+/* Device memory sub-types (Gathering / Reordering / Early-ack attributes). */
+enum DeviceType = {
+ DeviceType_GRE,
+ DeviceType_nGRE,
+ DeviceType_nGnRE,
+ DeviceType_nGnRnE
+}
+
+/* Cacheability attributes and allocation hints for one cache level
+ * (encodings given by the MemAttr_*/MemHint_* constants later in the file). */
+struct MemAttrHints = {attrs : bits(2), hints : bits(2), transient : bool}
+
+/* Complete memory-type description: Normal/Device, device sub-type, inner
+ * and outer cacheability, and shareability. */
+struct MemoryAttributes = {
+ typ : MemType,
+ device : DeviceType,
+ inner : MemAttrHints,
+ outer : MemAttrHints,
+ shareable : bool,
+ outershareable : bool
+}
+
+/* Physical address (52-bit) plus Non-secure bit. */
+struct FullAddress = {physicaladdress : bits(52), NS : bits(1)}
+
+/* Result of address translation: fault status, memory attributes, physical
+ * address, and the originating virtual address. */
+struct AddressDescriptor = {
+ fault : FaultRecord,
+ memattrs : MemoryAttributes,
+ paddress : FullAddress,
+ vaddress : bits(64)
+}
+
+/* Hardware page-table-descriptor update request: access flag and/or access
+ * permission bits to set, and where the descriptor lives. */
+struct DescriptorUpdate = {AF : bool, AP : bool, descaddr : AddressDescriptor}
+
+/* Operation selector for atomic read-modify-write memory instructions. */
+enum MemAtomicOp = {
+ MemAtomicOp_ADD,
+ MemAtomicOp_BIC,
+ MemAtomicOp_EOR,
+ MemAtomicOp_ORR,
+ MemAtomicOp_SMAX,
+ MemAtomicOp_SMIN,
+ MemAtomicOp_UMAX,
+ MemAtomicOp_UMIN,
+ MemAtomicOp_SWP
+}
+
+/* Classification of a floating-point value after unpacking. */
+enum FPType = {
+ FPType_Nonzero,
+ FPType_Zero,
+ FPType_Infinity,
+ FPType_QNaN,
+ FPType_SNaN
+}
+
+/* IEEE-style floating-point exception kinds. */
+enum FPExc = {
+ FPExc_InvalidOp,
+ FPExc_DivideByZero,
+ FPExc_Overflow,
+ FPExc_Underflow,
+ FPExc_Inexact,
+ FPExc_InputDenorm
+}
+
+/* Floating-point rounding modes. */
+enum FPRounding = {
+ FPRounding_TIEEVEN,
+ FPRounding_POSINF,
+ FPRounding_NEGINF,
+ FPRounding_ZERO,
+ FPRounding_TIEAWAY,
+ FPRounding_ODD
+}
+
+/* Outcome of a system-register access permission check. */
+enum SysRegAccess = {
+ SysRegAccess_OK,
+ SysRegAccess_UNDEFINED,
+ SysRegAccess_TrapToEL1,
+ SysRegAccess_TrapToEL2,
+ SysRegAccess_TrapToEL3
+}
+
+/* AArch32 shift kinds (includes RRX); ShiftType is the A64 subset. */
+enum SRType = {SRType_LSL, SRType_LSR, SRType_ASR, SRType_ROR, SRType_RRX}
+
+enum ShiftType = {ShiftType_LSL, ShiftType_LSR, ShiftType_ASR, ShiftType_ROR}
+
+enum PrefetchHint = {Prefetch_READ, Prefetch_WRITE, Prefetch_EXEC}
+
+/* Interrupt and cross-trigger-interface signal identifiers. */
+enum InterruptID = {
+ InterruptID_PMUIRQ,
+ InterruptID_COMMIRQ,
+ InterruptID_CTIIRQ,
+ InterruptID_COMMRX,
+ InterruptID_COMMTX
+}
+
+enum CrossTriggerOut = {
+ CrossTriggerOut_DebugRequest,
+ CrossTriggerOut_RestartRequest,
+ CrossTriggerOut_IRQ,
+ CrossTriggerOut_RSVD3,
+ CrossTriggerOut_TraceExtIn0,
+ CrossTriggerOut_TraceExtIn1,
+ CrossTriggerOut_TraceExtIn2,
+ CrossTriggerOut_TraceExtIn3
+}
+
+enum CrossTriggerIn = {
+ CrossTriggerIn_CrossHalt,
+ CrossTriggerIn_PMUOverflow,
+ CrossTriggerIn_RSVD2,
+ CrossTriggerIn_RSVD3,
+ CrossTriggerIn_TraceExtOut0,
+ CrossTriggerIn_TraceExtOut1,
+ CrossTriggerIn_TraceExtOut2,
+ CrossTriggerIn_TraceExtOut3
+}
+
+enum MemBarrierOp = {MemBarrierOp_DSB, MemBarrierOp_DMB, MemBarrierOp_ISB}
+
+/* Parameters of one memory access used during translation/permission checks. */
+struct AccessDescriptor = {
+ acctype : AccType,
+ page_table_walk : bool,
+ secondstage : bool,
+ s2fs1walk : bool,
+ level : int
+}
+
+/* Access permissions from a translation: AP bits plus execute-never bits. */
+struct Permissions = {ap : bits(3), xn : bits(1), xxn : bits(1), pxn : bits(1)}
+
+/* One TLB entry / translation result: permissions, not-global and domain
+ * bits, contiguous hint, walk level, block size, any pending descriptor
+ * update, and the resulting address descriptor. */
+struct TLBRecord = {
+ perms : Permissions,
+ nG : bits(1),
+ domain : bits(4),
+ contiguous : bool,
+ level : int,
+ blocksize : int,
+ descupdate : DescriptorUpdate,
+ CnP : bits(1),
+ addrdesc : AddressDescriptor
+}
+
+/* Vector modified-immediate operation selector. */
+enum ImmediateOp = {
+ ImmediateOp_MOVI,
+ ImmediateOp_MVNI,
+ ImmediateOp_ORR,
+ ImmediateOp_BIC
+}
+
+/* Move-wide variant: MOVN / MOVZ / MOVK. */
+enum MoveWideOp = {MoveWideOp_N, MoveWideOp_Z, MoveWideOp_K}
+
+/* System access shapes: single register, register pair, or data transfer. */
+enum SystemAccessType = {
+ SystemAccessType_RT,
+ SystemAccessType_RRT,
+ SystemAccessType_DT
+}
+
+/* Operation-selector enumerations for instruction execution, plus the
+ * PC-sample record and the internal instruction-encoding tag. */
+enum VBitOp = {VBitOp_VBIF, VBitOp_VBIT, VBitOp_VBSL, VBitOp_VEOR}
+
+enum TimeStamp = {TimeStamp_None, TimeStamp_Virtual, TimeStamp_Physical}
+
+enum PrivilegeLevel = {PL3, PL2, PL1, PL0}
+
+/* AArch32 SError syndrome fields (AET, ExT). */
+struct AArch32_SErrorSyndrome = {AET : bits(2), ExT : bits(1)}
+
+/* System instruction classes (address translate, cache/TLB maintenance, SYS). */
+enum SystemOp = {Sys_AT, Sys_DC, Sys_IC, Sys_TLBI, Sys_SYS}
+
+/* PC-sampling record exposed to external debug.
+ * NOTE(review): `valid_name` presumably corresponds to ASL `valid`, renamed
+ * to avoid a reserved word -- confirm against the translation conventions. */
+struct PCSample = {
+ valid_name : bool,
+ pc : bits(64),
+ el : bits(2),
+ rw : bits(1),
+ ns : bits(1),
+ contextidr : bits(32),
+ contextidr_el2 : bits(32),
+ vmid : bits(16)
+}
+
+/* Across-lanes reduction operation selector. */
+enum ReduceOp = {
+ ReduceOp_FMINNUM,
+ ReduceOp_FMAXNUM,
+ ReduceOp_FMIN,
+ ReduceOp_FMAX,
+ ReduceOp_FADD,
+ ReduceOp_ADD
+}
+
+enum LogicalOp = {LogicalOp_AND, LogicalOp_EOR, LogicalOp_ORR}
+
+/* Sign/zero extension kinds by source width. */
+enum ExtendType = {
+ ExtendType_SXTB,
+ ExtendType_SXTH,
+ ExtendType_SXTW,
+ ExtendType_SXTX,
+ ExtendType_UXTB,
+ ExtendType_UXTH,
+ ExtendType_UXTW,
+ ExtendType_UXTX
+}
+
+/* System hint instruction selector (NOP, YIELD, WFE, ...). */
+enum SystemHintOp = {
+ SystemHintOp_NOP,
+ SystemHintOp_YIELD,
+ SystemHintOp_WFE,
+ SystemHintOp_WFI,
+ SystemHintOp_SEV,
+ SystemHintOp_SEVL,
+ SystemHintOp_ESB,
+ SystemHintOp_PSB
+}
+
+enum MemOp = {MemOp_LOAD, MemOp_STORE, MemOp_PREFETCH}
+
+enum OpType = {
+ OpType_Load,
+ OpType_Store,
+ OpType_LoadAtomic,
+ OpType_Branch,
+ OpType_Other
+}
+
+enum FPUnaryOp = {FPUnaryOp_ABS, FPUnaryOp_MOV, FPUnaryOp_NEG, FPUnaryOp_SQRT}
+
+enum CompareOp = {
+ CompareOp_GT,
+ CompareOp_GE,
+ CompareOp_EQ,
+ CompareOp_LE,
+ CompareOp_LT
+}
+
+/* PSTATE field selector for MSR (immediate). */
+enum PSTATEField = {
+ PSTATEField_DAIFSet,
+ PSTATEField_DAIFClr,
+ PSTATEField_PAN,
+ PSTATEField_UAO,
+ PSTATEField_SP
+}
+
+enum FPMaxMinOp = {
+ FPMaxMinOp_MAX,
+ FPMaxMinOp_MIN,
+ FPMaxMinOp_MAXNUM,
+ FPMaxMinOp_MINNUM
+}
+
+enum CountOp = {CountOp_CLZ, CountOp_CLS, CountOp_CNT}
+
+/* AArch32 (VFP/ASIMD) operation selectors. */
+enum VFPNegMul = {VFPNegMul_VNMLA, VFPNegMul_VNMLS, VFPNegMul_VNMUL}
+
+enum VBitOps = {VBitOps_VBIF, VBitOps_VBIT, VBitOps_VBSL}
+
+enum VCGEtype = {VCGEtype_signed, VCGEtype_unsigned, VCGEtype_fp}
+
+enum VCGTtype = {VCGTtype_signed, VCGTtype_unsigned, VCGTtype_fp}
+
+/* Internal tag for the encoding of the current instruction. */
+enum __InstrEnc = {__A64, __A32, __T16, __T32}
+
+/* Forward `val` declarations (with effect annotations) for the stage-2
+ * address-translation helpers; their bodies are defined elsewhere in this
+ * file.  Declared early so callers can be defined before the bodies. */
+val AArch64_CheckAndUpdateDescriptor_SecondStage : (DescriptorUpdate, FaultRecord, bits(64), AccType, bool, bool, bool) -> FaultRecord effect {escape, rreg, rmem, wmem, undef}
+
+val AArch64_TranslationTableWalk_SecondStage : (bits(52), bits(64), AccType, bool, bool, int) -> TLBRecord effect {escape, rreg, rmem, undef}
+
+val AArch64_SecondStageTranslate : (AddressDescriptor, bits(64), AccType, bool, bool, bool, int, bool) -> AddressDescriptor effect {rreg, escape, rmem, undef, wmem}
+
+val AArch64_CheckAndUpdateDescriptor : (DescriptorUpdate, FaultRecord, bool, bits(64), AccType, bool, bool, bool) -> FaultRecord effect {escape, rreg, rmem, wmem, undef}
+
+/* Model-internal state (double-underscore prefix = not architectural
+ * registers) plus the architectural register file and PC. */
+register __unconditional : bool
+
+register __currentCond : bits(4)
+
+/* UNKNOWN-value generators: the model resolves ASL UNKNOWN to fixed values. */
+val __UNKNOWN_real : unit -> real
+
+function __UNKNOWN_real () = return(0.0)
+
+val __UNKNOWN_integer : unit -> int
+
+function __UNKNOWN_integer () = return(0)
+
+/* Encoding tag and raw bits of the instruction currently executing. */
+register __ThisInstrEnc : __InstrEnc
+
+register __ThisInstr : bits(32)
+
+register __Sleeping : bool
+
+register __PendingPhysicalSError : bool
+
+register __PendingInterrupt : bool
+
+register __Memory : bits(52)
+
+/* Local exclusive-monitor state (see IsExclusiveLocal below). */
+register __ExclusiveLocal : bool
+
+register __BranchTaken : bool
+
+/* Architectural register state: 32 x 128-bit SIMD&FP registers, 31 x 64-bit
+ * general-purpose registers, and the program counter. */
+register _V : vector(32, dec, bits(128))
+
+register _R : vector(31, dec, bits(64))
+
+register _PC : bits(64)
+
+/* Accessor returning the current PC value. */
+val aget_PC : unit -> bits(64) effect {rreg}
+
+function aget_PC () = return(_PC)
+
+/* Virtualization and vector-base system registers (AArch64 and AArch32). */
+register VTTBR_EL2 : bits(64)
+
+register VTCR_EL2 : bits(32)
+
+register VSESR_EL2 : bits(32)
+
+register VDFSR : bits(32)
+
+val __UNKNOWN_VBitOp : unit -> VBitOp
+
+function __UNKNOWN_VBitOp () = return(VBitOp_VBIF)
+
+register VBAR_EL3 : bits(64)
+
+register VBAR_EL2 : bits(64)
+
+register VBAR_EL1 : bits(64)
+
+register VBAR : bits(32)
+
+/* Terminates model execution on an UNDEFINED fault (asserts false). */
+val UndefinedFault : unit -> unit effect {escape}
+
+function UndefinedFault () = assert(false, "Undefined fault")
+
+/* Returns the low 'N bits of the current PC.
+ * NOTE(review): 'N is only constrained to be >= 0; slice assumes 'N <= 64 --
+ * confirm callers never instantiate a wider width. */
+val ThisInstrAddr : forall ('N : Int), 'N >= 0. unit -> bits('N) effect {rreg}
+
+function ThisInstrAddr () = return(slice(_PC, 0, 'N))
+
+/* Raw 32-bit encoding of the instruction currently executing. */
+val ThisInstr : unit -> bits(32) effect {rreg}
+
+function ThisInstr () = return(__ThisInstr)
+
+/* Translation-table base and translation-control registers. */
+register TTBR1_EL2 : bits(64)
+
+register TTBR1_EL1 : bits(64)
+
+register TTBR0_EL3 : bits(64)
+
+register TTBR0_EL2 : bits(64)
+
+register TTBR0_EL1 : bits(64)
+
+register TTBCR : bits(32)
+
+register TCR_EL3 : bits(32)
+
+register TCR_EL2 : bits(64)
+
+register TCR_EL1 : bits(64)
+
+val __UNKNOWN_SystemHintOp : unit -> SystemHintOp
+
+function __UNKNOWN_SystemHintOp () = return(SystemHintOp_NOP)
+
+/* Context synchronization is a no-op in this sequential model. */
+val SynchronizeContext : unit -> unit
+
+function SynchronizeContext () = ()
+
+/* Banked stack pointers and saved program status registers (per EL and,
+ * for AArch32, per mode), plus secure-configuration/system-control state. */
+register SP_mon : bits(32)
+
+register SP_EL3 : bits(64)
+
+register SP_EL2 : bits(64)
+
+register SP_EL1 : bits(64)
+
+register SP_EL0 : bits(64)
+
+register SPSR_und : bits(32)
+
+register SPSR_svc : bits(32)
+
+register SPSR_mon : bits(32)
+
+register SPSR_irq : bits(32)
+
+register SPSR_hyp : bits(32)
+
+register SPSR_fiq : bits(32)
+
+register SPSR_abt : bits(32)
+
+register SPSR_EL3 : bits(32)
+
+register SPSR_EL2 : bits(32)
+
+register SPSR_EL1 : bits(32)
+
+/* Secure privileged invasive debug enable input signal. */
+register SPIDEN : signal
+
+/* True when a physical SError interrupt is pending (model-internal flag). */
+val SErrorPending : unit -> bool effect {rreg}
+
+function SErrorPending () = return(__PendingPhysicalSError)
+
+register SDER : bits(32)
+
+register SDCR : bits(32)
+
+register SCTLR_EL3 : bits(32)
+
+register SCTLR_EL2 : bits(32)
+
+register SCTLR_EL1 : bits(32)
+
+register SCTLR : bits(32)
+
+register SCR_EL3 : bits(32)
+
+register SCR : bits(32)
+
+/* External-debug reset is a no-op in this model. */
+val ResetExternalDebugRegisters : bool -> unit
+
+function ResetExternalDebugRegisters cold_reset = ()
+
+register RVBAR_EL3 : bits(64)
+
+register RVBAR_EL2 : bits(64)
+
+register RVBAR_EL1 : bits(64)
+
+register RC : vector(5, dec, bits(64))
+
+val ProfilingSynchronizationBarrier : unit -> unit
+
+function ProfilingSynchronizationBarrier () = ()
+
+/* Always processor 0: the model presumably describes a single PE -- confirm. */
+val ProcessorID : unit -> int
+
+function ProcessorID () = return(0)
+
+val __UNKNOWN_PrefetchHint : unit -> PrefetchHint
+
+function __UNKNOWN_PrefetchHint () = return(Prefetch_READ)
+
+val __UNKNOWN_PSTATEField : unit -> PSTATEField
+
+function __UNKNOWN_PSTATEField () = return(PSTATEField_DAIFSet)
+
+/* Current processor state (fields defined by struct ProcState). */
+register PSTATE : ProcState
+
+/* Pointer-authentication cipher layer: permutes the sixteen 4-bit cells of a
+ * 64-bit value according to a fixed table.  Each line copies one source
+ * nibble (slice(indata, src, 4)) to one destination position.
+ * Note: the initial `undefined` binding of outdata is immediately shadowed
+ * by the re-declaration on the next line (translator idiom, harmless). */
+val PACCellShuffle : bits(64) -> bits(64) effect {undef}
+
+function PACCellShuffle indata = {
+ outdata : bits(64) = undefined;
+ outdata : bits(64) = __SetSlice_bits(64, 4, outdata, 0, slice(indata, 52, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 4, slice(indata, 24, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 8, slice(indata, 44, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 12, slice(indata, 0, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 16, slice(indata, 28, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 20, slice(indata, 48, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 24, slice(indata, 4, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 28, slice(indata, 40, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 32, slice(indata, 32, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 36, slice(indata, 12, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 40, slice(indata, 56, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 44, slice(indata, 20, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 48, slice(indata, 8, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 52, slice(indata, 36, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 56, slice(indata, 16, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 60, slice(indata, 60, 4));
+ return(outdata)
+}
+
+/* Inverse of PACCellShuffle: applies the inverse nibble permutation. */
+val PACCellInvShuffle : bits(64) -> bits(64) effect {undef}
+
+function PACCellInvShuffle indata = {
+ outdata : bits(64) = undefined;
+ outdata : bits(64) = __SetSlice_bits(64, 4, outdata, 0, slice(indata, 12, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 4, slice(indata, 24, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 8, slice(indata, 48, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 12, slice(indata, 36, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 16, slice(indata, 56, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 20, slice(indata, 44, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 24, slice(indata, 4, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 28, slice(indata, 16, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 32, slice(indata, 32, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 36, slice(indata, 52, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 40, slice(indata, 28, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 44, slice(indata, 8, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 48, slice(indata, 20, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 52, slice(indata, 0, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 56, slice(indata, 40, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 60, slice(indata, 60, 4));
+ return(outdata)
+}
+
+register OSLSR_EL1 : bits(32)
+
+register OSDLR_EL1 : bits(32)
+
+val __UNKNOWN_MoveWideOp : unit -> MoveWideOp
+
+function __UNKNOWN_MoveWideOp () = return(MoveWideOp_N)
+
+val __UNKNOWN_MemType : unit -> MemType
+
+function __UNKNOWN_MemType () = return(MemType_Normal)
+
+val __UNKNOWN_MemOp : unit -> MemOp
+
+function __UNKNOWN_MemOp () = return(MemOp_LOAD)
+
+let MemHint_RWA : vector(2, dec, bit) = 0b11
+
+let MemHint_RA : vector(2, dec, bit) = 0b10
+
+let MemHint_No : vector(2, dec, bit) = 0b00
+
+val __UNKNOWN_MemBarrierOp : unit -> MemBarrierOp
+
+function __UNKNOWN_MemBarrierOp () = return(MemBarrierOp_DSB)
+
+let MemAttr_WT : vector(2, dec, bit) = 0b10
+
+let MemAttr_WB : vector(2, dec, bit) = 0b11
+
+let MemAttr_NC : vector(2, dec, bit) = 0b00
+
+val __UNKNOWN_MemAtomicOp : unit -> MemAtomicOp
+
+function __UNKNOWN_MemAtomicOp () = return(MemAtomicOp_ADD)
+
+register MDSCR_EL1 : bits(32)
+
+register MDCR_EL3 : bits(32)
+
+register MDCR_EL2 : bits(32)
+
+val __UNKNOWN_MBReqTypes : unit -> MBReqTypes
+
+function __UNKNOWN_MBReqTypes () = return(MBReqTypes_Reads)
+
+val __UNKNOWN_MBReqDomain : unit -> MBReqDomain
+
+function __UNKNOWN_MBReqDomain () = return(MBReqDomain_Nonshareable)
+
+register MAIR_EL3 : bits(64)
+
+register MAIR_EL2 : bits(64)
+
+register MAIR_EL1 : bits(64)
+
+let M32_User : vector(5, dec, bit) = 0b10000
+
+let M32_Undef : vector(5, dec, bit) = 0b11011
+
+let M32_System : vector(5, dec, bit) = 0b11111
+
+let M32_Svc : vector(5, dec, bit) = 0b10011
+
+let M32_Monitor : vector(5, dec, bit) = 0b10110
+
+let M32_IRQ : vector(5, dec, bit) = 0b10010
+
+let M32_Hyp : vector(5, dec, bit) = 0b11010
+
+let M32_FIQ : vector(5, dec, bit) = 0b10001
+
+let M32_Abort : vector(5, dec, bit) = 0b10111
+
+val __UNKNOWN_LogicalOp : unit -> LogicalOp
+
+function __UNKNOWN_LogicalOp () = return(LogicalOp_AND)
+
+register LR_mon : bits(32)
+
+/* True if the local exclusive monitor covers the given physical address.
+   This model tracks a single boolean flag (__ExclusiveLocal) rather than
+   per-address, per-processor monitor state, so paddress/processorid/size
+   are ignored. */
+val IsExclusiveLocal : (FullAddress, int, int) -> bool effect {rreg}
+
+function IsExclusiveLocal (paddress, 'processorid, 'size) = return(__ExclusiveLocal)
+
+/* True if an interrupt is pending for this PE, backed directly by the
+   model register __PendingInterrupt. */
+val InterruptPending : unit -> bool effect {rreg}
+
+function InterruptPending () = return(__PendingInterrupt)
+
+val asl_Int : forall ('N : Int), 'N >= 0. (bits('N), bool) -> int
+
+/* Interpret the bit-vector x as an integer: zero-extended (UInt) when
+   'unsigned' is true, two's-complement (SInt) otherwise. */
+function asl_Int (x, unsigned) =
+ return(if unsigned then UInt(x) else SInt(x))
+
+val InstructionSynchronizationBarrier : unit -> unit
+
+function InstructionSynchronizationBarrier () = ()
+
+val __UNKNOWN_InstrSet : unit -> InstrSet
+
+function __UNKNOWN_InstrSet () = return(InstrSet_A64)
+
+val __UNKNOWN_ImmediateOp : unit -> ImmediateOp
+
+function __UNKNOWN_ImmediateOp () = return(ImmediateOp_MOVI)
+
+register ID_AA64DFR0_EL1 : bits(64)
+
+val Hint_Yield : unit -> unit
+
+function Hint_Yield () = ()
+
+val Hint_Prefetch : (bits(64), PrefetchHint, int, bool) -> unit
+
+function Hint_Prefetch (address, hint, 'target, stream) = ()
+
+val Hint_Branch : BranchType -> unit
+
+function Hint_Branch hint = ()
+
+val HaveFP16Ext : unit -> bool
+
+function HaveFP16Ext () = return(true)
+
+val HaveAnyAArch32 : unit -> bool
+
+function HaveAnyAArch32 () = return(false)
+
+register HVBAR : bits(32)
+
+register HSR : bits(32)
+
+register HSCTLR : bits(32)
+
+register HPFAR_EL2 : bits(64)
+
+register HPFAR : bits(32)
+
+register HIFAR : bits(32)
+
+register HDFAR : bits(32)
+
+register HDCR : bits(32)
+
+register HCR_EL2 : bits(64)
+
+register HCR2 : bits(32)
+
+register HCR : bits(32)
+
+val __UNKNOWN_Fault : unit -> Fault
+
+function __UNKNOWN_Fault () = return(Fault_None)
+
+val __UNKNOWN_FPUnaryOp : unit -> FPUnaryOp
+
+function __UNKNOWN_FPUnaryOp () = return(FPUnaryOp_ABS)
+
+val __UNKNOWN_FPType : unit -> FPType
+
+function __UNKNOWN_FPType () = return(FPType_Nonzero)
+
+register FPSR : bits(32)
+
+register FPSCR : bits(32)
+
+val __UNKNOWN_FPRounding : unit -> FPRounding
+
+function __UNKNOWN_FPRounding () = return(FPRounding_TIEEVEN)
+
+val __UNKNOWN_FPMaxMinOp : unit -> FPMaxMinOp
+
+function __UNKNOWN_FPMaxMinOp () = return(FPMaxMinOp_MAX)
+
+register FPEXC : bits(32)
+
+/* Decode a 2-bit rounding-mode field into an FPRounding value
+   (00 = round to nearest/ties-to-even, 01 = towards +inf,
+    10 = towards -inf, 11 = towards zero). */
+val FPDecodeRounding : bits(2) -> FPRounding
+
+function FPDecodeRounding rmode = match rmode {
+ 0b00 => return(FPRounding_TIEEVEN),
+ 0b01 => return(FPRounding_POSINF),
+ 0b10 => return(FPRounding_NEGINF),
+ 0b11 => return(FPRounding_ZERO)
+}
+
+/* Return the rounding mode selected by an FPCR value: decodes the 2-bit
+   field at bits [23:22] (slice at offset 22, width 2). */
+val FPRoundingMode : bits(32) -> FPRounding
+
+function FPRoundingMode fpcr = return(FPDecodeRounding(slice(fpcr, 22, 2)))
+
+val __UNKNOWN_FPConvOp : unit -> FPConvOp
+
+function __UNKNOWN_FPConvOp () = return(FPConvOp_CVT_FtoI)
+
+register FPCR : bits(32)
+
+register FAR_EL3 : bits(64)
+
+register FAR_EL2 : bits(64)
+
+register FAR_EL1 : bits(64)
+
+val __UNKNOWN_boolean : unit -> bool
+
+function __UNKNOWN_boolean () = return(false)
+
+val __ResetInterruptState : unit -> unit effect {wreg}
+
+function __ResetInterruptState () = {
+ __PendingPhysicalSError = false;
+ __PendingInterrupt = false
+}
+
+val __ResetExecuteState : unit -> unit effect {wreg}
+
+function __ResetExecuteState () = __Sleeping = false
+
+val Unreachable : unit -> unit effect {escape}
+
+function Unreachable () = assert(false, "FALSE")
+
+/* Select one of the caller-supplied banked-register indices according to
+   the AArch32 mode encoding in 'mode'.  Each pattern guard compares mode
+   against one of the M32_* 5-bit mode constants defined above; System mode
+   shares the User bank.  Any other encoding is unreachable and aborts via
+   Unreachable(). */
+val RBankSelect : (bits(5), int, int, int, int, int, int, int) -> int effect {escape, undef}
+
+function RBankSelect (mode, 'usr, 'fiq, 'irq, 'svc, 'abt, 'und, 'hyp) = {
+ result : int = undefined;
+ match mode {
+ ? if ? == M32_User => result = usr,
+ ? if ? == M32_FIQ => result = fiq,
+ ? if ? == M32_IRQ => result = irq,
+ ? if ? == M32_Svc => result = svc,
+ ? if ? == M32_Abort => result = abt,
+ ? if ? == M32_Hyp => result = hyp,
+ ? if ? == M32_Undef => result = und,
+ ? if ? == M32_System => result = usr,
+ _ => Unreachable()
+ };
+ return(result)
+}
+
+val TakeUnmaskedSErrorInterrupts : unit -> unit effect {escape}
+
+function TakeUnmaskedSErrorInterrupts () = assert(false, "FALSE")
+
+val TakeUnmaskedPhysicalSErrorInterrupts : bool -> unit effect {escape}
+
+function TakeUnmaskedPhysicalSErrorInterrupts iesb_req = assert(false, "FALSE")
+
+val StopInstructionPrefetchAndEnableITR : unit -> unit effect {escape}
+
+function StopInstructionPrefetchAndEnableITR () = assert(false, "FALSE")
+
+val SendEvent : unit -> unit effect {escape}
+
+function SendEvent () = assert(false, "FALSE")
+
+val MarkExclusiveLocal : (FullAddress, int, int) -> unit effect {wreg}
+
+function MarkExclusiveLocal (paddress, 'processorid, 'size) = __ExclusiveLocal = false
+
+val MarkExclusiveGlobal : (FullAddress, int, int) -> unit effect {escape}
+
+function MarkExclusiveGlobal (paddress, 'processorid, 'size) = assert(false, "FALSE")
+
+val IsExclusiveGlobal : (FullAddress, int, int) -> bool effect {escape}
+
+function IsExclusiveGlobal (paddress, 'processorid, 'size) = {
+ assert(false, "FALSE");
+ return(false)
+}
+
+val ExclusiveMonitorsStatus : unit -> bits(1) effect {escape}
+
+function ExclusiveMonitorsStatus () = {
+ assert(false, "FALSE");
+ return(0b0)
+}
+
+val __UNKNOWN_Exception : unit -> Exception
+
+function __UNKNOWN_Exception () = return(Exception_Uncategorized)
+
+register EventRegister : bits(1)
+
+val SendEventLocal : unit -> unit effect {wreg}
+
+function SendEventLocal () = {
+ EventRegister = 0b1;
+ ()
+}
+
+val ErrorSynchronizationBarrier : (MBReqDomain, MBReqTypes) -> unit
+
+function ErrorSynchronizationBarrier (domain, types) = ()
+
+val EnterLowPowerState : unit -> unit effect {wreg}
+
+function EnterLowPowerState () = __Sleeping = true
+
+val WaitForInterrupt : unit -> unit effect {wreg}
+
+function WaitForInterrupt () = {
+ EnterLowPowerState();
+ ()
+}
+
+val EndOfInstruction : unit -> unit effect {escape}
+
+function EndOfInstruction () = throw(Error_ExceptionTaken())
+
+register ESR_EL3 : bits(32)
+
+register ESR_EL2 : bits(32)
+
+register ESR_EL1 : bits(32)
+
+/* Rotate/mix a single 4-bit tweak cell: out[3] = in[0]^in[1],
+   out[2] = in[3], out[1] = in[2], out[0] = in[1].  Used by TweakShuffle
+   below; TweakCellInvRot is its inverse.  (Part of the pointer-
+   authentication tweak schedule support code — presumably QARMA-derived;
+   permutation taken as-is from the generated spec.) */
+val TweakCellRot : bits(4) -> bits(4) effect {undef}
+
+function TweakCellRot incell_name = {
+ outcell : bits(4) = undefined;
+ outcell : bits(4) = __SetSlice_bits(4, 1, outcell, 3, [incell_name[0]] ^ [incell_name[1]]);
+ outcell = __SetSlice_bits(4, 1, outcell, 2, [incell_name[3]]);
+ outcell = __SetSlice_bits(4, 1, outcell, 1, [incell_name[2]]);
+ outcell = __SetSlice_bits(4, 1, outcell, 0, [incell_name[1]]);
+ return(outcell)
+}
+
+/* Permute the sixteen 4-bit cells of a 64-bit tweak value according to a
+   fixed shuffle, passing selected cells through TweakCellRot.  Each line
+   writes one destination nibble (offset in outdata) from one source nibble
+   (offset in indata).  The fixed cell mapping is taken verbatim from the
+   generated spec; TweakInvShuffle below is presumably the exact inverse —
+   not re-verified here. */
+val TweakShuffle : bits(64) -> bits(64) effect {undef}
+
+function TweakShuffle indata = {
+ outdata : bits(64) = undefined;
+ outdata : bits(64) = __SetSlice_bits(64, 4, outdata, 0, slice(indata, 16, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 4, slice(indata, 20, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 8, TweakCellRot(slice(indata, 24, 4)));
+ outdata = __SetSlice_bits(64, 4, outdata, 12, slice(indata, 28, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 16, TweakCellRot(slice(indata, 44, 4)));
+ outdata = __SetSlice_bits(64, 4, outdata, 20, slice(indata, 8, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 24, slice(indata, 12, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 28, TweakCellRot(slice(indata, 32, 4)));
+ outdata = __SetSlice_bits(64, 4, outdata, 32, slice(indata, 48, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 36, slice(indata, 52, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 40, slice(indata, 56, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 44, TweakCellRot(slice(indata, 60, 4)));
+ outdata = __SetSlice_bits(64, 4, outdata, 48, TweakCellRot(slice(indata, 0, 4)));
+ outdata = __SetSlice_bits(64, 4, outdata, 52, slice(indata, 4, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 56, TweakCellRot(slice(indata, 40, 4)));
+ outdata = __SetSlice_bits(64, 4, outdata, 60, TweakCellRot(slice(indata, 36, 4)));
+ return(outdata)
+}
+
+/* Inverse of TweakCellRot: out[3] = in[2], out[2] = in[1], out[1] = in[0],
+   out[0] = in[0]^in[3].  (Composing with TweakCellRot recovers the input:
+   Rot's out[3] is in[0]^in[1], so in[1] = out[3]^out[0] here.) */
+val TweakCellInvRot : bits(4) -> bits(4) effect {undef}
+
+function TweakCellInvRot incell_name = {
+ outcell : bits(4) = undefined;
+ outcell : bits(4) = __SetSlice_bits(4, 1, outcell, 3, [incell_name[2]]);
+ outcell = __SetSlice_bits(4, 1, outcell, 2, [incell_name[1]]);
+ outcell = __SetSlice_bits(4, 1, outcell, 1, [incell_name[0]]);
+ outcell = __SetSlice_bits(4, 1, outcell, 0, [incell_name[0]] ^ [incell_name[3]]);
+ return(outcell)
+}
+
+/* Inverse cell permutation of TweakShuffle, applying TweakCellInvRot to
+   the cells that TweakShuffle routed through TweakCellRot.  Fixed mapping
+   taken verbatim from the generated spec — presumed to exactly invert
+   TweakShuffle; not re-verified here. */
+val TweakInvShuffle : bits(64) -> bits(64) effect {undef}
+
+function TweakInvShuffle indata = {
+ outdata : bits(64) = undefined;
+ outdata : bits(64) = __SetSlice_bits(64, 4, outdata, 0, TweakCellInvRot(slice(indata, 48, 4)));
+ outdata = __SetSlice_bits(64, 4, outdata, 4, slice(indata, 52, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 8, slice(indata, 20, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 12, slice(indata, 24, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 16, slice(indata, 0, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 20, slice(indata, 4, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 24, TweakCellInvRot(slice(indata, 8, 4)));
+ outdata = __SetSlice_bits(64, 4, outdata, 28, slice(indata, 12, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 32, TweakCellInvRot(slice(indata, 28, 4)));
+ outdata = __SetSlice_bits(64, 4, outdata, 36, TweakCellInvRot(slice(indata, 60, 4)));
+ outdata = __SetSlice_bits(64, 4, outdata, 40, TweakCellInvRot(slice(indata, 56, 4)));
+ outdata = __SetSlice_bits(64, 4, outdata, 44, TweakCellInvRot(slice(indata, 16, 4)));
+ outdata = __SetSlice_bits(64, 4, outdata, 48, slice(indata, 32, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 52, slice(indata, 36, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 56, slice(indata, 40, 4));
+ outdata = __SetSlice_bits(64, 4, outdata, 60, TweakCellInvRot(slice(indata, 44, 4)));
+ return(outdata)
+}
+
+val SHAparity : (bits(32), bits(32), bits(32)) -> bits(32)
+
+/* SHA-1 'parity' function: the bitwise XOR of the three 32-bit inputs
+   (FIPS 180-4, Parity(x,y,z) = x XOR y XOR z). */
+function SHAparity (x, y, z) = {
+ let xy : bits(32) = x ^ y;
+ return(xy ^ z)
+}
+
+register ELR_hyp : bits(32)
+
+register ELR_EL3 : bits(64)
+
+register ELR_EL2 : bits(64)
+
+register ELR_EL1 : bits(64)
+
+let EL3 : vector(2, dec, bit) = 0b11
+
+let EL2 : vector(2, dec, bit) = 0b10
+
+let EL1 : vector(2, dec, bit) = 0b01
+
+let EL0 : vector(2, dec, bit) = 0b00
+
+register EDSCR : bits(32)
+
+val __UNKNOWN_DeviceType : unit -> DeviceType
+
+function __UNKNOWN_DeviceType () = return(DeviceType_GRE)
+
+/* Decode the 2-bit shift-type field of a shifted-register operand:
+   00 LSL, 01 LSR, 10 ASR, 11 ROR. */
+val DecodeShift : bits(2) -> ShiftType
+
+function DecodeShift op = match op {
+ 0b00 => return(ShiftType_LSL),
+ 0b01 => return(ShiftType_LSR),
+ 0b10 => return(ShiftType_ASR),
+ 0b11 => return(ShiftType_ROR)
+}
+
+/* Decode the 3-bit extend-type field of an extended-register operand:
+   bit 2 selects signed (1) vs unsigned (0), bits [1:0] the source width
+   (00 byte, 01 halfword, 10 word, 11 doubleword). */
+val DecodeRegExtend : bits(3) -> ExtendType
+
+function DecodeRegExtend op = match op {
+ 0b000 => return(ExtendType_UXTB),
+ 0b001 => return(ExtendType_UXTH),
+ 0b010 => return(ExtendType_UXTW),
+ 0b011 => return(ExtendType_UXTX),
+ 0b100 => return(ExtendType_SXTB),
+ 0b101 => return(ExtendType_SXTH),
+ 0b110 => return(ExtendType_SXTW),
+ 0b111 => return(ExtendType_SXTX)
+}
+
+let DebugHalt_Watchpoint : vector(6, dec, bit) = 0b101011
+
+let DebugHalt_HaltInstruction : vector(6, dec, bit) = 0b101111
+
+let DebugHalt_Breakpoint : vector(6, dec, bit) = 0b000111
+
+let DebugException_VectorCatch : vector(4, dec, bit) = 0x5
+
+val DataSynchronizationBarrier : (MBReqDomain, MBReqTypes) -> unit
+
+function DataSynchronizationBarrier (domain, types) = ()
+
+val DataMemoryBarrier : (MBReqDomain, MBReqTypes) -> unit
+
+function DataMemoryBarrier (domain, types) = ()
+
+val aarch64_system_barriers : (MBReqDomain, MemBarrierOp, MBReqTypes) -> unit
+
+function aarch64_system_barriers (domain, op, types) = match op {
+ MemBarrierOp_DSB => DataSynchronizationBarrier(domain, types),
+ MemBarrierOp_DMB => DataMemoryBarrier(domain, types),
+ MemBarrierOp_ISB => InstructionSynchronizationBarrier()
+}
+
+register DSPSR_EL0 : bits(32)
+
+register DSPSR : bits(32)
+
+register DLR_EL0 : bits(64)
+
+register DLR : bits(32)
+
+register DBGWVR_EL1 : vector(16, dec, bits(64))
+
+register DBGWCR_EL1 : vector(16, dec, bits(32))
+
+register DBGPRCR_EL1 : bits(32)
+
+register DBGPRCR : bits(32)
+
+register DBGOSLSR : bits(32)
+
+register DBGOSDLR : bits(32)
+
+register DBGEN : signal
+
+register DBGBVR_EL1 : vector(16, dec, bits(64))
+
+register DBGBCR_EL1 : vector(16, dec, bits(32))
+
+val __UNKNOWN_Constraint : unit -> Constraint
+
+function __UNKNOWN_Constraint () = return(Constraint_NONE)
+
+/* Fixed resolution of CONSTRAINED UNPREDICTABLE situations for this model:
+   maps each Unpredictable case to the single Constraint this implementation
+   chooses.  The table is total over the Unpredictable enumeration; the
+   particular choices are this model's IMPLEMENTATION_DEFINED policy. */
+val ConstrainUnpredictable : Unpredictable -> Constraint
+
+function ConstrainUnpredictable which = match which {
+ Unpredictable_WBOVERLAPLD => return(Constraint_WBSUPPRESS),
+ Unpredictable_WBOVERLAPST => return(Constraint_NONE),
+ Unpredictable_LDPOVERLAP => return(Constraint_UNDEF),
+ Unpredictable_BASEOVERLAP => return(Constraint_NONE),
+ Unpredictable_DATAOVERLAP => return(Constraint_NONE),
+ Unpredictable_DEVPAGE2 => return(Constraint_FAULT),
+ Unpredictable_INSTRDEVICE => return(Constraint_NONE),
+ Unpredictable_RESCPACR => return(Constraint_UNKNOWN),
+ Unpredictable_RESMAIR => return(Constraint_UNKNOWN),
+ Unpredictable_RESTEXCB => return(Constraint_UNKNOWN),
+ Unpredictable_RESDACR => return(Constraint_UNKNOWN),
+ Unpredictable_RESPRRR => return(Constraint_UNKNOWN),
+ Unpredictable_RESVTCRS => return(Constraint_UNKNOWN),
+ Unpredictable_RESTnSZ => return(Constraint_FORCE),
+ Unpredictable_OORTnSZ => return(Constraint_FORCE),
+ Unpredictable_LARGEIPA => return(Constraint_FORCE),
+ Unpredictable_ESRCONDPASS => return(Constraint_FALSE),
+ Unpredictable_ILZEROIT => return(Constraint_FALSE),
+ Unpredictable_ILZEROT => return(Constraint_FALSE),
+ Unpredictable_BPVECTORCATCHPRI => return(Constraint_TRUE),
+ Unpredictable_VCMATCHHALF => return(Constraint_FALSE),
+ Unpredictable_VCMATCHDAPA => return(Constraint_FALSE),
+ Unpredictable_WPMASKANDBAS => return(Constraint_FALSE),
+ Unpredictable_WPBASCONTIGUOUS => return(Constraint_FALSE),
+ Unpredictable_RESWPMASK => return(Constraint_DISABLED),
+ Unpredictable_WPMASKEDBITS => return(Constraint_FALSE),
+ Unpredictable_RESBPWPCTRL => return(Constraint_DISABLED),
+ Unpredictable_BPNOTIMPL => return(Constraint_DISABLED),
+ Unpredictable_RESBPTYPE => return(Constraint_DISABLED),
+ Unpredictable_BPNOTCTXCMP => return(Constraint_DISABLED),
+ Unpredictable_BPMATCHHALF => return(Constraint_FALSE),
+ Unpredictable_BPMISMATCHHALF => return(Constraint_FALSE),
+ Unpredictable_RESTARTALIGNPC => return(Constraint_FALSE),
+ Unpredictable_RESTARTZEROUPPERPC => return(Constraint_TRUE),
+ Unpredictable_ZEROUPPER => return(Constraint_TRUE),
+ Unpredictable_ERETZEROUPPERPC => return(Constraint_TRUE),
+ Unpredictable_A32FORCEALIGNPC => return(Constraint_FALSE),
+ Unpredictable_SMD => return(Constraint_UNDEF),
+ Unpredictable_AFUPDATE => return(Constraint_TRUE),
+ Unpredictable_IESBinDebug => return(Constraint_TRUE),
+ Unpredictable_CLEARERRITEZERO => return(Constraint_FALSE)
+}
+
+val __UNKNOWN_CompareOp : unit -> CompareOp
+
+function __UNKNOWN_CompareOp () = return(CompareOp_GT)
+
+val ClearPendingPhysicalSError : unit -> unit effect {wreg}
+
+function ClearPendingPhysicalSError () = {
+ __PendingPhysicalSError = false;
+ ()
+}
+
+val ClearExclusiveLocal : int -> unit effect {wreg}
+
+function ClearExclusiveLocal 'processorid = {
+ __ExclusiveLocal = false;
+ ()
+}
+
+val aarch64_system_monitors : unit -> unit effect {wreg}
+
+function aarch64_system_monitors () = ClearExclusiveLocal(ProcessorID())
+
+val system_monitors_decode : (bits(1), bits(2), bits(3), bits(4), bits(4), bits(3), bits(5)) -> unit effect {wreg}
+
+function system_monitors_decode (L, op0, op1, CRn, CRm, op2, Rt) = {
+ __unconditional = true;
+ aarch64_system_monitors()
+}
+
+val ClearExclusiveByAddress : (FullAddress, int, int) -> unit
+
+function ClearExclusiveByAddress (paddress, 'processorid, 'size) = ()
+
+val ClearEventRegister : unit -> unit effect {wreg}
+
+function ClearEventRegister () = {
+ EventRegister = 0b0;
+ ()
+}
+
+val CTI_SignalEvent : CrossTriggerIn -> unit effect {escape}
+
+function CTI_SignalEvent id = assert(false, "FALSE")
+
+register CPTR_EL3 : bits(32)
+
+register CPTR_EL2 : bits(32)
+
+register CPACR_EL1 : bits(32)
+
+register CONTEXTIDR_EL2 : bits(32)
+
+register CONTEXTIDR_EL1 : bits(32)
+
+val __UNKNOWN_BranchType : unit -> BranchType
+
+function __UNKNOWN_BranchType () = return(BranchType_CALL)
+
+val __UNKNOWN_AccType : unit -> AccType
+
+function __UNKNOWN_AccType () = return(AccType_NORMAL)
+
+val CreateAccessDescriptorPTW : (AccType, bool, bool, int) -> AccessDescriptor effect {undef}
+
+/* Build an AccessDescriptor for a translation-table-walk access at the
+   given lookup level.
+   BUG FIX: the original assigned 'accdesc.secondstage = s2fs1walk' and then
+   immediately overwrote that same field with 'secondstage', leaving the
+   s2fs1walk field undefined.  The first assignment must target
+   accdesc.s2fs1walk (compare the ARM ASL CreateAccessDescriptorPTW
+   pseudocode, and the parallel s2fs1walk/secondstage handling in
+   AArch64_CreateFaultRecord below). */
+function CreateAccessDescriptorPTW (acctype, secondstage, s2fs1walk, 'level) = {
+ accdesc : AccessDescriptor = undefined;
+ accdesc.acctype = acctype;
+ accdesc.page_table_walk = true;
+ accdesc.s2fs1walk = s2fs1walk;
+ accdesc.secondstage = secondstage;
+ accdesc.level = level;
+ return(accdesc)
+}
+
+val CreateAccessDescriptor : AccType -> AccessDescriptor effect {undef}
+
+function CreateAccessDescriptor acctype = {
+ accdesc : AccessDescriptor = undefined;
+ accdesc.acctype = acctype;
+ accdesc.page_table_walk = false;
+ return(accdesc)
+}
+
+/* Pointer-authentication key registers (ARMv8.3 PAC): the 128-bit
+   instruction keys A/B, data keys A/B and generic key G, each held as a
+   Lo/Hi pair of 64-bit registers. */
+register APIBKeyLo_EL1 : bits(64)
+
+register APIBKeyHi_EL1 : bits(64)
+
+register APIAKeyLo_EL1 : bits(64)
+
+register APIAKeyHi_EL1 : bits(64)
+
+register APGAKeyLo_EL1 : bits(64)
+
+register APGAKeyHi_EL1 : bits(64)
+
+register APDBKeyLo_EL1 : bits(64)
+
+register APDBKeyHi_EL1 : bits(64)
+
+register APDAKeyLo_EL1 : bits(64)
+
+register APDAKeyHi_EL1 : bits(64)
+
+/* Execute an MSR (immediate) write to a PSTATE field.  SP/PAN/UAO take
+   operand bit 0 directly; DAIFSet ORs operand bits 3..0 into the D/A/I/F
+   mask bits, DAIFClr clears the bits selected by operand bits 3..0. */
+val aarch64_system_register_cpsr : (PSTATEField, bits(4)) -> unit effect {rreg, wreg}
+
+function aarch64_system_register_cpsr (field, operand) = match field {
+ PSTATEField_SP => PSTATE.SP = [operand[0]],
+ PSTATEField_DAIFSet => {
+ PSTATE.D = PSTATE.D | [operand[3]];
+ PSTATE.A = PSTATE.A | [operand[2]];
+ PSTATE.I = PSTATE.I | [operand[1]];
+ PSTATE.F = PSTATE.F | [operand[0]]
+ },
+ PSTATEField_DAIFClr => {
+ PSTATE.D = PSTATE.D & ~([operand[3]]);
+ PSTATE.A = PSTATE.A & ~([operand[2]]);
+ PSTATE.I = PSTATE.I & ~([operand[1]]);
+ PSTATE.F = PSTATE.F & ~([operand[0]])
+ },
+ PSTATEField_PAN => PSTATE.PAN = [operand[0]],
+ PSTATEField_UAO => PSTATE.UAO = [operand[0]]
+}
+
+/* SHA 'majority' function: each result bit is the majority vote of the
+   corresponding bits of x, y and z ((x AND y) OR ((x OR y) AND z)). */
+val SHAmajority : (bits(32), bits(32), bits(32)) -> bits(32)
+
+function SHAmajority (x, y, z) = return(x & y | (x | y) & z)
+
+/* SHA 'choose' function: bits of x select between y and z; the standard
+   form is ((y EOR z) AND x) EOR z (FIPS 180-4 Ch).
+   NOTE(review): the unparenthesised 'y ^ z & x' is only correct if '^'
+   binds tighter than '&' in this Sail prelude (cf. SHAmajority above,
+   where '&' binds tighter than '|') — confirm the declared precedences. */
+val SHAchoose : (bits(32), bits(32), bits(32)) -> bits(32)
+
+function SHAchoose (x, y, z) = return((y ^ z & x) ^ z)
+
+val AArch64_SysRegWrite : (int, int, int, int, int, bits(64)) -> unit effect {escape}
+
+function AArch64_SysRegWrite ('op0, 'op1, 'crn, 'crm, 'op2, val_name) = assert(false, "FALSE")
+
+val AArch64_SysRegRead : (int, int, int, int, int) -> bits(64) effect {escape, undef}
+
+function AArch64_SysRegRead _ = {
+ assert(false, "Tried to read system register");
+ undefined
+}
+
+val AArch64_SysInstr : (int, int, int, int, int, bits(64)) -> unit effect {escape}
+
+function AArch64_SysInstr ('op0, 'op1, 'crn, 'crm, 'op2, val_name) = assert(false, "FALSE")
+
+val AArch64_ResetControlRegisters : bool -> unit
+
+function AArch64_ResetControlRegisters cold_reset = ()
+
+val AArch64_ReportDeferredSError : bits(25) -> bits(64) effect {undef}
+
+function AArch64_ReportDeferredSError syndrome = {
+ target : bits(64) = undefined;
+ target : bits(64) = __SetSlice_bits(64, 1, target, 31, 0b1);
+ target = __SetSlice_bits(64, 1, target, 24, [syndrome[24]]);
+ target = __SetSlice_bits(64, 24, target, 0, slice(syndrome, 0, 24));
+ return(target)
+}
+
+val AArch64_MarkExclusiveVA : (bits(64), int, int) -> unit effect {escape}
+
+function AArch64_MarkExclusiveVA (address, 'processorid, 'size) = assert(false, "FALSE")
+
+val AArch64_IsExclusiveVA : (bits(64), int, int) -> bool effect {escape}
+
+function AArch64_IsExclusiveVA (address, 'processorid, 'size) = {
+ assert(false, "FALSE");
+ return(false)
+}
+
+/* Assemble a FaultRecord from its constituent fields.  'domain' and
+   'debugmoe' are left undefined here: they are AArch32-only fields and
+   the AArch64 fault constructors below never populate them. */
+val AArch64_CreateFaultRecord : (Fault, bits(52), int, AccType, bool, bits(1), bits(2), bool, bool) -> FaultRecord effect {undef}
+
+function AArch64_CreateFaultRecord (typ, ipaddress, 'level, acctype, write, extflag, errortype, secondstage, s2fs1walk) = {
+ fault : FaultRecord = undefined;
+ fault.typ = typ;
+ fault.domain = undefined;
+ fault.debugmoe = undefined;
+ fault.errortype = errortype;
+ fault.ipaddress = ipaddress;
+ fault.level = level;
+ fault.acctype = acctype;
+ fault.write = write;
+ fault.extflag = extflag;
+ fault.secondstage = secondstage;
+ fault.s2fs1walk = s2fs1walk;
+ return(fault)
+}
+
+val AArch64_TranslationFault : (bits(52), int, AccType, bool, bool, bool) -> FaultRecord effect {undef}
+
+function AArch64_TranslationFault (ipaddress, 'level, acctype, iswrite, secondstage, s2fs1walk) = {
+ extflag : bits(1) = undefined;
+ errortype : bits(2) = undefined;
+ return(AArch64_CreateFaultRecord(Fault_Translation, ipaddress, level, acctype, iswrite, extflag, errortype, secondstage, s2fs1walk))
+}
+
+val AArch64_PermissionFault : (bits(52), int, AccType, bool, bool, bool) -> FaultRecord effect {undef}
+
+function AArch64_PermissionFault (ipaddress, 'level, acctype, iswrite, secondstage, s2fs1walk) = {
+ extflag : bits(1) = undefined;
+ errortype : bits(2) = undefined;
+ return(AArch64_CreateFaultRecord(Fault_Permission, ipaddress, level, acctype, iswrite, extflag, errortype, secondstage, s2fs1walk))
+}
+
+val AArch64_NoFault : unit -> FaultRecord effect {undef}
+
+function AArch64_NoFault () = {
+ ipaddress : bits(52) = undefined;
+ level : int = undefined;
+ acctype : AccType = AccType_NORMAL;
+ iswrite : bool = undefined;
+ extflag : bits(1) = undefined;
+ errortype : bits(2) = undefined;
+ secondstage : bool = false;
+ s2fs1walk : bool = false;
+ return(AArch64_CreateFaultRecord(Fault_None, ipaddress, level, acctype, iswrite, extflag, errortype, secondstage, s2fs1walk))
+}
+
+val AArch64_DebugFault : (AccType, bool) -> FaultRecord effect {undef}
+
+function AArch64_DebugFault (acctype, iswrite) = {
+ ipaddress : bits(52) = undefined;
+ errortype : bits(2) = undefined;
+ level : int = undefined;
+ extflag : bits(1) = undefined;
+ secondstage : bool = false;
+ s2fs1walk : bool = false;
+ return(AArch64_CreateFaultRecord(Fault_Debug, ipaddress, level, acctype, iswrite, extflag, errortype, secondstage, s2fs1walk))
+}
+
+val AArch64_CheckUnallocatedSystemAccess : (bits(2), bits(3), bits(4), bits(4), bits(3), bits(1)) -> bool effect {escape}
+
+function AArch64_CheckUnallocatedSystemAccess (op0, op1, crn, crm, op2, read) = {
+ assert(false, "FALSE");
+ return(false)
+}
+
+val AArch64_CheckSystemRegisterTraps : (bits(2), bits(3), bits(4), bits(4), bits(3), bits(1)) -> (bool, bits(2)) effect {escape}
+
+function AArch64_CheckSystemRegisterTraps (op0, op1, crn, crm, op2, read) = {
+ assert(false, "FALSE");
+ return((false, 0b00))
+}
+
+val AArch64_CheckAdvSIMDFPSystemRegisterTraps : (bits(2), bits(3), bits(4), bits(4), bits(3), bits(1)) -> (bool, bits(2)) effect {escape}
+
+function AArch64_CheckAdvSIMDFPSystemRegisterTraps (op0, op1, crn, crm, op2, read) = {
+ assert(false, "FALSE");
+ return((false, 0b00))
+}
+
+val AArch64_AlignmentFault : (AccType, bool, bool) -> FaultRecord effect {undef}
+
+function AArch64_AlignmentFault (acctype, iswrite, secondstage) = {
+ ipaddress : bits(52) = undefined;
+ level : int = undefined;
+ extflag : bits(1) = undefined;
+ errortype : bits(2) = undefined;
+ s2fs1walk : bool = undefined;
+ return(AArch64_CreateFaultRecord(Fault_Alignment, ipaddress, level, acctype, iswrite, extflag, errortype, secondstage, s2fs1walk))
+}
+
+val AArch64_AddressSizeFault : (bits(52), int, AccType, bool, bool, bool) -> FaultRecord effect {undef}
+
+function AArch64_AddressSizeFault (ipaddress, 'level, acctype, iswrite, secondstage, s2fs1walk) = {
+ extflag : bits(1) = undefined;
+ errortype : bits(2) = undefined;
+ return(AArch64_CreateFaultRecord(Fault_AddressSize, ipaddress, level, acctype, iswrite, extflag, errortype, secondstage, s2fs1walk))
+}
+
+val AArch64_AccessFlagFault : (bits(52), int, AccType, bool, bool, bool) -> FaultRecord effect {undef}
+
+function AArch64_AccessFlagFault (ipaddress, 'level, acctype, iswrite, secondstage, s2fs1walk) = {
+ extflag : bits(1) = undefined;
+ errortype : bits(2) = undefined;
+ return(AArch64_CreateFaultRecord(Fault_AccessFlag, ipaddress, level, acctype, iswrite, extflag, errortype, secondstage, s2fs1walk))
+}
+
+val AArch32_CurrentCond : unit -> bits(4) effect {rreg}
+
+function AArch32_CurrentCond () = return(__currentCond)
+
+/* Read the bottom 'width bits of the currently selected stack pointer.
+   When PSTATE.SP (SPSel) is 0 every EL uses SP_EL0; otherwise the stack
+   pointer banked for the current exception level is selected.  Only the
+   widths 8/16/32/64 are legal (asserted). */
+val aget_SP : forall ('width : Int), 'width >= 0.
+ unit -> bits('width) effect {escape, rreg}
+
+function aget_SP () = {
+ assert('width == 8 | 'width == 16 | 'width == 32 | 'width == 64, "((width == 8) || ((width == 16) || ((width == 32) || (width == 64))))");
+ if PSTATE.SP == 0b0 then return(slice(SP_EL0, 0, 'width)) else match PSTATE.EL {
+ ? if ? == EL0 => return(slice(SP_EL0, 0, 'width)),
+ ? if ? == EL1 => return(slice(SP_EL1, 0, 'width)),
+ ? if ? == EL2 => return(slice(SP_EL2, 0, 'width)),
+ ? if ? == EL3 => return(slice(SP_EL3, 0, 'width))
+ }
+}
+
+val __IMPDEF_integer : string -> int
+
+/* IMPLEMENTATION_DEFINED integer choices for this model: 52-bit maximum
+   physical and 56-bit maximum virtual address sizes; any other query
+   yields 0. */
+function __IMPDEF_integer x =
+ return(if x == "Maximum Physical Address Size" then 52
+ else if x == "Maximum Virtual Address Size" then 56
+ else 0)
+
+val VAMax : unit -> int
+
+function VAMax () = return(__IMPDEF_integer("Maximum Virtual Address Size"))
+
+val PAMax : unit -> int
+
+function PAMax () = return(__IMPDEF_integer("Maximum Physical Address Size"))
+
+val __IMPDEF_boolean : string -> bool
+
+/* IMPLEMENTATION_DEFINED boolean choices for this model: every query
+   listed below is answered 'true'; any other query yields 'false'.
+   FIX: the original chain tested "UNDEF unallocated CP15 access at NS EL0"
+   twice; the redundant duplicate has been removed and the chain reformatted
+   one test per line (no behavioural change). */
+function __IMPDEF_boolean x = {
+ if x == "Condition valid for trapped T32" then return(true)
+ else if x == "Has Dot Product extension" then return(true)
+ else if x == "Has RAS extension" then return(true)
+ else if x == "Has SHA512 and SHA3 Crypto instructions" then return(true)
+ else if x == "Has SM3 and SM4 Crypto instructions" then return(true)
+ else if x == "Has basic Crypto instructions" then return(true)
+ else if x == "Have CRC extension" then return(true)
+ else if x == "Report I-cache maintenance fault in IFSR" then return(true)
+ else if x == "Reserved Control Space EL0 Trapped" then return(true)
+ else if x == "Translation fault on misprogrammed contiguous bit" then return(true)
+ else if x == "UNDEF unallocated CP15 access at NS EL0" then return(true)
+ else ();
+ return(false)
+}
+
+val HaveCryptoExt : unit -> bool
+
+function HaveCryptoExt () = return(__IMPDEF_boolean("Has basic Crypto instructions"))
+
+/* WFE: enter low-power state unless the event register is already set. */
+val WaitForEvent : unit -> unit effect {rreg, wreg}
+
+function WaitForEvent () = {
+ if EventRegister == 0b0 then EnterLowPowerState() else ();
+ ()
+}
+
+/* Length in bits of the current instruction: 16 for a T16 encoding,
+   otherwise 32. */
+val ThisInstrLength : unit -> int effect {rreg}
+
+function ThisInstrLength () = return(if __ThisInstrEnc == __T16 then 16 else 32)
+
+/* Round a real towards zero: RoundDown for positive values, RoundUp for
+   negative ones, 0 for 0. */
+val RoundTowardsZero : real -> int
+
+function RoundTowardsZero x = return(if x == 0.0 then 0 else if x >= 0.0 then RoundDown(x) else RoundUp(x))
+
+/* True if the PE is restarting, i.e. EDSCR.STATUS (bits [5:0]) reads
+   0b000001. */
+val Restarting : unit -> bool effect {rreg}
+
+function Restarting () = return(slice(EDSCR, 0, 6) == 0b000001)
+
+/* True when pointer authentication sees both upper and lower address
+   ranges: at EL1/EL0, or at EL2 when HCR_EL2 bit 34 is set
+   (presumably HCR_EL2.E2H — confirm against the register layout). */
+val PtrHasUpperAndLowerAddRanges : unit -> bool effect {rreg}
+
+function PtrHasUpperAndLowerAddRanges () = return((PSTATE.EL == EL1 | PSTATE.EL == EL0) | PSTATE.EL == EL2 & [HCR_EL2[34]] == 0b1)
+
+/* Normalise a MemoryAttributes record: Device memory gets undefined
+   inner/outer cacheability and is forced shareable; Normal memory gets an
+   undefined device type, and is forced shareable when both inner and outer
+   attributes are Non-cacheable. */
+val MemAttrDefaults : MemoryAttributes -> MemoryAttributes effect {undef}
+
+function MemAttrDefaults memattrs__arg = {
+ memattrs = memattrs__arg;
+ if memattrs.typ == MemType_Device then {
+ memattrs.inner = undefined;
+ memattrs.outer = undefined;
+ memattrs.shareable = true;
+ memattrs.outershareable = true
+ } else {
+ memattrs.device = undefined;
+ if memattrs.inner.attrs == MemAttr_NC & memattrs.outer.attrs == MemAttr_NC then {
+ memattrs.shareable = true;
+ memattrs.outershareable = true
+ } else ()
+ };
+ return(memattrs)
+}
+
+val IsEventRegisterSet : unit -> bool effect {rreg}
+
+function IsEventRegisterSet () = return(EventRegister == 0b1)
+
+val HaveEL : bits(2) -> bool
+
+/* All four exception levels (EL0-EL3) are implemented in this model.
+   The original body special-cased EL0/EL1 with an early 'return(true)'
+   and then unconditionally returned true anyway, so the conditional was
+   dead code; it has been removed (no behavioural change). */
+function HaveEL el = return(true)
+
+val HighestEL : unit -> bits(2)
+
+function HighestEL () = if HaveEL(EL3) then return(EL3) else if HaveEL(EL2) then return(EL2) else return(EL1)
+
+val Have16bitVMID : unit -> bool
+
+function Have16bitVMID () = return(HaveEL(EL2))
+
+/* Architecture-version and extension feature predicates.  This model
+   implements ARMv8.3: HasArchVersion is true for v8.0 through v8.3, so
+   each Have*Ext below reduces to whether its introducing version is at
+   most v8.3 (optionally combined with an IMPLEMENTATION_DEFINED choice). */
+val HasArchVersion : ArchVersion -> bool
+
+function HasArchVersion version = return(version == ARMv8p0 | version == ARMv8p1 | version == ARMv8p2 | version == ARMv8p3)
+
+val HaveVirtHostExt : unit -> bool
+
+function HaveVirtHostExt () = return(HasArchVersion(ARMv8p1))
+
+val HaveUAOExt : unit -> bool
+
+function HaveUAOExt () = return(HasArchVersion(ARMv8p2))
+
+val HaveTrapLoadStoreMultipleDeviceExt : unit -> bool
+
+function HaveTrapLoadStoreMultipleDeviceExt () = return(HasArchVersion(ARMv8p2))
+
+val HaveStatisticalProfiling : unit -> bool
+
+function HaveStatisticalProfiling () = return(HasArchVersion(ARMv8p2))
+
+val HaveRASExt : unit -> bool
+
+function HaveRASExt () = return(HasArchVersion(ARMv8p2) | __IMPDEF_boolean("Has RAS extension"))
+
+val HaveQRDMLAHExt : unit -> bool
+
+function HaveQRDMLAHExt () = return(HasArchVersion(ARMv8p1))
+
+val HavePrivATExt : unit -> bool
+
+function HavePrivATExt () = return(HasArchVersion(ARMv8p2))
+
+val HavePANExt : unit -> bool
+
+function HavePANExt () = return(HasArchVersion(ARMv8p1))
+
+val HavePACExt : unit -> bool
+
+function HavePACExt () = return(HasArchVersion(ARMv8p3))
+
+val HaveNVExt : unit -> bool
+
+function HaveNVExt () = return(HasArchVersion(ARMv8p3))
+
+val HaveFJCVTZSExt : unit -> bool
+
+function HaveFJCVTZSExt () = return(HasArchVersion(ARMv8p3))
+
+val HaveFCADDExt : unit -> bool
+
+function HaveFCADDExt () = return(HasArchVersion(ARMv8p3))
+
+val HaveExtendedExecuteNeverExt : unit -> bool
+
+function HaveExtendedExecuteNeverExt () = return(HasArchVersion(ARMv8p2))
+
+val HaveDirtyBitModifierExt : unit -> bool
+
+function HaveDirtyBitModifierExt () = return(HasArchVersion(ARMv8p1))
+
+val HaveDOTPExt : unit -> bool
+
+function HaveDOTPExt () = return(HasArchVersion(ARMv8p2) & __IMPDEF_boolean("Has Dot Product extension"))
+
+val HaveCommonNotPrivateTransExt : unit -> bool
+
+function HaveCommonNotPrivateTransExt () = return(HasArchVersion(ARMv8p2))
+
+val HaveCRCExt : unit -> bool
+
+function HaveCRCExt () = return(HasArchVersion(ARMv8p1) | __IMPDEF_boolean("Have CRC extension"))
+
+val HaveAtomicExt : unit -> bool
+
+function HaveAtomicExt () = return(HasArchVersion(ARMv8p1))
+
+val HaveAccessFlagUpdateExt : unit -> bool
+
+function HaveAccessFlagUpdateExt () = return(HasArchVersion(ARMv8p1))
+
+val Have52BitVAExt : unit -> bool
+
+function Have52BitVAExt () = return(HasArchVersion(ARMv8p2))
+
+val Have52BitPAExt : unit -> bool
+
+function Have52BitPAExt () = return(HasArchVersion(ARMv8p2))
+
+val AArch64_HaveHPDExt : unit -> bool
+
+function AArch64_HaveHPDExt () = return(HasArchVersion(ARMv8p1))
+
+val ExternalInvasiveDebugEnabled : unit -> bool effect {rreg}
+
+function ExternalInvasiveDebugEnabled () = return(DBGEN == HIGH)
+
+/* Resolve a CONSTRAINED UNPREDICTABLE case that yields an integer.
+   NOTE(review): when the constraint is Constraint_UNKNOWN this model
+   picks 'low' as the UNKNOWN value in [low, high]; for any other
+   constraint the integer component is undefined — presumably callers
+   only use it in the UNKNOWN case; confirm against call sites. */
+val ConstrainUnpredictableInteger : (int, int, Unpredictable) -> (Constraint, int) effect {undef}
+
+function ConstrainUnpredictableInteger ('low, 'high, which) = {
+ c : Constraint = ConstrainUnpredictable(which);
+ if c == Constraint_UNKNOWN then return((c, low)) else return((c, undefined))
+}
+
+/* Resolve a CONSTRAINED UNPREDICTABLE case that must yield a boolean:
+   the chosen constraint is asserted to be TRUE or FALSE and returned as
+   a bool. */
+val ConstrainUnpredictableBool : Unpredictable -> bool effect {escape}
+
+function ConstrainUnpredictableBool which = {
+ c : Constraint = ConstrainUnpredictable(which);
+ assert(c == Constraint_TRUE | c == Constraint_FALSE, "((c == Constraint_TRUE) || (c == Constraint_FALSE))");
+ return(c == Constraint_TRUE)
+}
+
+/* Combine stage-1 and stage-2 device types, taking the more restrictive
+   of the two (nGnRnE strongest ... GRE weakest). */
+val CombineS1S2Device : (DeviceType, DeviceType) -> DeviceType effect {undef}
+
+function CombineS1S2Device (s1device, s2device) = {
+ result : DeviceType = undefined;
+ if s2device == DeviceType_nGnRnE | s1device == DeviceType_nGnRnE then result = DeviceType_nGnRnE else if s2device == DeviceType_nGnRE | s1device == DeviceType_nGnRE then result = DeviceType_nGnRE else if s2device == DeviceType_nGRE | s1device == DeviceType_nGRE then result = DeviceType_nGRE else result = DeviceType_GRE;
+ return(result)
+}
+
+/* Combine stage-1 and stage-2 cacheability attribute/hint fields: the
+   weaker attrs wins (NC over WT over WB); 0b01 is the reserved attrs
+   encoding, for which the result is left undefined.  Hints and the
+   transient flag are taken from stage 1. */
+val CombineS1S2AttrHints : (MemAttrHints, MemAttrHints) -> MemAttrHints effect {undef}
+
+function CombineS1S2AttrHints (s1desc, s2desc) = {
+ result : MemAttrHints = undefined;
+ if s2desc.attrs == 0b01 | s1desc.attrs == 0b01 then result.attrs = undefined else if s2desc.attrs == MemAttr_NC | s1desc.attrs == MemAttr_NC then result.attrs = MemAttr_NC else if s2desc.attrs == MemAttr_WT | s1desc.attrs == MemAttr_WT then result.attrs = MemAttr_WT else result.attrs = MemAttr_WB;
+ result.hints = s1desc.hints;
+ result.transient = s1desc.transient;
+ return(result)
+}
+
+val AArch64_InstructionDevice : (AddressDescriptor, bits(64), bits(52), int, AccType, bool, bool, bool) -> AddressDescriptor effect {escape, undef}
+
+// Handle an instruction fetch from Device memory (CONSTRAINED UNPREDICTABLE):
+// either report a Permission fault, or retype the access as Normal
+// Non-cacheable memory with no allocation hints.
+function AArch64_InstructionDevice (addrdesc__arg, vaddress, ipaddress, 'level, acctype, iswrite, secondstage, s2fs1walk) = {
+ addrdesc = addrdesc__arg;
+ c : Constraint = ConstrainUnpredictable(Unpredictable_INSTRDEVICE);
+ assert(c == Constraint_NONE | c == Constraint_FAULT, "((c == Constraint_NONE) || (c == Constraint_FAULT))");
+ if c == Constraint_FAULT then addrdesc.fault = AArch64_PermissionFault(ipaddress, level, acctype, iswrite, secondstage, s2fs1walk) else {
+ // The __tmp_* copies below are the generated form of nested record
+ // field updates: memattrs.typ = Normal, inner = {NC, no hints},
+ // outer = inner, then apply the architected defaults.
+ __tmp_12 : MemoryAttributes = addrdesc.memattrs;
+ __tmp_12.typ = MemType_Normal;
+ addrdesc.memattrs = __tmp_12;
+ __tmp_13 : MemAttrHints = addrdesc.memattrs.inner;
+ __tmp_13.attrs = MemAttr_NC;
+ __tmp_14 : MemoryAttributes = addrdesc.memattrs;
+ __tmp_14.inner = __tmp_13;
+ addrdesc.memattrs = __tmp_14;
+ __tmp_15 : MemAttrHints = addrdesc.memattrs.inner;
+ __tmp_15.hints = MemHint_No;
+ __tmp_16 : MemoryAttributes = addrdesc.memattrs;
+ __tmp_16.inner = __tmp_15;
+ addrdesc.memattrs = __tmp_16;
+ __tmp_17 : MemoryAttributes = addrdesc.memattrs;
+ __tmp_17.outer = addrdesc.memattrs.inner;
+ addrdesc.memattrs = __tmp_17;
+ addrdesc.memattrs = MemAttrDefaults(addrdesc.memattrs)
+ };
+ return(addrdesc)
+}
+
+val aget_Vpart : forall ('width : Int), 'width >= 0.
+ (int, int) -> bits('width) effect {escape, rreg}
+
+// Read part of SIMD&FP register V[n]: part 0 is the low 'width bits
+// (8..64), part 1 is the upper 64 bits (requires 'width == 64).
+function aget_Vpart ('n, 'part) = {
+ assert(n >= 0 & n <= 31, "((n >= 0) && (n <= 31))");
+ assert(part == 0 | part == 1, "((part == 0) || (part == 1))");
+ if part == 0 then {
+ assert('width == 8 | 'width == 16 | 'width == 32 | 'width == 64, "((width == 8) || ((width == 16) || ((width == 32) || (width == 64))))");
+ return(slice(_V[n], 0, 'width))
+ } else {
+ assert('width == 64, "(width == 64)");
+ return(slice(_V[n], 'width, 'width))
+ }
+}
+
+val aget_V : forall ('width : Int), 'width >= 0.
+ int -> bits('width) effect {escape, rreg}
+
+// Read the low 'width bits (8..128) of SIMD&FP register V[n].
+function aget_V 'n = {
+ assert(n >= 0 & n <= 31, "((n >= 0) && (n <= 31))");
+ assert('width == 8 | 'width == 16 | 'width == 32 | 'width == 64 | 'width == 128, "((width == 8) || ((width == 16) || ((width == 32) || ((width == 64) || (width == 128)))))");
+ return(slice(_V[n], 0, 'width))
+}
+
+val LookUpRIndex : (int, bits(5)) -> int effect {escape, undef}
+
+// Map an AArch32 register number n (0..14) and processor mode to the index
+// of the banked physical register; R0-R7 are never banked, R8-R12 are
+// banked only in FIQ mode, R13/R14 are banked per mode.
+function LookUpRIndex ('n, mode) = {
+ assert(n >= 0 & n <= 14, "((n >= 0) && (n <= 14))");
+ result : int = undefined;
+ match n {
+ 8 => result = RBankSelect(mode, 8, 24, 8, 8, 8, 8, 8),
+ 9 => result = RBankSelect(mode, 9, 25, 9, 9, 9, 9, 9),
+ 10 => result = RBankSelect(mode, 10, 26, 10, 10, 10, 10, 10),
+ 11 => result = RBankSelect(mode, 11, 27, 11, 11, 11, 11, 11),
+ 12 => result = RBankSelect(mode, 12, 28, 12, 12, 12, 12, 12),
+ 13 => result = RBankSelect(mode, 13, 29, 17, 19, 21, 23, 15),
+ 14 => result = RBankSelect(mode, 14, 30, 16, 18, 20, 22, 14),
+ _ => result = n
+ };
+ return(result)
+}
+
+val LowestSetBit : forall ('N : Int), 'N >= 0. bits('N) -> int
+
+// Index of the least significant set bit, or 'N if x is all zeros
+// (matching the architectural definition).
+function LowestSetBit x = {
+ foreach (i from 0 to ('N - 1) by 1 in inc)
+ if [x[i]] == 0b1 then return(i) else ();
+ return('N)
+}
+
+val HighestSetBit : forall ('N : Int), 'N >= 0. bits('N) -> int
+
+// Index of the most significant set bit, or -1 if x is all zeros.
+function HighestSetBit x = {
+ foreach (i from ('N - 1) to 0 by 1 in dec)
+ if [x[i]] == 0b1 then return(i) else ();
+ return(negate(1))
+}
+
+val CountLeadingZeroBits : forall ('N : Int), 'N >= 2. bits('N) -> int
+
+// Number of zero bits above the highest set bit ('N for an all-zero value).
+function CountLeadingZeroBits x = return(('N - 1) - HighestSetBit(x))
+
+val CountLeadingSignBits : forall ('N : Int), 'N >= 3. bits('N) -> int
+
+// Number of redundant copies of the sign bit: counts leading zeros of
+// x<N-1:1> XOR x<N-2:0>, i.e. positions where adjacent bits agree.
+function CountLeadingSignBits x = return(CountLeadingZeroBits(x[(('N - 1) - 1) + 1 .. 1] ^ x[('N - 1) - 1 .. 0]))
+
+val BitReverse : forall ('N : Int), 'N >= 0 & 'N >= 0.
+ bits('N) -> bits('N) effect {undef}
+
+// Reverse the bit order of data: bit i of the input becomes bit N-1-i of
+// the result.
+function BitReverse data = {
+ result : bits('N) = undefined;
+ foreach (i from 0 to ('N - 1) by 1 in inc)
+ result = __SetSlice_bits('N, 1, result, ('N - i) - 1, [data[i]]);
+ return(result)
+}
+
+val NextInstrAddr : forall ('N : Int), 'N >= 0. unit -> bits('N) effect {rreg}
+
+// Address of the sequentially next instruction: _PC plus the current
+// instruction length in bytes, truncated to 'N bits.
+// NOTE(review): relies on '/' binding tighter than '+' so this is
+// _PC + (ThisInstrLength() / 8) — consistent with the ARM pseudocode.
+function NextInstrAddr () = return(slice(_PC + ThisInstrLength() / 8, 0, 'N))
+
+val BitCount : forall ('N : Int), 'N >= 0. bits('N) -> int
+
+// Population count: the number of set bits in x.
+function BitCount x = {
+ count : int = 0;
+ foreach (idx from 0 to ('N - 1) by 1 in inc)
+ if [x[idx]] == 0b1 then count = count + 1 else ();
+ return(count)
+}
+
+val AArch32_ExceptionClass : Exception -> (int, bits(1)) effect {escape, rreg, undef}
+
+// Map an AArch32 exception type to its (EC, IL) syndrome fields for HSR.
+// IL defaults to 1 for 32-bit instructions; some exception classes force
+// IL to 1 regardless of instruction length.
+function AArch32_ExceptionClass typ = {
+ il : bits(1) = if ThisInstrLength() == 32 then 0b1 else 0b0;
+ ec : int = undefined;
+ match typ {
+ Exception_Uncategorized => {
+ ec = 0;
+ il = 0b1
+ },
+ Exception_WFxTrap => ec = 1,
+ Exception_CP15RTTrap => ec = 3,
+ Exception_CP15RRTTrap => ec = 4,
+ Exception_CP14RTTrap => ec = 5,
+ Exception_CP14DTTrap => ec = 6,
+ Exception_AdvSIMDFPAccessTrap => ec = 7,
+ Exception_FPIDTrap => ec = 8,
+ Exception_CP14RRTTrap => ec = 12,
+ Exception_IllegalState => {
+ ec = 14;
+ il = 0b1
+ },
+ Exception_SupervisorCall => ec = 17,
+ Exception_HypervisorCall => ec = 18,
+ Exception_MonitorCall => ec = 19,
+ Exception_InstructionAbort => {
+ ec = 32;
+ il = 0b1
+ },
+ Exception_PCAlignment => {
+ ec = 34;
+ il = 0b1
+ },
+ Exception_DataAbort => ec = 36,
+ Exception_FPTrappedException => ec = 40,
+ _ => Unreachable()
+ };
+ // Instruction/Data aborts taken from EL2 use the "same EL" EC encoding
+ // (EC + 1: 0x21 / 0x25).
+ if (ec == 32 | ec == 36) & PSTATE.EL == EL2 then ec = ec + 1 else ();
+ return((ec, il))
+}
+
+val RotCell : (bits(4), int) -> bits(4) effect {undef}
+
+// Rotate a 4-bit PAC cell right by 'amount' (1..3): the cell is doubled
+// into an 8-bit temporary and the rotated value read out at offset
+// 4 - amount. Used by the QARMA-based PAC computation.
+function RotCell (incell_name, 'amount) = {
+ tmp : bits(8) = undefined;
+ outcell : bits(4) = undefined;
+ tmp = __SetSlice_bits(8, 8, tmp, 0, slice(incell_name, 0, 4) @ slice(incell_name, 0, 4));
+ outcell = slice(tmp, 4 - amount, 4);
+ return(outcell)
+}
+
+val FPNeg : forall ('N : Int), 'N >= 0 & 'N >= 0.
+ bits('N) -> bits('N) effect {escape}
+
+// Floating-point negate: flip the sign bit of a 16/32/64-bit FP value.
+function FPNeg op = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ return(~([op['N - 1]]) @ slice(op, 0, 'N - 1))
+}
+
+val FPAbs : forall ('N : Int), 'N >= 0 & 'N >= 0.
+ bits('N) -> bits('N) effect {escape}
+
+// Floating-point absolute value: clear the sign bit of a 16/32/64-bit
+// FP value.
+function FPAbs op = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ return(0b0 @ slice(op, 0, 'N - 1))
+}
+
+val EncodeLDFSC : (Fault, int) -> bits(6) effect {escape, undef}
+
+// Encode a fault type and (where relevant) translation table level into
+// the 6-bit long-descriptor fault status code (DFSC/IFSC). Levelled
+// faults encode the level in the low 2 bits; the asserts document which
+// levels are architecturally valid for each fault type.
+function EncodeLDFSC (typ, 'level) = {
+ result : bits(6) = undefined;
+ match typ {
+ Fault_AddressSize => {
+ result = 0x0 @ __GetSlice_int(2, level, 0);
+ assert(level == 0 | level == 1 | level == 2 | level == 3, "((level == 0) || ((level == 1) || ((level == 2) || (level == 3))))")
+ },
+ Fault_AccessFlag => {
+ result = 0x2 @ __GetSlice_int(2, level, 0);
+ assert(level == 1 | level == 2 | level == 3, "((level == 1) || ((level == 2) || (level == 3)))")
+ },
+ Fault_Permission => {
+ result = 0x3 @ __GetSlice_int(2, level, 0);
+ assert(level == 1 | level == 2 | level == 3, "((level == 1) || ((level == 2) || (level == 3)))")
+ },
+ Fault_Translation => {
+ result = 0x1 @ __GetSlice_int(2, level, 0);
+ assert(level == 0 | level == 1 | level == 2 | level == 3, "((level == 0) || ((level == 1) || ((level == 2) || (level == 3))))")
+ },
+ Fault_SyncExternal => result = 0b010000,
+ Fault_SyncExternalOnWalk => {
+ result = 0x5 @ __GetSlice_int(2, level, 0);
+ assert(level == 0 | level == 1 | level == 2 | level == 3, "((level == 0) || ((level == 1) || ((level == 2) || (level == 3))))")
+ },
+ Fault_SyncParity => result = 0b011000,
+ Fault_SyncParityOnWalk => {
+ result = 0x7 @ __GetSlice_int(2, level, 0);
+ assert(level == 0 | level == 1 | level == 2 | level == 3, "((level == 0) || ((level == 1) || ((level == 2) || (level == 3))))")
+ },
+ Fault_AsyncParity => result = 0b011001,
+ Fault_AsyncExternal => result = 0b010001,
+ Fault_Alignment => result = 0b100001,
+ Fault_Debug => result = 0b100010,
+ Fault_TLBConflict => result = 0b110000,
+ Fault_Lockdown => result = 0b110100,
+ Fault_Exclusive => result = 0b110101,
+ _ => Unreachable()
+ };
+ return(result)
+}
+
+val BigEndianReverse : forall ('width : Int), 'width >= 0 & 'width >= 0.
+ bits('width) -> bits('width) effect {escape}
+
+// Byte-reverse an 8/16/32/64/128-bit value by recursively swapping the
+// two halves; a single byte is returned unchanged (base case).
+function BigEndianReverse value_name = {
+ assert('width == 8 | 'width == 16 | 'width == 32 | 'width == 64 | 'width == 128);
+ let 'half = 'width / 2;
+ assert(constraint('half * 2 = 'width));
+ if 'width == 8 then return(value_name) else ();
+ // '@' puts its left operand in the high bits, so the reversed low half
+ // becomes the top of the result.
+ return(BigEndianReverse(slice(value_name, 0, half)) @ BigEndianReverse(slice(value_name, half, 'width - half)))
+}
+
+val AArch32_ReportHypEntry : ExceptionRecord -> unit effect {escape, rreg, undef, wreg}
+
+// Report an exception taken to Hyp mode: populate HSR (EC:IL:ISS), the
+// relevant fault address register (HIFAR for prefetch-side faults, HDFAR
+// for data aborts), and HPFAR when a valid IPA accompanies the exception.
+function AArch32_ReportHypEntry exception = {
+ typ : Exception = exception.typ;
+ il : bits(1) = undefined;
+ ec : int = undefined;
+ (ec, il) = AArch32_ExceptionClass(typ);
+ iss : bits(25) = exception.syndrome;
+ // For data/instruction aborts without a valid ISS (bit 24 clear), IL is
+ // forced to 1.
+ if (ec == 36 | ec == 37) & [iss[24]] == 0b0 then il = 0b1 else ();
+ HSR = (__GetSlice_int(6, ec, 0) @ il) @ iss;
+ if typ == Exception_InstructionAbort | typ == Exception_PCAlignment then {
+ HIFAR = slice(exception.vaddress, 0, 32);
+ HDFAR = undefined
+ } else if typ == Exception_DataAbort then {
+ HIFAR = undefined;
+ HDFAR = slice(exception.vaddress, 0, 32)
+ } else ();
+ if exception.ipavalid then HPFAR = __SetSlice_bits(32, 28, HPFAR, 4, slice(exception.ipaddress, 12, 28)) else HPFAR = __SetSlice_bits(32, 28, HPFAR, 4, undefined);
+ ()
+}
+
+val aset_Elem__0 : forall ('N : Int) ('size : Int), 'N >= 0 & 'size >= 0 & 'N >= 0.
+ (bits('N), int, atom('size), bits('size)) -> bits('N) effect {escape}
+
+val aset_Elem__1 : forall ('N : Int) ('size : Int), 'N >= 0 & 'size >= 0 & 'N >= 0.
+ (bits('N), int, bits('size)) -> bits('N) effect {escape}
+
+// Elem[vector, e, size] = value: write element e of a packed vector.
+// The __1 variant lets the element size be inferred from the value type.
+overload aset_Elem = {aset_Elem__0, aset_Elem__1}
+
+// Write element e (size 'size bits) of vector_name at bit offset e*size.
+function aset_Elem__0 (vector_name__arg, 'e, size, value_name) = {
+ vector_name = vector_name__arg;
+ assert(e >= 0 & (e + 1) * 'size <= 'N, "((e >= 0) && (((e + 1) * size) <= N))");
+ vector_name = __SetSlice_bits('N, 'size, vector_name, e * 'size, value_name);
+ return(vector_name)
+}
+
+// Variant with 'size inferred from the type of value_name.
+function aset_Elem__1 (vector_name__arg, 'e, value_name) = {
+ vector_name = vector_name__arg;
+ vector_name = aset_Elem(vector_name, e, 'size, value_name);
+ return(vector_name)
+}
+
+val aget_Elem__0 : forall ('N : Int) ('size : Int), 'N >= 0 & 'size >= 0.
+ (bits('N), int, atom('size)) -> bits('size) effect {escape}
+
+val aget_Elem__1 : forall ('N : Int) ('size : Int), 'N >= 0 & 'size >= 0.
+ (bits('N), int) -> bits('size) effect {escape}
+
+// Elem[vector, e, size]: read element e of a packed vector. The __1
+// variant infers the element size from the expected result type.
+overload aget_Elem = {aget_Elem__0, aget_Elem__1}
+
+// Read element e (size 'size bits) of vector_name at bit offset e*size.
+function aget_Elem__0 (vector_name, 'e, size) = {
+ assert(e >= 0 & (e + 1) * 'size <= 'N, "((e >= 0) && (((e + 1) * size) <= N))");
+ return(slice(vector_name, e * 'size, 'size))
+}
+
+// Variant with 'size inferred from the result type.
+function aget_Elem__1 (vector_name, 'e) = return(aget_Elem(vector_name, e, 'size))
+
+val UnsignedSatQ : forall ('N : Int), 'N >= 0.
+ (int, atom('N)) -> (bits('N), bool) effect {undef}
+
+// Saturate i to the unsigned N-bit range [0, 2^N - 1]; the bool flags
+// whether saturation occurred.
+function UnsignedSatQ ('i, N) = {
+ saturated : bool = undefined;
+ result : int = undefined;
+ if i > 2 ^ 'N - 1 then {
+ result = 2 ^ 'N - 1;
+ saturated = true
+ } else if i < 0 then {
+ result = 0;
+ saturated = true
+ } else {
+ result = i;
+ saturated = false
+ };
+ return((__GetSlice_int('N, result, 0), saturated))
+}
+
+val SignedSatQ : forall ('N : Int), 'N >= 0.
+ (int, atom('N)) -> (bits('N), bool) effect {undef}
+
+// Saturate i to the signed N-bit range [-2^(N-1), 2^(N-1) - 1]; the bool
+// flags whether saturation occurred.
+function SignedSatQ ('i, N) = {
+ saturated : bool = undefined;
+ result : int = undefined;
+ if i > 2 ^ ('N - 1) - 1 then {
+ result = 2 ^ ('N - 1) - 1;
+ saturated = true
+ } else if i < negate(2 ^ ('N - 1)) then {
+ result = negate(2 ^ ('N - 1));
+ saturated = true
+ } else {
+ result = i;
+ saturated = false
+ };
+ return((__GetSlice_int('N, result, 0), saturated))
+}
+
+val SatQ : forall ('N : Int), 'N >= 0.
+ (int, atom('N), bool) -> (bits('N), bool) effect {undef}
+
+// Dispatch to unsigned or signed saturation based on the 'unsigned' flag.
+function SatQ ('i, N, unsigned) = {
+ sat : bool = undefined;
+ result : bits('N) = undefined;
+ (result, sat) = if unsigned then UnsignedSatQ(i, 'N) else SignedSatQ(i, 'N);
+ return((result, sat))
+}
+
+val Sbox : bits(8) -> bits(8) effect {escape, undef}
+
+// SM4 S-box lookup: the 256-entry table is stored as one 2048-bit
+// constant with entry 0 in the most significant byte, hence the
+// (255 - index) * 8 offset.
+function Sbox sboxin = {
+ sboxout : bits(8) = undefined;
+ sboxstring : bits(2048) = hex_slice("0xD690E9FECCE13DB716B614C228FB2C052B679A762ABE04C3AA441326498606999C4250F491EF987A33540B43EDCFAC62E4B31CA9C98E8958DF94FA758F3FA64707A7FCF37317BA83593C19E6854FA8686B81B27164DA8BF8EB0F4B70569D351E240E5E6358D1A225227C3B01217887D40046579FD327524C3602E7A0C4C89EEABF8AD240C738B5A3F7F2CEF96115A1E0AE5DA49B341A55AD933230F58CB1E31DF6E22E8266CA60C2923ABD534E6FD5DB3745DEFD8E2F03FF6A726D6C5B518D1BAF92BBDDBC7F11D95C411F105AD80AC13188A5CD7BBD2D74D012B8E5B4B08969974AC96777E65B9F19C56EC68418F07DEC3ADC4D2079EE5F3ED7CB3948", 2048, 0);
+ sboxout = slice(sboxstring, (255 - UInt(sboxin)) * 8, 8);
+ return(sboxout)
+}
+
+val Replicate : forall ('M : Int) ('N : Int), 'M >= 0 & 'N >= 0.
+ bits('M) -> bits('N) effect {escape}
+
+// Tile x N/M times to fill an N-bit result; N must be a multiple of M.
+function Replicate x = {
+ assert('N % 'M == 0, "((N MOD M) == 0)");
+ return(replicate_bits(x, 'N / 'M))
+}
+
+val Zeros__0 : forall ('N : Int), 'N >= 0. atom('N) -> bits('N)
+
+val Zeros__1 : forall ('N : Int), 'N >= 0. unit -> bits('N)
+
+// Zeros(N) / Zeros(): an all-zero bitvector; the unit variant infers the
+// width from the expected result type.
+overload Zeros = {Zeros__0, Zeros__1}
+
+function Zeros__0 N = return(replicate_bits(0b0, 'N))
+
+function Zeros__1 () = return(Zeros('N))
+
+val __ResetMemoryState : unit -> unit effect {rreg, wreg}
+
+// Reset the simulator's memory model: zero-initialise RAM (52-bit address
+// space, byte granularity) and clear the local exclusive monitor.
+function __ResetMemoryState () = {
+ __InitRAM(52, 1, __Memory, Zeros(8));
+ __ExclusiveLocal = false
+}
+
+val ZeroExtend__0 : forall ('M : Int) ('N : Int), 'M >= 0 & 'N >= 0.
+ (bits('M), atom('N)) -> bits('N) effect {escape}
+
+val ZeroExtend__1 : forall ('M : Int) ('N : Int), 'M >= 0 & 'N >= 0.
+ bits('M) -> bits('N) effect {escape}
+
+// Zero-extend x to N bits (N >= M); the single-argument variant infers N
+// from the expected result type.
+overload ZeroExtend = {ZeroExtend__0, ZeroExtend__1}
+
+function ZeroExtend__0 (x, N) = {
+ assert('N >= 'M);
+ return(Zeros('N - 'M) @ x)
+}
+
+function ZeroExtend__1 x = return(ZeroExtend(x, 'N))
+
+val aset_Vpart : forall ('width : Int), 'width >= 0.
+ (int, int, bits('width)) -> unit effect {escape, wreg, rreg}
+
+// Write part of SIMD&FP register V[n]: part 0 zero-extends the value into
+// the whole 128-bit register (clearing the upper bits), part 1 writes
+// only the upper 64 bits and preserves the lower half.
+function aset_Vpart (n, part, value_name) = {
+ assert(n >= 0 & n <= 31, "((n >= 0) && (n <= 31))");
+ assert(part == 0 | part == 1, "((part == 0) || (part == 1))");
+ if part == 0 then {
+ assert('width == 8 | 'width == 16 | 'width == 32 | 'width == 64, "((width == 8) || ((width == 16) || ((width == 32) || (width == 64))))");
+ _V[n] = ZeroExtend(value_name) : bits(128)
+ } else {
+ assert('width == 64, "(width == 64)");
+ __tmp_287 : bits(128) = _V[n];
+ __tmp_287[127 .. 64] = value_name[63 .. 0];
+ _V[n] = __tmp_287
+ }
+}
+
+val aset_V : forall ('width : Int), 'width >= 0.
+ (int, bits('width)) -> unit effect {escape, wreg}
+
+// Write SIMD&FP register V[n], zero-extending the 8..128-bit value to the
+// full 128-bit register width.
+function aset_V (n, value_name) = {
+ assert(n >= 0 & n <= 31, "((n >= 0) && (n <= 31))");
+ assert('width == 8 | 'width == 16 | 'width == 32 | 'width == 64 | 'width == 128, "((width == 8) || ((width == 16) || ((width == 32) || ((width == 64) || (width == 128)))))");
+ _V[n] = ZeroExtend(value_name) : bits(128);
+ ()
+}
+
+val aarch64_vector_crypto_sha3_eor3 : (int, int, int, int) -> unit effect {escape, rreg, wreg}
+
+// SHA3 EOR3: Vd = Vn EOR Vm EOR Va (three-way 128-bit exclusive OR).
+function aarch64_vector_crypto_sha3_eor3 ('a, 'd, 'm, 'n) = {
+ Vm : bits(128) = aget_V(m);
+ Vn : bits(128) = aget_V(n);
+ Va : bits(128) = aget_V(a);
+ aset_V(d, (Vn ^ Vm) ^ Va)
+}
+
+val aarch64_vector_crypto_sha3_bcax : (int, int, int, int) -> unit effect {escape, rreg, wreg}
+
+// SHA3 BCAX: Vd = Vn EOR (Vm AND NOT Va) (bit clear and exclusive OR).
+function aarch64_vector_crypto_sha3_bcax ('a, 'd, 'm, 'n) = {
+ Vm : bits(128) = aget_V(m);
+ Vn : bits(128) = aget_V(n);
+ Va : bits(128) = aget_V(a);
+ aset_V(d, Vn ^ (Vm & ~(Va)))
+}
+
+val AArch64_ResetSIMDFPRegisters : unit -> unit effect {escape, undef, wreg}
+
+// Reset all 32 SIMD&FP registers to an UNKNOWN 64-bit value
+// (zero-extended to 128 bits by aset_V).
+function AArch64_ResetSIMDFPRegisters () = {
+ foreach (i from 0 to 31 by 1 in inc) aset_V(i, undefined : bits(64));
+ ()
+}
+
+val aset_SP : forall ('width : Int), 'width >= 0.
+ bits('width) -> unit effect {escape, rreg, wreg}
+
+// SP[] setter: with PSTATE.SP == 0 all ELs use SP_EL0; otherwise write
+// the stack pointer banked for the current exception level. 32-bit
+// values are zero-extended to 64 bits.
+function aset_SP value_name = {
+ assert('width == 32 | 'width == 64, "((width == 32) || (width == 64))");
+ if PSTATE.SP == 0b0 then SP_EL0 = ZeroExtend(value_name) else match PSTATE.EL {
+ ? if ? == EL0 => SP_EL0 = ZeroExtend(value_name),
+ ? if ? == EL1 => SP_EL1 = ZeroExtend(value_name),
+ ? if ? == EL2 => SP_EL2 = ZeroExtend(value_name),
+ ? if ? == EL3 => SP_EL3 = ZeroExtend(value_name)
+ };
+ ()
+}
+
+val LSR_C : forall ('N : Int), 'N >= 0 & 'N >= 0 & 1 >= 0.
+ (bits('N), int) -> (bits('N), bits(1)) effect {escape}
+
+// Logical shift right by shift > 0, also returning the last bit shifted
+// out as the carry.
+function LSR_C (x, 'shift) = {
+ assert(shift > 0, "(shift > 0)");
+ extended_x : bits('shift + 'N) = ZeroExtend(x, shift + 'N);
+ result : bits('N) = slice(extended_x, shift, 'N);
+ carry_out : bits(1) = [extended_x[shift - 1]];
+ return((result, carry_out))
+}
+
+val LSR : forall ('N : Int), 'N >= 0 & 'N >= 0.
+ (bits('N), int) -> bits('N) effect {escape, undef}
+
+// Logical shift right by shift >= 0 (carry discarded; shift 0 is the
+// identity).
+function LSR (x, 'shift) = {
+ assert(shift >= 0, "(shift >= 0)");
+ __anon1 : bits(1) = undefined;
+ result : bits('N) = undefined;
+ if shift == 0 then result = x else (result, __anon1) = LSR_C(x, shift);
+ return(result)
+}
+
+val Poly32Mod2 : forall ('N : Int), 'N >= 0 & 32 >= 0 & 32 >= 0.
+ (bits('N), bits(32)) -> bits(32) effect {escape}
+
+// Remainder of polynomial division of data by poly over GF(2): for each
+// set bit above bit 31 (highest first) XOR in the polynomial aligned
+// below it, then return the low 32 bits. Used by the CRC32 instructions.
+function Poly32Mod2 (data__arg, poly) = {
+ data = data__arg;
+ assert('N > 32, "(N > 32)");
+ foreach (i from ('N - 1) to 32 by 1 in dec)
+ if [data[i]] == 0b1 then data = __SetSlice_bits('N, i, data, 0, slice(data, 0, i) ^ (poly @ Zeros(i - 32))) else ();
+ return(slice(data, 0, 32))
+}
+
+val LSL_C : forall ('N : Int), 'N >= 0 & 'N >= 0 & 1 >= 0.
+ (bits('N), int) -> (bits('N), bits(1)) effect {escape}
+
+// Logical shift left by shift > 0, also returning the last bit shifted
+// out (bit N of the widened value) as the carry.
+function LSL_C (x, 'shift) = {
+ assert(shift > 0, "(shift > 0)");
+ extended_x : bits('shift + 'N) = x @ Zeros(shift);
+ result : bits('N) = slice(extended_x, 0, 'N);
+ carry_out : bits(1) = [extended_x['N]];
+ return((result, carry_out))
+}
+
+val LSL : forall ('N : Int), 'N >= 0 & 'N >= 0.
+ (bits('N), int) -> bits('N) effect {escape, undef}
+
+// Logical shift left by shift >= 0 (carry discarded; shift 0 is the
+// identity).
+function LSL (x, 'shift) = {
+ assert(shift >= 0, "(shift >= 0)");
+ __anon1 : bits(1) = undefined;
+ result : bits('N) = undefined;
+ if shift == 0 then result = x else (result, __anon1) = LSL_C(x, shift);
+ return(result)
+}
+
+val PolynomialMult : forall ('M : Int) ('N : Int), 'M >= 0 & 'N >= 0 & 'N + 'M >= 0.
+ (bits('M), bits('N)) -> bits('N + 'M) effect {escape, undef}
+
+// Carry-less (GF(2)) polynomial multiplication: for each set bit i of
+// op1, XOR in op2 shifted left by i. Result is M+N bits wide.
+function PolynomialMult (op1, op2) = {
+ result : bits('N + 'M) = Zeros('M + 'N);
+ extended_op2 : bits('N + 'M) = ZeroExtend(op2, 'M + 'N);
+ foreach (i from 0 to ('M - 1) by 1 in inc)
+ if [op1[i]] == 0b1 then result = result ^ LSL(extended_op2, i) else ();
+ return(result)
+}
+
+val AArch32_ITAdvance : unit -> unit effect {escape, rreg, undef, wreg}
+
+// Advance the Thumb IT block state: clear PSTATE.IT when fewer than two
+// condition bits remain, otherwise shift the low 5 bits left by one.
+function AArch32_ITAdvance () = {
+ if slice(PSTATE.IT, 0, 3) == 0b000 then PSTATE.IT = 0x00 else {
+ __tmp_276 : bits(8) = PSTATE.IT;
+ __tmp_276 = __SetSlice_bits(8, 5, __tmp_276, 0, LSL(slice(PSTATE.IT, 0, 5), 1));
+ PSTATE.IT = __tmp_276
+ };
+ ()
+}
+
+val LSInstructionSyndrome : unit -> bits(11) effect {escape}
+
+// Not modelled: load/store instruction syndrome generation. Reaching this
+// fails the assert; the Zeros(11) after it is dead code for typing only.
+function LSInstructionSyndrome () = {
+ assert(false, "FALSE");
+ return(Zeros(11))
+}
+
+val IsZero : forall ('N : Int), 'N >= 0. bits('N) -> bool
+
+// True iff every bit of the argument is zero.
+function IsZero bv = return(bv == Zeros('N))
+
+val IsZeroBit : forall ('N : Int), 'N >= 0 & 1 >= 0. bits('N) -> bits(1)
+
+// Single-bit form of IsZero: 0b1 when the argument is all-zero, else 0b0.
+function IsZeroBit bv = return(if IsZero(bv) then 0b1 else 0b0)
+
+val AddWithCarry : forall ('N : Int), 'N >= 0 & 'N >= 0 & 1 >= 0 & 'N >= 0 & 4 >= 0.
+ (bits('N), bits('N), bits(1)) -> (bits('N), bits(4))
+
+// x + y + carry_in, returning the N-bit sum and the NZCV flags.
+// C is set when the unsigned sum overflows N bits; V when the signed
+// sum does (each detected by comparing the truncated result with the
+// exact integer sum).
+function AddWithCarry (x, y, carry_in) = {
+ unsigned_sum : int = (UInt(x) + UInt(y)) + UInt(carry_in);
+ signed_sum : int = (SInt(x) + SInt(y)) + UInt(carry_in);
+ result : bits('N) = __GetSlice_int('N, unsigned_sum, 0);
+ n : bits(1) = [result['N - 1]];
+ z : bits(1) = if IsZero(result) then 0b1 else 0b0;
+ c : bits(1) = if UInt(result) == unsigned_sum then 0b0 else 0b1;
+ v : bits(1) = if SInt(result) == signed_sum then 0b0 else 0b1;
+ return((result, ((n @ z) @ c) @ v))
+}
+
+val GetPSRFromPSTATE : unit -> bits(32) effect {rreg, escape}
+
+// Assemble an SPSR-format value from the current PSTATE. The common
+// fields (NZCV, SS, IL) are packed first, then either the AArch32 layout
+// (nRW == 1: Q, IT, GE, E, mode bits M) or the AArch64 layout (D, EL, SP).
+function GetPSRFromPSTATE () = {
+ spsr : bits(32) = Zeros();
+ spsr[31 .. 31] = PSTATE.N;
+ spsr[30 .. 30] = PSTATE.Z;
+ spsr[29 .. 29] = PSTATE.C;
+ spsr[28 .. 28] = PSTATE.V;
+ spsr[21 .. 21] = PSTATE.SS;
+ spsr[20 .. 20] = PSTATE.IL;
+ if PSTATE.nRW == 0b1 then {
+ spsr[27 .. 27] = PSTATE.Q;
+ // IT bits are split across the SPSR: IT<1:0> at <26:25>, IT<7:2> at
+ // <15:10>.
+ spsr[26 .. 25] = PSTATE.IT[1 .. 0];
+ spsr[19 .. 16] = PSTATE.GE;
+ spsr[15 .. 10] = PSTATE.IT[7 .. 2];
+ spsr[9 .. 9] = PSTATE.E;
+ spsr[8 .. 8] = PSTATE.A;
+ spsr[7 .. 7] = PSTATE.I;
+ spsr[6 .. 6] = PSTATE.F;
+ spsr[5 .. 5] = PSTATE.T;
+ assert([PSTATE.M[4]] == PSTATE.nRW, "(((PSTATE).M)<4> == (PSTATE).nRW)");
+ spsr[4 .. 0] = PSTATE.M
+ } else {
+ spsr[9 .. 9] = PSTATE.D;
+ spsr[8 .. 8] = PSTATE.A;
+ spsr[7 .. 7] = PSTATE.I;
+ spsr[6 .. 6] = PSTATE.F;
+ spsr[4 .. 4] = PSTATE.nRW;
+ spsr[3 .. 2] = PSTATE.EL;
+ spsr[0 .. 0] = PSTATE.SP
+ };
+ return(spsr)
+}
+
+val FPZero : forall ('N : Int), 1 >= 0 & 'N >= 0.
+ bits(1) -> bits('N) effect {escape}
+
+// IEEE 754 zero of the given sign in half/single/double precision:
+// sign : zero exponent : zero fraction. E is the exponent width for N.
+function FPZero sign = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ let 'E = (if 'N == 16 then 5 else if 'N == 32 then 8 else 11) : {|5, 8, 11|};
+ F : atom('N - 'E - 1) = ('N - E) - 1;
+ exp : bits('E) = Zeros(E);
+ frac : bits('N - 1 - 'E) = Zeros(F);
+ return(append(append(sign, exp), frac))
+}
+
+val FPTwo : forall ('N : Int), 1 >= 0 & 'N >= 0.
+ bits(1) -> bits('N) effect {escape}
+
+// IEEE 754 encoding of +/-2.0: exponent 10...0 (bias + 1), zero fraction.
+function FPTwo sign = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ let 'E : {|5, 8, 11|} = if 'N == 16 then 5 else if 'N == 32 then 8 else 11;
+ F : atom('N - 'E - 1) = ('N - E) - 1;
+ exp : bits('E) = 0b1 @ Zeros(E - 1);
+ frac : bits('N - 'E - 1) = Zeros(F);
+ return(sign @ (exp @ frac))
+}
+
+val ExceptionSyndrome : Exception -> ExceptionRecord effect {undef}
+
+// Build an empty exception record of the given type: zero syndrome and
+// addresses, no valid IPA.
+function ExceptionSyndrome typ = {
+ r : ExceptionRecord = undefined;
+ r.typ = typ;
+ r.syndrome = Zeros();
+ r.vaddress = Zeros();
+ r.ipavalid = false;
+ r.ipaddress = Zeros();
+ return(r)
+}
+
+val ConstrainUnpredictableBits : forall ('width : Int), 'width >= 0.
+ Unpredictable -> (Constraint, bits('width)) effect {undef}
+
+// Resolve CONSTRAINED UNPREDICTABLE behaviour that yields a bitvector:
+// an UNKNOWN constraint is paired with a zero default, otherwise the
+// bitvector component is left undefined.
+function ConstrainUnpredictableBits which = {
+ c : Constraint = ConstrainUnpredictable(which);
+ if c == Constraint_UNKNOWN then return((c, Zeros('width))) else return((c, undefined))
+}
+
+// ---------------------------------------------------------------------
+// Unimplemented stubs: each of the following operations is not modelled.
+// Reaching one fails its assert(false); the return after the assert is
+// dead code that only satisfies the declared return type.
+// ---------------------------------------------------------------------
+val AESSubBytes : bits(128) -> bits(128) effect {escape}
+
+function AESSubBytes op = {
+ assert(false, "FALSE");
+ return(Zeros(128))
+}
+
+val AESShiftRows : bits(128) -> bits(128) effect {escape}
+
+function AESShiftRows op = {
+ assert(false, "FALSE");
+ return(Zeros(128))
+}
+
+val AESMixColumns : bits(128) -> bits(128) effect {escape}
+
+function AESMixColumns op = {
+ assert(false, "FALSE");
+ return(Zeros(128))
+}
+
+val AESInvSubBytes : bits(128) -> bits(128) effect {escape}
+
+function AESInvSubBytes op = {
+ assert(false, "FALSE");
+ return(Zeros(128))
+}
+
+val AESInvShiftRows : bits(128) -> bits(128) effect {escape}
+
+function AESInvShiftRows op = {
+ assert(false, "FALSE");
+ return(Zeros(128))
+}
+
+val AESInvMixColumns : bits(128) -> bits(128) effect {escape}
+
+function AESInvMixColumns op = {
+ assert(false, "FALSE");
+ return(Zeros(128))
+}
+
+val AArch64_SysInstrWithResult : (int, int, int, int, int) -> bits(64) effect {escape}
+
+function AArch64_SysInstrWithResult ('op0, 'op1, 'crn, 'crm, 'op2) = {
+ assert(false, "FALSE");
+ return(Zeros(64))
+}
+
+val AArch64_PhysicalSErrorSyndrome : bool -> bits(25) effect {escape}
+
+function AArch64_PhysicalSErrorSyndrome implicit_esb = {
+ assert(false, "FALSE");
+ return(Zeros(25))
+}
+
+val AArch32_PhysicalSErrorSyndrome : unit -> AArch32_SErrorSyndrome effect {escape, undef}
+
+function AArch32_PhysicalSErrorSyndrome () = {
+ assert(false, "FALSE");
+ r : AArch32_SErrorSyndrome = undefined;
+ r.AET = Zeros(2);
+ r.ExT = Zeros(1);
+ return(r)
+}
+
+val VFPExpandImm : forall ('N : Int), 8 >= 0 & 'N >= 0.
+ bits(8) -> bits('N) effect {escape}
+
+// Expand the 8-bit VFP modified immediate (abcdefgh) to a 16/32/64-bit
+// FP value: sign = a, exponent = NOT(b) : b replicated (E-3) : cd,
+// fraction = efgh followed by zeros.
+function VFPExpandImm imm8 = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ let 'E = (if 'N == 16 then 5 else if 'N == 32 then 8 else 11) : {|5, 8, 11|};
+ F : atom('N - 'E - 1) = ('N - E) - 1;
+ sign : bits(1) = [imm8[7]];
+ exp : bits('E) = append(append(~([imm8[6]]), replicate_bits([imm8[6]], E - 3)), imm8[5 .. 4]);
+ frac : bits('N - 1 - 'E) = append(imm8[3 .. 0], Zeros(F - 4));
+ return(append(append(sign, exp), frac))
+}
+
+val SignExtend__0 : forall ('M : Int) ('N : Int), 'M >= 0 & 'N >= 0.
+ (bits('M), atom('N)) -> bits('N) effect {escape}
+
+val SignExtend__1 : forall ('M : Int) ('N : Int), 'M >= 0 & 'N >= 0.
+ bits('M) -> bits('N) effect {escape}
+
+// Sign-extend x to N bits (N >= M) by replicating its top bit; the
+// single-argument variant infers N from the expected result type.
+overload SignExtend = {SignExtend__0, SignExtend__1}
+
+function SignExtend__0 (x, N) = {
+ assert('N >= 'M);
+ return(replicate_bits([x['M - 1]], 'N - 'M) @ x)
+}
+
+function SignExtend__1 x = return(SignExtend(x, 'N))
+
+val Extend__0 : forall ('M : Int) ('N : Int), 'M >= 0 & 'N >= 0.
+ (bits('M), atom('N), bool) -> bits('N) effect {escape}
+
+val Extend__1 : forall ('M : Int) ('N : Int), 'M >= 0 & 'N >= 0.
+ (bits('M), bool) -> bits('N) effect {escape}
+
+// Extend x to N bits, zero- or sign-extending per the 'unsigned' flag.
+overload Extend = {Extend__0, Extend__1}
+
+function Extend__0 (x, N, unsigned) = return(if unsigned then ZeroExtend(x, 'N) else SignExtend(x, 'N))
+
+function Extend__1 (x, unsigned) = return(Extend(x, 'N, unsigned))
+
+val ASR_C : forall ('N : Int), 'N >= 0 & 'N >= 0 & 1 >= 0.
+ (bits('N), int) -> (bits('N), bits(1)) effect {escape}
+
+// Arithmetic shift right by shift > 0, also returning the last bit
+// shifted out as the carry.
+function ASR_C (x, 'shift) = {
+ assert(shift > 0, "(shift > 0)");
+ extended_x : bits('shift + 'N) = SignExtend(x, shift + 'N);
+ result : bits('N) = slice(extended_x, shift, 'N);
+ carry_out : bits(1) = [extended_x[shift - 1]];
+ return((result, carry_out))
+}
+
+val ASR : forall ('N : Int), 'N >= 0 & 'N >= 0.
+ (bits('N), int) -> bits('N) effect {escape, undef}
+
+// Arithmetic shift right by shift >= 0 (carry discarded; shift 0 is the
+// identity).
+function ASR (x, 'shift) = {
+ assert(shift >= 0, "(shift >= 0)");
+ __anon1 : bits(1) = undefined;
+ result : bits('N) = undefined;
+ if shift == 0 then result = x else (result, __anon1) = ASR_C(x, shift);
+ return(result)
+}
+
+val Ones__0 : forall ('N : Int), 'N >= 0. atom('N) -> bits('N)
+
+val Ones__1 : forall ('N : Int), 'N >= 0. unit -> bits('N)
+
+// Ones(N) / Ones(): an all-ones bitvector; the unit variant infers the
+// width from the expected result type.
+overload Ones = {Ones__0, Ones__1}
+
+function Ones__0 N = return(replicate_bits(0b1, 'N))
+
+function Ones__1 () = return(Ones('N))
+
+val IsOnes : forall ('N : Int), 'N >= 0. bits('N) -> bool
+
+// True iff every bit of the argument is one.
+function IsOnes x = return(x == Ones('N))
+
+val FPOnePointFive : forall ('N : Int), 1 >= 0 & 'N >= 0.
+ bits(1) -> bits('N) effect {escape}
+
+// IEEE 754 encoding of +/-1.5: exponent 01...1 (the bias), fraction
+// 10...0.
+function FPOnePointFive sign = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ let 'E : {|5, 8, 11|} = if 'N == 16 then 5 else if 'N == 32 then 8 else 11;
+ let F : atom('N - 'E - 1) = ('N - E) - 1;
+ exp : bits('E) = 0b0 @ Ones(E - 1);
+ frac : bits('N - 'E - 1) = 0b1 @ Zeros(F - 1);
+ return((sign @ exp) @ frac)
+}
+
+val FPMaxNormal : forall ('N : Int), 1 >= 0 & 'N >= 0.
+ bits(1) -> bits('N) effect {escape}
+
+// Largest finite value of the given sign: maximum non-infinite exponent
+// (1...10) with an all-ones fraction.
+function FPMaxNormal sign = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ let 'E = (if 'N == 16 then 5 else if 'N == 32 then 8 else 11) : {|5, 8, 11|};
+ F : atom('N - 'E - 1) = ('N - E) - 1;
+ exp : bits('E) = append(Ones(E - 1), 0b0);
+ frac : bits('N - 1 - 'E) = Ones(F);
+ return(append(append(sign, exp), frac))
+}
+
+val FPInfinity : forall ('N : Int), 1 >= 0 & 'N >= 0.
+ bits(1) -> bits('N) effect {escape}
+
+// IEEE 754 infinity of the given sign: all-ones exponent, zero fraction.
+function FPInfinity sign = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ let 'E = (if 'N == 16 then 5 else if 'N == 32 then 8 else 11) : {|5, 8, 11|};
+ F : atom('N - 'E - 1) = ('N - E) - 1;
+ exp : bits('E) = Ones(E);
+ frac : bits('N - 1 - 'E) = Zeros(F);
+ return(append(append(sign, exp), frac))
+}
+
+val FPDefaultNaN : forall ('N : Int), 'N >= 0. unit -> bits('N) effect {escape}
+
+// Default quiet NaN: positive sign, all-ones exponent, fraction 10...0.
+function FPDefaultNaN () = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ let 'E = (if 'N == 16 then 5 else if 'N == 32 then 8 else 11) : {|5, 8, 11|};
+ F : atom('N - 'E - 1) = ('N - E) - 1;
+ sign : bits(1) = 0b0;
+ exp : bits('E) = Ones(E);
+ frac : bits('N - 1 - 'E) = append(0b1, Zeros(F - 1));
+ return(append(append(sign, exp), frac))
+}
+
+val FPConvertNaN : forall ('N : Int) ('M : Int), 'N >= 0 & 'M >= 0.
+ bits('N) -> bits('M) effect {escape, undef}
+
+// Convert a NaN between FP formats, preserving the sign and the top
+// fraction bits. The fraction is first normalised to the 51 bits of a
+// double, then truncated/padded to the target; the exponent and quiet
+// bit of the result are forced to all-ones (sign @ Ones(...) covers
+// exponent plus the leading fraction bit).
+function FPConvertNaN op = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ assert('M == 16 | 'M == 32 | 'M == 64, "((M == 16) || ((M == 32) || (M == 64)))");
+ result : bits('M) = undefined;
+ frac : bits(51) = undefined;
+ sign : bits(1) = [op['N - 1]];
+ match 'N {
+ 64 => frac = slice(op, 0, 51),
+ 32 => frac = slice(op, 0, 22) @ Zeros(29),
+ 16 => frac = slice(op, 0, 9) @ Zeros(42)
+ };
+ match 'M {
+ 64 => result = (sign @ Ones('M - 52)) @ frac,
+ 32 => result = (sign @ Ones('M - 23)) @ slice(frac, 29, 22),
+ 16 => result = (sign @ Ones('M - 10)) @ slice(frac, 42, 9)
+ };
+ return(result)
+}
+
+val ExcVectorBase : unit -> bits(32) effect {rreg}
+
+// AArch32 exception vector base: the high vectors address 0xFFFF0000 when
+// SCTLR.V (bit 13) is set, otherwise VBAR with its low 5 bits cleared.
+function ExcVectorBase () = if [SCTLR[13]] == 0b1 then return(Ones(16) @ Zeros(16)) else return(slice(VBAR, 5, 27) @ Zeros(5))
+
+val RecipSqrtEstimate : int -> int effect {escape}
+
+// Fixed-point reciprocal square root estimate: maps a 9-bit mantissa-like
+// input a in [128, 512) to an estimate r in [256, 512). The search finds
+// the largest b with a*(b+1)^2 < 2^28; r = (b+1)/2 approximates
+// sqrt(2^28 / a) / 2.
+function RecipSqrtEstimate a__arg = {
+ a : int = a__arg;
+ assert(128 <= a & a < 512, "((128 <= a) && (a < 512))");
+ if a < 256 then a = a * 2 + 1
+ else {
+ // Round a down to even before scaling (input range [256, 512)).
+ a = shl_int(shr_int(a, 1), 1);
+ a = (a + 1) * 2
+ };
+ b : int = 512;
+ while (a * (b + 1)) * (b + 1) < pow2(28) do b = b + 1;
+ r : int = (b + 1) / 2;
+ assert(256 <= r & r < 512, "((256 <= r) && (r < 512))");
+ return(r)
+}
+
+val UnsignedRSqrtEstimate : forall ('N : Int), 'N >= 0 & 'N >= 0.
+ bits('N) -> bits('N) effect {escape, undef}
+
+// URSQRTE: unsigned reciprocal square root estimate. Operands with the
+// top two bits clear (< 0.25) saturate to all-ones; otherwise a 9-bit
+// estimate from the top bits fills the top of the result.
+function UnsignedRSqrtEstimate operand = {
+ assert('N == 16 | 'N == 32, "((N == 16) || (N == 32))");
+ estimate : int = undefined;
+ result : bits('N) = undefined;
+ if slice(operand, 'N - 2, 2) == 0b00 then result = Ones('N) else {
+ match 'N {
+ 16 => estimate = RecipSqrtEstimate(UInt(slice(operand, 7, 9))),
+ 32 => estimate = RecipSqrtEstimate(UInt(slice(operand, 23, 9)))
+ };
+ result = __GetSlice_int(9, estimate, 0) @ Zeros('N - 9)
+ };
+ return(result)
+}
+
+val RecipEstimate : int -> int effect {escape}
+
+// Fixed-point reciprocal estimate: maps a in [256, 512) to r in
+// [256, 512), approximating 2^18 / a with round-to-nearest via
+// (b + 1) / 2 where b = 2^19 / (2a + 1).
+function RecipEstimate a__arg = {
+ a : int = a__arg;
+ assert(256 <= a & a < 512, "((256 <= a) && (a < 512))");
+ a = a * 2 + 1;
+ b : int = pow2(19) / a;
+ r : int = (b + 1) / 2;
+ assert(256 <= r & r < 512, "((256 <= r) && (r < 512))");
+ return(r)
+}
+
+val UnsignedRecipEstimate : forall ('N : Int), 'N >= 0 & 'N >= 0.
+ bits('N) -> bits('N) effect {escape, undef}
+
+// URECPE: unsigned reciprocal estimate. Operands with the top bit clear
+// (< 0.5) saturate to all-ones; otherwise a 9-bit estimate from the top
+// bits fills the top of the result.
+function UnsignedRecipEstimate operand = {
+ assert('N == 16 | 'N == 32, "((N == 16) || (N == 32))");
+ estimate : int = undefined;
+ result : bits('N) = undefined;
+ if [operand['N - 1]] == 0b0 then result = Ones('N) else {
+ match 'N {
+ 16 => estimate = RecipEstimate(UInt(slice(operand, 7, 9))),
+ 32 => estimate = RecipEstimate(UInt(slice(operand, 23, 9)))
+ };
+ result = __GetSlice_int(9, estimate, 0) @ Zeros('N - 9)
+ };
+ return(result)
+}
+
+val PACSub : bits(64) -> bits(64) effect {undef}
+
+// PAC (QARMA) substitution layer: apply the fixed 4-bit S-box to each of
+// the sixteen nibbles of the 64-bit state independently.
+function PACSub Tinput = {
+ Toutput : bits(64) = undefined;
+ foreach (i from 0 to 15 by 1 in inc)
+ match slice(Tinput, 4 * i, 4) {
+ 0x0 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0xB),
+ 0x1 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x6),
+ 0x2 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x8),
+ 0x3 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0xF),
+ 0x4 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0xC),
+ 0x5 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x0),
+ 0x6 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x9),
+ 0x7 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0xE),
+ 0x8 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x3),
+ 0x9 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x7),
+ 0xA => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x4),
+ 0xB => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x5),
+ 0xC => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0xD),
+ 0xD => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x2),
+ 0xE => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x1),
+ 0xF => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0xA)
+ };
+ return(Toutput)
+}
+
+/* PACMult: pointer-authentication cipher diffusion layer. Treats the
+   64-bit state as four rows of four nibbles; each output nibble t0..t3
+   is the XOR of RotCell-rotated copies of three input nibbles drawn
+   from the other rows, and the rows are written back in reverse order
+   (row 0 output comes from t3, row 3 from t0). */
+val PACMult : bits(64) -> bits(64) effect {undef}
+
+function PACMult Sinput = {
+ t0 : bits(4) = undefined;
+ t1 : bits(4) = undefined;
+ t2 : bits(4) = undefined;
+ t3 : bits(4) = undefined;
+ Soutput : bits(64) = undefined;
+ foreach (i from 0 to 3 by 1 in inc) {
+ t0 = __SetSlice_bits(4, 4, t0, 0, RotCell(slice(Sinput, 4 * (i + 8), 4), 1) ^ RotCell(slice(Sinput, 4 * (i + 4), 4), 2));
+ t0 = __SetSlice_bits(4, 4, t0, 0, slice(t0, 0, 4) ^ RotCell(slice(Sinput, 4 * i, 4), 1));
+ t1 = __SetSlice_bits(4, 4, t1, 0, RotCell(slice(Sinput, 4 * (i + 12), 4), 1) ^ RotCell(slice(Sinput, 4 * (i + 4), 4), 1));
+ t1 = __SetSlice_bits(4, 4, t1, 0, slice(t1, 0, 4) ^ RotCell(slice(Sinput, 4 * i, 4), 2));
+ t2 = __SetSlice_bits(4, 4, t2, 0, RotCell(slice(Sinput, 4 * (i + 12), 4), 2) ^ RotCell(slice(Sinput, 4 * (i + 8), 4), 1));
+ t2 = __SetSlice_bits(4, 4, t2, 0, slice(t2, 0, 4) ^ RotCell(slice(Sinput, 4 * i, 4), 1));
+ t3 = __SetSlice_bits(4, 4, t3, 0, RotCell(slice(Sinput, 4 * (i + 12), 4), 1) ^ RotCell(slice(Sinput, 4 * (i + 8), 4), 2));
+ t3 = __SetSlice_bits(4, 4, t3, 0, slice(t3, 0, 4) ^ RotCell(slice(Sinput, 4 * (i + 4), 4), 1));
+ Soutput = __SetSlice_bits(64, 4, Soutput, 4 * i, slice(t3, 0, 4));
+ Soutput = __SetSlice_bits(64, 4, Soutput, 4 * (i + 4), slice(t2, 0, 4));
+ Soutput = __SetSlice_bits(64, 4, Soutput, 4 * (i + 8), slice(t1, 0, 4));
+ Soutput = __SetSlice_bits(64, 4, Soutput, 4 * (i + 12), slice(t0, 0, 4))
+ };
+ return(Soutput)
+}
+
+/* PACInvSub: inverse substitution layer — applies the inverse 4-bit
+   S-box to each of the 16 nibbles. The table is the inverse permutation
+   of the PACSub table (e.g. PACSub maps 0x0 -> 0xB, this maps
+   0xB -> 0x0). */
+val PACInvSub : bits(64) -> bits(64) effect {undef}
+
+function PACInvSub Tinput = {
+ Toutput : bits(64) = undefined;
+ foreach (i from 0 to 15 by 1 in inc)
+ match slice(Tinput, 4 * i, 4) {
+ 0x0 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x5),
+ 0x1 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0xE),
+ 0x2 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0xD),
+ 0x3 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x8),
+ 0x4 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0xA),
+ 0x5 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0xB),
+ 0x6 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x1),
+ 0x7 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x9),
+ 0x8 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x2),
+ 0x9 => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x6),
+ 0xA => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0xF),
+ 0xB => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x0),
+ 0xC => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x4),
+ 0xD => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0xC),
+ 0xE => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x7),
+ 0xF => Toutput = __SetSlice_bits(64, 4, Toutput, 4 * i, 0x3)
+ };
+ return(Toutput)
+}
+
+/* ComputePAC: QARMA-based pointer-authentication code over (data,
+   modifier) under the 128-bit key split as (key0, key1), following the
+   ARM pseudocode ComputePAC(): five forward rounds, a middle
+   reflection, and five backward rounds. Alpha and RC[0..4] are the
+   published QARMA constants (fractional hex digits of pi).
+   BUG FIX: RC[2] was "0xA493822299F31D0", which drops a digit of the
+   published constant 0xA4093822299F31D0 (note RC[1], RC[3] and RC[4]
+   match the pi-digit sequence; RC[3] merely omits a leading zero).
+   Restored the correct value. */
+val ComputePAC : (bits(64), bits(64), bits(64), bits(64)) -> bits(64) effect {escape, rreg, undef, wreg}
+
+function ComputePAC (data, modifier, key0, key1) = {
+ workingval : bits(64) = undefined;
+ runningmod : bits(64) = undefined;
+ roundkey : bits(64) = undefined;
+ modk0 : bits(64) = undefined;
+ Alpha : bits(64) = hex_slice("0xC0AC29B7C97C50DD", 64, 0);
+ RC[0] = hex_slice("0x0", 64, 0);
+ RC[1] = hex_slice("0x13198A2E03707344", 64, 0);
+ RC[2] = hex_slice("0xA4093822299F31D0", 64, 0);
+ RC[3] = hex_slice("0x82EFA98EC4E6C89", 64, 0);
+ RC[4] = hex_slice("0x452821E638D01377", 64, 0);
+ /* modk0 = key0<0> : key0<63:2> : (key0<63> EOR key0<1>) */
+ modk0 = ([key0[0]] @ slice(key0, 2, 62)) @ ([key0[63]] ^ [key0[1]]);
+ runningmod = modifier;
+ workingval = data ^ key0;
+ /* forward rounds */
+ foreach (i from 0 to 4 by 1 in inc) {
+ roundkey = key1 ^ runningmod;
+ workingval = workingval ^ roundkey;
+ workingval = workingval ^ RC[i];
+ if i > 0 then {
+ workingval = PACCellShuffle(workingval);
+ workingval = PACMult(workingval)
+ } else ();
+ workingval = PACSub(workingval);
+ runningmod = TweakShuffle(slice(runningmod, 0, 64))
+ };
+ /* middle (reflection) rounds */
+ roundkey = modk0 ^ runningmod;
+ workingval = workingval ^ roundkey;
+ workingval = PACCellShuffle(workingval);
+ workingval = PACMult(workingval);
+ workingval = PACSub(workingval);
+ workingval = PACCellShuffle(workingval);
+ workingval = PACMult(workingval);
+ workingval = key1 ^ workingval;
+ workingval = PACCellInvShuffle(workingval);
+ workingval = PACInvSub(workingval);
+ workingval = PACMult(workingval);
+ workingval = PACCellInvShuffle(workingval);
+ workingval = workingval ^ key0;
+ workingval = workingval ^ runningmod;
+ /* backward rounds */
+ foreach (i from 0 to 4 by 1 in inc) {
+ workingval = PACInvSub(workingval);
+ if i < 4 then {
+ workingval = PACMult(workingval);
+ workingval = PACCellInvShuffle(workingval)
+ } else ();
+ runningmod = TweakInvShuffle(slice(runningmod, 0, 64));
+ roundkey = key1 ^ runningmod;
+ workingval = workingval ^ RC[4 - i];
+ workingval = workingval ^ roundkey;
+ workingval = workingval ^ Alpha
+ };
+ workingval = workingval ^ modk0;
+ return(workingval)
+}
+
+/* Align: overloaded align-down. Align__0 rounds integer x down to a
+   multiple of y (y * (x DIV y)); Align__1 does the same on the unsigned
+   value of a bit vector and returns a vector of the same width. */
+val Align__0 : (int, int) -> int
+
+val Align__1 : forall ('N : Int), 'N >= 0 & 'N >= 0. (bits('N), int) -> bits('N)
+
+overload Align = {Align__0, Align__1}
+
+function Align__0 ('x, 'y) = return(y * (x / y))
+
+function Align__1 (x, 'y) = return(__GetSlice_int('N, Align(UInt(x), y), 0))
+
+/* aset__Mem: single-copy physical memory write of 'size bytes
+   (1/2/4/8/16, asserted) at the size-aligned (asserted) physical
+   address from desc. Physical address 0x13000000 is a debug "TUBE"
+   device: writing 4 (^D) prints an exit message and terminates the
+   model, any other value is echoed via putchar; every other address is
+   written to __Memory via __WriteRAM. accdesc is currently unused here. */
+val aset__Mem : forall ('size : Int), 8 * 'size >= 0.
+ (AddressDescriptor, atom('size), AccessDescriptor, bits(8 * 'size)) -> unit effect {escape, rreg, wmem}
+
+function aset__Mem (desc, size, accdesc, value_name) = {
+ assert('size == 1 | 'size == 2 | 'size == 4 | 'size == 8 | 'size == 16, "((size == 1) || ((size == 2) || ((size == 4) || ((size == 8) || (size == 16)))))");
+ address : bits(52) = desc.paddress.physicaladdress;
+ assert(address == Align(address, 'size), "(address == Align(address, size))");
+ if address == hex_slice("0x13000000", 52, 0) then if UInt(value_name) == 4 then {
+ print("Program exited by writing ^D to TUBE\n");
+ exit(())
+ } else putchar(UInt(slice(value_name, 0, 8))) else __WriteRAM(52, 'size, __Memory, address, value_name);
+ ()
+}
+
+/* aget__Mem: single-copy physical memory read of 'size bytes
+   (1/2/4/8/16, asserted) from the size-aligned (asserted) physical
+   address in desc, via __ReadRAM. accdesc is currently unused here. */
+val aget__Mem : forall ('size : Int), 8 * 'size >= 0.
+ (AddressDescriptor, atom('size), AccessDescriptor) -> bits(8 * 'size) effect {escape, rmem, rreg}
+
+function aget__Mem (desc, size, accdesc) = {
+ assert('size == 1 | 'size == 2 | 'size == 4 | 'size == 8 | 'size == 16, "((size == 1) || ((size == 2) || ((size == 4) || ((size == 8) || (size == 16)))))");
+ address : bits(52) = desc.paddress.physicaladdress;
+ assert(address == Align(address, 'size), "(address == Align(address, size))");
+ return(__ReadRAM(52, 'size, __Memory, address))
+}
+
+/* aset_X: write a 32- or 64-bit value (asserted) to general-purpose
+   register n (0..31, asserted), zero-extended to 64 bits. Writes to
+   n == 31 (the zero register) are discarded. */
+val aset_X : forall ('width : Int), 'width >= 0.
+ (int, bits('width)) -> unit effect {wreg, escape}
+
+function aset_X (n, value_name) = {
+ assert(n >= 0 & n <= 31, "((n >= 0) && (n <= 31))");
+ assert('width == 32 | 'width == 64, "((width == 32) || (width == 64))");
+ if n != 31 then _R[n] = ZeroExtend(value_name, 64)
+ else ();
+ ()
+}
+
+/* ADR / ADRP execute: form a PC-relative address. For the page form
+   (ADRP) the low 12 bits of the PC base are cleared first; the
+   immediate is then added and the result written to Xd. */
+val aarch64_integer_arithmetic_address_pcrel : (int, bits(64), bool) -> unit effect {escape, rreg, wreg}
+
+function aarch64_integer_arithmetic_address_pcrel ('d, imm, page) = {
+ base : bits(64) = aget_PC();
+ if page then base = __SetSlice_bits(64, 12, base, 0, Zeros(12)) else ();
+ aset_X(d, base + imm)
+}
+
+/* ADR / ADRP decode: op selects the page form; the 21-bit immediate is
+   immhi:immlo, shifted left by 12 bits for ADRP, then sign-extended to
+   64 bits. */
+val integer_arithmetic_address_pcrel_decode : (bits(1), bits(2), bits(19), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_arithmetic_address_pcrel_decode (op, immlo, immhi, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ page : bool = op == 0b1;
+ imm : bits(64) = undefined;
+ if page then imm = SignExtend((immhi @ immlo) @ Zeros(12), 64) else imm = SignExtend(immhi @ immlo, 64);
+ aarch64_integer_arithmetic_address_pcrel(d, imm, page)
+}
+
+/* AArch64_ResetGeneralRegisters: on reset, set X0..X30 to UNKNOWN
+   (undefined) 64-bit values. */
+val AArch64_ResetGeneralRegisters : unit -> unit effect {escape, undef, wreg}
+
+function AArch64_ResetGeneralRegisters () = {
+ foreach (i from 0 to 30 by 1 in inc) aset_X(i, undefined : bits(64));
+ ()
+}
+
+/* aset_ELR: write the Exception Link Register. The two-argument form
+   selects ELR_EL1/ELR_EL2/ELR_EL3 by the given exception level (any
+   other value, i.e. EL0, is Unreachable); the zero-argument-EL form
+   uses the current PSTATE.EL, which must not be EL0 (asserted). */
+val aset_ELR__0 : (bits(2), bits(64)) -> unit effect {wreg, escape}
+
+val aset_ELR__1 : bits(64) -> unit effect {wreg, rreg, escape}
+
+overload aset_ELR = {aset_ELR__0, aset_ELR__1}
+
+function aset_ELR__0 (el, value_name) = {
+ r : bits(64) = value_name;
+ match el {
+ ? if ? == EL1 => ELR_EL1 = r,
+ ? if ? == EL2 => ELR_EL2 = r,
+ ? if ? == EL3 => ELR_EL3 = r,
+ _ => Unreachable()
+ };
+ ()
+}
+
+function aset_ELR__1 value_name = {
+ assert(PSTATE.EL != EL0);
+ aset_ELR(PSTATE.EL, value_name);
+ ()
+}
+
+/* aget_X: read the low 'width (8/16/32/64, asserted) bits of
+   general-purpose register n (0..31, asserted). n == 31 (the zero
+   register) reads as zero. */
+val aget_X : forall ('width : Int), 'width >= 0.
+ int -> bits('width) effect {escape, rreg}
+
+function aget_X 'n = {
+ assert(n >= 0 & n <= 31, "((n >= 0) && (n <= 31))");
+ assert('width == 8 | 'width == 16 | 'width == 32 | 'width == 64, "((width == 8) || ((width == 16) || ((width == 32) || (width == 64))))");
+ if n != 31 then return(slice(_R[n], 0, 'width)) else return(Zeros('width))
+}
+
+/* SYS / SYSL execute: perform the system instruction identified by
+   (op0, op1, CRn, CRm, op2); when it produces a result, write it to Xt,
+   otherwise pass Xt as the instruction's input operand. */
+val aarch64_system_sysops : (bool, int, int, int, int, int, int) -> unit effect {escape, rreg, wreg}
+
+function aarch64_system_sysops (has_result, 'sys_crm, 'sys_crn, 'sys_op0, 'sys_op1, 'sys_op2, 't) = if has_result then aset_X(t, AArch64_SysInstrWithResult(sys_op0, sys_op1, sys_crn, sys_crm, sys_op2)) else AArch64_SysInstr(sys_op0, sys_op1, sys_crn, sys_crm, sys_op2, aget_X(t))
+
+/* MRS / MSR (register) execute: read the system register identified by
+   (op0, op1, CRn, CRm, op2) into Xt, or write it from Xt. */
+val aarch64_system_register_system : (bool, int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_system_register_system (read, 'sys_crm, 'sys_crn, 'sys_op0, 'sys_op1, 'sys_op2, 't) = if read then aset_X(t, AArch64_SysRegRead(sys_op0, sys_op1, sys_crn, sys_crm, sys_op2)) else AArch64_SysRegWrite(sys_op0, sys_op1, sys_crn, sys_crm, sys_op2, aget_X(t))
+
+/* MOVZ / MOVN / MOVK execute: insert the 16-bit immediate at bit
+   position pos. MOVK (MoveWideOp_K) preserves the other bits of Xd,
+   otherwise they start as zero; MOVN (MoveWideOp_N) inverts the final
+   result before writing it back. */
+val aarch64_integer_insext_insert_movewide : (int, int, bits(16), MoveWideOp, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_insext_insert_movewide ('d, 'datasize, imm, opcode, 'pos) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ result : bits('datasize) = undefined;
+ if opcode == MoveWideOp_K then result = aget_X(d) else result = Zeros();
+ result = __SetSlice_bits(datasize, 16, result, pos, imm);
+ if opcode == MoveWideOp_N then result = ~(result) else ();
+ aset_X(d, result)
+}
+
+/* EXTR execute: extract datasize bits starting at bit lsb from the
+   concatenation Xn:Xm and write them to Xd. */
+val aarch64_integer_insext_extract_immediate : (int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_insext_extract_immediate ('d, 'datasize, 'lsb, 'm, 'n) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ result : bits('datasize) = undefined;
+ operand1 : bits('datasize) = aget_X(n);
+ operand2 : bits('datasize) = aget_X(m);
+ concat : bits(2 * 'datasize) = operand1 @ operand2;
+ result = slice(concat, lsb, datasize);
+ aset_X(d, result)
+}
+
+/* REV / REV16 / REV32 execute: reverse the byte order within each
+   container of container_size bits of Xn. index advances byte-by-byte
+   through the source while rev_index starts at the last byte of the
+   current container and walks backwards. */
+val aarch64_integer_arithmetic_rev : (int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_arithmetic_rev ('container_size, 'd, 'datasize, 'n) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ operand : bits('datasize) = aget_X(n);
+ result : bits('datasize) = undefined;
+ containers : int = datasize / container_size;
+ elements_per_container : int = container_size / 8;
+ index : int = 0;
+ rev_index : int = undefined;
+ foreach (c from 0 to (containers - 1) by 1 in inc) {
+ rev_index = index + (elements_per_container - 1) * 8;
+ foreach (e from 0 to (elements_per_container - 1) by 1 in inc) {
+ result = __SetSlice_bits(datasize, 8, result, rev_index, slice(operand, index, 8));
+ index = index + 8;
+ rev_index = rev_index - 8
+ }
+ };
+ aset_X(d, result)
+}
+
+/* RBIT execute: reverse the bit order of Xn — bit i of the operand
+   becomes bit (datasize-1)-i of the result. */
+val aarch64_integer_arithmetic_rbit : (int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_arithmetic_rbit ('d, 'datasize, 'n) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ operand : bits('datasize) = aget_X(n);
+ result : bits('datasize) = undefined;
+ foreach (i from 0 to (datasize - 1) by 1 in inc)
+ result = __SetSlice_bits(datasize, 1, result, (datasize - 1) - i, [operand[i]]);
+ aset_X(d, result)
+}
+
+/* RBIT decode: sf selects 64-bit (1) or 32-bit (0) datasize. */
+val integer_arithmetic_rbit_decode : (bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_arithmetic_rbit_decode (sf, S, opcode2, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ aarch64_integer_arithmetic_rbit(d, datasize, n)
+}
+
+/* SMULH / UMULH execute: full 128-bit (un)signed product of Xn and Xm;
+   the high 64 bits of the product (bits 127:64 of the integer result)
+   are written to Xd. */
+val aarch64_integer_arithmetic_mul_widening_64128hi : (int, int, int, int, bool) -> unit effect {escape, rreg, wreg}
+
+function aarch64_integer_arithmetic_mul_widening_64128hi ('d, 'datasize, 'm, 'n, unsigned) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ operand1 : bits('datasize) = aget_X(n);
+ operand2 : bits('datasize) = aget_X(m);
+ result : int = asl_Int(operand1, unsigned) * asl_Int(operand2, unsigned);
+ aset_X(d, __GetSlice_int(64, result, 64))
+}
+
+/* SMULH / UMULH decode: U selects unsigned; operands are always
+   64-bit. Ra is decoded but unused by this form. */
+val integer_arithmetic_mul_widening_64128hi_decode : (bits(1), bits(2), bits(1), bits(5), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, wreg}
+
+function integer_arithmetic_mul_widening_64128hi_decode (sf, op54, U, Rm, o0, Ra, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ a : int = UInt(Ra);
+ let 'destsize : {|64|} = 64;
+ datasize : int = destsize;
+ unsigned : bool = U == 0b1;
+ aarch64_integer_arithmetic_mul_widening_64128hi(d, datasize, m, n, unsigned)
+}
+
+/* SMADDL / SMSUBL / UMADDL / UMSUBL execute: (un)signed widening
+   multiply of Wn and Wm, added to (or, for sub_op, subtracted from)
+   Xa; the low 64 bits of the integer result are written to Xd. */
+val aarch64_integer_arithmetic_mul_widening_3264 : (int, int, int, int, int, int, bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_arithmetic_mul_widening_3264 ('a, 'd, 'datasize, 'destsize, 'm, 'n, sub_op, unsigned) = {
+ assert(constraint('destsize in {32, 64}), "destsize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ operand1 : bits('datasize) = aget_X(n);
+ operand2 : bits('datasize) = aget_X(m);
+ operand3 : bits('destsize) = aget_X(a);
+ result : int = undefined;
+ if sub_op then result = asl_Int(operand3, unsigned) - asl_Int(operand1, unsigned) * asl_Int(operand2, unsigned) else result = asl_Int(operand3, unsigned) + asl_Int(operand1, unsigned) * asl_Int(operand2, unsigned);
+ aset_X(d, __GetSlice_int(64, result, 0))
+}
+
+/* Widening multiply-add/sub decode: 32-bit sources, 64-bit
+   destination; o0 selects subtract, U selects unsigned. */
+val integer_arithmetic_mul_widening_3264_decode : (bits(1), bits(2), bits(1), bits(5), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_arithmetic_mul_widening_3264_decode (sf, op54, U, Rm, o0, Ra, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ a : int = UInt(Ra);
+ let 'destsize : {|64|} = 64;
+ let 'datasize : {|32|} = 32;
+ sub_op : bool = o0 == 0b1;
+ unsigned : bool = U == 0b1;
+ aarch64_integer_arithmetic_mul_widening_3264(a, d, datasize, destsize, m, n, sub_op, unsigned)
+}
+
+/* MADD / MSUB execute: Xa +/- Xn * Xm, truncated to destsize bits.
+   Operands are interpreted as unsigned (UInt); because the result is
+   truncated, signedness does not affect the written value. */
+val aarch64_integer_arithmetic_mul_uniform_addsub : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_arithmetic_mul_uniform_addsub ('a, 'd, 'datasize, 'destsize, 'm, 'n, sub_op) = {
+ assert(constraint('destsize in {32, 64}), "destsize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ operand1 : bits('datasize) = aget_X(n);
+ operand2 : bits('datasize) = aget_X(m);
+ operand3 : bits('destsize) = aget_X(a);
+ result : int = undefined;
+ if sub_op then result = UInt(operand3) - UInt(operand1) * UInt(operand2) else result = UInt(operand3) + UInt(operand1) * UInt(operand2);
+ aset_X(d, __GetSlice_int(destsize, result, 0))
+}
+
+/* MADD / MSUB decode: sf selects 64- or 32-bit size; o0 selects MSUB. */
+val integer_arithmetic_mul_uniform_addsub_decode : (bits(1), bits(2), bits(3), bits(5), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_arithmetic_mul_uniform_addsub_decode (sf, op54, op31, Rm, o0, Ra, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ a : int = UInt(Ra);
+ let 'destsize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ datasize : int = destsize;
+ sub_op : bool = o0 == 0b1;
+ aarch64_integer_arithmetic_mul_uniform_addsub(a, d, datasize, destsize, m, n, sub_op)
+}
+
+/* SDIV / UDIV execute: (un)signed division of Xn by Xm, rounding
+   towards zero; division by zero yields 0 (no trap). */
+val aarch64_integer_arithmetic_div : (int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_arithmetic_div ('d, 'datasize, 'm, 'n, unsigned) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ operand1 : bits('datasize) = aget_X(n);
+ operand2 : bits('datasize) = aget_X(m);
+ result : int = undefined;
+ if IsZero(operand2) then result = 0 else result = RoundTowardsZero(Real(asl_Int(operand1, unsigned)) / Real(asl_Int(operand2, unsigned)));
+ aset_X(d, __GetSlice_int(datasize, result, 0))
+}
+
+/* SDIV / UDIV decode: sf selects 64- or 32-bit size; o1 == 0 selects
+   unsigned division. */
+val integer_arithmetic_div_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_arithmetic_div_decode (sf, op, S, Rm, opcode2, o1, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ unsigned : bool = o1 == 0b0;
+ aarch64_integer_arithmetic_div(d, datasize, m, n, unsigned)
+}
+
+/* CLZ / CLS execute: count leading zero bits (CountOp_CLZ) or leading
+   sign bits of Xn and write the count to Xd. */
+val aarch64_integer_arithmetic_cnt : (int, int, int, CountOp) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_arithmetic_cnt ('d, 'datasize, 'n, opcode) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ result : int = undefined;
+ operand1 : bits('datasize) = aget_X(n);
+ if opcode == CountOp_CLZ then result = CountLeadingZeroBits(operand1) else result = CountLeadingSignBits(operand1);
+ aset_X(d, __GetSlice_int(datasize, result, 0))
+}
+
+/* CLZ / CLS decode: sf selects size, op selects CLZ (0) or CLS (1). */
+val integer_arithmetic_cnt_decode : (bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_arithmetic_cnt_decode (sf, S, opcode2, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ opcode : CountOp = if op == 0b0 then CountOp_CLZ else CountOp_CLS;
+ aarch64_integer_arithmetic_cnt(d, datasize, n, opcode)
+}
+
+/* ADC / ADCS / SBC / SBCS execute: add/subtract with carry using
+   AddWithCarry and the current PSTATE.C. Subtraction is performed by
+   inverting operand2 before the add; setflags writes NZCV back. */
+val aarch64_integer_arithmetic_addsub_carry : (int, int, int, int, bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_arithmetic_addsub_carry ('d, 'datasize, 'm, 'n, setflags, sub_op) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ result : bits('datasize) = undefined;
+ operand1 : bits('datasize) = aget_X(n);
+ operand2 : bits('datasize) = aget_X(m);
+ nzcv : bits(4) = undefined;
+ if sub_op then operand2 = ~(operand2) else ();
+ (result, nzcv) = AddWithCarry(operand1, operand2, PSTATE.C);
+ if setflags then (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V) = nzcv else ();
+ aset_X(d, result)
+}
+
+/* ADC/SBC decode: sf selects size, op selects subtract, S selects
+   flag-setting form. */
+val integer_arithmetic_addsub_carry_decode : (bits(1), bits(1), bits(1), bits(5), bits(6), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_arithmetic_addsub_carry_decode (sf, op, S, Rm, opcode2, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ sub_op : bool = op == 0b1;
+ setflags : bool = S == 0b1;
+ aarch64_integer_arithmetic_addsub_carry(d, datasize, m, n, setflags, sub_op)
+}
+
+/* ExtendReg: read register reg, select its low `len` bits (len fixed
+   by the extend type: 8/16/32/64, then capped at 'N - shift), append
+   `shift` zero bits (shift in 0..4, asserted) and sign- or zero-extend
+   to 'N bits. Used by the extended-register address/arithmetic forms. */
+val ExtendReg : forall ('N : Int), 'N >= 0.
+ (int, ExtendType, int) -> bits('N) effect {escape, rreg, undef}
+
+function ExtendReg (reg, typ, shift) = {
+ assert(shift >= 0 & shift <= 4, "((shift >= 0) && (shift <= 4))");
+ val_name : bits('N) = aget_X(reg);
+ unsigned : bool = undefined;
+ len : int = undefined;
+ match typ {
+ ExtendType_SXTB => {
+ unsigned = false;
+ len = 8
+ },
+ ExtendType_SXTH => {
+ unsigned = false;
+ len = 16
+ },
+ ExtendType_SXTW => {
+ unsigned = false;
+ len = 32
+ },
+ ExtendType_SXTX => {
+ unsigned = false;
+ len = 64
+ },
+ ExtendType_UXTB => {
+ unsigned = true;
+ len = 8
+ },
+ ExtendType_UXTH => {
+ unsigned = true;
+ len = 16
+ },
+ ExtendType_UXTW => {
+ unsigned = true;
+ len = 32
+ },
+ ExtendType_UXTX => {
+ unsigned = true;
+ len = 64
+ }
+ };
+ len = min(len, 'N - shift);
+ shift2 = coerce_int_nat(shift);
+ /* existentially package len so the slice below type-checks; the
+    "hack" assert narrows it for the type checker */
+ let 'len2 : {'n, true. atom('n)} = ex_int(len);
+ assert(constraint('len2 >= 2), "hack");
+ return(Extend(append(val_name[len2 - 1 .. 0], Zeros(ex_nat(shift2))), 'N, unsigned))
+}
+
+/* aget_ELR: read the Exception Link Register. The one-argument form
+   selects ELR_EL1/ELR_EL2/ELR_EL3 by the given exception level (any
+   other value, i.e. EL0, is Unreachable); the zero-argument form uses
+   the current PSTATE.EL, which must not be EL0 (asserted). Mirrors
+   aset_ELR above. */
+val aget_ELR__0 : bits(2) -> bits(64) effect {escape, rreg, undef}
+
+val aget_ELR__1 : unit -> bits(64) effect {escape, rreg, undef}
+
+overload aget_ELR = {aget_ELR__0, aget_ELR__1}
+
+function aget_ELR__0 el = {
+ r : bits(64) = undefined;
+ match el {
+ ? if ? == EL1 => r = ELR_EL1,
+ ? if ? == EL2 => r = ELR_EL2,
+ ? if ? == EL3 => r = ELR_EL3,
+ _ => Unreachable()
+ };
+ return(r)
+}
+
+function aget_ELR__1 () = {
+ assert(PSTATE.EL != EL0);
+ return(aget_ELR(PSTATE.EL))
+}
+
+/* ROR_C: rotate right with carry out. shift must be non-zero
+   (asserted); m = shift MOD N, result = LSR(x,m) OR LSL(x,N-m), and the
+   carry out is the new most-significant bit of the result. */
+val ROR_C : forall ('N : Int), 'N >= 0 & 'N >= 0 & 1 >= 0.
+ (bits('N), int) -> (bits('N), bits(1)) effect {escape, undef}
+
+function ROR_C (x, 'shift) = {
+ assert(shift != 0, "(shift != 0)");
+ m : int = shift % 'N;
+ result : bits('N) = LSR(x, m) | LSL(x, 'N - m);
+ carry_out : bits(1) = [result['N - 1]];
+ return((result, carry_out))
+}
+
+/* ROR: rotate right, discarding the carry; a shift of 0 returns x
+   unchanged (ROR_C requires shift != 0). */
+val ROR : forall ('N : Int), 'N >= 0 & 'N >= 0.
+ (bits('N), int) -> bits('N) effect {escape, undef}
+
+function ROR (x, 'shift) = {
+ assert(shift >= 0, "(shift >= 0)");
+ __anon1 : bits(1) = undefined;
+ result : bits('N) = undefined;
+ if shift == 0 then result = x else (result, __anon1) = ROR_C(x, shift);
+ return(result)
+}
+
+/* SHA512SU1 execute: SHA-512 message-schedule update, part 2. For each
+   64-bit half, sig1 = sigma1 of the corresponding X doubleword
+   (ROR 19 ^ ROR 61 ^ logical-shift-right 6, the shift expressed as
+   0b000000 @ top-58-bits), then Vd half = W half + sig1 + Y half. */
+val aarch64_vector_crypto_sha512_sha512su1 : (int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_crypto_sha512_sha512su1 ('d, 'm, 'n) = {
+ sig1 : bits(64) = undefined;
+ Vtmp : bits(128) = undefined;
+ X : bits(128) = aget_V(n);
+ Y : bits(128) = aget_V(m);
+ W : bits(128) = aget_V(d);
+ sig1 = (ROR(slice(X, 64, 64), 19) ^ ROR(slice(X, 64, 64), 61)) ^ (0b000000 @ slice(X, 70, 58));
+ Vtmp = __SetSlice_bits(128, 64, Vtmp, 64, (slice(W, 64, 64) + sig1) + slice(Y, 64, 64));
+ sig1 = (ROR(slice(X, 0, 64), 19) ^ ROR(slice(X, 0, 64), 61)) ^ (0b000000 @ slice(X, 6, 58));
+ Vtmp = __SetSlice_bits(128, 64, Vtmp, 0, (slice(W, 0, 64) + sig1) + slice(Y, 0, 64));
+ aset_V(d, Vtmp)
+}
+
+/* SHA512SU0 execute: SHA-512 message-schedule update, part 1. For each
+   64-bit half, sig0 = sigma0 of the source doubleword (ROR 1 ^ ROR 8 ^
+   logical-shift-right 7, the shift expressed as 0b0000000 @
+   top-57-bits), then Vd half = W half + sig0. The low half uses W's
+   high doubleword, the high half uses X's low doubleword. */
+val aarch64_vector_crypto_sha512_sha512su0 : (int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_crypto_sha512_sha512su0 ('d, 'n) = {
+ sig0 : bits(64) = undefined;
+ Vtmp : bits(128) = undefined;
+ X : bits(128) = aget_V(n);
+ W : bits(128) = aget_V(d);
+ sig0 = (ROR(slice(W, 64, 64), 1) ^ ROR(slice(W, 64, 64), 8)) ^ (0b0000000 @ slice(W, 71, 57));
+ Vtmp = __SetSlice_bits(128, 64, Vtmp, 0, slice(W, 0, 64) + sig0);
+ sig0 = (ROR(slice(X, 0, 64), 1) ^ ROR(slice(X, 0, 64), 8)) ^ (0b0000000 @ slice(X, 7, 57));
+ Vtmp = __SetSlice_bits(128, 64, Vtmp, 64, slice(W, 64, 64) + sig0);
+ aset_V(d, Vtmp)
+}
+
+/* SHA512H execute: SHA-512 hash update. Per 64-bit half: Sigma1 =
+   ROR 14 ^ ROR 18 ^ ROR 41, combined with the Ch choice function
+   ((e AND f) EOR (NOT e AND g)) and the W addend. The low half chains
+   through tmp = high half + Y<63:0>. */
+val aarch64_vector_crypto_sha512_sha512h : (int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_crypto_sha512_sha512h ('d, 'm, 'n) = {
+ Vtmp : bits(128) = undefined;
+ MSigma1 : bits(64) = undefined;
+ tmp : bits(64) = undefined;
+ X : bits(128) = aget_V(n);
+ Y : bits(128) = aget_V(m);
+ W : bits(128) = aget_V(d);
+ MSigma1 = (ROR(slice(Y, 64, 64), 14) ^ ROR(slice(Y, 64, 64), 18)) ^ ROR(slice(Y, 64, 64), 41);
+ Vtmp = __SetSlice_bits(128, 64, Vtmp, 64, (slice(Y, 64, 64) & slice(X, 0, 64)) ^ (~(slice(Y, 64, 64)) & slice(X, 64, 64)));
+ Vtmp = __SetSlice_bits(128, 64, Vtmp, 64, (slice(Vtmp, 64, 64) + MSigma1) + slice(W, 64, 64));
+ tmp = slice(Vtmp, 64, 64) + slice(Y, 0, 64);
+ MSigma1 = (ROR(tmp, 14) ^ ROR(tmp, 18)) ^ ROR(tmp, 41);
+ Vtmp = __SetSlice_bits(128, 64, Vtmp, 0, (tmp & slice(Y, 64, 64)) ^ (~(tmp) & slice(X, 0, 64)));
+ Vtmp = __SetSlice_bits(128, 64, Vtmp, 0, (slice(Vtmp, 0, 64) + MSigma1) + slice(W, 0, 64));
+ aset_V(d, Vtmp)
+}
+
+/* SHA512H2 execute: SHA-512 hash update, part 2. Per 64-bit half:
+   NSigma0 = ROR 28 ^ ROR 34 ^ ROR 39, combined with the Maj majority
+   function ((a AND b) EOR (a AND c) EOR (b AND c)) and the W addend;
+   the low half uses the freshly computed high half as its 'a' input. */
+val aarch64_vector_crypto_sha512_sha512h2 : (int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_crypto_sha512_sha512h2 ('d, 'm, 'n) = {
+ Vtmp : bits(128) = undefined;
+ NSigma0 : bits(64) = undefined;
+ tmp : bits(64) = undefined;
+ X : bits(128) = aget_V(n);
+ Y : bits(128) = aget_V(m);
+ W : bits(128) = aget_V(d);
+ NSigma0 = (ROR(slice(Y, 0, 64), 28) ^ ROR(slice(Y, 0, 64), 34)) ^ ROR(slice(Y, 0, 64), 39);
+ Vtmp = __SetSlice_bits(128, 64, Vtmp, 64, ((slice(X, 0, 64) & slice(Y, 64, 64)) ^ (slice(X, 0, 64) & slice(Y, 0, 64))) ^ (slice(Y, 64, 64) & slice(Y, 0, 64)));
+ Vtmp = __SetSlice_bits(128, 64, Vtmp, 64, (slice(Vtmp, 64, 64) + NSigma0) + slice(W, 64, 64));
+ NSigma0 = (ROR(slice(Vtmp, 64, 64), 28) ^ ROR(slice(Vtmp, 64, 64), 34)) ^ ROR(slice(Vtmp, 64, 64), 39);
+ Vtmp = __SetSlice_bits(128, 64, Vtmp, 0, ((slice(Vtmp, 64, 64) & slice(Y, 0, 64)) ^ (slice(Vtmp, 64, 64) & slice(Y, 64, 64))) ^ (slice(Y, 64, 64) & slice(Y, 0, 64)));
+ Vtmp = __SetSlice_bits(128, 64, Vtmp, 0, (slice(Vtmp, 0, 64) + NSigma0) + slice(W, 0, 64));
+ aset_V(d, Vtmp)
+}
+
+/* XAR (SHA3) execute: tmp = Vn EOR Vm, then rotate each 64-bit half of
+   tmp right by UInt(imm6) and write the pair to Vd. */
+val aarch64_vector_crypto_sha3_xar : (int, bits(6), int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_crypto_sha3_xar ('d, imm6, 'm, 'n) = {
+ Vm : bits(128) = aget_V(m);
+ Vn : bits(128) = aget_V(n);
+ tmp : bits(128) = Vn ^ Vm;
+ aset_V(d, ROR(slice(tmp, 64, 64), UInt(imm6)) @ ROR(slice(tmp, 0, 64), UInt(imm6)))
+}
+
+/* BFM / SBFM / UBFM common execute (bitfield move). bot merges
+   ROR(src, R) into dst under wmask; top is either the replicated sign
+   bit src[S] (extend, i.e. SBFM) or dst (BFM keeps, UBFM zeroes via
+   inzero); the result combines top and bot under tmask. Masks tmask
+   and wmask are precomputed by the decoder (DecodeBitMasks). */
+val aarch64_integer_bitfield : forall ('datasize : Int).
+ (int, int, int, atom('datasize), bool, bool, int, bits('datasize), bits('datasize)) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_bitfield ('R, 'S, 'd, datasize, extend, inzero, 'n, tmask, wmask) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ dst : bits('datasize) = if inzero then Zeros() else aget_X(d);
+ src : bits('datasize) = aget_X(n);
+ bot : bits('datasize) = dst & ~(wmask) | ROR(src, R) & wmask;
+ top : bits('datasize) = if extend then Replicate([src[S]]) else dst;
+ aset_X(d, top & ~(tmask) | bot & tmask)
+}
+
+/* ShiftReg: read register reg and apply the given shift type
+   (LSL/LSR/ASR/ROR) by amount bit positions. */
+val ShiftReg : forall ('N : Int), 'N >= 0.
+ (int, ShiftType, int) -> bits('N) effect {escape, rreg, undef}
+
+function ShiftReg ('reg, typ, 'amount) = {
+ result : bits('N) = aget_X(reg);
+ match typ {
+ ShiftType_LSL => result = LSL(result, amount),
+ ShiftType_LSR => result = LSR(result, amount),
+ ShiftType_ASR => result = ASR(result, amount),
+ ShiftType_ROR => result = ROR(result, amount)
+ };
+ return(result)
+}
+
+/* LSLV / LSRV / ASRV / RORV execute: shift Xn by UInt(Xm) MOD datasize
+   using the decoded shift type, and write the result to Xd. */
+val aarch64_integer_shift_variable : (int, int, int, int, ShiftType) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_shift_variable ('d, 'datasize, 'm, 'n, shift_type) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ result : bits('datasize) = undefined;
+ operand2 : bits('datasize) = aget_X(m);
+ result = ShiftReg(n, shift_type, UInt(operand2) % datasize);
+ aset_X(d, result)
+}
+
+/* Variable-shift decode: sf selects size; op2 selects the shift type
+   via DecodeShift. */
+val integer_shift_variable_decode : (bits(1), bits(1), bits(1), bits(5), bits(4), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_shift_variable_decode (sf, op, S, Rm, opcode2, op2, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ shift_type : ShiftType = DecodeShift(op2);
+ aarch64_integer_shift_variable(d, datasize, m, n, shift_type)
+}
+
+/* AND / ORR / EOR (and BIC / ORN / EON via invert) shifted-register
+   execute: operand2 is Xm shifted by shift_type/shift_amount and
+   optionally inverted. The flag-setting forms write N and Z from the
+   result and clear C and V (the 0b00 suffix). */
+val aarch64_integer_logical_shiftedreg : (int, int, bool, int, int, LogicalOp, bool, int, ShiftType) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_logical_shiftedreg ('d, 'datasize, invert, 'm, 'n, op, setflags, 'shift_amount, shift_type) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ operand1 : bits('datasize) = aget_X(n);
+ operand2 : bits('datasize) = ShiftReg(m, shift_type, shift_amount);
+ if invert then operand2 = ~(operand2) else ();
+ result : bits('datasize) = undefined;
+ match op {
+ LogicalOp_AND => result = operand1 & operand2,
+ LogicalOp_ORR => result = operand1 | operand2,
+ LogicalOp_EOR => result = operand1 ^ operand2
+ };
+ if setflags then (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V) = ([result[datasize - 1]] @ IsZeroBit(result)) @ 0b00 else ();
+ aset_X(d, result)
+}
+
+/* ADD / ADDS / SUB / SUBS shifted-register execute: operand2 is Xm
+   shifted; subtraction is implemented as AddWithCarry with operand2
+   inverted and carry-in 1. setflags writes NZCV back. */
+val aarch64_integer_arithmetic_addsub_shiftedreg : (int, int, int, int, bool, int, ShiftType, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_arithmetic_addsub_shiftedreg ('d, 'datasize, 'm, 'n, setflags, 'shift_amount, shift_type, sub_op) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ result : bits('datasize) = undefined;
+ operand1 : bits('datasize) = aget_X(n);
+ operand2 : bits('datasize) = ShiftReg(m, shift_type, shift_amount);
+ nzcv : bits(4) = undefined;
+ carry_in : bits(1) = undefined;
+ if sub_op then {
+ operand2 = ~(operand2);
+ carry_in = 0b1
+ } else carry_in = 0b0;
+ (result, nzcv) = AddWithCarry(operand1, operand2, carry_in);
+ if setflags then (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V) = nzcv else ();
+ aset_X(d, result)
+}
+
+/* SHAhashSIGMA1: SHA-256 Sigma1(x) = ROR(x,6) ^ ROR(x,11) ^ ROR(x,25). */
+val SHAhashSIGMA1 : bits(32) -> bits(32) effect {escape, undef}
+
+function SHAhashSIGMA1 x = return((ROR(x, 6) ^ ROR(x, 11)) ^ ROR(x, 25))
+
+/* SHAhashSIGMA0: SHA-256 Sigma0(x) = ROR(x,2) ^ ROR(x,13) ^ ROR(x,22). */
+val SHAhashSIGMA0 : bits(32) -> bits(32) effect {escape, undef}
+
+function SHAhashSIGMA0 x = return((ROR(x, 2) ^ ROR(x, 13)) ^ ROR(x, 22))
+
+/* ROL: rotate left by shift (0..'N, asserted), implemented as a right
+   rotation by 'N - shift; a shift of 0 returns x unchanged. */
+val ROL : forall ('N : Int), 'N >= 0 & 'N >= 0.
+ (bits('N), int) -> bits('N) effect {escape, undef}
+
+function ROL (x, 'shift) = {
+ assert(shift >= 0 & shift <= 'N, "((shift >= 0) && (shift <= N))");
+ if shift == 0 then return(x) else ();
+ return(ROR(x, 'N - shift))
+}
+
+/* SM4EKEY execute: SM4 key-schedule rounds. Each of the four rounds
+   XORs the upper three 32-bit words of roundresult with the round
+   constant from Vm, substitutes each byte through the SM4 Sbox, applies
+   the key-schedule linear transform L'(B) = B ^ ROL(B,13) ^ ROL(B,23),
+   XORs in the lowest word, then rotates the four words down and places
+   the new word on top. */
+val aarch64_vector_crypto_sm4_sm4enckey : (int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_crypto_sm4_sm4enckey ('d, 'm, 'n) = {
+ Vm : bits(128) = aget_V(m);
+ intval : bits(32) = undefined;
+ sboxout : bits(8) = undefined;
+ result : bits(128) = undefined;
+ const : bits(32) = undefined;
+ roundresult : bits(128) = undefined;
+ index : int = undefined;
+ roundresult = aget_V(n);
+ foreach (index from 0 to 3 by 1 in inc) {
+ const = aget_Elem(Vm, index, 32);
+ intval = ((slice(roundresult, 96, 32) ^ slice(roundresult, 64, 32)) ^ slice(roundresult, 32, 32)) ^ const;
+ foreach (i from 0 to 3 by 1 in inc)
+ intval = aset_Elem(intval, i, 8, Sbox(aget_Elem(intval, i, 8)));
+ intval = (intval ^ ROL(intval, 13)) ^ ROL(intval, 23);
+ intval = intval ^ slice(roundresult, 0, 32);
+ roundresult = __SetSlice_bits(128, 32, roundresult, 0, slice(roundresult, 32, 32));
+ roundresult = __SetSlice_bits(128, 32, roundresult, 32, slice(roundresult, 64, 32));
+ roundresult = __SetSlice_bits(128, 32, roundresult, 64, slice(roundresult, 96, 32));
+ roundresult = __SetSlice_bits(128, 32, roundresult, 96, intval)
+ };
+ aset_V(d, roundresult)
+}
+
+val aarch64_vector_crypto_sm4_sm4enc : (int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SM4E: four SM4 encryption rounds on the state in Vd, with round keys
+   taken from successive 32-bit lanes of Vn.  Differs from the key
+   schedule only in the linear transform (XOR of rotations by 2, 10,
+   18 and 24).  (sboxout is declared by the generator but never used.) */
+function aarch64_vector_crypto_sm4_sm4enc ('d, 'n) = {
+  Vn : bits(128) = aget_V(n);
+  intval : bits(32) = undefined;
+  sboxout : bits(8) = undefined;
+  roundresult : bits(128) = undefined;
+  roundkey : bits(32) = undefined;
+  index : int = undefined;
+  roundresult = aget_V(d);
+  foreach (index from 0 to 3 by 1 in inc) {
+    roundkey = aget_Elem(Vn, index, 32);
+    intval = ((slice(roundresult, 96, 32) ^ slice(roundresult, 64, 32)) ^ slice(roundresult, 32, 32)) ^ roundkey;
+    /* byte-wise S-box substitution */
+    foreach (i from 0 to 3 by 1 in inc)
+      intval = aset_Elem(intval, i, 8, Sbox(aget_Elem(intval, i, 8)));
+    intval = (((intval ^ ROL(intval, 2)) ^ ROL(intval, 10)) ^ ROL(intval, 18)) ^ ROL(intval, 24);
+    intval = intval ^ slice(roundresult, 0, 32);
+    roundresult = __SetSlice_bits(128, 32, roundresult, 0, slice(roundresult, 32, 32));
+    roundresult = __SetSlice_bits(128, 32, roundresult, 32, slice(roundresult, 64, 32));
+    roundresult = __SetSlice_bits(128, 32, roundresult, 64, slice(roundresult, 96, 32));
+    roundresult = __SetSlice_bits(128, 32, roundresult, 96, intval)
+  };
+  aset_V(d, roundresult)
+}
+
+val aarch64_vector_crypto_sm3_sm3tt2b : (int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SM3TT2B: one SM3 "TT2" quad-round step using the late-round boolean
+   function ((x & y) | (~x & z)); message word Wj comes from lane i of Vm.
+   The new top word gets the P0 permutation (XOR of rotations by 9 and 17). */
+function aarch64_vector_crypto_sm3_sm3tt2b ('d, 'i, 'm, 'n) = {
+  Vm : bits(128) = aget_V(m);
+  Vn : bits(128) = aget_V(n);
+  Vd : bits(128) = aget_V(d);
+  Wj : bits(32) = undefined;
+  result : bits(128) = undefined;
+  TT2 : bits(32) = undefined;
+  Wj = aget_Elem(Vm, i, 32);
+  TT2 = slice(Vd, 96, 32) & slice(Vd, 64, 32) | ~(slice(Vd, 96, 32)) & slice(Vd, 32, 32);
+  TT2 = slice(((TT2 + slice(Vd, 0, 32)) + slice(Vn, 96, 32)) + Wj, 0, 32);
+  result = __SetSlice_bits(128, 32, result, 0, slice(Vd, 32, 32));
+  result = __SetSlice_bits(128, 32, result, 32, ROL(slice(Vd, 64, 32), 19));
+  result = __SetSlice_bits(128, 32, result, 64, slice(Vd, 96, 32));
+  result = __SetSlice_bits(128, 32, result, 96, (TT2 ^ ROL(TT2, 9)) ^ ROL(TT2, 17));
+  aset_V(d, result)
+}
+
+val aarch64_vector_crypto_sm3_sm3tt2a : (int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SM3TT2A: one SM3 "TT2" quad-round step for the early rounds, where the
+   boolean function is a plain XOR of three state words; message word Wj
+   comes from lane i of Vm.  The new top word gets the P0 permutation
+   (XOR of rotations by 9 and 17).
+   Fix over the generated original: removed the dead local declaration
+   `TT1 : bits(32) = undefined;` — this function only uses TT2. */
+function aarch64_vector_crypto_sm3_sm3tt2a ('d, 'i, 'm, 'n) = {
+  Vm : bits(128) = aget_V(m);
+  Vn : bits(128) = aget_V(n);
+  Vd : bits(128) = aget_V(d);
+  Wj : bits(32) = undefined;
+  result : bits(128) = undefined;
+  Wj = aget_Elem(Vm, i, 32);
+  /* early-round boolean function: x ^ y ^ z */
+  TT2 : bits(32) = slice(Vd, 32, 32) ^ slice(Vd, 96, 32) ^ slice(Vd, 64, 32);
+  TT2 = slice(((TT2 + slice(Vd, 0, 32)) + slice(Vn, 96, 32)) + Wj, 0, 32);
+  result = __SetSlice_bits(128, 32, result, 0, slice(Vd, 32, 32));
+  result = __SetSlice_bits(128, 32, result, 32, ROL(slice(Vd, 64, 32), 19));
+  result = __SetSlice_bits(128, 32, result, 64, slice(Vd, 96, 32));
+  result = __SetSlice_bits(128, 32, result, 96, (TT2 ^ ROL(TT2, 9)) ^ ROL(TT2, 17));
+  aset_V(d, result)
+}
+
+val aarch64_vector_crypto_sm3_sm3tt1b : (int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SM3TT1B: one SM3 "TT1" quad-round step using the late-round majority
+   function ((x & y) | (x & z) | (y & z)); SS2 is derived from Vn and a
+   12-bit rotation of the top word of Vd. */
+function aarch64_vector_crypto_sm3_sm3tt1b ('d, 'i, 'm, 'n) = {
+  Vm : bits(128) = aget_V(m);
+  Vn : bits(128) = aget_V(n);
+  Vd : bits(128) = aget_V(d);
+  WjPrime : bits(32) = undefined;
+  result : bits(128) = undefined;
+  TT1 : bits(32) = undefined;
+  SS2 : bits(32) = undefined;
+  WjPrime = aget_Elem(Vm, i, 32);
+  SS2 = slice(Vn, 96, 32) ^ ROL(slice(Vd, 96, 32), 12);
+  TT1 = (slice(Vd, 96, 32) & slice(Vd, 32, 32) | slice(Vd, 96, 32) & slice(Vd, 64, 32)) | slice(Vd, 32, 32) & slice(Vd, 64, 32);
+  TT1 = slice(((TT1 + slice(Vd, 0, 32)) + SS2) + WjPrime, 0, 32);
+  result = __SetSlice_bits(128, 32, result, 0, slice(Vd, 32, 32));
+  result = __SetSlice_bits(128, 32, result, 32, ROL(slice(Vd, 64, 32), 9));
+  result = __SetSlice_bits(128, 32, result, 64, slice(Vd, 96, 32));
+  result = __SetSlice_bits(128, 32, result, 96, TT1);
+  aset_V(d, result)
+}
+
+val aarch64_vector_crypto_sm3_sm3tt1a : (int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SM3TT1A: one SM3 "TT1" quad-round step for the early rounds, where the
+   boolean function is a plain XOR of three state words.  Identical in
+   structure to sm3tt1b apart from that function. */
+function aarch64_vector_crypto_sm3_sm3tt1a ('d, 'i, 'm, 'n) = {
+  Vm : bits(128) = aget_V(m);
+  Vn : bits(128) = aget_V(n);
+  Vd : bits(128) = aget_V(d);
+  WjPrime : bits(32) = undefined;
+  result : bits(128) = undefined;
+  TT1 : bits(32) = undefined;
+  SS2 : bits(32) = undefined;
+  WjPrime = aget_Elem(Vm, i, 32);
+  SS2 = slice(Vn, 96, 32) ^ ROL(slice(Vd, 96, 32), 12);
+  TT1 = slice(Vd, 32, 32) ^ slice(Vd, 96, 32) ^ slice(Vd, 64, 32);
+  TT1 = slice(((TT1 + slice(Vd, 0, 32)) + SS2) + WjPrime, 0, 32);
+  result = __SetSlice_bits(128, 32, result, 0, slice(Vd, 32, 32));
+  result = __SetSlice_bits(128, 32, result, 32, ROL(slice(Vd, 64, 32), 9));
+  result = __SetSlice_bits(128, 32, result, 64, slice(Vd, 96, 32));
+  result = __SetSlice_bits(128, 32, result, 96, TT1);
+  aset_V(d, result)
+}
+
+val aarch64_vector_crypto_sm3_sm3ss1 : (int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SM3SS1: computes SS1 = ROL(ROL(top(Vn), 12) + top(Vm) + top(Va), 7)
+   into the top 32-bit word of the destination and zeroes the low 96 bits. */
+function aarch64_vector_crypto_sm3_sm3ss1 ('a, 'd, 'm, 'n) = {
+  Vm : bits(128) = aget_V(m);
+  Vn : bits(128) = aget_V(n);
+  Vd : bits(128) = aget_V(d);
+  Va : bits(128) = aget_V(a);
+  Vd = __SetSlice_bits(128, 32, Vd, 96, ROL((ROL(slice(Vn, 96, 32), 12) + slice(Vm, 96, 32)) + slice(Va, 96, 32), 7));
+  Vd = __SetSlice_bits(128, 96, Vd, 0, Zeros());
+  aset_V(d, Vd)
+}
+
+val aarch64_vector_crypto_sm3_sm3partw2 : (int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SM3PARTW2: second half of the SM3 message-expansion step.  XORs Vn with
+   the word-wise 7-bit rotation of Vm, folds that into Vd, then patches the
+   top word with the P1-style permutation (rotations by 15 and 23) of the
+   bottom word of the intermediate value. */
+function aarch64_vector_crypto_sm3_sm3partw2 ('d, 'm, 'n) = {
+  Vm : bits(128) = aget_V(m);
+  Vn : bits(128) = aget_V(n);
+  Vd : bits(128) = aget_V(d);
+  result : bits(128) = undefined;
+  tmp : bits(128) = undefined;
+  tmp2 : bits(32) = undefined;
+  tmp = __SetSlice_bits(128, 128, tmp, 0, Vn ^ (((ROL(slice(Vm, 96, 32), 7) @ ROL(slice(Vm, 64, 32), 7)) @ ROL(slice(Vm, 32, 32), 7)) @ ROL(slice(Vm, 0, 32), 7)));
+  result = __SetSlice_bits(128, 128, result, 0, slice(Vd, 0, 128) ^ slice(tmp, 0, 128));
+  tmp2 = ROL(slice(tmp, 0, 32), 15);
+  tmp2 = (tmp2 ^ ROL(tmp2, 15)) ^ ROL(tmp2, 23);
+  result = __SetSlice_bits(128, 32, result, 96, slice(result, 96, 32) ^ tmp2);
+  aset_V(d, result)
+}
+
+val aarch64_vector_crypto_sm3_sm3partw1 : (int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SM3PARTW1: first half of the SM3 message-expansion step.  The second
+   `result : bits(128) = ...` shadows the first undefined declaration —
+   a translator artifact, harmless.  Word 3 depends on word 0 of the
+   partially-updated result, hence the i == 3 special case inside the loop. */
+function aarch64_vector_crypto_sm3_sm3partw1 ('d, 'm, 'n) = {
+  Vm : bits(128) = aget_V(m);
+  Vn : bits(128) = aget_V(n);
+  Vd : bits(128) = aget_V(d);
+  result : bits(128) = undefined;
+  result : bits(128) = __SetSlice_bits(128, 96, result, 0, slice(Vd ^ Vn, 0, 96) ^ ((ROL(slice(Vm, 96, 32), 15) @ ROL(slice(Vm, 64, 32), 15)) @ ROL(slice(Vm, 32, 32), 15)));
+  foreach (i from 0 to 3 by 1 in inc) {
+    if i == 3 then result = __SetSlice_bits(128, 32, result, 96, slice(Vd ^ Vn, 96, 32) ^ ROL(slice(result, 0, 32), 15)) else ();
+    result = __SetSlice_bits(128, 32, result, 32 * i, (slice(result, 32 * i, 32) ^ ROL(slice(result, 32 * i, 32), 15)) ^ ROL(slice(result, 32 * i, 32), 23))
+  };
+  aset_V(d, result)
+}
+
+val aarch64_vector_crypto_sha3_rax1 : (int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* RAX1 (SHA3): Vd = Vn XOR (each 64-bit lane of Vm rotated left by 1). */
+function aarch64_vector_crypto_sha3_rax1 ('d, 'm, 'n) = {
+  Vm : bits(128) = aget_V(m);
+  Vn : bits(128) = aget_V(n);
+  aset_V(d, Vn ^ (ROL(slice(Vm, 64, 64), 1) @ ROL(slice(Vm, 0, 64), 1)))
+}
+
+val SHA256hash : (bits(128), bits(128), bits(128), bool) -> bits(128) effect {escape, undef}
+
+/* Four rounds of the SHA-256 compression function over a state split across
+   two 128-bit halves (X, Y), consuming the four schedule words in W.
+   Returns the X half when part1 is true, otherwise the Y half. */
+function SHA256hash (X__arg, Y__arg, W, part1) = {
+  X = X__arg;
+  Y = Y__arg;
+  chs : bits(32) = undefined;
+  maj : bits(32) = undefined;
+  t : bits(32) = undefined;
+  foreach (e from 0 to 3 by 1 in inc) {
+    chs = SHAchoose(slice(Y, 0, 32), slice(Y, 32, 32), slice(Y, 64, 32));
+    maj = SHAmajority(slice(X, 0, 32), slice(X, 32, 32), slice(X, 64, 32));
+    t = ((slice(Y, 96, 32) + SHAhashSIGMA1(slice(Y, 0, 32))) + chs) + aget_Elem(W, e, 32);
+    X = __SetSlice_bits(128, 32, X, 96, t + slice(X, 96, 32));
+    Y = __SetSlice_bits(128, 32, Y, 96, (t + SHAhashSIGMA0(slice(X, 0, 32))) + maj);
+    /* rotate the combined 256-bit state by one 32-bit lane */
+    __tmp_278 : bits(256) = ROL(Y @ X, 32);
+    Y = slice(__tmp_278, 128, 128);
+    X = slice(__tmp_278, 0, 128)
+  };
+  return(if part1 then X else Y)
+}
+
+val Prefetch : (bits(64), bits(5)) -> unit effect {undef}
+
+/* Decode a PRFM prfop field and forward it as a prefetch hint:
+   prfop<4:3> = hint kind (read / instruction / write),
+   prfop<2:1> = target cache level, prfop<0> = streaming bit.
+   Fix over the generated original: prfop<4:3> == '11' is a reserved
+   encoding and must behave as a NOP (per the ARM ARM pseudocode, which
+   returns early here); the original fell through and passed an undefined
+   `hint` on to Hint_Prefetch. */
+function Prefetch (address, prfop) = {
+  hint : PrefetchHint = undefined;
+  target : int = undefined;
+  stream : bool = undefined;
+  match slice(prfop, 3, 2) {
+    0b00 => hint = Prefetch_READ,
+    0b01 => hint = Prefetch_EXEC,
+    0b10 => hint = Prefetch_WRITE,
+    0b11 => return(())  /* reserved: no prefetch hint */
+  };
+  target = UInt(slice(prfop, 1, 2));
+  stream = [prfop[0]] != 0b0;
+  Hint_Prefetch(address, hint, target, stream);
+  ()
+}
+
+val IsSecondStage : FaultRecord -> bool effect {escape}
+
+/* True when a (non-None) fault record was raised by a stage-2 translation. */
+function IsSecondStage fault = {
+  assert(fault.typ != Fault_None, "((fault).type != Fault_None)");
+  return(fault.secondstage)
+}
+
+/* True when the address descriptor carries a recorded fault. */
+val IsFault : AddressDescriptor -> bool
+
+function IsFault addrdesc = {
+  let ftype = addrdesc.fault.typ;
+  return(ftype != Fault_None)
+}
+
+val CombineS1S2Desc : (AddressDescriptor, AddressDescriptor) -> AddressDescriptor effect {undef}
+
+/* Combine stage-1 and stage-2 translation results into one descriptor.
+   Physical address always comes from stage 2.  A fault in either stage wins
+   (stage 1 takes priority); otherwise Device memory type dominates Normal,
+   with device sub-types merged via CombineS1S2Device; for Normal/Normal the
+   cacheability hints and shareability are combined attribute-by-attribute.
+   The __tmp_NN locals are the translator's read-modify-write pattern for
+   nested record field updates. */
+function CombineS1S2Desc (s1desc, s2desc) = {
+  result : AddressDescriptor = undefined;
+  result.paddress = s2desc.paddress;
+  if IsFault(s1desc) | IsFault(s2desc) then result = if IsFault(s1desc) then s1desc else s2desc else if s2desc.memattrs.typ == MemType_Device | s1desc.memattrs.typ == MemType_Device then {
+    __tmp_61 : MemoryAttributes = result.memattrs;
+    __tmp_61.typ = MemType_Device;
+    result.memattrs = __tmp_61;
+    /* one stage Normal: take the other stage's device sub-type */
+    if s1desc.memattrs.typ == MemType_Normal then {
+      __tmp_62 : MemoryAttributes = result.memattrs;
+      __tmp_62.device = s2desc.memattrs.device;
+      result.memattrs = __tmp_62
+    } else if s2desc.memattrs.typ == MemType_Normal then {
+      __tmp_63 : MemoryAttributes = result.memattrs;
+      __tmp_63.device = s1desc.memattrs.device;
+      result.memattrs = __tmp_63
+    } else {
+      __tmp_64 : MemoryAttributes = result.memattrs;
+      __tmp_64.device = CombineS1S2Device(s1desc.memattrs.device, s2desc.memattrs.device);
+      result.memattrs = __tmp_64
+    }
+  } else {
+    __tmp_65 : MemoryAttributes = result.memattrs;
+    __tmp_65.typ = MemType_Normal;
+    result.memattrs = __tmp_65;
+    __tmp_66 : MemoryAttributes = result.memattrs;
+    __tmp_66.device = undefined;
+    result.memattrs = __tmp_66;
+    __tmp_67 : MemoryAttributes = result.memattrs;
+    __tmp_67.inner = CombineS1S2AttrHints(s1desc.memattrs.inner, s2desc.memattrs.inner);
+    result.memattrs = __tmp_67;
+    __tmp_68 : MemoryAttributes = result.memattrs;
+    __tmp_68.outer = CombineS1S2AttrHints(s1desc.memattrs.outer, s2desc.memattrs.outer);
+    result.memattrs = __tmp_68;
+    __tmp_69 : MemoryAttributes = result.memattrs;
+    __tmp_69.shareable = s1desc.memattrs.shareable | s2desc.memattrs.shareable;
+    result.memattrs = __tmp_69;
+    __tmp_70 : MemoryAttributes = result.memattrs;
+    __tmp_70.outershareable = s1desc.memattrs.outershareable | s2desc.memattrs.outershareable;
+    result.memattrs = __tmp_70
+  };
+  result.memattrs = MemAttrDefaults(result.memattrs);
+  return(result)
+}
+
+val IsExternalSyncAbort__0 : Fault -> bool effect {escape}
+
+val IsExternalSyncAbort__1 : FaultRecord -> bool effect {escape}
+
+/* Overload: classify a fault (or fault record) as a synchronous external
+   abort, including parity errors and aborts taken on a translation walk. */
+overload IsExternalSyncAbort = {IsExternalSyncAbort__0, IsExternalSyncAbort__1}
+
+function IsExternalSyncAbort__0 typ = {
+  assert(typ != Fault_None);
+  return(typ == Fault_SyncExternal | typ == Fault_SyncParity | typ == Fault_SyncExternalOnWalk | typ == Fault_SyncParityOnWalk)
+}
+
+function IsExternalSyncAbort__1 fault = return(IsExternalSyncAbort(fault.typ))
+
+val IsExternalAbort__0 : Fault -> bool effect {escape}
+
+val IsExternalAbort__1 : FaultRecord -> bool effect {escape}
+
+/* Overload: like IsExternalSyncAbort, but additionally accepting the
+   asynchronous external-abort and parity fault types. */
+overload IsExternalAbort = {IsExternalAbort__0, IsExternalAbort__1}
+
+function IsExternalAbort__0 typ = {
+  assert(typ != Fault_None);
+  return(typ == Fault_SyncExternal | typ == Fault_SyncParity | typ == Fault_SyncExternalOnWalk | typ == Fault_SyncParityOnWalk | typ == Fault_AsyncExternal | typ == Fault_AsyncParity)
+}
+
+function IsExternalAbort__1 fault = return(IsExternalAbort(fault.typ))
+
+val IsDebugException : FaultRecord -> bool effect {escape}
+
+/* True when a (non-None) fault record describes a debug exception. */
+function IsDebugException fault = {
+  assert(fault.typ != Fault_None, "((fault).type != Fault_None)");
+  return(fault.typ == Fault_Debug)
+}
+
+val IPAValid : FaultRecord -> bool effect {escape}
+
+/* Whether the intermediate physical address captured in the fault record is
+   valid for syndrome reporting: only for stage-2 faults (or stage-2 faults
+   on a stage-1 walk) of translation-related types. */
+function IPAValid fault = {
+  assert(fault.typ != Fault_None, "((fault).type != Fault_None)");
+  if fault.s2fs1walk then return(fault.typ == Fault_AccessFlag | fault.typ == Fault_Permission | fault.typ == Fault_Translation | fault.typ == Fault_AddressSize) else if fault.secondstage then return(fault.typ == Fault_AccessFlag | fault.typ == Fault_Translation | fault.typ == Fault_AddressSize) else return(false)
+}
+
+val aarch64_integer_logical_immediate : forall ('datasize : Int).
+  (int, atom('datasize), bits('datasize), int, LogicalOp, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* AND/ORR/EOR/ANDS (immediate): X[d] = X[n] op imm.  When setflags, N and Z
+   are derived from the result and C,V are cleared (the @ 0b00).  Register 31
+   as destination selects SP only in the non-flag-setting forms — note the
+   contrast with the shifted-register variant, which never touches SP. */
+function aarch64_integer_logical_immediate ('d, datasize, imm, 'n, op, setflags) = let 'dbytes = ex_int(datasize / 8) in {
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  result : bits('datasize) = undefined;
+  operand1 : bits('datasize) = aget_X(n);
+  operand2 : bits('datasize) = imm;
+  match op {
+    LogicalOp_AND => result = operand1 & operand2,
+    LogicalOp_ORR => result = operand1 | operand2,
+    LogicalOp_EOR => result = operand1 ^ operand2
+  };
+  if setflags then (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V) = ([result[datasize - 1]] @ IsZeroBit(result)) @ 0b00 else ();
+  if d == 31 & ~(setflags) then aset_SP(result) else aset_X(d, result)
+}
+
+val aarch64_integer_arithmetic_addsub_immediate : forall ('datasize : Int).
+  (int, atom('datasize), bits('datasize), int, bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* ADD/ADDS/SUB/SUBS (immediate).  Register 31 means SP for the source, and
+   for the destination in the non-flag-setting forms.  Subtraction is
+   operand1 + NOT(imm) + 1 via AddWithCarry; NZCV written when setflags. */
+function aarch64_integer_arithmetic_addsub_immediate ('d, datasize, imm, 'n, setflags, sub_op) = let 'dbytes = ex_int(datasize / 8) in {
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  result : bits('datasize) = undefined;
+  operand1 : bits('datasize) = if n == 31 then aget_SP() else aget_X(n);
+  operand2 : bits('datasize) = imm;
+  nzcv : bits(4) = undefined;
+  carry_in : bits(1) = undefined;
+  if sub_op then {
+    operand2 = ~(operand2);
+    carry_in = 0b1
+  } else carry_in = 0b0;
+  (result, nzcv) = AddWithCarry(operand1, operand2, carry_in);
+  if setflags then (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V) = nzcv else ();
+  if d == 31 & ~(setflags) then aset_SP(result) else aset_X(d, result)
+}
+
+val aarch64_integer_arithmetic_addsub_extendedreg : (int, int, ExtendType, int, int, bool, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* ADD/ADDS/SUB/SUBS (extended register): operand2 = ExtendReg(X[m]) with the
+   given extension and left shift.  Register 31 means SP for the source, and
+   for the destination in the non-flag-setting forms. */
+function aarch64_integer_arithmetic_addsub_extendedreg ('d, 'datasize, extend_type, 'm, 'n, setflags, 'shift, sub_op) = let 'dbytes = ex_int(datasize / 8) in {
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  result : bits('datasize) = undefined;
+  operand1 : bits('datasize) = if n == 31 then aget_SP() else aget_X(n);
+  operand2 : bits('datasize) = ExtendReg(m, extend_type, shift);
+  nzcv : bits(4) = undefined;
+  carry_in : bits(1) = undefined;
+  if sub_op then {
+    operand2 = ~(operand2);
+    carry_in = 0b1
+  } else carry_in = 0b0;
+  (result, nzcv) = AddWithCarry(operand1, operand2, carry_in);
+  if setflags then (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V) = nzcv else ();
+  if d == 31 & ~(setflags) then aset_SP(result) else aset_X(d, result)
+}
+
+val RestoredITBits : bits(32) -> bits(8) effect {escape, rreg}
+
+/* Reconstruct the AArch32 IT bits from an SPSR value on exception return,
+   forcing them to zero for illegal-state returns (subject to a
+   CONSTRAINED UNPREDICTABLE choice), for malformed encodings, and when the
+   SCTLR/HSCTLR ITD control disallows the remaining IT block.
+   NOTE(review): spsr[5] appears to be the T (Thumb) bit here — confirm
+   against the ARM SPSR layout. */
+function RestoredITBits spsr = {
+  it : bits(8) = spsr[15 .. 10] @ spsr[26 .. 25];
+  if PSTATE.IL == 0b1 then
+    if ConstrainUnpredictableBool(Unpredictable_ILZEROIT) then return(0x00) else return(it)
+  else ();
+  /* nonzero condition with zero mask is a malformed IT state */
+  if ~(IsZero(it[7 .. 4])) & IsZero(it[3 .. 0]) then return(0x00) else ();
+  itd : bits(1) = if PSTATE.EL == EL2 then [HSCTLR[7]] else [SCTLR[7]];
+  if [spsr[5]] == 0b0 & ~(IsZero(it)) | itd == 0b1 & ~(IsZero(it[2 .. 0])) then return(0x00) else return(it)
+}
+
+val IsEL1TransRegimeRegs : unit -> bool effect {rreg}
+
+/* True when the EL1&0 translation-regime registers (TCR_EL1 etc.) apply:
+   no EL2, executing at EL1, or at EL0 with HCR_EL2 bits 34/27 clear
+   (presumably E2H/TGE — verify against the HCR_EL2 layout). */
+function IsEL1TransRegimeRegs () = return((~(HaveEL(EL2)) | PSTATE.EL == EL1) | PSTATE.EL == EL0 & ([HCR_EL2[34]] == 0b0 | [HCR_EL2[27]] == 0b0))
+
+val CalculateTBI : (bits(64), bool) -> bool effect {rreg}
+
+/* Determine whether top-byte-ignore applies to a pointer, for data versus
+   instruction accesses.  Selects TCR_EL1/EL2/EL3 based on the translation
+   regime, with ptr[55] choosing the upper or lower address range; the extra
+   bit tested for instruction accesses (TCR bits 51/52/29) is presumably the
+   TBID control — confirm against the TCR register layouts. */
+function CalculateTBI (ptr, data) = {
+  tbi : bool = false;
+  if PtrHasUpperAndLowerAddRanges() then if IsEL1TransRegimeRegs() then if data then tbi = if [ptr[55]] == 0b1 then [TCR_EL1[38]] == 0b1 else [TCR_EL1[37]] == 0b1 else if [ptr[55]] == 0b1 then tbi = [TCR_EL1[38]] == 0b1 & [TCR_EL1[52]] == 0b0 else tbi = [TCR_EL1[37]] == 0b1 & [TCR_EL1[51]] == 0b0 else if data then tbi = if [ptr[55]] == 0b1 then [TCR_EL2[38]] == 0b1 else [TCR_EL2[37]] == 0b1 else if [ptr[55]] == 0b1 then tbi = [TCR_EL2[38]] == 0b1 & [TCR_EL2[52]] == 0b0 else tbi = [TCR_EL2[37]] == 0b1 & [TCR_EL2[51]] == 0b0 else if PSTATE.EL == EL2 then tbi = if data then [TCR_EL2[20]] == 0b1 else [TCR_EL2[20]] == 0b1 & [TCR_EL2[29]] == 0b0 else if PSTATE.EL == EL3 then tbi = if data then [TCR_EL3[20]] == 0b1 else [TCR_EL3[20]] == 0b1 & [TCR_EL3[29]] == 0b0 else ();
+  return(tbi)
+}
+
+val CalculateBottomPACBit : (bits(64), bits(1)) -> int effect {escape, rreg, undef}
+
+/* Lowest bit position of the PAC field: 64 - TnSZ, where TnSZ is read from
+   the TCR register of the active translation regime (upper or lower range
+   selected by top_bit).  Out-of-range TnSZ values are clamped only when the
+   CONSTRAINED UNPREDICTABLE choice is Constraint_FORCE; the minimum depends
+   on whether 64KB granules with 52-bit VAs are in use. */
+function CalculateBottomPACBit (ptr, top_bit) = {
+  tsz_field : int = undefined;
+  using64k : bool = undefined;
+  if PtrHasUpperAndLowerAddRanges() then if IsEL1TransRegimeRegs() then {
+    tsz_field = if top_bit == 0b1 then UInt(slice(TCR_EL1, 16, 6)) else UInt(slice(TCR_EL1, 0, 6));
+    using64k = if top_bit == 0b1 then slice(TCR_EL1, 30, 2) == 0b11 else slice(TCR_EL1, 14, 2) == 0b11
+  } else {
+    assert(HaveEL(EL2), "HaveEL(EL2)");
+    tsz_field = if top_bit == 0b1 then UInt(slice(TCR_EL2, 16, 6)) else UInt(slice(TCR_EL2, 0, 6));
+    using64k = if top_bit == 0b1 then slice(TCR_EL2, 30, 2) == 0b11 else slice(TCR_EL2, 14, 2) == 0b11
+  } else {
+    tsz_field = if PSTATE.EL == EL2 then UInt(slice(TCR_EL2, 0, 6)) else UInt(slice(TCR_EL3, 0, 6));
+    using64k = if PSTATE.EL == EL2 then slice(TCR_EL2, 14, 2) == 0b11 else slice(TCR_EL3, 14, 2) == 0b11
+  };
+  max_limit_tsz_field : int = 39;
+  c : Constraint = undefined;
+  if tsz_field > max_limit_tsz_field then {
+    c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+    assert(c == Constraint_FORCE | c == Constraint_NONE, "((c == Constraint_FORCE) || (c == Constraint_NONE))");
+    if c == Constraint_FORCE then tsz_field = max_limit_tsz_field else ()
+  } else ();
+  tszmin : int = if using64k & VAMax() == 52 then 12 else 16;
+  if tsz_field < tszmin then {
+    c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+    assert(c == Constraint_FORCE | c == Constraint_NONE, "((c == Constraint_FORCE) || (c == Constraint_NONE))");
+    if c == Constraint_FORCE then tsz_field = tszmin else ()
+  } else ();
+  return(64 - tsz_field)
+}
+
+val Auth : (bits(64), bits(64), bits(128), bool, bits(1)) -> bits(64) effect {escape, wreg, rreg, undef}
+
+/* Pointer-authentication check (AUT*): recompute the PAC for the pointer
+   with its PAC field replaced by sign-extension bits, and compare it against
+   the PAC embedded in `ptr`.  On success the stripped pointer is returned;
+   on failure an error code (keynumber and its complement) is planted in two
+   high bits of the pointer — bits 54:53 when top-byte-ignore applies,
+   otherwise bits 62:61 — so a later dereference faults. */
+function Auth (ptr, modifier, K, data, keynumber) = {
+  PAC : bits(64) = undefined;
+  result : bits(64) = undefined;
+  original_ptr : bits(64) = undefined;
+  error_code : bits(2) = undefined;
+  extfield : bits(64) = undefined;
+  tbi : bool = CalculateTBI(ptr, data);
+  let 'bottom_PAC_bit = ex_int(CalculateBottomPACBit(ptr, [ptr[55]]));
+  assert(constraint('bottom_PAC_bit >= 0));
+  /* all-zeros or all-ones extension chosen by bit 55 */
+  extfield = replicate_bits([ptr[55]], 64);
+  if tbi then
+    original_ptr = (ptr[63 .. 56] @ extfield[(negate(bottom_PAC_bit) + 56) - 1 .. 0]) @ ptr[bottom_PAC_bit - 1 .. 0]
+  else
+    original_ptr = extfield[(negate(bottom_PAC_bit) + 64) - 1 .. 0] @ ptr[bottom_PAC_bit - 1 .. 0];
+  PAC = ComputePAC(original_ptr, modifier, K[127 .. 64], K[63 .. 0]);
+  if tbi then
+    if PAC[((negate(bottom_PAC_bit) + 55) - 1) + bottom_PAC_bit .. bottom_PAC_bit] == ptr[((negate(bottom_PAC_bit) + 55) - 1) + bottom_PAC_bit .. bottom_PAC_bit] then
+      result = original_ptr
+    else {
+      error_code = keynumber @ ~(keynumber);
+      result = (original_ptr[63 .. 55] @ error_code) @ original_ptr[52 .. 0]
+    }
+  else if PAC[((negate(bottom_PAC_bit) + 55) - 1) + bottom_PAC_bit .. bottom_PAC_bit] == ptr[((negate(bottom_PAC_bit) + 55) - 1) + bottom_PAC_bit .. bottom_PAC_bit] & PAC[63 .. 56] == ptr[63 .. 56] then
+    result = original_ptr
+  else {
+    error_code = keynumber @ ~(keynumber);
+    result = ([original_ptr[63]] @ error_code) @ original_ptr[60 .. 0]
+  };
+  return(result)
+}
+
+val HighestELUsingAArch32 : unit -> bool
+
+/* In this model the highest implemented EL always runs AArch64, so this is
+   constant false; the early return is a translator artifact. */
+function HighestELUsingAArch32 () = {
+  if ~(HaveAnyAArch32()) then return(false) else ();
+  return(false)
+}
+
+val aget_SCR_GEN : unit -> bits(32) effect {escape, rreg, undef}
+
+/* Read the generic Secure Configuration Register: AArch32 SCR when the
+   highest EL uses AArch32, otherwise SCR_EL3.  Requires EL3. */
+function aget_SCR_GEN () = {
+  assert(HaveEL(EL3), "HaveEL(EL3)");
+  r : bits(32) = undefined;
+  if HighestELUsingAArch32() then r = SCR else r = SCR_EL3;
+  return(r)
+}
+
+val IsSecureBelowEL3 : unit -> bool effect {escape, rreg, undef}
+
+/* Secure state below EL3 is determined by SCR bit 0 (NS) when EL3 exists;
+   without EL3 this model reports Non-secure in both remaining cases. */
+function IsSecureBelowEL3 () = if HaveEL(EL3) then return([aget_SCR_GEN()[0]] == 0b0) else if HaveEL(EL2) then return(false) else return(false)
+
+val UsingAArch32 : unit -> bool effect {escape, rreg}
+
+/* True when the PE is executing in AArch32 state (PSTATE.nRW == 1), with
+   sanity asserts against the implemented-architecture configuration. */
+function UsingAArch32 () = {
+  aarch32 : bool = PSTATE.nRW == 0b1;
+  if ~(HaveAnyAArch32()) then assert(~(aarch32), "!(aarch32)") else ();
+  if HighestELUsingAArch32() then assert(aarch32, "aarch32") else ();
+  return(aarch32)
+}
+
+val aset_SPSR : bits(32) -> unit effect {escape, rreg, wreg}
+
+/* Write the SPSR of the current context: banked by AArch32 mode (PSTATE.M)
+   in AArch32 state, or by exception level in AArch64 state.  The `? if ? ==`
+   guard patterns are the translator's encoding of a match on equality. */
+function aset_SPSR value_name = {
+  if UsingAArch32() then match PSTATE.M {
+    ? if ? == M32_FIQ => SPSR_fiq = value_name,
+    ? if ? == M32_IRQ => SPSR_irq = value_name,
+    ? if ? == M32_Svc => SPSR_svc = value_name,
+    ? if ? == M32_Monitor => SPSR_mon = value_name,
+    ? if ? == M32_Abort => SPSR_abt = value_name,
+    ? if ? == M32_Hyp => SPSR_hyp = value_name,
+    ? if ? == M32_Undef => SPSR_und = value_name,
+    _ => Unreachable()
+  } else match PSTATE.EL {
+    ? if ? == EL1 => SPSR_EL1 = value_name,
+    ? if ? == EL2 => SPSR_EL2 = value_name,
+    ? if ? == EL3 => SPSR_EL3 = value_name,
+    _ => Unreachable()
+  };
+  ()
+}
+
+val aget_SPSR : unit -> bits(32) effect {escape, rreg, undef}
+
+/* Read the SPSR of the current context; exact mirror of aset_SPSR's
+   banking by AArch32 mode or AArch64 exception level. */
+function aget_SPSR () = {
+  result : bits(32) = undefined;
+  if UsingAArch32() then match PSTATE.M {
+    ? if ? == M32_FIQ => result = SPSR_fiq,
+    ? if ? == M32_IRQ => result = SPSR_irq,
+    ? if ? == M32_Svc => result = SPSR_svc,
+    ? if ? == M32_Monitor => result = SPSR_mon,
+    ? if ? == M32_Abort => result = SPSR_abt,
+    ? if ? == M32_Hyp => result = SPSR_hyp,
+    ? if ? == M32_Undef => result = SPSR_und,
+    _ => Unreachable()
+  } else match PSTATE.EL {
+    ? if ? == EL1 => result = SPSR_EL1,
+    ? if ? == EL2 => result = SPSR_EL2,
+    ? if ? == EL3 => result = SPSR_EL3,
+    _ => Unreachable()
+  };
+  return(result)
+}
+
+val IsSecure : unit -> bool effect {escape, rreg, undef}
+
+/* True in Secure state: at EL3 in AArch64, in AArch32 Monitor mode, or
+   whenever the state below EL3 is Secure. */
+function IsSecure () = {
+  if (HaveEL(EL3) & ~(UsingAArch32())) & PSTATE.EL == EL3 then return(true) else if (HaveEL(EL3) & UsingAArch32()) & PSTATE.M == M32_Monitor then return(true) else ();
+  return(IsSecureBelowEL3())
+}
+
+val FPProcessException : (FPExc, bits(32)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Signal a floating-point exception: `cumul` is the cumulative-status bit
+   index in FPSR/FPSCR, and the corresponding trap-enable bit sits 8 bits
+   higher in FPCR (per the ARM FPCR/FPSR layouts).  A set enable bit raises
+   an implementation-defined trap; otherwise the sticky flag is set. */
+function FPProcessException (exception, fpcr) = {
+  cumul : int = undefined;
+  match exception {
+    FPExc_InvalidOp => cumul = 0,
+    FPExc_DivideByZero => cumul = 1,
+    FPExc_Overflow => cumul = 2,
+    FPExc_Underflow => cumul = 3,
+    FPExc_Inexact => cumul = 4,
+    FPExc_InputDenorm => cumul = 7
+  };
+  enable : int = cumul + 8;
+  if [fpcr[enable]] == 0b1 then throw(Error_Implementation_Defined("floating-point trap handling")) else if UsingAArch32() then FPSCR = __SetSlice_bits(32, 1, FPSCR, cumul, 0b1) else FPSR = __SetSlice_bits(32, 1, FPSR, cumul, 0b1);
+  ()
+}
+
+val FPRoundBase : forall ('N : Int), 32 >= 0 & 'N >= 0.
+  (real, bits(32), FPRounding) -> bits('N) effect {escape, wreg, rreg, undef}
+
+/* Round a nonzero real `op` to an 'N-bit IEEE-754 binary format
+   (half/single/double) under `rounding`, signalling underflow, overflow and
+   inexact via FPProcessException.  Handles flush-to-zero, subnormals,
+   round-to-odd, and the AArch32 alternative half-precision format.
+   TIEAWAY is excluded by assertion. */
+function FPRoundBase (op, fpcr, rounding) = {
+  assert('N == 16 | 'N == 32 | 'N == 64);
+  assert(op != 0.0);
+  assert(rounding != FPRounding_TIEAWAY);
+  result : bits('N) = undefined;
+  F_mut : int = undefined;
+  E_mut : int = undefined;
+  minimum_exp : int = undefined;
+  /* exponent width E, fraction width F and minimum normal exponent per format */
+  if 'N == 16 then {
+    minimum_exp = negate(14);
+    E_mut = 5;
+    F_mut = 10
+  } else if 'N == 32 then {
+    minimum_exp = negate(126);
+    E_mut = 8;
+    F_mut = 23
+  } else {
+    minimum_exp = negate(1022);
+    E_mut = 11;
+    F_mut = 52
+  };
+  let 'F = F_mut;
+  let 'E = E_mut;
+  assert(constraint('F in {10, 23, 52} & 'E in {5, 8, 11}));
+  mantissa : real = undefined;
+  sign : bits(1) = undefined;
+  if op < 0.0 then {
+    sign = 0b1;
+    mantissa = negate(op)
+  } else {
+    sign = 0b0;
+    mantissa = op
+  };
+  /* normalise mantissa into [1.0, 2.0) */
+  exponent : int = 0;
+  while mantissa < 1.0 do {
+    mantissa = mantissa * 2.0;
+    exponent = exponent - 1
+  };
+  while mantissa >= 2.0 do {
+    mantissa = mantissa / 2.0;
+    exponent = exponent + 1
+  };
+  /* flush-to-zero (fpcr[24] = FZ, fpcr[19] = FZ16 per the ARM FPCR layout):
+     set the cumulative underflow flag and return a signed zero */
+  if ([fpcr[24]] == 0b1 & 'N != 16 | [fpcr[19]] == 0b1 & 'N == 16) & exponent < minimum_exp then {
+    if UsingAArch32() then FPSCR = __SetSlice_bits(32, 1, FPSCR, 3, 0b1)
+    else FPSR = __SetSlice_bits(32, 1, FPSR, 3, 0b1);
+    return(FPZero(sign))
+  } else ();
+  /* biased_exp == 0 selects the subnormal encoding */
+  biased_exp : int = max((exponent - minimum_exp) + 1, 0);
+  if biased_exp == 0 then mantissa = mantissa / 2.0 ^ (minimum_exp - exponent)
+  else ();
+  int_mant : int = RoundDown(mantissa * 2.0 ^ F);
+  error : real = mantissa * 2.0 ^ F - Real(int_mant);
+  /* underflow raised when subnormal and either inexact or fpcr[11]
+     (presumably UFE — confirm against the FPCR layout) is set */
+  if biased_exp == 0 & (error != 0.0 | [fpcr[11]] == 0b1) then FPProcessException(FPExc_Underflow, fpcr) else ();
+  overflow_to_inf : bool = undefined;
+  round_up : bool = undefined;
+  match rounding {
+    FPRounding_TIEEVEN => {
+      round_up = error > 0.5 | error == 0.5 & __GetSlice_int(1, int_mant, 0) == 0b1;
+      overflow_to_inf = true
+    },
+    FPRounding_POSINF => {
+      round_up = error != 0.0 & sign == 0b0;
+      overflow_to_inf = sign == 0b0
+    },
+    FPRounding_NEGINF => {
+      round_up = error != 0.0 & sign == 0b1;
+      overflow_to_inf = sign == 0b1
+    },
+    FPRounding_ZERO => {
+      round_up = false;
+      overflow_to_inf = false
+    },
+    FPRounding_ODD => {
+      round_up = false;
+      overflow_to_inf = false
+    }
+  };
+  if round_up then {
+    int_mant = int_mant + 1;
+    /* mantissa carried out of the subnormal range into the normal range */
+    if int_mant == pow2(F) then biased_exp = 1
+    else ();
+    if int_mant == pow2(F + 1) then {
+      biased_exp = biased_exp + 1;
+      int_mant = int_mant / 2
+    } else ()
+  } else ();
+  /* round-to-odd: force the low mantissa bit when inexact */
+  if error != 0.0 & rounding == FPRounding_ODD then
+    int_mant = __SetSlice_int(1, int_mant, 0, 0b1)
+  else ();
+  /* IEEE path ('N != 16 or fpcr[26] (AHP) clear) versus the alternative
+     half-precision format, which has no infinities */
+  if 'N != 16 | [fpcr[26]] == 0b0 then
+    if biased_exp >= pow2(E) - 1 then {
+      result = if overflow_to_inf then FPInfinity(sign) else FPMaxNormal(sign);
+      FPProcessException(FPExc_Overflow, fpcr);
+      error = 1.0
+    } else
+      result = (sign @ __GetSlice_int(('N - F) - 1, biased_exp, 0)) @ __GetSlice_int(F, int_mant, 0)
+  else if biased_exp >= pow2(E) then {
+    result = sign @ Ones('N - 1);
+    FPProcessException(FPExc_InvalidOp, fpcr);
+    error = 0.0
+  } else
+    result = (sign @ __GetSlice_int(('N - F) - 1, biased_exp, 0)) @ __GetSlice_int(F, int_mant, 0);
+  if error != 0.0 then FPProcessException(FPExc_Inexact, fpcr) else ();
+  return(result)
+}
+
+val FPRoundCV : forall ('N : Int), 32 >= 0 & 'N >= 0.
+  (real, bits(32), FPRounding) -> bits('N) effect {escape, rreg, undef, wreg}
+
+/* FPRound variant for conversions: clears fpcr bit 19 (presumably FZ16 —
+   confirm against the FPCR layout) before rounding. */
+function FPRoundCV (op, fpcr__arg, rounding) = {
+  fpcr = fpcr__arg;
+  fpcr = __SetSlice_bits(32, 1, fpcr, 19, 0b0);
+  return(FPRoundBase(op, fpcr, rounding))
+}
+
+val FPRound__0 : forall ('N : Int), 32 >= 0 & 'N >= 0.
+  (real, bits(32), FPRounding) -> bits('N) effect {escape, rreg, undef, wreg}
+
+val FPRound__1 : forall ('N : Int), 32 >= 0 & 'N >= 0.
+  (real, bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+/* FPRound overload: __0 forces the IEEE half format by clearing fpcr bit 26
+   (AHP per the ARM FPCR layout) before delegating to FPRoundBase; __1 uses
+   the rounding mode selected by fpcr itself. */
+overload FPRound = {FPRound__0, FPRound__1}
+
+function FPRound__0 (op, fpcr__arg, rounding) = {
+  fpcr = fpcr__arg;
+  fpcr = __SetSlice_bits(32, 1, fpcr, 26, 0b0);
+  return(FPRoundBase(op, fpcr, rounding))
+}
+
+function FPRound__1 (op, fpcr) = return(FPRound(op, fpcr, FPRoundingMode(fpcr)))
+
+val FixedToFP : forall ('M : Int) ('N : Int), 'M >= 0 & 32 >= 0 & 'N >= 0.
+  (bits('M), int, bool, bits(32), FPRounding) -> bits('N) effect {escape, undef, wreg, rreg}
+
+/* Convert an 'M-bit fixed-point value with `fbits` fraction bits (signed or
+   unsigned) to an 'N-bit floating-point value.  Zero maps to +0 regardless
+   of rounding mode; round-to-odd is excluded by assertion. */
+function FixedToFP (op, 'fbits, unsigned, fpcr, rounding) = {
+  assert('N == 16 | 'N == 32 | 'N == 64);
+  assert('M == 16 | 'M == 32 | 'M == 64);
+  result : bits('N) = undefined;
+  assert(fbits >= 0);
+  assert(rounding != FPRounding_ODD);
+  int_operand : int = asl_Int(op, unsigned);
+  real_operand : real = Real(int_operand) / 2.0 ^ fbits;
+  if real_operand == 0.0 then result = FPZero(0b0)
+  else result = FPRound(real_operand, fpcr, rounding);
+  return(result)
+}
+
+val FPProcessNaN : forall ('N : Int), 'N >= 0 & 32 >= 0 & 'N >= 0.
+  (FPType, bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+/* Propagate a NaN operand: a signalling NaN is quietened by setting the top
+   fraction bit and raises InvalidOp; fpcr bit 25 (DN per the ARM FPCR
+   layout) replaces the result with the default NaN. */
+function FPProcessNaN (typ, op, fpcr) = {
+  assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+  assert(typ == FPType_QNaN | typ == FPType_SNaN, "((type == FPType_QNaN) || (type == FPType_SNaN))");
+  /* bit position of the most-significant fraction bit for each format */
+  topfrac : int = undefined;
+  match 'N {
+    16 => topfrac = 9,
+    32 => topfrac = 22,
+    64 => topfrac = 51
+  };
+  result : bits('N) = op;
+  if typ == FPType_SNaN then {
+    result = __SetSlice_bits('N, 1, result, topfrac, 0b1);
+    FPProcessException(FPExc_InvalidOp, fpcr)
+  } else ();
+  if [fpcr[25]] == 0b1 then result = FPDefaultNaN() else ();
+  return(result)
+}
+
+val FPProcessNaNs3 : forall ('N : Int), 'N >= 0 & 'N >= 0 & 'N >= 0 & 32 >= 0 & 'N >= 0.
+  (FPType, FPType, FPType, bits('N), bits('N), bits('N), bits(32)) -> (bool, bits('N)) effect {escape, rreg, undef, wreg}
+
+/* Three-operand NaN handling: first signalling NaN wins, then first quiet
+   NaN, each in operand order 1, 2, 3.  Returns (done, result); done is false
+   (with a zero result) when no operand is a NaN. */
+function FPProcessNaNs3 (type1, type2, type3, op1, op2, op3, fpcr) = {
+  assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+  result : bits('N) = undefined;
+  done : bool = undefined;
+  if type1 == FPType_SNaN then {
+    done = true;
+    result = FPProcessNaN(type1, op1, fpcr)
+  } else if type2 == FPType_SNaN then {
+    done = true;
+    result = FPProcessNaN(type2, op2, fpcr)
+  } else if type3 == FPType_SNaN then {
+    done = true;
+    result = FPProcessNaN(type3, op3, fpcr)
+  } else if type1 == FPType_QNaN then {
+    done = true;
+    result = FPProcessNaN(type1, op1, fpcr)
+  } else if type2 == FPType_QNaN then {
+    done = true;
+    result = FPProcessNaN(type2, op2, fpcr)
+  } else if type3 == FPType_QNaN then {
+    done = true;
+    result = FPProcessNaN(type3, op3, fpcr)
+  } else {
+    done = false;
+    result = Zeros()
+  };
+  return((done, result))
+}
+
+val FPProcessNaNs : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0 & 'N >= 0.
+  (FPType, FPType, bits('N), bits('N), bits(32)) -> (bool, bits('N)) effect {escape, rreg, undef, wreg}
+
+/* Two-operand NaN handling: first signalling NaN wins, then first quiet NaN,
+   each in operand order 1, 2.  Returns (done, result); done is false (with a
+   zero result) when neither operand is a NaN. */
+function FPProcessNaNs (type1, type2, op1, op2, fpcr) = {
+  assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+  result : bits('N) = undefined;
+  done : bool = undefined;
+  if type1 == FPType_SNaN then {
+    done = true;
+    result = FPProcessNaN(type1, op1, fpcr)
+  } else if type2 == FPType_SNaN then {
+    done = true;
+    result = FPProcessNaN(type2, op2, fpcr)
+  } else if type1 == FPType_QNaN then {
+    done = true;
+    result = FPProcessNaN(type1, op1, fpcr)
+  } else if type2 == FPType_QNaN then {
+    done = true;
+    result = FPProcessNaN(type2, op2, fpcr)
+  } else {
+    done = false;
+    result = Zeros()
+  };
+  return((done, result))
+}
+
+val CurrentInstrSet : unit -> InstrSet effect {escape, rreg, undef}
+
+/* Current instruction set: A32/T32 selected by PSTATE.T in AArch32 state,
+   otherwise A64. */
+function CurrentInstrSet () = {
+  result : InstrSet = undefined;
+  if UsingAArch32() then result = if PSTATE.T == 0b0 then InstrSet_A32 else InstrSet_T32 else result = InstrSet_A64;
+  return(result)
+}
+
+val AArch32_ExecutingLSMInstr : unit -> bool effect {escape, rreg, undef}
+
+/* Is the current AArch32 instruction a load/store multiple?
+ * Works purely on the raw encoding from ThisInstr(): for A32 it matches the
+ * LDM/STM encoding class (cond != 0xF, bits<27:25> == 100); for 16-bit T32
+ * it matches bits<15:12> == 0xC; for 32-bit T32 it matches
+ * bits<31:25> == 1110100 with bit<22> == 0.
+ * NOTE(review): exact encoding-class meanings taken from the bit patterns
+ * only — confirm against the ARM ARM instruction tables. */
+function AArch32_ExecutingLSMInstr () = {
+ instr : bits(32) = ThisInstr();
+ instr_set : InstrSet = CurrentInstrSet();
+ assert(instr_set == InstrSet_A32 | instr_set == InstrSet_T32, "((instr_set == InstrSet_A32) || (instr_set == InstrSet_T32))");
+ if instr_set == InstrSet_A32 then return(slice(instr, 28, 4) != 0xF & slice(instr, 25, 3) == 0b100) else if ThisInstrLength() == 16 then return(slice(instr, 12, 4) == 0xC) else return(slice(instr, 25, 7) == 0b1110100 & [instr[22]] == 0b0)
+}
+
+val AArch32_ExecutingCP10or11Instr : unit -> bool effect {escape, rreg, undef}
+
+/* Is the current AArch32 instruction a coprocessor 10/11 access?
+ * Matches the CDP/LDC/STC/MCR/MRC encoding classes with coprocessor field
+ * bits<11:8> equal to 0xA or 0xB (masked compare against 0xE/0xA); the T32
+ * form additionally requires bits<31:28> & 0xE == 0xE. */
+function AArch32_ExecutingCP10or11Instr () = {
+ instr : bits(32) = ThisInstr();
+ instr_set : InstrSet = CurrentInstrSet();
+ assert(instr_set == InstrSet_A32 | instr_set == InstrSet_T32, "((instr_set == InstrSet_A32) || (instr_set == InstrSet_T32))");
+ if instr_set == InstrSet_A32 then return((slice(instr, 24, 4) == 0xE | slice(instr, 25, 3) == 0b110) & (slice(instr, 8, 4) & 0xE) == 0xA) else return(((slice(instr, 28, 4) & 0xE) == 0xE & (slice(instr, 24, 4) == 0xE | slice(instr, 25, 3) == 0b110)) & (slice(instr, 8, 4) & 0xE) == 0xA)
+}
+
+val AdvSIMDExpandImm : (bits(1), bits(4), bits(8)) -> bits(64) effect {escape, rreg, undef}
+
+/* Expand an Advanced SIMD modified-immediate (op, cmode, imm8) into the
+ * 64-bit constant it encodes. cmode<3:1> selects the placement/replication
+ * pattern of imm8 within 32- or 16-bit lanes; cmode == 0b111x has four
+ * sub-cases selected by cmode<0> and op: byte replication, per-bit byte
+ * expansion, and two floating-point immediate formats. The cmode<0>==1,
+ * op==1 sub-case is a reserved encoding under AArch32 and throws. */
+function AdvSIMDExpandImm (op, cmode, imm8) = {
+ imm32 : bits(32) = undefined;
+ imm8h : bits(8) = undefined;
+ imm8g : bits(8) = undefined;
+ imm8f : bits(8) = undefined;
+ imm8e : bits(8) = undefined;
+ imm8d : bits(8) = undefined;
+ imm8c : bits(8) = undefined;
+ imm8b : bits(8) = undefined;
+ imm8a : bits(8) = undefined;
+ imm64 : bits(64) = undefined;
+ match slice(cmode, 1, 3) {
+ 0b000 => imm64 = replicate_bits(Zeros(24) @ imm8, 2),
+ 0b001 => imm64 = replicate_bits((Zeros(16) @ imm8) @ Zeros(8), 2),
+ 0b010 => imm64 = replicate_bits((Zeros(8) @ imm8) @ Zeros(16), 2),
+ 0b011 => imm64 = replicate_bits(imm8 @ Zeros(24), 2),
+ 0b100 => imm64 = replicate_bits(Zeros(8) @ imm8, 4),
+ 0b101 => imm64 = replicate_bits(imm8 @ Zeros(8), 4),
+ 0b110 => if [cmode[0]] == 0b0 then imm64 = replicate_bits((Zeros(16) @ imm8) @ Ones(8), 2) else imm64 = replicate_bits((Zeros(8) @ imm8) @ Ones(16), 2),
+ 0b111 => {
+ // cmode<0>=0, op=0: replicate imm8 into all eight bytes.
+ if [cmode[0]] == 0b0 & op == 0b0 then imm64 = replicate_bits(imm8, 8) else ();
+ // cmode<0>=0, op=1: expand each bit of imm8 to a full byte.
+ if [cmode[0]] == 0b0 & op == 0b1 then {
+ imm8a = replicate_bits([imm8[7]], 8);
+ imm8b = replicate_bits([imm8[6]], 8);
+ imm8c = replicate_bits([imm8[5]], 8);
+ imm8d = replicate_bits([imm8[4]], 8);
+ imm8e = replicate_bits([imm8[3]], 8);
+ imm8f = replicate_bits([imm8[2]], 8);
+ imm8g = replicate_bits([imm8[1]], 8);
+ imm8h = replicate_bits([imm8[0]], 8);
+ imm64 = ((((((imm8a @ imm8b) @ imm8c) @ imm8d) @ imm8e) @ imm8f) @ imm8g) @ imm8h
+ } else ();
+ // cmode<0>=1, op=0: 32-bit FP immediate pattern, replicated twice.
+ if [cmode[0]] == 0b1 & op == 0b0 then {
+ imm32 = ((([imm8[7]] @ ~([imm8[6]])) @ replicate_bits([imm8[6]], 5)) @ slice(imm8, 0, 6)) @ Zeros(19);
+ imm64 = replicate_bits(imm32, 2)
+ } else ();
+ // cmode<0>=1, op=1: 64-bit FP immediate pattern; reserved in AArch32.
+ if [cmode[0]] == 0b1 & op == 0b1 then {
+ if UsingAArch32() then throw(Error_ReservedEncoding()) else ();
+ imm64 = ((([imm8[7]] @ ~([imm8[6]])) @ replicate_bits([imm8[6]], 8)) @ slice(imm8, 0, 6)) @ Zeros(48)
+ } else ()
+ }
+ };
+ return(imm64)
+}
+
+val HaveCryptoExt2 : unit -> bool
+
+/* SHA512/SHA3 crypto instruction support: requires both ARMv8.2 and the
+ * base crypto extension, and is implementation defined beyond that. */
+function HaveCryptoExt2 () = {
+ if HasArchVersion(ARMv8p2) & HaveCryptoExt() then
+ return(__IMPDEF_boolean("Has SHA512 and SHA3 Crypto instructions"))
+ else return(false)
+}
+
+val HaveChCryptoExt : unit -> bool
+
+/* SM3/SM4 crypto instruction support: requires ARMv8.2, and is
+ * implementation defined beyond that. */
+function HaveChCryptoExt () = {
+ if HasArchVersion(ARMv8p2) then
+ return(__IMPDEF_boolean("Has SM3 and SM4 Crypto instructions"))
+ else return(false)
+}
+
+val HaveAnyAArch64 : unit -> bool
+
+/* True when any exception level supports AArch64, i.e. the highest
+ * implemented EL is not using AArch32. */
+function HaveAnyAArch64 () = {
+ if HighestELUsingAArch32() then return(false) else return(true)
+}
+
+val AArch32_ReportDeferredSError : (bits(2), bits(1)) -> bits(32) effect {escape, rreg, undef}
+
+/* Build the 32-bit deferred SError report value from the AET (asynchronous
+ * error type, 2 bits) and ExT (external abort type, 1 bit) fields.
+ * Bit 31 is always set. The syndrome field layout depends on the current
+ * EL (EL2 uses one packing, other ELs use another keyed off TTBCR bit 31),
+ * and the syndrome is placed in bits 24..0 (zero-extended) when any EL
+ * supports AArch64, otherwise in bits 15..0.
+ * NOTE(review): field positions mirror the DISR/VDISR syndrome layout —
+ * confirm against the ARM ARM register descriptions. */
+function AArch32_ReportDeferredSError (AET, ExT) = {
+ target : bits(32) = undefined;
+ target : bits(32) = __SetSlice_bits(32, 1, target, 31, 0b1);
+ syndrome : bits(16) = Zeros(16);
+ if PSTATE.EL == EL2 then {
+ syndrome[11 .. 10] = AET;
+ syndrome[9 .. 9] = ExT;
+ syndrome[5 .. 0] = 0b010001
+ } else {
+ syndrome[15 .. 14] = AET;
+ syndrome[12 .. 12] = ExT;
+ syndrome[9 .. 9] = [TTBCR[31]];
+ if [TTBCR[31]] == 0b1 then syndrome[5 .. 0] = 0b010001
+ else (syndrome[10 .. 10], syndrome[3 .. 0]) = (0b1, 0b0110)
+ };
+ if HaveAnyAArch64() then target[24 .. 0] = ZeroExtend(syndrome, 25)
+ else target[15 .. 0] = syndrome;
+ return(target)
+}
+
+val HaveAArch32EL : bits(2) -> bool
+
+/* Does exception level `el` support AArch32?
+ * False if el is not implemented, or no EL supports AArch32, or el is the
+ * highest EL and that EL is not using AArch32. True if the highest EL uses
+ * AArch32, or el is EL0; otherwise true by default. */
+function HaveAArch32EL el = {
+ if ~(HaveEL(el)) then return(false) else if ~(HaveAnyAArch32()) then return(false) else if HighestELUsingAArch32() then return(true) else if el == HighestEL() then return(false) else if el == EL0 then return(true) else ();
+ return(true)
+}
+
+val AArch64_ResetSpecialRegisters : unit -> unit effect {undef, wreg}
+
+/* Reset behaviour for the AArch64 special-purpose registers: set the stack
+ * pointers, SPSRs and ELRs to architecturally UNKNOWN (`undefined`) values.
+ * EL2/EL3 registers are only touched when those ELs are implemented, and
+ * the banked AArch32 SPSRs only when EL1 supports AArch32. */
+function AArch64_ResetSpecialRegisters () = {
+ SP_EL0 = undefined;
+ SP_EL1 = undefined;
+ SPSR_EL1 = undefined;
+ ELR_EL1 = undefined;
+ if HaveEL(EL2) then {
+ SP_EL2 = undefined;
+ SPSR_EL2 = undefined;
+ ELR_EL2 = undefined
+ } else ();
+ if HaveEL(EL3) then {
+ SP_EL3 = undefined;
+ SPSR_EL3 = undefined;
+ ELR_EL3 = undefined
+ } else ();
+ if HaveAArch32EL(EL1) then {
+ SPSR_fiq = undefined;
+ SPSR_irq = undefined;
+ SPSR_abt = undefined;
+ SPSR_und = undefined
+ } else ();
+ // Debug link registers are always reset to UNKNOWN.
+ DLR_EL0 = undefined;
+ DSPSR_EL0 = undefined;
+ ()
+}
+
+val Halted : unit -> bool effect {rreg}
+
+/* True when the PE is in Debug state: EDSCR.STATUS (bits 5..0) is neither
+ * of the two non-halted status codes. */
+function Halted () = {
+ status : bits(6) = slice(EDSCR, 0, 6);
+ return(status != 0b000001 & status != 0b000010)
+}
+
+val FPUnpackBase : forall ('N : Int), 'N >= 0 & 32 >= 0 & 1 >= 0.
+ (bits('N), bits(32)) -> (FPType, bits(1), real) effect {escape, rreg, undef, wreg}
+
+/* Decompose an IEEE-format value of width 16/32/64 into its classification,
+ * sign bit, and exact real value. Denormals are flushed to zero when the
+ * relevant fpcr flush bit is set (bit 19 for half precision, bit 24 for
+ * single/double — presumably FZ16/FZ; confirm against the FPCR layout);
+ * only the 32/64-bit flush paths raise FPExc_InputDenorm. An all-ones
+ * exponent yields Infinity or a NaN (top fraction bit distinguishes QNaN
+ * from SNaN); for half precision, fpcr bit 26 set disables the inf/NaN
+ * interpretation. Infinities are given the placeholder magnitude
+ * 2.0 ^ 1000000; NaNs get value 0.0. The sign is applied at the end. */
+function FPUnpackBase (fpval, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ frac64 : bits(52) = undefined;
+ exp64 : bits(11) = undefined;
+ frac32 : bits(23) = undefined;
+ exp32 : bits(8) = undefined;
+ value_name : real = undefined;
+ typ : FPType = undefined;
+ frac16 : bits(10) = undefined;
+ exp16 : bits(5) = undefined;
+ sign : bits(1) = undefined;
+ // Half precision: 1 sign, 5 exponent, 10 fraction bits.
+ if 'N == 16 then {
+ sign = [fpval[15]];
+ exp16 = slice(fpval, 10, 5);
+ frac16 = slice(fpval, 0, 10);
+ if IsZero(exp16) then if IsZero(frac16) | [fpcr[19]] == 0b1 then {
+ typ = FPType_Zero;
+ value_name = 0.0
+ } else {
+ typ = FPType_Nonzero;
+ value_name = 2.0 ^ negate(14) * (Real(UInt(frac16)) * 2.0 ^ negate(10))
+ } else if IsOnes(exp16) & [fpcr[26]] == 0b0 then if IsZero(frac16) then {
+ typ = FPType_Infinity;
+ value_name = 2.0 ^ 1000000
+ } else {
+ typ = if [frac16[9]] == 0b1 then FPType_QNaN else FPType_SNaN;
+ value_name = 0.0
+ } else {
+ typ = FPType_Nonzero;
+ value_name = 2.0 ^ (UInt(exp16) - 15) * (1.0 + Real(UInt(frac16)) * 2.0 ^ negate(10))
+ }
+ // Single precision: 1 sign, 8 exponent, 23 fraction bits.
+ } else if 'N == 32 then {
+ sign = [fpval[31]];
+ exp32 = slice(fpval, 23, 8);
+ frac32 = slice(fpval, 0, 23);
+ if IsZero(exp32) then if IsZero(frac32) | [fpcr[24]] == 0b1 then {
+ typ = FPType_Zero;
+ value_name = 0.0;
+ if ~(IsZero(frac32)) then FPProcessException(FPExc_InputDenorm, fpcr) else ()
+ } else {
+ typ = FPType_Nonzero;
+ value_name = 2.0 ^ negate(126) * (Real(UInt(frac32)) * 2.0 ^ negate(23))
+ } else if IsOnes(exp32) then if IsZero(frac32) then {
+ typ = FPType_Infinity;
+ value_name = 2.0 ^ 1000000
+ } else {
+ typ = if [frac32[22]] == 0b1 then FPType_QNaN else FPType_SNaN;
+ value_name = 0.0
+ } else {
+ typ = FPType_Nonzero;
+ value_name = 2.0 ^ (UInt(exp32) - 127) * (1.0 + Real(UInt(frac32)) * 2.0 ^ negate(23))
+ }
+ // Double precision: 1 sign, 11 exponent, 52 fraction bits.
+ } else {
+ sign = [fpval[63]];
+ exp64 = slice(fpval, 52, 11);
+ frac64 = slice(fpval, 0, 52);
+ if IsZero(exp64) then if IsZero(frac64) | [fpcr[24]] == 0b1 then {
+ typ = FPType_Zero;
+ value_name = 0.0;
+ if ~(IsZero(frac64)) then FPProcessException(FPExc_InputDenorm, fpcr) else ()
+ } else {
+ typ = FPType_Nonzero;
+ value_name = 2.0 ^ negate(1022) * (Real(UInt(frac64)) * 2.0 ^ negate(52))
+ } else if IsOnes(exp64) then if IsZero(frac64) then {
+ typ = FPType_Infinity;
+ value_name = 2.0 ^ 1000000
+ } else {
+ typ = if [frac64[51]] == 0b1 then FPType_QNaN else FPType_SNaN;
+ value_name = 0.0
+ } else {
+ typ = FPType_Nonzero;
+ value_name = 2.0 ^ (UInt(exp64) - 1023) * (1.0 + Real(UInt(frac64)) * 2.0 ^ negate(52))
+ }
+ };
+ if sign == 0b1 then value_name = negate(value_name) else ();
+ return((typ, sign, value_name))
+}
+
+val FPUnpackCV : forall ('N : Int), 'N >= 0 & 32 >= 0 & 1 >= 0.
+ (bits('N), bits(32)) -> (FPType, bits(1), real) effect {escape, rreg, undef, wreg}
+
+/* Unpack variant used by conversions: identical to FPUnpackBase except
+ * that fpcr bit 19 is forced to zero before unpacking. */
+function FPUnpackCV (fpval, fpcr__arg) = {
+ let fpcr = __SetSlice_bits(32, 1, fpcr__arg, 19, 0b0);
+ return(FPUnpackBase(fpval, fpcr))
+}
+
+val FPConvert__0 : forall ('N : Int) ('M : Int), 'N >= 0 & 32 >= 0 & 'M >= 0.
+ (bits('N), bits(32), FPRounding) -> bits('M) effect {escape, rreg, undef, wreg}
+
+val FPConvert__1 : forall ('N : Int) ('M : Int), 'N >= 0 & 32 >= 0 & 'M >= 0.
+ (bits('N), bits(32)) -> bits('M) effect {escape, rreg, undef, wreg}
+
+overload FPConvert = {FPConvert__0, FPConvert__1}
+
+/* Convert an FP value of width 'N to width 'M under the given rounding
+ * mode. alt_hp (destination is half precision and fpcr bit 26 is set —
+ * presumably the AHP alternative-half-precision control; confirm) changes
+ * NaN and infinity handling: NaNs convert to zero and infinities to the
+ * maximum-magnitude pattern, each raising InvalidOp. Otherwise NaNs
+ * propagate (default NaN when fpcr bit 25 is set, else FPConvertNaN),
+ * infinities and zeros map to the same class, and finite values are
+ * re-rounded via FPRoundCV. */
+function FPConvert__0 (op, fpcr, rounding) = {
+ assert('M == 16 | 'M == 32 | 'M == 64);
+ assert('N == 16 | 'N == 32 | 'N == 64);
+ result : bits('M) = undefined;
+ value_name : real = undefined;
+ sign : bits(1) = undefined;
+ typ : FPType = undefined;
+ (typ, sign, value_name) = FPUnpackCV(op, fpcr);
+ alt_hp : bool = 'M == 16 & [fpcr[26]] == 0b1;
+ if typ == FPType_SNaN | typ == FPType_QNaN then {
+ if alt_hp then result = FPZero(sign)
+ else if [fpcr[25]] == 0b1 then result = FPDefaultNaN()
+ else result = FPConvertNaN(op);
+ if typ == FPType_SNaN | alt_hp then FPProcessException(FPExc_InvalidOp, fpcr) else ()
+ } else if typ == FPType_Infinity then
+ if alt_hp then {
+ // Alternative HP has no infinity encoding: saturate to max magnitude.
+ result = sign @ Ones('M - 1);
+ FPProcessException(FPExc_InvalidOp, fpcr)
+ } else result = FPInfinity(sign)
+ else if typ == FPType_Zero then result = FPZero(sign)
+ else result = FPRoundCV(value_name, fpcr, rounding);
+ return(result)
+}
+
+/* Two-argument form: rounding mode is taken from fpcr. */
+function FPConvert__1 (op, fpcr) = return(FPConvert(op, fpcr, FPRoundingMode(fpcr)))
+
+val FPUnpack : forall ('N : Int), 'N >= 0 & 32 >= 0 & 1 >= 0.
+ (bits('N), bits(32)) -> (FPType, bits(1), real) effect {escape, rreg, undef, wreg}
+
+/* Standard unpack: identical to FPUnpackBase except that fpcr bit 26 is
+ * forced to zero before unpacking. */
+function FPUnpack (fpval, fpcr__arg) = {
+ let fpcr = __SetSlice_bits(32, 1, fpcr__arg, 26, 0b0);
+ return(FPUnpackBase(fpval, fpcr))
+}
+
+val FPToFixedJS : forall ('M : Int) ('N : Int), 'M >= 0 & 32 >= 0 & 'N >= 0.
+ (bits('M), bits(32), bool) -> bits('N) effect {escape, rreg, undef, wreg}
+
+/* Double-to-32-bit integer conversion (the asserted shape is M==64, N==32)
+ * with round-toward-zero and modulo-2^32 wrapping of out-of-range values
+ * — the semantics match the FJCVTZS "JavaScript convert" instruction
+ * (hence the JS suffix; confirm against the ARM ARM). The Z flag result is
+ * 1 only for an exact, in-range, non-negative-zero conversion; it is
+ * written either to PSTATE NZCV (Is64) or to FPSCR bits 31..28. NaNs,
+ * out-of-range values and inexact results raise the matching FP exception
+ * and clear Z; infinities produce result 0. */
+function FPToFixedJS (op, fpcr, Is64) = {
+ assert('M == 64 & 'N == 32, "((M == 64) && (N == 32))");
+ value_name : real = undefined;
+ sign : bits(1) = undefined;
+ typ : FPType = undefined;
+ (typ, sign, value_name) = FPUnpack(op, fpcr);
+ Z : bits(1) = 0b1;
+ if typ == FPType_SNaN | typ == FPType_QNaN then {
+ FPProcessException(FPExc_InvalidOp, fpcr);
+ Z = 0b0
+ } else ();
+ // Round toward zero: round down, then adjust negative inexact results up.
+ int_result : int = RoundDown(value_name);
+ error : real = value_name - Real(int_result);
+ round_it_up : bool = error != 0.0 & int_result < 0;
+ if round_it_up then int_result = int_result + 1 else ();
+ result : int = undefined;
+ // Wrap modulo 2^32.
+ if int_result < 0 then result = int_result - 2 ^ 32 * RoundUp(Real(int_result) / Real(2 ^ 32)) else result = int_result - 2 ^ 32 * RoundDown(Real(int_result) / Real(2 ^ 32));
+ if int_result < negate(2 ^ 31) | int_result > 2 ^ 31 - 1 then {
+ FPProcessException(FPExc_InvalidOp, fpcr);
+ Z = 0b0
+ } else if error != 0.0 then {
+ FPProcessException(FPExc_Inexact, fpcr);
+ Z = 0b0
+ } else ();
+ // Negative zero is not an exact conversion for flag purposes.
+ if sign == 0b1 & value_name == 0.0 then Z = 0b0 else ();
+ if typ == FPType_Infinity then result = 0 else ();
+ if Is64 then (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V) = (0b0 @ Z) @ 0b00 else FPSCR = __SetSlice_bits(32, 4, FPSCR, 28, (0b0 @ Z) @ 0b00);
+ return(__GetSlice_int(32, result, 0))
+}
+
+val FPToFixed : forall ('N : Int) ('M : Int), 'N >= 0 & 32 >= 0 & 'M >= 0.
+ (bits('N), int, bool, bits(32), FPRounding) -> bits('M) effect {escape, rreg, undef, wreg}
+
+/* Convert an FP value to an 'M-bit fixed-point integer with fbits
+ * fractional bits (fbits == 0 gives a plain integer conversion).
+ * NaN inputs raise InvalidOp. The value is scaled by 2^fbits, rounded
+ * per `rounding` (ODD is excluded by assertion), then saturated to the
+ * signed/unsigned 'M-bit range by SatQ; saturation raises InvalidOp,
+ * otherwise any rounding error raises Inexact. */
+function FPToFixed (op, 'fbits, unsigned, fpcr, rounding) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ assert('M == 16 | 'M == 32 | 'M == 64, "((M == 16) || ((M == 32) || (M == 64)))");
+ assert(fbits >= 0, "(fbits >= 0)");
+ assert(rounding != FPRounding_ODD, "(rounding != FPRounding_ODD)");
+ value_name : real = undefined;
+ sign : bits(1) = undefined;
+ typ : FPType = undefined;
+ (typ, sign, value_name) = FPUnpack(op, fpcr);
+ if typ == FPType_SNaN | typ == FPType_QNaN then FPProcessException(FPExc_InvalidOp, fpcr) else ();
+ value_name = value_name * 2.0 ^ fbits;
+ int_result : int = RoundDown(value_name);
+ error : real = value_name - Real(int_result);
+ round_up : bool = undefined;
+ // Decide whether to bump the floored result, per rounding mode.
+ match rounding {
+ FPRounding_TIEEVEN => round_up = error > 0.5 | error == 0.5 & __GetSlice_int(1, int_result, 0) == 0b1,
+ FPRounding_POSINF => round_up = error != 0.0,
+ FPRounding_NEGINF => round_up = false,
+ FPRounding_ZERO => round_up = error != 0.0 & int_result < 0,
+ FPRounding_TIEAWAY => round_up = error > 0.5 | error == 0.5 & int_result >= 0
+ };
+ if round_up then int_result = int_result + 1 else ();
+ overflow : bool = undefined;
+ result : bits('M) = undefined;
+ (result, overflow) = SatQ(int_result, 'M, unsigned);
+ if overflow then FPProcessException(FPExc_InvalidOp, fpcr) else if error != 0.0 then FPProcessException(FPExc_Inexact, fpcr) else ();
+ return(result)
+}
+
+val FPSqrt : forall ('N : Int), 'N >= 0 & 32 >= 0 & 'N >= 0.
+ (bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+/* FP square root. NaNs propagate via FPProcessNaN; sqrt(+/-0) = +/-0;
+ * sqrt(+inf) = +inf; any other negative input yields the default NaN and
+ * raises InvalidOp; otherwise the real-valued Sqrt is rounded per fpcr. */
+function FPSqrt (op, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ value_name : real = undefined;
+ sign : bits(1) = undefined;
+ typ : FPType = undefined;
+ (typ, sign, value_name) = FPUnpack(op, fpcr);
+ result : bits('N) = undefined;
+ if typ == FPType_SNaN | typ == FPType_QNaN then result = FPProcessNaN(typ, op, fpcr) else if typ == FPType_Zero then result = FPZero(sign) else if typ == FPType_Infinity & sign == 0b0 then result = FPInfinity(sign) else if sign == 0b1 then {
+ result = FPDefaultNaN();
+ FPProcessException(FPExc_InvalidOp, fpcr)
+ } else result = FPRound(Sqrt(value_name), fpcr);
+ return(result)
+}
+
+val FPRoundInt : forall ('N : Int), 'N >= 0 & 32 >= 0 & 'N >= 0.
+ (bits('N), bits(32), FPRounding, bool) -> bits('N) effect {escape, rreg, undef, wreg}
+
+/* Round an FP value to an integral FP value of the same width ("round to
+ * integer in floating-point"). NaNs propagate; infinities and zeros pass
+ * through with their sign. Otherwise the value is floored, conditionally
+ * bumped per the rounding mode (ODD is excluded by assertion), and
+ * re-encoded; a result of exactly 0.0 keeps the input's sign via FPZero.
+ * When `exact` is set, any discarded fraction raises FPExc_Inexact. */
+function FPRoundInt (op, fpcr, rounding, exact) = {
+ assert(rounding != FPRounding_ODD, "(rounding != FPRounding_ODD)");
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ value_name : real = undefined;
+ sign : bits(1) = undefined;
+ typ : FPType = undefined;
+ (typ, sign, value_name) = FPUnpack(op, fpcr);
+ real_result : real = undefined;
+ round_up : bool = undefined;
+ error : real = undefined;
+ int_result : int = undefined;
+ result : bits('N) = undefined;
+ if typ == FPType_SNaN | typ == FPType_QNaN then result = FPProcessNaN(typ, op, fpcr) else if typ == FPType_Infinity then result = FPInfinity(sign) else if typ == FPType_Zero then result = FPZero(sign) else {
+ int_result = RoundDown(value_name);
+ error = value_name - Real(int_result);
+ match rounding {
+ FPRounding_TIEEVEN => round_up = error > 0.5 | error == 0.5 & __GetSlice_int(1, int_result, 0) == 0b1,
+ FPRounding_POSINF => round_up = error != 0.0,
+ FPRounding_NEGINF => round_up = false,
+ FPRounding_ZERO => round_up = error != 0.0 & int_result < 0,
+ FPRounding_TIEAWAY => round_up = error > 0.5 | error == 0.5 & int_result >= 0
+ };
+ if round_up then int_result = int_result + 1 else ();
+ real_result = Real(int_result);
+ // The integral result is exact, so FPRounding_ZERO here cannot round.
+ if real_result == 0.0 then result = FPZero(sign) else result = FPRound(real_result, fpcr, FPRounding_ZERO);
+ if error != 0.0 & exact then FPProcessException(FPExc_Inexact, fpcr) else ()
+ };
+ return(result)
+}
+
+val FPRecpX : forall ('N : Int), 'N >= 0 & 32 >= 0 & 'N >= 0.
+ (bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+/* Reciprocal exponent estimate (FRECPX): NaNs propagate; for a zero
+ * exponent the result exponent is the maximum normal exponent
+ * (Ones - 1); otherwise the exponent is bitwise inverted. The fraction
+ * is always zeroed and the sign preserved. esize is the exponent width
+ * for the given value width (5/8/11 for 16/32/64 bits). */
+function FPRecpX (op, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ let 'esize : {|5, 8, 11|} = match 'N {
+ 16 => 5,
+ 32 => 8,
+ 64 => 11
+ };
+ result : bits('N) = undefined;
+ exp : bits('esize) = undefined;
+ max_exp : bits('esize) = undefined;
+ frac : bits('N - 'esize - 1) = Zeros();
+ match 'N {
+ 16 => exp = slice(op, 10, esize),
+ 32 => exp = slice(op, 23, esize),
+ 64 => exp = slice(op, 52, esize)
+ };
+ max_exp = Ones(esize) - 1;
+ value_name : real = undefined;
+ sign : bits(1) = undefined;
+ typ : FPType = undefined;
+ (typ, sign, value_name) = FPUnpack(op, fpcr);
+ if typ == FPType_SNaN | typ == FPType_QNaN then
+ result = FPProcessNaN(typ, op, fpcr)
+ else if IsZero(exp) then result = (sign @ max_exp) @ frac
+ else result = (sign @ ~(exp)) @ frac;
+ return(result)
+}
+
+val FPRecipEstimate : forall ('N : Int), 'N >= 0 & 32 >= 0 & 'N >= 0.
+ (bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+/* Reciprocal estimate (FRECPE). Special cases: NaNs propagate; 1/inf is a
+ * signed zero; 1/0 is a signed infinity plus DivideByZero; very small
+ * inputs (per-width thresholds) overflow to infinity or max-normal
+ * depending on the rounding mode, raising Overflow and Inexact; with the
+ * relevant fpcr flush bit set, large inputs flush the result to zero and
+ * set bit 3 of FPSCR/FPSR directly (presumably the UFC underflow flag —
+ * confirm). Otherwise: normalise the operand into a 52-bit fraction and
+ * unbiased-style exponent, feed the top 8 fraction bits (with implicit
+ * leading 1) to the RecipEstimate table lookup, compute the result
+ * exponent from a per-width constant, handle result exponents 0 and -1 by
+ * shifting in leading fraction bits, then repack per width. */
+function FPRecipEstimate (operand, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ value_name : real = undefined;
+ sign : bits(1) = undefined;
+ typ : FPType = undefined;
+ (typ, sign, value_name) = FPUnpack(operand, fpcr);
+ estimate : int = undefined;
+ result_exp : int = undefined;
+ exp : int = undefined;
+ fraction : bits(52) = undefined;
+ overflow_to_inf : bool = undefined;
+ result : bits('N) = undefined;
+ if typ == FPType_SNaN | typ == FPType_QNaN then
+ result = FPProcessNaN(typ, operand, fpcr)
+ else if typ == FPType_Infinity then result = FPZero(sign)
+ else if typ == FPType_Zero then {
+ result = FPInfinity(sign);
+ FPProcessException(FPExc_DivideByZero, fpcr)
+ } else if ('N == 16 & abs(value_name) < 2.0 ^ negate(16) | 'N == 32 & abs(value_name) < 2.0 ^ negate(128)) | 'N == 64 & abs(value_name) < 2.0 ^ negate(1024) then {
+ match FPRoundingMode(fpcr) {
+ FPRounding_TIEEVEN => overflow_to_inf = true,
+ FPRounding_POSINF => overflow_to_inf = sign == 0b0,
+ FPRounding_NEGINF => overflow_to_inf = sign == 0b1,
+ FPRounding_ZERO => overflow_to_inf = false
+ };
+ result = if overflow_to_inf then FPInfinity(sign) else FPMaxNormal(sign);
+ FPProcessException(FPExc_Overflow, fpcr);
+ FPProcessException(FPExc_Inexact, fpcr)
+ } else if ([fpcr[24]] == 0b1 & 'N != 16 | [fpcr[19]] == 0b1 & 'N == 16) & (('N == 16 & abs(value_name) >= 2.0 ^ 14 | 'N == 32 & abs(value_name) >= 2.0 ^ 126) | 'N == 64 & abs(value_name) >= 2.0 ^ 1022) then {
+ result = FPZero(sign);
+ if UsingAArch32() then FPSCR = __SetSlice_bits(32, 1, FPSCR, 3, 0b1)
+ else FPSR = __SetSlice_bits(32, 1, FPSR, 3, 0b1)
+ } else {
+ // Extract exponent and left-aligned 52-bit fraction per width.
+ match 'N {
+ 16 => {
+ fraction = slice(operand, 0, 10) @ Zeros(42),
+ exp = UInt(slice(operand, 10, 5))
+ },
+ 32 => {
+ fraction = slice(operand, 0, 23) @ Zeros(29);
+ exp = UInt(slice(operand, 23, 8))
+ },
+ 64 => {
+ fraction = slice(operand, 0, 52);
+ exp = UInt(slice(operand, 52, 11))
+ }
+ };
+ // Normalise denormals (exp == 0) by shifting the fraction left.
+ if exp == 0 then
+ if [fraction[51]] == 0b0 then {
+ exp = negate(1);
+ fraction = slice(fraction, 0, 50) @ 0b00
+ } else fraction = slice(fraction, 0, 51) @ 0b0
+ else ();
+ scaled : int = UInt(0b1 @ slice(fraction, 44, 8));
+ match 'N {
+ 16 => result_exp = 29 - exp,
+ 32 => result_exp = 253 - exp,
+ 64 => result_exp = 2045 - exp
+ };
+ estimate = RecipEstimate(scaled);
+ fraction = __GetSlice_int(8, estimate, 0) @ Zeros(44);
+ // Denormal results: fold the implicit bit back into the fraction.
+ if result_exp == 0 then fraction = 0b1 @ slice(fraction, 1, 51)
+ else if result_exp == negate(1) then {
+ fraction = 0b01 @ slice(fraction, 2, 50);
+ result_exp = 0
+ } else ();
+ match 'N {
+ 16 => result = (sign @ __GetSlice_int('N - 11, result_exp, 0)) @ slice(fraction, 42, 10),
+ 32 => result = (sign @ __GetSlice_int('N - 24, result_exp, 0)) @ slice(fraction, 29, 23),
+ 64 => result = (sign @ __GetSlice_int('N - 53, result_exp, 0)) @ slice(fraction, 0, 52)
+ }
+ };
+ return(result)
+}
+
+val FPRSqrtEstimate : forall ('N : Int), 'N >= 0 & 32 >= 0 & 'N >= 0.
+ (bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+/* Reciprocal square root estimate (FRSQRTE). Special cases: NaNs
+ * propagate; 1/sqrt(0) is a signed infinity plus DivideByZero; any other
+ * negative input yields the default NaN plus InvalidOp; 1/sqrt(+inf) is
+ * +0. Otherwise: extract and normalise the fraction, halve the exponent
+ * (choosing the scaled input alignment from the exponent's low bit), look
+ * the estimate up via RecipSqrtEstimate, and repack per width with a
+ * zero-padded fraction. */
+function FPRSqrtEstimate (operand, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ value_name : real = undefined;
+ sign : bits(1) = undefined;
+ typ : FPType = undefined;
+ (typ, sign, value_name) = FPUnpack(operand, fpcr);
+ estimate : int = undefined;
+ result_exp : int = undefined;
+ scaled : int = undefined;
+ exp : int = undefined;
+ fraction : bits(52) = undefined;
+ result : bits('N) = undefined;
+ if typ == FPType_SNaN | typ == FPType_QNaN then
+ result = FPProcessNaN(typ, operand, fpcr)
+ else if typ == FPType_Zero then {
+ result = FPInfinity(sign);
+ FPProcessException(FPExc_DivideByZero, fpcr)
+ } else if sign == 0b1 then {
+ result = FPDefaultNaN();
+ FPProcessException(FPExc_InvalidOp, fpcr)
+ } else if typ == FPType_Infinity then result = FPZero(0b0)
+ else {
+ match 'N {
+ 16 => {
+ fraction = slice(operand, 0, 10) @ Zeros(42);
+ exp = UInt(slice(operand, 10, 5))
+ },
+ 32 => {
+ fraction = slice(operand, 0, 23) @ Zeros(29);
+ exp = UInt(slice(operand, 23, 8))
+ },
+ 64 => {
+ fraction = slice(operand, 0, 52);
+ exp = UInt(slice(operand, 52, 11))
+ }
+ };
+ // Normalise a denormal: shift left until the top fraction bit is set.
+ if exp == 0 then {
+ while [fraction[51]] == 0b0 do {
+ fraction = slice(fraction, 0, 51) @ 0b0;
+ exp = exp - 1
+ };
+ fraction = slice(fraction, 0, 51) @ 0b0
+ } else ();
+ // Alignment of the table input depends on exponent parity.
+ if __GetSlice_int(1, exp, 0) == 0b0 then
+ scaled = UInt(0b1 @ slice(fraction, 44, 8))
+ else scaled = UInt(0b01 @ slice(fraction, 45, 7));
+ match 'N {
+ 16 => result_exp = (44 - exp) / 2,
+ 32 => result_exp = (380 - exp) / 2,
+ 64 => result_exp = (3068 - exp) / 2
+ };
+ estimate = RecipSqrtEstimate(scaled);
+ match 'N {
+ 16 => result = ((0b0 @ __GetSlice_int('N - 11, result_exp, 0)) @ __GetSlice_int(8, estimate, 0)) @ Zeros(2),
+ 32 => result = ((0b0 @ __GetSlice_int('N - 24, result_exp, 0)) @ __GetSlice_int(8, estimate, 0)) @ Zeros(15),
+ 64 => result = ((0b0 @ __GetSlice_int('N - 53, result_exp, 0)) @ __GetSlice_int(8, estimate, 0)) @ Zeros(44)
+ }
+ };
+ return(result)
+}
+
+val FPCompareGT : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0.
+ (bits('N), bits('N), bits(32)) -> bool effect {escape, rreg, undef, wreg}
+
+/* FP greater-than comparison: op1 > op2 on the unpacked real values.
+ * Any NaN operand (quiet or signalling) makes the result false and raises
+ * FPExc_InvalidOp. */
+function FPCompareGT (op1, op2, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, fpcr);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, fpcr);
+ result : bool = undefined;
+ if ((type1 == FPType_SNaN | type1 == FPType_QNaN) | type2 == FPType_SNaN) | type2 == FPType_QNaN then {
+ result = false;
+ FPProcessException(FPExc_InvalidOp, fpcr)
+ } else result = value1_name > value2_name;
+ return(result)
+}
+
+val FPCompareGE : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0.
+ (bits('N), bits('N), bits(32)) -> bool effect {escape, rreg, undef, wreg}
+
+/* FP greater-than-or-equal comparison: op1 >= op2 on the unpacked real
+ * values. Any NaN operand makes the result false and raises
+ * FPExc_InvalidOp. */
+function FPCompareGE (op1, op2, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, fpcr);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, fpcr);
+ result : bool = undefined;
+ if ((type1 == FPType_SNaN | type1 == FPType_QNaN) | type2 == FPType_SNaN) | type2 == FPType_QNaN then {
+ result = false;
+ FPProcessException(FPExc_InvalidOp, fpcr)
+ } else result = value1_name >= value2_name;
+ return(result)
+}
+
+val FPCompareEQ : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0.
+ (bits('N), bits('N), bits(32)) -> bool effect {escape, rreg, undef, wreg}
+
+/* FP equality comparison: op1 == op2 on the unpacked real values.
+ * Any NaN operand makes the result false; unlike GT/GE, only signalling
+ * NaNs raise FPExc_InvalidOp (quiet comparison semantics). */
+function FPCompareEQ (op1, op2, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, fpcr);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, fpcr);
+ result : bool = undefined;
+ if ((type1 == FPType_SNaN | type1 == FPType_QNaN) | type2 == FPType_SNaN) | type2 == FPType_QNaN then {
+ result = false;
+ if type1 == FPType_SNaN | type2 == FPType_SNaN then FPProcessException(FPExc_InvalidOp, fpcr) else ()
+ } else result = value1_name == value2_name;
+ return(result)
+}
+
+val FPCompare : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0 & 4 >= 0.
+ (bits('N), bits('N), bool, bits(32)) -> bits(4) effect {escape, rreg, undef, wreg}
+
+/* Full FP comparison producing a 4-bit NZCV-style condition-flags value:
+ * 0x3 (unordered) when either operand is a NaN, 0x6 for equal, 0x8 for
+ * less-than, 0x2 for greater-than. On a NaN operand, InvalidOp is raised
+ * if the NaN is signalling or `signal_nans` is set. */
+function FPCompare (op1, op2, signal_nans, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, fpcr);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, fpcr);
+ result : bits(4) = undefined;
+ if ((type1 == FPType_SNaN | type1 == FPType_QNaN) | type2 == FPType_SNaN) | type2 == FPType_QNaN then {
+ result = 0x3;
+ if (type1 == FPType_SNaN | type2 == FPType_SNaN) | signal_nans then FPProcessException(FPExc_InvalidOp, fpcr) else ()
+ } else if value1_name == value2_name then result = 0x6 else if value1_name < value2_name then result = 0x8 else result = 0x2;
+ return(result)
+}
+
+val FPSub : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0 & 'N >= 0.
+ (bits('N), bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+/* FP subtraction op1 - op2. NaNs are handled first by FPProcessNaNs.
+ * Special values: inf - inf with equal signs is InvalidOp yielding the
+ * default NaN; an infinite operand otherwise determines a signed infinity
+ * result; (+0) - (-0) style zero pairs give a zero with op1's sign.
+ * For finite operands the exact real difference is rounded; an exact 0.0
+ * result takes sign -0 only under round-to-negative-infinity. */
+function FPSub (op1, op2, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ rounding : FPRounding = FPRoundingMode(fpcr);
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, fpcr);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, fpcr);
+ result : bits('N) = undefined;
+ done : bool = undefined;
+ (done, result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
+ result_sign : bits(1) = undefined;
+ result_value : real = undefined;
+ zero2 : bool = undefined;
+ zero1 : bool = undefined;
+ inf2 : bool = undefined;
+ inf1 : bool = undefined;
+ if ~(done) then {
+ inf1 = type1 == FPType_Infinity;
+ inf2 = type2 == FPType_Infinity;
+ zero1 = type1 == FPType_Zero;
+ zero2 = type2 == FPType_Zero;
+ if (inf1 & inf2) & sign1 == sign2 then {
+ result = FPDefaultNaN();
+ FPProcessException(FPExc_InvalidOp, fpcr)
+ } else if inf1 & sign1 == 0b0 | inf2 & sign2 == 0b1 then result = FPInfinity(0b0) else if inf1 & sign1 == 0b1 | inf2 & sign2 == 0b0 then result = FPInfinity(0b1) else if (zero1 & zero2) & sign1 == ~(sign2) then result = FPZero(sign1) else {
+ result_value = value1_name - value2_name;
+ if result_value == 0.0 then {
+ result_sign = if rounding == FPRounding_NEGINF then 0b1 else 0b0;
+ result = FPZero(result_sign)
+ } else result = FPRound(result_value, fpcr, rounding)
+ }
+ } else ();
+ return(result)
+}
+
+val FPRecipStepFused : forall ('N : Int), 'N >= 0 & 'N >= 0 & 'N >= 0.
+ (bits('N), bits('N)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+/* Fused Newton-Raphson reciprocal step: computes 2.0 - op1 * op2 with a
+ * single rounding (op1 is negated up front, then 2.0 + value1 * value2).
+ * Uses the global FPCR. Special cases: inf * 0 in either order yields
+ * +2.0; any other infinite operand yields a signed infinity (XOR of
+ * signs); an exact 0.0 result takes sign -0 only under
+ * round-to-negative-infinity. NaNs are handled by FPProcessNaNs on the
+ * NEGATED op1. */
+function FPRecipStepFused (op1__arg, op2) = {
+ op1 = op1__arg;
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ result : bits('N) = undefined;
+ op1 = FPNeg(op1);
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, FPCR);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, FPCR);
+ done : bool = undefined;
+ (done, result) = FPProcessNaNs(type1, type2, op1, op2, FPCR);
+ sign : bits(1) = undefined;
+ result_value : real = undefined;
+ zero2 : bool = undefined;
+ zero1 : bool = undefined;
+ inf2 : bool = undefined;
+ inf1 : bool = undefined;
+ if ~(done) then {
+ inf1 = type1 == FPType_Infinity;
+ inf2 = type2 == FPType_Infinity;
+ zero1 = type1 == FPType_Zero;
+ zero2 = type2 == FPType_Zero;
+ if inf1 & zero2 | zero1 & inf2 then result = FPTwo(0b0) else if inf1 | inf2 then result = FPInfinity(sign1 ^ sign2) else {
+ result_value = 2.0 + value1_name * value2_name;
+ if result_value == 0.0 then {
+ sign = if FPRoundingMode(FPCR) == FPRounding_NEGINF then 0b1 else 0b0;
+ result = FPZero(sign)
+ } else result = FPRound(result_value, FPCR)
+ }
+ } else ();
+ return(result)
+}
+
+val FPRSqrtStepFused : forall ('N : Int), 'N >= 0 & 'N >= 0 & 'N >= 0.
+ (bits('N), bits('N)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+/* Fused Newton-Raphson reciprocal square root step: computes
+ * (3.0 - op1 * op2) / 2.0 with a single rounding (op1 is negated up
+ * front, then (3.0 + value1 * value2) / 2.0). Uses the global FPCR.
+ * Special cases mirror FPRecipStepFused except that inf * 0 yields +1.5
+ * (FPOnePointFive) instead of +2.0. */
+function FPRSqrtStepFused (op1__arg, op2) = {
+ op1 = op1__arg;
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ result : bits('N) = undefined;
+ op1 = FPNeg(op1);
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, FPCR);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, FPCR);
+ done : bool = undefined;
+ (done, result) = FPProcessNaNs(type1, type2, op1, op2, FPCR);
+ sign : bits(1) = undefined;
+ result_value : real = undefined;
+ zero2 : bool = undefined;
+ zero1 : bool = undefined;
+ inf2 : bool = undefined;
+ inf1 : bool = undefined;
+ if ~(done) then {
+ inf1 = type1 == FPType_Infinity;
+ inf2 = type2 == FPType_Infinity;
+ zero1 = type1 == FPType_Zero;
+ zero2 = type2 == FPType_Zero;
+ if inf1 & zero2 | zero1 & inf2 then result = FPOnePointFive(0b0) else if inf1 | inf2 then result = FPInfinity(sign1 ^ sign2) else {
+ result_value = (3.0 + value1_name * value2_name) / 2.0;
+ if result_value == 0.0 then {
+ sign = if FPRoundingMode(FPCR) == FPRounding_NEGINF then 0b1 else 0b0;
+ result = FPZero(sign)
+ } else result = FPRound(result_value, FPCR)
+ }
+ } else ();
+ return(result)
+}
+
+val FPMulX : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0 & 'N >= 0.
+ (bits('N), bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+/* Extended multiply (FMULX): as FP multiply, except that inf * 0 in either
+ * order yields a signed 2.0 (sign = XOR of operand signs) instead of an
+ * InvalidOp NaN. NaNs are handled by FPProcessNaNs; infinity and zero
+ * operands otherwise give signed infinity / signed zero; finite operands
+ * give the rounded real product. */
+function FPMulX (op1, op2, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ result : bits('N) = undefined;
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, fpcr);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, fpcr);
+ done : bool = undefined;
+ (done, result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
+ zero2 : bool = undefined;
+ zero1 : bool = undefined;
+ inf2 : bool = undefined;
+ inf1 : bool = undefined;
+ if ~(done) then {
+ inf1 = type1 == FPType_Infinity;
+ inf2 = type2 == FPType_Infinity;
+ zero1 = type1 == FPType_Zero;
+ zero2 = type2 == FPType_Zero;
+ if inf1 & zero2 | zero1 & inf2 then result = FPTwo(sign1 ^ sign2) else if inf1 | inf2 then result = FPInfinity(sign1 ^ sign2) else if zero1 | zero2 then result = FPZero(sign1 ^ sign2) else result = FPRound(value1_name * value2_name, fpcr)
+ } else ();
+ return(result)
+}
+
+/* FPMulAdd: fused multiply-add, result = addend + op1 * op2, computed
+   exactly over reals and rounded once at the end. Handles the special
+   QNaN-addend + (inf * zero) case, infinity/zero sign selection, and
+   the signed-zero result of an exact cancellation. */
+val FPMulAdd : forall ('N : Int), 'N >= 0 & 'N >= 0 & 'N >= 0 & 32 >= 0 & 'N >= 0.
+  (bits('N), bits('N), bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+function FPMulAdd (addend, op1, op2, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ rounding : FPRounding = FPRoundingMode(fpcr);
+ valueA_name : real = undefined;
+ signA : bits(1) = undefined;
+ typeA : FPType = undefined;
+ (typeA, signA, valueA_name) = FPUnpack(addend, fpcr);
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, fpcr);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, fpcr);
+ /* inf/zero classification of the product operands is computed BEFORE
+    NaN processing because the QNaN-addend check below needs it even
+    when FPProcessNaNs3 reports done. */
+ inf1 : bool = type1 == FPType_Infinity;
+ zero1 : bool = type1 == FPType_Zero;
+ inf2 : bool = type2 == FPType_Infinity;
+ zero2 : bool = type2 == FPType_Zero;
+ result : bits('N) = undefined;
+ done : bool = undefined;
+ (done, result) = FPProcessNaNs3(typeA, type1, type2, addend, op1, op2, fpcr);
+ /* QNaN addend combined with an inf*zero product is Invalid Operation,
+    overriding the NaN propagation result. */
+ if typeA == FPType_QNaN & (inf1 & zero2 | zero1 & inf2) then {
+ result = FPDefaultNaN();
+ FPProcessException(FPExc_InvalidOp, fpcr)
+ } else ();
+ result_sign : bits(1) = undefined;
+ result_value : real = undefined;
+ zeroP : bool = undefined;
+ infP : bool = undefined;
+ signP : bits(1) = undefined;
+ zeroA : bool = undefined;
+ infA : bool = undefined;
+ if ~(done) then {
+ infA = typeA == FPType_Infinity;
+ zeroA = typeA == FPType_Zero;
+ signP = sign1 ^ sign2;
+ infP = inf1 | inf2;
+ zeroP = zero1 | zero2;
+ /* inf*zero product, or opposite-signed infinities being summed, is
+    Invalid Operation; otherwise infinities dominate, zeros of matching
+    sign keep that sign, and an exact zero sum takes its sign from the
+    rounding mode (negative only for round-toward-minus-infinity). */
+ if (inf1 & zero2 | zero1 & inf2) | (infA & infP) & signA != signP then {
+ result = FPDefaultNaN();
+ FPProcessException(FPExc_InvalidOp, fpcr)
+ } else if infA & signA == 0b0 | infP & signP == 0b0 then result = FPInfinity(0b0) else if infA & signA == 0b1 | infP & signP == 0b1 then result = FPInfinity(0b1) else if (zeroA & zeroP) & signA == signP then result = FPZero(signA) else {
+ result_value = valueA_name + value1_name * value2_name;
+ if result_value == 0.0 then {
+ result_sign = if rounding == FPRounding_NEGINF then 0b1 else 0b0;
+ result = FPZero(result_sign)
+ } else result = FPRound(result_value, fpcr)
+ }
+ } else ();
+ return(result)
+}
+
+/* FPMul: floating-point multiply. An infinity times a zero is Invalid
+   Operation and yields the default NaN (contrast FPMulX, which returns
+   2.0 for that case). */
+val FPMul : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0 & 'N >= 0.
+  (bits('N), bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+function FPMul (op1, op2, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, fpcr);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, fpcr);
+ result : bits('N) = undefined;
+ done : bool = undefined;
+ (done, result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
+ zero2 : bool = undefined;
+ zero1 : bool = undefined;
+ inf2 : bool = undefined;
+ inf1 : bool = undefined;
+ if ~(done) then {
+ inf1 = type1 == FPType_Infinity;
+ inf2 = type2 == FPType_Infinity;
+ zero1 = type1 == FPType_Zero;
+ zero2 = type2 == FPType_Zero;
+ /* inf*zero -> Invalid Operation; otherwise inf and zero results take
+    the XOR of the operand signs; finite operands are rounded exactly. */
+ if inf1 & zero2 | zero1 & inf2 then {
+ result = FPDefaultNaN();
+ FPProcessException(FPExc_InvalidOp, fpcr)
+ } else if inf1 | inf2 then result = FPInfinity(sign1 ^ sign2) else if zero1 | zero2 then result = FPZero(sign1 ^ sign2) else result = FPRound(value1_name * value2_name, fpcr)
+ } else ();
+ return(result)
+}
+
+/* FPMin: return the smaller of two floating-point operands. When the
+   minimum is a zero, the result sign is the OR of both operand signs,
+   so -0 is preferred over +0 when either operand is negative. */
+val FPMin : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0 & 'N >= 0.
+  (bits('N), bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+function FPMin (op1, op2, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, fpcr);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, fpcr);
+ result : bits('N) = undefined;
+ done : bool = undefined;
+ (done, result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
+ value_name : real = undefined;
+ sign : bits(1) = undefined;
+ typ : FPType = undefined;
+ if ~(done) then {
+ /* Select the operand with the smaller unpacked real value (ties go to
+    op2); then re-pack by type rather than re-rounding infinities/zeros. */
+ if value1_name < value2_name then (typ, sign, value_name) = (type1, sign1, value1_name) else (typ, sign, value_name) = (type2, sign2, value2_name);
+ if typ == FPType_Infinity then result = FPInfinity(sign) else if typ == FPType_Zero then {
+ sign = sign1 | sign2;
+ result = FPZero(sign)
+ } else result = FPRound(value_name, fpcr)
+ } else ();
+ return(result)
+}
+
+val FPMinNum : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0 & 'N >= 0.
+ (bits('N), bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+function FPMinNum (op1__arg, op2__arg, fpcr) = {
+ op1 = op1__arg;
+ op2 = op2__arg;
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ __anon2 : real = undefined;
+ __anon1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, __anon1, __anon2) = FPUnpack(op1, fpcr);
+ __anon4 : real = undefined;
+ __anon3 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, __anon3, __anon4) = FPUnpack(op2, fpcr);
+ if type1 == FPType_QNaN & type2 != FPType_QNaN then op1 = FPInfinity(0b0) else if type1 != FPType_QNaN & type2 == FPType_QNaN then op2 = FPInfinity(0b0) else ();
+ return(FPMin(op1, op2, fpcr))
+}
+
+/* FPMax: return the larger of two floating-point operands. When the
+   maximum is a zero, the result sign is the AND of both operand signs,
+   so +0 is preferred over -0 when either operand is positive. */
+val FPMax : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0 & 'N >= 0.
+  (bits('N), bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+function FPMax (op1, op2, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, fpcr);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, fpcr);
+ result : bits('N) = undefined;
+ done : bool = undefined;
+ (done, result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
+ value_name : real = undefined;
+ sign : bits(1) = undefined;
+ typ : FPType = undefined;
+ if ~(done) then {
+ /* Select the operand with the larger unpacked real value (ties go to
+    op2); then re-pack by type rather than re-rounding infinities/zeros. */
+ if value1_name > value2_name then (typ, sign, value_name) = (type1, sign1, value1_name) else (typ, sign, value_name) = (type2, sign2, value2_name);
+ if typ == FPType_Infinity then result = FPInfinity(sign) else if typ == FPType_Zero then {
+ sign = sign1 & sign2;
+ result = FPZero(sign)
+ } else result = FPRound(value_name, fpcr)
+ } else ();
+ return(result)
+}
+
+/* FPMaxNum: maxNum semantics — a quiet-NaN operand paired with a
+   non-NaN operand is replaced by -infinity before calling FPMax, so the
+   numeric operand always wins. (Both-QNaN falls through to FPMax's NaN
+   handling.) */
+val FPMaxNum : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0 & 'N >= 0.
+  (bits('N), bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+function FPMaxNum (op1__arg, op2__arg, fpcr) = {
+ op1 = op1__arg;
+ op2 = op2__arg;
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ __anon2 : real = undefined;
+ __anon1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, __anon1, __anon2) = FPUnpack(op1, fpcr);
+ __anon4 : real = undefined;
+ __anon3 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, __anon3, __anon4) = FPUnpack(op2, fpcr);
+ /* FPInfinity(0b1) is -infinity: smaller than any number, so it never
+    becomes the maximum. */
+ if type1 == FPType_QNaN & type2 != FPType_QNaN then op1 = FPInfinity(0b1) else if type1 != FPType_QNaN & type2 == FPType_QNaN then op2 = FPInfinity(0b1) else ();
+ return(FPMax(op1, op2, fpcr))
+}
+
+/* FPDiv: floating-point divide. inf/inf and 0/0 are Invalid Operation;
+   division by zero of a finite, non-zero dividend raises DivideByZero;
+   inf/x and x/0 give a signed infinity, 0/x and x/inf a signed zero. */
+val FPDiv : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0 & 'N >= 0.
+  (bits('N), bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+function FPDiv (op1, op2, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, fpcr);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, fpcr);
+ result : bits('N) = undefined;
+ done : bool = undefined;
+ (done, result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
+ zero2 : bool = undefined;
+ zero1 : bool = undefined;
+ inf2 : bool = undefined;
+ inf1 : bool = undefined;
+ if ~(done) then {
+ inf1 = type1 == FPType_Infinity;
+ inf2 = type2 == FPType_Infinity;
+ zero1 = type1 == FPType_Zero;
+ zero2 = type2 == FPType_Zero;
+ if inf1 & inf2 | zero1 & zero2 then {
+ result = FPDefaultNaN();
+ FPProcessException(FPExc_InvalidOp, fpcr)
+ } else if inf1 | zero2 then {
+ result = FPInfinity(sign1 ^ sign2);
+ /* DivideByZero is only signalled for a zero divisor, not for an
+    infinite dividend. */
+ if ~(inf1) then FPProcessException(FPExc_DivideByZero, fpcr) else ()
+ } else if zero1 | inf2 then result = FPZero(sign1 ^ sign2) else result = FPRound(value1_name / value2_name, fpcr)
+ } else ();
+ return(result)
+}
+
+/* FPAdd: floating-point addition. Opposite-signed infinities are
+   Invalid Operation; a same-signed pair of zeros keeps that sign; an
+   exact cancellation to zero takes its sign from the rounding mode
+   (negative only under round-toward-minus-infinity). */
+val FPAdd : forall ('N : Int), 'N >= 0 & 'N >= 0 & 32 >= 0 & 'N >= 0.
+  (bits('N), bits('N), bits(32)) -> bits('N) effect {escape, rreg, undef, wreg}
+
+function FPAdd (op1, op2, fpcr) = {
+ assert('N == 16 | 'N == 32 | 'N == 64, "((N == 16) || ((N == 32) || (N == 64)))");
+ rounding : FPRounding = FPRoundingMode(fpcr);
+ value1_name : real = undefined;
+ sign1 : bits(1) = undefined;
+ type1 : FPType = undefined;
+ (type1, sign1, value1_name) = FPUnpack(op1, fpcr);
+ value2_name : real = undefined;
+ sign2 : bits(1) = undefined;
+ type2 : FPType = undefined;
+ (type2, sign2, value2_name) = FPUnpack(op2, fpcr);
+ result : bits('N) = undefined;
+ done : bool = undefined;
+ (done, result) = FPProcessNaNs(type1, type2, op1, op2, fpcr);
+ result_sign : bits(1) = undefined;
+ result_value : real = undefined;
+ zero2 : bool = undefined;
+ zero1 : bool = undefined;
+ inf2 : bool = undefined;
+ inf1 : bool = undefined;
+ if ~(done) then {
+ inf1 = type1 == FPType_Infinity;
+ inf2 = type2 == FPType_Infinity;
+ zero1 = type1 == FPType_Zero;
+ zero2 = type2 == FPType_Zero;
+ if (inf1 & inf2) & sign1 == ~(sign2) then {
+ result = FPDefaultNaN();
+ FPProcessException(FPExc_InvalidOp, fpcr)
+ } else if inf1 & sign1 == 0b0 | inf2 & sign2 == 0b0 then result = FPInfinity(0b0) else if inf1 & sign1 == 0b1 | inf2 & sign2 == 0b1 then result = FPInfinity(0b1) else if (zero1 & zero2) & sign1 == sign2 then result = FPZero(sign1) else {
+ result_value = value1_name + value2_name;
+ if result_value == 0.0 then {
+ result_sign = if rounding == FPRounding_NEGINF then 0b1 else 0b0;
+ result = FPZero(result_sign)
+ } else result = FPRound(result_value, fpcr, rounding)
+ }
+ } else ();
+ return(result)
+}
+
+/* Reduce: recursive pairwise reduction of a vector of 'esize-bit
+   elements down to a single element, applying the operation selected by
+   'op' (FP min/max/minnum/maxnum/add via FPCR, or integer add). The
+   input is split in half and each half reduced before combining. */
+val Reduce : forall ('N : Int) ('esize : Int), 'N >= 0 & 'esize >= 0.
+  (ReduceOp, bits('N), atom('esize)) -> bits('esize) effect {escape, rreg, undef, wreg}
+
+function Reduce (op, input, esize) = {
+ hi : bits('esize) = undefined;
+ lo : bits('esize) = undefined;
+ result : bits('esize) = undefined;
+ /* Base case: input has narrowed to a single element. */
+ if 'N == 'esize then return(input) else ();
+ let 'half = 'N / 2;
+ /* 'N must be an even multiple at every recursion level. */
+ assert(constraint('half * 2 = 'N));
+ hi = Reduce(op, slice(input, half, negate(half) + 'N), 'esize);
+ lo = Reduce(op, slice(input, 0, half), 'esize);
+ match op {
+ ReduceOp_FMINNUM => result = FPMinNum(lo, hi, FPCR),
+ ReduceOp_FMAXNUM => result = FPMaxNum(lo, hi, FPCR),
+ ReduceOp_FMIN => result = FPMin(lo, hi, FPCR),
+ ReduceOp_FMAX => result = FPMax(lo, hi, FPCR),
+ ReduceOp_FADD => result = FPAdd(lo, hi, FPCR),
+ ReduceOp_ADD => result = lo + hi
+ };
+ return(result)
+}
+
+/* ExternalSecureInvasiveDebugEnabled: true when external invasive debug
+   is permitted in Secure state — requires the SPIDEN signal HIGH on top
+   of ExternalInvasiveDebugEnabled(). Without EL3 and outside Secure
+   state the question does not arise and the result is false. */
+val ExternalSecureInvasiveDebugEnabled : unit -> bool effect {escape, rreg, undef}
+
+function ExternalSecureInvasiveDebugEnabled () = {
+ if ~(HaveEL(EL3)) & ~(IsSecure()) then return(false) else ();
+ return(ExternalInvasiveDebugEnabled() & SPIDEN == HIGH)
+}
+
+/* ExternalDebugInterruptsDisabled: whether EDSCR bits 23:22 (the
+   2-bit field at offset 22, read with slice(EDSCR, 22, 2)) disable
+   interrupts at the given target EL, gated by the relevant external
+   (secure) invasive debug enables. */
+val ExternalDebugInterruptsDisabled : bits(2) -> bool effect {escape, rreg, undef}
+
+function ExternalDebugInterruptsDisabled target = {
+ int_dis : bool = undefined;
+ match target {
+ ? if ? == EL3 => int_dis = slice(EDSCR, 22, 2) == 0b11 & ExternalSecureInvasiveDebugEnabled(),
+ ? if ? == EL2 => int_dis = (slice(EDSCR, 22, 2) & 0b10) == 0b10 & ExternalInvasiveDebugEnabled(),
+ /* At EL1 the gating enable depends on the current security state. */
+ ? if ? == EL1 => if IsSecure() then int_dis = (slice(EDSCR, 22, 2) & 0b10) == 0b10 & ExternalSecureInvasiveDebugEnabled() else int_dis = slice(EDSCR, 22, 2) != 0b00 & ExternalInvasiveDebugEnabled()
+ };
+ return(int_dis)
+}
+
+/* ELStateUsingAArch32K: determine whether the given EL (in the given
+   security state) is using AArch32. Returns (known, aarch32); known is
+   false only for EL0 when EL0's state cannot be deduced from the
+   current PSTATE (we are not at EL0 and EL1 is AArch64). When unknown,
+   aarch32 is left as an UNKNOWN value. */
+val ELStateUsingAArch32K : (bits(2), bool) -> (bool, bool) effect {rreg, undef}
+
+function ELStateUsingAArch32K (el, secure) = {
+ aarch32 : bool = undefined;
+ known : bool = true;
+ aarch32_at_el1 : bool = undefined;
+ aarch32_below_el3 : bool = undefined;
+ /* SCR_EL3.RW (bit 10) and HCR_EL2.RW (bit 31) select the register
+    width of the lower ELs; HCR_EL2 bits 34/27 (E2H/TGE) force EL0
+    under a host EL2 when the Virtualization Host Extension is present. */
+ if ~(HaveAArch32EL(el)) then aarch32 = false else if HighestELUsingAArch32() then aarch32 = true else {
+ aarch32_below_el3 = HaveEL(EL3) & [SCR_EL3[10]] == 0b0;
+ aarch32_at_el1 = aarch32_below_el3 | ((HaveEL(EL2) & ~(secure)) & [HCR_EL2[31]] == 0b0) & ~(([HCR_EL2[34]] == 0b1 & [HCR_EL2[27]] == 0b1) & HaveVirtHostExt());
+ if el == EL0 & ~(aarch32_at_el1) then if PSTATE.EL == EL0 then aarch32 = PSTATE.nRW == 0b1 else known = false else aarch32 = aarch32_below_el3 & el != EL3 | aarch32_at_el1 & (el == EL1 | el == EL0)
+ };
+ if ~(known) then aarch32 = undefined else ();
+ return((known, aarch32))
+}
+
+/* ELUsingAArch32K: convenience wrapper — ELStateUsingAArch32K for the
+   current security state below EL3. */
+val ELUsingAArch32K : bits(2) -> (bool, bool) effect {escape, rreg, undef}
+
+function ELUsingAArch32K el = return(ELStateUsingAArch32K(el, IsSecureBelowEL3()))
+
+/* ELStateUsingAArch32: as ELStateUsingAArch32K, but asserts that the
+   answer is determinable and returns just the aarch32 flag. */
+val ELStateUsingAArch32 : (bits(2), bool) -> bool effect {escape, rreg, undef}
+
+function ELStateUsingAArch32 (el, secure) = {
+ aarch32 : bool = undefined;
+ known : bool = undefined;
+ (known, aarch32) = ELStateUsingAArch32K(el, secure);
+ assert(known, "known");
+ return(aarch32)
+}
+
+/* ELUsingAArch32: whether the given EL is using AArch32 in the current
+   security state; asserts the answer is known. */
+val ELUsingAArch32 : bits(2) -> bool effect {escape, rreg, undef}
+
+function ELUsingAArch32 el = return(ELStateUsingAArch32(el, IsSecureBelowEL3()))
+
+/* UpdateEDSCRFields: refresh the PE-state fields of EDSCR. While not
+   halted: EL field (bits 9:8) = 0b00, NS bit (18) UNKNOWN, RW field
+   (bits 13:10) = 0xF. While halted: EL = PSTATE.EL, NS from IsSecure(),
+   and RW built bit-by-bit from the register width of each implemented
+   EL (1 = AArch64, 0 = AArch32), with bits below the first AArch32
+   level made UNKNOWN. __SetSlice_bits(width, len, reg, off, v) writes
+   len bits of v at offset off. */
+val UpdateEDSCRFields : unit -> unit effect {escape, rreg, undef, wreg}
+
+function UpdateEDSCRFields () = {
+ if ~(Halted()) then {
+ EDSCR = __SetSlice_bits(32, 2, EDSCR, 8, 0b00);
+ EDSCR = __SetSlice_bits(32, 1, EDSCR, 18, undefined);
+ EDSCR = __SetSlice_bits(32, 4, EDSCR, 10, 0xF)
+ } else {
+ EDSCR = __SetSlice_bits(32, 2, EDSCR, 8, PSTATE.EL);
+ EDSCR = __SetSlice_bits(32, 1, EDSCR, 18, if IsSecure() then 0b0 else 0b1);
+ RW : bits(4) = undefined;
+ RW : bits(4) = __SetSlice_bits(4, 1, RW, 1, if ELUsingAArch32(EL1) then 0b0 else 0b1);
+ if PSTATE.EL != EL0 then RW = __SetSlice_bits(4, 1, RW, 0, [RW[1]]) else RW = __SetSlice_bits(4, 1, RW, 0, if UsingAArch32() then 0b0 else 0b1);
+ if ~(HaveEL(EL2)) | HaveEL(EL3) & [aget_SCR_GEN()[0]] == 0b0 then RW = __SetSlice_bits(4, 1, RW, 2, [RW[1]]) else RW = __SetSlice_bits(4, 1, RW, 2, if ELUsingAArch32(EL2) then 0b0 else 0b1);
+ if ~(HaveEL(EL3)) then RW = __SetSlice_bits(4, 1, RW, 3, [RW[2]]) else RW = __SetSlice_bits(4, 1, RW, 3, if ELUsingAArch32(EL3) then 0b0 else 0b1);
+ /* Bits below the highest AArch32 EL are architecturally UNKNOWN. */
+ if [RW[3]] == 0b0 then RW = __SetSlice_bits(4, 3, RW, 0, undefined) else if [RW[2]] == 0b0 then RW = __SetSlice_bits(4, 2, RW, 0, undefined) else if [RW[1]] == 0b0 then RW = __SetSlice_bits(4, 1, RW, 0, undefined) else ();
+ EDSCR = __SetSlice_bits(32, 4, EDSCR, 10, RW)
+ };
+ ()
+}
+
+/* Halt: enter Debug state for the given 6-bit reason code. Saves the
+   restart address and PSR to DLR/DSPSR (or DLR_EL0/DSPSR_EL0 on
+   AArch64), sets EDSCR.HDE-area status bits, makes a subset of PSTATE
+   interrupt/step bits UNKNOWN, records the reason in EDSCR[5:0], and
+   refreshes the other EDSCR fields. */
+val Halt : bits(6) -> unit effect {wreg, undef, rreg, escape}
+
+function Halt reason = {
+ CTI_SignalEvent(CrossTriggerIn_CrossHalt);
+ if UsingAArch32() then {
+ DLR = ThisInstrAddr();
+ DSPSR = GetPSRFromPSTATE();
+ DSPSR[21 .. 21] = PSTATE.SS
+ } else {
+ DLR_EL0 = ThisInstrAddr();
+ DSPSR_EL0 = GetPSRFromPSTATE();
+ DSPSR_EL0[21 .. 21] = PSTATE.SS
+ };
+ EDSCR[24 .. 24] = 0b1;
+ EDSCR[28 .. 28] = 0b0;
+ /* EDSCR.SDD (bit 16): cleared in Secure state; otherwise reflects
+    whether secure invasive debug is disabled; without EL3 it must
+    already read as 1. */
+ if IsSecure() then EDSCR[16 .. 16] = 0b0
+ else if HaveEL(EL3) then
+ EDSCR[16 .. 16] = if ExternalSecureInvasiveDebugEnabled() then 0b0 else 0b1
+ else assert([EDSCR[16]] == 0b1, "((EDSCR).SDD == '1')");
+ EDSCR[20 .. 20] = 0b0;
+ if UsingAArch32() then {
+ (PSTATE.SS @ PSTATE.A @ PSTATE.I @ PSTATE.F) = undefined : bits(4);
+ PSTATE.IT = 0x00;
+ PSTATE.T = 0b1
+ } else
+ (PSTATE.SS @ PSTATE.D @ PSTATE.A @ PSTATE.I @ PSTATE.F) = undefined : bits(5);
+ PSTATE.IL = 0b0;
+ StopInstructionPrefetchAndEnableITR();
+ EDSCR[5 .. 0] = reason;
+ UpdateEDSCRFields();
+ ()
+}
+
+/* aarch64_system_exceptions_debug_halt: execute behaviour of the HLT
+   instruction — halt into Debug state with the HaltInstruction reason. */
+val aarch64_system_exceptions_debug_halt : unit -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_system_exceptions_debug_halt () = Halt(DebugHalt_HaltInstruction)
+
+/* S2CacheDisabled: whether stage-2 translation disables cacheability
+   for the access type. Reads the instruction/data cache-disable bit
+   from HCR2 (bits 1/0) when EL2 is AArch32, else from HCR_EL2
+   (bits 33/32). */
+val S2CacheDisabled : AccType -> bool effect {escape, rreg, undef}
+
+function S2CacheDisabled acctype = {
+ disable : bits(1) = undefined;
+ if ELUsingAArch32(EL2) then disable = if acctype == AccType_IFETCH then [HCR2[1]] else [HCR2[0]] else disable = if acctype == AccType_IFETCH then [HCR_EL2[33]] else [HCR_EL2[32]];
+ return(disable == 0b1)
+}
+
+/* S2ConvertAttrsHints: convert a 2-bit stage-2 attribute field into
+   MemAttrHints. attr must be non-zero: 0b01 -> Non-cacheable,
+   0b10 -> Write-Through, 0b11 -> Write-Back (both with RWA hints).
+   Everything is forced Non-cacheable when stage-2 caching is disabled.
+   The result is never transient. */
+val S2ConvertAttrsHints : (bits(2), AccType) -> MemAttrHints effect {escape, rreg, undef}
+
+function S2ConvertAttrsHints (attr, acctype) = {
+ assert(~(IsZero(attr)), "!(IsZero(attr))");
+ result : MemAttrHints = undefined;
+ if S2CacheDisabled(acctype) then {
+ result.attrs = MemAttr_NC;
+ result.hints = MemHint_No
+ } else match attr {
+ 0b01 => {
+ result.attrs = MemAttr_NC;
+ result.hints = MemHint_No
+ },
+ 0b10 => {
+ result.attrs = MemAttr_WT;
+ result.hints = MemHint_RWA
+ },
+ 0b11 => {
+ result.attrs = MemAttr_WB;
+ result.hints = MemHint_RWA
+ }
+ };
+ result.transient = false;
+ return(result)
+}
+
+/* S2AttrDecode: decode stage-2 memory attributes from the 4-bit attr
+   field and 2-bit shareability field. attr<3:2> == 0b00 selects a
+   Device type from attr<1:0>; otherwise Normal memory with outer/inner
+   hints converted from attr<3:2>/attr<1:0>. attr<3:2> != 0 with
+   attr<1:0> == 0 leaves memattrs UNKNOWN. Defaults are applied by
+   MemAttrDefaults. */
+val S2AttrDecode : (bits(2), bits(4), AccType) -> MemoryAttributes effect {escape, rreg, undef}
+
+function S2AttrDecode (SH, attr, acctype) = {
+ memattrs : MemoryAttributes = undefined;
+ if slice(attr, 2, 2) == 0b00 then {
+ memattrs.typ = MemType_Device;
+ match slice(attr, 0, 2) {
+ 0b00 => memattrs.device = DeviceType_nGnRnE,
+ 0b01 => memattrs.device = DeviceType_nGnRE,
+ 0b10 => memattrs.device = DeviceType_nGRE,
+ 0b11 => memattrs.device = DeviceType_GRE
+ }
+ } else if slice(attr, 0, 2) != 0b00 then {
+ memattrs.typ = MemType_Normal;
+ memattrs.outer = S2ConvertAttrsHints(slice(attr, 2, 2), acctype);
+ memattrs.inner = S2ConvertAttrsHints(slice(attr, 0, 2), acctype);
+ /* SH: bit 1 set -> shareable; exactly 0b10 -> outer-shareable. */
+ memattrs.shareable = [SH[1]] == 0b1;
+ memattrs.outershareable = SH == 0b10
+ } else memattrs = undefined;
+ return(MemAttrDefaults(memattrs))
+}
+
+/* ELIsInHost: VHE "host" check — requires non-secure state, the
+   Virtualization Host Extension, AArch64 EL2, and HCR_EL2.E2H (bit 34)
+   set; the EL itself must be EL2, or EL0 with HCR_EL2.TGE (bit 27) set. */
+val ELIsInHost : bits(2) -> bool effect {escape, rreg, undef}
+
+function ELIsInHost el = return((((~(IsSecureBelowEL3()) & HaveVirtHostExt()) & ~(ELUsingAArch32(EL2))) & [HCR_EL2[34]] == 0b1) & (el == EL2 | el == EL0 & [HCR_EL2[27]] == 0b1))
+
+/* S1TranslationRegime: the EL controlling stage-1 translation for the
+   given EL (overload __0), or for the current PSTATE.EL (overload __1).
+   Non-EL0 levels control their own regime; EL0 maps to EL3 when EL3 is
+   AArch32 with SCR.NS (bit 0) clear, to EL2 under a VHE host, else EL1. */
+val S1TranslationRegime__0 : bits(2) -> bits(2) effect {rreg, undef, escape}
+
+val S1TranslationRegime__1 : unit -> bits(2) effect {rreg, undef, escape}
+
+overload S1TranslationRegime = {S1TranslationRegime__0, S1TranslationRegime__1}
+
+function S1TranslationRegime__0 el = if el != EL0 then return(el) else if (HaveEL(EL3) & ELUsingAArch32(EL3)) & [SCR[0]] == 0b0 then return(EL3) else if HaveVirtHostExt() & ELIsInHost(el) then return(EL2) else return(EL1)
+
+function S1TranslationRegime__1 () = return(S1TranslationRegime(PSTATE.EL))
+
+/* aset_FAR: write the Fault Address Register for a translation regime
+   (overload __0: explicit regime EL1/EL2/EL3 selects FAR_EL1/2/3) or
+   for the current stage-1 translation regime (overload __1). */
+val aset_FAR__0 : (bits(2), bits(64)) -> unit effect {wreg, escape}
+
+val aset_FAR__1 : bits(64) -> unit effect {wreg, undef, rreg, escape}
+
+overload aset_FAR = {aset_FAR__0, aset_FAR__1}
+
+function aset_FAR__0 (regime, value_name) = {
+ r : bits(64) = value_name;
+ match regime {
+ ? if ? == EL1 => FAR_EL1 = r,
+ ? if ? == EL2 => FAR_EL2 = r,
+ ? if ? == EL3 => FAR_EL3 = r,
+ _ => Unreachable()
+ };
+ ()
+}
+
+function aset_FAR__1 value_name = {
+ aset_FAR(S1TranslationRegime(), value_name);
+ ()
+}
+
+/* aset_ESR: write the Exception Syndrome Register for a translation
+   regime (overload __0: EL1/EL2/EL3 -> ESR_EL1/2/3) or for the current
+   stage-1 translation regime (overload __1). */
+val aset_ESR__0 : (bits(2), bits(32)) -> unit effect {wreg, escape}
+
+val aset_ESR__1 : bits(32) -> unit effect {wreg, rreg, undef, escape}
+
+overload aset_ESR = {aset_ESR__0, aset_ESR__1}
+
+function aset_ESR__0 (regime, value_name) = {
+ r : bits(32) = value_name;
+ match regime {
+ ? if ? == EL1 => ESR_EL1 = r,
+ ? if ? == EL2 => ESR_EL2 = r,
+ ? if ? == EL3 => ESR_EL3 = r,
+ _ => Unreachable()
+ };
+ ()
+}
+
+function aset_ESR__1 value_name = aset_ESR(S1TranslationRegime(), value_name)
+
+/* aget_VBAR: read the Vector Base Address Register for a translation
+   regime (overload __0: EL1/EL2/EL3 -> VBAR_EL1/2/3) or for the current
+   stage-1 translation regime (overload __1). */
+val aget_VBAR__0 : bits(2) -> bits(64) effect {rreg, undef, escape}
+
+val aget_VBAR__1 : unit -> bits(64) effect {rreg, undef, escape}
+
+overload aget_VBAR = {aget_VBAR__0, aget_VBAR__1}
+
+function aget_VBAR__0 regime = {
+ r : bits(64) = undefined;
+ match regime {
+ ? if ? == EL1 => r = VBAR_EL1,
+ ? if ? == EL2 => r = VBAR_EL2,
+ ? if ? == EL3 => r = VBAR_EL3,
+ _ => Unreachable()
+ };
+ return(r)
+}
+
+function aget_VBAR__1 () = return(aget_VBAR(S1TranslationRegime()))
+
+/* aget_SCTLR: read the System Control Register for a translation
+   regime (overload __0: EL1/EL2/EL3 -> SCTLR_EL1/2/3) or for the
+   current stage-1 translation regime (overload __1). */
+val aget_SCTLR__0 : bits(2) -> bits(32) effect {rreg, undef, escape}
+
+val aget_SCTLR__1 : unit -> bits(32) effect {rreg, undef, escape}
+
+overload aget_SCTLR = {aget_SCTLR__0, aget_SCTLR__1}
+
+function aget_SCTLR__0 regime = {
+ r : bits(32) = undefined;
+ match regime {
+ ? if ? == EL1 => r = SCTLR_EL1,
+ ? if ? == EL2 => r = SCTLR_EL2,
+ ? if ? == EL3 => r = SCTLR_EL3,
+ _ => Unreachable()
+ };
+ return(r)
+}
+
+function aget_SCTLR__1 () = return(aget_SCTLR(S1TranslationRegime()))
+
+/* BigEndian: current data endianness. AArch32 uses PSTATE.E; AArch64
+   uses SCTLR.E0E (bit 24) at EL0 and SCTLR.EE (bit 25) at higher ELs,
+   taken from the SCTLR of the current translation regime. */
+val BigEndian : unit -> bool effect {escape, rreg, undef}
+
+function BigEndian () = {
+ bigend : bool = undefined;
+ if UsingAArch32() then bigend = PSTATE.E != 0b0 else if PSTATE.EL == EL0 then bigend = [aget_SCTLR()[24]] != 0b0 else bigend = [aget_SCTLR()[25]] != 0b0;
+ return(bigend)
+}
+
+/* aget_MAIR: read the Memory Attribute Indirection Register for a
+   translation regime (overload __0: EL1/EL2/EL3 -> MAIR_EL1/2/3) or for
+   the current stage-1 translation regime (overload __1). */
+val aget_MAIR__0 : bits(2) -> bits(64) effect {rreg, undef, escape}
+
+val aget_MAIR__1 : unit -> bits(64) effect {rreg, undef, escape}
+
+overload aget_MAIR = {aget_MAIR__0, aget_MAIR__1}
+
+function aget_MAIR__0 regime = {
+ r : bits(64) = undefined;
+ match regime {
+ ? if ? == EL1 => r = MAIR_EL1,
+ ? if ? == EL2 => r = MAIR_EL2,
+ ? if ? == EL3 => r = MAIR_EL3,
+ _ => Unreachable()
+ };
+ return(r)
+}
+
+function aget_MAIR__1 () = return(aget_MAIR(S1TranslationRegime()))
+
+/* S1CacheDisabled: whether stage-1 translation disables cacheability
+   for the access type. Reads the I-cache (bit 12) or C/data-cache
+   (bit 2) enable from HSCTLR/SCTLR on AArch32 regimes, or from the
+   current regime's SCTLR_ELx on AArch64; disabled when the enable bit
+   is 0. */
+val S1CacheDisabled : AccType -> bool effect {escape, rreg, undef}
+
+function S1CacheDisabled acctype = {
+ enable : bits(1) = undefined;
+ if ELUsingAArch32(S1TranslationRegime()) then if PSTATE.EL == EL2 then enable = if acctype == AccType_IFETCH then [HSCTLR[12]] else [HSCTLR[2]] else enable = if acctype == AccType_IFETCH then [SCTLR[12]] else [SCTLR[2]] else enable = if acctype == AccType_IFETCH then [aget_SCTLR()[12]] else [aget_SCTLR()[2]];
+ return(enable == 0b0)
+}
+
+/* ShortConvertAttrsHints: convert a 2-bit short-descriptor RGN field
+   into MemAttrHints: 0b00 Non-cacheable, 0b01 Write-Back/RWA,
+   0b10 Write-Through/RA, 0b11 Write-Back/RA. Forced Non-cacheable when
+   the relevant stage's caching is disabled (stage selected by
+   secondstage). The result is never transient. */
+val ShortConvertAttrsHints : (bits(2), AccType, bool) -> MemAttrHints effect {escape, rreg, undef}
+
+function ShortConvertAttrsHints (RGN, acctype, secondstage) = {
+ result : MemAttrHints = undefined;
+ if ~(secondstage) & S1CacheDisabled(acctype) | secondstage & S2CacheDisabled(acctype) then {
+ result.attrs = MemAttr_NC;
+ result.hints = MemHint_No
+ } else match RGN {
+ 0b00 => {
+ result.attrs = MemAttr_NC;
+ result.hints = MemHint_No
+ },
+ 0b01 => {
+ result.attrs = MemAttr_WB;
+ result.hints = MemHint_RWA
+ },
+ 0b10 => {
+ result.attrs = MemAttr_WT;
+ result.hints = MemHint_RA
+ },
+ 0b11 => {
+ result.attrs = MemAttr_WB;
+ result.hints = MemHint_RA
+ }
+ };
+ result.transient = false;
+ return(result)
+}
+
+/* WalkAttrDecode: build the MemoryAttributes used for translation
+   table walks from shareability (SH) and outer/inner region fields
+   (ORGN/IRGN). Always Normal memory with a NORMAL access type;
+   defaults applied by MemAttrDefaults. */
+val WalkAttrDecode : (bits(2), bits(2), bits(2), bool) -> MemoryAttributes effect {escape, rreg, undef}
+
+function WalkAttrDecode (SH, ORGN, IRGN, secondstage) = {
+ memattrs : MemoryAttributes = undefined;
+ acctype : AccType = AccType_NORMAL;
+ memattrs.typ = MemType_Normal;
+ memattrs.inner = ShortConvertAttrsHints(IRGN, acctype, secondstage);
+ memattrs.outer = ShortConvertAttrsHints(ORGN, acctype, secondstage);
+ /* SH: bit 1 set -> shareable; exactly 0b10 -> outer-shareable. */
+ memattrs.shareable = [SH[1]] == 0b1;
+ memattrs.outershareable = SH == 0b10;
+ return(MemAttrDefaults(memattrs))
+}
+
+/* LongConvertAttrsHints: convert a 4-bit long-descriptor attribute
+   field into MemAttrHints. attrfield must be non-zero. Branches:
+   attrfield<3:2> == 0b00 -> Write-Through transient with hints from
+   attrfield<1:0>; attrfield == 0x4 -> Non-cacheable; attrfield<3:2> ==
+   0b01 -> Write-Back transient with hints from attrfield<1:0>;
+   otherwise non-transient with attrs = attrfield<3:2> and hints =
+   attrfield<1:0>. Forced Non-cacheable when stage-1 caching is
+   disabled. */
+val LongConvertAttrsHints : (bits(4), AccType) -> MemAttrHints effect {escape, rreg, undef}
+
+function LongConvertAttrsHints (attrfield, acctype) = {
+ assert(~(IsZero(attrfield)), "!(IsZero(attrfield))");
+ result : MemAttrHints = undefined;
+ if S1CacheDisabled(acctype) then {
+ result.attrs = MemAttr_NC;
+ result.hints = MemHint_No
+ } else if slice(attrfield, 2, 2) == 0b00 then {
+ result.attrs = MemAttr_WT;
+ result.hints = slice(attrfield, 0, 2);
+ result.transient = true
+ } else if slice(attrfield, 0, 4) == 0x4 then {
+ result.attrs = MemAttr_NC;
+ result.hints = MemHint_No;
+ result.transient = false
+ } else if slice(attrfield, 2, 2) == 0b01 then {
+ /* FIX: attrs and hints were swapped in this Write-Back-transient
+    branch (attrs received the hint bits and hints received MemAttr_WB),
+    inverted relative to the 0b00 branch above and to the ARM pseudocode. */
+ result.attrs = MemAttr_WB;
+ result.hints = slice(attrfield, 0, 2);
+ result.transient = true
+ } else {
+ result.attrs = slice(attrfield, 2, 2);
+ result.hints = slice(attrfield, 0, 2);
+ result.transient = false
+ };
+ return(result)
+}
+
+/* AArch64_S1AttrDecode: decode stage-1 memory attributes for AArch64.
+   The 3-bit attr indexes an 8-bit field of the regime's MAIR
+   (attrfield = MAIR<8*attr+7 : 8*attr>). Reserved MAIR encodings are
+   remapped via ConstrainUnpredictableBits. attrfield<7:4> == 0 selects
+   a Device type from attrfield<3:0>; otherwise Normal memory with
+   outer/inner hints from the two nibbles. */
+val AArch64_S1AttrDecode : (bits(2), bits(3), AccType) -> MemoryAttributes effect {rreg, undef, escape}
+
+function AArch64_S1AttrDecode (SH, attr, acctype) = let 'uattr = ex_nat(UInt(attr)) in {
+ memattrs : MemoryAttributes = undefined;
+ mair : bits(64) = aget_MAIR();
+ index : atom(8 * 'uattr) = 8 * uattr;
+ attrfield : bits(8) = mair[7 + index .. index];
+ __anon1 : Constraint = undefined;
+ /* Reserved encodings (device nibble with low hint bits set, etc.)
+    are CONSTRAINED UNPREDICTABLE and replaced. */
+ if attrfield[7 .. 4] != 0x0 & attrfield[3 .. 0] == 0x0 | attrfield[7 .. 4] == 0x0 & (attrfield[3 .. 0] & 0x3) != 0x0 then
+ (__anon1, attrfield) = ConstrainUnpredictableBits(Unpredictable_RESMAIR) : (Constraint, bits(8))
+ else ();
+ if attrfield[7 .. 4] == 0x0 then {
+ memattrs.typ = MemType_Device;
+ match attrfield[3 .. 0] {
+ 0x0 => memattrs.device = DeviceType_nGnRnE,
+ 0x4 => memattrs.device = DeviceType_nGnRE,
+ 0x8 => memattrs.device = DeviceType_nGRE,
+ 0xC => memattrs.device = DeviceType_GRE,
+ _ => Unreachable()
+ }
+ } else if attrfield[3 .. 0] != 0x0 then {
+ memattrs.typ = MemType_Normal;
+ memattrs.outer = LongConvertAttrsHints(attrfield[7 .. 4], acctype);
+ memattrs.inner = LongConvertAttrsHints(attrfield[3 .. 0], acctype);
+ memattrs.shareable = [SH[1]] == 0b1;
+ memattrs.outershareable = SH == 0b10
+ } else Unreachable();
+ return(MemAttrDefaults(memattrs))
+}
+
+/* IsInHost: whether the current EL is operating as part of a VHE host
+   (ELIsInHost for PSTATE.EL). */
+val IsInHost : unit -> bool effect {escape, rreg, undef}
+
+function IsInHost () = return(ELIsInHost(PSTATE.EL))
+
+/* aget_CPACR: read the architectural-feature access control register —
+   CPTR_EL2 when running as a VHE host, otherwise CPACR_EL1. */
+val aget_CPACR : unit -> bits(32) effect {escape, rreg, undef}
+
+function aget_CPACR () = {
+ if IsInHost() then return(CPTR_EL2) else ();
+ return(CPACR_EL1)
+}
+
+/* HasS2Translation: stage-2 translation applies only when EL2 exists,
+   in Non-secure state, not under a VHE host, and executing at EL0/EL1. */
+val HasS2Translation : unit -> bool effect {escape, rreg, undef}
+
+function HasS2Translation () = return(((HaveEL(EL2) & ~(IsSecure())) & ~(IsInHost())) & (PSTATE.EL == EL0 | PSTATE.EL == EL1))
+
+/* AArch64_SecondStageWalk: perform the stage-2 translation of a
+   stage-1 table-walk access (s2fs1walk = true, alignment assumed ok).
+   Requires stage-2 translation to be active. */
+val AArch64_SecondStageWalk : (AddressDescriptor, bits(64), AccType, bool, int, bool) -> AddressDescriptor effect {escape, rmem, rreg, undef, wmem}
+
+function AArch64_SecondStageWalk (S1, vaddress, acctype, iswrite, 'size, hwupdatewalk) = {
+ assert(HasS2Translation(), "HasS2Translation()");
+ s2fs1walk : bool = true;
+ wasaligned : bool = true;
+ return(AArch64_SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk, size, hwupdatewalk))
+}
+
+/* DoubleLockStatus: OS Double Lock — locked when the double-lock bit
+   (DBGOSDLR/OSDLR_EL1 bit 0) is set, the powerdown-disable bit
+   (DBGPRCR/DBGPRCR_EL1 bit 0) is clear, and the PE is not halted;
+   register set chosen by EL1's register width. */
+val DoubleLockStatus : unit -> bool effect {escape, rreg, undef}
+
+function DoubleLockStatus () = if ELUsingAArch32(EL1) then return(([DBGOSDLR[0]] == 0b1 & [DBGPRCR[0]] == 0b0) & ~(Halted())) else return(([OSDLR_EL1[0]] == 0b1 & [DBGPRCR_EL1[0]] == 0b0) & ~(Halted()))
+
+/* HaltingAllowed: halting into Debug state is never allowed when
+   already halted or OS-double-locked; otherwise it is gated by the
+   (secure) external invasive debug enable for the current security
+   state. */
+val HaltingAllowed : unit -> bool effect {escape, rreg, undef}
+
+function HaltingAllowed () = if Halted() | DoubleLockStatus() then return(false) else if IsSecure() then return(ExternalSecureInvasiveDebugEnabled()) else return(ExternalInvasiveDebugEnabled())
+
+/* system_exceptions_debug_halt_decode: decode stage of the HLT
+   instruction. Undefined unless EDSCR.HDE (bit 14) is set and halting
+   is currently allowed; otherwise executes the halt. */
+val system_exceptions_debug_halt_decode : (bits(3), bits(16), bits(3), bits(2)) -> unit effect {escape, rreg, undef, wreg}
+
+function system_exceptions_debug_halt_decode (opc, imm16, op2, LL) = {
+ __unconditional = true;
+ if [EDSCR[14]] == 0b0 | ~(HaltingAllowed()) then UndefinedFault() else ();
+ aarch64_system_exceptions_debug_halt()
+}
+
+/* HaltOnBreakpointOrWatchpoint: breakpoints/watchpoints halt (rather
+   than raise a debug exception) when halting is allowed, EDSCR.HDE
+   (bit 14) is set, and the OS lock (OSLSR_EL1 bit 1) is clear. */
+val HaltOnBreakpointOrWatchpoint : unit -> bool effect {escape, rreg, undef}
+
+function HaltOnBreakpointOrWatchpoint () = return((HaltingAllowed() & [EDSCR[14]] == 0b1) & [OSLSR_EL1[1]] == 0b0)
+
+/* DebugTargetFrom: the EL that debug exceptions are routed to for a
+   given security state. Routed to EL2 when HDCR.TDE/HCR.TGE (AArch32,
+   bits 8/27) or MDCR_EL2.TDE/HCR_EL2.TGE (AArch64) is set in
+   Non-secure state; to EL3 only for Secure state with an AArch32
+   highest EL; otherwise EL1. */
+val DebugTargetFrom : bool -> bits(2) effect {escape, rreg, undef}
+
+function DebugTargetFrom secure = {
+ route_to_el2 : bool = undefined;
+ if HaveEL(EL2) & ~(secure) then if ELUsingAArch32(EL2) then route_to_el2 = [HDCR[8]] == 0b1 | [HCR[27]] == 0b1 else route_to_el2 = [MDCR_EL2[8]] == 0b1 | [HCR_EL2[27]] == 0b1 else route_to_el2 = false;
+ target : bits(2) = undefined;
+ if route_to_el2 then target = EL2 else if (HaveEL(EL3) & HighestELUsingAArch32()) & secure then target = EL3 else target = EL1;
+ return(target)
+}
+
+/* DebugTarget: debug exception target EL for the current security
+   state. */
+val DebugTarget : unit -> bits(2) effect {escape, rreg, undef}
+
+function DebugTarget () = {
+ secure : bool = IsSecure();
+ return(DebugTargetFrom(secure))
+}
+
+/* SSAdvance: advance the software-step state machine — when stepping
+   is enabled (debug target EL is AArch64 with MDSCR_EL1.SS, bit 0, set)
+   and the step is active-not-pending (PSTATE.SS == 1), clear PSTATE.SS
+   so the next instruction triggers the step exception. */
+val SSAdvance : unit -> unit effect {escape, rreg, undef, wreg}
+
+function SSAdvance () = {
+ target : bits(2) = DebugTarget();
+ step_enabled : bool = ~(ELUsingAArch32(target)) & [MDSCR_EL1[0]] == 0b1;
+ active_not_pending : bool = step_enabled & PSTATE.SS == 0b1;
+ if active_not_pending then PSTATE.SS = 0b0 else ();
+ ()
+}
+
+/* ConditionHolds: evaluate a 4-bit A64/A32 condition code against the
+   PSTATE NZCV flags. cond<3:1> selects the base predicate; cond<0>
+   inverts it, except for 0b1111 (AL/NV), which always holds. */
+val ConditionHolds : bits(4) -> bool effect {rreg, undef}
+
+function ConditionHolds cond = {
+ result : bool = undefined;
+ match slice(cond, 1, 3) {
+ 0b000 => result = PSTATE.Z == 0b1,
+ 0b001 => result = PSTATE.C == 0b1,
+ 0b010 => result = PSTATE.N == 0b1,
+ 0b011 => result = PSTATE.V == 0b1,
+ 0b100 => result = PSTATE.C == 0b1 & PSTATE.Z == 0b0,
+ 0b101 => result = PSTATE.N == PSTATE.V,
+ 0b110 => result = PSTATE.N == PSTATE.V & PSTATE.Z == 0b0,
+ 0b111 => result = true
+ };
+ /* Odd condition codes negate the even counterpart, 0xF excepted. */
+ if [cond[0]] == 0b1 & cond != 0xF then result = ~(result) else ();
+ return(result)
+}
+
+/* aarch64_integer_conditional_select: execute behaviour of CSEL /
+   CSINV / CSINC / CSNEG — X[d] = X[n] if the condition holds, else
+   X[m] optionally inverted (else_inv) and/or incremented (else_inc). */
+val aarch64_integer_conditional_select : (bits(4), int, int, bool, bool, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_conditional_select (condition, 'd, 'datasize, else_inc, else_inv, 'm, 'n) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ result : bits('datasize) = undefined;
+ operand1 : bits('datasize) = aget_X(n);
+ operand2 : bits('datasize) = aget_X(m);
+ if ConditionHolds(condition) then result = operand1 else {
+ result = operand2;
+ /* Inversion is applied before the increment (CSNEG = invert + 1). */
+ if else_inv then result = ~(result) else ();
+ if else_inc then result = result + 1 else ()
+ };
+ aset_X(d, result)
+}
+
+/* integer_conditional_select_decode: decode stage of the conditional
+   select instruction group — extract register numbers, select 32/64-bit
+   datasize from sf, and map op/o2 onto else_inv/else_inc. */
+val integer_conditional_select_decode : (bits(1), bits(1), bits(1), bits(5), bits(4), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_conditional_select_decode (sf, op, S, Rm, cond, o2, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ condition : bits(4) = cond;
+ else_inv : bool = op == 0b1;
+ else_inc : bool = o2 == 0b1;
+ aarch64_integer_conditional_select(condition, d, datasize, else_inc, else_inv, m, n)
+}
+
+/* aarch64_integer_conditional_compare_register: execute behaviour of
+   CCMP/CCMN (register) — when the condition holds, NZCV is set from
+   X[n] +/- X[m] (subtraction done as add of the complement with
+   carry-in 1); otherwise NZCV takes the immediate flags value. */
+val aarch64_integer_conditional_compare_register : (bits(4), int, bits(4), int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_conditional_compare_register (condition, 'datasize, flags__arg, 'm, 'n, sub_op) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ flags = flags__arg;
+ operand1 : bits('datasize) = aget_X(n);
+ operand2 : bits('datasize) = aget_X(m);
+ carry_in : bits(1) = 0b0;
+ __anon1 : bits('datasize) = undefined;
+ if ConditionHolds(condition) then {
+ if sub_op then {
+ operand2 = ~(operand2);
+ carry_in = 0b1
+ } else ();
+ (__anon1, flags) = AddWithCarry(operand1, operand2, carry_in)
+ } else ();
+ (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V) = flags
+}
+
+/* integer_conditional_compare_register_decode: decode stage of
+   CCMP/CCMN (register) — extract register numbers, 32/64-bit datasize
+   from sf, subtraction flag from op, and the fallback NZCV immediate. */
+val integer_conditional_compare_register_decode : (bits(1), bits(1), bits(1), bits(5), bits(4), bits(1), bits(5), bits(1), bits(4)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_conditional_compare_register_decode (sf, op, S, Rm, cond, o2, Rn, o3, nzcv) = {
+ __unconditional = true;
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ sub_op : bool = op == 0b1;
+ condition : bits(4) = cond;
+ flags : bits(4) = nzcv;
+ aarch64_integer_conditional_compare_register(condition, datasize, flags, m, n, sub_op)
+}
+
+/* aarch64_integer_conditional_compare_immediate: execute behaviour of
+   CCMP/CCMN (immediate) — identical to the register form, except the
+   second operand is the zero-extended immediate. */
+val aarch64_integer_conditional_compare_immediate : forall ('datasize : Int).
+  (bits(4), atom('datasize), bits(4), bits('datasize), int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_integer_conditional_compare_immediate (condition, datasize, flags__arg, imm, 'n, sub_op) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ flags = flags__arg;
+ operand1 : bits('datasize) = aget_X(n);
+ operand2 : bits('datasize) = imm;
+ carry_in : bits(1) = 0b0;
+ __anon1 : bits('datasize) = undefined;
+ if ConditionHolds(condition) then {
+ if sub_op then {
+ operand2 = ~(operand2);
+ carry_in = 0b1
+ } else ();
+ (__anon1, flags) = AddWithCarry(operand1, operand2, carry_in)
+ } else ();
+ (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V) = flags
+}
+
+val integer_conditional_compare_immediate_decode : (bits(1), bits(1), bits(1), bits(5), bits(4), bits(1), bits(5), bits(1), bits(4)) -> unit effect {escape, wreg, undef, rreg}
+
+/* Decode CCMN/CCMP (immediate): the 5-bit imm5 field is zero-extended
+   to the selected datasize and used as operand2; op selects subtract
+   (CCMP) vs add (CCMN). */
+function integer_conditional_compare_immediate_decode (sf, op, S, imm5, cond, o2, Rn, o3, nzcv) = {
+ __unconditional = true;
+ n : int = UInt(Rn);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ sub_op : bool = op == 0b1;
+ condition : bits(4) = cond;
+ flags : bits(4) = nzcv;
+ imm : bits('datasize) = ZeroExtend(imm5, datasize);
+ aarch64_integer_conditional_compare_immediate(condition, datasize, flags, imm, n, sub_op)
+}
+
+val ConditionSyndrome : unit -> bits(5) effect {escape, rreg, undef}
+
+/* Build the 5-bit condition field of an exception syndrome.
+   Bit 4 is the "condition valid" flag; bits 3..0 carry the condition
+   (0xE when no meaningful condition is reported).  For a trapped A32
+   instruction, a passing condition may be reported as 0xE subject to
+   CONSTRAINED UNPREDICTABLE resolution; for trapped T32 it is
+   IMPLEMENTATION DEFINED whether a valid condition is reported at all.
+   From AArch64 the syndrome is always valid with condition 0xE. */
+function ConditionSyndrome () = {
+ syndrome : bits(5) = undefined;
+ cond : bits(4) = undefined;
+ if UsingAArch32() then {
+ cond = AArch32_CurrentCond();
+ if PSTATE.T == 0b0 then {
+ /* A32: condition is always valid */
+ syndrome = __SetSlice_bits(5, 1, syndrome, 4, 0b1);
+ if ConditionHolds(cond) & ConstrainUnpredictableBool(Unpredictable_ESRCONDPASS) then syndrome = __SetSlice_bits(5, 4, syndrome, 0, 0xE) else syndrome = __SetSlice_bits(5, 4, syndrome, 0, cond)
+ } else if __IMPDEF_boolean("Condition valid for trapped T32") then {
+ syndrome = __SetSlice_bits(5, 1, syndrome, 4, 0b1);
+ syndrome = __SetSlice_bits(5, 4, syndrome, 0, cond)
+ } else {
+ /* T32 with no reported condition: valid bit clear, cond UNKNOWN */
+ syndrome = __SetSlice_bits(5, 1, syndrome, 4, 0b0);
+ syndrome = __SetSlice_bits(5, 4, syndrome, 0, undefined)
+ }
+ } else {
+ syndrome = __SetSlice_bits(5, 1, syndrome, 4, 0b1);
+ syndrome = __SetSlice_bits(5, 4, syndrome, 0, 0xE)
+ };
+ return(syndrome)
+}
+
+val BranchToAddr : forall ('N : Int), 'N >= 0.
+  (bits('N), BranchType) -> unit effect {escape, rreg, wreg}
+
+/* Write the program counter directly with the branch target (no tag
+   or alignment processing here): a 32-bit target requires AArch32 and
+   is zero-extended into _PC; a 64-bit target requires AArch64 and is
+   written as-is.  Also records that a branch was taken and issues the
+   branch-type hint. */
+function BranchToAddr (target, branch_type) = {
+ __BranchTaken = true;
+ Hint_Branch(branch_type);
+ if 'N == 32 then {
+ assert(UsingAArch32(), "UsingAArch32()");
+ _PC = ZeroExtend(target)
+ } else {
+ assert('N == 64 & ~(UsingAArch32()), "((N == 64) && !(UsingAArch32()))");
+ _PC = slice(target, 0, 64)
+ };
+ ()
+}
+
+val BadMode : bits(5) -> bool effect {undef}
+
+/* Return true iff the 5-bit AArch32 mode encoding is NOT valid on this
+   implementation: each recognised mode requires AArch32 support at the
+   exception level it runs at (Monitor -> EL3, Hyp -> EL2, User -> EL0,
+   the kernel modes -> EL1); any unlisted encoding is bad. */
+function BadMode mode = {
+ valid_name : bool = undefined;
+ match mode {
+ ? if ? == M32_Monitor => valid_name = HaveAArch32EL(EL3),
+ ? if ? == M32_Hyp => valid_name = HaveAArch32EL(EL2),
+ ? if ? == M32_FIQ => valid_name = HaveAArch32EL(EL1),
+ ? if ? == M32_IRQ => valid_name = HaveAArch32EL(EL1),
+ ? if ? == M32_Svc => valid_name = HaveAArch32EL(EL1),
+ ? if ? == M32_Abort => valid_name = HaveAArch32EL(EL1),
+ ? if ? == M32_Undef => valid_name = HaveAArch32EL(EL1),
+ ? if ? == M32_System => valid_name = HaveAArch32EL(EL1),
+ ? if ? == M32_User => valid_name = HaveAArch32EL(EL0),
+ _ => valid_name = false
+ };
+ return(~(valid_name))
+}
+
+val aset_Rmode : (int, bits(5), bits(32)) -> unit effect {wreg, rreg, undef, escape}
+
+/* Write a 32-bit value to AArch32 banked register R<n> (n in 0..14)
+   for the given mode.  Monitor mode banks SP/LR in dedicated
+   SP_mon/LR_mon registers; all other cases locate the register in the
+   shared _R array via LookUpRIndex.  Normally only bits 31..0 of the
+   underlying 64-bit register are updated; the ZEROUPPER CONSTRAINED
+   UNPREDICTABLE resolution may instead zero-extend to all 64 bits. */
+function aset_Rmode (n, mode, value_name) = {
+ assert(n >= 0 & n <= 14, "((n >= 0) && (n <= 14))");
+ /* Monitor mode is only accessible from Secure state */
+ if ~(IsSecure()) then assert(mode != M32_Monitor, "(mode != M32_Monitor)") else ();
+ assert(~(BadMode(mode)), "!(BadMode(mode))");
+ if mode == M32_Monitor then
+ if n == 13 then SP_mon = value_name
+ else if n == 14 then LR_mon = value_name
+ else {
+ /* read-modify-write: update only the low 32 bits of _R[n] */
+ __tmp_1 : bits(64) = _R[n];
+ __tmp_1[31 .. 0] = value_name;
+ _R[n] = __tmp_1
+ }
+ else if ~(HighestELUsingAArch32()) & ConstrainUnpredictableBool(Unpredictable_ZEROUPPER) then
+ _R[LookUpRIndex(n, mode)] = ZeroExtend(value_name, 64)
+ else {
+ __tmp_2 : bits(64) = _R[LookUpRIndex(n, mode)];
+ __tmp_2[31 .. 0] = value_name;
+ _R[LookUpRIndex(n, mode)] = __tmp_2
+ };
+ ()
+}
+
+val aset_R : (int, bits(32)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Write a 32-bit value to AArch32 register R<n>, banked according to
+   the current mode held in PSTATE.M. */
+function aset_R ('n, value_name) = {
+ aset_Rmode(n, PSTATE.M, value_name)
+}
+
+val set_LR : bits(32) -> unit effect {escape, rreg, undef, wreg}
+
+/* Write the AArch32 link register (R14) in the current mode. */
+function set_LR value_name = {
+ aset_R(14, value_name)
+}
+
+val ELFromM32 : bits(5) -> (bool, bits(2)) effect {escape, rreg, undef}
+
+/* Map an AArch32 mode encoding to its exception level, returning
+   (valid, el).  Hyp additionally requires either no EL3 or the
+   Non-secure bit of the SCR to be set.  The kernel-side modes resolve
+   to EL3 when EL3 is implemented, the highest EL uses AArch32, and
+   SCR bit 0 (presumably SCR.NS -- confirm) is clear; otherwise EL1.
+   On an invalid mode, el is UNKNOWN. */
+function ELFromM32 mode = {
+ el : bits(2) = undefined;
+ valid_name : bool = ~(BadMode(mode));
+ match mode {
+ ? if ? == M32_Monitor => el = EL3,
+ ? if ? == M32_Hyp => {
+ el = EL2;
+ valid_name = valid_name & (~(HaveEL(EL3)) | [aget_SCR_GEN()[0]] == 0b1)
+ },
+ ? if ? == M32_FIQ => el = if (HaveEL(EL3) & HighestELUsingAArch32()) & [SCR[0]] == 0b0 then EL3 else EL1,
+ ? if ? == M32_IRQ => el = if (HaveEL(EL3) & HighestELUsingAArch32()) & [SCR[0]] == 0b0 then EL3 else EL1,
+ ? if ? == M32_Svc => el = if (HaveEL(EL3) & HighestELUsingAArch32()) & [SCR[0]] == 0b0 then EL3 else EL1,
+ ? if ? == M32_Abort => el = if (HaveEL(EL3) & HighestELUsingAArch32()) & [SCR[0]] == 0b0 then EL3 else EL1,
+ ? if ? == M32_Undef => el = if (HaveEL(EL3) & HighestELUsingAArch32()) & [SCR[0]] == 0b0 then EL3 else EL1,
+ ? if ? == M32_System => el = if (HaveEL(EL3) & HighestELUsingAArch32()) & [SCR[0]] == 0b0 then EL3 else EL1,
+ ? if ? == M32_User => el = EL0,
+ _ => valid_name = false
+ };
+ if ~(valid_name) then el = undefined else ();
+ return((valid_name, el))
+}
+
+val ELFromSPSR : bits(32) -> (bool, bits(2)) effect {escape, rreg, undef}
+
+/* Determine the target exception level encoded in an SPSR value,
+   returning (valid, el).  spsr<4> == 0 means an AArch64 target: el is
+   taken from spsr<3:2>, and the encoding is rejected when the highest
+   EL uses AArch32, the EL is unimplemented, spsr<1> is set, EL0 is
+   combined with spsr<0> set, or EL2 is requested while SCR_EL3 bit 0
+   (presumably SCR_EL3.NS -- confirm) is clear.  spsr<4> == 1 means an
+   AArch32 mode, delegated to ELFromM32.  On invalid, el is UNKNOWN. */
+function ELFromSPSR spsr = {
+ valid_name : bool = undefined;
+ el : bits(2) = undefined;
+ if [spsr[4]] == 0b0 then {
+ el = slice(spsr, 2, 2);
+ if HighestELUsingAArch32() then valid_name = false else if ~(HaveEL(el)) then valid_name = false else if [spsr[1]] == 0b1 then valid_name = false else if el == EL0 & [spsr[0]] == 0b1 then valid_name = false else if (el == EL2 & HaveEL(EL3)) & [SCR_EL3[0]] == 0b0 then valid_name = false else valid_name = true
+ } else if ~(HaveAnyAArch32()) then valid_name = false else (valid_name, el) = ELFromM32(slice(spsr, 0, 5));
+ if ~(valid_name) then el = undefined else ();
+ return((valid_name, el))
+}
+
+val IllegalExceptionReturn : bits(32) -> bool effect {escape, rreg, undef}
+
+/* Decide whether an exception return using this SPSR must be treated
+   as illegal: invalid target EL, return to a strictly higher EL,
+   register-width (AArch32/AArch64) mismatch with the target EL,
+   AArch32 attempting to return to AArch64 state, or return to EL1
+   while HCR_EL2 bit 27 (presumably HCR_EL2.TGE -- confirm) traps
+   Non-secure EL1. */
+function IllegalExceptionReturn spsr = {
+ target : bits(2) = undefined;
+ valid_name : bool = undefined;
+ (valid_name, target) = ELFromSPSR(spsr);
+ if ~(valid_name) then return(true) else ();
+ if UInt(target) > UInt(PSTATE.EL) then return(true) else ();
+ spsr_mode_is_aarch32 : bool = [spsr[4]] == 0b1;
+ target_el_is_aarch32 : bool = undefined;
+ known : bool = undefined;
+ (known, target_el_is_aarch32) = ELUsingAArch32K(target);
+ assert(known | target == EL0 & ~(ELUsingAArch32(EL1)), "(known || ((target == EL0) && !(ELUsingAArch32(EL1))))");
+ if known & spsr_mode_is_aarch32 != target_el_is_aarch32 then return(true) else ();
+ if UsingAArch32() & ~(spsr_mode_is_aarch32) then return(true) else ();
+ if ((HaveEL(EL2) & target == EL1) & ~(IsSecureBelowEL3())) & [HCR_EL2[27]] == 0b1 then return(true) else ();
+ return(false)
+}
+
+val AArch32_WriteMode : bits(5) -> unit effect {escape, rreg, undef, wreg}
+
+/* Switch the AArch32 processor mode: validates the mode via ELFromM32,
+   then updates PSTATE.M, PSTATE.EL, marks the register width as
+   AArch32 (nRW = 1), and selects the stack pointer -- SP = 0 only for
+   User/System (which run on the EL0 stack), SP = 1 otherwise. */
+function AArch32_WriteMode mode = {
+ el : bits(2) = undefined;
+ valid_name : bool = undefined;
+ (valid_name, el) = ELFromM32(mode);
+ assert(valid_name, "valid");
+ PSTATE.M = mode;
+ PSTATE.EL = el;
+ PSTATE.nRW = 0b1;
+ PSTATE.SP = if mode == M32_User | mode == M32_System then 0b0 else 0b1;
+ ()
+}
+
+val AddrTop : (bits(64), bool, bits(2)) -> int effect {escape, rreg, undef}
+
+/* Return the index of the most significant bit of a virtual address
+   that participates in address checking: 31 for an AArch32 regime;
+   otherwise 55 when top-byte-ignore applies and 63 when it does not.
+   The TBI bit is chosen per address half (address<55> selects the
+   upper/lower range) from the TCR of the stage-1 translation regime;
+   with pointer authentication implemented, a TBID bit can exclude
+   instruction fetches (IsInstr) from top-byte-ignore.
+   NOTE(review): the TCR bit positions used here (38/37, 52/51, 20,
+   29, 41/42...) are taken on trust from the generated spec -- confirm
+   against the ARM ARM register descriptions. */
+function AddrTop (address, IsInstr, el) = {
+ assert(HaveEL(el), "HaveEL(el)");
+ regime : bits(2) = S1TranslationRegime(el);
+ tbid : bits(1) = undefined;
+ tbi : bits(1) = undefined;
+ if ELUsingAArch32(regime) then return(31) else match regime {
+ ? if ? == EL1 => {
+ tbi = if [address[55]] == 0b1 then [TCR_EL1[38]] else [TCR_EL1[37]];
+ if HavePACExt() then tbid = if [address[55]] == 0b1 then [TCR_EL1[52]] else [TCR_EL1[51]] else ()
+ },
+ ? if ? == EL2 => if HaveVirtHostExt() & ELIsInHost(el) then {
+ tbi = if [address[55]] == 0b1 then [TCR_EL2[38]] else [TCR_EL2[37]];
+ if HavePACExt() then tbid = if [address[55]] == 0b1 then [TCR_EL2[52]] else [TCR_EL2[51]] else ()
+ } else {
+ tbi = [TCR_EL2[20]];
+ if HavePACExt() then tbid = [TCR_EL2[29]] else ()
+ },
+ ? if ? == EL3 => {
+ tbi = [TCR_EL3[20]];
+ if HavePACExt() then tbid = [TCR_EL3[29]] else ()
+ }
+ };
+ return(if tbi == 0b1 & ((~(HavePACExt()) | tbid == 0b0) | ~(IsInstr)) then 55 else 63)
+}
+
+val AddPAC : (bits(64), bits(64), bits(128), bool) -> bits(64) effect {escape, wreg, rreg, undef}
+
+/* Insert a pointer authentication code into ptr.  selbit (the bit
+   selecting the upper/lower address range, derived from the TCR TBI
+   configuration of the active translation regime) is replicated over
+   the non-address bits to form the extension used as ComputePAC input;
+   the PAC is computed over that extended pointer, the modifier, and
+   the 128-bit key K (split into hi/lo 64-bit halves).  The result
+   keeps the address bits below bottom_PAC_bit, places the PAC between
+   bottom_PAC_bit and bit 54, and preserves the top byte when
+   top-byte-ignore is active.  If the original extension bits were not
+   canonical (neither all-zeros nor all-ones up to top_bit), bit
+   top_bit-1 of the PAC is inverted so a later authentication fails. */
+function AddPAC (ptr, modifier, K, data) = {
+ PAC : bits(64) = undefined;
+ result : bits(64) = undefined;
+ ext_ptr : bits(64) = undefined;
+ extfield : bits(64) = undefined;
+ selbit : bits(1) = undefined;
+ tbi : bool = CalculateTBI(ptr, data);
+ let 'top_bit : {|55, 63|} = if tbi then 55 else 63;
+ if PtrHasUpperAndLowerAddRanges() then
+ if IsEL1TransRegimeRegs() then
+ if data then
+ selbit = if [TCR_EL1[38]] == 0b1 | [TCR_EL1[37]] == 0b1 then [ptr[55]] else [ptr[63]]
+ else if [TCR_EL1[38]] == 0b1 & [TCR_EL1[52]] == 0b0 | [TCR_EL1[37]] == 0b1 & [TCR_EL1[51]] == 0b0 then
+ selbit = [ptr[55]]
+ else selbit = [ptr[63]]
+ else if data then
+ selbit = if HaveEL(EL2) & [TCR_EL2[38]] == 0b1 | HaveEL(EL2) & [TCR_EL2[37]] == 0b1 then [ptr[55]] else [ptr[63]]
+ else
+ selbit = if (HaveEL(EL2) & [TCR_EL2[38]] == 0b1) & [TCR_EL1[52]] == 0b0 | (HaveEL(EL2) & [TCR_EL2[37]] == 0b1) & [TCR_EL1[51]] == 0b0 then [ptr[55]] else [ptr[63]]
+ else selbit = if tbi then [ptr[55]] else [ptr[63]];
+ /* NOTE(review): the EL2 instruction case reads TCR_EL1[52]/[51]
+    (TBID) rather than TCR_EL2 -- looks inherited from the upstream
+    pseudocode; verify against the ARM ARM. */
+ let 'bottom_PAC_bit : {'n, true. atom('n)} = ex_int(CalculateBottomPACBit(ptr, selbit));
+ assert(constraint('bottom_PAC_bit <= 55));
+ extfield = replicate_bits(selbit, 64);
+ if tbi then
+ ext_ptr = (ptr[63 .. 56] @ extfield[(negate(bottom_PAC_bit) + 56) - 1 .. 0]) @ ptr[bottom_PAC_bit - 1 .. 0]
+ else
+ ext_ptr = extfield[(negate(bottom_PAC_bit) + 64) - 1 .. 0] @ ptr[bottom_PAC_bit - 1 .. 0];
+ PAC = ComputePAC(ext_ptr, modifier, K[127 .. 64], K[63 .. 0]);
+ /* non-canonical extension bits: corrupt the PAC so AuthPAC fails */
+ if ~(IsZero(ptr[(((top_bit - bottom_PAC_bit) + 1) - 1) + bottom_PAC_bit .. bottom_PAC_bit])) & ~(IsOnes(ptr[(((top_bit - bottom_PAC_bit) + 1) - 1) + bottom_PAC_bit .. bottom_PAC_bit])) then
+ PAC[top_bit - 1 .. top_bit - 1] = ~([PAC[top_bit - 1]])
+ else ();
+ if tbi then
+ result = ((ptr[63 .. 56] @ selbit) @ PAC[((negate(bottom_PAC_bit) + 55) - 1) + bottom_PAC_bit .. bottom_PAC_bit]) @ ptr[bottom_PAC_bit - 1 .. 0]
+ else
+ result = ((PAC[63 .. 56] @ selbit) @ PAC[((negate(bottom_PAC_bit) + 55) - 1) + bottom_PAC_bit .. bottom_PAC_bit]) @ ptr[bottom_PAC_bit - 1 .. 0];
+ return(result)
+}
+
+val AArch64_vESBOperation : unit -> unit effect {escape, rreg, undef, wreg}
+
+/* Virtual Error Synchronization Barrier for EL0/EL1 under a
+   hypervisor: if a virtual SError is enabled and pending (per the
+   tested HCR_EL2 bits 27/5/8 -- presumably TGE/AMO/VSE; confirm) and
+   it is masked (debug-halted, interrupts disabled, or PSTATE.A set),
+   defer it into the virtual deferred-SError record and clear the
+   pending bit (HCR_EL2 bit 8).
+   NOTE(review): VDISR_EL2/VDISR are declared as locals here, shadowing
+   the system registers of the same name, so the deferred record
+   appears to be discarded -- verify against the spec generator. */
+function AArch64_vESBOperation () = {
+ assert((HaveEL(EL2) & ~(IsSecure())) & (PSTATE.EL == EL0 | PSTATE.EL == EL1), "((HaveEL(EL2) && !(IsSecure())) && (((PSTATE).EL == EL0) || ((PSTATE).EL == EL1)))");
+ vSEI_enabled : bool = [HCR_EL2[27]] == 0b0 & [HCR_EL2[5]] == 0b1;
+ vSEI_pending : bool = vSEI_enabled & [HCR_EL2[8]] == 0b1;
+ vintdis : bool = Halted() | ExternalDebugInterruptsDisabled(EL1);
+ vmasked : bool = vintdis | PSTATE.A == 0b1;
+ VDISR_EL2 : bits(64) = undefined;
+ VDISR : bits(32) = undefined;
+ if vSEI_pending & vmasked then {
+ if ELUsingAArch32(EL1) then VDISR = AArch32_ReportDeferredSError(slice(VDFSR, 14, 2), [VDFSR[12]]) else VDISR_EL2 = AArch64_ReportDeferredSError(slice(VSESR_EL2, 0, 25));
+ HCR_EL2 = __SetSlice_bits(64, 1, HCR_EL2, 8, 0b0)
+ } else ();
+ ()
+}
+
+val AArch64_WatchpointByteMatch : (int, bits(64)) -> bool effect {rreg, undef, escape}
+
+/* Decide whether watchpoint n matches the given virtual address.
+   The match combines (a) the byte-address-select test -- the BAS
+   field, DBGWCR_EL1[n] bits 12..5, indexed by the low address bits --
+   with (b) the address compare against DBGWVR_EL1[n], optionally
+   masked by the MASK field (bits 28..24).  The CONSTRAINED
+   UNPREDICTABLE cases (mask combined with a non-trivial BAS,
+   non-contiguous BAS bytes, reserved mask values 1-2) are resolved
+   via ConstrainUnpredictable*.  top is the highest address bit
+   checked, from AddrTop for the current EL. */
+function AArch64_WatchpointByteMatch (n, vaddress) = let 'top : {'n, true. atom('n)} = AddrTop(vaddress, false, PSTATE.EL) in {
+ /* doubleword (8-byte) vs word (4-byte) BAS granule */
+ bottom : int = if [DBGWVR_EL1[n][2]] == 0b1 then 2 else 3;
+ byte_select_match : bool = [DBGWCR_EL1[n][12 .. 5][UInt(vaddress[bottom - 1 .. 0])]] != 0b0;
+ mask : int = UInt(DBGWCR_EL1[n][28 .. 24]);
+ MSB : bits(8) = undefined;
+ LSB : bits(8) = undefined;
+ if mask > 0 & ~(IsOnes(DBGWCR_EL1[n][12 .. 5])) then
+ byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPMASKANDBAS)
+ else {
+ /* check that the set BAS bits form one contiguous run */
+ LSB = DBGWCR_EL1[n][12 .. 5] & ~(DBGWCR_EL1[n][12 .. 5] - 1);
+ MSB = DBGWCR_EL1[n][12 .. 5] + LSB;
+ if ~(IsZero(MSB & MSB - 1)) then {
+ byte_select_match = ConstrainUnpredictableBool(Unpredictable_WPBASCONTIGUOUS);
+ bottom = 3
+ } else ()
+ };
+ c : Constraint = undefined;
+ /* mask values 1 and 2 are reserved */
+ if mask > 0 & mask <= 2 then {
+ (c, mask) = ConstrainUnpredictableInteger(3, 31, Unpredictable_RESWPMASK);
+ assert(c == Constraint_DISABLED | c == Constraint_NONE | c == Constraint_UNKNOWN, "((c == Constraint_DISABLED) || ((c == Constraint_NONE) || (c == Constraint_UNKNOWN)))");
+ match c {
+ Constraint_DISABLED => return(false),
+ Constraint_NONE => mask = 0
+ }
+ } else ();
+ WVR_match : bool = undefined;
+ let 'mask2 : {'n, true. atom('n)} = ex_int(mask);
+ let 'bottom2 : {'n, true. atom('n)} = ex_int(bottom);
+ if mask > bottom then {
+ assert(constraint('mask2 >= 'bottom2 + 1));
+ WVR_match = vaddress[(((top - mask2) + 1) - 1) + mask2 .. mask2] == DBGWVR_EL1[n][(((top - mask2) + 1) - 1) + mask2 .. mask2];
+ /* masked-off WVR bits must be zero, else CONSTRAINED UNPREDICTABLE */
+ if WVR_match & ~(IsZero(DBGWVR_EL1[n][((mask2 - bottom2) - 1) + bottom2 .. bottom2])) then
+ WVR_match = ConstrainUnpredictableBool(Unpredictable_WPMASKEDBITS)
+ else ()
+ } else
+ WVR_match = vaddress[(((top - bottom2) + 1) - 1) + bottom2 .. bottom2] == DBGWVR_EL1[n][(((top - bottom2) + 1) - 1) + bottom2 .. bottom2];
+ return(WVR_match & byte_select_match)
+}
+
+val IsZero_slice : forall 'n, 'n >= 0.
+  (bits('n), int, int) -> bool effect {escape}
+
+/* True iff the 'l-bit slice of xs beginning at bit i is all zeros. */
+function IsZero_slice (xs, i, 'l) = {
+ assert(constraint('l >= 0));
+ let sub = slice(xs, i, l) in IsZero(sub)
+}
+
+val IsOnes_slice : forall 'n, 'n >= 0.
+  (bits('n), int, int) -> bool effect {escape}
+
+/* True iff the 'l-bit slice of xs beginning at bit i is all ones. */
+function IsOnes_slice (xs, i, 'l) = {
+ assert(constraint('l >= 0));
+ let sub = slice(xs, i, l) in IsOnes(sub)
+}
+
+val ZeroExtend_slice_append : forall 'n 'm 'o, 'n >= 0 & 'm >= 0 & 'o >= 0.
+  (bits('n), int, int, bits('m)) -> bits('o) effect {escape}
+
+/* Take the 'l-bit slice of xs at bit i, append ys below it, and
+   zero-extend the concatenation to the result width. */
+function ZeroExtend_slice_append (xs, i, 'l, ys) = {
+ assert(constraint('l >= 0));
+ let sub = slice(xs, i, l) in ZeroExtend(sub @ ys)
+}
+
+val AArch64_TranslationTableWalk : (bits(52), bits(64), AccType, bool, bool, bool, int) -> TLBRecord effect {escape, rreg, rmem, wmem, undef}
+
+function AArch64_TranslationTableWalk (ipaddress, vaddress, acctype, iswrite, secondstage, s2fs1walk, 'size) = {
+ if ~(secondstage) then assert(~(ELUsingAArch32(S1TranslationRegime()))) else assert(((HaveEL(EL2) & ~(IsSecure())) & ~(ELUsingAArch32(EL2))) & HasS2Translation());
+ result : TLBRecord = undefined;
+ descaddr : AddressDescriptor = undefined;
+ baseregister : bits(64) = undefined;
+ inputaddr : bits(64) = undefined;
+ __tmp_18 : MemoryAttributes = descaddr.memattrs;
+ __tmp_18.typ = MemType_Normal;
+ descaddr.memattrs = __tmp_18;
+ startsizecheck : int = undefined;
+ inputsizecheck : int = undefined;
+ startlevel : int = undefined;
+ level : int = undefined;
+ stride : int = undefined;
+ firstblocklevel : int = undefined;
+ grainsize : int = undefined;
+ hierattrsdisabled : bool = undefined;
+ update_AP : bool = undefined;
+ update_AF : bool = undefined;
+ singlepriv : bool = undefined;
+ lookupsecure : bool = undefined;
+ reversedescriptors : bool = undefined;
+ disabled : bool = undefined;
+ basefound : bool = undefined;
+ ps : bits(3) = undefined;
+ inputsize_min : int = undefined;
+ c : Constraint = undefined;
+ inputsize_max : int = undefined;
+ inputsize : int = undefined;
+ midgrain : bool = undefined;
+ largegrain : bool = undefined;
+ top : int = undefined;
+ if ~(secondstage) then {
+ inputaddr = ZeroExtend(vaddress);
+ top = AddrTop(inputaddr, acctype == AccType_IFETCH, PSTATE.EL);
+ if PSTATE.EL == EL3 then {
+ largegrain = slice(TCR_EL3, 14, 2) == 0b01;
+ midgrain = slice(TCR_EL3, 14, 2) == 0b10;
+ inputsize = 64 - UInt(slice(TCR_EL3, 0, 6));
+ inputsize_max = if Have52BitVAExt() & largegrain then 52 else 48;
+ if inputsize > inputsize_max then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_max
+ else ()
+ } else ();
+ inputsize_min = 64 - 39;
+ if inputsize < inputsize_min then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_min
+ else ()
+ } else ();
+ ps = slice(TCR_EL3, 16, 3);
+ basefound = (inputsize >= inputsize_min & inputsize <= inputsize_max) & IsZero_slice(inputaddr, inputsize, (top - inputsize) + 1);
+ disabled = false;
+ baseregister = TTBR0_EL3;
+ descaddr.memattrs = WalkAttrDecode(slice(TCR_EL3, 12, 2), slice(TCR_EL3, 10, 2), slice(TCR_EL3, 8, 2), secondstage);
+ reversedescriptors = [SCTLR_EL3[25]] == 0b1;
+ lookupsecure = true;
+ singlepriv = true;
+ update_AF = HaveAccessFlagUpdateExt() & [TCR_EL3[21]] == 0b1;
+ update_AP = (HaveDirtyBitModifierExt() & update_AF) & [TCR_EL3[22]] == 0b1;
+ hierattrsdisabled = AArch64_HaveHPDExt() & [TCR_EL3[24]] == 0b1
+ } else if IsInHost() then {
+ if [inputaddr[top]] == 0b0 then {
+ largegrain = slice(TCR_EL2, 14, 2) == 0b01;
+ midgrain = slice(TCR_EL2, 14, 2) == 0b10;
+ inputsize = 64 - UInt(slice(TCR_EL2, 0, 6));
+ inputsize_max = if Have52BitVAExt() & largegrain then 52 else 48;
+ if inputsize > inputsize_max then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_max
+ else ()
+ } else ();
+ inputsize_min = 64 - 39;
+ if inputsize < inputsize_min then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_min
+ else ()
+ } else ();
+ basefound = (inputsize >= inputsize_min & inputsize <= inputsize_max) & IsZero_slice(inputaddr, inputsize, (top - inputsize) + 1);
+ disabled = [TCR_EL2[7]] == 0b1;
+ baseregister = TTBR0_EL2;
+ descaddr.memattrs = WalkAttrDecode(slice(TCR_EL2, 12, 2), slice(TCR_EL2, 10, 2), slice(TCR_EL2, 8, 2), secondstage);
+ hierattrsdisabled = AArch64_HaveHPDExt() & [TCR_EL2[41]] == 0b1
+ } else {
+ inputsize = 64 - UInt(slice(TCR_EL2, 16, 6));
+ largegrain = slice(TCR_EL2, 30, 2) == 0b11;
+ midgrain = slice(TCR_EL2, 30, 2) == 0b01;
+ inputsize_max = if Have52BitVAExt() & largegrain then 52 else 48;
+ if inputsize > inputsize_max then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_max
+ else ()
+ } else ();
+ inputsize_min = 64 - 39;
+ if inputsize < inputsize_min then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_min
+ else ()
+ } else ();
+ basefound = (inputsize >= inputsize_min & inputsize <= inputsize_max) & IsOnes_slice(inputaddr, inputsize, (top - inputsize) + 1);
+ disabled = [TCR_EL2[23]] == 0b1;
+ baseregister = TTBR1_EL2;
+ descaddr.memattrs = WalkAttrDecode(slice(TCR_EL2, 28, 2), slice(TCR_EL2, 26, 2), slice(TCR_EL2, 24, 2), secondstage);
+ hierattrsdisabled = AArch64_HaveHPDExt() & [TCR_EL2[42]] == 0b1
+ };
+ ps = slice(TCR_EL2, 32, 3);
+ reversedescriptors = [SCTLR_EL2[25]] == 0b1;
+ lookupsecure = false;
+ singlepriv = false;
+ update_AF = HaveAccessFlagUpdateExt() & [TCR_EL2[39]] == 0b1;
+ update_AP = (HaveDirtyBitModifierExt() & update_AF) & [TCR_EL2[40]] == 0b1
+ } else if PSTATE.EL == EL2 then {
+ inputsize = 64 - UInt(slice(TCR_EL2, 0, 6));
+ largegrain = slice(TCR_EL2, 14, 2) == 0b01;
+ midgrain = slice(TCR_EL2, 14, 2) == 0b10;
+ inputsize_max = if Have52BitVAExt() & largegrain then 52 else 48;
+ if inputsize > inputsize_max then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_max
+ else ()
+ } else ();
+ inputsize_min = 64 - 39;
+ if inputsize < inputsize_min then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_min
+ else ()
+ } else ();
+ ps = slice(TCR_EL2, 16, 3);
+ basefound = (inputsize >= inputsize_min & inputsize <= inputsize_max) & IsZero_slice(inputaddr, inputsize, (top - inputsize) + 1);
+ disabled = false;
+ baseregister = TTBR0_EL2;
+ descaddr.memattrs = WalkAttrDecode(slice(TCR_EL2, 12, 2), slice(TCR_EL2, 10, 2), slice(TCR_EL2, 8, 2), secondstage);
+ reversedescriptors = [SCTLR_EL2[25]] == 0b1;
+ lookupsecure = false;
+ singlepriv = true;
+ update_AF = HaveAccessFlagUpdateExt() & [TCR_EL2[39]] == 0b1;
+ update_AP = (HaveDirtyBitModifierExt() & update_AF) & [TCR_EL2[40]] == 0b1;
+ hierattrsdisabled = AArch64_HaveHPDExt() & [TCR_EL2[24]] == 0b1
+ } else {
+ if [inputaddr[top]] == 0b0 then {
+ inputsize = 64 - UInt(slice(TCR_EL1, 0, 6));
+ largegrain = slice(TCR_EL1, 14, 2) == 0b01;
+ midgrain = slice(TCR_EL1, 14, 2) == 0b10;
+ inputsize_max = if Have52BitVAExt() & largegrain then 52 else 48;
+ if inputsize > inputsize_max then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_max
+ else ()
+ } else ();
+ inputsize_min = 64 - 39;
+ if inputsize < inputsize_min then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_min
+ else ()
+ } else ();
+ basefound = (inputsize >= inputsize_min & inputsize <= inputsize_max) & IsZero_slice(inputaddr, inputsize, (top - inputsize) + 1);
+ disabled = [TCR_EL1[7]] == 0b1;
+ baseregister = TTBR0_EL1;
+ descaddr.memattrs = WalkAttrDecode(slice(TCR_EL1, 12, 2), slice(TCR_EL1, 10, 2), slice(TCR_EL1, 8, 2), secondstage);
+ hierattrsdisabled = AArch64_HaveHPDExt() & [TCR_EL1[41]] == 0b1
+ } else {
+ inputsize = 64 - UInt(slice(TCR_EL1, 16, 6));
+ largegrain = slice(TCR_EL1, 30, 2) == 0b11;
+ midgrain = slice(TCR_EL1, 30, 2) == 0b01;
+ inputsize_max = if Have52BitVAExt() & largegrain then 52 else 48;
+ if inputsize > inputsize_max then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_max
+ else ()
+ } else ();
+ inputsize_min = 64 - 39;
+ if inputsize < inputsize_min then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_min
+ else ()
+ } else ();
+ basefound = (inputsize >= inputsize_min & inputsize <= inputsize_max) & IsOnes_slice(inputaddr, inputsize, (top - inputsize) + 1);
+ disabled = [TCR_EL1[23]] == 0b1;
+ baseregister = TTBR1_EL1;
+ descaddr.memattrs = WalkAttrDecode(slice(TCR_EL1, 28, 2), slice(TCR_EL1, 26, 2), slice(TCR_EL1, 24, 2), secondstage);
+ hierattrsdisabled = AArch64_HaveHPDExt() & [TCR_EL1[42]] == 0b1
+ };
+ ps = slice(TCR_EL1, 32, 3);
+ reversedescriptors = [SCTLR_EL1[25]] == 0b1;
+ lookupsecure = IsSecure();
+ singlepriv = false;
+ update_AF = HaveAccessFlagUpdateExt() & [TCR_EL1[39]] == 0b1;
+ update_AP = (HaveDirtyBitModifierExt() & update_AF) & [TCR_EL1[40]] == 0b1
+ };
+ if largegrain then {
+ grainsize = 16;
+ firstblocklevel = if Have52BitPAExt() then 1 else 2
+ } else if midgrain then {
+ grainsize = 14;
+ firstblocklevel = 2
+ } else {
+ grainsize = 12;
+ firstblocklevel = 1
+ };
+ stride = grainsize - 3;
+ level = 4 - RoundUp(Real(inputsize - grainsize) / Real(stride))
+ } else {
+ inputaddr = ZeroExtend(ipaddress);
+ inputsize = 64 - UInt(slice(VTCR_EL2, 0, 6));
+ largegrain = slice(VTCR_EL2, 14, 2) == 0b01;
+ midgrain = slice(VTCR_EL2, 14, 2) == 0b10;
+ inputsize_max = if Have52BitVAExt() & largegrain then 52 else 48;
+ if inputsize > inputsize_max then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_max
+ else ()
+ } else ();
+ inputsize_min = 64 - 39;
+ if inputsize < inputsize_min then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_min
+ else ()
+ } else ();
+ ps = slice(VTCR_EL2, 16, 3);
+ basefound = (inputsize >= inputsize_min & inputsize <= inputsize_max) & IsZero_slice(inputaddr, inputsize, negate(inputsize) + 64);
+ disabled = false;
+ baseregister = VTTBR_EL2;
+ descaddr.memattrs = WalkAttrDecode(slice(VTCR_EL2, 8, 2), slice(VTCR_EL2, 10, 2), slice(VTCR_EL2, 12, 2), secondstage);
+ reversedescriptors = [SCTLR_EL2[25]] == 0b1;
+ lookupsecure = false;
+ singlepriv = true;
+ update_AF = HaveAccessFlagUpdateExt() & [VTCR_EL2[21]] == 0b1;
+ update_AP = (HaveDirtyBitModifierExt() & update_AF) & [VTCR_EL2[22]] == 0b1;
+ startlevel = UInt(slice(VTCR_EL2, 6, 2));
+ if largegrain then {
+ grainsize = 16;
+ level = 3 - startlevel;
+ firstblocklevel = if Have52BitPAExt() then 1 else 2
+ } else if midgrain then {
+ grainsize = 14;
+ level = 3 - startlevel;
+ firstblocklevel = 2
+ } else {
+ grainsize = 12;
+ level = 2 - startlevel;
+ firstblocklevel = 1
+ };
+ stride = grainsize - 3;
+ if largegrain then
+ if level == 0 | level == 1 & PAMax() <= 42 then basefound = false
+ else ()
+ else if midgrain then
+ if level == 0 | level == 1 & PAMax() <= 40 then basefound = false
+ else ()
+ else if level < 0 | level == 0 & PAMax() <= 42 then basefound = false
+ else ();
+ inputsizecheck = inputsize;
+ if inputsize > PAMax() & (~(ELUsingAArch32(EL1)) | inputsize > 40) then match ConstrainUnpredictable(Unpredictable_LARGEIPA) {
+ Constraint_FORCE => {
+ inputsize = PAMax();
+ inputsizecheck = PAMax()
+ },
+ Constraint_FORCENOSLCHECK => inputsize = PAMax(),
+ Constraint_FAULT => basefound = false,
+ _ => Unreachable()
+ } else ();
+ startsizecheck = inputsizecheck - ((3 - level) * stride + grainsize);
+ if startsizecheck < 1 | startsizecheck > stride + 4 then basefound = false
+ else ()
+ };
+ if ~(basefound) | disabled then {
+ level = 0;
+ __tmp_19 : AddressDescriptor = result.addrdesc;
+ __tmp_19.fault = AArch64_TranslationFault(ipaddress, level, acctype, iswrite, secondstage, s2fs1walk);
+ result.addrdesc = __tmp_19;
+ return(result)
+ } else ();
+ outputsize : int = undefined;
+ match ps {
+ 0b000 => outputsize = 32,
+ 0b001 => outputsize = 36,
+ 0b010 => outputsize = 40,
+ 0b011 => outputsize = 42,
+ 0b100 => outputsize = 44,
+ 0b101 => outputsize = 48,
+ 0b110 => outputsize = if Have52BitPAExt() & largegrain then 52 else 48,
+ _ => outputsize = 48
+ };
+ if outputsize > PAMax() then outputsize = PAMax()
+ else ();
+ if outputsize < 48 & ~(IsZero_slice(baseregister, outputsize, negate(outputsize) + 48)) then {
+ level = 0;
+ __tmp_20 : AddressDescriptor = result.addrdesc;
+ __tmp_20.fault = AArch64_AddressSizeFault(ipaddress, level, acctype, iswrite, secondstage, s2fs1walk);
+ result.addrdesc = __tmp_20;
+ return(result)
+ } else ();
+ let 'baselowerbound = ((3 + inputsize) - ((3 - level) * stride + grainsize)) : int;
+ assert(constraint(0 <= 'baselowerbound & 'baselowerbound <= 48));
+ baseaddress : bits(52) = undefined;
+ if outputsize == 52 then let 'z = (if baselowerbound < 6 then 6 else baselowerbound) : int in {
+ assert(constraint(0 <= 'z & 'z <= 48));
+ baseaddress = (slice(baseregister, 2, 4) @ slice(baseregister, z, negate(z) + 48)) @ Zeros(z)
+ } else
+ baseaddress = ZeroExtend(slice(baseregister, baselowerbound, negate(baselowerbound) + 48) @ Zeros(baselowerbound));
+ ns_table : bits(1) = if lookupsecure then 0b0 else 0b1;
+ ap_table : bits(2) = 0b00;
+ xn_table : bits(1) = 0b0;
+ pxn_table : bits(1) = 0b0;
+ addrselecttop : int = inputsize - 1;
+ apply_nvnv1_effect : bool = ((HaveNVExt() & HaveEL(EL2)) & [HCR_EL2[42]] == 0b1) & [HCR_EL2[43]] == 0b1;
+ blocktranslate : bool = undefined;
+ desc : bits(64) = undefined;
+ accdesc : AccessDescriptor = undefined;
+ hwupdatewalk : bool = undefined;
+ descaddr2 : AddressDescriptor = undefined;
+ addrselectbottom : int = undefined;
+ repeat {
+ addrselectbottom = (3 - level) * stride + grainsize;
+ index : bits(52) = ZeroExtend_slice_append(inputaddr, addrselectbottom, (addrselecttop - addrselectbottom) + 1, 0b000);
+ __tmp_21 : FullAddress = descaddr.paddress;
+ __tmp_21.physicaladdress = baseaddress | index;
+ descaddr.paddress = __tmp_21;
+ __tmp_22 : FullAddress = descaddr.paddress;
+ __tmp_22.NS = ns_table;
+ descaddr.paddress = __tmp_22;
+ if secondstage | ~(HasS2Translation()) then descaddr2 = descaddr
+ else {
+ hwupdatewalk = false;
+ descaddr2 = AArch64_SecondStageWalk(descaddr, vaddress, acctype, iswrite, 8, hwupdatewalk);
+ if IsFault(descaddr2) then {
+ __tmp_23 : AddressDescriptor = result.addrdesc;
+ __tmp_23.fault = descaddr2.fault;
+ result.addrdesc = __tmp_23;
+ return(result)
+ } else ()
+ };
+ descaddr2.vaddress = ZeroExtend(vaddress);
+ accdesc = CreateAccessDescriptorPTW(acctype, secondstage, s2fs1walk, level);
+ desc = aget__Mem(descaddr2, 8, accdesc);
+ if reversedescriptors then desc = BigEndianReverse(desc)
+ else ();
+ if [desc[0]] == 0b0 | slice(desc, 0, 2) == 0b01 & level == 3 then {
+ __tmp_24 : AddressDescriptor = result.addrdesc;
+ __tmp_24.fault = AArch64_TranslationFault(ipaddress, level, acctype, iswrite, secondstage, s2fs1walk);
+ result.addrdesc = __tmp_24;
+ return(result)
+ } else ();
+ if slice(desc, 0, 2) == 0b01 | level == 3 then blocktranslate = true
+ else {
+ if (outputsize < 52 & largegrain) & ~(IsZero(slice(desc, 12, 4))) | outputsize < 48 & ~(IsZero_slice(desc, outputsize, negate(outputsize) + 48)) then {
+ __tmp_25 : AddressDescriptor = result.addrdesc;
+ __tmp_25.fault = AArch64_AddressSizeFault(ipaddress, level, acctype, iswrite, secondstage, s2fs1walk);
+ result.addrdesc = __tmp_25;
+ return(result)
+ } else ();
+ let 'gsz = grainsize;
+ assert(constraint(0 <= 'gsz & 'gsz <= 48));
+ if outputsize == 52 then
+ baseaddress = (slice(desc, 12, 4) @ slice(desc, gsz, negate(gsz) + 48)) @ Zeros(gsz)
+ else
+ baseaddress = ZeroExtend(slice(desc, gsz, negate(gsz) + 48) @ Zeros(gsz));
+ if ~(secondstage) then ns_table = ns_table | [desc[63]]
+ else ();
+ if ~(secondstage) & ~(hierattrsdisabled) then {
+ ap_table = __SetSlice_bits(2, 1, ap_table, 1, [ap_table[1]] | [desc[62]]);
+ if apply_nvnv1_effect then pxn_table = pxn_table | [desc[60]]
+ else xn_table = xn_table | [desc[60]];
+ if ~(singlepriv) then
+ if ~(apply_nvnv1_effect) then {
+ pxn_table = pxn_table | [desc[59]];
+ ap_table = __SetSlice_bits(2, 1, ap_table, 0, [ap_table[0]] | [desc[61]])
+ } else ()
+ else ()
+ } else ();
+ level = level + 1;
+ addrselecttop = addrselectbottom - 1;
+ blocktranslate = false
+ }
+ } until blocktranslate;
+ if level < firstblocklevel then {
+ __tmp_26 : AddressDescriptor = result.addrdesc;
+ __tmp_26.fault = AArch64_TranslationFault(ipaddress, level, acctype, iswrite, secondstage, s2fs1walk);
+ result.addrdesc = __tmp_26;
+ return(result)
+ } else ();
+ contiguousbitcheck : bool = undefined;
+ if largegrain then contiguousbitcheck = level == 2 & inputsize < 34
+ else if midgrain then contiguousbitcheck = level == 2 & inputsize < 30
+ else contiguousbitcheck = level == 1 & inputsize < 34;
+ if contiguousbitcheck & [desc[52]] == 0b1 then
+ if undefined then {
+ __tmp_27 : AddressDescriptor = result.addrdesc;
+ __tmp_27.fault = AArch64_TranslationFault(ipaddress, level, acctype, iswrite, secondstage, s2fs1walk);
+ result.addrdesc = __tmp_27;
+ return(result)
+ } else ()
+ else ();
+ if (outputsize < 52 & largegrain) & ~(IsZero(slice(desc, 12, 4))) | outputsize < 48 & ~(IsZero_slice(desc, outputsize, negate(outputsize) + 48)) then {
+ __tmp_28 : AddressDescriptor = result.addrdesc;
+ __tmp_28.fault = AArch64_AddressSizeFault(ipaddress, level, acctype, iswrite, secondstage, s2fs1walk);
+ result.addrdesc = __tmp_28;
+ return(result)
+ } else ();
+ outputaddress : bits(52) = undefined;
+ let 'asb = addrselectbottom;
+ assert(constraint(0 <= 'asb & 'asb <= 48));
+ if outputsize == 52 then
+ outputaddress = (slice(desc, 12, 4) @ slice(desc, asb, negate(asb) + 48)) @ slice(inputaddr, 0, asb)
+ else
+ outputaddress = ZeroExtend(slice(desc, asb, negate(asb) + 48) @ slice(inputaddr, 0, asb));
+ if [desc[10]] == 0b0 then
+ if ~(update_AF) then {
+ __tmp_29 : AddressDescriptor = result.addrdesc;
+ __tmp_29.fault = AArch64_AccessFlagFault(ipaddress, level, acctype, iswrite, secondstage, s2fs1walk);
+ result.addrdesc = __tmp_29;
+ return(result)
+ } else {
+ __tmp_30 : DescriptorUpdate = result.descupdate;
+ __tmp_30.AF = true;
+ result.descupdate = __tmp_30
+ }
+ else ();
+ if update_AP & [desc[51]] == 0b1 then
+ if ~(secondstage) & [desc[7]] == 0b1 then {
+ desc = __SetSlice_bits(64, 1, desc, 7, 0b0);
+ __tmp_31 : DescriptorUpdate = result.descupdate;
+ __tmp_31.AP = true;
+ result.descupdate = __tmp_31
+ } else if secondstage & [desc[7]] == 0b0 then {
+ desc = __SetSlice_bits(64, 1, desc, 7, 0b1);
+ __tmp_32 : DescriptorUpdate = result.descupdate;
+ __tmp_32.AP = true;
+ result.descupdate = __tmp_32
+ } else ()
+ else ();
+ __tmp_33 : DescriptorUpdate = result.descupdate;
+ __tmp_33.descaddr = descaddr;
+ result.descupdate = __tmp_33;
+ xn : bits(1) = undefined;
+ pxn : bits(1) = undefined;
+ if apply_nvnv1_effect then {
+ pxn = [desc[54]];
+ xn = 0b0
+ } else {
+ xn = [desc[54]];
+ pxn = [desc[53]]
+ };
+ contiguousbit : bits(1) = [desc[52]];
+ nG : bits(1) = [desc[11]];
+ sh : bits(2) = slice(desc, 8, 2);
+ ap : bits(3) = undefined;
+ if apply_nvnv1_effect then ap = [desc[7]] @ 0b01
+ else ap = slice(desc, 6, 2) @ 0b1;
+ memattr : bits(4) = slice(desc, 2, 4);
+ result.domain = undefined;
+ result.level = level;
+ result.blocksize = 2 ^ ((3 - level) * stride + grainsize);
+ if ~(secondstage) then {
+ __tmp_34 : Permissions = result.perms;
+ __tmp_34.xn = xn | xn_table;
+ result.perms = __tmp_34;
+ __tmp_35 : bits(3) = result.perms.ap;
+ __tmp_35 = __SetSlice_bits(3, 1, __tmp_35, 2, [ap[2]] | [ap_table[1]]);
+ __tmp_36 : Permissions = result.perms;
+ __tmp_36.ap = __tmp_35;
+ result.perms = __tmp_36;
+ if ~(singlepriv) then {
+ __tmp_37 : bits(3) = result.perms.ap;
+ __tmp_37 = __SetSlice_bits(3, 1, __tmp_37, 1, [ap[1]] & ~([ap_table[0]]));
+ __tmp_38 : Permissions = result.perms;
+ __tmp_38.ap = __tmp_37;
+ result.perms = __tmp_38;
+ __tmp_39 : Permissions = result.perms;
+ __tmp_39.pxn = pxn | pxn_table;
+ result.perms = __tmp_39;
+ if IsSecure() then result.nG = nG | ns_table
+ else result.nG = nG
+ } else {
+ __tmp_40 : bits(3) = result.perms.ap;
+ __tmp_40 = __SetSlice_bits(3, 1, __tmp_40, 1, 0b1);
+ __tmp_41 : Permissions = result.perms;
+ __tmp_41.ap = __tmp_40;
+ result.perms = __tmp_41;
+ __tmp_42 : Permissions = result.perms;
+ __tmp_42.pxn = 0b0;
+ result.perms = __tmp_42;
+ result.nG = 0b0
+ };
+ __tmp_43 : bits(3) = result.perms.ap;
+ __tmp_43 = __SetSlice_bits(3, 1, __tmp_43, 0, 0b1);
+ __tmp_44 : Permissions = result.perms;
+ __tmp_44.ap = __tmp_43;
+ result.perms = __tmp_44;
+ __tmp_45 : AddressDescriptor = result.addrdesc;
+ __tmp_45.memattrs = AArch64_S1AttrDecode(sh, slice(memattr, 0, 3), acctype);
+ result.addrdesc = __tmp_45;
+ __tmp_46 : FullAddress = result.addrdesc.paddress;
+ __tmp_46.NS = [memattr[3]] | ns_table;
+ __tmp_47 : AddressDescriptor = result.addrdesc;
+ __tmp_47.paddress = __tmp_46;
+ result.addrdesc = __tmp_47
+ } else {
+ __tmp_48 : bits(3) = result.perms.ap;
+ __tmp_48 = __SetSlice_bits(3, 2, __tmp_48, 1, slice(ap, 1, 2));
+ __tmp_49 : Permissions = result.perms;
+ __tmp_49.ap = __tmp_48;
+ result.perms = __tmp_49;
+ __tmp_50 : bits(3) = result.perms.ap;
+ __tmp_50 = __SetSlice_bits(3, 1, __tmp_50, 0, 0b1);
+ __tmp_51 : Permissions = result.perms;
+ __tmp_51.ap = __tmp_50;
+ result.perms = __tmp_51;
+ __tmp_52 : Permissions = result.perms;
+ __tmp_52.xn = xn;
+ result.perms = __tmp_52;
+ if HaveExtendedExecuteNeverExt() then {
+ __tmp_53 : Permissions = result.perms;
+ __tmp_53.xxn = [desc[53]];
+ result.perms = __tmp_53
+ } else ();
+ __tmp_54 : Permissions = result.perms;
+ __tmp_54.pxn = 0b0;
+ result.perms = __tmp_54;
+ result.nG = 0b0;
+ __tmp_55 : AddressDescriptor = result.addrdesc;
+ __tmp_55.memattrs = S2AttrDecode(sh, memattr, acctype);
+ result.addrdesc = __tmp_55;
+ __tmp_56 : FullAddress = result.addrdesc.paddress;
+ __tmp_56.NS = 0b1;
+ __tmp_57 : AddressDescriptor = result.addrdesc;
+ __tmp_57.paddress = __tmp_56;
+ result.addrdesc = __tmp_57
+ };
+ __tmp_58 : FullAddress = result.addrdesc.paddress;
+ __tmp_58.physicaladdress = outputaddress;
+ __tmp_59 : AddressDescriptor = result.addrdesc;
+ __tmp_59.paddress = __tmp_58;
+ result.addrdesc = __tmp_59;
+ __tmp_60 : AddressDescriptor = result.addrdesc;
+ __tmp_60.fault = AArch64_NoFault();
+ result.addrdesc = __tmp_60;
+ result.contiguous = contiguousbit == 0b1;
+ if HaveCommonNotPrivateTransExt() then result.CnP = [baseregister[0]]
+ else ();
+ return(result)
+}
+
+/* IsZero_slice2: true iff the 'l-bit slice of xs starting at bit index i
+   consists entirely of zero bits.  The assertion pins the slice length to
+   be non-negative before slicing. */
+val IsZero_slice2 : forall 'n, 'n >= 0.
+ (bits('n), int, int) -> bool effect {escape}
+
+function IsZero_slice2 (xs, i, 'l) = {
+ assert(constraint('l >= 0));
+ let sub = slice(xs, i, l);
+ IsZero(sub)
+}
+
+/* AArch64_TranslateAddressS1Off: stage-1 address translation with the MMU
+   disabled.  Returns a TLBRecord describing a flat (identity) mapping of the
+   low 52 bits of vaddress with all-permissive permissions, or an
+   AddressSizeFault if vaddress has bits set above PAMax().  Memory
+   attributes come from the default-cacheable / device / instruction-fetch
+   rules below.
+   NOTE(review): generated from ASL; each __tmp_N sequence encodes a nested
+   record-field update (read struct, update one field, write struct back). */
+val AArch64_TranslateAddressS1Off : (bits(64), AccType, bool) -> TLBRecord effect {rreg, undef, escape}
+
+function AArch64_TranslateAddressS1Off (vaddress, acctype, iswrite) = {
+ assert(~(ELUsingAArch32(S1TranslationRegime())));
+ result : TLBRecord = undefined;
+ Top : int = AddrTop(vaddress, false, PSTATE.EL);
+ s2fs1walk : bool = undefined;
+ secondstage : bool = undefined;
+ ipaddress : bits(52) = undefined;
+ level : int = undefined;
+ /* Address-size check: fault unless vaddress bits [Top:PAMax()] are zero. */
+ if ~(IsZero_slice2(vaddress, PAMax(), (Top + 1) - PAMax())) then {
+ level = 0;
+ ipaddress = undefined;
+ secondstage = false;
+ s2fs1walk = false;
+ __tmp_198 : AddressDescriptor = result.addrdesc;
+ __tmp_198.fault = AArch64_AddressSizeFault(ipaddress, level, acctype, iswrite, secondstage, s2fs1walk);
+ result.addrdesc = __tmp_198;
+ return(result)
+ } else ();
+ /* HCR_EL2 bit 12 — presumably HCR_EL2.DC (default cacheable); confirm
+    against the ARM ARM register description. */
+ default_cacheable : bool = HasS2Translation() & [HCR_EL2[12]] == 0b1;
+ cacheable : bool = undefined;
+ if default_cacheable then {
+ /* Default-cacheable: Normal memory, inner write-back RW-allocate,
+    not shareable (outer attributes are copied from inner at the end). */
+ __tmp_199 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_199.typ = MemType_Normal;
+ __tmp_200 : AddressDescriptor = result.addrdesc;
+ __tmp_200.memattrs = __tmp_199;
+ result.addrdesc = __tmp_200;
+ __tmp_201 : MemAttrHints = result.addrdesc.memattrs.inner;
+ __tmp_201.attrs = MemAttr_WB;
+ __tmp_202 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_202.inner = __tmp_201;
+ __tmp_203 : AddressDescriptor = result.addrdesc;
+ __tmp_203.memattrs = __tmp_202;
+ result.addrdesc = __tmp_203;
+ __tmp_204 : MemAttrHints = result.addrdesc.memattrs.inner;
+ __tmp_204.hints = MemHint_RWA;
+ __tmp_205 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_205.inner = __tmp_204;
+ __tmp_206 : AddressDescriptor = result.addrdesc;
+ __tmp_206.memattrs = __tmp_205;
+ result.addrdesc = __tmp_206;
+ __tmp_207 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_207.shareable = false;
+ __tmp_208 : AddressDescriptor = result.addrdesc;
+ __tmp_208.memattrs = __tmp_207;
+ result.addrdesc = __tmp_208;
+ __tmp_209 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_209.outershareable = false;
+ __tmp_210 : AddressDescriptor = result.addrdesc;
+ __tmp_210.memattrs = __tmp_209;
+ result.addrdesc = __tmp_210
+ } else if acctype != AccType_IFETCH then {
+ /* Data access, not default-cacheable: Device-nGnRnE. */
+ __tmp_211 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_211.typ = MemType_Device;
+ __tmp_212 : AddressDescriptor = result.addrdesc;
+ __tmp_212.memattrs = __tmp_211;
+ result.addrdesc = __tmp_212;
+ __tmp_213 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_213.device = DeviceType_nGnRnE;
+ __tmp_214 : AddressDescriptor = result.addrdesc;
+ __tmp_214.memattrs = __tmp_213;
+ result.addrdesc = __tmp_214;
+ __tmp_215 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_215.inner = undefined;
+ __tmp_216 : AddressDescriptor = result.addrdesc;
+ __tmp_216.memattrs = __tmp_215;
+ result.addrdesc = __tmp_216
+ } else {
+ /* Instruction fetch: Normal memory, cacheability taken from SCTLR
+    bit 12 (presumably SCTLR.I — confirm), always shareable. */
+ cacheable = [aget_SCTLR()[12]] == 0b1;
+ __tmp_217 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_217.typ = MemType_Normal;
+ __tmp_218 : AddressDescriptor = result.addrdesc;
+ __tmp_218.memattrs = __tmp_217;
+ result.addrdesc = __tmp_218;
+ if cacheable then {
+ __tmp_219 : MemAttrHints = result.addrdesc.memattrs.inner;
+ __tmp_219.attrs = MemAttr_WT;
+ __tmp_220 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_220.inner = __tmp_219;
+ __tmp_221 : AddressDescriptor = result.addrdesc;
+ __tmp_221.memattrs = __tmp_220;
+ result.addrdesc = __tmp_221;
+ __tmp_222 : MemAttrHints = result.addrdesc.memattrs.inner;
+ __tmp_222.hints = MemHint_RA;
+ __tmp_223 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_223.inner = __tmp_222;
+ __tmp_224 : AddressDescriptor = result.addrdesc;
+ __tmp_224.memattrs = __tmp_223;
+ result.addrdesc = __tmp_224
+ } else {
+ __tmp_225 : MemAttrHints = result.addrdesc.memattrs.inner;
+ __tmp_225.attrs = MemAttr_NC;
+ __tmp_226 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_226.inner = __tmp_225;
+ __tmp_227 : AddressDescriptor = result.addrdesc;
+ __tmp_227.memattrs = __tmp_226;
+ result.addrdesc = __tmp_227;
+ __tmp_228 : MemAttrHints = result.addrdesc.memattrs.inner;
+ __tmp_228.hints = MemHint_No;
+ __tmp_229 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_229.inner = __tmp_228;
+ __tmp_230 : AddressDescriptor = result.addrdesc;
+ __tmp_230.memattrs = __tmp_229;
+ result.addrdesc = __tmp_230
+ };
+ __tmp_231 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_231.shareable = true;
+ __tmp_232 : AddressDescriptor = result.addrdesc;
+ __tmp_232.memattrs = __tmp_231;
+ result.addrdesc = __tmp_232;
+ __tmp_233 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_233.outershareable = true;
+ __tmp_234 : AddressDescriptor = result.addrdesc;
+ __tmp_234.memattrs = __tmp_233;
+ result.addrdesc = __tmp_234
+ };
+ /* Outer attributes mirror inner, then defaults are filled in. */
+ __tmp_235 : MemoryAttributes = result.addrdesc.memattrs;
+ __tmp_235.outer = result.addrdesc.memattrs.inner;
+ __tmp_236 : AddressDescriptor = result.addrdesc;
+ __tmp_236.memattrs = __tmp_235;
+ result.addrdesc = __tmp_236;
+ __tmp_237 : AddressDescriptor = result.addrdesc;
+ __tmp_237.memattrs = MemAttrDefaults(result.addrdesc.memattrs);
+ result.addrdesc = __tmp_237;
+ /* MMU off: no execute-never or privileged-execute-never restrictions. */
+ __tmp_238 : Permissions = result.perms;
+ __tmp_238.ap = undefined;
+ result.perms = __tmp_238;
+ __tmp_239 : Permissions = result.perms;
+ __tmp_239.xn = 0b0;
+ result.perms = __tmp_239;
+ __tmp_240 : Permissions = result.perms;
+ __tmp_240.pxn = 0b0;
+ result.perms = __tmp_240;
+ result.nG = undefined;
+ result.contiguous = undefined;
+ result.domain = undefined;
+ result.level = undefined;
+ result.blocksize = undefined;
+ /* Flat mapping: physical address is the low 52 bits of vaddress. */
+ __tmp_241 : FullAddress = result.addrdesc.paddress;
+ __tmp_241.physicaladdress = slice(vaddress, 0, 52);
+ __tmp_242 : AddressDescriptor = result.addrdesc;
+ __tmp_242.paddress = __tmp_241;
+ result.addrdesc = __tmp_242;
+ __tmp_243 : FullAddress = result.addrdesc.paddress;
+ __tmp_243.NS = if IsSecure() then 0b0 else 0b1;
+ __tmp_244 : AddressDescriptor = result.addrdesc;
+ __tmp_244.paddress = __tmp_243;
+ result.addrdesc = __tmp_244;
+ __tmp_245 : AddressDescriptor = result.addrdesc;
+ __tmp_245.fault = AArch64_NoFault();
+ result.addrdesc = __tmp_245;
+ return(result)
+}
+
+/* AArch64_MaybeZeroRegisterUppers: called while executing in AArch32 (the
+   assert below).  For each general-purpose register in a range selected by
+   the current EL and the AArch32/AArch64-ness of the higher ELs, it may —
+   as a CONSTRAINED UNPREDICTABLE choice (Unpredictable_ZEROUPPER) — zero
+   bits [63:32] of _R[n].  R15 is only included when include_R15_name is
+   chosen true. */
+val AArch64_MaybeZeroRegisterUppers : unit -> unit effect {escape, rreg, undef, wreg}
+
+function AArch64_MaybeZeroRegisterUppers () = {
+ assert(UsingAArch32(), "UsingAArch32()");
+ include_R15_name : bool = undefined;
+ last : int = undefined;
+ first : int = undefined;
+ /* EL0 under an AArch64 EL1: only R0-R14 are candidates. */
+ if PSTATE.EL == EL0 & ~(ELUsingAArch32(EL1)) then {
+ first = 0;
+ last = 14;
+ include_R15_name = false
+ } else if (((PSTATE.EL == EL0 | PSTATE.EL == EL1) & HaveEL(EL2)) & ~(IsSecure())) & ~(ELUsingAArch32(EL2)) then {
+ first = 0;
+ last = 30;
+ include_R15_name = false
+ } else {
+ first = 0;
+ last = 30;
+ include_R15_name = true
+ };
+ foreach (n from first to last by 1 in inc)
+ if (n != 15 | include_R15_name) & ConstrainUnpredictableBool(Unpredictable_ZEROUPPER) then {
+ /* Zero the upper 32 bits of _R[n], leaving bits [31:0] intact. */
+ __tmp_3 : bits(64) = _R[n];
+ __tmp_3 = __SetSlice_bits(64, 32, __tmp_3, 32, Zeros());
+ _R[n] = __tmp_3
+ } else ();
+ ()
+}
+
+/* DCPSInstruction: implements the DCPS1/DCPS2/DCPS3 debug-state change of
+   exception level.  First resolves the EL (handle_el) that will actually
+   handle the request — raising UndefinedFault() when the requested target
+   is not permitted — then switches mode/state for either an AArch32 or an
+   AArch64 handling regime, leaving the link/ELR, SPSR, ESR and debug
+   link/SPSR registers UNKNOWN as the generated 'undefined' writes show.
+   NOTE(review): bit indices such as HCR_EL2[27], SCTLR[23], HCR_EL2[34],
+   EDSCR[16] and aget_SCTLR()[21]/[25] are presumably TGE, SPAN, E2H, SDD,
+   IESB/EE respectively — confirm against the ARM ARM. */
+val DCPSInstruction : bits(2) -> unit effect {escape, rreg, undef, wreg}
+
+function DCPSInstruction target_el = {
+ SynchronizeContext();
+ handle_el : bits(2) = undefined;
+ /* Resolve which EL will handle the debug state entry. */
+ match target_el {
+ ? if ? == EL1 => if PSTATE.EL == EL2 | PSTATE.EL == EL3 & ~(UsingAArch32()) then handle_el = PSTATE.EL else if (HaveEL(EL2) & ~(IsSecure())) & [HCR_EL2[27]] == 0b1 then UndefinedFault() else handle_el = EL1,
+ ? if ? == EL2 => if ~(HaveEL(EL2)) then UndefinedFault() else if PSTATE.EL == EL3 & ~(UsingAArch32()) then handle_el = EL3 else if IsSecure() then UndefinedFault() else handle_el = EL2,
+ ? if ? == EL3 => {
+ if [EDSCR[16]] == 0b1 | ~(HaveEL(EL3)) then UndefinedFault() else ();
+ handle_el = EL3
+ },
+ _ => Unreachable()
+ };
+ from_secure : bool = IsSecure();
+ if ELUsingAArch32(handle_el) then {
+ /* Handling EL runs AArch32: switch processor mode accordingly. */
+ if PSTATE.M == M32_Monitor then SCR = __SetSlice_bits(32, 1, SCR, 0, 0b0) else ();
+ assert(UsingAArch32(), "UsingAArch32()");
+ match handle_el {
+ ? if ? == EL1 => {
+ AArch32_WriteMode(M32_Svc);
+ if HavePANExt() & [SCTLR[23]] == 0b0 then PSTATE.PAN = 0b1 else ()
+ },
+ ? if ? == EL2 => AArch32_WriteMode(M32_Hyp),
+ ? if ? == EL3 => {
+ AArch32_WriteMode(M32_Monitor);
+ if HavePANExt() then if ~(from_secure) then PSTATE.PAN = 0b0 else if [SCTLR[23]] == 0b0 then PSTATE.PAN = 0b1 else () else ()
+ }
+ };
+ if handle_el == EL2 then {
+ ELR_hyp = undefined;
+ HSR = undefined
+ } else set_LR(undefined);
+ aset_SPSR(undefined);
+ PSTATE.E = [aget_SCTLR()[25]];
+ DLR = undefined;
+ DSPSR = undefined
+ } else {
+ /* Handling EL runs AArch64: enter AArch64 at handle_el with SP_ELx. */
+ if UsingAArch32() then AArch64_MaybeZeroRegisterUppers() else ();
+ PSTATE.nRW = 0b0;
+ PSTATE.SP = 0b1;
+ PSTATE.EL = handle_el;
+ if HavePANExt() & (handle_el == EL1 & [SCTLR_EL1[23]] == 0b0 | ((handle_el == EL2 & [HCR_EL2[34]] == 0b1) & [HCR_EL2[27]] == 0b1) & [SCTLR_EL2[23]] == 0b0) then PSTATE.PAN = 0b1 else ();
+ aset_ELR(undefined);
+ aset_SPSR(undefined);
+ aset_ESR(undefined);
+ DLR_EL0 = undefined;
+ DSPSR_EL0 = undefined;
+ if HaveUAOExt() then PSTATE.UAO = 0b0 else ()
+ };
+ UpdateEDSCRFields();
+ if (HaveRASExt() & [aget_SCTLR()[21]] == 0b1) & ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then ErrorSynchronizationBarrier(MBReqDomain_FullSystem, MBReqTypes_All) else ();
+ ()
+}
+
+/* Debug-exception entry point for the DCPS instruction family: simply
+   delegates to DCPSInstruction with the requested target EL. */
+val aarch64_system_exceptions_debug_exception : bits(2) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_system_exceptions_debug_exception el = DCPSInstruction(el)
+
+/* AArch64_GenerateDebugExceptionsFrom: whether debug exceptions are
+   generated when executing at EL 'from' with the given security state and
+   debug mask bit.  Always false when the OS lock is set, the OS double
+   lock is active, or the PE is halted.  Otherwise debug exceptions route
+   to EL2 or EL1 (target) and are enabled when the target is strictly
+   higher than 'from', or — at the target itself — when local debug is
+   enabled and not masked.
+   NOTE(review): bit indices OSLSR_EL1[1], HCR_EL2[27], MDCR_EL2[8],
+   MDCR_EL3[16], MDSCR_EL1[13] are presumably OSLK, TGE, TDE, SDD and KDE
+   respectively — confirm against the ARM ARM. */
+val AArch64_GenerateDebugExceptionsFrom : (bits(2), bool, bits(1)) -> bool effect {escape, rreg, undef}
+
+function AArch64_GenerateDebugExceptionsFrom (from, secure, mask) = {
+ if ([OSLSR_EL1[1]] == 0b1 | DoubleLockStatus()) | Halted() then return(false) else ();
+ route_to_el2 : bool = (HaveEL(EL2) & ~(secure)) & ([HCR_EL2[27]] == 0b1 | [MDCR_EL2[8]] == 0b1);
+ target : bits(2) = if route_to_el2 then EL2 else EL1;
+ enabled : bool = (~(HaveEL(EL3)) | ~(secure)) | [MDCR_EL3[16]] == 0b0;
+ if from == target then enabled = (enabled & [MDSCR_EL1[13]] == 0b1) & mask == 0b0 else enabled = enabled & UInt(target) > UInt(from);
+ return(enabled)
+}
+
+/* AArch64_GenerateDebugExceptions: convenience wrapper asking whether
+   debug exceptions are generated from the *current* context (current EL,
+   current security state, PSTATE.D mask). */
+val AArch64_GenerateDebugExceptions : unit -> bool effect {escape, rreg, undef}
+
+function AArch64_GenerateDebugExceptions () = {
+ let current_el = PSTATE.EL;
+ let secure = IsSecure();
+ return(AArch64_GenerateDebugExceptionsFrom(current_el, secure, PSTATE.D))
+}
+
+/* AArch64_FaultSyndrome: build the 25-bit ISS field for an abort from a
+   FaultRecord.  Layout as written below (LSB first): bits [5:0] the fault
+   status code from EncodeLDFSC, bit 7 set on a stage-2 fault during a
+   stage-1 walk, bit 9 the external-abort flag, and for data-side aborts
+   bit 6 (presumably WnR) and bit 8 (presumably CM for cache-maintenance /
+   address-translation accesses), bits [24:14] the load/store instruction
+   syndrome, bits [12:11] the RAS error type.  Field-name identifications
+   are from the ARM ARM ESR_ELx.ISS description — confirm there. */
+val AArch64_FaultSyndrome : (bool, FaultRecord) -> bits(25) effect {escape, undef}
+
+function AArch64_FaultSyndrome (d_side, fault) = {
+ assert(fault.typ != Fault_None, "((fault).type != Fault_None)");
+ iss : bits(25) = Zeros();
+ if HaveRASExt() & IsExternalSyncAbort(fault) then iss = __SetSlice_bits(25, 2, iss, 11, fault.errortype) else ();
+ if d_side then {
+ /* Instruction syndrome is only valid for a second-stage fault that is
+    not itself a stage-2 fault on a stage-1 walk. */
+ if IsSecondStage(fault) & ~(fault.s2fs1walk) then iss = __SetSlice_bits(25, 11, iss, 14, LSInstructionSyndrome()) else ();
+ if fault.acctype == AccType_DC | fault.acctype == AccType_IC | fault.acctype == AccType_AT then {
+ iss = __SetSlice_bits(25, 1, iss, 8, 0b1);
+ iss = __SetSlice_bits(25, 1, iss, 6, 0b1)
+ } else iss = __SetSlice_bits(25, 1, iss, 6, if fault.write then 0b1 else 0b0)
+ } else ();
+ if IsExternalAbort(fault) then iss = __SetSlice_bits(25, 1, iss, 9, fault.extflag) else ();
+ iss = __SetSlice_bits(25, 1, iss, 7, if fault.s2fs1walk then 0b1 else 0b0);
+ iss = __SetSlice_bits(25, 6, iss, 0, EncodeLDFSC(fault.typ, fault.level));
+ return(iss)
+}
+
+/* AArch64_AbortSyndrome: build the ExceptionRecord for an abort of the
+   given Exception type.  The syndrome comes from AArch64_FaultSyndrome
+   (data-side for DataAbort/Watchpoint, instruction-side otherwise); the
+   faulting virtual address is zero-extended into the record, and the IPA
+   is recorded only when the fault has a valid one (stage-2 faults). */
+val AArch64_AbortSyndrome : (Exception, FaultRecord, bits(64)) -> ExceptionRecord effect {escape, undef}
+
+function AArch64_AbortSyndrome (typ, fault, vaddress) = {
+ exception : ExceptionRecord = ExceptionSyndrome(typ);
+ d_side : bool = typ == Exception_DataAbort | typ == Exception_Watchpoint;
+ exception.syndrome = AArch64_FaultSyndrome(d_side, fault);
+ exception.vaddress = ZeroExtend(vaddress);
+ if IPAValid(fault) then {
+ exception.ipavalid = true;
+ exception.ipaddress = fault.ipaddress
+ } else exception.ipavalid = false;
+ return(exception)
+}
+
+/* AArch64_ExecutingATS1xPInstr: true iff the PrivAT extension is present
+   and the currently executing instruction is a system instruction with
+   op1=0b000, CRn=0x7, CRm=0x9 and op2 in {0b000, 0b001} — presumably the
+   AT S1E1RP / AT S1E1WP address-translation instructions; confirm the
+   encoding against the ARM ARM.  The bits [31:22] == 0b1101010100 guard
+   selects the system-instruction encoding class. */
+val AArch64_ExecutingATS1xPInstr : unit -> bool effect {rreg, undef}
+
+function AArch64_ExecutingATS1xPInstr () = {
+ if ~(HavePrivATExt()) then return(false) else ();
+ instr : bits(32) = ThisInstr();
+ op2 : bits(3) = undefined;
+ CRm : bits(4) = undefined;
+ CRn : bits(4) = undefined;
+ op1 : bits(3) = undefined;
+ if slice(instr, 22, 10) == 0b1101010100 then {
+ op1 = slice(instr, 16, 3);
+ CRn = slice(instr, 12, 4);
+ CRm = slice(instr, 8, 4);
+ op2 = slice(instr, 5, 3);
+ return(((op1 == 0b000 & CRn == 0x7) & CRm == 0x9) & (op2 == 0b000 | op2 == 0b001))
+ } else return(false)
+}
+
+/* AArch64_ExceptionClass: map an Exception type and target EL to the
+   (EC, IL) pair reported in ESR_ELx.  EC values are written in decimal
+   here (e.g. 36 == 0x24 DataAbort, 17 == 0x11 SVC-from-AArch32 — see the
+   ARM ARM EC encodings).  IL starts from the instruction length and is
+   forced to 1 for exception types whose IL is RES1.  The two adjustments
+   at the end: +1 converts the "from lower EL" abort/debug ECs to their
+   "from same EL" variants, and +4 converts the AArch32 SVC/HVC/SMC/
+   FP-trap/BKPT ECs to their AArch64 counterparts. */
+val AArch64_ExceptionClass : (Exception, bits(2)) -> (int, bits(1)) effect {escape, rreg, undef}
+
+function AArch64_ExceptionClass (typ, target_el) = {
+ il : bits(1) = if ThisInstrLength() == 32 then 0b1 else 0b0;
+ from_32 : bool = UsingAArch32();
+ assert(from_32 | il == 0b1, "(from_32 || (il == '1'))");
+ ec : int = undefined;
+ match typ {
+ Exception_Uncategorized => {
+ ec = 0;
+ il = 0b1
+ },
+ Exception_WFxTrap => ec = 1,
+ Exception_CP15RTTrap => {
+ ec = 3;
+ assert(from_32, "from_32")
+ },
+ Exception_CP15RRTTrap => {
+ ec = 4;
+ assert(from_32, "from_32")
+ },
+ Exception_CP14RTTrap => {
+ ec = 5;
+ assert(from_32, "from_32")
+ },
+ Exception_CP14DTTrap => {
+ ec = 6;
+ assert(from_32, "from_32")
+ },
+ Exception_AdvSIMDFPAccessTrap => ec = 7,
+ Exception_FPIDTrap => ec = 8,
+ Exception_CP14RRTTrap => {
+ ec = 12;
+ assert(from_32, "from_32")
+ },
+ Exception_IllegalState => {
+ ec = 14;
+ il = 0b1
+ },
+ Exception_SupervisorCall => ec = 17,
+ Exception_HypervisorCall => ec = 18,
+ Exception_MonitorCall => ec = 19,
+ Exception_SystemRegisterTrap => {
+ ec = 24;
+ assert(~(from_32), "!(from_32)")
+ },
+ Exception_InstructionAbort => {
+ ec = 32;
+ il = 0b1
+ },
+ Exception_PCAlignment => {
+ ec = 34;
+ il = 0b1
+ },
+ Exception_DataAbort => ec = 36,
+ Exception_SPAlignment => {
+ ec = 38;
+ il = 0b1;
+ assert(~(from_32), "!(from_32)")
+ },
+ Exception_FPTrappedException => ec = 40,
+ Exception_SError => {
+ ec = 47;
+ il = 0b1
+ },
+ Exception_Breakpoint => {
+ ec = 48;
+ il = 0b1
+ },
+ Exception_SoftwareStep => {
+ ec = 50;
+ il = 0b1
+ },
+ Exception_Watchpoint => {
+ ec = 52;
+ il = 0b1
+ },
+ Exception_SoftwareBreakpoint => ec = 56,
+ Exception_VectorCatch => {
+ ec = 58;
+ il = 0b1;
+ assert(from_32, "from_32")
+ },
+ _ => Unreachable()
+ };
+ /* "Lower EL" -> "same EL" variants for aborts and debug exceptions. */
+ if (ec == 32 | ec == 36 | ec == 48 | ec == 50 | ec == 52) & target_el == PSTATE.EL then ec = ec + 1 else ();
+ /* AArch32 -> AArch64 variants for SVC/HVC/SMC/FP-trap/BKPT. */
+ if (ec == 17 | ec == 18 | ec == 19 | ec == 40 | ec == 56) & ~(from_32) then ec = ec + 4 else ();
+ return((ec, il))
+}
+
+/* AArch64_ReportException: write the syndrome/fault registers for an
+   exception taken to target_el.  Packs EC (6 bits) @ IL (1 bit) @ ISS
+   (25 bits) into ESR via aset_ESR; writes FAR with the faulting virtual
+   address for the address-reporting exception types (UNKNOWN otherwise);
+   and, when targeting EL2, writes the IPA (bits [51:12]) into HPFAR_EL2
+   bits [43:4], or UNKNOWN when no valid IPA exists.
+   The EC 36/37 (DataAbort) special case forces IL to 1 when ISS bit 24 —
+   presumably ISV — is clear; confirm against the ARM ARM. */
+val AArch64_ReportException : (ExceptionRecord, bits(2)) -> unit effect {escape, rreg, undef, wreg}
+
+function AArch64_ReportException (exception, target_el) = {
+ typ : Exception = exception.typ;
+ il : bits(1) = undefined;
+ ec : int = undefined;
+ (ec, il) = AArch64_ExceptionClass(typ, target_el);
+ iss : bits(25) = exception.syndrome;
+ if (ec == 36 | ec == 37) & [iss[24]] == 0b0 then il = 0b1 else ();
+ aset_ESR(target_el, (__GetSlice_int(6, ec, 0) @ il) @ iss);
+ if typ == Exception_InstructionAbort | typ == Exception_PCAlignment | typ == Exception_DataAbort | typ == Exception_Watchpoint then aset_FAR(target_el, exception.vaddress) else aset_FAR(target_el, undefined);
+ if target_el == EL2 then if exception.ipavalid then HPFAR_EL2 = __SetSlice_bits(64, 40, HPFAR_EL2, 4, slice(exception.ipaddress, 12, 40)) else HPFAR_EL2 = __SetSlice_bits(64, 40, HPFAR_EL2, 4, undefined) else ();
+ ()
+}
+
+/* AArch64_ESBOperation: the ESB (error synchronization barrier) hint.
+   Determines the EL that a physical SError would target, decides whether
+   it is masked in the current context, and if a masked SError is pending,
+   defers it: the syndrome is computed (AArch32 or AArch64 form depending
+   on the stage-1 regime) and the pending physical SError is cleared.
+   NOTE(review): DISR_EL1 and DISR are declared as *locals* here, so the
+   deferred-SError syndrome computed below is never written to an
+   architectural register — presumably a stub in this translation until
+   those registers are modelled; confirm. */
+val AArch64_ESBOperation : unit -> unit effect {escape, wreg, undef, rreg}
+
+function AArch64_ESBOperation () = {
+ route_to_el3 : bool = HaveEL(EL3) & [SCR_EL3[3]] == 0b1;
+ route_to_el2 : bool = (HaveEL(EL2) & ~(IsSecure())) & ([HCR_EL2[27]] == 0b1 | [HCR_EL2[5]] == 0b1);
+ target : bits(2) = if route_to_el3 then EL3 else if route_to_el2 then EL2 else EL1;
+ mask_active : bool = undefined;
+ /* HCR_EL2 bits 34/27 — presumably E2H/TGE (host regime): EL0 counts as
+    part of EL2 for masking purposes; confirm. */
+ if target == EL1 then mask_active = PSTATE.EL == EL0 | PSTATE.EL == EL1
+ else if (HaveVirtHostExt() & target == EL2) & ((HCR_EL2[34], HCR_EL2[27])) == ((bitone, bitone)) then
+ mask_active = PSTATE.EL == EL0 | PSTATE.EL == EL2
+ else mask_active = PSTATE.EL == target;
+ mask_set : bool = PSTATE.A == 0b1;
+ intdis : bool = Halted() | ExternalDebugInterruptsDisabled(target);
+ masked : bool = (UInt(target) < UInt(PSTATE.EL) | intdis) | mask_active & mask_set;
+ DISR_EL1 : bits(64) = undefined;
+ syndrome64 : bits(25) = undefined;
+ implicit_esb : bool = undefined;
+ DISR : bits(32) = undefined;
+ syndrome32 : AArch32_SErrorSyndrome = undefined;
+ if SErrorPending() & masked then {
+ if ELUsingAArch32(S1TranslationRegime()) then {
+ syndrome32 = AArch32_PhysicalSErrorSyndrome();
+ DISR = AArch32_ReportDeferredSError(syndrome32.AET, syndrome32.ExT)
+ } else {
+ implicit_esb = false;
+ syndrome64 = AArch64_PhysicalSErrorSyndrome(implicit_esb);
+ DISR_EL1 = AArch64_ReportDeferredSError(syndrome64)
+ };
+ ClearPendingPhysicalSError()
+ } else ();
+ ()
+}
+
+/* AArch64_CheckS2Permission: stage-2 access-permission check.  Read/write
+   permission comes from perms.ap[1]/perms.ap[2] (presumably the S2AP
+   field; confirm), execute-never from perms.xn — refined by perms.xxn per
+   EL when the extended-XN extension is present.  The access kind
+   (instruction fetch, atomic/ordered RW, plain write, hardware-update
+   walk, or read) selects which permission must hold; on failure a
+   stage-2 PermissionFault is returned, otherwise NoFault. */
+val AArch64_CheckS2Permission : (Permissions, bits(64), bits(52), int, AccType, bool, bool, bool) -> FaultRecord effect {escape, rreg, undef}
+
+function AArch64_CheckS2Permission (perms, vaddress, ipaddress, 'level, acctype, iswrite, s2fs1walk, hwupdatewalk) = {
+ assert(((HaveEL(EL2) & ~(IsSecure())) & ~(ELUsingAArch32(EL2))) & HasS2Translation(), "(((HaveEL(EL2) && !(IsSecure())) && !(ELUsingAArch32(EL2))) && HasS2Translation())");
+ r : bool = [perms.ap[1]] == 0b1;
+ w : bool = [perms.ap[2]] == 0b1;
+ xn : bool = undefined;
+ /* With extended XN, xn @ xxn selects never/EL1-only/always/EL0-only. */
+ if HaveExtendedExecuteNeverExt() then match perms.xn @ perms.xxn {
+ 0b00 => xn = false,
+ 0b01 => xn = PSTATE.EL == EL1,
+ 0b10 => xn = true,
+ 0b11 => xn = PSTATE.EL == EL0
+ } else xn = perms.xn == 0b1;
+ failedread : bool = undefined;
+ fail : bool = undefined;
+ if acctype == AccType_IFETCH & ~(s2fs1walk) then {
+ fail = xn;
+ failedread = true
+ } else if (acctype == AccType_ATOMICRW | acctype == AccType_ORDEREDRW) & ~(s2fs1walk) then {
+ /* Atomic/ordered RW needs both read and write permission. */
+ fail = ~(r) | ~(w);
+ failedread = ~(r)
+ } else if iswrite & ~(s2fs1walk) then {
+ fail = ~(w);
+ failedread = false
+ } else if hwupdatewalk then {
+ fail = ~(w);
+ failedread = ~(iswrite)
+ } else {
+ fail = ~(r);
+ failedread = ~(iswrite)
+ };
+ secondstage : bool = undefined;
+ domain : bits(4) = undefined;
+ if fail then {
+ domain = undefined;
+ secondstage = true;
+ return(AArch64_PermissionFault(ipaddress, level, acctype, ~(failedread), secondstage, s2fs1walk))
+ } else return(AArch64_NoFault())
+}
+
+/* AArch64_CheckAndUpdateDescriptor (second stage): perform any required
+   hardware update of a translation-table descriptor's Access Flag
+   (bit 10) and write-permission/dirty bit (bit 7) after a stage-2 walk.
+   The update is a read-modify-write of the 8-byte descriptor at
+   result.descaddr using an AccType_ATOMICRW access descriptor.  Returns
+   the incoming fault record unchanged. */
+function AArch64_CheckAndUpdateDescriptor_SecondStage (result, fault, vaddress, acctype, iswrite, s2fs1walk, hwupdatewalk__arg) = {
+ hwupdatewalk = hwupdatewalk__arg;
+ hw_update_AF : bool = undefined;
+ /* AF is hardware-updated only when the walk result requests it and
+    either the access did not fault or the CONSTRAINED UNPREDICTABLE
+    choice (Unpredictable_AFUPDATE) permits updating on a fault.
+    Fix: hw_update_AF was previously left 'undefined' when result.AF is
+    false, making the 'if hw_update_AF | hw_update_AP' test below
+    nondeterministic and able to trigger a spurious descriptor write; it
+    now defaults to false, mirroring hw_update_AP's explicit else. */
+ if result.AF then
+ if fault.typ == Fault_None then hw_update_AF = true
+ else if ConstrainUnpredictable(Unpredictable_AFUPDATE) == Constraint_TRUE then
+ hw_update_AF = true
+ else hw_update_AF = false
+ else hw_update_AF = false;
+ hw_update_AP : bool = undefined;
+ write_perm_req : bool = undefined;
+ /* Dirty-bit update: only for a non-faulting write-class access (or a
+    hardware-update walk), excluding DC/AT operations. */
+ if result.AP & fault.typ == Fault_None then {
+ write_perm_req = (iswrite | acctype == AccType_ATOMICRW | acctype == AccType_ORDEREDRW) & ~(s2fs1walk);
+ hw_update_AP = write_perm_req & ~(acctype == AccType_AT | acctype == AccType_DC) | hwupdatewalk
+ } else hw_update_AP = false;
+ desc : bits(64) = undefined;
+ accdesc : AccessDescriptor = undefined;
+ descaddr2 : AddressDescriptor = undefined;
+ if hw_update_AF | hw_update_AP then {
+ descaddr2 = result.descaddr;
+ accdesc = CreateAccessDescriptor(AccType_ATOMICRW);
+ desc = aget__Mem(descaddr2, 8, accdesc);
+ if hw_update_AF then desc = __SetSlice_bits(64, 1, desc, 10, 0b1)
+ else ();
+ if hw_update_AP then desc = __SetSlice_bits(64, 1, desc, 7, 0b1)
+ else ();
+ aset__Mem(descaddr2, 8, accdesc, desc)
+ } else ();
+ return(fault)
+}
+
+function AArch64_TranslationTableWalk_SecondStage (ipaddress, vaddress, acctype, iswrite, s2fs1walk, 'size) = {
+ assert(((HaveEL(EL2) & ~(IsSecure())) & ~(ELUsingAArch32(EL2))) & HasS2Translation());
+ result : TLBRecord = undefined;
+ descaddr : AddressDescriptor = undefined;
+ baseregister : bits(64) = undefined;
+ inputaddr : bits(64) = undefined;
+ __tmp_18 : MemoryAttributes = descaddr.memattrs;
+ __tmp_18.typ = MemType_Normal;
+ descaddr.memattrs = __tmp_18;
+ startsizecheck : int = undefined;
+ inputsizecheck : int = undefined;
+ startlevel : int = undefined;
+ level : int = undefined;
+ stride : int = undefined;
+ firstblocklevel : int = undefined;
+ grainsize : int = undefined;
+ hierattrsdisabled : bool = undefined;
+ update_AP : bool = undefined;
+ update_AF : bool = undefined;
+ singlepriv : bool = undefined;
+ lookupsecure : bool = undefined;
+ reversedescriptors : bool = undefined;
+ disabled : bool = undefined;
+ basefound : bool = undefined;
+ ps : bits(3) = undefined;
+ inputsize_min : int = undefined;
+ c : Constraint = undefined;
+ inputsize_max : int = undefined;
+ inputsize : int = undefined;
+ midgrain : bool = undefined;
+ largegrain : bool = undefined;
+ top : int = undefined;
+ inputaddr = ZeroExtend(ipaddress);
+ inputsize = 64 - UInt(slice(VTCR_EL2, 0, 6));
+ largegrain = slice(VTCR_EL2, 14, 2) == 0b01;
+ midgrain = slice(VTCR_EL2, 14, 2) == 0b10;
+ inputsize_max = if Have52BitVAExt() & largegrain then 52 else 48;
+ if inputsize > inputsize_max then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_max
+ else ()
+ } else ();
+ inputsize_min = 64 - 39;
+ if inputsize < inputsize_min then {
+ c = ConstrainUnpredictable(Unpredictable_RESTnSZ);
+ assert(c == Constraint_FORCE | c == Constraint_FAULT);
+ if c == Constraint_FORCE then inputsize = inputsize_min
+ else ()
+ } else ();
+ ps = slice(VTCR_EL2, 16, 3);
+ basefound = (inputsize >= inputsize_min & inputsize <= inputsize_max) & IsZero_slice(inputaddr, inputsize, negate(inputsize) + 64);
+ disabled = false;
+ baseregister = VTTBR_EL2;
+ descaddr.memattrs = WalkAttrDecode(slice(VTCR_EL2, 8, 2), slice(VTCR_EL2, 10, 2), slice(VTCR_EL2, 12, 2), true);
+ reversedescriptors = [SCTLR_EL2[25]] == 0b1;
+ lookupsecure = false;
+ singlepriv = true;
+ update_AF = HaveAccessFlagUpdateExt() & [VTCR_EL2[21]] == 0b1;
+ update_AP = (HaveDirtyBitModifierExt() & update_AF) & [VTCR_EL2[22]] == 0b1;
+ startlevel = UInt(slice(VTCR_EL2, 6, 2));
+ if largegrain then {
+ grainsize = 16;
+ level = 3 - startlevel;
+ firstblocklevel = if Have52BitPAExt() then 1 else 2
+ } else if midgrain then {
+ grainsize = 14;
+ level = 3 - startlevel;
+ firstblocklevel = 2
+ } else {
+ grainsize = 12;
+ level = 2 - startlevel;
+ firstblocklevel = 1
+ };
+ stride = grainsize - 3;
+ if largegrain then
+ if level == 0 | level == 1 & PAMax() <= 42 then basefound = false
+ else ()
+ else if midgrain then
+ if level == 0 | level == 1 & PAMax() <= 40 then basefound = false
+ else ()
+ else if level < 0 | level == 0 & PAMax() <= 42 then basefound = false
+ else ();
+ inputsizecheck = inputsize;
+ if inputsize > PAMax() & (~(ELUsingAArch32(EL1)) | inputsize > 40) then match ConstrainUnpredictable(Unpredictable_LARGEIPA) {
+ Constraint_FORCE => {
+ inputsize = PAMax();
+ inputsizecheck = PAMax()
+ },
+ Constraint_FORCENOSLCHECK => inputsize = PAMax(),
+ Constraint_FAULT => basefound = false,
+ _ => Unreachable()
+ } else ();
+ startsizecheck = inputsizecheck - ((3 - level) * stride + grainsize);
+ if startsizecheck < 1 | startsizecheck > stride + 4 then basefound = false
+ else ();
+ if ~(basefound) | disabled then {
+ level = 0;
+ __tmp_19 : AddressDescriptor = result.addrdesc;
+ __tmp_19.fault = AArch64_TranslationFault(ipaddress, level, acctype, iswrite, true, s2fs1walk);
+ result.addrdesc = __tmp_19;
+ return(result)
+ } else ();
+ outputsize : int = undefined;
+ match ps {
+ 0b000 => outputsize = 32,
+ 0b001 => outputsize = 36,
+ 0b010 => outputsize = 40,
+ 0b011 => outputsize = 42,
+ 0b100 => outputsize = 44,
+ 0b101 => outputsize = 48,
+ 0b110 => outputsize = if Have52BitPAExt() & largegrain then 52 else 48,
+ _ => outputsize = 48
+ };
+ if outputsize > PAMax() then outputsize = PAMax()
+ else ();
+ if outputsize < 48 & ~(IsZero_slice(baseregister, outputsize, negate(outputsize) + 48)) then {
+ level = 0;
+ __tmp_20 : AddressDescriptor = result.addrdesc;
+ __tmp_20.fault = AArch64_AddressSizeFault(ipaddress, level, acctype, iswrite, true, s2fs1walk);
+ result.addrdesc = __tmp_20;
+ return(result)
+ } else ();
+ let 'baselowerbound = ((3 + inputsize) - ((3 - level) * stride + grainsize)) : int;
+ assert(constraint(0 <= 'baselowerbound & 'baselowerbound <= 48));
+ baseaddress : bits(52) = undefined;
+ if outputsize == 52 then let 'z = (if baselowerbound < 6 then 6 else baselowerbound) : int in {
+ assert(constraint(0 <= 'z & 'z <= 48));
+ baseaddress = (slice(baseregister, 2, 4) @ slice(baseregister, z, negate(z) + 48)) @ Zeros(z)
+ } else
+ baseaddress = ZeroExtend(slice(baseregister, baselowerbound, negate(baselowerbound) + 48) @ Zeros(baselowerbound));
+ ns_table : bits(1) = if lookupsecure then 0b0 else 0b1;
+ ap_table : bits(2) = 0b00;
+ xn_table : bits(1) = 0b0;
+ pxn_table : bits(1) = 0b0;
+ addrselecttop : int = inputsize - 1;
+ apply_nvnv1_effect : bool = ((HaveNVExt() & HaveEL(EL2)) & [HCR_EL2[42]] == 0b1) & [HCR_EL2[43]] == 0b1;
+ blocktranslate : bool = undefined;
+ desc : bits(64) = undefined;
+ accdesc : AccessDescriptor = undefined;
+ hwupdatewalk : bool = undefined;
+ descaddr2 : AddressDescriptor = undefined;
+ addrselectbottom : int = undefined;
+ repeat {
+ addrselectbottom = (3 - level) * stride + grainsize;
+ index : bits(52) = ZeroExtend_slice_append(inputaddr, addrselectbottom, (addrselecttop - addrselectbottom) + 1, 0b000);
+ __tmp_21 : FullAddress = descaddr.paddress;
+ __tmp_21.physicaladdress = baseaddress | index;
+ descaddr.paddress = __tmp_21;
+ __tmp_22 : FullAddress = descaddr.paddress;
+ __tmp_22.NS = ns_table;
+ descaddr.paddress = __tmp_22;
+ descaddr2 = descaddr;
+ descaddr2.vaddress = ZeroExtend(vaddress);
+ accdesc = CreateAccessDescriptorPTW(acctype, true, s2fs1walk, level);
+ desc = aget__Mem(descaddr2, 8, accdesc);
+ if reversedescriptors then desc = BigEndianReverse(desc)
+ else ();
+ if [desc[0]] == 0b0 | slice(desc, 0, 2) == 0b01 & level == 3 then {
+ __tmp_24 : AddressDescriptor = result.addrdesc;
+ __tmp_24.fault = AArch64_TranslationFault(ipaddress, level, acctype, iswrite, true, s2fs1walk);
+ result.addrdesc = __tmp_24;
+ return(result)
+ } else ();
+ if slice(desc, 0, 2) == 0b01 | level == 3 then blocktranslate = true
+ else {
+ if (outputsize < 52 & largegrain) & ~(IsZero(slice(desc, 12, 4))) | outputsize < 48 & ~(IsZero_slice(desc, outputsize, negate(outputsize) + 48)) then {
+ __tmp_25 : AddressDescriptor = result.addrdesc;
+ __tmp_25.fault = AArch64_AddressSizeFault(ipaddress, level, acctype, iswrite, true, s2fs1walk);
+ result.addrdesc = __tmp_25;
+ return(result)
+ } else ();
+ let 'gsz = grainsize;
+ assert(constraint(0 <= 'gsz & 'gsz <= 48));
+ if outputsize == 52 then
+ baseaddress = (slice(desc, 12, 4) @ slice(desc, gsz, negate(gsz) + 48)) @ Zeros(gsz)
+ else
+ baseaddress = ZeroExtend(slice(desc, gsz, negate(gsz) + 48) @ Zeros(gsz));
+ level = level + 1;
+ addrselecttop = addrselectbottom - 1;
+ blocktranslate = false
+ }
+ } until blocktranslate;
+ if level < firstblocklevel then {
+ __tmp_26 : AddressDescriptor = result.addrdesc;
+ __tmp_26.fault = AArch64_TranslationFault(ipaddress, level, acctype, iswrite, true, s2fs1walk);
+ result.addrdesc = __tmp_26;
+ return(result)
+ } else ();
+ contiguousbitcheck : bool = undefined;
+ if largegrain then contiguousbitcheck = level == 2 & inputsize < 34
+ else if midgrain then contiguousbitcheck = level == 2 & inputsize < 30
+ else contiguousbitcheck = level == 1 & inputsize < 34;
+ if contiguousbitcheck & [desc[52]] == 0b1 then
+ if undefined then {
+ __tmp_27 : AddressDescriptor = result.addrdesc;
+ __tmp_27.fault = AArch64_TranslationFault(ipaddress, level, acctype, iswrite, true, s2fs1walk);
+ result.addrdesc = __tmp_27;
+ return(result)
+ } else ()
+ else ();
+ if (outputsize < 52 & largegrain) & ~(IsZero(slice(desc, 12, 4))) | outputsize < 48 & ~(IsZero_slice(desc, outputsize, negate(outputsize) + 48)) then {
+ __tmp_28 : AddressDescriptor = result.addrdesc;
+ __tmp_28.fault = AArch64_AddressSizeFault(ipaddress, level, acctype, iswrite, true, s2fs1walk);
+ result.addrdesc = __tmp_28;
+ return(result)
+ } else ();
+ outputaddress : bits(52) = undefined;
+ let 'asb = addrselectbottom;
+ assert(constraint(0 <= 'asb & 'asb <= 48));
+ if outputsize == 52 then
+ outputaddress = (slice(desc, 12, 4) @ slice(desc, asb, negate(asb) + 48)) @ slice(inputaddr, 0, asb)
+ else
+ outputaddress = ZeroExtend(slice(desc, asb, negate(asb) + 48) @ slice(inputaddr, 0, asb));
+ if [desc[10]] == 0b0 then
+ if ~(update_AF) then {
+ __tmp_29 : AddressDescriptor = result.addrdesc;
+ __tmp_29.fault = AArch64_AccessFlagFault(ipaddress, level, acctype, iswrite, true, s2fs1walk);
+ result.addrdesc = __tmp_29;
+ return(result)
+ } else {
+ __tmp_30 : DescriptorUpdate = result.descupdate;
+ __tmp_30.AF = true;
+ result.descupdate = __tmp_30
+ }
+ else ();
+ if update_AP & [desc[51]] == 0b1 then
+ if [desc[7]] == 0b0 then {
+ desc = __SetSlice_bits(64, 1, desc, 7, 0b1);
+ __tmp_32 : DescriptorUpdate = result.descupdate;
+ __tmp_32.AP = true;
+ result.descupdate = __tmp_32
+ } else ()
+ else ();
+ __tmp_33 : DescriptorUpdate = result.descupdate;
+ __tmp_33.descaddr = descaddr;
+ result.descupdate = __tmp_33;
+ xn : bits(1) = undefined;
+ pxn : bits(1) = undefined;
+ if apply_nvnv1_effect then {
+ pxn = [desc[54]];
+ xn = 0b0
+ } else {
+ xn = [desc[54]];
+ pxn = [desc[53]]
+ };
+ contiguousbit : bits(1) = [desc[52]];
+ nG : bits(1) = [desc[11]];
+ sh : bits(2) = slice(desc, 8, 2);
+ ap : bits(3) = undefined;
+ if apply_nvnv1_effect then ap = [desc[7]] @ 0b01
+ else ap = slice(desc, 6, 2) @ 0b1;
+ memattr : bits(4) = slice(desc, 2, 4);
+ result.domain = undefined;
+ result.level = level;
+ result.blocksize = 2 ^ ((3 - level) * stride + grainsize);
+ __tmp_48 : bits(3) = result.perms.ap;
+ __tmp_48 = __SetSlice_bits(3, 2, __tmp_48, 1, slice(ap, 1, 2));
+ __tmp_49 : Permissions = result.perms;
+ __tmp_49.ap = __tmp_48;
+ result.perms = __tmp_49;
+ __tmp_50 : bits(3) = result.perms.ap;
+ __tmp_50 = __SetSlice_bits(3, 1, __tmp_50, 0, 0b1);
+ __tmp_51 : Permissions = result.perms;
+ __tmp_51.ap = __tmp_50;
+ result.perms = __tmp_51;
+ __tmp_52 : Permissions = result.perms;
+ __tmp_52.xn = xn;
+ result.perms = __tmp_52;
+ if HaveExtendedExecuteNeverExt() then {
+ __tmp_53 : Permissions = result.perms;
+ __tmp_53.xxn = [desc[53]];
+ result.perms = __tmp_53
+ } else ();
+ __tmp_54 : Permissions = result.perms;
+ __tmp_54.pxn = 0b0;
+ result.perms = __tmp_54;
+ result.nG = 0b0;
+ __tmp_55 : AddressDescriptor = result.addrdesc;
+ __tmp_55.memattrs = S2AttrDecode(sh, memattr, acctype);
+ result.addrdesc = __tmp_55;
+ __tmp_56 : FullAddress = result.addrdesc.paddress;
+ __tmp_56.NS = 0b1;
+ __tmp_57 : AddressDescriptor = result.addrdesc;
+ __tmp_57.paddress = __tmp_56;
+ result.addrdesc = __tmp_57;
+ __tmp_58 : FullAddress = result.addrdesc.paddress;
+ __tmp_58.physicaladdress = outputaddress;
+ __tmp_59 : AddressDescriptor = result.addrdesc;
+ __tmp_59.paddress = __tmp_58;
+ result.addrdesc = __tmp_59;
+ __tmp_60 : AddressDescriptor = result.addrdesc;
+ __tmp_60.fault = AArch64_NoFault();
+ result.addrdesc = __tmp_60;
+ result.contiguous = contiguousbit == 0b1;
+ if HaveCommonNotPrivateTransExt() then result.CnP = [baseregister[0]]
+ else ();
+ return(result)
+}
+
+/* AArch64.SecondStageTranslate: apply the stage 2 translation to a
+ * stage 1 result S1, returning the combined address descriptor, or S1
+ * unchanged when stage 2 translation is disabled.
+ *   vaddress     - original virtual address (for fault reporting)
+ *   acctype/iswrite - kind of access being translated
+ *   wasaligned   - whether the access met its alignment requirement
+ *   s2fs1walk    - true when translating a stage 1 walk's descriptor fetch
+ *   'size        - access size in bytes (forwarded to the table walk)
+ *   hwupdatewalk - hardware descriptor-update walk in progress
+ */
+function AArch64_SecondStageTranslate (S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk, 'size, hwupdatewalk) = {
+  assert(HasS2Translation(), "HasS2Translation()");
+  /* Stage 2 enabled by HCR_EL2 bit 0 or bit 12 (presumably VM / DC —
+   * confirm against the ARM ARM HCR_EL2 field layout). */
+  s2_enabled : bool = [HCR_EL2[0]] == 0b1 | [HCR_EL2[12]] == 0b1;
+  secondstage : bool = true;
+  result : AddressDescriptor = undefined;
+  S2 : TLBRecord = undefined;
+  ipaddress : bits(52) = undefined;
+  if s2_enabled then {
+    ipaddress = slice(S1.paddress.physicaladdress, 0, 52);
+    S2 = AArch64_TranslationTableWalk_SecondStage(ipaddress, vaddress, acctype, iswrite, s2fs1walk, size);
+    /* Unaligned accesses (except instruction fetches; always for DC ZVA)
+     * to Device memory raise an alignment fault at stage 2. */
+    if ((~(wasaligned) & acctype != AccType_IFETCH | acctype == AccType_DCZVA) & S2.addrdesc.memattrs.typ == MemType_Device) & ~(IsFault(S2.addrdesc)) then {
+      __tmp_71 : AddressDescriptor = S2.addrdesc;
+      __tmp_71.fault = AArch64_AlignmentFault(acctype, iswrite, secondstage);
+      S2.addrdesc = __tmp_71
+    } else ();
+    /* Stage 2 permission check on the walked result. */
+    if ~(IsFault(S2.addrdesc)) then {
+      __tmp_72 : AddressDescriptor = S2.addrdesc;
+      __tmp_72.fault = AArch64_CheckS2Permission(S2.perms, vaddress, ipaddress, S2.level, acctype, iswrite, s2fs1walk, hwupdatewalk);
+      S2.addrdesc = __tmp_72
+    } else ();
+    /* Instruction fetch from Device memory gets special handling. */
+    if ((~(s2fs1walk) & ~(IsFault(S2.addrdesc))) & S2.addrdesc.memattrs.typ == MemType_Device) & acctype == AccType_IFETCH then
+      S2.addrdesc = AArch64_InstructionDevice(S2.addrdesc, vaddress, ipaddress, S2.level, acctype, iswrite, secondstage, s2fs1walk)
+    else ();
+    /* Stage 1 walk descriptor fetch landing in Device memory faults when
+     * HCR_EL2 bit 2 is set (presumably HCR_EL2.PTW — TODO confirm). */
+    if ((s2fs1walk & ~(IsFault(S2.addrdesc))) & [HCR_EL2[2]] == 0b1) & S2.addrdesc.memattrs.typ == MemType_Device then {
+      __tmp_73 : AddressDescriptor = S2.addrdesc;
+      __tmp_73.fault = AArch64_PermissionFault(ipaddress, S2.level, acctype, iswrite, secondstage, s2fs1walk);
+      S2.addrdesc = __tmp_73
+    } else ();
+    /* Hardware AF/dirty-state update of the stage 2 descriptor, then
+     * combine the stage 1 and stage 2 results. */
+    __tmp_74 : AddressDescriptor = S2.addrdesc;
+    __tmp_74.fault = AArch64_CheckAndUpdateDescriptor_SecondStage(S2.descupdate, S2.addrdesc.fault, vaddress, acctype, iswrite, s2fs1walk, hwupdatewalk);
+    S2.addrdesc = __tmp_74;
+    result = CombineS1S2Desc(S1, S2.addrdesc)
+  } else result = S1;
+  return(result)
+}
+
+/* AArch64.CheckAndUpdateDescriptor: perform any hardware update of the
+ * Access Flag (descriptor bit 10) and dirty state (bit 7) requested in
+ * `result`, writing the modified descriptor back to memory, and return
+ * the (unchanged) fault record.  Interface is unchanged. */
+function AArch64_CheckAndUpdateDescriptor (result, fault, secondstage, vaddress, acctype, iswrite, s2fs1walk, hwupdatewalk__arg) = {
+  hwupdatewalk = hwupdatewalk__arg;
+  /* Fix: initialise to false instead of undefined.  When result.AF is
+   * false the flag was previously left undefined but still read in the
+   * `hw_update_AF | hw_update_AP` test below, which could spuriously
+   * trigger a descriptor write-back; the ARM pseudocode initialises
+   * hw_update_AF to FALSE. */
+  hw_update_AF : bool = false;
+  if result.AF then if fault.typ == Fault_None then hw_update_AF = true else if ConstrainUnpredictable(Unpredictable_AFUPDATE) == Constraint_TRUE then hw_update_AF = true else hw_update_AF = false else ();
+  hw_update_AP : bool = undefined;
+  write_perm_req : bool = undefined;
+  /* Dirty-state update is only requested for a faulting-free write-type
+   * access (or an explicit hardware-update walk). */
+  if result.AP & fault.typ == Fault_None then {
+    write_perm_req = (iswrite | acctype == AccType_ATOMICRW | acctype == AccType_ORDEREDRW) & ~(s2fs1walk);
+    hw_update_AP = write_perm_req & ~(acctype == AccType_AT | acctype == AccType_DC) | hwupdatewalk
+  } else hw_update_AP = false;
+  desc : bits(64) = undefined;
+  accdesc : AccessDescriptor = undefined;
+  descaddr2 : AddressDescriptor = undefined;
+  if hw_update_AF | hw_update_AP then {
+    /* For a stage 1 update under stage 2 translation, the descriptor's
+     * own address must itself be stage 2 translated first. */
+    if secondstage | ~(HasS2Translation()) then descaddr2 = result.descaddr else {
+      hwupdatewalk = true;
+      descaddr2 = AArch64_SecondStageWalk(result.descaddr, vaddress, acctype, iswrite, 8, hwupdatewalk);
+      if IsFault(descaddr2) then return(descaddr2.fault) else ()
+    };
+    accdesc = CreateAccessDescriptor(AccType_ATOMICRW);
+    desc = aget__Mem(descaddr2, 8, accdesc);
+    /* Bit 10 = Access Flag; bit 7 = AP[2]/S2AP[1] dirty state. */
+    if hw_update_AF then desc = __SetSlice_bits(64, 1, desc, 10, 0b1) else ();
+    if hw_update_AP then desc = __SetSlice_bits(64, 1, desc, 7, if secondstage then 0b1 else 0b0) else ();
+    aset__Mem(descaddr2, 8, accdesc, desc)
+  } else ();
+  return(fault)
+}
+
+val AArch64_BreakpointValueMatch : (int, bits(64), bool) -> bool
+
+/* Stub: breakpoint value/context matching is not modelled here; always
+ * reports no match.  All arguments (breakpoint number, address,
+ * linked_to flag) are ignored. */
+function AArch64_BreakpointValueMatch (n__arg, vaddress, linked_to) = false
+
+val AArch64_StateMatch : (bits(2), bits(1), bits(2), bool, bits(4), bool, bool) -> bool effect {rreg, undef, escape}
+
+/* AArch64.StateMatch: decide whether a breakpoint/watchpoint control
+ * combination (SSC, HMC, PxC) matches the current privilege level and
+ * security state, including linked-breakpoint handling via LBN.
+ *   isbreakpnt - true for a breakpoint, false for a watchpoint
+ *   ispriv     - whether the access is privileged
+ * Returns true when privilege, security state and (if linked) the
+ * linked breakpoint all match. */
+function AArch64_StateMatch (SSC__arg, HMC__arg, PxC__arg, linked__arg, LBN, isbreakpnt, ispriv) = {
+  HMC = HMC__arg;
+  PxC = PxC__arg;
+  SSC = SSC__arg;
+  linked = linked__arg;
+  c : Constraint = undefined;
+  /* Reserved HMC/SSC/PxC combinations are CONSTRAINED UNPREDICTABLE:
+   * either the breakpoint is disabled or replacement control bits are
+   * chosen by ConstrainUnpredictableBits. */
+  if (((((((HMC @ SSC) @ PxC) & 0b11100) == 0b01100 | (((HMC @ SSC) @ PxC) & 0b11101) == 0b10000 | (((HMC @ SSC) @ PxC) & 0b11101) == 0b10100 | ((HMC @ SSC) @ PxC) == 0b11010 | ((HMC @ SSC) @ PxC) == 0b11101 | (((HMC @ SSC) @ PxC) & 0b11110) == 0b11110) | (HMC == 0b0 & PxC == 0b00) & (~(isbreakpnt) | ~(HaveAArch32EL(EL1)))) | (SSC == 0b01 | SSC == 0b10) & ~(HaveEL(EL3))) | (((HMC @ SSC) != 0b000 & (HMC @ SSC) != 0b111) & ~(HaveEL(EL3))) & ~(HaveEL(EL2))) | ((HMC @ SSC) @ PxC) == 0b11100 & ~(HaveEL(EL2)) then {
+    __tmp_5 : bits(5) = undefined;
+    (c, __tmp_5) = ConstrainUnpredictableBits(Unpredictable_RESBPWPCTRL) : (Constraint, bits(5));
+    /* Unpack replacement bits: [4] -> HMC, [3:2] -> SSC, [1:0] -> PxC. */
+    __tmp_6 : bits(5) = __tmp_5;
+    HMC = [__tmp_6[4]];
+    __tmp_7 : bits(4) = slice(__tmp_6, 0, 4);
+    SSC = slice(__tmp_7, 2, 2);
+    PxC = slice(__tmp_7, 0, 2);
+    assert(c == Constraint_DISABLED | c == Constraint_UNKNOWN, "((c == Constraint_DISABLED) || (c == Constraint_UNKNOWN))");
+    if c == Constraint_DISABLED then return(false) else ()
+  } else ();
+  EL3_match : bool = (HaveEL(EL3) & HMC == 0b1) & [SSC[0]] == 0b0;
+  EL2_match : bool = HaveEL(EL2) & HMC == 0b1;
+  EL1_match : bool = [PxC[0]] == 0b1;
+  EL0_match : bool = [PxC[1]] == 0b1;
+  priv_match : bool = undefined;
+  if ~(ispriv) & ~(isbreakpnt) then priv_match = EL0_match
+  else match PSTATE.EL {
+    EL3 => priv_match = EL3_match,
+    EL2 => priv_match = EL2_match,
+    EL1 => priv_match = EL1_match,
+    EL0 => priv_match = EL0_match
+  };
+  security_state_match : bool = undefined;
+  match SSC {
+    0b00 => security_state_match = true,
+    0b01 => security_state_match = ~(IsSecure()),
+    0b10 => security_state_match = IsSecure(),
+    0b11 => security_state_match = true
+  };
+  last_ctx_cmp : int = undefined;
+  first_ctx_cmp : int = undefined;
+  lbn : int = undefined;
+  if linked then {
+    lbn = UInt(LBN);
+    /* Context-compare breakpoints occupy the top of the breakpoint range:
+     * ID_AA64DFR0_EL1 bits 15:12 (BRPs) and 31:28 (CTX_CMPs). */
+    first_ctx_cmp = UInt(slice(ID_AA64DFR0_EL1, 12, 4)) - UInt(slice(ID_AA64DFR0_EL1, 28, 4));
+    last_ctx_cmp = UInt(slice(ID_AA64DFR0_EL1, 12, 4));
+    if lbn < first_ctx_cmp | lbn > last_ctx_cmp then {
+      (c, lbn) = ConstrainUnpredictableInteger(first_ctx_cmp, last_ctx_cmp, Unpredictable_BPNOTCTXCMP);
+      assert(c == Constraint_DISABLED | c == Constraint_NONE | c == Constraint_UNKNOWN, "((c == Constraint_DISABLED) || ((c == Constraint_NONE) || (c == Constraint_UNKNOWN)))");
+      match c {
+        Constraint_DISABLED => return(false),
+        Constraint_NONE => linked = false,
+        /* Fix: the assertion above also permits Constraint_UNKNOWN, in
+         * which case lbn keeps the value chosen by
+         * ConstrainUnpredictableInteger; previously this case fell
+         * through a non-exhaustive match (a runtime match failure). */
+        Constraint_UNKNOWN => ()
+      }
+    } else ()
+  } else ();
+  linked_match : bool = undefined;
+  /* Locals below model UNKNOWN values from the pseudocode: the linked
+   * breakpoint is matched on context, not on this address. */
+  linked_to : bool = undefined;
+  vaddress : bits(64) = undefined;
+  if linked then {
+    vaddress = undefined;
+    linked_to = true;
+    linked_match = AArch64_BreakpointValueMatch(lbn, vaddress, linked_to)
+  } else ();
+  return((priv_match & security_state_match) & (~(linked) | linked_match))
+}
+
+val AArch64_WatchpointMatch : (int, bits(64), int, bool, bool) -> bool effect {escape, rreg, undef}
+
+/* AArch64.WatchpointMatch: report whether watchpoint n matches an
+ * access of 'size bytes at vaddress (ispriv/iswrite describe the
+ * access).  A watchpoint hits when any byte of the access matches and
+ * the enable, state and load/store-type conditions all hold. */
+function AArch64_WatchpointMatch ('n, vaddress, 'size, ispriv, iswrite) = {
+  assert(~(ELUsingAArch32(S1TranslationRegime())), "!(ELUsingAArch32(S1TranslationRegime()))");
+  assert(n <= UInt(slice(ID_AA64DFR0_EL1, 20, 4)), "(n <= UInt((ID_AA64DFR0_EL1).WRPs))");
+  /* DBGWCR_EL1[n]: bit 0 = enable, bit 20 = linked watchpoint. */
+  enabled : bool = [DBGWCR_EL1[n][0]] == 0b1;
+  linked : bool = [DBGWCR_EL1[n][20]] == 0b1;
+  isbreakpnt : bool = false;
+  /* Control fields passed to StateMatch: SSC (bits 15:14), HMC (bit 13),
+   * PAC (bits 2:1), LBN (bits 19:16). */
+  state_match : bool = AArch64_StateMatch(slice(DBGWCR_EL1[n], 14, 2), [DBGWCR_EL1[n][13]], slice(DBGWCR_EL1[n], 1, 2), linked, slice(DBGWCR_EL1[n], 16, 4), isbreakpnt, ispriv);
+  /* LSC (bits 4:3): upper bit enables store matching, lower bit loads. */
+  ls_match : bool = [slice(DBGWCR_EL1[n], 3, 2)[if iswrite then 1 else 0]] == 0b1;
+  value_match_name : bool = false;
+  /* OR together the per-byte address matches over the access. */
+  foreach (byte from 0 to (size - 1) by 1 in inc)
+    value_match_name = value_match_name | AArch64_WatchpointByteMatch(n, vaddress + byte);
+  return(((value_match_name & state_match) & ls_match) & enabled)
+}
+
+val AArch64_BreakpointMatch : (int, bits(64), int) -> bool effect {escape, rreg, undef}
+
+/* AArch64.BreakpointMatch: report whether breakpoint n matches an
+ * instruction fetch of 'size bytes at vaddress. */
+function AArch64_BreakpointMatch ('n, vaddress, 'size) = {
+  assert(~(ELUsingAArch32(S1TranslationRegime())), "!(ELUsingAArch32(S1TranslationRegime()))");
+  assert(n <= UInt(slice(ID_AA64DFR0_EL1, 12, 4)), "(n <= UInt((ID_AA64DFR0_EL1).BRPs))");
+  /* DBGBCR_EL1[n]: bit 0 = enable; BT field (bits 23:20) masked with 0xB
+   * equal to 0x1 indicates a linked breakpoint type. */
+  enabled : bool = [DBGBCR_EL1[n][0]] == 0b1;
+  ispriv : bool = PSTATE.EL != EL0;
+  linked : bool = (slice(DBGBCR_EL1[n], 20, 4) & 0xB) == 0x1;
+  isbreakpnt : bool = true;
+  linked_to : bool = false;
+  /* SSC (bits 15:14), HMC (bit 13), PMC (bits 2:1), LBN (bits 19:16). */
+  state_match : bool = AArch64_StateMatch(slice(DBGBCR_EL1[n], 14, 2), [DBGBCR_EL1[n][13]], slice(DBGBCR_EL1[n], 1, 2), linked, slice(DBGBCR_EL1[n], 16, 4), isbreakpnt, ispriv);
+  value_match_name : bool = AArch64_BreakpointValueMatch(n, vaddress, linked_to);
+  match_i : bool = undefined;
+  /* A 4-byte fetch may also match on its second halfword when AArch32 is
+   * implemented; whether that counts is CONSTRAINED UNPREDICTABLE. */
+  if HaveAnyAArch32() & size == 4 then {
+    match_i = AArch64_BreakpointValueMatch(n, vaddress + 2, linked_to);
+    if ~(value_match_name) & match_i then value_match_name = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF) else ()
+  } else ();
+  /* Halfword-aligned address with BAS (bits 8:5) == 0xF: match is
+   * CONSTRAINED UNPREDICTABLE. */
+  if [vaddress[1]] == 0b1 & slice(DBGBCR_EL1[n], 5, 4) == 0xF then if value_match_name then value_match_name = ConstrainUnpredictableBool(Unpredictable_BPMATCHHALF) else () else ();
+  val_match : bool = (value_match_name & state_match) & enabled;
+  return(val_match)
+}
+
+val AArch64_CheckBreakpoint : (bits(64), int) -> FaultRecord effect {wreg, rreg, undef, escape}
+
+/* AArch64.CheckBreakpoint: test every implemented breakpoint against an
+ * instruction fetch of `size` bytes at vaddress.  On a match, either
+ * halt into debug state or (when debug exceptions are enabled) return a
+ * debug fault; otherwise return no fault. */
+function AArch64_CheckBreakpoint (vaddress, size) = {
+  assert(~(ELUsingAArch32(S1TranslationRegime())), "!(ELUsingAArch32(S1TranslationRegime()))");
+  assert(UsingAArch32() & (size == 2 | size == 4) | size == 4, "((UsingAArch32() && ((size == 2) || (size == 4))) || (size == 4))");
+  val_match : bool = false;
+  match_i : bool = undefined;
+  /* Inclusive loop: ID_AA64DFR0_EL1 bits 15:12 hold the number of
+   * breakpoints minus one. */
+  foreach (i from 0 to UInt(slice(ID_AA64DFR0_EL1, 12, 4)) by 1 in inc) {
+    match_i = AArch64_BreakpointMatch(i, vaddress, size);
+    val_match = val_match | match_i
+  };
+  iswrite : bool = undefined;
+  acctype : AccType = undefined;
+  reason : bits(6) = undefined;
+  if val_match & HaltOnBreakpointOrWatchpoint() then {
+    reason = DebugHalt_Breakpoint;
+    Halt(reason);
+    /* After halting the returned fault record is an UNKNOWN value. */
+    undefined : FaultRecord
+  } else if (val_match & [MDSCR_EL1[15]] == 0b1) & AArch64_GenerateDebugExceptions() then {
+    acctype = AccType_IFETCH;
+    iswrite = false;
+    return(AArch64_DebugFault(acctype, iswrite))
+  } else return(AArch64_NoFault())
+}
+
+val AArch64_BranchAddr : bits(64) -> bits(64) effect {rreg, undef, escape}
+
+/* AArch64.BranchAddr: canonicalise a branch target by removing any
+ * pointer tag above the top address bit.  At EL0/EL1 (or in host mode)
+ * a set top bit sign-extends (kernel-style addresses); otherwise the
+ * address is zero-extended above the top bit. */
+function AArch64_BranchAddr vaddress = {
+  assert(~(UsingAArch32()), "!(UsingAArch32())");
+  msbit : nat = coerce_int_nat(AddrTop(vaddress, true, PSTATE.EL));
+  if msbit == 63 then return(vaddress) else if ((PSTATE.EL == EL0 | PSTATE.EL == EL1) | IsInHost()) & [vaddress[msbit]] == 0b1 then return(SignExtend(slice(vaddress, 0, msbit + 1))) else return(ZeroExtend(slice(vaddress, 0, msbit + 1)))
+}
+
+val BranchTo : forall ('N : Int), 'N >= 0.
+  (bits('N), BranchType) -> unit effect {escape, rreg, undef, wreg}
+
+/* BranchTo: write the program counter with a new target.  'N selects
+ * the execution state: 32-bit targets require AArch32, 64-bit targets
+ * require AArch64 and are tag-stripped via AArch64_BranchAddr. */
+function BranchTo (target, branch_type) = {
+  __BranchTaken = true;
+  Hint_Branch(branch_type);
+  if 'N == 32 then {
+    assert(UsingAArch32(), "UsingAArch32()");
+    _PC = ZeroExtend(target)
+  } else {
+    assert('N == 64 & ~(UsingAArch32()), "((N == 64) && !(UsingAArch32()))");
+    _PC = AArch64_BranchAddr(slice(target, 0, 64))
+  };
+  ()
+}
+
+val aarch64_branch_unconditional_immediate : (BranchType, bits(64)) -> unit effect {escape, rreg, undef, wreg}
+
+/* B / BL execute: for a call, first write the return address (PC+4)
+ * to the link register X30, then branch to PC+offset. */
+function aarch64_branch_unconditional_immediate (branch_type, offset) = {
+  if branch_type == BranchType_CALL then aset_X(30, aget_PC() + 4) else ();
+  BranchTo(aget_PC() + offset, branch_type)
+}
+
+val branch_unconditional_immediate_decode : (bits(1), bits(26)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decoder for B / BL: op = 1 selects the branch-with-link form; imm26
+ * is the word-aligned signed offset. */
+function branch_unconditional_immediate_decode (op, imm26) = {
+  __unconditional = true;
+  byte_offset : bits(64) = SignExtend(imm26 @ 0b00, 64);
+  if op == 0b1 then
+    aarch64_branch_unconditional_immediate(BranchType_CALL, byte_offset)
+  else
+    aarch64_branch_unconditional_immediate(BranchType_JMP, byte_offset)
+}
+
+val aarch64_branch_conditional_test : (int, bits(1), int, bits(64), int) -> unit effect {escape, rreg, undef, wreg}
+
+/* TBZ / TBNZ execute: branch to PC+offset when bit `bit_pos` of
+ * register X[t] equals bit_val.  The let-binding fixes 'dbytes at the
+ * type level so the register read is well-sized. */
+function aarch64_branch_conditional_test ('bit_pos, bit_val, 'datasize, offset, 't) = let 'dbytes = ex_int(datasize / 8) in {
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  operand : bits('datasize) = aget_X(t);
+  if [operand[bit_pos]] == bit_val then BranchTo(aget_PC() + offset, BranchType_JMP) else ()
+}
+
+val branch_conditional_test_decode : (bits(1), bits(1), bits(5), bits(14), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decoder for TBZ / TBNZ: b5:b40 give the bit number to test (b5 also
+ * selects the 64-bit register form), op is the bit value to branch on,
+ * imm14 the word-aligned signed offset, Rt the register to test. */
+function branch_conditional_test_decode (b5, op, b40, imm14, Rt) = {
+  __unconditional = true;
+  reg_idx : int = UInt(Rt);
+  test_bit : int = UInt(b5 @ b40);
+  branch_offset : bits(64) = SignExtend(imm14 @ 0b00, 64);
+  let 'datasize : {|64, 32|} = if b5 == 0b1 then 64 else 32;
+  aarch64_branch_conditional_test(test_bit, op, datasize, branch_offset, reg_idx)
+}
+
+val aarch64_branch_conditional_cond : (bits(4), bits(64)) -> unit effect {escape, rreg, undef, wreg}
+
+/* B.cond execute: take the PC-relative branch when `condition` holds,
+ * otherwise fall through. */
+function aarch64_branch_conditional_cond (condition, offset) = {
+  if ConditionHolds(condition) then
+    BranchTo(aget_PC() + offset, BranchType_JMP)
+  else
+    ()
+}
+
+val branch_conditional_cond_decode : (bits(1), bits(19), bits(1), bits(4)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decoder for B.cond: imm19 is the word-aligned signed offset, cond the
+ * condition field; o1/o0 are unused here. */
+function branch_conditional_cond_decode (o1, imm19, o0, cond) = {
+  __unconditional = true;
+  aarch64_branch_conditional_cond(cond, SignExtend(imm19 @ 0b00, 64))
+}
+
+val aarch64_branch_conditional_compare : (int, bool, bits(64), int) -> unit effect {escape, rreg, undef, wreg}
+
+/* CBZ / CBNZ execute: branch to PC+offset when X[t] is zero (iszero
+ * true) or non-zero (iszero false). */
+function aarch64_branch_conditional_compare ('datasize, iszero, offset, 't) = let 'dbytes = ex_int(datasize / 8) in {
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  operand1 : bits('datasize) = aget_X(t);
+  if IsZero(operand1) == iszero then BranchTo(aget_PC() + offset, BranchType_JMP) else ()
+}
+
+val branch_conditional_compare_decode : (bits(1), bits(1), bits(19), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decoder for CBZ / CBNZ: sf selects the 64-bit form, op = 0 means
+ * branch-if-zero, imm19 the word-aligned signed offset, Rt the register
+ * to compare. */
+function branch_conditional_compare_decode (sf, op, imm19, Rt) = {
+  __unconditional = true;
+  let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+  reg_idx : int = UInt(Rt);
+  branch_offset : bits(64) = SignExtend(imm19 @ 0b00, 64);
+  aarch64_branch_conditional_compare(datasize, op == 0b0, branch_offset, reg_idx)
+}
+
+val AArch64_TakeReset : bool -> unit effect {escape, rreg, undef, wreg}
+
+/* AArch64.TakeReset: reset into AArch64 at the highest implemented EL,
+ * reset all register state, and branch to the reset vector (RVBAR_ELx).
+ * cold_reset distinguishes a cold reset from a warm one. */
+function AArch64_TakeReset cold_reset = {
+  assert(~(HighestELUsingAArch32()), "!(HighestELUsingAArch32())");
+  PSTATE.nRW = 0b0;
+  /* Enter the highest implemented exception level. */
+  if HaveEL(EL3) then PSTATE.EL = EL3
+  else if HaveEL(EL2) then PSTATE.EL = EL2
+  else PSTATE.EL = EL1;
+  AArch64_ResetControlRegisters(cold_reset);
+  PSTATE.SP = 0b1;
+  /* Mask all of D, A, I, F. */
+  (PSTATE.D @ PSTATE.A @ PSTATE.I @ PSTATE.F) = 0xF;
+  PSTATE.SS = 0b0;
+  PSTATE.IL = 0b0;
+  AArch64_ResetGeneralRegisters();
+  AArch64_ResetSIMDFPRegisters();
+  AArch64_ResetSpecialRegisters();
+  ResetExternalDebugRegisters(cold_reset);
+  /* The reset vector base for the highest implemented EL. */
+  rv : bits(64) = undefined;
+  if HaveEL(EL3) then rv = RVBAR_EL3
+  else if HaveEL(EL2) then rv = RVBAR_EL2
+  else rv = RVBAR_EL1;
+  /* The vector must be within the physical address range and 4-byte
+   * aligned. */
+  assert(IsZero_slice(rv, PAMax(), 64 - PAMax()) & IsZero_slice(rv, 0, 2), "(IsZero((rv)<PAMax()+:((63 - PAMax()) + 1)>) && IsZero((rv)<0+:((1 - 0) + 1)>))");
+  BranchTo(rv, BranchType_UNKNOWN)
+}
+
+val __TakeColdReset : unit -> unit effect {escape, rreg, undef, wreg}
+
+/* Simulator entry point for a cold reset: clear execution state,
+ * reset the model's interrupt/memory/execute bookkeeping, then perform
+ * the architectural AArch64 reset. */
+function __TakeColdReset () = {
+  PSTATE.nRW = 0b0;
+  PSTATE.SS = 0b0;
+  __ResetInterruptState();
+  __ResetMemoryState();
+  __ResetExecuteState();
+  AArch64_TakeReset(true)
+}
+
+val AArch64_TakeException : (bits(2), ExceptionRecord, bits(64), int) -> unit effect {escape, rreg, undef, wreg}
+
+/* AArch64.TakeException: take an exception to target_el, saving the
+ * return state and branching to the appropriate vector table entry.
+ *   exception - syndrome/record to report
+ *   preferred_exception_return - value written to ELR_ELx
+ *   vect_offset__arg - base offset into the vector table */
+function AArch64_TakeException (target_el, exception, preferred_exception_return, vect_offset__arg) = {
+  vect_offset : int = vect_offset__arg;
+  SynchronizeContext();
+  assert((HaveEL(target_el) & ~(ELUsingAArch32(target_el))) & UInt(target_el) >= UInt(PSTATE.EL), "((HaveEL(target_el) && !(ELUsingAArch32(target_el))) && (UInt(target_el) >= UInt((PSTATE).EL)))");
+  from_32 : bool = UsingAArch32();
+  if from_32 then AArch64_MaybeZeroRegisterUppers() else ();
+  /* Select the vector table quadrant: exceptions from a lower EL use
+   * offset 0x400 (AArch64) or 0x600 (AArch32); same EL with SP_ELx uses
+   * offset 0x200. */
+  if UInt(target_el) > UInt(PSTATE.EL) then {
+    lower_32 : bool = undefined;
+    if target_el == EL3 then
+      if ~(IsSecure()) & HaveEL(EL2) then lower_32 = ELUsingAArch32(EL2)
+      else lower_32 = ELUsingAArch32(EL1)
+    else if (IsInHost() & PSTATE.EL == EL0) & target_el == EL2 then
+      lower_32 = ELUsingAArch32(EL0)
+    else lower_32 = ELUsingAArch32(target_el - 1);
+    vect_offset = vect_offset + (if lower_32 then 1536 else 1024)
+  } else if PSTATE.SP == 0b1 then vect_offset = vect_offset + 512
+  else ();
+  /* Capture the outgoing PSTATE before modifying it. */
+  spsr : bits(32) = GetPSRFromPSTATE();
+  if HaveUAOExt() then PSTATE.UAO = 0b0
+  else ();
+  if ~(exception.typ == Exception_IRQ | exception.typ == Exception_FIQ) then AArch64_ReportException(exception, target_el) else ();
+  PSTATE.EL = target_el;
+  PSTATE.nRW = 0b0;
+  PSTATE.SP = 0b1;
+  aset_SPSR(spsr);
+  aset_ELR(preferred_exception_return);
+  PSTATE.SS = 0b0;
+  /* Mask all of D, A, I, F on entry. */
+  (PSTATE.D @ PSTATE.A @ PSTATE.I @ PSTATE.F) = 0xF;
+  PSTATE.IL = 0b0;
+  if from_32 then {
+    PSTATE.IT = 0x00;
+    PSTATE.T = 0b0
+  } else ();
+  /* PAN is set on entry when enabled and SCTLR_ELx bit 23 (SPAN) is 0. */
+  if (HavePANExt() & (PSTATE.EL == EL1 | PSTATE.EL == EL2 & ELIsInHost(EL0))) & [aget_SCTLR()[23]] == 0b0 then
+    PSTATE.PAN = 0b1
+  else ();
+  /* Branch to VBAR_ELx (bits 63:11) with the 11-bit vector offset. */
+  BranchTo(slice(aget_VBAR(), 11, 53) @ __GetSlice_int(11, vect_offset, 0), BranchType_EXCEPTION);
+  /* Implicit error synchronization barrier when RAS is implemented and
+   * SCTLR_ELx bit 21 (IESB) is set. */
+  iesb_req : bool = undefined;
+  if HaveRASExt() & [aget_SCTLR()[21]] == 0b1 then {
+    ErrorSynchronizationBarrier(MBReqDomain_FullSystem, MBReqTypes_All);
+    iesb_req = true;
+    TakeUnmaskedPhysicalSErrorInterrupts(iesb_req)
+  } else ();
+  EndOfInstruction()
+}
+
+val TrapPACUse : bits(2) -> unit effect {escape, rreg, undef, wreg}
+
+/* TrapPACUse: raise a PAC-trap exception (vector offset 0) to target_el
+ * with the current instruction address as the preferred return. */
+function TrapPACUse target_el = {
+  assert((HaveEL(target_el) & target_el != EL0) & UInt(target_el) >= UInt(PSTATE.EL), "((HaveEL(target_el) && (target_el != EL0)) && (UInt(target_el) >= UInt((PSTATE).EL)))");
+  preferred_exception_return : bits(64) = ThisInstrAddr();
+  exception : ExceptionRecord = undefined;
+  vect_offset : int = 0;
+  exception = ExceptionSyndrome(Exception_PACTrap);
+  AArch64_TakeException(target_el, exception, preferred_exception_return, vect_offset)
+}
+
+val Strip : (bits(64), bool) -> bits(64) effect {wreg, rreg, escape, undef}
+
+/* Strip (XPAC): remove the pointer authentication code from pointer A,
+ * replacing the PAC field with the extension of bit 55; `data` selects
+ * data-pointer vs instruction-pointer stripping.  Depending on the
+ * current EL and HCR_EL2/SCR_EL3 controls the use of PAC instructions
+ * may instead trap to EL2 or EL3. */
+function Strip (A, data) = {
+  TrapEL2 : bool = undefined;
+  TrapEL3 : bool = undefined;
+  original_ptr : bits(64) = undefined;
+  extfield : bits(64) = undefined;
+  tbi : bool = CalculateTBI(A, data);
+  let 'bottom_PAC_bit = ex_int(CalculateBottomPACBit(A, [A[55]]));
+  assert(constraint(0 <= 'bottom_PAC_bit & 'bottom_PAC_bit <= 56));
+  /* All-ones or all-zeros according to the sign-select bit A[55]. */
+  extfield = replicate_bits([A[55]], 64);
+  /* With top-byte-ignore the tag byte (bits 63:56) is preserved. */
+  if tbi then
+    original_ptr = (slice(A, 56, 8) @ slice(extfield, 0, negate(bottom_PAC_bit) + 56)) @ slice(A, 0, bottom_PAC_bit)
+  else
+    original_ptr = slice(extfield, 0, negate(bottom_PAC_bit) + 64) @ slice(A, 0, bottom_PAC_bit);
+  /* Per-EL trap controls: HCR_EL2 bit 41 and SCR_EL3 bit 17 (presumably
+   * API enables — confirm against the ARM ARM field layout). */
+  match PSTATE.EL {
+    EL0 => {
+      IsEL1Regime : bool = (~(HaveEL(EL2)) | [HCR_EL2[27]] == 0b0) | [HCR_EL2[34]] == 0b0;
+      TrapEL2 = ((HaveEL(EL2) & IsEL1Regime) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+      TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+    },
+    EL1 => {
+      TrapEL2 = (HaveEL(EL2) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+      TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+    },
+    EL2 => {
+      TrapEL2 = false;
+      TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+    },
+    EL3 => {
+      TrapEL2 = false;
+      TrapEL3 = false
+    }
+  };
+  /* The `undefined` after a trap is never reached; it satisfies the
+   * type checker for the expression's result. */
+  if TrapEL2 then {
+    TrapPACUse(EL2);
+    undefined
+  } else if TrapEL3 then {
+    TrapPACUse(EL3);
+    undefined
+  } else return(original_ptr)
+}
+
+val aarch64_integer_pac_strip_dp_1src : (int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* XPACI/XPACD execute: strip the PAC from X[d] in place; a NOP when the
+ * PAC extension is not implemented. */
+function aarch64_integer_pac_strip_dp_1src ('d, data) = if HavePACExt() then aset_X(d, Strip(aget_X(d), data)) else ()
+
+val integer_pac_strip_hint_decode : (bits(1), bits(2), bits(3), bits(4), bits(4), bits(3), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decoder for the XPACLRI hint form: always strips the PAC from the
+ * link register (X30) as an instruction pointer (data = false). */
+function integer_pac_strip_hint_decode (L, op0, op1, CRn, CRm, op2, Rt) = {
+  __unconditional = true;
+  aarch64_integer_pac_strip_dp_1src(30, false)
+}
+
+val AuthIB : (bits(64), bits(64)) -> bits(64) effect {escape, wreg, rreg, undef}
+
+/* AuthIB: authenticate pointer X against modifier Y with instruction
+ * key B (APIBKey_EL1, assembled from the Hi/Lo register halves).  The
+ * per-EL enable is SCTLR_ELx bit 30; PAC use may trap to EL2/EL3 under
+ * HCR_EL2 bit 41 / SCR_EL3 bit 17.  Returns X unchanged when disabled. */
+function AuthIB (X, Y) = {
+  TrapEL2 : bool = undefined;
+  TrapEL3 : bool = undefined;
+  Enable : bits(1) = undefined;
+  APIBKey_EL1 : bits(128) = slice(APIBKeyHi_EL1, 0, 64) @ slice(APIBKeyLo_EL1, 0, 64);
+  match PSTATE.EL {
+    EL0 => {
+      /* EL0 uses EL1's controls unless EL2 hosts EL0 (HCR_EL2 bits
+       * 27/34 — presumably TGE/E2H, TODO confirm). */
+      IsEL1Regime : bool = (~(HaveEL(EL2)) | [HCR_EL2[27]] == 0b0) | [HCR_EL2[34]] == 0b0;
+      Enable = if IsEL1Regime then [SCTLR_EL1[30]] else [SCTLR_EL2[30]];
+      TrapEL2 = ((HaveEL(EL2) & IsEL1Regime) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+      TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+    },
+    EL1 => {
+      Enable = [SCTLR_EL1[30]];
+      TrapEL2 = (HaveEL(EL2) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+      TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+    },
+    EL2 => {
+      Enable = [SCTLR_EL2[30]];
+      TrapEL2 = false;
+      TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+    },
+    EL3 => {
+      Enable = [SCTLR_EL3[30]];
+      TrapEL2 = false;
+      TrapEL3 = false
+    }
+  };
+  /* The `undefined` after a trap is never reached. */
+  if Enable == 0b0 then return(X) else if TrapEL2 then {
+    TrapPACUse(EL2);
+    undefined
+  } else if TrapEL3 then {
+    TrapPACUse(EL3);
+    undefined
+  } else return(Auth(X, Y, APIBKey_EL1, false, 0b1))
+}
+
+val aarch64_integer_pac_autib_dp_1src : (int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* AUTIB-family execute (hint space): authenticate X[d] with key IB,
+ * using SP or X[n] as the modifier; a NOP without the PAC extension. */
+function aarch64_integer_pac_autib_dp_1src ('d, 'n, source_is_sp) = if HavePACExt() then if source_is_sp then aset_X(d, AuthIB(aget_X(d), aget_SP())) else aset_X(d, AuthIB(aget_X(d), aget_X(n))) else ()
+
+val integer_pac_autib_hint_decode : (bits(1), bits(2), bits(3), bits(4), bits(4), bits(3), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decoder for the AUTIB hint-space forms.  CRm@op2 selects the variant
+ * (presumably AUTIBZ / AUTIBSP / AUTIB1716 — confirm against the ARM
+ * ARM hint encodings); other hint encodings redirect via SEE. */
+function integer_pac_autib_hint_decode (L, op0, op1, CRn, CRm, op2, Rt) = {
+  __unconditional = true;
+  d : int = undefined;
+  n : int = undefined;
+  source_is_sp : bool = false;
+  match CRm @ op2 {
+    0b0011110 => {
+      d = 30;
+      n = 31
+    },
+    0b0011111 => {
+      d = 30;
+      source_is_sp = true
+    },
+    0b0001110 => {
+      d = 17;
+      n = 16
+    },
+    0b0001000 => throw(Error_See("PACIA")),
+    0b0001010 => throw(Error_See("PACIB")),
+    0b0001100 => throw(Error_See("AUTIA")),
+    [bitzero] @ [bitzero] @ [bitone] @ [bitone] @ [bitzero] @ [bitzero] @ _ : bits(1) => throw(Error_See("PACIA")),
+    [bitzero] @ [bitzero] @ [bitone] @ [bitone] @ [bitzero] @ [bitone] @ _ : bits(1) => throw(Error_See("PACIB")),
+    [bitzero] @ [bitzero] @ [bitone] @ [bitone] @ [bitone] @ [bitzero] @ _ : bits(1) => throw(Error_See("AUTIA")),
+    0b0000111 => throw(Error_See("XPACLRI")),
+    /* Fix: catch-all added for consistency with
+     * integer_pac_autia_hint_decode — any other hint encoding is SEE
+     * "HINT" rather than a non-exhaustive match failure. */
+    _ => throw(Error_See("HINT"))
+  };
+  aarch64_integer_pac_autib_dp_1src(d, n, source_is_sp)
+}
+
+val AuthIA : (bits(64), bits(64)) -> bits(64) effect {escape, wreg, rreg, undef}
+
+/* AuthIA: authenticate pointer X against modifier Y with instruction
+ * key A (APIAKey_EL1).  Structure mirrors AuthIB but the per-EL enable
+ * is SCTLR_ELx bit 31 and the key-select bit passed to Auth is 0. */
+function AuthIA (X, Y) = {
+  TrapEL2 : bool = undefined;
+  TrapEL3 : bool = undefined;
+  Enable : bits(1) = undefined;
+  APIAKey_EL1 : bits(128) = slice(APIAKeyHi_EL1, 0, 64) @ slice(APIAKeyLo_EL1, 0, 64);
+  match PSTATE.EL {
+    EL0 => {
+      IsEL1Regime : bool = (~(HaveEL(EL2)) | [HCR_EL2[27]] == 0b0) | [HCR_EL2[34]] == 0b0;
+      Enable = if IsEL1Regime then [SCTLR_EL1[31]] else [SCTLR_EL2[31]];
+      TrapEL2 = ((HaveEL(EL2) & IsEL1Regime) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+      TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+    },
+    EL1 => {
+      Enable = [SCTLR_EL1[31]];
+      TrapEL2 = (HaveEL(EL2) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+      TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+    },
+    EL2 => {
+      Enable = [SCTLR_EL2[31]];
+      TrapEL2 = false;
+      TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+    },
+    EL3 => {
+      Enable = [SCTLR_EL3[31]];
+      TrapEL2 = false;
+      TrapEL3 = false
+    }
+  };
+  /* The `undefined` after a trap is never reached. */
+  if Enable == 0b0 then return(X) else if TrapEL2 then {
+    TrapPACUse(EL2);
+    undefined
+  } else if TrapEL3 then {
+    TrapPACUse(EL3);
+    undefined
+  } else return(Auth(X, Y, APIAKey_EL1, false, 0b0))
+}
+
+val aarch64_integer_pac_autia_dp_1src : (int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* AUTIA-family execute (hint space): authenticate X[d] with key IA,
+ * using SP or X[n] as the modifier; a NOP without the PAC extension. */
+function aarch64_integer_pac_autia_dp_1src ('d, 'n, source_is_sp) = if HavePACExt() then if source_is_sp then aset_X(d, AuthIA(aget_X(d), aget_SP())) else aset_X(d, AuthIA(aget_X(d), aget_X(n))) else ()
+
+val integer_pac_autia_hint_decode : (bits(1), bits(2), bits(3), bits(4), bits(4), bits(3), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decoder for the AUTIA hint-space forms.  CRm@op2 selects the variant
+ * (presumably AUTIAZ / AUTIASP / AUTIA1716 — confirm against the ARM
+ * ARM hint encodings); other hint encodings redirect via SEE, and any
+ * remaining encoding is the generic HINT. */
+function integer_pac_autia_hint_decode (L, op0, op1, CRn, CRm, op2, Rt) = {
+  __unconditional = true;
+  d : int = undefined;
+  n : int = undefined;
+  source_is_sp : bool = false;
+  match CRm @ op2 {
+    0b0011100 => {
+      d = 30;
+      n = 31
+    },
+    0b0011101 => {
+      d = 30;
+      source_is_sp = true
+    },
+    0b0001100 => {
+      d = 17;
+      n = 16
+    },
+    0b0001000 => throw(Error_See("PACIA")),
+    0b0001010 => throw(Error_See("PACIB")),
+    0b0001110 => throw(Error_See("AUTIB")),
+    [bitzero] @ [bitzero] @ [bitone] @ [bitone] @ [bitzero] @ [bitzero] @ _ : bits(1) => throw(Error_See("PACIA")),
+    [bitzero] @ [bitzero] @ [bitone] @ [bitone] @ [bitzero] @ [bitone] @ _ : bits(1) => throw(Error_See("PACIB")),
+    [bitzero] @ [bitzero] @ [bitone] @ [bitone] @ [bitone] @ [bitone] @ _ : bits(1) => throw(Error_See("AUTIB")),
+    0b0000111 => throw(Error_See("XPACLRI")),
+    _ => throw(Error_See("HINT"))
+  };
+  aarch64_integer_pac_autia_dp_1src(d, n, source_is_sp)
+}
+
+val aarch64_branch_unconditional_register : (BranchType, int, int, bool, bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* BR / BLR / RET and their authenticated forms (BRAA etc.): branch to
+ * the address in X[n], optionally authenticating it first with key A
+ * or B against SP or X[m] as the modifier.  For a call, the return
+ * address (PC+4) is written to X30 before the branch. */
+function aarch64_branch_unconditional_register (branch_type, 'm, 'n, pac, source_is_sp, use_key_a) = {
+  target : bits(64) = aget_X(n);
+  if pac then {
+    modifier : bits(64) = if source_is_sp then aget_SP() else aget_X(m);
+    if use_key_a then target = AuthIA(target, modifier) else target = AuthIB(target, modifier)
+  } else ();
+  if branch_type == BranchType_CALL then aset_X(30, aget_PC() + 4) else ();
+  BranchTo(target, branch_type)
+}
+
+val AuthDB : (bits(64), bits(64)) -> bits(64) effect {undef, escape, wreg, rreg}
+
+/* AuthDB: authenticate pointer X against modifier Y with data key B
+ * (APDBKey_EL1).  Structure mirrors AuthIB but the per-EL enable is
+ * SCTLR_ELx bit 13 and Auth is called with data = true. */
+function AuthDB (X, Y) = {
+  TrapEL2 : bool = undefined;
+  TrapEL3 : bool = undefined;
+  Enable : bits(1) = undefined;
+  APDBKey_EL1 : bits(128) = slice(APDBKeyHi_EL1, 0, 64) @ slice(APDBKeyLo_EL1, 0, 64);
+  match PSTATE.EL {
+    EL0 => {
+      IsEL1Regime : bool = (~(HaveEL(EL2)) | [HCR_EL2[27]] == 0b0) | [HCR_EL2[34]] == 0b0;
+      Enable = if IsEL1Regime then [SCTLR_EL1[13]] else [SCTLR_EL2[13]];
+      TrapEL2 = ((HaveEL(EL2) & IsEL1Regime) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+      TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+    },
+    EL1 => {
+      Enable = [SCTLR_EL1[13]];
+      TrapEL2 = (HaveEL(EL2) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+      TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+    },
+    EL2 => {
+      Enable = [SCTLR_EL2[13]];
+      TrapEL2 = false;
+      TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+    },
+    EL3 => {
+      Enable = [SCTLR_EL3[13]];
+      TrapEL2 = false;
+      TrapEL3 = false
+    }
+  };
+  /* The `undefined` after a trap is never reached. */
+  if Enable == 0b0 then return(X) else if TrapEL2 then {
+    TrapPACUse(EL2);
+    undefined
+  } else if TrapEL3 then {
+    TrapPACUse(EL3);
+    undefined
+  } else return(Auth(X, Y, APDBKey_EL1, true, 0b1))
+}
+
+val aarch64_integer_pac_autdb_dp_1src : (int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* AUTDB execute: authenticate X[d] with data key B, using SP or X[n] as
+ * the modifier.  NOTE(review): unlike the AUTIA/AUTIB hint forms there
+ * is no HavePACExt() guard here — presumably the decoder rejects these
+ * encodings without the PAC extension; confirm against the decode. */
+function aarch64_integer_pac_autdb_dp_1src ('d, 'n, source_is_sp) = if source_is_sp then aset_X(d, AuthDB(aget_X(d), aget_SP())) else aset_X(d, AuthDB(aget_X(d), aget_X(n)))
+
+val AuthDA : (bits(64), bits(64)) -> bits(64) effect {undef, escape, wreg, rreg}
+
+/* Authenticate pointer X under data key A with modifier Y (ARM pseudocode
+ * AuthDA).  Identical in structure to AuthDB but keyed by SCTLR_ELx bit 27
+ * and APDAKey_EL1, with key-select 0b0 passed to Auth(). */
+function AuthDA (X, Y) = {
+ TrapEL2 : bool = undefined;
+ TrapEL3 : bool = undefined;
+ Enable : bits(1) = undefined;
+ /* 128-bit key assembled as high half @ low half. */
+ APDAKey_EL1 : bits(128) = slice(APDAKeyHi_EL1, 0, 64) @ slice(APDAKeyLo_EL1, 0, 64);
+ match PSTATE.EL {
+ EL0 => {
+ IsEL1Regime : bool = (~(HaveEL(EL2)) | [HCR_EL2[27]] == 0b0) | [HCR_EL2[34]] == 0b0;
+ Enable = if IsEL1Regime then [SCTLR_EL1[27]] else [SCTLR_EL2[27]];
+ TrapEL2 = ((HaveEL(EL2) & IsEL1Regime) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL1 => {
+ Enable = [SCTLR_EL1[27]];
+ TrapEL2 = (HaveEL(EL2) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL2 => {
+ Enable = [SCTLR_EL2[27]];
+ TrapEL2 = false;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL3 => {
+ Enable = [SCTLR_EL3[27]];
+ TrapEL2 = false;
+ TrapEL3 = false
+ }
+ };
+ /* NOTE(review): TrapPACUse presumably raises the trap and does not return;
+ * the trailing `undefined` only satisfies the result type -- confirm. */
+ if Enable == 0b0 then return(X) else if TrapEL2 then {
+ TrapPACUse(EL2);
+ undefined
+ } else if TrapEL3 then {
+ TrapPACUse(EL3);
+ undefined
+ } else return(Auth(X, Y, APDAKey_EL1, true, 0b0))
+}
+
+val aarch64_integer_pac_autda_dp_1src : (int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* AUTDA/AUTDZA execute: authenticate X[d] under data key A, using SP or X[n]
+ * as the modifier, writing the result back to X[d]. */
+function aarch64_integer_pac_autda_dp_1src ('d, 'n, source_is_sp) = {
+ modifier : bits(64) = if source_is_sp then aget_SP() else aget_X(n);
+ aset_X(d, AuthDA(aget_X(d), modifier))
+}
+
+val AddPACIB : (bits(64), bits(64)) -> bits(64) effect {undef, escape, wreg, rreg}
+
+/* Add a pointer-authentication code to X under instruction key B with
+ * modifier Y (ARM pseudocode AddPACIB).  Enable bit is SCTLR_ELx bit 30;
+ * disabled returns X unchanged, trapped takes a PAC-use trap, otherwise
+ * AddPAC() is applied with APIBKey_EL1 (data = false). */
+function AddPACIB (X, Y) = {
+ TrapEL2 : bool = undefined;
+ TrapEL3 : bool = undefined;
+ Enable : bits(1) = undefined;
+ /* 128-bit key assembled as high half @ low half. */
+ APIBKey_EL1 : bits(128) = slice(APIBKeyHi_EL1, 0, 64) @ slice(APIBKeyLo_EL1, 0, 64);
+ match PSTATE.EL {
+ EL0 => {
+ IsEL1Regime : bool = (~(HaveEL(EL2)) | [HCR_EL2[27]] == 0b0) | [HCR_EL2[34]] == 0b0;
+ Enable = if IsEL1Regime then [SCTLR_EL1[30]] else [SCTLR_EL2[30]];
+ TrapEL2 = ((HaveEL(EL2) & IsEL1Regime) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL1 => {
+ Enable = [SCTLR_EL1[30]];
+ TrapEL2 = (HaveEL(EL2) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL2 => {
+ Enable = [SCTLR_EL2[30]];
+ TrapEL2 = false;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL3 => {
+ Enable = [SCTLR_EL3[30]];
+ TrapEL2 = false;
+ TrapEL3 = false
+ }
+ };
+ /* NOTE(review): TrapPACUse presumably raises the trap and does not return;
+ * the trailing `undefined` only satisfies the result type -- confirm. */
+ if Enable == 0b0 then return(X) else if TrapEL2 then {
+ TrapPACUse(EL2);
+ undefined
+ } else if TrapEL3 then {
+ TrapPACUse(EL3);
+ undefined
+ } else return(AddPAC(X, Y, APIBKey_EL1, false))
+}
+
+val aarch64_integer_pac_pacib_dp_1src : (int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* PACIB/PACIZB execute: when the PAC extension is present, sign X[d] under
+ * instruction key B with SP or X[n] as the modifier; otherwise a no-op. */
+function aarch64_integer_pac_pacib_dp_1src ('d, 'n, source_is_sp) = {
+ if HavePACExt() then {
+ modifier : bits(64) = if source_is_sp then aget_SP() else aget_X(n);
+ aset_X(d, AddPACIB(aget_X(d), modifier))
+ } else ()
+}
+
+val integer_pac_pacib_hint_decode : (bits(1), bits(2), bits(3), bits(4), bits(4), bits(3), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode the PACIBSP/PACIBZ/PACIB1716 encodings of the hint space (CRm:op2
+ * selects the variant) and dispatch to the PACIB execute function.  Encodings
+ * belonging to other instructions raise Error_See. */
+function integer_pac_pacib_hint_decode (L, op0, op1, CRn, CRm, op2, Rt) = {
+ __unconditional = true;
+ d : int = undefined;
+ n : int = undefined;
+ source_is_sp : bool = false;
+ match CRm @ op2 {
+ 0b0011010 => {
+ d = 30;
+ n = 31
+ },
+ 0b0011011 => {
+ d = 30;
+ source_is_sp = true
+ },
+ 0b0001010 => {
+ d = 17;
+ n = 16
+ },
+ 0b0001000 => throw(Error_See("PACIA")),
+ 0b0001100 => throw(Error_See("AUTIA")),
+ 0b0001110 => throw(Error_See("AUTIB")),
+ [bitzero] @ [bitzero] @ [bitone] @ [bitone] @ [bitzero] @ [bitzero] @ _ : bits(1) => throw(Error_See("PACIA")),
+ [bitzero] @ [bitzero] @ [bitone] @ [bitone] @ [bitone] @ [bitzero] @ _ : bits(1) => throw(Error_See("AUTIA")),
+ [bitzero] @ [bitzero] @ [bitone] @ [bitone] @ [bitone] @ [bitone] @ _ : bits(1) => throw(Error_See("AUTIB")),
+ 0b0000111 => throw(Error_See("XPACLRI")),
+ /* Default arm added: without it this match is non-exhaustive and a stray
+ * hint encoding would fail at match time instead of deferring to SEE
+ * "HINT", as the sibling integer_pac_autia_hint_decode does. */
+ _ => throw(Error_See("HINT"))
+ };
+ aarch64_integer_pac_pacib_dp_1src(d, n, source_is_sp)
+}
+
+val AddPACIA : (bits(64), bits(64)) -> bits(64) effect {undef, escape, wreg, rreg}
+
+/* Add a pointer-authentication code to X under instruction key A with
+ * modifier Y (ARM pseudocode AddPACIA).  Enable bit is SCTLR_ELx bit 31;
+ * disabled returns X unchanged, trapped takes a PAC-use trap, otherwise
+ * AddPAC() is applied with APIAKey_EL1 (data = false). */
+function AddPACIA (X, Y) = {
+ TrapEL2 : bool = undefined;
+ TrapEL3 : bool = undefined;
+ Enable : bits(1) = undefined;
+ /* 128-bit key assembled as high half @ low half. */
+ APIAKey_EL1 : bits(128) = slice(APIAKeyHi_EL1, 0, 64) @ slice(APIAKeyLo_EL1, 0, 64);
+ match PSTATE.EL {
+ EL0 => {
+ IsEL1Regime : bool = (~(HaveEL(EL2)) | [HCR_EL2[27]] == 0b0) | [HCR_EL2[34]] == 0b0;
+ Enable = if IsEL1Regime then [SCTLR_EL1[31]] else [SCTLR_EL2[31]];
+ TrapEL2 = ((HaveEL(EL2) & IsEL1Regime) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL1 => {
+ Enable = [SCTLR_EL1[31]];
+ TrapEL2 = (HaveEL(EL2) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL2 => {
+ Enable = [SCTLR_EL2[31]];
+ TrapEL2 = false;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL3 => {
+ Enable = [SCTLR_EL3[31]];
+ TrapEL2 = false;
+ TrapEL3 = false
+ }
+ };
+ /* NOTE(review): TrapPACUse presumably raises the trap and does not return;
+ * the trailing `undefined` only satisfies the result type -- confirm. */
+ if Enable == 0b0 then return(X) else if TrapEL2 then {
+ TrapPACUse(EL2);
+ undefined
+ } else if TrapEL3 then {
+ TrapPACUse(EL3);
+ undefined
+ } else return(AddPAC(X, Y, APIAKey_EL1, false))
+}
+
+val aarch64_integer_pac_pacia_dp_1src : (int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* PACIA/PACIZA execute: when the PAC extension is present, sign X[d] under
+ * instruction key A with SP or X[n] as the modifier; otherwise a no-op. */
+function aarch64_integer_pac_pacia_dp_1src ('d, 'n, source_is_sp) = {
+ if HavePACExt() then {
+ modifier : bits(64) = if source_is_sp then aget_SP() else aget_X(n);
+ aset_X(d, AddPACIA(aget_X(d), modifier))
+ } else ()
+}
+
+val integer_pac_pacia_hint_decode : (bits(1), bits(2), bits(3), bits(4), bits(4), bits(3), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode the PACIASP/PACIAZ/PACIA1716 encodings of the hint space (CRm:op2
+ * selects the variant) and dispatch to the PACIA execute function.  Encodings
+ * belonging to other instructions raise Error_See. */
+function integer_pac_pacia_hint_decode (L, op0, op1, CRn, CRm, op2, Rt) = {
+ __unconditional = true;
+ d : int = undefined;
+ n : int = undefined;
+ source_is_sp : bool = false;
+ match CRm @ op2 {
+ 0b0011000 => {
+ d = 30;
+ n = 31
+ },
+ 0b0011001 => {
+ d = 30;
+ source_is_sp = true
+ },
+ 0b0001000 => {
+ d = 17;
+ n = 16
+ },
+ 0b0001010 => throw(Error_See("PACIB")),
+ 0b0001100 => throw(Error_See("AUTIA")),
+ 0b0001110 => throw(Error_See("AUTIB")),
+ [bitzero] @ [bitzero] @ [bitone] @ [bitone] @ [bitzero] @ [bitone] @ _ : bits(1) => throw(Error_See("PACIB")),
+ [bitzero] @ [bitzero] @ [bitone] @ [bitone] @ [bitone] @ [bitzero] @ _ : bits(1) => throw(Error_See("AUTIA")),
+ [bitzero] @ [bitzero] @ [bitone] @ [bitone] @ [bitone] @ [bitone] @ _ : bits(1) => throw(Error_See("AUTIB")),
+ 0b0000111 => throw(Error_See("XPACLRI")),
+ /* Default arm added: without it this match is non-exhaustive and a stray
+ * hint encoding would fail at match time instead of deferring to SEE
+ * "HINT", as the sibling integer_pac_autia_hint_decode does. */
+ _ => throw(Error_See("HINT"))
+ };
+ aarch64_integer_pac_pacia_dp_1src(d, n, source_is_sp)
+}
+
+val AddPACGA : (bits(64), bits(64)) -> bits(64) effect {undef, escape, wreg, rreg}
+
+/* Compute a generic pointer-authentication code for X with modifier Y (ARM
+ * pseudocode AddPACGA): the top 32 bits of ComputePAC under APGAKey_EL1,
+ * zero-extended into the low half.  PACGA has no SCTLR enable bit; only the
+ * EL2/EL3 trap controls are consulted. */
+function AddPACGA (X, Y) = {
+ TrapEL2 : bool = undefined;
+ TrapEL3 : bool = undefined;
+ /* 128-bit key assembled as high half @ low half. */
+ APGAKey_EL1 : bits(128) = slice(APGAKeyHi_EL1, 0, 64) @ slice(APGAKeyLo_EL1, 0, 64);
+ match PSTATE.EL {
+ EL0 => {
+ IsEL1Regime : bool = (~(HaveEL(EL2)) | [HCR_EL2[27]] == 0b0) | [HCR_EL2[34]] == 0b0;
+ TrapEL2 = ((HaveEL(EL2) & IsEL1Regime) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+ /* Fixed: guard the SCR_EL3 read with HaveEL(EL3), matching the EL1 case
+ * below, every other PAC helper in this file, and the published
+ * AddPACGA pseudocode.  The original read SCR_EL3 unconditionally. */
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL1 => {
+ TrapEL2 = (HaveEL(EL2) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL2 => {
+ TrapEL2 = false;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL3 => {
+ TrapEL2 = false;
+ TrapEL3 = false
+ }
+ };
+ /* NOTE(review): TrapPACUse presumably raises the trap and does not return;
+ * the trailing `undefined` only satisfies the result type -- confirm. */
+ if TrapEL2 then {
+ TrapPACUse(EL2);
+ undefined
+ } else if TrapEL3 then {
+ TrapPACUse(EL3);
+ undefined
+ } else return(slice(ComputePAC(X, Y, slice(APGAKey_EL1, 64, 64), slice(APGAKey_EL1, 0, 64)), 32, 32) @ Zeros(32))
+}
+
+val aarch64_integer_pac_pacga_dp_2src : (int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* PACGA execute: write to X[d] the generic PAC of X[n], using SP or X[m]
+ * as the modifier. */
+function aarch64_integer_pac_pacga_dp_2src ('d, 'm, 'n, source_is_sp) = {
+ modifier : bits(64) = if source_is_sp then aget_SP() else aget_X(m);
+ aset_X(d, AddPACGA(aget_X(n), modifier))
+}
+
+val AddPACDB : (bits(64), bits(64)) -> bits(64) effect {undef, escape, wreg, rreg}
+
+/* Add a pointer-authentication code to X under data key B with modifier Y
+ * (ARM pseudocode AddPACDB).  Enable bit is SCTLR_ELx bit 13; disabled
+ * returns X unchanged, trapped takes a PAC-use trap, otherwise AddPAC() is
+ * applied with APDBKey_EL1 (data = true). */
+function AddPACDB (X, Y) = {
+ TrapEL2 : bool = undefined;
+ TrapEL3 : bool = undefined;
+ Enable : bits(1) = undefined;
+ /* 128-bit key assembled as high half @ low half. */
+ APDBKey_EL1 : bits(128) = slice(APDBKeyHi_EL1, 0, 64) @ slice(APDBKeyLo_EL1, 0, 64);
+ match PSTATE.EL {
+ EL0 => {
+ IsEL1Regime : bool = (~(HaveEL(EL2)) | [HCR_EL2[27]] == 0b0) | [HCR_EL2[34]] == 0b0;
+ Enable = if IsEL1Regime then [SCTLR_EL1[13]] else [SCTLR_EL2[13]];
+ TrapEL2 = ((HaveEL(EL2) & IsEL1Regime) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL1 => {
+ Enable = [SCTLR_EL1[13]];
+ TrapEL2 = (HaveEL(EL2) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL2 => {
+ Enable = [SCTLR_EL2[13]];
+ TrapEL2 = false;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL3 => {
+ Enable = [SCTLR_EL3[13]];
+ TrapEL2 = false;
+ TrapEL3 = false
+ }
+ };
+ /* NOTE(review): TrapPACUse presumably raises the trap and does not return;
+ * the trailing `undefined` only satisfies the result type -- confirm. */
+ if Enable == 0b0 then return(X) else if TrapEL2 then {
+ TrapPACUse(EL2);
+ undefined
+ } else if TrapEL3 then {
+ TrapPACUse(EL3);
+ undefined
+ } else return(AddPAC(X, Y, APDBKey_EL1, true))
+}
+
+val aarch64_integer_pac_pacdb_dp_1src : (int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* PACDB/PACDZB execute: sign X[d] under data key B, using SP or X[n] as the
+ * modifier, writing the result back to X[d]. */
+function aarch64_integer_pac_pacdb_dp_1src ('d, 'n, source_is_sp) = {
+ modifier : bits(64) = if source_is_sp then aget_SP() else aget_X(n);
+ aset_X(d, AddPACDB(aget_X(d), modifier))
+}
+
+val AddPACDA : (bits(64), bits(64)) -> bits(64) effect {undef, escape, wreg, rreg}
+
+/* Add a pointer-authentication code to X under data key A with modifier Y
+ * (ARM pseudocode AddPACDA).  Enable bit is SCTLR_ELx bit 27; disabled
+ * returns X unchanged, trapped takes a PAC-use trap, otherwise AddPAC() is
+ * applied with APDAKey_EL1 (data = true). */
+function AddPACDA (X, Y) = {
+ TrapEL2 : bool = undefined;
+ TrapEL3 : bool = undefined;
+ Enable : bits(1) = undefined;
+ /* 128-bit key assembled as high half @ low half. */
+ APDAKey_EL1 : bits(128) = slice(APDAKeyHi_EL1, 0, 64) @ slice(APDAKeyLo_EL1, 0, 64);
+ match PSTATE.EL {
+ EL0 => {
+ IsEL1Regime : bool = (~(HaveEL(EL2)) | [HCR_EL2[27]] == 0b0) | [HCR_EL2[34]] == 0b0;
+ Enable = if IsEL1Regime then [SCTLR_EL1[27]] else [SCTLR_EL2[27]];
+ TrapEL2 = ((HaveEL(EL2) & IsEL1Regime) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL1 => {
+ Enable = [SCTLR_EL1[27]];
+ TrapEL2 = (HaveEL(EL2) & ~(IsSecure())) & [HCR_EL2[41]] == 0b0;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL2 => {
+ Enable = [SCTLR_EL2[27]];
+ TrapEL2 = false;
+ TrapEL3 = HaveEL(EL3) & [SCR_EL3[17]] == 0b0
+ },
+ EL3 => {
+ Enable = [SCTLR_EL3[27]];
+ TrapEL2 = false;
+ TrapEL3 = false
+ }
+ };
+ /* NOTE(review): TrapPACUse presumably raises the trap and does not return;
+ * the trailing `undefined` only satisfies the result type -- confirm. */
+ if Enable == 0b0 then return(X) else if TrapEL2 then {
+ TrapPACUse(EL2);
+ undefined
+ } else if TrapEL3 then {
+ TrapPACUse(EL3);
+ undefined
+ } else return(AddPAC(X, Y, APDAKey_EL1, true))
+}
+
+val aarch64_integer_pac_pacda_dp_1src : (int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* PACDA/PACDZA execute: sign X[d] under data key A, using SP or X[n] as the
+ * modifier, writing the result back to X[d]. */
+function aarch64_integer_pac_pacda_dp_1src ('d, 'n, source_is_sp) = {
+ modifier : bits(64) = if source_is_sp then aget_SP() else aget_X(n);
+ aset_X(d, AddPACDA(aget_X(d), modifier))
+}
+
+val AArch64_WatchpointException : (bits(64), FaultRecord) -> unit effect {escape, rreg, undef, wreg}
+
+/* Take a watchpoint debug exception for vaddress.  Must not be called at EL3.
+ * Routed to EL2 when executing at EL2, or from EL0/EL1 in non-secure state
+ * with HCR_EL2.TGE or MDCR_EL2.TDE set; otherwise taken to EL1. */
+function AArch64_WatchpointException (vaddress, fault) = {
+ assert(PSTATE.EL != EL3, "((PSTATE).EL != EL3)");
+ route_to_el2 : bool = ((HaveEL(EL2) & ~(IsSecure())) & (PSTATE.EL == EL0 | PSTATE.EL == EL1)) & ([HCR_EL2[27]] == 0b1 | [MDCR_EL2[8]] == 0b1);
+ preferred_exception_return : bits(64) = ThisInstrAddr();
+ vect_offset : int = 0;
+ exception : ExceptionRecord = AArch64_AbortSyndrome(Exception_Watchpoint, fault, vaddress);
+ if PSTATE.EL == EL2 | route_to_el2 then AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset) else AArch64_TakeException(EL1, exception, preferred_exception_return, vect_offset)
+}
+
+val AArch64_WFxTrap : (bits(2), bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Take a trap on WFI (is_wfe = false) or WFE (is_wfe = true) to target_el,
+ * which must be strictly higher than the current EL.  The syndrome records
+ * the condition field and, in bit 0, whether the trapped instruction was WFE.
+ * An EL1 target re-routes to EL2 when HCR_EL2.TGE is set in non-secure state. */
+function AArch64_WFxTrap (target_el, is_wfe) = {
+ assert(UInt(target_el) > UInt(PSTATE.EL), "(UInt(target_el) > UInt((PSTATE).EL))");
+ preferred_exception_return : bits(64) = ThisInstrAddr();
+ vect_offset : int = 0;
+ exception : ExceptionRecord = ExceptionSyndrome(Exception_WFxTrap);
+ __tmp_272 : bits(25) = exception.syndrome;
+ __tmp_272 = __SetSlice_bits(25, 5, __tmp_272, 20, ConditionSyndrome());
+ exception.syndrome = __tmp_272;
+ __tmp_273 : bits(25) = exception.syndrome;
+ __tmp_273 = __SetSlice_bits(25, 1, __tmp_273, 0, if is_wfe then 0b1 else 0b0);
+ exception.syndrome = __tmp_273;
+ if ((target_el == EL1 & HaveEL(EL2)) & ~(IsSecure())) & [HCR_EL2[27]] == 0b1 then AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset) else AArch64_TakeException(target_el, exception, preferred_exception_return, vect_offset)
+}
+
+val AArch64_CheckForWFxTrap : (bits(2), bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Check whether target_el's controls trap WFE/WFI and, if so, take the trap.
+ * Note the polarity difference: the SCTLR bits at EL1 trap when 0 (nTWE/nTWI
+ * style), while the HCR_EL2/SCR_EL3 bits trap when 1. */
+function AArch64_CheckForWFxTrap (target_el, is_wfe) = {
+ assert(HaveEL(target_el), "HaveEL(target_el)");
+ trap : bool = undefined;
+ match target_el {
+ ? if ? == EL1 => trap = (if is_wfe then [aget_SCTLR()[18]] else [aget_SCTLR()[16]]) == 0b0,
+ ? if ? == EL2 => trap = (if is_wfe then [HCR_EL2[14]] else [HCR_EL2[13]]) == 0b1,
+ ? if ? == EL3 => trap = (if is_wfe then [SCR_EL3[13]] else [SCR_EL3[12]]) == 0b1
+ };
+ if trap then AArch64_WFxTrap(target_el, is_wfe) else ()
+}
+
+val aarch64_system_hints : SystemHintOp -> unit effect {escape, rreg, undef, wreg}
+
+/* Execute a system hint instruction.  WFE first consumes a pending event if
+ * one is set; otherwise both WFE and WFI check the WFx trap controls at each
+ * applicable higher EL before actually waiting.  Unrecognised hints (and NOP)
+ * fall through to the `_` arm and do nothing. */
+function aarch64_system_hints op = match op {
+ SystemHintOp_YIELD => Hint_Yield(),
+ SystemHintOp_WFE => if IsEventRegisterSet() then ClearEventRegister() else {
+ if PSTATE.EL == EL0 then AArch64_CheckForWFxTrap(EL1, true) else ();
+ if ((HaveEL(EL2) & ~(IsSecure())) & (PSTATE.EL == EL0 | PSTATE.EL == EL1)) & ~(IsInHost()) then AArch64_CheckForWFxTrap(EL2, true) else ();
+ if HaveEL(EL3) & PSTATE.EL != EL3 then AArch64_CheckForWFxTrap(EL3, true) else ();
+ WaitForEvent()
+ },
+ SystemHintOp_WFI => if ~(InterruptPending()) then {
+ if PSTATE.EL == EL0 then AArch64_CheckForWFxTrap(EL1, false) else ();
+ if ((HaveEL(EL2) & ~(IsSecure())) & (PSTATE.EL == EL0 | PSTATE.EL == EL1)) & ~(IsInHost()) then AArch64_CheckForWFxTrap(EL2, false) else ();
+ if HaveEL(EL3) & PSTATE.EL != EL3 then AArch64_CheckForWFxTrap(EL3, false) else ();
+ WaitForInterrupt()
+ } else (),
+ SystemHintOp_SEV => SendEvent(),
+ SystemHintOp_SEVL => SendEventLocal(),
+ SystemHintOp_ESB => {
+ ErrorSynchronizationBarrier(MBReqDomain_FullSystem, MBReqTypes_All);
+ AArch64_ESBOperation();
+ if (HaveEL(EL2) & ~(IsSecure())) & (PSTATE.EL == EL0 | PSTATE.EL == EL1) then AArch64_vESBOperation() else ();
+ TakeUnmaskedSErrorInterrupts()
+ },
+ SystemHintOp_PSB => ProfilingSynchronizationBarrier(),
+ _ => ()
+}
+
+val system_hints_decode : (bits(1), bits(2), bits(3), bits(4), bits(4), bits(3), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode the hint instruction space on CRm:op2.  PAC-related encodings defer
+ * to their own decoders via Error_See; ESB/PSB require their extensions or
+ * the instruction ends early; anything unallocated executes as NOP via the
+ * default EndOfInstruction() arm. */
+function system_hints_decode (L, op0, op1, CRn, CRm, op2, Rt) = {
+ __unconditional = true;
+ op : SystemHintOp = undefined;
+ match CRm @ op2 {
+ 0b0000000 => op = SystemHintOp_NOP,
+ 0b0000001 => op = SystemHintOp_YIELD,
+ 0b0000010 => op = SystemHintOp_WFE,
+ 0b0000011 => op = SystemHintOp_WFI,
+ 0b0000100 => op = SystemHintOp_SEV,
+ 0b0000101 => op = SystemHintOp_SEVL,
+ 0b0000111 => throw(Error_See("XPACLRI")),
+ [bitzero] @ [bitzero] @ [bitzero] @ [bitone] @ _ : bits(1) @ _ : bits(1) @ _ : bits(1) => throw(Error_See("PACIA1716, PACIB1716, AUTIA1716, AUTIB1716")),
+ 0b0010000 => {
+ if ~(HaveRASExt()) then EndOfInstruction() else ();
+ op = SystemHintOp_ESB
+ },
+ 0b0010001 => {
+ if ~(HaveStatisticalProfiling()) then EndOfInstruction() else ();
+ op = SystemHintOp_PSB
+ },
+ [bitzero] @ [bitzero] @ [bitone] @ [bitone] @ _ : bits(1) @ _ : bits(1) @ _ : bits(1) => throw(Error_See("PACIAZ, PACIASP, PACIBZ, PACIBSP, AUTIAZ, AUTIASP, AUTIBZ, AUTIBSP")),
+ _ => EndOfInstruction()
+ };
+ aarch64_system_hints(op)
+}
+
+val AArch64_VectorCatchException : FaultRecord -> unit effect {escape, rreg, undef, wreg}
+
+/* Take a vector-catch debug exception (AArch32 debug event routed to AArch64
+ * EL2).  Only legal outside EL3 and when EL2 exists, is non-secure, and
+ * HCR_EL2.TGE or MDCR_EL2.TDE is set.  The faulting address is UNKNOWN. */
+function AArch64_VectorCatchException fault = {
+ assert(PSTATE.EL != EL2, "((PSTATE).EL != EL2)");
+ assert((HaveEL(EL2) & ~(IsSecure())) & ([HCR_EL2[27]] == 0b1 | [MDCR_EL2[8]] == 0b1), "((HaveEL(EL2) && !(IsSecure())) && (((HCR_EL2).TGE == '1') || ((MDCR_EL2).TDE == '1')))");
+ preferred_exception_return : bits(64) = ThisInstrAddr();
+ vect_offset : int = 0;
+ vaddress : bits(64) = undefined;
+ exception : ExceptionRecord = AArch64_AbortSyndrome(Exception_VectorCatch, fault, vaddress);
+ AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset)
+}
+
+val AArch64_UndefinedFault : unit -> unit effect {escape, rreg, undef, wreg}
+
+/* Take an Undefined Instruction exception (uncategorized syndrome).  Taken at
+ * the current EL when above EL1, re-routed to EL2 from non-secure EL0 when
+ * HCR_EL2.TGE is set, otherwise taken to EL1. */
+function AArch64_UndefinedFault () = {
+ route_to_el2 : bool = ((HaveEL(EL2) & ~(IsSecure())) & PSTATE.EL == EL0) & [HCR_EL2[27]] == 0b1;
+ preferred_exception_return : bits(64) = ThisInstrAddr();
+ vect_offset : int = 0;
+ exception : ExceptionRecord = ExceptionSyndrome(Exception_Uncategorized);
+ if UInt(PSTATE.EL) > UInt(EL1) then AArch64_TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset) else if route_to_el2 then AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset) else AArch64_TakeException(EL1, exception, preferred_exception_return, vect_offset)
+}
+
+val AArch64_SystemRegisterTrap : (bits(2), bits(2), bits(3), bits(3), bits(4), bits(5), bits(4), bits(1)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Take a trap on an MSR/MRS/system instruction to target_el (>= current EL).
+ * The sequence of __SetSlice_bits calls packs the ISS fields of the syndrome:
+ * op0 at [21:20], op2 at [19:17], op1 at [16:14], CRn at [13:10], Rt at
+ * [9:5], CRm at [4:1] and the read/write direction at bit 0.  An EL1 target
+ * re-routes to EL2 when HCR_EL2.TGE is set in non-secure state. */
+function AArch64_SystemRegisterTrap (target_el, op0, op2, op1, crn, rt, crm, dir) = {
+ assert(UInt(target_el) >= UInt(PSTATE.EL), "(UInt(target_el) >= UInt((PSTATE).EL))");
+ preferred_exception_return : bits(64) = ThisInstrAddr();
+ vect_offset : int = 0;
+ exception : ExceptionRecord = ExceptionSyndrome(Exception_SystemRegisterTrap);
+ __tmp_280 : bits(25) = exception.syndrome;
+ __tmp_280 = __SetSlice_bits(25, 2, __tmp_280, 20, op0);
+ exception.syndrome = __tmp_280;
+ __tmp_281 : bits(25) = exception.syndrome;
+ __tmp_281 = __SetSlice_bits(25, 3, __tmp_281, 17, op2);
+ exception.syndrome = __tmp_281;
+ __tmp_282 : bits(25) = exception.syndrome;
+ __tmp_282 = __SetSlice_bits(25, 3, __tmp_282, 14, op1);
+ exception.syndrome = __tmp_282;
+ __tmp_283 : bits(25) = exception.syndrome;
+ __tmp_283 = __SetSlice_bits(25, 4, __tmp_283, 10, crn);
+ exception.syndrome = __tmp_283;
+ __tmp_284 : bits(25) = exception.syndrome;
+ __tmp_284 = __SetSlice_bits(25, 5, __tmp_284, 5, rt);
+ exception.syndrome = __tmp_284;
+ __tmp_285 : bits(25) = exception.syndrome;
+ __tmp_285 = __SetSlice_bits(25, 4, __tmp_285, 1, crm);
+ exception.syndrome = __tmp_285;
+ __tmp_286 : bits(25) = exception.syndrome;
+ __tmp_286 = __SetSlice_bits(25, 1, __tmp_286, 0, dir);
+ exception.syndrome = __tmp_286;
+ if ((target_el == EL1 & HaveEL(EL2)) & ~(IsSecure())) & [HCR_EL2[27]] == 0b1 then AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset) else AArch64_TakeException(target_el, exception, preferred_exception_return, vect_offset)
+}
+
+val AArch64_SoftwareBreakpoint : bits(16) -> unit effect {escape, rreg, undef, wreg}
+
+/* Take a software breakpoint (BRK) exception carrying the 16-bit immediate in
+ * the low syndrome bits.  Taken at the current EL when above EL1, re-routed to
+ * EL2 from EL0/EL1 under HCR_EL2.TGE or MDCR_EL2.TDE, otherwise to EL1. */
+function AArch64_SoftwareBreakpoint immediate = {
+ route_to_el2 : bool = ((HaveEL(EL2) & ~(IsSecure())) & (PSTATE.EL == EL0 | PSTATE.EL == EL1)) & ([HCR_EL2[27]] == 0b1 | [MDCR_EL2[8]] == 0b1);
+ preferred_exception_return : bits(64) = ThisInstrAddr();
+ vect_offset : int = 0;
+ exception : ExceptionRecord = ExceptionSyndrome(Exception_SoftwareBreakpoint);
+ __tmp_271 : bits(25) = exception.syndrome;
+ __tmp_271 = __SetSlice_bits(25, 16, __tmp_271, 0, immediate);
+ exception.syndrome = __tmp_271;
+ if UInt(PSTATE.EL) > UInt(EL1) then AArch64_TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset) else if route_to_el2 then AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset) else AArch64_TakeException(EL1, exception, preferred_exception_return, vect_offset)
+}
+
+val aarch64_system_exceptions_debug_breakpoint : bits(16) -> unit effect {escape, rreg, undef, wreg}
+
+/* BRK execute: forwards the 16-bit comment field to the software-breakpoint
+ * exception machinery. */
+function aarch64_system_exceptions_debug_breakpoint comment = AArch64_SoftwareBreakpoint(comment)
+
+val system_exceptions_debug_breakpoint_decode : (bits(3), bits(16), bits(3), bits(2)) -> unit effect {escape, rreg, undef, wreg}
+
+/* BRK decode: only imm16 is used; opc/op2/LL are fixed by the encoding and
+ * ignored here. */
+function system_exceptions_debug_breakpoint_decode (opc, imm16, op2, LL) = {
+ __unconditional = true;
+ comment : bits(16) = imm16;
+ aarch64_system_exceptions_debug_breakpoint(comment)
+}
+
+val AArch64_SPAlignmentFault : unit -> unit effect {escape, rreg, undef, wreg}
+
+/* Take a stack-pointer alignment fault.  Taken at the current EL when above
+ * EL1, re-routed to EL2 in non-secure state when HCR_EL2.TGE is set,
+ * otherwise taken to EL1. */
+function AArch64_SPAlignmentFault () = {
+ preferred_exception_return : bits(64) = ThisInstrAddr();
+ vect_offset : int = 0;
+ exception : ExceptionRecord = ExceptionSyndrome(Exception_SPAlignment);
+ if UInt(PSTATE.EL) > UInt(EL1) then AArch64_TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset) else if (HaveEL(EL2) & ~(IsSecure())) & [HCR_EL2[27]] == 0b1 then AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset) else AArch64_TakeException(EL1, exception, preferred_exception_return, vect_offset)
+}
+
+val CheckSPAlignment : unit -> unit effect {escape, rreg, undef, wreg}
+
+/* Check that SP is 16-byte aligned when the relevant SCTLR control demands
+ * it, faulting otherwise.  Bit 4 is consulted at EL0 and bit 3 at higher
+ * ELs (presumably SCTLR.SA0 and SCTLR.SA -- confirm against the register
+ * definition). */
+function CheckSPAlignment () = {
+ sp : bits(64) = aget_SP();
+ stack_align_check : bool = undefined;
+ if PSTATE.EL == EL0 then stack_align_check = [aget_SCTLR()[4]] != 0b0 else stack_align_check = [aget_SCTLR()[3]] != 0b0;
+ if stack_align_check & sp != Align(sp, 16) then AArch64_SPAlignmentFault() else ();
+ ()
+}
+
+val AArch64_InstructionAbort : (bits(64), FaultRecord) -> unit effect {escape, rreg, undef, wreg}
+
+/* Take an instruction abort (prefetch abort) for vaddress.  External aborts
+ * may route to EL3 via SCR_EL3 bit 3; EL0/EL1 faults route to EL2 under
+ * HCR_EL2.TGE, stage-2 faults, or (with RAS) HCR_EL2 bit 37 for external
+ * aborts; otherwise the abort is taken to EL1. */
+function AArch64_InstructionAbort (vaddress, fault) = {
+ route_to_el3 : bool = (HaveEL(EL3) & [SCR_EL3[3]] == 0b1) & IsExternalAbort(fault);
+ route_to_el2 : bool = ((HaveEL(EL2) & ~(IsSecure())) & (PSTATE.EL == EL0 | PSTATE.EL == EL1)) & (([HCR_EL2[27]] == 0b1 | IsSecondStage(fault)) | (HaveRASExt() & [HCR_EL2[37]] == 0b1) & IsExternalAbort(fault));
+ preferred_exception_return : bits(64) = ThisInstrAddr();
+ vect_offset : int = 0;
+ exception : ExceptionRecord = AArch64_AbortSyndrome(Exception_InstructionAbort, fault, vaddress);
+ if PSTATE.EL == EL3 | route_to_el3 then AArch64_TakeException(EL3, exception, preferred_exception_return, vect_offset) else if PSTATE.EL == EL2 | route_to_el2 then AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset) else AArch64_TakeException(EL1, exception, preferred_exception_return, vect_offset)
+}
+
+val AArch64_DataAbort : (bits(64), FaultRecord) -> unit effect {escape, rreg, undef, wreg}
+
+/* Take a data abort for vaddress.  Routing mirrors AArch64_InstructionAbort:
+ * external aborts may go to EL3 via SCR_EL3 bit 3; EL0/EL1 faults go to EL2
+ * under HCR_EL2.TGE, stage-2 faults, or (with RAS) HCR_EL2 bit 37 for
+ * external aborts; otherwise the abort is taken to EL1. */
+function AArch64_DataAbort (vaddress, fault) = {
+ route_to_el3 : bool = (HaveEL(EL3) & [SCR_EL3[3]] == 0b1) & IsExternalAbort(fault);
+ route_to_el2 : bool = ((HaveEL(EL2) & ~(IsSecure())) & (PSTATE.EL == EL0 | PSTATE.EL == EL1)) & (([HCR_EL2[27]] == 0b1 | IsSecondStage(fault)) | (HaveRASExt() & [HCR_EL2[37]] == 0b1) & IsExternalAbort(fault));
+ preferred_exception_return : bits(64) = ThisInstrAddr();
+ vect_offset : int = 0;
+ exception : ExceptionRecord = AArch64_AbortSyndrome(Exception_DataAbort, fault, vaddress);
+ if PSTATE.EL == EL3 | route_to_el3 then AArch64_TakeException(EL3, exception, preferred_exception_return, vect_offset) else if PSTATE.EL == EL2 | route_to_el2 then AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset) else AArch64_TakeException(EL1, exception, preferred_exception_return, vect_offset)
+}
+
+val AArch64_CheckForERetTrap : (bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Check for the nested-virtualization trap on ERET/ERETAA/ERETAB: trapped to
+ * EL2 when the NV extension is present, EL2 exists, the state is non-secure,
+ * the current EL is EL1 and HCR_EL2 bit 42 is set.  Syndrome bit 1 records
+ * whether the ERET used PAC, bit 0 which key (0 = A, 1 = B); bits [24:2] are
+ * zeroed. */
+function AArch64_CheckForERetTrap (eret_with_pac, pac_uses_key_a) = {
+ route_to_el2 : bool = (((HaveNVExt() & HaveEL(EL2)) & ~(IsSecure())) & PSTATE.EL == EL1) & [HCR_EL2[42]] == 0b1;
+ vect_offset : int = undefined;
+ if route_to_el2 then {
+ exception : ExceptionRecord = undefined;
+ preferred_exception_return : bits(64) = ThisInstrAddr();
+ vect_offset = 0;
+ exception = ExceptionSyndrome(Exception_ERetTrap);
+ __tmp_255 : bits(25) = exception.syndrome;
+ __tmp_255 = __SetSlice_bits(25, 23, __tmp_255, 2, ZeroExtend(0b0, 23));
+ exception.syndrome = __tmp_255;
+ if ~(eret_with_pac) then {
+ __tmp_256 : bits(25) = exception.syndrome;
+ __tmp_256 = __SetSlice_bits(25, 1, __tmp_256, 1, 0b0);
+ exception.syndrome = __tmp_256;
+ __tmp_257 : bits(25) = exception.syndrome;
+ __tmp_257 = __SetSlice_bits(25, 1, __tmp_257, 0, 0b0);
+ exception.syndrome = __tmp_257
+ } else {
+ __tmp_258 : bits(25) = exception.syndrome;
+ __tmp_258 = __SetSlice_bits(25, 1, __tmp_258, 1, 0b1);
+ exception.syndrome = __tmp_258;
+ if pac_uses_key_a then {
+ __tmp_259 : bits(25) = exception.syndrome;
+ __tmp_259 = __SetSlice_bits(25, 1, __tmp_259, 0, 0b0);
+ exception.syndrome = __tmp_259
+ } else {
+ __tmp_260 : bits(25) = exception.syndrome;
+ __tmp_260 = __SetSlice_bits(25, 1, __tmp_260, 0, 0b1);
+ exception.syndrome = __tmp_260
+ }
+ };
+ AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset)
+ } else ()
+}
+
+val AArch64_CallSupervisor : bits(16) -> unit effect {escape, rreg, undef, wreg}
+
+/* Take an SVC exception with the 16-bit immediate in the syndrome.  The
+ * preferred return address is the NEXT instruction (unlike faults, which
+ * return to the current one).  Routed to EL2 from non-secure EL0 under
+ * HCR_EL2.TGE, otherwise to the current EL (if above EL1) or EL1. */
+function AArch64_CallSupervisor immediate = {
+ if UsingAArch32() then AArch32_ITAdvance() else ();
+ SSAdvance();
+ route_to_el2 : bool = ((HaveEL(EL2) & ~(IsSecure())) & PSTATE.EL == EL0) & [HCR_EL2[27]] == 0b1;
+ preferred_exception_return : bits(64) = NextInstrAddr();
+ vect_offset : int = 0;
+ exception : ExceptionRecord = ExceptionSyndrome(Exception_SupervisorCall);
+ __tmp_277 : bits(25) = exception.syndrome;
+ __tmp_277 = __SetSlice_bits(25, 16, __tmp_277, 0, immediate);
+ exception.syndrome = __tmp_277;
+ if UInt(PSTATE.EL) > UInt(EL1) then AArch64_TakeException(PSTATE.EL, exception, preferred_exception_return, vect_offset) else if route_to_el2 then AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset) else AArch64_TakeException(EL1, exception, preferred_exception_return, vect_offset)
+}
+
+val aarch64_system_exceptions_runtime_svc : bits(16) -> unit effect {escape, rreg, undef, wreg}
+
+/* SVC execute: forwards the immediate to the supervisor-call machinery. */
+function aarch64_system_exceptions_runtime_svc imm = AArch64_CallSupervisor(imm)
+
+val system_exceptions_runtime_svc_decode : (bits(3), bits(16), bits(3), bits(2)) -> unit effect {escape, rreg, undef, wreg}
+
+/* SVC decode: only imm16 is used; opc/op2/LL are fixed by the encoding and
+ * ignored here. */
+function system_exceptions_runtime_svc_decode (opc, imm16, op2, LL) = {
+ __unconditional = true;
+ imm : bits(16) = imm16;
+ aarch64_system_exceptions_runtime_svc(imm)
+}
+
+val AArch64_CallSecureMonitor : bits(16) -> unit effect {escape, rreg, undef, wreg}
+
+/* Take an SMC exception to EL3 (which must exist and be using AArch64) with
+ * the 16-bit immediate in the syndrome.  Preferred return address is the
+ * next instruction. */
+function AArch64_CallSecureMonitor immediate = {
+ assert(HaveEL(EL3) & ~(ELUsingAArch32(EL3)), "(HaveEL(EL3) && !(ELUsingAArch32(EL3)))");
+ if UsingAArch32() then AArch32_ITAdvance() else ();
+ SSAdvance();
+ preferred_exception_return : bits(64) = NextInstrAddr();
+ vect_offset : int = 0;
+ exception : ExceptionRecord = ExceptionSyndrome(Exception_MonitorCall);
+ __tmp_293 : bits(25) = exception.syndrome;
+ __tmp_293 = __SetSlice_bits(25, 16, __tmp_293, 0, immediate);
+ exception.syndrome = __tmp_293;
+ AArch64_TakeException(EL3, exception, preferred_exception_return, vect_offset)
+}
+
+val AArch64_CallHypervisor : bits(16) -> unit effect {escape, rreg, undef, wreg}
+
+/* Take an HVC exception with the 16-bit immediate in the syndrome.  Taken to
+ * EL3 when already at EL3, otherwise to EL2 (which must exist).  Preferred
+ * return address is the next instruction. */
+function AArch64_CallHypervisor immediate = {
+ assert(HaveEL(EL2), "HaveEL(EL2)");
+ if UsingAArch32() then AArch32_ITAdvance() else ();
+ SSAdvance();
+ preferred_exception_return : bits(64) = NextInstrAddr();
+ vect_offset : int = 0;
+ exception : ExceptionRecord = ExceptionSyndrome(Exception_HypervisorCall);
+ __tmp_289 : bits(25) = exception.syndrome;
+ __tmp_289 = __SetSlice_bits(25, 16, __tmp_289, 0, immediate);
+ exception.syndrome = __tmp_289;
+ if PSTATE.EL == EL3 then AArch64_TakeException(EL3, exception, preferred_exception_return, vect_offset) else AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset)
+}
+
+val AArch64_BreakpointException : FaultRecord -> unit effect {escape, rreg, undef, wreg}
+
+/* Take a hardware breakpoint debug exception.  Must not be called at EL3.
+ * Routed to EL2 when executing at EL2, or from EL0/EL1 in non-secure state
+ * with HCR_EL2.TGE or MDCR_EL2.TDE set; otherwise taken to EL1.  The
+ * faulting address in the syndrome is UNKNOWN. */
+function AArch64_BreakpointException fault = {
+ assert(PSTATE.EL != EL3, "((PSTATE).EL != EL3)");
+ route_to_el2 : bool = ((HaveEL(EL2) & ~(IsSecure())) & (PSTATE.EL == EL0 | PSTATE.EL == EL1)) & ([HCR_EL2[27]] == 0b1 | [MDCR_EL2[8]] == 0b1);
+ preferred_exception_return : bits(64) = ThisInstrAddr();
+ vect_offset : int = 0;
+ vaddress : bits(64) = undefined;
+ exception : ExceptionRecord = AArch64_AbortSyndrome(Exception_Breakpoint, fault, vaddress);
+ if PSTATE.EL == EL2 | route_to_el2 then AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset) else AArch64_TakeException(EL1, exception, preferred_exception_return, vect_offset)
+}
+
+val AArch64_Abort : (bits(64), FaultRecord) -> unit effect {escape, rreg, undef, wreg}
+
+/* Dispatch an abort to the right exception-taking routine: debug exceptions
+ * become vector-catch (AArch32 only), breakpoint, or watchpoint exceptions
+ * depending on access type; non-debug faults become instruction or data
+ * aborts depending on whether the access was an instruction fetch. */
+function AArch64_Abort (vaddress, fault) = if IsDebugException(fault) then if fault.acctype == AccType_IFETCH then if UsingAArch32() & fault.debugmoe == DebugException_VectorCatch then AArch64_VectorCatchException(fault) else AArch64_BreakpointException(fault) else AArch64_WatchpointException(vaddress, fault) else if fault.acctype == AccType_IFETCH then AArch64_InstructionAbort(vaddress, fault) else AArch64_DataAbort(vaddress, fault)
+
+val AArch64_CheckAlignment : (bits(64), int, AccType, bool) -> bool effect {escape, rreg, undef, wreg}
+
+/* Alignment check for a data access at `address` with the given byte
+ * alignment.  Atomic and ordered accesses are always checked; other accesses
+ * only when SCTLR bit 1 (the A alignment-check enable) is set.  A failed
+ * check takes an alignment-fault data abort; otherwise the function returns
+ * whether the address was aligned. */
+function AArch64_CheckAlignment (address, 'alignment, acctype, iswrite) = {
+ aligned : bool = address == Align(address, alignment);
+ atomic : bool = acctype == AccType_ATOMIC | acctype == AccType_ATOMICRW;
+ ordered : bool = acctype == AccType_ORDERED | acctype == AccType_ORDEREDRW | acctype == AccType_LIMITEDORDERED;
+ /* The generated original also bound `vector_name = acctype == AccType_VEC`,
+ * which was never read; the dead (pure) binding has been dropped. */
+ check : bool = (atomic | ordered) | [aget_SCTLR()[1]] == 0b1;
+ secondstage : bool = undefined;
+ if check & ~(aligned) then {
+ secondstage = false;
+ AArch64_Abort(address, AArch64_AlignmentFault(acctype, iswrite, secondstage))
+ } else ();
+ return(aligned)
+}
+
+val AArch32_EnterMode : (bits(5), bits(32), int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* Enter an AArch32 exception mode: save the CPSR to the target mode's SPSR,
+ * write the return address (plus lr_offset) to R14, update PSTATE masks and
+ * state bits from SCTLR, and branch to the exception vector at
+ * ExcVectorBase() + vect_offset.  EL1 must be using AArch32 and the current
+ * EL must not be EL2. */
+function AArch32_EnterMode (target_mode, preferred_exception_return, 'lr_offset, 'vect_offset) = {
+ SynchronizeContext();
+ assert(ELUsingAArch32(EL1) & PSTATE.EL != EL2, "(ELUsingAArch32(EL1) && ((PSTATE).EL != EL2))");
+ spsr : bits(32) = GetPSRFromPSTATE();
+ /* Leaving Monitor mode clears SCR bit 0 (NS). */
+ if PSTATE.M == M32_Monitor then SCR = __SetSlice_bits(32, 1, SCR, 0, 0b0) else ();
+ AArch32_WriteMode(target_mode);
+ aset_SPSR(spsr);
+ aset_R(14, preferred_exception_return + lr_offset);
+ PSTATE.T = [SCTLR[30]];
+ PSTATE.SS = 0b0;
+ /* FIQ entry masks A, I and F; Abort/IRQ entry masks A and I; others just I. */
+ if target_mode == M32_FIQ then (PSTATE.A @ PSTATE.I @ PSTATE.F) = 0b111 else if target_mode == M32_Abort | target_mode == M32_IRQ then (PSTATE.A @ PSTATE.I) = 0b11 else PSTATE.I = 0b1;
+ PSTATE.E = [SCTLR[25]];
+ PSTATE.IL = 0b0;
+ PSTATE.IT = 0x00;
+ if HavePANExt() & [SCTLR[23]] == 0b0 then PSTATE.PAN = 0b1 else ();
+ BranchTo(slice(ExcVectorBase(), 5, 27) @ __GetSlice_int(5, vect_offset, 0), BranchType_UNKNOWN);
+ EndOfInstruction()
+}
+
+val AArch64_AdvSIMDFPAccessTrap : bits(2) -> unit effect {escape, rreg, undef, wreg}
+
+/* Take a trap on an AdvSIMD/FP access to target_el.  An EL1 target re-routed
+ * to EL2 (non-secure, HCR_EL2.TGE set) gets an uncategorized syndrome;
+ * otherwise the FP-access-trap syndrome carries the condition field. */
+function AArch64_AdvSIMDFPAccessTrap target_el = {
+ preferred_exception_return : bits(64) = ThisInstrAddr();
+ vect_offset : int = 0;
+ route_to_el2 : bool = ((target_el == EL1 & HaveEL(EL2)) & ~(IsSecure())) & [HCR_EL2[27]] == 0b1;
+ exception : ExceptionRecord = undefined;
+ if route_to_el2 then {
+ exception = ExceptionSyndrome(Exception_Uncategorized);
+ AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset)
+ } else {
+ exception = ExceptionSyndrome(Exception_AdvSIMDFPAccessTrap);
+ __tmp_261 : bits(25) = exception.syndrome;
+ __tmp_261 = __SetSlice_bits(25, 5, __tmp_261, 20, ConditionSyndrome());
+ exception.syndrome = __tmp_261;
+ AArch64_TakeException(target_el, exception, preferred_exception_return, vect_offset)
+ };
+ ()
+}
+
+val AArch64_CheckFPAdvSIMDTrap : unit -> unit effect {escape, rreg, undef, wreg}
+
+/* Check the EL2 and EL3 controls that can trap AdvSIMD/FP use.  Under VHE
+ * (E2H set) the two-bit field at CPTR_EL2[21:20] is decoded like CPACR.FPEN;
+ * otherwise CPTR_EL2 bit 10 traps.  CPTR_EL3 bit 10 traps to EL3. */
+function AArch64_CheckFPAdvSIMDTrap () = {
+ disabled : bool = undefined;
+ if HaveEL(EL2) & ~(IsSecure()) then if HaveVirtHostExt() & [HCR_EL2[34]] == 0b1 then {
+ /* x0 => always disabled (unless at EL1 under TGE); 01 => disabled at EL0;
+ * 11 => enabled. */
+ match slice(CPTR_EL2, 20, 2) {
+ _ : bits(1) @ [bitzero] => disabled = ~(PSTATE.EL == EL1 & [HCR_EL2[27]] == 0b1),
+ 0b01 => disabled = PSTATE.EL == EL0 & [HCR_EL2[27]] == 0b1,
+ 0b11 => disabled = false
+ };
+ if disabled then AArch64_AdvSIMDFPAccessTrap(EL2) else ()
+ } else if [CPTR_EL2[10]] == 0b1 then AArch64_AdvSIMDFPAccessTrap(EL2) else () else ();
+ if HaveEL(EL3) then if [CPTR_EL3[10]] == 0b1 then AArch64_AdvSIMDFPAccessTrap(EL3) else () else ();
+ ()
+}
+
+val AArch64_CheckFPAdvSIMDEnabled : unit -> unit effect {escape, rreg, undef, wreg}
+
+function AArch64_CheckFPAdvSIMDEnabled () = {
+ disabled : bool = undefined;
+ if PSTATE.EL == EL0 | PSTATE.EL == EL1 then {
+ match slice(aget_CPACR(), 20, 2) {
+ _ : bits(1) @ [bitzero] => disabled = true,
+ 0b01 => disabled = PSTATE.EL == EL0,
+ 0b11 => disabled = false
+ };
+ if disabled then AArch64_AdvSIMDFPAccessTrap(EL1) else ()
+ } else ();
+ AArch64_CheckFPAdvSIMDTrap()
+}
+
+val CheckFPAdvSIMDEnabled64 : unit -> unit effect {escape, rreg, undef, wreg}
+
+function CheckFPAdvSIMDEnabled64 () = AArch64_CheckFPAdvSIMDEnabled()
+
+/* aarch64_vector_transfer_vector_table: table lookup (TBL/TBX family).
+   Concatenates 'regs' consecutive 128-bit V registers starting at n
+   (wrapping mod 32) into 'table', then for each byte of the index vector
+   V[m] selects table byte 'index' when index < 16*regs.  Out-of-range
+   indices leave the result element as initialised: zero when is_tbl
+   (TBL), the old V[d] byte otherwise (TBX). */
+val aarch64_vector_transfer_vector_table : (int, int, int, bool, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_transfer_vector_table ('d, 'datasize, 'elements, is_tbl, 'm, n__arg, 'regs) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ assert(constraint('regs >= 1 & 'elements >= 1));
+ n : int = n__arg;
+ CheckFPAdvSIMDEnabled64();
+ indices : bits('datasize) = aget_V(m);
+ table : bits(128 * 'regs) = Zeros(128 * regs);
+ result : bits('datasize) = undefined;
+ index : int = undefined;
+ i : int = undefined;
+ /* gather the source registers into one flat table, wrapping at V31 */
+ foreach (i from 0 to (regs - 1) by 1 in inc) {
+ table = __SetSlice_bits(128 * regs, 128, table, 128 * i, aget_V(n));
+ n = (n + 1) % 32
+ };
+ result = if is_tbl then Zeros() else aget_V(d);
+ foreach (i from 0 to (elements - 1) by 1 in inc) {
+ index = UInt(aget_Elem(indices, i, 8));
+ if index < 16 * regs then
+ result = aset_Elem(result, i, 8, aget_Elem(table, index, 8))
+ else ()
+ };
+ aset_V(d, result)
+}
+
+/* vector_transfer_vector_table_decode: decode the table-lookup encoding.
+   Q selects 64- or 128-bit datasize, len+1 gives the number of table
+   registers, op distinguishes TBL (op == 0) from TBX.  op2 is unused by
+   this generated decoder. */
+val vector_transfer_vector_table_decode : (bits(1), bits(2), bits(5), bits(2), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_transfer_vector_table_decode (Q, op2, Rm, len, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / 8;
+ regs : int = UInt(len) + 1;
+ is_tbl : bool = op == 0b0;
+ aarch64_vector_transfer_vector_table(d, datasize, elements, is_tbl, m, n, regs)
+}
+
+/* aarch64_vector_transfer_vector_permute_zip: vector interleave
+   (ZIP1/ZIP2 family; part selects the low or high half).  Pairs element
+   base+p of operand1 with element base+p of operand2, writing them to
+   result elements 2p and 2p+1. */
+val aarch64_vector_transfer_vector_permute_zip : (int, int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_transfer_vector_permute_zip ('d, 'datasize, 'esize, 'm, 'n, 'pairs, 'part) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ base : int = part * pairs;
+ p : int = undefined;
+ foreach (p from 0 to (pairs - 1) by 1 in inc) {
+ result = aset_Elem(result, 2 * p + 0, esize, aget_Elem(operand1, base + p, esize));
+ result = aset_Elem(result, 2 * p + 1, esize, aget_Elem(operand2, base + p, esize))
+ };
+ aset_V(d, result)
+}
+
+/* aarch64_vector_transfer_vector_permute_unzip: vector de-interleave
+   (UZP1/UZP2 family).  Concatenates V[m]:V[n] and takes every second
+   element starting at 'part' (0 = even elements, 1 = odd). */
+val aarch64_vector_transfer_vector_permute_unzip : (int, int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_transfer_vector_permute_unzip ('d, 'datasize, 'elements, 'esize, 'm, 'n, 'part) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operandl : bits('datasize) = aget_V(n);
+ operandh : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ e : int = undefined;
+ zipped : bits(2 * 'datasize) = operandh @ operandl;
+ foreach (e from 0 to (elements - 1) by 1 in inc)
+ result = aset_Elem(result, e, esize, aget_Elem(zipped, 2 * e + part, esize));
+ aset_V(d, result)
+}
+
+/* aarch64_vector_transfer_vector_permute_transpose: vector transpose
+   (TRN1/TRN2 family).  For each pair p, result elements 2p and 2p+1 are
+   taken from position 2p+part of operand1 and operand2 respectively. */
+val aarch64_vector_transfer_vector_permute_transpose : (int, int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_transfer_vector_permute_transpose ('d, 'datasize, 'esize, 'm, 'n, 'pairs, 'part) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ p : int = undefined;
+ foreach (p from 0 to (pairs - 1) by 1 in inc) {
+ result = aset_Elem(result, 2 * p + 0, esize, aget_Elem(operand1, 2 * p + part, esize));
+ result = aset_Elem(result, 2 * p + 1, esize, aget_Elem(operand2, 2 * p + part, esize))
+ };
+ aset_V(d, result)
+}
+
+/* aarch64_vector_transfer_vector_insert: element-to-element insert
+   (INS element form): copies element src_index of V[n] (viewed as
+   idxdsize bits) into element dst_index of V[d], preserving the other
+   bits of the 128-bit destination. */
+val aarch64_vector_transfer_vector_insert : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_transfer_vector_insert ('d, 'dst_index, 'esize, 'idxdsize, 'n, 'src_index) = {
+ assert(constraint('idxdsize >= 0), "idxdsize constraint");
+ assert(constraint('esize >= 0), "esize constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('idxdsize) = aget_V(n);
+ result : bits(128) = aget_V(d);
+ result = aset_Elem(result, dst_index, esize, aget_Elem(operand, src_index, esize));
+ aset_V(d, result)
+}
+
+/* aarch64_vector_transfer_vector_extract: vector extract (EXT):
+   concatenates V[m]:V[n] and writes the datasize-bit window starting at
+   bit 'position' to V[d]. */
+val aarch64_vector_transfer_vector_extract : (int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_transfer_vector_extract ('d, 'datasize, 'm, 'n, 'position) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ hi : bits('datasize) = aget_V(m);
+ lo : bits('datasize) = aget_V(n);
+ concat : bits(2 * 'datasize) = hi @ lo;
+ aset_V(d, slice(concat, position, datasize))
+}
+
+/* aarch64_vector_transfer_vector_cpydup_sisd: duplicate one element of
+   V[n] (element 'index', viewed as idxdsize bits) into every element of
+   the datasize-bit destination (DUP element form). */
+val aarch64_vector_transfer_vector_cpydup_sisd : (int, int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_transfer_vector_cpydup_sisd ('d, 'datasize, 'elements, 'esize, 'idxdsize, 'index, 'n) = {
+ assert(constraint('idxdsize >= 0), "idxdsize constraint");
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('idxdsize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits('esize) = aget_Elem(operand, index, esize);
+ foreach (e from 0 to (elements - 1) by 1 in inc)
+ result = aset_Elem(result, e, esize, element);
+ aset_V(d, result)
+}
+
+/* aarch64_vector_transfer_integer_move_unsigned: move vector element to
+   general register, zero-extended (UMOV-style): X[d] = ZeroExtend of
+   element 'index' of V[n]. */
+val aarch64_vector_transfer_integer_move_unsigned : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_transfer_integer_move_unsigned ('d, 'datasize, 'esize, 'idxdsize, 'index, 'n) = {
+ assert(constraint('idxdsize >= 0), "idxdsize constraint");
+ assert(constraint('esize >= 0), "esize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('idxdsize) = aget_V(n);
+ aset_X(d, ZeroExtend(aget_Elem(operand, index, esize), datasize))
+}
+
+/* aarch64_vector_transfer_integer_move_signed: as above but
+   sign-extended (SMOV-style). */
+val aarch64_vector_transfer_integer_move_signed : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_transfer_integer_move_signed ('d, 'datasize, 'esize, 'idxdsize, 'index, 'n) = {
+ assert(constraint('idxdsize >= 0), "idxdsize constraint");
+ assert(constraint('esize >= 0), "esize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('idxdsize) = aget_V(n);
+ aset_X(d, SignExtend(aget_Elem(operand, index, esize), datasize))
+}
+
+/* aarch64_vector_transfer_integer_insert: insert the low esize bits of
+   X[n] into element 'index' of V[d] (INS general form), preserving the
+   other elements. */
+val aarch64_vector_transfer_integer_insert : (int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_transfer_integer_insert ('d, 'datasize, 'esize, 'index, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ element : bits('esize) = aget_X(n);
+ result : bits('datasize) = aget_V(d);
+ result = aset_Elem(result, index, esize, element);
+ aset_V(d, result)
+}
+
+/* aarch64_vector_transfer_integer_dup: broadcast the low esize bits of
+   X[n] into every element of V[d] (DUP general form). */
+val aarch64_vector_transfer_integer_dup : (int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_transfer_integer_dup ('d, 'datasize, 'elements, 'esize, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ element : bits('esize) = aget_X(n);
+ result : bits('datasize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc)
+ result = aset_Elem(result, e, esize, element);
+ aset_V(d, result)
+}
+
+/* aarch64_vector_shift_right_sisd: element-wise arithmetic/logical shift
+   right by immediate, with optional rounding (adds 2^(shift-1) before
+   shifting) and optional accumulation into V[d] (covers the
+   SSHR/USHR/SRSHR/URSHR/SSRA/USRA/... forms; 'unsigned' selects the
+   extension used by asl_Int). */
+val aarch64_vector_shift_right_sisd : (bool, int, int, int, int, int, bool, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_shift_right_sisd (accumulate, 'd, 'datasize, 'elements, 'esize, 'n, round, 'shift, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = undefined;
+ result : bits('datasize) = undefined;
+ round_const : int = if round then shl_int(1, shift - 1) else 0;
+ element : int = undefined;
+ operand2 = if accumulate then aget_V(d) else Zeros();
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = shr_int(asl_Int(aget_Elem(operand, e, esize), unsigned) + round_const, shift);
+ result = aset_Elem(result, e, esize, aget_Elem(operand2, e, esize) + __GetSlice_int(esize, element, 0))
+ };
+ aset_V(d, result)
+}
+
+/* aarch64_vector_shift_rightnarrow_uniform_sisd: shift right by
+   immediate and saturate-narrow from 2*esize to esize elements, writing
+   the selected half of V[d] (SQSHRN/UQSHRN/SQRSHRN/... forms).
+   Saturation sets FPSR bit 27 (presumably FPSR.QC -- confirm). */
+val aarch64_vector_shift_rightnarrow_uniform_sisd : (int, int, int, int, int, int, bool, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_shift_rightnarrow_uniform_sisd ('d, 'datasize, 'elements, 'esize, 'n, 'part, round, 'shift, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits(2 * 'datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ round_const : int = if round then shl_int(1, shift - 1) else 0;
+ element : int = undefined;
+ sat : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = shr_int(asl_Int(aget_Elem(operand, e, 2 * esize), unsigned) + round_const, shift);
+ __tmp_831 : bits('esize) = undefined;
+ (__tmp_831, sat) = SatQ(element, esize, unsigned);
+ result = aset_Elem(result, e, esize, __tmp_831);
+ if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+ };
+ aset_Vpart(d, part, result)
+}
+
+/* aarch64_vector_shift_rightnarrow_nonuniform_sisd: as above but with a
+   signed source and an unsigned saturating destination
+   (SQSHRUN/SQRSHRUN forms): SInt source, UnsignedSatQ result. */
+val aarch64_vector_shift_rightnarrow_nonuniform_sisd : (int, int, int, int, int, int, bool, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_shift_rightnarrow_nonuniform_sisd ('d, 'datasize, 'elements, 'esize, 'n, 'part, round, 'shift) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits(2 * 'datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ round_const : int = if round then shl_int(1, shift - 1) else 0;
+ element : int = undefined;
+ sat : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = shr_int(SInt(aget_Elem(operand, e, 2 * esize)) + round_const, shift);
+ __tmp_856 : bits('esize) = undefined;
+ (__tmp_856, sat) = UnsignedSatQ(element, esize);
+ result = aset_Elem(result, e, esize, __tmp_856);
+ if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+ };
+ aset_Vpart(d, part, result)
+}
+
+/* aarch64_vector_shift_rightnarrow_logical: shift right and truncate
+   (no saturation) from 2*esize to esize elements (SHRN/RSHRN forms). */
+val aarch64_vector_shift_rightnarrow_logical : (int, int, int, int, int, int, bool, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_shift_rightnarrow_logical ('d, 'datasize, 'elements, 'esize, 'n, 'part, round, 'shift) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits(2 * 'datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ round_const : int = if round then shl_int(1, shift - 1) else 0;
+ element : int = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = shr_int(UInt(aget_Elem(operand, e, 2 * esize)) + round_const, shift);
+ result = aset_Elem(result, e, esize, __GetSlice_int(esize, element, 0))
+ };
+ aset_Vpart(d, part, result)
+}
+
+/* aarch64_vector_shift_rightinsert_sisd: shift right and insert (SRI):
+   each destination element keeps its bits above the shifted-in window
+   (mask = Ones >> shift selects the window) and takes the shifted source
+   bits inside it. */
+val aarch64_vector_shift_rightinsert_sisd : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_shift_rightinsert_sisd ('d, 'datasize, 'elements, 'esize, 'n, 'shift) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(d);
+ result : bits('datasize) = undefined;
+ mask : bits('esize) = LSR(Ones(esize), shift);
+ shifted : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ shifted = LSR(aget_Elem(operand, e, esize), shift);
+ result = aset_Elem(result, e, esize, aget_Elem(operand2, e, esize) & ~(mask) | shifted)
+ };
+ aset_V(d, result)
+}
+
+/* aarch64_vector_shift_left_sisd: element-wise logical shift left by
+   immediate (SHL). */
+val aarch64_vector_shift_left_sisd : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_shift_left_sisd ('d, 'datasize, 'elements, 'esize, 'n, 'shift) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc)
+ result = aset_Elem(result, e, esize, LSL(aget_Elem(operand, e, esize), shift));
+ aset_V(d, result)
+}
+
+/* aarch64_vector_shift_leftsat_sisd: saturating shift left by immediate
+   (SQSHL/UQSHL/SQSHLU forms); src_unsigned/dst_unsigned select the
+   interpretation of source and saturated destination.  Saturation sets
+   FPSR bit 27 (presumably FPSR.QC -- confirm). */
+val aarch64_vector_shift_leftsat_sisd : (int, int, bool, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_shift_leftsat_sisd ('d, 'datasize, dst_unsigned, 'elements, 'esize, 'n, 'shift, src_unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : int = undefined;
+ sat : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = shl_int(asl_Int(aget_Elem(operand, e, esize), src_unsigned), shift);
+ __tmp_863 : bits('esize) = undefined;
+ (__tmp_863, sat) = SatQ(element, esize, dst_unsigned);
+ result = aset_Elem(result, e, esize, __tmp_863);
+ if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+ };
+ aset_V(d, result)
+}
+
+/* aarch64_vector_shift_leftlong: widening shift left (SSHLL/USHLL):
+   reads the selected half of V[n], extends each element per 'unsigned',
+   shifts left, and writes 2*esize-wide elements to V[d]. */
+val aarch64_vector_shift_leftlong : (int, int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_shift_leftlong ('d, 'datasize, 'elements, 'esize, 'n, 'part, 'shift, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_Vpart(n, part);
+ result : bits(2 * 'datasize) = undefined;
+ element : int = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = shl_int(asl_Int(aget_Elem(operand, e, esize), unsigned), shift);
+ result = aset_Elem(result, e, 2 * esize, __GetSlice_int(2 * esize, element, 0))
+ };
+ aset_V(d, result)
+}
+
+/* aarch64_vector_shift_leftinsert_sisd: shift left and insert (SLI):
+   mirror of SRI above with mask = Ones << shift and an LSL of the
+   source. */
+val aarch64_vector_shift_leftinsert_sisd : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_shift_leftinsert_sisd ('d, 'datasize, 'elements, 'esize, 'n, 'shift) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(d);
+ result : bits('datasize) = undefined;
+ mask : bits('esize) = LSL(Ones(esize), shift);
+ shifted : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ shifted = LSL(aget_Elem(operand, e, esize), shift);
+ result = aset_Elem(result, e, esize, aget_Elem(operand2, e, esize) & ~(mask) | shifted)
+ };
+ aset_V(d, result)
+}
+
+/* aarch64_vector_shift_conv_int_sisd: element-wise fixed-point integer
+   to floating-point conversion with 'fracbits' fraction bits
+   (SCVTF/UCVTF fixed-point forms), via FixedToFP under FPCR and the
+   given rounding mode. */
+val aarch64_vector_shift_conv_int_sisd : (int, int, int, int, int, int, FPRounding, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_shift_conv_int_sisd ('d, 'datasize, 'elements, 'esize, 'fracbits, 'n, rounding, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ result = aset_Elem(result, e, esize, FixedToFP(element, fracbits, unsigned, FPCR, rounding))
+ };
+ aset_V(d, result)
+}
+
+/* aarch64_vector_shift_conv_float_sisd: inverse of the above --
+   floating-point to fixed-point conversion via FPToFixed (FCVTZS/FCVTZU
+   fixed-point forms). */
+val aarch64_vector_shift_conv_float_sisd : (int, int, int, int, int, int, FPRounding, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_shift_conv_float_sisd ('d, 'datasize, 'elements, 'esize, 'fracbits, 'n, rounding, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ result = aset_Elem(result, e, esize, FPToFixed(element, fracbits, unsigned, FPCR, rounding))
+ };
+ aset_V(d, result)
+}
+
+/* aarch64_vector_reduce_intmax: integer max/min reduction across the
+   elements of V[n] (SMAXV/UMAXV/SMINV/UMINV forms); 'min' selects min
+   vs max, 'unsigned' selects the element interpretation.  Note the
+   result written back is only esize bits wide (the scalar result
+   register). */
+val aarch64_vector_reduce_intmax : (int, int, int, int, bool, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_reduce_intmax ('d, 'datasize, 'elements, 'esize, min, 'n, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ maxmin : int = undefined;
+ element : int = undefined;
+ maxmin = asl_Int(aget_Elem(operand, 0, esize), unsigned);
+ foreach (e from 1 to (elements - 1) by 1 in inc) {
+ element = asl_Int(aget_Elem(operand, e, esize), unsigned);
+ maxmin = if min then min(maxmin, element) else max(maxmin, element)
+ };
+ aset_V(d, __GetSlice_int(esize, maxmin, 0))
+}
+
+/* The reduce_* functions below all follow the same shape: read V[n] as
+   a datasize-bit vector and write Reduce(op, operand, esize) to V[d].
+   They differ only in the ReduceOp supplied by their decoders.  The
+   decoders compute 'elements' but never use it -- an artifact of the
+   ASL-to-Sail translation. */
+
+/* aarch64_vector_reduce_fp16maxnm_sisd: scalar pairwise FP maxNum/minNum
+   reduction (FMAXNMP/FMINNMP scalar forms). */
+val aarch64_vector_reduce_fp16maxnm_sisd : (int, int, int, int, ReduceOp) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_reduce_fp16maxnm_sisd ('d, 'datasize, 'esize, 'n, op) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ aset_V(d, Reduce(op, operand, esize))
+}
+
+/* Decoder: sz selects esize 32/64; the pair (datasize = 2*esize) is
+   reduced with FMAXNUM or (o1 set) FMINNUM. */
+val vector_reduce_fpmaxnm_sisd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_reduce_fpmaxnm_sisd_decode (U, o1, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize * 2;
+ elements : int = 2;
+ op : ReduceOp = if o1 == 0b1 then ReduceOp_FMINNUM else ReduceOp_FMAXNUM;
+ aarch64_vector_reduce_fp16maxnm_sisd(d, datasize, esize, n, op)
+}
+
+/* Vector-form counterpart of the maxNum/minNum reduction. */
+val aarch64_vector_reduce_fp16maxnm_simd : (int, int, int, int, ReduceOp) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_reduce_fp16maxnm_simd ('d, 'datasize, 'esize, 'n, op) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ aset_V(d, Reduce(op, operand, esize))
+}
+
+/* Scalar pairwise FP max/min reduction (FMAXP/FMINP scalar forms). */
+val aarch64_vector_reduce_fp16max_sisd : (int, int, int, int, ReduceOp) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_reduce_fp16max_sisd ('d, 'datasize, 'esize, 'n, op) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ aset_V(d, Reduce(op, operand, esize))
+}
+
+/* Decoder for the pairwise FP max/min reduction; o1 selects FMIN. */
+val vector_reduce_fpmax_sisd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_reduce_fpmax_sisd_decode (U, o1, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize * 2;
+ elements : int = 2;
+ op : ReduceOp = if o1 == 0b1 then ReduceOp_FMIN else ReduceOp_FMAX;
+ aarch64_vector_reduce_fp16max_sisd(d, datasize, esize, n, op)
+}
+
+/* Vector-form counterpart of the FP max/min reduction. */
+val aarch64_vector_reduce_fp16max_simd : (int, int, int, int, ReduceOp) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_reduce_fp16max_simd ('d, 'datasize, 'esize, 'n, op) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ aset_V(d, Reduce(op, operand, esize))
+}
+
+/* Scalar pairwise FP add reduction (FADDP scalar form). */
+val aarch64_vector_reduce_fp16add_sisd : (int, int, int, int, ReduceOp) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_reduce_fp16add_sisd ('d, 'datasize, 'esize, 'n, op) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ aset_V(d, Reduce(op, operand, esize))
+}
+
+/* Decoder for the pairwise FP add reduction; always ReduceOp_FADD. */
+val vector_reduce_fpadd_sisd_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_reduce_fpadd_sisd_decode (U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize * 2;
+ elements : int = 2;
+ op : ReduceOp = ReduceOp_FADD;
+ aarch64_vector_reduce_fp16add_sisd(d, datasize, esize, n, op)
+}
+
+/* Integer add reduction, scalar form. */
+val aarch64_vector_reduce_add_sisd : (int, int, int, int, ReduceOp) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_reduce_add_sisd ('d, 'datasize, 'esize, 'n, op) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ aset_V(d, Reduce(op, operand, esize))
+}
+
+/* Integer add reduction, vector form (ADDV). */
+val aarch64_vector_reduce_add_simd : (int, int, int, int, ReduceOp) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_reduce_add_simd ('d, 'datasize, 'esize, 'n, op) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ aset_V(d, Reduce(op, operand, esize))
+}
+
+/* aarch64_vector_reduce_addlong: widening add-across reduction
+   (SADDLV/UADDLV): sums all elements as integers (signed or unsigned
+   per 'unsigned') and writes a 2*esize-bit result. */
+val aarch64_vector_reduce_addlong : (int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_reduce_addlong ('d, 'datasize, 'elements, 'esize, 'n, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ sum : int = asl_Int(aget_Elem(operand, 0, esize), unsigned);
+ foreach (e from 1 to (elements - 1) by 1 in inc)
+ sum = sum + asl_Int(aget_Elem(operand, e, esize), unsigned);
+ aset_V(d, __GetSlice_int(2 * esize, sum, 0))
+}
+
+/* aarch64_vector_logical: vector logical operation with an expanded
+   immediate (MOVI/MVNI/ORR/BIC immediate forms).  MOVI writes imm,
+   MVNI its complement; ORR/BIC combine imm with the current V[rd]. */
+val aarch64_vector_logical : forall ('datasize : Int).
+  (atom('datasize), bits('datasize), ImmediateOp, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_logical (datasize, imm, operation, 'rd) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = undefined;
+ result : bits('datasize) = undefined;
+ match operation {
+ ImmediateOp_MOVI => result = imm,
+ ImmediateOp_MVNI => result = ~(imm),
+ ImmediateOp_ORR => {
+ operand = aget_V(rd);
+ result = operand | imm
+ },
+ ImmediateOp_BIC => {
+ operand = aget_V(rd);
+ result = operand & ~(imm)
+ }
+ };
+ aset_V(rd, result)
+}
+
+/* aarch64_vector_fp16_movi: write an expanded FP immediate directly to
+   V[rd] (FMOV vector-immediate form). */
+val aarch64_vector_fp16_movi : forall ('datasize : Int).
+  (atom('datasize), bits('datasize), int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_fp16_movi (datasize, imm, 'rd) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ aset_V(rd, imm)
+}
+
+val aarch64_vector_arithmetic_unary_special_sqrtfp16 : (int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_special_sqrtfp16 ('d, 'datasize, 'elements, 'esize, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ result = aset_Elem(result, e, esize, FPSqrt(element, FPCR))
+ };
+ aset_V(d, result)
+}
+
+/* Execute: per-lane UnsignedRSqrtEstimate over the 32-bit elements of
+   V[n], written to V[d] (URSQRTE).  'd/'n are SIMD&FP register indices,
+   'datasize the vector width in bits, 'elements the lane count.  The
+   assert(constraint(...)) calls re-establish type-level size facts for
+   the Sail type checker (datasize is a legal width, a whole number of
+   bytes). */
+val aarch64_vector_arithmetic_unary_special_sqrtest_int : (int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_special_sqrtest_int ('d, 'datasize, 'elements, 'n) = {
+ assert(constraint('elements >= 1), "elements constraint");
+ /* 'dbytes: vector width in bytes, lifted to a type-level variable */
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits(32) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, 32);
+ result = aset_Elem(result, e, 32, UnsignedRSqrtEstimate(element))
+ };
+ aset_V(d, result)
+}
+
+/* Execute: per-lane FPRSqrtEstimate (FRSQRTE) of the 'esize-bit FP
+   elements of V[n] into V[d], using FPCR state.  NOTE(review): despite
+   the generated "_fp16_" name, the decoder below passes esize = 32 or
+   64. */
+val aarch64_vector_arithmetic_unary_special_sqrtest_fp16_sisd : (int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_special_sqrtest_fp16_sisd ('d, 'datasize, 'elements, 'esize, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ result = aset_Elem(result, e, esize, FPRSqrtEstimate(element, FPCR))
+ };
+ aset_V(d, result)
+}
+
+/* Decode (scalar FP reciprocal sqrt estimate): a single element of
+   esize = 32 << sz bits.  U is not consulted in this decoder. */
+val vector_arithmetic_unary_special_sqrtest_float_sisd_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_special_sqrtest_float_sisd_decode (U, sz, Rn, Rd) = {
+ /* marks this encoding as unconditionally executed -- TODO confirm
+    semantics of __unconditional against the decoder harness */
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize;
+ elements : int = 1;
+ aarch64_vector_arithmetic_unary_special_sqrtest_fp16_sisd(d, datasize, elements, esize, n)
+}
+
+/* Execute: per-lane UnsignedRecipEstimate over the 32-bit elements of
+   V[n], written to V[d] (URECPE).  Same shape as the sqrtest_int
+   function above; only the estimate primitive differs. */
+val aarch64_vector_arithmetic_unary_special_recip_int : (int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_special_recip_int ('d, 'datasize, 'elements, 'n) = {
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits(32) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, 32);
+ result = aset_Elem(result, e, 32, UnsignedRecipEstimate(element))
+ };
+ aset_V(d, result)
+}
+
+/* Execute: per-lane FPRecipEstimate (FRECPE) of the 'esize-bit FP
+   elements of V[n] into V[d].  NOTE(review): "_fp16_" in the generated
+   name notwithstanding, the decoder below passes esize = 32 or 64. */
+val aarch64_vector_arithmetic_unary_special_recip_fp16_sisd : (int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_special_recip_fp16_sisd ('d, 'datasize, 'elements, 'esize, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ result = aset_Elem(result, e, esize, FPRecipEstimate(element, FPCR))
+ };
+ aset_V(d, result)
+}
+
+/* Decode (scalar FP reciprocal estimate): a single element of
+   esize = 32 << sz bits.  U is not consulted here. */
+val vector_arithmetic_unary_special_recip_float_sisd_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_special_recip_float_sisd_decode (U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize;
+ elements : int = 1;
+ aarch64_vector_arithmetic_unary_special_recip_fp16_sisd(d, datasize, elements, esize, n)
+}
+
+/* Execute: per-lane FPRecpX (FRECPX, reciprocal exponent) of the
+   'esize-bit FP elements of V[n] into V[d], using FPCR state. */
+val aarch64_vector_arithmetic_unary_special_frecpxfp16 : (int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_special_frecpxfp16 ('d, 'datasize, 'elements, 'esize, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ result = aset_Elem(result, e, esize, FPRecpX(element, FPCR))
+ };
+ aset_V(d, result)
+}
+
+/* Decode (scalar FRECPX): single element of esize = 32 << sz bits.
+   U is not consulted here. */
+val vector_arithmetic_unary_special_frecpx_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_special_frecpx_decode (U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize;
+ elements : int = 1;
+ aarch64_vector_arithmetic_unary_special_frecpxfp16(d, datasize, elements, esize, n)
+}
+
+/* Execute: widening shift-left-long: each 'esize-bit element of the
+   selected half of V[n] (aget_Vpart with 'part) is converted to an
+   integer (asl_Int, signedness per unsigned), shifted left by 'shift,
+   and written back as a 2*'esize-bit element of the double-width
+   result in V[d]. */
+val aarch64_vector_arithmetic_unary_shift : (int, int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_shift ('d, 'datasize, 'elements, 'esize, 'n, 'part, 'shift, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_Vpart(n, part);
+ result : bits(2 * 'datasize) = undefined;
+ element : int = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = shl_int(asl_Int(aget_Elem(operand, e, esize), unsigned), shift);
+ /* truncate the shifted integer back to 2*esize bits */
+ result = aset_Elem(result, e, 2 * esize, __GetSlice_int(2 * esize, element, 0))
+ };
+ aset_V(d, result)
+}
+
+/* Execute: element reversal (REV family): V[n] is viewed as
+   'containers containers of 'elements_per_container elements of
+   'esize bits each; within every container the element order is
+   reversed.  element/rev_element walk the flat element index from
+   opposite ends of the current container. */
+val aarch64_vector_arithmetic_unary_rev : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_rev ('containers, 'd, 'datasize, 'elements_per_container, 'esize, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : int = 0;
+ rev_element : int = undefined;
+ foreach (c from 0 to (containers - 1) by 1 in inc) {
+ rev_element = (element + elements_per_container) - 1;
+ foreach (e from 0 to (elements_per_container - 1) by 1 in inc) {
+ result = aset_Elem(result, rev_element, esize, aget_Elem(operand, element, esize));
+ element = element + 1;
+ rev_element = rev_element - 1
+ }
+ };
+ aset_V(d, result)
+}
+
+/* Execute: RBIT (vector): reverses the bit order within each
+   'esize-bit element of V[n] and writes the result to V[d]. */
+val aarch64_vector_arithmetic_unary_rbit : (int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_rbit ('d, 'datasize, 'elements, 'esize, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits('esize) = undefined;
+ rev : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ /* copy bit i of element into bit (esize-1-i) of rev */
+ foreach (i from 0 to (esize - 1) by 1 in inc)
+ rev = __SetSlice_bits(esize, 1, rev, (esize - 1) - i, [element[i]]);
+ result = aset_Elem(result, e, esize, rev)
+ };
+ aset_V(d, result)
+}
+
+/* Decode for vector RBIT: fixed 8-bit elements over a 64- or 128-bit
+   vector depending on Q.  U and size are not consulted here. */
+val vector_arithmetic_unary_rbit_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_rbit_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 8;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / 8;
+ aarch64_vector_arithmetic_unary_rbit(d, datasize, elements, esize, n)
+}
+
+/* Execute: NOT (vector): bitwise complement of each 'esize-bit
+   element of V[n], written to V[d]. */
+val aarch64_vector_arithmetic_unary_not : (int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_not ('d, 'datasize, 'elements, 'esize, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ result = aset_Elem(result, e, esize, ~(element))
+ };
+ aset_V(d, result)
+}
+
+/* Decode for vector NOT: fixed 8-bit elements over a 64- or 128-bit
+   vector depending on Q.  U and size are not consulted here. */
+val vector_arithmetic_unary_not_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_not_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 8;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / 8;
+ aarch64_vector_arithmetic_unary_not(d, datasize, elements, esize, n)
+}
+
+/* Execute: FP round-to-integral (FRINT* family): each 'esize-bit FP
+   element of V[n] is rounded with FPRoundInt using the supplied
+   rounding mode; the exact flag is passed through to FPRoundInt. */
+val aarch64_vector_arithmetic_unary_fp16_round : (int, int, int, int, bool, int, FPRounding) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_fp16_round ('d, 'datasize, 'elements, 'esize, exact, 'n, rounding) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ result = aset_Elem(result, e, esize, FPRoundInt(element, FPCR, rounding, exact))
+ };
+ aset_V(d, result)
+}
+
+/* Execute: scalar integer-to-FP conversion (SCVTF/UCVTF style):
+   converts each element via FixedToFP with 0 fraction bits, with the
+   rounding mode taken from FPCR. */
+val aarch64_vector_arithmetic_unary_fp16_conv_int_sisd : (int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_fp16_conv_int_sisd ('d, 'datasize, 'elements, 'esize, 'n, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ rounding : FPRounding = FPRoundingMode(FPCR);
+ element : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ result = aset_Elem(result, e, esize, FixedToFP(element, 0, unsigned, FPCR, rounding))
+ };
+ aset_V(d, result)
+}
+
+/* Decode (scalar int->FP convert): single element of esize =
+   32 << sz bits; U selects unsigned conversion. */
+val vector_arithmetic_unary_float_conv_int_sisd_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_float_conv_int_sisd_decode (U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize;
+ elements : int = 1;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_fp16_conv_int_sisd(d, datasize, elements, esize, n, unsigned)
+}
+
+/* Execute: scalar FP-to-integer conversion via FPToFixed with 0
+   fraction bits; the rounding mode is supplied by the decoder
+   (TIEAWAY for this entry point -- FCVTAS/FCVTAU style). */
+val aarch64_vector_arithmetic_unary_fp16_conv_float_tieaway_sisd : (int, int, int, int, int, FPRounding, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_fp16_conv_float_tieaway_sisd ('d, 'datasize, 'elements, 'esize, 'n, rounding, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ result = aset_Elem(result, e, esize, FPToFixed(element, 0, unsigned, FPCR, rounding))
+ };
+ aset_V(d, result)
+}
+
+/* Decode (scalar FP->int, round-to-nearest-tie-away): single element
+   of esize = 32 << sz bits; U selects unsigned conversion. */
+val vector_arithmetic_unary_float_conv_float_tieaway_sisd_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_float_conv_float_tieaway_sisd_decode (U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize;
+ elements : int = 1;
+ rounding : FPRounding = FPRounding_TIEAWAY;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_fp16_conv_float_tieaway_sisd(d, datasize, elements, esize, n, rounding, unsigned)
+}
+
+/* Execute: scalar FP-to-integer conversion (FCVT<N/M/P/Z> family),
+   body identical to the tieaway variant above; kept separate to
+   mirror the ASL decode tree this file was generated from. */
+val aarch64_vector_arithmetic_unary_fp16_conv_float_bulk_sisd : (int, int, int, int, int, FPRounding, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_fp16_conv_float_bulk_sisd ('d, 'datasize, 'elements, 'esize, 'n, rounding, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ result = aset_Elem(result, e, esize, FPToFixed(element, 0, unsigned, FPCR, rounding))
+ };
+ aset_V(d, result)
+}
+
+/* Decode (scalar FP->int): rounding mode decoded from the o1:o2
+   field pair via FPDecodeRounding; U selects unsigned conversion. */
+val vector_arithmetic_unary_float_conv_float_bulk_sisd_decode : (bits(1), bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_float_conv_float_bulk_sisd_decode (U, o2, sz, o1, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize;
+ elements : int = 1;
+ rounding : FPRounding = FPDecodeRounding(o1 @ o2);
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_fp16_conv_float_bulk_sisd(d, datasize, elements, esize, n, rounding, unsigned)
+}
+
+/* Execute: FP narrow with round-to-odd (FCVTXN style): each
+   2*'esize-bit FP element of V[n] is converted down to 'esize bits
+   with FPRounding_ODD, and the narrow result is written to the
+   selected half of V[d] (aset_Vpart). */
+val aarch64_vector_arithmetic_unary_float_xtn_sisd : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_float_xtn_sisd ('d, 'datasize, 'elements, 'esize, 'n, 'part) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits(2 * 'datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc)
+ result = aset_Elem(result, e, esize, FPConvert(aget_Elem(operand, e, 2 * esize), FPCR, FPRounding_ODD));
+ aset_Vpart(d, part, result)
+}
+
+/* Execute: FP widen (FCVTL style): each 'esize-bit FP element of the
+   selected half of V[n] is converted up to 2*'esize bits; the
+   double-width result fills V[d]. */
+val aarch64_vector_arithmetic_unary_float_widen : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_float_widen ('d, 'datasize, 'elements, 'esize, 'n, 'part) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_Vpart(n, part);
+ result : bits(2 * 'datasize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc)
+ result = aset_Elem(result, e, 2 * esize, FPConvert(aget_Elem(operand, e, esize), FPCR));
+ aset_V(d, result)
+}
+
+/* Decode for FP widen: source elements of esize = 16 << sz bits over
+   a fixed 64-bit source half; Q selects which half of V[n]. */
+val vector_arithmetic_unary_float_widen_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_float_widen_decode (Q, U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(16, UInt(sz));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_unary_float_widen(d, datasize, elements, esize, n, part)
+}
+
+/* Execute: FP narrow (FCVTN style): each 2*'esize-bit FP element of
+   V[n] is converted down to 'esize bits (default FPCR rounding) and
+   written to the selected half of V[d]. */
+val aarch64_vector_arithmetic_unary_float_narrow : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_float_narrow ('d, 'datasize, 'elements, 'esize, 'n, 'part) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits(2 * 'datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc)
+ result = aset_Elem(result, e, esize, FPConvert(aget_Elem(operand, e, 2 * esize), FPCR));
+ aset_Vpart(d, part, result)
+}
+
+/* Decode for FP narrow: destination elements of esize = 16 << sz bits
+   over a fixed 64-bit destination half; Q selects which half of V[d]. */
+val vector_arithmetic_unary_float_narrow_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_float_narrow_decode (Q, U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(16, UInt(sz));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_unary_float_narrow(d, datasize, elements, esize, n, part)
+}
+
+/* Execute: signed-to-unsigned saturating extract narrow (SQXTUN
+   style): each signed 2*'esize-bit element of V[n] is narrowed with
+   UnsignedSatQ to 'esize bits; on saturation, FPSR bit 27 is set
+   (the cumulative saturation QC flag, per the Arm FPSR layout --
+   NOTE(review): bit index taken on trust from the generator). */
+val aarch64_vector_arithmetic_unary_extract_sqxtun_sisd : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_extract_sqxtun_sisd ('d, 'datasize, 'elements, 'esize, 'n, 'part) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits(2 * 'datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits(2 * 'esize) = undefined;
+ sat : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, 2 * esize);
+ /* __tmp_* holds the saturated narrow value from the tuple return */
+ __tmp_781 : bits('esize) = undefined;
+ (__tmp_781, sat) = UnsignedSatQ(SInt(element), esize);
+ result = aset_Elem(result, e, esize, __tmp_781);
+ if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+ };
+ aset_Vpart(d, part, result)
+}
+
+/* Execute: saturating extract narrow (SQXTN/UQXTN style): like
+   sqxtun above, but the source interpretation and the saturation
+   bound share the same signedness (SatQ with the unsigned flag). */
+val aarch64_vector_arithmetic_unary_extract_sat_sisd : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_extract_sat_sisd ('d, 'datasize, 'elements, 'esize, 'n, 'part, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits(2 * 'datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits(2 * 'esize) = undefined;
+ sat : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, 2 * esize);
+ __tmp_738 : bits('esize) = undefined;
+ (__tmp_738, sat) = SatQ(asl_Int(element, unsigned), esize, unsigned);
+ result = aset_Elem(result, e, esize, __tmp_738);
+ if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+ };
+ aset_Vpart(d, part, result)
+}
+
+/* Execute: non-saturating extract narrow (XTN style): simply keeps
+   the low 'esize bits of each 2*'esize-bit element. */
+val aarch64_vector_arithmetic_unary_extract_nosat : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_extract_nosat ('d, 'datasize, 'elements, 'esize, 'n, 'part) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits(2 * 'datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits(2 * 'esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, 2 * esize);
+ result = aset_Elem(result, e, esize, slice(element, 0, esize))
+ };
+ aset_Vpart(d, part, result)
+}
+
+/* Execute: scalar saturating absolute-value/negate (SQABS/SQNEG
+   style): each signed element is negated (neg) or has its absolute
+   value taken, then re-saturated to 'esize bits with SignedSatQ; on
+   saturation FPSR bit 27 (cumulative saturation QC flag, per the Arm
+   FPSR layout) is set. */
+val aarch64_vector_arithmetic_unary_diffneg_sat_sisd : (int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_diffneg_sat_sisd ('d, 'datasize, 'elements, 'esize, 'n, neg) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : int = undefined;
+ sat : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = SInt(aget_Elem(operand, e, esize));
+ if neg then element = negate(element) else element = abs(element);
+ __tmp_818 : bits('esize) = undefined;
+ (__tmp_818, sat) = SignedSatQ(element, esize);
+ result = aset_Elem(result, e, esize, __tmp_818);
+ if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+ };
+ aset_V(d, result)
+}
+
+/* Decode (scalar saturating abs/neg): single element of esize =
+   8 << size bits; U selects negate over absolute value. */
+val vector_arithmetic_unary_diffneg_sat_sisd_decode : (bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_diffneg_sat_sisd_decode (U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ neg : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_diffneg_sat_sisd(d, datasize, elements, esize, n, neg)
+}
+
+/* Execute: non-saturating absolute-value/negate (ABS/NEG style):
+   result is the integer abs/negate truncated back to 'esize bits. */
+val aarch64_vector_arithmetic_unary_diffneg_int_sisd : (int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_diffneg_int_sisd ('d, 'datasize, 'elements, 'esize, 'n, neg) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : int = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = SInt(aget_Elem(operand, e, esize));
+ if neg then element = negate(element) else element = abs(element);
+ result = aset_Elem(result, e, esize, __GetSlice_int(esize, element, 0))
+ };
+ aset_V(d, result)
+}
+
+/* Execute: FP absolute-value/negate (FABS/FNEG style) on each
+   'esize-bit FP element, selected by neg. */
+val aarch64_vector_arithmetic_unary_diffneg_fp16 : (int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_diffneg_fp16 ('d, 'datasize, 'elements, 'esize, 'n, neg) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ if neg then element = FPNeg(element) else element = FPAbs(element);
+ result = aset_Elem(result, e, esize, element)
+ };
+ aset_V(d, result)
+}
+
+/* Execute: population count per element (CNT style): each 'esize-bit
+   element of V[n] is replaced by its BitCount, truncated back to
+   'esize bits.  Note this variant hoists the 'dbytes binding into a
+   let-wrapper and uses a plain runtime assert for the lower bounds. */
+val aarch64_vector_arithmetic_unary_cnt : (int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_cnt ('d, 'datasize, 'elements, 'esize, 'n) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ assert('elements >= 1 & 'esize >= 1);
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ count : int = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ count = BitCount(aget_Elem(operand, e, esize));
+ result = aset_Elem(result, e, esize, __GetSlice_int(esize, count, 0))
+ };
+ aset_V(d, result)
+}
+
+/* Execute: scalar signed compare-against-zero (CMGT/CMGE/CMEQ/CMLE/
+   CMLT zero): each element that passes the comparison becomes
+   all-ones, otherwise all-zeros. */
+val aarch64_vector_arithmetic_unary_cmp_int_lessthan_sisd : (CompareOp, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_cmp_int_lessthan_sisd (comparison, 'd, 'datasize, 'elements, 'esize, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : int = undefined;
+ test_passed : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = SInt(aget_Elem(operand, e, esize));
+ match comparison {
+ CompareOp_GT => test_passed = element > 0,
+ CompareOp_GE => test_passed = element >= 0,
+ CompareOp_EQ => test_passed = element == 0,
+ CompareOp_LE => test_passed = element <= 0,
+ CompareOp_LT => test_passed = element < 0
+ };
+ result = aset_Elem(result, e, esize, if test_passed then Ones() else Zeros())
+ };
+ aset_V(d, result)
+}
+
+/* Execute: scalar signed compare-against-zero, "bulk" decode-tree
+   variant.  NOTE(review): the body is identical to the _lessthan_
+   variant above; both are kept to mirror the generated ASL decode
+   tree. */
+val aarch64_vector_arithmetic_unary_cmp_int_bulk_sisd : (CompareOp, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_cmp_int_bulk_sisd (comparison, 'd, 'datasize, 'elements, 'esize, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ element : int = undefined;
+ test_passed : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = SInt(aget_Elem(operand, e, esize));
+ match comparison {
+ CompareOp_GT => test_passed = element > 0,
+ CompareOp_GE => test_passed = element >= 0,
+ CompareOp_EQ => test_passed = element == 0,
+ CompareOp_LE => test_passed = element <= 0,
+ CompareOp_LT => test_passed = element < 0
+ };
+ result = aset_Elem(result, e, esize, if test_passed then Ones() else Zeros())
+ };
+ aset_V(d, result)
+}
+
+/* Execute: scalar FP compare-against-zero (FCMGT/FCMGE/FCMEQ/FCMLE/
+   FCMLT zero): each element compared with +0.0 (FPZero(0b0)); a pass
+   yields all-ones, otherwise all-zeros.  LE/LT are expressed by
+   swapping the operands of FPCompareGE/FPCompareGT. */
+val aarch64_vector_arithmetic_unary_cmp_fp16_lessthan_sisd : (CompareOp, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_cmp_fp16_lessthan_sisd (comparison, 'd, 'datasize, 'elements, 'esize, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ zero : bits('esize) = FPZero(0b0);
+ element : bits('esize) = undefined;
+ test_passed : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ match comparison {
+ CompareOp_GT => test_passed = FPCompareGT(element, zero, FPCR),
+ CompareOp_GE => test_passed = FPCompareGE(element, zero, FPCR),
+ CompareOp_EQ => test_passed = FPCompareEQ(element, zero, FPCR),
+ CompareOp_LE => test_passed = FPCompareGE(zero, element, FPCR),
+ CompareOp_LT => test_passed = FPCompareGT(zero, element, FPCR)
+ };
+ result = aset_Elem(result, e, esize, if test_passed then Ones() else Zeros())
+ };
+ aset_V(d, result)
+}
+
+/* Decode (scalar FCMLT zero): single element of esize = 32 << sz
+   bits; comparison fixed to LT.  U is not consulted here. */
+val vector_arithmetic_unary_cmp_float_lessthan_sisd_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_cmp_float_lessthan_sisd_decode (U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize;
+ elements : int = 1;
+ comparison : CompareOp = CompareOp_LT;
+ aarch64_vector_arithmetic_unary_cmp_fp16_lessthan_sisd(comparison, d, datasize, elements, esize, n)
+}
+
+/* Execute: scalar FP compare-against-zero, "bulk" decode-tree
+   variant.  NOTE(review): body identical to the _lessthan_ variant
+   above; both are kept to mirror the generated ASL decode tree. */
+val aarch64_vector_arithmetic_unary_cmp_fp16_bulk_sisd : (CompareOp, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_unary_cmp_fp16_bulk_sisd (comparison, 'd, 'datasize, 'elements, 'esize, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ zero : bits('esize) = FPZero(0b0);
+ element : bits('esize) = undefined;
+ test_passed : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element = aget_Elem(operand, e, esize);
+ match comparison {
+ CompareOp_GT => test_passed = FPCompareGT(element, zero, FPCR),
+ CompareOp_GE => test_passed = FPCompareGE(element, zero, FPCR),
+ CompareOp_EQ => test_passed = FPCompareEQ(element, zero, FPCR),
+ CompareOp_LE => test_passed = FPCompareGE(zero, element, FPCR),
+ CompareOp_LT => test_passed = FPCompareGT(zero, element, FPCR)
+ };
+ result = aset_Elem(result, e, esize, if test_passed then Ones() else Zeros())
+ };
+ aset_V(d, result)
+}
+
+/* Decode (scalar FP compare vs zero): comparison selected by the
+   op:U field pair (00 GT, 01 GE, 10 EQ, 11 LE); esize = 32 << sz. */
+val vector_arithmetic_unary_cmp_float_bulk_sisd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_cmp_float_bulk_sisd_decode (U, sz, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize;
+ elements : int = 1;
+ comparison : CompareOp = undefined;
+ match op @ U {
+ 0b00 => comparison = CompareOp_GT,
+ 0b01 => comparison = CompareOp_GE,
+ 0b10 => comparison = CompareOp_EQ,
+ 0b11 => comparison = CompareOp_LE
+ };
+ aarch64_vector_arithmetic_unary_cmp_fp16_bulk_sisd(comparison, d, datasize, elements, esize, n)
+}
+
+val aarch64_vector_arithmetic_unary_clsz : (CountOp, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector count-leading-bits (CLS/CLZ): for each esize-bit element of V[n],
+   writes the count of leading sign bits (CountOp_CLS) or leading zero bits
+   (otherwise) into the corresponding element of V[d]. */
+function aarch64_vector_arithmetic_unary_clsz (countop, 'd, 'datasize, 'elements, 'esize, 'n) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ assert('elements >= 1 & 'esize >= 3);
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ count : int = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ if countop == CountOp_CLS then
+ count = CountLeadingSignBits(aget_Elem(operand, e, esize))
+ else count = CountLeadingZeroBits(aget_Elem(operand, e, esize));
+ /* the count is truncated to an esize-bit element */
+ result = aset_Elem(result, e, esize, __GetSlice_int(esize, count, 0))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_unary_add_saturating_sisd : (int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar saturating accumulate (SUQADD/USQADD style): adds each element of
+   V[n] into the corresponding element of V[d] with saturation. Note the
+   first operand is interpreted with the OPPOSITE signedness to 'unsigned'
+   (asl_Int with ~(unsigned)), which is what distinguishes this family from
+   plain SQADD/UQADD. */
+function aarch64_vector_arithmetic_unary_add_saturating_sisd ('d, 'datasize, 'elements, 'esize, 'n, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ operand2 : bits('datasize) = aget_V(d);
+ op1 : int = undefined;
+ op2 : int = undefined;
+ sat : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ op1 = asl_Int(aget_Elem(operand, e, esize), ~(unsigned));
+ op2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+ __tmp_868 : bits('esize) = undefined;
+ (__tmp_868, sat) = SatQ(op1 + op2, esize, unsigned);
+ result = aset_Elem(result, e, esize, __tmp_868);
+ /* on saturation, set FPSR bit 27 — presumably the cumulative QC flag */
+ if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+ };
+ aset_V(d, result)
+}
+
+val vector_arithmetic_unary_add_saturating_sisd_decode : (bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode scalar saturating accumulate: register indices and lane geometry
+   are extracted from the encoding, then execution is delegated. */
+function vector_arithmetic_unary_add_saturating_sisd_decode (U, size, Rn, Rd) = {
+    __unconditional = true;
+    let n : int = UInt(Rn);
+    let d : int = UInt(Rd);
+    /* element width is 8 << size; scalar form operates on one lane */
+    let esize : int = shl_int(8, UInt(size));
+    let datasize : int = esize;
+    let elements : int = 1;
+    /* U selects the unsigned variant */
+    let unsigned : bool = U == 0b1;
+    aarch64_vector_arithmetic_unary_add_saturating_sisd(d, datasize, elements, esize, n, unsigned)
+}
+
+val aarch64_vector_arithmetic_unary_add_pairwise : (bool, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector pairwise add-long (ADDLP/ADALP style): each pair of adjacent
+   esize-bit elements of V[n] is widened, summed into a 2*esize result
+   element, and either accumulated into V[d] (acc) or written fresh. */
+function aarch64_vector_arithmetic_unary_add_pairwise (acc, 'd, 'datasize, 'elements, 'esize, 'n, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand : bits('datasize) = aget_V(n);
+ result : bits('datasize) = undefined;
+ sum : bits(2 * 'esize) = undefined;
+ op1 : int = undefined;
+ op2 : int = undefined;
+ /* accumulating form starts from the destination, plain form from zero */
+ result = if acc then aget_V(d) else Zeros();
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ op1 = asl_Int(aget_Elem(operand, 2 * e + 0, esize), unsigned);
+ op2 = asl_Int(aget_Elem(operand, 2 * e + 1, esize), unsigned);
+ sum = __GetSlice_int(2 * esize, op1 + op2, 0);
+ result = aset_Elem(result, e, 2 * esize, aget_Elem(result, e, 2 * esize) + sum)
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_sub_saturating_sisd : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar saturating subtract (SQSUB/UQSUB style): per element,
+   V[d][e] = SatQ(V[n][e] - V[m][e]); saturation sets FPSR bit 27. */
+function aarch64_vector_arithmetic_binary_uniform_sub_saturating_sisd ('d, 'datasize, 'elements, 'esize, 'm, 'n, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ diff : int = undefined;
+ sat : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+ element2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+ diff = element1 - element2;
+ __tmp_697 : bits('esize) = undefined;
+ (__tmp_697, sat) = SatQ(diff, esize, unsigned);
+ result = aset_Elem(result, e, esize, __tmp_697);
+ /* on saturation, set FPSR bit 27 — presumably the cumulative QC flag */
+ if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+ };
+ aset_V(d, result)
+}
+
+val vector_arithmetic_binary_uniform_sub_saturating_sisd_decode : (bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode scalar saturating subtract: register indices and lane geometry
+   are extracted from the encoding, then execution is delegated. */
+function vector_arithmetic_binary_uniform_sub_saturating_sisd_decode (U, size, Rm, Rn, Rd) = {
+    __unconditional = true;
+    let m : int = UInt(Rm);
+    let n : int = UInt(Rn);
+    let d : int = UInt(Rd);
+    /* element width is 8 << size; scalar form operates on one lane */
+    let esize : int = shl_int(8, UInt(size));
+    let datasize : int = esize;
+    let elements : int = 1;
+    let unsigned : bool = U == 0b1;
+    aarch64_vector_arithmetic_binary_uniform_sub_saturating_sisd(d, datasize, elements, esize, m, n, unsigned)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_sub_int : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector integer subtract: per element, V[d][e] = (V[n][e] - V[m][e]) mod
+   2^esize. 'unsigned' only changes how the inputs are read as integers; the
+   truncated difference is identical either way. */
+function aarch64_vector_arithmetic_binary_uniform_sub_int ('d, 'datasize, 'elements, 'esize, 'm, 'n, unsigned) = {
+    assert(constraint('esize >= 0), "esize constraint");
+    assert(constraint('elements >= 1), "elements constraint");
+    let 'dbytes = ex_int(datasize / 8);
+    assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+    assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+    CheckFPAdvSIMDEnabled64();
+    operand1 : bits('datasize) = aget_V(n);
+    operand2 : bits('datasize) = aget_V(m);
+    result : bits('datasize) = undefined;
+    element1 : int = undefined;
+    element2 : int = undefined;
+    diff : int = undefined;
+    foreach (e from 0 to (elements - 1) by 1 in inc) {
+        element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+        element2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+        diff = element1 - element2;
+        /* BUG FIX: slice the difference from bit 0 (diff<esize-1:0>), not
+           from bit 1. The ARM pseudocode assigns diff<esize-1:0>, and every
+           sibling operation in this file slices its integer result with
+           __GetSlice_int(esize, _, 0); offset 1 here halved every result. */
+        result = aset_Elem(result, e, esize, __GetSlice_int(esize, diff, 0))
+    };
+    aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_sub_fp16_sisd : (bool, int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar FP subtract / absolute difference (FSUB/FABD style): per element,
+   diff = FPSub(V[n][e], V[m][e]); the result is |diff| when abs is set. */
+function aarch64_vector_arithmetic_binary_uniform_sub_fp16_sisd (abs, 'd, 'datasize, 'elements, 'esize, 'm, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ element1 : bits('esize) = undefined;
+ element2 : bits('esize) = undefined;
+ diff : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = aget_Elem(operand1, e, esize);
+ element2 = aget_Elem(operand2, e, esize);
+ diff = FPSub(element1, element2, FPCR);
+ result = aset_Elem(result, e, esize, if abs then FPAbs(diff) else diff)
+ };
+ aset_V(d, result)
+}
+
+val vector_arithmetic_binary_uniform_sub_fp_sisd_decode : (bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode scalar FP absolute difference. NOTE(review): U is ignored and abs
+   is hard-wired to true, so this entry presumably only covers the FABD
+   encoding — confirm against the decode tables. */
+function vector_arithmetic_binary_uniform_sub_fp_sisd_decode (U, sz, Rm, Rn, Rd) = {
+    __unconditional = true;
+    let m : int = UInt(Rm);
+    let n : int = UInt(Rn);
+    let d : int = UInt(Rd);
+    /* sz selects a single 32- or 64-bit lane */
+    let esize : int = shl_int(32, UInt(sz));
+    let datasize : int = esize;
+    let elements : int = 1;
+    let abs : bool = true;
+    aarch64_vector_arithmetic_binary_uniform_sub_fp16_sisd(abs, d, datasize, elements, esize, m, n)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_sub_fp16_simd : (bool, int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector FP subtract / absolute difference: identical per-element logic to
+   the _sisd variant above, but for the full-vector (SIMD) encodings. */
+function aarch64_vector_arithmetic_binary_uniform_sub_fp16_simd (abs, 'd, 'datasize, 'elements, 'esize, 'm, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ element1 : bits('esize) = undefined;
+ element2 : bits('esize) = undefined;
+ diff : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = aget_Elem(operand1, e, esize);
+ element2 = aget_Elem(operand2, e, esize);
+ diff = FPSub(element1, element2, FPCR);
+ result = aset_Elem(result, e, esize, if abs then FPAbs(diff) else diff)
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_shift_sisd : (int, int, int, int, int, int, bool, bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar shift-by-register (SSHL/USHL/SRSHL/SQSHL... family): each element
+   of V[n] is shifted by the signed amount in the low byte of the matching
+   V[m] element (negative = right shift), with optional rounding and
+   optional saturation. */
+function aarch64_vector_arithmetic_binary_uniform_shift_sisd ('d, 'datasize, 'elements, 'esize, 'm, 'n, rounding, saturating, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ round_const : int = 0;
+ shift : int = undefined;
+ element : int = undefined;
+ sat : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ /* shift amount: signed low byte of the second operand's element */
+ shift = SInt(slice(aget_Elem(operand2, e, esize), 0, 8));
+ /* rounding constant 1 << (-shift - 1) is only meaningful for right
+    (negative) shifts */
+ if rounding then round_const = shl_int(1, negate(shift) - 1) else ();
+ element = shl_int(asl_Int(aget_Elem(operand1, e, esize), unsigned) + round_const, shift);
+ if saturating then {
+ __tmp_702 : bits('esize) = undefined;
+ (__tmp_702, sat) = SatQ(element, esize, unsigned);
+ result = aset_Elem(result, e, esize, __tmp_702);
+ /* on saturation, set FPSR bit 27 — presumably the cumulative QC flag */
+ if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+ } else result = aset_Elem(result, e, esize, __GetSlice_int(esize, element, 0))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_rsqrtsfp16_sisd : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar FP reciprocal square-root step (FRSQRTS): per element,
+   V[d][e] = FPRSqrtStepFused(V[n][e], V[m][e]). */
+function aarch64_vector_arithmetic_binary_uniform_rsqrtsfp16_sisd ('d, 'datasize, 'elements, 'esize, 'm, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ element1 : bits('esize) = undefined;
+ element2 : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = aget_Elem(operand1, e, esize);
+ element2 = aget_Elem(operand2, e, esize);
+ result = aset_Elem(result, e, esize, FPRSqrtStepFused(element1, element2))
+ };
+ aset_V(d, result)
+}
+
+val vector_arithmetic_binary_uniform_rsqrts_sisd_decode : (bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode scalar FRSQRTS: register indices and lane geometry are read from
+   the encoding, then execution is delegated. */
+function vector_arithmetic_binary_uniform_rsqrts_sisd_decode (U, sz, Rm, Rn, Rd) = {
+    __unconditional = true;
+    let m : int = UInt(Rm);
+    let n : int = UInt(Rn);
+    let d : int = UInt(Rd);
+    /* sz selects a single 32- or 64-bit lane */
+    let esize : int = shl_int(32, UInt(sz));
+    let datasize : int = esize;
+    let elements : int = 1;
+    aarch64_vector_arithmetic_binary_uniform_rsqrtsfp16_sisd(d, datasize, elements, esize, m, n)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_recpsfp16_sisd : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar FP reciprocal step (FRECPS): per element,
+   V[d][e] = FPRecipStepFused(V[n][e], V[m][e]). */
+function aarch64_vector_arithmetic_binary_uniform_recpsfp16_sisd ('d, 'datasize, 'elements, 'esize, 'm, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ element1 : bits('esize) = undefined;
+ element2 : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = aget_Elem(operand1, e, esize);
+ element2 = aget_Elem(operand2, e, esize);
+ result = aset_Elem(result, e, esize, FPRecipStepFused(element1, element2))
+ };
+ aset_V(d, result)
+}
+
+val vector_arithmetic_binary_uniform_recps_sisd_decode : (bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode scalar FRECPS: register indices and lane geometry are read from
+   the encoding, then execution is delegated. */
+function vector_arithmetic_binary_uniform_recps_sisd_decode (U, sz, Rm, Rn, Rd) = {
+    __unconditional = true;
+    let m : int = UInt(Rm);
+    let n : int = UInt(Rn);
+    let d : int = UInt(Rd);
+    /* sz selects a single 32- or 64-bit lane */
+    let esize : int = shl_int(32, UInt(sz));
+    let datasize : int = esize;
+    let elements : int = 1;
+    aarch64_vector_arithmetic_binary_uniform_recpsfp16_sisd(d, datasize, elements, esize, m, n)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_mul_int_product : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector multiply (MUL/PMUL): per element the product of V[n][e] and
+   V[m][e], either a polynomial (carry-less) product truncated to esize bits,
+   or an ordinary integer product modulo 2^esize. */
+function aarch64_vector_arithmetic_binary_uniform_mul_int_product ('d, 'datasize, 'elements, 'esize, 'm, 'n, poly) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ assert('elements >= 1 & 'esize >= 1);
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ element1 : bits('esize) = undefined;
+ element2 : bits('esize) = undefined;
+ product : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = aget_Elem(operand1, e, esize);
+ element2 = aget_Elem(operand2, e, esize);
+ if poly then product = slice(PolynomialMult(element1, element2), 0, esize)
+ else product = __GetSlice_int(esize, UInt(element1) * UInt(element2), 0);
+ result = aset_Elem(result, e, esize, product)
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_mul_int_doubling_sisd : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar signed saturating doubling multiply high half (SQDMULH/SQRDMULH
+   style): per element, (2*a*b [+ 1<<(esize-1) if rounding]) >> esize,
+   signed-saturated to esize bits. */
+function aarch64_vector_arithmetic_binary_uniform_mul_int_doubling_sisd ('d, 'datasize, 'elements, 'esize, 'm, 'n, rounding) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ round_const : int = if rounding then shl_int(1, esize - 1) else 0;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ product : int = undefined;
+ sat : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = SInt(aget_Elem(operand1, e, esize));
+ element2 = SInt(aget_Elem(operand2, e, esize));
+ product = (2 * element1) * element2 + round_const;
+ __tmp_754 : bits('esize) = undefined;
+ (__tmp_754, sat) = SignedSatQ(shr_int(product, esize), esize);
+ result = aset_Elem(result, e, esize, __tmp_754);
+ /* on saturation, set FPSR bit 27 — presumably the cumulative QC flag */
+ if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_mul_int_doubling_accum_sisd : (int, int, int, int, int, int, bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar rounding doubling multiply accumulate/subtract high half
+   (SQRDMLAH/SQRDMLSH style): per element,
+   ((V[d][e] << esize) +/- 2*a*b + round_const) >> esize, signed-saturated. */
+function aarch64_vector_arithmetic_binary_uniform_mul_int_doubling_accum_sisd ('d, 'datasize, 'elements, 'esize, 'm, 'n, rounding, sub_op) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ operand3 : bits('datasize) = aget_V(d);
+ result : bits('datasize) = undefined;
+ rounding_const : int = if rounding then shl_int(1, esize - 1) else 0;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ element3 : int = undefined;
+ product : int = undefined;
+ sat : bool = undefined;
+ accum : int = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = SInt(aget_Elem(operand1, e, esize));
+ element2 = SInt(aget_Elem(operand2, e, esize));
+ element3 = SInt(aget_Elem(operand3, e, esize));
+ /* accumulator element is pre-scaled by << esize so the final >> esize
+    recovers it intact */
+ if sub_op then accum = (shl_int(element3, esize) - 2 * (element1 * element2)) + rounding_const else accum = (shl_int(element3, esize) + 2 * (element1 * element2)) + rounding_const;
+ __tmp_835 : bits('esize) = undefined;
+ (__tmp_835, sat) = SignedSatQ(shr_int(accum, esize), esize);
+ result = aset_Elem(result, e, esize, __tmp_835);
+ /* on saturation, set FPSR bit 27 — presumably the cumulative QC flag */
+ if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_mul_int_dotp : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector dot product (SDOT/UDOT style): each esize-bit destination element
+   accumulates the 4-way dot product of the corresponding groups of four
+   (esize/4)-bit sub-elements of V[n] and V[m]. */
+function aarch64_vector_arithmetic_binary_uniform_mul_int_dotp ('d, 'datasize, 'elements, 'esize, 'm, 'n, signed) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ /* accumulating operation: result starts from the destination register */
+ result : bits('datasize) = aget_V(d);
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ res : int = 0;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ foreach (i from 0 to 3 by 1 in inc) {
+ if signed then {
+ element1 = SInt(aget_Elem(operand1, 4 * e + i, esize / 4));
+ element2 = SInt(aget_Elem(operand2, 4 * e + i, esize / 4))
+ } else {
+ element1 = UInt(aget_Elem(operand1, 4 * e + i, esize / 4));
+ element2 = UInt(aget_Elem(operand2, 4 * e + i, esize / 4))
+ };
+ res = res + element1 * element2
+ };
+ result = aset_Elem(result, e, esize, aget_Elem(result, e, esize) + res)
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_mul_int_accum : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector multiply accumulate/subtract (MLA/MLS style): per element,
+   V[d][e] = V[d][e] +/- (V[n][e] * V[m][e]) mod 2^esize. */
+function aarch64_vector_arithmetic_binary_uniform_mul_int_accum ('d, 'datasize, 'elements, 'esize, 'm, 'n, sub_op) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ operand3 : bits('datasize) = aget_V(d);
+ result : bits('datasize) = undefined;
+ element1 : bits('esize) = undefined;
+ element2 : bits('esize) = undefined;
+ product : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = aget_Elem(operand1, e, esize);
+ element2 = aget_Elem(operand2, e, esize);
+ product = __GetSlice_int(esize, UInt(element1) * UInt(element2), 0);
+ if sub_op then result = aset_Elem(result, e, esize, aget_Elem(operand3, e, esize) - product) else result = aset_Elem(result, e, esize, aget_Elem(operand3, e, esize) + product)
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_mul_fp16_product : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector FP multiply (FMUL): per element, V[d][e] = FPMul(V[n][e], V[m][e]). */
+function aarch64_vector_arithmetic_binary_uniform_mul_fp16_product ('d, 'datasize, 'elements, 'esize, 'm, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ element1 : bits('esize) = undefined;
+ element2 : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = aget_Elem(operand1, e, esize);
+ element2 = aget_Elem(operand2, e, esize);
+ result = aset_Elem(result, e, esize, FPMul(element1, element2, FPCR))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_mul_fp16_fused : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector fused multiply-add/subtract (FMLA/FMLS style): per element,
+   V[d][e] = FPMulAdd(V[d][e], +/-V[n][e], V[m][e]); sub_op negates the
+   first multiplicand. */
+function aarch64_vector_arithmetic_binary_uniform_mul_fp16_fused ('d, 'datasize, 'elements, 'esize, 'm, 'n, sub_op) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ operand3 : bits('datasize) = aget_V(d);
+ result : bits('datasize) = undefined;
+ element1 : bits('esize) = undefined;
+ element2 : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = aget_Elem(operand1, e, esize);
+ element2 = aget_Elem(operand2, e, esize);
+ if sub_op then element1 = FPNeg(element1) else ();
+ result = aset_Elem(result, e, esize, FPMulAdd(aget_Elem(operand3, e, esize), element1, element2, FPCR))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_mul_fp16_extended_sisd : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar FP multiply extended (FMULX): per element,
+   V[d][e] = FPMulX(V[n][e], V[m][e]). */
+function aarch64_vector_arithmetic_binary_uniform_mul_fp16_extended_sisd ('d, 'datasize, 'elements, 'esize, 'm, 'n) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ element1 : bits('esize) = undefined;
+ element2 : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = aget_Elem(operand1, e, esize);
+ element2 = aget_Elem(operand2, e, esize);
+ result = aset_Elem(result, e, esize, FPMulX(element1, element2, FPCR))
+ };
+ aset_V(d, result)
+}
+
+val vector_arithmetic_binary_uniform_mul_fp_extended_sisd_decode : (bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode scalar FMULX: register indices and lane geometry are read from
+   the encoding, then execution is delegated. */
+function vector_arithmetic_binary_uniform_mul_fp_extended_sisd_decode (U, sz, Rm, Rn, Rd) = {
+    __unconditional = true;
+    let m : int = UInt(Rm);
+    let n : int = UInt(Rn);
+    let d : int = UInt(Rd);
+    /* sz selects a single 32- or 64-bit lane */
+    let esize : int = shl_int(32, UInt(sz));
+    let datasize : int = esize;
+    let elements : int = 1;
+    aarch64_vector_arithmetic_binary_uniform_mul_fp16_extended_sisd(d, datasize, elements, esize, m, n)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_mul_fp_complex : (int, int, int, int, int, int, bits(2)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector FP complex multiply accumulate (FCMLA style): elements are treated
+   as (real, imaginary) pairs; rot selects the 0/90/180/270-degree rotation
+   by choosing which operand elements are taken and which are negated, then
+   both halves of each pair are updated with fused multiply-adds. */
+function aarch64_vector_arithmetic_binary_uniform_mul_fp_complex ('d, 'datasize, 'elements, 'esize, 'm, 'n, rot) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ operand3 : bits('datasize) = aget_V(d);
+ result : bits('datasize) = undefined;
+ element1 : bits('esize) = undefined;
+ element2 : bits('esize) = undefined;
+ element3 : bits('esize) = undefined;
+ element4 : bits('esize) = undefined;
+ /* iterate over complex pairs, hence elements / 2 */
+ foreach (e from 0 to (elements / 2 - 1) by 1 in inc) {
+ match rot {
+ 0b00 => {
+ element1 = aget_Elem(operand2, e * 2, esize);
+ element2 = aget_Elem(operand1, e * 2, esize);
+ element3 = aget_Elem(operand2, e * 2 + 1, esize);
+ element4 = aget_Elem(operand1, e * 2, esize)
+ },
+ 0b01 => {
+ element1 = FPNeg(aget_Elem(operand2, e * 2 + 1, esize));
+ element2 = aget_Elem(operand1, e * 2 + 1, esize);
+ element3 = aget_Elem(operand2, e * 2, esize);
+ element4 = aget_Elem(operand1, e * 2 + 1, esize)
+ },
+ 0b10 => {
+ element1 = FPNeg(aget_Elem(operand2, e * 2, esize));
+ element2 = aget_Elem(operand1, e * 2, esize);
+ element3 = FPNeg(aget_Elem(operand2, e * 2 + 1, esize));
+ element4 = aget_Elem(operand1, e * 2, esize)
+ },
+ 0b11 => {
+ element1 = aget_Elem(operand2, e * 2 + 1, esize);
+ element2 = aget_Elem(operand1, e * 2 + 1, esize);
+ element3 = FPNeg(aget_Elem(operand2, e * 2, esize));
+ element4 = aget_Elem(operand1, e * 2 + 1, esize)
+ }
+ };
+ /* even lane: real part; odd lane: imaginary part of the pair */
+ result = aset_Elem(result, e * 2, esize, FPMulAdd(aget_Elem(operand3, e * 2, esize), element2, element1, FPCR));
+ result = aset_Elem(result, e * 2 + 1, esize, FPMulAdd(aget_Elem(operand3, e * 2 + 1, esize), element4, element3, FPCR))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_maxmin_single : (int, int, int, int, int, bool, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector integer max/min (SMAX/SMIN/UMAX/UMIN style): per element, the
+   max (or min, when minimum is set) of V[n][e] and V[m][e], compared as
+   signed or unsigned integers. */
+function aarch64_vector_arithmetic_binary_uniform_maxmin_single ('d, 'datasize, 'elements, 'esize, 'm, minimum, 'n, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ maxmin : int = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+ element2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+ maxmin = if minimum then min(element1, element2) else max(element1, element2);
+ result = aset_Elem(result, e, esize, __GetSlice_int(esize, maxmin, 0))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_maxmin_pair : (int, int, int, int, int, bool, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector pairwise integer max/min (SMAXP/UMINP style): operates on adjacent
+   element pairs of the concatenation V[m]:V[n]. */
+function aarch64_vector_arithmetic_binary_uniform_maxmin_pair ('d, 'datasize, 'elements, 'esize, 'm, minimum, 'n, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ /* pairwise forms draw both elements of each pair from this concatenation */
+ concat : bits(2 * 'datasize) = operand2 @ operand1;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ maxmin : int = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = asl_Int(aget_Elem(concat, 2 * e, esize), unsigned);
+ element2 = asl_Int(aget_Elem(concat, 2 * e + 1, esize), unsigned);
+ maxmin = if minimum then min(element1, element2) else max(element1, element2);
+ result = aset_Elem(result, e, esize, __GetSlice_int(esize, maxmin, 0))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_maxmin_fp16_2008 : (int, int, int, int, int, bool, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector FP maxnum/minnum (FMAXNM/FMINNM style, IEEE 754-2008 NaN
+   handling via FPMaxNum/FPMinNum); 'pair' selects the pairwise form over
+   the concatenation V[m]:V[n]. */
+function aarch64_vector_arithmetic_binary_uniform_maxmin_fp16_2008 ('d, 'datasize, 'elements, 'esize, 'm, minimum, 'n, pair) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ concat : bits(2 * 'datasize) = operand2 @ operand1;
+ element1 : bits('esize) = undefined;
+ element2 : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ if pair then {
+ element1 = aget_Elem(concat, 2 * e, esize);
+ element2 = aget_Elem(concat, 2 * e + 1, esize)
+ } else {
+ element1 = aget_Elem(operand1, e, esize);
+ element2 = aget_Elem(operand2, e, esize)
+ };
+ if minimum then result = aset_Elem(result, e, esize, FPMinNum(element1, element2, FPCR)) else result = aset_Elem(result, e, esize, FPMaxNum(element1, element2, FPCR))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_maxmin_fp16_1985 : (int, int, int, int, int, bool, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector FP max/min (FMAX/FMIN style, via FPMax/FPMin rather than the
+   754-2008 maxNum/minNum operations used by the _2008 variant above);
+   'pair' selects the pairwise form over the concatenation V[m]:V[n]. */
+function aarch64_vector_arithmetic_binary_uniform_maxmin_fp16_1985 ('d, 'datasize, 'elements, 'esize, 'm, minimum, 'n, pair) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ concat : bits(2 * 'datasize) = operand2 @ operand1;
+ element1 : bits('esize) = undefined;
+ element2 : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ if pair then {
+ element1 = aget_Elem(concat, 2 * e, esize);
+ element2 = aget_Elem(concat, 2 * e + 1, esize)
+ } else {
+ element1 = aget_Elem(operand1, e, esize);
+ element2 = aget_Elem(operand2, e, esize)
+ };
+ if minimum then result = aset_Elem(result, e, esize, FPMin(element1, element2, FPCR)) else result = aset_Elem(result, e, esize, FPMax(element1, element2, FPCR))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_uniform_logical_bsleor : (int, int, int, int, VBitOp) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector bitwise EOR and bitwise-select family (EOR/BSL/BIT/BIF): all four
+   are expressed as one EOR/AND formula over operands chosen per op. */
+function aarch64_vector_arithmetic_binary_uniform_logical_bsleor ('d, 'datasize, 'm, 'n, op) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = undefined;
+ operand2 : bits('datasize) = undefined;
+ operand3 : bits('datasize) = undefined;
+ operand4 : bits('datasize) = aget_V(n);
+ match op {
+ VBitOp_VEOR => {
+ operand1 = aget_V(m);
+ operand2 = Zeros();
+ operand3 = Ones()
+ },
+ VBitOp_VBSL => {
+ operand1 = aget_V(m);
+ operand2 = operand1;
+ operand3 = aget_V(d)
+ },
+ VBitOp_VBIT => {
+ operand1 = aget_V(d);
+ operand2 = operand1;
+ operand3 = aget_V(m)
+ },
+ VBitOp_VBIF => {
+ operand1 = aget_V(d);
+ operand2 = operand1;
+ operand3 = ~(aget_V(m))
+ }
+ };
+ /* NOTE(review): ARM's formula is operand1 EOR ((operand2 EOR operand4)
+    AND operand3). As written this relies on Sail grouping the inner
+    expression as (operand2 ^ operand4) & operand3 — confirm the relative
+    precedence of '^' and '&' in this Sail dialect; the other grouping
+    would break BSL/BIT/BIF. */
+ aset_V(d, operand1 ^ (operand2 ^ operand4 & operand3))
+}
+
+/* Decode for the EOR/BSL/BIT/BIF register form: extracts register
+ * numbers from Rd/Rn/Rm, picks 64/128-bit datasize from Q, maps opc2
+ * to a VBitOp, and dispatches.  U, esize and elements are computed but
+ * unused here (an artefact of the ASL-to-Sail generator). */
+val vector_arithmetic_binary_uniform_logical_bsleor_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_logical_bsleor_decode (Q, U, opc2, Rm, Rn, Rd) = {
+  __unconditional = true;
+  d : int = UInt(Rd);
+  n : int = UInt(Rn);
+  m : int = UInt(Rm);
+  esize : int = 8;
+  let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+  elements : int = datasize / esize;
+  op : VBitOp = undefined;
+  match opc2 {
+    0b00 => op = VBitOp_VEOR,
+    0b01 => op = VBitOp_VBSL,
+    0b10 => op = VBitOp_VBIT,
+    0b11 => op = VBitOp_VBIF
+  };
+  aarch64_vector_arithmetic_binary_uniform_logical_bsleor(d, datasize, m, n, op)
+}
+
+/* Whole-register bitwise AND/ORR of V[n] with V[m], with optional
+ * inversion of the second operand (giving the BIC/ORN variants);
+ * generated from ARM ASL.  Result -> V[d]. */
+val aarch64_vector_arithmetic_binary_uniform_logical_andorr : (int, int, bool, int, int, LogicalOp) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_uniform_logical_andorr ('d, 'datasize, invert, 'm, 'n, op) = let 'dbytes = ex_int(datasize / 8) in {
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('datasize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  if invert then operand2 = ~(operand2) else ();
+  match op {
+    LogicalOp_AND => result = operand1 & operand2,
+    LogicalOp_ORR => result = operand1 | operand2
+  };
+  aset_V(d, result)
+}
+
+/* Decode for the AND/BIC/ORR/ORN register form: size[0] selects operand
+ * inversion, size[1] selects ORR over AND, Q selects 64/128-bit width.
+ * U, esize and elements are computed but unused (generator artefact). */
+val vector_arithmetic_binary_uniform_logical_andorr_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_logical_andorr_decode (Q, U, size, Rm, Rn, Rd) = {
+  __unconditional = true;
+  d : int = UInt(Rd);
+  n : int = UInt(Rn);
+  m : int = UInt(Rm);
+  esize : int = 8;
+  let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+  elements : int = datasize / esize;
+  invert : bool = [size[0]] == 0b1;
+  op : LogicalOp = if [size[1]] == 0b1 then LogicalOp_ORR else LogicalOp_AND;
+  aarch64_vector_arithmetic_binary_uniform_logical_andorr(d, datasize, invert, m, n, op)
+}
+
+/* Lane-wise floating-point divide (FDIV vector form; generated from
+ * ARM ASL): for each of 'elements lanes of 'esize bits, computes
+ * FPDiv(V[n] lane, V[m] lane, FPCR) and writes the result to V[d]. */
+val aarch64_vector_arithmetic_binary_uniform_divfp16 : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_uniform_divfp16 ('d, 'datasize, 'elements, 'esize, 'm, 'n) = {
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('datasize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  element1 : bits('esize) = undefined;
+  element2 : bits('esize) = undefined;
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = aget_Elem(operand1, e, esize);
+    element2 = aget_Elem(operand2, e, esize);
+    result = aset_Elem(result, e, esize, FPDiv(element1, element2, FPCR))
+  };
+  aset_V(d, result)
+}
+
+/* Lane-wise absolute difference, optionally accumulated (the
+ * SABD/UABD / SABA/UABA family; generated from ARM ASL).  Elements are
+ * read as signed or unsigned integers per `unsigned`; when accumulate
+ * is true the running result starts from V[d], else from zero. */
+val aarch64_vector_arithmetic_binary_uniform_diff : (bool, int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_uniform_diff (accumulate, 'd, 'datasize, 'elements, 'esize, 'm, 'n, unsigned) = {
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('datasize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  element1 : int = undefined;
+  element2 : int = undefined;
+  absdiff : bits('esize) = undefined;
+  result = if accumulate then aget_V(d) else Zeros();
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+    element2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+    /* |element1 - element2| truncated to the bottom esize bits. */
+    absdiff = __GetSlice_int(esize, abs(element1 - element2), 0);
+    result = aset_Elem(result, e, esize, aget_Elem(result, e, esize) + absdiff)
+  };
+  aset_V(d, result)
+}
+
+/* Lane-wise integer compare (generated from ARM ASL): interprets each
+ * lane of V[n] and V[m] as signed/unsigned per `unsigned`, tests >=
+ * (cmp_eq true) or > (cmp_eq false), and writes an all-ones lane on
+ * pass, all-zeros on fail, to V[d]. */
+val aarch64_vector_arithmetic_binary_uniform_cmp_int_sisd : (bool, int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_uniform_cmp_int_sisd (cmp_eq, 'd, 'datasize, 'elements, 'esize, 'm, 'n, unsigned) = {
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('datasize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  element1 : int = undefined;
+  element2 : int = undefined;
+  test_passed : bool = undefined;
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+    element2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+    test_passed = if cmp_eq then element1 >= element2 else element1 > element2;
+    result = aset_Elem(result, e, esize, if test_passed then Ones() else Zeros())
+  };
+  aset_V(d, result)
+}
+
+/* Lane-wise FP compare (FCMEQ/FCMGE/FCMGT and, with abs, the
+ * FACGE/FACGT absolute-compare forms; generated from ARM ASL).
+ * Each passing lane of V[d] is set to all-ones, failing lanes to
+ * all-zeros. */
+val aarch64_vector_arithmetic_binary_uniform_cmp_fp16_sisd : (bool, CompareOp, int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_uniform_cmp_fp16_sisd (abs, cmp, 'd, 'datasize, 'elements, 'esize, 'm, 'n) = {
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('datasize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  element1 : bits('esize) = undefined;
+  element2 : bits('esize) = undefined;
+  test_passed : bool = undefined;
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = aget_Elem(operand1, e, esize);
+    element2 = aget_Elem(operand2, e, esize);
+    /* abs=true compares magnitudes (both operands through FPAbs). */
+    if abs then {
+      element1 = FPAbs(element1);
+      element2 = FPAbs(element2)
+    } else ();
+    match cmp {
+      CompareOp_EQ => test_passed = FPCompareEQ(element1, element2, FPCR),
+      CompareOp_GE => test_passed = FPCompareGE(element1, element2, FPCR),
+      CompareOp_GT => test_passed = FPCompareGT(element1, element2, FPCR)
+    };
+    result = aset_Elem(result, e, esize, if test_passed then Ones() else Zeros())
+  };
+  aset_V(d, result)
+}
+
+/* Lane-wise bitwise compare (CMTST / CMEQ register forms; generated
+ * from ARM ASL): and_test=true passes when (lane_n AND lane_m) is
+ * non-zero, and_test=false passes on lane equality.  Passing lanes of
+ * V[d] become all-ones, failing lanes all-zeros. */
+val aarch64_vector_arithmetic_binary_uniform_cmp_bitwise_sisd : (bool, int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_uniform_cmp_bitwise_sisd (and_test, 'd, 'datasize, 'elements, 'esize, 'm, 'n) = {
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('datasize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  element1 : bits('esize) = undefined;
+  element2 : bits('esize) = undefined;
+  test_passed : bool = undefined;
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = aget_Elem(operand1, e, esize);
+    element2 = aget_Elem(operand2, e, esize);
+    if and_test then test_passed = ~(IsZero(element1 & element2)) else test_passed = element1 == element2;
+    result = aset_Elem(result, e, esize, if test_passed then Ones() else Zeros())
+  };
+  aset_V(d, result)
+}
+
+/* Lane-wise wrapping (modular) add or subtract (generated from ARM
+ * ASL): result lane = V[n] lane - V[m] lane when sub_op, else the sum.
+ * Overflow wraps at 'esize bits; result -> V[d]. */
+val aarch64_vector_arithmetic_binary_uniform_add_wrapping_single_sisd : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_uniform_add_wrapping_single_sisd ('d, 'datasize, 'elements, 'esize, 'm, 'n, sub_op) = {
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('datasize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  element1 : bits('esize) = undefined;
+  element2 : bits('esize) = undefined;
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = aget_Elem(operand1, e, esize);
+    element2 = aget_Elem(operand2, e, esize);
+    if sub_op then result = aset_Elem(result, e, esize, element1 - element2) else result = aset_Elem(result, e, esize, element1 + element2)
+  };
+  aset_V(d, result)
+}
+
+/* Pairwise wrapping add (ADDP-style; generated from ARM ASL): lane e
+ * of V[d] is the modular sum of elements 2e and 2e+1 of the
+ * concatenation V[m]:V[n]. */
+val aarch64_vector_arithmetic_binary_uniform_add_wrapping_pair : (int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_uniform_add_wrapping_pair ('d, 'datasize, 'elements, 'esize, 'm, 'n) = {
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('datasize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  concat : bits(2 * 'datasize) = operand2 @ operand1;
+  element1 : bits('esize) = undefined;
+  element2 : bits('esize) = undefined;
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = aget_Elem(concat, 2 * e, esize);
+    element2 = aget_Elem(concat, 2 * e + 1, esize);
+    result = aset_Elem(result, e, esize, element1 + element2)
+  };
+  aset_V(d, result)
+}
+
+/* Lane-wise saturating add (SQADD/UQADD family; generated from ARM
+ * ASL): sums lanes as signed/unsigned integers, saturates to 'esize
+ * bits via SatQ, and sets FPSR bit 27 (the QC cumulative saturation
+ * flag) whenever any lane saturates.  Result -> V[d]. */
+val aarch64_vector_arithmetic_binary_uniform_add_saturating_sisd : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_uniform_add_saturating_sisd ('d, 'datasize, 'elements, 'esize, 'm, 'n, unsigned) = {
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('datasize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  element1 : int = undefined;
+  element2 : int = undefined;
+  sum : int = undefined;
+  sat : bool = undefined;
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+    element2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+    sum = element1 + element2;
+    __tmp_735 : bits('esize) = undefined;
+    (__tmp_735, sat) = SatQ(sum, esize, unsigned);
+    result = aset_Elem(result, e, esize, __tmp_735);
+    if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+  };
+  aset_V(d, result)
+}
+
+/* Decode for the scalar saturating-add form: element size is
+ * 8 << UInt(size), the datasize equals one element (scalar / SISD),
+ * and U selects the unsigned variant. */
+val vector_arithmetic_binary_uniform_add_saturating_sisd_decode : (bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_add_saturating_sisd_decode (U, size, Rm, Rn, Rd) = {
+  __unconditional = true;
+  d : int = UInt(Rd);
+  n : int = UInt(Rn);
+  m : int = UInt(Rm);
+  esize : int = shl_int(8, UInt(size));
+  datasize : int = esize;
+  elements : int = 1;
+  unsigned : bool = U == 0b1;
+  aarch64_vector_arithmetic_binary_uniform_add_saturating_sisd(d, datasize, elements, esize, m, n, unsigned)
+}
+
+/* Lane-wise halving add with truncation (SHADD/UHADD family; generated
+ * from ARM ASL): each result lane is (a + b) >> 1, computed on the
+ * full-precision integer sum so the carry bit is not lost. */
+val aarch64_vector_arithmetic_binary_uniform_add_halving_truncating : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_uniform_add_halving_truncating ('d, 'datasize, 'elements, 'esize, 'm, 'n, unsigned) = {
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('datasize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  element1 : int = undefined;
+  element2 : int = undefined;
+  sum : int = undefined;
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+    element2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+    sum = element1 + element2;
+    /* Slice starting at bit 1 == arithmetic shift right by one. */
+    result = aset_Elem(result, e, esize, __GetSlice_int(esize, sum, 1))
+  };
+  aset_V(d, result)
+}
+
+/* Lane-wise halving add with rounding (SRHADD/URHADD family; generated
+ * from ARM ASL): each result lane is (a + b + 1) >> 1, so the discarded
+ * half-bit rounds the result up. */
+val aarch64_vector_arithmetic_binary_uniform_add_halving_rounding : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_uniform_add_halving_rounding ('d, 'datasize, 'elements, 'esize, 'm, 'n, unsigned) = {
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('datasize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  element1 : int = undefined;
+  element2 : int = undefined;
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+    element2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+    result = aset_Elem(result, e, esize, __GetSlice_int(esize, (element1 + element2) + 1, 1))
+  };
+  aset_V(d, result)
+}
+
+/* Element-wise or pairwise FP add (FADD / FADDP family; generated from
+ * ARM ASL).  pair=true sums adjacent element pairs of the
+ * concatenation V[m]:V[n]; otherwise lane e of V[n] and V[m] are
+ * added.  Result -> V[d]. */
+val aarch64_vector_arithmetic_binary_uniform_add_fp16 : (int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_uniform_add_fp16 ('d, 'datasize, 'elements, 'esize, 'm, 'n, pair) = {
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('datasize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  concat : bits(2 * 'datasize) = operand2 @ operand1;
+  element1 : bits('esize) = undefined;
+  element2 : bits('esize) = undefined;
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    if pair then {
+      element1 = aget_Elem(concat, 2 * e, esize);
+      element2 = aget_Elem(concat, 2 * e + 1, esize)
+    } else {
+      element1 = aget_Elem(operand1, e, esize);
+      element2 = aget_Elem(operand2, e, esize)
+    };
+    result = aset_Elem(result, e, esize, FPAdd(element1, element2, FPCR))
+  };
+  aset_V(d, result)
+}
+
+/* FP complex add with rotation (FCADD; generated from ARM ASL).
+ * Lanes are treated as (real, imaginary) pairs; rot selects whether
+ * V[m] is rotated by 90 degrees (0b0) or 270 degrees (0b1) in the
+ * complex plane before being added to V[n], via FPNeg on one half of
+ * each pair.  operand3 (V[d]) is read but unused here (generator
+ * artefact). */
+val aarch64_vector_arithmetic_binary_uniform_add_fp_complex : (int, int, int, int, int, int, bits(1)) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_uniform_add_fp_complex ('d, 'datasize, 'elements, 'esize, 'm, 'n, rot) = {
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('datasize) = aget_V(m);
+  operand3 : bits('datasize) = aget_V(d);
+  result : bits('datasize) = undefined;
+  element1 : bits('esize) = undefined;
+  element3 : bits('esize) = undefined;
+  foreach (e from 0 to (elements / 2 - 1) by 1 in inc) {
+    match rot {
+      0b0 => {
+        element1 = FPNeg(aget_Elem(operand2, e * 2 + 1, esize));
+        element3 = aget_Elem(operand2, e * 2, esize)
+      },
+      0b1 => {
+        element1 = aget_Elem(operand2, e * 2 + 1, esize);
+        element3 = FPNeg(aget_Elem(operand2, e * 2, esize))
+      }
+    };
+    result = aset_Elem(result, e * 2, esize, FPAdd(aget_Elem(operand1, e * 2, esize), element1, FPCR));
+    result = aset_Elem(result, e * 2 + 1, esize, FPAdd(aget_Elem(operand1, e * 2 + 1, esize), element3, FPCR))
+  };
+  aset_V(d, result)
+}
+
+/* Widening multiply by a single element (SMULL/UMULL by-element family;
+ * generated from ARM ASL): multiplies each lane of the selected half
+ * ('part) of V[n] by lane 'index of V[m], producing 2*'esize-bit
+ * products into the double-width destination V[d]. */
+val aarch64_vector_arithmetic_binary_element_mul_long : (int, int, int, int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_element_mul_long ('d, 'datasize, 'elements, 'esize, 'idxdsize, 'index, 'm, 'n, 'part, unsigned) = {
+  assert(constraint('idxdsize >= 0), "idxdsize constraint");
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_Vpart(n, part);
+  operand2 : bits('idxdsize) = aget_V(m);
+  result : bits(2 * 'datasize) = undefined;
+  element1 : int = undefined;
+  element2 : int = undefined;
+  product : bits(2 * 'esize) = undefined;
+  /* The by-element operand is loop-invariant: fetched once. */
+  element2 = asl_Int(aget_Elem(operand2, index, esize), unsigned);
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+    product = __GetSlice_int(2 * esize, element1 * element2, 0);
+    result = aset_Elem(result, e, 2 * esize, product)
+  };
+  aset_V(d, result)
+}
+
+/* Same-width multiply by element (MUL by-element form; generated from
+ * ARM ASL): each lane of V[n] is multiplied by lane 'index of V[m] and
+ * the product is truncated to 'esize bits.  UInt is used for both
+ * operands; since the result is taken mod 2^esize, signedness does not
+ * affect it. */
+val aarch64_vector_arithmetic_binary_element_mul_int : (int, int, int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_element_mul_int ('d, 'datasize, 'elements, 'esize, 'idxdsize, 'index, 'm, 'n) = {
+  assert(constraint('idxdsize >= 0), "idxdsize constraint");
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('idxdsize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  element1 : int = undefined;
+  element2 : int = undefined;
+  product : bits('esize) = undefined;
+  element2 = UInt(aget_Elem(operand2, index, esize));
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = UInt(aget_Elem(operand1, e, esize));
+    product = __GetSlice_int(esize, element1 * element2, 0);
+    result = aset_Elem(result, e, esize, product)
+  };
+  aset_V(d, result)
+}
+
+/* Signed saturating doubling multiply returning high half, by element
+ * (SQDMULH / SQRDMULH family; generated from ARM ASL).  round adds
+ * 2^(esize-1) before the high half is taken; each lane's doubled
+ * product is saturated with SignedSatQ and FPSR bit 27 (QC) is set if
+ * any lane saturates. */
+val aarch64_vector_arithmetic_binary_element_mul_high_sisd : (int, int, int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_element_mul_high_sisd ('d, 'datasize, 'elements, 'esize, 'idxdsize, 'index, 'm, 'n, round) = {
+  assert(constraint('idxdsize >= 0), "idxdsize constraint");
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('idxdsize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  round_const : int = if round then shl_int(1, esize - 1) else 0;
+  element1 : int = undefined;
+  element2 : int = undefined;
+  product : int = undefined;
+  sat : bool = undefined;
+  element2 = SInt(aget_Elem(operand2, index, esize));
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = SInt(aget_Elem(operand1, e, esize));
+    product = (2 * element1) * element2 + round_const;
+    __tmp_771 : bits('esize) = undefined;
+    (__tmp_771, sat) = SignedSatQ(shr_int(product, esize), esize);
+    result = aset_Elem(result, e, esize, __tmp_771);
+    if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+  };
+  aset_V(d, result)
+}
+
+/* FP multiply by element (FMUL / FMULX by-element family; generated
+ * from ARM ASL): multiplies each lane of V[n] by lane 'index of V[m],
+ * using FPMulX when mulx_op is set and FPMul otherwise. */
+val aarch64_vector_arithmetic_binary_element_mul_fp16_sisd : (int, int, int, int, int, int, int, bool, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_element_mul_fp16_sisd ('d, 'datasize, 'elements, 'esize, 'idxdsize, 'index, 'm, mulx_op, 'n) = {
+  assert(constraint('idxdsize >= 0), "idxdsize constraint");
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('idxdsize) = aget_V(m);
+  result : bits('datasize) = undefined;
+  element1 : bits('esize) = undefined;
+  element2 : bits('esize) = aget_Elem(operand2, index, esize);
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = aget_Elem(operand1, e, esize);
+    if mulx_op then result = aset_Elem(result, e, esize, FPMulX(element1, element2, FPCR)) else result = aset_Elem(result, e, esize, FPMul(element1, element2, FPCR))
+  };
+  aset_V(d, result)
+}
+
+/* Signed saturating doubling multiply long, by element (SQDMULL
+ * by-element family; generated from ARM ASL): doubles the product of
+ * each lane of V[n]'s selected half with lane 'index of V[m],
+ * saturates to 2*'esize bits, and sets FPSR bit 27 (QC) on any
+ * saturation.  Destination V[d] is double width. */
+val aarch64_vector_arithmetic_binary_element_mul_double_sisd : (int, int, int, int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_element_mul_double_sisd ('d, 'datasize, 'elements, 'esize, 'idxdsize, 'index, 'm, 'n, 'part) = {
+  assert(constraint('idxdsize >= 0), "idxdsize constraint");
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_Vpart(n, part);
+  operand2 : bits('idxdsize) = aget_V(m);
+  result : bits(2 * 'datasize) = undefined;
+  element1 : int = undefined;
+  element2 : int = undefined;
+  product : bits(2 * 'esize) = undefined;
+  sat : bool = undefined;
+  element2 = SInt(aget_Elem(operand2, index, esize));
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = SInt(aget_Elem(operand1, e, esize));
+    (product, sat) = SignedSatQ((2 * element1) * element2, 2 * esize);
+    result = aset_Elem(result, e, 2 * esize, product);
+    if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+  };
+  aset_V(d, result)
+}
+
+/* Widening multiply-accumulate by element (SMLAL/UMLAL/SMLSL/UMLSL
+ * by-element family; generated from ARM ASL): each 2*'esize-bit
+ * product of a lane of V[n]'s selected half with lane 'index of V[m]
+ * is added to (or, with sub_op, subtracted from) the corresponding
+ * lane of the double-width accumulator V[d]. */
+val aarch64_vector_arithmetic_binary_element_mulacc_long : (int, int, int, int, int, int, int, int, int, bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_element_mulacc_long ('d, 'datasize, 'elements, 'esize, 'idxdsize, 'index, 'm, 'n, 'part, sub_op, unsigned) = {
+  assert(constraint('idxdsize >= 0), "idxdsize constraint");
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_Vpart(n, part);
+  operand2 : bits('idxdsize) = aget_V(m);
+  operand3 : bits(2 * 'datasize) = aget_V(d);
+  result : bits(2 * 'datasize) = undefined;
+  element1 : int = undefined;
+  element2 : int = undefined;
+  product : bits(2 * 'esize) = undefined;
+  element2 = asl_Int(aget_Elem(operand2, index, esize), unsigned);
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+    product = __GetSlice_int(2 * esize, element1 * element2, 0);
+    if sub_op then result = aset_Elem(result, e, 2 * esize, aget_Elem(operand3, e, 2 * esize) - product) else result = aset_Elem(result, e, 2 * esize, aget_Elem(operand3, e, 2 * esize) + product)
+  };
+  aset_V(d, result)
+}
+
+/* Same-width multiply-accumulate by element (MLA/MLS by-element form;
+ * generated from ARM ASL): each lane's truncated product with lane
+ * 'index of V[m] is added to (or, with sub_op, subtracted from) the
+ * corresponding lane of V[d]. */
+val aarch64_vector_arithmetic_binary_element_mulacc_int : (int, int, int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_element_mulacc_int ('d, 'datasize, 'elements, 'esize, 'idxdsize, 'index, 'm, 'n, sub_op) = {
+  assert(constraint('idxdsize >= 0), "idxdsize constraint");
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('idxdsize) = aget_V(m);
+  operand3 : bits('datasize) = aget_V(d);
+  result : bits('datasize) = undefined;
+  element1 : int = undefined;
+  element2 : int = undefined;
+  product : bits('esize) = undefined;
+  element2 = UInt(aget_Elem(operand2, index, esize));
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = UInt(aget_Elem(operand1, e, esize));
+    product = __GetSlice_int(esize, element1 * element2, 0);
+    if sub_op then result = aset_Elem(result, e, esize, aget_Elem(operand3, e, esize) - product) else result = aset_Elem(result, e, esize, aget_Elem(operand3, e, esize) + product)
+  };
+  aset_V(d, result)
+}
+
+/* Signed saturating rounding doubling multiply-accumulate returning
+ * high half, by element (SQRDMLAH / SQRDMLSH family; generated from
+ * ARM ASL).  The accumulator lane is scaled up by 2^esize, the doubled
+ * product added or subtracted, the optional rounding constant applied,
+ * then the high half is saturated via SignedSatQ; FPSR bit 27 (QC) is
+ * set on any lane saturation. */
+val aarch64_vector_arithmetic_binary_element_mulacc_high_sisd : (int, int, int, int, int, int, int, int, bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_element_mulacc_high_sisd ('d, 'datasize, 'elements, 'esize, 'idxdsize, 'index, 'm, 'n, rounding, sub_op) = {
+  assert(constraint('idxdsize >= 0), "idxdsize constraint");
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('idxdsize) = aget_V(m);
+  operand3 : bits('datasize) = aget_V(d);
+  result : bits('datasize) = undefined;
+  rounding_const : int = if rounding then shl_int(1, esize - 1) else 0;
+  element1 : int = undefined;
+  element2 : int = undefined;
+  element3 : int = undefined;
+  product : int = undefined;
+  sat : bool = undefined;
+  element2 = SInt(aget_Elem(operand2, index, esize));
+  accum : int = undefined;
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = SInt(aget_Elem(operand1, e, esize));
+    element3 = SInt(aget_Elem(operand3, e, esize));
+    if sub_op then accum = (shl_int(element3, esize) - 2 * (element1 * element2)) + rounding_const else accum = (shl_int(element3, esize) + 2 * (element1 * element2)) + rounding_const;
+    __tmp_698 : bits('esize) = undefined;
+    (__tmp_698, sat) = SignedSatQ(shr_int(accum, esize), esize);
+    result = aset_Elem(result, e, esize, __tmp_698);
+    if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+  };
+  aset_V(d, result)
+}
+
+/* FP fused multiply-accumulate by element (FMLA / FMLS by-element
+ * family; generated from ARM ASL): each lane of V[d] becomes
+ * FPMulAdd(V[d] lane, +/-V[n] lane, V[m][index], FPCR); sub_op negates
+ * the V[n] lane, giving the multiply-subtract form. */
+val aarch64_vector_arithmetic_binary_element_mulacc_fp16_sisd : (int, int, int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_element_mulacc_fp16_sisd ('d, 'datasize, 'elements, 'esize, 'idxdsize, 'index, 'm, 'n, sub_op) = {
+  assert(constraint('idxdsize >= 0), "idxdsize constraint");
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_V(n);
+  operand2 : bits('idxdsize) = aget_V(m);
+  operand3 : bits('datasize) = aget_V(d);
+  result : bits('datasize) = undefined;
+  element1 : bits('esize) = undefined;
+  element2 : bits('esize) = aget_Elem(operand2, index, esize);
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = aget_Elem(operand1, e, esize);
+    if sub_op then element1 = FPNeg(element1) else ();
+    result = aset_Elem(result, e, esize, FPMulAdd(aget_Elem(operand3, e, esize), element1, element2, FPCR))
+  };
+  aset_V(d, result)
+}
+
+/* Signed saturating doubling multiply-accumulate long, by element
+ * (SQDMLAL / SQDMLSL by-element family; generated from ARM ASL).
+ * Saturation can occur twice per lane -- on the doubled product (sat1)
+ * and on the accumulation (sat2); either sets FPSR bit 27 (QC).
+ * Destination/accumulator V[d] is double width. */
+val aarch64_vector_arithmetic_binary_element_mulacc_double_sisd : (int, int, int, int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_element_mulacc_double_sisd ('d, 'datasize, 'elements, 'esize, 'idxdsize, 'index, 'm, 'n, 'part, sub_op) = {
+  assert(constraint('idxdsize >= 0), "idxdsize constraint");
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  operand1 : bits('datasize) = aget_Vpart(n, part);
+  operand2 : bits('idxdsize) = aget_V(m);
+  operand3 : bits(2 * 'datasize) = aget_V(d);
+  result : bits(2 * 'datasize) = undefined;
+  element1 : int = undefined;
+  element2 : int = undefined;
+  product : bits(2 * 'esize) = undefined;
+  accum : int = undefined;
+  sat1 : bool = undefined;
+  sat2 : bool = undefined;
+  element2 = SInt(aget_Elem(operand2, index, esize));
+  foreach (e from 0 to (elements - 1) by 1 in inc) {
+    element1 = SInt(aget_Elem(operand1, e, esize));
+    (product, sat1) = SignedSatQ((2 * element1) * element2, 2 * esize);
+    if sub_op then accum = SInt(aget_Elem(operand3, e, 2 * esize)) - SInt(product) else accum = SInt(aget_Elem(operand3, e, 2 * esize)) + SInt(product);
+    __tmp_828 : bits(2 * 'esize) = undefined;
+    (__tmp_828, sat2) = SignedSatQ(accum, 2 * esize);
+    result = aset_Elem(result, e, 2 * esize, __tmp_828);
+    if sat1 | sat2 then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+  };
+  aset_V(d, result)
+}
+
+/* FP complex multiply-accumulate by element (FCMLA by-element form;
+ * generated from ARM ASL).  Lanes are (real, imaginary) pairs; the
+ * 2-bit rot selects a rotation of 0/90/180/270 degrees, implemented by
+ * choosing which halves of the indexed element of V[m] (operand1 here)
+ * are negated before the two FPMulAdd steps into accumulator V[d]. */
+val aarch64_vector_arithmetic_binary_element_mulacc_complex : (int, int, int, int, int, int, int, bits(2)) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_vector_arithmetic_binary_element_mulacc_complex ('d, 'datasize, 'elements, 'esize, 'index, 'm, 'n, rot) = {
+  assert(constraint('esize >= 0), "esize constraint");
+  assert(constraint('elements >= 1), "elements constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  /* Note the operand roles: operand1 is V[m] (indexed), operand2 is
+   * V[n], operand3 is the accumulator V[d]. */
+  operand1 : bits('datasize) = aget_V(m);
+  operand2 : bits('datasize) = aget_V(n);
+  operand3 : bits('datasize) = aget_V(d);
+  result : bits('datasize) = undefined;
+  element4 : bits('esize) = undefined;
+  element3 : bits('esize) = undefined;
+  element2 : bits('esize) = undefined;
+  element1 : bits('esize) = undefined;
+  foreach (e from 0 to (elements / 2 - 1) by 1 in inc) {
+    match rot {
+      0b00 => {
+        element1 = aget_Elem(operand1, index * 2, esize);
+        element2 = aget_Elem(operand2, e * 2, esize);
+        element3 = aget_Elem(operand1, index * 2 + 1, esize);
+        element4 = aget_Elem(operand2, e * 2, esize)
+      },
+      0b01 => {
+        element1 = FPNeg(aget_Elem(operand1, index * 2 + 1, esize));
+        element2 = aget_Elem(operand2, e * 2 + 1, esize);
+        element3 = aget_Elem(operand1, index * 2, esize);
+        element4 = aget_Elem(operand2, e * 2 + 1, esize)
+      },
+      0b10 => {
+        element1 = FPNeg(aget_Elem(operand1, index * 2, esize));
+        element2 = aget_Elem(operand2, e * 2, esize);
+        element3 = FPNeg(aget_Elem(operand1, index * 2 + 1, esize));
+        element4 = aget_Elem(operand2, e * 2, esize)
+      },
+      0b11 => {
+        element1 = aget_Elem(operand1, index * 2 + 1, esize);
+        element2 = aget_Elem(operand2, e * 2 + 1, esize);
+        element3 = FPNeg(aget_Elem(operand1, index * 2, esize));
+        element4 = aget_Elem(operand2, e * 2 + 1, esize)
+      }
+    };
+    result = aset_Elem(result, e * 2, esize, FPMulAdd(aget_Elem(operand3, e * 2, esize), element2, element1, FPCR));
+    result = aset_Elem(result, e * 2 + 1, esize, FPMulAdd(aget_Elem(operand3, e * 2 + 1, esize), element4, element3, FPCR))
+  };
+  aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_element_dotp : (int, int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Dot product by indexed element. Presumably SDOT/UDOT (by element) --
+ * confirm against ARM ARM pseudocode. Each destination element of width
+ * esize accumulates the 4-way dot product of esize/4-wide sub-elements;
+ * 'signed' selects SInt vs UInt extraction. Accumulates into V[d]. */
+function aarch64_vector_arithmetic_binary_element_dotp ('d, 'datasize, 'elements, 'esize, 'index, 'm, 'n, signed) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits(128) = aget_V(m);
+ result : bits('datasize) = aget_V(d);
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ res : int = 0;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ /* Four sub-element products per destination element; the operand2 group
+ * is selected by 'index, the operand1 group by the lane e. */
+ foreach (i from 0 to 3 by 1 in inc) {
+ if signed then {
+ element1 = SInt(aget_Elem(operand1, 4 * e + i, esize / 4));
+ element2 = SInt(aget_Elem(operand2, 4 * index + i, esize / 4))
+ } else {
+ element1 = UInt(aget_Elem(operand1, 4 * e + i, esize / 4));
+ element2 = UInt(aget_Elem(operand2, 4 * index + i, esize / 4))
+ };
+ res = res + element1 * element2
+ };
+ result = aset_Elem(result, e, esize, aget_Elem(result, e, esize) + res)
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_disparate_mul_product : (int, int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Widening (long) elementwise multiply: esize source elements from the
+ * upper/lower half ('part) of V[n] and V[m] produce 2*esize results.
+ * Presumably SMULL/SMULL2/UMULL/UMULL2 -- confirm against ARM ARM.
+ * 'unsigned' selects the interpretation via asl_Int. */
+function aarch64_vector_arithmetic_binary_disparate_mul_product ('d, 'datasize, 'elements, 'esize, 'm, 'n, 'part, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_Vpart(n, part);
+ operand2 : bits('datasize) = aget_Vpart(m, part);
+ result : bits(2 * 'datasize) = undefined;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+ element2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+ /* Truncate the integer product to the low 2*esize bits. */
+ result = aset_Elem(result, e, 2 * esize, __GetSlice_int(2 * esize, element1 * element2, 0))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_disparate_mul_poly : (int, int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* Polynomial (carry-less, GF(2)) widening multiply of esize elements to
+ * 2*esize results. Presumably PMULL/PMULL2 -- confirm against ARM ARM. */
+function aarch64_vector_arithmetic_binary_disparate_mul_poly ('d, 'datasize, 'elements, 'esize, 'm, 'n, 'part) = {
+ assert(constraint('esize >= 1), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_Vpart(n, part);
+ operand2 : bits('datasize) = aget_Vpart(m, part);
+ result : bits(2 * 'datasize) = undefined;
+ element1 : bits('esize) = undefined;
+ element2 : bits('esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = aget_Elem(operand1, e, esize);
+ element2 = aget_Elem(operand2, e, esize);
+ result = aset_Elem(result, e, 2 * esize, PolynomialMult(element1, element2))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_disparate_mul_double_sisd : (int, int, int, int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* Signed saturating doubling multiply long: 2*element1*element2 saturated
+ * to 2*esize bits per element. Presumably SQDMULL/SQDMULL2 -- confirm
+ * against ARM ARM. Saturation sets FPSR bit 27 (QC). */
+function aarch64_vector_arithmetic_binary_disparate_mul_double_sisd ('d, 'datasize, 'elements, 'esize, 'm, 'n, 'part) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_Vpart(n, part);
+ operand2 : bits('datasize) = aget_Vpart(m, part);
+ result : bits(2 * 'datasize) = undefined;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ product : bits(2 * 'esize) = undefined;
+ sat : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = SInt(aget_Elem(operand1, e, esize));
+ element2 = SInt(aget_Elem(operand2, e, esize));
+ (product, sat) = SignedSatQ((2 * element1) * element2, 2 * esize);
+ result = aset_Elem(result, e, 2 * esize, product);
+ if sat then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_disparate_mul_dmacc_sisd : (int, int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Signed saturating doubling multiply-accumulate long (vector form).
+ * Presumably SQDMLAL/SQDMLSL -- confirm against ARM ARM. sub_op selects
+ * subtraction of the doubled, saturated product from the accumulator;
+ * saturation in either step sets FPSR bit 27 (QC). */
+function aarch64_vector_arithmetic_binary_disparate_mul_dmacc_sisd ('d, 'datasize, 'elements, 'esize, 'm, 'n, 'part, sub_op) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_Vpart(n, part);
+ operand2 : bits('datasize) = aget_Vpart(m, part);
+ operand3 : bits(2 * 'datasize) = aget_V(d);
+ result : bits(2 * 'datasize) = undefined;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ product : bits(2 * 'esize) = undefined;
+ accum : int = undefined;
+ sat1 : bool = undefined;
+ sat2 : bool = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = SInt(aget_Elem(operand1, e, esize));
+ element2 = SInt(aget_Elem(operand2, e, esize));
+ (product, sat1) = SignedSatQ((2 * element1) * element2, 2 * esize);
+ if sub_op then accum = SInt(aget_Elem(operand3, e, 2 * esize)) - SInt(product) else accum = SInt(aget_Elem(operand3, e, 2 * esize)) + SInt(product);
+ __tmp_838 : bits(2 * 'esize) = undefined;
+ (__tmp_838, sat2) = SignedSatQ(accum, 2 * esize);
+ result = aset_Elem(result, e, 2 * esize, __tmp_838);
+ if sat1 | sat2 then FPSR = __SetSlice_bits(32, 1, FPSR, 27, 0b1) else ()
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_disparate_mul_accum : (int, int, int, int, int, int, int, bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Widening multiply-accumulate: 2*esize accumulator plus/minus the
+ * truncated elementwise product. Presumably SMLAL/SMLSL/UMLAL/UMLSL --
+ * confirm against ARM ARM. No saturation; wraparound arithmetic. */
+function aarch64_vector_arithmetic_binary_disparate_mul_accum ('d, 'datasize, 'elements, 'esize, 'm, 'n, 'part, sub_op, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_Vpart(n, part);
+ operand2 : bits('datasize) = aget_Vpart(m, part);
+ operand3 : bits(2 * 'datasize) = aget_V(d);
+ result : bits(2 * 'datasize) = undefined;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ product : bits(2 * 'esize) = undefined;
+ accum : bits(2 * 'esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+ element2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+ product = __GetSlice_int(2 * esize, element1 * element2, 0);
+ if sub_op then accum = aget_Elem(operand3, e, 2 * esize) - product else accum = aget_Elem(operand3, e, 2 * esize) + product;
+ result = aset_Elem(result, e, 2 * esize, accum)
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_disparate_diff : (bool, int, int, int, int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Widening absolute difference, optionally accumulating into V[d].
+ * Presumably SABDL/UABDL (accumulate=false) and SABAL/UABAL
+ * (accumulate=true) -- confirm against ARM ARM. */
+function aarch64_vector_arithmetic_binary_disparate_diff (accumulate, 'd, 'datasize, 'elements, 'esize, 'm, 'n, 'part, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_Vpart(n, part);
+ operand2 : bits('datasize) = aget_Vpart(m, part);
+ result : bits(2 * 'datasize) = undefined;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ absdiff : bits(2 * 'esize) = undefined;
+ /* Accumulating form starts from the old destination; plain form from zero. */
+ result = if accumulate then aget_V(d) else Zeros();
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+ element2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+ absdiff = __GetSlice_int(2 * esize, abs(element1 - element2), 0);
+ result = aset_Elem(result, e, 2 * esize, aget_Elem(result, e, 2 * esize) + absdiff)
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_disparate_addsub_wide : (int, int, int, int, int, int, int, bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Wide add/subtract: a 2*esize element of V[n] plus/minus a widened esize
+ * element of V[m]'s selected half. Presumably SADDW/UADDW/SSUBW/USUBW --
+ * confirm against ARM ARM. */
+function aarch64_vector_arithmetic_binary_disparate_addsub_wide ('d, 'datasize, 'elements, 'esize, 'm, 'n, 'part, sub_op, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits(2 * 'datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_Vpart(m, part);
+ result : bits(2 * 'datasize) = undefined;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ sum : int = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = asl_Int(aget_Elem(operand1, e, 2 * esize), unsigned);
+ element2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+ if sub_op then sum = element1 - element2 else sum = element1 + element2;
+ result = aset_Elem(result, e, 2 * esize, __GetSlice_int(2 * esize, sum, 0))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_arithmetic_binary_disparate_addsub_narrow : (int, int, int, int, int, int, int, bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Narrowing high-half add/subtract: 2*esize elements are added/subtracted,
+ * optionally rounded (round_const = 1 << (esize-1)), and the high esize
+ * bits are written to the selected half of V[d]. Presumably
+ * ADDHN/SUBHN/RADDHN/RSUBHN -- confirm against ARM ARM. */
+function aarch64_vector_arithmetic_binary_disparate_addsub_narrow ('d, 'datasize, 'elements, 'esize, 'm, 'n, 'part, round, sub_op) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits(2 * 'datasize) = aget_V(n);
+ operand2 : bits(2 * 'datasize) = aget_V(m);
+ result : bits('datasize) = undefined;
+ round_const : int = if round then shl_int(1, esize - 1) else 0;
+ element1 : bits(2 * 'esize) = undefined;
+ element2 : bits(2 * 'esize) = undefined;
+ sum : bits(2 * 'esize) = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = aget_Elem(operand1, e, 2 * esize);
+ element2 = aget_Elem(operand2, e, 2 * esize);
+ if sub_op then sum = element1 - element2 else sum = element1 + element2;
+ sum = sum + round_const;
+ /* Keep only the upper esize bits (the "high narrow" result). */
+ result = aset_Elem(result, e, esize, slice(sum, esize, esize))
+ };
+ aset_Vpart(d, part, result)
+}
+
+val aarch64_vector_arithmetic_binary_disparate_addsub_long : (int, int, int, int, int, int, int, bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Long add/subtract: esize elements from the selected halves of V[n] and
+ * V[m] are widened and added/subtracted into 2*esize results. Presumably
+ * SADDL/UADDL/SSUBL/USUBL -- confirm against ARM ARM. */
+function aarch64_vector_arithmetic_binary_disparate_addsub_long ('d, 'datasize, 'elements, 'esize, 'm, 'n, 'part, sub_op, unsigned) = {
+ assert(constraint('esize >= 0), "esize constraint");
+ assert(constraint('elements >= 1), "elements constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_Vpart(n, part);
+ operand2 : bits('datasize) = aget_Vpart(m, part);
+ result : bits(2 * 'datasize) = undefined;
+ element1 : int = undefined;
+ element2 : int = undefined;
+ sum : int = undefined;
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ element1 = asl_Int(aget_Elem(operand1, e, esize), unsigned);
+ element2 = asl_Int(aget_Elem(operand2, e, esize), unsigned);
+ if sub_op then sum = element1 - element2 else sum = element1 + element2;
+ result = aset_Elem(result, e, 2 * esize, __GetSlice_int(2 * esize, sum, 0))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_float_move_fp_select : (bits(4), int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* Conditional FP register select: V[d] := V[n] if the 4-bit condition
+ * holds, else V[m]. Presumably FCSEL -- confirm against ARM ARM. */
+function aarch64_float_move_fp_select (condition, 'd, 'datasize, 'm, 'n) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ result : bits('datasize) = if ConditionHolds(condition) then aget_V(n) else aget_V(m);
+ aset_V(d, result)
+}
+
+val aarch64_float_move_fp_imm : forall ('datasize : Int).
+ (int, atom('datasize), bits('datasize)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Move an immediate bit pattern into V[d]. Presumably FMOV (immediate) --
+ * confirm against ARM ARM. */
+function aarch64_float_move_fp_imm ('d, datasize, imm) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ aset_V(d, imm)
+}
+
+val aarch64_float_convert_int : forall ('fltsize : Int) ('intsize : Int), 'fltsize >= 0 & 'intsize >= 0.
+ (int, atom('fltsize), atom('intsize), int, FPConvOp, int, FPRounding, bool) -> unit effect {undef, escape, wreg, rreg}
+
+/* Conversions and moves between a general-purpose register and an FP/SIMD
+ * register, dispatched on op: FP->int convert, int->FP convert, raw bit
+ * moves in either direction, and the JavaScript-style FP->int convert.
+ * 'part selects the vector half for the raw-move cases. */
+function aarch64_float_convert_int (d, fltsize, intsize, n, op, part, rounding, unsigned) = {
+ CheckFPAdvSIMDEnabled64();
+ fltval : bits('fltsize) = undefined;
+ intval : bits('intsize) = undefined;
+ match op {
+ FPConvOp_CVT_FtoI => {
+ fltval = aget_V(n);
+ intval = FPToFixed(fltval, 0, unsigned, FPCR, rounding);
+ aset_X(d, intval)
+ },
+ FPConvOp_CVT_ItoF => {
+ intval = aget_X(n);
+ fltval = FixedToFP(intval, 0, unsigned, FPCR, rounding);
+ aset_V(d, fltval)
+ },
+ FPConvOp_MOV_FtoI => {
+ /* Raw bit move, zero-extended into the X register. */
+ fltval = aget_Vpart(n, part);
+ intval = ZeroExtend(fltval, intsize);
+ aset_X(d, intval)
+ },
+ FPConvOp_MOV_ItoF => {
+ intval = aget_X(n);
+ fltval = slice(intval, 0, fltsize);
+ aset_Vpart(d, part, fltval)
+ },
+ FPConvOp_CVT_FtoI_JS => {
+ /* JavaScript convert: 32-bit result zero-extended to 64 bits. */
+ fltval = aget_V(n);
+ intval = FPToFixedJS(fltval, FPCR, true);
+ aset_X(d, ZeroExtend(slice(intval, 0, 32), 64))
+ }
+ }
+}
+
+val aarch64_float_convert_fp : forall ('dstsize : Int) ('srcsize : Int), 'dstsize >= 0 & 'srcsize >= 0.
+ (int, atom('dstsize), int, atom('srcsize)) -> unit effect {undef, escape, wreg, rreg}
+
+/* Floating-point precision conversion from srcsize to dstsize using FPCR
+ * rounding. Presumably FCVT -- confirm against ARM ARM. */
+function aarch64_float_convert_fp (d, dstsize, n, srcsize) = {
+ CheckFPAdvSIMDEnabled64();
+ result : bits('dstsize) = undefined;
+ operand : bits('srcsize) = aget_V(n);
+ result = FPConvert(operand, FPCR);
+ aset_V(d, result)
+}
+
+val aarch64_float_convert_fix : forall ('fltsize : Int) ('intsize : Int), 'fltsize >= 0 & 'intsize >= 0.
+ (int, atom('fltsize), int, atom('intsize), int, FPConvOp, FPRounding, bool) -> unit effect {undef, escape, wreg, rreg}
+
+/* Fixed-point <-> floating-point conversion with 'fracbits fraction bits,
+ * dispatched on op (only the two convert cases are possible here).
+ * Presumably FCVTZS/FCVTZU/SCVTF/UCVTF (fixed-point) -- confirm. */
+function aarch64_float_convert_fix (d, fltsize, fracbits, intsize, n, op, rounding, unsigned) = {
+ CheckFPAdvSIMDEnabled64();
+ fltval : bits('fltsize) = undefined;
+ intval : bits('intsize) = undefined;
+ match op {
+ FPConvOp_CVT_FtoI => {
+ fltval = aget_V(n);
+ intval = FPToFixed(fltval, fracbits, unsigned, FPCR, rounding);
+ aset_X(d, intval)
+ },
+ FPConvOp_CVT_ItoF => {
+ intval = aget_X(n);
+ fltval = FixedToFP(intval, fracbits, unsigned, FPCR, rounding);
+ aset_V(d, fltval)
+ }
+ }
+}
+
+val aarch64_float_compare_uncond : (bool, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Unconditional FP compare writing PSTATE.{N,Z,C,V}; cmp_with_zero
+ * substitutes +0.0 for the second operand, signal_all_nans selects the
+ * signalling form. Presumably FCMP/FCMPE -- confirm against ARM ARM. */
+function aarch64_float_compare_uncond (cmp_with_zero, 'datasize, 'm, 'n, signal_all_nans) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = if cmp_with_zero then FPZero(0b0) else aget_V(m);
+ (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V) = FPCompare(operand1, operand2, signal_all_nans, FPCR)
+}
+
+val aarch64_float_compare_cond : (bits(4), int, bits(4), int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Conditional FP compare: if the condition holds, NZCV comes from the
+ * comparison; otherwise the supplied 4-bit flags value is used.
+ * Presumably FCCMP/FCCMPE -- confirm against ARM ARM. */
+function aarch64_float_compare_cond (condition, 'datasize, flags__arg, 'm, 'n, signal_all_nans) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ flags = flags__arg;
+ CheckFPAdvSIMDEnabled64();
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ if ConditionHolds(condition) then flags = FPCompare(operand1, operand2, signal_all_nans, FPCR) else ();
+ (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V) = flags
+}
+
+val aarch64_float_arithmetic_unary : (int, int, FPUnaryOp, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar FP unary op dispatched on fpop: move, absolute value, negate, or
+ * square root (FPCR-rounded). Presumably FMOV/FABS/FNEG/FSQRT -- confirm. */
+function aarch64_float_arithmetic_unary ('d, 'datasize, fpop, 'n) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ result : bits('datasize) = undefined;
+ operand : bits('datasize) = aget_V(n);
+ match fpop {
+ FPUnaryOp_MOV => result = operand,
+ FPUnaryOp_ABS => result = FPAbs(operand),
+ FPUnaryOp_NEG => result = FPNeg(operand),
+ FPUnaryOp_SQRT => result = FPSqrt(operand, FPCR)
+ };
+ aset_V(d, result)
+}
+
+val aarch64_float_arithmetic_round : (int, int, bool, int, FPRounding) -> unit effect {escape, rreg, undef, wreg}
+
+/* Round a scalar FP value to an integral FP value using the given rounding
+ * mode; 'exact' requests the inexact-trapping form. Presumably the FRINT*
+ * family -- confirm against ARM ARM. */
+function aarch64_float_arithmetic_round ('d, 'datasize, exact, 'n, rounding) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ result : bits('datasize) = undefined;
+ operand : bits('datasize) = aget_V(n);
+ result = FPRoundInt(operand, FPCR, rounding, exact);
+ aset_V(d, result)
+}
+
+val aarch64_float_arithmetic_mul_product : (int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar FP multiply with optional negation of the product. Presumably
+ * FMUL (negated=false) / FNMUL (negated=true) -- confirm against ARM ARM. */
+function aarch64_float_arithmetic_mul_product ('d, 'datasize, 'm, 'n, negated) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ result : bits('datasize) = undefined;
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result = FPMul(operand1, operand2, FPCR);
+ if negated then result = FPNeg(result) else ();
+ aset_V(d, result)
+}
+
+val aarch64_float_arithmetic_mul_addsub : (int, int, int, int, int, bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar fused multiply-add: V[d] := fma(operanda, operand1, operand2),
+ * with optional pre-negation of the addend (opa_neg) and of the first
+ * multiplicand (op1_neg). Presumably FMADD/FMSUB/FNMADD/FNMSUB -- confirm. */
+function aarch64_float_arithmetic_mul_addsub ('a, 'd, 'datasize, 'm, 'n, op1_neg, opa_neg) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ result : bits('datasize) = undefined;
+ operanda : bits('datasize) = aget_V(a);
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ if opa_neg then operanda = FPNeg(operanda) else ();
+ if op1_neg then operand1 = FPNeg(operand1) else ();
+ result = FPMulAdd(operanda, operand1, operand2, FPCR);
+ aset_V(d, result)
+}
+
+val aarch64_float_arithmetic_maxmin : (int, int, int, int, FPMaxMinOp) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar FP max/min dispatched on operation, including the NaN-propagating
+ * and number-preferring (maxNum/minNum) variants. Presumably
+ * FMAX/FMIN/FMAXNM/FMINNM -- confirm against ARM ARM. */
+function aarch64_float_arithmetic_maxmin ('d, 'datasize, 'm, 'n, operation) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ result : bits('datasize) = undefined;
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ match operation {
+ FPMaxMinOp_MAX => result = FPMax(operand1, operand2, FPCR),
+ FPMaxMinOp_MIN => result = FPMin(operand1, operand2, FPCR),
+ FPMaxMinOp_MAXNUM => result = FPMaxNum(operand1, operand2, FPCR),
+ FPMaxMinOp_MINNUM => result = FPMinNum(operand1, operand2, FPCR)
+ };
+ aset_V(d, result)
+}
+
+val aarch64_float_arithmetic_div : (int, int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar FP divide: V[d] := V[n] / V[m] under FPCR. Presumably FDIV --
+ * confirm against ARM ARM. */
+function aarch64_float_arithmetic_div ('d, 'datasize, 'm, 'n) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ result : bits('datasize) = undefined;
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ result = FPDiv(operand1, operand2, FPCR);
+ aset_V(d, result)
+}
+
+val aarch64_float_arithmetic_addsub : (int, int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar FP add or subtract selected by sub_op. Presumably FADD/FSUB --
+ * confirm against ARM ARM. */
+function aarch64_float_arithmetic_addsub ('d, 'datasize, 'm, 'n, sub_op) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ result : bits('datasize) = undefined;
+ operand1 : bits('datasize) = aget_V(n);
+ operand2 : bits('datasize) = aget_V(m);
+ if sub_op then result = FPSub(operand1, operand2, FPCR) else result = FPAdd(operand1, operand2, FPCR);
+ aset_V(d, result)
+}
+
+val CheckCryptoEnabled64 : unit -> unit effect {escape, rreg, undef, wreg}
+
+/* Trap check used by the crypto instructions below. As written it only
+ * performs the FP/AdvSIMD enable check; whether a separate crypto-extension
+ * presence check belongs here cannot be determined from this chunk. */
+function CheckCryptoEnabled64 () = {
+ AArch64_CheckFPAdvSIMDEnabled();
+ ()
+}
+
+val aarch64_vector_crypto_sha3op_sha256sched1 : (int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SHA-256 message schedule update, part 2 (presumably SHA256SU1 -- confirm
+ * against ARM ARM). Applies sigma1 (ROR 17 ^ ROR 19 ^ LSR 10) to schedule
+ * words and accumulates; the second pair of lanes reuses the freshly
+ * computed first pair via T1. */
+function aarch64_vector_crypto_sha3op_sha256sched1 ('d, 'm, 'n) = {
+ CheckCryptoEnabled64();
+ operand1 : bits(128) = aget_V(d);
+ operand2 : bits(128) = aget_V(n);
+ operand3 : bits(128) = aget_V(m);
+ result : bits(128) = undefined;
+ T0 : bits(128) = slice(operand3, 0, 32) @ slice(operand2, 32, 96);
+ T1 : bits(64) = undefined;
+ elt : bits(32) = undefined;
+ T1 = slice(operand3, 64, 64);
+ foreach (e from 0 to 1 by 1 in inc) {
+ elt = aget_Elem(T1, e, 32);
+ elt = (ROR(elt, 17) ^ ROR(elt, 19)) ^ LSR(elt, 10);
+ elt = (elt + aget_Elem(operand1, e, 32)) + aget_Elem(T0, e, 32);
+ result = aset_Elem(result, e, 32, elt)
+ };
+ /* Lanes 2-3 depend on the just-computed lanes 0-1. */
+ T1 = slice(result, 0, 64);
+ foreach (e from 2 to 3 by 1 in inc) {
+ elt = aget_Elem(T1, e - 2, 32);
+ elt = (ROR(elt, 17) ^ ROR(elt, 19)) ^ LSR(elt, 10);
+ elt = (elt + aget_Elem(operand1, e, 32)) + aget_Elem(T0, e, 32);
+ result = aset_Elem(result, e, 32, elt)
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_crypto_sha3op_sha256hash : (int, int, int, bool) -> unit effect {escape, rreg, undef, wreg}
+
+/* SHA-256 hash update (presumably SHA256H when part1, SHA256H2 otherwise
+ * -- confirm against ARM ARM). The two forms swap the V[d]/V[n] state
+ * halves passed to SHA256hash. */
+function aarch64_vector_crypto_sha3op_sha256hash ('d, 'm, 'n, part1) = {
+ CheckCryptoEnabled64();
+ result : bits(128) = undefined;
+ if part1 then result = SHA256hash(aget_V(d), aget_V(n), aget_V(m), true) else result = SHA256hash(aget_V(n), aget_V(d), aget_V(m), false);
+ aset_V(d, result)
+}
+
+val aarch64_vector_crypto_sha3op_sha1sched0 : (int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SHA-1 message schedule update, part 1 (presumably SHA1SU0 -- confirm
+ * against ARM ARM): concatenated slice of V[n]/V[d] XORed with V[d] and
+ * V[m]. */
+function aarch64_vector_crypto_sha3op_sha1sched0 ('d, 'm, 'n) = {
+ CheckCryptoEnabled64();
+ operand1 : bits(128) = aget_V(d);
+ operand2 : bits(128) = aget_V(n);
+ operand3 : bits(128) = aget_V(m);
+ result : bits(128) = slice(operand2, 0, 64) @ slice(operand1, 64, 64);
+ result = (result ^ operand1) ^ operand3;
+ aset_V(d, result)
+}
+
+val aarch64_vector_crypto_sha3op_sha1hash_parity : (int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SHA-1 hash update using the parity function (presumably SHA1P -- confirm
+ * against ARM ARM). Four rounds: each mixes SHAparity of three state words,
+ * a 5-bit rotate of the first word, the round word from W, then rotates
+ * the whole 160-bit (Y @ X) state left by 32. */
+function aarch64_vector_crypto_sha3op_sha1hash_parity ('d, 'm, 'n) = {
+ CheckCryptoEnabled64();
+ X : bits(128) = aget_V(d);
+ Y : bits(32) = aget_V(n);
+ W : bits(128) = aget_V(m);
+ t : bits(32) = undefined;
+ foreach (e from 0 to 3 by 1 in inc) {
+ t = SHAparity(slice(X, 32, 32), slice(X, 64, 32), slice(X, 96, 32));
+ Y = ((Y + ROL(slice(X, 0, 32), 5)) + t) + aget_Elem(W, e, 32);
+ X = __SetSlice_bits(128, 32, X, 32, ROL(slice(X, 32, 32), 30));
+ __tmp_845 : bits(160) = ROL(Y @ X, 32);
+ Y = slice(__tmp_845, 128, 32);
+ X = slice(__tmp_845, 0, 128)
+ };
+ aset_V(d, X)
+}
+
+val aarch64_vector_crypto_sha3op_sha1hash_majority : (int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SHA-1 hash update using the majority function (presumably SHA1M --
+ * confirm against ARM ARM). Same four-round structure as the parity/choose
+ * variants but with SHAmajority as the round function. */
+function aarch64_vector_crypto_sha3op_sha1hash_majority ('d, 'm, 'n) = {
+ CheckCryptoEnabled64();
+ X : bits(128) = aget_V(d);
+ Y : bits(32) = aget_V(n);
+ W : bits(128) = aget_V(m);
+ t : bits(32) = undefined;
+ foreach (e from 0 to 3 by 1 in inc) {
+ t = SHAmajority(slice(X, 32, 32), slice(X, 64, 32), slice(X, 96, 32));
+ Y = ((Y + ROL(slice(X, 0, 32), 5)) + t) + aget_Elem(W, e, 32);
+ X = __SetSlice_bits(128, 32, X, 32, ROL(slice(X, 32, 32), 30));
+ __tmp_768 : bits(160) = ROL(Y @ X, 32);
+ Y = slice(__tmp_768, 128, 32);
+ X = slice(__tmp_768, 0, 128)
+ };
+ aset_V(d, X)
+}
+
+val aarch64_vector_crypto_sha3op_sha1hash_choose : (int, int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SHA-1 hash update using the choose function (presumably SHA1C -- confirm
+ * against ARM ARM). Same four-round structure as the parity/majority
+ * variants but with SHAchoose as the round function. */
+function aarch64_vector_crypto_sha3op_sha1hash_choose ('d, 'm, 'n) = {
+ CheckCryptoEnabled64();
+ X : bits(128) = aget_V(d);
+ Y : bits(32) = aget_V(n);
+ W : bits(128) = aget_V(m);
+ t : bits(32) = undefined;
+ foreach (e from 0 to 3 by 1 in inc) {
+ t = SHAchoose(slice(X, 32, 32), slice(X, 64, 32), slice(X, 96, 32));
+ Y = ((Y + ROL(slice(X, 0, 32), 5)) + t) + aget_Elem(W, e, 32);
+ X = __SetSlice_bits(128, 32, X, 32, ROL(slice(X, 32, 32), 30));
+ __tmp_832 : bits(160) = ROL(Y @ X, 32);
+ Y = slice(__tmp_832, 128, 32);
+ X = slice(__tmp_832, 0, 128)
+ };
+ aset_V(d, X)
+}
+
+val aarch64_vector_crypto_sha2op_sha256sched0 : (int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SHA-256 message schedule update, part 1 (presumably SHA256SU0 -- confirm
+ * against ARM ARM). Applies sigma0 (ROR 7 ^ ROR 18 ^ LSR 3) to each lane
+ * of T and adds the corresponding lane of V[d]. */
+function aarch64_vector_crypto_sha2op_sha256sched0 ('d, 'n) = {
+ CheckCryptoEnabled64();
+ operand1 : bits(128) = aget_V(d);
+ operand2 : bits(128) = aget_V(n);
+ result : bits(128) = undefined;
+ T : bits(128) = slice(operand2, 0, 32) @ slice(operand1, 32, 96);
+ elt : bits(32) = undefined;
+ foreach (e from 0 to 3 by 1 in inc) {
+ elt = aget_Elem(T, e, 32);
+ elt = (ROR(elt, 7) ^ ROR(elt, 18)) ^ LSR(elt, 3);
+ result = aset_Elem(result, e, 32, elt + aget_Elem(operand1, e, 32))
+ };
+ aset_V(d, result)
+}
+
+val aarch64_vector_crypto_sha2op_sha1sched1 : (int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SHA-1 message schedule update, part 2 (presumably SHA1SU1 -- confirm
+ * against ARM ARM). Each 32-bit lane of T = V[d] ^ (V[n] >> 32) is rotated
+ * left by 1; the top lane additionally mixes in ROL(T<31:0>, 2). */
+function aarch64_vector_crypto_sha2op_sha1sched1 ('d, 'n) = {
+ CheckCryptoEnabled64();
+ operand1 : bits(128) = aget_V(d);
+ operand2 : bits(128) = aget_V(n);
+ result : bits(128) = undefined;
+ T : bits(128) = operand1 ^ LSR(operand2, 32);
+ result = __SetSlice_bits(128, 32, result, 0, ROL(slice(T, 0, 32), 1));
+ result = __SetSlice_bits(128, 32, result, 32, ROL(slice(T, 32, 32), 1));
+ result = __SetSlice_bits(128, 32, result, 64, ROL(slice(T, 64, 32), 1));
+ result = __SetSlice_bits(128, 32, result, 96, ROL(slice(T, 96, 32), 1) ^ ROL(slice(T, 0, 32), 2));
+ aset_V(d, result)
+}
+
+val aarch64_vector_crypto_sha2op_sha1hash : (int, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* SHA-1 fixed rotate (presumably SHA1H -- confirm against ARM ARM):
+ * V[d] := ROL(V[n]<31:0>, 30). */
+function aarch64_vector_crypto_sha2op_sha1hash ('d, 'n) = {
+ CheckCryptoEnabled64();
+ operand : bits(32) = aget_V(n);
+ aset_V(d, ROL(operand, 30))
+}
+
+val aarch64_vector_crypto_aes_round : (int, bool, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* AES single round (presumably AESE/AESD -- confirm against ARM ARM):
+ * AddRoundKey (XOR), then (Inv)ShiftRows followed by (Inv)SubBytes,
+ * with decrypt selecting the inverse transforms. */
+function aarch64_vector_crypto_aes_round ('d, decrypt, 'n) = {
+ CheckCryptoEnabled64();
+ operand1 : bits(128) = aget_V(d);
+ operand2 : bits(128) = aget_V(n);
+ result : bits(128) = operand1 ^ operand2;
+ if decrypt then result = AESInvSubBytes(AESInvShiftRows(result)) else result = AESSubBytes(AESShiftRows(result));
+ aset_V(d, result)
+}
+
+val aarch64_vector_crypto_aes_mix : (int, bool, int) -> unit effect {escape, rreg, undef, wreg}
+
+/* AES MixColumns / InvMixColumns (presumably AESMC/AESIMC -- confirm
+ * against ARM ARM), selected by decrypt. */
+function aarch64_vector_crypto_aes_mix ('d, decrypt, 'n) = {
+ CheckCryptoEnabled64();
+ operand : bits(128) = aget_V(n);
+ result : bits(128) = undefined;
+ if decrypt then result = AESInvMixColumns(operand) else result = AESMixColumns(operand);
+ aset_V(d, result)
+}
+
+val AArch64_AccessIsPrivileged : AccType -> bool effect {escape, rreg, undef}
+
+/* Returns whether a memory access of the given type executes with
+ * privileged permissions: EL0 never; EL3 always; EL2 when not in
+ * host mode or with HCR_EL2 bit 27 clear; UAO forces privileged;
+ * otherwise privileged unless the access type is AccType_UNPRIV. */
+function AArch64_AccessIsPrivileged acctype = {
+ ispriv : bool = undefined;
+ if PSTATE.EL == EL0 then ispriv = false else if PSTATE.EL == EL3 then ispriv = true else if PSTATE.EL == EL2 & (~(IsInHost()) | [HCR_EL2[27]] == 0b0) then ispriv = true else if HaveUAOExt() & PSTATE.UAO == 0b1 then ispriv = true else ispriv = acctype != AccType_UNPRIV;
+ return(ispriv)
+}
+
+val AArch64_CheckWatchpoint : (bits(64), AccType, bool, int) -> FaultRecord effect {wreg, rreg, undef, escape}
+
+/* Checks all implemented watchpoints against a data access at vaddress.
+ * The loop bound UInt(ID_AA64DFR0_EL1<23:20>) is inclusive, matching the
+ * field's count-minus-one encoding. On a match: halt into debug state if
+ * halting is allowed, else raise a debug fault if MDSCR_EL1 bit 15 is set
+ * and debug exceptions are enabled; otherwise no fault. The 'undefined'
+ * after Halt supplies a FaultRecord on the (non-returning) halt path. */
+function AArch64_CheckWatchpoint (vaddress, acctype, iswrite, size) = {
+ assert(~(ELUsingAArch32(S1TranslationRegime())), "!(ELUsingAArch32(S1TranslationRegime()))");
+ val_match : bool = false;
+ ispriv : bool = AArch64_AccessIsPrivileged(acctype);
+ foreach (i from 0 to UInt(slice(ID_AA64DFR0_EL1, 20, 4)) by 1 in inc)
+ val_match = val_match | AArch64_WatchpointMatch(i, vaddress, size, ispriv, iswrite);
+ reason : bits(6) = undefined;
+ if val_match & HaltOnBreakpointOrWatchpoint() then {
+ reason = DebugHalt_Watchpoint;
+ Halt(reason);
+ undefined
+ } else if (val_match & [MDSCR_EL1[15]] == 0b1) & AArch64_GenerateDebugExceptions() then return(AArch64_DebugFault(acctype, iswrite)) else return(AArch64_NoFault())
+}
+
+val AArch64_CheckDebug : (bits(64), AccType, bool, int) -> FaultRecord effect {escape, rreg, undef, wreg}
+
+/* Top-level debug check for a memory access: when debug exceptions or
+ * halting are possible, dispatches to the watchpoint check for data-side
+ * accesses (anything but instruction fetch) or the breakpoint check for
+ * fetches; otherwise returns no fault. */
+function AArch64_CheckDebug (vaddress, acctype, iswrite, 'size) = {
+ fault : FaultRecord = AArch64_NoFault();
+ d_side : bool = acctype != AccType_IFETCH;
+ generate_exception : bool = AArch64_GenerateDebugExceptions() & [MDSCR_EL1[15]] == 0b1;
+ halt : bool = HaltOnBreakpointOrWatchpoint();
+ if generate_exception | halt then if d_side then fault = AArch64_CheckWatchpoint(vaddress, acctype, iswrite, size) else fault = AArch64_CheckBreakpoint(vaddress, size) else ();
+ return(fault)
+}
+
+val AArch64_CheckPermission : (Permissions, bits(64), int, bits(1), AccType, bool) -> FaultRecord effect {rreg, undef, escape}
+
+/* Stage-1 access-permission check: derive effective read/write/execute
+   permissions from the descriptor 'perms' for the current translation
+   regime (EL0/EL1/host vs EL2/EL3), apply WXN, PAN and secure-state
+   instruction-fetch restrictions, and return a PermissionFault record if
+   the access of kind 'acctype'/'iswrite' is not allowed. */
+function AArch64_CheckPermission (perms, vaddress, level, NS, acctype, iswrite) = {
+ assert(~(ELUsingAArch32(S1TranslationRegime())), "!(ELUsingAArch32(S1TranslationRegime()))");
+ /* SCTLR bit 19 is the write-implies-execute-never (WXN) control. */
+ wxn : bool = [aget_SCTLR()[19]] == 0b1;
+ xn : bool = undefined;
+ w : bool = undefined;
+ r : bool = undefined;
+ priv_xn : bool = undefined;
+ user_xn : bool = undefined;
+ pan : bits(1) = undefined;
+ ispriv : bool = undefined;
+ user_w : bool = undefined;
+ user_r : bool = undefined;
+ priv_w : bool = undefined;
+ priv_r : bool = undefined;
+ if (PSTATE.EL == EL0 | PSTATE.EL == EL1) | IsInHost() then {
+ /* Two-privilege-level regime: decode AP[2:1] into user/priv r/w. */
+ priv_r = true;
+ priv_w = [perms.ap[2]] == 0b0;
+ user_r = [perms.ap[1]] == 0b1;
+ user_w = slice(perms.ap, 1, 2) == 0b01;
+ ispriv = AArch64_AccessIsPrivileged(acctype);
+ pan = if HavePANExt() then PSTATE.PAN else 0b0;
+ if ((((HaveNVExt() & HaveEL(EL2)) & [HCR_EL2[42]] == 0b1) & [HCR_EL2[43]] == 0b1) & ~(IsSecure())) & PSTATE.EL == EL1 then
+ pan = 0b0
+ else ();
+ /* PAN: a privileged access to user-readable memory loses r/w, except
+    for the exempt access types handled in the condition below. */
+ if ((pan == 0b1 & user_r) & ispriv) & ~(acctype == AccType_DC | acctype == AccType_AT | acctype == AccType_IFETCH) | acctype == AccType_AT & AArch64_ExecutingATS1xPInstr() then {
+ priv_r = false;
+ priv_w = false
+ } else ();
+ user_xn = perms.xn == 0b1 | user_w & wxn;
+ /* Privileged execution is also denied from any user-writable page. */
+ priv_xn = (perms.pxn == 0b1 | priv_w & wxn) | user_w;
+ if ispriv then (r, w, xn) = (priv_r, priv_w, priv_xn)
+ else (r, w, xn) = (user_r, user_w, user_xn)
+ } else {
+ /* Single-privilege-level regime (EL2 non-host / EL3). */
+ r = true;
+ w = [perms.ap[2]] == 0b0;
+ xn = perms.xn == 0b1 | w & wxn
+ };
+ /* SCR_EL3.SIF: in Secure state, forbid execution from Non-secure memory. */
+ if ((HaveEL(EL3) & IsSecure()) & NS == 0b1) & [SCR_EL3[9]] == 0b1 then
+ xn = true
+ else ();
+ failedread : bool = undefined;
+ fail : bool = undefined;
+ if acctype == AccType_IFETCH then {
+ fail = xn;
+ failedread = true
+ } else if acctype == AccType_ATOMICRW | acctype == AccType_ORDEREDRW then {
+ /* Atomic read-modify-write needs both read and write permission. */
+ fail = ~(r) | ~(w);
+ failedread = ~(r)
+ } else if iswrite then {
+ fail = ~(w);
+ failedread = false
+ } else {
+ fail = ~(r);
+ failedread = true
+ };
+ ipaddress : bits(52) = undefined;
+ s2fs1walk : bool = undefined;
+ secondstage : bool = undefined;
+ if fail then {
+ secondstage = false;
+ s2fs1walk = false;
+ ipaddress = undefined;
+ return(AArch64_PermissionFault(ipaddress, level, acctype, ~(failedread), secondstage, s2fs1walk))
+ } else return(AArch64_NoFault())
+}
+
+val AArch64_FirstStageTranslate : (bits(64), AccType, bool, bool, int) -> AddressDescriptor effect {escape, rmem, rreg, undef, wmem}
+
+/* Stage-1 address translation: either walks the translation tables (MMU
+   enabled) or uses the flat S1-off mapping, then layers on alignment,
+   permission, instruction-fetch-from-Device and hardware descriptor-update
+   checks, recording any fault in the returned AddressDescriptor. */
+function AArch64_FirstStageTranslate (vaddress, acctype, iswrite, wasaligned, 'size) = {
+ s1_enabled : bool = undefined;
+ /* With a stage-2 regime present, stage 1 is only on when neither
+    HCR_EL2 TGE/DC forces it off and SCTLR_EL1.M is set. */
+ if HasS2Translation() then s1_enabled = ([HCR_EL2[27]] == 0b0 & [HCR_EL2[12]] == 0b0) & [SCTLR_EL1[0]] == 0b1 else s1_enabled = [aget_SCTLR()[0]] == 0b1;
+ ipaddress : bits(52) = undefined;
+ secondstage : bool = false;
+ s2fs1walk : bool = false;
+ nTLSMD : bits(1) = undefined;
+ permissioncheck : bool = undefined;
+ S1 : TLBRecord = undefined;
+ if s1_enabled then {
+ S1 = AArch64_TranslationTableWalk(ipaddress, vaddress, acctype, iswrite, secondstage, s2fs1walk, size);
+ permissioncheck = true
+ } else {
+ S1 = AArch64_TranslateAddressS1Off(vaddress, acctype, iswrite);
+ permissioncheck = false;
+ /* AArch32 LSM-to-Device trap: optionally turn the access into an
+    alignment fault when nTLSMD is clear. */
+ if (UsingAArch32() & HaveTrapLoadStoreMultipleDeviceExt()) & AArch32_ExecutingLSMInstr() then if S1.addrdesc.memattrs.typ == MemType_Device & S1.addrdesc.memattrs.device != DeviceType_GRE then {
+ nTLSMD = if S1TranslationRegime() == EL2 then [SCTLR_EL2[28]] else [SCTLR_EL1[28]];
+ if nTLSMD == 0b0 then {
+ __tmp_246 : AddressDescriptor = S1.addrdesc;
+ __tmp_246.fault = AArch64_AlignmentFault(acctype, iswrite, secondstage);
+ S1.addrdesc = __tmp_246
+ } else ()
+ } else () else ()
+ };
+ /* Unaligned (or DC ZVA) accesses to Device memory fault on alignment. */
+ if ((~(wasaligned) & acctype != AccType_IFETCH | acctype == AccType_DCZVA) & S1.addrdesc.memattrs.typ == MemType_Device) & ~(IsFault(S1.addrdesc)) then {
+ __tmp_247 : AddressDescriptor = S1.addrdesc;
+ __tmp_247.fault = AArch64_AlignmentFault(acctype, iswrite, secondstage);
+ S1.addrdesc = __tmp_247
+ } else ();
+ if ~(IsFault(S1.addrdesc)) & permissioncheck then {
+ __tmp_248 : AddressDescriptor = S1.addrdesc;
+ __tmp_248.fault = AArch64_CheckPermission(S1.perms, vaddress, S1.level, S1.addrdesc.paddress.NS, acctype, iswrite);
+ S1.addrdesc = __tmp_248
+ } else ();
+ if (~(IsFault(S1.addrdesc)) & S1.addrdesc.memattrs.typ == MemType_Device) & acctype == AccType_IFETCH then S1.addrdesc = AArch64_InstructionDevice(S1.addrdesc, vaddress, ipaddress, S1.level, acctype, iswrite, secondstage, s2fs1walk) else ();
+ hwupdatewalk : bool = false;
+ s2fs1walk = false;
+ /* Hardware access-flag / dirty-bit descriptor update for this walk. */
+ __tmp_249 : AddressDescriptor = S1.addrdesc;
+ __tmp_249.fault = AArch64_CheckAndUpdateDescriptor(S1.descupdate, S1.addrdesc.fault, secondstage, vaddress, acctype, iswrite, s2fs1walk, hwupdatewalk);
+ S1.addrdesc = __tmp_249;
+ return(S1.addrdesc)
+}
+
+val AArch64_FullTranslate : (bits(64), AccType, bool, bool, int) -> AddressDescriptor effect {escape, rmem, rreg, undef, wmem}
+
+/* Full translation: perform stage 1, then — if stage 1 did not fault and a
+   stage-2 regime is in use — feed the result through stage 2. */
+function AArch64_FullTranslate (vaddress, acctype, iswrite, wasaligned, 'size) = {
+ let S1 : AddressDescriptor = AArch64_FirstStageTranslate(vaddress, acctype, iswrite, wasaligned, size);
+ result : AddressDescriptor = undefined;
+ if ~(IsFault(S1)) & HasS2Translation() then {
+   /* This is a normal access, not a stage-2 walk of a stage-1 table
+      fetch, and no hardware descriptor update is being performed. */
+   let s2fs1walk : bool = false;
+   let hwupdatewalk : bool = false;
+   result = AArch64_SecondStageTranslate(S1, vaddress, acctype, iswrite, wasaligned, s2fs1walk, size, hwupdatewalk)
+ } else result = S1;
+ return(result)
+}
+
+val AArch64_TranslateAddress : (bits(64), AccType, bool, bool, int) -> AddressDescriptor effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Translate a virtual address and, for ordinary accesses that did not
+   fault, run the debug (watchpoint/breakpoint) check on it.  The original
+   virtual address is recorded in the returned descriptor. */
+function AArch64_TranslateAddress (vaddress, acctype, iswrite, wasaligned, 'size) = {
+ result : AddressDescriptor = AArch64_FullTranslate(vaddress, acctype, iswrite, wasaligned, size);
+ /* Table walks, cache maintenance and address-translation instructions
+    are exempt from the debug check. */
+ let debug_checked : bool = ~(acctype == AccType_PTW | acctype == AccType_IC | acctype == AccType_AT);
+ if debug_checked & ~(IsFault(result)) then
+   result.fault = AArch64_CheckDebug(vaddress, acctype, iswrite, size)
+ else ();
+ result.vaddress = ZeroExtend(vaddress);
+ return(result)
+}
+
+val AArch64_aset_MemSingle : forall ('size : Int), 64 >= 0 & 8 * 'size >= 0.
+ (bits(64), atom('size), AccType, bool, bits(8 * 'size)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Single-copy-atomic write of 'size' bytes (1/2/4/8/16, naturally aligned):
+   translate the address, abort on a translation fault, clear other PEs'
+   exclusive monitors for shareable locations, then perform the write. */
+function AArch64_aset_MemSingle (address, size, acctype, wasaligned, value_name) = {
+ assert('size == 1 | 'size == 2 | 'size == 4 | 'size == 8 | 'size == 16, "((size == 1) || ((size == 2) || ((size == 4) || ((size == 8) || (size == 16)))))");
+ assert(address == Align(address, 'size), "(address == Align(address, size))");
+ memaddrdesc : AddressDescriptor = undefined;
+ iswrite : bool = true;
+ memaddrdesc = AArch64_TranslateAddress(address, acctype, iswrite, wasaligned, 'size);
+ if IsFault(memaddrdesc) then AArch64_Abort(address, memaddrdesc.fault) else ();
+ /* A store to shareable memory clears other observers' exclusives. */
+ if memaddrdesc.memattrs.shareable then ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), 'size) else ();
+ accdesc : AccessDescriptor = CreateAccessDescriptor(acctype);
+ aset__Mem(memaddrdesc, 'size, accdesc, value_name);
+ ()
+}
+
+val aset_Mem : forall ('size : Int), 64 >= 0 & 8 * 'size >= 0.
+ (bits(64), atom('size), AccType, bits(8 * 'size)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* General memory write: byte-reverses for big-endian, then performs either
+   one atomic write, a pair of 8-byte writes (aligned 16-byte vector case),
+   or a byte-by-byte sequence for non-atomic (unaligned) accesses. */
+function aset_Mem (address, size, acctype, value_name__arg) = {
+ value_name = value_name__arg;
+ i : int = undefined;
+ iswrite : bool = true;
+ if BigEndian() then value_name = BigEndianReverse(value_name) else ();
+ aligned : bool = AArch64_CheckAlignment(address, 'size, acctype, iswrite);
+ atomic : bool = undefined;
+ /* 16-byte vector accesses only need 8-byte alignment to be atomic. */
+ if 'size != 16 | ~(acctype == AccType_VEC | acctype == AccType_VECSTREAM) then atomic = aligned else atomic = address == Align(address, 8);
+ c : Constraint = undefined;
+ if ~(atomic) then {
+ assert('size > 1, "(size > 1)");
+ /* First byte is written with the original alignment flag ... */
+ AArch64_aset_MemSingle(address, 1, acctype, aligned, slice(value_name, 0, 8));
+ if ~(aligned) then {
+ /* ... after which CONSTRAINED UNPREDICTABLE choice may treat the
+    remaining bytes as aligned. */
+ c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
+ assert(c == Constraint_FAULT | c == Constraint_NONE, "((c == Constraint_FAULT) || (c == Constraint_NONE))");
+ if c == Constraint_NONE then aligned = true else ()
+ } else ();
+ foreach (i from 1 to ('size - 1) by 1 in inc)
+ AArch64_aset_MemSingle(address + i, 1, acctype, aligned, slice(value_name, 8 * i, 8))
+ } else if 'size == 16 & (acctype == AccType_VEC | acctype == AccType_VECSTREAM) then {
+ AArch64_aset_MemSingle(address, 8, acctype, aligned, slice(value_name, 0, 64));
+ AArch64_aset_MemSingle(address + 8, 8, acctype, aligned, slice(value_name, 64, 64))
+ } else AArch64_aset_MemSingle(address, 'size, acctype, aligned, value_name);
+ ()
+}
+
+val AArch64_aget_MemSingle : forall ('size : Int), 64 >= 0 & 8 * 'size >= 0.
+ (bits(64), atom('size), AccType, bool) -> bits(8 * 'size) effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Single-copy-atomic read of 'size' bytes (1/2/4/8/16, naturally aligned):
+   translate the address, abort on a translation fault, then read memory. */
+function AArch64_aget_MemSingle (address, size, acctype, wasaligned) = {
+ assert('size == 1 | 'size == 2 | 'size == 4 | 'size == 8 | 'size == 16, "((size == 1) || ((size == 2) || ((size == 4) || ((size == 8) || (size == 16)))))");
+ assert(address == Align(address, 'size), "(address == Align(address, size))");
+ memaddrdesc : AddressDescriptor = undefined;
+ value_name : bits(8 * 'size) = undefined;
+ iswrite : bool = false;
+ memaddrdesc = AArch64_TranslateAddress(address, acctype, iswrite, wasaligned, 'size);
+ if IsFault(memaddrdesc) then AArch64_Abort(address, memaddrdesc.fault) else ();
+ accdesc : AccessDescriptor = CreateAccessDescriptor(acctype);
+ value_name = aget__Mem(memaddrdesc, 'size, accdesc);
+ return(value_name)
+}
+
+val aget_Mem : forall ('size : Int), 64 >= 0 & 8 * 'size >= 0.
+ (bits(64), atom('size), AccType) -> bits(8 * 'size) effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* General memory read, the dual of aset_Mem: one atomic read, a pair of
+   8-byte reads (aligned 16-byte vector case), or a byte-by-byte sequence
+   for non-atomic accesses; byte-reverses the result for big-endian. */
+function aget_Mem (address, size, acctype) = {
+ assert('size == 1 | 'size == 2 | 'size == 4 | 'size == 8 | 'size == 16, "((size == 1) || ((size == 2) || ((size == 4) || ((size == 8) || (size == 16)))))");
+ value_name : bits(8 * 'size) = undefined;
+ i : int = undefined;
+ iswrite : bool = false;
+ aligned : bool = AArch64_CheckAlignment(address, 'size, acctype, iswrite);
+ atomic : bool = undefined;
+ /* 16-byte vector accesses only need 8-byte alignment to be atomic. */
+ if 'size != 16 | ~(acctype == AccType_VEC | acctype == AccType_VECSTREAM) then atomic = aligned else atomic = address == Align(address, 8);
+ c : Constraint = undefined;
+ if ~(atomic) then {
+ assert('size > 1, "(size > 1)");
+ value_name = __SetSlice_bits(8 * 'size, 8, value_name, 0, AArch64_aget_MemSingle(address, 1, acctype, aligned));
+ if ~(aligned) then {
+ /* CONSTRAINED UNPREDICTABLE: remaining bytes may be treated as aligned. */
+ c = ConstrainUnpredictable(Unpredictable_DEVPAGE2);
+ assert(c == Constraint_FAULT | c == Constraint_NONE, "((c == Constraint_FAULT) || (c == Constraint_NONE))");
+ if c == Constraint_NONE then aligned = true else ()
+ } else ();
+ foreach (i from 1 to ('size - 1) by 1 in inc)
+ value_name = __SetSlice_bits(8 * 'size, 8, value_name, 8 * i, AArch64_aget_MemSingle(address + i, 1, acctype, aligned))
+ } else if 'size == 16 & (acctype == AccType_VEC | acctype == AccType_VECSTREAM) then {
+ value_name = __SetSlice_bits(8 * 'size, 64, value_name, 0, AArch64_aget_MemSingle(address, 8, acctype, aligned));
+ value_name = __SetSlice_bits(8 * 'size, 64, value_name, 64, AArch64_aget_MemSingle(address + 8, 8, acctype, aligned))
+ } else value_name = AArch64_aget_MemSingle(address, 'size, acctype, aligned);
+ if BigEndian() then value_name = BigEndianReverse(value_name) else ();
+ return(value_name)
+}
+
+val aarch64_memory_vector_single_nowb : forall ('esize : Int) ('selem : Int).
+ (int, atom('esize), int, int, MemOp, int, bool, atom('selem), int, bool) -> unit effect {escape, rmem, wmem, undef, wreg, rreg}
+
+/* Execute part of LD1..LD4/ST1..ST4 (single structure) and LD1R..LD4R:
+   load/store 'selem' elements of 'esize' bits at lane 'index' of
+   consecutive vector registers starting at 't', or (replicate) broadcast
+   each loaded element across a whole register.  Optional writeback of the
+   base register by the accumulated offset or by register m. */
+function aarch64_memory_vector_single_nowb (datasize, esize, index, m, memop, n, replicate, selem, t__arg, wback) = {
+ assert(constraint('selem >= 1 & 'esize >= 0));
+ t : int = t__arg;
+ CheckFPAdvSIMDEnabled64();
+ address : bits(64) = undefined;
+ offs : bits(64) = undefined;
+ rval : bits(128) = undefined;
+ element : bits('esize) = undefined;
+ s : int = undefined;
+ let 'ebytes : {'n, true. atom('n)} = ex_int(esize / 8);
+ assert(constraint(8 * 'ebytes = 'esize));
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else address = aget_X(n);
+ offs = Zeros();
+ if replicate then foreach (s from 0 to (selem - 1) by 1 in inc) {
+ /* LDnR form: load one element and replicate it across register t. */
+ element = aget_Mem(address + offs, ebytes, AccType_VEC);
+ let 'v : {'n, true. atom('n)} = ex_int(datasize / esize) in {
+ assert(constraint('esize * 'v >= 0));
+ aset_V(t, replicate_bits(element, v))
+ };
+ offs = offs + ebytes;
+ t = (t + 1) % 32
+ } else foreach (s from 0 to (selem - 1) by 1 in inc) {
+ /* Single-structure form: transfer only lane 'index' of register t. */
+ rval = aget_V(t);
+ if memop == MemOp_LOAD then {
+ rval = aset_Elem(rval, index, esize, aget_Mem(address + offs, ebytes, AccType_VEC));
+ aset_V(t, rval)
+ } else aset_Mem(address + offs, ebytes, AccType_VEC, aget_Elem(rval, index, esize));
+ offs = offs + ebytes;
+ t = (t + 1) % 32
+ };
+ if wback then {
+ /* Post-index writeback: register offset if m != 31, else the byte count. */
+ if m != 31 then offs = aget_X(m)
+ else ();
+ if n == 31 then aset_SP(address + offs) else aset_X(n, address + offs)
+ } else ()
+}
+
+val aarch64_memory_vector_multiple_nowb : forall ('datasize : Int) ('esize : Int) ('elements : Int) ('rpt : Int) ('selem : Int).
+ (atom('datasize), atom('elements), atom('esize), int, MemOp, int, atom('rpt), atom('selem), int, bool) -> unit effect {escape, rmem, wmem, undef, wreg, rreg}
+
+/* Execute part of LD1..LD4/ST1..ST4 (multiple structures): transfer
+   'rpt' repetitions of 'elements' elements, de-/interleaving across
+   'selem' consecutive vector registers starting at 't'.  Optional
+   writeback of the base register as in the single-structure form. */
+function aarch64_memory_vector_multiple_nowb (datasize, elements, esize, m, memop, n, rpt, selem, t, wback) = {
+ assert(constraint('datasize in {8, 16, 32, 64, 128} & ('rpt >= 1 & ('elements >= 1 & ('selem >= 1 & 'esize >= 0)))), "datasize constraint");
+ CheckFPAdvSIMDEnabled64();
+ address : bits(64) = undefined;
+ offs : bits(64) = undefined;
+ rval : bits('datasize) = undefined;
+ e : int = undefined;
+ r : int = undefined;
+ s : int = undefined;
+ tt : int = undefined;
+ let 'ebytes = ex_int(esize / 8);
+ assert(constraint(8 * 'ebytes = 'esize));
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else address = aget_X(n);
+ offs = Zeros();
+ foreach (r from 0 to (rpt - 1) by 1 in inc)
+ foreach (e from 0 to (elements - 1) by 1 in inc) {
+ /* tt walks the register file; the inner loop interleaves element e
+    across the 'selem' structure registers. */
+ tt = (t + r) % 32;
+ foreach (s from 0 to (selem - 1) by 1 in inc) {
+ rval = aget_V(tt);
+ if memop == MemOp_LOAD then {
+ rval = aset_Elem(rval, e, esize, aget_Mem(address + offs, ebytes, AccType_VEC));
+ aset_V(tt, rval)
+ } else aset_Mem(address + offs, ebytes, AccType_VEC, aget_Elem(rval, e, esize));
+ offs = offs + ebytes;
+ tt = (tt + 1) % 32
+ }
+ };
+ if wback then {
+ if m != 31 then offs = aget_X(m)
+ else ();
+ if n == 31 then aset_SP(address + offs) else aset_X(n, address + offs)
+ } else ()
+}
+
+val aarch64_memory_single_simdfp_register : (AccType, int, ExtendType, int, MemOp, int, bool, int, int, bool) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* SIMD&FP load/store with register offset: offset is register m extended
+   and shifted; transfers datasize bits between memory and vector register
+   t, with optional pre-/post-index writeback to the base register n. */
+function aarch64_memory_single_simdfp_register (acctype, 'datasize, extend_type, 'm, memop, 'n, postindex, 'shift, 't, wback) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ offset : bits(64) = ExtendReg(m, extend_type, shift);
+ CheckFPAdvSIMDEnabled64();
+ address : bits(64) = undefined;
+ data : bits('datasize) = undefined;
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else address = aget_X(n);
+ /* Pre-index (and plain offset) forms add the offset before the access. */
+ if ~(postindex) then address = address + offset else ();
+ match memop {
+ MemOp_STORE => {
+ data = aget_V(t);
+ aset_Mem(address, datasize / 8, acctype, data)
+ },
+ MemOp_LOAD => {
+ data = aget_Mem(address, datasize / 8, acctype);
+ aset_V(t, data)
+ }
+ };
+ if wback then {
+ if postindex then address = address + offset else ();
+ if n == 31 then aset_SP(address) else aset_X(n, address)
+ } else ()
+}
+
+val aarch64_memory_single_simdfp_immediate_signed_postidx : (AccType, int, MemOp, int, bits(64), bool, int, bool) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* SIMD&FP load/store with signed immediate offset (post-index form):
+   transfers datasize bits between memory and vector register t, with
+   optional writeback of base register n. */
+function aarch64_memory_single_simdfp_immediate_signed_postidx (acctype, 'datasize, memop, 'n, offset, postindex, 't, wback) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ address : bits(64) = undefined;
+ data : bits('datasize) = undefined;
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else address = aget_X(n);
+ /* Post-index accesses use the unmodified base address. */
+ if ~(postindex) then address = address + offset else ();
+ match memop {
+ MemOp_STORE => {
+ data = aget_V(t);
+ aset_Mem(address, datasize / 8, acctype, data)
+ },
+ MemOp_LOAD => {
+ data = aget_Mem(address, datasize / 8, acctype);
+ aset_V(t, data)
+ }
+ };
+ if wback then {
+ if postindex then address = address + offset else ();
+ if n == 31 then aset_SP(address) else aset_X(n, address)
+ } else ()
+}
+
+val aarch64_memory_single_simdfp_immediate_signed_offset_normal : (AccType, int, MemOp, int, bits(64), bool, int, bool) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* SIMD&FP load/store with signed immediate offset (plain offset form);
+   body is structurally identical to the post-index variant above, with the
+   addressing mode selected by the postindex/wback flags. */
+function aarch64_memory_single_simdfp_immediate_signed_offset_normal (acctype, 'datasize, memop, 'n, offset, postindex, 't, wback) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ CheckFPAdvSIMDEnabled64();
+ address : bits(64) = undefined;
+ data : bits('datasize) = undefined;
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else address = aget_X(n);
+ if ~(postindex) then address = address + offset else ();
+ match memop {
+ MemOp_STORE => {
+ data = aget_V(t);
+ aset_Mem(address, datasize / 8, acctype, data)
+ },
+ MemOp_LOAD => {
+ data = aget_Mem(address, datasize / 8, acctype);
+ aset_V(t, data)
+ }
+ };
+ if wback then {
+ if postindex then address = address + offset else ();
+ if n == 31 then aset_SP(address) else aset_X(n, address)
+ } else ()
+}
+
+val aarch64_memory_ordered : forall ('datasize : Int) ('regsize : Int).
+ (AccType, atom('datasize), MemOp, int, atom('regsize), int) -> unit effect {escape, undef, wreg, rreg, rmem, wmem}
+
+/* Execute part of load-acquire / store-release (LDAR/STLR family):
+   transfer datasize bits between memory at [Xn|SP] and register t,
+   zero-extending loads to regsize bits. */
+function aarch64_memory_ordered (acctype, datasize, memop, n, regsize, t) = {
+ assert(constraint('datasize in {8, 16, 32, 64, 128} & 'regsize >= 0), "datasize constraint");
+ address : bits(64) = undefined;
+ data : bits('datasize) = undefined;
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint(8 * 'dbytes = 'datasize));
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else address = aget_X(n);
+ match memop {
+ MemOp_STORE => {
+ data = aget_X(t);
+ aset_Mem(address, dbytes, acctype, data)
+ },
+ MemOp_LOAD => {
+ data = aget_Mem(address, dbytes, acctype);
+ aset_X(t, ZeroExtend(data, regsize))
+ }
+ }
+}
+
+val memory_ordered_decode : (bits(2), bits(1), bits(1), bits(1), bits(5), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decode the LDAR/STLR/LDLAR/STLLR encoding group and dispatch to
+   aarch64_memory_ordered.  Element size is 8 << size; 32-bit register
+   forms are used for element sizes below 64 bits. */
+function memory_ordered_decode (size, o2, L, o1, Rs, o0, Rt2, Rn, Rt) = {
+ __unconditional = true; /* decoder flag: instruction executes unconditionally */
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ t2 : int = UInt(Rt2); /* decoded but unused by this execute clause */
+ s : int = UInt(Rs); /* decoded but unused by this execute clause */
+ acctype : AccType = if o0 == 0b0 then AccType_LIMITEDORDERED else AccType_ORDERED;
+ memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ elsize : int = shl_int(8, UInt(size));
+ regsize : int = if elsize == 64 then 64 else 32;
+ datasize : int = elsize;
+ aarch64_memory_ordered(acctype, datasize, memop, n, regsize, t)
+}
+
+val aarch64_memory_orderedrcpc : forall ('datasize : Int) ('regsize : Int).
+ (AccType, atom('datasize), int, atom('regsize), int) -> unit effect {escape, undef, wreg, rreg, rmem, wmem}
+
+/* Execute part of LDAPR (RCpc load-acquire): load datasize bits from
+   [Xn|SP] and zero-extend into register t.  Load-only — there is no
+   store form in this clause. */
+function aarch64_memory_orderedrcpc (acctype, datasize, n, regsize, t) = {
+ assert(constraint('datasize in {8, 16, 32, 64, 128} & 'regsize >= 0), "datasize constraint");
+ address : bits(64) = undefined;
+ data : bits('datasize) = undefined;
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint(8 * 'dbytes = 'datasize));
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else address = aget_X(n);
+ data = aget_Mem(address, dbytes, acctype);
+ aset_X(t, ZeroExtend(data, regsize))
+}
+
+val memory_orderedrcpc_decode : (bits(2), bits(1), bits(1), bits(1), bits(5), bits(1), bits(3), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decode the LDAPR encoding group and dispatch to
+   aarch64_memory_orderedrcpc with element size 8 << size. */
+function memory_orderedrcpc_decode (size, V, A, R, Rs, o3, opc, Rn, Rt) = {
+ __unconditional = true; /* decoder flag: instruction executes unconditionally */
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ s : int = UInt(Rs); /* decoded but unused by this execute clause */
+ acctype : AccType = AccType_ORDERED;
+ elsize : int = shl_int(8, UInt(size));
+ regsize : int = if elsize == 64 then 64 else 32;
+ datasize : int = elsize;
+ aarch64_memory_orderedrcpc(acctype, datasize, n, regsize, t)
+}
+
+val aarch64_memory_literal_simdfp : forall ('size : Int).
+ (bits(64), atom('size), int) -> unit effect {escape, undef, wreg, rreg, rmem, wmem}
+
+/* Execute part of PC-relative SIMD&FP literal load (LDR Vt, label):
+   load 'size' bytes from PC + offset into vector register t. */
+function aarch64_memory_literal_simdfp (offset, size, t) = {
+ assert(constraint('size >= 0));
+ let address : bits(64) = aget_PC() + offset;
+ CheckFPAdvSIMDEnabled64();
+ let data : bits(8 * 'size) = aget_Mem(address, size, AccType_VEC);
+ aset_V(t, data)
+}
+
+val aarch64_memory_literal_general : forall ('size : Int).
+ (MemOp, bits(64), bool, atom('size), int) -> unit effect {escape, undef, wreg, rreg, rmem, wmem}
+
+/* Execute part of PC-relative general literal load (LDR/LDRSW Xt, label)
+   and PRFM (literal): load 'size' bytes from PC + offset into register t
+   (sign- or zero-extended), or issue a prefetch hint. */
+function aarch64_memory_literal_general (memop, offset, signed, size, t) = {
+ assert(constraint('size >= 0));
+ address : bits(64) = aget_PC() + offset;
+ data : bits(8 * 'size) = undefined;
+ match memop {
+ MemOp_LOAD => {
+ data = aget_Mem(address, size, AccType_NORMAL);
+ if signed then aset_X(t, SignExtend(data, 64)) else aset_X(t, data)
+ },
+ /* For PRFM the Rt field encodes the prefetch operation, not a register. */
+ MemOp_PREFETCH => Prefetch(address, __GetSlice_int(5, t, 0))
+ }
+}
+
+val memory_literal_general_decode : (bits(2), bits(1), bits(19), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decode the general literal-load group: opc selects 32-bit load, 64-bit
+   load, sign-extending 32-bit load (LDRSW) or prefetch; the 19-bit
+   immediate is scaled by 4 and sign-extended to a byte offset. */
+function memory_literal_general_decode (opc, V, imm19, Rt) = {
+ __unconditional = true; /* decoder flag: instruction executes unconditionally */
+ t : int = UInt(Rt);
+ memop : MemOp = MemOp_LOAD;
+ signed : bool = false;
+ size : int = undefined;
+ offset : bits(64) = undefined;
+ match opc {
+ 0b00 => size = 4,
+ 0b01 => size = 8,
+ 0b10 => {
+ size = 4;
+ signed = true
+ },
+ 0b11 => memop = MemOp_PREFETCH
+ };
+ offset = SignExtend(imm19 @ 0b00, 64);
+ aarch64_memory_literal_general(memop, offset, signed, size, t)
+}
+
+val aarch64_memory_atomicops_swp : (int, AccType, int, int, int, AccType, int) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Execute part of SWP/SWPA/SWPL/SWPAL: atomically swap register s with
+   memory at [Xn|SP]; the old memory value is zero-extended into t. */
+function aarch64_memory_atomicops_swp ('datasize, ldacctype, 'n, 'regsize, 's, stacctype, 't) = {
+ assert(constraint('regsize >= 0), "regsize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ address : bits(64) = undefined;
+ data : bits('datasize) = undefined;
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else address = aget_X(n);
+ /* Load and store may carry different orderings (acquire vs release). */
+ data = aget_Mem(address, datasize / 8, ldacctype);
+ aset_Mem(address, datasize / 8, stacctype, aget_X(s));
+ aset_X(t, ZeroExtend(data, regsize))
+}
+
+val aarch64_memory_atomicops_st : (int, AccType, int, MemAtomicOp, int, AccType) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Execute part of the ST<op> atomics (STADD, STCLR, STEOR, STSET,
+   STSMAX/SMIN, STUMAX/UMIN): read-modify-write memory at [Xn|SP] with
+   operand register s, discarding the loaded value. */
+function aarch64_memory_atomicops_st ('datasize, ldacctype, 'n, op, 's, stacctype) = let 'dbytes = ex_int(datasize / 8) in {
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ address : bits(64) = undefined;
+ value_name : bits('datasize) = undefined;
+ data : bits('datasize) = undefined;
+ result : bits('datasize) = undefined;
+ value_name = aget_X(s);
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else address = aget_X(n);
+ data = aget_Mem(address, datasize / 8, ldacctype);
+ /* BIC clears the operand bits; SET is encoded as ORR here. */
+ match op {
+ MemAtomicOp_ADD => result = data + value_name,
+ MemAtomicOp_BIC => result = data & ~(value_name),
+ MemAtomicOp_EOR => result = data ^ value_name,
+ MemAtomicOp_ORR => result = data | value_name,
+ MemAtomicOp_SMAX => result = if SInt(data) > SInt(value_name) then data else value_name,
+ MemAtomicOp_SMIN => result = if SInt(data) > SInt(value_name) then value_name else data,
+ MemAtomicOp_UMAX => result = if UInt(data) > UInt(value_name) then data else value_name,
+ MemAtomicOp_UMIN => result = if UInt(data) > UInt(value_name) then value_name else data
+ };
+ aset_Mem(address, datasize / 8, stacctype, result)
+}
+
+val aarch64_memory_atomicops_ld : (int, AccType, int, MemAtomicOp, int, int, AccType, int) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Execute part of the LD<op> atomics (LDADD, LDCLR, LDEOR, LDSET,
+   LDSMAX/SMIN, LDUMAX/UMIN): like the ST<op> form above, but the old
+   memory value is also zero-extended into destination register t. */
+function aarch64_memory_atomicops_ld ('datasize, ldacctype, 'n, op, 'regsize, 's, stacctype, 't) = {
+ assert(constraint('regsize >= 0), "regsize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ address : bits(64) = undefined;
+ value_name : bits('datasize) = undefined;
+ data : bits('datasize) = undefined;
+ result : bits('datasize) = undefined;
+ value_name = aget_X(s);
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else address = aget_X(n);
+ data = aget_Mem(address, datasize / 8, ldacctype);
+ match op {
+ MemAtomicOp_ADD => result = data + value_name,
+ MemAtomicOp_BIC => result = data & ~(value_name),
+ MemAtomicOp_EOR => result = data ^ value_name,
+ MemAtomicOp_ORR => result = data | value_name,
+ MemAtomicOp_SMAX => result = if SInt(data) > SInt(value_name) then data else value_name,
+ MemAtomicOp_SMIN => result = if SInt(data) > SInt(value_name) then value_name else data,
+ MemAtomicOp_UMAX => result = if UInt(data) > UInt(value_name) then data else value_name,
+ MemAtomicOp_UMIN => result = if UInt(data) > UInt(value_name) then value_name else data
+ };
+ aset_Mem(address, datasize / 8, stacctype, result);
+ /* Destination receives the ORIGINAL memory value, not the result. */
+ aset_X(t, ZeroExtend(data, regsize))
+}
+
+val aarch64_memory_atomicops_cas_single : (int, AccType, int, int, int, AccType, int) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Execute part of CAS/CASA/CASL/CASAL: compare memory at [Xn|SP] with
+   register s; on match store register t; the old memory value is always
+   written back (zero-extended) into s. */
+function aarch64_memory_atomicops_cas_single ('datasize, ldacctype, 'n, 'regsize, 's, stacctype, 't) = {
+ assert(constraint('regsize >= 0), "regsize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ address : bits(64) = undefined;
+ comparevalue : bits('datasize) = undefined;
+ newvalue : bits('datasize) = undefined;
+ data : bits('datasize) = undefined;
+ comparevalue = aget_X(s);
+ newvalue = aget_X(t);
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else address = aget_X(n);
+ data = aget_Mem(address, datasize / 8, ldacctype);
+ if data == comparevalue then aset_Mem(address, datasize / 8, stacctype, newvalue) else ();
+ aset_X(s, ZeroExtend(data, regsize))
+}
+
+val aarch64_memory_atomicops_cas_pair : (int, AccType, int, int, int, AccType, int) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Execute part of CASP/CASPA/CASPL/CASPAL: compare-and-swap a pair of
+   registers (s, s+1) against 2*datasize bits of memory at [Xn|SP], storing
+   (t, t+1) on match; the old memory value is unpacked back into s, s+1.
+   Endianness determines how the two registers are packed into the
+   double-width memory value. */
+function aarch64_memory_atomicops_cas_pair ('datasize, ldacctype, 'n, 'regsize, 's, stacctype, 't) = {
+ assert(constraint('regsize >= 0), "regsize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ address : bits(64) = undefined;
+ comparevalue : bits(2 * 'datasize) = undefined;
+ newvalue : bits(2 * 'datasize) = undefined;
+ data : bits(2 * 'datasize) = undefined;
+ s1 : bits('datasize) = aget_X(s);
+ s2 : bits('datasize) = aget_X(s + 1);
+ t1 : bits('datasize) = aget_X(t);
+ t2 : bits('datasize) = aget_X(t + 1);
+ comparevalue = if BigEndian() then s1 @ s2 else s2 @ s1;
+ newvalue = if BigEndian() then t1 @ t2 else t2 @ t1;
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else address = aget_X(n);
+ data = aget_Mem(address, (2 * datasize) / 8, ldacctype);
+ if data == comparevalue then aset_Mem(address, (2 * datasize) / 8, stacctype, newvalue) else ();
+ /* Unpack old memory value into s, s+1 with the same endian convention. */
+ if BigEndian() then {
+ aset_X(s, ZeroExtend(slice(data, datasize, datasize), regsize));
+ aset_X(s + 1, ZeroExtend(slice(data, 0, datasize), regsize))
+ } else {
+ aset_X(s, ZeroExtend(slice(data, 0, datasize), regsize));
+ aset_X(s + 1, ZeroExtend(slice(data, datasize, datasize), regsize))
+ }
+}
+
+val AArch64_SetExclusiveMonitors : (bits(64), int) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Set the exclusive monitors for a load-exclusive of 'size' bytes at
+   'address': translate the address, mark the global monitor (shareable
+   locations only) and the local monitor, and record the virtual address
+   for the later check in AArch64_ExclusiveMonitorsPass. */
+function AArch64_SetExclusiveMonitors (address, 'size) = {
+ acctype : AccType = AccType_ATOMIC;
+ iswrite : bool = false;
+ /* FIX: was 'address != Align(address, size)', which inverted the
+    alignment flag passed to translation; the ARM pseudocode and the
+    sibling AArch64_ExclusiveMonitorsPass use equality. */
+ aligned : bool = address == Align(address, size);
+ memaddrdesc : AddressDescriptor = AArch64_TranslateAddress(address, acctype, iswrite, aligned, size);
+ /* FIX: the translated code had 'if IsFault(...) then () else ()', a
+    no-op that still marked the monitors on a faulting address; the ARM
+    pseudocode returns early here without setting any monitor. */
+ if IsFault(memaddrdesc) then return(()) else ();
+ if memaddrdesc.memattrs.shareable then MarkExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size) else ();
+ MarkExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);
+ AArch64_MarkExclusiveVA(address, ProcessorID(), size)
+}
+
+val AArch64_ExclusiveMonitorsPass : (bits(64), int) -> bool effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decide whether a store-exclusive of 'size' bytes at 'address' may
+   succeed: the access must be aligned (else alignment abort), the VA must
+   match the recorded exclusive VA, translation must not fault, and the
+   local (and, for shareable memory, global) exclusive monitors must still
+   be set.  A passing check clears the local monitor. */
+function AArch64_ExclusiveMonitorsPass (address, 'size) = {
+ acctype : AccType = AccType_ATOMIC;
+ iswrite : bool = true;
+ aligned : bool = address == Align(address, size);
+ secondstage : bool = undefined;
+ if ~(aligned) then {
+ secondstage = false;
+ AArch64_Abort(address, AArch64_AlignmentFault(acctype, iswrite, secondstage))
+ } else ();
+ passed : bool = AArch64_IsExclusiveVA(address, ProcessorID(), size);
+ /* VA mismatch fails fast without translating or touching monitors. */
+ if ~(passed) then return(false) else ();
+ memaddrdesc : AddressDescriptor = AArch64_TranslateAddress(address, acctype, iswrite, aligned, size);
+ if IsFault(memaddrdesc) then AArch64_Abort(address, memaddrdesc.fault) else ();
+ passed = IsExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);
+ if passed then {
+ ClearExclusiveLocal(ProcessorID());
+ if memaddrdesc.memattrs.shareable then passed = IsExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size) else ()
+ } else ();
+ return(passed)
+}
+
+val AArch32_SelfHostedSecurePrivilegedInvasiveDebugEnabled : unit -> bool effect {escape, rreg, undef}
+
+/* Secure privileged invasive debug is enabled only when both the DBGEN
+   and SPIDEN authentication signals are asserted; without EL3 and outside
+   Secure state it is never enabled. */
+function AArch32_SelfHostedSecurePrivilegedInvasiveDebugEnabled () = {
+ if ~(HaveEL(EL3)) & ~(IsSecure()) then false
+ else DBGEN == HIGH & SPIDEN == HIGH
+}
+
+val AArch32_GenerateDebugExceptionsFrom : (bits(2), bool) -> bool effect {escape, rreg, undef}
+
+/* Determine whether self-hosted debug exceptions can be generated from
+   exception level 'from' in AArch32 state.  Delegates to the AArch64
+   rules when EL1 of the target security state is using AArch64. */
+function AArch32_GenerateDebugExceptionsFrom (from, secure) = {
+ mask : bits(1) = undefined;
+ if from == EL0 & ~(ELStateUsingAArch32(EL1, secure)) then {
+ mask = undefined;
+ return(AArch64_GenerateDebugExceptionsFrom(from, secure, mask))
+ } else ();
+ /* OS lock, OS double lock, or debug-halted state suppress exceptions. */
+ if ([DBGOSLSR[1]] == 0b1 | DoubleLockStatus()) | Halted() then return(false) else ();
+ enabled : bool = undefined;
+ spd : bits(2) = undefined;
+ if HaveEL(EL3) & secure then {
+ /* SPD field is read from SDCR (AArch32 EL3) or MDCR_EL3 bits [15:14]. */
+ spd = if ELUsingAArch32(EL3) then slice(SDCR, 14, 2) else slice(MDCR_EL3, 14, 2);
+ if [spd[1]] == 0b1 then enabled = [spd[0]] == 0b1 else enabled = AArch32_SelfHostedSecurePrivilegedInvasiveDebugEnabled();
+ if from == EL0 then enabled = enabled | [SDER[0]] == 0b1 else ()
+ } else enabled = from != EL2;
+ return(enabled)
+}
+
+val AArch32_GenerateDebugExceptions : unit -> bool effect {escape, rreg, undef}
+
+/* Convenience wrapper: debug-exception enable from the current exception
+   level and security state. */
+function AArch32_GenerateDebugExceptions () = return(AArch32_GenerateDebugExceptionsFrom(PSTATE.EL, IsSecure()))
+
+val DebugExceptionReturnSS : bits(32) -> bits(1) effect {escape, rreg, undef}
+
+/* Compute the software-step (PSTATE.SS) bit for an exception return with
+   the given SPSR: the SPSR's SS bit is propagated only when stepping is
+   enabled (MDSCR_EL1.SS), the destination EL is AArch64, stepping is
+   disabled at the source but enabled at the destination. */
+function DebugExceptionReturnSS spsr = {
+ assert((Halted() | Restarting()) | PSTATE.EL != EL0, "((Halted() || Restarting()) || ((PSTATE).EL != EL0))");
+ SS_bit : bits(1) = 0b0;
+ ELd : bits(2) = undefined;
+ mask : bits(1) = undefined;
+ enabled_at_dest : bool = undefined;
+ secure : bool = undefined;
+ valid_name : bool = undefined;
+ dest : bits(2) = undefined;
+ enabled_at_source : bool = undefined;
+ if [MDSCR_EL1[0]] == 0b1 then {
+ if Restarting() then enabled_at_source = false else if UsingAArch32() then enabled_at_source = AArch32_GenerateDebugExceptions() else enabled_at_source = AArch64_GenerateDebugExceptions();
+ /* An illegal return stays at the current EL for this computation. */
+ if IllegalExceptionReturn(spsr) then dest = PSTATE.EL else {
+ (valid_name, dest) = ELFromSPSR(spsr);
+ assert(valid_name, "valid")
+ };
+ secure = IsSecureBelowEL3() | dest == EL3;
+ if ELUsingAArch32(dest) then enabled_at_dest = AArch32_GenerateDebugExceptionsFrom(dest, secure) else {
+ mask = [spsr[9]];
+ enabled_at_dest = AArch64_GenerateDebugExceptionsFrom(dest, secure, mask)
+ };
+ ELd = DebugTargetFrom(secure);
+ if (~(ELUsingAArch32(ELd)) & ~(enabled_at_source)) & enabled_at_dest then SS_bit = [spsr[21]] else ()
+ } else ();
+ return(SS_bit)
+}
+
+/* Restore PSTATE from a saved program status register value `spsr`, as done
+ * on exception return: sets SS via DebugExceptionReturnSS, handles illegal
+ * returns by setting PSTATE.IL, selects AArch32 mode vs AArch64 EL/SP from
+ * spsr bit 4, then unpacks the flag and mask fields for whichever execution
+ * state was selected. */
+val SetPSTATEFromPSR : bits(32) -> unit effect {escape, rreg, undef, wreg}
+
+function SetPSTATEFromPSR spsr__arg = {
+  spsr = spsr__arg;
+  PSTATE.SS = DebugExceptionReturnSS(spsr);
+  if IllegalExceptionReturn(spsr) then PSTATE.IL = 0b1 else {
+    PSTATE.IL = [spsr[20]];
+    /* spsr bit 4 selects AArch32 (1) vs AArch64 (0). */
+    if [spsr[4]] == 0b1 then AArch32_WriteMode(slice(spsr, 0, 5)) else {
+      PSTATE.nRW = 0b0;
+      PSTATE.EL = slice(spsr, 2, 2);
+      PSTATE.SP = [spsr[0]]
+    }
+  };
+  /* CONSTRAINED UNPREDICTABLE: illegal return to AArch32 may clear T. */
+  if PSTATE.IL == 0b1 & PSTATE.nRW == 0b1 then if ConstrainUnpredictableBool(Unpredictable_ILZEROT) then spsr = __SetSlice_bits(32, 1, spsr, 5, 0b0) else () else ();
+  (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V) = slice(spsr, 28, 4);
+  if PSTATE.nRW == 0b1 then {
+    /* AArch32 target: restore Q/IT/GE/E and the A,I,F masks and T bit. */
+    PSTATE.Q = [spsr[27]];
+    PSTATE.IT = RestoredITBits(spsr);
+    PSTATE.GE = slice(spsr, 16, 4);
+    PSTATE.E = [spsr[9]];
+    (PSTATE.A @ PSTATE.I @ PSTATE.F) = slice(spsr, 6, 3);
+    PSTATE.T = [spsr[5]]
+  } else (PSTATE.D @ PSTATE.A @ PSTATE.I @ PSTATE.F) = slice(spsr, 6, 4);
+  if HavePANExt() then PSTATE.PAN = [spsr[22]] else ();
+  if HaveUAOExt() then PSTATE.UAO = [spsr[23]] else ();
+  ()
+}
+
+/* DRPS (Debug restore PSTATE): restore PSTATE from the current SPSR while in
+ * Debug state, then make the state that is UNKNOWN after DRPS explicit
+ * (flags, masks, DLR/DSPSR) for whichever execution state results. */
+val DRPSInstruction : unit -> unit effect {wreg, rreg, undef, escape}
+
+function DRPSInstruction () = {
+  SynchronizeContext();
+  /* Optional implicit error synchronization barrier when SCTLR.IESB (bit 21)
+     is set and RAS is implemented; taking it in Debug state is
+     CONSTRAINED UNPREDICTABLE. */
+  if (HaveRASExt() & [aget_SCTLR()[21]] == 0b1) & ConstrainUnpredictableBool(Unpredictable_IESBinDebug) then ErrorSynchronizationBarrier(MBReqDomain_FullSystem, MBReqTypes_All) else ();
+  SetPSTATEFromPSR(aget_SPSR());
+  if UsingAArch32() then {
+    (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V @ PSTATE.Q @ PSTATE.GE @ PSTATE.SS @ PSTATE.A @ PSTATE.I @ PSTATE.F) = undefined : bits(13);
+    PSTATE.IT = 0x00;
+    PSTATE.T = 0b1;
+    DLR = undefined : bits(32);
+    DSPSR = undefined : bits(32)
+  } else {
+    (PSTATE.N @ PSTATE.Z @ PSTATE.C @ PSTATE.V @ PSTATE.SS @ PSTATE.D @ PSTATE.A @ PSTATE.I @ PSTATE.F) = undefined : bits(9);
+    DLR_EL0 = undefined : bits(64);
+    DSPSR_EL0 = undefined : bits(32)
+  };
+  UpdateEDSCRFields();
+  ()
+}
+
+/* Execute behaviour of the DRET instruction: simply performs DRPS. */
+val aarch64_branch_unconditional_dret : unit -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_branch_unconditional_dret () = DRPSInstruction()
+
+/* Exception return from AArch64: restore PSTATE from `spsr`, clear the local
+ * exclusive monitor, then branch to `new_pc` adjusted for the target
+ * execution state (alignment/UNKNOWN bits on illegal return, Thumb/ARM
+ * alignment for AArch32, tag handling via AArch64_BranchAddr otherwise). */
+val AArch64_ExceptionReturn : (bits(64), bits(32)) -> unit effect {escape, rreg, undef, wreg}
+
+function AArch64_ExceptionReturn (new_pc__arg, spsr) = {
+  new_pc = new_pc__arg;
+  SynchronizeContext();
+  iesb_req : bool = undefined;
+  /* Implicit error synchronization barrier when SCTLR.IESB (bit 21) is set. */
+  if HaveRASExt() & [aget_SCTLR()[21]] == 0b1 then {
+    ErrorSynchronizationBarrier(MBReqDomain_FullSystem, MBReqTypes_All);
+    iesb_req = true;
+    TakeUnmaskedPhysicalSErrorInterrupts(iesb_req)
+  } else ();
+  SetPSTATEFromPSR(spsr);
+  ClearExclusiveLocal(ProcessorID());
+  SendEventLocal();
+  if PSTATE.IL == 0b1 then {
+    /* Illegal return: upper half and low 2 bits of the target are UNKNOWN. */
+    new_pc = __SetSlice_bits(64, 32, new_pc, 32, undefined);
+    new_pc = __SetSlice_bits(64, 2, new_pc, 0, undefined)
+  } else if UsingAArch32() then if PSTATE.T == 0b0 then new_pc = __SetSlice_bits(64, 1, new_pc, 0, 0b0) else new_pc = __SetSlice_bits(64, 2, new_pc, 0, 0b00) else new_pc = AArch64_BranchAddr(new_pc);
+  if UsingAArch32() then BranchTo(slice(new_pc, 0, 32), BranchType_UNKNOWN) else BranchToAddr(new_pc, BranchType_ERET)
+}
+
+/* Execute behaviour of ERET / ERETAA / ERETAB.  `pac` selects the
+ * pointer-authenticating forms, `use_key_a` chooses key A (AuthIA) over
+ * key B (AuthIB); the ELR value is authenticated against SP as modifier
+ * before the return. */
+val aarch64_branch_unconditional_eret : (bool, bool) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_branch_unconditional_eret (pac, use_key_a) = {
+  AArch64_CheckForERetTrap(pac, use_key_a);
+  target : bits(64) = aget_ELR();
+  if pac then if use_key_a then target = AuthIA(aget_ELR(), aget_SP()) else target = AuthIB(aget_ELR(), aget_SP()) else ();
+  AArch64_ExceptionReturn(target, aget_SPSR())
+}
+
+/* True when general exceptions taken from AArch32 are routed to AArch64:
+ * either EL0 under an AArch64 EL1, or a Non-secure AArch64 EL2 with
+ * HCR_EL2.TGE (bit 27) set. */
+val AArch32_GeneralExceptionsToAArch64 : unit -> bool effect {escape, rreg, undef}
+
+function AArch32_GeneralExceptionsToAArch64 () = return(PSTATE.EL == EL0 & ~(ELUsingAArch32(EL1)) | ((HaveEL(EL2) & ~(IsSecure())) & ~(ELUsingAArch32(EL2))) & [HCR_EL2[27]] == 0b1)
+
+/* Take an exception into AArch32 Hyp mode: save the PSR, report syndrome
+ * (except for IRQ/FIQ), switch to Hyp mode, set the return address and
+ * PSTATE per HSCTLR/SCR, then branch to HVBAR plus `vect_offset`. */
+val AArch32_EnterHypMode : (ExceptionRecord, bits(32), int) -> unit effect {escape, rreg, undef, wreg}
+
+function AArch32_EnterHypMode (exception, preferred_exception_return, 'vect_offset) = {
+  SynchronizeContext();
+  assert((HaveEL(EL2) & ~(IsSecure())) & ELUsingAArch32(EL2), "((HaveEL(EL2) && !(IsSecure())) && ELUsingAArch32(EL2))");
+  spsr : bits(32) = GetPSRFromPSTATE();
+  /* IRQ/FIQ entry does not update the Hyp syndrome registers. */
+  if ~(exception.typ == Exception_IRQ | exception.typ == Exception_FIQ) then AArch32_ReportHypEntry(exception) else ();
+  AArch32_WriteMode(M32_Hyp);
+  aset_SPSR(spsr);
+  ELR_hyp = preferred_exception_return;
+  /* Entry instruction set from HSCTLR.TE (bit 30), endianness from
+     HSCTLR.EE (bit 25). */
+  PSTATE.T = [HSCTLR[30]];
+  PSTATE.SS = 0b0;
+  /* A/I/F are masked on entry unless the corresponding SCR routing bit
+     (EA=3, IRQ=1, FIQ=2) directs the exception elsewhere. */
+  if ~(HaveEL(EL3)) | [aget_SCR_GEN()[3]] == 0b0 then PSTATE.A = 0b1 else ();
+  if ~(HaveEL(EL3)) | [aget_SCR_GEN()[1]] == 0b0 then PSTATE.I = 0b1 else ();
+  if ~(HaveEL(EL3)) | [aget_SCR_GEN()[2]] == 0b0 then PSTATE.F = 0b1 else ();
+  PSTATE.E = [HSCTLR[25]];
+  PSTATE.IL = 0b0;
+  PSTATE.IT = 0x00;
+  BranchTo(slice(HVBAR, 5, 27) @ __GetSlice_int(5, vect_offset, 0), BranchType_UNKNOWN);
+  EndOfInstruction()
+}
+
+/* AArch32 Undefined Instruction exception entry.  The overload provides a
+ * zero-argument form that synthesizes an Uncategorized syndrome and a form
+ * taking an explicit ExceptionRecord. */
+val AArch32_TakeUndefInstrException__0 : unit -> unit effect {escape, undef, wreg, rreg}
+
+val AArch32_TakeUndefInstrException__1 : ExceptionRecord -> unit effect {escape, rreg, undef, wreg}
+
+overload AArch32_TakeUndefInstrException = {
+  AArch32_TakeUndefInstrException__0,
+  AArch32_TakeUndefInstrException__1
+}
+
+/* Zero-argument form: default Uncategorized exception record. */
+function AArch32_TakeUndefInstrException__0 () = {
+  exception : ExceptionRecord = ExceptionSyndrome(Exception_Uncategorized);
+  AArch32_TakeUndefInstrException(exception)
+}
+
+/* Take the Undefined Instruction exception: from EL2, or when routed to Hyp
+ * mode by HCR.TGE (bit 27) at Non-secure EL0, enter Hyp mode (vector offset
+ * 4, or 20 for a routed exception); otherwise enter Undef mode.  The link
+ * register offset depends on the current instruction set (A32: 4, T32: 2). */
+function AArch32_TakeUndefInstrException__1 exception = {
+  route_to_hyp : bool = ((HaveEL(EL2) & ~(IsSecure())) & PSTATE.EL == EL0) & [HCR[27]] == 0b1;
+  preferred_exception_return : bits(32) = ThisInstrAddr();
+  vect_offset : int = 4;
+  lr_offset : int = if CurrentInstrSet() == InstrSet_A32 then 4 else 2;
+  if PSTATE.EL == EL2 then AArch32_EnterHypMode(exception, preferred_exception_return, vect_offset) else if route_to_hyp then AArch32_EnterHypMode(exception, preferred_exception_return, 20) else AArch32_EnterMode(M32_Undef, preferred_exception_return, lr_offset, vect_offset)
+}
+
+/* Behaviour for an unallocated instruction encoding: clear FPEXC.DEX
+ * (bit 29) for AArch32 CP10/CP11 instructions, then take the Undefined
+ * Instruction exception in AArch32 or the Undefined fault in AArch64,
+ * depending on routing. */
+val UnallocatedEncoding : unit -> unit effect {escape, rreg, undef, wreg}
+
+function UnallocatedEncoding () = {
+  if UsingAArch32() & AArch32_ExecutingCP10or11Instr() then FPEXC = __SetSlice_bits(32, 1, FPEXC, 29, 0b0) else ();
+  if UsingAArch32() & ~(AArch32_GeneralExceptionsToAArch64()) then AArch32_TakeUndefInstrException() else AArch64_UndefinedFault()
+}
+
+/* Execute behaviour of HVC: undefined when EL2 is absent, at EL0, or at
+ * Secure EL1.  HVC is enabled by SCR_EL3.HCE (bit 8) when EL3 exists,
+ * otherwise by HCR_EL2.HCD (bit 29) being clear; if disabled the instruction
+ * faults, otherwise the hypervisor call is made with immediate `imm`. */
+val aarch64_system_exceptions_runtime_hvc : bits(16) -> unit effect {escape, rreg, undef, wreg}
+
+function aarch64_system_exceptions_runtime_hvc imm = {
+  if (~(HaveEL(EL2)) | PSTATE.EL == EL0) | PSTATE.EL == EL1 & IsSecure() then UnallocatedEncoding() else ();
+  hvc_enable : bits(1) = if HaveEL(EL3) then [SCR_EL3[8]] else ~([HCR_EL2[29]]);
+  if hvc_enable == 0b0 then AArch64_UndefinedFault() else AArch64_CallHypervisor(imm)
+}
+
+/* Decode behaviour for HVC: marks the instruction unconditional and passes
+ * the 16-bit immediate through to the execute function.  opc/op2/LL are the
+ * remaining encoding fields, unused here. */
+val system_exceptions_runtime_hvc_decode : (bits(3), bits(16), bits(3), bits(2)) -> unit effect {escape, rreg, undef, wreg}
+
+function system_exceptions_runtime_hvc_decode (opc, imm16, op2, LL) = {
+  __unconditional = true;
+  imm : bits(16) = imm16;
+  aarch64_system_exceptions_runtime_hvc(imm)
+}
+
+/* Single general-register load/store/prefetch with a register offset
+ * (offset = register m, extended/shifted via ExtendReg).  Handles the
+ * CONSTRAINED UNPREDICTABLE overlap of base and transfer registers under
+ * writeback, SP alignment checking when n == 31, pre-/post-indexed
+ * addressing, and optional base writeback.  'datasize is the transfer size
+ * in bits, 'regsize the destination register width for loads. */
+val aarch64_memory_single_general_register : (AccType, int, ExtendType, int, MemOp, int, bool, int, int, bool, int, bool) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+function aarch64_memory_single_general_register (acctype, 'datasize, extend_type, 'm, memop, 'n, postindex, 'regsize, 'shift, signed, 't, wback__arg) = {
+  assert(constraint('regsize >= 0), "regsize constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  wback = wback__arg;
+  offset : bits(64) = ExtendReg(m, extend_type, shift);
+  address : bits(64) = undefined;
+  data : bits('datasize) = undefined;
+  wb_unknown : bool = false;
+  rt_unknown : bool = false;
+  c : Constraint = undefined;
+  /* Load with writeback where base == transfer register (and base is not
+     SP): CONSTRAINED UNPREDICTABLE. */
+  if ((memop == MemOp_LOAD & wback) & n == t) & n != 31 then {
+    c = ConstrainUnpredictable(Unpredictable_WBOVERLAPLD);
+    assert(c == Constraint_WBSUPPRESS | c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_WBSUPPRESS) || ((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+    match c {
+      Constraint_WBSUPPRESS => wback = false,
+      Constraint_UNKNOWN => wb_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  /* Same overlap case for stores: stored data may be UNKNOWN. */
+  if ((memop == MemOp_STORE & wback) & n == t) & n != 31 then {
+    c = ConstrainUnpredictable(Unpredictable_WBOVERLAPST);
+    assert(c == Constraint_NONE | c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_NONE) || ((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+    match c {
+      Constraint_NONE => rt_unknown = false,
+      Constraint_UNKNOWN => rt_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  /* n == 31 means the stack pointer, which must be checked for alignment
+     (except for prefetch). */
+  if n == 31 then {
+    if memop != MemOp_PREFETCH then CheckSPAlignment() else ();
+    address = aget_SP()
+  } else address = aget_X(n);
+  /* Pre-indexed (or plain offset) forms add the offset before the access. */
+  if ~(postindex) then address = address + offset else ();
+  match memop {
+    MemOp_STORE => {
+      if rt_unknown then data = undefined else data = aget_X(t);
+      aset_Mem(address, datasize / 8, acctype, data)
+    },
+    MemOp_LOAD => {
+      data = aget_Mem(address, datasize / 8, acctype);
+      if signed then aset_X(t, SignExtend(data, regsize)) else aset_X(t, ZeroExtend(data, regsize))
+    },
+    MemOp_PREFETCH => Prefetch(address, __GetSlice_int(5, t, 0))
+  };
+  /* Writeback: post-indexed forms add the offset here instead. */
+  if wback then {
+    if wb_unknown then address = undefined else if postindex then address = address + offset else ();
+    if n == 31 then aset_SP(address) else aset_X(n, address)
+  } else ()
+}
+
+/* Single general-register load/store/prefetch with an unsigned immediate
+ * offset (already zero-extended to 64 bits by the decoder).  Same structure
+ * as the register-offset form: CONSTRAINED UNPREDICTABLE writeback overlap
+ * handling, SP alignment when n == 31, pre-/post-indexing, optional
+ * writeback. */
+val aarch64_memory_single_general_immediate_unsigned : (AccType, int, MemOp, int, bits(64), bool, int, bool, int, bool) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+function aarch64_memory_single_general_immediate_unsigned (acctype, 'datasize, memop, 'n, offset, postindex, 'regsize, signed, 't, wback__arg) = {
+  assert(constraint('regsize >= 0), "regsize constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  wback = wback__arg;
+  address : bits(64) = undefined;
+  data : bits('datasize) = undefined;
+  wb_unknown : bool = false;
+  rt_unknown : bool = false;
+  c : Constraint = undefined;
+  /* CONSTRAINED UNPREDICTABLE: load with writeback and base == transfer reg. */
+  if ((memop == MemOp_LOAD & wback) & n == t) & n != 31 then {
+    c = ConstrainUnpredictable(Unpredictable_WBOVERLAPLD);
+    assert(c == Constraint_WBSUPPRESS | c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_WBSUPPRESS) || ((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+    match c {
+      Constraint_WBSUPPRESS => wback = false,
+      Constraint_UNKNOWN => wb_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  /* CONSTRAINED UNPREDICTABLE: store with writeback and base == transfer reg. */
+  if ((memop == MemOp_STORE & wback) & n == t) & n != 31 then {
+    c = ConstrainUnpredictable(Unpredictable_WBOVERLAPST);
+    assert(c == Constraint_NONE | c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_NONE) || ((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+    match c {
+      Constraint_NONE => rt_unknown = false,
+      Constraint_UNKNOWN => rt_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  /* Base register 31 is SP; check alignment except for prefetch. */
+  if n == 31 then {
+    if memop != MemOp_PREFETCH then CheckSPAlignment() else ();
+    address = aget_SP()
+  } else address = aget_X(n);
+  if ~(postindex) then address = address + offset else ();
+  match memop {
+    MemOp_STORE => {
+      if rt_unknown then data = undefined else data = aget_X(t);
+      aset_Mem(address, datasize / 8, acctype, data)
+    },
+    MemOp_LOAD => {
+      data = aget_Mem(address, datasize / 8, acctype);
+      if signed then aset_X(t, SignExtend(data, regsize)) else aset_X(t, ZeroExtend(data, regsize))
+    },
+    MemOp_PREFETCH => Prefetch(address, __GetSlice_int(5, t, 0))
+  };
+  /* Post-indexed forms apply the offset during writeback. */
+  if wback then {
+    if wb_unknown then address = undefined else if postindex then address = address + offset else ();
+    if n == 31 then aset_SP(address) else aset_X(n, address)
+  } else ()
+}
+
+/* Single general-register load/store/prefetch with a signed immediate offset,
+ * post-indexed (and pre-indexed writeback) encodings.  Body is identical in
+ * structure to the other immediate forms: CONSTRAINED UNPREDICTABLE writeback
+ * overlap handling, SP alignment when n == 31, pre-/post-indexing, optional
+ * writeback. */
+val aarch64_memory_single_general_immediate_signed_postidx : (AccType, int, MemOp, int, bits(64), bool, int, bool, int, bool) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+function aarch64_memory_single_general_immediate_signed_postidx (acctype, 'datasize, memop, 'n, offset, postindex, 'regsize, signed, 't, wback__arg) = {
+  assert(constraint('regsize >= 0), "regsize constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  wback = wback__arg;
+  address : bits(64) = undefined;
+  data : bits('datasize) = undefined;
+  wb_unknown : bool = false;
+  rt_unknown : bool = false;
+  c : Constraint = undefined;
+  /* CONSTRAINED UNPREDICTABLE: load with writeback and base == transfer reg. */
+  if ((memop == MemOp_LOAD & wback) & n == t) & n != 31 then {
+    c = ConstrainUnpredictable(Unpredictable_WBOVERLAPLD);
+    assert(c == Constraint_WBSUPPRESS | c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_WBSUPPRESS) || ((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+    match c {
+      Constraint_WBSUPPRESS => wback = false,
+      Constraint_UNKNOWN => wb_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  /* CONSTRAINED UNPREDICTABLE: store with writeback and base == transfer reg. */
+  if ((memop == MemOp_STORE & wback) & n == t) & n != 31 then {
+    c = ConstrainUnpredictable(Unpredictable_WBOVERLAPST);
+    assert(c == Constraint_NONE | c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_NONE) || ((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+    match c {
+      Constraint_NONE => rt_unknown = false,
+      Constraint_UNKNOWN => rt_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  /* Base register 31 is SP; check alignment except for prefetch. */
+  if n == 31 then {
+    if memop != MemOp_PREFETCH then CheckSPAlignment() else ();
+    address = aget_SP()
+  } else address = aget_X(n);
+  if ~(postindex) then address = address + offset else ();
+  match memop {
+    MemOp_STORE => {
+      if rt_unknown then data = undefined else data = aget_X(t);
+      aset_Mem(address, datasize / 8, acctype, data)
+    },
+    MemOp_LOAD => {
+      data = aget_Mem(address, datasize / 8, acctype);
+      if signed then aset_X(t, SignExtend(data, regsize)) else aset_X(t, ZeroExtend(data, regsize))
+    },
+    MemOp_PREFETCH => Prefetch(address, __GetSlice_int(5, t, 0))
+  };
+  /* Post-indexed forms apply the offset during writeback. */
+  if wback then {
+    if wb_unknown then address = undefined else if postindex then address = address + offset else ();
+    if n == 31 then aset_SP(address) else aset_X(n, address)
+  } else ()
+}
+
+/* Pointer-authenticated 64-bit load (LDRAA/LDRAB family): the base address
+ * is authenticated with data key A or B (modifier X31 / zero register) before
+ * the immediate offset is applied; always an 8-byte AccType_NORMAL load into
+ * X[t], with optional base writeback of the computed address. */
+val aarch64_memory_single_general_immediate_signed_pac : (int, bits(64), int, bool, bool) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+function aarch64_memory_single_general_immediate_signed_pac ('n, offset, 't, use_key_a, wback__arg) = {
+  wback = wback__arg;
+  address : bits(64) = undefined;
+  data : bits(64) = undefined;
+  wb_unknown : bool = false;
+  c : Constraint = undefined;
+  /* CONSTRAINED UNPREDICTABLE: writeback with base == destination register. */
+  if (wback & n == t) & n != 31 then {
+    c = ConstrainUnpredictable(Unpredictable_WBOVERLAPLD);
+    assert(c == Constraint_WBSUPPRESS | c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_WBSUPPRESS) || ((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+    match c {
+      Constraint_WBSUPPRESS => wback = false,
+      Constraint_UNKNOWN => wb_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  if n == 31 then {
+    CheckSPAlignment();
+    address = aget_SP()
+  } else address = aget_X(n);
+  /* Authenticate the base pointer before adding the offset. */
+  if use_key_a then address = AuthDA(address, aget_X(31)) else address = AuthDB(address, aget_X(31));
+  address = address + offset;
+  data = aget_Mem(address, 8, AccType_NORMAL);
+  aset_X(t, data);
+  if wback then {
+    if wb_unknown then address = undefined else ();
+    if n == 31 then aset_SP(address) else aset_X(n, address)
+  } else ()
+}
+
+/* Single general-register load/store/prefetch, signed immediate offset,
+ * unprivileged-access encodings (the acctype argument carries the
+ * unprivileged access type).  Structure matches the other single-register
+ * immediate forms. */
+val aarch64_memory_single_general_immediate_signed_offset_unpriv : (AccType, int, MemOp, int, bits(64), bool, int, bool, int, bool) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+function aarch64_memory_single_general_immediate_signed_offset_unpriv (acctype, 'datasize, memop, 'n, offset, postindex, 'regsize, signed, 't, wback__arg) = {
+  assert(constraint('regsize >= 0), "regsize constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  wback = wback__arg;
+  address : bits(64) = undefined;
+  data : bits('datasize) = undefined;
+  wb_unknown : bool = false;
+  rt_unknown : bool = false;
+  c : Constraint = undefined;
+  /* CONSTRAINED UNPREDICTABLE: load with writeback and base == transfer reg. */
+  if ((memop == MemOp_LOAD & wback) & n == t) & n != 31 then {
+    c = ConstrainUnpredictable(Unpredictable_WBOVERLAPLD);
+    assert(c == Constraint_WBSUPPRESS | c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_WBSUPPRESS) || ((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+    match c {
+      Constraint_WBSUPPRESS => wback = false,
+      Constraint_UNKNOWN => wb_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  /* CONSTRAINED UNPREDICTABLE: store with writeback and base == transfer reg. */
+  if ((memop == MemOp_STORE & wback) & n == t) & n != 31 then {
+    c = ConstrainUnpredictable(Unpredictable_WBOVERLAPST);
+    assert(c == Constraint_NONE | c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_NONE) || ((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+    match c {
+      Constraint_NONE => rt_unknown = false,
+      Constraint_UNKNOWN => rt_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  /* Base register 31 is SP; check alignment except for prefetch. */
+  if n == 31 then {
+    if memop != MemOp_PREFETCH then CheckSPAlignment() else ();
+    address = aget_SP()
+  } else address = aget_X(n);
+  if ~(postindex) then address = address + offset else ();
+  match memop {
+    MemOp_STORE => {
+      if rt_unknown then data = undefined else data = aget_X(t);
+      aset_Mem(address, datasize / 8, acctype, data)
+    },
+    MemOp_LOAD => {
+      data = aget_Mem(address, datasize / 8, acctype);
+      if signed then aset_X(t, SignExtend(data, regsize)) else aset_X(t, ZeroExtend(data, regsize))
+    },
+    MemOp_PREFETCH => Prefetch(address, __GetSlice_int(5, t, 0))
+  };
+  /* Post-indexed forms apply the offset during writeback. */
+  if wback then {
+    if wb_unknown then address = undefined else if postindex then address = address + offset else ();
+    if n == 31 then aset_SP(address) else aset_X(n, address)
+  } else ()
+}
+
+/* Single general-register load/store/prefetch, signed immediate offset,
+ * normal (privilege-of-current-EL) access encodings.  Structure matches the
+ * other single-register immediate forms. */
+val aarch64_memory_single_general_immediate_signed_offset_normal : (AccType, int, MemOp, int, bits(64), bool, int, bool, int, bool) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+function aarch64_memory_single_general_immediate_signed_offset_normal (acctype, 'datasize, memop, 'n, offset, postindex, 'regsize, signed, 't, wback__arg) = {
+  assert(constraint('regsize >= 0), "regsize constraint");
+  let 'dbytes = ex_int(datasize / 8);
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  wback = wback__arg;
+  address : bits(64) = undefined;
+  data : bits('datasize) = undefined;
+  wb_unknown : bool = false;
+  rt_unknown : bool = false;
+  c : Constraint = undefined;
+  /* CONSTRAINED UNPREDICTABLE: load with writeback and base == transfer reg. */
+  if ((memop == MemOp_LOAD & wback) & n == t) & n != 31 then {
+    c = ConstrainUnpredictable(Unpredictable_WBOVERLAPLD);
+    assert(c == Constraint_WBSUPPRESS | c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_WBSUPPRESS) || ((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+    match c {
+      Constraint_WBSUPPRESS => wback = false,
+      Constraint_UNKNOWN => wb_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  /* CONSTRAINED UNPREDICTABLE: store with writeback and base == transfer reg. */
+  if ((memop == MemOp_STORE & wback) & n == t) & n != 31 then {
+    c = ConstrainUnpredictable(Unpredictable_WBOVERLAPST);
+    assert(c == Constraint_NONE | c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_NONE) || ((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+    match c {
+      Constraint_NONE => rt_unknown = false,
+      Constraint_UNKNOWN => rt_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  /* Base register 31 is SP; check alignment except for prefetch. */
+  if n == 31 then {
+    if memop != MemOp_PREFETCH then CheckSPAlignment() else ();
+    address = aget_SP()
+  } else address = aget_X(n);
+  if ~(postindex) then address = address + offset else ();
+  match memop {
+    MemOp_STORE => {
+      if rt_unknown then data = undefined else data = aget_X(t);
+      aset_Mem(address, datasize / 8, acctype, data)
+    },
+    MemOp_LOAD => {
+      data = aget_Mem(address, datasize / 8, acctype);
+      if signed then aset_X(t, SignExtend(data, regsize)) else aset_X(t, ZeroExtend(data, regsize))
+    },
+    MemOp_PREFETCH => Prefetch(address, __GetSlice_int(5, t, 0))
+  };
+  /* Post-indexed forms apply the offset during writeback. */
+  if wback then {
+    if wb_unknown then address = undefined else if postindex then address = address + offset else ();
+    if n == 31 then aset_SP(address) else aset_X(n, address)
+  } else ()
+}
+
+/* Load/store pair of SIMD&FP registers (t, t2), pre-/post-indexed and
+ * writeback encodings: checks FP/SIMD access, handles the CONSTRAINED
+ * UNPREDICTABLE t == t2 load overlap, accesses two consecutive dbytes-sized
+ * elements at address and address+dbytes, and optionally writes back the
+ * base. */
+val aarch64_memory_pair_simdfp_postidx : forall ('datasize : Int).
+  (AccType, atom('datasize), MemOp, int, bits(64), bool, int, int, bool) -> unit effect {escape, undef, rreg, wreg, rmem, wmem}
+
+function aarch64_memory_pair_simdfp_postidx (acctype, datasize, memop, n, offset, postindex, t, t2, wback) = let 'dbytes = ex_int(datasize / 8) in {
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  address : bits(64) = undefined;
+  data1 : bits('datasize) = undefined;
+  data2 : bits('datasize) = undefined;
+  rt_unknown : bool = false;
+  /* CONSTRAINED UNPREDICTABLE: load pair with identical registers. */
+  if memop == MemOp_LOAD & t == t2 then {
+    c : Constraint = ConstrainUnpredictable(Unpredictable_LDPOVERLAP);
+    assert(c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP)))");
+    match c {
+      Constraint_UNKNOWN => rt_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  if n == 31 then {
+    CheckSPAlignment();
+    address = aget_SP()
+  } else address = aget_X(n);
+  /* Pre-indexed forms apply the offset before the accesses. */
+  if ~(postindex) then address = address + offset
+  else ();
+  match memop {
+    MemOp_STORE => {
+      data1 = aget_V(t);
+      data2 = aget_V(t2);
+      aset_Mem(address + 0, dbytes, acctype, data1);
+      aset_Mem(address + dbytes, dbytes, acctype, data2)
+    },
+    MemOp_LOAD => {
+      data1 = aget_Mem(address + 0, dbytes, acctype);
+      data2 = aget_Mem(address + dbytes, dbytes, acctype);
+      if rt_unknown then {
+        data1 = undefined;
+        data2 = undefined
+      } else ();
+      aset_V(t, data1);
+      aset_V(t2, data2)
+    }
+  };
+  /* Post-indexed forms apply the offset during writeback. */
+  if wback then {
+    if postindex then address = address + offset
+    else ();
+    if n == 31 then aset_SP(address) else aset_X(n, address)
+  } else ()
+}
+
+/* Load/store pair of SIMD&FP registers, non-temporal ("no-allocate hint")
+ * encodings: same body as the post-indexed form, with the streaming access
+ * type supplied by the decoder via `acctype`. */
+val aarch64_memory_pair_simdfp_noalloc : forall ('datasize : Int).
+  (AccType, atom('datasize), MemOp, int, bits(64), bool, int, int, bool) -> unit effect {escape, undef, rreg, wreg, rmem, wmem}
+
+function aarch64_memory_pair_simdfp_noalloc (acctype, datasize, memop, n, offset, postindex, t, t2, wback) = let 'dbytes = ex_int(datasize / 8) in {
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  CheckFPAdvSIMDEnabled64();
+  address : bits(64) = undefined;
+  data1 : bits('datasize) = undefined;
+  data2 : bits('datasize) = undefined;
+  rt_unknown : bool = false;
+  /* CONSTRAINED UNPREDICTABLE: load pair with identical registers. */
+  if memop == MemOp_LOAD & t == t2 then {
+    c : Constraint = ConstrainUnpredictable(Unpredictable_LDPOVERLAP);
+    assert(c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP)))");
+    match c {
+      Constraint_UNKNOWN => rt_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  if n == 31 then {
+    CheckSPAlignment();
+    address = aget_SP()
+  } else address = aget_X(n);
+  if ~(postindex) then address = address + offset
+  else ();
+  match memop {
+    MemOp_STORE => {
+      data1 = aget_V(t);
+      data2 = aget_V(t2);
+      aset_Mem(address + 0, dbytes, acctype, data1);
+      aset_Mem(address + dbytes, dbytes, acctype, data2)
+    },
+    MemOp_LOAD => {
+      data1 = aget_Mem(address + 0, dbytes, acctype);
+      data2 = aget_Mem(address + dbytes, dbytes, acctype);
+      if rt_unknown then {
+        data1 = undefined;
+        data2 = undefined
+      } else ();
+      aset_V(t, data1);
+      aset_V(t2, data2)
+    }
+  };
+  if wback then {
+    if postindex then address = address + offset
+    else ();
+    if n == 31 then aset_SP(address) else aset_X(n, address)
+  } else ()
+}
+
+/* Load/store pair of general registers (t, t2), pre-/post-indexed and
+ * writeback encodings.  Handles three CONSTRAINED UNPREDICTABLE cases
+ * (writeback base overlapping a load/store transfer register, and load pair
+ * with t == t2), accesses two consecutive dbytes-sized elements, optionally
+ * sign-extending loaded values to 64 bits, with optional base writeback. */
+val aarch64_memory_pair_general_postidx : forall ('datasize : Int).
+  (AccType, atom('datasize), MemOp, int, bits(64), bool, bool, int, int, bool) -> unit effect {escape, undef, rreg, wreg, rmem, wmem}
+
+function aarch64_memory_pair_general_postidx (acctype, datasize, memop, n, offset, postindex, signed, t, t2, wback__arg) = let 'dbytes = ex_int(datasize / 8) in {
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  wback = wback__arg;
+  address : bits(64) = undefined;
+  data1 : bits('datasize) = undefined;
+  data2 : bits('datasize) = undefined;
+  rt_unknown : bool = false;
+  wb_unknown : bool = false;
+  /* CONSTRAINED UNPREDICTABLE: load pair with writeback where the base
+     overlaps either destination register (and the base is not SP). */
+  if ((memop == MemOp_LOAD & wback) & (t == n | t2 == n)) & n != 31 then {
+    c : Constraint = ConstrainUnpredictable(Unpredictable_WBOVERLAPLD);
+    assert(c == Constraint_WBSUPPRESS | c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_WBSUPPRESS) || ((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+    match c {
+      Constraint_WBSUPPRESS => wback = false,
+      Constraint_UNKNOWN => wb_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  /* CONSTRAINED UNPREDICTABLE: store pair with writeback where the base
+     overlaps either source register. */
+  if ((memop == MemOp_STORE & wback) & (t == n | t2 == n)) & n != 31 then {
+    c : Constraint = ConstrainUnpredictable(Unpredictable_WBOVERLAPST);
+    assert(c == Constraint_NONE | c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_NONE) || ((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+    match c {
+      Constraint_NONE => rt_unknown = false,
+      Constraint_UNKNOWN => rt_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  /* CONSTRAINED UNPREDICTABLE: load pair with identical registers. */
+  if memop == MemOp_LOAD & t == t2 then {
+    c : Constraint = ConstrainUnpredictable(Unpredictable_LDPOVERLAP);
+    assert(c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP)))");
+    match c {
+      Constraint_UNKNOWN => rt_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  if n == 31 then {
+    CheckSPAlignment();
+    address = aget_SP()
+  } else address = aget_X(n);
+  if ~(postindex) then address = address + offset
+  else ();
+  match memop {
+    MemOp_STORE => {
+      /* Only the register that actually overlaps the base has UNKNOWN data. */
+      if rt_unknown & t == n then data1 = undefined
+      else data1 = aget_X(t);
+      if rt_unknown & t2 == n then data2 = undefined
+      else data2 = aget_X(t2);
+      aset_Mem(address + 0, dbytes, acctype, data1);
+      aset_Mem(address + dbytes, dbytes, acctype, data2)
+    },
+    MemOp_LOAD => {
+      data1 = aget_Mem(address + 0, dbytes, acctype);
+      data2 = aget_Mem(address + dbytes, dbytes, acctype);
+      if rt_unknown then {
+        data1 = undefined;
+        data2 = undefined
+      } else ();
+      if signed then {
+        aset_X(t, SignExtend(data1, 64));
+        aset_X(t2, SignExtend(data2, 64))
+      } else {
+        aset_X(t, data1);
+        aset_X(t2, data2)
+      }
+    }
+  };
+  if wback then {
+    if wb_unknown then address = undefined
+    else if postindex then address = address + offset
+    else ();
+    if n == 31 then aset_SP(address) else aset_X(n, address)
+  } else ()
+}
+
+/* Load/store pair of general registers, non-temporal ("no-allocate hint")
+ * encodings: no writeback overlap cases to consider (wback is only used for
+ * the final writeback), only the t == t2 load-pair CONSTRAINED UNPREDICTABLE
+ * case; loads are not sign-extended in this form. */
+val aarch64_memory_pair_general_noalloc : forall ('datasize : Int).
+  (AccType, atom('datasize), MemOp, int, bits(64), bool, int, int, bool) -> unit effect {escape, undef, rreg, wreg, rmem, wmem}
+
+function aarch64_memory_pair_general_noalloc (acctype, datasize, memop, n, offset, postindex, t, t2, wback) = let 'dbytes = ex_int(datasize / 8) in {
+  assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+  assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+  address : bits(64) = undefined;
+  data1 : bits('datasize) = undefined;
+  data2 : bits('datasize) = undefined;
+  rt_unknown : bool = false;
+  /* CONSTRAINED UNPREDICTABLE: load pair with identical registers. */
+  if memop == MemOp_LOAD & t == t2 then {
+    c : Constraint = ConstrainUnpredictable(Unpredictable_LDPOVERLAP);
+    assert(c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP)))");
+    match c {
+      Constraint_UNKNOWN => rt_unknown = true,
+      Constraint_UNDEF => UnallocatedEncoding(),
+      Constraint_NOP => EndOfInstruction()
+    }
+  } else ();
+  if n == 31 then {
+    CheckSPAlignment();
+    address = aget_SP()
+  } else address = aget_X(n);
+  if ~(postindex) then address = address + offset
+  else ();
+  match memop {
+    MemOp_STORE => {
+      if rt_unknown & t == n then data1 = undefined
+      else data1 = aget_X(t);
+      if rt_unknown & t2 == n then data2 = undefined
+      else data2 = aget_X(t2);
+      aset_Mem(address + 0, dbytes, acctype, data1);
+      aset_Mem(address + dbytes, dbytes, acctype, data2)
+    },
+    MemOp_LOAD => {
+      data1 = aget_Mem(address + 0, dbytes, acctype);
+      data2 = aget_Mem(address + dbytes, dbytes, acctype);
+      if rt_unknown then {
+        data1 = undefined;
+        data2 = undefined
+      } else ();
+      aset_X(t, data1);
+      aset_X(t2, data2)
+    }
+  };
+  if wback then {
+    if postindex then address = address + offset
+    else ();
+    if n == 31 then aset_SP(address) else aset_X(n, address)
+  } else ()
+}
+
+/* Shared execute body for the exclusive load/store instructions
+   (load/store-exclusive register; also handles the pair forms when
+   called with pair == true).  Computes the effective address from Xn
+   (or SP when n == 31), performs the dbytes-wide access, and for
+   stores writes the exclusive-monitor status to register s. */
+val aarch64_memory_exclusive_single : forall ('datasize : Int) 'elsize ('regsize : Int).
+ (AccType, atom('datasize), atom('elsize), MemOp, int, bool, atom('regsize), int, int, int) -> unit effect {escape, undef, rreg, wreg, rmem, wmem}
+
+function aarch64_memory_exclusive_single (acctype, datasize, elsize, memop, n, pair, regsize, s, t, t2) = {
+ /* NOTE(review): the message says "destsize" but the constraint checked
+    is on 'regsize -- apparent generated-text mismatch, harmless. */
+ assert(constraint('regsize >= 0), "destsize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(- 'elsize + 'datasize >= 0 & 'elsize >= 0));
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ address : bits(64) = undefined;
+ data : bits('datasize) = undefined;
+ rt_unknown : bool = false;
+ rn_unknown : bool = false;
+ /* A load-exclusive pair with Rt == Rt2 is CONSTRAINED UNPREDICTABLE. */
+ if (memop == MemOp_LOAD & pair) & t == t2 then {
+ c : Constraint = ConstrainUnpredictable(Unpredictable_LDPOVERLAP);
+ assert(c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP)))");
+ match c {
+ Constraint_UNKNOWN => rt_unknown = true,
+ Constraint_UNDEF => UnallocatedEncoding(),
+ Constraint_NOP => EndOfInstruction()
+ }
+ } else ();
+ /* Store-exclusive: the status register overlapping a data register or
+    the base register is CONSTRAINED UNPREDICTABLE. */
+ if memop == MemOp_STORE then {
+ if s == t | pair & s == t2 then {
+ c : Constraint = ConstrainUnpredictable(Unpredictable_DATAOVERLAP);
+ assert(c == Constraint_UNKNOWN | c == Constraint_NONE | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_NONE) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+ match c {
+ Constraint_UNKNOWN => rt_unknown = true,
+ Constraint_NONE => rt_unknown = false,
+ Constraint_UNDEF => UnallocatedEncoding(),
+ Constraint_NOP => EndOfInstruction()
+ }
+ } else ();
+ if s == n & n != 31 then {
+ c : Constraint = ConstrainUnpredictable(Unpredictable_BASEOVERLAP);
+ assert(c == Constraint_UNKNOWN | c == Constraint_NONE | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_NONE) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+ match c {
+ Constraint_UNKNOWN => rn_unknown = true,
+ Constraint_NONE => rn_unknown = false,
+ Constraint_UNDEF => UnallocatedEncoding(),
+ Constraint_NOP => EndOfInstruction()
+ }
+ } else ()
+ } else ();
+ /* n == 31 selects the stack pointer, which needs an alignment check. */
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else if rn_unknown then address = undefined
+ else address = aget_X(n);
+ secondstage : bool = undefined;
+ iswrite : bool = undefined;
+ match memop {
+ MemOp_STORE => {
+ if rt_unknown then data = undefined
+ else if pair then let 'v = ex_int(datasize / 2) in {
+ assert(constraint(2 * 'v = 'datasize));
+ /* Pair store: element order in memory depends on endianness. */
+ el1 : bits('v) = aget_X(t);
+ el2 : bits('v) = aget_X(t2);
+ data = if BigEndian() then el1 @ el2 else el2 @ el1
+ } else data = aget_X(t);
+ /* status defaults to 0b1 and is replaced by the monitor status only
+    when the exclusive-monitors check passes. */
+ status : bits(1) = 0b1;
+ if AArch64_ExclusiveMonitorsPass(address, dbytes) then {
+ aset_Mem(address, dbytes, acctype, data);
+ status = ExclusiveMonitorsStatus()
+ } else ();
+ aset_X(s, ZeroExtend(status, 32))
+ },
+ MemOp_LOAD => {
+ AArch64_SetExclusiveMonitors(address, dbytes);
+ if pair then
+ /* NOTE(review): in the rt_unknown pair case only Xt is written (as a
+    32-bit UNKNOWN) and Xt2 is left untouched -- confirm against ASL. */
+ if rt_unknown then aset_X(t, undefined : bits(32)) else if elsize == 32 then {
+ data = aget_Mem(address, dbytes, acctype);
+ if BigEndian() then {
+ aset_X(t, slice(data, elsize, negate(elsize) + datasize));
+ aset_X(t2, slice(data, 0, elsize))
+ } else {
+ aset_X(t, slice(data, 0, elsize));
+ aset_X(t2, slice(data, elsize, negate(elsize) + datasize))
+ }
+ } else {
+ /* Non-32-bit pair load: two single-copy-atomic 8-byte reads, which
+    require the address to be naturally aligned. */
+ if address != Align(address, dbytes) then {
+ iswrite = false;
+ secondstage = false;
+ AArch64_Abort(address, AArch64_AlignmentFault(acctype, iswrite, secondstage))
+ } else ();
+ aset_X(t, aget_Mem(address + 0, 8, acctype));
+ aset_X(t2, aget_Mem(address + 8, 8, acctype))
+ }
+ else {
+ data = aget_Mem(address, dbytes, acctype);
+ aset_X(t, ZeroExtend(data, regsize))
+ }
+ }
+ }
+}
+
+val memory_exclusive_single_decode : (bits(2), bits(1), bits(1), bits(1), bits(5), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decoder for the single-register exclusive load/store encodings:
+   extracts the register numbers and access width from the instruction
+   fields and dispatches to the shared execute body. */
+function memory_exclusive_single_decode (size, o2, L, o1, Rs, o0, Rt2, Rn, Rt) = {
+ __unconditional = true;
+ let t : int = UInt(Rt);
+ let t2 : int = UInt(Rt2);
+ let n : int = UInt(Rn);
+ let s : int = UInt(Rs);
+ /* o0 = 1 selects the acquire/release (ordered) variants. */
+ let acctype : AccType = if o0 == 0b1 then AccType_ORDERED else AccType_ATOMIC;
+ /* L = 1 is a load-exclusive, L = 0 a store-exclusive. */
+ let memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ let pair : bool = false;
+ let elsize : int = shl_int(8, UInt(size));
+ let regsize : int = if elsize == 64 then 64 else 32;
+ let datasize : int = if pair then elsize * 2 else elsize;
+ aarch64_memory_exclusive_single(acctype, datasize, elsize, memop, n, pair, regsize, s, t, t2)
+}
+
+/* Execute body for the exclusive load/store pair instructions.
+   NOTE(review): this body is a verbatim duplicate of
+   aarch64_memory_exclusive_single above (generated code); only the
+   val-spec quantifier order and assert messages differ. */
+val aarch64_memory_exclusive_pair : forall ('datasize : Int) ('regsize : Int) ('elsize : Int).
+ (AccType, atom('datasize), atom('elsize), MemOp, int, bool, atom('regsize), int, int, int) -> unit effect {escape, undef, rreg, wreg, rmem, wmem}
+
+function aarch64_memory_exclusive_pair (acctype, datasize, elsize, memop, n, pair, regsize, s, t, t2) = {
+ assert(constraint('regsize >= 0), "regsize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(- 'elsize + 'datasize >= 0 & 'elsize >= 0), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ address : bits(64) = undefined;
+ data : bits('datasize) = undefined;
+ rt_unknown : bool = false;
+ rn_unknown : bool = false;
+ /* Load-exclusive pair with Rt == Rt2 is CONSTRAINED UNPREDICTABLE. */
+ if (memop == MemOp_LOAD & pair) & t == t2 then {
+ c : Constraint = ConstrainUnpredictable(Unpredictable_LDPOVERLAP);
+ assert(c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP)))");
+ match c {
+ Constraint_UNKNOWN => rt_unknown = true,
+ Constraint_UNDEF => UnallocatedEncoding(),
+ Constraint_NOP => EndOfInstruction()
+ }
+ } else ();
+ /* Store-exclusive: status register overlapping data/base registers is
+    CONSTRAINED UNPREDICTABLE. */
+ if memop == MemOp_STORE then {
+ if s == t | pair & s == t2 then {
+ c : Constraint = ConstrainUnpredictable(Unpredictable_DATAOVERLAP);
+ assert(c == Constraint_UNKNOWN | c == Constraint_NONE | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_NONE) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+ match c {
+ Constraint_UNKNOWN => rt_unknown = true,
+ Constraint_NONE => rt_unknown = false,
+ Constraint_UNDEF => UnallocatedEncoding(),
+ Constraint_NOP => EndOfInstruction()
+ }
+ } else ();
+ if s == n & n != 31 then {
+ c : Constraint = ConstrainUnpredictable(Unpredictable_BASEOVERLAP);
+ assert(c == Constraint_UNKNOWN | c == Constraint_NONE | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_NONE) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+ match c {
+ Constraint_UNKNOWN => rn_unknown = true,
+ Constraint_NONE => rn_unknown = false,
+ Constraint_UNDEF => UnallocatedEncoding(),
+ Constraint_NOP => EndOfInstruction()
+ }
+ } else ()
+ } else ();
+ /* n == 31 selects the stack pointer, which needs an alignment check. */
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else if rn_unknown then address = undefined
+ else address = aget_X(n);
+ secondstage : bool = undefined;
+ iswrite : bool = undefined;
+ match memop {
+ MemOp_STORE => {
+ if rt_unknown then data = undefined
+ else if pair then let 'v = ex_int(datasize / 2) in {
+ assert(constraint(2 * 'v = 'datasize));
+ /* Pair store: element order in memory depends on endianness. */
+ el1 : bits('v) = aget_X(t);
+ el2 : bits('v) = aget_X(t2);
+ data = if BigEndian() then el1 @ el2 else el2 @ el1
+ } else data = aget_X(t);
+ /* status defaults to 0b1; replaced by the monitor status only when
+    the exclusive-monitors check passes. */
+ status : bits(1) = 0b1;
+ if AArch64_ExclusiveMonitorsPass(address, dbytes) then {
+ aset_Mem(address, dbytes, acctype, data);
+ status = ExclusiveMonitorsStatus()
+ } else ();
+ aset_X(s, ZeroExtend(status, 32))
+ },
+ MemOp_LOAD => {
+ AArch64_SetExclusiveMonitors(address, dbytes);
+ if pair then
+ /* NOTE(review): rt_unknown writes only Xt (32-bit UNKNOWN), leaving
+    Xt2 untouched -- confirm against ASL. */
+ if rt_unknown then aset_X(t, undefined : bits(32)) else if elsize == 32 then {
+ data = aget_Mem(address, dbytes, acctype);
+ if BigEndian() then {
+ aset_X(t, slice(data, elsize, negate(elsize) + datasize));
+ aset_X(t2, slice(data, 0, elsize))
+ } else {
+ aset_X(t, slice(data, 0, elsize));
+ aset_X(t2, slice(data, elsize, negate(elsize) + datasize))
+ }
+ } else {
+ /* Non-32-bit pair load: two single-copy-atomic 8-byte reads that
+    require natural alignment. */
+ if address != Align(address, dbytes) then {
+ iswrite = false;
+ secondstage = false;
+ AArch64_Abort(address, AArch64_AlignmentFault(acctype, iswrite, secondstage))
+ } else ();
+ aset_X(t, aget_Mem(address + 0, 8, acctype));
+ aset_X(t2, aget_Mem(address + 8, 8, acctype))
+ }
+ else {
+ data = aget_Mem(address, dbytes, acctype);
+ aset_X(t, ZeroExtend(data, regsize))
+ }
+ }
+ }
+}
+
+val memory_exclusive_pair_decode : (bits(1), bits(1), bits(1), bits(1), bits(5), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decoder for the exclusive load/store pair encodings: extracts the
+   register numbers and access width and dispatches to the pair
+   execute body.  Element size is 32 << sz, doubled for the pair. */
+function memory_exclusive_pair_decode (sz, o2, L, o1, Rs, o0, Rt2, Rn, Rt) = {
+ __unconditional = true;
+ let t : int = UInt(Rt);
+ let t2 : int = UInt(Rt2);
+ let n : int = UInt(Rn);
+ let s : int = UInt(Rs);
+ /* o0 = 1 selects the acquire/release (ordered) variants. */
+ let acctype : AccType = if o0 == 0b1 then AccType_ORDERED else AccType_ATOMIC;
+ /* L = 1 is a load-exclusive, L = 0 a store-exclusive. */
+ let memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ let pair : bool = true;
+ let elsize : int = shl_int(32, UInt(sz));
+ let regsize : int = if elsize == 64 then 64 else 32;
+ let datasize : int = if pair then elsize * 2 else elsize;
+ aarch64_memory_exclusive_pair(acctype, datasize, elsize, memop, n, pair, regsize, s, t, t2)
+}
+
+/* CRC32/CRC32C accumulation step: combines the 32-bit accumulator in
+   Xn with a 'size-bit value from Xm and writes the new CRC to Xd.
+   The bit reversals convert between register bit order and the
+   reflected order used by the polynomial division. */
+val aarch64_integer_crc : forall ('size : Int).
+ (bool, int, int, int, atom('size)) -> unit effect {escape, undef, rreg, wreg}
+
+function aarch64_integer_crc (crc32c, d, m, n, size) = {
+ assert(constraint('size >= 2));
+ if ~(HaveCRCExt()) then UnallocatedEncoding() else ();
+ acc : bits(32) = aget_X(n);
+ val_name : bits('size) = aget_X(m);
+ /* 517762881 = 0x1EDC6F41 (CRC-32C, Castagnoli polynomial);
+    79764919 = 0x04C11DB7 (CRC-32, IEEE 802.3 polynomial). */
+ poly : bits(32) = __GetSlice_int(32, if crc32c then 517762881 else 79764919, 0);
+ tempacc : bits('size + 32) = BitReverse(acc) @ Zeros(size);
+ tempval : bits('size + 32) = BitReverse(val_name) @ Zeros(32);
+ aset_X(d, BitReverse(Poly32Mod2(tempacc ^ tempval, poly)))
+}
+
+val vector_transfer_vector_insert_decode : (bits(1), bits(1), bits(5), bits(4), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode element insert (vector from vector): the element size is
+   selected by the lowest set bit of imm5; the source and destination
+   element indices come from the remaining bits of imm4/imm5. */
+function vector_transfer_vector_insert_decode (Q, op, imm5, imm4, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ let 'size : int = LowestSetBit(imm5);
+ /* imm5 with no set bit in [0..3] has no valid element size. */
+ if size > 3 then UnallocatedEncoding() else ();
+ assert('size <= 3);
+ dst_index : int = UInt(slice(imm5, size + 1, negate(size) + 4));
+ src_index : int = UInt(slice(imm4, size, negate(size) + 4));
+ idxdsize : int = if [imm4[3]] == 0b1 then 128 else 64;
+ esize : int = shl_int(8, size);
+ aarch64_vector_transfer_vector_insert(d, dst_index, esize, idxdsize, n, src_index)
+}
+
+val vector_transfer_vector_extract_decode : (bits(1), bits(2), bits(5), bits(4), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode vector extract: byte position is imm4 * 8 bits; a 64-bit
+   datasize (Q == 0) with imm4<3> set is unallocated. */
+function vector_transfer_vector_extract_decode (Q, op2, Rm, imm4, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if Q == 0b0 & [imm4[3]] == 0b1 then UnallocatedEncoding() else ();
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ position : int = shl_int(UInt(imm4), 3);
+ aarch64_vector_transfer_vector_extract(d, datasize, m, n, position)
+}
+
+val vector_transfer_vector_cpydup_sisd_decode : (bits(1), bits(5), bits(4), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode scalar copy/duplicate of one vector element: element size is
+   selected by the lowest set bit of imm5; scalar form, so datasize is
+   one element. */
+function vector_transfer_vector_cpydup_sisd_decode (op, imm5, imm4, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ let 'size : int = LowestSetBit(imm5);
+ if size > 3 then UnallocatedEncoding() else ();
+ assert('size <= 3);
+ index : int = UInt(slice(imm5, size + 1, negate(size) + 4));
+ idxdsize : int = if [imm5[4]] == 0b1 then 128 else 64;
+ esize : int = shl_int(8, size);
+ datasize : int = esize;
+ elements : int = 1;
+ aarch64_vector_transfer_vector_cpydup_sisd(d, datasize, elements, esize, idxdsize, index, n)
+}
+
+val vector_transfer_integer_move_unsigned_decode : (bits(1), bits(1), bits(5), bits(4), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode unsigned move (vector element to general register): the
+   element size is encoded by the position of the lowest set imm5 bit,
+   with the 64-bit form additionally requiring Q == 1. */
+function vector_transfer_integer_move_unsigned_decode (Q, op, imm5, imm4, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ size : int = undefined;
+ match Q @ imm5 {
+ [bitzero] @ _ : bits(1) @ _ : bits(1) @ _ : bits(1) @ _ : bits(1) @ [bitone] => size = 0,
+ [bitzero] @ _ : bits(1) @ _ : bits(1) @ _ : bits(1) @ [bitone] @ [bitzero] => size = 1,
+ [bitzero] @ _ : bits(1) @ _ : bits(1) @ [bitone] @ [bitzero] @ [bitzero] => size = 2,
+ [bitone] @ _ : bits(1) @ [bitone] @ [bitzero] @ [bitzero] @ [bitzero] => size = 3,
+ _ => UnallocatedEncoding()
+ };
+ let 'size2 = size;
+ /* NOTE(review): written as `size2` here but as `'size2` in the signed
+    variant below -- inconsistent generated output; both refer to the
+    same binding. */
+ assert(size2 <= 4);
+ idxdsize : int = if [imm5[4]] == 0b1 then 128 else 64;
+ index : int = UInt(slice(imm5, size + 1, negate(size2) + 4));
+ esize : int = shl_int(8, size2);
+ let 'datasize : {|64, 32|} = if Q == 0b1 then 64 else 32;
+ aarch64_vector_transfer_integer_move_unsigned(d, datasize, esize, idxdsize, index, n)
+}
+
+val vector_transfer_integer_move_signed_decode : (bits(1), bits(1), bits(5), bits(4), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode signed move (vector element to general register): element
+   size from the lowest set imm5 bit; the 32-bit element form requires
+   Q == 1 and 64-bit elements are not allocated. */
+function vector_transfer_integer_move_signed_decode (Q, op, imm5, imm4, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ size : int = undefined;
+ match Q @ imm5 {
+ _ : bits(1) @ _ : bits(1) @ _ : bits(1) @ _ : bits(1) @ _ : bits(1) @ [bitone] => size = 0,
+ _ : bits(1) @ _ : bits(1) @ _ : bits(1) @ _ : bits(1) @ [bitone] @ [bitzero] => size = 1,
+ [bitone] @ _ : bits(1) @ _ : bits(1) @ [bitone] @ [bitzero] @ [bitzero] => size = 2,
+ _ => UnallocatedEncoding()
+ };
+ let 'size2 = size;
+ assert('size2 <= 4);
+ idxdsize : int = if [imm5[4]] == 0b1 then 128 else 64;
+ index : int = UInt(slice(imm5, size + 1, negate(size2) + 4));
+ esize : int = shl_int(8, size2);
+ let 'datasize : {|64, 32|} = if Q == 0b1 then 64 else 32;
+ aarch64_vector_transfer_integer_move_signed(d, datasize, esize, idxdsize, index, n)
+}
+
+val vector_transfer_integer_insert_decode : (bits(1), bits(1), bits(5), bits(4), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode insert from a general register into a vector element:
+   element size from the lowest set imm5 bit; datasize is always the
+   full 128-bit vector. */
+function vector_transfer_integer_insert_decode (Q, op, imm5, imm4, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ let 'size : int = LowestSetBit(imm5);
+ if size > 3 then UnallocatedEncoding() else ();
+ assert('size <= 3);
+ index : int = UInt(slice(imm5, size + 1, negate(size) + 4));
+ esize : int = shl_int(8, size);
+ let 'datasize : {|128|} = 128;
+ aarch64_vector_transfer_integer_insert(d, datasize, esize, index, n)
+}
+
+val vector_reduce_fp16maxnm_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode half-precision maxNum/minNum vector reduction: gated on
+   HaveFP16Ext(); o1 selects FMINNUM vs FMAXNUM, Q selects a 64- or
+   128-bit datasize.  The element count computed by the original
+   decoder was never used by the execute body, so it has been removed. */
+function vector_reduce_fp16maxnm_simd_decode (Q, U, o1, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ op : ReduceOp = if o1 == 0b1 then ReduceOp_FMINNUM else ReduceOp_FMAXNUM;
+ aarch64_vector_reduce_fp16maxnm_simd(d, datasize, esize, n, op)
+}
+
+val vector_reduce_fp16max_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode half-precision max/min vector reduction: gated on
+   HaveFP16Ext(); o1 selects FMIN vs FMAX, Q selects a 64- or 128-bit
+   datasize.  The element count computed by the original decoder was
+   never used by the execute body, so it has been removed. */
+function vector_reduce_fp16max_simd_decode (Q, U, o1, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ op : ReduceOp = if o1 == 0b1 then ReduceOp_FMIN else ReduceOp_FMAX;
+ aarch64_vector_reduce_fp16max_simd(d, datasize, esize, n, op)
+}
+
+val vector_logical_decode : (bits(1), bits(1), bits(1), bits(1), bits(1), bits(4), bits(1), bits(1), bits(1), bits(1), bits(1), bits(1), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode vector logical/move immediate: the 5-bit cmode @ op value
+   selects the operation (MOVI/MVNI/ORR/BIC); the immediate bits
+   a..h are expanded to a 64-bit pattern and replicated across the
+   datasize.  The match below is exhaustive over all 32 encodings. */
+function vector_logical_decode (Q, op, a, b, c, cmode, o2, d, e, f, g, h, Rd) = {
+ __unconditional = true;
+ rd : int = UInt(Rd);
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ imm : bits('datasize) = undefined;
+ imm64 : bits(64) = undefined;
+ operation : ImmediateOp = undefined;
+ match cmode @ op {
+ [bitzero] @ _ : bits(1) @ _ : bits(1) @ [bitzero] @ [bitzero] => operation = ImmediateOp_MOVI,
+ [bitzero] @ _ : bits(1) @ _ : bits(1) @ [bitzero] @ [bitone] => operation = ImmediateOp_MVNI,
+ [bitzero] @ _ : bits(1) @ _ : bits(1) @ [bitone] @ [bitzero] => operation = ImmediateOp_ORR,
+ [bitzero] @ _ : bits(1) @ _ : bits(1) @ [bitone] @ [bitone] => operation = ImmediateOp_BIC,
+ [bitone] @ [bitzero] @ _ : bits(1) @ [bitzero] @ [bitzero] => operation = ImmediateOp_MOVI,
+ [bitone] @ [bitzero] @ _ : bits(1) @ [bitzero] @ [bitone] => operation = ImmediateOp_MVNI,
+ [bitone] @ [bitzero] @ _ : bits(1) @ [bitone] @ [bitzero] => operation = ImmediateOp_ORR,
+ [bitone] @ [bitzero] @ _ : bits(1) @ [bitone] @ [bitone] => operation = ImmediateOp_BIC,
+ [bitone] @ [bitone] @ [bitzero] @ _ : bits(1) @ [bitzero] => operation = ImmediateOp_MOVI,
+ [bitone] @ [bitone] @ [bitzero] @ _ : bits(1) @ [bitone] => operation = ImmediateOp_MVNI,
+ [bitone] @ [bitone] @ [bitone] @ [bitzero] @ _ : bits(1) => operation = ImmediateOp_MOVI,
+ 0b11110 => operation = ImmediateOp_MOVI,
+ 0b11111 => {
+ /* cmode = 1111, op = 1 (64-bit MOVI to vector) requires Q == 1. */
+ if Q == 0b0 then UnallocatedEncoding() else ();
+ operation = ImmediateOp_MOVI
+ }
+ };
+ imm64 = AdvSIMDExpandImm(op, cmode, ((((((a @ b) @ c) @ d) @ e) @ f) @ g) @ h);
+ let 'immsize = datasize / 64;
+ assert(constraint('immsize * 64 = 'datasize));
+ imm = replicate_bits(imm64, 'immsize);
+ aarch64_vector_logical(datasize, imm, operation, rd)
+}
+
+val vector_fp16_movi_decode : (bits(1), bits(1), bits(1), bits(1), bits(1), bits(4), bits(1), bits(1), bits(1), bits(1), bits(1), bits(1), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode FMOV (vector, half-precision immediate): gated on
+   HaveFP16Ext().  imm8 = a:b:c:d:e:f:g:h is expanded to a 16-bit
+   float (sign = imm8<7>, exponent from ~imm8<6> and two copies of
+   imm8<6>, fraction = imm8<5:0> zero-padded) and replicated across
+   the datasize. */
+function vector_fp16_movi_decode (Q, op, a, b, c, cmode, o2, d, e, f, g, h, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ rd : int = UInt(Rd);
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ imm : bits('datasize) = undefined;
+ imm8 : bits(8) = ((((((a @ b) @ c) @ d) @ e) @ f) @ g) @ h;
+ imm16 : bits(16) = ((([imm8[7]] @ ~([imm8[6]])) @ replicate_bits([imm8[6]], 2)) @ slice(imm8, 0, 6)) @ Zeros(6);
+ let 'immsize = datasize / 16;
+ assert(constraint('immsize * 16 = 'datasize));
+ imm = replicate_bits(imm16, immsize);
+ aarch64_vector_fp16_movi(datasize, imm, rd)
+}
+
+val vector_crypto_sm4_sm4enckey_decode : (bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode the SM4 key-schedule operation: gated on HaveChCryptoExt();
+   extracts Rd/Rn/Rm and dispatches to the execute body. */
+function vector_crypto_sm4_sm4enckey_decode (Rm, O, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveChCryptoExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ aarch64_vector_crypto_sm4_sm4enckey(d, m, n)
+}
+
+val vector_crypto_sm4_sm4enc_decode : (bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode the SM4 encryption round operation: gated on
+   HaveChCryptoExt(); extracts Rd/Rn and dispatches. */
+function vector_crypto_sm4_sm4enc_decode (Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveChCryptoExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ aarch64_vector_crypto_sm4_sm4enc(d, n)
+}
+
+val vector_crypto_sm3_sm3tt2b_decode : (bits(5), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SM3TT2B: gated on HaveChCryptoExt(); i = imm2 selects the
+   source element index. */
+function vector_crypto_sm3_sm3tt2b_decode (Rm, imm2, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveChCryptoExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ i : int = UInt(imm2);
+ aarch64_vector_crypto_sm3_sm3tt2b(d, i, m, n)
+}
+
+val vector_crypto_sm3_sm3tt2a_decode : (bits(5), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SM3TT2A: gated on HaveChCryptoExt(); i = imm2 selects the
+   source element index. */
+function vector_crypto_sm3_sm3tt2a_decode (Rm, imm2, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveChCryptoExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ i : int = UInt(imm2);
+ aarch64_vector_crypto_sm3_sm3tt2a(d, i, m, n)
+}
+
+val vector_crypto_sm3_sm3tt1b_decode : (bits(5), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SM3TT1B: gated on HaveChCryptoExt(); i = imm2 selects the
+   source element index. */
+function vector_crypto_sm3_sm3tt1b_decode (Rm, imm2, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveChCryptoExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ i : int = UInt(imm2);
+ aarch64_vector_crypto_sm3_sm3tt1b(d, i, m, n)
+}
+
+val vector_crypto_sm3_sm3tt1a_decode : (bits(5), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SM3TT1A: gated on HaveChCryptoExt(); i = imm2 selects the
+   source element index. */
+function vector_crypto_sm3_sm3tt1a_decode (Rm, imm2, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveChCryptoExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ i : int = UInt(imm2);
+ aarch64_vector_crypto_sm3_sm3tt1a(d, i, m, n)
+}
+
+val vector_crypto_sm3_sm3ss1_decode : (bits(2), bits(5), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SM3SS1 (four-register form): gated on HaveChCryptoExt(). */
+function vector_crypto_sm3_sm3ss1_decode (Op0, Rm, Ra, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveChCryptoExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ a : int = UInt(Ra);
+ aarch64_vector_crypto_sm3_sm3ss1(a, d, m, n)
+}
+
+val vector_crypto_sm3_sm3partw2_decode : (bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SM3PARTW2: gated on HaveChCryptoExt(). */
+function vector_crypto_sm3_sm3partw2_decode (Rm, O, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveChCryptoExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ aarch64_vector_crypto_sm3_sm3partw2(d, m, n)
+}
+
+val vector_crypto_sm3_sm3partw1_decode : (bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SM3PARTW1: gated on HaveChCryptoExt(). */
+function vector_crypto_sm3_sm3partw1_decode (Rm, O, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveChCryptoExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ aarch64_vector_crypto_sm3_sm3partw1(d, m, n)
+}
+
+val vector_crypto_sha512_sha512su1_decode : (bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SHA512SU1: gated on HaveCryptoExt2(). */
+function vector_crypto_sha512_sha512su1_decode (Rm, O, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveCryptoExt2()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ aarch64_vector_crypto_sha512_sha512su1(d, m, n)
+}
+
+val vector_crypto_sha512_sha512su0_decode : (bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SHA512SU0: gated on HaveCryptoExt2(). */
+function vector_crypto_sha512_sha512su0_decode (Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveCryptoExt2()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ aarch64_vector_crypto_sha512_sha512su0(d, n)
+}
+
+val vector_crypto_sha512_sha512h_decode : (bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SHA512H: gated on HaveCryptoExt2(). */
+function vector_crypto_sha512_sha512h_decode (Rm, O, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveCryptoExt2()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ aarch64_vector_crypto_sha512_sha512h(d, m, n)
+}
+
+val vector_crypto_sha512_sha512h2_decode : (bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SHA512H2: gated on HaveCryptoExt2(). */
+function vector_crypto_sha512_sha512h2_decode (Rm, O, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveCryptoExt2()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ aarch64_vector_crypto_sha512_sha512h2(d, m, n)
+}
+
+val vector_crypto_sha3op_sha256sched1_decode : (bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SHA256SU1 (three-operand schedule update): gated on
+   HaveCryptoExt(). */
+function vector_crypto_sha3op_sha256sched1_decode (size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if ~(HaveCryptoExt()) then UnallocatedEncoding() else ();
+ aarch64_vector_crypto_sha3op_sha256sched1(d, m, n)
+}
+
+val vector_crypto_sha3op_sha256hash_decode : (bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode the SHA256 hash update: gated on HaveCryptoExt(); P selects
+   the first (P = 0) or second part of the hash step. */
+function vector_crypto_sha3op_sha256hash_decode (size, Rm, P, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if ~(HaveCryptoExt()) then UnallocatedEncoding() else ();
+ part1 : bool = P == 0b0;
+ aarch64_vector_crypto_sha3op_sha256hash(d, m, n, part1)
+}
+
+val vector_crypto_sha3op_sha1sched0_decode : (bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SHA1SU0: gated on HaveCryptoExt(). */
+function vector_crypto_sha3op_sha1sched0_decode (size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if ~(HaveCryptoExt()) then UnallocatedEncoding() else ();
+ aarch64_vector_crypto_sha3op_sha1sched0(d, m, n)
+}
+
+val vector_crypto_sha3op_sha1hash_parity_decode : (bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SHA1P (parity hash update): gated on HaveCryptoExt(). */
+function vector_crypto_sha3op_sha1hash_parity_decode (size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if ~(HaveCryptoExt()) then UnallocatedEncoding() else ();
+ aarch64_vector_crypto_sha3op_sha1hash_parity(d, m, n)
+}
+
+val vector_crypto_sha3op_sha1hash_majority_decode : (bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SHA1M (majority hash update): gated on HaveCryptoExt(). */
+function vector_crypto_sha3op_sha1hash_majority_decode (size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if ~(HaveCryptoExt()) then UnallocatedEncoding() else ();
+ aarch64_vector_crypto_sha3op_sha1hash_majority(d, m, n)
+}
+
+val vector_crypto_sha3op_sha1hash_choose_decode : (bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SHA1C (choose hash update): gated on HaveCryptoExt(). */
+function vector_crypto_sha3op_sha1hash_choose_decode (size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if ~(HaveCryptoExt()) then UnallocatedEncoding() else ();
+ aarch64_vector_crypto_sha3op_sha1hash_choose(d, m, n)
+}
+
+val vector_crypto_sha3_xar_decode : (bits(5), bits(6), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode XAR (exclusive-or and rotate): gated on HaveCryptoExt2();
+   imm6 is passed through as the rotate amount. */
+function vector_crypto_sha3_xar_decode (Rm, imm6, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveCryptoExt2()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ aarch64_vector_crypto_sha3_xar(d, imm6, m, n)
+}
+
+val vector_crypto_sha3_rax1_decode : (bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode RAX1: gated on HaveCryptoExt2(). */
+function vector_crypto_sha3_rax1_decode (Rm, O, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveCryptoExt2()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ aarch64_vector_crypto_sha3_rax1(d, m, n)
+}
+
+val vector_crypto_sha3_eor3_decode : (bits(2), bits(5), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode EOR3 (three-way exclusive-or): gated on HaveCryptoExt2(). */
+function vector_crypto_sha3_eor3_decode (Op0, Rm, Ra, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveCryptoExt2()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ a : int = UInt(Ra);
+ aarch64_vector_crypto_sha3_eor3(a, d, m, n)
+}
+
+val vector_crypto_sha3_bcax_decode : (bits(2), bits(5), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode BCAX (bit clear and exclusive-or): gated on HaveCryptoExt2(). */
+function vector_crypto_sha3_bcax_decode (Op0, Rm, Ra, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveCryptoExt2()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ a : int = UInt(Ra);
+ aarch64_vector_crypto_sha3_bcax(a, d, m, n)
+}
+
+val vector_crypto_sha2op_sha256sched0_decode : (bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SHA256SU0 (two-operand schedule update): gated on
+   HaveCryptoExt(). */
+function vector_crypto_sha2op_sha256sched0_decode (size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HaveCryptoExt()) then UnallocatedEncoding() else ();
+ aarch64_vector_crypto_sha2op_sha256sched0(d, n)
+}
+
+val vector_crypto_sha2op_sha1sched1_decode : (bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SHA1SU1 (two-operand schedule update): gated on
+   HaveCryptoExt(). */
+function vector_crypto_sha2op_sha1sched1_decode (size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HaveCryptoExt()) then UnallocatedEncoding() else ();
+ aarch64_vector_crypto_sha2op_sha1sched1(d, n)
+}
+
+val vector_crypto_sha2op_sha1hash_decode : (bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SHA1H (fixed-rotate hash helper): gated on HaveCryptoExt(). */
+function vector_crypto_sha2op_sha1hash_decode (size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HaveCryptoExt()) then UnallocatedEncoding() else ();
+ aarch64_vector_crypto_sha2op_sha1hash(d, n)
+}
+
+val vector_crypto_aes_round_decode : (bits(2), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode the AES single-round operation: gated on HaveCryptoExt();
+   D = 1 selects the decrypt direction. */
+function vector_crypto_aes_round_decode (size, D, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HaveCryptoExt()) then UnallocatedEncoding() else ();
+ decrypt : bool = D == 0b1;
+ aarch64_vector_crypto_aes_round(d, decrypt, n)
+}
+
+val vector_crypto_aes_mix_decode : (bits(2), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode the AES mix-columns operation: gated on HaveCryptoExt();
+   D = 1 selects the inverse (decrypt) direction. */
+function vector_crypto_aes_mix_decode (size, D, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HaveCryptoExt()) then UnallocatedEncoding() else ();
+ decrypt : bool = D == 0b1;
+ aarch64_vector_crypto_aes_mix(d, decrypt, n)
+}
+
+val vector_arithmetic_unary_special_sqrtfp16_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode half-precision vector square root: gated on HaveFP16Ext();
+   Q selects a 64- or 128-bit datasize of 16-bit elements. */
+function vector_arithmetic_unary_special_sqrtfp16_decode (Q, U, a, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_unary_special_sqrtfp16(d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_special_sqrtest_fp16_sisd_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode the scalar half-precision reciprocal square-root estimate:
+   gated on HaveFP16Ext(); scalar form, so a single 16-bit element. */
+function vector_arithmetic_unary_special_sqrtest_fp16_sisd_decode (U, a, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ aarch64_vector_arithmetic_unary_special_sqrtest_fp16_sisd(d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_special_sqrtest_fp16_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode the vector half-precision reciprocal square-root estimate:
+   gated on HaveFP16Ext(). */
+function vector_arithmetic_unary_special_sqrtest_fp16_simd_decode (Q, U, a, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ /* NOTE(review): this simd decoder dispatches to the _sisd execute
+    body; possibly intentional sharing of a single execute function,
+    but verify a _simd variant was not intended here. */
+ aarch64_vector_arithmetic_unary_special_sqrtest_fp16_sisd(d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_special_recip_fp16_sisd_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode the scalar half-precision reciprocal estimate: gated on
+   HaveFP16Ext(); scalar form, so a single 16-bit element. */
+function vector_arithmetic_unary_special_recip_fp16_sisd_decode (U, size, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ aarch64_vector_arithmetic_unary_special_recip_fp16_sisd(d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_special_recip_fp16_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode the vector half-precision reciprocal estimate: gated on
+   HaveFP16Ext(). */
+function vector_arithmetic_unary_special_recip_fp16_simd_decode (Q, U, a, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ /* NOTE(review): this simd decoder dispatches to the _sisd execute
+    body; possibly intentional sharing of a single execute function,
+    but verify a _simd variant was not intended here. */
+ aarch64_vector_arithmetic_unary_special_recip_fp16_sisd(d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_special_frecpxfp16_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_special_frecpxfp16_decode (U, a, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ aarch64_vector_arithmetic_unary_special_frecpxfp16(d, datasize, elements, esize, n)
+}
+
+/* REV16/REV32/REV64 (vector): reverse elements within containers. The container
+   size is selected by op = o0:U; combinations with UInt(op) + UInt(size) >= 3
+   (container not strictly larger than the element) are unallocated. */
+val vector_arithmetic_unary_rev_decode : (bits(1), bits(1), bits(2), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_rev_decode (Q, U, size, o0, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ op : bits(2) = o0 @ U;
+ if UInt(op) + UInt(size) >= 3 then UnallocatedEncoding() else ();
+ container_size : int = undefined;
+ match op {
+ 0b10 => container_size = 16,
+ 0b01 => container_size = 32,
+ 0b00 => container_size = 64
+ };
+ containers : int = datasize / container_size;
+ elements_per_container : int = container_size / esize;
+ aarch64_vector_arithmetic_unary_rev(containers, d, datasize, elements_per_container, esize, n)
+}
+
+/* FP16 vector round-to-integral (FRINT* family). U:o1:o2 selects the rounding
+   mode: a leading 0 bit decodes o1:o2 directly, 0b100 is ties-away, 0b101 is
+   unallocated, 0b110 uses FPCR rounding with exact=true (presumably FRINTX),
+   and 0b111 uses FPCR rounding inexactly. */
+val vector_arithmetic_unary_fp16_round_decode : (bits(1), bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_fp16_round_decode (Q, U, o2, o1, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ exact : bool = false;
+ rounding : FPRounding = undefined;
+ match (U @ o1) @ o2 {
+ [bitzero] @ _ : bits(1) @ _ : bits(1) => rounding = FPDecodeRounding(o1 @ o2),
+ 0b100 => rounding = FPRounding_TIEAWAY,
+ 0b101 => UnallocatedEncoding(),
+ 0b110 => {
+ rounding = FPRoundingMode(FPCR);
+ exact = true
+ },
+ 0b111 => rounding = FPRoundingMode(FPCR)
+ };
+ aarch64_vector_arithmetic_unary_fp16_round(d, datasize, elements, esize, exact, n, rounding)
+}
+
+/* FP16 integer<->float convert (scalar): signedness from U. Presumably the
+   [US]CVTF scalar FP16 encodings — confirm against the ASL source. */
+val vector_arithmetic_unary_fp16_conv_int_sisd_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_fp16_conv_int_sisd_decode (U, a, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_fp16_conv_int_sisd(d, datasize, elements, esize, n, unsigned)
+}
+
+/* FP16 integer<->float convert (vector): same execute clone as the scalar form,
+   with Q selecting the 64/128-bit datasize. */
+val vector_arithmetic_unary_fp16_conv_int_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_fp16_conv_int_simd_decode (Q, U, a, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_fp16_conv_int_sisd(d, datasize, elements, esize, n, unsigned)
+}
+
+/* FP16 float->int convert, round ties-away (scalar): fixed TIEAWAY rounding,
+   signedness from U (presumably FCVTAS/FCVTAU). */
+val vector_arithmetic_unary_fp16_conv_float_tieaway_sisd_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_fp16_conv_float_tieaway_sisd_decode (U, size, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ rounding : FPRounding = FPRounding_TIEAWAY;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_fp16_conv_float_tieaway_sisd(d, datasize, elements, esize, n, rounding, unsigned)
+}
+
+/* FP16 float->int convert, round ties-away (vector): shared _sisd execute clone. */
+val vector_arithmetic_unary_fp16_conv_float_tieaway_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_fp16_conv_float_tieaway_simd_decode (Q, U, a, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ rounding : FPRounding = FPRounding_TIEAWAY;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_fp16_conv_float_tieaway_sisd(d, datasize, elements, esize, n, rounding, unsigned)
+}
+
+/* FP16 float->int convert (scalar, bulk rounding modes): rounding decoded from
+   o1:o2 via FPDecodeRounding, signedness from U. */
+val vector_arithmetic_unary_fp16_conv_float_bulk_sisd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_fp16_conv_float_bulk_sisd_decode (U, o2, o1, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ rounding : FPRounding = FPDecodeRounding(o1 @ o2);
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_fp16_conv_float_bulk_sisd(d, datasize, elements, esize, n, rounding, unsigned)
+}
+
+/* FP16 float->int convert (vector, bulk rounding modes): shared _sisd execute clone. */
+val vector_arithmetic_unary_fp16_conv_float_bulk_simd_decode : (bits(1), bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_fp16_conv_float_bulk_simd_decode (Q, U, o2, o1, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ rounding : FPRounding = FPDecodeRounding(o1 @ o2);
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_fp16_conv_float_bulk_sisd(d, datasize, elements, esize, n, rounding, unsigned)
+}
+
+/* FP16 vector absolute/negate (presumably FABS/FNEG): U=1 selects negate. */
+val vector_arithmetic_unary_diffneg_fp16_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_diffneg_fp16_decode (Q, U, a, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ neg : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_diffneg_fp16(d, datasize, elements, esize, n, neg)
+}
+
+/* FP16 compare-less-than-zero (scalar): fixed CompareOp_LT against zero. */
+val vector_arithmetic_unary_cmp_fp16_lessthan_sisd_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_cmp_fp16_lessthan_sisd_decode (U, a, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ comparison : CompareOp = CompareOp_LT;
+ aarch64_vector_arithmetic_unary_cmp_fp16_lessthan_sisd(comparison, d, datasize, elements, esize, n)
+}
+
+/* FP16 compare-less-than-zero (vector): shared _sisd execute clone. */
+val vector_arithmetic_unary_cmp_fp16_lessthan_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_cmp_fp16_lessthan_simd_decode (Q, U, a, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ comparison : CompareOp = CompareOp_LT;
+ aarch64_vector_arithmetic_unary_cmp_fp16_lessthan_sisd(comparison, d, datasize, elements, esize, n)
+}
+
+/* FP16 compare-against-zero (scalar): op:U selects GT/GE/EQ/LE. */
+val vector_arithmetic_unary_cmp_fp16_bulk_sisd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_cmp_fp16_bulk_sisd_decode (U, a, op, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ comparison : CompareOp = undefined;
+ match op @ U {
+ 0b00 => comparison = CompareOp_GT,
+ 0b01 => comparison = CompareOp_GE,
+ 0b10 => comparison = CompareOp_EQ,
+ 0b11 => comparison = CompareOp_LE
+ };
+ aarch64_vector_arithmetic_unary_cmp_fp16_bulk_sisd(comparison, d, datasize, elements, esize, n)
+}
+
+/* FP16 compare-against-zero (vector): same op:U decode; shared _sisd execute clone. */
+val vector_arithmetic_unary_cmp_fp16_bulk_simd_decode : (bits(1), bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_cmp_fp16_bulk_simd_decode (Q, U, a, op, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ comparison : CompareOp = undefined;
+ match op @ U {
+ 0b00 => comparison = CompareOp_GT,
+ 0b01 => comparison = CompareOp_GE,
+ 0b10 => comparison = CompareOp_EQ,
+ 0b11 => comparison = CompareOp_LE
+ };
+ aarch64_vector_arithmetic_unary_cmp_fp16_bulk_sisd(comparison, d, datasize, elements, esize, n)
+}
+
+/* FP16 absolute difference (scalar): abs is hard-wired true (presumably FABD);
+   contrast with the vector form below, where abs comes from U. */
+val vector_arithmetic_binary_uniform_sub_fp16_sisd_decode : (bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_sub_fp16_sisd_decode (U, a, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ abs : bool = true;
+ aarch64_vector_arithmetic_binary_uniform_sub_fp16_sisd(abs, d, datasize, elements, esize, m, n)
+}
+
+/* FP16 subtract / absolute difference (vector): abs selected by U. Note this
+   pair dispatches to a distinct _simd execute function, unlike most pairs here. */
+val vector_arithmetic_binary_uniform_sub_fp16_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_sub_fp16_simd_decode (Q, U, a, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ abs : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_sub_fp16_simd(abs, d, datasize, elements, esize, m, n)
+}
+
+/* FRSQRTS (FP16, scalar): reciprocal square-root step decode. */
+val vector_arithmetic_binary_uniform_rsqrtsfp16_sisd_decode : (bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_rsqrtsfp16_sisd_decode (U, a, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ aarch64_vector_arithmetic_binary_uniform_rsqrtsfp16_sisd(d, datasize, elements, esize, m, n)
+}
+
+/* FRSQRTS (FP16, vector): shared _sisd execute clone. */
+val vector_arithmetic_binary_uniform_rsqrtsfp16_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_rsqrtsfp16_simd_decode (Q, U, a, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_rsqrtsfp16_sisd(d, datasize, elements, esize, m, n)
+}
+
+/* FRECPS (FP16, scalar): reciprocal step decode. */
+val vector_arithmetic_binary_uniform_recpsfp16_sisd_decode : (bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_recpsfp16_sisd_decode (U, a, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ aarch64_vector_arithmetic_binary_uniform_recpsfp16_sisd(d, datasize, elements, esize, m, n)
+}
+
+/* FRECPS (FP16, vector): shared _sisd execute clone. */
+val vector_arithmetic_binary_uniform_recpsfp16_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_recpsfp16_simd_decode (Q, U, a, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_recpsfp16_sisd(d, datasize, elements, esize, m, n)
+}
+
+/* FMUL (FP16, vector, register): element-wise product. */
+val vector_arithmetic_binary_uniform_mul_fp16_product_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_mul_fp16_product_decode (Q, U, a, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_mul_fp16_product(d, datasize, elements, esize, m, n)
+}
+
+/* FP16 fused multiply-add/subtract (vector, presumably FMLA/FMLS): a=1 selects
+   the subtract form. */
+val vector_arithmetic_binary_uniform_mul_fp16_fused_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_mul_fp16_fused_decode (Q, U, a, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ sub_op : bool = a == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_mul_fp16_fused(d, datasize, elements, esize, m, n, sub_op)
+}
+
+/* FMULX (FP16, scalar): extended multiply decode. */
+val vector_arithmetic_binary_uniform_mul_fp16_extended_sisd_decode : (bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_mul_fp16_extended_sisd_decode (U, a, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ aarch64_vector_arithmetic_binary_uniform_mul_fp16_extended_sisd(d, datasize, elements, esize, m, n)
+}
+
+/* FMULX (FP16, vector): shared _sisd execute clone. */
+val vector_arithmetic_binary_uniform_mul_fp16_extended_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_mul_fp16_extended_simd_decode (Q, U, a, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_mul_fp16_extended_sisd(d, datasize, elements, esize, m, n)
+}
+
+/* FP16 max/min, IEEE 754-2008 NaN semantics (presumably FMAXNM/FMINNM and
+   pairwise forms): U selects pairwise, a selects minimum. Op3 is unused here. */
+val vector_arithmetic_binary_uniform_maxmin_fp16_2008_decode : (bits(1), bits(1), bits(1), bits(5), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_maxmin_fp16_2008_decode (Q, U, a, Rm, Op3, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ pair : bool = U == 0b1;
+ minimum : bool = a == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_maxmin_fp16_2008(d, datasize, elements, esize, m, minimum, n, pair)
+}
+
+/* FP16 max/min, IEEE 754-1985 NaN semantics (presumably FMAX/FMIN and pairwise
+   forms): U selects pairwise, o1 selects minimum. */
+val vector_arithmetic_binary_uniform_maxmin_fp16_1985_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_maxmin_fp16_1985_decode (Q, U, o1, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ pair : bool = U == 0b1;
+ minimum : bool = o1 == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_maxmin_fp16_1985(d, datasize, elements, esize, m, minimum, n, pair)
+}
+
+/* FDIV (FP16, vector): element-wise divide decode. */
+val vector_arithmetic_binary_uniform_divfp16_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_divfp16_decode (Q, U, a, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_divfp16(d, datasize, elements, esize, m, n)
+}
+
+/* FP16 register compare (scalar): E:U:ac selects FCMEQ/FCMGE/FACGE/FCMGT/FACGT
+   (abs = "absolute compare"); other encodings are unallocated. */
+val vector_arithmetic_binary_uniform_cmp_fp16_sisd_decode : (bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_cmp_fp16_sisd_decode (U, E, Rm, ac, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ cmp : CompareOp = undefined;
+ abs : bool = undefined;
+ match (E @ U) @ ac {
+ 0b000 => {
+ cmp = CompareOp_EQ;
+ abs = false
+ },
+ 0b010 => {
+ cmp = CompareOp_GE;
+ abs = false
+ },
+ 0b011 => {
+ cmp = CompareOp_GE;
+ abs = true
+ },
+ 0b110 => {
+ cmp = CompareOp_GT;
+ abs = false
+ },
+ 0b111 => {
+ cmp = CompareOp_GT;
+ abs = true
+ },
+ _ => UnallocatedEncoding()
+ };
+ aarch64_vector_arithmetic_binary_uniform_cmp_fp16_sisd(abs, cmp, d, datasize, elements, esize, m, n)
+}
+
+/* FP16 register compare (vector): same E:U:ac decode; shared _sisd execute clone. */
+val vector_arithmetic_binary_uniform_cmp_fp16_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_cmp_fp16_simd_decode (Q, U, E, Rm, ac, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ cmp : CompareOp = undefined;
+ abs : bool = undefined;
+ match (E @ U) @ ac {
+ 0b000 => {
+ cmp = CompareOp_EQ;
+ abs = false
+ },
+ 0b010 => {
+ cmp = CompareOp_GE;
+ abs = false
+ },
+ 0b011 => {
+ cmp = CompareOp_GE;
+ abs = true
+ },
+ 0b110 => {
+ cmp = CompareOp_GT;
+ abs = false
+ },
+ 0b111 => {
+ cmp = CompareOp_GT;
+ abs = true
+ },
+ _ => UnallocatedEncoding()
+ };
+ aarch64_vector_arithmetic_binary_uniform_cmp_fp16_sisd(abs, cmp, d, datasize, elements, esize, m, n)
+}
+
+/* Single/double-precision register compare (scalar): esize = 32 << sz.
+   NOTE(review): dispatches to the fp16-named execute function — presumably the
+   generated execute body is element-size generic; confirm against the ASL source. */
+val vector_arithmetic_binary_uniform_cmp_fp_sisd_decode : (bits(1), bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_cmp_fp_sisd_decode (U, E, sz, Rm, ac, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize;
+ elements : int = 1;
+ cmp : CompareOp = undefined;
+ abs : bool = undefined;
+ match (E @ U) @ ac {
+ 0b000 => {
+ cmp = CompareOp_EQ;
+ abs = false
+ },
+ 0b010 => {
+ cmp = CompareOp_GE;
+ abs = false
+ },
+ 0b011 => {
+ cmp = CompareOp_GE;
+ abs = true
+ },
+ 0b110 => {
+ cmp = CompareOp_GT;
+ abs = false
+ },
+ 0b111 => {
+ cmp = CompareOp_GT;
+ abs = true
+ },
+ _ => UnallocatedEncoding()
+ };
+ aarch64_vector_arithmetic_binary_uniform_cmp_fp16_sisd(abs, cmp, d, datasize, elements, esize, m, n)
+}
+
+/* FP16 add / pairwise add (vector, presumably FADD/FADDP): U selects pairwise. */
+val vector_arithmetic_binary_uniform_add_fp16_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_add_fp16_decode (Q, U, a, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ pair : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_add_fp16(d, datasize, elements, esize, m, n, pair)
+}
+
+/* Multiply long by element (presumably [SU]MULL/[SU]MULL2): the element index
+   and the high bit of the Rm register number are decoded from size, H, L, M;
+   Q selects the source half (part). Only size 01/10 are allocated. */
+val vector_arithmetic_binary_element_mul_long_decode : (bits(1), bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mul_long_decode (Q, U, size, L, M, Rm, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = undefined;
+ match size {
+ 0b01 => {
+ index = UInt((H @ L) @ M);
+ Rmhi = 0b0
+ },
+ 0b10 => {
+ index = UInt(H @ L);
+ Rmhi = M
+ },
+ _ => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_element_mul_long(d, datasize, elements, esize, idxdsize, index, m, n, part, unsigned)
+}
+
+/* Multiply by element (integer, vector): same index/Rmhi decode as above. */
+val vector_arithmetic_binary_element_mul_int_decode : (bits(1), bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mul_int_decode (Q, U, size, L, M, Rm, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = undefined;
+ match size {
+ 0b01 => {
+ index = UInt((H @ L) @ M);
+ Rmhi = 0b0
+ },
+ 0b10 => {
+ index = UInt(H @ L);
+ Rmhi = M
+ },
+ _ => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_element_mul_int(d, datasize, elements, esize, idxdsize, index, m, n)
+}
+
+/* Saturating multiply returning high half, by element (scalar, presumably
+   SQDMULH/SQRDMULH): op selects the rounding form. */
+val vector_arithmetic_binary_element_mul_high_sisd_decode : (bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mul_high_sisd_decode (U, size, L, M, Rm, op, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = undefined;
+ match size {
+ 0b01 => {
+ index = UInt((H @ L) @ M);
+ Rmhi = 0b0
+ },
+ 0b10 => {
+ index = UInt(H @ L);
+ Rmhi = M
+ },
+ _ => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ round : bool = op == 0b1;
+ aarch64_vector_arithmetic_binary_element_mul_high_sisd(d, datasize, elements, esize, idxdsize, index, m, n, round)
+}
+
+/* Saturating multiply returning high half, by element (vector): shared _sisd
+   execute clone; Q selects 64/128-bit datasize. */
+val vector_arithmetic_binary_element_mul_high_simd_decode : (bits(1), bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mul_high_simd_decode (Q, U, size, L, M, Rm, op, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = undefined;
+ match size {
+ 0b01 => {
+ index = UInt((H @ L) @ M);
+ Rmhi = 0b0
+ },
+ 0b10 => {
+ index = UInt(H @ L);
+ Rmhi = M
+ },
+ _ => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ round : bool = op == 0b1;
+ aarch64_vector_arithmetic_binary_element_mul_high_sisd(d, datasize, elements, esize, idxdsize, index, m, n, round)
+}
+
+/* FP16 multiply by element (scalar): U selects the FMULX variant; index is
+   H:L:M, and Rm is used directly (only a 4-bit register field here). */
+val vector_arithmetic_binary_element_mul_fp16_sisd_decode : (bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mul_fp16_sisd_decode (U, size, L, M, Rm, H, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = UInt((H @ L) @ M);
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ mulx_op : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_element_mul_fp16_sisd(d, datasize, elements, esize, idxdsize, index, m, mulx_op, n)
+}
+
+/* FP16 multiply by element (vector): shared _sisd execute clone. */
+val vector_arithmetic_binary_element_mul_fp16_simd_decode : (bits(1), bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mul_fp16_simd_decode (Q, U, size, L, M, Rm, H, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = UInt((H @ L) @ M);
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ mulx_op : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_element_mul_fp16_sisd(d, datasize, elements, esize, idxdsize, index, m, mulx_op, n)
+}
+
+/* Single/double FP multiply by element (scalar): esize = 32 << sz; sz:L picks
+   the index width and sz=1,L=1 is unallocated.
+   NOTE(review): dispatches to the fp16-named execute function — presumably the
+   generated execute body is element-size generic; confirm against the ASL source. */
+val vector_arithmetic_binary_element_mul_fp_sisd_decode : (bits(1), bits(1), bits(1), bits(1), bits(4), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mul_fp_sisd_decode (U, sz, L, M, Rm, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = M;
+ match sz @ L {
+ [bitzero] @ _ : bits(1) => index = UInt(H @ L),
+ 0b10 => index = UInt(H),
+ 0b11 => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize;
+ elements : int = 1;
+ mulx_op : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_element_mul_fp16_sisd(d, datasize, elements, esize, idxdsize, index, m, mulx_op, n)
+}
+
+/* Saturating doubling multiply long by element (scalar, presumably SQDMULL):
+   part is fixed to 0 for the scalar form. */
+val vector_arithmetic_binary_element_mul_double_sisd_decode : (bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mul_double_sisd_decode (U, size, L, M, Rm, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = undefined;
+ match size {
+ 0b01 => {
+ index = UInt((H @ L) @ M);
+ Rmhi = 0b0
+ },
+ 0b10 => {
+ index = UInt(H @ L);
+ Rmhi = M
+ },
+ _ => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ part : int = 0;
+ aarch64_vector_arithmetic_binary_element_mul_double_sisd(d, datasize, elements, esize, idxdsize, index, m, n, part)
+}
+
+/* Saturating doubling multiply long by element (vector): Q selects the source
+   half (part); shared _sisd execute clone with a fixed 64-bit datasize. */
+val vector_arithmetic_binary_element_mul_double_simd_decode : (bits(1), bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mul_double_simd_decode (Q, U, size, L, M, Rm, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = undefined;
+ match size {
+ 0b01 => {
+ index = UInt((H @ L) @ M);
+ Rmhi = 0b0
+ },
+ 0b10 => {
+ index = UInt(H @ L);
+ Rmhi = M
+ },
+ _ => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_element_mul_double_sisd(d, datasize, elements, esize, idxdsize, index, m, n, part)
+}
+
+/* Multiply-accumulate long by element (presumably [SU]ML[AS]L/[SU]ML[AS]L2):
+   U selects unsigned, o2 selects the subtract form, Q the source half. */
+val vector_arithmetic_binary_element_mulacc_long_decode : (bits(1), bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mulacc_long_decode (Q, U, size, L, M, Rm, o2, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = undefined;
+ match size {
+ 0b01 => {
+ index = UInt((H @ L) @ M);
+ Rmhi = 0b0
+ },
+ 0b10 => {
+ index = UInt(H @ L);
+ Rmhi = M
+ },
+ _ => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ sub_op : bool = o2 == 0b1;
+ aarch64_vector_arithmetic_binary_element_mulacc_long(d, datasize, elements, esize, idxdsize, index, m, n, part, sub_op, unsigned)
+}
+
+/* Multiply-accumulate by element (integer, vector, presumably MLA/MLS):
+   o2 selects the subtract form. */
+val vector_arithmetic_binary_element_mulacc_int_decode : (bits(1), bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mulacc_int_decode (Q, U, size, L, M, Rm, o2, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = undefined;
+ match size {
+ 0b01 => {
+ index = UInt((H @ L) @ M);
+ Rmhi = 0b0
+ },
+ 0b10 => {
+ index = UInt(H @ L);
+ Rmhi = M
+ },
+ _ => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ sub_op : bool = o2 == 0b1;
+ aarch64_vector_arithmetic_binary_element_mulacc_int(d, datasize, elements, esize, idxdsize, index, m, n, sub_op)
+}
+
+val vector_arithmetic_binary_element_mulacc_high_sisd_decode : (bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decoder for the scalar (SISD) saturating rounding doubling
+   multiply-accumulate-high by-element encoding (auto-generated from ARM ASL).
+   Gated on the QRDMLAH extension; rounding is always true here. */
+function vector_arithmetic_binary_element_mulacc_high_sisd_decode (U, size, L, M, Rm, S, H, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveQRDMLAHExt()) then UnallocatedEncoding() else ();
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = undefined;
+ /* size 0b01: 16-bit element, index = H:L:M; size 0b10: 32-bit element,
+    index = H:L; other sizes unallocated. */
+ match size {
+ 0b01 => {
+ index = UInt((H @ L) @ M);
+ Rmhi = 0b0
+ },
+ 0b10 => {
+ index = UInt(H @ L);
+ Rmhi = M
+ },
+ _ => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ rounding : bool = true;
+ sub_op : bool = S == 0b1;
+ aarch64_vector_arithmetic_binary_element_mulacc_high_sisd(d, datasize, elements, esize, idxdsize, index, m, n, rounding, sub_op)
+}
+
+val vector_arithmetic_binary_element_mulacc_high_simd_decode : (bits(1), bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector (SIMD) variant of the encoding above; identical field extraction,
+   but datasize/elements come from Q. */
+function vector_arithmetic_binary_element_mulacc_high_simd_decode (Q, U, size, L, M, Rm, S, H, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveQRDMLAHExt()) then UnallocatedEncoding() else ();
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = undefined;
+ match size {
+ 0b01 => {
+ index = UInt((H @ L) @ M);
+ Rmhi = 0b0
+ },
+ 0b10 => {
+ index = UInt(H @ L);
+ Rmhi = M
+ },
+ _ => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ rounding : bool = true;
+ sub_op : bool = S == 0b1;
+ /* NOTE(review): dispatches to the _sisd execute function. This appears to
+    be the generator's shared-execute convention (datasize/elements carry the
+    vector shape) — confirm against the ASL source. */
+ aarch64_vector_arithmetic_binary_element_mulacc_high_sisd(d, datasize, elements, esize, idxdsize, index, m, n, rounding, sub_op)
+}
+
+val vector_arithmetic_binary_element_mulacc_fp16_sisd_decode : (bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decoder for the scalar (SISD) half-precision FP multiply-accumulate
+   by-element encoding (auto-generated from ARM ASL). Gated on the FP16
+   extension; element size is fixed at 16 bits, index = H:L:M. */
+function vector_arithmetic_binary_element_mulacc_fp16_sisd_decode (U, size, L, M, Rm, o2, H, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = UInt((H @ L) @ M);
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ datasize : int = esize;
+ elements : int = 1;
+ sub_op : bool = o2 == 0b1;
+ aarch64_vector_arithmetic_binary_element_mulacc_fp16_sisd(d, datasize, elements, esize, idxdsize, index, m, n, sub_op)
+}
+
+val vector_arithmetic_binary_element_mulacc_fp16_simd_decode : (bits(1), bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector (SIMD) variant: same field extraction, datasize/elements from Q. */
+function vector_arithmetic_binary_element_mulacc_fp16_simd_decode (Q, U, size, L, M, Rm, o2, H, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = UInt((H @ L) @ M);
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ sub_op : bool = o2 == 0b1;
+ /* NOTE(review): dispatches to the _sisd execute function — presumably the
+    generator's shared-execute convention; confirm against the ASL source. */
+ aarch64_vector_arithmetic_binary_element_mulacc_fp16_sisd(d, datasize, elements, esize, idxdsize, index, m, n, sub_op)
+}
+
+val vector_arithmetic_binary_element_mulacc_fp_sisd_decode : (bits(1), bits(1), bits(1), bits(1), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decoder for the scalar (SISD) single/double-precision FP
+   multiply-accumulate by-element encoding (auto-generated from ARM ASL).
+   esize = 32 << sz, i.e. 32- or 64-bit elements. */
+function vector_arithmetic_binary_element_mulacc_fp_sisd_decode (U, sz, L, M, Rm, o2, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = M;
+ /* sz:L = 0x -> 32-bit element, index = H:L; 10 -> 64-bit element,
+    index = H; 11 is unallocated. */
+ match sz @ L {
+ [bitzero] @ _ : bits(1) => index = UInt(H @ L),
+ 0b10 => index = UInt(H),
+ 0b11 => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(32, UInt(sz));
+ datasize : int = esize;
+ elements : int = 1;
+ sub_op : bool = o2 == 0b1;
+ /* NOTE(review): dispatches to the _fp16_sisd execute function even though
+    esize is 32/64 here — presumably the generator merged identical execute
+    bodies under the first encoding's name; confirm against the ASL source. */
+ aarch64_vector_arithmetic_binary_element_mulacc_fp16_sisd(d, datasize, elements, esize, idxdsize, index, m, n, sub_op)
+}
+
+val vector_arithmetic_binary_element_mulacc_double_sisd_decode : (bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decoder for the scalar (SISD) saturating doubling multiply-accumulate-long
+   by-element encoding (auto-generated from ARM ASL). part is fixed to 0 for
+   the scalar form. */
+function vector_arithmetic_binary_element_mulacc_double_sisd_decode (U, size, L, M, Rm, o2, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = undefined;
+ /* size 0b01: 16-bit element, index = H:L:M; size 0b10: 32-bit element,
+    index = H:L; other sizes unallocated. */
+ match size {
+ 0b01 => {
+ index = UInt((H @ L) @ M);
+ Rmhi = 0b0
+ },
+ 0b10 => {
+ index = UInt(H @ L);
+ Rmhi = M
+ },
+ _ => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ part : int = 0;
+ sub_op : bool = o2 == 0b1;
+ aarch64_vector_arithmetic_binary_element_mulacc_double_sisd(d, datasize, elements, esize, idxdsize, index, m, n, part, sub_op)
+}
+
+val vector_arithmetic_binary_element_mulacc_double_simd_decode : (bits(1), bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector (SIMD) variant: datasize fixed at 64 (long operation reads the
+   lower/upper 64-bit half selected by Q via part). */
+function vector_arithmetic_binary_element_mulacc_double_simd_decode (Q, U, size, L, M, Rm, o2, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = undefined;
+ match size {
+ 0b01 => {
+ index = UInt((H @ L) @ M);
+ Rmhi = 0b0
+ },
+ 0b10 => {
+ index = UInt(H @ L);
+ Rmhi = M
+ },
+ _ => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ sub_op : bool = o2 == 0b1;
+ /* NOTE(review): dispatches to the _sisd execute function — presumably the
+    generator's shared-execute convention; confirm against the ASL source. */
+ aarch64_vector_arithmetic_binary_element_mulacc_double_sisd(d, datasize, elements, esize, idxdsize, index, m, n, part, sub_op)
+}
+
+val system_exceptions_debug_exception_decode : (bits(3), bits(16), bits(3), bits(2)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decoder for the debug-state exception-return encoding (auto-generated from
+   ARM ASL). LL selects the target exception level; LL == 00 is unallocated,
+   and the instruction is undefined unless the PE is halted (debug state). */
+function system_exceptions_debug_exception_decode (opc, imm16, op2, LL) = {
+ __unconditional = true;
+ target_level : bits(2) = LL;
+ if LL == 0b00 then UnallocatedEncoding() else ();
+ if ~(Halted()) then AArch64_UndefinedFault() else ();
+ aarch64_system_exceptions_debug_exception(target_level)
+}
+
+val system_barriers_decode : (bits(1), bits(2), bits(3), bits(4), bits(4), bits(2), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decoder for the barrier instructions (auto-generated from ARM ASL).
+   opc selects DSB/DMB/ISB; CRm<3:2> selects the shareability domain and
+   CRm<1:0> the access types, with unassigned type encodings defaulting to
+   a full-system, all-accesses barrier. */
+function system_barriers_decode (L, op0, op1, CRn, CRm, opc, Rt) = {
+ __unconditional = true;
+ op : MemBarrierOp = undefined;
+ domain : MBReqDomain = undefined;
+ types : MBReqTypes = undefined;
+ match opc {
+ 0b00 => op = MemBarrierOp_DSB,
+ 0b01 => op = MemBarrierOp_DMB,
+ 0b10 => op = MemBarrierOp_ISB,
+ _ => UnallocatedEncoding()
+ };
+ /* CRm<3:2>: barrier domain. */
+ match slice(CRm, 2, 2) {
+ 0b00 => domain = MBReqDomain_OuterShareable,
+ 0b01 => domain = MBReqDomain_Nonshareable,
+ 0b10 => domain = MBReqDomain_InnerShareable,
+ 0b11 => domain = MBReqDomain_FullSystem
+ };
+ /* CRm<1:0>: barrier access types; 0b00 widens to all accesses over the
+    full system. */
+ match slice(CRm, 0, 2) {
+ 0b01 => types = MBReqTypes_Reads,
+ 0b10 => types = MBReqTypes_Writes,
+ 0b11 => types = MBReqTypes_All,
+ _ => {
+ types = MBReqTypes_All;
+ domain = MBReqDomain_FullSystem
+ }
+ };
+ aarch64_system_barriers(domain, op, types)
+}
+
+val memory_vector_single_postinc_aarch64_memory_vector_single_nowb__decode : (bits(1), bits(1), bits(1), bits(5), bits(3), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decoder for the single-structure vector load/store, post-indexed form
+   (auto-generated from ARM ASL); wback is true and m comes from Rm. The
+   scale/index computation below selects the element size and lane from
+   opcode/Q/S/size, with scale == 3 meaning load-and-replicate. */
+function memory_vector_single_postinc_aarch64_memory_vector_single_nowb__decode (Q, L, R, Rm, opcode, S, size, Rn, Rt) = {
+ __unconditional = true;
+ t : int = UInt(Rt);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ wback : bool = true;
+ scale : int = UInt(slice(opcode, 1, 2));
+ selem : int = UInt([opcode[0]] @ R) + 1;
+ replicate : bool = false;
+ index : int = undefined;
+ match scale {
+ 3 => {
+ /* Replicate form: loads only, S must be 0; element size from size. */
+ if L == 0b0 | S == 0b1 then UnallocatedEncoding() else ();
+ scale = UInt(size);
+ replicate = true
+ },
+ 0 => index = UInt((Q @ S) @ size),
+ 1 => {
+ if [size[0]] == 0b1 then UnallocatedEncoding() else ();
+ index = UInt((Q @ S) @ [size[1]])
+ },
+ 2 => {
+ if [size[1]] == 0b1 then UnallocatedEncoding() else ();
+ if [size[0]] == 0b0 then index = UInt(Q @ S) else {
+ /* size<0> == 1 selects the 64-bit element form (scale bumped to 3). */
+ if S == 0b1 then UnallocatedEncoding() else ();
+ index = UInt(Q);
+ scale = 3
+ }
+ }
+ };
+ memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ esize : int = shl_int(8, scale);
+ aarch64_memory_vector_single_nowb(datasize, esize, index, m, memop, n, replicate, selem, t, wback)
+}
+
+val memory_vector_single_nowb_aarch64_memory_vector_single_nowb__decode : (bits(1), bits(1), bits(1), bits(3), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* No-writeback form of the decoder above: wback is false and there is no Rm
+   field, so m is left undefined (unused by the execute when wback is false
+   — TODO confirm against the execute function). */
+function memory_vector_single_nowb_aarch64_memory_vector_single_nowb__decode (Q, L, R, opcode, S, size, Rn, Rt) = {
+ __unconditional = true;
+ t : int = UInt(Rt);
+ n : int = UInt(Rn);
+ m : int = undefined;
+ wback : bool = false;
+ scale : int = UInt(slice(opcode, 1, 2));
+ selem : int = UInt([opcode[0]] @ R) + 1;
+ replicate : bool = false;
+ index : int = undefined;
+ match scale {
+ 3 => {
+ if L == 0b0 | S == 0b1 then UnallocatedEncoding() else ();
+ scale = UInt(size);
+ replicate = true
+ },
+ 0 => index = UInt((Q @ S) @ size),
+ 1 => {
+ if [size[0]] == 0b1 then UnallocatedEncoding() else ();
+ index = UInt((Q @ S) @ [size[1]])
+ },
+ 2 => {
+ if [size[1]] == 0b1 then UnallocatedEncoding() else ();
+ if [size[0]] == 0b0 then index = UInt(Q @ S) else {
+ if S == 0b1 then UnallocatedEncoding() else ();
+ index = UInt(Q);
+ scale = 3
+ }
+ }
+ };
+ memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ esize : int = shl_int(8, scale);
+ aarch64_memory_vector_single_nowb(datasize, esize, index, m, memop, n, replicate, selem, t, wback)
+}
+
+val memory_single_simdfp_register_aarch64_memory_single_simdfp_register__decode : (bits(2), bits(1), bits(2), bits(5), bits(3), bits(1), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decoder for SIMD/FP single-register load/store, register-offset form
+   (auto-generated from ARM ASL). scale = opc<1>:size gives the access size
+   (up to 128-bit, scale 4); option_name<1> == 0 is unallocated. */
+function memory_single_simdfp_register_aarch64_memory_single_simdfp_register__decode (size, V, opc, Rm, option_name, S, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = false;
+ postindex : bool = false;
+ scale : int = UInt([opc[1]] @ size);
+ if scale > 4 then UnallocatedEncoding() else ();
+ if [option_name[1]] == 0b0 then UnallocatedEncoding() else ();
+ extend_type : ExtendType = DecodeRegExtend(option_name);
+ /* S == 1 scales the index register by the access size. */
+ shift : int = if S == 0b1 then scale else 0;
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ m : int = UInt(Rm);
+ acctype : AccType = AccType_VEC;
+ memop : MemOp = if [opc[0]] == 0b1 then MemOp_LOAD else MemOp_STORE;
+ datasize : int = shl_int(8, scale);
+ aarch64_memory_single_simdfp_register(acctype, datasize, extend_type, m, memop, n, postindex, shift, t, wback)
+}
+
+val memory_single_simdfp_immediate_unsigned_aarch64_memory_single_simdfp_immediate_signed_postidx__decode : (bits(2), bits(1), bits(2), bits(12), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Unsigned-immediate form: offset = imm12 zero-extended and scaled by the
+   access size; no writeback. */
+function memory_single_simdfp_immediate_unsigned_aarch64_memory_single_simdfp_immediate_signed_postidx__decode (size, V, opc, imm12, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = false;
+ postindex : bool = false;
+ scale : int = UInt([opc[1]] @ size);
+ if scale > 4 then UnallocatedEncoding() else ();
+ offset : bits(64) = LSL(ZeroExtend(imm12, 64), scale);
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ acctype : AccType = AccType_VEC;
+ memop : MemOp = if [opc[0]] == 0b1 then MemOp_LOAD else MemOp_STORE;
+ datasize : int = shl_int(8, scale);
+ aarch64_memory_single_simdfp_immediate_signed_postidx(acctype, datasize, memop, n, offset, postindex, t, wback)
+}
+
+val memory_single_simdfp_immediate_signed_preidx_aarch64_memory_single_simdfp_immediate_signed_postidx__decode : (bits(2), bits(1), bits(2), bits(9), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Pre-indexed form: signed imm9 offset, writeback before the access
+   (wback true, postindex false). */
+function memory_single_simdfp_immediate_signed_preidx_aarch64_memory_single_simdfp_immediate_signed_postidx__decode (size, V, opc, imm9, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = true;
+ postindex : bool = false;
+ scale : int = UInt([opc[1]] @ size);
+ if scale > 4 then UnallocatedEncoding() else ();
+ offset : bits(64) = SignExtend(imm9, 64);
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ acctype : AccType = AccType_VEC;
+ memop : MemOp = if [opc[0]] == 0b1 then MemOp_LOAD else MemOp_STORE;
+ datasize : int = shl_int(8, scale);
+ aarch64_memory_single_simdfp_immediate_signed_postidx(acctype, datasize, memop, n, offset, postindex, t, wback)
+}
+
+val memory_single_simdfp_immediate_signed_postidx_aarch64_memory_single_simdfp_immediate_signed_postidx__decode : (bits(2), bits(1), bits(2), bits(9), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Post-indexed form: signed imm9 offset, writeback after the access
+   (wback true, postindex true). */
+function memory_single_simdfp_immediate_signed_postidx_aarch64_memory_single_simdfp_immediate_signed_postidx__decode (size, V, opc, imm9, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = true;
+ postindex : bool = true;
+ scale : int = UInt([opc[1]] @ size);
+ if scale > 4 then UnallocatedEncoding() else ();
+ offset : bits(64) = SignExtend(imm9, 64);
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ acctype : AccType = AccType_VEC;
+ memop : MemOp = if [opc[0]] == 0b1 then MemOp_LOAD else MemOp_STORE;
+ datasize : int = shl_int(8, scale);
+ aarch64_memory_single_simdfp_immediate_signed_postidx(acctype, datasize, memop, n, offset, postindex, t, wback)
+}
+
+val memory_single_simdfp_immediate_signed_offset_normal_aarch64_memory_single_simdfp_immediate_signed_offset_normal__decode : (bits(2), bits(1), bits(2), bits(9), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Unscaled signed-offset form (LDUR/STUR-style): imm9 offset, no writeback. */
+function memory_single_simdfp_immediate_signed_offset_normal_aarch64_memory_single_simdfp_immediate_signed_offset_normal__decode (size, V, opc, imm9, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = false;
+ postindex : bool = false;
+ scale : int = UInt([opc[1]] @ size);
+ if scale > 4 then UnallocatedEncoding() else ();
+ offset : bits(64) = SignExtend(imm9, 64);
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ acctype : AccType = AccType_VEC;
+ memop : MemOp = if [opc[0]] == 0b1 then MemOp_LOAD else MemOp_STORE;
+ datasize : int = shl_int(8, scale);
+ aarch64_memory_single_simdfp_immediate_signed_offset_normal(acctype, datasize, memop, n, offset, postindex, t, wback)
+}
+
+val memory_single_general_register_aarch64_memory_single_general_register__decode : (bits(2), bits(1), bits(2), bits(5), bits(3), bits(1), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decoder for general-register load/store, register-offset form
+   (auto-generated from ARM ASL). The opc/size dance below selects
+   load/store/prefetch, the register width (32/64) and signedness of the
+   loaded value; this variant allows PRFM when opc<1>:size == 1:11. */
+function memory_single_general_register_aarch64_memory_single_general_register__decode (size, V, opc, Rm, option_name, S, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = false;
+ postindex : bool = false;
+ scale : int = UInt(size);
+ if [option_name[1]] == 0b0 then UnallocatedEncoding() else ();
+ extend_type : ExtendType = DecodeRegExtend(option_name);
+ shift : int = if S == 0b1 then scale else 0;
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ m : int = UInt(Rm);
+ acctype : AccType = AccType_NORMAL;
+ memop : MemOp = undefined;
+ signed : bool = undefined;
+ regsize : int = undefined;
+ if [opc[1]] == 0b0 then {
+ /* opc<1> == 0: plain load/store, zero-extending. */
+ memop = if [opc[0]] == 0b1 then MemOp_LOAD else MemOp_STORE;
+ regsize = if size == 0b11 then 64 else 32;
+ signed = false
+ } else if size == 0b11 then {
+ /* opc<1> == 1 with size 11: prefetch (opc<0> must be 0). */
+ memop = MemOp_PREFETCH;
+ if [opc[0]] == 0b1 then UnallocatedEncoding() else ()
+ } else {
+ /* Sign-extending load; opc<0> selects 32- vs 64-bit destination. */
+ memop = MemOp_LOAD;
+ if size == 0b10 & [opc[0]] == 0b1 then UnallocatedEncoding() else ();
+ regsize = if [opc[0]] == 0b1 then 32 else 64;
+ signed = true
+ };
+ datasize : int = shl_int(8, scale);
+ aarch64_memory_single_general_register(acctype, datasize, extend_type, m, memop, n, postindex, regsize, shift, signed, t, wback)
+}
+
+val memory_single_general_immediate_unsigned_aarch64_memory_single_general_immediate_unsigned__decode : (bits(2), bits(1), bits(2), bits(12), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Unsigned scaled-immediate form; same opc/size decode as the
+   register-offset variant above (prefetch allowed). */
+function memory_single_general_immediate_unsigned_aarch64_memory_single_general_immediate_unsigned__decode (size, V, opc, imm12, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = false;
+ postindex : bool = false;
+ scale : int = UInt(size);
+ offset : bits(64) = LSL(ZeroExtend(imm12, 64), scale);
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ acctype : AccType = AccType_NORMAL;
+ memop : MemOp = undefined;
+ signed : bool = undefined;
+ regsize : int = undefined;
+ if [opc[1]] == 0b0 then {
+ memop = if [opc[0]] == 0b1 then MemOp_LOAD else MemOp_STORE;
+ regsize = if size == 0b11 then 64 else 32;
+ signed = false
+ } else if size == 0b11 then {
+ memop = MemOp_PREFETCH;
+ if [opc[0]] == 0b1 then UnallocatedEncoding() else ()
+ } else {
+ memop = MemOp_LOAD;
+ if size == 0b10 & [opc[0]] == 0b1 then UnallocatedEncoding() else ();
+ regsize = if [opc[0]] == 0b1 then 32 else 64;
+ signed = true
+ };
+ datasize : int = shl_int(8, scale);
+ aarch64_memory_single_general_immediate_unsigned(acctype, datasize, memop, n, offset, postindex, regsize, signed, t, wback)
+}
+
+val memory_single_general_immediate_unsigned_aarch64_memory_single_general_immediate_signed_postidx__decode : (bits(2), bits(1), bits(2), bits(12), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Unsigned-immediate form routed to the signed-postidx execute function;
+   unlike the variant above, opc<1> == 1 with size 11 is unallocated here
+   (no prefetch form). */
+function memory_single_general_immediate_unsigned_aarch64_memory_single_general_immediate_signed_postidx__decode (size, V, opc, imm12, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = false;
+ postindex : bool = false;
+ scale : int = UInt(size);
+ offset : bits(64) = LSL(ZeroExtend(imm12, 64), scale);
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ acctype : AccType = AccType_NORMAL;
+ memop : MemOp = undefined;
+ signed : bool = undefined;
+ regsize : int = undefined;
+ if [opc[1]] == 0b0 then {
+ memop = if [opc[0]] == 0b1 then MemOp_LOAD else MemOp_STORE;
+ regsize = if size == 0b11 then 64 else 32;
+ signed = false
+ } else if size == 0b11 then UnallocatedEncoding() else {
+ memop = MemOp_LOAD;
+ if size == 0b10 & [opc[0]] == 0b1 then UnallocatedEncoding() else ();
+ regsize = if [opc[0]] == 0b1 then 32 else 64;
+ signed = true
+ };
+ datasize : int = shl_int(8, scale);
+ aarch64_memory_single_general_immediate_signed_postidx(acctype, datasize, memop, n, offset, postindex, regsize, signed, t, wback)
+}
+
+val memory_single_general_immediate_signed_preidx_aarch64_memory_single_general_immediate_signed_postidx__decode : (bits(2), bits(1), bits(2), bits(9), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Pre-indexed form: signed imm9 offset, writeback before the access. */
+function memory_single_general_immediate_signed_preidx_aarch64_memory_single_general_immediate_signed_postidx__decode (size, V, opc, imm9, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = true;
+ postindex : bool = false;
+ scale : int = UInt(size);
+ offset : bits(64) = SignExtend(imm9, 64);
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ acctype : AccType = AccType_NORMAL;
+ memop : MemOp = undefined;
+ signed : bool = undefined;
+ regsize : int = undefined;
+ if [opc[1]] == 0b0 then {
+ memop = if [opc[0]] == 0b1 then MemOp_LOAD else MemOp_STORE;
+ regsize = if size == 0b11 then 64 else 32;
+ signed = false
+ } else if size == 0b11 then UnallocatedEncoding() else {
+ memop = MemOp_LOAD;
+ if size == 0b10 & [opc[0]] == 0b1 then UnallocatedEncoding() else ();
+ regsize = if [opc[0]] == 0b1 then 32 else 64;
+ signed = true
+ };
+ datasize : int = shl_int(8, scale);
+ aarch64_memory_single_general_immediate_signed_postidx(acctype, datasize, memop, n, offset, postindex, regsize, signed, t, wback)
+}
+
+val memory_single_general_immediate_signed_postidx_aarch64_memory_single_general_immediate_signed_postidx__decode : (bits(2), bits(1), bits(2), bits(9), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Post-indexed form: signed imm9 offset, writeback after the access. */
+function memory_single_general_immediate_signed_postidx_aarch64_memory_single_general_immediate_signed_postidx__decode (size, V, opc, imm9, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = true;
+ postindex : bool = true;
+ scale : int = UInt(size);
+ offset : bits(64) = SignExtend(imm9, 64);
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ acctype : AccType = AccType_NORMAL;
+ memop : MemOp = undefined;
+ signed : bool = undefined;
+ regsize : int = undefined;
+ if [opc[1]] == 0b0 then {
+ memop = if [opc[0]] == 0b1 then MemOp_LOAD else MemOp_STORE;
+ regsize = if size == 0b11 then 64 else 32;
+ signed = false
+ } else if size == 0b11 then UnallocatedEncoding() else {
+ memop = MemOp_LOAD;
+ if size == 0b10 & [opc[0]] == 0b1 then UnallocatedEncoding() else ();
+ regsize = if [opc[0]] == 0b1 then 32 else 64;
+ signed = true
+ };
+ datasize : int = shl_int(8, scale);
+ aarch64_memory_single_general_immediate_signed_postidx(acctype, datasize, memop, n, offset, postindex, regsize, signed, t, wback)
+}
+
+val memory_single_general_immediate_signed_pac_decode : (bits(2), bits(1), bits(1), bits(1), bits(9), bits(1), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decoder for the pointer-authentication load (LDRA*-style) encoding
+   (auto-generated from ARM ASL). Requires the PAC extension and size == 11;
+   the offset is the 10-bit S:imm9 immediate, sign-extended and scaled by 8.
+   M selects key A vs key B, W selects writeback. */
+function memory_single_general_immediate_signed_pac_decode (size, V, M, S, imm9, W, Rn, Rt) = {
+ __unconditional = true;
+ if ~(HavePACExt()) | size != 0b11 then UnallocatedEncoding() else ();
+ t : int = UInt(Rt);
+ n : int = UInt(Rn);
+ wback : bool = W == 0b1;
+ use_key_a : bool = M == 0b0;
+ S10 : bits(10) = S @ imm9;
+ scale : int = 3;
+ offset : bits(64) = LSL(SignExtend(S10, 64), scale);
+ aarch64_memory_single_general_immediate_signed_pac(n, offset, t, use_key_a, wback)
+}
+
+val memory_single_general_immediate_signed_offset_unpriv_aarch64_memory_single_general_immediate_signed_offset_unpriv__decode : (bits(2), bits(1), bits(2), bits(9), bits(5), bits(5)) -> unit effect {escape, undef, rreg, wreg, rmem, wmem}
+
+/* Decoder for the unprivileged load/store (LDTR/STTR-style) encoding
+   (auto-generated from ARM ASL). Accesses are AccType_UNPRIV unless
+   downgraded to normal by the nested-virt condition below. */
+function memory_single_general_immediate_signed_offset_unpriv_aarch64_memory_single_general_immediate_signed_offset_unpriv__decode (size, V, opc, imm9, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = false;
+ postindex : bool = false;
+ scale : int = UInt(size);
+ offset : bits(64) = SignExtend(imm9, 64);
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ acctype : AccType = AccType_UNPRIV;
+ /* NOTE(review): HCR_EL2 bits 42/43 — presumably the NV/NV1 nested-virt
+    controls; when both are set the access behaves as a normal access.
+    Confirm bit positions against the ARM ARM. */
+ if ((HaveNVExt() & HaveEL(EL2)) & [HCR_EL2[42]] == 0b1) & [HCR_EL2[43]] == 0b1 then
+ acctype = AccType_NORMAL
+ else ();
+ memop : MemOp = undefined;
+ signed : bool = undefined;
+ regsize : int = undefined;
+ if [opc[1]] == 0b0 then {
+ memop = if [opc[0]] == 0b1 then MemOp_LOAD else MemOp_STORE;
+ regsize = if size == 0b11 then 64 else 32;
+ signed = false
+ } else if size == 0b11 then UnallocatedEncoding() else {
+ memop = MemOp_LOAD;
+ if size == 0b10 & [opc[0]] == 0b1 then UnallocatedEncoding() else ();
+ regsize = if [opc[0]] == 0b1 then 32 else 64;
+ signed = true
+ };
+ datasize : int = shl_int(8, scale);
+ aarch64_memory_single_general_immediate_signed_offset_unpriv(acctype, datasize, memop, n, offset, postindex, regsize, signed, t, wback)
+}
+
+val memory_single_general_immediate_signed_offset_normal_aarch64_memory_single_general_immediate_signed_offset_normal__decode : (bits(2), bits(1), bits(2), bits(9), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Unscaled signed-offset form (LDUR/STUR-style): normal access type, no
+   writeback; allows the prefetch form when opc<1>:size == 1:11. */
+function memory_single_general_immediate_signed_offset_normal_aarch64_memory_single_general_immediate_signed_offset_normal__decode (size, V, opc, imm9, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = false;
+ postindex : bool = false;
+ scale : int = UInt(size);
+ offset : bits(64) = SignExtend(imm9, 64);
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ acctype : AccType = AccType_NORMAL;
+ memop : MemOp = undefined;
+ signed : bool = undefined;
+ regsize : int = undefined;
+ if [opc[1]] == 0b0 then {
+ memop = if [opc[0]] == 0b1 then MemOp_LOAD else MemOp_STORE;
+ regsize = if size == 0b11 then 64 else 32;
+ signed = false
+ } else if size == 0b11 then {
+ memop = MemOp_PREFETCH;
+ if [opc[0]] == 0b1 then UnallocatedEncoding() else ()
+ } else {
+ memop = MemOp_LOAD;
+ if size == 0b10 & [opc[0]] == 0b1 then UnallocatedEncoding() else ();
+ regsize = if [opc[0]] == 0b1 then 32 else 64;
+ signed = true
+ };
+ datasize : int = shl_int(8, scale);
+ aarch64_memory_single_general_immediate_signed_offset_normal(acctype, datasize, memop, n, offset, postindex, regsize, signed, t, wback)
+}
+
+val memory_pair_simdfp_preidx_aarch64_memory_pair_simdfp_postidx__decode : (bits(2), bits(1), bits(1), bits(7), bits(5), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decoder for SIMD/FP load/store pair, pre-indexed form (auto-generated
+   from ARM ASL). The four pair variants below differ only in
+   wback/postindex and (for noalloc) the access type. opc == 11 is
+   unallocated; register size is 4 << opc bytes; offset = imm7 sign-extended
+   and scaled by the register size. */
+function memory_pair_simdfp_preidx_aarch64_memory_pair_simdfp_postidx__decode (opc, V, L, imm7, Rt2, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = true;
+ postindex : bool = false;
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ t2 : int = UInt(Rt2);
+ acctype : AccType = AccType_VEC;
+ memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ if opc == 0b11 then UnallocatedEncoding() else ();
+ scale : int = 2 + UInt(opc);
+ datasize : int = shl_int(8, scale);
+ offset : bits(64) = LSL(SignExtend(imm7, 64), scale);
+ aarch64_memory_pair_simdfp_postidx(acctype, datasize, memop, n, offset, postindex, t, t2, wback)
+}
+
+val memory_pair_simdfp_postidx_aarch64_memory_pair_simdfp_postidx__decode : (bits(2), bits(1), bits(1), bits(7), bits(5), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Post-indexed pair form: writeback after the access. */
+function memory_pair_simdfp_postidx_aarch64_memory_pair_simdfp_postidx__decode (opc, V, L, imm7, Rt2, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = true;
+ postindex : bool = true;
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ t2 : int = UInt(Rt2);
+ acctype : AccType = AccType_VEC;
+ memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ if opc == 0b11 then UnallocatedEncoding() else ();
+ scale : int = 2 + UInt(opc);
+ datasize : int = shl_int(8, scale);
+ offset : bits(64) = LSL(SignExtend(imm7, 64), scale);
+ aarch64_memory_pair_simdfp_postidx(acctype, datasize, memop, n, offset, postindex, t, t2, wback)
+}
+
+val memory_pair_simdfp_offset_aarch64_memory_pair_simdfp_postidx__decode : (bits(2), bits(1), bits(1), bits(7), bits(5), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Signed-offset pair form: no writeback. */
+function memory_pair_simdfp_offset_aarch64_memory_pair_simdfp_postidx__decode (opc, V, L, imm7, Rt2, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = false;
+ postindex : bool = false;
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ t2 : int = UInt(Rt2);
+ acctype : AccType = AccType_VEC;
+ memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ if opc == 0b11 then UnallocatedEncoding() else ();
+ scale : int = 2 + UInt(opc);
+ datasize : int = shl_int(8, scale);
+ offset : bits(64) = LSL(SignExtend(imm7, 64), scale);
+ aarch64_memory_pair_simdfp_postidx(acctype, datasize, memop, n, offset, postindex, t, t2, wback)
+}
+
+val memory_pair_simdfp_noalloc_aarch64_memory_pair_simdfp_noalloc__decode : (bits(2), bits(1), bits(1), bits(7), bits(5), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Non-temporal pair form (LDNP/STNP-style): streaming access type, no
+   writeback. */
+function memory_pair_simdfp_noalloc_aarch64_memory_pair_simdfp_noalloc__decode (opc, V, L, imm7, Rt2, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = false;
+ postindex : bool = false;
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ t2 : int = UInt(Rt2);
+ acctype : AccType = AccType_VECSTREAM;
+ memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ if opc == 0b11 then UnallocatedEncoding() else ();
+ scale : int = 2 + UInt(opc);
+ datasize : int = shl_int(8, scale);
+ offset : bits(64) = LSL(SignExtend(imm7, 64), scale);
+ aarch64_memory_pair_simdfp_noalloc(acctype, datasize, memop, n, offset, postindex, t, t2, wback)
+}
+
+val memory_pair_general_preidx_aarch64_memory_pair_general_postidx__decode : (bits(2), bits(1), bits(1), bits(7), bits(5), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decoder for general-register load/store pair, pre-indexed form
+   (auto-generated from ARM ASL). The variants below differ only in
+   wback/postindex (and noalloc's access type). L:opc<0> == 01 and
+   opc == 11 are unallocated; opc<0> selects the sign-extending (LDPSW)
+   form, opc<1> selects 32- vs 64-bit registers. */
+function memory_pair_general_preidx_aarch64_memory_pair_general_postidx__decode (opc, V, L, imm7, Rt2, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = true;
+ postindex : bool = false;
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ t2 : int = UInt(Rt2);
+ acctype : AccType = AccType_NORMAL;
+ memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ if (L @ [opc[0]]) == 0b01 | opc == 0b11 then UnallocatedEncoding() else ();
+ signed : bool = [opc[0]] != 0b0;
+ scale : int = 2 + UInt([opc[1]]);
+ datasize : int = shl_int(8, scale);
+ offset : bits(64) = LSL(SignExtend(imm7, 64), scale);
+ aarch64_memory_pair_general_postidx(acctype, datasize, memop, n, offset, postindex, signed, t, t2, wback)
+}
+
+val memory_pair_general_postidx_aarch64_memory_pair_general_postidx__decode : (bits(2), bits(1), bits(1), bits(7), bits(5), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Post-indexed pair form: writeback after the access. */
+function memory_pair_general_postidx_aarch64_memory_pair_general_postidx__decode (opc, V, L, imm7, Rt2, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = true;
+ postindex : bool = true;
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ t2 : int = UInt(Rt2);
+ acctype : AccType = AccType_NORMAL;
+ memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ if (L @ [opc[0]]) == 0b01 | opc == 0b11 then UnallocatedEncoding() else ();
+ signed : bool = [opc[0]] != 0b0;
+ scale : int = 2 + UInt([opc[1]]);
+ datasize : int = shl_int(8, scale);
+ offset : bits(64) = LSL(SignExtend(imm7, 64), scale);
+ aarch64_memory_pair_general_postidx(acctype, datasize, memop, n, offset, postindex, signed, t, t2, wback)
+}
+
+val memory_pair_general_offset_aarch64_memory_pair_general_postidx__decode : (bits(2), bits(1), bits(1), bits(7), bits(5), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Signed-offset pair form: no writeback. */
+function memory_pair_general_offset_aarch64_memory_pair_general_postidx__decode (opc, V, L, imm7, Rt2, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = false;
+ postindex : bool = false;
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ t2 : int = UInt(Rt2);
+ acctype : AccType = AccType_NORMAL;
+ memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ if (L @ [opc[0]]) == 0b01 | opc == 0b11 then UnallocatedEncoding() else ();
+ signed : bool = [opc[0]] != 0b0;
+ scale : int = 2 + UInt([opc[1]]);
+ datasize : int = shl_int(8, scale);
+ offset : bits(64) = LSL(SignExtend(imm7, 64), scale);
+ aarch64_memory_pair_general_postidx(acctype, datasize, memop, n, offset, postindex, signed, t, t2, wback)
+}
+
+val memory_pair_general_noalloc_aarch64_memory_pair_general_noalloc__decode : (bits(2), bits(1), bits(1), bits(7), bits(5), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Non-temporal pair form (LDNP/STNP-style): streaming access type, no
+   writeback; opc<0> == 1 (signed form) is unallocated here. */
+function memory_pair_general_noalloc_aarch64_memory_pair_general_noalloc__decode (opc, V, L, imm7, Rt2, Rn, Rt) = {
+ __unconditional = true;
+ wback : bool = false;
+ postindex : bool = false;
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ t2 : int = UInt(Rt2);
+ acctype : AccType = AccType_STREAM;
+ memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ if [opc[0]] == 0b1 then UnallocatedEncoding() else ();
+ scale : int = 2 + UInt([opc[1]]);
+ datasize : int = shl_int(8, scale);
+ offset : bits(64) = LSL(SignExtend(imm7, 64), scale);
+ aarch64_memory_pair_general_noalloc(acctype, datasize, memop, n, offset, postindex, t, t2, wback)
+}
+
+val memory_literal_simdfp_decode : (bits(2), bits(1), bits(19), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decoder for the SIMD/FP PC-relative literal load (auto-generated from
+   ARM ASL). opc selects the access size (4/8/16 bytes, 0b11 unallocated);
+   the offset is imm19:00 sign-extended to 64 bits. */
+function memory_literal_simdfp_decode (opc, V, imm19, Rt) = {
+ __unconditional = true;
+ t : int = UInt(Rt);
+ size : int = undefined;
+ offset : bits(64) = undefined;
+ match opc {
+ 0b00 => size = 4,
+ 0b01 => size = 8,
+ 0b10 => size = 16,
+ 0b11 => UnallocatedEncoding()
+ };
+ offset = SignExtend(imm19 @ 0b00, 64);
+ aarch64_memory_literal_simdfp(offset, size, t)
+}
+
+val memory_atomicops_swp_decode : (bits(2), bits(1), bits(1), bits(1), bits(5), bits(1), bits(3), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+/* Decoder for the atomic swap (SWP*) encoding (auto-generated from ARM ASL).
+   Requires the atomics extension. The load half gets acquire semantics only
+   when A == 1 and Rt is not the zero register; the store half gets release
+   semantics when R == 1. */
+function memory_atomicops_swp_decode (size, V, A, R, Rs, o3, opc, Rn, Rt) = {
+ __unconditional = true;
+ if ~(HaveAtomicExt()) then UnallocatedEncoding() else ();
+ t : int = UInt(Rt);
+ n : int = UInt(Rn);
+ s : int = UInt(Rs);
+ datasize : int = shl_int(8, UInt(size));
+ regsize : int = if datasize == 64 then 64 else 32;
+ ldacctype : AccType = if A == 0b1 & Rt != 0b11111 then AccType_ORDEREDRW else AccType_ATOMICRW;
+ stacctype : AccType = if R == 0b1 then AccType_ORDEREDRW else AccType_ATOMICRW;
+ aarch64_memory_atomicops_swp(datasize, ldacctype, n, regsize, s, stacctype, t)
+}
+
+val memory_atomicops_st_decode : (bits(2), bits(1), bits(1), bits(1), bits(5), bits(1), bits(3), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+function memory_atomicops_st_decode (size, V, A, R, Rs, o3, opc, Rn, Rt) = {
+ __unconditional = true;
+ if ~(HaveAtomicExt()) then UnallocatedEncoding() else ();
+ n : int = UInt(Rn);
+ s : int = UInt(Rs);
+ datasize : int = shl_int(8, UInt(size));
+ regsize : int = if datasize == 64 then 64 else 32;
+ ldacctype : AccType = AccType_ATOMICRW;
+ stacctype : AccType = if R == 0b1 then AccType_ORDEREDRW else AccType_ATOMICRW;
+ op : MemAtomicOp = undefined;
+ match opc {
+ 0b000 => op = MemAtomicOp_ADD,
+ 0b001 => op = MemAtomicOp_BIC,
+ 0b010 => op = MemAtomicOp_EOR,
+ 0b011 => op = MemAtomicOp_ORR,
+ 0b100 => op = MemAtomicOp_SMAX,
+ 0b101 => op = MemAtomicOp_SMIN,
+ 0b110 => op = MemAtomicOp_UMAX,
+ 0b111 => op = MemAtomicOp_UMIN
+ };
+ aarch64_memory_atomicops_st(datasize, ldacctype, n, op, s, stacctype)
+}
+
+val memory_atomicops_ld_decode : (bits(2), bits(1), bits(1), bits(1), bits(5), bits(1), bits(3), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+function memory_atomicops_ld_decode (size, V, A, R, Rs, o3, opc, Rn, Rt) = {
+ __unconditional = true;
+ if ~(HaveAtomicExt()) then UnallocatedEncoding() else ();
+ t : int = UInt(Rt);
+ n : int = UInt(Rn);
+ s : int = UInt(Rs);
+ datasize : int = shl_int(8, UInt(size));
+ regsize : int = if datasize == 64 then 64 else 32;
+ ldacctype : AccType = if A == 0b1 & Rt != 0b11111 then AccType_ORDEREDRW else AccType_ATOMICRW;
+ stacctype : AccType = if R == 0b1 then AccType_ORDEREDRW else AccType_ATOMICRW;
+ op : MemAtomicOp = undefined;
+ match opc {
+ 0b000 => op = MemAtomicOp_ADD,
+ 0b001 => op = MemAtomicOp_BIC,
+ 0b010 => op = MemAtomicOp_EOR,
+ 0b011 => op = MemAtomicOp_ORR,
+ 0b100 => op = MemAtomicOp_SMAX,
+ 0b101 => op = MemAtomicOp_SMIN,
+ 0b110 => op = MemAtomicOp_UMAX,
+ 0b111 => op = MemAtomicOp_UMIN
+ };
+ aarch64_memory_atomicops_ld(datasize, ldacctype, n, op, regsize, s, stacctype, t)
+}
+
+val memory_atomicops_cas_single_decode : (bits(2), bits(1), bits(1), bits(1), bits(5), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+function memory_atomicops_cas_single_decode (size, o2, L, o1, Rs, o0, Rt2, Rn, Rt) = {
+ __unconditional = true;
+ if ~(HaveAtomicExt()) then UnallocatedEncoding() else ();
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ s : int = UInt(Rs);
+ datasize : int = shl_int(8, UInt(size));
+ regsize : int = if datasize == 64 then 64 else 32;
+ ldacctype : AccType = if L == 0b1 then AccType_ORDEREDRW else AccType_ATOMICRW;
+ stacctype : AccType = if o0 == 0b1 then AccType_ORDEREDRW else AccType_ATOMICRW;
+ aarch64_memory_atomicops_cas_single(datasize, ldacctype, n, regsize, s, stacctype, t)
+}
+
+val memory_atomicops_cas_pair_decode : (bits(1), bits(1), bits(1), bits(1), bits(5), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+function memory_atomicops_cas_pair_decode (sz, o2, L, o1, Rs, o0, Rt2, Rn, Rt) = {
+ __unconditional = true;
+ if ~(HaveAtomicExt()) then UnallocatedEncoding() else ();
+ if [Rs[0]] == 0b1 then UnallocatedEncoding() else ();
+ if [Rt[0]] == 0b1 then UnallocatedEncoding() else ();
+ n : int = UInt(Rn);
+ t : int = UInt(Rt);
+ s : int = UInt(Rs);
+ datasize : int = shl_int(32, UInt(sz));
+ regsize : int = datasize;
+ ldacctype : AccType = if L == 0b1 then AccType_ORDEREDRW else AccType_ATOMICRW;
+ stacctype : AccType = if o0 == 0b1 then AccType_ORDEREDRW else AccType_ATOMICRW;
+ aarch64_memory_atomicops_cas_pair(datasize, ldacctype, n, regsize, s, stacctype, t)
+}
+
+val integer_pac_strip_dp_1src_decode : (bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode XPACI/XPACD (strip pointer authentication code): requires the PAC
+   extension and Rn == 31; D selects the data-key variant. */
+function integer_pac_strip_dp_1src_decode (sf, S, opcode2, D, Rn, Rd) = {
+ __unconditional = true;
+ data : bool = D == 0b1;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HavePACExt()) then UnallocatedEncoding() else ();
+ if n != 31 then UnallocatedEncoding() else ();
+ aarch64_integer_pac_strip_dp_1src(d, data)
+}
+
+val integer_pac_pacib_dp_1src_decode : (bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* PAC sign/authenticate decoder family.  All the one-source forms share the
+   same shape: require the PAC extension; with Z == 0, n == 31 means the
+   modifier is SP (source_is_sp), while with Z == 1 (the ...Z zero-modifier
+   forms) Rn must be 31 or the encoding is unallocated. */
+function integer_pac_pacib_dp_1src_decode (sf, S, opcode2, Z, Rn, Rd) = {
+ __unconditional = true;
+ source_is_sp : bool = false;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HavePACExt()) then UnallocatedEncoding() else ();
+ if Z == 0b0 then if n == 31 then source_is_sp = true else () else if n != 31 then UnallocatedEncoding() else ();
+ aarch64_integer_pac_pacib_dp_1src(d, n, source_is_sp)
+}
+
+val integer_pac_pacia_dp_1src_decode : (bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Sign with instruction key A (see family comment on the PACIB decoder). */
+function integer_pac_pacia_dp_1src_decode (sf, S, opcode2, Z, Rn, Rd) = {
+ __unconditional = true;
+ source_is_sp : bool = false;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HavePACExt()) then UnallocatedEncoding() else ();
+ if Z == 0b0 then if n == 31 then source_is_sp = true else () else if n != 31 then UnallocatedEncoding() else ();
+ aarch64_integer_pac_pacia_dp_1src(d, n, source_is_sp)
+}
+
+val integer_pac_pacga_dp_2src_decode : (bits(1), bits(1), bits(1), bits(5), bits(6), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* PACGA (generic authentication code, two-source form): here the modifier
+   register is Rm, and m == 31 selects SP as the modifier. */
+function integer_pac_pacga_dp_2src_decode (sf, op, S, Rm, opcode2, Rn, Rd) = {
+ __unconditional = true;
+ source_is_sp : bool = false;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if ~(HavePACExt()) then UnallocatedEncoding() else ();
+ if m == 31 then source_is_sp = true else ();
+ aarch64_integer_pac_pacga_dp_2src(d, m, n, source_is_sp)
+}
+
+val integer_pac_pacdb_dp_1src_decode : (bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Sign with data key B (see family comment on the PACIB decoder). */
+function integer_pac_pacdb_dp_1src_decode (sf, S, opcode2, Z, Rn, Rd) = {
+ __unconditional = true;
+ source_is_sp : bool = false;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HavePACExt()) then UnallocatedEncoding() else ();
+ if Z == 0b0 then if n == 31 then source_is_sp = true else () else if n != 31 then UnallocatedEncoding() else ();
+ aarch64_integer_pac_pacdb_dp_1src(d, n, source_is_sp)
+}
+
+val integer_pac_pacda_dp_1src_decode : (bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Sign with data key A (see family comment on the PACIB decoder). */
+function integer_pac_pacda_dp_1src_decode (sf, S, opcode2, Z, Rn, Rd) = {
+ __unconditional = true;
+ source_is_sp : bool = false;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HavePACExt()) then UnallocatedEncoding() else ();
+ if Z == 0b0 then if n == 31 then source_is_sp = true else () else if n != 31 then UnallocatedEncoding() else ();
+ aarch64_integer_pac_pacda_dp_1src(d, n, source_is_sp)
+}
+
+val integer_pac_autib_dp_1src_decode : (bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Authenticate with instruction key B (see family comment above). */
+function integer_pac_autib_dp_1src_decode (sf, S, opcode2, Z, Rn, Rd) = {
+ __unconditional = true;
+ source_is_sp : bool = false;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HavePACExt()) then UnallocatedEncoding() else ();
+ if Z == 0b0 then if n == 31 then source_is_sp = true else () else if n != 31 then UnallocatedEncoding() else ();
+ aarch64_integer_pac_autib_dp_1src(d, n, source_is_sp)
+}
+
+val integer_pac_autia_dp_1src_decode : (bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Authenticate with instruction key A (see family comment above). */
+function integer_pac_autia_dp_1src_decode (sf, S, opcode2, Z, Rn, Rd) = {
+ __unconditional = true;
+ source_is_sp : bool = false;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HavePACExt()) then UnallocatedEncoding() else ();
+ if Z == 0b0 then if n == 31 then source_is_sp = true else () else if n != 31 then UnallocatedEncoding() else ();
+ aarch64_integer_pac_autia_dp_1src(d, n, source_is_sp)
+}
+
+val integer_pac_autdb_dp_1src_decode : (bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Authenticate with data key B (see family comment above). */
+function integer_pac_autdb_dp_1src_decode (sf, S, opcode2, Z, Rn, Rd) = {
+ __unconditional = true;
+ source_is_sp : bool = false;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HavePACExt()) then UnallocatedEncoding() else ();
+ if Z == 0b0 then if n == 31 then source_is_sp = true else () else if n != 31 then UnallocatedEncoding() else ();
+ aarch64_integer_pac_autdb_dp_1src(d, n, source_is_sp)
+}
+
+val integer_pac_autda_dp_1src_decode : (bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Authenticate with data key A (see family comment above). */
+function integer_pac_autda_dp_1src_decode (sf, S, opcode2, Z, Rn, Rd) = {
+ __unconditional = true;
+ source_is_sp : bool = false;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if ~(HavePACExt()) then UnallocatedEncoding() else ();
+ if Z == 0b0 then if n == 31 then source_is_sp = true else () else if n != 31 then UnallocatedEncoding() else ();
+ aarch64_integer_pac_autda_dp_1src(d, n, source_is_sp)
+}
+
+val integer_insext_insert_movewide_decode : (bits(1), bits(2), bits(2), bits(16), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode MOVN/MOVZ/MOVK (move wide immediate): opc selects the operation
+   (0b01 falls to the wildcard arm and is unallocated); hw gives the 16-bit
+   shift amount in multiples of 16 (hw<1> must be 0 for 32-bit forms). */
+function integer_insext_insert_movewide_decode (sf, opc, hw, imm16, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ imm : bits(16) = imm16;
+ pos : int = undefined;
+ opcode : MoveWideOp = undefined;
+ match opc {
+ 0b00 => opcode = MoveWideOp_N,
+ 0b10 => opcode = MoveWideOp_Z,
+ 0b11 => opcode = MoveWideOp_K,
+ _ => UnallocatedEncoding()
+ };
+ if sf == 0b0 & [hw[1]] == 0b1 then UnallocatedEncoding() else ();
+ /* pos = UInt(hw) * 16: append four zero bits to hw */
+ pos = UInt(hw @ 0x0);
+ aarch64_integer_insext_insert_movewide(d, datasize, imm, opcode, pos)
+}
+
+val integer_crc_decode : (bits(1), bits(1), bits(1), bits(5), bits(3), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode CRC32/CRC32C: sz gives the source chunk size in bytes (8 << sz
+   bits); the 64-bit chunk (sz == 0b11) is only valid with sf == 1 and
+   vice versa.  C selects the Castagnoli polynomial variant. */
+function integer_crc_decode (sf, op, S, Rm, opcode2, C, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if sf == 0b1 & sz != 0b11 then UnallocatedEncoding() else ();
+ if sf == 0b0 & sz == 0b11 then UnallocatedEncoding() else ();
+ size : int = shl_int(8, UInt(sz));
+ crc32c : bool = C == 0b1;
+ aarch64_integer_crc(crc32c, d, m, n, size)
+}
+
+val integer_arithmetic_rev_decode : (bits(1), bits(1), bits(5), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode REV16/REV32/REV (byte-reverse within containers): opc selects the
+   container size.  opc == 0b00 is routed elsewhere by the decoder, hence
+   Unreachable(); the 64-bit container requires sf == 1. */
+function integer_arithmetic_rev_decode (sf, S, opcode2, opc, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ container_size : int = undefined;
+ match opc {
+ 0b00 => Unreachable(),
+ 0b01 => container_size = 16,
+ 0b10 => container_size = 32,
+ 0b11 => {
+ if sf == 0b0 then UnallocatedEncoding() else ();
+ container_size = 64
+ }
+ };
+ aarch64_integer_arithmetic_rev(container_size, d, datasize, n)
+}
+
+val float_move_fp_select_decode : (bits(1), bits(1), bits(2), bits(5), bits(4), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode FCSEL (FP conditional select).  typ gives the operand size: 32,
+   64, or — with the FP16 extension — 16 bits; typ == 0b10 is unallocated. */
+function float_move_fp_select_decode (M, S, typ, Rm, cond, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ datasize : int = undefined;
+ match typ {
+ 0b00 => datasize = 32,
+ 0b01 => datasize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => if HaveFP16Ext() then datasize = 16 else UnallocatedEncoding()
+ };
+ condition : bits(4) = cond;
+ aarch64_float_move_fp_select(condition, d, datasize, m, n)
+}
+
+val float_move_fp_imm_decode : (bits(1), bits(1), bits(2), bits(8), bits(5), bits(5)) -> unit effect {escape, undef, rreg, wreg}
+
+/* Decode FMOV (scalar immediate): same typ -> datasize table as FCSEL; the
+   8-bit "minifloat" immediate is expanded to the full width by
+   VFPExpandImm.  The ex_int/constraint dance converts the runtime datasize
+   into a type-level size for the bitvector width. */
+function float_move_fp_imm_decode (M, S, typ, imm8, imm5, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ datasize : int = undefined;
+ match typ {
+ 0b00 => datasize = 32,
+ 0b01 => datasize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => if HaveFP16Ext() then datasize = 16
+ else UnallocatedEncoding()
+ };
+ let 'datasize2 = ex_int(datasize);
+ assert(constraint('datasize2 in {16, 32, 64}));
+ imm : bits('datasize2) = VFPExpandImm(imm8);
+ aarch64_float_move_fp_imm(d, datasize2, imm)
+}
+
+val float_convert_int_decode : (bits(1), bits(1), bits(2), bits(2), bits(3), bits(5), bits(5)) -> unit effect {escape, undef, rreg, wreg}
+
+/* Decode FP<->integer conversions and FMOV between register files.  The
+   combined key opcode<2:1> @ rmode selects the operation class: FP->int
+   converts with a decoded or fixed rounding mode, int->FP converts, plain
+   FMOV moves (including the top-half Vd[1] form, key 0xD, which forces
+   fltsize 128 then 64), and 0xF is FJCVTZS (JavaScript convert). */
+function float_convert_int_decode (sf, S, typ, rmode, opcode, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ let 'intsize : {|32, 64|} = if sf == 0b1 then 64 else 32;
+ fltsize : int = undefined;
+ op : FPConvOp = undefined;
+ rounding : FPRounding = undefined;
+ unsigned : bool = undefined;
+ part : int = undefined;
+ match typ {
+ 0b00 => fltsize = 32,
+ 0b01 => fltsize = 64,
+ 0b10 => {
+ /* typ == 0b10 is only valid for the top-half FMOV form (key 0xD) */
+ if (slice(opcode, 1, 2) @ rmode) != 0xD then UnallocatedEncoding() else ();
+ fltsize = 128
+ },
+ 0b11 => if HaveFP16Ext() then fltsize = 16
+ else UnallocatedEncoding()
+ };
+ match slice(opcode, 1, 2) @ rmode {
+ [bitzero] @ [bitzero] @ _ : bits(1) @ _ : bits(1) => {
+ /* keys 0x0..0x3: FP -> integer, rounding mode taken from rmode */
+ rounding = FPDecodeRounding(rmode);
+ unsigned = [opcode[0]] == 0b1;
+ op = FPConvOp_CVT_FtoI
+ },
+ 0x4 => {
+ rounding = FPRoundingMode(FPCR);
+ unsigned = [opcode[0]] == 0b1;
+ op = FPConvOp_CVT_ItoF
+ },
+ 0x8 => {
+ rounding = FPRounding_TIEAWAY;
+ unsigned = [opcode[0]] == 0b1;
+ op = FPConvOp_CVT_FtoI
+ },
+ 0xC => {
+ if fltsize != 16 & fltsize != intsize then UnallocatedEncoding() else ();
+ op = if [opcode[0]] == 0b1 then FPConvOp_MOV_ItoF else FPConvOp_MOV_FtoI;
+ part = 0
+ },
+ 0xD => {
+ if intsize : int != 64 | fltsize != 128 then UnallocatedEncoding() else ();
+ op = if [opcode[0]] == 0b1 then FPConvOp_MOV_ItoF else FPConvOp_MOV_FtoI;
+ part = 1;
+ fltsize = 64
+ },
+ 0xF => {
+ if ~(HaveFJCVTZSExt()) then UnallocatedEncoding() else ();
+ rounding = FPRounding_ZERO;
+ unsigned = [opcode[0]] == 0b1;
+ op = FPConvOp_CVT_FtoI_JS
+ },
+ _ => UnallocatedEncoding()
+ };
+ let 'fltsize2 = ex_int(fltsize);
+ assert(constraint('fltsize2 >= 0));
+ aarch64_float_convert_int(d, fltsize2, intsize, n, op, part, rounding, unsigned)
+}
+
+val float_convert_fp_decode : (bits(1), bits(1), bits(2), bits(2), bits(5), bits(5)) -> unit effect {escape, undef, rreg, wreg}
+
+/* Decode FCVT (FP precision conversion): typ is the source size, opc the
+   destination size (32/64/16; 0b10 unallocated); converting to the same
+   size (typ == opc) is unallocated. */
+function float_convert_fp_decode (M, S, typ, opc, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if typ == opc then UnallocatedEncoding() else ();
+ srcsize : int = undefined;
+ match typ {
+ 0b00 => srcsize = 32,
+ 0b01 => srcsize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => srcsize = 16
+ };
+ dstsize : int = undefined;
+ match opc {
+ 0b00 => dstsize = 32,
+ 0b01 => dstsize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => dstsize = 16
+ };
+ let 'dstsize2 = ex_int(dstsize) in let 'srcsize2 = ex_int(srcsize) in {
+ assert(constraint('srcsize2 >= 0 & 'dstsize2 >= 0));
+ aarch64_float_convert_fp(d, dstsize2, n, srcsize2)
+ }
+}
+
+val float_convert_fix_decode : (bits(1), bits(1), bits(2), bits(2), bits(3), bits(6), bits(5), bits(5)) -> unit effect {escape, undef, rreg, wreg}
+
+/* Decode fixed-point <-> FP conversions (FCVTZS/FCVTZU/SCVTF/UCVTF with a
+   fracbits immediate): fracbits = 64 - UInt(scale); 32-bit forms require
+   scale<5> == 1.  Key 0x3 is FP -> fixed (round to zero), 0x4 is
+   fixed -> FP (FPCR rounding). */
+function float_convert_fix_decode (sf, S, typ, rmode, opcode, scale, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ intsize : int = if sf == 0b1 then 64 else 32;
+ fltsize : int = undefined;
+ op : FPConvOp = undefined;
+ rounding : FPRounding = undefined;
+ unsigned : bool = undefined;
+ match typ {
+ 0b00 => fltsize = 32,
+ 0b01 => fltsize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => if HaveFP16Ext() then fltsize = 16
+ else UnallocatedEncoding()
+ };
+ if sf == 0b0 & [scale[5]] == 0b0 then UnallocatedEncoding() else ();
+ fracbits : int = 64 - UInt(scale);
+ match slice(opcode, 1, 2) @ rmode {
+ 0x3 => {
+ rounding = FPRounding_ZERO;
+ unsigned = [opcode[0]] == 0b1;
+ op = FPConvOp_CVT_FtoI
+ },
+ 0x4 => {
+ rounding = FPRoundingMode(FPCR);
+ unsigned = [opcode[0]] == 0b1;
+ op = FPConvOp_CVT_ItoF
+ },
+ _ => UnallocatedEncoding()
+ };
+ let 'fltsize2 = ex_int(fltsize) in let 'intsize2 = ex_int(intsize) in {
+ assert(constraint('fltsize2 >= 0 & 'intsize2 >= 0));
+ aarch64_float_convert_fix(d, fltsize2, fracbits, intsize2, n, op, rounding, unsigned)
+ }
+}
+
+val float_compare_uncond_decode : (bits(1), bits(1), bits(2), bits(5), bits(2), bits(5), bits(2)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode FCMP/FCMPE: opc<1> selects the signalling form (exception on any
+   NaN), opc<0> selects comparison against zero instead of Rm. */
+function float_compare_uncond_decode (M, S, typ, Rm, op, Rn, opc) = {
+ __unconditional = true;
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ datasize : int = undefined;
+ match typ {
+ 0b00 => datasize = 32,
+ 0b01 => datasize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => if HaveFP16Ext() then datasize = 16 else UnallocatedEncoding()
+ };
+ signal_all_nans : bool = [opc[1]] == 0b1;
+ cmp_with_zero : bool = [opc[0]] == 0b1;
+ aarch64_float_compare_uncond(cmp_with_zero, datasize, m, n, signal_all_nans)
+}
+
+val float_compare_cond_decode : (bits(1), bits(1), bits(2), bits(5), bits(4), bits(5), bits(1), bits(4)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode FCCMP/FCCMPE (conditional compare): nzcv supplies the flags used
+   when the condition fails; op selects the signalling form. */
+function float_compare_cond_decode (M, S, typ, Rm, cond, Rn, op, nzcv) = {
+ __unconditional = true;
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ datasize : int = undefined;
+ match typ {
+ 0b00 => datasize = 32,
+ 0b01 => datasize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => if HaveFP16Ext() then datasize = 16 else UnallocatedEncoding()
+ };
+ signal_all_nans : bool = op == 0b1;
+ condition : bits(4) = cond;
+ flags : bits(4) = nzcv;
+ aarch64_float_compare_cond(condition, datasize, flags, m, n, signal_all_nans)
+}
+
+val float_arithmetic_unary_decode : (bits(1), bits(1), bits(2), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode the scalar FP unary operations FMOV/FABS/FNEG/FSQRT, selected by
+   opc; typ gives the operand size as in the other scalar FP decoders. */
+function float_arithmetic_unary_decode (M, S, typ, opc, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ datasize : int = undefined;
+ match typ {
+ 0b00 => datasize = 32,
+ 0b01 => datasize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => if HaveFP16Ext() then datasize = 16 else UnallocatedEncoding()
+ };
+ fpop : FPUnaryOp = undefined;
+ match opc {
+ 0b00 => fpop = FPUnaryOp_MOV,
+ 0b01 => fpop = FPUnaryOp_ABS,
+ 0b10 => fpop = FPUnaryOp_NEG,
+ 0b11 => fpop = FPUnaryOp_SQRT
+ };
+ aarch64_float_arithmetic_unary(d, datasize, fpop, n)
+}
+
+val float_arithmetic_round_decode : (bits(1), bits(1), bits(2), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode FRINT* (round to integral in FP format): rmode 0xx uses the
+   directly-encoded rounding mode, 100 ties away, 110 is the exact form
+   using the FPCR mode, 111 uses the FPCR mode; 101 is unallocated. */
+function float_arithmetic_round_decode (M, S, typ, rmode, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ datasize : int = undefined;
+ match typ {
+ 0b00 => datasize = 32,
+ 0b01 => datasize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => if HaveFP16Ext() then datasize = 16 else UnallocatedEncoding()
+ };
+ exact : bool = false;
+ rounding : FPRounding = undefined;
+ match rmode {
+ [bitzero] @ _ : bits(1) @ _ : bits(1) => rounding = FPDecodeRounding(slice(rmode, 0, 2)),
+ 0b100 => rounding = FPRounding_TIEAWAY,
+ 0b101 => UnallocatedEncoding(),
+ 0b110 => {
+ rounding = FPRoundingMode(FPCR);
+ exact = true
+ },
+ 0b111 => rounding = FPRoundingMode(FPCR)
+ };
+ aarch64_float_arithmetic_round(d, datasize, exact, n, rounding)
+}
+
+val float_arithmetic_mul_product_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode FMUL/FNMUL (op selects negation of the product); typ gives the
+   operand size (32/64/16 with FP16, 0b10 unallocated) as in the rest of
+   the scalar FP decoders below. */
+function float_arithmetic_mul_product_decode (M, S, typ, Rm, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ datasize : int = undefined;
+ match typ {
+ 0b00 => datasize = 32,
+ 0b01 => datasize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => if HaveFP16Ext() then datasize = 16 else UnallocatedEncoding()
+ };
+ negated : bool = op == 0b1;
+ aarch64_float_arithmetic_mul_product(d, datasize, m, n, negated)
+}
+
+val float_arithmetic_mul_addsub_decode : (bits(1), bits(1), bits(2), bits(1), bits(5), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode FMADD/FMSUB/FNMADD/FNMSUB: o1 negates the addend (opa_neg),
+   o0 != o1 negates the product operand (op1_neg). */
+function float_arithmetic_mul_addsub_decode (M, S, typ, o1, Rm, o0, Ra, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ a : int = UInt(Ra);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ datasize : int = undefined;
+ match typ {
+ 0b00 => datasize = 32,
+ 0b01 => datasize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => if HaveFP16Ext() then datasize = 16 else UnallocatedEncoding()
+ };
+ opa_neg : bool = o1 == 0b1;
+ op1_neg : bool = o0 != o1;
+ aarch64_float_arithmetic_mul_addsub(a, d, datasize, m, n, op1_neg, opa_neg)
+}
+
+val float_arithmetic_maxmin_decode : (bits(1), bits(1), bits(2), bits(5), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode FMAX/FMIN/FMAXNM/FMINNM, selected by op. */
+function float_arithmetic_maxmin_decode (M, S, typ, Rm, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ datasize : int = undefined;
+ match typ {
+ 0b00 => datasize = 32,
+ 0b01 => datasize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => if HaveFP16Ext() then datasize = 16 else UnallocatedEncoding()
+ };
+ operation : FPMaxMinOp = undefined;
+ match op {
+ 0b00 => operation = FPMaxMinOp_MAX,
+ 0b01 => operation = FPMaxMinOp_MIN,
+ 0b10 => operation = FPMaxMinOp_MAXNUM,
+ 0b11 => operation = FPMaxMinOp_MINNUM
+ };
+ aarch64_float_arithmetic_maxmin(d, datasize, m, n, operation)
+}
+
+val float_arithmetic_div_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode FDIV (scalar). */
+function float_arithmetic_div_decode (M, S, typ, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ datasize : int = undefined;
+ match typ {
+ 0b00 => datasize = 32,
+ 0b01 => datasize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => if HaveFP16Ext() then datasize = 16 else UnallocatedEncoding()
+ };
+ aarch64_float_arithmetic_div(d, datasize, m, n)
+}
+
+val float_arithmetic_addsub_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode FADD/FSUB (scalar), op selecting subtraction. */
+function float_arithmetic_addsub_decode (M, S, typ, Rm, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ datasize : int = undefined;
+ match typ {
+ 0b00 => datasize = 32,
+ 0b01 => datasize = 64,
+ 0b10 => UnallocatedEncoding(),
+ 0b11 => if HaveFP16Ext() then datasize = 16 else UnallocatedEncoding()
+ };
+ sub_op : bool = op == 0b1;
+ aarch64_float_arithmetic_addsub(d, datasize, m, n, sub_op)
+}
+
+val branch_unconditional_register_decode : (bits(1), bits(1), bits(2), bits(5), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode BR/BLR/RET and their pointer-authenticating variants (BRAA etc.):
+   op selects jump/call/return; A enables PAC authentication (requires the
+   PAC extension), M selects key A vs. B, Z/m determine whether the
+   modifier is SP.  For authenticated RET the link register (n = 30) is
+   used and Rn must encode 31. */
+function branch_unconditional_register_decode (Z, opc, op, op2, op3, A, M, Rn, Rm) = {
+ __unconditional = true;
+ n : int = UInt(Rn);
+ branch_type : BranchType = undefined;
+ m : int = UInt(Rm);
+ pac : bool = A == 0b1;
+ use_key_a : bool = M == 0b0;
+ source_is_sp : bool = Z == 0b1 & m == 31;
+ if ~(pac) & m != 0 then UnallocatedEncoding() else if pac & ~(HavePACExt()) then UnallocatedEncoding() else ();
+ match op {
+ 0b00 => branch_type = BranchType_JMP,
+ 0b01 => branch_type = BranchType_CALL,
+ 0b10 => branch_type = BranchType_RET,
+ _ => UnallocatedEncoding()
+ };
+ if pac then {
+ if Z == 0b0 & m != 31 then UnallocatedEncoding() else ();
+ if branch_type == BranchType_RET then {
+ if n != 31 then UnallocatedEncoding() else ();
+ n = 30;
+ source_is_sp = true
+ } else ()
+ } else ();
+ aarch64_branch_unconditional_register(branch_type, m, n, pac, source_is_sp, use_key_a)
+}
+
+val branch_unconditional_eret_decode : (bits(4), bits(5), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode ERET (and the authenticated ERETAA/ERETAB forms): undefined at
+   EL0; the fixed fields Rn and op4 must hold their required values. */
+function branch_unconditional_eret_decode (opc, op2, op3, A, M, Rn, op4) = {
+ __unconditional = true;
+ if PSTATE.EL == EL0 then UnallocatedEncoding() else ();
+ pac : bool = A == 0b1;
+ use_key_a : bool = M == 0b0;
+ if ~(pac) & op4 != 0b00000 then UnallocatedEncoding() else if pac & (~(HavePACExt()) | op4 != 0b11111) then UnallocatedEncoding() else ();
+ if Rn != 0b11111 then UnallocatedEncoding() else ();
+ aarch64_branch_unconditional_eret(pac, use_key_a)
+}
+
+val branch_unconditional_dret_decode : (bits(4), bits(5), bits(6), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode DRPS (debug restore process state): only valid in Debug state and
+   above EL0. */
+function branch_unconditional_dret_decode (opc, op2, op3, Rt, op4) = {
+ __unconditional = true;
+ if ~(Halted()) | PSTATE.EL == EL0 then UnallocatedEncoding() else ();
+ aarch64_branch_unconditional_dret()
+}
+
+val AArch64_CheckSystemAccess : (bits(2), bits(3), bits(4), bits(4), bits(3), bits(5), bits(1)) -> unit effect {escape, undef, rreg, wreg}
+
+/* Check permissions for a system-register or system-instruction access
+   (MSR/MRS/SYS): determines the minimum EL from op1, checks Secure-state
+   requirements and unallocated encodings, and raises traps to EL2 where
+   HCR_EL2 bits require it, then applies the AdvSIMD/FP and system-register
+   trap checks.  NOTE(review): rcs_el0_trap is read while still undefined
+   in the first branch — faithful to the generated model, but worth
+   confirming against the source ASL. */
+function AArch64_CheckSystemAccess (op0, op1, crn, crm, op2, rt, read) = {
+ unallocated : bool = false;
+ need_secure : bool = false;
+ min_EL : bits(2) = undefined;
+ rcs_el0_trap : bool = undefined;
+ if (((HaveEL(EL2) & ~(IsSecure())) & [HCR_EL2[20]] == 0b1) & (op0 & 0b01) == 0b01) & (crn & 0xB) == 0xB then {
+ rcs_el0_trap = undefined;
+ if PSTATE.EL == EL0 & rcs_el0_trap then AArch64_SystemRegisterTrap(EL2, op0, op2, op1, crn, rt, crm, read) else if PSTATE.EL == EL1 then AArch64_SystemRegisterTrap(EL2, op0, op2, op1, crn, rt, crm, read) else ()
+ } else ();
+ /* op1 determines the lowest EL allowed to perform the access */
+ match op1 {
+ [bitzero] @ [bitzero] @ _ : bits(1) => min_EL = EL1,
+ 0b010 => min_EL = EL1,
+ 0b011 => min_EL = EL0,
+ 0b100 => min_EL = EL2,
+ 0b101 => {
+ if ~(HaveVirtHostExt()) then UnallocatedEncoding() else ();
+ min_EL = EL2
+ },
+ 0b110 => min_EL = EL3,
+ 0b111 => {
+ min_EL = EL1;
+ need_secure = true
+ }
+ };
+ if UInt(PSTATE.EL) < UInt(min_EL) then
+ if ((((PSTATE.EL == EL1 & min_EL == EL2) & HaveNVExt()) & ~(IsSecure())) & HaveEL(EL2)) & [HCR_EL2[42]] == 0b1 then AArch64_SystemRegisterTrap(EL2, op0, op2, op1, crn, rt, crm, read) else UnallocatedEncoding()
+ else if need_secure & ~(IsSecure()) then UnallocatedEncoding() else if AArch64_CheckUnallocatedSystemAccess(op0, op1, crn, crm, op2, read) then UnallocatedEncoding() else ();
+ target_el : bits(2) = undefined;
+ take_trap : bool = undefined;
+ (take_trap, target_el) = AArch64_CheckAdvSIMDFPSystemRegisterTraps(op0, op1, crn, crm, op2, read);
+ if take_trap then AArch64_AdvSIMDFPAccessTrap(target_el) else ();
+ (take_trap, target_el) = AArch64_CheckSystemRegisterTraps(op0, op1, crn, crm, op2, read);
+ if take_trap then AArch64_SystemRegisterTrap(target_el, op0, op2, op1, crn, rt, crm, read) else ()
+}
+
+val system_sysops_decode : (bits(1), bits(2), bits(3), bits(4), bits(4), bits(3), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SYS/SYSL (op0 = 1 system instructions): first run the access
+   permission check, then pass the decomposed op/CR fields to the execute
+   function; L == 1 means the instruction returns a result in Rt. */
+function system_sysops_decode (L, op0, op1, CRn, CRm, op2, Rt) = {
+ __unconditional = true;
+ AArch64_CheckSystemAccess(0b01, op1, CRn, CRm, op2, Rt, L);
+ t : int = UInt(Rt);
+ sys_op0 : int = 1;
+ sys_op1 : int = UInt(op1);
+ sys_op2 : int = UInt(op2);
+ sys_crn : int = UInt(CRn);
+ sys_crm : int = UInt(CRm);
+ has_result : bool = L == 0b1;
+ aarch64_system_sysops(has_result, sys_crm, sys_crn, sys_op0, sys_op1, sys_op2, t)
+}
+
+val system_register_system_decode : (bits(1), bits(1), bits(3), bits(4), bits(4), bits(3), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode MSR/MRS (register form, op0 = 2 or 3 depending on o0): L == 1 is
+   a read (MRS), otherwise a write (MSR). */
+function system_register_system_decode (L, o0, op1, CRn, CRm, op2, Rt) = {
+ __unconditional = true;
+ AArch64_CheckSystemAccess(0b1 @ o0, op1, CRn, CRm, op2, Rt, L);
+ t : int = UInt(Rt);
+ sys_op0 : int = 2 + UInt(o0);
+ sys_op1 : int = UInt(op1);
+ sys_op2 : int = UInt(op2);
+ sys_crn : int = UInt(CRn);
+ sys_crm : int = UInt(CRm);
+ read : bool = L == 0b1;
+ aarch64_system_register_system(read, sys_crm, sys_crn, sys_op0, sys_op1, sys_op2, t)
+}
+
+val system_register_cpsr_decode : (bits(1), bits(2), bits(3), bits(4), bits(4), bits(3), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode MSR (immediate) writes to PSTATE fields: op1 @ op2 selects the
+   field (UAO and PAN gated on their extensions; SP, DAIFSet, DAIFClr
+   always); EL0 access to the DAIF forms can trap to EL1 depending on
+   SCTLR_EL1 bit 9 and host mode. */
+function system_register_cpsr_decode (L, op0, op1, CRn, CRm, op2, Rt) = {
+ __unconditional = true;
+ AArch64_CheckSystemAccess(0b00, op1, 0x4, CRm, op2, 0b11111, 0b0);
+ operand : bits(4) = CRm;
+ field : PSTATEField = undefined;
+ match op1 @ op2 {
+ 0b000011 => {
+ if ~(HaveUAOExt()) then UnallocatedEncoding() else ();
+ field = PSTATEField_UAO
+ },
+ 0b000100 => {
+ if ~(HavePANExt()) then UnallocatedEncoding() else ();
+ field = PSTATEField_PAN
+ },
+ 0b000101 => field = PSTATEField_SP,
+ 0b011110 => field = PSTATEField_DAIFSet,
+ 0b011111 => field = PSTATEField_DAIFClr,
+ _ => UnallocatedEncoding()
+ };
+ if (op1 == 0b011 & PSTATE.EL == EL0) & (IsInHost() | [SCTLR_EL1[9]] == 0b0) then AArch64_SystemRegisterTrap(EL1, 0b00, op2, op1, 0x4, 0b11111, CRm, 0b0) else ();
+ aarch64_system_register_cpsr(field, operand)
+}
+
+val AArch64_CheckForSMCUndefOrTrap : bits(16) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decide whether an SMC instruction is undefined or trapped: undefined at
+   EL0; without EL3 it is undefined unless nested-virt/trapping conditions
+   (HCR_EL2 bits 42 and 19) route it to EL2; when routed, take a
+   MonitorCall exception to EL2 carrying the 16-bit immediate in the
+   syndrome. */
+function AArch64_CheckForSMCUndefOrTrap imm = {
+ if PSTATE.EL == EL0 then UnallocatedEncoding() else ();
+ route_to_el2 : bool = undefined;
+ if ~(HaveEL(EL3)) then if (HaveEL(EL2) & ~(IsSecure())) & PSTATE.EL == EL1 then if (HaveNVExt() & [HCR_EL2[42]] == 0b1) & [HCR_EL2[19]] == 0b1 then route_to_el2 = true else UnallocatedEncoding() else UnallocatedEncoding() else route_to_el2 = ((HaveEL(EL2) & ~(IsSecure())) & PSTATE.EL == EL1) & [HCR_EL2[19]] == 0b1;
+ exception : ExceptionRecord = undefined;
+ vect_offset : int = undefined;
+ if route_to_el2 then {
+ preferred_exception_return : bits(64) = ThisInstrAddr();
+ vect_offset = 0;
+ exception = ExceptionSyndrome(Exception_MonitorCall);
+ /* insert imm16 into the low 16 bits of the syndrome */
+ __tmp_4 : bits(25) = exception.syndrome;
+ __tmp_4 = __SetSlice_bits(25, 16, __tmp_4, 0, imm);
+ exception.syndrome = __tmp_4;
+ AArch64_TakeException(EL2, exception, preferred_exception_return, vect_offset)
+ } else ()
+}
+
+val aarch64_system_exceptions_runtime_smc : bits(16) -> unit effect {escape, rreg, undef, wreg}
+
+/* Execute SMC: after the undef/trap checks, SCR_EL3 bit 7 (SMC disable)
+   makes it an undefined fault, otherwise call the Secure Monitor. */
+function aarch64_system_exceptions_runtime_smc imm = {
+ AArch64_CheckForSMCUndefOrTrap(imm);
+ if [SCR_EL3[7]] == 0b1 then AArch64_UndefinedFault() else AArch64_CallSecureMonitor(imm)
+}
+
+val system_exceptions_runtime_smc_decode : (bits(3), bits(16), bits(3), bits(2)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode SMC: the only operand is the 16-bit immediate. */
+function system_exceptions_runtime_smc_decode (opc, imm16, op2, LL) = {
+ __unconditional = true;
+ imm : bits(16) = imm16;
+ aarch64_system_exceptions_runtime_smc(imm)
+}
+
+val ReservedValue : unit -> unit effect {escape, rreg, undef, wreg}
+
+/* Raise the fault used for reserved encodings: an AArch32 undefined
+   instruction exception when executing in AArch32 (and general exceptions
+   are not routed to AArch64), otherwise an AArch64 Undefined fault. */
+function ReservedValue () = if UsingAArch32() & ~(AArch32_GeneralExceptionsToAArch64()) then AArch32_TakeUndefInstrException() else AArch64_UndefinedFault()
+
+/* --- Advanced SIMD vector permute decodes (presumably ZIP/UZP/TRN; TODO
+   confirm against the ARM ARM). Each rejects size:Q == 0b110 (no 64-bit
+   elements in a 64-bit vector), derives esize = 8 << UInt(size) and
+   datasize = 64/128 from Q, and takes 'part' from op. */
+val vector_transfer_vector_permute_zip_decode : (bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* pairs = elements / 2: the execute function interleaves element pairs. */
+function vector_transfer_vector_permute_zip_decode (Q, size, Rm, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ part : int = UInt(op);
+ pairs : int = elements / 2;
+ aarch64_vector_transfer_vector_permute_zip(d, datasize, esize, m, n, pairs, part)
+}
+
+val vector_transfer_vector_permute_unzip_decode : (bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Unzip passes the full element count; no pairing is precomputed here. */
+function vector_transfer_vector_permute_unzip_decode (Q, size, Rm, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ part : int = UInt(op);
+ aarch64_vector_transfer_vector_permute_unzip(d, datasize, elements, esize, m, n, part)
+}
+
+val vector_transfer_vector_permute_transpose_decode : (bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Transpose operates on element pairs, mirroring the zip decode above. */
+function vector_transfer_vector_permute_transpose_decode (Q, size, Rm, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ part : int = UInt(op);
+ pairs : int = elements / 2;
+ aarch64_vector_transfer_vector_permute_transpose(d, datasize, esize, m, n, pairs, part)
+}
+
+/* --- DUP-style transfer decodes. Element size index = lowest set bit of
+   imm5; more than 3 (no bit set in imm5<3:0>) is unallocated, and a 64-bit
+   element in a 64-bit vector (size==3, Q==0) is reserved. */
+val vector_transfer_vector_cpydup_simd_decode : (bits(1), bits(1), bits(5), bits(4), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* NOTE(review): despite the _simd name this dispatches to the _sisd execute
+   helper — the generator merges shared execute bodies under one name; op and
+   imm4 are unused here, presumably consumed by the outer decoder. */
+function vector_transfer_vector_cpydup_simd_decode (Q, op, imm5, imm4, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ let 'size : int = LowestSetBit(imm5);
+ if size > 3 then UnallocatedEncoding() else ();
+ /* assertion narrows 'size so the slice below type-checks (length 4-size) */
+ assert('size <= 3);
+ index : int = UInt(slice(imm5, size + 1, negate(size) + 4));
+ idxdsize : int = if [imm5[4]] == 0b1 then 128 else 64;
+ if size == 3 & Q == 0b0 then ReservedValue() else ();
+ esize : int = shl_int(8, size);
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_transfer_vector_cpydup_sisd(d, datasize, elements, esize, idxdsize, index, n)
+}
+
+val vector_transfer_integer_dup_decode : (bits(1), bits(1), bits(5), bits(4), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* DUP from general register: same size/reservation checks, no lane index. */
+function vector_transfer_integer_dup_decode (Q, op, imm5, imm4, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ size : int = LowestSetBit(imm5);
+ if size > 3 then UnallocatedEncoding() else ();
+ if size == 3 & Q == 0b0 then ReservedValue() else ();
+ esize : int = shl_int(8, size);
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_transfer_integer_dup(d, datasize, elements, esize, n)
+}
+
+/* --- Immediate shift-right decodes. Shift amount = 2*esize - UInt(immh:immb).
+   Scalar (sisd) form only exists for 64-bit elements (immh[3] must be 1);
+   the SIMD form rejects immh[3]:Q == 0b10 (128-bit lanes require Q=1). */
+val vector_shift_right_sisd_decode : (bits(1), bits(4), bits(3), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* U/o1/o0 select unsigned/rounding/accumulate variants of the same body. */
+function vector_shift_right_sisd_decode (U, immh, immb, o1, o0, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if [immh[3]] != 0b1 then ReservedValue() else ();
+ esize : int = shl_int(8, 3);
+ datasize : int = esize;
+ elements : int = 1;
+ shift : int = esize * 2 - UInt(immh @ immb);
+ unsigned : bool = U == 0b1;
+ round : bool = o1 == 0b1;
+ accumulate : bool = o0 == 0b1;
+ aarch64_vector_shift_right_sisd(accumulate, d, datasize, elements, esize, n, round, shift, unsigned)
+}
+
+val vector_shift_right_simd_decode : (bits(1), bits(1), bits(4), bits(3), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* NOTE(review): SIMD decode reuses the _sisd execute helper (generator
+   artifact); immh == 0 is handled by the modified-immediate decode ("SEE"). */
+function vector_shift_right_simd_decode (Q, U, immh, immb, o1, o0, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then throw(Error_See("asimdimm")) else ();
+ if ([immh[3]] @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(8, HighestSetBit(immh));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ shift : int = esize * 2 - UInt(immh @ immb);
+ unsigned : bool = U == 0b1;
+ round : bool = o1 == 0b1;
+ accumulate : bool = o0 == 0b1;
+ aarch64_vector_shift_right_sisd(accumulate, d, datasize, elements, esize, n, round, shift, unsigned)
+}
+
+/* --- Narrowing shift-right decodes. immh[3] == 1 is reserved in all forms
+   (the widest narrowing source element is 64 bits, i.e. esize <= 32 for the
+   result); SIMD forms write a 64-bit result half selected by part = UInt(Q). */
+val vector_shift_rightnarrow_uniform_sisd_decode : (bits(1), bits(4), bits(3), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar form: single element, part fixed to 0. */
+function vector_shift_rightnarrow_uniform_sisd_decode (U, immh, immb, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then ReservedValue() else ();
+ if [immh[3]] == 0b1 then ReservedValue() else ();
+ esize : int = shl_int(8, HighestSetBit(immh));
+ datasize : int = esize;
+ elements : int = 1;
+ part : int = 0;
+ shift : int = 2 * esize - UInt(immh @ immb);
+ round : bool = op == 0b1;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_shift_rightnarrow_uniform_sisd(d, datasize, elements, esize, n, part, round, shift, unsigned)
+}
+
+val vector_shift_rightnarrow_uniform_simd_decode : (bits(1), bits(1), bits(4), bits(3), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* SIMD form: immh == 0 belongs to the modified-immediate class ("SEE"). */
+function vector_shift_rightnarrow_uniform_simd_decode (Q, U, immh, immb, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then throw(Error_See("asimdimm")) else ();
+ if [immh[3]] == 0b1 then ReservedValue() else ();
+ esize : int = shl_int(8, HighestSetBit(immh));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ shift : int = 2 * esize - UInt(immh @ immb);
+ round : bool = op == 0b1;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_shift_rightnarrow_uniform_sisd(d, datasize, elements, esize, n, part, round, shift, unsigned)
+}
+
+val vector_shift_rightnarrow_nonuniform_sisd_decode : (bits(1), bits(4), bits(3), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Non-uniform (mixed signedness) variant: no 'unsigned' flag is passed. */
+function vector_shift_rightnarrow_nonuniform_sisd_decode (U, immh, immb, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then ReservedValue() else ();
+ if [immh[3]] == 0b1 then ReservedValue() else ();
+ esize : int = shl_int(8, HighestSetBit(immh));
+ datasize : int = esize;
+ elements : int = 1;
+ part : int = 0;
+ shift : int = 2 * esize - UInt(immh @ immb);
+ round : bool = op == 0b1;
+ aarch64_vector_shift_rightnarrow_nonuniform_sisd(d, datasize, elements, esize, n, part, round, shift)
+}
+
+val vector_shift_rightnarrow_nonuniform_simd_decode : (bits(1), bits(1), bits(4), bits(3), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_shift_rightnarrow_nonuniform_simd_decode (Q, U, immh, immb, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then throw(Error_See("asimdimm")) else ();
+ if [immh[3]] == 0b1 then ReservedValue() else ();
+ esize : int = shl_int(8, HighestSetBit(immh));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ shift : int = 2 * esize - UInt(immh @ immb);
+ round : bool = op == 0b1;
+ aarch64_vector_shift_rightnarrow_nonuniform_sisd(d, datasize, elements, esize, n, part, round, shift)
+}
+
+val vector_shift_rightnarrow_logical_decode : (bits(1), bits(1), bits(4), bits(3), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Logical (truncating) narrow: presumably SHRN/RSHRN — TODO confirm. */
+function vector_shift_rightnarrow_logical_decode (Q, U, immh, immb, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then throw(Error_See("asimdimm")) else ();
+ if [immh[3]] == 0b1 then ReservedValue() else ();
+ esize : int = shl_int(8, HighestSetBit(immh));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ shift : int = 2 * esize - UInt(immh @ immb);
+ round : bool = op == 0b1;
+ aarch64_vector_shift_rightnarrow_logical(d, datasize, elements, esize, n, part, round, shift)
+}
+
+/* --- Shift-right-insert and plain shift-left decodes. Right shifts use
+   amount 2*esize - UInt(immh:immb); left shifts use UInt(immh:immb) - esize.
+   Scalar forms require immh[3] == 1 (64-bit elements only). */
+val vector_shift_rightinsert_sisd_decode : (bits(1), bits(4), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_shift_rightinsert_sisd_decode (U, immh, immb, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if [immh[3]] != 0b1 then ReservedValue() else ();
+ esize : int = shl_int(8, 3);
+ datasize : int = esize;
+ elements : int = 1;
+ shift : int = esize * 2 - UInt(immh @ immb);
+ aarch64_vector_shift_rightinsert_sisd(d, datasize, elements, esize, n, shift)
+}
+
+val vector_shift_rightinsert_simd_decode : (bits(1), bits(1), bits(4), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* NOTE(review): dispatches to the shared _sisd execute helper. */
+function vector_shift_rightinsert_simd_decode (Q, U, immh, immb, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then throw(Error_See("asimdimm")) else ();
+ if ([immh[3]] @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(8, HighestSetBit(immh));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ shift : int = esize * 2 - UInt(immh @ immb);
+ aarch64_vector_shift_rightinsert_sisd(d, datasize, elements, esize, n, shift)
+}
+
+val vector_shift_left_sisd_decode : (bits(1), bits(4), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_shift_left_sisd_decode (U, immh, immb, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if [immh[3]] != 0b1 then ReservedValue() else ();
+ esize : int = shl_int(8, 3);
+ datasize : int = esize;
+ elements : int = 1;
+ shift : int = UInt(immh @ immb) - esize;
+ aarch64_vector_shift_left_sisd(d, datasize, elements, esize, n, shift)
+}
+
+val vector_shift_left_simd_decode : (bits(1), bits(1), bits(4), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_shift_left_simd_decode (Q, U, immh, immb, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then throw(Error_See("asimdimm")) else ();
+ if ([immh[3]] @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(8, HighestSetBit(immh));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ shift : int = UInt(immh @ immb) - esize;
+ aarch64_vector_shift_left_sisd(d, datasize, elements, esize, n, shift)
+}
+
+/* --- Saturating shift-left-by-immediate decodes. The op:U table selects the
+   source/destination signedness (presumably SQSHL/SQSHLU/UQSHL — TODO
+   confirm); 0b00 is unallocated. src/dst_unsigned start 'undefined' but are
+   assigned on every surviving path of the match. */
+val vector_shift_leftsat_sisd_decode : (bits(1), bits(4), bits(3), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_shift_leftsat_sisd_decode (U, immh, immb, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then ReservedValue() else ();
+ esize : int = shl_int(8, HighestSetBit(immh));
+ datasize : int = esize;
+ elements : int = 1;
+ shift : int = UInt(immh @ immb) - esize;
+ src_unsigned : bool = undefined;
+ dst_unsigned : bool = undefined;
+ match op @ U {
+ 0b00 => UnallocatedEncoding(),
+ 0b01 => {
+ src_unsigned = false;
+ dst_unsigned = true
+ },
+ 0b10 => {
+ src_unsigned = false;
+ dst_unsigned = false
+ },
+ 0b11 => {
+ src_unsigned = true;
+ dst_unsigned = true
+ }
+ };
+ aarch64_vector_shift_leftsat_sisd(d, datasize, dst_unsigned, elements, esize, n, shift, src_unsigned)
+}
+
+val vector_shift_leftsat_simd_decode : (bits(1), bits(1), bits(4), bits(3), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* SIMD form: identical signedness table; reuses the _sisd execute helper. */
+function vector_shift_leftsat_simd_decode (Q, U, immh, immb, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then throw(Error_See("asimdimm")) else ();
+ if ([immh[3]] @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(8, HighestSetBit(immh));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ shift : int = UInt(immh @ immb) - esize;
+ src_unsigned : bool = undefined;
+ dst_unsigned : bool = undefined;
+ match op @ U {
+ 0b00 => UnallocatedEncoding(),
+ 0b01 => {
+ src_unsigned = false;
+ dst_unsigned = true
+ },
+ 0b10 => {
+ src_unsigned = false;
+ dst_unsigned = false
+ },
+ 0b11 => {
+ src_unsigned = true;
+ dst_unsigned = true
+ }
+ };
+ aarch64_vector_shift_leftsat_sisd(d, datasize, dst_unsigned, elements, esize, n, shift, src_unsigned)
+}
+
+/* --- Widening shift-left and shift-left-insert decodes. The long form reads
+   a 64-bit source half selected by part = UInt(Q); insert forms mirror the
+   plain left shifts (amount = UInt(immh:immb) - esize). */
+val vector_shift_leftlong_decode : (bits(1), bits(1), bits(4), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* immh[3] == 1 reserved: widening from 64-bit elements is not encodable. */
+function vector_shift_leftlong_decode (Q, U, immh, immb, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then throw(Error_See("asimdimm")) else ();
+ if [immh[3]] == 0b1 then ReservedValue() else ();
+ esize : int = shl_int(8, HighestSetBit(immh));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ shift : int = UInt(immh @ immb) - esize;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_shift_leftlong(d, datasize, elements, esize, n, part, shift, unsigned)
+}
+
+val vector_shift_leftinsert_sisd_decode : (bits(1), bits(4), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar insert form: 64-bit element only (immh[3] must be 1). */
+function vector_shift_leftinsert_sisd_decode (U, immh, immb, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if [immh[3]] != 0b1 then ReservedValue() else ();
+ esize : int = shl_int(8, 3);
+ datasize : int = esize;
+ elements : int = 1;
+ shift : int = UInt(immh @ immb) - esize;
+ aarch64_vector_shift_leftinsert_sisd(d, datasize, elements, esize, n, shift)
+}
+
+val vector_shift_leftinsert_simd_decode : (bits(1), bits(1), bits(4), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_shift_leftinsert_simd_decode (Q, U, immh, immb, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then throw(Error_See("asimdimm")) else ();
+ if ([immh[3]] @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(8, HighestSetBit(immh));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ shift : int = UInt(immh @ immb) - esize;
+ aarch64_vector_shift_leftinsert_sisd(d, datasize, elements, esize, n, shift)
+}
+
+/* --- Fixed-point <-> float conversion shift decodes. esize is decoded from
+   the top immh bits (8 -> 64, 4 -> 32, else 16) and fracbits =
+   2*esize - UInt(immh:immb). Reserved when immh is 000x, or 001x (16-bit
+   elements) without the FP16 extension — NOTE(review): this reading assumes
+   Sail '&' binds tighter than '|' in the condition below; TODO confirm. */
+val vector_shift_conv_int_sisd_decode : (bits(1), bits(4), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* int -> float direction: rounding comes from the FPCR mode. */
+function vector_shift_conv_int_sisd_decode (U, immh, immb, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (immh & 0xE) == 0x0 | (immh & 0xE) == 0x2 & ~(HaveFP16Ext()) then ReservedValue() else ();
+ esize : int = if (immh & 0x8) == 0x8 then 64 else if (immh & 0xC) == 0x4 then 32 else 16;
+ datasize : int = esize;
+ elements : int = 1;
+ fracbits : int = esize * 2 - UInt(immh @ immb);
+ unsigned : bool = U == 0b1;
+ rounding : FPRounding = FPRoundingMode(FPCR);
+ aarch64_vector_shift_conv_int_sisd(d, datasize, elements, esize, fracbits, n, rounding, unsigned)
+}
+
+val vector_shift_conv_int_simd_decode : (bits(1), bits(1), bits(4), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_shift_conv_int_simd_decode (Q, U, immh, immb, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then throw(Error_See("asimdimm")) else ();
+ if (immh & 0xE) == 0x0 | (immh & 0xE) == 0x2 & ~(HaveFP16Ext()) then ReservedValue() else ();
+ if ([immh[3]] @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = if (immh & 0x8) == 0x8 then 64 else if (immh & 0xC) == 0x4 then 32 else 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ fracbits : int = esize * 2 - UInt(immh @ immb);
+ unsigned : bool = U == 0b1;
+ rounding : FPRounding = FPRoundingMode(FPCR);
+ aarch64_vector_shift_conv_int_sisd(d, datasize, elements, esize, fracbits, n, rounding, unsigned)
+}
+
+val vector_shift_conv_float_sisd_decode : (bits(1), bits(4), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* float -> fixed-point direction: rounding is fixed to round-toward-zero. */
+function vector_shift_conv_float_sisd_decode (U, immh, immb, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (immh & 0xE) == 0x0 | (immh & 0xE) == 0x2 & ~(HaveFP16Ext()) then ReservedValue() else ();
+ esize : int = if (immh & 0x8) == 0x8 then 64 else if (immh & 0xC) == 0x4 then 32 else 16;
+ datasize : int = esize;
+ elements : int = 1;
+ fracbits : int = esize * 2 - UInt(immh @ immb);
+ unsigned : bool = U == 0b1;
+ rounding : FPRounding = FPRounding_ZERO;
+ aarch64_vector_shift_conv_float_sisd(d, datasize, elements, esize, fracbits, n, rounding, unsigned)
+}
+
+val vector_shift_conv_float_simd_decode : (bits(1), bits(1), bits(4), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_shift_conv_float_simd_decode (Q, U, immh, immb, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if immh == 0x0 then throw(Error_See("asimdimm")) else ();
+ if (immh & 0xE) == 0x0 | (immh & 0xE) == 0x2 & ~(HaveFP16Ext()) then ReservedValue() else ();
+ if ([immh[3]] @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = if (immh & 0x8) == 0x8 then 64 else if (immh & 0xC) == 0x4 then 32 else 16;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ fracbits : int = esize * 2 - UInt(immh @ immb);
+ unsigned : bool = U == 0b1;
+ rounding : FPRounding = FPRounding_ZERO;
+ aarch64_vector_shift_conv_float_sisd(d, datasize, elements, esize, fracbits, n, rounding, unsigned)
+}
+
+/* --- Across-lanes reduction decodes: integer max/min and FP16 scalar
+   pairwise forms. FP16 forms require the half-precision extension and
+   sz == 0; they reduce exactly 2 elements (datasize = 2 * esize). */
+val vector_reduce_intmax_decode : (bits(1), bits(1), bits(2), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* U selects unsigned compare, op selects min vs max. */
+function vector_reduce_intmax_decode (Q, U, size, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (size @ Q) == 0b100 then ReservedValue() else ();
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ min : bool = op == 0b1;
+ aarch64_vector_reduce_intmax(d, datasize, elements, esize, min, n, unsigned)
+}
+
+val vector_reduce_fp16maxnm_sisd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* maxNum/minNum flavour (NaN-ignoring per the ReduceOp chosen from o1). */
+function vector_reduce_fp16maxnm_sisd_decode (U, o1, sz, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ if sz == 0b1 then ReservedValue() else ();
+ datasize : int = esize * 2;
+ elements : int = 2;
+ op : ReduceOp = if o1 == 0b1 then ReduceOp_FMINNUM else ReduceOp_FMAXNUM;
+ aarch64_vector_reduce_fp16maxnm_sisd(d, datasize, esize, n, op)
+}
+
+val vector_reduce_fp16max_sisd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Plain max/min flavour. */
+function vector_reduce_fp16max_sisd_decode (U, o1, sz, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ if sz == 0b1 then ReservedValue() else ();
+ datasize : int = esize * 2;
+ elements : int = 2;
+ op : ReduceOp = if o1 == 0b1 then ReduceOp_FMIN else ReduceOp_FMAX;
+ aarch64_vector_reduce_fp16max_sisd(d, datasize, esize, n, op)
+}
+
+val vector_reduce_fp16add_sisd_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Pairwise add flavour (ReduceOp_FADD). */
+function vector_reduce_fp16add_sisd_decode (U, sz, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFP16Ext()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ esize : int = 16;
+ if sz == 0b1 then ReservedValue() else ();
+ datasize : int = esize * 2;
+ elements : int = 2;
+ op : ReduceOp = ReduceOp_FADD;
+ aarch64_vector_reduce_fp16add_sisd(d, datasize, esize, n, op)
+}
+
+/* --- Single/double-precision FP reductions and integer add reductions.
+   NOTE(review): the fp*_simd decodes below dispatch to execute helpers with
+   'fp16' in their names even though esize is 32/64 here — the generator
+   shares one execute body across precisions; the name is an artifact. */
+val vector_reduce_fpmaxnm_simd_decode : (bits(1), bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Only sz:Q == 0b01 (32-bit elements, 128-bit vector) is allocated. */
+function vector_reduce_fpmaxnm_simd_decode (Q, U, o1, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (sz @ Q) != 0b01 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ op : ReduceOp = if o1 == 0b1 then ReduceOp_FMINNUM else ReduceOp_FMAXNUM;
+ aarch64_vector_reduce_fp16maxnm_simd(d, datasize, esize, n, op)
+}
+
+val vector_reduce_fpmax_simd_decode : (bits(1), bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_reduce_fpmax_simd_decode (Q, U, o1, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (sz @ Q) != 0b01 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ op : ReduceOp = if o1 == 0b1 then ReduceOp_FMIN else ReduceOp_FMAX;
+ aarch64_vector_reduce_fp16max_simd(d, datasize, esize, n, op)
+}
+
+val vector_reduce_add_sisd_decode : (bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar integer add reduction: only size == 0b11 (64-bit) is allocated. */
+function vector_reduce_add_sisd_decode (U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if size != 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize * 2;
+ elements : int = 2;
+ op : ReduceOp = ReduceOp_ADD;
+ aarch64_vector_reduce_add_sisd(d, datasize, esize, n, op)
+}
+
+val vector_reduce_add_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_reduce_add_simd_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (size @ Q) == 0b100 then ReservedValue() else ();
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ op : ReduceOp = ReduceOp_ADD;
+ aarch64_vector_reduce_add_simd(d, datasize, esize, n, op)
+}
+
+val vector_reduce_addlong_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Widening add reduction; U selects unsigned accumulation. */
+function vector_reduce_addlong_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (size @ Q) == 0b100 then ReservedValue() else ();
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_reduce_addlong(d, datasize, elements, esize, n, unsigned)
+}
+
+/* --- Special unary decodes: square root, reciprocal-sqrt estimate, and
+   reciprocal estimate. FP forms reject sz:Q == 0b10 (64-bit elements need a
+   128-bit vector); integer estimate forms are 32-bit only (sz must be 0).
+   NOTE(review): execute-helper names carry 'fp16'/'sisd' suffixes even for
+   32/64-bit SIMD forms — a shared-body artifact of the ASL-to-Sail merge. */
+val vector_arithmetic_unary_special_sqrt_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_special_sqrt_decode (Q, U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_unary_special_sqrtfp16(d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_special_sqrtest_int_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Integer reciprocal-sqrt estimate: fixed 32-bit elements. */
+function vector_arithmetic_unary_special_sqrtest_int_decode (Q, U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if sz == 0b1 then ReservedValue() else ();
+ esize : int = 32;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_unary_special_sqrtest_int(d, datasize, elements, n)
+}
+
+val vector_arithmetic_unary_special_sqrtest_float_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_special_sqrtest_float_simd_decode (Q, U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_unary_special_sqrtest_fp16_sisd(d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_special_recip_int_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Integer reciprocal estimate: fixed 32-bit elements. */
+function vector_arithmetic_unary_special_recip_int_decode (Q, U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if sz == 0b1 then ReservedValue() else ();
+ esize : int = 32;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_unary_special_recip_int(d, datasize, elements, n)
+}
+
+val vector_arithmetic_unary_special_recip_float_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_special_recip_float_simd_decode (Q, U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_unary_special_recip_fp16_sisd(d, datasize, elements, esize, n)
+}
+
+/* --- Widening unary shift and narrowing float convert decodes. */
+val vector_arithmetic_unary_shift_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Shift amount is fixed to esize (presumably SHLL — TODO confirm); the
+   64-bit source half is selected by part = UInt(Q). */
+function vector_arithmetic_unary_shift_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ shift : int = esize;
+ unsigned : bool = false;
+ aarch64_vector_arithmetic_unary_shift(d, datasize, elements, esize, n, part, shift, unsigned)
+}
+
+val vector_arithmetic_unary_float_xtn_sisd_decode : (bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar narrowing convert: sz == 0 reserved; result element is 32 bits. */
+function vector_arithmetic_unary_float_xtn_sisd_decode (U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if sz == 0b0 then ReservedValue() else ();
+ esize : int = 32;
+ datasize : int = esize;
+ elements : int = 1;
+ part : int = 0;
+ aarch64_vector_arithmetic_unary_float_xtn_sisd(d, datasize, elements, esize, n, part)
+}
+
+val vector_arithmetic_unary_float_xtn_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* SIMD form: two elements into the half selected by part = UInt(Q). */
+function vector_arithmetic_unary_float_xtn_simd_decode (Q, U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if sz == 0b0 then ReservedValue() else ();
+ esize : int = 32;
+ let 'datasize : {|64|} = 64;
+ elements : int = 2;
+ part : int = UInt(Q);
+ aarch64_vector_arithmetic_unary_float_xtn_sisd(d, datasize, elements, esize, n, part)
+}
+
+/* --- FP round-to-integral and float<->int conversion decodes. All reject
+   sz:Q == 0b10; esize = 32 << UInt(sz). */
+val vector_arithmetic_unary_float_round_decode : (bits(1), bits(1), bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Rounding mode from U:o1:o2 — U==0: decoded from o1:o2; 100: ties-away;
+   101: unallocated; 110: FPCR mode with 'exact' set; 111: FPCR mode. */
+function vector_arithmetic_unary_float_round_decode (Q, U, o2, sz, o1, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ exact : bool = false;
+ rounding : FPRounding = undefined;
+ match (U @ o1) @ o2 {
+ [bitzero] @ _ : bits(1) @ _ : bits(1) => rounding = FPDecodeRounding(o1 @ o2),
+ 0b100 => rounding = FPRounding_TIEAWAY,
+ 0b101 => UnallocatedEncoding(),
+ 0b110 => {
+ rounding = FPRoundingMode(FPCR);
+ exact = true
+ },
+ 0b111 => rounding = FPRoundingMode(FPCR)
+ };
+ aarch64_vector_arithmetic_unary_fp16_round(d, datasize, elements, esize, exact, n, rounding)
+}
+
+val vector_arithmetic_unary_float_conv_int_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* int -> float convert; U selects unsigned source. */
+function vector_arithmetic_unary_float_conv_int_simd_decode (Q, U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_fp16_conv_int_sisd(d, datasize, elements, esize, n, unsigned)
+}
+
+val vector_arithmetic_unary_float_conv_float_tieaway_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* float -> int with rounding fixed to ties-away. */
+function vector_arithmetic_unary_float_conv_float_tieaway_simd_decode (Q, U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ rounding : FPRounding = FPRounding_TIEAWAY;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_fp16_conv_float_tieaway_sisd(d, datasize, elements, esize, n, rounding, unsigned)
+}
+
+val vector_arithmetic_unary_float_conv_float_bulk_simd_decode : (bits(1), bits(1), bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* float -> int with rounding decoded from o1:o2. */
+function vector_arithmetic_unary_float_conv_float_bulk_simd_decode (Q, U, o2, sz, o1, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ rounding : FPRounding = FPDecodeRounding(o1 @ o2);
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_fp16_conv_float_bulk_sisd(d, datasize, elements, esize, n, rounding, unsigned)
+}
+
+val vector_arithmetic_unary_extract_sqxtun_sisd_decode : (bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_extract_sqxtun_sisd_decode (U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ part : int = 0;
+ elements : int = 1;
+ aarch64_vector_arithmetic_unary_extract_sqxtun_sisd(d, datasize, elements, esize, n, part)
+}
+
+val vector_arithmetic_unary_extract_sqxtun_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_extract_sqxtun_simd_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_unary_extract_sqxtun_sisd(d, datasize, elements, esize, n, part)
+}
+
+val vector_arithmetic_unary_extract_sat_sisd_decode : (bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_unary_extract_sat_sisd_decode (U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ part : int = 0;
+ elements : int = 1;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_extract_sat_sisd(d, datasize, elements, esize, n, part, unsigned)
+}
+
+val vector_arithmetic_unary_extract_sat_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector (SIMD) decode for the saturating extract (narrowing) encoding:
+   size == 0b11 is reserved; datasize fixed at 64, part = UInt(Q); U selects
+   unsigned; dispatches to the shared _sisd execute function. */
+function vector_arithmetic_unary_extract_sat_simd_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_extract_sat_sisd(d, datasize, elements, esize, n, part, unsigned)
+}
+
+val vector_arithmetic_unary_extract_nosat_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the non-saturating extract (narrowing) encoding: size == 0b11 is
+   reserved; datasize fixed at 64 and part = UInt(Q). */
+function vector_arithmetic_unary_extract_nosat_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_unary_extract_nosat(d, datasize, elements, esize, n, part)
+}
+
+val vector_arithmetic_unary_diffneg_sat_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the saturating abs/negate encoding: size:Q == 0b110
+   (64-bit elements in a 64-bit vector) is reserved; Q selects 64/128-bit
+   datasize; U selects negate; dispatches to the shared _sisd execute. */
+function vector_arithmetic_unary_diffneg_sat_simd_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ neg : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_diffneg_sat_sisd(d, datasize, elements, esize, n, neg)
+}
+
+val vector_arithmetic_unary_diffneg_int_sisd_decode : (bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar decode for the integer abs/negate encoding: only size == 0b11
+   (64-bit element) is allocated — all other sizes raise ReservedValue;
+   U selects negate. */
+function vector_arithmetic_unary_diffneg_int_sisd_decode (U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if size != 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ neg : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_diffneg_int_sisd(d, datasize, elements, esize, n, neg)
+}
+
+val vector_arithmetic_unary_diffneg_int_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the integer abs/negate encoding: size:Q == 0b110 is
+   reserved; Q selects 64/128-bit datasize; U selects negate; dispatches to
+   the shared _sisd execute function. */
+function vector_arithmetic_unary_diffneg_int_simd_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ neg : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_diffneg_int_sisd(d, datasize, elements, esize, n, neg)
+}
+
+val vector_arithmetic_unary_diffneg_float_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the floating-point abs/negate encoding: sz:Q == 0b10
+   (double precision in a 64-bit vector) is reserved; esize = 32 << UInt(sz);
+   U selects negate. The execute helper's _fp16 suffix is a generator naming
+   artefact shared by all FP decoders in this file. */
+function vector_arithmetic_unary_diffneg_float_decode (Q, U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ neg : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_diffneg_fp16(d, datasize, elements, esize, n, neg)
+}
+
+val vector_arithmetic_unary_cnt_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the population-count encoding: only size == 0b00 (byte elements)
+   is allocated; esize is fixed at 8 and elements = datasize / 8. */
+function vector_arithmetic_unary_cnt_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if size != 0b00 then ReservedValue() else ();
+ esize : int = 8;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / 8;
+ aarch64_vector_arithmetic_unary_cnt(d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_cmp_int_lessthan_sisd_decode : (bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar decode for the integer compare-less-than-zero encoding: only
+   size == 0b11 (64-bit element) is allocated; comparison fixed to LT. */
+function vector_arithmetic_unary_cmp_int_lessthan_sisd_decode (U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if size != 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ comparison : CompareOp = CompareOp_LT;
+ aarch64_vector_arithmetic_unary_cmp_int_lessthan_sisd(comparison, d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_cmp_int_lessthan_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the integer compare-less-than-zero encoding:
+   size:Q == 0b110 is reserved; comparison fixed to LT; dispatches to the
+   shared _sisd execute function. */
+function vector_arithmetic_unary_cmp_int_lessthan_simd_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ comparison : CompareOp = CompareOp_LT;
+ aarch64_vector_arithmetic_unary_cmp_int_lessthan_sisd(comparison, d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_cmp_int_bulk_sisd_decode : (bits(1), bits(2), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar decode for the integer compare-against-zero family: only
+   size == 0b11 is allocated; op:U selects the comparison
+   (00 GT, 01 GE, 10 EQ, 11 LE). */
+function vector_arithmetic_unary_cmp_int_bulk_sisd_decode (U, size, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if size != 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ comparison : CompareOp = undefined;
+ match op @ U {
+ 0b00 => comparison = CompareOp_GT,
+ 0b01 => comparison = CompareOp_GE,
+ 0b10 => comparison = CompareOp_EQ,
+ 0b11 => comparison = CompareOp_LE
+ };
+ aarch64_vector_arithmetic_unary_cmp_int_bulk_sisd(comparison, d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_cmp_int_bulk_simd_decode : (bits(1), bits(1), bits(2), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the integer compare-against-zero family:
+   size:Q == 0b110 is reserved; op:U selects the comparison
+   (00 GT, 01 GE, 10 EQ, 11 LE); dispatches to the shared _sisd execute. */
+function vector_arithmetic_unary_cmp_int_bulk_simd_decode (Q, U, size, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ comparison : CompareOp = undefined;
+ match op @ U {
+ 0b00 => comparison = CompareOp_GT,
+ 0b01 => comparison = CompareOp_GE,
+ 0b10 => comparison = CompareOp_EQ,
+ 0b11 => comparison = CompareOp_LE
+ };
+ aarch64_vector_arithmetic_unary_cmp_int_bulk_sisd(comparison, d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_cmp_float_lessthan_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the FP compare-less-than-zero encoding: sz:Q == 0b10 is
+   reserved; esize = 32 << UInt(sz); comparison fixed to LT. */
+function vector_arithmetic_unary_cmp_float_lessthan_simd_decode (Q, U, sz, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ comparison : CompareOp = CompareOp_LT;
+ aarch64_vector_arithmetic_unary_cmp_fp16_lessthan_sisd(comparison, d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_cmp_float_bulk_simd_decode : (bits(1), bits(1), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the FP compare-against-zero family: sz:Q == 0b10 is
+   reserved; op:U selects the comparison (00 GT, 01 GE, 10 EQ, 11 LE). */
+function vector_arithmetic_unary_cmp_float_bulk_simd_decode (Q, U, sz, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ comparison : CompareOp = undefined;
+ match op @ U {
+ 0b00 => comparison = CompareOp_GT,
+ 0b01 => comparison = CompareOp_GE,
+ 0b10 => comparison = CompareOp_EQ,
+ 0b11 => comparison = CompareOp_LE
+ };
+ aarch64_vector_arithmetic_unary_cmp_fp16_bulk_sisd(comparison, d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_clsz_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the count-leading-sign/zero encoding: size == 0b11 is reserved;
+   U selects CLZ (1) versus CLS (0). */
+function vector_arithmetic_unary_clsz_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ countop : CountOp = if U == 0b1 then CountOp_CLZ else CountOp_CLS;
+ aarch64_vector_arithmetic_unary_clsz(countop, d, datasize, elements, esize, n)
+}
+
+val vector_arithmetic_unary_add_saturating_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the unary saturating-accumulate encoding:
+   size:Q == 0b110 is reserved; U selects unsigned; dispatches to the shared
+   _sisd execute function. */
+function vector_arithmetic_unary_add_saturating_simd_decode (Q, U, size, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_add_saturating_sisd(d, datasize, elements, esize, n, unsigned)
+}
+
+val vector_arithmetic_unary_add_pairwise_decode : (bits(1), bits(1), bits(2), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the pairwise add-long encoding: size == 0b11 is reserved;
+   elements counts result elements, hence datasize / (2 * esize); op selects
+   accumulate and U selects unsigned. */
+function vector_arithmetic_unary_add_pairwise_decode (Q, U, size, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / (2 * esize);
+ acc : bool = op == 0b1;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_unary_add_pairwise(acc, d, datasize, elements, esize, n, unsigned)
+}
+
+val vector_arithmetic_binary_uniform_sub_saturating_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the saturating-subtract encoding: size:Q == 0b110 is
+   reserved; U selects unsigned; dispatches to the shared _sisd execute. */
+function vector_arithmetic_binary_uniform_sub_saturating_simd_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_sub_saturating_sisd(d, datasize, elements, esize, m, n, unsigned)
+}
+
+val vector_arithmetic_binary_uniform_sub_int_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the integer halving-subtract encoding: size == 0b11 is reserved;
+   U selects unsigned. */
+function vector_arithmetic_binary_uniform_sub_int_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_sub_int(d, datasize, elements, esize, m, n, unsigned)
+}
+
+val vector_arithmetic_binary_uniform_sub_fp_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the FP subtract/absolute-difference encoding:
+   sz:Q == 0b10 is reserved; esize = 32 << UInt(sz); U selects the
+   absolute-value variant. */
+function vector_arithmetic_binary_uniform_sub_fp_simd_decode (Q, U, sz, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ abs : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_sub_fp16_simd(abs, d, datasize, elements, esize, m, n)
+}
+
+val vector_arithmetic_binary_uniform_shift_sisd_decode : (bits(1), bits(2), bits(5), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar decode for the register-shift family: U = unsigned, R = rounding,
+   S = saturating. The reserved-size check is performed after the flags are
+   derived: a non-saturating shift (S == 0b0) is only allocated for
+   size == 0b11. */
+function vector_arithmetic_binary_uniform_shift_sisd_decode (U, size, Rm, R, S, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ unsigned : bool = U == 0b1;
+ rounding : bool = R == 0b1;
+ saturating : bool = S == 0b1;
+ if S == 0b0 & size != 0b11 then ReservedValue() else ();
+ aarch64_vector_arithmetic_binary_uniform_shift_sisd(d, datasize, elements, esize, m, n, rounding, saturating, unsigned)
+}
+
+val vector_arithmetic_binary_uniform_shift_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the register-shift family: size:Q == 0b110 is reserved;
+   U = unsigned, R = rounding, S = saturating; dispatches to the shared
+   _sisd execute function. */
+function vector_arithmetic_binary_uniform_shift_simd_decode (Q, U, size, Rm, R, S, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ rounding : bool = R == 0b1;
+ saturating : bool = S == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_shift_sisd(d, datasize, elements, esize, m, n, rounding, saturating, unsigned)
+}
+
+val vector_arithmetic_binary_uniform_rsqrts_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the reciprocal-square-root-step encoding: sz:Q == 0b10
+   is reserved; esize = 32 << UInt(sz). */
+function vector_arithmetic_binary_uniform_rsqrts_simd_decode (Q, U, sz, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_rsqrtsfp16_sisd(d, datasize, elements, esize, m, n)
+}
+
+val vector_arithmetic_binary_uniform_recps_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the reciprocal-step encoding: sz:Q == 0b10 is reserved;
+   esize = 32 << UInt(sz). */
+function vector_arithmetic_binary_uniform_recps_simd_decode (Q, U, sz, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_recpsfp16_sisd(d, datasize, elements, esize, m, n)
+}
+
+val vector_arithmetic_binary_uniform_mul_int_product_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the integer/polynomial multiply encoding: the polynomial form
+   (U == 0b1) is only allocated for size == 0b00, and size == 0b11 is reserved
+   for both forms. */
+function vector_arithmetic_binary_uniform_mul_int_product_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if U == 0b1 & size != 0b00 then ReservedValue() else ();
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ poly : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_mul_int_product(d, datasize, elements, esize, m, n, poly)
+}
+
+val vector_arithmetic_binary_uniform_mul_int_doubling_sisd_decode : (bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar decode for the saturating doubling-multiply-high encoding: only
+   16- and 32-bit elements are allocated (size 0b00 and 0b11 reserved);
+   U selects rounding. */
+function vector_arithmetic_binary_uniform_mul_int_doubling_sisd_decode (U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 | size == 0b00 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ rounding : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_mul_int_doubling_sisd(d, datasize, elements, esize, m, n, rounding)
+}
+
+val vector_arithmetic_binary_uniform_mul_int_doubling_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the saturating doubling-multiply-high encoding: only
+   16- and 32-bit elements are allocated; U selects rounding; dispatches to
+   the shared _sisd execute function. */
+function vector_arithmetic_binary_uniform_mul_int_doubling_simd_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 | size == 0b00 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ rounding : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_mul_int_doubling_sisd(d, datasize, elements, esize, m, n, rounding)
+}
+
+val vector_arithmetic_binary_uniform_mul_int_doubling_accum_sisd_decode : (bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar decode for the QRDMLAH-extension doubling-multiply-accumulate
+   encoding: undefined unless HaveQRDMLAHExt(); only 16- and 32-bit elements
+   allocated; rounding is always true and S selects subtract. */
+function vector_arithmetic_binary_uniform_mul_int_doubling_accum_sisd_decode (U, size, Rm, S, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveQRDMLAHExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 | size == 0b00 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ rounding : bool = true;
+ sub_op : bool = S == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_mul_int_doubling_accum_sisd(d, datasize, elements, esize, m, n, rounding, sub_op)
+}
+
+val vector_arithmetic_binary_uniform_mul_int_doubling_accum_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the QRDMLAH-extension doubling-multiply-accumulate
+   encoding: undefined unless HaveQRDMLAHExt(); only 16- and 32-bit elements
+   allocated; rounding is always true, S selects subtract; dispatches to the
+   shared _sisd execute function. */
+function vector_arithmetic_binary_uniform_mul_int_doubling_accum_simd_decode (Q, U, size, Rm, S, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveQRDMLAHExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 | size == 0b00 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ rounding : bool = true;
+ sub_op : bool = S == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_mul_int_doubling_accum_sisd(d, datasize, elements, esize, m, n, rounding, sub_op)
+}
+
+val vector_arithmetic_binary_uniform_mul_int_dotp_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the DOTP-extension dot-product encoding: throws Error_Undefined
+   unless HaveDOTPExt() (note: unlike its siblings this guard throws rather
+   than calling UnallocatedEncoding); only size == 0b10 is allocated;
+   U == 0b0 selects the signed form. */
+function vector_arithmetic_binary_uniform_mul_int_dotp_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveDOTPExt()) then throw(Error_Undefined()) else ();
+ if size != 0b10 then ReservedValue() else ();
+ signed : bool = U == 0b0;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_mul_int_dotp(d, datasize, elements, esize, m, n, signed)
+}
+
+val vector_arithmetic_binary_uniform_mul_int_accum_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the integer multiply-accumulate encoding: size == 0b11 is
+   reserved; U selects the subtract form. */
+function vector_arithmetic_binary_uniform_mul_int_accum_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ sub_op : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_mul_int_accum(d, datasize, elements, esize, m, n, sub_op)
+}
+
+val vector_arithmetic_binary_uniform_mul_fp_product_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the FP multiply encoding: sz:Q == 0b10 is reserved;
+   esize = 32 << UInt(sz). */
+function vector_arithmetic_binary_uniform_mul_fp_product_decode (Q, U, sz, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_mul_fp16_product(d, datasize, elements, esize, m, n)
+}
+
+val vector_arithmetic_binary_uniform_mul_fp_fused_decode : (bits(1), bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the fused FP multiply-accumulate encoding: sz:Q == 0b10 is
+   reserved; op selects the subtract form. */
+function vector_arithmetic_binary_uniform_mul_fp_fused_decode (Q, U, op, sz, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ sub_op : bool = op == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_mul_fp16_fused(d, datasize, elements, esize, m, n, sub_op)
+}
+
+val vector_arithmetic_binary_uniform_mul_fp_extended_simd_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the extended FP multiply encoding: sz:Q == 0b10 is
+   reserved; dispatches to the shared _sisd execute function. */
+function vector_arithmetic_binary_uniform_mul_fp_extended_simd_decode (Q, U, sz, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_mul_fp16_extended_sisd(d, datasize, elements, esize, m, n)
+}
+
+val vector_arithmetic_binary_uniform_mul_fp_complex_decode : (bits(1), bits(1), bits(2), bits(5), bits(2), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the FCADD-extension complex FP multiply encoding: undefined
+   unless HaveFCADDExt(); size == 0b00 is reserved, as is size == 0b11 with
+   Q == 0b0; a 16-bit esize additionally requires HaveFP16Ext(); the rot
+   field is passed through to the execute function unchanged. */
+function vector_arithmetic_binary_uniform_mul_fp_complex_decode (Q, U, size, Rm, rot, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFCADDExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b00 then ReservedValue() else ();
+ if Q == 0b0 & size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ if ~(HaveFP16Ext()) & esize == 16 then ReservedValue() else ();
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_mul_fp_complex(d, datasize, elements, esize, m, n, rot)
+}
+
+val vector_arithmetic_binary_uniform_maxmin_single_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the integer max/min (element-wise) encoding: size == 0b11 is
+   reserved; U selects unsigned and o1 selects minimum. */
+function vector_arithmetic_binary_uniform_maxmin_single_decode (Q, U, size, Rm, o1, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ minimum : bool = o1 == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_maxmin_single(d, datasize, elements, esize, m, minimum, n, unsigned)
+}
+
+val vector_arithmetic_binary_uniform_maxmin_pair_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the integer max/min (pairwise) encoding: size == 0b11 is
+   reserved; U selects unsigned and o1 selects minimum. */
+function vector_arithmetic_binary_uniform_maxmin_pair_decode (Q, U, size, Rm, o1, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ minimum : bool = o1 == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_maxmin_pair(d, datasize, elements, esize, m, minimum, n, unsigned)
+}
+
+val vector_arithmetic_binary_uniform_maxmin_fp_2008_decode : (bits(1), bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the FP maxnum/minnum (IEEE 754-2008 semantics) encoding:
+   sz:Q == 0b10 is reserved; U selects the pairwise form and o1 selects
+   minimum. */
+function vector_arithmetic_binary_uniform_maxmin_fp_2008_decode (Q, U, o1, sz, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ pair : bool = U == 0b1;
+ minimum : bool = o1 == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_maxmin_fp16_2008(d, datasize, elements, esize, m, minimum, n, pair)
+}
+
+val vector_arithmetic_binary_uniform_maxmin_fp_1985_decode : (bits(1), bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the FP max/min (IEEE 754-1985 semantics) encoding:
+   sz:Q == 0b10 is reserved; U selects the pairwise form and o1 selects
+   minimum. */
+function vector_arithmetic_binary_uniform_maxmin_fp_1985_decode (Q, U, o1, sz, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ pair : bool = U == 0b1;
+ minimum : bool = o1 == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_maxmin_fp16_1985(d, datasize, elements, esize, m, minimum, n, pair)
+}
+
+val vector_arithmetic_binary_uniform_div_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the FP divide encoding: sz:Q == 0b10 is reserved;
+   esize = 32 << UInt(sz). */
+function vector_arithmetic_binary_uniform_div_decode (Q, U, sz, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_divfp16(d, datasize, elements, esize, m, n)
+}
+
+val vector_arithmetic_binary_uniform_diff_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the integer absolute-difference encoding: size == 0b11 is
+   reserved; U selects unsigned and ac selects accumulate. */
+function vector_arithmetic_binary_uniform_diff_decode (Q, U, size, Rm, ac, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ accumulate : bool = ac == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_diff(accumulate, d, datasize, elements, esize, m, n, unsigned)
+}
+
+val vector_arithmetic_binary_uniform_cmp_int_sisd_decode : (bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar decode for the register-register integer compare encoding: only
+   size == 0b11 is allocated; U selects unsigned and eq selects
+   compare-equal. */
+function vector_arithmetic_binary_uniform_cmp_int_sisd_decode (U, size, Rm, eq, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size != 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ unsigned : bool = U == 0b1;
+ cmp_eq : bool = eq == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_cmp_int_sisd(cmp_eq, d, datasize, elements, esize, m, n, unsigned)
+}
+
+val vector_arithmetic_binary_uniform_cmp_int_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the register-register integer compare encoding:
+   size:Q == 0b110 is reserved; U selects unsigned, eq selects compare-equal;
+   dispatches to the shared _sisd execute function. */
+function vector_arithmetic_binary_uniform_cmp_int_simd_decode (Q, U, size, Rm, eq, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ cmp_eq : bool = eq == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_cmp_int_sisd(cmp_eq, d, datasize, elements, esize, m, n, unsigned)
+}
+
+val vector_arithmetic_binary_uniform_cmp_fp_simd_decode : (bits(1), bits(1), bits(1), bits(1), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the register-register FP compare encoding:
+   sz:Q == 0b10 is reserved; the E:U:ac triple selects comparison and
+   absolute-value variant (000 EQ, 010 GE, 011 GE-abs, 110 GT, 111 GT-abs);
+   all other combinations are unallocated. */
+function vector_arithmetic_binary_uniform_cmp_fp_simd_decode (Q, U, E, sz, Rm, ac, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ cmp : CompareOp = undefined;
+ abs : bool = undefined;
+ match (E @ U) @ ac {
+ 0b000 => {
+ cmp = CompareOp_EQ;
+ abs = false
+ },
+ 0b010 => {
+ cmp = CompareOp_GE;
+ abs = false
+ },
+ 0b011 => {
+ cmp = CompareOp_GE;
+ abs = true
+ },
+ 0b110 => {
+ cmp = CompareOp_GT;
+ abs = false
+ },
+ 0b111 => {
+ cmp = CompareOp_GT;
+ abs = true
+ },
+ _ => UnallocatedEncoding()
+ };
+ aarch64_vector_arithmetic_binary_uniform_cmp_fp16_sisd(abs, cmp, d, datasize, elements, esize, m, n)
+}
+
+val vector_arithmetic_binary_uniform_cmp_bitwise_sisd_decode : (bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar decode for the bitwise compare encoding: only size == 0b11 is
+   allocated; U == 0b0 selects the AND-test form. */
+function vector_arithmetic_binary_uniform_cmp_bitwise_sisd_decode (U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size != 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ and_test : bool = U == 0b0;
+ aarch64_vector_arithmetic_binary_uniform_cmp_bitwise_sisd(and_test, d, datasize, elements, esize, m, n)
+}
+
+val vector_arithmetic_binary_uniform_cmp_bitwise_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the bitwise compare encoding: size:Q == 0b110 is
+   reserved; U == 0b0 selects the AND-test form; dispatches to the shared
+   _sisd execute function. */
+function vector_arithmetic_binary_uniform_cmp_bitwise_simd_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ and_test : bool = U == 0b0;
+ aarch64_vector_arithmetic_binary_uniform_cmp_bitwise_sisd(and_test, d, datasize, elements, esize, m, n)
+}
+
+val vector_arithmetic_binary_uniform_add_wrapping_single_sisd_decode : (bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Scalar decode for the wrapping add/subtract encoding: only size == 0b11 is
+   allocated; U selects the subtract form. */
+function vector_arithmetic_binary_uniform_add_wrapping_single_sisd_decode (U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size != 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ sub_op : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_add_wrapping_single_sisd(d, datasize, elements, esize, m, n, sub_op)
+}
+
+val vector_arithmetic_binary_uniform_add_wrapping_single_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Vector decode for the wrapping add/subtract encoding: size:Q == 0b110 is
+   reserved; U selects the subtract form; dispatches to the shared _sisd
+   execute function. */
+function vector_arithmetic_binary_uniform_add_wrapping_single_simd_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ sub_op : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_add_wrapping_single_sisd(d, datasize, elements, esize, m, n, sub_op)
+}
+
+val vector_arithmetic_binary_uniform_add_wrapping_pair_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+/* Decode for the pairwise wrapping add encoding: size:Q == 0b110 is
+   reserved. */
+function vector_arithmetic_binary_uniform_add_wrapping_pair_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_add_wrapping_pair(d, datasize, elements, esize, m, n)
+}
+
+val vector_arithmetic_binary_uniform_add_saturating_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_add_saturating_simd_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (size @ Q) == 0b110 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_add_saturating_sisd(d, datasize, elements, esize, m, n, unsigned)
+}
+
+val vector_arithmetic_binary_uniform_add_halving_truncating_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_add_halving_truncating_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_add_halving_truncating(d, datasize, elements, esize, m, n, unsigned)
+}
+
+val vector_arithmetic_binary_uniform_add_halving_rounding_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_add_halving_rounding_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_add_halving_rounding(d, datasize, elements, esize, m, n, unsigned)
+}
+
+val vector_arithmetic_binary_uniform_add_fp_decode : (bits(1), bits(1), bits(1), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_add_fp_decode (Q, U, sz, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ pair : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_uniform_add_fp16(d, datasize, elements, esize, m, n, pair)
+}
+
+val vector_arithmetic_binary_uniform_add_fp_complex_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_uniform_add_fp_complex_decode (Q, U, size, Rm, rot, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFCADDExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b00 then ReservedValue() else ();
+ if Q == 0b0 & size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ if ~(HaveFP16Ext()) & esize == 16 then ReservedValue() else ();
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_uniform_add_fp_complex(d, datasize, elements, esize, m, n, rot)
+}
+
+val vector_arithmetic_binary_element_mul_fp_simd_decode : (bits(1), bits(1), bits(1), bits(1), bits(1), bits(4), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mul_fp_simd_decode (Q, U, sz, L, M, Rm, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = M;
+ match sz @ L {
+ [bitzero] @ _ : bits(1) => index = UInt(H @ L),
+ 0b10 => index = UInt(H),
+ 0b11 => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ mulx_op : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_element_mul_fp16_sisd(d, datasize, elements, esize, idxdsize, index, m, mulx_op, n)
+}
+
+val vector_arithmetic_binary_element_mulacc_fp_simd_decode : (bits(1), bits(1), bits(1), bits(1), bits(1), bits(4), bits(1), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mulacc_fp_simd_decode (Q, U, sz, L, M, Rm, o2, H, Rn, Rd) = {
+ __unconditional = true;
+ idxdsize : int = if H == 0b1 then 128 else 64;
+ index : int = undefined;
+ Rmhi : bits(1) = M;
+ match sz @ L {
+ [bitzero] @ _ : bits(1) => index = UInt(H @ L),
+ 0b10 => index = UInt(H),
+ 0b11 => UnallocatedEncoding()
+ };
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rmhi @ Rm);
+ if (sz @ Q) == 0b10 then ReservedValue() else ();
+ esize : int = shl_int(32, UInt(sz));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ sub_op : bool = o2 == 0b1;
+ aarch64_vector_arithmetic_binary_element_mulacc_fp16_sisd(d, datasize, elements, esize, idxdsize, index, m, n, sub_op)
+}
+
+val vector_arithmetic_binary_element_mulacc_complex_decode : (bits(1), bits(1), bits(2), bits(1), bits(1), bits(4), bits(2), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_mulacc_complex_decode (Q, U, size, L, M, Rm, rot, H, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveFCADDExt()) then UnallocatedEncoding() else ();
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(M @ Rm);
+ if size == 0b00 | size == 0b11 then ReservedValue() else ();
+ index : int = undefined;
+ if size == 0b01 then index = UInt(H @ L) else ();
+ if size == 0b10 then index = UInt(H) else ();
+ esize : int = shl_int(8, UInt(size));
+ if ~(HaveFP16Ext()) & esize == 16 then ReservedValue() else ();
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ if size == 0b10 & (L == 0b1 | Q == 0b0) then ReservedValue() else ();
+ if (size == 0b01 & H == 0b1) & Q == 0b0 then ReservedValue() else ();
+ aarch64_vector_arithmetic_binary_element_mulacc_complex(d, datasize, elements, esize, index, m, n, rot)
+}
+
+val vector_arithmetic_binary_element_dotp_decode : (bits(1), bits(1), bits(2), bits(1), bits(1), bits(4), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_element_dotp_decode (Q, U, size, L, M, Rm, H, Rn, Rd) = {
+ __unconditional = true;
+ if ~(HaveDOTPExt()) then throw(Error_Undefined()) else ();
+ if size != 0b10 then ReservedValue() else ();
+ signed : bool = U == 0b0;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(M @ Rm);
+ index : int = UInt(H @ L);
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_element_dotp(d, datasize, elements, esize, index, m, n, signed)
+}
+
+val vector_arithmetic_binary_disparate_mul_product_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_disparate_mul_product_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_disparate_mul_product(d, datasize, elements, esize, m, n, part, unsigned)
+}
+
+val vector_arithmetic_binary_disparate_mul_poly_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_disparate_mul_poly_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b01 | size == 0b10 then ReservedValue() else ();
+ if size == 0b11 & ~(HaveCryptoExt()) then UnallocatedEncoding() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_disparate_mul_poly(d, datasize, elements, esize, m, n, part)
+}
+
+val vector_arithmetic_binary_disparate_mul_double_sisd_decode : (bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_disparate_mul_double_sisd_decode (U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b00 | size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ part : int = 0;
+ aarch64_vector_arithmetic_binary_disparate_mul_double_sisd(d, datasize, elements, esize, m, n, part)
+}
+
+val vector_arithmetic_binary_disparate_mul_double_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_disparate_mul_double_simd_decode (Q, U, size, Rm, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b00 | size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ aarch64_vector_arithmetic_binary_disparate_mul_double_sisd(d, datasize, elements, esize, m, n, part)
+}
+
+val vector_arithmetic_binary_disparate_mul_dmacc_sisd_decode : (bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_disparate_mul_dmacc_sisd_decode (U, size, Rm, o1, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b00 | size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ datasize : int = esize;
+ elements : int = 1;
+ part : int = 0;
+ sub_op : bool = o1 == 0b1;
+ aarch64_vector_arithmetic_binary_disparate_mul_dmacc_sisd(d, datasize, elements, esize, m, n, part, sub_op)
+}
+
+val vector_arithmetic_binary_disparate_mul_dmacc_simd_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_disparate_mul_dmacc_simd_decode (Q, U, size, Rm, o1, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b00 | size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ sub_op : bool = o1 == 0b1;
+ aarch64_vector_arithmetic_binary_disparate_mul_dmacc_sisd(d, datasize, elements, esize, m, n, part, sub_op)
+}
+
+val vector_arithmetic_binary_disparate_mul_accum_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_disparate_mul_accum_decode (Q, U, size, Rm, o1, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ sub_op : bool = o1 == 0b1;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_disparate_mul_accum(d, datasize, elements, esize, m, n, part, sub_op, unsigned)
+}
+
+val vector_arithmetic_binary_disparate_diff_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_disparate_diff_decode (Q, U, size, Rm, op, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ accumulate : bool = op == 0b0;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_disparate_diff(accumulate, d, datasize, elements, esize, m, n, part, unsigned)
+}
+
+val vector_arithmetic_binary_disparate_addsub_wide_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_disparate_addsub_wide_decode (Q, U, size, Rm, o1, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ sub_op : bool = o1 == 0b1;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_disparate_addsub_wide(d, datasize, elements, esize, m, n, part, sub_op, unsigned)
+}
+
+val vector_arithmetic_binary_disparate_addsub_narrow_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_disparate_addsub_narrow_decode (Q, U, size, Rm, o1, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ sub_op : bool = o1 == 0b1;
+ round : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_disparate_addsub_narrow(d, datasize, elements, esize, m, n, part, round, sub_op)
+}
+
+val vector_arithmetic_binary_disparate_addsub_long_decode : (bits(1), bits(1), bits(2), bits(5), bits(1), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function vector_arithmetic_binary_disparate_addsub_long_decode (Q, U, size, Rm, o1, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ if size == 0b11 then ReservedValue() else ();
+ esize : int = shl_int(8, UInt(size));
+ let 'datasize : {|64|} = 64;
+ part : int = UInt(Q);
+ elements : int = datasize / esize;
+ sub_op : bool = o1 == 0b1;
+ unsigned : bool = U == 0b1;
+ aarch64_vector_arithmetic_binary_disparate_addsub_long(d, datasize, elements, esize, m, n, part, sub_op, unsigned)
+}
+
+val memory_vector_multiple_postinc_aarch64_memory_vector_multiple_nowb__decode : (bits(1), bits(1), bits(5), bits(4), bits(2), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+function memory_vector_multiple_postinc_aarch64_memory_vector_multiple_nowb__decode (Q, L, Rm, opcode, size, Rn, Rt) = {
+ __unconditional = true;
+ t : int = UInt(Rt);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ wback : bool = true;
+ memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ esize : int = shl_int(8, UInt(size));
+ elements : int = datasize / esize;
+ rpt : int = undefined;
+ selem : int = undefined;
+ match opcode {
+ 0x0 => {
+ rpt = 1;
+ selem = 4
+ },
+ 0x2 => {
+ rpt = 4;
+ selem = 1
+ },
+ 0x4 => {
+ rpt = 1;
+ selem = 3
+ },
+ 0x6 => {
+ rpt = 3;
+ selem = 1
+ },
+ 0x7 => {
+ rpt = 1;
+ selem = 1
+ },
+ 0x8 => {
+ rpt = 1;
+ selem = 2
+ },
+ 0xA => {
+ rpt = 2;
+ selem = 1
+ },
+ _ => UnallocatedEncoding()
+ };
+ if (size @ Q) == 0b110 & selem != 1 then ReservedValue() else ();
+ aarch64_memory_vector_multiple_nowb(datasize, elements, esize, m, memop, n, rpt, selem, t, wback)
+}
+
+val memory_vector_multiple_nowb_aarch64_memory_vector_multiple_nowb__decode : (bits(1), bits(1), bits(4), bits(2), bits(5), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+function memory_vector_multiple_nowb_aarch64_memory_vector_multiple_nowb__decode (Q, L, opcode, size, Rn, Rt) = {
+ __unconditional = true;
+ t : int = UInt(Rt);
+ n : int = UInt(Rn);
+ m : int = undefined;
+ wback : bool = false;
+ memop : MemOp = if L == 0b1 then MemOp_LOAD else MemOp_STORE;
+ let 'datasize : {|128, 64|} = if Q == 0b1 then 128 else 64;
+ esize : int = shl_int(8, UInt(size));
+ elements : int = datasize / esize;
+ rpt : int = undefined;
+ selem : int = undefined;
+ match opcode {
+ 0x0 => {
+ rpt = 1;
+ selem = 4
+ },
+ 0x2 => {
+ rpt = 4;
+ selem = 1
+ },
+ 0x4 => {
+ rpt = 1;
+ selem = 3
+ },
+ 0x6 => {
+ rpt = 3;
+ selem = 1
+ },
+ 0x7 => {
+ rpt = 1;
+ selem = 1
+ },
+ 0x8 => {
+ rpt = 1;
+ selem = 2
+ },
+ 0xA => {
+ rpt = 2;
+ selem = 1
+ },
+ _ => UnallocatedEncoding()
+ };
+ if (size @ Q) == 0b110 & selem != 1 then ReservedValue() else ();
+ aarch64_memory_vector_multiple_nowb(datasize, elements, esize, m, memop, n, rpt, selem, t, wback)
+}
+
+val integer_logical_shiftedreg_decode : (bits(1), bits(2), bits(2), bits(1), bits(5), bits(6), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_logical_shiftedreg_decode (sf, opc, shift, N, Rm, imm6, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ setflags : bool = undefined;
+ op : LogicalOp = undefined;
+ match opc {
+ 0b00 => {
+ op = LogicalOp_AND;
+ setflags = false
+ },
+ 0b01 => {
+ op = LogicalOp_ORR;
+ setflags = false
+ },
+ 0b10 => {
+ op = LogicalOp_EOR;
+ setflags = false
+ },
+ 0b11 => {
+ op = LogicalOp_AND;
+ setflags = true
+ }
+ };
+ if sf == 0b0 & [imm6[5]] == 0b1 then ReservedValue() else ();
+ shift_type : ShiftType = DecodeShift(shift);
+ shift_amount : int = UInt(imm6);
+ invert : bool = N == 0b1;
+ aarch64_integer_logical_shiftedreg(d, datasize, invert, m, n, op, setflags, shift_amount, shift_type)
+}
+
+val integer_insext_extract_immediate_decode : (bits(1), bits(2), bits(1), bits(1), bits(5), bits(6), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_insext_extract_immediate_decode (sf, op21, N, o0, Rm, imms, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ lsb : int = undefined;
+ if N != sf then UnallocatedEncoding() else ();
+ if sf == 0b0 & [imms[5]] == 0b1 then ReservedValue() else ();
+ lsb = UInt(imms);
+ aarch64_integer_insext_extract_immediate(d, datasize, lsb, m, n)
+}
+
+val integer_arithmetic_addsub_shiftedreg_decode : (bits(1), bits(1), bits(1), bits(2), bits(5), bits(6), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_arithmetic_addsub_shiftedreg_decode (sf, op, S, shift, Rm, imm6, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ sub_op : bool = op == 0b1;
+ setflags : bool = S == 0b1;
+ if shift == 0b11 then ReservedValue() else ();
+ if sf == 0b0 & [imm6[5]] == 0b1 then ReservedValue() else ();
+ shift_type : ShiftType = DecodeShift(shift);
+ shift_amount : int = UInt(imm6);
+ aarch64_integer_arithmetic_addsub_shiftedreg(d, datasize, m, n, setflags, shift_amount, shift_type, sub_op)
+}
+
+val integer_arithmetic_addsub_immediate_decode : (bits(1), bits(1), bits(1), bits(2), bits(12), bits(5), bits(5)) -> unit effect {escape, undef, rreg, wreg}
+
+function integer_arithmetic_addsub_immediate_decode (sf, op, S, shift, imm12, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ sub_op : bool = op == 0b1;
+ setflags : bool = S == 0b1;
+ imm : bits('datasize) = undefined;
+ match shift {
+ 0b00 => imm = ZeroExtend(imm12, datasize),
+ 0b01 => imm = ZeroExtend(imm12 @ Zeros(12), datasize),
+ [bitone] @ _ : bits(1) => ReservedValue()
+ };
+ aarch64_integer_arithmetic_addsub_immediate(d, datasize, imm, n, setflags, sub_op)
+}
+
+val integer_arithmetic_addsub_extendedreg_decode : (bits(1), bits(1), bits(1), bits(2), bits(5), bits(3), bits(3), bits(5), bits(5)) -> unit effect {escape, rreg, undef, wreg}
+
+function integer_arithmetic_addsub_extendedreg_decode (sf, op, S, opt, Rm, option_name, imm3, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ m : int = UInt(Rm);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ sub_op : bool = op == 0b1;
+ setflags : bool = S == 0b1;
+ extend_type : ExtendType = DecodeRegExtend(option_name);
+ shift : int = UInt(imm3);
+ if shift > 4 then ReservedValue() else ();
+ aarch64_integer_arithmetic_addsub_extendedreg(d, datasize, extend_type, m, n, setflags, shift, sub_op)
+}
+
+val DecodeBitMasks : forall ('M : Int), 1 >= 0 & 6 >= 0 & 6 >= 0 & 'M >= 0 & 'M >= 0.
+ (bits(1), bits(6), bits(6), bool) -> (bits('M), bits('M)) effect {escape, rreg, undef, wreg}
+
+function DecodeBitMasks (immN, imms, immr, immediate) = {
+ tmask : bits(64) = undefined;
+ wmask : bits(64) = undefined;
+ tmask_and : bits(6) = undefined;
+ wmask_and : bits(6) = undefined;
+ tmask_or : bits(6) = undefined;
+ wmask_or : bits(6) = undefined;
+ levels : bits(6) = undefined;
+ let 'len = HighestSetBit(immN @ ~(imms));
+ assert('len >= 0);
+ if len < 1 then ReservedValue() else ();
+ assert('M >= shl_int(1, len), "(M >= (1 << len))");
+ levels = ZeroExtend(Ones(len), 6);
+ if immediate & (imms & levels) == levels then ReservedValue() else ();
+ S : int = UInt(imms & levels);
+ R : int = UInt(immr & levels);
+ diff : int = S - R;
+ tmask_and = __GetSlice_int(6, diff, 0) | ~(levels);
+ tmask_or = __GetSlice_int(6, diff, 0) & levels;
+ tmask = Ones(64);
+ tmask = tmask & replicate_bits(replicate_bits([tmask_and[0]], 1) @ Ones(1), 32) | replicate_bits(Zeros(1) @ replicate_bits([tmask_or[0]], 1), 32);
+ tmask = tmask & replicate_bits(replicate_bits([tmask_and[1]], 2) @ Ones(2), 16) | replicate_bits(Zeros(2) @ replicate_bits([tmask_or[1]], 2), 16);
+ tmask = tmask & replicate_bits(replicate_bits([tmask_and[2]], 4) @ Ones(4), 8) | replicate_bits(Zeros(4) @ replicate_bits([tmask_or[2]], 4), 8);
+ tmask = tmask & replicate_bits(replicate_bits([tmask_and[3]], 8) @ Ones(8), 4) | replicate_bits(Zeros(8) @ replicate_bits([tmask_or[3]], 8), 4);
+ tmask = tmask & replicate_bits(replicate_bits([tmask_and[4]], 16) @ Ones(16), 2) | replicate_bits(Zeros(16) @ replicate_bits([tmask_or[4]], 16), 2);
+ tmask = tmask & replicate_bits(replicate_bits([tmask_and[5]], 32) @ Ones(32), 1) | replicate_bits(Zeros(32) @ replicate_bits([tmask_or[5]], 32), 1);
+ wmask_and = immr | ~(levels);
+ wmask_or = immr & levels;
+ wmask = Zeros(64);
+ wmask = wmask & replicate_bits(Ones(1) @ replicate_bits([wmask_and[0]], 1), 32) | replicate_bits(replicate_bits([wmask_or[0]], 1) @ Zeros(1), 32);
+ wmask = wmask & replicate_bits(Ones(2) @ replicate_bits([wmask_and[1]], 2), 16) | replicate_bits(replicate_bits([wmask_or[1]], 2) @ Zeros(2), 16);
+ wmask = wmask & replicate_bits(Ones(4) @ replicate_bits([wmask_and[2]], 4), 8) | replicate_bits(replicate_bits([wmask_or[2]], 4) @ Zeros(4), 8);
+ wmask = wmask & replicate_bits(Ones(8) @ replicate_bits([wmask_and[3]], 8), 4) | replicate_bits(replicate_bits([wmask_or[3]], 8) @ Zeros(8), 4);
+ wmask = wmask & replicate_bits(Ones(16) @ replicate_bits([wmask_and[4]], 16), 2) | replicate_bits(replicate_bits([wmask_or[4]], 16) @ Zeros(16), 2);
+ wmask = wmask & replicate_bits(Ones(32) @ replicate_bits([wmask_and[5]], 32), 1) | replicate_bits(replicate_bits([wmask_or[5]], 32) @ Zeros(32), 1);
+ if __GetSlice_int(1, diff, 6) != 0b0 then wmask = wmask & tmask
+ else wmask = wmask | tmask;
+ return((slice(wmask, 0, 'M), slice(tmask, 0, 'M)))
+}
+
+val integer_logical_immediate_decode : (bits(1), bits(2), bits(1), bits(6), bits(6), bits(5), bits(5)) -> unit effect {escape, undef, rreg, wreg}
+
+function integer_logical_immediate_decode (sf, opc, N, immr, imms, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ setflags : bool = undefined;
+ op : LogicalOp = undefined;
+ match opc {
+ 0b00 => {
+ op = LogicalOp_AND;
+ setflags = false
+ },
+ 0b01 => {
+ op = LogicalOp_ORR;
+ setflags = false
+ },
+ 0b10 => {
+ op = LogicalOp_EOR;
+ setflags = false
+ },
+ 0b11 => {
+ op = LogicalOp_AND;
+ setflags = true
+ }
+ };
+ imm : bits('datasize) = undefined;
+ if sf == 0b0 & N != 0b0 then ReservedValue() else ();
+ __anon1 : bits('datasize) = undefined;
+ (imm, __anon1) = DecodeBitMasks(N, imms, immr, true) : (bits('datasize), bits('datasize));
+ aarch64_integer_logical_immediate(d, datasize, imm, n, op, setflags)
+}
+
+val integer_bitfield_decode : (bits(1), bits(2), bits(1), bits(6), bits(6), bits(5), bits(5)) -> unit effect {escape, undef, rreg, wreg}
+
+function integer_bitfield_decode (sf, opc, N, immr, imms, Rn, Rd) = {
+ __unconditional = true;
+ d : int = UInt(Rd);
+ n : int = UInt(Rn);
+ let 'datasize : {|64, 32|} = if sf == 0b1 then 64 else 32;
+ inzero : bool = undefined;
+ extend : bool = undefined;
+ R : int = undefined;
+ S : int = undefined;
+ wmask : bits('datasize) = undefined;
+ tmask : bits('datasize) = undefined;
+ match opc {
+ 0b00 => {
+ inzero = true;
+ extend = true
+ },
+ 0b01 => {
+ inzero = false;
+ extend = false
+ },
+ 0b10 => {
+ inzero = true;
+ extend = false
+ },
+ 0b11 => UnallocatedEncoding()
+ };
+ if sf == 0b1 & N != 0b1 then ReservedValue() else ();
+ if sf == 0b0 & ((N != 0b0 | [immr[5]] != 0b0) | [imms[5]] != 0b0) then ReservedValue() else ();
+ R = UInt(immr);
+ S = UInt(imms);
+ (wmask, tmask) = DecodeBitMasks(N, imms, immr, false) : (bits('datasize), bits('datasize));
+ aarch64_integer_bitfield(R, S, d, datasize, extend, inzero, n, tmask, wmask)
+}
diff --git a/aarch64/no_vector/spec.sail b/aarch64/no_vector/spec.sail
index 024393ad..775800f3 100644
--- a/aarch64/no_vector/spec.sail
+++ b/aarch64/no_vector/spec.sail
@@ -1374,7 +1374,7 @@ function AArch64_SysRegWrite ('op0, 'op1, 'crn, 'crm, 'op2, val_name) = assert(f
val AArch64_SysRegRead : (int, int, int, int, int) -> bits(64) effect {escape, undef}
-function AArch64_SysRegRead _ = {
+function AArch64_SysRegRead(_, _, _, _, _) = {
assert(false, "Tried to read system register");
undefined
}
diff --git a/cheri/Makefile b/cheri/Makefile
index 72eaf26b..421c4460 100644
--- a/cheri/Makefile
+++ b/cheri/Makefile
@@ -31,15 +31,24 @@ cheri_trace: $(CHERI_SAILS) $(CHERI_MAIN)
cheri.c: $(CHERI_SAILS) $(CHERI_MAIN)
$(SAIL) -memo_z3 -c $^ 1> $@
-latex: $(CHERI_SAILS)
+latex_128: $(MIPS_SAIL_DIR)/prelude.sail $(CHERI_SAIL_DIR)/cheri_types.sail $(CHERI_SAIL_DIR)/cheri_prelude_128.sail
+ rm -rf sail_latexcc
+ $(SAIL) -latex -latex_prefix sailcc -o sail_latexcc $^
+
+latex_256: $(CHERI_SAILS)
+ rm -rf sail_latex
$(SAIL) -latex $^
+latex: latex_128 latex_256
+
cheri128: $(CHERI128_SAILS) $(CHERI_MAIN)
$(SAIL) -ocaml -o $@ $^
cheri128_trace: $(CHERI128_SAILS) $(CHERI_MAIN)
$(SAIL) -ocaml_trace -o $@ $^
+LOC_FILES:=$(CHERI_SAILS) $(CHERI_MAIN)
+include ../etc/loc.mk
# TODO Using bit lists for now in Lem generation; for machine words,
# monomorphisation is needed due to some variable length bitvectors, e.g. in
@@ -50,7 +59,7 @@ cheri_no_tlb.lem: $(CHERI_NO_TLB_SAILS)
$(SAIL) -lem -o cheri_no_tlb -lem_lib Mips_extras -undefined_gen -memo_z3 $^
cheri_no_tlb_types.lem: cheri_no_tlb.lem
-cheri.lem: $(CHERI_SAILS)
+cheri.lem: $(CHERI_SAILS) $(CHERI_MAIN)
$(SAIL) -lem -o cheri -auto_mono -mono_rewrites -lem_mwords -lem_lib Mips_extras -undefined_gen -memo_z3 $^
cheri_types.lem: cheri.lem
@@ -75,54 +84,4 @@ C%.thy: c%.lem c%_types.lem $(MIPS_SAIL_DIR)/mips_extras.lem
lem -hol -outdir . -lib $(SAIL_DIR)/lib/hol -lib $(SAIL_DIR)/src/gen_lib -lib $(SAIL_DIR)/src/lem_interp $^
clean:
- rm -rf cheri cheri128 _sbuild inst_*.sail cheri.c
-
-EXTRACT_INST=sed -n "/START_${1}\b/,/END_${1}\b/p" cheri_insts.sail | sed 's/^ //;1d;$$d' > inst_$1.sail
-extract: cheri_insts.sail
- $(call EXTRACT_INST,CGetPerms)
- $(call EXTRACT_INST,CGetType)
- $(call EXTRACT_INST,CGetBase)
- $(call EXTRACT_INST,CGetOffset)
- $(call EXTRACT_INST,CGetLen)
- $(call EXTRACT_INST,CGetTag)
- $(call EXTRACT_INST,CGetSealed)
- $(call EXTRACT_INST,CGetAddr)
- $(call EXTRACT_INST,CGetPCC)
- $(call EXTRACT_INST,CGetPCCSetOffset)
- $(call EXTRACT_INST,CGetCause)
- $(call EXTRACT_INST,CSetCause)
- $(call EXTRACT_INST,CAndPerm)
- $(call EXTRACT_INST,CToPtr)
- $(call EXTRACT_INST,CSub)
- $(call EXTRACT_INST,CPtrCmp)
- $(call EXTRACT_INST,CIncOffset)
- $(call EXTRACT_INST,CIncOffsetImmediate)
- $(call EXTRACT_INST,CSetOffset)
- $(call EXTRACT_INST,CSetBounds)
- $(call EXTRACT_INST,CSetBoundsImmediate)
- $(call EXTRACT_INST,CSetBoundsExact)
- $(call EXTRACT_INST,CClearTag)
- $(call EXTRACT_INST,CMOVX)
- $(call EXTRACT_INST,ClearRegs)
- $(call EXTRACT_INST,CFromPtr)
- $(call EXTRACT_INST,CBuildCap)
- $(call EXTRACT_INST,CCopyType)
- $(call EXTRACT_INST,CCheckPerm)
- $(call EXTRACT_INST,CCheckType)
- $(call EXTRACT_INST,CTestSubset)
- $(call EXTRACT_INST,CSeal)
- $(call EXTRACT_INST,CCSeal)
- $(call EXTRACT_INST,CUnseal)
- $(call EXTRACT_INST,CCall)
- $(call EXTRACT_INST,CCall2)
- $(call EXTRACT_INST,CReturn)
- $(call EXTRACT_INST,CBtag)
- $(call EXTRACT_INST,CBz)
- $(call EXTRACT_INST,CJALR)
- $(call EXTRACT_INST,CLoad)
- $(call EXTRACT_INST,CStore)
- $(call EXTRACT_INST,CSC)
- $(call EXTRACT_INST,CLC)
- $(call EXTRACT_INST,CReadHwr)
- $(call EXTRACT_INST,CWriteHwr)
-
+ rm -rf cheri cheri_trace cheri128 cheri128_trace _sbuild inst_*.sail cheri.c sail_latex sail_latexcc
diff --git a/cheri/ROOT b/cheri/ROOT
new file mode 100644
index 00000000..244413d5
--- /dev/null
+++ b/cheri/ROOT
@@ -0,0 +1,4 @@
+session "Sail-CHERI" = "Sail" +
+ options [document = false]
+ theories
+ Cheri_lemmas
diff --git a/cheri/cheri_insts.sail b/cheri/cheri_insts.sail
index 0a4538b9..2f083411 100644
--- a/cheri/cheri_insts.sail
+++ b/cheri/cheri_insts.sail
@@ -203,19 +203,16 @@ union clause ast = CGetAddr : (regno, regno)
function clause execute (CGetPerm(rd, cb)) =
{
- /* START_CGetPerms */
checkCP2usable();
if (register_inaccessible(cb)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cb)
else
let capVal = readCapReg(cb) in
wGPR(rd) = zero_extend(getCapPerms(capVal));
- /* END_CGetPerms */
}
function clause execute (CGetType(rd, cb)) =
{
- /* START_CGetType */
checkCP2usable();
if (register_inaccessible(cb)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cb)
@@ -224,36 +221,30 @@ function clause execute (CGetType(rd, cb)) =
wGPR(rd) = if (capVal.sealed)
then zero_extend(capVal.otype)
else (bitone ^^ 64)
- /* END_CGetType */
}
function clause execute (CGetBase(rd, cb)) =
{
- /* START_CGetBase */
checkCP2usable();
if (register_inaccessible(cb)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cb)
else
let capVal = readCapReg(cb) in
wGPR(rd) = to_bits(64, getCapBase(capVal));
- /* END_CGetBase */
}
function clause execute (CGetOffset(rd, cb)) =
{
- /* START_CGetOffset */
checkCP2usable();
if (register_inaccessible(cb)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cb)
else
let capVal = readCapReg(cb) in
wGPR(rd) = to_bits(64, getCapOffset(capVal));
- /* END_CGetOffset */
}
function clause execute (CGetLen(rd, cb)) =
{
- /* START_CGetLen */
checkCP2usable();
if (register_inaccessible(cb)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cb)
@@ -261,49 +252,41 @@ function clause execute (CGetLen(rd, cb)) =
let capVal = readCapReg(cb) in
let len65 = getCapLength(capVal) in
wGPR(rd) = to_bits(64, if len65 > MAX_U64 then MAX_U64 else len65);
- /* END_CGetLen */
}
function clause execute (CGetTag(rd, cb)) =
{
- /* START_CGetTag */
checkCP2usable();
if (register_inaccessible(cb)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cb)
else
let capVal = readCapReg(cb) in
wGPR(rd) = zero_extend(capVal.tag);
- /* END_CGetTag */
}
function clause execute (CGetSealed(rd, cb)) =
{
- /* START_CGetSealed */
checkCP2usable();
if (register_inaccessible(cb)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cb)
else
let capVal = readCapReg(cb) in
wGPR(rd) = zero_extend(capVal.sealed);
- /* END_CGetSealed */
}
function clause execute (CGetAddr(rd, cb)) =
{
- /* START_CGetAddr */
checkCP2usable();
if (register_inaccessible(cb)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cb)
else
let capVal = readCapReg(cb) in
wGPR(rd) = to_bits(64, getCapCursor(capVal));
- /* END_CGetAddr */
}
union clause ast = CGetPCC : regno
function clause execute (CGetPCC(cd)) =
{
- /* START_CGetPCC */
checkCP2usable();
if (register_inaccessible(cd)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cd)
@@ -312,14 +295,12 @@ function clause execute (CGetPCC(cd)) =
let (success, pcc2) = setCapOffset(pcc, PC) in
{assert (success, ""); /* guaranteed to be in-bounds */
writeCapReg(cd, pcc2)};
- /* END_CGetPCC */
}
union clause ast = CGetPCCSetOffset : (regno, regno)
function clause execute (CGetPCCSetOffset(cd, rs)) =
{
- /* START_CGetPCCSetOffset */
checkCP2usable();
if (register_inaccessible(cd)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cd)
@@ -331,26 +312,22 @@ function clause execute (CGetPCCSetOffset(cd, rs)) =
writeCapReg(cd, newPCC)
else
writeCapReg(cd, int_to_cap(rs_val));
- /* END_CGetPCCSetOffset */
}
/* Get and Set CP2 cause register */
union clause ast = CGetCause : regno
function clause execute (CGetCause(rd)) =
{
- /* START_CGetCause */
checkCP2usable();
if not (pcc_access_system_regs ()) then
raise_c2_exception_noreg(CapEx_AccessSystemRegsViolation)
else
wGPR(rd) = zero_extend(CapCause.bits())
- /* END_CGetCause */
}
union clause ast = CSetCause : regno
function clause execute (CSetCause(rt)) =
{
- /* START_CSetCause */
checkCP2usable();
if not (pcc_access_system_regs ()) then
raise_c2_exception_noreg(CapEx_AccessSystemRegsViolation)
@@ -360,13 +337,11 @@ function clause execute (CSetCause(rt)) =
CapCause->ExcCode() = rt_val[15..8];
CapCause->RegNum() = rt_val[7..0];
}
- /* END_CSetCause */
}
union clause ast = CReadHwr : (regno, regno)
function clause execute (CReadHwr(cd, sel)) =
{
- /* START_CReadHwr */
checkCP2usable();
let (needSup, needAccessSys) : (bool, bool) = match unsigned(sel) {
0 => (false, false), /* DDC -- no access control */
@@ -399,13 +374,11 @@ function clause execute (CReadHwr(cd, sel)) =
};
writeCapReg(cd, capVal);
};
- /* END_CReadHwr */
}
union clause ast = CWriteHwr : (regno, regno)
function clause execute (CWriteHwr(cb, sel)) =
{
- /* START_CWriteHwr */
checkCP2usable();
let (needSup, needAccessSys) : (bool, bool) = match unsigned(sel) {
0 => (false, false), /* DDC -- no access control */
@@ -438,13 +411,11 @@ function clause execute (CWriteHwr(cb, sel)) =
_ => assert(false, "should be unreachable code")
};
};
- /* END_CWriteHwr */
}
union clause ast = CAndPerm : (regno, regno, regno)
function clause execute(CAndPerm(cd, cb, rt)) =
{
- /* START_CAndPerm */
checkCP2usable();
let cb_val = readCapReg(cb);
let rt_val = rGPR(rt);
@@ -460,7 +431,6 @@ function clause execute(CAndPerm(cd, cb, rt)) =
let perms = getCapPerms(cb_val) in
let newCap = setCapPerms(cb_val, (perms & rt_val[30..0])) in
writeCapReg(cd, newCap);
- /* END_CAndPerm */
}
@@ -468,7 +438,6 @@ function clause execute(CAndPerm(cd, cb, rt)) =
union clause ast = CToPtr : (regno, regno, regno)
function clause execute(CToPtr(rd, cb, ct)) =
{
- /* START_CToPtr */
checkCP2usable();
let ct_val = readCapReg(ct);
let cb_val = readCapReg(cb);
@@ -493,7 +462,6 @@ function clause execute(CToPtr(rd, cb, ct)) =
else
to_bits(64, getCapCursor(cb_val) - ctBase)
}
- /* END_CToPtr */
}
@@ -501,7 +469,6 @@ function clause execute(CToPtr(rd, cb, ct)) =
union clause ast = CSub : (regno, regno, regno)
function clause execute(CSub(rd, cb, ct)) =
{
- /* START_CSub */
checkCP2usable();
let ct_val = readCapReg(ct);
let cb_val = readCapReg(cb);
@@ -513,13 +480,11 @@ function clause execute(CSub(rd, cb, ct)) =
{
wGPR(rd) = to_bits(64, getCapCursor(cb_val) - getCapCursor(ct_val))
}
- /* END_CSub */
}
union clause ast = CPtrCmp : (regno, regno, regno, CPtrCmpOp)
function clause execute(CPtrCmp(rd, cb, ct, op)) =
{
- /* START_CPtrCmp */
checkCP2usable();
if (register_inaccessible(cb)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cb)
@@ -560,13 +525,11 @@ function clause execute(CPtrCmp(rd, cb, ct, op)) =
};
wGPR(rd) = zero_extend (cmp)
}
- /* END_CPtrCmp */
}
union clause ast = CIncOffset : (regno, regno, regno)
function clause execute (CIncOffset(cd, cb, rt)) =
{
- /* START_CIncOffset */
checkCP2usable();
cb_val = readCapReg(cb);
rt_val = rGPR(rt);
@@ -582,13 +545,11 @@ function clause execute (CIncOffset(cd, cb, rt)) =
writeCapReg(cd, newCap)
else
writeCapReg(cd, int_to_cap(to_bits(64, getCapBase(cb_val)) + rt_val))
- /* END_CIncOffset */
}
union clause ast = CIncOffsetImmediate : (regno, regno, bits(11))
function clause execute (CIncOffsetImmediate(cd, cb, imm)) =
{
- /* START_CIncOffsetImmediate */
checkCP2usable();
let cb_val = readCapReg(cb);
let imm64 : bits(64) = sign_extend(imm) in
@@ -604,13 +565,11 @@ function clause execute (CIncOffsetImmediate(cd, cb, imm)) =
writeCapReg(cd, newCap)
else
writeCapReg(cd, int_to_cap(to_bits(64, getCapBase(cb_val)) + imm64))
- /* END_CIncOffsetImmediate */
}
union clause ast = CSetOffset : (regno, regno, regno)
function clause execute (CSetOffset(cd, cb, rt)) =
{
- /* START_CSetOffset */
checkCP2usable();
let cb_val = readCapReg(cb);
let rt_val = rGPR(rt);
@@ -626,13 +585,11 @@ function clause execute (CSetOffset(cd, cb, rt)) =
writeCapReg(cd, newCap)
else
writeCapReg(cd, int_to_cap(to_bits(64, getCapBase(cb_val)) + rt_val))
- /* END_CSetOffset */
}
union clause ast = CSetBounds : (regno, regno, regno)
function clause execute (CSetBounds(cd, cb, rt)) =
{
- /* START_CSetBounds */
checkCP2usable();
let cb_val = readCapReg(cb);
let rt_val = unsigned(rGPR(rt));
@@ -655,13 +612,11 @@ function clause execute (CSetBounds(cd, cb, rt)) =
else
let (_, newCap) = setCapBounds(cb_val, to_bits(64, cursor), to_bits(65, newTop)) in
writeCapReg(cd, newCap) /* ignore exact */
- /* END_CSetBounds */
}
union clause ast = CSetBoundsImmediate : (regno, regno, bits(11))
function clause execute (CSetBoundsImmediate(cd, cb, imm)) =
{
- /* START_CSetBoundsImmediate */
checkCP2usable();
cb_val = readCapReg(cb);
immU = unsigned(imm);
@@ -684,13 +639,11 @@ function clause execute (CSetBoundsImmediate(cd, cb, imm)) =
else
let (_, newCap) = setCapBounds(cb_val, to_bits(64, cursor), to_bits(65, newTop)) in
writeCapReg(cd, newCap) /* ignore exact */
- /* END_CSetBoundsImmediate */
}
union clause ast = CSetBoundsExact : (regno, regno, regno)
function clause execute (CSetBoundsExact(cd, cb, rt)) =
{
- /* START_CSetBoundsExact */
checkCP2usable();
cb_val = readCapReg(cb);
rt_val = unsigned(rGPR(rt));
@@ -716,13 +669,11 @@ function clause execute (CSetBoundsExact(cd, cb, rt)) =
raise_c2_exception(CapEx_InexactBounds, cb)
else
writeCapReg(cd, newCap)
- /* END_CSetBoundsExact */
}
union clause ast = CClearTag : (regno, regno)
function clause execute (CClearTag(cd, cb)) =
{
- /* START_CClearTag */
checkCP2usable();
if (register_inaccessible(cd)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cd)
@@ -733,13 +684,11 @@ function clause execute (CClearTag(cd, cb)) =
cb_val = readCapReg(cb);
writeCapReg(cd, {cb_val with tag=false});
}
- /* END_CClearTag */
}
union clause ast = CMOVX : (regno,regno,regno,bool)
function clause execute (CMOVX(cd, cb, rt, ismovn)) =
{
- /* START_CMOVX */
checkCP2usable();
if (register_inaccessible(cd)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cd)
@@ -747,13 +696,11 @@ function clause execute (CMOVX(cd, cb, rt, ismovn)) =
raise_c2_exception(CapEx_AccessSystemRegsViolation, cb)
else if ((rGPR(rt) == zeros()) ^ ismovn) then
writeCapReg(cd) = readCapReg(cb);
- /* END_CMOVX */
}
union clause ast = ClearRegs : (ClearRegSet, bits(16))
function clause execute (ClearRegs(regset, m)) =
{
- /* START_ClearRegs */
if ((regset == CLo) | (regset == CHi)) then
checkCP2usable();
if (regset == CHi) then
@@ -769,13 +716,11 @@ function clause execute (ClearRegs(regset, m)) =
CLo => writeCapReg(to_bits(5, i)) = null_cap,
CHi => writeCapReg(to_bits(5, i+16)) = null_cap
}
- /* END_ClearRegs */
}
union clause ast = CFromPtr : (regno, regno, regno)
function clause execute (CFromPtr(cd, cb, rt)) =
{
- /* START_CFromPtr */
checkCP2usable();
cb_val = readCapReg(cb);
rt_val = rGPR(rt);
@@ -795,13 +740,11 @@ function clause execute (CFromPtr(cd, cb, rt)) =
writeCapReg(cd, newCap)
else
writeCapReg(cd, int_to_cap(to_bits(64, getCapBase(cb_val)) + rt_val))
- /* END_CFromPtr */
}
union clause ast = CBuildCap : (regno, regno, regno)
function clause execute (CBuildCap(cd, cb, ct)) =
{
- /* START_CBuildCap */
checkCP2usable();
cb_val = readCapReg(cb);
ct_val = readCapReg(ct);
@@ -839,14 +782,12 @@ function clause execute (CBuildCap(cd, cb, ct)) =
assert(representable, ""); /* similarly offset should be representable XXX except for fastRepCheck */
writeCapReg(cd, cd3);
}
- /* END_CBuildCap */
}
union clause ast = CCopyType : (regno, regno, regno)
function clause execute (CCopyType(cd, cb, ct)) =
{
- /* START_CCopyType */
- checkCP2usable();
+ checkCP2usable();
cb_val = readCapReg(cb);
ct_val = readCapReg(ct);
cb_base = getCapBase(cb_val);
@@ -874,13 +815,11 @@ function clause execute (CCopyType(cd, cb, ct)) =
}
} else
writeCapReg(cd, int_to_cap(bitone ^^ 64))
- /* END_CCopyType */
}
union clause ast = CCheckPerm : (regno, regno)
function clause execute (CCheckPerm(cs, rt)) =
{
- /* START_CCheckPerm */
checkCP2usable();
cs_val = readCapReg(cs);
cs_perms : bits(64) = zero_extend(getCapPerms(cs_val));
@@ -891,13 +830,11 @@ function clause execute (CCheckPerm(cs, rt)) =
raise_c2_exception(CapEx_TagViolation, cs)
else if ((cs_perms & rt_perms) != rt_perms) then
raise_c2_exception(CapEx_UserDefViolation, cs)
- /* END_CCheckPerm */
}
union clause ast = CCheckType : (regno, regno)
function clause execute (CCheckType(cs, cb)) =
{
- /* START_CCheckType */
checkCP2usable();
cs_val = readCapReg(cs);
cb_val = readCapReg(cb);
@@ -915,13 +852,11 @@ function clause execute (CCheckType(cs, cb)) =
raise_c2_exception(CapEx_SealViolation, cb)
else if ((cs_val.otype) != (cb_val.otype)) then
raise_c2_exception(CapEx_TypeViolation, cs)
- /* END_CCheckType */
}
union clause ast = CTestSubset : (regno, regno, regno)
function clause execute (CTestSubset(rd, cb, ct)) =
{
- /* START_CTestSubset */
checkCP2usable();
cb_val = readCapReg(cb);
ct_val = readCapReg(ct);
@@ -948,13 +883,11 @@ function clause execute (CTestSubset(rd, cb, ct)) =
0b1;
wGPR(rd) = zero_extend(result);
}
- /* END_CTestSubset */
}
union clause ast = CSeal : (regno, regno, regno)
function clause execute (CSeal(cd, cs, ct)) =
{
- /* START_CSeal */
checkCP2usable();
cs_val = readCapReg(cs);
ct_val = readCapReg(ct);
@@ -989,13 +922,11 @@ function clause execute (CSeal(cd, cs, ct)) =
raise_c2_exception(CapEx_InexactBounds, cs)
else
writeCapReg(cd, newCap)
- /* END_CSeal */
}
union clause ast = CCSeal : (regno, regno, regno)
function clause execute (CCSeal(cd, cs, ct)) =
{
- /* START_CCSeal */
checkCP2usable();
cs_val = readCapReg(cs);
ct_val = readCapReg(ct);
@@ -1030,13 +961,11 @@ function clause execute (CCSeal(cd, cs, ct)) =
raise_c2_exception(CapEx_InexactBounds, cs)
else
writeCapReg(cd, newCap)
- /* END_CCSeal */
}
union clause ast = CUnseal : (regno, regno, regno)
function clause execute (CUnseal(cd, cs, ct)) =
{
- /* START_CUnseal */
checkCP2usable();
cs_val = readCapReg(cs);
ct_val = readCapReg(ct);
@@ -1069,14 +998,12 @@ function clause execute (CUnseal(cd, cs, ct)) =
otype=zeros(),
global=(cs_val.global & ct_val.global)
})
- /* END_CUnseal */
}
union clause ast = CCall : (regno, regno, bits(11))
function clause execute (CCall(cs, cb, 0b00000000000)) = /* selector=0 */
{
/* Partial implementation of CCall with checks in hardware, but raising a trap to perform trusted stack manipulation */
- /* START_CCall */
checkCP2usable();
cs_val = readCapReg(cs);
cb_val = readCapReg(cb);
@@ -1105,13 +1032,11 @@ function clause execute (CCall(cs, cb, 0b00000000000)) = /* selector=0 */
raise_c2_exception(CapEx_LengthViolation, cs)
else
raise_c2_exception(CapEx_CallTrap, cs);
- /* END_CCall */
}
function clause execute (CCall(cs, cb, 0b00000000001)) = /* selector=1 */
{
/* Jump-like implementation of CCall that unseals arguments */
- /* START_CCall2 */
checkCP2usable();
cs_val = readCapReg(cs);
cb_val = readCapReg(cb);
@@ -1154,22 +1079,18 @@ function clause execute (CCall(cs, cb, 0b00000000001)) = /* selector=1 */
otype=zeros()
});
}
- /* END_CCall2 */
}
union clause ast = CReturn : unit
function clause execute (CReturn()) =
{
- /* START_CReturn */
checkCP2usable();
raise_c2_exception_noreg(CapEx_ReturnTrap)
- /* END_CReturn */
}
union clause ast = CBX : (regno, bits(16), bool)
function clause execute (CBX(cb, imm, notset)) =
{
- /* START_CBtag */
checkCP2usable();
if (register_inaccessible(cb)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cb)
@@ -1178,13 +1099,11 @@ function clause execute (CBX(cb, imm, notset)) =
let offset : bits(64) = (sign_extend(imm @ 0b00) + 4) in
execute_branch(PC + offset);
}
- /* END_CBtag */
}
union clause ast = CBZ : (regno, bits(16), bool)
function clause execute (CBZ(cb, imm, notzero)) =
{
- /* START_CBz */
checkCP2usable();
if (register_inaccessible(cb)) then
raise_c2_exception(CapEx_AccessSystemRegsViolation, cb)
@@ -1193,13 +1112,11 @@ function clause execute (CBZ(cb, imm, notzero)) =
let offset : bits(64) = (sign_extend(imm @ 0b00) + 4) in
execute_branch(PC + offset);
}
- /* END_CBz */
}
union clause ast = CJALR : (regno, regno, bool)
function clause execute(CJALR(cd, cb, link)) =
{
- /* START_CJALR */
checkCP2usable();
cb_val = readCapReg(cb);
cb_ptr = getCapCursor(cb_val);
@@ -1232,13 +1149,11 @@ function clause execute(CJALR(cd, cb, link)) =
assert(false, "");
execute_branch_pcc(cb_val);
}
- /* END_CJALR */
}
union clause ast = CLoad : (regno, regno, regno, bits(8), bool, WordType, bool)
function clause execute (CLoad(rd, cb, rt, offset, signext, width, linked)) =
{
- /* START_CLoad */
checkCP2usable();
cb_val = readCapReg(cb);
if (register_inaccessible(cb)) then
@@ -1275,14 +1190,12 @@ function clause execute (CLoad(rd, cb, rt, offset, signext, width, linked)) =
wGPR(rd) = memResult;
}
}
- /* END_CLoad */
}
union clause ast = CStore : (regno, regno, regno, regno, bits(8), WordType, bool)
function clause execute (CStore(rs, cb, rt, rd, offset, width, conditional)) =
{
- /* START_CStore */
checkCP2usable();
cb_val = readCapReg(cb);
if (register_inaccessible(cb)) then
@@ -1333,13 +1246,11 @@ function clause execute (CStore(rs, cb, rt, rd, offset, width, conditional)) =
}
}
}
- /* END_CStore */
}
union clause ast = CSC : (regno, regno, regno, regno, bits(11), bool)
function clause execute (CSC(cs, cb, rt, rd, offset, conditional)) =
{
- /* START_CSC */
checkCP2usable();
cs_val = readCapReg(cs);
cb_val = readCapReg(cb);
@@ -1385,13 +1296,11 @@ function clause execute (CSC(cs, cb, rt, rd, offset, conditional)) =
MEMw_tagged(pAddr, cs_val.tag, capStructToMemBits(cs_val));
}
}
- /* END_CSC */
}
union clause ast = CLC : (regno, regno, regno, bits(11), bool)
function clause execute (CLC(cd, cb, rt, offset, linked)) =
{
- /* START_CLC */
checkCP2usable();
cb_val = readCapReg(cb);
if (register_inaccessible(cd)) then
@@ -1433,7 +1342,6 @@ function clause execute (CLC(cd, cb, rt, offset, linked)) =
}
}
}
- /* END_CLC */
}
union clause ast = C2Dump : regno
diff --git a/cheri/cheri_prelude_256.sail b/cheri/cheri_prelude_256.sail
index 5590bbb8..babfc117 100644
--- a/cheri/cheri_prelude_256.sail
+++ b/cheri/cheri_prelude_256.sail
@@ -196,20 +196,32 @@ function sealCap(cap, otype) : (CapStruct, bits(24)) -> (bool, CapStruct) =
(true, {cap with sealed=true, otype=otype})
function getCapBase(c) : CapStruct -> uint64 = unsigned(c.base)
-function getCapTop(c) : CapStruct -> CapLen = unsigned(c.base) + unsigned(c.length)
+function getCapTop(c) : CapStruct -> CapLen = unsigned(c.base) + unsigned(c.length) /* XXX bug here? -- should be mod 2^64 */
function getCapOffset(c) : CapStruct -> uint64 = (unsigned(c.address) - unsigned(c.base)) % (pow2(64))
function getCapLength(c) : CapStruct -> CapLen = unsigned(c.length)
function getCapCursor(c) : CapStruct -> uint64 = unsigned(c.address)
-function setCapOffset(c, offset) : (CapStruct, bits(64)) -> (bool, CapStruct) =
+/*!
Set the offset of a capability to the given value and return the result, along with a boolean that is true if the operation preserved the existing bounds of the capability. When using compressed capabilities, setting the offset far outside the capability bounds can cause the result to become unrepresentable (XXX mention guarantees). Additionally, in some implementations a fast representability check may be used that could cause the operation to return failure even though the capability would be representable (XXX provide details).
+ */
+val setCapOffset : (CapStruct, bits(64)) -> (bool, CapStruct)
+function setCapOffset(c, offset) =
(true, {c with address=c.base + offset})
-function incCapOffset(c, delta) : (CapStruct, bits(64)) -> (bool, CapStruct) =
+/*!
+\function{incCapOffset} is the same as \function{setCapOffset} except that the 64-bit value is added to the current capability offset modulo $2^{64}$ (i.e. signed two's-complement arithmetic).
+ */
+val incCapOffset : (CapStruct, bits(64)) -> (bool, CapStruct)
+function incCapOffset(c, delta) =
let newAddr : bits(64) = c.address + delta in
(true, {c with address = newAddr})
-function setCapBounds(cap, base, top) : (CapStruct, bits(64), bits(65)) -> (bool, CapStruct) =
+/*!
+Returns a capability derived from the given capability by setting the base and top to the values provided. The offset of the resulting capability is zero. In case the requested bounds are not exactly representable, the returned boolean is false and the returned capability has bounds at least including the region bounded by base and top but rounded to representable values.
+ */
+val setCapBounds : (CapStruct, bits(64), bits(65)) -> (bool, CapStruct)
+function setCapBounds(cap, base, top) =
let length : bits(65) = top - (0b0 @ base) in
(true, {cap with base = base, length = length[63..0], address = base})
diff --git a/cheri/cheri_prelude_common.sail b/cheri/cheri_prelude_common.sail
index ecb98ef8..47c8759c 100644
--- a/cheri/cheri_prelude_common.sail
+++ b/cheri/cheri_prelude_common.sail
@@ -128,7 +128,11 @@ let CapRegs : vector(32, dec, register(CapReg)) =
let max_otype = MAX(24) /*0xffffff*/
let have_cp2 = true
-function readCapReg(n) : regno -> CapStruct =
+/*!
+This function reads a given capability register and returns its contents converted to a CapStruct.
+*/
+val readCapReg : regno -> CapStruct effect {rreg}
+function readCapReg(n) =
let 'i = unsigned(n) in
capRegToCapStruct(reg_deref(CapRegs[i]))
@@ -257,21 +261,17 @@ function pcc_access_system_regs () =
let pcc = capRegToCapStruct(PCC) in
(pcc.access_system_regs)
+/*!
+The following function should be called before reading or writing any capability register to check whether it is one of the protected system capabilities. Although it is usually a general purpose capability, the invoked data capability (IDC) is restricted in the branch delay slot of the CCall (selector one) instruction to protect the confidentiality and integrity of the invoked sandbox.
+ */
val register_inaccessible : regno -> bool effect {rreg}
function register_inaccessible(r) =
- if (r == IDC) & inCCallDelay then true else
- let is_sys_reg : bool = match r {
- 0b11011 => true,
- 0b11100 => true,
- 0b11101 => true,
- 0b11110 => true,
- 0b11111 => true,
- _ => false
- } in
- if is_sys_reg then
- not (pcc_access_system_regs ())
- else
- false
+ ((r == IDC) & inCCallDelay) |
+ ((r == KR1C |
+ r == KR2C |
+ r == KDC |
+ r == KCC |
+ r == EPCC) & not (pcc_access_system_regs ()))
val MEMr_tag = "read_tag_bool" : bits(64) -> bool effect { rmemt }
val MEMw_tag = "write_tag_bool" : (bits(64) , bool) -> unit effect { wmvt }
@@ -399,13 +399,20 @@ function TranslatePC (vAddr) = {
else
TLBTranslate(to_bits(64, absPC), Instruction) /* XXX assert absPC never gets truncated due to above check and top <= 2^64 for valid caps */
}
+
+/*!
+All capability instructions must first check that the capability
+co-processor is enabled using the following function that raises a
+co-processor unusable exception if CP0Status.CU2 is not set. This
+allows the operating system to only save and restore the full
+capability context for processes that use capabilities.
+*/
val checkCP2usable : unit -> unit effect {rreg, wreg, escape}
function checkCP2usable () =
- if not ((CP0Status.CU())[2]) then
- {
- (CP0Cause->CE()) = 0b10;
- (SignalException(CpU));
- }
+ if not (CP0Status.CU()[2]) then {
+ CP0Cause->CE() = 0b10;
+ SignalException(CpU);
+ }
function init_cp2_state () = {
let defaultBits = capStructToCapReg(default_cap);
diff --git a/doc/examples/overload.sail b/doc/examples/overload.sail
new file mode 100644
index 00000000..b6213cdd
--- /dev/null
+++ b/doc/examples/overload.sail
@@ -0,0 +1,10 @@
+val print_int : int -> unit
+
+val print_string : string -> unit
+
+overload print = {print_int, print_string}
+
+function main() : unit -> unit = {
+ print("Hello, World!");
+ print(4)
+} \ No newline at end of file
diff --git a/doc/examples/zeros.sail b/doc/examples/zeros.sail
new file mode 100644
index 00000000..7781c9ff
--- /dev/null
+++ b/doc/examples/zeros.sail
@@ -0,0 +1,5 @@
+val zero_extend_1 : forall 'm 'n, 'm <= 'n. bits('m) -> bits('n)
+
+val zero_extend_2 : forall 'm 'n, 'm <= 'n. (bits('m), int('n)) -> bits('n)
+
+overload zero_extend = {zero_extend_1, zero_extend_2} \ No newline at end of file
diff --git a/doc/tutorial.tex b/doc/tutorial.tex
index 61ec73f3..98a06710 100644
--- a/doc/tutorial.tex
+++ b/doc/tutorial.tex
@@ -43,7 +43,7 @@ expressions of the appropriate size that are available within the
surrounding scope, in this case \ll{n} and \ll{length(xs)}. If no
suitable expressions are found to trivially rewrite these type
variables, then additional function parameters will be automatically
-added to pass around this information at runtime. This feature is
+added to pass around this information at run-time. This feature is
however very useful for implementing functions with implicit
parameters, e.g. we can implement a zero extension function that
implicitly picks up its result length from the calling context as
@@ -61,6 +61,7 @@ since there is only a single line of code within the foreach block, we
can drop it and simply write: \mrbfnmyreplicatebitsthree
\subsection{Numeric Types}
+\label{sec:numeric}
Sail has three basic numeric types, \ll{int}, \ll{nat}, and
\ll{range}. The type \ll{int} is an arbitrary precision mathematical
@@ -69,7 +70,7 @@ number. The type \ll{range('n,'m)} is an inclusive range between the
\ll{Int}-kinded type variables \ll{'n} and \ll{'m}. The type
\ll{int('o)} is an integer exactly equal to the \ll{Int}-kinded type
variable \ll{'n}, i.e. \ll{int('o)} $=$ \ll{range('o,'o)}. These types
-can be used interchangably provided the rules summarised in the below
+can be used interchangeably provided the rules summarised in the below
diagram are satisfied (via constraint solving).
\begin{center}
@@ -97,8 +98,9 @@ up being equal to \ll{6 : range(5, 6)}. This kind of implicit casting
from bits to other numeric types would be highly undesirable.
\subsection{Vector Type}
+\label{sec:vec}
-Sail has the builtin type vector, which is a polymorphic type for
+Sail has the built-in type vector, which is a polymorphic type for
fixed-length vectors. For example, we could define a vector \ll{v} of
three integers as follows:
\begin{lstlisting}
@@ -210,7 +212,7 @@ A vector index can be updated using
\lstinline[mathescape]{[$\textit{vector}$ with $\textit{index}$ = $\textit{expression}$]}
notation.
%
-Similarly, a subrange of a vector can be updated using
+Similarly, a sub-range of a vector can be updated using
%
\lstinline[mathescape]{[$\textit{vector}$ with $\textit{index}_{msb}$ .. $\textit{index}_{lsb}$ = $\textit{expression}$]},
%
@@ -218,12 +220,12 @@ where the order of the indexes is the same as described above for
increasing and decreasing vectors.
These expressions are actually just syntactic sugar for several
-builtin functions, namely \ll{vector_access}, \ll{vector_subrange},
+built-in functions, namely \ll{vector_access}, \ll{vector_subrange},
\ll{vector_update}, and \ll{vector_update_subrange}.
\subsection{List Type}
-In addition to vectors, Sail also has \ll{list} as a builtin type. For
+In addition to vectors, Sail also has \ll{list} as a built-in type. For
example:
\begin{lstlisting}
let l : list(int) = [|1, 2, 3|]
@@ -317,7 +319,7 @@ match option {
\end{lstlisting}
Note that like how calling a function with a unit argument can be done
as \ll{f()} rather than \ll{f(())}, matching on a constructor \ll{C}
-with a unit type can be acheived by using \ll{C()} rather than
+with a unit type can be achieved by using \ll{C()} rather than
\ll{C(())}.
\paragraph{Matching on bit vectors}
@@ -327,16 +329,16 @@ Sail allows numerous ways to match on bitvectors, for example:
match v {
0xFF => print("hex match"),
0x0000_0001 => print("binary match"),
- 0xF @ v : bits(4) => print("vector concatentation pattern"),
+ 0xF @ v : bits(4) => print("vector concatenation pattern"),
0xF @ [bitone, _, b1, b0] => print("vector pattern"),
_ : bits(4) @ v : bits(4) => print("annotated wildcard pattern")
}
\end{lstlisting}
We can match on bitvector literals in either hex or binary forms. We
-also have vector concatantion patterns, of the form
+also have vector concatenation patterns, of the form
\lstinline[mathescape]{$\mathit{pattern}$ @ $\ldots$ @ $\mathit{pattern}$}.
-We must be able to infer the length of all the subpatterns in a vector
-concatentation pattern, hence why in the example above all the
+We must be able to infer the length of all the sub-patterns in a vector
+concatenation pattern, hence why in the example above all the
wildcard and variable patterns beneath vector concatenation patterns
have type annotations. In the context of a pattern the \ll{:} operator
binds tighter than the \ll{@} operator (as it does elsewhere).
@@ -347,7 +349,7 @@ bits. In the above example, \ll{b0} and \ll{b1} will have type
being the other bit literal pattern.
Note that because vectors in sail are type-polymorphic, we can also
-use both vector concatentation patterns and vector patterns to match
+use both vector concatenation patterns and vector patterns to match
against non-bit vectors.
\paragraph{Matching on lists}
@@ -371,8 +373,115 @@ match ys {
\paragraph{As patterns}
+Like OCaml, Sail also supports naming parts of patterns using the
+\ll{as} keyword. For example, in the above list pattern we could bind
+the entire list as \ll{zs} as follows:
+\begin{lstlisting}
+match ys {
+ x :: xs as zs => print("cons with as pattern"),
+ [||] => print("empty list")
+}
+\end{lstlisting}
+The as pattern has lower precedence than any other keyword or operator
+in a pattern, so in this example zs will refer to \ll{x :: xs}.
+
\subsection{Mutable and Immutable Variables}
+Local immutable bindings can be introduced via the \ll{let} keyword,
+which has the following form
+\begin{center}
+ \ll{let} \textit{pattern} \ll{=} \textit{expression} \ll{in} \textit{expression}
+\end{center}
+The pattern is matched against the first expression, binding any
+identifiers in that pattern. The pattern can have any form, as in the
+branches of a match statement, but it should be complete (i.e. it
+should not fail to match)\footnote{although this is not checked right
+ now}.
+
+When used in a block, we allow a variant of the let statement, where
+it can be terminated by a semicolon rather than the in keyword.
+\begin{lstlisting}[mathescape]
+{
+ let $\textit{pattern}$ = $\textit{expression}_0$;
+ $\textit{expression}_1$;
+ $\vdots$
+ $\textit{expression}_n$
+}
+\end{lstlisting}
+This is equivalent to the following
+\begin{lstlisting}[mathescape]
+{
+ let $\textit{pattern}$ = $\textit{expression}_0$ in {
+ $\textit{expression}_1$;
+ $\vdots$
+ $\textit{expression}_n$
+ }
+}
+\end{lstlisting}
+If we were to write
+\begin{lstlisting}[mathescape]
+{
+ let $\textit{pattern}$ = $\textit{expression}_0$ in
+ $\textit{expression}_1$;
+ $\vdots$
+ $\textit{expression}_n$ // pattern not visible
+}
+\end{lstlisting}
+instead, then \textit{pattern} would only be bound within
+$\textit{expression}_1$ and not any further expressions. In general
+the block-form of let statements terminated with a semicolon should be
+preferred within blocks.
+
+Variables bound within function arguments, match statement, and
+let-bindings are always immutable, but Sail also allows mutable
+variables. Mutable variables are bound implicitly by using the
+assignment operator within a block.
+\begin{lstlisting}
+{
+ x : int = 3 // Create a new mutable variable x initialised to 3
+ x = 2 // Rebind it to the value 2
+}
+\end{lstlisting}
+The assignment operator is the equality symbol, as in C and other
+programming languages. Sail supports a rich language of
+\emph{l-expression} forms, which can appear on the left of an
+assignment. These will be described in Subsection~\ref{sec:lexp}. Note
+that we could have written
+\begin{lstlisting}
+{
+ x = 3;
+ x = 2
+}
+\end{lstlisting}
+but it would not have type-checked. The reason for this is if a
+mutable variable is declared without a type, Sail will try to infer
+the most specific type from the left hand side of the
+expression. However, in this case Sail will infer the type as
+\ll{int(3)} and will therefore complain when we try to reassign it to
+\ll{2}, as the type \ll{int(2)} is not a subtype of \ll{int(3)}. We
+therefore declare it as an \ll{int} which as mentioned in
+Section~\ref{sec:numeric} is a supertype of all numeric types. Sail
+will not allow us to change the type of a variable once it has been
+created with a specific type. We could have a more specific type for
+the variable \ll{x}, so
+\begin{lstlisting}
+{
+ x : {|2, 3|} = 3;
+ x = 2
+}
+\end{lstlisting}
+would allow \ll{x} to be either 2 or 3, but not any other value. The
+\lstinline+{|2, 3|}+ syntax is equivalent to \lstinline+{'n, 'n in {2, 3}. int('n)}+.
+
+\subsubsection{l-expressions}
+\label{sec:lexp}
+
+Sail allows for setter functions to be declared in a very simple way:
+\ll{f(x) = y} is sugar for \ll{f(x, y)}. This feature is commonly used
+when setting
+
+\fbox{TODO}
+
\subsection{Type declarations}
\subsubsection{Enumerations}
@@ -444,6 +553,7 @@ names of either the constructors nor the type itself, other than they
must be valid identifiers.
\subsubsection{Bitfields}
+\label{sec:bitfield}
The following example creates a bitfield type called \ll{cr} and a
register \ll{CR} of that type.
@@ -478,7 +588,7 @@ function \lstinline[mathescape]{update_$F$} is also defined. For more
details on getters and setters, see Section~\ref{sec:getset}. A
singleton bit in a bitfield definition, such as \ll{LT : 7} will be
defined as a bitvector of length one, and not as a value of type
-\ll{bit}, which mirrors the behavior of ARM's ASL language.
+\ll{bit}, which mirrors the behaviour of ARM's ASL language.
\subsection{Operators}
@@ -541,7 +651,7 @@ summarised in Table~\ref{tbl:operators}.
\paragraph{Type operators}
Sail allows operators to be used at the type level. For example, we
-could define a synonym for the builtin \ll{range} type as:
+could define a synonym for the built-in \ll{range} type as:
\lstinputlisting{examples/type_operator.sail} Note that we can't use
\ll{..} as an operator name, because that is reserved syntax for
vector slicing. Operators used in types always share precedence with
@@ -549,10 +659,179 @@ identically named operators at the expression level.
\subsection{Ad-hoc Overloading}
+Sail has a flexible overloading mechanism using the \ll{overload}
+keyword
+\begin{center}
+ \ll{overload} \textit{name} \ll{=} \lstinline+{+ \textit{name}$_1$ \ll{,} \ldots \ll{,} \textit{name}$_n$ \lstinline+}+
+\end{center}
+This takes an identifier name, and a list of other identifier names to
+overload that name with. When the overloaded name is seen in a Sail
+definition, the type-checker will try each of the overloads in order
+from left to right (i.e. from $\textit{name}_1$ to $\textit{name}_n$)
+until it finds one that causes the resulting expression to type-check
+correctly.
+
+Multiple \ll{overload} declarations are permitted for the same
+identifier, with each overload declaration after the first adding its
+list of identifier names to the right of the overload list (so earlier
+overload declarations take precedence over later ones). As such, we
+could split every identifier from the above syntax example into its own
+line like so:
+\begin{center}
+ \ll{overload} \textit{name} \ll{=} \lstinline+{+ \textit{name}$_1$ \lstinline+}+\\
+ $\vdots$\\
+ \ll{overload} \textit{name} \ll{=} \lstinline+{+ \textit{name}$_n$ \lstinline+}+
+\end{center}
+
+As an example for how overloaded functions can be used, consider the
+following example, where we define a function \ll{print_int} and a
+function \ll{print_string} for printing integers and strings
+respectively. We overload \ll{print} as either \ll{print_int} or
+\ll{print_string}, so we can print either numbers such as 4, or strings
+like \ll{"Hello, World!"} in the following \ll{main} function
+definition.
+
+\lstinputlisting{examples/overload.sail}
+
+We can see that the overloading has had the desired effect by dumping
+the type-checked AST to stdout using the following command
+\verb+sail -ddump_tc_ast examples/overload.sail+. This will print the
+following, which shows how the overloading has been resolved
+\begin{lstlisting}
+function main () : unit = {
+ print_string("Hello, World!");
+ print_int(4)
+}
+\end{lstlisting}
+This option can be quite useful for testing how overloading has been
+resolved. Since the overloadings are done in the order they are listed
+in the source file, it can be important to ensure that this order is
+correct. A common idiom in the standard library is to have versions of
+functions that guarantee more constraints about their output be
+overloaded with functions that accept more inputs but guarantee less
+about their results. For example, we might have two division functions:
+\begin{lstlisting}
+val div1 : forall 'n 'm, 'n >= 0 & 'm > 0. (int('n), int('m)) -> {'o, 'o >= 0. int('o)}
+
+val div2 : (int, int) -> option(int)
+\end{lstlisting}
+The first guarantees that if the first argument is greater than or
+equal to zero, and the second argument is greater than zero, then the
+result will be greater than or equal to zero. If we overload these
+definitions as
+\begin{lstlisting}
+overload operator / = {div1, div2}
+\end{lstlisting}
+Then the first will be applied when the constraints on its inputs can
+be resolved, and therefore the guarantees on its output can be
+guaranteed, but the second will be used when this is not the case, and
+indeed, we will need to manually check for the division by zero case
+due to the option type. Note that the return type can be very
+different between the different cases in the overload.
+
+The number of arguments overloaded functions can have can also vary,
+so we can use this to define functions with optional arguments, e.g.
+\lstinputlisting{examples/zeros.sail} In this example, we can call
+\ll{zero_extend} and the return length is implicit (likely using
+\ll{sizeof}, see Section~\ref{sec:sizeof}) or we can provide it
+ourselves as an explicit argument.
+
\subsection{Getters and Setters}
\label{sec:getset}
+We have already seen some examples of getters and setters in
+Subsection~\ref{sec:bitfield}, but they can be used in many other
+contexts.
+
+\fbox{TODO}
+
\subsection{Sizeof and Constraint}
+\label{sec:sizeof}
+
+As already mentioned in Section~\ref{sec:functions}, Sail allows for
+arbitrary type variables to be included within expressions. However,
+we can go slightly further than this, and include both arbitrary
+(type-level) numeric expressions in code, as well as type
+constraints. For example, if we have a function that takes two
+bitvectors as arguments, then there are several ways we could compute
+the sum of their lengths.
+\begin{lstlisting}
+val f : forall 'n 'm. (bits('n), bits('m)) -> unit
+
+function f(xs, ys) = {
+ let len = length(xs) + length(ys);
+ let len = 'n + 'm;
+ let len = sizeof('n + 'm);
+ ()
+}
+\end{lstlisting}
+Note that the second line is equivalent to
+\begin{lstlisting}
+  let len = sizeof('n) + sizeof('m)
+\end{lstlisting}
+
+There is also the \ll{constraint} keyword, which takes a type-level constraint and allows it to be used as a boolean expression, so we could write:
+\begin{lstlisting}
+function f(xs, ys) = {
+ if constraint('n <= 'm) {
+ // Do something
+ }
+}
+\end{lstlisting}
+Rather than the equivalent test \ll{length(xs) <= length(ys)}. This way
+of writing expressions can be succinct, and can also make it very
+explicit what constraints will be generated during flow
+typing. However, all the constraint and sizeof definitions must be
+re-written to produce executable code, which can result in the
+generated theorem prover output diverging (in appearance) somewhat
+from the source input. In general, it is probably best to use
+\ll{sizeof} and \ll{constraint} sparingly.
+
+However, as previously mentioned both \ll{sizeof} and \ll{constraint}
+can refer to type variables that only appear in the output or are
+otherwise not accessible at runtime, and so can be used to implement
+implicit arguments, as was seen for \ll{replicate_bits} in
+Section~\ref{sec:functions}.
\subsection{Preludes and Default Environment}
\label{sec:prelude}
+By default Sail has almost no built-in types or functions, except for
+the primitive types described in this chapter. This is because
+different vendor pseudocodes have varying naming conventions and
+styles for even the most basic operators, so we aim to provide
+flexibility and avoid committing to any particular naming convention or
+set of built-ins. However, each Sail backend typically implements
+specific external names, so for a PowerPC ISA description one might
+have:
+\begin{lstlisting}[mathescape]
+val EXTZ = "zero_extend" : $\ldots$
+\end{lstlisting}
+while for ARM, one would have
+\begin{lstlisting}[mathescape]
+val ZeroExtend = "zero_extend" : $\ldots$
+\end{lstlisting}
+where each backend knows about the \ll{"zero_extend"} external name,
+but the actual Sail functions are named appropriately for each
+vendor's pseudocode. As such each Sail ISA spec tends to have its own
+prelude.
+
+However, the \verb+lib+ directory in the Sail repository contains some
+files that can be included into any ISA specification for some basic
+operations. These are listed below:
+\begin{description}
+ \item[flow.sail] Contains basic definitions required for flow
+ typing to work correctly.
+ \item[arith.sail] Contains simple arithmetic operations for
+ integers.
+ \item[vector\_dec.sail] Contains operations on decreasing
+ (\ll{dec}) indexed vectors, see Section~\ref{sec:vec}.
+ \item[vector\_inc.sail] Like \verb+vector_dec.sail+, except
+ for increasing (\ll{inc}) indexed vectors.
+ \item[option.sail] Contains the definition of the option
+ type, and some related utility functions.
+ \item[prelude.sail] Contains all the above files, and chooses
+ between \verb+vector_dec.sail+ and \verb+vector_inc.sail+ based on
+ the default order (which must be set before including this file).
+ \item[smt.sail] Defines operators allowing div, mod, and abs
+ to be used in types by exposing them to the Z3 SMT solver.
+\end{description}
diff --git a/doc/types.tex b/doc/types.tex
new file mode 100644
index 00000000..11037c91
--- /dev/null
+++ b/doc/types.tex
@@ -0,0 +1,256 @@
+\section{Type System}
+\label{sec:types}
+
+\newcommand{\tcheck}[3]{#1 \vdash #2 \Leftarrow #3}
+\newcommand{\tinfer}[3]{#1 \vdash #2 \Rightarrow #3}
+\newcommand{\msail}[1]{\text{\lstinline[mathescape]+#1+}}
+
+\subsection{Blocks}
+\label{subsec:blocks}
+
+\[
+\frac{\tcheck{\Gamma}{E_0}{\text{\lstinline+bool+}}
+ \qquad
+ \tcheck{\Gamma}{M}{\text{\lstinline+string+}}
+ \qquad
+ \tcheck{\mathrm{FlowThen}(\Gamma, E_0)}{\text{\lstinline[mathescape]+\{$E_1$; $\ldots$; $E_n$\}+}}{A}}
+ {\tcheck{\Gamma}{\text{\lstinline[mathescape]+\{assert($E_0$, $M$); $E_1$; $\ldots$; $E_n$ \}+}}{A}}
+\]
+
+\[
+\frac{\tcheck{\mathrm{BindAssignment}(\Gamma, L_0, E_0)}{\text{\lstinline[mathescape]+\{$E_1$; $\ldots$; $E_n$\}+}}{A}}
+ {\tcheck{\Gamma}{\text{\lstinline[mathescape]+\{$L_0$ = $E_0$; $E_1$; $\ldots$; $E_n$ \}+}}{A}}
+\]
+
+\[
+\frac{\tcheck{\Gamma}{E_0}{\text{\lstinline+unit+}}
+ \qquad
+ \tcheck{\Gamma}{\text{\lstinline[mathescape]+\{$E_1$; $\ldots$; $E_n$\}+}}{A}}
+ {\tcheck{\Gamma}{\text{\lstinline[mathescape]+\{$E_0$; $E_1$; $\ldots$; $E_n$ \}+}}{A}}
+\]
+
+\[
+\frac{\tcheck{\Gamma}{E}{A}}
+ {\tcheck{\Gamma}{\text{\lstinline[mathescape]+\{$E$\}+}}{A}}
+\]
+
+\subsection{Let bindings}
+
+Note that \lstinline[mathescape]+{let x = y; $E_0$; $\ldots$; $E_n$}+
+is equivalent to \lstinline[mathescape]+{let x = y in {$E_0$; $\ldots$; $E_n$}}+,
+which is why there are no special cases for let bindings in Subsection~\ref{subsec:blocks}.
+
+\[
+\frac{\tcheck{\Gamma}{E_0}{B}
+ \qquad
+ \tcheck{\mathrm{BindPattern}(\Gamma, P, B)}{E_1}{A}}
+ {\tcheck{\Gamma}{\msail{let $\;P\;$ : $\;B\;$ = $\;E_0\;$ in $\;E_1$}}{A}}
+\]
+
+\[
+\frac{\tinfer{\Gamma}{E_0}{B}
+ \qquad
+ \tcheck{\mathrm{BindPattern}(\Gamma, P, B)}{E_1}{A}}
+ {\tcheck{\Gamma}{\msail{let $\;P\;$ = $\;E_0\;$ in $\;E_1$}}{A}}
+\]
+
+\paragraph{Pattern bindings} The $\mathrm{BindPattern}$ and $\mathrm{BindPattern}'$ functions are used to bind patterns into an environment. The first few cases are simple, if we bind an identifier $x$ against a type $T$, where $x$ is either immutable or unbound, then $x : T$ is added to the environment. If we bind a type against a wildcard pattern, then the environment is returned unchanged. An \lstinline+as+ pattern binds its variable with the appropriate type then recursively binds the rest of the pattern. When binding patterns we always bind against the base type, and bring existentials into scope, which is why $\mathrm{BindPattern}$ does this and then calls the $\mathrm{BindPattern}'$ function which implements all the cases.
+\begin{align*}
+ \mathrm{BindPattern}(\Gamma, P, T) &= \mathrm{BindPattern}'(\Gamma \lhd T, P, \mathrm{Base}(T))\\
+ \mathrm{BindPattern}'(\Gamma, x, T) &= \Gamma \oplus x : T, \tag{$x$ is unbound or immutable}\\
+ \mathrm{BindPattern}'(\Gamma, \msail{_}, T) &= \Gamma,\\
+ \mathrm{BindPattern}'(\Gamma, \msail{$P\;$ as $\;x$}, T) &= \mathrm{BindPattern}(\Gamma \oplus x : T, P, T). \tag{$x$ is unbound or immutable}
+\end{align*}
+If we try to bind a numeric literal $n$ against a type
+\lstinline[mathescape]+int($N$)+ then we add a constraint to the
+environment that the nexp $N$ is equal to $n$.
+\begin{align*}
+\mathrm{BindPattern}'(\Gamma, n, \msail{int($N$)}) &= \Gamma \oplus (N = n).
+\end{align*}
+We also have some rules for typechecking lists, as well as user
+defined constructors in unions (omitted here)
+\begin{align*}
+ \mathrm{BindPattern}'(\Gamma, [], \msail{list($A$)}) &= \Gamma,\\
+ \mathrm{BindPattern}'(\Gamma, \msail{$P_{\mathit{hd}}\;$ :: $\;P_{\mathit{tl}}$}, \msail{list($A$)})
+ &= \mathrm{BindPattern}(\mathrm{BindPattern}(\Gamma,P_{\mathit{hd}},A),P_{\mathit{tl}},\msail{list($A$)}).
+\end{align*}
+
+The pattern binding code follows a similar structure to the
+bi-directional nature of the typechecking rules---the
+$\mathrm{BindPattern}$ function acts like a checking rule where we
+provide the type, and there is also an $\mathrm{InferPattern}$
+function which acts like bind pattern but infers the types from the
+patterns. There is therefore a final case
+$\mathrm{BindPattern}(\Gamma, P, T) = \Gamma'$ where
+$(\Gamma', T') = \mathrm{InferPattern}(\Gamma, P)$ and $T \subseteq T'$.
+
+The $\mathrm{InferPattern}$ function is defined by the following cases
+\begin{align*}
+ \mathrm{InferPattern}(\Gamma,x) &= (\Gamma, T_{\mathit{enum}}), \tag{$x$ is an element of enumeration $T_{\mathit{enum}}$}\\
+ \mathrm{InferPattern}(\Gamma,L) &= (\Gamma, \mathrm{InferLiteral}(L)), \tag{$L$ is a literal}\\
+ \mathrm{InferPattern}(\Gamma,\msail{$P\;$ : $\;T$}) &= (\mathrm{BindPattern}(\Gamma,P,T), T).
+\end{align*}
+
+\paragraph{Type patterns} There is one additional case for $\mathrm{BindPattern}'$ which we haven't discussed. \TODO{type patterns}
+
+\subsection{If statements}
+
+\[
+\frac{\tcheck{\Gamma}{E_{\mathit{if}}}{\msail{bool}}
+ \qquad
+ \tcheck{\mathrm{FlowThen}(\Gamma, E_{\mathit{if}})}{E_{\mathit{then}}}{A}
+ \qquad
+ \tcheck{\mathrm{FlowElse}(\Gamma, E_{\mathit{if}})}{E_{\mathit{else}}}{A}}
+ {\tcheck{\Gamma}{\msail{if $\;E_{\mathit{if}}\;$ then $\;E_{\mathit{then}}\;$ else $\;E_{\mathit{else}}\;$}}{A}}
+\]
+
+\subsection{Return}
+
+When checking the body of a function, the expected return type of the
+function is placed into the context $\Gamma$.
+
+\[
+\frac{\tcheck{\Gamma}{E}{\mathrm{Return}(\Gamma)}}
+ {\tcheck{\Gamma}{\msail{return($E$)}}{A}}
+\]
+
+\subsection{Functions}
+
+Depending on the context, functions can be either checked or
+inferred---although the only difference between the two cases is that
+in the checking case we can use the expected return type to resolve
+some of the function quantifiers, whereas in the inferring case we
+cannot.
+
+\begin{align*}
+ \frac{
+ f : \forall Q, C.(B_0,\ldots,B_n) \rightarrow R \in \Gamma
+ \quad \textsc{InferFun}(\Gamma,Q,C,(B_0,\ldots,B_n),R,(x_0,\ldots,x_n)) = R'
+ } {
+ \tinfer{\Gamma}{f(x_0, \ldots, x_n)}{R'}
+ }
+\end{align*}
+
+\begin{align*}
+ \frac{
+ f : \forall Q, C.(B_0,\ldots,B_n) \rightarrow R \in \Gamma
+ \quad \textsc{CheckFun}(\Gamma,Q,C,(B_0,\ldots,B_n),R,(x_0,\ldots,x_n),R')
+ } {
+ \tcheck{\Gamma}{f(x_0, \ldots, x_n)}{R'}
+ }
+\end{align*}
+
+The rules for checking or inferring functions are rather more
+complicated than the other typing rules and are hard to describe in
+purely logical terms, so they are instead presented as an algorithm in
+Figure~\ref{fig:funapp}. Roughly the inference algorithm works as
+follows:
+
+\begin{enumerate}
+\item \textsc{InferFun} takes as input the typing context $\Gamma$, the
+ list of quantifiers $Q$ (a list of type variable/kind pairs), a
+ constraint $C$, the function argument types $B_0\ldots B_n$, the
+  function return type $R$, and finally the list of argument
+  expressions the function is applied to, $x_0\ldots x_n$.
+
+\item We create an empty list of unsolved typing goals
+ (expression/type pairs) called $\mathit{unsolved}$, a list of
+ constraints $\mathit{Constraints}$, and a set of existential
+ variables $\mathit{Existentials}$.
+
+\item We iterate over each argument expression and type $x_m$ and
+  $B_m$; if $B_m$ contains free type variables in $Q$ we infer the
+  type of $x_m$ and attempt to unify that inferred type with $B_m$. If
+ this unification step fails we add $(x_m, B_m)$ to the list of
+ unsolved goals. This unification step may generate new existential
+ variables and constraints which are added to $\mathit{Existentials}$
+ and $\mathit{Constraints}$ as needed. The results of this
+  unification step are used to resolve the universally-quantified type
+  variables in $Q$. If $B_m$ does not contain free type variables in
+  $Q$, then we simply check $x_m$ against $B_m$.
+
+\item After this loop has finished we expect all the type variables in
+ $Q$ to have been resolved. If not, we throw a type error.
+
+\item We now try to prove the function's constraint $C$ using the
+ resolved type variables, and check any remaining function arguments
+ in $\mathit{unsolved}$.
+
+\item Finally, we add any new existentials and constraints to the
+ function's return type $R$, simplifying if at all possible (using
+ \textsc{SimplifyExist}), before returning this type as the inferred
+ type of the function.
+\end{enumerate}
+
+\noindent The \textsc{CheckFun} function calls \textsc{InferFun}, but it
+takes an additional $X$ argument which is the required return type in
+the context where the function being checked is called. It
+additionally unifies the function's declared return type with the
+expected return type, and uses this to resolve any quantifiers in $Q$,
+provided that the return type is not existentially quantified. It may
+also be required to coerce $R$ into $X$.
+
+\begin{figure}[p]
+\begin{algorithmic}[1]
+ \Function{InferFun}{$\Gamma,Q, C, (B_0,\ldots,B_n), R, (x_0, \ldots, x_n)$}
+ \State $\mathit{unsolved}\gets []$;
+ $\mathit{Constraints}\gets []$;
+ $\mathit{Existentials}\gets \emptyset$
+ \ForAll{$m \in 0, \ldots, n$}
+ \If{$B_m$ contains type variables in $Q$}
+ \State $\Gamma \vdash x_m \Rightarrow E$
+ \Comment Infer the type of $x_m$ as $E$
+ \State $\mathit{unifiers}, \mathit{existentials}, \mathit{constraint} \gets$ \Call{CoerceAndUnify}{$\Gamma,E,B$}
+ \If{\textsc{CoerceAndUnify} failed with \textsc{UnificationError}}
+ \State $\mathit{unsolved}\gets (x_m,B_m) : \mathit{unsolved}$
+ \State \textbf{continue}
+ \Comment Skip to next iteration of loop
+ \ElsIf{$\mathit{existentials}$ is not empty}
+ \State Add type variables $\mathit{existentials}$ to $\Gamma$
+ \State Add constraint $\mathit{constraint}$ to $\Gamma$
+ \State $\mathit{Constraints}\gets \mathit{constraint} : \mathit{Constraints}$
+ \State $\mathit{Existentials}\gets \mathit{existentials} \cup \mathit{Existentials}$
+ \EndIf
+ \ForAll{$(\mathit{nvar}, \mathit{nexp}) \in \mathit{unifiers}$}
+ \State $B_0,...,B_n\gets B_0[\mathit{nvar} := \mathit{nexp}],\ldots,B_n[\mathit{nvar} := \mathit{nexp}]$
+ \State $R\gets R[\mathit{nvar} := \mathit{nexp}]$;
+ $C\gets C[\mathit{nvar} := \mathit{nexp}]$
+ \State Remove $\mathit{nvar}$ from $Q$
+ \EndFor
+ \ElsIf{$B_m$ does not contain type variables in $Q$}
+ \State $\tcheck{\Gamma}{x_m}{B_m}$
+ \Comment Check type of $x_m$ against $B_m$
+ \EndIf
+ \EndFor
+ \If{$Q$ is not empty}
+ \State \textbf{raise} \textsc{TypeError}
+      \Comment Unresolved universal quantifiers
+ \EndIf
+ \State \Call{Prove}{$\Gamma, C$}
+ \ForAll{$(x_m,B_m) \in \mathit{unsolved}$}
+ $\tcheck{\Gamma}{x_m}{B_m}$
+ \EndFor
+ \State \Return \Call{SimplifyExist}{$\mathtt{exist}\ \mathit{Existentials}, \mathit{Constraints}.\ R$}
+ \EndFunction\\
+
+ \Function{CheckFun}{$\Gamma,Q,C,(B_0,\ldots,B_n),R,(x_0,\ldots,x_n),X$}
+ \If{$X$ and $R$ are not existentially quantified}
+ \State $\mathit{unifiers}, \_, \_ \gets$ \Call{Unify}{$\Gamma,R,X$}
+ \If{\textsc{Unify} failed with \textsc{UnificationError}}
+ \textbf{skip}
+ \Else
+ \ForAll{$(\mathit{nvar}, \mathit{nexp}) \in \mathit{unifiers}$}
+ \State $B_0,...,B_n\gets B_0[\mathit{nvar} := \mathit{nexp}],\ldots,B_n[\mathit{nvar} := \mathit{nexp}]$
+ \State $R\gets R[\mathit{nvar} := \mathit{nexp}]$;
+ $C\gets C[\mathit{nvar} := \mathit{nexp}]$
+ \State Remove $\mathit{nvar}$ from $Q$
+ \EndFor
+ \EndIf
+ \EndIf
+ \State $R'\gets$ \Call{InferFun}{$\Gamma,Q,C,(B_0,\ldots,B_n),R,(x_0,\ldots,x_n)$}
+ \State \Return \Call{Coerce}{$R',X$}
+ \EndFunction
+\end{algorithmic}
+\label{fig:funapp}
+\caption{Inference and checking algorithms for function calls}
+\end{figure}
diff --git a/etc/loc.mk b/etc/loc.mk
new file mode 100644
index 00000000..bf79a723
--- /dev/null
+++ b/etc/loc.mk
@@ -0,0 +1,13 @@
+TEMPDIR:=sloc_tmp
+loc: $(LOC_FILES)
+ @rm -rf $(TEMPDIR)
+ @mkdir -p $(TEMPDIR)
+ @cp $^ $(TEMPDIR)
+ @for f in $(TEMPDIR)/*.sail; do mv "$$f" "$${f%.sail}.c"; done
+ @sloccount --details $(TEMPDIR) | grep ansic
+ @sloccount $(TEMPDIR) | grep ansic
+ rm -rf $(TEMPDIR)
+
+cloc: $(LOC_FILES)
+ cloc --by-file --force-lang C,sail $^
+
diff --git a/lib/arith.sail b/lib/arith.sail
index fa091772..54ecdbbc 100644
--- a/lib/arith.sail
+++ b/lib/arith.sail
@@ -5,7 +5,7 @@ $include <flow.sail>
// ***** Addition *****
-val add_atom = {ocaml: "add_int", lem: "integerAdd"} : forall 'n 'm.
+val add_atom = {ocaml: "add_int", lem: "integerAdd", c: "add_int"} : forall 'n 'm.
(atom('n), atom('m)) -> atom('n + 'm)
val add_int = {ocaml: "add_int", lem: "integerAdd", c: "add_int"} : (int, int) -> int
diff --git a/lib/elf.sail b/lib/elf.sail
index e953839d..2d799d4d 100644
--- a/lib/elf.sail
+++ b/lib/elf.sail
@@ -3,11 +3,13 @@ $define _ELF
val elf_entry = {
ocaml: "Elf_loader.elf_entry",
+ lem: "elf_entry",
c: "elf_entry"
} : unit -> int
val elf_tohost = {
ocaml: "Elf_loader.elf_tohost",
+ lem: "elf_tohost",
c: "elf_tohost"
} : unit -> int
diff --git a/lib/vector_dec.sail b/lib/vector_dec.sail
index 1307bb56..17603e03 100644
--- a/lib/vector_dec.sail
+++ b/lib/vector_dec.sail
@@ -68,11 +68,13 @@ val vector_update = {
val add_bits = {
ocaml: "add_vec",
+ lem: "add_vec",
c: "add_bits"
} : forall 'n. (bits('n), bits('n)) -> bits('n)
val add_bits_int = {
ocaml: "add_vec_int",
+ lem: "add_vec_int",
c: "add_bits_int"
} : forall 'n. (bits('n), int) -> bits('n)
diff --git a/mips/Makefile b/mips/Makefile
index a6a5bbdf..c0670380 100644
--- a/mips/Makefile
+++ b/mips/Makefile
@@ -28,5 +28,8 @@ M%.thy: m%.lem m%_types.lem mips_extras.lem
lem -isa -outdir . -lib $(SAIL_DIR)/src/gen_lib -lib $(SAIL_DIR)/src/lem_interp $^
sed -i 's/datatype ast/datatype (plugins only: size) ast/' M$*_types.thy
+LOC_FILES:=$(MIPS_PRE) $(MIPS_TLB) $(MIPS_SAILS) $(MIPS_MAIN)
+include ../etc/loc.mk
+
clean:
rm -rf mips Mips.thy mips.lem _sbuild
diff --git a/mips/main.sail b/mips/main.sail
index 2c4ee064..8ec91ba6 100644
--- a/mips/main.sail
+++ b/mips/main.sail
@@ -38,6 +38,7 @@ function fetch_and_execute () = {
val elf_entry = {
ocaml: "Elf_loader.elf_entry",
+ lem: "elf_entry",
c: "elf_entry"
} : unit -> int
diff --git a/mips/mips_extras.lem b/mips/mips_extras.lem
index 28fa07fb..f0f6a0c5 100644
--- a/mips/mips_extras.lem
+++ b/mips/mips_extras.lem
@@ -111,3 +111,13 @@ let undefined_atom i = return i
let undefined_nat () = return (0:ii)
let skip () = return ()
+
+val elf_entry : unit -> integer
+let elf_entry () = 0
+declare ocaml target_rep function elf_entry = `Elf_loader.elf_entry`
+
+let print_bits msg bs = prerr_endline (msg ^ (string_of_bits bs))
+
+val get_time_ns : unit -> integer
+let get_time_ns () = 0
+declare ocaml target_rep function get_time_ns = `(fun () -> Big_int.of_int (int_of_float (1e9 *. Unix.gettimeofday ())))`
diff --git a/mips/mips_prelude.sail b/mips/mips_prelude.sail
index 9e81a5d0..f9049b5d 100644
--- a/mips/mips_prelude.sail
+++ b/mips/mips_prelude.sail
@@ -469,11 +469,18 @@ function int_of_AccessLevel level =
Kernel => 2
}
+/*!
+Returns whether the first AccessLevel is sufficient to grant access at the second, required, access level.
+ */
val grantsAccess : (AccessLevel, AccessLevel) -> bool
function grantsAccess (currentLevel, requiredLevel) =
int_of_AccessLevel(currentLevel) >= int_of_AccessLevel(requiredLevel)
-function getAccessLevel() : unit -> AccessLevel=
+/*!
+Returns the current effective access level determined by accessing the relevant parts of the MIPS status register.
+ */
+val getAccessLevel : unit -> AccessLevel effect {rreg}
+function getAccessLevel() =
if ((CP0Status.EXL()) | (CP0Status.ERL())) then
Kernel
else match CP0Status.KSU()
diff --git a/mips/prelude.sail b/mips/prelude.sail
index 281ef5ea..5bb79f97 100644
--- a/mips/prelude.sail
+++ b/mips/prelude.sail
@@ -90,8 +90,14 @@ function or_vec (xs, ys) = builtin_or_vec(xs, ys)
overload operator | = {or_bool, or_vec}
+/*!
+The \function{unsigned} function converts a bit vector to an integer assuming an unsigned representation:
+*/
val unsigned = {ocaml: "uint", lem: "uint"} : forall 'n. bits('n) -> range(0, 2 ^ 'n - 1)
+/*!
+The \function{signed} function converts a bit vector to an integer assuming a signed twos-complement representation:
+*/
val signed = {ocaml: "sint", lem: "sint"} : forall 'n. bits('n) -> range(- (2 ^ ('n - 1)), 2 ^ ('n - 1) - 1)
val "get_slice_int" : forall 'w. (atom('w), int, int) -> bits('w)
@@ -245,10 +251,10 @@ infix 4 >=_s
infix 4 <_u
infix 4 >=_u
-val operator <_s = {lem: "slt_vec"} : forall 'n. (bits('n), bits('n)) -> bool
-val operator >=_s = {lem: "sgteq_vec"} : forall 'n. (bits('n), bits('n)) -> bool
-val operator <_u = {lem: "ult_vec"} : forall 'n. (bits('n), bits('n)) -> bool
-val operator >=_u = {lem: "ugteq_vec"} : forall 'n. (bits('n), bits('n)) -> bool
+val operator <_s : forall 'n. (bits('n), bits('n)) -> bool
+val operator >=_s : forall 'n. (bits('n), bits('n)) -> bool
+val operator <_u : forall 'n. (bits('n), bits('n)) -> bool
+val operator >=_u : forall 'n. (bits('n), bits('n)) -> bool
function operator <_s (x, y) = signed(x) < signed(y)
function operator >=_s (x, y) = signed(x) >= signed(y)
@@ -286,6 +292,9 @@ val operator *_s = "mults_vec" : forall 'n . (bits('n), bits('n)) -> bits(2 * 'n
infix 7 *_u
val operator *_u = "mult_vec" : forall 'n . (bits('n), bits('n)) -> bits(2 * 'n)
+/*!
+\function{to\_bits} converts an integer to a bit vector of given length. If the integer is negative a twos-complement representation is used. If the integer is too large (or too negative) to fit in the requested length then it is truncated to the least significant bits.
+*/
val to_bits : forall 'l.(atom('l), int) -> bits('l)
function to_bits (l, n) = get_slice_int(l, n, 0)
diff --git a/riscv/Makefile b/riscv/Makefile
index 7173f24d..9d3a2196 100644
--- a/riscv/Makefile
+++ b/riscv/Makefile
@@ -43,6 +43,10 @@ riscv_sequentialScript.sml : riscv_sequential.lem riscv_extras_sequential.lem
riscv_sequential_types.lem \
riscv_sequential.lem
+# we exclude prelude.sail here, most code there should move to sail lib
+LOC_FILES:=$(SAIL_SRCS) main.sail
+include ../etc/loc.mk
+
clean:
-rm -rf riscv _sbuild
-rm -f riscv.lem riscv_types.lem
diff --git a/riscv/main.sail b/riscv/main.sail
index 5d7b1108..28afe5ac 100644
--- a/riscv/main.sail
+++ b/riscv/main.sail
@@ -43,7 +43,6 @@ function main () = {
loop ()
} catch {
Error_not_implemented(s) => print_string("Error: Not implemented: ", s),
- Error_misaligned_access() => print("Error: misaligned_access"),
Error_EBREAK() => print("EBREAK"),
Error_internal_error() => print("Error: internal error")
}
diff --git a/riscv/prelude.sail b/riscv/prelude.sail
index d667573e..c92497c1 100644
--- a/riscv/prelude.sail
+++ b/riscv/prelude.sail
@@ -350,11 +350,11 @@ infix 4 <_u
infix 4 >=_u
infix 4 <=_u
-val operator <_s = {lem: "slt_vec"} : forall 'n. (bits('n), bits('n)) -> bool
-val operator >=_s = {lem: "sgteq_vec"} : forall 'n. (bits('n), bits('n)) -> bool
-val operator <_u = {lem: "ult_vec"} : forall 'n. (bits('n), bits('n)) -> bool
-val operator >=_u = {lem: "ugteq_vec"} : forall 'n. (bits('n), bits('n)) -> bool
-val operator <=_u = {lem: "ulteq_vec"} : forall 'n. (bits('n), bits('n)) -> bool
+val operator <_s : forall 'n. (bits('n), bits('n)) -> bool
+val operator >=_s : forall 'n. (bits('n), bits('n)) -> bool
+val operator <_u : forall 'n. (bits('n), bits('n)) -> bool
+val operator >=_u : forall 'n. (bits('n), bits('n)) -> bool
+val operator <=_u : forall 'n. (bits('n), bits('n)) -> bool
function operator <_s (x, y) = signed(x) < signed(y)
function operator >=_s (x, y) = signed(x) >= signed(y)
diff --git a/riscv/riscv.sail b/riscv/riscv.sail
index d93c7dae..374ea4a9 100644
--- a/riscv/riscv.sail
+++ b/riscv/riscv.sail
@@ -254,7 +254,8 @@ function clause print_insn (LOAD(imm, rs1, rd, is_unsigned, width, aq, rl)) =
(HALF, true) => "lhu ",
(WORD, false) => "lw ",
(WORD, true) => "lwu ",
- (_, _) => "ld.bad "
+ (DOUBLE, false) => "ld ",
+ (DOUBLE, true) => "ldu "
} in
insn ^ rd ^ ", " ^ rs1 ^ ", " ^ BitStr(imm)
@@ -822,6 +823,7 @@ function readCSR csr : csreg -> xlenbits =
0xF13 => mimpid,
0xF14 => mhartid,
0x300 => mstatus.bits(),
+ 0x301 => misa.bits(),
0x302 => medeleg.bits(),
0x303 => mideleg.bits(),
0x304 => mie.bits(),
@@ -850,6 +852,9 @@ function readCSR csr : csreg -> xlenbits =
0xC01 => mtime,
0xC02 => minstret,
+ /* trigger/debug */
+ 0x7a0 => ~(tselect), /* this indicates we don't have any trigger support */
+
_ => { print_bits("unhandled read to CSR ", csr);
0x0000_0000_0000_0000 }
}
@@ -882,6 +887,9 @@ function writeCSR (csr : csreg, value : xlenbits) -> unit =
0x144 => { mip = legalize_sip(mip, mideleg, value); Some(mip.bits()) },
0x180 => { satp = legalize_satp(cur_Architecture(), satp, value); Some(satp) },
+ /* trigger/debug */
+ 0x7a0 => { tselect = value; Some(tselect) },
+
_ => None()
} in
match res {
@@ -889,9 +897,6 @@ function writeCSR (csr : csreg, value : xlenbits) -> unit =
None() => print_bits("unhandled write to CSR ", csr)
}
-val signalIllegalInstruction : unit -> unit effect {escape}
-function signalIllegalInstruction () = not_implemented ("illegal instruction")
-
function clause execute CSR(csr, rs1, rd, is_imm, op) =
let rs1_val : xlenbits = if is_imm then EXTZ(rs1) else X(rs1) in
let isWrite : bool = match op {
diff --git a/riscv/riscv_step.sail b/riscv/riscv_step.sail
index 7783317a..7ddd8a44 100644
--- a/riscv/riscv_step.sail
+++ b/riscv/riscv_step.sail
@@ -51,7 +51,6 @@ function step() = {
false
},
None() => {
- print_bits("PC: ", PC);
match fetch() {
F_Error(e, addr) => {
handle_mem_exception(addr, e);
@@ -60,12 +59,12 @@ function step() = {
F_RVC(h) => {
match decodeCompressed(h) {
None() => {
- print(BitStr(h) ^ " : <no-decode>");
+ print("PC: " ^ BitStr(PC) ^ " instr: " ^ BitStr(h) ^ " : <no-decode>");
handle_decode_exception(EXTZ(h));
false
},
Some(ast) => {
- print(BitStr(h) ^ " : " ^ ast);
+ print("PC: " ^ BitStr(PC) ^ " instr: " ^ BitStr(h) ^ " : " ^ ast);
nextPC = PC + 2;
execute(ast);
true
@@ -75,12 +74,12 @@ function step() = {
F_Base(w) => {
match decode(w) {
None() => {
- print(BitStr(w) ^ " : <no-decode>");
+ print("PC: " ^ BitStr(PC) ^ " instr: " ^ BitStr(w) ^ " : <no-decode>");
handle_decode_exception(EXTZ(w));
false
},
Some(ast) => {
- print(BitStr(w) ^ " : " ^ ast);
+ print("PC: " ^ BitStr(PC) ^ " instr: " ^ BitStr(w) ^ " : " ^ ast);
nextPC = PC + 4;
execute(ast);
true
diff --git a/riscv/riscv_sys.sail b/riscv/riscv_sys.sail
index ce5ef321..803531bd 100644
--- a/riscv/riscv_sys.sail
+++ b/riscv/riscv_sys.sail
@@ -404,6 +404,9 @@ register sepc : xlenbits
register scause : Mcause
register stval : xlenbits
+/* disabled trigger/debug module */
+register tselect : xlenbits
+
/* csr name printer */
val cast csr_name : csreg -> string
@@ -466,6 +469,8 @@ function csr_name(csr) = {
0xB80 => "mcycleh",
0xB82 => "minstreth",
/* TODO: other hpm counters and events */
+ /* trigger/debug */
+ 0x7a0 => "tselect",
_ => "UNKNOWN"
}
}
@@ -515,6 +520,9 @@ function is_CSR_defined (csr : bits(12), p : Privilege) -> bool =
/* supervisor mode: address translation */
0x180 => p == Machine | p == Supervisor, // satp
+ /* disabled trigger/debug module */
+ 0x7a0 => p == Machine,
+
_ => false
}
@@ -619,7 +627,7 @@ union ctl_result = {
function handle_trap(del_priv : Privilege, intr : bool, c : exc_code, pc : xlenbits, info : option(xlenbits))
-> xlenbits = {
- print("handling " ^ (if intr then "int#" else "exc#") ^ BitStr(c) ^ " at priv " ^ del_priv);
+ print("handling " ^ (if intr then "int#" else "exc#") ^ BitStr(c) ^ " at priv " ^ del_priv ^ " with tval " ^ BitStr(tval(info)));
match (del_priv) {
Machine => {
mcause->IsInterrupt() = intr;
diff --git a/riscv/riscv_types.sail b/riscv/riscv_types.sail
index 2a5a03ec..ee0eb94d 100644
--- a/riscv/riscv_types.sail
+++ b/riscv/riscv_types.sail
@@ -259,7 +259,6 @@ function trapVectorMode_of_bits (m) = {
union exception = {
Error_not_implemented : string,
- Error_misaligned_access : unit,
Error_EBREAK : unit,
Error_internal_error : unit
}
diff --git a/riscv/riscv_vmem.sail b/riscv/riscv_vmem.sail
index 0c1b9949..7fddb047 100644
--- a/riscv/riscv_vmem.sail
+++ b/riscv/riscv_vmem.sail
@@ -128,7 +128,7 @@ union PTW_Result = {
val walk39 : (vaddr39, AccessType, Privilege, bool, bool, paddr39, nat, bool) -> PTW_Result effect {rmem, escape}
function walk39(vaddr, ac, priv, mxr, sum, ptb, level, global) -> PTW_Result = {
let va = Mk_SV39_Vaddr(vaddr);
- let pt_ofs : paddr39 = shiftl(EXTZ(shiftr(va.VPNi(), (level * SV39_LEVEL_BITS))[SV39_LEVEL_BITS .. 0]),
+ let pt_ofs : paddr39 = shiftl(EXTZ(shiftr(va.VPNi(), (level * SV39_LEVEL_BITS))[(SV39_LEVEL_BITS - 1) .. 0]),
PTE39_LOG_SIZE);
let pte_addr = ptb + pt_ofs;
match (checked_mem_read(Data, EXTZ(pte_addr), 8)) {
@@ -142,22 +142,32 @@ function walk39(vaddr, ac, priv, mxr, sum, ptb, level, global) -> PTW_Result = {
PTW_Failure(PTW_Invalid_PTE)
} else {
if isPTEPtr(pbits) then {
- if level == 0 then PTW_Failure(PTW_Invalid_PTE)
- else walk39(vaddr, ac, priv, mxr, sum, EXTZ(pte.PPNi()), level - 1, is_global)
+ if level == 0 then {
+ /* last-level PTE contains a pointer instead of a leaf */
+ PTW_Failure(PTW_Invalid_PTE)
+ } else {
+ /* walk down the pointer to the next level */
+ walk39(vaddr, ac, priv, mxr, sum, EXTZ(shiftl(pte.PPNi(), PAGESIZE_BITS)), level - 1, is_global)
+ }
} else { /* leaf PTE */
- if ~ (checkPTEPermission(ac, priv, mxr, sum, pattr)) then
+ if ~ (checkPTEPermission(ac, priv, mxr, sum, pattr)) then {
PTW_Failure(PTW_No_Permission)
- else {
+ } else {
if level > 0 then { /* superpage */
- let masked = pte.PPNi() & EXTZ(shiftl(0b1, level * SV39_LEVEL_BITS) - 1);
- if masked != EXTZ(0b0) then
+ /* fixme hack: to get a mask of appropriate size */
+ let mask = shiftl(pte.PPNi() ^ pte.PPNi() ^ EXTZ(0b1), level * SV39_LEVEL_BITS) - 1;
+ if (pte.PPNi() & mask) != EXTZ(0b0) then {
+ /* misaligned superpage mapping */
PTW_Failure(PTW_Misaligned)
- else {
- let ppn = pte.PPNi() | (EXTZ(va.VPNi()) & EXTZ(shiftl(0b1, level * SV39_LEVEL_BITS) - 1));
+ } else {
+ /* add the appropriate bits of the VPN to the superpage PPN */
+ let ppn = pte.PPNi() | (EXTZ(va.VPNi()) & mask);
PTW_Success(append(ppn, va.PgOfs()), pte, pte_addr, level, is_global)
}
- } else
+ } else {
+ /* normal leaf PTE */
PTW_Success(append(pte.PPNi(), va.PgOfs()), pte, pte_addr, level, is_global)
+ }
}
}
}
@@ -184,7 +194,8 @@ val make_TLB39_Entry : (asid64, bool, vaddr39, paddr39, SV39_PTE, nat, paddr39)
function make_TLB39_Entry(asid, global, vAddr, pAddr, pte, level, pteAddr) = {
let shift : nat = PAGESIZE_BITS + (level * SV39_LEVEL_BITS);
- let vAddrMask : vaddr39 = shiftl(EXTZ(0b1), shift) - 1;
+ /* fixme hack: use a better idiom for masks */
+ let vAddrMask : vaddr39 = shiftl(vAddr ^ vAddr ^ EXTZ(0b1), shift) - 1;
let vMatchMask : vaddr39 = ~ (vAddrMask);
struct {
asid = asid,
@@ -245,7 +256,7 @@ union TR39_Result = {
TR39_Failure : PTW_Error
}
-let enable_dirty_update = true
+let enable_dirty_update = false
val translate39 : (vaddr39, AccessType, Privilege, bool, bool, nat) -> TR39_Result effect {rreg, wreg, wmv, escape, rmem}
function translate39(vAddr, ac, priv, mxr, sum, level) = {
@@ -260,8 +271,10 @@ function translate39(vAddr, ac, priv, mxr, sum, level) = {
None() => TR39_Address(ent.pAddr | EXTZ(vAddr & ent.vAddrMask)),
Some(pbits) => {
if ~ (enable_dirty_update)
- then TR39_Failure(PTW_PTE_Update)
- else {
+ then {
+ /* pte needs dirty/accessed update but that is not enabled */
+ TR39_Failure(PTW_PTE_Update)
+ } else {
/* update PTE entry and TLB */
n_ent : TLB39_Entry = ent;
n_ent.pte = update_BITS(ent.pte, pbits.bits());
@@ -288,15 +301,20 @@ function translate39(vAddr, ac, priv, mxr, sum, level) = {
},
Some(pbits) =>
if ~ (enable_dirty_update)
- then TR39_Failure(PTW_PTE_Update)
- else {
+ then {
+ /* pte needs dirty/accessed update but that is not enabled */
+ TR39_Failure(PTW_PTE_Update)
+ } else {
w_pte : SV39_PTE = update_BITS(pte, pbits.bits());
match checked_mem_write(EXTZ(pteAddr), 8, w_pte.bits()) {
MemValue(_) => {
addToTLB39(asid, vAddr, pAddr, w_pte, pteAddr, level, global);
TR39_Address(pAddr)
},
- MemException(e) => TR39_Failure(PTW_Access)
+ MemException(e) => {
+ /* pte is not in valid memory */
+ TR39_Failure(PTW_Access)
+ }
}
}
}
diff --git a/src/c_backend.ml b/src/c_backend.ml
index 5cf282f9..23a8c92e 100644
--- a/src/c_backend.ml
+++ b/src/c_backend.ml
@@ -1927,7 +1927,7 @@ and compile_block ctx = function
let setup, _, call, cleanup = compile_aexp ctx exp in
let rest = compile_block ctx exps in
let gs = gensym () in
- setup @ [idecl CT_unit gs; call (CL_id gs)] @ cleanup @ rest
+ iblock (setup @ [idecl CT_unit gs; call (CL_id gs)] @ cleanup) :: rest
(** Compile a sail type definition into a IR one. Most of the
actual work of translating the typedefs into C is done by the code
diff --git a/src/elf_loader.ml b/src/elf_loader.ml
index 89987647..02ff072b 100644
--- a/src/elf_loader.ml
+++ b/src/elf_loader.ml
@@ -65,9 +65,9 @@ let rec break n = function
| (_ :: _ as xs) -> [Lem_list.take n xs] @ break n (Lem_list.drop n xs)
let print_segment seg =
- let (Byte_sequence.Sequence bs) = seg.Elf_interpreted_segment.elf64_segment_body in
+ let bs = seg.Elf_interpreted_segment.elf64_segment_body in
prerr_endline "0011 2233 4455 6677 8899 aabb ccdd eeff 0123456789abcdef";
- List.iter (fun bs -> prerr_endline (hex_line bs)) (break 16 bs)
+ List.iter (fun bs -> prerr_endline (hex_line bs)) (break 16 (Byte_sequence.char_list_of_byte_sequence bs))
let read name =
let info = Sail_interface.populate_and_obtain_global_symbol_init_info name in
@@ -112,7 +112,7 @@ let write_file chan paddr i byte =
let load_segment ?writer:(writer=write_sail_lib) seg =
let open Elf_interpreted_segment in
- let (Byte_sequence.Sequence bs) = seg.elf64_segment_body in
+ let bs = seg.elf64_segment_body in
let paddr = seg.elf64_segment_paddr in
let base = seg.elf64_segment_base in
let offset = seg.elf64_segment_offset in
@@ -121,7 +121,7 @@ let load_segment ?writer:(writer=write_sail_lib) seg =
prerr_endline ("Segment base address: " ^ Big_int.to_string base);
prerr_endline ("Segment physical address: " ^ Big_int.to_string paddr);
print_segment seg;
- List.iteri (writer paddr) (List.map int_of_char bs)
+ List.iteri (writer paddr) (List.map int_of_char (Byte_sequence.char_list_of_byte_sequence bs))
let load_elf ?writer:(writer=write_sail_lib) name =
let segments, e_entry, symbol_map = read name in
diff --git a/src/gen_lib/prompt.lem b/src/gen_lib/prompt.lem
index de683047..830f2350 100644
--- a/src/gen_lib/prompt.lem
+++ b/src/gen_lib/prompt.lem
@@ -38,6 +38,12 @@ end
declare {isabelle} termination_argument foreachM = automatic
+val and_boolM : forall 'rv 'e. monad 'rv bool 'e -> monad 'rv bool 'e -> monad 'rv bool 'e
+let and_boolM l r = l >>= (fun l -> if l then r else return false)
+
+val or_boolM : forall 'rv 'e. monad 'rv bool 'e -> monad 'rv bool 'e -> monad 'rv bool 'e
+let or_boolM l r = l >>= (fun l -> if l then return true else r)
+
val bool_of_bitU_fail : forall 'rv 'e. bitU -> monad 'rv bool 'e
let bool_of_bitU_fail = function
| B0 -> return false
diff --git a/src/gen_lib/sail_operators.lem b/src/gen_lib/sail_operators.lem
index d4275c87..0c5da675 100644
--- a/src/gen_lib/sail_operators.lem
+++ b/src/gen_lib/sail_operators.lem
@@ -194,32 +194,14 @@ let neq_bv l r = not (eq_bv l r)
let inline neq_mword l r = (l <> r)
-val ult_bv : forall 'a. Bitvector 'a => 'a -> 'a -> bool
-let ult_bv l r = lexicographicLess (List.reverse (bits_of l)) (List.reverse (bits_of r))
-let ulteq_bv l r = (eq_bv l r) || (ult_bv l r)
-let ugt_bv l r = not (ulteq_bv l r)
-let ugteq_bv l r = (eq_bv l r) || (ugt_bv l r)
-
-val slt_bv : forall 'a. Bitvector 'a => 'a -> 'a -> bool
-let slt_bv l r =
- match (most_significant l, most_significant r) with
- | (B0, B0) -> ult_bv l r
- | (B0, B1) -> false
- | (B1, B0) -> true
- | (B1, B1) ->
- let l' = add_one_bit_ignore_overflow (bits_of l) in
- let r' = add_one_bit_ignore_overflow (bits_of r) in
- ugt_bv l' r'
- | (BU, BU) -> ult_bv l r
- | (BU, _) -> true
- | (_, BU) -> false
- end
-let slteq_bv l r = (eq_bv l r) || (slt_bv l r)
-let sgt_bv l r = not (slteq_bv l r)
-let sgteq_bv l r = (eq_bv l r) || (sgt_bv l r)
-
-val ucmp_mword : forall 'a. Size 'a => (integer -> integer -> bool) -> mword 'a -> mword 'a -> bool
-let inline ucmp_mword cmp l r = cmp (unsignedIntegerFromWord l) (unsignedIntegerFromWord r)
-
-val scmp_mword : forall 'a. Size 'a => (integer -> integer -> bool) -> mword 'a -> mword 'a -> bool
-let inline scmp_mword cmp l r = cmp (signedIntegerFromWord l) (signedIntegerFromWord r)
+val get_slice_int_bv : forall 'a. Bitvector 'a => integer -> integer -> integer -> 'a
+let get_slice_int_bv len n lo =
+ let hi = lo + len - 1 in
+ let bs = bools_of_int (hi + 1) n in
+ of_bools (subrange_list false bs hi lo)
+
+val set_slice_int_bv : forall 'a. Bitvector 'a => integer -> integer -> integer -> 'a -> integer
+let set_slice_int_bv len n lo v =
+ let hi = lo + len - 1 in
+ let bs = bits_of_int (hi + 1) n in
+ maybe_failwith (signed_of_bits (update_subrange_list false bs hi lo (bits_of v)))
diff --git a/src/gen_lib/sail_operators_bitlists.lem b/src/gen_lib/sail_operators_bitlists.lem
index b0a29b5e..19e9b519 100644
--- a/src/gen_lib/sail_operators_bitlists.lem
+++ b/src/gen_lib/sail_operators_bitlists.lem
@@ -35,6 +35,9 @@ let zero_extend bits len = extz_bits len bits
val sign_extend : list bitU -> integer -> list bitU
let sign_extend bits len = exts_bits len bits
+val zeros : integer -> list bitU
+let zeros len = repeat [B0] len
+
val vector_truncate : list bitU -> integer -> list bitU
let vector_truncate bs len = extz_bv len bs
@@ -289,23 +292,21 @@ let duplicate_oracle b n =
val reverse_endianness : list bitU -> list bitU
let reverse_endianness v = reverse_endianness_list v
+val get_slice_int : integer -> integer -> integer -> list bitU
+let get_slice_int = get_slice_int_bv
+
+val set_slice_int : integer -> integer -> integer -> list bitU -> integer
+let set_slice_int = set_slice_int_bv
+
+val slice : list bitU -> integer -> integer -> list bitU
+let slice v lo len =
+ subrange_vec_dec v (lo + len - 1) lo
+
+val set_slice : integer -> integer -> list bitU -> integer -> list bitU -> list bitU
+let set_slice (out_len:ii) (slice_len:ii) out (n:ii) v =
+ update_subrange_vec_dec out (n + slice_len - 1) n v
+
val eq_vec : list bitU -> list bitU -> bool
val neq_vec : list bitU -> list bitU -> bool
-val ult_vec : list bitU -> list bitU -> bool
-val slt_vec : list bitU -> list bitU -> bool
-val ugt_vec : list bitU -> list bitU -> bool
-val sgt_vec : list bitU -> list bitU -> bool
-val ulteq_vec : list bitU -> list bitU -> bool
-val slteq_vec : list bitU -> list bitU -> bool
-val ugteq_vec : list bitU -> list bitU -> bool
-val sgteq_vec : list bitU -> list bitU -> bool
let eq_vec = eq_bv
let neq_vec = neq_bv
-let ult_vec = ult_bv
-let slt_vec = slt_bv
-let ugt_vec = ugt_bv
-let sgt_vec = sgt_bv
-let ulteq_vec = ulteq_bv
-let slteq_vec = slteq_bv
-let ugteq_vec = ugteq_bv
-let sgteq_vec = sgteq_bv
diff --git a/src/gen_lib/sail_operators_mwords.lem b/src/gen_lib/sail_operators_mwords.lem
index 8bcc0319..22d5b246 100644
--- a/src/gen_lib/sail_operators_mwords.lem
+++ b/src/gen_lib/sail_operators_mwords.lem
@@ -76,6 +76,9 @@ let zero_extend w _ = Machine_word.zeroExtend w
val sign_extend : forall 'a 'b. Size 'a, Size 'b => mword 'a -> integer -> mword 'b
let sign_extend w _ = Machine_word.signExtend w
+val zeros : forall 'a. Size 'a => integer -> mword 'a
+let zeros _ = Machine_word.wordFromNatural 0
+
val vector_truncate : forall 'a 'b. Size 'a, Size 'b => mword 'a -> integer -> mword 'b
let vector_truncate w _ = Machine_word.zeroExtend w
@@ -310,23 +313,21 @@ let duplicate b n = maybe_failwith (duplicate_maybe b n)
val reverse_endianness : forall 'a. Size 'a => mword 'a -> mword 'a
let reverse_endianness v = wordFromBitlist (reverse_endianness_list (bitlistFromWord v))
+val get_slice_int : forall 'a. Size 'a => integer -> integer -> integer -> mword 'a
+let get_slice_int = get_slice_int_bv
+
+val set_slice_int : forall 'a. Size 'a => integer -> integer -> integer -> mword 'a -> integer
+let set_slice_int = set_slice_int_bv
+
+val slice : forall 'a 'b. Size 'a, Size 'b => mword 'a -> integer -> integer -> mword 'b
+let slice v lo len =
+ subrange_vec_dec v (lo + len - 1) lo
+
+val set_slice : forall 'a 'b. Size 'a, Size 'b => integer -> integer -> mword 'a -> integer -> mword 'b -> mword 'a
+let set_slice (out_len:ii) (slice_len:ii) out (n:ii) v =
+ update_subrange_vec_dec out (n + slice_len - 1) n v
+
val eq_vec : forall 'a. Size 'a => mword 'a -> mword 'a -> bool
val neq_vec : forall 'a. Size 'a => mword 'a -> mword 'a -> bool
-val ult_vec : forall 'a. Size 'a => mword 'a -> mword 'a -> bool
-val slt_vec : forall 'a. Size 'a => mword 'a -> mword 'a -> bool
-val ugt_vec : forall 'a. Size 'a => mword 'a -> mword 'a -> bool
-val sgt_vec : forall 'a. Size 'a => mword 'a -> mword 'a -> bool
-val ulteq_vec : forall 'a. Size 'a => mword 'a -> mword 'a -> bool
-val slteq_vec : forall 'a. Size 'a => mword 'a -> mword 'a -> bool
-val ugteq_vec : forall 'a. Size 'a => mword 'a -> mword 'a -> bool
-val sgteq_vec : forall 'a. Size 'a => mword 'a -> mword 'a -> bool
let inline eq_vec = eq_mword
let inline neq_vec = neq_mword
-let inline ult_vec = ucmp_mword (<)
-let inline slt_vec = scmp_mword (<)
-let inline ugt_vec = ucmp_mword (>)
-let inline sgt_vec = scmp_mword (>)
-let inline ulteq_vec = ucmp_mword (<=)
-let inline slteq_vec = scmp_mword (<=)
-let inline ugteq_vec = ucmp_mword (>=)
-let inline sgteq_vec = scmp_mword (>=)
diff --git a/src/gen_lib/sail_values.lem b/src/gen_lib/sail_values.lem
index 2d9eda9c..5c6dc593 100644
--- a/src/gen_lib/sail_values.lem
+++ b/src/gen_lib/sail_values.lem
@@ -414,6 +414,7 @@ let rec hexstring_of_bits bs = match bs with
| (Just n, Just s) -> Just (n :: s)
| _ -> Nothing
end
+ | [] -> Just []
| _ -> Nothing
end
declare {isabelle} termination_argument hexstring_of_bits = automatic
diff --git a/src/gen_lib/state_monad.lem b/src/gen_lib/state_monad.lem
index 781bc129..89021890 100644
--- a/src/gen_lib/state_monad.lem
+++ b/src/gen_lib/state_monad.lem
@@ -265,3 +265,16 @@ let update_reg_field_bit regfield i reg_val bit =
let new_field_value = set_bit (regfield.field_is_inc) current_field_value i (to_bitU bit) in
regfield.set_field reg_val new_field_value
let write_reg_field_bit reg regfield i = update_reg reg (update_reg_field_bit regfield i)*)
+
+(* TODO Add Show typeclass for value and exception type *)
+val show_result : forall 'a 'e. result 'a 'e -> string
+let show_result = function
+ | Value _ -> "Value ()"
+ | Ex (Failure msg) -> "Failure " ^ msg
+ | Ex (Throw _) -> "Throw"
+end
+
+val prerr_results : forall 'a 'e 's. SetType 's => set (result 'a 'e * 's) -> unit
+let prerr_results rs =
+ let _ = Set.map (fun (r, _) -> let _ = prerr_endline (show_result r) in ()) rs in
+ ()
diff --git a/src/latex.ml b/src/latex.ml
index 01cf55b2..0520d074 100644
--- a/src/latex.ml
+++ b/src/latex.ml
@@ -123,18 +123,19 @@ let commands = ref StringSet.empty
let rec latex_command ?prefix:(prefix="") ?label:(label=None) dir cmd no_loc ((l, _) as annot) =
let labelling = match label with
| None -> ""
- | Some l -> Printf.sprintf "\\label{%s%s}" prefix l
+ | Some l -> Printf.sprintf "\\label{%s}" l
in
let cmd = !opt_prefix_latex ^ prefix ^ cmd in
- if StringSet.mem cmd !commands then
+ let lcmd = String.lowercase cmd in (* lowercase to avoid file names differing only by case *)
+ if StringSet.mem lcmd !commands then
latex_command ~label:label dir (cmd ^ "v") no_loc annot
else
begin
- commands := StringSet.add cmd !commands;
- let oc = open_out (Filename.concat dir (cmd ^ "_sail.tex")) in
+ commands := StringSet.add lcmd !commands;
+ let oc = open_out (Filename.concat dir (cmd ^ ".tex")) in
output_string oc (Pretty_print_sail.to_string (latex_loc no_loc l));
close_out oc;
- string (Printf.sprintf "\\newcommand{\\%s}{%s " cmd labelling) ^^ (docstring l) ^^ string (Printf.sprintf "\\lstinputlisting{%s/%s_sail.tex}}" dir cmd)
+ string (Printf.sprintf "\\newcommand{\\%s}{%s " cmd labelling) ^^ (docstring l) ^^ string (Printf.sprintf "\\lstinputlisting[language=sail]{%s/%s.tex}}" dir cmd)
end
let latex_command_id ?prefix:(prefix="") dir id no_loc annot =
diff --git a/src/pretty_print_lem.ml b/src/pretty_print_lem.ml
index a7da28bc..c3e96d57 100644
--- a/src/pretty_print_lem.ml
+++ b/src/pretty_print_lem.ml
@@ -113,10 +113,7 @@ let rec fix_id remove_tick name = match name with
let doc_id_lem (Id_aux(i,_)) =
match i with
| Id i -> string (fix_id false i)
- | DeIid x ->
- (* add an extra space through empty to avoid a closing-comment
- * token in case of x ending with star. *)
- parens (separate space [colon; string x; empty])
+ | DeIid x -> string (Util.zencode_string ("op " ^ x))
let doc_id_lem_type (Id_aux(i,_)) =
match i with
@@ -124,10 +121,7 @@ let doc_id_lem_type (Id_aux(i,_)) =
| Id("nat") -> string "ii"
| Id("option") -> string "maybe"
| Id i -> string (fix_id false i)
- | DeIid x ->
- (* add an extra space through empty to avoid a closing-comment
- * token in case of x ending with star. *)
- parens (separate space [colon; string x; empty])
+ | DeIid x -> string (Util.zencode_string ("op " ^ x))
let doc_id_lem_ctor (Id_aux(i,_)) =
match i with
@@ -137,10 +131,11 @@ let doc_id_lem_ctor (Id_aux(i,_)) =
| Id("Some") -> string "Just"
| Id("None") -> string "Nothing"
| Id i -> string (fix_id false (String.capitalize i))
- | DeIid x ->
- (* add an extra space through empty to avoid a closing-comment
- * token in case of x ending with star. *)
- separate space [colon; string (String.capitalize x); empty]
+ | DeIid x -> string (Util.zencode_string ("op " ^ x))
+
+let deinfix = function
+ | Id_aux (Id v, l) -> Id_aux (DeIid v, l)
+ | Id_aux (DeIid v, l) -> Id_aux (DeIid v, l)
let doc_var_lem kid = string (fix_id true (string_of_kid kid))
@@ -622,31 +617,34 @@ let doc_exp_lem, doc_let_lem =
raise (Reporting_basic.err_unreachable l
"E_vector_append should have been rewritten before pretty-printing")
| E_cons(le,re) -> doc_op (group (colon^^colon)) (expY le) (expY re)
- | E_if(c,t,e) ->
- let epp = if_exp ctxt false c t e in
- if aexp_needed then parens (align epp) else epp
+ | E_if(c,t,e) -> wrap_parens (align (if_exp ctxt false c t e))
| E_for(id,exp1,exp2,exp3,(Ord_aux(order,_)),exp4) ->
raise (report l "E_for should have been rewritten before pretty-printing")
| E_loop _ ->
raise (report l "E_loop should have been rewritten before pretty-printing")
| E_let(leb,e) ->
- let epp = let_exp ctxt leb ^^ space ^^ string "in" ^^ hardline ^^ expN e in
- if aexp_needed then parens epp else epp
+ wrap_parens (let_exp ctxt leb ^^ space ^^ string "in" ^^ hardline ^^ expN e)
| E_app(f,args) ->
begin match f with
- (* temporary hack to make the loop body a function of the temporary variables *)
| Id_aux (Id "None", _) as none -> doc_id_lem_ctor none
+ | Id_aux (Id "and_bool", _) | Id_aux (Id "or_bool", _)
+ when effectful (effect_of full_exp) ->
+ let call = doc_id_lem (append_id f "M") in
+ wrap_parens (hang 2 (flow (break 1) (call :: List.map expY args)))
+ (* temporary hack to make the loop body a function of the temporary variables *)
| Id_aux (Id "foreach", _) ->
begin
match args with
| [exp1; exp2; exp3; ord_exp; vartuple; body] ->
let loopvar, body = match body with
- | E_aux (E_let (LB_aux (LB_val (
- P_aux (P_typ (_, P_aux (P_var (P_aux (P_id id, _), _), _)), _), _), _), body), _) -> id, body
- | E_aux (E_let (LB_aux (LB_val (
- P_aux (P_var (P_aux (P_id id, _), _), _), _), _), body), _) -> id, body
- | E_aux (E_let (LB_aux (LB_val (
- P_aux (P_id id, _), _), _), body), _) -> id, body
+ | E_aux (E_let (LB_aux (LB_val (_, _), _),
+ E_aux (E_let (LB_aux (LB_val (_, _), _),
+ E_aux (E_if (_,
+ E_aux (E_let (LB_aux (LB_val (
+ ((P_aux (P_typ (_, P_aux (P_var (P_aux (P_id id, _), _), _)), _))
+ | (P_aux (P_var (P_aux (P_id id, _), _), _))
+ | (P_aux (P_id id, _))), _), _),
+ body), _), _), _)), _)), _) -> id, body
| _ -> raise (Reporting_basic.err_unreachable l ("Unable to find loop variable in " ^ string_of_exp body)) in
let step = match ord_exp with
| E_aux (E_lit (L_aux (L_false, _)), _) ->
@@ -751,7 +749,7 @@ let doc_exp_lem, doc_let_lem =
| _ ->
doc_id_lem_ctor f ^^ space ^^
parens (separate_map comma (expV false) args) in
- if aexp_needed then parens (align epp) else epp
+ wrap_parens (align epp)
| _ ->
let call, is_extern = match annot with
| Some (env, _, _) when Env.is_extern f env "lem" ->
@@ -804,8 +802,7 @@ let doc_exp_lem, doc_let_lem =
else if is_ctor env id then doc_id_lem_ctor id
else doc_id_lem id
| E_lit lit -> doc_lit_lem lit
- | E_cast(typ,e) ->
- expV aexp_needed e
+ | E_cast(typ,e) -> expV aexp_needed e
| E_tuple exps ->
parens (align (group (separate_map (comma ^^ break 1) expN exps)))
| E_record(FES_aux(FES_Fexps(fexps,_),_)) ->
@@ -815,10 +812,9 @@ let doc_exp_lem, doc_let_lem =
(* when Env.is_record tid env -> *)
tid
| _ -> raise (report l ("cannot get record type from annot " ^ string_of_annot annot ^ " of exp " ^ string_of_exp full_exp)) in
- let epp = anglebars (space ^^ (align (separate_map
- (semi_sp ^^ break 1)
- (doc_fexp ctxt recordtyp) fexps)) ^^ space) in
- if aexp_needed then parens epp else epp
+ wrap_parens (anglebars (space ^^ (align (separate_map
+ (semi_sp ^^ break 1)
+ (doc_fexp ctxt recordtyp) fexps)) ^^ space))
| E_record_update(e,(FES_aux(FES_Fexps(fexps,_),_))) ->
let recordtyp = match annot with
| Some (env, Typ_aux (Typ_id tid,_), _)
@@ -837,7 +833,7 @@ let doc_exp_lem, doc_let_lem =
let start = match nexp_simp start with
| Nexp_aux (Nexp_constant i, _) -> Big_int.to_string i
| _ -> if dir then "0" else string_of_int (List.length exps) in
- let expspp =
+ (* let expspp =
match exps with
| [] -> empty
| e :: es ->
@@ -848,7 +844,8 @@ let doc_exp_lem, doc_let_lem =
expN e),
if count = 20 then 0 else count + 1)
(expN e,0) es in
- align (group expspp) in
+ align (group expspp) in *)
+ let expspp = align (group (flow_map (semi ^^ break 0) expN exps)) in
let epp = brackets expspp in
let (epp,aexp_needed) =
if is_bit_typ etyp && !opt_mwords then
@@ -866,19 +863,17 @@ let doc_exp_lem, doc_let_lem =
brackets (separate_map semi (expN) exps)
| E_case(e,pexps) ->
let only_integers e = expY e in
- let epp =
- group ((separate space [string "match"; only_integers e; string "with"]) ^/^
- (separate_map (break 1) (doc_case ctxt) pexps) ^/^
- (string "end")) in
- if aexp_needed then parens (align epp) else align epp
+ wrap_parens
+ (group ((separate space [string "match"; only_integers e; string "with"]) ^/^
+ (separate_map (break 1) (doc_case ctxt) pexps) ^/^
+ (string "end")))
| E_try (e, pexps) ->
if effectful (effect_of e) then
let try_catch = if ctxt.early_ret then "try_catchR" else "try_catch" in
- let epp =
- group ((separate space [string try_catch; expY e; string "(function "]) ^/^
- (separate_map (break 1) (doc_case ctxt) pexps) ^/^
- (string "end)")) in
- if aexp_needed then parens (align epp) else align epp
+ wrap_parens
+ (group ((separate space [string try_catch; expY e; string "(function "]) ^/^
+ (separate_map (break 1) (doc_case ctxt) pexps) ^/^
+ (string "end)")))
else
raise (Reporting_basic.err_todo l "Warning: try-block around pure expression")
| E_throw e ->
@@ -887,8 +882,7 @@ let doc_exp_lem, doc_let_lem =
| E_assert (e1,e2) ->
align (liftR (separate space [string (appendS "assert_exp"); expY e1; expY e2]))
| E_app_infix (e1,id,e2) ->
- raise (Reporting_basic.err_unreachable l
- "E_app_infix should have been rewritten before pretty-printing")
+ expV aexp_needed (E_aux (E_app (deinfix id, [e1; e2]), (l, annot)))
| E_var(lexp, eq_exp, in_exp) ->
raise (report l "E_vars should have been removed before pretty-printing")
| E_internal_plet (pat,e1,e2) ->
@@ -913,7 +907,7 @@ let doc_exp_lem, doc_let_lem =
in
infix 0 1 middle (expV b e1) (expN e2)
in
- if aexp_needed then parens (align epp) else epp
+ wrap_parens (align epp)
| E_internal_return (e1) ->
wrap_parens (align (separate space [string (appendS "return"); expY e1]))
| E_sizeof nexp ->
diff --git a/src/rewrites.ml b/src/rewrites.ml
index 582901dc..b59a248e 100644
--- a/src/rewrites.ml
+++ b/src/rewrites.ml
@@ -2581,6 +2581,14 @@ let rewrite_defs_letbind_effects =
| E_cast (typ,exp') ->
n_exp_name exp' (fun exp' ->
k (rewrap (E_cast (typ,exp'))))
+ | E_app (op_bool, [l; r])
+ when string_of_id op_bool = "and_bool" || string_of_id op_bool = "or_bool" ->
+ (* Leave effectful operands of Boolean "and"/"or" in place to allow
+ short-circuiting. *)
+ let newreturn = effectful l || effectful r in
+ let l = n_exp_term newreturn l in
+ let r = n_exp_term newreturn r in
+ k (rewrap (E_app (op_bool, [l; r])))
| E_app (id,exps) ->
n_exp_nameL exps (fun exps ->
k (rewrap (E_app (id,exps))))
@@ -2916,20 +2924,21 @@ let rec rewrite_var_updates ((E_aux (expaux,((l,_) as annot))) as exp) =
|> mk_var_exps_pats pl env
in
let exp4 = rewrite_var_updates (add_vars overwrite exp4 vars) in
- let ord_exp, kids, constr, lower, upper =
- match destruct_range env (typ_of exp1), destruct_range env (typ_of exp2) with
+ let ord_exp, kids, constr, lower, upper, lower_exp, upper_exp =
+ match destruct_numeric env (typ_of exp1), destruct_numeric env (typ_of exp2) with
| None, _ | _, None ->
raise (Reporting_basic.err_unreachable el "Could not determine loop bounds")
- | Some (kids1, constr1, l1, u1), Some (kids2, constr2, l2, u2) ->
+ | Some (kids1, constr1, n1), Some (kids2, constr2, n2) ->
let kids = kids1 @ kids2 in
let constr = nc_and constr1 constr2 in
- let ord_exp, lower, upper =
+ let ord_exp, lower, upper, lower_exp, upper_exp =
if is_order_inc order
- then (annot_exp (E_lit (mk_lit L_true)) el env bool_typ, l1, u2)
- else (annot_exp (E_lit (mk_lit L_false)) el env bool_typ, l2, u1)
+ then (annot_exp (E_lit (mk_lit L_true)) el env bool_typ, n1, n2, exp1, exp2)
+ else (annot_exp (E_lit (mk_lit L_false)) el env bool_typ, n2, n1, exp2, exp1)
in
- ord_exp, kids, constr, lower, upper
+ ord_exp, kids, constr, lower, upper, lower_exp, upper_exp
in
+ (* Bind the loop variable in the body, annotated with constraints *)
let lvar_kid = mk_kid ("loop_" ^ string_of_id id) in
let lvar_nc = nc_and constr (nc_and (nc_lteq lower (nvar lvar_kid)) (nc_lteq (nvar lvar_kid) upper)) in
let lvar_typ = mk_typ (Typ_exist (lvar_kid :: kids, lvar_nc, atom_typ (nvar lvar_kid))) in
@@ -2938,7 +2947,33 @@ let rec rewrite_var_updates ((E_aux (expaux,((l,_) as annot))) as exp) =
TP_aux (TP_var lvar_kid, gen_loc el))) el env lvar_typ)) in
let lb = fix_eff_lb (annot_letbind (lvar_pat, exp1) el env lvar_typ) in
let body = fix_eff_exp (annot_exp (E_let (lb, exp4)) el env (typ_of exp4)) in
- let v = fix_eff_exp (annot_exp (E_app (mk_id "foreach", [exp1; exp2; exp3; ord_exp; tuple_exp vars; body])) el env (typ_of body)) in
+ (* If lower > upper, the loop body never gets executed, and the type
+ checker might not be able to prove that the initial value exp1
+ satisfies the constraints on the loop variable.
+
+ Make this explicit by guarding the loop body with lower <= upper.
+ (for type-checking; the guard is later removed again by the Lem
+ pretty-printer). This could be implemented with an assertion, but
+ that would force the loop to be effectful, so we use an if-expression
+ instead. This code assumes that the loop bounds have (possibly
+ existential) atom types, and the loop body has type unit. *)
+ let lower_kid = mk_kid ("loop_" ^ string_of_id id ^ "_lower") in
+ let lower_pat = P_var (annot_pat P_wild el env (typ_of lower_exp), mk_typ_pat (TP_app (mk_id "atom", [mk_typ_pat (TP_var lower_kid)]))) in
+ let lb_lower = annot_letbind (lower_pat, lower_exp) el env (typ_of lower_exp) in
+ let upper_kid = mk_kid ("loop_" ^ string_of_id id ^ "_upper") in
+ let upper_pat = P_var (annot_pat P_wild el env (typ_of upper_exp), mk_typ_pat (TP_app (mk_id "atom", [mk_typ_pat (TP_var upper_kid)]))) in
+ let lb_upper = annot_letbind (upper_pat, upper_exp) el env (typ_of upper_exp) in
+ let guard = annot_exp (E_constraint (nc_lteq (nvar lower_kid) (nvar upper_kid))) el env bool_typ in
+ let unit_exp = annot_exp (E_lit (mk_lit L_unit)) el env unit_typ in
+ let skip_val = tuple_exp (if overwrite then vars else unit_exp :: vars) in
+ let guarded_body =
+ fix_eff_exp (annot_exp (E_let (lb_lower,
+ fix_eff_exp (annot_exp (E_let (lb_upper,
+ fix_eff_exp (annot_exp (E_if (guard, body, skip_val))
+ el env (typ_of exp4))))
+ el env (typ_of exp4))))
+ el env (typ_of exp4)) in
+ let v = fix_eff_exp (annot_exp (E_app (mk_id "foreach", [exp1; exp2; exp3; ord_exp; tuple_exp vars; guarded_body])) el env (typ_of body)) in
Added_vars (v, tuple_pat (if overwrite then varpats else pat :: varpats))
| E_loop(loop,cond,body) ->
let vars, varpats =
@@ -2967,7 +3002,7 @@ let rec rewrite_var_updates ((E_aux (expaux,((l,_) as annot))) as exp) =
(* after rewrite_defs_letbind_effects c has no variable updates *)
let env = env_of_annot annot in
let typ = typ_of e1 in
- let eff = union_eff_exps [e1;e2] in
+ let eff = union_eff_exps [c;e1;e2] in
let v = E_aux (E_if (c,e1,e2), (gen_loc el, Some (env, typ, eff))) in
Added_vars (v, tuple_pat (if overwrite then varpats else pat :: varpats))
| E_case (e1,ps) ->
diff --git a/src/state.ml b/src/state.ml
index 49fa5a99..5a360456 100644
--- a/src/state.ml
+++ b/src/state.ml
@@ -60,6 +60,11 @@ open Pretty_print_sail
let defs_of_string = ast_of_def_string Ast_util.inc_ord
+let is_defined defs name = IdSet.mem (mk_id name) (ids_of_defs (Defs defs))
+
+let has_default_order defs =
+ List.exists (function DEF_default (DT_aux (DT_order _, _)) -> true | _ -> false) defs
+
let find_registers defs =
List.fold_left
(fun acc def ->
@@ -77,18 +82,100 @@ let generate_regstate = function
| [] -> ["type regstate = unit"]
| registers ->
let reg (typ, id) = Printf.sprintf "%s : %s" (string_of_id id) (to_string (doc_typ typ)) in
- let initreg (_, id) = Printf.sprintf "%s = undefined" (string_of_id id) in
- let regstate =
- "struct regstate = { " ^
- (String.concat ", " (List.map reg registers)) ^
- " }"
- in
- let initstate =
- "let initial_regstate : regstate = struct { " ^
- (String.concat ", " (List.map initreg registers)) ^
- " }"
- in
- regstate :: (if !Initial_check.opt_undefined_gen then [initstate] else [])
+ ["struct regstate = { " ^ (String.concat ", " (List.map reg registers)) ^ " }"]
+
+let generate_initial_regstate defs =
+ let registers = find_registers defs in
+ if registers = [] then [] else
+ try
+ (* Recursively choose a default value for every type in the spec.
+ vals, constructed below, maps user-defined types to default values. *)
+ let rec lookup_init_val vals (Typ_aux (typ_aux, _) as typ) =
+ match typ_aux with
+ | Typ_id id ->
+ if string_of_id id = "bool" then "false" else
+ if string_of_id id = "bit" then "bitzero" else
+ if string_of_id id = "int" then "0" else
+ if string_of_id id = "nat" then "0" else
+ if string_of_id id = "real" then "0" else
+ if string_of_id id = "string" then "\"\"" else
+ if string_of_id id = "unit" then "()" else
+ Bindings.find id vals []
+ | Typ_app (id, _) when string_of_id id = "list" -> "[||]"
+ | Typ_app (id, [Typ_arg_aux (Typ_arg_nexp nexp, _)]) when string_of_id id = "atom" ->
+ string_of_nexp nexp
+ | Typ_app (id, [Typ_arg_aux (Typ_arg_nexp nexp, _); _]) when string_of_id id = "range" ->
+ string_of_nexp nexp
+ | Typ_app (id, [Typ_arg_aux (Typ_arg_nexp (Nexp_aux (Nexp_constant len, _)), _); _ ;
+ Typ_arg_aux (Typ_arg_typ etyp, _)])
+ when string_of_id id = "vector" ->
+ (* Output a list of initial values of the vector elements, or a
+ literal binary zero value if this is a bitvector and the
+ environment has a default indexing order (required by the
+ typechecker for binary and hex literals) *)
+ let literal_bitvec = is_bit_typ etyp && has_default_order defs in
+ let init_elem = if literal_bitvec then "0" else lookup_init_val vals etyp in
+ let rec elems len =
+ if (Nat_big_num.less_equal len Nat_big_num.zero) then [] else
+ init_elem :: elems (Nat_big_num.pred len)
+ in
+ if literal_bitvec then "0b" ^ (String.concat "" (elems len)) else
+ "[" ^ (String.concat ", " (elems len)) ^ "]"
+ | Typ_app (id, args) -> Bindings.find id vals args
+ | Typ_tup typs ->
+ "(" ^ (String.concat ", " (List.map (lookup_init_val vals) typs)) ^ ")"
+ | Typ_exist (_, _, typ) -> lookup_init_val vals typ
+ | _ -> raise Not_found
+ in
+ (* Helper functions to instantiate type arguments *)
+ let typ_subst_targ kid (Typ_arg_aux (arg, _)) typ = match arg with
+ | Typ_arg_nexp (Nexp_aux (nexp, _)) -> typ_subst_nexp kid nexp typ
+ | Typ_arg_typ (Typ_aux (typ', _)) -> typ_subst_typ kid typ' typ
+ | Typ_arg_order (Ord_aux (ord, _)) -> typ_subst_order kid ord typ
+ in
+ let typ_subst_quant_item typ (QI_aux (qi, _)) arg = match qi with
+ | QI_id (KOpt_aux ((KOpt_none kid | KOpt_kind (_, kid)), _)) ->
+ typ_subst_targ kid arg typ
+ | _ -> typ
+ in
+ let typ_subst_typquant tq args typ =
+ List.fold_left2 typ_subst_quant_item typ (quant_items tq) args
+ in
+ let add_typ_init_val vals = function
+ | TD_enum (id, _, id1 :: _, _) ->
+ (* Choose the first value of an enumeration type as default *)
+ Bindings.add id (fun _ -> string_of_id id1) vals
+ | TD_variant (id, _, tq, (Tu_aux (Tu_ty_id (typ1, id1), _)) :: _, _) ->
+ (* Choose the first variant of a union type as default *)
+ let init_val args =
+ let typ1 = typ_subst_typquant tq args typ1 in
+ string_of_id id1 ^ " (" ^ lookup_init_val vals typ1 ^ ")"
+ in
+ Bindings.add id init_val vals
+ | TD_abbrev (id, _, TypSchm_aux (TypSchm_ts (tq, typ), _)) ->
+ let init_val args = lookup_init_val vals (typ_subst_typquant tq args typ) in
+ Bindings.add id init_val vals
+ | TD_record (id, _, tq, fields, _) ->
+ let init_val args =
+ let init_field (typ, id) =
+ let typ = typ_subst_typquant tq args typ in
+ string_of_id id ^ " = " ^ lookup_init_val vals typ
+ in
+ "struct { " ^ (String.concat ", " (List.map init_field fields)) ^ " }"
+ in
+ Bindings.add id init_val vals
+ | TD_bitfield (id, typ, _) ->
+ Bindings.add id (fun _ -> lookup_init_val vals typ) vals
+ | _ -> vals
+ in
+ let init_vals = List.fold_left (fun vals def -> match def with
+ | DEF_type (TD_aux (td, _)) -> add_typ_init_val vals td
+ | _ -> vals) Bindings.empty defs
+ in
+ let init_reg (typ, id) = string_of_id id ^ " = " ^ lookup_init_val init_vals typ in
+ ["let initial_regstate : regstate = struct { " ^ (String.concat ", " (List.map init_reg registers)) ^ " }"]
+ with
+ | _ -> [] (* Do not generate an initial register state if anything goes wrong *)
let rec regval_constr_id mwords (Typ_aux (t, l) as typ) = match t with
| Typ_id id -> id
@@ -135,9 +222,8 @@ let generate_regval_typ typs =
(String.concat ", " (builtins :: List.map constr (Bindings.bindings typs))) ^
" }"]
-let add_regval_conv id typ defs =
+let add_regval_conv id typ (Defs defs) =
let id = string_of_id id in
- let is_defined name = IdSet.mem (mk_id name) (ids_of_defs defs) in
let typ_str = to_string (doc_typ typ) in
(* Create a function that converts from regval to the target type. *)
let from_name = id ^ "_of_regval" in
@@ -146,14 +232,14 @@ let add_regval_conv id typ defs =
Printf.sprintf "function %s Regval_%s(v) = Some(v)" from_name id;
Printf.sprintf "and %s _ = None()" from_name
] in
- let from_defs = if is_defined from_name then [] else [from_val; from_function] in
+ let from_defs = if is_defined defs from_name then [] else [from_val; from_function] in
(* Create a function that converts from target type to regval. *)
let to_name = "regval_of_" ^ id in
let to_val = Printf.sprintf "val %s : %s -> register_value" to_name typ_str in
let to_function = Printf.sprintf "function %s v = Regval_%s(v)" to_name id in
- let to_defs = if is_defined to_name then [] else [to_val; to_function] in
+ let to_defs = if is_defined defs to_name then [] else [to_val; to_function] in
let cdefs = concat_ast (List.map defs_of_string (from_defs @ to_defs)) in
- append_ast defs cdefs
+ append_ast (Defs defs) cdefs
let rec regval_convs_lem mwords (Typ_aux (t, _) as typ) = match t with
| Typ_app _ when is_vector_typ typ && not (mwords && is_bitvector_typ typ) ->
@@ -393,17 +479,16 @@ let generate_regstate_defs mwords defs =
let gen_undef = !Initial_check.opt_undefined_gen in
Initial_check.opt_undefined_gen := false;
let registers = find_registers defs in
- let def_ids = ids_of_defs (Defs defs) in
- let has_def name = IdSet.mem (mk_id name) def_ids in
let regtyps = register_base_types mwords (List.map fst registers) in
let option_typ =
- if has_def "option" then [] else
+ if is_defined defs "option" then [] else
["union option ('a : Type) = {None : unit, Some : 'a}"]
in
- let regval_typ = if has_def "register_value" then [] else generate_regval_typ regtyps in
- let regstate_typ = if has_def "regstate" then [] else generate_regstate registers in
+ let regval_typ = if is_defined defs "register_value" then [] else generate_regval_typ regtyps in
+ let regstate_typ = if is_defined defs "regstate" then [] else generate_regstate registers in
+ let initregstate = if is_defined defs "initial_regstate" then [] else generate_initial_regstate defs in
let defs =
- option_typ @ regval_typ @ regstate_typ
+ option_typ @ regval_typ @ regstate_typ @ initregstate
|> List.map defs_of_string
|> concat_ast
|> Bindings.fold add_regval_conv regtyps
diff --git a/src/type_check.mli b/src/type_check.mli
index ed240839..1c0e2f09 100644
--- a/src/type_check.mli
+++ b/src/type_check.mli
@@ -271,6 +271,8 @@ val destruct_exist : Env.t -> typ -> (kid list * n_constraint * typ) option
val destruct_range : Env.t -> typ -> (kid list * n_constraint * nexp * nexp) option
+val destruct_numeric : Env.t -> typ -> (kid list * n_constraint * nexp) option
+
val destruct_vector : Env.t -> typ -> (nexp * order * typ) option
type uvar =
@@ -283,6 +285,9 @@ val string_of_uvar : uvar -> string
val subst_unifiers : uvar KBindings.t -> typ -> typ
+val typ_subst_nexp : kid -> nexp_aux -> typ -> typ
+val typ_subst_typ : kid -> typ_aux -> typ -> typ
+val typ_subst_order : kid -> order_aux -> typ -> typ
val typ_subst_kid : kid -> kid -> typ -> typ
val unify : l -> Env.t -> typ -> typ -> uvar KBindings.t * kid list * n_constraint option
diff --git a/test/arm/run_tests.sh b/test/arm/run_tests.sh
index 15a45e7c..f758d634 100755
--- a/test/arm/run_tests.sh
+++ b/test/arm/run_tests.sh
@@ -73,7 +73,7 @@ then
else
red "compiling no_vector specification" "fail";
- for $i in `ls *.elf`;
+ for i in `ls *.elf`;
do
red "failed $i" "fail"
done
diff --git a/test/builtins/get_slice_int.sail b/test/builtins/get_slice_int.sail
index 70894155..73c495fa 100644
--- a/test/builtins/get_slice_int.sail
+++ b/test/builtins/get_slice_int.sail
@@ -4,6 +4,12 @@ $include <exception_basic.sail>
$include <flow.sail>
$include <vector_dec.sail>
+val int_of_string = {
+ ocaml: "Nat_big_num.of_string",
+ lem: "integerOfString",
+ c: "reinit_mpz_t_of_sail_string"
+} : string -> int
+
function main (() : unit) -> unit = {
assert(get_slice_int(1, -10, 6) == 1^0x1, "get_slice_int(1, -10, 6) == 1^0x1");
assert(get_slice_int(1, -1, 6) == 1^0x1, "get_slice_int(1, -1, 6) == 1^0x1");
@@ -251,20 +257,20 @@ function main (() : unit) -> unit = {
assert(get_slice_int(64, 173015152, 0) == 64^0xa500070, "get_slice_int(64, 173015152, 0) == 64^0xa500070");
assert(get_slice_int(64, 173015156, 0) == 64^0xa500074, "get_slice_int(64, 173015156, 0) == 64^0xa500074");
assert(get_slice_int(64, 180224, 0) == 64^0x2c000, "get_slice_int(64, 180224, 0) == 64^0x2c000");
- assert(get_slice_int(64, 18446708893632421888, 0) == 64^0xffffe000ffffe000, "get_slice_int(64, 18446708893632421888, 0) == 64^0xffffe000ffffe000");
- assert(get_slice_int(64, 18446744073658712064, 0) == 64^0xfffffffffcf84000, "get_slice_int(64, 18446744073658712064, 0) == 64^0xfffffffffcf84000");
- assert(get_slice_int(64, 18446744073658777600, 0) == 64^0xfffffffffcf94000, "get_slice_int(64, 18446744073658777600, 0) == 64^0xfffffffffcf94000");
- assert(get_slice_int(64, 18446744073658843136, 0) == 64^0xfffffffffcfa4000, "get_slice_int(64, 18446744073658843136, 0) == 64^0xfffffffffcfa4000");
- assert(get_slice_int(64, 18446744073660252160, 0) == 64^0xfffffffffd0fc000, "get_slice_int(64, 18446744073660252160, 0) == 64^0xfffffffffd0fc000");
- assert(get_slice_int(64, 18446744073660317696, 0) == 64^0xfffffffffd10c000, "get_slice_int(64, 18446744073660317696, 0) == 64^0xfffffffffd10c000");
- assert(get_slice_int(64, 18446744073660465152, 0) == 64^0xfffffffffd130000, "get_slice_int(64, 18446744073660465152, 0) == 64^0xfffffffffd130000");
- assert(get_slice_int(64, 18446744073660530688, 0) == 64^0xfffffffffd140000, "get_slice_int(64, 18446744073660530688, 0) == 64^0xfffffffffd140000");
- assert(get_slice_int(64, 18446744073660727296, 0) == 64^0xfffffffffd170000, "get_slice_int(64, 18446744073660727296, 0) == 64^0xfffffffffd170000");
- assert(get_slice_int(64, 18446744073660841984, 0) == 64^0xfffffffffd18c000, "get_slice_int(64, 18446744073660841984, 0) == 64^0xfffffffffd18c000");
- assert(get_slice_int(64, 18446744073708961792, 0) == 64^0xfffffffffff70000, "get_slice_int(64, 18446744073708961792, 0) == 64^0xfffffffffff70000");
- assert(get_slice_int(64, 18446744073709027328, 0) == 64^0xfffffffffff80000, "get_slice_int(64, 18446744073709027328, 0) == 64^0xfffffffffff80000");
- assert(get_slice_int(64, 18446744073709289472, 0) == 64^0xfffffffffffc0000, "get_slice_int(64, 18446744073709289472, 0) == 64^0xfffffffffffc0000");
- assert(get_slice_int(64, 18446744073709355008, 0) == 64^0xfffffffffffd0000, "get_slice_int(64, 18446744073709355008, 0) == 64^0xfffffffffffd0000");
+ assert(get_slice_int(64, int_of_string("18446708893632421888"), 0) == 64^0xffffe000ffffe000, "get_slice_int(64, 18446708893632421888, 0) == 64^0xffffe000ffffe000");
+ assert(get_slice_int(64, int_of_string("18446744073658712064"), 0) == 64^0xfffffffffcf84000, "get_slice_int(64, 18446744073658712064, 0) == 64^0xfffffffffcf84000");
+ assert(get_slice_int(64, int_of_string("18446744073658777600"), 0) == 64^0xfffffffffcf94000, "get_slice_int(64, 18446744073658777600, 0) == 64^0xfffffffffcf94000");
+ assert(get_slice_int(64, int_of_string("18446744073658843136"), 0) == 64^0xfffffffffcfa4000, "get_slice_int(64, 18446744073658843136, 0) == 64^0xfffffffffcfa4000");
+ assert(get_slice_int(64, int_of_string("18446744073660252160"), 0) == 64^0xfffffffffd0fc000, "get_slice_int(64, 18446744073660252160, 0) == 64^0xfffffffffd0fc000");
+ assert(get_slice_int(64, int_of_string("18446744073660317696"), 0) == 64^0xfffffffffd10c000, "get_slice_int(64, 18446744073660317696, 0) == 64^0xfffffffffd10c000");
+ assert(get_slice_int(64, int_of_string("18446744073660465152"), 0) == 64^0xfffffffffd130000, "get_slice_int(64, 18446744073660465152, 0) == 64^0xfffffffffd130000");
+ assert(get_slice_int(64, int_of_string("18446744073660530688"), 0) == 64^0xfffffffffd140000, "get_slice_int(64, 18446744073660530688, 0) == 64^0xfffffffffd140000");
+ assert(get_slice_int(64, int_of_string("18446744073660727296"), 0) == 64^0xfffffffffd170000, "get_slice_int(64, 18446744073660727296, 0) == 64^0xfffffffffd170000");
+ assert(get_slice_int(64, int_of_string("18446744073660841984"), 0) == 64^0xfffffffffd18c000, "get_slice_int(64, 18446744073660841984, 0) == 64^0xfffffffffd18c000");
+ assert(get_slice_int(64, int_of_string("18446744073708961792"), 0) == 64^0xfffffffffff70000, "get_slice_int(64, 18446744073708961792, 0) == 64^0xfffffffffff70000");
+ assert(get_slice_int(64, int_of_string("18446744073709027328"), 0) == 64^0xfffffffffff80000, "get_slice_int(64, 18446744073709027328, 0) == 64^0xfffffffffff80000");
+ assert(get_slice_int(64, int_of_string("18446744073709289472"), 0) == 64^0xfffffffffffc0000, "get_slice_int(64, 18446744073709289472, 0) == 64^0xfffffffffffc0000");
+ assert(get_slice_int(64, int_of_string("18446744073709355008"), 0) == 64^0xfffffffffffd0000, "get_slice_int(64, 18446744073709355008, 0) == 64^0xfffffffffffd0000");
assert(get_slice_int(64, 75248, 0) == 64^0x125f0, "get_slice_int(64, 75248, 0) == 64^0x125f0");
assert(get_slice_int(64, 75252, 0) == 64^0x125f4, "get_slice_int(64, 75252, 0) == 64^0x125f4");
assert(get_slice_int(64, 75256, 0) == 64^0x125f8, "get_slice_int(64, 75256, 0) == 64^0x125f8");
@@ -465,4 +471,4 @@ function main (() : unit) -> unit = {
assert(get_slice_int(64, 91820, 0) == 64^0x166ac, "get_slice_int(64, 91820, 0) == 64^0x166ac");
assert(get_slice_int(64, 91824, 0) == 64^0x166b0, "get_slice_int(64, 91824, 0) == 64^0x166b0");
assert(get_slice_int(64, 91828, 0) == 64^0x166b4, "get_slice_int(64, 91828, 0) == 64^0x166b4");
-} \ No newline at end of file
+}
diff --git a/test/builtins/run_tests.sh b/test/builtins/run_tests.sh
index b1d19639..1fe2d182 100755
--- a/test/builtins/run_tests.sh
+++ b/test/builtins/run_tests.sh
@@ -5,6 +5,7 @@ set -e
DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
cd $DIR
SAILDIR="$DIR/../.."
+LEMBUILDDIR="$DIR/_lembuild"
RED='\033[0;31m'
GREEN='\033[0;32m'
@@ -81,7 +82,54 @@ do
red "compiling $(basename $file) (OCaml)" "fail"
fi;
+ mkdir -p "$LEMBUILDDIR"
+
+ if "$SAILDIR/sail" -no_warn -lem -lem_mwords -lem_lib Test_extras -undefined_gen -o out "$file" 1> /dev/null 2> /dev/null;
+ then
+ mv out.lem out_types.lem "$LEMBUILDDIR"
+ if lem -ocaml -lib "$SAILDIR/src/lem_interp" \
+ -outdir "$LEMBUILDDIR" \
+ "$SAILDIR/src/gen_lib/sail_values.lem" \
+ "$SAILDIR/src/gen_lib/sail_operators.lem" \
+ "$SAILDIR/src/gen_lib/sail_operators_mwords.lem" \
+ "$SAILDIR/src/lem_interp/sail_instr_kinds.lem" \
+ "$SAILDIR/src/gen_lib/prompt.lem" \
+ "$SAILDIR/src/gen_lib/state_monad.lem" \
+ "$SAILDIR/src/gen_lib/state.lem" \
+ "$SAILDIR/src/gen_lib/prompt_monad.lem" \
+ "test_extras.lem" "$LEMBUILDDIR/out_types.lem" "$LEMBUILDDIR/out.lem" 1> /dev/null 2> /dev/null;
+ then
+ cd "$LEMBUILDDIR"
+ if ocamlfind ocamlc -linkpkg -package zarith -package lem \
+ sail_values.ml sail_operators.ml \
+ sail_instr_kinds.ml prompt_monad.ml prompt.ml \
+ sail_operators_mwords.ml state_monad.ml state.ml \
+ test_extras.ml out_types.ml out.ml ../test.ml \
+ -o test 1> /dev/null 2> /dev/null
+ then
+ green "compiling $(basename $file) (Lem)" "ok"
+ if ./test 1> /dev/null 2> /dev/null
+ then
+ green "tested $(basename ${file%.sail}) (Lem)" "ok"
+ else
+ red "tested $(basename ${file%.sail}) (Lem)" "fail"
+ fi
+ else
+ red "compiling $(basename $file) (Sail->Lem->Ocaml->Bytecode)" "fail"
+ fi
+ cd "$DIR"
+ else
+ red "compiling $(basename $file) (Sail->Lem->Ocaml)" "fail"
+ fi
+ else
+ red "compiling $(basename $file) (Sail->Lem)" "fail"
+ fi;
+
rm -rf $DIR/_sbuild/;
+ rm -rf "$LEMBUILDDIR";
+ rm -f Out_lemmas.thy;
+ rm -f out_types.lem;
+ rm -f out.lem;
rm -f ${file%.sail}.c;
rm -f a.out;
rm -f out
diff --git a/test/builtins/set_slice_bits.sail b/test/builtins/set_slice_bits.sail
index 67b11cdf..07c01e5b 100644
--- a/test/builtins/set_slice_bits.sail
+++ b/test/builtins/set_slice_bits.sail
@@ -680,4 +680,4 @@ function main (() : unit) -> unit = {
assert(set_slice_bits(64, 9, 64^0x0, 14, 9^0x0) == 64^0x0, "set_slice_bits(64, 9, 64^0x0, 14, 9^0x0) == 64^0x0");
assert(set_slice_bits(64, 9, 64^0x0, 32, 9^0x0) == 64^0x0, "set_slice_bits(64, 9, 64^0x0, 32, 9^0x0) == 64^0x0");
assert(set_slice_bits(64, 9, 64^0x80800000, 14, 9^0x0) == 64^0x80800000, "set_slice_bits(64, 9, 64^0x80800000, 14, 9^0x0) == 64^0x80800000");
-} \ No newline at end of file
+}
diff --git a/test/builtins/signed.sail b/test/builtins/signed.sail
index 7b9160f7..21524e2f 100644
--- a/test/builtins/signed.sail
+++ b/test/builtins/signed.sail
@@ -3,6 +3,12 @@ default Order dec
$include <exception_basic.sail>
$include <vector_dec.sail>
+val int_of_string = {
+ ocaml: "Nat_big_num.of_string",
+ lem: "integerOfString",
+ c: "reinit_mpz_t_of_sail_string"
+} : string -> int
+
function main (() : unit) -> unit = {
assert(signed(zero_extend(0x0, 32)) == 0);
assert(signed(zero_extend(0x1, 32)) == 1);
@@ -1352,8 +1358,8 @@ function main (() : unit) -> unit = {
assert(signed(zero_extend(0x5a5a, 64)) == 23130);
assert(signed(zero_extend(0x5a5a5a59, 64)) == 1515870809);
assert(signed(zero_extend(0x5a5a5a5a, 64)) == 1515870810);
- assert(signed(zero_extend(0x5a5a5a5a5a5a5a59, 64)) == 6510615555426900569);
- assert(signed(zero_extend(0x5a5a5a5a5a5a5a5a, 64)) == 6510615555426900570);
+ assert(signed(zero_extend(0x5a5a5a5a5a5a5a59, 64)) == int_of_string("6510615555426900569"));
+ assert(signed(zero_extend(0x5a5a5a5a5a5a5a5a, 64)) == int_of_string("6510615555426900570"));
assert(signed(zero_extend(0x5b, 64)) == 91);
assert(signed(zero_extend(0x5c, 64)) == 92);
assert(signed(zero_extend(0x5c000, 64)) == 376832);
@@ -1398,10 +1404,10 @@ function main (() : unit) -> unit = {
assert(signed(zero_extend(0x6c, 64)) == 108);
assert(signed(zero_extend(0x6d, 64)) == 109);
assert(signed(zero_extend(0x6e, 64)) == 110);
- assert(signed(zero_extend(0x6ede4cbc6ede4cbb, 64)) == 7988907161199463611);
+ assert(signed(zero_extend(0x6ede4cbc6ede4cbb, 64)) == int_of_string("7988907161199463611"));
assert(signed(zero_extend(0x6f, 64)) == 111);
- assert(signed(zero_extend(0x6ffffffffffffffe, 64)) == 8070450532247928830);
- assert(signed(zero_extend(0x6fffffffffffffff, 64)) == 8070450532247928831);
+ assert(signed(zero_extend(0x6ffffffffffffffe, 64)) == int_of_string("8070450532247928830"));
+ assert(signed(zero_extend(0x6fffffffffffffff, 64)) == int_of_string("8070450532247928831"));
assert(signed(zero_extend(0x7, 64)) == 7);
assert(signed(zero_extend(0x70, 64)) == 112);
assert(signed(zero_extend(0x71, 64)) == 113);
@@ -1412,10 +1418,10 @@ function main (() : unit) -> unit = {
assert(signed(zero_extend(0x76, 64)) == 118);
assert(signed(zero_extend(0x764c321, 64)) == 124044065);
assert(signed(zero_extend(0x77, 64)) == 119);
- assert(signed(zero_extend(0x7765554377655542, 64)) == 8603376411415500098);
- assert(signed(zero_extend(0x7766554477665542, 64)) == 8603657890687243586);
+ assert(signed(zero_extend(0x7765554377655542, 64)) == int_of_string("8603376411415500098"));
+ assert(signed(zero_extend(0x7766554477665542, 64)) == int_of_string("8603657890687243586"));
assert(signed(zero_extend(0x78, 64)) == 120);
- assert(signed(zero_extend(0x789abcdef0123456, 64)) == 8690466096661279830);
+ assert(signed(zero_extend(0x789abcdef0123456, 64)) == int_of_string("8690466096661279830"));
assert(signed(zero_extend(0x79, 64)) == 121);
assert(signed(zero_extend(0x7a, 64)) == 122);
assert(signed(zero_extend(0x7b, 64)) == 123);
@@ -1428,54 +1434,54 @@ function main (() : unit) -> unit = {
assert(signed(zero_extend(0x7ffe, 64)) == 32766);
assert(signed(zero_extend(0x7fff, 64)) == 32767);
assert(signed(zero_extend(0x7fff7fff, 64)) == 2147450879);
- assert(signed(zero_extend(0x7fff7fff7fff7ffd, 64)) == 9223231297218904061);
- assert(signed(zero_extend(0x7fff7fff7fff7fff, 64)) == 9223231297218904063);
+ assert(signed(zero_extend(0x7fff7fff7fff7ffd, 64)) == int_of_string("9223231297218904061"));
+ assert(signed(zero_extend(0x7fff7fff7fff7fff, 64)) == int_of_string("9223231297218904063"));
assert(signed(zero_extend(0x7fffffc, 64)) == 134217724);
assert(signed(zero_extend(0x7ffffffe, 64)) == 2147483646);
assert(signed(zero_extend(0x7fffffff, 64)) == 2147483647);
- assert(signed(zero_extend(0x7fffffff00000000, 64)) == 9223372032559808512);
- assert(signed(zero_extend(0x7fffffff00000001, 64)) == 9223372032559808513);
- assert(signed(zero_extend(0x7fffffff7ffffffe, 64)) == 9223372034707292158);
- assert(signed(zero_extend(0x7fffffff7fffffff, 64)) == 9223372034707292159);
- assert(signed(zero_extend(0x7fffffff80000000, 64)) == 9223372034707292160);
- assert(signed(zero_extend(0x7fffffff80000001, 64)) == 9223372034707292161);
- assert(signed(zero_extend(0x7fffffffffff0000, 64)) == 9223372036854710272);
- assert(signed(zero_extend(0x7fffffffffff0001, 64)) == 9223372036854710273);
- assert(signed(zero_extend(0x7fffffffffff7ffe, 64)) == 9223372036854743038);
- assert(signed(zero_extend(0x7fffffffffff7fff, 64)) == 9223372036854743039);
- assert(signed(zero_extend(0x7fffffffffff8000, 64)) == 9223372036854743040);
- assert(signed(zero_extend(0x7fffffffffff8001, 64)) == 9223372036854743041);
- assert(signed(zero_extend(0x7ffffffffffffffc, 64)) == 9223372036854775804);
- assert(signed(zero_extend(0x7ffffffffffffffd, 64)) == 9223372036854775805);
- assert(signed(zero_extend(0x7ffffffffffffffe, 64)) == 9223372036854775806);
- assert(signed(zero_extend(0x7fffffffffffffff, 64)) == 9223372036854775807);
+ assert(signed(zero_extend(0x7fffffff00000000, 64)) == int_of_string("9223372032559808512"));
+ assert(signed(zero_extend(0x7fffffff00000001, 64)) == int_of_string("9223372032559808513"));
+ assert(signed(zero_extend(0x7fffffff7ffffffe, 64)) == int_of_string("9223372034707292158"));
+ assert(signed(zero_extend(0x7fffffff7fffffff, 64)) == int_of_string("9223372034707292159"));
+ assert(signed(zero_extend(0x7fffffff80000000, 64)) == int_of_string("9223372034707292160"));
+ assert(signed(zero_extend(0x7fffffff80000001, 64)) == int_of_string("9223372034707292161"));
+ assert(signed(zero_extend(0x7fffffffffff0000, 64)) == int_of_string("9223372036854710272"));
+ assert(signed(zero_extend(0x7fffffffffff0001, 64)) == int_of_string("9223372036854710273"));
+ assert(signed(zero_extend(0x7fffffffffff7ffe, 64)) == int_of_string("9223372036854743038"));
+ assert(signed(zero_extend(0x7fffffffffff7fff, 64)) == int_of_string("9223372036854743039"));
+ assert(signed(zero_extend(0x7fffffffffff8000, 64)) == int_of_string("9223372036854743040"));
+ assert(signed(zero_extend(0x7fffffffffff8001, 64)) == int_of_string("9223372036854743041"));
+ assert(signed(zero_extend(0x7ffffffffffffffc, 64)) == int_of_string("9223372036854775804"));
+ assert(signed(zero_extend(0x7ffffffffffffffd, 64)) == int_of_string("9223372036854775805"));
+ assert(signed(zero_extend(0x7ffffffffffffffe, 64)) == int_of_string("9223372036854775806"));
+ assert(signed(zero_extend(0x7fffffffffffffff, 64)) == int_of_string("9223372036854775807"));
assert(signed(zero_extend(0x8, 64)) == 8);
assert(signed(zero_extend(0x80, 64)) == 128);
assert(signed(zero_extend(0x800, 64)) == 2048);
assert(signed(zero_extend(0x8000, 64)) == 32768);
assert(signed(zero_extend(0x80000000, 64)) == 2147483648);
- assert(signed(zero_extend(0x8000000000000000, 64)) == -9223372036854775808);
- assert(signed(zero_extend(0x8000000000000001, 64)) == -9223372036854775807);
- assert(signed(zero_extend(0x8000000000000002, 64)) == -9223372036854775806);
- assert(signed(zero_extend(0x8000000000000003, 64)) == -9223372036854775805);
- assert(signed(zero_extend(0x8000000000007ffe, 64)) == -9223372036854743042);
- assert(signed(zero_extend(0x8000000000007fff, 64)) == -9223372036854743041);
- assert(signed(zero_extend(0x8000000000008000, 64)) == -9223372036854743040);
- assert(signed(zero_extend(0x8000000000008001, 64)) == -9223372036854743039);
- assert(signed(zero_extend(0x800000000000fffe, 64)) == -9223372036854710274);
- assert(signed(zero_extend(0x800000000000ffff, 64)) == -9223372036854710273);
- assert(signed(zero_extend(0x800000007ffffffe, 64)) == -9223372034707292162);
- assert(signed(zero_extend(0x800000007fffffff, 64)) == -9223372034707292161);
- assert(signed(zero_extend(0x8000000080000000, 64)) == -9223372034707292160);
- assert(signed(zero_extend(0x8000000080000001, 64)) == -9223372034707292159);
- assert(signed(zero_extend(0x80000000fffffffe, 64)) == -9223372032559808514);
- assert(signed(zero_extend(0x80000000ffffffff, 64)) == -9223372032559808513);
+ assert(signed(zero_extend(0x8000000000000000, 64)) == int_of_string("-9223372036854775808"));
+ assert(signed(zero_extend(0x8000000000000001, 64)) == int_of_string("-9223372036854775807"));
+ assert(signed(zero_extend(0x8000000000000002, 64)) == int_of_string("-9223372036854775806"));
+ assert(signed(zero_extend(0x8000000000000003, 64)) == int_of_string("-9223372036854775805"));
+ assert(signed(zero_extend(0x8000000000007ffe, 64)) == int_of_string("-9223372036854743042"));
+ assert(signed(zero_extend(0x8000000000007fff, 64)) == int_of_string("-9223372036854743041"));
+ assert(signed(zero_extend(0x8000000000008000, 64)) == int_of_string("-9223372036854743040"));
+ assert(signed(zero_extend(0x8000000000008001, 64)) == int_of_string("-9223372036854743039"));
+ assert(signed(zero_extend(0x800000000000fffe, 64)) == int_of_string("-9223372036854710274"));
+ assert(signed(zero_extend(0x800000000000ffff, 64)) == int_of_string("-9223372036854710273"));
+ assert(signed(zero_extend(0x800000007ffffffe, 64)) == int_of_string("-9223372034707292162"));
+ assert(signed(zero_extend(0x800000007fffffff, 64)) == int_of_string("-9223372034707292161"));
+ assert(signed(zero_extend(0x8000000080000000, 64)) == int_of_string("-9223372034707292160"));
+ assert(signed(zero_extend(0x8000000080000001, 64)) == int_of_string("-9223372034707292159"));
+ assert(signed(zero_extend(0x80000000fffffffe, 64)) == int_of_string("-9223372032559808514"));
+ assert(signed(zero_extend(0x80000000ffffffff, 64)) == int_of_string("-9223372032559808513"));
assert(signed(zero_extend(0x80000001, 64)) == 2147483649);
assert(signed(zero_extend(0x80000002, 64)) == 2147483650);
assert(signed(zero_extend(0x80000003, 64)) == 2147483651);
assert(signed(zero_extend(0x80008000, 64)) == 2147516416);
- assert(signed(zero_extend(0x8000800080008000, 64)) == -9223231297218904064);
- assert(signed(zero_extend(0x8000800080008002, 64)) == -9223231297218904062);
+ assert(signed(zero_extend(0x8000800080008000, 64)) == int_of_string("-9223231297218904064"));
+ assert(signed(zero_extend(0x8000800080008002, 64)) == int_of_string("-9223231297218904062"));
assert(signed(zero_extend(0x8001, 64)) == 32769);
assert(signed(zero_extend(0x80010003, 64)) == 2147549187);
assert(signed(zero_extend(0x8002, 64)) == 32770);
@@ -1503,26 +1509,26 @@ function main (() : unit) -> unit = {
assert(signed(zero_extend(0x86, 64)) == 134);
assert(signed(zero_extend(0x87, 64)) == 135);
assert(signed(zero_extend(0x87654321, 64)) == 2271560481);
- assert(signed(zero_extend(0x876543210fedcba9, 64)) == -8690466096661279831);
+ assert(signed(zero_extend(0x876543210fedcba9, 64)) == int_of_string("-8690466096661279831"));
assert(signed(zero_extend(0x88, 64)) == 136);
assert(signed(zero_extend(0x8899aabb, 64)) == 2291772091);
- assert(signed(zero_extend(0x8899aabb8899aabb, 64)) == -8603657890687243589);
- assert(signed(zero_extend(0x8899aabb8899aabd, 64)) == -8603657890687243587);
- assert(signed(zero_extend(0x889aaabc889aaabd, 64)) == -8603376411415500099);
+ assert(signed(zero_extend(0x8899aabb8899aabb, 64)) == int_of_string("-8603657890687243589"));
+ assert(signed(zero_extend(0x8899aabb8899aabd, 64)) == int_of_string("-8603657890687243587"));
+ assert(signed(zero_extend(0x889aaabc889aaabd, 64)) == int_of_string("-8603376411415500099"));
assert(signed(zero_extend(0x89, 64)) == 137);
assert(signed(zero_extend(0x89ab, 64)) == 35243);
assert(signed(zero_extend(0x8a, 64)) == 138);
assert(signed(zero_extend(0x8b, 64)) == 139);
assert(signed(zero_extend(0x8c, 64)) == 140);
assert(signed(zero_extend(0x8d, 64)) == 141);
- assert(signed(zero_extend(0x8fffffffffffffff, 64)) == -8070450532247928833);
+ assert(signed(zero_extend(0x8fffffffffffffff, 64)) == int_of_string("-8070450532247928833"));
assert(signed(zero_extend(0x9, 64)) == 9);
assert(signed(zero_extend(0x90, 64)) == 144);
assert(signed(zero_extend(0x90000000, 64)) == 2415919104);
- assert(signed(zero_extend(0x9000000000000000, 64)) == -8070450532247928832);
- assert(signed(zero_extend(0x9000000000000001, 64)) == -8070450532247928831);
+ assert(signed(zero_extend(0x9000000000000000, 64)) == int_of_string("-8070450532247928832"));
+ assert(signed(zero_extend(0x9000000000000001, 64)) == int_of_string("-8070450532247928831"));
assert(signed(zero_extend(0x90000001, 64)) == 2415919105);
- assert(signed(zero_extend(0x9121b3439121b344, 64)) == -7988907161199463612);
+ assert(signed(zero_extend(0x9121b3439121b344, 64)) == int_of_string("-7988907161199463612"));
assert(signed(zero_extend(0x9200040, 64)) == 153092160);
assert(signed(zero_extend(0x920005c, 64)) == 153092188);
assert(signed(zero_extend(0x9200060, 64)) == 153092192);
@@ -1540,7 +1546,7 @@ function main (() : unit) -> unit = {
assert(signed(zero_extend(0xa500074, 64)) == 173015156);
assert(signed(zero_extend(0xa5a5, 64)) == 42405);
assert(signed(zero_extend(0xa5a5a5a5, 64)) == 2779096485);
- assert(signed(zero_extend(0xa5a5a5a5a5a5a5a5, 64)) == -6510615555426900571);
+ assert(signed(zero_extend(0xa5a5a5a5a5a5a5a5, 64)) == int_of_string("-6510615555426900571"));
assert(signed(zero_extend(0xa8, 64)) == 168);
assert(signed(zero_extend(0xb, 64)) == 11);
assert(signed(zero_extend(0xb0, 64)) == 176);
@@ -1550,13 +1556,13 @@ function main (() : unit) -> unit = {
assert(signed(zero_extend(0xc00fefff, 64)) == 3222269951);
assert(signed(zero_extend(0xd, 64)) == 13);
assert(signed(zero_extend(0xd0, 64)) == 208);
- assert(signed(zero_extend(0xdddddddddddddddc, 64)) == -2459565876494606884);
+ assert(signed(zero_extend(0xdddddddddddddddc, 64)) == int_of_string("-2459565876494606884"));
assert(signed(zero_extend(0xe, 64)) == 14);
assert(signed(zero_extend(0xe0, 64)) == 224);
- assert(signed(zero_extend(0xedcba9876543210e, 64)) == -1311768467463790322);
+ assert(signed(zero_extend(0xedcba9876543210e, 64)) == int_of_string("-1311768467463790322"));
assert(signed(zero_extend(0xf, 64)) == 15);
assert(signed(zero_extend(0xf00, 64)) == 3840);
- assert(signed(zero_extend(0xf000000000000000, 64)) == -1152921504606846976);
+ assert(signed(zero_extend(0xf000000000000000, 64)) == int_of_string("-1152921504606846976"));
assert(signed(zero_extend(0xff, 64)) == 255);
assert(signed(zero_extend(0xfffe, 64)) == 65534);
assert(signed(zero_extend(0xffff, 64)) == 65535);
@@ -1704,4 +1710,4 @@ function main (() : unit) -> unit = {
assert(signed(zero_extend(0xfffffffffffffffd, 64)) == -3);
assert(signed(zero_extend(0xfffffffffffffffe, 64)) == -2);
assert(signed(zero_extend(0xffffffffffffffff, 64)) == -1);
-} \ No newline at end of file
+}
diff --git a/test/builtins/test_extras.lem b/test/builtins/test_extras.lem
new file mode 100644
index 00000000..136f680e
--- /dev/null
+++ b/test/builtins/test_extras.lem
@@ -0,0 +1,22 @@
+open import Pervasives_extra
+open import Sail_instr_kinds
+open import Sail_values
+open import Sail_operators_mwords
+open import Prompt_monad
+open import State
+
+type ty0
+instance (Size ty0) let size = 0 end
+declare isabelle target_rep type ty1 = `0`
+
+val undefined_int : forall 'rv 'e. unit -> monad 'rv integer 'e
+let undefined_int () = return 0
+
+val undefined_bitvector : forall 'rv 'a 'e. Size 'a => integer -> monad 'rv (mword 'a) 'e
+let undefined_bitvector len = return (zeros(len))
+
+val undefined_unit : forall 'rv 'e. unit -> monad 'rv unit 'e
+let undefined_unit () = return ()
+
+val internal_pick : forall 'rv 'a 'e. list 'a -> monad 'rv 'a 'e
+let internal_pick xs = return (List_extra.head xs)
diff --git a/test/builtins/unsigned6.sail b/test/builtins/unsigned6.sail
index 556b0db6..ec2635d6 100644
--- a/test/builtins/unsigned6.sail
+++ b/test/builtins/unsigned6.sail
@@ -3,6 +3,12 @@ default Order dec
$include <exception_basic.sail>
$include <vector_dec.sail>
+val int_of_string = {
+ ocaml: "Nat_big_num.of_string",
+ lem: "integerOfString",
+ c: "reinit_mpz_t_of_sail_string"
+} : string -> int
+
function main (() : unit) -> unit = {
assert(unsigned(zero_extend(0x2e015f0, 64)) == 48240112);
assert(unsigned(zero_extend(0x2e015f8, 64)) == 48240120);
@@ -2057,8 +2063,8 @@ function main (() : unit) -> unit = {
assert(unsigned(zero_extend(0x5a5a, 64)) == 23130);
assert(unsigned(zero_extend(0x5a5a5a59, 64)) == 1515870809);
assert(unsigned(zero_extend(0x5a5a5a5a, 64)) == 1515870810);
- assert(unsigned(zero_extend(0x5a5a5a5a5a5a5a59, 64)) == 6510615555426900569);
- assert(unsigned(zero_extend(0x5a5a5a5a5a5a5a5a, 64)) == 6510615555426900570);
+ assert(unsigned(zero_extend(0x5a5a5a5a5a5a5a59, 64)) == int_of_string("6510615555426900569"));
+ assert(unsigned(zero_extend(0x5a5a5a5a5a5a5a5a, 64)) == int_of_string("6510615555426900570"));
assert(unsigned(zero_extend(0x5b, 64)) == 91);
assert(unsigned(zero_extend(0x5c, 64)) == 92);
assert(unsigned(zero_extend(0x5c000, 64)) == 376832);
@@ -2147,10 +2153,10 @@ function main (() : unit) -> unit = {
assert(unsigned(zero_extend(0x6c, 64)) == 108);
assert(unsigned(zero_extend(0x6d, 64)) == 109);
assert(unsigned(zero_extend(0x6e, 64)) == 110);
- assert(unsigned(zero_extend(0x6ede4cbc6ede4cbb, 64)) == 7988907161199463611);
+ assert(unsigned(zero_extend(0x6ede4cbc6ede4cbb, 64)) == int_of_string("7988907161199463611"));
assert(unsigned(zero_extend(0x6f, 64)) == 111);
- assert(unsigned(zero_extend(0x6ffffffffffffffe, 64)) == 8070450532247928830);
- assert(unsigned(zero_extend(0x6fffffffffffffff, 64)) == 8070450532247928831);
+ assert(unsigned(zero_extend(0x6ffffffffffffffe, 64)) == int_of_string("8070450532247928830"));
+ assert(unsigned(zero_extend(0x6fffffffffffffff, 64)) == int_of_string("8070450532247928831"));
assert(unsigned(zero_extend(0x7, 64)) == 7);
assert(unsigned(zero_extend(0x70, 64)) == 112);
assert(unsigned(zero_extend(0x71, 64)) == 113);
@@ -2161,10 +2167,10 @@ function main (() : unit) -> unit = {
assert(unsigned(zero_extend(0x76, 64)) == 118);
assert(unsigned(zero_extend(0x764c321, 64)) == 124044065);
assert(unsigned(zero_extend(0x77, 64)) == 119);
- assert(unsigned(zero_extend(0x7765554377655542, 64)) == 8603376411415500098);
- assert(unsigned(zero_extend(0x7766554477665542, 64)) == 8603657890687243586);
+ assert(unsigned(zero_extend(0x7765554377655542, 64)) == int_of_string("8603376411415500098"));
+ assert(unsigned(zero_extend(0x7766554477665542, 64)) == int_of_string("8603657890687243586"));
assert(unsigned(zero_extend(0x78, 64)) == 120);
- assert(unsigned(zero_extend(0x789abcdef0123456, 64)) == 8690466096661279830);
+ assert(unsigned(zero_extend(0x789abcdef0123456, 64)) == int_of_string("8690466096661279830"));
assert(unsigned(zero_extend(0x79, 64)) == 121);
assert(unsigned(zero_extend(0x7a, 64)) == 122);
assert(unsigned(zero_extend(0x7b, 64)) == 123);
@@ -2177,54 +2183,54 @@ function main (() : unit) -> unit = {
assert(unsigned(zero_extend(0x7ffe, 64)) == 32766);
assert(unsigned(zero_extend(0x7fff, 64)) == 32767);
assert(unsigned(zero_extend(0x7fff7fff, 64)) == 2147450879);
- assert(unsigned(zero_extend(0x7fff7fff7fff7ffd, 64)) == 9223231297218904061);
- assert(unsigned(zero_extend(0x7fff7fff7fff7fff, 64)) == 9223231297218904063);
+ assert(unsigned(zero_extend(0x7fff7fff7fff7ffd, 64)) == int_of_string("9223231297218904061"));
+ assert(unsigned(zero_extend(0x7fff7fff7fff7fff, 64)) == int_of_string("9223231297218904063"));
assert(unsigned(zero_extend(0x7fffffc, 64)) == 134217724);
assert(unsigned(zero_extend(0x7ffffffe, 64)) == 2147483646);
assert(unsigned(zero_extend(0x7fffffff, 64)) == 2147483647);
- assert(unsigned(zero_extend(0x7fffffff00000000, 64)) == 9223372032559808512);
- assert(unsigned(zero_extend(0x7fffffff00000001, 64)) == 9223372032559808513);
- assert(unsigned(zero_extend(0x7fffffff7ffffffe, 64)) == 9223372034707292158);
- assert(unsigned(zero_extend(0x7fffffff7fffffff, 64)) == 9223372034707292159);
- assert(unsigned(zero_extend(0x7fffffff80000000, 64)) == 9223372034707292160);
- assert(unsigned(zero_extend(0x7fffffff80000001, 64)) == 9223372034707292161);
- assert(unsigned(zero_extend(0x7fffffffffff0000, 64)) == 9223372036854710272);
- assert(unsigned(zero_extend(0x7fffffffffff0001, 64)) == 9223372036854710273);
- assert(unsigned(zero_extend(0x7fffffffffff7ffe, 64)) == 9223372036854743038);
- assert(unsigned(zero_extend(0x7fffffffffff7fff, 64)) == 9223372036854743039);
- assert(unsigned(zero_extend(0x7fffffffffff8000, 64)) == 9223372036854743040);
- assert(unsigned(zero_extend(0x7fffffffffff8001, 64)) == 9223372036854743041);
- assert(unsigned(zero_extend(0x7ffffffffffffffc, 64)) == 9223372036854775804);
- assert(unsigned(zero_extend(0x7ffffffffffffffd, 64)) == 9223372036854775805);
- assert(unsigned(zero_extend(0x7ffffffffffffffe, 64)) == 9223372036854775806);
- assert(unsigned(zero_extend(0x7fffffffffffffff, 64)) == 9223372036854775807);
+ assert(unsigned(zero_extend(0x7fffffff00000000, 64)) == int_of_string("9223372032559808512"));
+ assert(unsigned(zero_extend(0x7fffffff00000001, 64)) == int_of_string("9223372032559808513"));
+ assert(unsigned(zero_extend(0x7fffffff7ffffffe, 64)) == int_of_string("9223372034707292158"));
+ assert(unsigned(zero_extend(0x7fffffff7fffffff, 64)) == int_of_string("9223372034707292159"));
+ assert(unsigned(zero_extend(0x7fffffff80000000, 64)) == int_of_string("9223372034707292160"));
+ assert(unsigned(zero_extend(0x7fffffff80000001, 64)) == int_of_string("9223372034707292161"));
+ assert(unsigned(zero_extend(0x7fffffffffff0000, 64)) == int_of_string("9223372036854710272"));
+ assert(unsigned(zero_extend(0x7fffffffffff0001, 64)) == int_of_string("9223372036854710273"));
+ assert(unsigned(zero_extend(0x7fffffffffff7ffe, 64)) == int_of_string("9223372036854743038"));
+ assert(unsigned(zero_extend(0x7fffffffffff7fff, 64)) == int_of_string("9223372036854743039"));
+ assert(unsigned(zero_extend(0x7fffffffffff8000, 64)) == int_of_string("9223372036854743040"));
+ assert(unsigned(zero_extend(0x7fffffffffff8001, 64)) == int_of_string("9223372036854743041"));
+ assert(unsigned(zero_extend(0x7ffffffffffffffc, 64)) == int_of_string("9223372036854775804"));
+ assert(unsigned(zero_extend(0x7ffffffffffffffd, 64)) == int_of_string("9223372036854775805"));
+ assert(unsigned(zero_extend(0x7ffffffffffffffe, 64)) == int_of_string("9223372036854775806"));
+ assert(unsigned(zero_extend(0x7fffffffffffffff, 64)) == int_of_string("9223372036854775807"));
assert(unsigned(zero_extend(0x8, 64)) == 8);
assert(unsigned(zero_extend(0x80, 64)) == 128);
assert(unsigned(zero_extend(0x800, 64)) == 2048);
assert(unsigned(zero_extend(0x8000, 64)) == 32768);
assert(unsigned(zero_extend(0x80000000, 64)) == 2147483648);
- assert(unsigned(zero_extend(0x8000000000000000, 64)) == 9223372036854775808);
- assert(unsigned(zero_extend(0x8000000000000001, 64)) == 9223372036854775809);
- assert(unsigned(zero_extend(0x8000000000000002, 64)) == 9223372036854775810);
- assert(unsigned(zero_extend(0x8000000000000003, 64)) == 9223372036854775811);
- assert(unsigned(zero_extend(0x8000000000007ffe, 64)) == 9223372036854808574);
- assert(unsigned(zero_extend(0x8000000000007fff, 64)) == 9223372036854808575);
- assert(unsigned(zero_extend(0x8000000000008000, 64)) == 9223372036854808576);
- assert(unsigned(zero_extend(0x8000000000008001, 64)) == 9223372036854808577);
- assert(unsigned(zero_extend(0x800000000000fffe, 64)) == 9223372036854841342);
- assert(unsigned(zero_extend(0x800000000000ffff, 64)) == 9223372036854841343);
- assert(unsigned(zero_extend(0x800000007ffffffe, 64)) == 9223372039002259454);
- assert(unsigned(zero_extend(0x800000007fffffff, 64)) == 9223372039002259455);
- assert(unsigned(zero_extend(0x8000000080000000, 64)) == 9223372039002259456);
- assert(unsigned(zero_extend(0x8000000080000001, 64)) == 9223372039002259457);
- assert(unsigned(zero_extend(0x80000000fffffffe, 64)) == 9223372041149743102);
- assert(unsigned(zero_extend(0x80000000ffffffff, 64)) == 9223372041149743103);
+ assert(unsigned(zero_extend(0x8000000000000000, 64)) == int_of_string("9223372036854775808"));
+ assert(unsigned(zero_extend(0x8000000000000001, 64)) == int_of_string("9223372036854775809"));
+ assert(unsigned(zero_extend(0x8000000000000002, 64)) == int_of_string("9223372036854775810"));
+ assert(unsigned(zero_extend(0x8000000000000003, 64)) == int_of_string("9223372036854775811"));
+ assert(unsigned(zero_extend(0x8000000000007ffe, 64)) == int_of_string("9223372036854808574"));
+ assert(unsigned(zero_extend(0x8000000000007fff, 64)) == int_of_string("9223372036854808575"));
+ assert(unsigned(zero_extend(0x8000000000008000, 64)) == int_of_string("9223372036854808576"));
+ assert(unsigned(zero_extend(0x8000000000008001, 64)) == int_of_string("9223372036854808577"));
+ assert(unsigned(zero_extend(0x800000000000fffe, 64)) == int_of_string("9223372036854841342"));
+ assert(unsigned(zero_extend(0x800000000000ffff, 64)) == int_of_string("9223372036854841343"));
+ assert(unsigned(zero_extend(0x800000007ffffffe, 64)) == int_of_string("9223372039002259454"));
+ assert(unsigned(zero_extend(0x800000007fffffff, 64)) == int_of_string("9223372039002259455"));
+ assert(unsigned(zero_extend(0x8000000080000000, 64)) == int_of_string("9223372039002259456"));
+ assert(unsigned(zero_extend(0x8000000080000001, 64)) == int_of_string("9223372039002259457"));
+ assert(unsigned(zero_extend(0x80000000fffffffe, 64)) == int_of_string("9223372041149743102"));
+ assert(unsigned(zero_extend(0x80000000ffffffff, 64)) == int_of_string("9223372041149743103"));
assert(unsigned(zero_extend(0x80000001, 64)) == 2147483649);
assert(unsigned(zero_extend(0x80000002, 64)) == 2147483650);
assert(unsigned(zero_extend(0x80000003, 64)) == 2147483651);
assert(unsigned(zero_extend(0x80008000, 64)) == 2147516416);
- assert(unsigned(zero_extend(0x8000800080008000, 64)) == 9223512776490647552);
- assert(unsigned(zero_extend(0x8000800080008002, 64)) == 9223512776490647554);
+ assert(unsigned(zero_extend(0x8000800080008000, 64)) == int_of_string("9223512776490647552"));
+ assert(unsigned(zero_extend(0x8000800080008002, 64)) == int_of_string("9223512776490647554"));
assert(unsigned(zero_extend(0x8001, 64)) == 32769);
assert(unsigned(zero_extend(0x80010003, 64)) == 2147549187);
assert(unsigned(zero_extend(0x8002, 64)) == 32770);
@@ -2252,26 +2258,26 @@ function main (() : unit) -> unit = {
assert(unsigned(zero_extend(0x86, 64)) == 134);
assert(unsigned(zero_extend(0x87, 64)) == 135);
assert(unsigned(zero_extend(0x87654321, 64)) == 2271560481);
- assert(unsigned(zero_extend(0x876543210fedcba9, 64)) == 9756277977048271785);
+ assert(unsigned(zero_extend(0x876543210fedcba9, 64)) == int_of_string("9756277977048271785"));
assert(unsigned(zero_extend(0x88, 64)) == 136);
assert(unsigned(zero_extend(0x8899aabb, 64)) == 2291772091);
- assert(unsigned(zero_extend(0x8899aabb8899aabb, 64)) == 9843086183022308027);
- assert(unsigned(zero_extend(0x8899aabb8899aabd, 64)) == 9843086183022308029);
- assert(unsigned(zero_extend(0x889aaabc889aaabd, 64)) == 9843367662294051517);
+ assert(unsigned(zero_extend(0x8899aabb8899aabb, 64)) == int_of_string("9843086183022308027"));
+ assert(unsigned(zero_extend(0x8899aabb8899aabd, 64)) == int_of_string("9843086183022308029"));
+ assert(unsigned(zero_extend(0x889aaabc889aaabd, 64)) == int_of_string("9843367662294051517"));
assert(unsigned(zero_extend(0x89, 64)) == 137);
assert(unsigned(zero_extend(0x89ab, 64)) == 35243);
assert(unsigned(zero_extend(0x8a, 64)) == 138);
assert(unsigned(zero_extend(0x8b, 64)) == 139);
assert(unsigned(zero_extend(0x8c, 64)) == 140);
assert(unsigned(zero_extend(0x8d, 64)) == 141);
- assert(unsigned(zero_extend(0x8fffffffffffffff, 64)) == 10376293541461622783);
+ assert(unsigned(zero_extend(0x8fffffffffffffff, 64)) == int_of_string("10376293541461622783"));
assert(unsigned(zero_extend(0x9, 64)) == 9);
assert(unsigned(zero_extend(0x90, 64)) == 144);
assert(unsigned(zero_extend(0x90000000, 64)) == 2415919104);
- assert(unsigned(zero_extend(0x9000000000000000, 64)) == 10376293541461622784);
- assert(unsigned(zero_extend(0x9000000000000001, 64)) == 10376293541461622785);
+ assert(unsigned(zero_extend(0x9000000000000000, 64)) == int_of_string("10376293541461622784"));
+ assert(unsigned(zero_extend(0x9000000000000001, 64)) == int_of_string("10376293541461622785"));
assert(unsigned(zero_extend(0x90000001, 64)) == 2415919105);
- assert(unsigned(zero_extend(0x9121b3439121b344, 64)) == 10457836912510088004);
+ assert(unsigned(zero_extend(0x9121b3439121b344, 64)) == int_of_string("10457836912510088004"));
assert(unsigned(zero_extend(0x9200000, 64)) == 153092096);
assert(unsigned(zero_extend(0x9200004, 64)) == 153092100);
assert(unsigned(zero_extend(0x9200008, 64)) == 153092104);
@@ -2355,7 +2361,7 @@ function main (() : unit) -> unit = {
assert(unsigned(zero_extend(0xa500074, 64)) == 173015156);
assert(unsigned(zero_extend(0xa5a5, 64)) == 42405);
assert(unsigned(zero_extend(0xa5a5a5a5, 64)) == 2779096485);
- assert(unsigned(zero_extend(0xa5a5a5a5a5a5a5a5, 64)) == 11936128518282651045);
+ assert(unsigned(zero_extend(0xa5a5a5a5a5a5a5a5, 64)) == int_of_string("11936128518282651045"));
assert(unsigned(zero_extend(0xa8, 64)) == 168);
assert(unsigned(zero_extend(0xb, 64)) == 11);
assert(unsigned(zero_extend(0xb0, 64)) == 176);
@@ -2365,160 +2371,160 @@ function main (() : unit) -> unit = {
assert(unsigned(zero_extend(0xc00fefff, 64)) == 3222269951);
assert(unsigned(zero_extend(0xd, 64)) == 13);
assert(unsigned(zero_extend(0xd0, 64)) == 208);
- assert(unsigned(zero_extend(0xdddddddddddddddc, 64)) == 15987178197214944732);
+ assert(unsigned(zero_extend(0xdddddddddddddddc, 64)) == int_of_string("15987178197214944732"));
assert(unsigned(zero_extend(0xe, 64)) == 14);
assert(unsigned(zero_extend(0xe0, 64)) == 224);
- assert(unsigned(zero_extend(0xedcba9876543210e, 64)) == 17134975606245761294);
+ assert(unsigned(zero_extend(0xedcba9876543210e, 64)) == int_of_string("17134975606245761294"));
assert(unsigned(zero_extend(0xf, 64)) == 15);
assert(unsigned(zero_extend(0xf00, 64)) == 3840);
- assert(unsigned(zero_extend(0xf000000000000000, 64)) == 17293822569102704640);
+ assert(unsigned(zero_extend(0xf000000000000000, 64)) == int_of_string("17293822569102704640"));
assert(unsigned(zero_extend(0xff, 64)) == 255);
assert(unsigned(zero_extend(0xfffe, 64)) == 65534);
assert(unsigned(zero_extend(0xffff, 64)) == 65535);
assert(unsigned(zero_extend(0xffff8000, 64)) == 4294934528);
assert(unsigned(zero_extend(0xffffc, 64)) == 1048572);
- assert(unsigned(zero_extend(0xffffe000ffffe000, 64)) == 18446708893632421888);
+ assert(unsigned(zero_extend(0xffffe000ffffe000, 64)) == int_of_string("18446708893632421888"));
assert(unsigned(zero_extend(0xfffffff, 64)) == 268435455);
assert(unsigned(zero_extend(0xfffffffe, 64)) == 4294967294);
- assert(unsigned(zero_extend(0xfffffffe77665544, 64)) == 18446744067122812228);
- assert(unsigned(zero_extend(0xfffffffe7fffffff, 64)) == 18446744067267100671);
- assert(unsigned(zero_extend(0xfffffffe80000000, 64)) == 18446744067267100672);
- assert(unsigned(zero_extend(0xfffffffef89b3cde, 64)) == 18446744069290540254);
- assert(unsigned(zero_extend(0xfffffffefffffffe, 64)) == 18446744069414584318);
- assert(unsigned(zero_extend(0xfffffffeffffffff, 64)) == 18446744069414584319);
+ assert(unsigned(zero_extend(0xfffffffe77665544, 64)) == int_of_string("18446744067122812228"));
+ assert(unsigned(zero_extend(0xfffffffe7fffffff, 64)) == int_of_string("18446744067267100671"));
+ assert(unsigned(zero_extend(0xfffffffe80000000, 64)) == int_of_string("18446744067267100672"));
+ assert(unsigned(zero_extend(0xfffffffef89b3cde, 64)) == int_of_string("18446744069290540254"));
+ assert(unsigned(zero_extend(0xfffffffefffffffe, 64)) == int_of_string("18446744069414584318"));
+ assert(unsigned(zero_extend(0xfffffffeffffffff, 64)) == int_of_string("18446744069414584319"));
assert(unsigned(zero_extend(0xffffffff, 64)) == 4294967295);
- assert(unsigned(zero_extend(0xffffffff00000000, 64)) == 18446744069414584320);
- assert(unsigned(zero_extend(0xffffffff00000001, 64)) == 18446744069414584321);
- assert(unsigned(zero_extend(0xffffffff00007fff, 64)) == 18446744069414617087);
- assert(unsigned(zero_extend(0xffffffff3ff01000, 64)) == 18446744070487281664);
- assert(unsigned(zero_extend(0xffffffff5fffffff, 64)) == 18446744071025197055);
- assert(unsigned(zero_extend(0xffffffff6dcba985, 64)) == 18446744071256648069);
- assert(unsigned(zero_extend(0xffffffff6ffffffe, 64)) == 18446744071293632510);
- assert(unsigned(zero_extend(0xffffffff6fffffff, 64)) == 18446744071293632511);
- assert(unsigned(zero_extend(0xffffffff77665544, 64)) == 18446744071417779524);
- assert(unsigned(zero_extend(0xffffffff7ffefffc, 64)) == 18446744071562002428);
- assert(unsigned(zero_extend(0xffffffff7ffffffc, 64)) == 18446744071562067964);
- assert(unsigned(zero_extend(0xffffffff7ffffffd, 64)) == 18446744071562067965);
- assert(unsigned(zero_extend(0xffffffff7ffffffe, 64)) == 18446744071562067966);
- assert(unsigned(zero_extend(0xffffffff7fffffff, 64)) == 18446744071562067967);
- assert(unsigned(zero_extend(0xffffffff80000000, 64)) == 18446744071562067968);
- assert(unsigned(zero_extend(0xffffffff80000001, 64)) == 18446744071562067969);
- assert(unsigned(zero_extend(0xffffffff94837260, 64)) == 18446744071906226784);
- assert(unsigned(zero_extend(0xffffffffdfffffff, 64)) == 18446744073172680703);
- assert(unsigned(zero_extend(0xffffffffed9fff7f, 64)) == 18446744073401270143);
- assert(unsigned(zero_extend(0xffffffffedafff8f, 64)) == 18446744073402318735);
- assert(unsigned(zero_extend(0xfffffffff0000000, 64)) == 18446744073441116160);
- assert(unsigned(zero_extend(0xfffffffff7ffffff, 64)) == 18446744073575333887);
- assert(unsigned(zero_extend(0xfffffffff89b3cde, 64)) == 18446744073585507550);
- assert(unsigned(zero_extend(0xfffffffffcefffff, 64)) == 18446744073658171391);
- assert(unsigned(zero_extend(0xfffffffffcf0ffff, 64)) == 18446744073658236927);
- assert(unsigned(zero_extend(0xfffffffffcf1ffff, 64)) == 18446744073658302463);
- assert(unsigned(zero_extend(0xfffffffffcf84000, 64)) == 18446744073658712064);
- assert(unsigned(zero_extend(0xfffffffffcf94000, 64)) == 18446744073658777600);
- assert(unsigned(zero_extend(0xfffffffffcfa4000, 64)) == 18446744073658843136);
- assert(unsigned(zero_extend(0xfffffffffd0affff, 64)) == 18446744073659940863);
- assert(unsigned(zero_extend(0xfffffffffd0bffff, 64)) == 18446744073660006399);
- assert(unsigned(zero_extend(0xfffffffffd0fc000, 64)) == 18446744073660252160);
- assert(unsigned(zero_extend(0xfffffffffd10c000, 64)) == 18446744073660317696);
- assert(unsigned(zero_extend(0xfffffffffd10ffff, 64)) == 18446744073660334079);
- assert(unsigned(zero_extend(0xfffffffffd11ffff, 64)) == 18446744073660399615);
- assert(unsigned(zero_extend(0xfffffffffd130000, 64)) == 18446744073660465152);
- assert(unsigned(zero_extend(0xfffffffffd13ffff, 64)) == 18446744073660530687);
- assert(unsigned(zero_extend(0xfffffffffd140000, 64)) == 18446744073660530688);
- assert(unsigned(zero_extend(0xfffffffffd14ffff, 64)) == 18446744073660596223);
- assert(unsigned(zero_extend(0xfffffffffd170000, 64)) == 18446744073660727296);
- assert(unsigned(zero_extend(0xfffffffffd18c000, 64)) == 18446744073660841984);
- assert(unsigned(zero_extend(0xfffffffffebffe6b, 64)) == 18446744073688579691);
- assert(unsigned(zero_extend(0xfffffffffebffefb, 64)) == 18446744073688579835);
- assert(unsigned(zero_extend(0xfffffffffebfff3b, 64)) == 18446744073688579899);
- assert(unsigned(zero_extend(0xffffffffffefffff, 64)) == 18446744073708503039);
+ assert(unsigned(zero_extend(0xffffffff00000000, 64)) == int_of_string("18446744069414584320"));
+ assert(unsigned(zero_extend(0xffffffff00000001, 64)) == int_of_string("18446744069414584321"));
+ assert(unsigned(zero_extend(0xffffffff00007fff, 64)) == int_of_string("18446744069414617087"));
+ assert(unsigned(zero_extend(0xffffffff3ff01000, 64)) == int_of_string("18446744070487281664"));
+ assert(unsigned(zero_extend(0xffffffff5fffffff, 64)) == int_of_string("18446744071025197055"));
+ assert(unsigned(zero_extend(0xffffffff6dcba985, 64)) == int_of_string("18446744071256648069"));
+ assert(unsigned(zero_extend(0xffffffff6ffffffe, 64)) == int_of_string("18446744071293632510"));
+ assert(unsigned(zero_extend(0xffffffff6fffffff, 64)) == int_of_string("18446744071293632511"));
+ assert(unsigned(zero_extend(0xffffffff77665544, 64)) == int_of_string("18446744071417779524"));
+ assert(unsigned(zero_extend(0xffffffff7ffefffc, 64)) == int_of_string("18446744071562002428"));
+ assert(unsigned(zero_extend(0xffffffff7ffffffc, 64)) == int_of_string("18446744071562067964"));
+ assert(unsigned(zero_extend(0xffffffff7ffffffd, 64)) == int_of_string("18446744071562067965"));
+ assert(unsigned(zero_extend(0xffffffff7ffffffe, 64)) == int_of_string("18446744071562067966"));
+ assert(unsigned(zero_extend(0xffffffff7fffffff, 64)) == int_of_string("18446744071562067967"));
+ assert(unsigned(zero_extend(0xffffffff80000000, 64)) == int_of_string("18446744071562067968"));
+ assert(unsigned(zero_extend(0xffffffff80000001, 64)) == int_of_string("18446744071562067969"));
+ assert(unsigned(zero_extend(0xffffffff94837260, 64)) == int_of_string("18446744071906226784"));
+ assert(unsigned(zero_extend(0xffffffffdfffffff, 64)) == int_of_string("18446744073172680703"));
+ assert(unsigned(zero_extend(0xffffffffed9fff7f, 64)) == int_of_string("18446744073401270143"));
+ assert(unsigned(zero_extend(0xffffffffedafff8f, 64)) == int_of_string("18446744073402318735"));
+ assert(unsigned(zero_extend(0xfffffffff0000000, 64)) == int_of_string("18446744073441116160"));
+ assert(unsigned(zero_extend(0xfffffffff7ffffff, 64)) == int_of_string("18446744073575333887"));
+ assert(unsigned(zero_extend(0xfffffffff89b3cde, 64)) == int_of_string("18446744073585507550"));
+ assert(unsigned(zero_extend(0xfffffffffcefffff, 64)) == int_of_string("18446744073658171391"));
+ assert(unsigned(zero_extend(0xfffffffffcf0ffff, 64)) == int_of_string("18446744073658236927"));
+ assert(unsigned(zero_extend(0xfffffffffcf1ffff, 64)) == int_of_string("18446744073658302463"));
+ assert(unsigned(zero_extend(0xfffffffffcf84000, 64)) == int_of_string("18446744073658712064"));
+ assert(unsigned(zero_extend(0xfffffffffcf94000, 64)) == int_of_string("18446744073658777600"));
+ assert(unsigned(zero_extend(0xfffffffffcfa4000, 64)) == int_of_string("18446744073658843136"));
+ assert(unsigned(zero_extend(0xfffffffffd0affff, 64)) == int_of_string("18446744073659940863"));
+ assert(unsigned(zero_extend(0xfffffffffd0bffff, 64)) == int_of_string("18446744073660006399"));
+ assert(unsigned(zero_extend(0xfffffffffd0fc000, 64)) == int_of_string("18446744073660252160"));
+ assert(unsigned(zero_extend(0xfffffffffd10c000, 64)) == int_of_string("18446744073660317696"));
+ assert(unsigned(zero_extend(0xfffffffffd10ffff, 64)) == int_of_string("18446744073660334079"));
+ assert(unsigned(zero_extend(0xfffffffffd11ffff, 64)) == int_of_string("18446744073660399615"));
+ assert(unsigned(zero_extend(0xfffffffffd130000, 64)) == int_of_string("18446744073660465152"));
+ assert(unsigned(zero_extend(0xfffffffffd13ffff, 64)) == int_of_string("18446744073660530687"));
+ assert(unsigned(zero_extend(0xfffffffffd140000, 64)) == int_of_string("18446744073660530688"));
+ assert(unsigned(zero_extend(0xfffffffffd14ffff, 64)) == int_of_string("18446744073660596223"));
+ assert(unsigned(zero_extend(0xfffffffffd170000, 64)) == int_of_string("18446744073660727296"));
+ assert(unsigned(zero_extend(0xfffffffffd18c000, 64)) == int_of_string("18446744073660841984"));
+ assert(unsigned(zero_extend(0xfffffffffebffe6b, 64)) == int_of_string("18446744073688579691"));
+ assert(unsigned(zero_extend(0xfffffffffebffefb, 64)) == int_of_string("18446744073688579835"));
+ assert(unsigned(zero_extend(0xfffffffffebfff3b, 64)) == int_of_string("18446744073688579899"));
+ assert(unsigned(zero_extend(0xffffffffffefffff, 64)) == int_of_string("18446744073708503039"));
assert(unsigned(zero_extend(0xfffffffffff, 64)) == 17592186044415);
- assert(unsigned(zero_extend(0xfffffffffff70000, 64)) == 18446744073708961792);
- assert(unsigned(zero_extend(0xfffffffffff80000, 64)) == 18446744073709027328);
- assert(unsigned(zero_extend(0xfffffffffffc0000, 64)) == 18446744073709289472);
- assert(unsigned(zero_extend(0xfffffffffffd0000, 64)) == 18446744073709355008);
- assert(unsigned(zero_extend(0xfffffffffffdffff, 64)) == 18446744073709420543);
- assert(unsigned(zero_extend(0xfffffffffffe0000, 64)) == 18446744073709420544);
- assert(unsigned(zero_extend(0xfffffffffffe7ffe, 64)) == 18446744073709453310);
- assert(unsigned(zero_extend(0xfffffffffffe7fff, 64)) == 18446744073709453311);
- assert(unsigned(zero_extend(0xfffffffffffe8000, 64)) == 18446744073709453312);
- assert(unsigned(zero_extend(0xfffffffffffe8001, 64)) == 18446744073709453313);
- assert(unsigned(zero_extend(0xfffffffffffefffe, 64)) == 18446744073709486078);
- assert(unsigned(zero_extend(0xfffffffffffeffff, 64)) == 18446744073709486079);
- assert(unsigned(zero_extend(0xffffffffffff0000, 64)) == 18446744073709486080);
- assert(unsigned(zero_extend(0xffffffffffff0001, 64)) == 18446744073709486081);
- assert(unsigned(zero_extend(0xffffffffffff641f, 64)) == 18446744073709511711);
- assert(unsigned(zero_extend(0xffffffffffff7ffc, 64)) == 18446744073709518844);
- assert(unsigned(zero_extend(0xffffffffffff7ffd, 64)) == 18446744073709518845);
- assert(unsigned(zero_extend(0xffffffffffff7ffe, 64)) == 18446744073709518846);
- assert(unsigned(zero_extend(0xffffffffffff7fff, 64)) == 18446744073709518847);
- assert(unsigned(zero_extend(0xffffffffffff8000, 64)) == 18446744073709518848);
- assert(unsigned(zero_extend(0xffffffffffff8001, 64)) == 18446744073709518849);
- assert(unsigned(zero_extend(0xffffffffffffbeff, 64)) == 18446744073709534975);
- assert(unsigned(zero_extend(0xffffffffffffbf40, 64)) == 18446744073709535040);
- assert(unsigned(zero_extend(0xffffffffffffbf41, 64)) == 18446744073709535041);
- assert(unsigned(zero_extend(0xffffffffffffbfff, 64)) == 18446744073709535231);
- assert(unsigned(zero_extend(0xffffffffffffc22c, 64)) == 18446744073709535788);
- assert(unsigned(zero_extend(0xffffffffffffc24c, 64)) == 18446744073709535820);
- assert(unsigned(zero_extend(0xffffffffffffd220, 64)) == 18446744073709539872);
- assert(unsigned(zero_extend(0xffffffffffffd23c, 64)) == 18446744073709539900);
- assert(unsigned(zero_extend(0xffffffffffffe220, 64)) == 18446744073709543968);
- assert(unsigned(zero_extend(0xffffffffffffe23c, 64)) == 18446744073709543996);
- assert(unsigned(zero_extend(0xffffffffffffefff, 64)) == 18446744073709547519);
- assert(unsigned(zero_extend(0xfffffffffffffeef, 64)) == 18446744073709551343);
- assert(unsigned(zero_extend(0xfffffffffffffeff, 64)) == 18446744073709551359);
- assert(unsigned(zero_extend(0xffffffffffffff1f, 64)) == 18446744073709551391);
- assert(unsigned(zero_extend(0xffffffffffffff2f, 64)) == 18446744073709551407);
- assert(unsigned(zero_extend(0xffffffffffffff4f, 64)) == 18446744073709551439);
- assert(unsigned(zero_extend(0xffffffffffffff6f, 64)) == 18446744073709551471);
- assert(unsigned(zero_extend(0xffffffffffffff80, 64)) == 18446744073709551488);
- assert(unsigned(zero_extend(0xffffffffffffff8f, 64)) == 18446744073709551503);
- assert(unsigned(zero_extend(0xffffffffffffff9f, 64)) == 18446744073709551519);
- assert(unsigned(zero_extend(0xffffffffffffffa0, 64)) == 18446744073709551520);
- assert(unsigned(zero_extend(0xffffffffffffffaf, 64)) == 18446744073709551535);
- assert(unsigned(zero_extend(0xffffffffffffffbf, 64)) == 18446744073709551551);
- assert(unsigned(zero_extend(0xffffffffffffffc2, 64)) == 18446744073709551554);
- assert(unsigned(zero_extend(0xffffffffffffffc4, 64)) == 18446744073709551556);
- assert(unsigned(zero_extend(0xffffffffffffffc6, 64)) == 18446744073709551558);
- assert(unsigned(zero_extend(0xffffffffffffffc8, 64)) == 18446744073709551560);
- assert(unsigned(zero_extend(0xffffffffffffffca, 64)) == 18446744073709551562);
- assert(unsigned(zero_extend(0xffffffffffffffcc, 64)) == 18446744073709551564);
- assert(unsigned(zero_extend(0xffffffffffffffce, 64)) == 18446744073709551566);
- assert(unsigned(zero_extend(0xffffffffffffffcf, 64)) == 18446744073709551567);
- assert(unsigned(zero_extend(0xffffffffffffffd0, 64)) == 18446744073709551568);
- assert(unsigned(zero_extend(0xffffffffffffffd2, 64)) == 18446744073709551570);
- assert(unsigned(zero_extend(0xffffffffffffffd4, 64)) == 18446744073709551572);
- assert(unsigned(zero_extend(0xffffffffffffffd6, 64)) == 18446744073709551574);
- assert(unsigned(zero_extend(0xffffffffffffffd8, 64)) == 18446744073709551576);
- assert(unsigned(zero_extend(0xffffffffffffffda, 64)) == 18446744073709551578);
- assert(unsigned(zero_extend(0xffffffffffffffdb, 64)) == 18446744073709551579);
- assert(unsigned(zero_extend(0xffffffffffffffdc, 64)) == 18446744073709551580);
- assert(unsigned(zero_extend(0xffffffffffffffde, 64)) == 18446744073709551582);
- assert(unsigned(zero_extend(0xffffffffffffffdf, 64)) == 18446744073709551583);
- assert(unsigned(zero_extend(0xffffffffffffffe0, 64)) == 18446744073709551584);
- assert(unsigned(zero_extend(0xffffffffffffffe2, 64)) == 18446744073709551586);
- assert(unsigned(zero_extend(0xffffffffffffffe4, 64)) == 18446744073709551588);
- assert(unsigned(zero_extend(0xffffffffffffffe6, 64)) == 18446744073709551590);
- assert(unsigned(zero_extend(0xffffffffffffffe7, 64)) == 18446744073709551591);
- assert(unsigned(zero_extend(0xffffffffffffffe8, 64)) == 18446744073709551592);
- assert(unsigned(zero_extend(0xffffffffffffffea, 64)) == 18446744073709551594);
- assert(unsigned(zero_extend(0xffffffffffffffec, 64)) == 18446744073709551596);
- assert(unsigned(zero_extend(0xffffffffffffffee, 64)) == 18446744073709551598);
- assert(unsigned(zero_extend(0xffffffffffffffef, 64)) == 18446744073709551599);
+ assert(unsigned(zero_extend(0xfffffffffff70000, 64)) == int_of_string("18446744073708961792"));
+ assert(unsigned(zero_extend(0xfffffffffff80000, 64)) == int_of_string("18446744073709027328"));
+ assert(unsigned(zero_extend(0xfffffffffffc0000, 64)) == int_of_string("18446744073709289472"));
+ assert(unsigned(zero_extend(0xfffffffffffd0000, 64)) == int_of_string("18446744073709355008"));
+ assert(unsigned(zero_extend(0xfffffffffffdffff, 64)) == int_of_string("18446744073709420543"));
+ assert(unsigned(zero_extend(0xfffffffffffe0000, 64)) == int_of_string("18446744073709420544"));
+ assert(unsigned(zero_extend(0xfffffffffffe7ffe, 64)) == int_of_string("18446744073709453310"));
+ assert(unsigned(zero_extend(0xfffffffffffe7fff, 64)) == int_of_string("18446744073709453311"));
+ assert(unsigned(zero_extend(0xfffffffffffe8000, 64)) == int_of_string("18446744073709453312"));
+ assert(unsigned(zero_extend(0xfffffffffffe8001, 64)) == int_of_string("18446744073709453313"));
+ assert(unsigned(zero_extend(0xfffffffffffefffe, 64)) == int_of_string("18446744073709486078"));
+ assert(unsigned(zero_extend(0xfffffffffffeffff, 64)) == int_of_string("18446744073709486079"));
+ assert(unsigned(zero_extend(0xffffffffffff0000, 64)) == int_of_string("18446744073709486080"));
+ assert(unsigned(zero_extend(0xffffffffffff0001, 64)) == int_of_string("18446744073709486081"));
+ assert(unsigned(zero_extend(0xffffffffffff641f, 64)) == int_of_string("18446744073709511711"));
+ assert(unsigned(zero_extend(0xffffffffffff7ffc, 64)) == int_of_string("18446744073709518844"));
+ assert(unsigned(zero_extend(0xffffffffffff7ffd, 64)) == int_of_string("18446744073709518845"));
+ assert(unsigned(zero_extend(0xffffffffffff7ffe, 64)) == int_of_string("18446744073709518846"));
+ assert(unsigned(zero_extend(0xffffffffffff7fff, 64)) == int_of_string("18446744073709518847"));
+ assert(unsigned(zero_extend(0xffffffffffff8000, 64)) == int_of_string("18446744073709518848"));
+ assert(unsigned(zero_extend(0xffffffffffff8001, 64)) == int_of_string("18446744073709518849"));
+ assert(unsigned(zero_extend(0xffffffffffffbeff, 64)) == int_of_string("18446744073709534975"));
+ assert(unsigned(zero_extend(0xffffffffffffbf40, 64)) == int_of_string("18446744073709535040"));
+ assert(unsigned(zero_extend(0xffffffffffffbf41, 64)) == int_of_string("18446744073709535041"));
+ assert(unsigned(zero_extend(0xffffffffffffbfff, 64)) == int_of_string("18446744073709535231"));
+ assert(unsigned(zero_extend(0xffffffffffffc22c, 64)) == int_of_string("18446744073709535788"));
+ assert(unsigned(zero_extend(0xffffffffffffc24c, 64)) == int_of_string("18446744073709535820"));
+ assert(unsigned(zero_extend(0xffffffffffffd220, 64)) == int_of_string("18446744073709539872"));
+ assert(unsigned(zero_extend(0xffffffffffffd23c, 64)) == int_of_string("18446744073709539900"));
+ assert(unsigned(zero_extend(0xffffffffffffe220, 64)) == int_of_string("18446744073709543968"));
+ assert(unsigned(zero_extend(0xffffffffffffe23c, 64)) == int_of_string("18446744073709543996"));
+ assert(unsigned(zero_extend(0xffffffffffffefff, 64)) == int_of_string("18446744073709547519"));
+ assert(unsigned(zero_extend(0xfffffffffffffeef, 64)) == int_of_string("18446744073709551343"));
+ assert(unsigned(zero_extend(0xfffffffffffffeff, 64)) == int_of_string("18446744073709551359"));
+ assert(unsigned(zero_extend(0xffffffffffffff1f, 64)) == int_of_string("18446744073709551391"));
+ assert(unsigned(zero_extend(0xffffffffffffff2f, 64)) == int_of_string("18446744073709551407"));
+ assert(unsigned(zero_extend(0xffffffffffffff4f, 64)) == int_of_string("18446744073709551439"));
+ assert(unsigned(zero_extend(0xffffffffffffff6f, 64)) == int_of_string("18446744073709551471"));
+ assert(unsigned(zero_extend(0xffffffffffffff80, 64)) == int_of_string("18446744073709551488"));
+ assert(unsigned(zero_extend(0xffffffffffffff8f, 64)) == int_of_string("18446744073709551503"));
+ assert(unsigned(zero_extend(0xffffffffffffff9f, 64)) == int_of_string("18446744073709551519"));
+ assert(unsigned(zero_extend(0xffffffffffffffa0, 64)) == int_of_string("18446744073709551520"));
+ assert(unsigned(zero_extend(0xffffffffffffffaf, 64)) == int_of_string("18446744073709551535"));
+ assert(unsigned(zero_extend(0xffffffffffffffbf, 64)) == int_of_string("18446744073709551551"));
+ assert(unsigned(zero_extend(0xffffffffffffffc2, 64)) == int_of_string("18446744073709551554"));
+ assert(unsigned(zero_extend(0xffffffffffffffc4, 64)) == int_of_string("18446744073709551556"));
+ assert(unsigned(zero_extend(0xffffffffffffffc6, 64)) == int_of_string("18446744073709551558"));
+ assert(unsigned(zero_extend(0xffffffffffffffc8, 64)) == int_of_string("18446744073709551560"));
+ assert(unsigned(zero_extend(0xffffffffffffffca, 64)) == int_of_string("18446744073709551562"));
+ assert(unsigned(zero_extend(0xffffffffffffffcc, 64)) == int_of_string("18446744073709551564"));
+ assert(unsigned(zero_extend(0xffffffffffffffce, 64)) == int_of_string("18446744073709551566"));
+ assert(unsigned(zero_extend(0xffffffffffffffcf, 64)) == int_of_string("18446744073709551567"));
+ assert(unsigned(zero_extend(0xffffffffffffffd0, 64)) == int_of_string("18446744073709551568"));
+ assert(unsigned(zero_extend(0xffffffffffffffd2, 64)) == int_of_string("18446744073709551570"));
+ assert(unsigned(zero_extend(0xffffffffffffffd4, 64)) == int_of_string("18446744073709551572"));
+ assert(unsigned(zero_extend(0xffffffffffffffd6, 64)) == int_of_string("18446744073709551574"));
+ assert(unsigned(zero_extend(0xffffffffffffffd8, 64)) == int_of_string("18446744073709551576"));
+ assert(unsigned(zero_extend(0xffffffffffffffda, 64)) == int_of_string("18446744073709551578"));
+ assert(unsigned(zero_extend(0xffffffffffffffdb, 64)) == int_of_string("18446744073709551579"));
+ assert(unsigned(zero_extend(0xffffffffffffffdc, 64)) == int_of_string("18446744073709551580"));
+ assert(unsigned(zero_extend(0xffffffffffffffde, 64)) == int_of_string("18446744073709551582"));
+ assert(unsigned(zero_extend(0xffffffffffffffdf, 64)) == int_of_string("18446744073709551583"));
+ assert(unsigned(zero_extend(0xffffffffffffffe0, 64)) == int_of_string("18446744073709551584"));
+ assert(unsigned(zero_extend(0xffffffffffffffe2, 64)) == int_of_string("18446744073709551586"));
+ assert(unsigned(zero_extend(0xffffffffffffffe4, 64)) == int_of_string("18446744073709551588"));
+ assert(unsigned(zero_extend(0xffffffffffffffe6, 64)) == int_of_string("18446744073709551590"));
+ assert(unsigned(zero_extend(0xffffffffffffffe7, 64)) == int_of_string("18446744073709551591"));
+ assert(unsigned(zero_extend(0xffffffffffffffe8, 64)) == int_of_string("18446744073709551592"));
+ assert(unsigned(zero_extend(0xffffffffffffffea, 64)) == int_of_string("18446744073709551594"));
+ assert(unsigned(zero_extend(0xffffffffffffffec, 64)) == int_of_string("18446744073709551596"));
+ assert(unsigned(zero_extend(0xffffffffffffffee, 64)) == int_of_string("18446744073709551598"));
+ assert(unsigned(zero_extend(0xffffffffffffffef, 64)) == int_of_string("18446744073709551599"));
assert(unsigned(zero_extend(0xfffffffffffffff, 64)) == 1152921504606846975);
- assert(unsigned(zero_extend(0xfffffffffffffff0, 64)) == 18446744073709551600);
- assert(unsigned(zero_extend(0xfffffffffffffff1, 64)) == 18446744073709551601);
- assert(unsigned(zero_extend(0xfffffffffffffff2, 64)) == 18446744073709551602);
- assert(unsigned(zero_extend(0xfffffffffffffff4, 64)) == 18446744073709551604);
- assert(unsigned(zero_extend(0xfffffffffffffff6, 64)) == 18446744073709551606);
- assert(unsigned(zero_extend(0xfffffffffffffff7, 64)) == 18446744073709551607);
- assert(unsigned(zero_extend(0xfffffffffffffff8, 64)) == 18446744073709551608);
- assert(unsigned(zero_extend(0xfffffffffffffffa, 64)) == 18446744073709551610);
- assert(unsigned(zero_extend(0xfffffffffffffffb, 64)) == 18446744073709551611);
- assert(unsigned(zero_extend(0xfffffffffffffffc, 64)) == 18446744073709551612);
- assert(unsigned(zero_extend(0xfffffffffffffffd, 64)) == 18446744073709551613);
- assert(unsigned(zero_extend(0xfffffffffffffffe, 64)) == 18446744073709551614);
- assert(unsigned(zero_extend(0xffffffffffffffff, 64)) == 18446744073709551615);
+ assert(unsigned(zero_extend(0xfffffffffffffff0, 64)) == int_of_string("18446744073709551600"));
+ assert(unsigned(zero_extend(0xfffffffffffffff1, 64)) == int_of_string("18446744073709551601"));
+ assert(unsigned(zero_extend(0xfffffffffffffff2, 64)) == int_of_string("18446744073709551602"));
+ assert(unsigned(zero_extend(0xfffffffffffffff4, 64)) == int_of_string("18446744073709551604"));
+ assert(unsigned(zero_extend(0xfffffffffffffff6, 64)) == int_of_string("18446744073709551606"));
+ assert(unsigned(zero_extend(0xfffffffffffffff7, 64)) == int_of_string("18446744073709551607"));
+ assert(unsigned(zero_extend(0xfffffffffffffff8, 64)) == int_of_string("18446744073709551608"));
+ assert(unsigned(zero_extend(0xfffffffffffffffa, 64)) == int_of_string("18446744073709551610"));
+ assert(unsigned(zero_extend(0xfffffffffffffffb, 64)) == int_of_string("18446744073709551611"));
+ assert(unsigned(zero_extend(0xfffffffffffffffc, 64)) == int_of_string("18446744073709551612"));
+ assert(unsigned(zero_extend(0xfffffffffffffffd, 64)) == int_of_string("18446744073709551613"));
+ assert(unsigned(zero_extend(0xfffffffffffffffe, 64)) == int_of_string("18446744073709551614"));
+ assert(unsigned(zero_extend(0xffffffffffffffff, 64)) == int_of_string("18446744073709551615"));
assert(unsigned(zero_extend(0x0, 8)) == 0);
assert(unsigned(zero_extend(0x20, 8)) == 32);
assert(unsigned(zero_extend(0x28, 8)) == 40);
diff --git a/test/isabelle/Aarch64_code.thy b/test/isabelle/Aarch64_code.thy
new file mode 100644
index 00000000..05e5bb2e
--- /dev/null
+++ b/test/isabelle/Aarch64_code.thy
@@ -0,0 +1,61 @@
+theory Aarch64_code
+ imports
+ Aarch64_lemmas
+ "HOL-Library.Code_Char"
+ "HOL-Library.Code_Target_Nat"
+ "HOL-Library.Code_Target_Int"
+ "HOL-Library.Code_Real_Approx_By_Float"
+begin
+
+declare [[code abort: failwith]]
+
+termination shl_int by lexicographic_order
+termination while sorry
+termination whileM sorry
+termination untilM sorry
+
+declare insert_code[code del]
+declare union_coset_filter[code del]
+
+lemma [code]: "(set xs) \<union> (set ys) = set (xs @ ys)"
+ by auto
+
+lemma [code]: "insert x (set xs) = set (x # xs)"
+ by auto
+
+declare [[code drop:
+ "less :: real \<Rightarrow> real \<Rightarrow> bool"
+ "less_eq :: real \<Rightarrow> real \<Rightarrow> bool"
+ "floor :: real \<Rightarrow> int"]]
+
+code_printing constant "floor :: real \<Rightarrow> int" \<rightharpoonup> (OCaml) "(Int'_of'_integer (Big'_int.big'_int'_of'_int (Pervasives.int'_of'_float (Pervasives.floor _))))"
+
+code_identifier constant ASR \<rightharpoonup> (OCaml) "Aarch64.asr0"
+code_identifier constant LSL \<rightharpoonup> (OCaml) "Aarch64.lsl0"
+code_identifier constant LSR \<rightharpoonup> (OCaml) "Aarch64.lsr0"
+
+fun prerr_endline' :: "String.literal \<Rightarrow> unit" where "prerr_endline' _ = ()"
+lemma [code]: "prerr_endline s = prerr_endline' (String.implode s)" by auto
+
+fun putchar' :: "char \<Rightarrow> unit" where "putchar' _ = ()"
+lemma [code]: "putchar c = putchar' (char_of_nat (nat c))" by auto
+
+code_identifier code_module List \<rightharpoonup> (OCaml) "List0"
+code_printing constant String.implode \<rightharpoonup> (OCaml) "!(let l = _ in let res = Bytes.create (List.length l) in let rec imp i = function | [] -> res | c :: l -> Bytes.set res i c; imp (i + 1) l in imp 0 l)"
+
+code_printing constant prerr_endline' \<rightharpoonup> (OCaml) "Pervasives.prerr'_endline"
+code_printing constant putchar' \<rightharpoonup> (OCaml) "Pervasives.print'_char"
+
+fun write_char_mem :: "int \<Rightarrow> char \<Rightarrow> (regstate, unit, exception) monadS" where
+ "write_char_mem addr c =
+ bindS (write_mem_eaS BC_bitU_list Write_plain (bits_of_int 64 addr) 1) (\<lambda>_.
+ bindS (write_mem_valS BC_bitU_list (bits_of_nat 8 (nat_of_char c))) (\<lambda>_.
+ returnS ()))"
+
+definition "initial_state \<equiv> init_state initial_regstate (\<lambda>seed. (False, seed)) 0"
+
+code_printing constant elf_entry \<rightharpoonup> (OCaml) "(Int'_of'_integer (Elf'_loader.elf'_entry _))"
+termination BigEndianReverse sorry
+export_code main initial_state liftState get_regval set_regval bindS returnS iteriS iterS write_char_mem integer_of_int int_of_integer "op + :: int \<Rightarrow> int \<Rightarrow> int" prerr_results in OCaml module_name "Aarch64" file "aarch64_export.ml"
+
+end
diff --git a/test/isabelle/Cheri_code.thy b/test/isabelle/Cheri_code.thy
new file mode 100644
index 00000000..cfd01413
--- /dev/null
+++ b/test/isabelle/Cheri_code.thy
@@ -0,0 +1,62 @@
+theory Cheri_code
+ imports Cheri_lemmas "HOL-Library.Code_Char" "HOL-Library.Code_Target_Nat" "HOL-Library.Code_Target_Int"
+begin
+
+declare [[code abort: failwith]]
+
+code_datatype
+ DADDIU DADDU DADDI DADD ADD ADDI ADDU ADDIU DSUBU DSUB SUB SUBU AND0 ANDI OR0
+ ORI NOR XOR0 XORI LUI DSLL DSLL32 DSLLV DSRA DSRA32 DSRAV DSRL DSRL32 DSRLV SLL
+ SLLV SRA SRAV SRL SRLV SLT SLTI SLTU SLTIU MOVN MOVZ MFHI MFLO MTHI MTLO MUL
+ MULT MULTU DMULT DMULTU MADD MADDU MSUB MSUBU DIV DIVU DDIV DDIVU J JAL JR JALR
+ BEQ BCMPZ SYSCALL_THREAD_START ImplementationDefinedStopFetching SYSCALL BREAK
+ WAIT TRAPREG TRAPIMM Load Store LWL LWR SWL SWR LDL LDR SDL SDR CACHE PREF SYNC
+ MFC0 HCF MTC0 TLBWI TLBWR TLBR TLBP RDHWR ERET CGetPerm CGetType CGetBase
+ CGetLen CGetTag CGetSealed CGetOffset CGetPCC CGetPCCSetOffset CGetCause
+ CSetCause CReadHwr CWriteHwr CAndPerm CToPtr CSub CPtrCmp CIncOffset
+ CIncOffsetImmediate CSetOffset CSetBounds CSetBoundsImmediate CSetBoundsExact
+ CClearTag CMOVX ClearRegs CFromPtr CBuildCap CCopyType CCheckPerm CCheckType
+ CTestSubset CSeal CCSeal CUnseal CCall CReturn CBX CBZ CJALR CLoad CStore CSC
+ CLC C2Dump RI CGetAddr
+
+termination whileM sorry
+
+fun prerr_endline' :: "String.literal \<Rightarrow> unit" where "prerr_endline' _ = ()"
+lemma [code]: "prerr_endline s = prerr_endline' (String.implode s)" by auto
+
+fun putchar' :: "char \<Rightarrow> unit" where "putchar' _ = ()"
+lemma [code]: "putchar c = putchar' (char_of_nat (nat c))" by auto
+
+code_identifier code_module List \<rightharpoonup> (OCaml) "List0"
+code_printing constant String.implode \<rightharpoonup> (OCaml) "!(let l = _ in let res = Bytes.create (List.length l) in let rec imp i = function | [] -> res | c :: l -> Bytes.set res i c; imp (i + 1) l in imp 0 l)"
+
+code_printing constant prerr_endline' \<rightharpoonup> (OCaml) "Pervasives.prerr'_endline"
+code_printing constant putchar' \<rightharpoonup> (OCaml) "Pervasives.print'_char"
+
+declare insert_code[code del]
+declare union_coset_filter[code del]
+
+lemma set_union_append[code]: "(set xs) \<union> (set ys) = set (xs @ ys)"
+ by auto
+
+lemma set_insert_Cons[code]: "insert x (set xs) = set (x # xs)"
+ by auto
+
+declare ast.case[code]
+
+fun write_char_mem :: "int \<Rightarrow> char \<Rightarrow> (regstate, unit, exception) monadS" where
+ "write_char_mem addr c =
+ bindS (write_mem_eaS BC_bitU_list Write_plain (bits_of_int 64 addr) 1) (\<lambda>_.
+ bindS (write_mem_valS BC_bitU_list (bits_of_nat 8 (nat_of_char c))) (\<lambda>_.
+ returnS ()))"
+
+definition "initial_state \<equiv> init_state initial_regstate (\<lambda>seed. (False, seed)) 0"
+
+code_printing constant elf_entry \<rightharpoonup> (OCaml) "(Arith.Int'_of'_integer (Elf'_loader.elf'_entry _))"
+code_printing constant get_time_ns \<rightharpoonup> (OCaml) "(Arith.Int'_of'_integer (Big'_int.big'_int'_of'_int (Pervasives.int'_of'_float (1e9 *. Unix.gettimeofday _))))"
+
+export_code main initial_state liftState get_regval set_regval bindS returnS iteriS iterS
+ write_char_mem integer_of_int int_of_integer "op + :: int \<Rightarrow> int \<Rightarrow> int" prerr_results
+ in OCaml file "cheri_export.ml"
+
+end
diff --git a/test/isabelle/Makefile b/test/isabelle/Makefile
new file mode 100644
index 00000000..43028fed
--- /dev/null
+++ b/test/isabelle/Makefile
@@ -0,0 +1,27 @@
+CHERI_DIR = ../../cheri
+AARCH64_DIR = ../../aarch64
+TGTS = run_cheri.native run_aarch64.native
+SESSION_DIRS = -d $(CHERI_DIR) -d $(AARCH64_DIR) -d .
+
+.PHONY: all clean
+
+all: $(TGTS)
+
+%.native: %.ml elf_loader.ml
+ ocamlbuild -use-ocamlfind -pkg lem -pkg linksem -pkg num -pkg unix $@
+
+run_cheri.native: cheri_export.ml
+run_aarch64.native: aarch64_export.ml
+
+cheri_export.ml: Cheri_code.thy
+ make -C $(CHERI_DIR) Cheri.thy
+ isabelle build -c $(SESSION_DIRS) Sail-CHERI-Code
+
+aarch64_export.ml: Aarch64_code.thy
+ make -C $(AARCH64_DIR) Aarch64.thy
+ isabelle build -c $(SESSION_DIRS) Sail-AArch64-Code
+
+clean:
+ -ocamlbuild -clean
+ -rm -f cheri_export.ml
+ -rm -f aarch64_export.ml
diff --git a/test/isabelle/ROOT b/test/isabelle/ROOT
new file mode 100644
index 00000000..97544a58
--- /dev/null
+++ b/test/isabelle/ROOT
@@ -0,0 +1,9 @@
+session "Sail-CHERI-Code" = "Sail-CHERI" +
+ options [document = false, quick_and_dirty]
+ theories
+ Cheri_code
+
+session "Sail-AArch64-Code" = "Sail-AArch64" +
+ options [document = false, quick_and_dirty]
+ theories
+ Aarch64_code
diff --git a/test/isabelle/elf_loader.ml b/test/isabelle/elf_loader.ml
new file mode 100644
index 00000000..6ec89ee6
--- /dev/null
+++ b/test/isabelle/elf_loader.ml
@@ -0,0 +1,126 @@
+(**************************************************************************)
+(* Sail *)
+(* *)
+(* Copyright (c) 2013-2017 *)
+(* Kathyrn Gray *)
+(* Shaked Flur *)
+(* Stephen Kell *)
+(* Gabriel Kerneis *)
+(* Robert Norton-Wright *)
+(* Christopher Pulte *)
+(* Peter Sewell *)
+(* Alasdair Armstrong *)
+(* *)
+(* All rights reserved. *)
+(* *)
+(* This software was developed by the University of Cambridge Computer *)
+(* Laboratory as part of the Rigorous Engineering of Mainstream Systems *)
+(* (REMS) project, funded by EPSRC grant EP/K008528/1. *)
+(* *)
+(* Redistribution and use in source and binary forms, with or without *)
+(* modification, are permitted provided that the following conditions *)
+(* are met: *)
+(* 1. Redistributions of source code must retain the above copyright *)
+(* notice, this list of conditions and the following disclaimer. *)
+(* 2. Redistributions in binary form must reproduce the above copyright *)
+(* notice, this list of conditions and the following disclaimer in *)
+(* the documentation and/or other materials provided with the *)
+(* distribution. *)
+(* *)
+(* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' *)
+(* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *)
+(* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *)
+(* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR *)
+(* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *)
+(* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *)
+(* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF *)
+(* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *)
+(* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *)
+(* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT *)
+(* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF *)
+(* SUCH DAMAGE. *)
+(**************************************************************************)
+
+(*module Big_int = Nat_big_num*)
+
+let opt_elf_threads = ref 1
+let opt_elf_entry = ref Nat_big_num.zero
+let opt_elf_tohost = ref Nat_big_num.zero
+
+type word8 = int
+
+let escape_char c =
+ if int_of_char c <= 31 then '.'
+ else if int_of_char c >= 127 then '.'
+ else c
+
+let hex_line bs =
+ let hex_char i c =
+ (if i mod 2 == 0 && i <> 0 then " " else "") ^ Printf.sprintf "%02x" (int_of_char c)
+ in
+ String.concat "" (List.mapi hex_char bs) ^ " " ^ String.concat "" (List.map (fun c -> Printf.sprintf "%c" (escape_char c)) bs)
+
+let rec break n = function
+ | [] -> []
+ | (_ :: _ as xs) -> [Lem_list.take n xs] @ break n (Lem_list.drop n xs)
+
+let print_segment seg =
+ let bs = seg.Elf_interpreted_segment.elf64_segment_body in
+ prerr_endline "0011 2233 4455 6677 8899 aabb ccdd eeff 0123456789abcdef";
+ List.iter (fun bs -> prerr_endline (hex_line bs)) (break 16 (Byte_sequence.char_list_of_byte_sequence bs))
+
+let read name =
+ let info = Sail_interface.populate_and_obtain_global_symbol_init_info name in
+
+ prerr_endline "Elf read:";
+ let (elf_file, elf_epi, symbol_map) =
+ begin match info with
+ | Error.Fail s -> failwith (Printf.sprintf "populate_and_obtain_global_symbol_init_info: %s" s)
+ | Error.Success ((elf_file: Elf_file.elf_file),
+ (elf_epi: Sail_interface.executable_process_image),
+ (symbol_map: Elf_file.global_symbol_init_info))
+ ->
+ (* XXX disabled because it crashes if entry_point overflows an ocaml int :-(
+ prerr_endline (Sail_interface.string_of_executable_process_image elf_epi);*)
+ (elf_file, elf_epi, symbol_map)
+ end
+ in
+
+ prerr_endline "\nElf segments:";
+ let (segments, e_entry, e_machine) =
+ begin match elf_epi, elf_file with
+ | (Sail_interface.ELF_Class_32 _, _) -> failwith "cannot handle ELF_Class_32"
+ | (_, Elf_file.ELF_File_32 _) -> failwith "cannot handle ELF_File_32"
+ | (Sail_interface.ELF_Class_64 (segments, e_entry, e_machine), Elf_file.ELF_File_64 f1) ->
+ (* remove all the auto generated segments (they contain only 0s) *)
+ let segments =
+ Lem_list.mapMaybe
+ (fun (seg, prov) -> if prov = Elf_file.FromELF then Some seg else None)
+ segments
+ in
+ (segments, e_entry, e_machine)
+ end
+ in
+ (segments, e_entry, symbol_map)
+
+(*let write_sail_lib paddr i byte =
+ Sail_lib.wram (Nat_big_num.add paddr (Nat_big_num.of_int i)) byte*)
+
+let write_file chan paddr i byte =
+ output_string chan (Nat_big_num.to_string (Nat_big_num.add paddr (Nat_big_num.of_int i)) ^ "\n");
+ output_string chan (string_of_int byte ^ "\n")
+
+let load_elf name =
+ let segments, e_entry, symbol_map = read name in
+ opt_elf_entry := e_entry;
+ (if List.mem_assoc "tohost" symbol_map then
+ let (_, _, tohost_addr, _, _) = List.assoc "tohost" symbol_map in
+ opt_elf_tohost := tohost_addr);
+ (*List.iter (load_segment ~writer:writer) segments*)
+ segments
+
+(* The sail model can access this by externing a unit -> Big_int.t function
+ as Elf_loader.elf_entry. *)
+let elf_entry () = Big_int.big_int_of_string (Nat_big_num.to_string !opt_elf_entry)
+(* Used by RISCV sail model test harness for exiting test *)
+let elf_tohost () = Big_int.big_int_of_string (Nat_big_num.to_string !opt_elf_tohost)
diff --git a/test/isabelle/run_aarch64.ml b/test/isabelle/run_aarch64.ml
new file mode 100644
index 00000000..c6037866
--- /dev/null
+++ b/test/isabelle/run_aarch64.ml
@@ -0,0 +1,93 @@
+open Aarch64_export;;
+
+
+
+(**************************************************************************)
+(* Sail *)
+(* *)
+(* Copyright (c) 2013-2017 *)
+(* Kathyrn Gray *)
+(* Shaked Flur *)
+(* Stephen Kell *)
+(* Gabriel Kerneis *)
+(* Robert Norton-Wright *)
+(* Christopher Pulte *)
+(* Peter Sewell *)
+(* Alasdair Armstrong *)
+(* Brian Campbell *)
+(* Thomas Bauereiss *)
+(* Anthony Fox *)
+(* Jon French *)
+(* Dominic Mulligan *)
+(* Stephen Kell *)
+(* Mark Wassell *)
+(* *)
+(* All rights reserved. *)
+(* *)
+(* This software was developed by the University of Cambridge Computer *)
+(* Laboratory as part of the Rigorous Engineering of Mainstream Systems *)
+(* (REMS) project, funded by EPSRC grant EP/K008528/1. *)
+(* *)
+(* Redistribution and use in source and binary forms, with or without *)
+(* modification, are permitted provided that the following conditions *)
+(* are met: *)
+(* 1. Redistributions of source code must retain the above copyright *)
+(* notice, this list of conditions and the following disclaimer. *)
+(* 2. Redistributions in binary form must reproduce the above copyright *)
+(* notice, this list of conditions and the following disclaimer in *)
+(* the documentation and/or other materials provided with the *)
+(* distribution. *)
+(* *)
+(* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' *)
+(* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *)
+(* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *)
+(* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR *)
+(* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *)
+(* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *)
+(* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF *)
+(* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *)
+(* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *)
+(* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT *)
+(* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF *)
+(* SUCH DAMAGE. *)
+(**************************************************************************)
+
+open Elf_loader;;
+
+let opt_file_arguments = ref ([] : string list)
+
+let options = Arg.align []
+
+let usage_msg = "Sail OCaml RTS options:"
+
+let () =
+ Arg.parse options (fun s -> opt_file_arguments := !opt_file_arguments @ [s]) usage_msg
+
+let (>>) = Aarch64.bindS
+let liftS = Aarch64.liftState (Aarch64.get_regval, Aarch64.set_regval)
+
+let load_elf_segment seg =
+ let open Elf_interpreted_segment in
+ let open Aarch64_export in
+ let bs = seg.elf64_segment_body in
+ let paddr = Big_int.big_int_of_string (Nat_big_num.to_string seg.elf64_segment_paddr) in
+ let base = Big_int.big_int_of_string (Nat_big_num.to_string seg.elf64_segment_base) in
+ let offset = Big_int.big_int_of_string (Nat_big_num.to_string seg.elf64_segment_offset) in
+ let writer i byte = Aarch64.write_char_mem (Aarch64.plus_int (Aarch64.Int_of_integer paddr) i) byte in
+ prerr_endline "\nLoading Segment";
+ prerr_endline ("Segment offset: " ^ Big_int.string_of_big_int offset);
+ prerr_endline ("Segment base address: " ^ Big_int.string_of_big_int base);
+ prerr_endline ("Segment physical address: " ^ Big_int.string_of_big_int paddr);
+ print_segment seg;
+ Aarch64.iteriS writer (Byte_sequence.char_list_of_byte_sequence bs)
+
+let _ =
+ Random.self_init ();
+ let elf_segments = match !opt_file_arguments with
+ | f :: _ -> load_elf f
+ | _ -> []
+ in
+ Aarch64.prerr_results
+ (Aarch64.initial_state |>
+ (Aarch64.iterS load_elf_segment elf_segments >> (fun _ ->
+ liftS (Aarch64.main ()))));
diff --git a/test/isabelle/run_cheri.ml b/test/isabelle/run_cheri.ml
new file mode 100644
index 00000000..e6d752b7
--- /dev/null
+++ b/test/isabelle/run_cheri.ml
@@ -0,0 +1,92 @@
+open Cheri_export;;
+
+
+
+(**************************************************************************)
+(* Sail *)
+(* *)
+(* Copyright (c) 2013-2017 *)
+(* Kathyrn Gray *)
+(* Shaked Flur *)
+(* Stephen Kell *)
+(* Gabriel Kerneis *)
+(* Robert Norton-Wright *)
+(* Christopher Pulte *)
+(* Peter Sewell *)
+(* Alasdair Armstrong *)
+(* Brian Campbell *)
+(* Thomas Bauereiss *)
+(* Anthony Fox *)
+(* Jon French *)
+(* Dominic Mulligan *)
+(* Stephen Kell *)
+(* Mark Wassell *)
+(* *)
+(* All rights reserved. *)
+(* *)
+(* This software was developed by the University of Cambridge Computer *)
+(* Laboratory as part of the Rigorous Engineering of Mainstream Systems *)
+(* (REMS) project, funded by EPSRC grant EP/K008528/1. *)
+(* *)
+(* Redistribution and use in source and binary forms, with or without *)
+(* modification, are permitted provided that the following conditions *)
+(* are met: *)
+(* 1. Redistributions of source code must retain the above copyright *)
+(* notice, this list of conditions and the following disclaimer. *)
+(* 2. Redistributions in binary form must reproduce the above copyright *)
+(* notice, this list of conditions and the following disclaimer in *)
+(* the documentation and/or other materials provided with the *)
+(* distribution. *)
+(* *)
+(* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' *)
+(* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *)
+(* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *)
+(* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR *)
+(* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *)
+(* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *)
+(* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF *)
+(* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *)
+(* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *)
+(* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT *)
+(* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF *)
+(* SUCH DAMAGE. *)
+(**************************************************************************)
+
+open Elf_loader;;
+
+let opt_file_arguments = ref ([] : string list)
+
+let options = Arg.align []
+
+let usage_msg = "Sail OCaml RTS options:"
+
+let () =
+ Arg.parse options (fun s -> opt_file_arguments := !opt_file_arguments @ [s]) usage_msg
+
+let (>>) = State_monad.bindS
+let liftS = State.liftState (Cheri_types.get_regval, Cheri_types.set_regval)
+
+let load_elf_segment seg =
+ let open Elf_interpreted_segment in
+ let bs = seg.elf64_segment_body in
+ let paddr = Big_int.big_int_of_string (Nat_big_num.to_string seg.elf64_segment_paddr) in
+ let base = Big_int.big_int_of_string (Nat_big_num.to_string seg.elf64_segment_base) in
+ let offset = Big_int.big_int_of_string (Nat_big_num.to_string seg.elf64_segment_offset) in
+ let writer i byte = Cheri_code.write_char_mem (Arith.plus_int (Arith.Int_of_integer paddr) i) byte in
+ prerr_endline "\nLoading Segment";
+ prerr_endline ("Segment offset: " ^ Big_int.string_of_big_int offset);
+ prerr_endline ("Segment base address: " ^ Big_int.string_of_big_int base);
+ prerr_endline ("Segment physical address: " ^ Big_int.string_of_big_int paddr);
+ print_segment seg;
+ State.iteriS writer (Byte_sequence.char_list_of_byte_sequence bs)
+
+let _ =
+ Random.self_init ();
+ let elf_segments = match !opt_file_arguments with
+ | f :: _ -> load_elf f
+ | _ -> []
+ in
+ (* State_monad.prerr_results *)
+ (Cheri_code.initial_state |>
+ (State.iterS load_elf_segment elf_segments >> (fun _ ->
+ liftS (Cheri.main ()))));
diff --git a/test/isabelle/run_tests.sh b/test/isabelle/run_tests.sh
new file mode 100755
index 00000000..7b3f7bc1
--- /dev/null
+++ b/test/isabelle/run_tests.sh
@@ -0,0 +1,90 @@
+#!/usr/bin/env bash
+set -e
+
+DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"
+cd $DIR
+SAILDIR="$DIR/../.."
+AARCH64_TEST_DIR="$DIR/../arm"
+
+RED='\033[0;31m'
+GREEN='\033[0;32m'
+YELLOW='\033[0;33m'
+NC='\033[0m'
+
+rm -f $DIR/tests.xml
+
+pass=0
+fail=0
+XML=""
+
+function green {
+ (( pass += 1 ))
+ printf "$1: ${GREEN}$2${NC}\n"
+ XML+=" <testcase name=\"$1\"/>\n"
+}
+
+function yellow {
+ (( fail += 1 ))
+ printf "$1: ${YELLOW}$2${NC}\n"
+ XML+=" <testcase name=\"$1\">\n <error message=\"$2\">$2</error>\n </testcase>\n"
+}
+
+function red {
+ (( fail += 1 ))
+ printf "$1: ${RED}$2${NC}\n"
+ XML+=" <testcase name=\"$1\">\n <error message=\"$2\">$2</error>\n </testcase>\n"
+}
+
+function finish_suite {
+ printf "$1: Passed ${pass} out of $(( pass + fail ))\n\n"
+ XML=" <testsuite name=\"$1\" tests=\"$(( pass + fail ))\" failures=\"${fail}\" timestamp=\"$(date)\">\n$XML </testsuite>\n"
+ printf "$XML" >> $DIR/tests.xml
+ XML=""
+ pass=0
+ fail=0
+}
+
+SAILLIBDIR="$DIR/../../lib/"
+
+printf "<testsuites>\n" >> $DIR/tests.xml
+
+printf "Compiling AArch64 specification (Sail->Isabelle->OCaml)...\n"
+
+if make "run_aarch64.native" 1> /dev/null 2> /dev/null;
+then
+ green "compiled no_vector specification" "ok";
+
+ for i in `ls ${AARCH64_TEST_DIR}/*.elf`;
+ do
+ $DIR/run_aarch64.native $i 2> /dev/null 1> ${i%.elf}.result
+ if diff ${i%.elf}.result ${i%.elf}.expect;
+ then
+ green "ran $(basename $i)" "ok"
+ else
+ red "failed $(basename $i)" "fail"
+ fi;
+ rm -f ${i%.elf}.result
+ done;
+else
+ red "compiling no_vector specification" "fail";
+
+ for i in `ls ${AARCH64_TEST_DIR}/*.elf`;
+ do
+ red "failed $(basename $i)" "fail"
+ done
+fi
+
+printf "Compiling CHERI specification (Sail->Isabelle->OCaml)...\n"
+
+if make "run_cheri.native" 1> /dev/null 2> /dev/null;
+then
+ green "compiled CHERI-256 specification" "ok";
+else
+ red "compiling CHERI-256 specification" "fail";
+fi
+
+make clean 1> /dev/null 2> /dev/null
+
+finish_suite "Isabelle code generation tests"
+
+printf "</testsuites>\n" >> $DIR/tests.xml
diff --git a/test/typecheck/pass/simple_record_access.sail b/test/typecheck/pass/simple_record_access.sail
index a6e34c8b..76cbbaed 100644
--- a/test/typecheck/pass/simple_record_access.sail
+++ b/test/typecheck/pass/simple_record_access.sail
@@ -1,4 +1,5 @@
$include <flow.sail>
+$include <vector_inc.sail>
enum signal = {LOW, HIGH}