summaryrefslogtreecommitdiff
diff options
context:
space:
mode:
-rw-r--r--aarch64/mono/aarch64_integer_crc.sail13
-rw-r--r--aarch64/mono/aarch64_memory_exclusive_pair.sail97
-rw-r--r--aarch64/mono/aarch64_memory_exclusive_single.sail97
-rw-r--r--aarch64/mono/extra_constraints.sail56
4 files changed, 263 insertions, 0 deletions
diff --git a/aarch64/mono/aarch64_integer_crc.sail b/aarch64/mono/aarch64_integer_crc.sail
new file mode 100644
index 00000000..729a05a1
--- /dev/null
+++ b/aarch64/mono/aarch64_integer_crc.sail
@@ -0,0 +1,13 @@
+/* CRC accumulation step shared by the CRC32* instructions.
+ * crc32c selects the polynomial: 517762881 = 0x1EDC6F41 (Castagnoli, CRC-32C)
+ * when true, 79764919 = 0x04C11DB7 (IEEE 802.3) when false.
+ * d, m, n are register indices; size is the data operand width in bits. */
+val aarch64_integer_crc : forall ('size : Int).
+ (bool, int, int, int, atom('size)) -> unit effect {escape, undef, rreg, wreg}
+
+function aarch64_integer_crc (crc32c, d, m, n, size) = {
+ assert(constraint('size in {8,16,32,64}));
+ /* CRC instructions are UNALLOCATED without the CRC extension */
+ if ~(HaveCRCExt()) then UnallocatedEncoding() else ();
+ /* accumulator comes from the low 32 bits of Xn, data from Xm */
+ acc : bits(32) = aget_X(n);
+ val_name : bits('size) = aget_X(m);
+ poly : bits(32) = __GetSlice_int(32, if crc32c then 517762881 else 79764919, 0);
+ /* bit-reversed operands, zero-padded to a common 'size + 32 width,
+  * are XORed and reduced modulo poly; the result is reversed back */
+ tempacc : bits('size + 32) = BitReverse(acc) @ Zeros(size);
+ tempval : bits('size + 32) = BitReverse(val_name) @ Zeros(32);
+ aset_X(d, BitReverse(Poly32Mod2(tempacc ^ tempval, poly)))
+}
diff --git a/aarch64/mono/aarch64_memory_exclusive_pair.sail b/aarch64/mono/aarch64_memory_exclusive_pair.sail
new file mode 100644
index 00000000..27897eb3
--- /dev/null
+++ b/aarch64/mono/aarch64_memory_exclusive_pair.sail
@@ -0,0 +1,97 @@
+/* Moved and tightened an assertion */
+
+/* Exclusive load/store, pair form. Resolves the CONSTRAINED UNPREDICTABLE
+ * register-overlap cases via ConstrainUnpredictable, computes the base
+ * address (SP when n == 31, else Xn), then performs the access under the
+ * exclusive monitors. Presumably corresponds to LDXP/LDAXP/STXP/STLXP —
+ * confirm against the decode tables. */
+val aarch64_memory_exclusive_pair : forall ('datasize : Int) ('regsize : Int) ('elsize : Int).
+ (AccType, atom('datasize), atom('elsize), MemOp, int, bool, atom('regsize), int, int, int) -> unit effect {escape, undef, rreg, wreg, rmem, wmem}
+
+function aarch64_memory_exclusive_pair (acctype, datasize, elsize, memop, n, pair, regsize, s, t, t2) = {
+ assert(constraint('regsize >= 0), "regsize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ address : bits(64) = undefined;
+ data : bits('datasize) = undefined;
+ rt_unknown : bool = false;
+ rn_unknown : bool = false;
+ /* CONSTRAINED UNPREDICTABLE: load pair with both destinations equal */
+ if (memop == MemOp_LOAD & pair) & t == t2 then {
+ c : Constraint = ConstrainUnpredictable(Unpredictable_LDPOVERLAP);
+ assert(c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP)))");
+ match c {
+ Constraint_UNKNOWN => rt_unknown = true,
+ Constraint_UNDEF => UnallocatedEncoding(),
+ Constraint_NOP => EndOfInstruction()
+ }
+ } else ();
+ if memop == MemOp_STORE then {
+ /* CONSTRAINED UNPREDICTABLE: status register s overlaps a data register */
+ if s == t | pair & s == t2 then {
+ c : Constraint = ConstrainUnpredictable(Unpredictable_DATAOVERLAP);
+ assert(c == Constraint_UNKNOWN | c == Constraint_NONE | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_NONE) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+ match c {
+ Constraint_UNKNOWN => rt_unknown = true,
+ Constraint_NONE => rt_unknown = false,
+ Constraint_UNDEF => UnallocatedEncoding(),
+ Constraint_NOP => EndOfInstruction()
+ }
+ } else ();
+ /* CONSTRAINED UNPREDICTABLE: status register s overlaps the base register */
+ if s == n & n != 31 then {
+ c : Constraint = ConstrainUnpredictable(Unpredictable_BASEOVERLAP);
+ assert(c == Constraint_UNKNOWN | c == Constraint_NONE | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_NONE) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+ match c {
+ Constraint_UNKNOWN => rn_unknown = true,
+ Constraint_NONE => rn_unknown = false,
+ Constraint_UNDEF => UnallocatedEncoding(),
+ Constraint_NOP => EndOfInstruction()
+ }
+ } else ()
+ } else ();
+ /* n == 31 selects SP as the base, which requires an alignment check */
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else if rn_unknown then address = undefined
+ else address = aget_X(n);
+ secondstage : bool = undefined;
+ iswrite : bool = undefined;
+ match memop {
+ MemOp_STORE => {
+ /* for a pair store the two registers are concatenated; byte order
+  * of the halves depends on endianness */
+ if rt_unknown then data = undefined
+ else if pair then let 'v = ex_int(datasize / 2) in {
+ assert(constraint(2 * 'v = 'datasize));
+ el1 : bits('v) = aget_X(t);
+ el2 : bits('v) = aget_X(t2);
+ data = if BigEndian() then el1 @ el2 else el2 @ el1
+ } else data = aget_X(t);
+ /* status 1 = fail; the store happens (and status is recomputed)
+  * only when the exclusive monitors pass */
+ status : bits(1) = 0b1;
+ if AArch64_ExclusiveMonitorsPass(address, dbytes) then {
+ aset_Mem(address, dbytes, acctype, data);
+ status = ExclusiveMonitorsStatus()
+ } else ();
+ aset_X(s, ZeroExtend(status, 32))
+ },
+ MemOp_LOAD => {
+ /* loads always mark the address range for exclusive access */
+ AArch64_SetExclusiveMonitors(address, dbytes);
+ if pair then
+ if rt_unknown then aset_X(t, undefined : bits(32)) else if elsize == 32 then {
+ /* 32-bit pair: one access of datasize bits, split between t and t2 */
+ assert(constraint(- 'elsize + 'datasize > 0 & 'elsize >= 0), "datasize constraint");
+ data = aget_Mem(address, dbytes, acctype);
+ if BigEndian() then {
+ aset_X(t, slice(data, elsize, negate(elsize) + datasize));
+ aset_X(t2, slice(data, 0, elsize))
+ } else {
+ aset_X(t, slice(data, 0, elsize));
+ aset_X(t2, slice(data, elsize, negate(elsize) + datasize))
+ }
+ } else {
+ /* 64-bit pair: alignment is checked, then two 8-byte accesses */
+ if address != Align(address, dbytes) then {
+ iswrite = false;
+ secondstage = false;
+ AArch64_Abort(address, AArch64_AlignmentFault(acctype, iswrite, secondstage))
+ } else ();
+ aset_X(t, aget_Mem(address + 0, 8, acctype));
+ aset_X(t2, aget_Mem(address + 8, 8, acctype))
+ }
+ else {
+ data = aget_Mem(address, dbytes, acctype);
+ aset_X(t, ZeroExtend(data, regsize))
+ }
+ }
+ }
+}
diff --git a/aarch64/mono/aarch64_memory_exclusive_single.sail b/aarch64/mono/aarch64_memory_exclusive_single.sail
new file mode 100644
index 00000000..a370794e
--- /dev/null
+++ b/aarch64/mono/aarch64_memory_exclusive_single.sail
@@ -0,0 +1,97 @@
+/* Changed an assertion to a strict inequality and moved it to where that's true */
+
+/* Exclusive load/store, single-register form (also reached with pair
+ * variants). Resolves the CONSTRAINED UNPREDICTABLE register-overlap
+ * cases, computes the base address (SP when n == 31, else Xn), then
+ * performs the access under the exclusive monitors. Mirrors
+ * aarch64_memory_exclusive_pair apart from assertion placement. */
+val aarch64_memory_exclusive_single : forall ('datasize : Int) 'elsize ('regsize : Int).
+ (AccType, atom('datasize), atom('elsize), MemOp, int, bool, atom('regsize), int, int, int) -> unit effect {escape, undef, rreg, wreg, rmem, wmem}
+
+function aarch64_memory_exclusive_single (acctype, datasize, elsize, memop, n, pair, regsize, s, t, t2) = {
+ /* message corrected: this asserts 'regsize, as in the pair variant */
+ assert(constraint('regsize >= 0), "regsize constraint");
+ let 'dbytes = ex_int(datasize / 8);
+ assert(constraint('datasize in {8, 16, 32, 64, 128}), "datasize constraint");
+ assert(constraint(8 * 'dbytes = 'datasize), "dbytes constraint");
+ address : bits(64) = undefined;
+ data : bits('datasize) = undefined;
+ rt_unknown : bool = false;
+ rn_unknown : bool = false;
+ /* CONSTRAINED UNPREDICTABLE: load pair with both destinations equal */
+ if (memop == MemOp_LOAD & pair) & t == t2 then {
+ c : Constraint = ConstrainUnpredictable(Unpredictable_LDPOVERLAP);
+ assert(c == Constraint_UNKNOWN | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_UNDEF) || (c == Constraint_NOP)))");
+ match c {
+ Constraint_UNKNOWN => rt_unknown = true,
+ Constraint_UNDEF => UnallocatedEncoding(),
+ Constraint_NOP => EndOfInstruction()
+ }
+ } else ();
+ if memop == MemOp_STORE then {
+ /* CONSTRAINED UNPREDICTABLE: status register s overlaps a data register */
+ if s == t | pair & s == t2 then {
+ c : Constraint = ConstrainUnpredictable(Unpredictable_DATAOVERLAP);
+ assert(c == Constraint_UNKNOWN | c == Constraint_NONE | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_NONE) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+ match c {
+ Constraint_UNKNOWN => rt_unknown = true,
+ Constraint_NONE => rt_unknown = false,
+ Constraint_UNDEF => UnallocatedEncoding(),
+ Constraint_NOP => EndOfInstruction()
+ }
+ } else ();
+ /* CONSTRAINED UNPREDICTABLE: status register s overlaps the base register */
+ if s == n & n != 31 then {
+ c : Constraint = ConstrainUnpredictable(Unpredictable_BASEOVERLAP);
+ assert(c == Constraint_UNKNOWN | c == Constraint_NONE | c == Constraint_UNDEF | c == Constraint_NOP, "((c == Constraint_UNKNOWN) || ((c == Constraint_NONE) || ((c == Constraint_UNDEF) || (c == Constraint_NOP))))");
+ match c {
+ Constraint_UNKNOWN => rn_unknown = true,
+ Constraint_NONE => rn_unknown = false,
+ Constraint_UNDEF => UnallocatedEncoding(),
+ Constraint_NOP => EndOfInstruction()
+ }
+ } else ()
+ } else ();
+ /* n == 31 selects SP as the base, which requires an alignment check */
+ if n == 31 then {
+ CheckSPAlignment();
+ address = aget_SP()
+ } else if rn_unknown then address = undefined
+ else address = aget_X(n);
+ secondstage : bool = undefined;
+ iswrite : bool = undefined;
+ match memop {
+ MemOp_STORE => {
+ /* for a pair store the two registers are concatenated; byte order
+  * of the halves depends on endianness */
+ if rt_unknown then data = undefined
+ else if pair then let 'v = ex_int(datasize / 2) in {
+ assert(constraint(2 * 'v = 'datasize));
+ el1 : bits('v) = aget_X(t);
+ el2 : bits('v) = aget_X(t2);
+ data = if BigEndian() then el1 @ el2 else el2 @ el1
+ } else data = aget_X(t);
+ /* status 1 = fail; the store happens (and status is recomputed)
+  * only when the exclusive monitors pass */
+ status : bits(1) = 0b1;
+ if AArch64_ExclusiveMonitorsPass(address, dbytes) then {
+ aset_Mem(address, dbytes, acctype, data);
+ status = ExclusiveMonitorsStatus()
+ } else ();
+ aset_X(s, ZeroExtend(status, 32))
+ },
+ MemOp_LOAD => {
+ /* loads always mark the address range for exclusive access */
+ AArch64_SetExclusiveMonitors(address, dbytes);
+ if pair then {
+ /* hoisted here (vs. the pair variant) so it holds for the whole
+  * pair-load path: elsize strictly below datasize, non-negative */
+ assert(constraint(- 'elsize + 'datasize > 0 & 'elsize >= 0));
+ if rt_unknown then aset_X(t, undefined : bits(32)) else if elsize == 32 then {
+ /* 32-bit pair: one access of datasize bits, split between t and t2 */
+ data = aget_Mem(address, dbytes, acctype);
+ if BigEndian() then {
+ aset_X(t, slice(data, elsize, negate(elsize) + datasize));
+ aset_X(t2, slice(data, 0, elsize))
+ } else {
+ aset_X(t, slice(data, 0, elsize));
+ aset_X(t2, slice(data, elsize, negate(elsize) + datasize))
+ }
+ } else {
+ /* 64-bit pair: alignment is checked, then two 8-byte accesses */
+ if address != Align(address, dbytes) then {
+ iswrite = false;
+ secondstage = false;
+ AArch64_Abort(address, AArch64_AlignmentFault(acctype, iswrite, secondstage))
+ } else ();
+ aset_X(t, aget_Mem(address + 0, 8, acctype));
+ aset_X(t2, aget_Mem(address + 8, 8, acctype))
+ }
+ } else {
+ data = aget_Mem(address, dbytes, acctype);
+ aset_X(t, ZeroExtend(data, regsize))
+ }
+ }
+ }
+}
diff --git a/aarch64/mono/extra_constraints.sail b/aarch64/mono/extra_constraints.sail
new file mode 100644
index 00000000..2f89f401
--- /dev/null
+++ b/aarch64/mono/extra_constraints.sail
@@ -0,0 +1,56 @@
+/* Ideally we'd rewrite these to take the bit size */
+
+/* PC-relative (literal) SIMD/FP load: reads 'size bytes at PC + offset
+ * and writes them to SIMD/FP register t. size here is in BYTES (4/8/16),
+ * unlike aarch64_memory_literal_general below where it is in bits. */
+val aarch64_memory_literal_simdfp : forall ('size : Int).
+ (bits(64), atom('size), int) -> unit effect {escape, undef, wreg, rreg, rmem, wmem}
+
+function aarch64_memory_literal_simdfp (offset, size, t) = {
+ assert(constraint('size >= 0));
+ assert(constraint('size in {4,8,16}));
+ address : bits(64) = aget_PC() + offset;
+ data : bits(8 * 'size) = undefined;
+ /* traps if SIMD/FP access is disabled, before any memory access */
+ CheckFPAdvSIMDEnabled64();
+ data = aget_Mem(address, size, AccType_VEC);
+ aset_V(t, data)
+}
+
+/* like this, which would be difficult otherwise... */
+
+/* PC-relative (literal) general-register load or prefetch. size is in
+ * BITS here (the decoder below passes 8 * size). NOTE(review): the match
+ * has no MemOp_STORE arm; only LOAD and PREFETCH are produced by
+ * memory_literal_general_decode, so any other value would fail the match. */
+val aarch64_memory_literal_general : forall ('size : Int).
+ (MemOp, bits(64), bool, atom('size), int) -> unit effect {escape, undef, wreg, rreg, rmem, wmem}
+
+function aarch64_memory_literal_general (memop, offset, signed, size, t) = {
+ address : bits(64) = aget_PC() + offset;
+ data : bits('size) = undefined;
+ match memop {
+ MemOp_LOAD => {
+ assert(constraint('size >= 0));
+ let 'bytes = size / 8;
+ assert(constraint(8 * 'bytes = 'size));
+ data = aget_Mem(address, bytes, AccType_NORMAL);
+ /* LDRSW-style sign extension to 64 bits when signed */
+ if signed then aset_X(t, SignExtend(data, 64)) else aset_X(t, data)
+ },
+ /* prefetch hint: register index t encodes the prefetch operation */
+ MemOp_PREFETCH => Prefetch(address, __GetSlice_int(5, t, 0))
+ }
+}
+
+/* Decode for the load-literal (general) encoding class: opc picks the
+ * width/signedness (or prefetch), imm19 is the word-scaled PC offset. */
+val memory_literal_general_decode : (bits(2), bits(1), bits(19), bits(5)) -> unit effect {escape, rmem, rreg, undef, wmem, wreg}
+
+function memory_literal_general_decode (opc, V, imm19, Rt) = {
+ /* presumably flags the instruction as unconditional for the
+  * surrounding decoder state — confirm against the model's harness */
+ __unconditional = true;
+ t : int = UInt(Rt);
+ memop : MemOp = MemOp_LOAD;
+ signed : bool = false;
+ /* size is in bytes at this point; multiplied by 8 at the call below.
+  * NOTE(review): on the 0b11 (prefetch) path size is left undefined and
+  * still passed on — harmless only because the callee's PREFETCH arm
+  * never reads it. */
+ size : int = undefined;
+ offset : bits(64) = undefined;
+ match opc {
+ 0b00 => size = 4,
+ 0b01 => size = 8,
+ 0b10 => {
+ size = 4;
+ signed = true
+ },
+ 0b11 => memop = MemOp_PREFETCH
+ };
+ /* imm19 is a word offset: shift left by 2, then sign-extend */
+ offset = SignExtend(imm19 @ 0b00, 64);
+ aarch64_memory_literal_general(memop, offset, signed, 8 * size, t)
+}