summaryrefslogtreecommitdiff
path: root/arm
diff options
context:
space:
mode:
Diffstat (limited to 'arm')
-rw-r--r--arm/Makefile51
-rw-r--r--arm/armv8.h.sail164
-rw-r--r--arm/armv8.sail2359
-rw-r--r--arm/armv8_A32_sys_regs.sail61
-rw-r--r--arm/armv8_A64_lib.sail909
-rw-r--r--arm/armv8_A64_special_purpose_regs.sail101
-rw-r--r--arm/armv8_A64_sys_regs.sail465
-rw-r--r--arm/armv8_common_lib.sail998
-rw-r--r--arm/armv8_lib.h.sail240
-rw-r--r--arm/armv8_pstate.sail73
10 files changed, 5421 insertions, 0 deletions
diff --git a/arm/Makefile b/arm/Makefile
new file mode 100644
index 00000000..16ad6ab4
--- /dev/null
+++ b/arm/Makefile
@@ -0,0 +1,51 @@
+# Everything this Makefile generates lands in BUILDDIR (removed by `clean`).
+BUILDDIR=./build
+
+# Parse-time existence checks: warn (build continues) if the tool checkouts
+# are missing at the expected relative paths.
+SAIL=../sail
+ifeq ("$(wildcard $(SAIL))","")
+ $(warning can not find Sail)
+endif
+
+LEM=../../lem/lem
+ifeq ("$(wildcard $(LEM))","")
+ $(warning can not find Lem)
+endif
+
+# Lem support-library path, passed to lem via -lib below.
+LEMINTERPDIR=../src/lem_interp/
+
+# the order of the files is important
+SOURCES=armv8.h.sail\
+ armv8_A64_sys_regs.sail\
+ armv8_A64_special_purpose_regs.sail\
+ armv8_A32_sys_regs.sail\
+ armv8_pstate.sail\
+ armv8_lib.h.sail\
+ armv8_common_lib.sail\
+ armv8_A64_lib.sail\
+ armv8.sail
+
+all: $(BUILDDIR)/armv8.ml
+
+clean:
+ rm -rf $(BUILDDIR)
+
+.PHONY: all clean
+
+# Creates the output directory; used only as an order-only prerequisite so
+# its timestamp never triggers rebuilds.
+$(BUILDDIR):
+ mkdir -p $@
+
+$(BUILDDIR)/armv8.lem: $(SOURCES) | $(BUILDDIR)
+ $(SAIL) -lem_ast $(SOURCES) -o $(basename $@)
+# sail generates the .lem file in pwd
+ mv $(notdir $@) $@
+
+$(BUILDDIR)/armv8.ml: $(BUILDDIR)/armv8.lem
+ $(LEM) -ocaml -lib $(LEMINTERPDIR) $<
+
+######################################################################
+ETCDIR=../etc
+SAIL_FILES=$(wildcard *.sail)
+
+# Re-apply the project license header to every Sail source using headache.
+apply_header:
+ headache -c $(ETCDIR)/headache_config -h $(ETCDIR)/arm_header $(SAIL_FILES)
+
+.PHONY: apply_header
diff --git a/arm/armv8.h.sail b/arm/armv8.h.sail
new file mode 100644
index 00000000..1d20c635
--- /dev/null
+++ b/arm/armv8.h.sail
@@ -0,0 +1,164 @@
+(*========================================================================*)
+(* *)
+(* Copyright (c) 2015-2016 Shaked Flur *)
+(* Copyright (c) 2015-2016 Kathryn Gray *)
+(* All rights reserved. *)
+(* *)
+(* This software was developed by the University of Cambridge Computer *)
+(* Laboratory as part of the Rigorous Engineering of Mainstream Systems *)
+(* (REMS) project, funded by EPSRC grant EP/K008528/1. *)
+(* *)
+(* Redistribution and use in source and binary forms, with or without *)
+(* modification, are permitted provided that the following conditions *)
+(* are met: *)
+(* 1. Redistributions of source code must retain the above copyright *)
+(* notice, this list of conditions and the following disclaimer. *)
+(* 2. Redistributions in binary form must reproduce the above copyright *)
+(* notice, this list of conditions and the following disclaimer in *)
+(* the documentation and/or other materials provided with the *)
+(* distribution. *)
+(* *)
+(* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' *)
+(* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *)
+(* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *)
+(* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR *)
+(* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *)
+(* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *)
+(* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF *)
+(* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *)
+(* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *)
+(* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT *)
+(* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF *)
+(* SUCH DAMAGE. *)
+(*========================================================================*)
+
+(* "dec" ordering: bitvectors are indexed with the most significant bit at
+   the highest index. *)
+default Order dec
+
+typedef boolean = bit
+typedef integer = int
+typedef uinteger = nat (* ARM ARM does not have nat/uint type *)
+typedef reg_size = bit[5]
+typedef reg_index = [|31|]
+typedef SIMD_index = [|32|]
+
+(* The 64-bit program counter. *)
+register (bit[64]) _PC
+
+(* General purpose registers *)
+
+register (bit[64]) R30
+register (bit[64]) R29
+register (bit[64]) R28
+register (bit[64]) R27
+register (bit[64]) R26
+register (bit[64]) R25
+register (bit[64]) R24
+register (bit[64]) R23
+register (bit[64]) R22
+register (bit[64]) R21
+register (bit[64]) R20
+register (bit[64]) R19
+register (bit[64]) R18
+register (bit[64]) R17
+register (bit[64]) R16
+register (bit[64]) R15
+register (bit[64]) R14
+register (bit[64]) R13
+register (bit[64]) R12
+register (bit[64]) R11
+register (bit[64]) R10
+register (bit[64]) R9
+register (bit[64]) R8
+register (bit[64]) R7
+register (bit[64]) R6
+register (bit[64]) R5
+register (bit[64]) R4
+register (bit[64]) R3
+register (bit[64]) R2
+register (bit[64]) R1
+register (bit[64]) R0
+
+(* Register file as a vector indexed 31..0; slot 31 holds `undefined`
+   (NOTE(review): presumably so register number 31 is treated as ZR/SP by
+   the register access functions, which are not visible here — confirm). *)
+let (vector<31,32,dec,(register<bit[64]>)>) _R =
+ [undefined,R30,R29,R28,R27,R26,R25,R24,R23,R22,R21,
+ R20,R19,R18,R17,R16,R15,R14,R13,R12,R11,
+ R10,R9 ,R8 ,R7 ,R6 ,R5 ,R4 ,R3 ,R2 ,R1 ,
+ R0]
+
+(* Interpret a 5-bit register field as an unsigned index in 0..31. *)
+function reg_index effect pure UInt_reg((reg_size) x) = (reg_index) x
+
+(* SIMD and floating-point registers *)
+
+register (bit[128]) V31
+register (bit[128]) V30
+register (bit[128]) V29
+register (bit[128]) V28
+register (bit[128]) V27
+register (bit[128]) V26
+register (bit[128]) V25
+register (bit[128]) V24
+register (bit[128]) V23
+register (bit[128]) V22
+register (bit[128]) V21
+register (bit[128]) V20
+register (bit[128]) V19
+register (bit[128]) V18
+register (bit[128]) V17
+register (bit[128]) V16
+register (bit[128]) V15
+register (bit[128]) V14
+register (bit[128]) V13
+register (bit[128]) V12
+register (bit[128]) V11
+register (bit[128]) V10
+register (bit[128]) V9
+register (bit[128]) V8
+register (bit[128]) V7
+register (bit[128]) V6
+register (bit[128]) V5
+register (bit[128]) V4
+register (bit[128]) V3
+register (bit[128]) V2
+register (bit[128]) V1
+register (bit[128]) V0
+
+(* SIMD/FP register file; as with _R, the extra top slot is `undefined`. *)
+let (vector<32,33,dec,(register<bit[128]>)>) _V =
+ [undefined,V31,V30,V29,V28,V27,V26,V25,V24,V23,V22,
+ V21,V20,V19,V18,V17,V16,V15,V14,V13,V12,
+ V11,V10,V9 ,V8 ,V7 ,V6 ,V5 ,V4 ,V3 ,V2 ,
+ V1 ,V0]
+
+
+(* lsl: used instead of the ARM ARM << over integers *)
+function forall Nat 'm, Nat 'n, 'm >= 0, 'n >= 0. [:'n * 2** 'm:] lsl(([:'n:]) n, ([:'m:]) m) = n * (2 ** m)
+
+(* not_implemented is used to indicate something WE did not implement *)
+function unit effect { escape } not_implemented((string) message) =
+{
+ exit message;
+}
+
+(* info is used to convey information to the user *)
+function unit effect pure info((string) message) = ()
+
+(* Fixed IMPLEMENTATION DEFINED architecture choices of this model; the
+   values below select: CRC extension present, AArch64 only, EL0/EL1 only. *)
+typedef IMPLEMENTATION_DEFINED_type = const struct
+{
+ boolean HaveCRCExt;
+ boolean HaveAArch32EL;
+ boolean HaveAnyAArch32;
+ boolean HaveEL2;
+ boolean HaveEL3;
+ boolean HighestELUsingAArch32;
+ boolean IsSecureBelowEL3;
+}
+let IMPLEMENTATION_DEFINED =
+{
+ HaveCRCExt = true;
+ HaveAArch32EL = false;
+ HaveAnyAArch32 = false;
+ HaveEL2 = false;
+ HaveEL3 = false;
+ HighestELUsingAArch32 = false;
+ IsSecureBelowEL3 = false;
+}
+
+(* FIXME: *)
+(* Placeholder: ARM's UNKNOWN values are modelled as the constant 0 rather
+   than a nondeterministic choice. *)
+let UNKNOWN = 0
diff --git a/arm/armv8.sail b/arm/armv8.sail
new file mode 100644
index 00000000..a398e52d
--- /dev/null
+++ b/arm/armv8.sail
@@ -0,0 +1,2359 @@
+(*========================================================================*)
+(* *)
+(* Copyright (c) 2015-2016 Shaked Flur *)
+(* Copyright (c) 2015-2016 Kathryn Gray *)
+(* All rights reserved. *)
+(* *)
+(* This software was developed by the University of Cambridge Computer *)
+(* Laboratory as part of the Rigorous Engineering of Mainstream Systems *)
+(* (REMS) project, funded by EPSRC grant EP/K008528/1. *)
+(* *)
+(* Redistribution and use in source and binary forms, with or without *)
+(* modification, are permitted provided that the following conditions *)
+(* are met: *)
+(* 1. Redistributions of source code must retain the above copyright *)
+(* notice, this list of conditions and the following disclaimer. *)
+(* 2. Redistributions in binary form must reproduce the above copyright *)
+(* notice, this list of conditions and the following disclaimer in *)
+(* the documentation and/or other materials provided with the *)
+(* distribution. *)
+(* *)
+(* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' *)
+(* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *)
+(* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *)
+(* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR *)
+(* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *)
+(* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *)
+(* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF *)
+(* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *)
+(* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *)
+(* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT *)
+(* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF *)
+(* SUCH DAMAGE. *)
+(*========================================================================*)
+
+(* Decoded-instruction AST. 'R is the operand (register) size in bits and
+   'D the memory data size; each decode function below produces one of
+   these constructors, consumed by the scattered `execute` function. *)
+typedef ast = const union forall Nat 'R, 'R IN {32, 64}, (* register size *)
+ Nat 'D, 'D IN {8,16,32,64}. (* data size *)
+{
+ Unallocated;
+ (boolean) ImplementationDefinedTestBeginEnd;
+ ImplementationDefinedStopFetching;
+
+ (reg_index,[:'R:],boolean,bit[64]) CompareAndBranch;
+ (bit[64],bit[4]) BranchConditional;
+ (bit[16]) GenerateExceptionEL1; (* TODO: add to .hgen *)
+ (bit[16]) GenerateExceptionEL2; (* TODO: add to .hgen *)
+ (bit[16]) GenerateExceptionEL3; (* TODO: add to .hgen *)
+ (bit[16]) DebugBreakpoint; (* TODO: add to .hgen *)
+ ExternalDebugBreakpoint; (* TODO: add to .hgen *)
+ (bit[2]) DebugSwitchToExceptionLevel; (* TODO: add to .hgen *)
+ (bit[4],PSTATEField) MoveSystemImmediate;
+ (SystemHintOp) Hint;
+ (uinteger) ClearExclusiveMonitor;
+ (MemBarrierOp,MBReqDomain,MBReqTypes) Barrier;
+ (reg_index,uinteger,uinteger,uinteger,uinteger,uinteger,boolean) System;
+ (reg_index,uinteger,uinteger,uinteger,uinteger,uinteger,boolean) MoveSystemRegister;
+ (reg_index,[:'R:],uinteger,bit,bit[64]) TestBitAndBranch;
+ (BranchType,bit[64]) BranchImmediate;
+ (reg_index,BranchType) BranchRegister;
+ ExceptionReturn; (* TODO: add to .hgen *)
+ DebugRestorePState; (* TODO: add to .hgen *)
+ (reg_index,MemOp,boolean,uinteger,bit[64],[:'D:]) LoadLiteral;
+ (reg_index,reg_index,reg_index,reg_index,AccType,boolean,boolean,MemOp,uinteger,[:'R:],[:'D:]) LoadStoreAcqExc;
+ (boolean,boolean,reg_index,reg_index,reg_index,AccType,MemOp,uinteger,[:'D:],bit[64]) LoadStorePairNonTemp;
+ (reg_index,reg_index,AccType,MemOp,boolean,boolean,boolean,bit[64],[:'R:],[:'D:]) LoadImmediate;
+ (reg_index,reg_index,reg_index,AccType,MemOp,boolean,boolean,boolean,ExtendType,uinteger,[:'R:],[:'D:]) LoadRegister;
+ (boolean,boolean,reg_index,reg_index,reg_index,AccType,MemOp,boolean,uinteger,[:'D:],bit[64]) LoadStorePair;
+ (reg_index,reg_index,[:'R:],boolean,boolean,bit['R]) AddSubImmediate;
+ (reg_index,reg_index,[:'R:],boolean,boolean,uinteger,uinteger,bit['R],bit['R]) BitfieldMove;
+ (reg_index,reg_index,reg_index,[:'R:],uinteger) ExtractRegister;
+ (reg_index,reg_index,[:'R:],boolean,LogicalOp,bit['R]) LogicalImmediate;
+ (reg_index,[:'R:],bit[16],uinteger,MoveWideOp) MoveWide;
+ (reg_index,boolean,bit[64]) Address;
+ (reg_index,reg_index,reg_index,[:'R:],boolean,boolean,ExtendType,[|7|]) AddSubExtendRegister;
+ (reg_index,reg_index,reg_index,[:'R:],boolean,boolean,ShiftType,[|63|]) AddSubShiftedRegister;
+ (reg_index,reg_index,reg_index,[:'R:],boolean,boolean) AddSubCarry;
+ (reg_index,[:'R:],boolean,bit[4],bit[4],bit['R]) ConditionalCompareImmediate;
+ (reg_index,reg_index,[:'R:],boolean,bit[4],bit[4]) ConditionalCompareRegister;
+ (reg_index,reg_index,reg_index,[:'R:],bit[4],boolean,boolean) ConditionalSelect;
+ (reg_index,reg_index,[:'R:],RevOp) Reverse;
+ (reg_index,reg_index,[:'R:],CountOp) CountLeading;
+ (reg_index,reg_index,reg_index,[:'R:],boolean) Division;
+ (reg_index,reg_index,reg_index,[:'R:],ShiftType) Shift;
+ (reg_index,reg_index,reg_index,[:'D:],boolean) CRC;
+ (reg_index,reg_index,reg_index,reg_index,[:'R:],[:'D:],boolean) MultiplyAddSub;
+ (reg_index,reg_index,reg_index,reg_index,[:'R:],[:'D:],boolean,boolean) MultiplyAddSubLong;
+ (reg_index,reg_index,reg_index,reg_index,[:'R:],[:'D:],boolean) MultiplyHigh;
+ (reg_index,reg_index,reg_index,[:'R:],boolean,LogicalOp,ShiftType,[|63|],boolean) LogicalShiftedRegister;
+
+}
+
+(* `execute` is defined by scattered clauses, one per AST constructor. *)
+val forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}. ast<'R,'D> -> unit effect {rreg,wreg,rmem,barr,eamem,wmv,escape} execute
+scattered function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}. unit execute
+
+(* CBNZ *)
+(* CBZ *)
+(* Compare a register against zero and branch PC-relative by the signed,
+   word-aligned 19-bit offset. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeCompareBranchImmediate ([sf]:0b011010:[op]:(bit[19]) imm19:Rt) =
+{
+ (reg_index) t := UInt_reg(Rt);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (boolean) iszero := (op == 0);
+ (bit[64]) offset := SignExtend(imm19:0b00);
+
+ Some(CompareAndBranch(t,datasize,iszero,offset));
+}
+
+function clause execute (CompareAndBranch(t,datasize,iszero,offset)) = {
+ (bit['R]) operand1 := rX(t);
+ (* branch taken iff the zero-test matches the CBZ/CBNZ polarity *)
+ if IsZero(operand1) == iszero then
+ BranchTo(rPC() + offset, BranchType_JMP);
+}
+
+(* B.cond *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeConditionalBranchImmediate (0b0101010:0b0:(bit[19]) imm19:0b0:_cond) =
+{
+ (bit[64]) offset := SignExtend(imm19:0b00);
+ (bit[4]) condition := _cond;
+
+ Some(BranchConditional(offset,condition));
+}
+
+function clause execute (BranchConditional(offset,condition)) = {
+ if ConditionHolds(condition) then
+ BranchTo(rPC() + offset, BranchType_JMP);
+}
+
+
+(* Exception-generating instructions (SVC/HVC/SMC/BRK/HLT/DCPS), decoded by
+   scattered clauses over the 32-bit encoding. *)
+val forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ bit[32] -> option<(ast<'R,'D>)> effect {escape} decodeExceptionGeneration
+scattered function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> decodeExceptionGeneration
+
+(* SVC *)
+function clause decodeExceptionGeneration (0b11010100:0b000:(bit[16]) imm16:0b000:0b01) =
+{
+ (bit[16]) imm := imm16;
+
+ Some(GenerateExceptionEL1(imm))
+}
+
+function clause execute (GenerateExceptionEL1(imm)) =
+{
+ AArch64_CallSupervisor(imm);
+}
+
+(* HVC *)
+function clause decodeExceptionGeneration (0b11010100:0b000:(bit[16]) imm16:0b000:0b10) = {
+ (bit[16]) imm := imm16;
+
+ Some(GenerateExceptionEL2(imm))
+}
+
+function clause execute (GenerateExceptionEL2(imm)) =
+{
+ (* HVC is unallocated at EL0, in Secure EL1, or when EL2 is absent *)
+ if ~(HaveEL(EL2)) | PSTATE_EL == EL0 | (PSTATE_EL == EL1 & IsSecure()) then
+ UnallocatedEncoding();
+
+ (bit) hvc_enable := if HaveEL(EL3) then SCR_EL3.HCE else NOT'(HCR_EL2.HCD);
+ if hvc_enable == 0 then
+ AArch64_UndefinedFault()
+ else
+ AArch64_CallHypervisor(imm);
+}
+
+(* SMC *)
+function clause decodeExceptionGeneration (0b11010100:0b000:(bit[16]) imm16:0b000:0b11) = {
+ (bit[16]) imm := imm16;
+
+ Some(GenerateExceptionEL3(imm))
+}
+
+(* SMC execute clause. BUG FIX: the original matched GenerateExceptionEL2,
+   which already has an execute clause (HVC, above); the SMC decode clause
+   produces GenerateExceptionEL3, which otherwise had no clause at all. *)
+function clause execute (GenerateExceptionEL3(imm)) =
+{
+ (* SMC is unallocated at EL0 or when EL3 is absent *)
+ if ~(HaveEL(EL3)) | PSTATE_EL == EL0 then
+ UnallocatedEncoding();
+
+ AArch64_CheckForSMCTrap(imm);
+
+ if SCR_EL3.SMD == 1 then
+ (* SMC disabled *)
+ AArch64_UndefinedFault()
+ else
+ AArch64_CallSecureMonitor(imm);
+}
+
+(* BRK *)
+function clause decodeExceptionGeneration (0b11010100:0b001:(bit[16]) imm16:0b000:0b00) = {
+ (bit[16]) comment := imm16;
+
+ Some(DebugBreakpoint(comment))
+}
+
+function clause execute (DebugBreakpoint(comment)) =
+{
+ AArch64_SoftwareBreakpoint(comment);
+}
+
+(* HLT *)
+function clause decodeExceptionGeneration (0b11010100:0b010:(bit[16]) imm16:0b000:0b00) =
+{
+ (* FIXME: we don't allow register reads in the decoding, but we probably should *)
+ (* ARM: if EDSCR.HDE == 0 | ~(HaltingAllowed()) then AArch64_UndefinedFault(); (* ARM: UndefinedFault() *) *)
+
+ Some(ExternalDebugBreakpoint);
+}
+
+
+function clause execute (ExternalDebugBreakpoint) =
+{
+ Halt(DebugHalt_HaltInstruction);
+}
+
+(* DCPS1 LL=0b01*)
+(* DCPS2 LL=0b10*)
+(* DCPS3 LL=0b11*)
+function clause decodeExceptionGeneration (0b11010100:0b101:(bit[16]) imm16:0b000:(bit[2]) LL) =
+{
+ (bit[2]) target_level := LL;
+ (* LL == 0b00 is not a valid DCPS target level *)
+ if LL == 0b00 then UnallocatedEncoding();
+ (* FIXME: we don't allow register reads in the decoding *)
+ (* ARM: if ~(Halted()) then AArch64_UndefinedFault(); *)
+
+ Some(DebugSwitchToExceptionLevel(target_level));
+}
+
+function clause execute (DebugSwitchToExceptionLevel(target_level)) =
+{
+ DCPSInstruction(target_level);
+}
+
+end decodeExceptionGeneration
+
+(* System instructions (MSR/MRS/HINT/CLREX/barriers/SYS), decoded by
+   scattered clauses over the 32-bit encoding. *)
+val forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ bit[32] -> option<(ast<'R,'D>)> effect {escape} decodeSystem
+scattered function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> decodeSystem
+
+(* MSR (immediate) *)
+(* Writes the 4-bit CRm immediate into the PSTATE field selected by op1:op2. *)
+function clause decodeSystem (0b1101010100:[0]:0b00:op1:0b0100:CRm:op2:0b11111) = {
+ (* FIXME: we don't allow register reads in the decoding *)
+ (* ARM: CheckSystemAccess(0b00, op1, 0b0100, CRm, op2, 0b11111, 0); *)
+
+ (bit[4]) operand := CRm;
+ (PSTATEField) field := 0; (* ARM: uninitialized *)
+ switch (op1:op2) {
+ case (0b000:0b101) -> field := PSTATEField_SP
+ case (0b011:0b110) -> field := PSTATEField_DAIFSet
+ case (0b011:0b111) -> field := PSTATEField_DAIFClr
+ case _ -> UnallocatedEncoding()
+ };
+
+ (* FIXME: we don't allow register reads in the decoding *)
+ (* FIXME: it is not clear why this check is here. How is it different
+ than the CheckSystemAccess above? *)
+ (*Check that an AArch64 MSR/MRS access to the DAIF flags is permitted*)
+ (* ARM: if op1 == 0b011 & PSTATE_EL == EL0 & SCTLR_EL1.UMA == 0 then
+ AArch64_SystemRegisterTrap(EL1, 0b00, op2, op1, 0b0100, 0b11111, CRm, 0);*)
+
+ Some(MoveSystemImmediate(operand,field));
+}
+
+function clause execute ( MoveSystemImmediate(operand,field) ) = {
+ switch field {
+ case PSTATEField_SP -> PSTATE_SP := operand[0]
+ (* DAIFSet ORs the mask bits in; DAIFClr ANDs them out *)
+ case PSTATEField_DAIFSet -> {
+ PSTATE_D := (PSTATE_D | operand[3]);
+ PSTATE_A := (PSTATE_A | operand[2]);
+ PSTATE_I := (PSTATE_I | operand[1]);
+ PSTATE_F := (PSTATE_F | operand[0]);
+ }
+ case PSTATEField_DAIFClr -> {
+ PSTATE_D := (PSTATE_D & NOT'(operand[3]));
+ PSTATE_A := (PSTATE_A & NOT'(operand[2]));
+ PSTATE_I := (PSTATE_I & NOT'(operand[1]));
+ PSTATE_F := (PSTATE_F & NOT'(operand[0]));
+ }
+ }
+}
+
+(* HINT *)
+function clause decodeSystem (0b1101010100:[0]:0b00:0b011:0b0010:(bit[4]) CRm:(bit[3]) op2:0b11111) = {
+ (SystemHintOp) op := 0; (* ARM: uninitialized *)
+
+ switch CRm:op2 {
+ case (0b0000:0b000) -> op := SystemHintOp_NOP
+ case (0b0000:0b001) -> op := SystemHintOp_YIELD
+ case (0b0000:0b010) -> op := SystemHintOp_WFE
+ case (0b0000:0b011) -> op := SystemHintOp_WFI
+ case (0b0000:0b100) -> op := SystemHintOp_SEV
+ case (0b0000:0b101) -> op := SystemHintOp_SEVL
+ case _ -> op := SystemHintOp_NOP (* unallocated hints execute as NOP *)
+ };
+
+ Some(Hint(op));
+}
+
+function clause execute ( Hint(op) ) = {
+ switch op {
+ case SystemHintOp_YIELD ->
+ Hint_Yield()
+
+ case SystemHintOp_WFE -> {
+ (* a pending event consumes the WFE instead of waiting *)
+ if EventRegistered() then
+ ClearEventRegister()
+ else {
+ (* check WFx traps from lowest to highest EL before waiting *)
+ if PSTATE_EL == EL0 then
+ AArch64_CheckForWFxTrap(EL1, true);
+ if HaveEL(EL2) & ~(IsSecure()) & (PSTATE_EL == EL0 | PSTATE_EL == EL1) then
+ AArch64_CheckForWFxTrap(EL2, true);
+ if HaveEL(EL3) & PSTATE_EL != EL3 then
+ AArch64_CheckForWFxTrap(EL3, true);
+ WaitForEvent();
+ }
+ }
+
+ case SystemHintOp_WFI -> {
+ if ~(InterruptPending()) then {
+ if PSTATE_EL == EL0 then
+ AArch64_CheckForWFxTrap(EL1, false);
+ if HaveEL(EL2) & ~(IsSecure()) & (PSTATE_EL == EL0 | PSTATE_EL == EL1) then
+ AArch64_CheckForWFxTrap(EL2, false);
+ if HaveEL(EL3) & PSTATE_EL != EL3 then
+ AArch64_CheckForWFxTrap(EL3, false);
+ WaitForInterrupt();
+ };
+ }
+
+ case SystemHintOp_SEV ->
+ SendEvent()
+
+ case SystemHintOp_SEVL ->
+ EventRegisterSet()
+
+ case _ -> () (* do nothing *)
+ }
+}
+
+(* CLREX *)
+function clause decodeSystem (0b1101010100:[0]:0b00:0b011:0b0011:(bit[4]) CRm:0b010:0b11111) = {
+ (* ARM: // CRm field is ignored *)
+ (uinteger) imm := CRm; (* we need CRm for pretty printing *)
+ Some(ClearExclusiveMonitor(imm));
+}
+
+function clause execute (ClearExclusiveMonitor(imm)) = {
+ ClearExclusiveLocal(ProcessorID());
+}
+
+(* DMB opc=0b01*)
+(* DSB opc=0b00*)
+(* ISB opc=0b10*)
+function clause decodeSystem (0b1101010100:[0]:0b00:0b011:0b0011:(bit[4]) CRm:[1]:opc:0b11111) = {
+ (* TODO: according to ARM, HCR_EL2.BSU affect the domain, but the pseudo
+ code doesn't show any signs of that *)
+ (MemBarrierOp) op := 0; (* ARM: uninitialized *)
+ (MBReqDomain) domain := 0; (* ARM: uninitialized *)
+ (MBReqTypes) types := 0; (* ARM: uninitialized *)
+
+ switch opc {
+ case 0b00 -> op := MemBarrierOp_DSB
+ case 0b01 -> op := MemBarrierOp_DMB
+ case 0b10 -> op := MemBarrierOp_ISB
+ case _ -> UnallocatedEncoding()
+ };
+
+ (* CRm[3:2] selects the shareability domain *)
+ switch CRm[3..2] {
+ case 0b00 -> domain := MBReqDomain_OuterShareable
+ case 0b01 -> domain := MBReqDomain_Nonshareable
+ case 0b10 -> domain := MBReqDomain_InnerShareable
+ case 0b11 -> domain := MBReqDomain_FullSystem
+ };
+
+ (* CRm[1:0] selects the access types; 0b00 is promoted to a full-system
+ barrier over all accesses *)
+ switch CRm[1..0] {
+ case 0b01 -> types := MBReqTypes_Reads
+ case 0b10 -> types := MBReqTypes_Writes
+ case 0b11 -> types := MBReqTypes_All
+ case _ -> {
+ types := MBReqTypes_All;
+ domain := MBReqDomain_FullSystem;
+ }
+ };
+
+ Some(Barrier(op,domain,types));
+}
+
+function clause execute ( Barrier(op,domain,types) ) = {
+ switch op {
+ case MemBarrierOp_DSB ->
+ DataSynchronizationBarrier(domain, types)
+ case MemBarrierOp_DMB ->
+ DataMemoryBarrier(domain, types)
+ case MemBarrierOp_ISB ->
+ InstructionSynchronizationBarrier()
+ };
+}
+
+(* SYS L=0b0 *)
+(* SYSL L=0b1 *)
+function clause decodeSystem (0b1101010100:[L]:0b01:op1:CRn:CRm:op2:Rt) = {
+ (* FIXME: we don't allow register reads in the decoding *)
+ (* ARM: CheckSystemAccess(0b01, op1, CRn, CRm, op2, Rt, L);*)
+
+ (reg_index) t := UInt_reg(Rt);
+
+ (uinteger) sys_op0 := 1;
+ (uinteger) sys_op1 := UInt(op1);
+ (uinteger) sys_op2 := UInt(op2);
+ (uinteger) sys_crn := UInt(CRn);
+ (uinteger) sys_crm := UInt(CRm);
+ (boolean) has_result := (L == 1);
+
+ Some(System(t,sys_op0,sys_op1,sys_op2,sys_crn,sys_crm,has_result));
+}
+
+function clause execute ( System(t,sys_op0,sys_op1,sys_op2,sys_crn,sys_crm,has_result) ) =
+{
+ if has_result then
+ wX(t) := SysOp_R(sys_op0, sys_op1, sys_crn, sys_crm, sys_op2)
+ else
+ SysOp_W(sys_op0, sys_op1, sys_crn, sys_crm, sys_op2, rX(t));
+}
+
+(* MRS L=1 *)
+(* MSR (register) L=0 *)
+function clause decodeSystem (0b1101010100:[L]:[1]:[o0]:(bit[3]) op1:(bit[4]) CRn:(bit[4]) CRm:(bit[3]) op2:Rt) = {
+ (* FIXME: we don't allow register reads in the decoding *)
+ (* ARM: CheckSystemAccess([0b1]:[o0], op1, CRn, CRm, op2, Rt, L); *)
+
+ (reg_index) t := UInt_reg(Rt);
+
+ (uinteger) sys_op0 := 2 + UInt([o0]);
+ (uinteger) sys_op1 := UInt(op1);
+ (uinteger) sys_op2 := UInt(op2);
+ (uinteger) sys_crn := UInt(CRn);
+ (uinteger) sys_crm := UInt(CRm);
+ (boolean) read := (L == 1);
+
+ Some(MoveSystemRegister(t,sys_op0,sys_op1,sys_op2,sys_crn,sys_crm,read));
+}
+
+function clause execute ( MoveSystemRegister(t,sys_op0,sys_op1,sys_op2,sys_crn,sys_crm,read) ) = {
+ if read then
+ wX(t) := System_Get(sys_op0, sys_op1, sys_crn, sys_crm, sys_op2)
+ else
+ System_Put(sys_op0, sys_op1, sys_crn, sys_crm, sys_op2, rX(t));
+}
+
+end decodeSystem
+
+(* Model-specific instructions in the IMPLEMENTATION DEFINED encoding space,
+   used to drive the Sail interpreter / ppcmem tooling. *)
+val forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ bit[32] -> option<(ast<'R,'D>)> effect pure decodeImplementationDefined
+scattered function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> decodeImplementationDefined
+
+(* instructions to signal the Sail interpreter that test begins/ends *)
+function clause decodeImplementationDefined (0b1101010100:[0]:0b01:0b011:[1]:[_]:0b11:0b0000:0b000:0b0000:[isEnd]) =
+{
+ Some(ImplementationDefinedTestBeginEnd(isEnd));
+}
+
+function clause execute ( ImplementationDefinedTestBeginEnd(isEnd) ) =
+{
+ if isEnd then
+ info("test ends")
+ else
+ info("test begins");
+}
+
+(* instructions to signal ppcmem to stop fetching *)
+function clause decodeImplementationDefined (0b1101010100:[0]:0b01:0b011:[1]:[_]:0b11:0b0000:0b000:0b00010) =
+{
+ Some(ImplementationDefinedStopFetching)
+}
+
+function clause execute ( ImplementationDefinedStopFetching ) =
+{
+ info("stop fetching instructions");
+}
+
+end decodeImplementationDefined
+
+(* TBNZ *)
+(* TBZ *)
+(* Test a single register bit (position b5:b40) and branch PC-relative by a
+   signed, word-aligned 14-bit offset. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect {escape} decodeTestBranchImmediate ([b5]:0b011011:[op]:(bit[5]) b40:(bit[14]) imm14:Rt) = {
+ (reg_index) t := UInt_reg(Rt);
+
+ ([:'R:]) datasize := if b5 == 1 then 64 else 32;
+ (uinteger) bit_pos := UInt([b5]:b40);
+ (bit) bit_val := op;
+ (bit[64]) offset := SignExtend(imm14:0b00);
+
+ Some(TestBitAndBranch(t,datasize,bit_pos,bit_val,offset));
+}
+
+function clause execute ( TestBitAndBranch(t,datasize,bit_pos,bit_val,offset) ) = {
+ (bit['R]) operand := rX(t);
+
+ if operand[bit_pos] == bit_val then
+ BranchTo(rPC() + offset, BranchType_JMP);
+}
+
+(* B *)
+(* BL *)
+(* Unconditional PC-relative branch; BL (op=1) also writes the return
+   address to X30. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect {escape} decodeUnconditionalBranchImmediate ([op]:0b00101:(bit[26]) imm26) = {
+ (BranchType) _branch_type := if op == 1 then BranchType_CALL else BranchType_JMP;
+ (bit[64]) offset := SignExtend(imm26:0b00);
+
+ Some(BranchImmediate(_branch_type,offset));
+}
+
+function clause execute (BranchImmediate(_branch_type,offset)) = {
+ (* link register (X30) gets the address of the next instruction *)
+ if _branch_type == BranchType_CALL then wX(30) := rPC() + 4;
+
+ BranchTo(rPC() + offset, _branch_type);
+}
+
+
+(* Unconditional branches through a register (BR/BLR/RET/ERET/DRPS),
+   decoded by scattered clauses. *)
+val forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ bit[32] -> option<(ast<'R,'D>)> effect {escape} decodeUnconditionalBranchRegister
+scattered function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> decodeUnconditionalBranchRegister
+
+(* BLR *)
+(* BR *)
+(* RET *)
+function clause decodeUnconditionalBranchRegister (0b1101011:0b00:op:0b11111:0b000000:Rn:0b00000) = {
+ (reg_index) n := UInt_reg(Rn);
+ (BranchType) _branch_type := 0; (* ARM: uninitialized *)
+
+ switch op {
+ case 0b00 -> _branch_type := BranchType_JMP
+ case 0b01 -> _branch_type := BranchType_CALL
+ case 0b10 -> _branch_type := BranchType_RET
+ case _ -> UnallocatedEncoding()
+ };
+
+ Some(BranchRegister(n,_branch_type));
+}
+
+function clause execute (BranchRegister(n,_branch_type)) =
+{
+ (* read the target before the possible X30 update, so BLR X30 works *)
+ (bit[64]) target := rX(n);
+
+ if _branch_type == BranchType_CALL then wX(30) := rPC() + 4;
+ BranchTo(target, _branch_type);
+}
+
+(* ERET *)
+function clause decodeUnconditionalBranchRegister (0b1101011:0b0100:0b11111:0b000000:0b11111:0b00000) =
+{
+ (* FIXME: we don't allow register reads in the decoding *)
+ (* ARM: if PSTATE_EL == EL0 then UnallocatedEncoding(); *)
+
+ Some(ExceptionReturn);
+}
+
+function clause execute (ExceptionReturn) =
+{
+ AArch64_ExceptionReturn(rELR'(), rSPSR());
+}
+
+(* DRPS *)
+function clause decodeUnconditionalBranchRegister (0b1101011:0b0101:0b11111:0b000000:0b11111:0b00000) =
+{
+ (* FIXME: we don't allow register reads in the decoding *)
+ (* ARM: if ~(Halted()) | PSTATE_EL == EL0 then UnallocatedEncoding(); *)
+
+ Some(DebugRestorePState);
+}
+
+function clause execute (DebugRestorePState) =
+{
+ DRPSInstruction();
+}
+
+end decodeUnconditionalBranchRegister
+
+(* AdvSIMD load/store decode stubs: these encodings are not modelled; each
+   halts via not_implemented (the trailing Some(Unallocated) is unreachable
+   but keeps the return type). *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect {escape} decodeAdvSIMDLoadStoreMultiStruct ((bit[32]) machineCode) =
+{
+ not_implemented("decodeAdvSIMDLoadStoreMultiStruct");
+ Some(Unallocated);
+}
+
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect {escape} decodeAdvSIMDLoadStoreMultiStructPostIndexed ((bit[32]) machineCode) =
+{
+ not_implemented("decodeAdvSIMDLoadStoreMultiStructPostIndexed");
+ Some(Unallocated);
+}
+
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect {escape} decodeAdvSIMDLoadStoreSingleStruct ((bit[32]) machineCode) =
+{
+ not_implemented("decodeAdvSIMDLoadStoreSingleStruct");
+ Some(Unallocated);
+}
+
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect {escape} decodeAdvSIMDLoadStoreSingleStructPostIndexed ((bit[32]) machineCode) =
+{
+ not_implemented("decodeAdvSIMDLoadStoreSingleStructPostIndexed");
+ Some(Unallocated);
+}
+
+(* LDR (literal) opc=0b0_ *)
+(* LDRSW (literal) opc=0b10 *)
+(* PRFM (literal) opc=0b11 *)
+(* PC-relative literal load (or prefetch); `size` is in bytes. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLoadRegisterLiteral ((bit[2]) opc:0b011:[0]:0b00:(bit[19]) imm19:Rt) =
+{
+ (reg_index) t := UInt_reg(Rt);
+ (MemOp) memop := MemOp_LOAD;
+ (boolean) _signed := false;
+ (uinteger) size := 0; (* ARM: uninitialized *)
+ (bit[64]) offset := 4; (* ARM: uninitialized *)
+
+ switch opc {
+ case 0b00 ->
+ size := 4 (* LDR (literal) 32 *)
+ case 0b01 ->
+ size := 8 (* LDR (literal) 64 *)
+ case 0b10 -> { (* LDRSW (literal) *)
+ size := 4;
+ _signed := true;
+ }
+ case 0b11 -> (* PRFM (literal) *)
+ memop := MemOp_PREFETCH
+ };
+
+ offset := SignExtend(imm19:0b00);
+
+ ([:'D:]) datasize := size*8; (* not in ARM ARM *)
+
+ Some(LoadLiteral(t,memop,_signed,size,offset,datasize));
+}
+
+function clause execute ( LoadLiteral(t,memop,_signed,size,offset,datasize) ) = {
+ (* literal loads are PC-relative *)
+ (bit[64]) address := rPC() + offset;
+ (bit['D]) data := 0; (* ARM: uninitialized *)
+
+ switch memop {
+ case MemOp_LOAD -> {
+ data := flush_read_buffer(
+ rMem(empty_read_buffer, address, size, AccType_NORMAL),
+ size
+ );
+ if _signed then
+ wX(t) := (bit[64]) (SignExtend(data))
+ else
+ wX(t) := data;
+ }
+ case MemOp_PREFETCH ->
+ Prefetch(address,(bit[5]) t)
+ };
+}
+
+(* LDAR size=0b1_,o2=1,L=1,o1=0,o0=1,rs=(1)(1)(1)(1)(1),rt2=(1)(1)(1)(1)(1) *)
+(* LDARB size=0b00,o2=1,L=1,o1=0,o0=1,rs=(1)(1)(1)(1)(1),rt2=(1)(1)(1)(1)(1) *)
+(* LDARH size=0b01,o2=1,L=1,o1=0,o0=1,rs=(1)(1)(1)(1)(1),rt2=(1)(1)(1)(1)(1) *)
+(* LDAXP size=0b1_,o2=0,L=1,o1=1,o0=1,rs=(1)(1)(1)(1)(1) *)
+(* LDAXR size=0b1_,o2=0,L=1,o1=0,o0=1,rs=(1)(1)(1)(1)(1),rt2=(1)(1)(1)(1)(1) *)
+(* LDAXRB size=0b00,o2=0,L=1,o1=0,o0=1,rs=(1)(1)(1)(1)(1),rt2=(1)(1)(1)(1)(1) *)
+(* LDAXRH size=0b01,o2=0,L=1,o1=0,o0=1,rs=(1)(1)(1)(1)(1),rt2=(1)(1)(1)(1)(1) *)
+(* LDXP size=0b1_,o2=0,L=1,o1=1,o0=0,rs=(1)(1)(1)(1)(1) *)
+(* LDXR size=0b1_,o2=0,L=1,o1=0,o0=0,rs=(1)(1)(1)(1)(1),rt2=(1)(1)(1)(1)(1) *)
+(* LDXRB size=0b00,o2=0,L=1,o1=0,o0=0,rs=(1)(1)(1)(1)(1),rt2=(1)(1)(1)(1)(1) *)
+(* LDXRH size=0b01,o2=0,L=1,o1=0,o0=0,rs=(1)(1)(1)(1)(1),rt2=(1)(1)(1)(1)(1) *)
+(* STLR size=0b1_,o2=1,L=0,o1=0,o0=1,rs=(1)(1)(1)(1)(1),rt2=(1)(1)(1)(1)(1) *)
+(* STLRB size=0b00,o2=1,L=0,o1=0,o0=1,rs=(1)(1)(1)(1)(1),rt2=(1)(1)(1)(1)(1) *)
+(* STLRH size=0b01,o2=1,L=0,o1=0,o0=1,rs=(1)(1)(1)(1)(1),rt2=(1)(1)(1)(1)(1) *)
+(* STLXP size=0b1_,o2=0,L=0,o1=1,o0=1 *)
+(* STLXR size=0b1_,o2=0,L=0,o1=0,o0=1, rt2=(1)(1)(1)(1)(1) *)
+(* STLXRB size=0b00,o2=0,L=0,o1=0,o0=1, rt2=(1)(1)(1)(1)(1) *)
+(* STLXRH size=0b01,o2=0,L=0,o1=0,o0=1, rt2=(1)(1)(1)(1)(1) *)
+(* STXP size=0b1_,o2=0,L=0,o1=1,o0=0 *)
+(* STXR size=0b1_,o2=0,L=0,o1=0,o0=0, rt2=(1)(1)(1)(1)(1) *)
+(* STXRB size=0b00,o2=0,L=0,o1=0,o0=0, rt2=(1)(1)(1)(1)(1) *)
+(* STXRH size=0b01,o2=0,L=0,o1=0,o0=0, rt2=(1)(1)(1)(1)(1) *)
+(* Shared decoder for the whole load/store exclusive / acquire-release
+   group; the o2/L/o1/o0 bits select exclusivity, direction, pairing and
+   ordering. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLoadStoreExclusive ((bit[2]) size:0b001000:[o2]:[L]:[o1]:Rs:[o0]:Rt2:Rn:Rt) =
+{
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) t := UInt_reg(Rt);
+ (reg_index) t2 := UInt_reg(Rt2); (* ignored by load/store single register *)
+ (reg_index) s := UInt_reg(Rs); (* ignored by all loads and store-release *)
+
+ if ([o2,o1,o0]) == 0b100 | ([o2,o1] == 0b11) then UnallocatedEncoding();
+ if o1 == 1 & size[1] == 0 then UnallocatedEncoding();
+
+ (AccType) acctype := if o0 == 1 then AccType_ORDERED else AccType_ATOMIC;
+ (boolean) excl := (o2 == 0);
+ (boolean) pair := (o1 == 1);
+ (MemOp) memop := if L == 1 then MemOp_LOAD else MemOp_STORE;
+ (* element size in bits: 8 << UInt(size) *)
+ elsize := lsl(8, UInt(size));
+ ([:'R:]) regsize := if elsize == 64 then 64 else 32;
+ ([:'D:]) datasize := if pair then elsize * 2 else elsize;
+
+ (* lemma: pair --> datasize = 2*regsize *)
+ (* follows from "if o1 == 1 & size[1] == 0 then UnallocatedEncoding();" *)
+
+ Some(LoadStoreAcqExc(n,t,t2,s,acctype,excl,pair,memop,elsize,regsize,datasize));
+}
+
+(* Execute the load/store exclusive / acquire-release instructions.
+   Stores first announce their address, then perform the (possibly
+   exclusive) write; exclusive stores report success/failure in Ws.
+   Exclusive loads set the exclusive monitors before reading. *)
+function clause execute ( LoadStoreAcqExc(n,t,t2,s,acctype,excl,pair,memop,elsize,regsize,datasize) ) = {
+ (bit[64]) address := 0; (* ARM: uninitialized *)
+ (bit['D]) data := 0; (* ARM: uninitialized *)
+ (*constant*) (uinteger) dbytes := datasize quot 8;
+ (boolean) rt_unknown := false;
+ (boolean) rn_unknown := false;
+
+ (* CONSTRAINED UNPREDICTABLE cases are conservatively treated as
+    UnallocatedEncoding; the ARM pseudocode alternatives are kept in
+    the comments below *)
+ if memop == MemOp_LOAD & pair & t == t2 then {
+ UnallocatedEncoding(); (* ARM:
+ (Constraint) c := ConstrainUnpredictable();
+ assert( vIN(c, [Constraint_UNKNOWN, Constraint_UNDEF, Constraint_NOP]) );
+ switch c {
+ case Constraint_UNKNOWN -> rt_unknown =: true (* result is UNKNOWN *)
+ case Constraint_UNDEF -> UnallocatedEncoding()
+ case Constraint_NOP -> EndOfInstruction()
+ };*)
+ };
+
+ if memop == MemOp_STORE & excl then {
+ if s == t | (pair & s == t2) then {
+ UnallocatedEncoding(); (* ARM:
+ (Constraint) c := ConstrainUnpredictable();
+ assert( vIN(c, [Constraint_UNKNOWN, Constraint_NONE, Constraint_UNDEF, Constraint_NOP]) );
+ switch c {
+ case Constraint_UNKNOWN -> rt_unknown := true (* store UNKNOWN value *)
+ case Constraint_NONE -> rt_unknown := false (* store original value *)
+ case Constraint_UNDEF -> UnallocatedEncoding()
+ case Constraint_NOP -> EndOfInstruction()
+ };*)
+ };
+ if s == n & n != 31 then {
+ UnallocatedEncoding(); (* ARM:
+ (Constraint) c := ConstrainUnpredictable();
+ assert( vIN(c, [Constraint_UNKNOWN, Constraint_NONE, Constraint_UNDEF, Constraint_NOP]) );
+ switch c {
+ case Constraint_UNKNOWN -> rn_unknown := true (* address is UNKNOWN *)
+ case Constraint_NONE -> rn_unknown := false (* address is original base *)
+ case Constraint_UNDEF -> UnallocatedEncoding()
+ case Constraint_NOP -> EndOfInstruction()
+ };*)
+ };
+ };
+
+ (* n == 31 means the stack pointer is the base register *)
+ if n == 31 then {
+ CheckSPAlignment();
+ address := rSP();
+ } else if rn_unknown then
+ address := (bit[64]) UNKNOWN
+ else
+ address := rX(n);
+
+ switch memop {
+ case MemOp_STORE -> {
+ (* announce the address *)
+ wMem_Addr(address, dbytes, acctype, excl);
+
+ if rt_unknown then
+ data := (bit['D]) UNKNOWN
+ else if pair then {
+ assert( excl, None );
+ (bit['R]) el1 := rX(t); (* ARM: bit[datasize / 2] see lemma in the decoding *)
+ (bit['R]) el2 := rX(t2); (* ARM: bit[datasize / 2] see lemma in the decoding *)
+ data := if BigEndian() then el1:el2 else el2:el1;
+ } else
+ data := rX(t);
+
+ if excl then {
+ (* store {release} exclusive register|pair (atomic) *)
+ (bit) status := 1;
+ (* Check whether the Exclusive Monitors are set to include the *)
+ (* physical memory locations corresponding to virtual address *)
+ (* range [address, address+dbytes-1]. *)
+ if AArch64_ExclusiveMonitorsPass(address, dbytes) then {
+ (* This atomic write will be rejected if it does not refer *)
+ (* to the same physical locations after address translation. *)
+ (* ARM: wMem(address, dbytes, acctype) := data;
+ status := ExclusiveMonitorsStatus(); *)
+ status := flush_write_buffer_exclusive(
+ wMem_exclusive(empty_write_buffer, address, dbytes, acctype, data)
+ );
+ };
+ (* status: 0 = success, 1 = failure, written to Ws *)
+ wX(s) := (bit[32]) (ZeroExtend([status]));
+ } else {
+ (* store release register (atomic) *)
+ flush_write_buffer(
+ wMem(empty_write_buffer, address, dbytes, acctype, data)
+ );
+ };
+ }
+
+ case MemOp_LOAD -> {
+ if excl then {
+ (* Tell the Exclusive Monitors to record a sequence of one or more atomic *)
+ (* memory reads from virtual address range [address, address+dbytes-1]. *)
+ (* The Exclusive Monitor will only be set if all the reads are from the *)
+ (* same dbytes-aligned physical address, to allow for the possibility of *)
+ (* an atomicity break if the translation is changed between reads. *)
+ AArch64_SetExclusiveMonitors(address, dbytes);
+ };
+
+ if pair then {
+ (* load exclusive pair *)
+ assert( excl, None );
+ if rt_unknown then
+ (* ConstrainedUNPREDICTABLE case *)
+ wX(t) := (bit['D]) UNKNOWN
+ else if elsize == 32 then {
+ (* 32-bit load exclusive pair (atomic) *)
+ data := flush_read_buffer(
+ rMem_exclusive(empty_read_buffer, address, dbytes, acctype),
+ dbytes
+ );
+ if BigEndian() then {
+ wX(t) := data[(datasize - 1)..elsize];
+ wX(t2) := data[(elsize - 1)..0];
+ } else {
+ wX(t) := data[(elsize - 1)..0];
+ wX(t2) := data[(datasize - 1)..elsize];
+ };
+ } else { (* elsize == 64 *)
+ (* 64-bit load exclusive pair (not atomic), *)
+ (* but must be 128-bit aligned *)
+ if address != Align(address, dbytes) then {
+ (boolean) iswrite := false;
+ (boolean) secondstage := false;
+ AArch64_Abort(address, AArch64_AlignmentFault(acctype, iswrite, secondstage));
+ };
+ (read_buffer_type) read_buffer := rMem_exclusive(empty_read_buffer, address + 0, 8, acctype);
+ read_buffer := rMem_exclusive(read_buffer, address + 8, 8, acctype);
+ (bit[128]) value := flush_read_buffer(read_buffer, 8*2);
+ wX(t) := value[63..0];
+ wX(t2) := value[127..64];
+ };
+ } else {
+ (* load {acquire} {exclusive} single register *)
+ data := flush_read_buffer(
+ rMem'(empty_read_buffer, address, dbytes, acctype, excl),
+ dbytes
+ );
+ wX(t) := (bit['R]) (ZeroExtend(data));
+ };
+ }
+ };
+}
+
+(* LDNP *)
+(* STNP *)
+(* Decode LDNP/STNP (load/store pair with a non-temporal/streaming
+   hint, signed-offset addressing only; no writeback forms exist). *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLoadStoreNoAllocatePairOffset ((bit[2]) opc:0b101:[0]:0b000:[L]:(bit[7]) imm7:Rt2:Rn:Rt) =
+{
+ (boolean) wback := false;
+ (boolean) postindex := false;
+
+ (* Shared decode *)
+
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) t := UInt_reg(Rt);
+ (reg_index) t2 := UInt_reg(Rt2);
+ (AccType) acctype := AccType_STREAM; (* non-temporal access hint *)
+ (MemOp) memop := if L == 1 then MemOp_LOAD else MemOp_STORE;
+ if opc[0] == 1 then UnallocatedEncoding();
+ (uinteger) scale := 2 + UInt([opc[1]]); (* 2 -> 32-bit, 3 -> 64-bit elements *)
+ ([:'D:]) datasize := lsl(8, scale);
+ (bit[64]) offset := LSL(SignExtend(imm7), scale);
+
+ Some(LoadStorePairNonTemp(wback,postindex,n,t,t2,acctype,memop,scale,datasize,offset));
+}
+
+(* Execute LDNP/STNP (load/store pair, non-temporal hint).
+   Fix: the original wrote to a misspelled variable "date1"/"date2"
+   in three places (the rt_unknown store paths and the rt_unknown
+   load path); the declared locals are data1/data2, so the UNKNOWN
+   value was assigned to a never-read variable. *)
+function clause execute ( LoadStorePairNonTemp(wback,postindex,n,t,t2,acctype,memop,scale,datasize,offset) ) = {
+ (bit[64]) address := 0; (* ARM: uninitialized *)
+ (bit['D]) data1 := 0; (* ARM: uninitialized *)
+ (bit['D]) data2 := 0; (* ARM: uninitialized *)
+ (*constant*) (uinteger) dbytes := datasize quot 8;
+ (boolean) rt_unknown := false;
+
+ (* CONSTRAINED UNPREDICTABLE: load pair with t == t2 *)
+ if memop == MemOp_LOAD & t == t2 then {
+ UnallocatedEncoding(); (* ARM:
+ Constraint c = ConstrainUnpredictable();
+ assert c IN {Constraint_UNKNOWN, Constraint_UNDEF, Constraint_NOP};
+
+ case c of
+ when Constraint_UNKNOWN rt_unknown = TRUE; // result is UNKNOWN
+ when Constraint_UNDEF UnallocatedEncoding();
+ when Constraint_NOP EndOfInstruction();
+ };*)
+ };
+
+ (* n == 31 means the stack pointer is the base register *)
+ if n == 31 then {
+ CheckSPAlignment();
+ address := rSP();
+ } else
+ address := rX(n);
+
+ if ~(postindex) then
+ address := address + offset;
+
+ if memop == MemOp_STORE then
+ (* announce the address *)
+ wMem_Addr(address, dbytes * 2, acctype, false);
+
+ (* this is a hack to allow the address write back to be observed
+ before the memory access *)
+ if wback then {
+ let address' = (bit[64]) (if ~(postindex) then address else (address + offset)) in
+ if n == 31 then
+ wSP() := address'
+ else
+ wX(n) := address'
+ };
+
+ switch memop {
+ case MemOp_STORE -> {
+ if rt_unknown & t == n then
+ data1 := (bit['D]) UNKNOWN
+ else
+ data1 := rX(t);
+ if rt_unknown & t2 == n then
+ data2 := (bit['D]) UNKNOWN
+ else
+ data2 := rX(t2);
+
+ (write_buffer_type) write_buffer := wMem(empty_write_buffer, address + 0, dbytes, acctype, data1);
+ write_buffer := wMem(write_buffer, address + dbytes, dbytes, acctype, data2);
+ flush_write_buffer(write_buffer);
+ }
+
+ case MemOp_LOAD -> {
+ (read_buffer_type) read_buffer := rMem(empty_read_buffer, address + 0 , dbytes, acctype);
+ read_buffer := rMem(read_buffer, address + dbytes, dbytes, acctype);
+ (bit['D*2]) read_data := flush_read_buffer(read_buffer, dbytes*2);
+ data1 := read_data[(datasize - 1)..0];
+ data2 := read_data[((datasize * 2) - 1)..datasize];
+
+ if rt_unknown then {
+ data1 := (bit['D]) UNKNOWN;
+ data2 := (bit['D]) UNKNOWN;
+ };
+ wX(t) := data1;
+ wX(t2) := data2;
+ }
+ };
+
+(* ARM: the following code was moved up, see note there
+ if wback then {
+ if postindex then
+ address := address + offset;
+ if n == 31 then
+ wSP() := address
+ else
+ wX(n) := address;
+ };
+*)
+}
+
+(* Shared decode for the load/store (immediate) family: classifies the
+   opc/size fields into memop (LOAD/STORE/PREFETCH), signedness and
+   register size, and builds a LoadImmediate ast node. acctype is
+   supplied by the caller (UNPRIV for LDT*/STT*, NORMAL otherwise);
+   prefetchAllowed distinguishes PRFM-capable encodings. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> sharedDecodeLoadImmediate((bit[2]) opc,(bit[2]) size,Rn,Rt,wback,postindex,(uinteger) scale,offset,acctype,(bool) prefetchAllowed) =
+{
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) t := UInt_reg(Rt);
+ (* ARM: inorder to unify the decoding acctype was moved out.
+ AccType_UNPRIV for LDT*/STT* and AccType_NORMAL for the rest.
+ (AccType) acctype := AccType_NORMAL/AccType_UNPRIV; *)
+ (MemOp) memop := 0; (* ARM: uninitialized *)
+ (boolean) _signed := false; (* ARM: uninitialized *)
+ ([:'R:]) regsize := 64; (* ARM: uninitialized *)
+
+ if opc[1] == 0 then {
+ (* store or zero-extending load *)
+ memop := if opc[0] == 1 then MemOp_LOAD else MemOp_STORE;
+ regsize := if size == 0b11 then 64 else 32;
+ _signed := false;
+ } else {
+ if size == 0b11 then {
+ (* ARM: we use the following if/else to unify the decoding *)
+ if prefetchAllowed then {
+ memop := MemOp_PREFETCH;
+ if opc[0] == 1 then UnallocatedEncoding();
+ } else {
+ (* no unprivileged prefetch *)
+ UnallocatedEncoding();
+ };
+ } else {
+ (* sign-extending load *)
+ memop := MemOp_LOAD;
+ if size == 0b10 & opc[0] == 1 then UnallocatedEncoding();
+ regsize := if opc[0] == 1 then 32 else 64;
+ _signed := true;
+ };
+ };
+
+ ([:'D:]) datasize := lsl(8, scale);
+
+ Some(LoadImmediate(n,t,acctype,memop,_signed,wback,postindex,offset,regsize,datasize));
+}
+
+(* LDR/STR (immediate) post-index *)
+(* LDRB/STRB (immediate) post-index *)
+(* LDRH/STRH (immediate) post-index *)
+(* LDRSB (immediate) post-index *)
+(* LDRSH (immediate) post-index *)
+(* LDRSW (immediate) post-index *)
+(* Decode the post-indexed immediate forms (LDR/STR and the
+   byte/halfword/sign-extending variants listed above): writeback
+   enabled, offset applied after the access, offset = SignExtend(imm9). *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLoadStoreRegisterImmediatePostIndexed (size:0b111:[0]:0b00:opc:[0]:(bit[9]) imm9:0b01:Rn:Rt) = {
+ (boolean) wback := true;
+ (boolean) postindex := true;
+ (uinteger) scale := UInt(size);
+ (bit[64]) offset := SignExtend(imm9);
+
+ (* no prefetch variant exists for post-indexed addressing *)
+ sharedDecodeLoadImmediate(opc,size,Rn,Rt,wback,postindex,scale,offset,AccType_NORMAL,false);
+}
+
+(* Execute the load/store (immediate) family: computes the address
+   (pre-index applies the offset before the access), announces store
+   addresses, performs writeback before the access (see hack note),
+   then does the load/store/prefetch. *)
+function clause execute ( LoadImmediate(n,t,acctype,memop,_signed,wback,postindex,offset,regsize,datasize) ) = {
+ (bit[64]) address := 0; (* ARM: uninitialized *)
+ (bit['D]) data := 0; (* ARM: uninitialized *)
+ (boolean) wb_unknown := false;
+ (boolean) rt_unknown := false;
+
+ (* CONSTRAINED UNPREDICTABLE: writeback with base == transfer register *)
+ if memop == MemOp_LOAD & wback & n == t & n != 31 then {
+ UnallocatedEncoding(); (* ARM:
+ Constraint c := ConstrainUnpredictable();
+ assert( c vIN [Constraint_WBSUPPRESS, Constraint_UNKNOWN, Constraint_UNDEF, Constraint_NOP] );
+ switch c {
+ case Constraint_WBSUPPRESS -> wback := false (* writeback is suppressed *)
+ case Constraint_UNKNOWN -> wb_unknown := true (* writeback is UNKNOWN *)
+ case Constraint_UNDEF -> UnallocatedEncoding()
+ case Constraint_NOP -> EndOfInstruction()
+ };*)
+ };
+
+ if memop == MemOp_STORE & wback & n == t & n != 31 then {
+ UnallocatedEncoding(); (* ARM:
+ Constraint c := ConstrainUnpredictable();
+ assert( c vIN [Constraint_NONE, Constraint_UNKNOWN, Constraint_UNDEF, Constraint_NOP] );
+ switch c {
+ case Constraint_NONE -> rt_unknown := false (* value stored is original value *)
+ case Constraint_UNKNOWN -> rt_unknown := true (* value stored is UNKNOWN *)
+ case Constraint_UNDEF -> UnallocatedEncoding()
+ case Constraint_NOP -> EndOfInstruction()
+ };*)
+ };
+
+ (* n == 31 means the stack pointer is the base register *)
+ if n == 31 then {
+ if memop != MemOp_PREFETCH then CheckSPAlignment();
+ address := rSP();
+ } else
+ address := rX(n);
+
+ if ~(postindex) then
+ address := address + offset;
+
+ if memop == MemOp_STORE then
+ (* announce the address *)
+ wMem_Addr(address, datasize quot 8, acctype, false);
+
+ (* this is a hack to allow the address write back to be observed
+ before the memory access *)
+ if wback then {
+ let address' = (bit[64]) (if ~(postindex) then address else (address + offset)) in
+ if n == 31 then
+ wSP() := address'
+ else
+ wX(n) := address'
+ };
+
+ switch memop {
+ case MemOp_STORE -> {
+ if rt_unknown then
+ data := (bit['D]) UNKNOWN
+ else
+ data := rX(t);
+
+ flush_write_buffer(
+ wMem(empty_write_buffer, address, datasize quot 8, acctype, data)
+ );
+ }
+
+ case MemOp_LOAD -> {
+ data := flush_read_buffer(
+ rMem(empty_read_buffer, address, datasize quot 8, acctype),
+ datasize quot 8
+ );
+ if _signed then
+ wX(t) := (bit['R]) (SignExtend(data)) (* ARM: regsize *)
+ else
+ wX(t) := (bit['R]) (ZeroExtend(data)); (* ARM: regsize *)
+ }
+
+ case MemOp_PREFETCH ->
+ (* for PRFM the Rt field encodes the prefetch operation, not a register *)
+ Prefetch(address,(bit[5]) t)
+ };
+
+(* ARM: the following code was moved up, see note there
+ if wback then {
+ if wb_unknown then
+ address := (bit[64]) UNKNOWN
+ else if postindex then
+ address := address + offset;
+ if n == 31 then
+ wSP() := address
+ else
+ wX(n) := address;
+ };
+*)
+}
+
+(* LDR/STR (immediate) pre-index *)
+(* LDRB/STRB (immediate) pre-index *)
+(* LDRH/STRH (immediate) pre-index *)
+(* LDRSB (immediate) pre-index *)
+(* LDRSH (immediate) pre-index *)
+(* LDRSW (immediate) pre-index *)
+(* Decode the pre-indexed immediate forms (LDR/STR and the
+   byte/halfword/sign-extending variants listed above): writeback
+   enabled, offset applied before the access, offset = SignExtend(imm9). *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLoadStoreRegisterImmediatePreIndexed (size:0b111:[0]:0b00:opc:[0]:(bit[9]) imm9:0b11:Rn:Rt) =
+{
+ (boolean) wback := true;
+ (boolean) postindex := false;
+ (uinteger) scale := UInt(size);
+ (bit[64]) offset := SignExtend(imm9);
+
+ (* no prefetch variant exists for pre-indexed addressing *)
+ sharedDecodeLoadImmediate(opc,size,Rn,Rt,wback,postindex,scale,offset,AccType_NORMAL,false);
+}
+
+(* Shared decode for the load/store (register offset) family:
+   classifies opc/size into memop (LOAD/STORE/PREFETCH), signedness
+   and register size, and builds a LoadRegister ast node.
+   Fix: _signed was initialized with the integer 0 instead of the
+   boolean false (compare sharedDecodeLoadImmediate, which uses
+   false for the same "ARM: uninitialized" placeholder). *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}. option<(ast<'R,'D>)> sharedDecodeLoadRegister(Rn,Rt,Rm,(bit[2]) opc,(bit[2]) size,wback,postindex,(uinteger) scale,extend_type,shift) = {
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) t := UInt_reg(Rt);
+ (reg_index) m := UInt_reg(Rm);
+ (AccType) acctype := AccType_NORMAL;
+ (MemOp) memop := 0; (* ARM: uninitialized *)
+ (boolean) _signed := false; (* ARM: uninitialized *)
+ ([:'R:]) regsize := 64; (* ARM: uninitialized *)
+
+ if opc[1] == 0 then {
+ (* store or zero-extending load *)
+ memop := if opc[0] == 1 then MemOp_LOAD else MemOp_STORE;
+ regsize := if size == 0b11 then 64 else 32;
+ _signed := false;
+ } else {
+ if size == 0b11 then {
+ memop := MemOp_PREFETCH;
+ if opc[0] == 1 then UnallocatedEncoding();
+ } else {
+ (* sign-extending load *)
+ memop := MemOp_LOAD;
+ if size == 0b10 & opc[0] == 1 then UnallocatedEncoding();
+ regsize := if opc[0] == 1 then 32 else 64;
+ _signed := true;
+ };
+ };
+
+ ([:'D:]) datasize := lsl(8, scale);
+
+ Some(LoadRegister(n,t,m,acctype,memop,_signed,wback,postindex,extend_type,shift,regsize,datasize));
+}
+
+(* LDR/STR (register) *)
+(* LDRB/STRB (register) *)
+(* LDRH/STRH (register) *)
+(* LDRSB (register) *)
+(* LDRSH (register) *)
+(* LDRSW (register) *)
+(* PRFM (register) *)
+(* Decode the register-offset forms (LDR/STR, byte/halfword,
+   sign-extending loads and PRFM listed above): offset comes from Rm,
+   optionally extended/shifted; no writeback forms exist. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLoadStoreRegisterRegisterOffset (size:0b111:[0]:0b00:opc:[1]:Rm:option_v:[S]:0b10:Rn:Rt) =
+{
+ (boolean) wback := false;
+ (boolean) postindex := false;
+ (uinteger) scale := UInt(size);
+ if option_v[1] == 0 then UnallocatedEncoding(); (* sub-word index *)
+ (ExtendType) extend_type := DecodeRegExtend(option_v);
+ (uinteger) shift := if S == 1 then scale else 0; (* S selects scaled offset *)
+
+ sharedDecodeLoadRegister(Rn,Rt,Rm,opc,size,wback,postindex,scale,extend_type,shift);
+}
+
+(* Execute the load/store (register offset) family: the offset is the
+   extended/shifted Rm value; otherwise the flow mirrors the immediate
+   forms (announce store address, writeback-before-access hack, then
+   load/store/prefetch). *)
+function clause execute ( LoadRegister(n,t,m,acctype,memop,_signed,wback,postindex,extend_type,shift,regsize,datasize) ) = {
+ (bit[64]) offset := ExtendReg(m, extend_type, shift);
+ (bit[64]) address := 0; (* ARM: uninitialized *)
+ (bit['D]) data := 0; (* ARM: uninitialized *)
+ (boolean) wb_unknown := false;
+ (boolean) rt_unknown := false;
+
+ (* CONSTRAINED UNPREDICTABLE: writeback with base == transfer register.
+    NOTE(review): wback is always false for register-offset encodings,
+    so these branches are dead for the current decoders. *)
+ if memop == MemOp_LOAD & wback & n == t & n != 31 then {
+ UnallocatedEncoding(); (* ARM:
+ Constraint c := ConstrainUnpredictable();
+ assert( c vIN [Constraint_WBSUPPRESS, Constraint_UNKNOWN, Constraint_UNDEF, Constraint_NOP] );
+ switch c {
+ case Constraint_WBSUPPRESS -> wback := false (* writeback is suppressed *)
+ case Constraint_UNKNOWN -> wb_unknown := true (* writeback is UNKNOWN *)
+ case Constraint_UNDEF -> UnallocatedEncoding()
+ case Constraint_NOP -> EndOfInstruction()
+ };*)
+ };
+
+ if memop == MemOp_STORE & wback & n == t & n != 31 then {
+ UnallocatedEncoding(); (* ARM:
+ Constraint c := ConstrainUnpredictable();
+ assert( c vIN [Constraint_NONE, Constraint_UNKNOWN, Constraint_UNDEF, Constraint_NOP] );
+ switch c {
+ case Constraint_NONE -> rt_unknown := false (* value stored is original value *)
+ case Constraint_UNKNOWN -> rt_unknown := true (* value stored is UNKNOWN *)
+ case Constraint_UNDEF -> UnallocatedEncoding()
+ case Constraint_NOP -> EndOfInstruction()
+ };*)
+ };
+
+ (* n == 31 means the stack pointer is the base register *)
+ if n == 31 then {
+ if memop != MemOp_PREFETCH then CheckSPAlignment();
+ address := rSP();
+ } else
+ address := rX(n);
+
+ if ~(postindex) then
+ address := address + offset;
+
+ if memop == MemOp_STORE then
+ (* announce the address *)
+ wMem_Addr(address, datasize quot 8, acctype, false);
+
+ (* this is a hack to allow the address write back to be observed
+ before the memory access *)
+ if wback then {
+ let address' = (bit[64]) (if ~(postindex) then address else (address + offset)) in
+ if n == 31 then
+ wSP() := address'
+ else
+ wX(n) := address'
+ };
+
+ switch memop {
+ case MemOp_STORE -> {
+ if rt_unknown then
+ data := (bit['D]) UNKNOWN
+ else
+ data := rX(t);
+
+ flush_write_buffer(
+ wMem(empty_write_buffer, address, datasize quot 8, acctype, data)
+ );
+ }
+
+ case MemOp_LOAD -> {
+ data := flush_read_buffer(
+ rMem(empty_read_buffer, address, datasize quot 8, acctype),
+ datasize quot 8
+ );
+ if _signed then
+ wX(t) := (bit['R]) (SignExtend(data))
+ else
+ wX(t) := (bit['R]) (ZeroExtend(data));
+ }
+
+ case MemOp_PREFETCH ->
+ (* for PRFM the Rt field encodes the prefetch operation, not a register *)
+ Prefetch(address,(bit[5]) t)
+ };
+
+(* ARM: the following code was moved up, see note there
+ if wback then {
+ if wb_unknown then
+ address := (bit[64]) UNKNOWN
+ else if postindex then
+ address := address + offset;
+ if n == 31 then
+ wSP() := address
+ else
+ wX(n) := address;
+ };
+*)
+}
+
+(* LDTR/STTR *)
+(* LDTRB/STTRB *)
+(* LDTRH/STTRH *)
+(* LDTRSB *)
+(* LDTRSH *)
+(* LDTRSW *)
+(* Decode the unprivileged forms (LDTR/STTR and variants listed
+   above): like the unscaled-immediate forms but with AccType_UNPRIV,
+   and no prefetch variant. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLoadStoreRegisterUnprivileged (size:0b111:[0]:0b00:opc:[0]:(bit[9]) imm9:0b10:Rn:Rt) =
+{
+ (boolean) wback := false;
+ (boolean) postindex := false;
+ (uinteger) scale := UInt(size);
+ (bit[64]) offset := SignExtend(imm9);
+
+ sharedDecodeLoadImmediate(opc,size,Rn,Rt,wback,postindex,scale,offset,AccType_UNPRIV,false);
+}
+
+(* LDUR/STUR *)
+(* LDURB/STURB *)
+(* LDURH/STURH *)
+(* LDURSB *)
+(* LDURSH *)
+(* LDURSW *)
+(* PRFUM *)
+(* Decode the unscaled-immediate forms (LDUR/STUR and variants listed
+   above, including PRFUM): no writeback, offset = SignExtend(imm9)
+   without scaling. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLoadStoreRegisterUnscaledImmediate (size:0b111:[0]:0b00:opc:[0]:(bit[9]) imm9:0b00:Rn:Rt) = {
+ (boolean) wback := false;
+ (boolean) postindex := false;
+ (uinteger) scale := UInt(size);
+ (bit[64]) offset := SignExtend(imm9);
+
+ (* prefetchAllowed = true: PRFUM shares this encoding *)
+ sharedDecodeLoadImmediate(opc,size,Rn,Rt,wback,postindex,scale,offset,AccType_NORMAL,true);
+}
+
+(* LDR/STR (immediate) Unsigned offset *)
+(* LDRB/STRB (immediate) Unsigned offset *)
+(* LDRH/STRH (immediate) Unsigned offset *)
+(* LDRSB (immediate) Unsigned offset *)
+(* LDRSH (immediate) Unsigned offset *)
+(* LDRSW (immediate) Unsigned offset *)
+(* PRFM (immediate) Unsigned offset *)
+(* Decode the unsigned-offset forms (LDR/STR and variants listed
+   above, including PRFM): no writeback, offset = ZeroExtend(imm12)
+   scaled by the access size. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLoadStoreRegisterUnsignedImmediate (size:0b111:[0]:0b01:opc:(bit[12]) imm12:Rn:Rt) =
+{
+ (boolean) wback := false;
+ (boolean) postindex := false;
+ (uinteger) scale := UInt(size);
+ (bit[64]) offset := LSL(ZeroExtend(imm12), scale);
+
+ (* prefetchAllowed = true: PRFM shares this encoding *)
+ sharedDecodeLoadImmediate(opc,size,Rn,Rt,wback,postindex,scale,offset,AccType_NORMAL,true);
+}
+
+(* Shared decode for LDP/STP/LDPSW: opc selects the element size and
+   (for LDPSW) sign extension; rejects the reserved opc/L combinations. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> sharedDecodeLoadStorePair(L,(bit[2]) opc,imm7,Rn,Rt,Rt2,wback,postindex) =
+{
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) t := UInt_reg(Rt);
+ (reg_index) t2 := UInt_reg(Rt2);
+ (AccType) acctype := AccType_NORMAL;
+ (MemOp) memop := if L == 1 then MemOp_LOAD else MemOp_STORE;
+ (* opc[0]=1 is only valid as LDPSW (L=1); opc=0b11 is reserved *)
+ if [L,opc[0]] == 0b01 | opc == 0b11 then UnallocatedEncoding();
+ (boolean) _signed := (opc[0] != 0);
+ (uinteger) scale := 2 + UInt([opc[1]]); (* 2 -> 32-bit, 3 -> 64-bit elements *)
+ ([:'D:]) datasize := lsl(8, scale);
+ (bit[64]) offset := LSL(SignExtend(imm7), scale);
+
+ Some(LoadStorePair(wback,postindex,n,t,t2,acctype,memop,_signed,scale,datasize,offset));
+}
+
+(* LDP signed offset *)
+(* LDPSW signed offset *)
+(* STP signed offset *)
+(* Decode LDP/LDPSW/STP with signed-offset addressing: no writeback. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLoadStoreRegisterPairOffset (opc:0b101:[0]:0b010:[L]:(bit[7]) imm7:Rt2:Rn:Rt) =
+{
+ (boolean) wback := false;
+ (boolean) postindex := false;
+
+ sharedDecodeLoadStorePair(L,opc,imm7,Rn,Rt,Rt2,wback,postindex);
+}
+
+(* Execute LDP/LDPSW/STP: two element-sized accesses at address and
+   address+dbytes. Stores announce the address first; writeback is
+   performed before the access (see hack note); LDPSW sign-extends
+   each loaded element. *)
+function clause execute ( LoadStorePair(wback,postindex,n,t,t2,acctype,memop,_signed,scale,datasize,offset) ) = {
+ (bit[64]) address := 0; (* ARM: uninitialized *)
+ (bit['D]) data1 := 0; (* ARM: uninitialized *)
+ (bit['D]) data2 := 0; (* ARM: uninitialized *)
+ (*constant*) (uinteger) dbytes := datasize quot 8;
+ (boolean) rt_unknown := false;
+ (boolean) wb_unknown := false;
+
+ (* CONSTRAINED UNPREDICTABLE cases, conservatively UnallocatedEncoding *)
+ if memop == MemOp_LOAD & wback & (t == n | t2 == n) & n != 31 then {
+ UnallocatedEncoding(); (* ARM:
+ Constraint c := ConstrainUnpredictable();
+ assert( c vIN [Constraint_WBSUPPRESS, Constraint_UNKNOWN, Constraint_UNDEF, Constraint_NOP] );
+ switch c {
+ case Constraint_WBSUPPRESS -> wback := false (* writeback is suppressed *)
+ case Constraint_UNKNOWN -> wb_unknown := true (* writeback is UNKNOWN *)
+ case Constraint_UNDEF -> UnallocatedEncoding()
+ case Constraint_NOP -> EndOfInstruction()
+ };*)
+ };
+
+ if memop == MemOp_STORE & wback & (t == n | t2 == n) & n != 31 then {
+ UnallocatedEncoding(); (* ARM:
+ Constraint c := ConstrainUnpredictable();
+ assert( c vIN [Constraint_NONE, Constraint_UNKNOWN, Constraint_UNDEF, Constraint_NOP] );
+ switch c {
+ case Constraint_NONE -> rt_unknown := false (* value stored is pre-writeback *)
+ case Constraint_UNKNOWN -> rt_unknown := true (* value stored is UNKNOWN *)
+ case Constraint_UNDEF -> UnallocatedEncoding()
+ case Constraint_NOP -> EndOfInstruction()
+ };*)
+ };
+
+ if memop == MemOp_LOAD & t == t2 then {
+ UnallocatedEncoding(); (* ARM
+ Constraint c := ConstrainUnpredictable();
+ assert( c vIN [Constraint_UNKNOWN, Constraint_UNDEF, Constraint_NOP] );
+ switch c {
+ case Constraint_UNKNOWN -> rt_unknown := true (* result is UNKNOWN *)
+ case Constraint_UNDEF -> UnallocatedEncoding()
+ case Constraint_NOP -> EndOfInstruction()
+ };*)
+ };
+
+ (* n == 31 means the stack pointer is the base register *)
+ if n == 31 then {
+ CheckSPAlignment();
+ address := rSP();
+ } else
+ address := rX(n);
+
+ if ~(postindex) then
+ address := address + offset;
+
+ if memop == MemOp_STORE then
+ (* announce the address *)
+ wMem_Addr(address, dbytes * 2, acctype, false);
+
+ (* this is a hack to allow the address write back to be observed
+ before the memory access *)
+ if wback then {
+ let address' = (bit[64]) (if ~(postindex) then address else (address + offset)) in
+ if n == 31 then
+ wSP() := address'
+ else
+ wX(n) := address'
+ };
+
+ switch memop {
+ case MemOp_STORE -> {
+ if rt_unknown & t == n then
+ data1 := (bit['D]) UNKNOWN
+ else
+ data1 := rX(t);
+ if rt_unknown & t2 == n then
+ data2 := (bit['D]) UNKNOWN
+ else
+ data2 := rX(t2);
+
+ (write_buffer_type) write_buffer := wMem(empty_write_buffer, address + 0, dbytes, acctype, data1);
+ write_buffer := wMem(write_buffer, address + dbytes, dbytes, acctype, data2);
+ flush_write_buffer(write_buffer);
+ }
+
+ case MemOp_LOAD -> {
+ (read_buffer_type) read_buffer := rMem(empty_read_buffer, address + 0, dbytes, acctype);
+ read_buffer := rMem(read_buffer, address + dbytes, dbytes, acctype);
+ (bit['D*2]) read_data := flush_read_buffer(read_buffer, dbytes*2);
+ data1 := read_data[(datasize - 1) .. 0];
+ data2 := read_data[((datasize * 2) - 1) .. datasize];
+ if rt_unknown then {
+ data1 := (bit['D]) UNKNOWN;
+ data2 := (bit['D]) UNKNOWN;
+ };
+ (* _signed only for LDPSW: each element sign-extends to 64 bits *)
+ if _signed then {
+ wX(t) := (bit[64]) (SignExtend(data1));
+ wX(t2) := (bit[64]) (SignExtend(data2));
+ } else {
+ wX(t) := data1;
+ wX(t2) := data2;
+ };
+ }
+ };
+
+(* ARM: the following code was moved up, see note there
+ if wback then {
+ if wb_unknown then
+ address := (bit[64]) UNKNOWN
+ else if postindex then
+ address := address + offset;
+ if n == 31 then
+ wSP() := address
+ else
+ wX(n) := address;
+ };
+*)
+}
+
+(* LDP post-index *)
+(* LDPSW post-index *)
+(* STP post-index *)
+(* Decode LDP/LDPSW/STP post-indexed: writeback, offset after access. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLoadStoreRegisterPairPostIndexed (opc:0b101:[0]:0b001:[L]:(bit[7]) imm7:Rt2:Rn:Rt) =
+{
+ (boolean) wback := true;
+ (boolean) postindex := true;
+
+ sharedDecodeLoadStorePair(L,opc,imm7,Rn,Rt,Rt2,wback,postindex);
+}
+
+(* LDP pre-index *)
+(* LDPSW pre-index *)
+(* STP pre-index *)
+(* Decode LDP/LDPSW/STP pre-indexed: writeback, offset before access. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLoadStoreRegisterPairPreIndexed (opc:0b101:[0]:0b011:[L]:(bit[7]) imm7:Rt2:Rn:Rt) =
+{
+ (boolean) wback := true;
+ (boolean) postindex := false;
+
+ sharedDecodeLoadStorePair(L,opc,imm7,Rn,Rt,Rt2,wback,postindex);
+}
+
+(* ADD/ADDS (immediate) *)
+(* SUB/SUBS (immediate) *)
+(* Decode ADD/ADDS/SUB/SUBS (immediate): imm12 optionally shifted
+   left by 12; shift values 0b1x are reserved. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeAddSubtractImmediate ([sf]:[op]:[S]:0b10001:shift:(bit[12]) imm12:Rn:Rd) =
+{
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (boolean) sub_op := (op == 1); (* op: 0 = ADD, 1 = SUB *)
+ (boolean) setflags := (S == 1);
+ (bit['R]) imm := 0; (* ARM: uninitialized *)
+
+ switch shift {
+ case 0b00 -> imm := ZeroExtend(imm12)
+ case 0b01 -> imm := ZeroExtend(imm12 : (0b0 ^^ 12)) (* LSL #12 *)
+ case [1,_] -> ReservedValue()
+ };
+
+ Some(AddSubImmediate(d,n,datasize,sub_op,setflags,imm));
+}
+
+(* Execute ADD/ADDS/SUB/SUBS (immediate). Subtraction is implemented
+   as operand1 + NOT(operand2) + 1 via AddWithCarry. Rd == 31 writes
+   SP for the non-flag-setting forms, XZR/WZR discard otherwise. *)
+function clause execute (AddSubImmediate(d,n,datasize,sub_op,setflags,imm)) = {
+ (bit['R]) operand1 := if n == 31 then rSP() else rX(n); (* n == 31: SP is the source *)
+ (bit['R]) operand2 := imm;
+ (bit) carry_in := 0; (* ARM: uninitialized *)
+
+ if sub_op then {
+ operand2 := NOT(operand2);
+ carry_in := 1;
+ }
+ else
+ carry_in := 0;
+
+ let (result,nzcv) = (AddWithCarry(operand1, operand2, carry_in)) in {
+
+ if setflags then
+ wPSTATE_NZCV() := nzcv;
+
+ if (d == 31 & ~(setflags)) then
+ wSP() := result
+ else
+ wX(d) := result;
+ }
+}
+
+(* BFM *)
+(* SBFM *)
+(* UBFM *)
+(* Decode BFM/SBFM/UBFM: opc selects the variant (inzero = destination
+   bits outside the field are zeroed; extend = sign-extend above the
+   field); immr/imms are turned into the wmask/tmask bit masks. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeBitfield ([sf]:opc:0b100110:[N]:(bit[6]) immr:(bit[6]) imms:Rn:Rd) = {
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+
+ (boolean) inzero := false; (* ARM: uninitialized *)
+ (boolean) extend := false; (* ARM: uninitialized *)
+ (uinteger) R := 0; (* ARM: uninitialized *)
+ (uinteger) S := 0; (* ARM: uninitialized *)
+
+ switch opc {
+ case 0b00 -> {inzero := true; extend := true} (* SBFM *)
+ case 0b01 -> {inzero := false; extend := false} (* BFM *)
+ case 0b10 -> {inzero := true; extend := false} (* UBFM *)
+ case 0b11 -> UnallocatedEncoding()
+ };
+
+ (* N must match sf; the high immr/imms bits are reserved in 32-bit forms *)
+ if sf == 1 & N != 1 then ReservedValue();
+ if sf == 0 & (N != 0 | immr[5] != 0 | imms[5] != 0) then ReservedValue();
+
+ R := UInt(immr);
+ S := UInt(imms);
+
+ let ((bit['R]) wmask, (bit['R]) tmask) = (DecodeBitMasks(N, imms, immr, false)) in {
+
+ Some(BitfieldMove(d,n,datasize,inzero,extend,R,S,wmask,tmask));
+}}
+
+
+(* Execute BFM/SBFM/UBFM: rotate the source field into place, select
+   it with wmask/tmask, and fill the remaining bits from the
+   destination (BFM), zeros (UBFM) or the sign bit (SBFM). *)
+function clause execute (BitfieldMove(d,n,datasize,inzero,extend,R,S,wmask,tmask)) = {
+ (bit['R]) dst := if inzero then Zeros() else rX(d);
+ (bit['R]) src := rX(n);
+
+ (* perform bitfield move on low bits *)
+ (bit['R]) bot := ((dst & NOT(wmask)) | (ROR(src, R) & wmask));
+
+ (* determine extension bits (sign, zero or dest register) *)
+ (bit['R]) top := if extend then Replicate([src[S]]) else dst;
+
+ (* combine extension bits and result bits *)
+ wX(d) := ((top & NOT(tmask)) | (bot & tmask));
+}
+
+(* EXTR *)
+(* Decode EXTR: extract a register-sized field starting at bit lsb
+   from the concatenation Rn:Rm. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeExtract ([sf]:0b00:0b100111:[N]:0b0:Rm:(bit[6]) imms:Rn:Rd) =
+{
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) m := UInt_reg(Rm);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (uinteger) lsb := 0; (* ARM: uninitialized *)
+
+ if N != sf then UnallocatedEncoding(); (* N must match sf *)
+ if sf == 0 & imms[5] == 1 then ReservedValue(); (* lsb >= 32 invalid in 32-bit form *)
+ lsb := UInt(imms);
+
+ Some(ExtractRegister(d,n,m,datasize,lsb));
+}
+
+(* Execute EXTR: Rd = (Rn:Rm)[lsb + datasize - 1 .. lsb]. *)
+function clause execute ( ExtractRegister(d,n,m,datasize,lsb) ) = {
+ (bit['R]) result := 0; (* ARM: uninitialized *)
+ (bit['R]) operand1 := rX(n);
+ (bit['R]) operand2 := rX(m);
+
+ (bit[2*'R]) concat := operand1:operand2;
+ result := concat[(lsb + datasize - 1)..lsb];
+ wX(d) := result;
+}
+
+(* AND/ANDS (immediate) *)
+(* EOR (immediate) *)
+(* ORR (immediate) *)
+(* Decode AND/ORR/EOR/ANDS (immediate): opc selects the operation
+   (0b11 = ANDS, the only flag-setting form); the immediate is the
+   wmask produced by DecodeBitMasks. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLogicalImmediate ([sf]:opc:0b100100:[N]:(bit[6]) immr:(bit[6]) imms:Rn:Rd) =
+{
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (boolean) setflags := false; (* ARM: uninitialized *)
+ (LogicalOp) op := LogicalOp_AND; (* ARM: uninitialized *)
+ switch opc {
+ case 0b00 -> {op := LogicalOp_AND; setflags := false}
+ case 0b01 -> {op := LogicalOp_ORR; setflags := false}
+ case 0b10 -> {op := LogicalOp_EOR; setflags := false}
+ case 0b11 -> {op := LogicalOp_AND; setflags := true} (* ANDS *)
+ };
+
+ if sf == 0 & N != 0 then ReservedValue();
+ let (imm,_) = (DecodeBitMasks(N, imms, immr, true)) in {
+
+ Some(LogicalImmediate(d,n,datasize,setflags,op,imm));
+}}
+
+(* Execute AND/ORR/EOR/ANDS (immediate). ANDS sets N and Z from the
+   result with C and V cleared. Rd == 31 writes SP only for the
+   non-flag-setting forms. *)
+function clause execute (LogicalImmediate(d,n,datasize,setflags,op,imm)) = {
+ (bit['R]) result := 0; (* ARM: uninitialized *)
+ (bit['R]) operand1 := rX(n);
+ (bit['R]) operand2 := imm;
+
+ switch op {
+ case LogicalOp_AND -> result := (operand1 & operand2)
+ case LogicalOp_ORR -> result := (operand1 | operand2)
+ case LogicalOp_EOR -> result := (operand1 ^ operand2)
+ };
+
+ (* NZCV = sign : is-zero : 00 *)
+ if setflags then
+ wPSTATE_NZCV() := ([result[length(result) - 1]]:[IsZeroBit(result)]:0b00);
+
+ if d == 31 & ~(setflags) then
+ wSP() := result
+ else
+ wX(d) := result;
+}
+
+(* MOVK *)
+(* MOVN *)
+(* MOVZ *)
+(* Decode MOVN/MOVZ/MOVK: hw selects which 16-bit lane imm16 is
+   placed in; hw[1] is only valid for 64-bit forms. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeMoveWideImmediate ([sf]:opc:0b100101:(bit[2]) hw:(bit[16]) imm16:Rd) =
+{
+ (reg_index) d := UInt_reg(Rd);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (bit[16]) imm := imm16;
+ (uinteger) pos := 0; (* ARM: uninitialized *)
+ (MoveWideOp) opcode := 0; (* ARM: uninitialized *)
+
+ switch opc {
+ case 0b00 -> opcode := MoveWideOp_N (* MOVN *)
+ case 0b10 -> opcode := MoveWideOp_Z (* MOVZ *)
+ case 0b11 -> opcode := MoveWideOp_K (* MOVK *)
+ case _ -> UnallocatedEncoding()
+ };
+
+ if sf == 0 & hw[1] == 1 then UnallocatedEncoding();
+ pos := UInt(hw:0b0000); (* bit position = hw * 16 *)
+
+ Some(MoveWide(d,datasize,imm,pos,opcode));
+}
+
+(* Execute MOVN/MOVZ/MOVK: MOVK keeps the other lanes of Rd, the
+   others start from zero; MOVN inverts the final result. *)
+function clause execute ( MoveWide(d,datasize,imm,pos,opcode) ) = {
+ (bit['R]) result := 0; (* ARM: uninitialized *)
+
+ if opcode == MoveWideOp_K then
+ result := rX(d)
+ else
+ result := Zeros();
+
+ result[(pos+15)..pos] := imm;
+ if opcode == MoveWideOp_N then
+ result := NOT(result);
+ wX(d) := result;
+}
+
+(* ADR *)
+(* ADRP *)
+(* Decode PC-relative addressing: ADRP (op=1) shifts the 21-bit immediate
+   left by 12 to address a 4KB page; ADR uses it directly. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodePCRelAddressing ([op]:(bit[2]) immlo:0b10000:(bit[19]) immhi:Rd) =
+{
+ (reg_index) d := UInt_reg(Rd);
+ (boolean) page := (op == 1);
+ (bit[64]) imm := 0; (* ARM: uninitialized *)
+
+ if page then
+ imm := SignExtend(immhi:immlo:(0b0 ^^ 12))
+ else
+ imm := SignExtend(immhi:immlo);
+
+ Some(Address(d,page,imm))
+}
+
+(* Execute ADR/ADRP: add the signed offset to the PC; for ADRP the low 12
+   bits of the PC are first cleared (page-aligned base). *)
+function clause execute (Address(d,page,imm)) = {
+ (bit[64]) base := rPC();
+
+ if page then
+ base[11..0] := (bit[12]) (Zeros());
+
+ wX(d) := base + imm;
+}
+
+(* ADD/ADDS (extended register) *)
+(* SUB/SUBS (extended register) *)
+(* Decode add/sub (extended register); a left-shift amount above 4 in imm3
+   is a reserved encoding. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeAddSubtractExtendedRegister ([sf]:[op]:[S]:0b01011:0b00:0b1:Rm:option_v:(bit[3]) imm3:Rn:Rd) =
+{
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) m := UInt_reg(Rm);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (boolean) sub_op := (op == 1);
+ (boolean) setflags := (S == 1);
+ (ExtendType) extend_type := DecodeRegExtend(option_v);
+ ([|7|]) shift := UInt(imm3);
+ if shift > 4 then ReservedValue();
+
+ Some(AddSubExtendRegister(d,n,m,datasize,sub_op,setflags,extend_type,shift));
+}
+
+(* Execute add/sub (extended register): subtraction is implemented as
+   operand1 + NOT(operand2) + 1 via AddWithCarry. n == 31 reads SP here. *)
+function clause execute (AddSubExtendRegister(d,n,m,datasize,sub_op,setflags,extend_type,shift)) = {
+ (bit['R]) operand1 := if n == 31 then rSP() else rX(n);
+ (bit['R]) operand2 := ExtendReg(m, extend_type, shift);
+ (bit) carry_in := 0; (* ARM: uninitialized *)
+
+ if sub_op then {
+ operand2 := NOT(operand2);
+ carry_in := 1;
+ } else
+ carry_in := 0;
+
+ let (result,nzcv) = (AddWithCarry(operand1, operand2, carry_in)) in {
+
+ if setflags then
+ wPSTATE_NZCV() := nzcv;
+
+ (* non-flag-setting forms with d == 31 write SP *)
+ if (d == 31 & ~(setflags)) then
+ wSP() := result
+ else
+ wX(d) := result;
+ }
+}
+
+(* ADD/ADDS (shifted register) *)
+(* SUB/SUBS (shifted register) *)
+(* Decode add/sub (shifted register); shift type 0b11 (ROR) is reserved,
+   as is a shift amount >= 32 in the 32-bit form. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeAddSubtractShiftedRegister ([sf]:[op]:[S]:0b01011:shift:0b0:Rm:(bit[6]) imm6:Rn:Rd) =
+{
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) m := UInt_reg(Rm);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (boolean) sub_op := (op == 1);
+ (boolean) setflags := (S == 1);
+
+ if shift == 0b11 then ReservedValue();
+ if sf == 0 & imm6[5] == 1 then ReservedValue();
+
+ (ShiftType) shift_type := DecodeShift(shift);
+ ([|63|]) shift_amount := UInt(imm6);
+
+ Some(AddSubShiftedRegister(d,n,m,datasize,sub_op,setflags,shift_type,shift_amount));
+}
+
+(* Execute add/sub (shifted register): like the extended-register form but
+   with a shifted operand2, and the destination always goes through wX. *)
+function clause execute (AddSubShiftedRegister(d,n,m,datasize,sub_op,setflags,shift_type,shift_amount)) = {
+ (bit['R]) operand1 := rX(n);
+ (bit['R]) operand2 := ShiftReg(m, shift_type, shift_amount);
+ (bit) carry_in := 0; (* ARM: uninitialized *)
+
+ if sub_op then {
+ operand2 := NOT(operand2);
+ carry_in := 1;
+ }
+ else
+ carry_in := 0;
+
+ let (result,nzcv) = (AddWithCarry(operand1, operand2, carry_in)) in {
+
+ if setflags then
+ wPSTATE_NZCV() := nzcv;
+
+ wX(d) := result;
+ }
+}
+
+(* ADC/ADCS *)
+(* SBC/SBCS *)
+(* Decode add/sub with carry: op selects subtract, S selects flag setting. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeAddSubtractWithCarry ([sf]:[op]:[S]:0b11010000:Rm:0b000000:Rn:Rd) = {
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) m := UInt_reg(Rm);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (boolean) sub_op := (op == 1);
+ (boolean) setflags := (S == 1);
+
+ Some(AddSubCarry(d,n,m,datasize,sub_op,setflags));
+}
+
+(* Execute ADC/ADCS/SBC/SBCS: carry in comes from PSTATE.C; subtraction
+   only inverts operand2 (the +1 is supplied by the carry flag itself). *)
+function clause execute( AddSubCarry(d,n,m,datasize,sub_op,setflags)) = {
+ (bit['R]) operand1 := rX(n);
+ (bit['R]) operand2 := rX(m);
+
+ if sub_op then
+ operand2 := NOT(operand2);
+
+ let (result,nzcv) = (AddWithCarry(operand1, operand2, PSTATE_C)) in {
+
+ if setflags then
+ wPSTATE_NZCV() := nzcv;
+
+ wX(d) := result;
+}}
+
+(* CCMN (immediate) *)
+(* CCMP (immediate) *)
+(* Decode conditional compare (immediate): imm5 is zero-extended to the
+   operand width; nzcv gives the fallback flag value. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeConditionalCompareImmediate ((bit[32]) ([sf]:[op]:[1]:0b11010010:(bit[5]) imm5:_cond:[1]:[0]:Rn:[0]:nzcv)) =
+{
+ (reg_index) n := UInt_reg(Rn);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (boolean) sub_op := (op ==1);
+ (bit[4]) condition := _cond;
+ (bit[4]) flags := nzcv;
+ (bit['R]) imm := ZeroExtend(imm5);
+
+ Some(ConditionalCompareImmediate(n,datasize,sub_op,condition,flags,imm));
+}
+
+(* Execute CCMN/CCMP (immediate): if the condition holds, NZCV is set from
+   the compare (add or subtract); otherwise the encoded nzcv is used. *)
+function clause execute (ConditionalCompareImmediate(n,datasize,sub_op,condition,flags,imm)) = {
+ (bit['R]) operand1 := rX(n);
+ (bit['R]) operand2 := imm;
+ (bit) carry_in := 0;
+
+ (bit[4]) flags' := flags;
+ if ConditionHolds(condition) then {
+ if sub_op then {
+ operand2 := NOT(operand2);
+ carry_in := 1;
+ };
+ let (_,nzcv) = (AddWithCarry(operand1, operand2, carry_in)) in {flags' := nzcv};
+ };
+ wPSTATE_NZCV() := flags';
+}
+
+(* CCMN (register) *)
+(* CCMP (register) *)
+(* Decode conditional compare (register): same as the immediate form but
+   the second operand is register Rm (bit 11 distinguishes the two). *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeConditionalCompareRegister ((bit[32]) ([sf]:[op]:[1]:0b11010010:Rm:_cond:[0]:[0]:Rn:[0]:nzcv)) = {
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) m := UInt_reg(Rm);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (boolean) sub_op := (op ==1);
+ (bit[4]) condition := _cond;
+ (bit[4]) flags := nzcv;
+
+ Some(ConditionalCompareRegister(n,m,datasize,sub_op,condition,flags));
+}
+
+(* Execute CCMN/CCMP (register): mirrors the immediate form with operand2
+   read from Xm instead of a zero-extended immediate. *)
+function clause execute (ConditionalCompareRegister(n,m,datasize,sub_op,condition,flags)) = {
+ (bit['R]) operand1 := rX(n);
+ (bit['R]) operand2 := rX(m);
+ (bit) carry_in := 0;
+
+ (bit[4]) flags' := flags;
+ if ConditionHolds(condition) then {
+ if sub_op then {
+ operand2 := NOT(operand2);
+ carry_in := 1;
+ };
+ let (_,nzcv) = (AddWithCarry(operand1, operand2, carry_in)) in {flags' := nzcv};
+ };
+ wPSTATE_NZCV() := flags';
+}
+
+(* CSEL op=0,o2=0 *)
+(* CSINC op=0,o2=1 *)
+(* CSINV op=1,o2=0 *)
+(* CSNEG op=1,o2=1 *)
+(* Decode conditional select: op/o2 choose whether the else-operand is
+   inverted and/or incremented. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeConditionalSelect ((bit[32]) ([sf]:[op]:[0]:0b11010100:Rm:_cond:[0]:[o2]:Rn:Rd)) =
+{
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) m := UInt_reg(Rm);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (bit[4]) condition := _cond;
+ (boolean) else_inv := (op == 1);
+ (boolean) else_inc := (o2 == 1);
+
+ Some(ConditionalSelect(d,n,m,datasize,condition,else_inv,else_inc));
+}
+
+(* Execute CSEL/CSINC/CSINV/CSNEG: Xd gets Xn if the condition holds, else
+   Xm optionally inverted (CSINV/CSNEG) and/or incremented (CSINC/CSNEG). *)
+function clause execute ( ConditionalSelect(d,n,m,datasize,condition,else_inv,else_inc) ) = {
+ (bit['R]) result := 0; (* ARM: uninitialized *)
+ (bit['R]) operand1 := rX(n);
+ (bit['R]) operand2 := rX(m);
+
+ if ConditionHolds(condition) then
+ result := operand1
+ else {
+ result := operand2;
+ if else_inv then result := NOT(result);
+ if else_inc then result := result + 1;
+ };
+
+ wX(d) := result;
+}
+
+(* Data-processing (1 source) decoder: a scattered function whose clauses
+   follow below; declared with the escape effect for unallocated encodings. *)
+val forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ bit[32] -> option<(ast<'R,'D>)> effect {escape} decodeData1Source
+scattered function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> decodeData1Source
+
+(* RBIT *)
+(* REV *)
+(* REV16 *)
+(* REV32 *)
+(* Decode RBIT/REV16/REV32/REV: opc selects the reversal granularity;
+   opc=11 (REV64) requires the 64-bit form. *)
+function clause decodeData1Source ([sf]:[1]:[0]:0b11010110:0b00000:0b0000:opc:Rn:Rd) = {
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+
+ (* initialize with an enum constant (was the bare integer 0), consistent
+    with the enum initializers elsewhere in this file; the switch below is
+    total over the 2-bit opc, so the initializer is never observed *)
+ (RevOp) op := RevOp_RBIT; (* ARM: uninitialized *)
+ switch opc {
+ case 0b00 ->
+ op := RevOp_RBIT
+ case 0b01 ->
+ op := RevOp_REV16
+ case 0b10 ->
+ op := RevOp_REV32
+ case 0b11 -> {
+ if sf == 0 then UnallocatedEncoding();
+ op := RevOp_REV64;
+ }
+ };
+
+ Some(Reverse(d,n,datasize,op));
+}
+
+(* Execute RBIT/REV16/REV32/REV64: V is a swap mask — when V[vbit] is set,
+   adjacent 2^vbit-bit fields of the result are exchanged. Composing the
+   selected swaps yields the required reversal. *)
+function clause execute ( Reverse(d,n,datasize,op) ) = {
+ (bit['R]) result := 0; (* ARM uninitialized *)
+ (bit[6]) V := 0; (* ARM uninitialized *)
+
+ switch op {
+ case RevOp_REV16 -> V := 0b001000
+ case RevOp_REV32 -> V := 0b011000
+ case RevOp_REV64 -> V := 0b111000
+ case RevOp_RBIT -> V := if datasize == 64 then 0b111111 else 0b011111
+ };
+
+ result := rX(n);
+
+ foreach (vbit from 0 to 5) {
+ (* Swap pairs of 2^vbit bits in result *)
+ if V[vbit] == 1 then {
+ (bit['R]) tmp := result;
+ (uinteger) vsize := lsl(1, vbit);
+ foreach (base from 0 to (datasize - 1) by (2 * vsize)) { (* ARM: while base < datasize do *)
+ result[((base+vsize) - 1)..base] := tmp[(base+(2*vsize) - 1)..(base+vsize)];
+ result[(base+(2*vsize) - 1)..(base+vsize)] := tmp[(base+vsize - 1)..base];
+ (* ARM: base := base + (2 * vsize); *)
+ };
+ };
+ };
+ wX(d) := result;
+}
+
+(* CLS *)
+(* CLZ *)
+(* Decode CLZ (op=0) / CLS (op=1). *)
+function clause decodeData1Source ([sf]:[1]:[0]:0b11010110:0b00000:0b00010:[op]:Rn:Rd) = {
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (CountOp) opcode := if op == 0 then CountOp_CLZ else CountOp_CLS;
+
+ Some(CountLeading(d,n,datasize,opcode));
+}
+
+(* Execute CLZ/CLS: count leading zero bits or leading sign bits of Xn and
+   write the count (as a bit vector) to Xd. *)
+function clause execute (CountLeading(d,n,datasize,opcode)) = {
+ (uinteger) result := 0; (* ARM: uninitialized *)
+ (bit['R]) operand1 := rX(n);
+
+ if opcode == CountOp_CLZ then
+ result := CountLeadingZeroBits(operand1)
+ else
+ result := CountLeadingSignBits(operand1);
+
+ wX(d) := (bit['R]) result;
+}
+
+end decodeData1Source
+
+(* Data-processing (2 source) decoder: scattered function, clauses below. *)
+val forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ bit[32] -> option<(ast<'R,'D>)> effect {escape} decodeData2Source
+scattered function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> decodeData2Source
+
+(* SDIV o1=1 *)
+(* UDIV o1=0 *)
+(* Decode UDIV (o1=0) / SDIV (o1=1). *)
+function clause decodeData2Source ([sf]:[0]:[0]:0b11010110:Rm:0b00001:[o1]:Rn:Rd) = {
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) m := UInt_reg(Rm);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (boolean) _unsigned := (o1 == 0);
+
+ Some(Division(d,n,m,datasize,_unsigned));
+}
+
+(* Execute UDIV/SDIV: division by zero yields 0 (no trap). The quotient is
+   truncated to 'R bits on writeback. NOTE(review): ARM pseudocode rounds
+   toward zero; whether Sail's `quot` does so is the open FIXME below. *)
+function clause execute ( Division(d,n,m,datasize,_unsigned) ) = {
+ (bit['R]) operand1 := rX(n);
+ (bit['R]) operand2 := rX(m);
+ (integer) result := 0; (* ARM: uninitialized *)
+
+ if IsZero(operand2) then
+ result := 0
+ else
+ result := (* ARM: RoundTowardsZero*) (Int(operand1, _unsigned) quot Int(operand2, _unsigned)); (* FIXME: does quot round towards zero? *)
+
+ wX(d) := (bit['R]) result ; (* ARM: result[(datasize-1)..0] *)
+}
+
+(* ASRV *)
+(* LSLV *)
+(* LSRV *)
+(* RORV *)
+(* Decode the variable-shift instructions: op2 selects the shift type. *)
+function clause decodeData2Source ([sf]:[0]:[0]:0b11010110:Rm:0b0010:op2:Rn:Rd) = {
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) m := UInt_reg(Rm);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (ShiftType) shift_type := DecodeShift(op2);
+
+ Some(Shift(d,n,m,datasize,shift_type));
+}
+
+(* Execute ASRV/LSLV/LSRV/RORV: shift Xn by Xm modulo the datasize. *)
+function clause execute (Shift(d,n,m,datasize,shift_type)) = {
+ (bit['R]) result := 0; (* ARM: uninitialized *)
+ (bit['R]) operand2 := rX(m);
+
+ result := ShiftReg(n, shift_type, UInt(operand2) mod datasize);
+ wX(d) := result;
+}
+
+(* CRC32B/CRC32H/CRC32W/CRC32X C=0 *)
+(* CRC32CB/CRC32CH/CRC32CW/CRC32CX C=1 *)
+(* Decode the CRC32 family: sz must match sf (64-bit form only with sz=11),
+   otherwise unallocated; C selects the Castagnoli polynomial variant. *)
+function clause decodeData2Source ([sf]:[0]:[0]:0b11010110:Rm:0b010:[C]:sz:Rn:Rd) = {
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) m := UInt_reg(Rm);
+ if sf == 1 & sz != 0b11 then UnallocatedEncoding();
+ if sf == 0 & sz == 0b11 then UnallocatedEncoding();
+ ([:'D:]) size := lsl(8, UInt(sz)); (* 2-bit size field -> 8, 16, 32, 64 *)
+ (boolean) crc32c := (C == 1);
+
+ Some(CRC(d,n,m,size,crc32c));
+}
+
+(* Execute CRC32*/CRC32C*: bit-reversed polynomial division of the
+   accumulator xor'd with the input value; unallocated without the CRC
+   extension. 0x1EDC6F41 is the CRC32C polynomial, 0x04C11DB7 plain CRC32. *)
+function clause execute ( CRC(d,n,m,size,crc32c) ) = {
+ if ~(HaveCRCExt()) then
+ UnallocatedEncoding();
+
+ (bit[32]) acc := rX(n); (* accumulator *)
+ (bit['D]) _val := rX(m); (* input value *)
+ (bit[32]) poly := if crc32c then 0x1EDC6F41 else 0x04C11DB7;
+
+ (bit[32+'D]) tempacc := BitReverse(acc) : (bit['D]) (Zeros());
+ (bit['D+32]) tempval := BitReverse(_val) : (bit[32]) (Zeros());
+
+ (* Poly32Mod2 on a bitstring does a polynomial Modulus over {0,1} operation *)
+ wX(d) := BitReverse(Poly32Mod2(tempacc ^ tempval, poly));
+}
+
+end decodeData2Source
+
+(* Data-processing (3 source) decoder: scattered function, clauses below. *)
+val forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ bit[32] -> option<(ast<'R,'D>)> effect pure decodeData3Source
+scattered function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> decodeData3Source
+
+(* MADD o0=0 *)
+(* MSUB o0=1 *)
+(* Decode MADD/MSUB: source and destination widths are equal here. *)
+function clause decodeData3Source ([sf]:0b00:0b11011:0b000:Rm:[o0]:Ra:Rn:Rd) = {
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) m := UInt_reg(Rm);
+ (reg_index) a := UInt_reg(Ra);
+ ([:'R:]) destsize := if sf == 1 then 64 else 32;
+ ([:'D:]) datasize := destsize;
+ (boolean) sub_op := (o0 == 1);
+
+ Some(MultiplyAddSub(d,n,m,a,destsize,datasize,sub_op));
+}
+
+(* Execute MADD/MSUB: Xd = Xa +/- Xn*Xm. Uses UInt for the operands; since
+   the result is truncated to 'R bits on writeback, the low bits are the
+   same regardless of signedness. *)
+function clause execute ( MultiplyAddSub(d,n,m,a,destsize,datasize,sub_op) ) = {
+ (bit['D]) operand1 := rX(n);
+ (bit['D]) operand2 := rX(m);
+ (bit['R]) operand3 := rX(a);
+
+ (integer) result := 0; (* ARM: uninitialized *)
+
+ if sub_op then
+ result := UInt(operand3) - (UInt(operand1) * UInt(operand2))
+ else
+ result := UInt(operand3) + (UInt(operand1) * UInt(operand2));
+
+ wX(d) := (bit['R]) result;
+}
+
+(* SMADDL *)
+(* SMSUBL *)
+(* UMADDL *)
+(* UMSUBL *)
+(* Decode the widening multiply-add/sub instructions: 32-bit sources,
+   64-bit accumulator/destination; U selects unsigned. *)
+function clause decodeData3Source ([1]:0b00:0b11011:[U]:0b01:Rm:[o0]:Ra:Rn:Rd) = {
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) m := UInt_reg(Rm);
+ (reg_index) a := UInt_reg(Ra);
+ ([:'R:]) destsize := 64;
+ ([:'D:]) datasize := 32;
+ (boolean) sub_op := (o0 == 1);
+ (boolean) _unsigned := (U == 1);
+
+ Some(MultiplyAddSubLong(d,n,m,a,destsize,datasize,sub_op,_unsigned));
+}
+
+(* Execute SMADDL/SMSUBL/UMADDL/UMSUBL: Xd = Xa +/- Wn*Wm with the 32-bit
+   sources interpreted per _unsigned; written back as 64 bits. *)
+function clause execute ( MultiplyAddSubLong(d,n,m,a,destsize,datasize,sub_op,_unsigned) ) = {
+ (bit['D]) operand1 := rX(n);
+ (bit['D]) operand2 := rX(m);
+ (bit['R]) operand3 := rX(a);
+
+ (integer) result := 0; (* ARM: uninitialized *)
+
+ if sub_op then
+ result := Int(operand3, _unsigned) - (Int(operand1, _unsigned) * Int(operand2, _unsigned))
+ else
+ result := Int(operand3, _unsigned) + (Int(operand1, _unsigned) * Int(operand2, _unsigned));
+
+ wX(d) := (bit[64]) result;
+}
+
+(* SMULH *)
+(* UMULH *)
+(* Decode SMULH/UMULH (64-bit only); the Ra field is decoded but unused. *)
+function clause decodeData3Source ([1]:0b00:0b11011:[U]:0b10:Rm:[0]:Ra:Rn:Rd) = {
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) m := UInt_reg(Rm);
+ (reg_index) a := UInt_reg(Ra); (* ignored by UMULH/SMULH *)
+ ([:'R:]) destsize := 64;
+ ([:'D:]) datasize := destsize;
+ (boolean) _unsigned := (U == 1);
+
+ Some(MultiplyHigh(d,n,m,a,destsize,datasize,_unsigned));
+}
+
+(* Execute SMULH/UMULH: Xd gets the high 64 bits of the 128-bit product of
+   Xn and Xm (signed or unsigned per _unsigned). *)
+function clause execute ( MultiplyHigh(d,n,m,a,destsize,datasize,_unsigned) ) = {
+ (bit['R]) operand1 := rX(n);
+ (bit['R]) operand2 := rX(m);
+
+ (integer) result := 0; (* ARM: uninitialized *)
+
+ result := Int(operand1, _unsigned) * Int(operand2, _unsigned);
+
+ wX(d) := ((bit[128]) result)[127..64];
+}
+
+end decodeData3Source
+
+(* AND/ANDS (shifted register) *)
+(* EON (shifted register) *)
+(* EOR (shifted register) *)
+(* ORN (shifted register) *)
+(* ORR (shifted register) *)
+(* BIC/BICS (shifted register) *)
+(* Decode logical (shifted register): opc selects AND/ORR/EOR/ANDS; N=1
+   gives the inverted variants (BIC/ORN/EON/BICS). *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLogicalShiftedRegister ([sf]:opc:0b01010:shift:[N]:Rm:(bit[6]) imm6:Rn:Rd) =
+{
+ (reg_index) d := UInt_reg(Rd);
+ (reg_index) n := UInt_reg(Rn);
+ (reg_index) m := UInt_reg(Rm);
+ ([:'R:]) datasize := if sf == 1 then 64 else 32;
+ (boolean) setflags := false; (* ARM: uninitialized *)
+ (LogicalOp) op := LogicalOp_AND; (* ARM: uninitialized *)
+ switch opc {
+ case 0b00 -> {op := LogicalOp_AND; setflags := false}
+ case 0b01 -> {op := LogicalOp_ORR; setflags := false}
+ case 0b10 -> {op := LogicalOp_EOR; setflags := false}
+ case 0b11 -> {op := LogicalOp_AND; setflags := true}
+ };
+
+ (* a shift amount >= 32 is reserved in the 32-bit form *)
+ if sf == 0 & imm6[5] == 1 then ReservedValue();
+
+ (ShiftType) shift_type := DecodeShift(shift);
+ ([|63|]) shift_amount := UInt(imm6);
+ (boolean) invert := (N == 1);
+
+ Some(LogicalShiftedRegister(d,n,m,datasize,setflags,op,shift_type,shift_amount,invert))
+}
+
+(* Execute logical (shifted register): apply op to Xn and the (optionally
+   inverted) shifted Xm; flag-setting forms write NZCV with C,V cleared. *)
+function clause execute (LogicalShiftedRegister(d,n,m,datasize,setflags,op,shift_type,shift_amount,invert)) = {
+
+ (bit['R]) operand1 := rX(n);
+ (bit['R]) operand2 := ShiftReg(m, shift_type, shift_amount);
+
+ if invert then operand2 := NOT(operand2);
+
+ (bit['R]) result := 0; (* ARM: uninitialized *)
+ switch op {
+ case LogicalOp_AND -> result := (operand1 & operand2)
+ case LogicalOp_ORR -> result := (operand1 | operand2)
+ case LogicalOp_EOR -> result := (operand1 ^ operand2)
+ };
+
+ (* N from the result MSB, Z from IsZeroBit, C and V cleared *)
+ if setflags then
+ wPSTATE_NZCV() := ([result[datasize - 1]]:[IsZeroBit(result)]:0b00);
+
+ wX(d) := result;
+}
+
+(* SIMD/FP decode group 1: not modelled yet — reports and decodes to
+   Unallocated. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect {escape} decodeDataSIMDFPoint1 ((bit[32]) machineCode) =
+{
+ not_implemented("decodeDataSIMDFPoint1");
+ Some(Unallocated);
+}
+
+(* SIMD/FP decode group 2: not modelled yet — reports and decodes to
+   Unallocated. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect {escape} decodeDataSIMDFPoint2 ((bit[32]) machineCode) =
+{
+ not_implemented("decodeDataSIMDFPoint2");
+ Some(Unallocated);
+}
+
+(* Dispatch for the data-processing (register) group: patterns are the full
+   32-bit instruction MSB-first, discriminating chiefly on bits 28..21 (and
+   bit 11 to separate the two conditional-compare forms). *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeDataRegister ((bit[32]) machineCode) =
+{
+ switch machineCode {
+ case ([_]:[_]:[_]:[0]: [1]:[0]:[1]:[0]: [_]:[_]:[_]: (bit[9]) _:[_]:(bit[11]) _) -> decodeLogicalShiftedRegister(machineCode)
+ case ([_]:[_]:[_]:[0]: [1]:[0]:[1]:[1]: [_]:[_]:[0]: (bit[9]) _:[_]:(bit[11]) _) -> decodeAddSubtractShiftedRegister(machineCode)
+ case ([_]:[_]:[_]:[0]: [1]:[0]:[1]:[1]: [_]:[_]:[1]: (bit[9]) _:[_]:(bit[11]) _) -> decodeAddSubtractExtendedRegister(machineCode)
+ case ([_]:[_]:[_]:[1]: [1]:[0]:[1]:[0]: [0]:[0]:[0]: (bit[9]) _:[_]:(bit[11]) _) -> decodeAddSubtractWithCarry(machineCode)
+ case ([_]:[_]:[_]:[1]: [1]:[0]:[1]:[0]: [0]:[1]:[0]: (bit[9]) _:[0]:(bit[11]) _) -> decodeConditionalCompareRegister(machineCode)
+ case ([_]:[_]:[_]:[1]: [1]:[0]:[1]:[0]: [0]:[1]:[0]: (bit[9]) _:[1]:(bit[11]) _) -> decodeConditionalCompareImmediate(machineCode)
+ case ([_]:[_]:[_]:[1]: [1]:[0]:[1]:[0]: [1]:[0]:[0]: (bit[9]) _:[_]:(bit[11]) _) -> decodeConditionalSelect(machineCode)
+ case ([_]:[_]:[_]:[1]: [1]:[0]:[1]:[1]: [_]:[_]:[_]: (bit[9]) _:[_]:(bit[11]) _) -> decodeData3Source(machineCode)
+ case ([_]:[0]:[_]:[1]: [1]:[0]:[1]:[0]: [1]:[1]:[0]: (bit[9]) _:[_]:(bit[11]) _) -> decodeData2Source(machineCode)
+ case ([_]:[1]:[_]:[1]: [1]:[0]:[1]:[0]: [1]:[1]:[0]: (bit[9]) _:[_]:(bit[11]) _) -> decodeData1Source(machineCode)
+ }
+}
+
+(* Dispatch for the data-processing (immediate) group: discriminates on
+   bits 28..23 of the instruction. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeDataImmediate ((bit[32]) machineCode) =
+{
+ switch machineCode {
+ case ((bit[3]) _:[1]:[0]:[0]:[0]:[0]:[_]:(bit[23]) _) -> decodePCRelAddressing(machineCode)
+ case ((bit[3]) _:[1]:[0]:[0]:[0]:[1]:[_]:(bit[23]) _) -> decodeAddSubtractImmediate(machineCode)
+ case ((bit[3]) _:[1]:[0]:[0]:[1]:[0]:[0]:(bit[23]) _) -> decodeLogicalImmediate(machineCode)
+ case ((bit[3]) _:[1]:[0]:[0]:[1]:[0]:[1]:(bit[23]) _) -> decodeMoveWideImmediate(machineCode)
+ case ((bit[3]) _:[1]:[0]:[0]:[1]:[1]:[0]:(bit[23]) _) -> decodeBitfield(machineCode)
+ case ((bit[3]) _:[1]:[0]:[0]:[1]:[1]:[1]:(bit[23]) _) -> decodeExtract(machineCode)
+ }
+}
+
+(* Dispatch for the loads/stores group: patterns discriminate on bits
+   29..23 plus bits 11..10 (the addressing-mode sub-field). *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeLoadsStores ((bit[32]) machineCode) =
+{
+ switch machineCode {
+ case ([_]:[_]:[0]:[0]: [1]:[0]:[0]:[0]: [_]:[_]:[_]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[_]:[_]: (bit[10]) _) -> decodeLoadStoreExclusive(machineCode)
+ case ([_]:[_]:[0]:[1]: [1]:[_]:[0]:[0]: [_]:[_]:[_]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[_]:[_]: (bit[10]) _) -> decodeLoadRegisterLiteral(machineCode)
+ case ([_]:[_]:[1]:[0]: [1]:[_]:[0]:[0]: [0]:[_]:[_]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[_]:[_]: (bit[10]) _) -> decodeLoadStoreNoAllocatePairOffset(machineCode)
+ case ([_]:[_]:[1]:[0]: [1]:[_]:[0]:[0]: [1]:[_]:[_]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[_]:[_]: (bit[10]) _) -> decodeLoadStoreRegisterPairPostIndexed(machineCode)
+ case ([_]:[_]:[1]:[0]: [1]:[_]:[0]:[1]: [0]:[_]:[_]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[_]:[_]: (bit[10]) _) -> decodeLoadStoreRegisterPairOffset(machineCode)
+ case ([_]:[_]:[1]:[0]: [1]:[_]:[0]:[1]: [1]:[_]:[_]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[_]:[_]: (bit[10]) _) -> decodeLoadStoreRegisterPairPreIndexed(machineCode)
+ case ([_]:[_]:[1]:[1]: [1]:[_]:[0]:[0]: [_]:[_]:[0]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[0]:[0]: (bit[10]) _) -> decodeLoadStoreRegisterUnscaledImmediate(machineCode)
+ case ([_]:[_]:[1]:[1]: [1]:[_]:[0]:[0]: [_]:[_]:[0]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[0]:[1]: (bit[10]) _) -> decodeLoadStoreRegisterImmediatePostIndexed(machineCode)
+ case ([_]:[_]:[1]:[1]: [1]:[_]:[0]:[0]: [_]:[_]:[0]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[1]:[0]: (bit[10]) _) -> decodeLoadStoreRegisterUnprivileged(machineCode)
+ case ([_]:[_]:[1]:[1]: [1]:[_]:[0]:[0]: [_]:[_]:[0]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[1]:[1]: (bit[10]) _) -> decodeLoadStoreRegisterImmediatePreIndexed(machineCode)
+ case ([_]:[_]:[1]:[1]: [1]:[_]:[0]:[0]: [_]:[_]:[1]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[1]:[0]: (bit[10]) _) -> decodeLoadStoreRegisterRegisterOffset(machineCode)
+ case ([_]:[_]:[1]:[1]: [1]:[_]:[0]:[1]: [_]:[_]:[_]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[_]:[_]: (bit[10]) _) -> decodeLoadStoreRegisterUnsignedImmediate(machineCode)
+ case ([0]:[_]:[0]:[0]: [1]:[1]:[0]:[0]: [0]:[_]:[0]:[0]: [0]:[0]:[0]:[0]: (bit[4]) _:[_]:[_]: (bit[10]) _) -> decodeAdvSIMDLoadStoreMultiStruct(machineCode)
+ case ([0]:[_]:[0]:[0]: [1]:[1]:[0]:[0]: [1]:[_]:[0]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[_]:[_]: (bit[10]) _) -> decodeAdvSIMDLoadStoreMultiStructPostIndexed(machineCode)
+ case ([0]:[_]:[0]:[0]: [1]:[1]:[0]:[1]: [0]:[_]:[_]:[0]: [0]:[0]:[0]:[0]: (bit[4]) _:[_]:[_]: (bit[10]) _) -> decodeAdvSIMDLoadStoreSingleStruct(machineCode)
+ case ([0]:[_]:[0]:[0]: [1]:[1]:[0]:[1]: [1]:[_]:[_]:[_]: [_]:[_]:[_]:[_]: (bit[4]) _:[_]:[_]: (bit[10]) _) -> decodeAdvSIMDLoadStoreSingleStructPostIndexed(machineCode)
+ }
+}
+
+(* Split the system group: SYS/SYSL with an implementation-defined encoding
+   go to decodeImplementationDefined; everything else to decodeSystem. The
+   catch-all case must stay last. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeSystemImplementationDefined ((bit[32]) machineCode) =
+{
+ switch machineCode {
+ case ((bit[11]) _:[0]:[1]:[_]:[_]:[_]:[1]:[_]:[1]:[1]:(bit[12]) _) -> decodeImplementationDefined(machineCode)
+ case ((bit[11]) _:[1]:[1]:[_]:[_]:[_]:[1]:[_]:[1]:[1]:(bit[12]) _) -> decodeImplementationDefined(machineCode)
+ case ((bit[11]) _:[_]:[_]:[_]:[_]:[_]:[_]:[_]:[_]:[_]:(bit[12]) _) -> decodeSystem(machineCode)
+ }
+}
+
+
+(* Dispatch for the branches/exception/system group: discriminates on the
+   top instruction bits (31..22). *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect pure decodeBranchesExceptionSystem ((bit[32]) machineCode) =
+{
+ switch machineCode {
+ case ([_]:[0]:[0]:[1]:[0]:[1]:[_]:[_]:[_]:[_]:(bit[22]) _) -> decodeUnconditionalBranchImmediate(machineCode)
+ case ([_]:[0]:[1]:[1]:[0]:[1]:[0]:[_]:[_]:[_]:(bit[22]) _) -> decodeCompareBranchImmediate(machineCode)
+ case ([_]:[0]:[1]:[1]:[0]:[1]:[1]:[_]:[_]:[_]:(bit[22]) _) -> decodeTestBranchImmediate(machineCode)
+ case ([0]:[1]:[0]:[1]:[0]:[1]:[0]:[_]:[_]:[_]:(bit[22]) _) -> decodeConditionalBranchImmediate(machineCode)
+ case ([1]:[1]:[0]:[1]:[0]:[1]:[0]:[0]:[_]:[_]:(bit[22]) _) -> decodeExceptionGeneration(machineCode)
+ case ([1]:[1]:[0]:[1]:[0]:[1]:[0]:[1]:[0]:[0]:(bit[22]) _) -> decodeSystemImplementationDefined(machineCode)
+ case ([1]:[1]:[0]:[1]:[0]:[1]:[1]:[_]:[_]:[_]:(bit[22]) _) -> decodeUnconditionalBranchRegister(machineCode)
+ }
+}
+
+(* Top-level A64 decode: routes on bits 28..25 into the per-group decoders;
+   bits 28..27 == 00 is architecturally unallocated. Returns None only via
+   the final catch-all. Also closes the scattered execute function. *)
+function forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}.
+ option<(ast<'R,'D>)> effect { escape } decode ((bit[32]) machineCode) =
+{
+ switch machineCode {
+ case ((bit[3]) _:[0]:[0]:[_]:[_]:(bit[25]) _) -> Some(Unallocated)
+ case ((bit[3]) _:[1]:[0]:[0]:[_]:(bit[25]) _) -> decodeDataImmediate(machineCode)
+ case ((bit[3]) _:[1]:[0]:[1]:[_]:(bit[25]) _) -> decodeBranchesExceptionSystem(machineCode)
+ case ((bit[3]) _:[_]:[1]:[_]:[0]:(bit[25]) _) -> decodeLoadsStores(machineCode)
+ case ((bit[3]) _:[_]:[1]:[0]:[1]:(bit[25]) _) -> decodeDataRegister(machineCode)
+ case ((bit[3]) _:[0]:[1]:[1]:[1]:(bit[25]) _) -> decodeDataSIMDFPoint1(machineCode)
+ case ((bit[3]) _:[1]:[1]:[1]:[1]:(bit[25]) _) -> decodeDataSIMDFPoint2(machineCode)
+ case _ -> None
+ }
+}
+
+end execute
+
+(* Filter for instructions the model supports: currently accepts every
+   decoded instruction. The switch is the extension point for excluding
+   specific AST cases (return None to mark unsupported). *)
+val forall Nat 'R, 'R IN {32,64}, Nat 'D, 'D IN {8,16,32,64}. ast<'R,'D> -> option<(ast<'R,'D>)> effect pure supported_instructions
+function option<(ast<'R,'D>)> supported_instructions ((ast<'R,'D>) instr) = {
+ switch instr {
+ case _ -> Some(instr)
+ }
+}
diff --git a/arm/armv8_A32_sys_regs.sail b/arm/armv8_A32_sys_regs.sail
new file mode 100644
index 00000000..8b4a8e00
--- /dev/null
+++ b/arm/armv8_A32_sys_regs.sail
@@ -0,0 +1,61 @@
+(*========================================================================*)
+(* *)
+(* Copyright (c) 2015-2016 Shaked Flur *)
+(* Copyright (c) 2015-2016 Kathyrn Gray *)
+(* All rights reserved. *)
+(* *)
+(* This software was developed by the University of Cambridge Computer *)
+(* Laboratory as part of the Rigorous Engineering of Mainstream Systems *)
+(* (REMS) project, funded by EPSRC grant EP/K008528/1. *)
+(* *)
+(* Redistribution and use in source and binary forms, with or without *)
+(* modification, are permitted provided that the following conditions *)
+(* are met: *)
+(* 1. Redistributions of source code must retain the above copyright *)
+(* notice, this list of conditions and the following disclaimer. *)
+(* 2. Redistributions in binary form must reproduce the above copyright *)
+(* notice, this list of conditions and the following disclaimer in *)
+(* the documentation and/or other materials provided with the *)
+(* distribution. *)
+(* *)
+(* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' *)
+(* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *)
+(* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *)
+(* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR *)
+(* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *)
+(* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *)
+(* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF *)
+(* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *)
+(* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *)
+(* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT *)
+(* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF *)
+(* SUCH DAMAGE. *)
+(*========================================================================*)
+
+(*************************************************************************)
+(* General system control registers *)
+
+register (SCRType) SCR (* Secure Configuration Register *)
+
+(*************************************************************************)
+(* Debug registers *)
+
+(* Layout for DBGOSDLR: only bit 0 is modelled, the rest are RES0. *)
+typedef DBGOSDLR_type = register bits [31:0]
+{
+ (*31..1 : RES0;*)
+ 0 : DLK; (* OS Double Lock status bit *)
+}
+register (DBGOSDLR_type) DBGOSDLR (* Debug OS Double Lock Register *)
+
+register (DBGPRCR_type) DBGPRCR (* Debug Power Control Register *)
+
+
+(*************************************************************************)
+(* Performance Monitors registers *)
+
+(*************************************************************************)
+(* Generic Timer registers *)
+
+(*************************************************************************)
+(* Generic Interrupt Controller CPU interface registers *)
+
diff --git a/arm/armv8_A64_lib.sail b/arm/armv8_A64_lib.sail
new file mode 100644
index 00000000..2bcec4ba
--- /dev/null
+++ b/arm/armv8_A64_lib.sail
@@ -0,0 +1,909 @@
+(*========================================================================*)
+(* *)
+(* Copyright (c) 2015-2016 Shaked Flur *)
+(* Copyright (c) 2015-2016 Kathyrn Gray *)
+(* All rights reserved. *)
+(* *)
+(* This software was developed by the University of Cambridge Computer *)
+(* Laboratory as part of the Rigorous Engineering of Mainstream Systems *)
+(* (REMS) project, funded by EPSRC grant EP/K008528/1. *)
+(* *)
+(* Redistribution and use in source and binary forms, with or without *)
+(* modification, are permitted provided that the following conditions *)
+(* are met: *)
+(* 1. Redistributions of source code must retain the above copyright *)
+(* notice, this list of conditions and the following disclaimer. *)
+(* 2. Redistributions in binary form must reproduce the above copyright *)
+(* notice, this list of conditions and the following disclaimer in *)
+(* the documentation and/or other materials provided with the *)
+(* distribution. *)
+(* *)
+(* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' *)
+(* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *)
+(* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *)
+(* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR *)
+(* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *)
+(* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *)
+(* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF *)
+(* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *)
+(* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *)
+(* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT *)
+(* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF *)
+(* SUCH DAMAGE. *)
+(*========================================================================*)
+
+(** FUNCTION:aarch64/exceptions/aborts/AArch64.Abort *)
+
+(* Stub: take a memory abort for 'vaddress' with the given fault record.
+   Aborts are not modelled; calling this stops execution via not_implemented. *)
+function unit AArch64_Abort((bit[64]) vaddress, (FaultRecord) fault) =
+{
+  not_implemented("AArch64_Abort");
+}
+
+(** FUNCTION:AArch64.SPAlignmentFault() *)
+
+(* Stub: raise a stack-pointer alignment fault (not modelled). *)
+function unit effect pure AArch64_SPAlignmentFault() = {
+  not_implemented("AArch64_SPAlignmentFault");
+}
+
+(** FUNCTION:aarch64/exceptions/debug/AArch64.SoftwareBreakpoint *)
+
+(* Stub: take a software breakpoint (BRK) exception (not modelled). *)
+function unit AArch64_SoftwareBreakpoint((bit[16]) immediate) =
+{
+  not_implemented("AArch64_SoftwareBreakpoint");
+}
+
+(** FUNCTION:aarch64/exceptions/exceptions/AArch64.TakeReset *)
+
+(* Reset into the highest implemented Exception level in AArch64 state:
+   selects the EL, resets control/general/SIMD/special/debug registers,
+   masks all asynchronous exceptions, then branches to the IMPLEMENTATION
+   DEFINED reset vector RVBAR_ELx.  'cold_reset' is forwarded to the
+   register-reset helpers. *)
+function unit effect {wreg} AArch64_TakeReset((boolean) cold_reset) =
+{
+  assert((~(HighestELUsingAArch32())), None);
+
+  (* Enter the highest implemented Exception level in AArch64 state *)
+  PSTATE_nRW := 0;
+  if HaveEL(EL3) then {
+    PSTATE_EL := EL3;
+    SCR_EL3.NS := 0; (* Secure state *)
+  } else if HaveEL(EL2) then
+    PSTATE_EL := EL2
+  else
+    PSTATE_EL := EL1;
+
+  (* Reset the system registers and other system components *)
+  AArch64_ResetControlRegisters(cold_reset);
+
+  (* Reset all other PSTATE fields *)
+  PSTATE_SP := 0; (* Select stack pointer *)
+  wPSTATE_DAIF() := 0b1111; (* All asynchronous exceptions masked *)
+  PSTATE_SS := 0; (* Clear software step bit *)
+  PSTATE_IL := 0; (* Clear illegal execution state bit *)
+
+  (* All registers, bits and fields not reset by the above pseudocode or by the BranchTo() call *)
+  (* below are UNKNOWN bitstrings after reset. In particular, the return information registers *)
+  (* ELR_ELx and SPSR_ELx have UNKNOWN values, so that it is impossible to return from a reset *)
+  (* in an architecturally defined way. *)
+  AArch64_ResetGeneralRegisters();
+  AArch64_ResetSIMDFPRegisters();
+  AArch64_ResetSpecialRegisters();
+  ResetExternalDebugRegisters(cold_reset);
+
+  (bit[64]) rv := 0; (* ARM: uninitialized *) (* IMPLEMENTATION DEFINED reset vector *)
+  if HaveEL(EL3) then
+    rv := RVBAR_EL3
+  else if HaveEL(EL2) then
+    rv := RVBAR_EL2
+  else
+    rv := RVBAR_EL1;
+
+  (* The reset vector must be correctly aligned *)
+  assert((IsZero(rv[63..(PAMax())]) & IsZero(rv[1..0])), Some("reset vector not correctly aligned"));
+
+  BranchTo(rv, BranchType_UNKNOWN);
+}
+
+(** FUNCTION:aarch64/exceptions/syscalls/AArch64.CallHypervisor *)
+
+(* Stub: HVC exception entry (not modelled). *)
+function unit AArch64_CallHypervisor((bit[16]) immediate) =
+{
+  not_implemented("AArch64_CallHypervisor");
+}
+
+(** FUNCTION:aarch64/exceptions/syscalls/AArch64.CallSecureMonitor *)
+
+(* Stub: SMC exception entry (not modelled). *)
+function unit AArch64_CallSecureMonitor((bit[16]) immediate) =
+{
+  not_implemented("AArch64_CallSecureMonitor");
+}
+
+(** FUNCTION:aarch64/exceptions/syscalls/AArch64.CallSupervisor *)
+
+(* Stub: SVC exception entry (not modelled). *)
+function unit AArch64_CallSupervisor((bit[16]) immediate) =
+{
+  not_implemented("AArch64_CallSupervisor");
+}
+
+(** FUNCTION:aarch64/exceptions/traps/AArch64.CheckForSMCTrap *)
+
+(* Check whether an SMC from Non-secure EL0/EL1 is trapped to EL2 by
+   HCR_EL2.TSC.  The routing condition is evaluated; actually taking the
+   exception is not yet modelled (see commented-out ARM pseudocode). *)
+function unit AArch64_CheckForSMCTrap((bit[16]) imm) =
+{
+  (boolean) route_to_el2 := (HaveEL(EL2) & ~(IsSecure()) & (PSTATE_EL == EL0 | PSTATE_EL == EL1) & HCR_EL2.TSC == 1);
+  if route_to_el2 then {
+    not_implemented("AArch64_CheckForSMCTrap route_to_el2");
+    (* ARM:
+    bits(64) preferred_exception_return = ThisInstrAddr();
+    vect_offset = 0x0;
+    exception = ExceptionSyndrome(Exception_MonitorCall);
+    exception.syndrome<15:0> = imm;
+    AArch64.TakeException(EL2, exception, preferred_exception_return, vect_offset);
+    *)
+  };
+}
+
+(** FUNCTION:aarch64/exceptions/traps/AArch64.CheckForWFxTrap *)
+
+(* Check whether a WFE ('is_wfe') or WFI executed at the current EL is
+   trapped to 'target_el', based on the controlling SCTLR/HCR/SCR bit for
+   that level; if so, invoke the (stub) trap handler.  Note the EL1 bits
+   (nTWE/nTWI) trap when 0, while the EL2/EL3 bits trap when 1. *)
+function unit effect {rreg} AArch64_CheckForWFxTrap((bit[2]) target_el, (boolean) is_wfe) =
+{
+  assert((HaveEL(target_el)), None);
+
+  (boolean) trap := false; (* ARM: uninitialized *)
+  switch target_el {
+    case EL1 -> trap := ((if is_wfe then SCTLR_EL1.nTWE else SCTLR_EL1.nTWI) == 0)
+    case EL2 -> trap := ((if is_wfe then HCR_EL2.TWE else HCR_EL2.TWI) == 1)
+    case EL3 -> trap := ((if is_wfe then SCR_EL3.TWE else SCR_EL3.TWI) == 1)
+  };
+
+  if trap then
+    AArch64_WFxTrap(target_el, is_wfe);
+}
+
+(** FUNCTION:aarch64/exceptions/traps/AArch64.SystemRegisterTrap *)
+
+(* Stub: trap a system-register access to 'target_el' (not modelled).
+   Parameter order (op0, op2, op1, ...) follows the ARM pseudocode. *)
+function unit AArch64_SystemRegisterTrap((bit[2]) target_el, (bit[2]) op0, (bit[3]) op2, (bit[3]) op1, (bit[4]) crn,
+    (bit[5]) rt, (bit[4]) crm, (bit) dir) =
+{
+  not_implemented("AArch64_SystemRegisterTrap");
+}
+
+(** FUNCTION:aarch64/exceptions/traps/AArch64.UndefinedFault *)
+
+(* Stub: take an Undefined Instruction exception (not modelled). *)
+function unit AArch64_UndefinedFault() =
+{
+  not_implemented("AArch64_UndefinedFault");
+}
+
+(** FUNCTION:aarch64/exceptions/traps/AArch64.WFxTrap *)
+
+(* Stub: take a WFE/WFI trap to 'target_el' (not modelled). *)
+function unit AArch64_WFxTrap((bit[2]) target_el, (boolean) is_wfe) =
+{
+  not_implemented("AArch64_WFxTrap");
+}
+
+(** FUNCTION:aarch64/functions/aborts/AArch64.CreateFaultRecord *)
+
+(* Build a FaultRecord from the given components.  The AArch32-only fields
+   'domain' and 'debugmoe' are left UNKNOWN, matching the ARM pseudocode. *)
+function FaultRecord AArch64_CreateFaultRecord((Fault) type, (bit[48]) ipaddress,
+    (uinteger) level, (AccType) acctype, (boolean) write, (bit) extflag,
+    (boolean) secondstage, (boolean) s2fs1walk) =
+{
+  (FaultRecord) fault := {
+    type = type;
+    domain = (bit[4]) UNKNOWN;
+    debugmoe = (bit[4]) UNKNOWN;
+    ipaddress = ipaddress;
+    level = level;
+    acctype = acctype;
+    write = write;
+    extflag = extflag;
+    secondstage = secondstage;
+    s2fs1walk = s2fs1walk;
+  };
+
+  fault;
+}
+
+(** FUNCTION:aarch64/functions/exclusive/AArch64.ExclusiveMonitorsPass *)
+
+(* Check whether a store-exclusive to 'size' bytes at 'address' passes the
+   exclusive monitors: alignment is checked first (unaligned atomics abort),
+   then the VA monitor, then the translated PA against the local and (for
+   shareable memory) global monitors.  A passing check clears the local
+   monitor.  Returns true iff the store-exclusive may succeed. *)
+function boolean AArch64_ExclusiveMonitorsPass((bit[64]) address, (uinteger) size) =
+{
+  (*It is IMPLEMENTATION DEFINED whether the detection of memory aborts happens*)
+  (*before or after the check on the local Exclusive Monitor. As a result a failure*)
+  (*of the local monitor can occur on some implementations even if the memory*)
+  (*access would give a memory abort.*)
+
+  (AccType) acctype := AccType_ATOMIC;
+  (boolean) iswrite := true;
+  (boolean) aligned := (address == Align(address, size));
+
+  if ~(aligned) then {
+    (boolean) secondstage := false;
+    AArch64_Abort(address, AArch64_AlignmentFault(acctype, iswrite, secondstage));
+  };
+
+  (boolean) passed := AArch64_IsExclusiveVA(address, ProcessorID(), size);
+  if ~(passed) then
+    (* return *) false
+  else {
+
+  (AddressDescriptor) memaddrdesc := AArch64_TranslateAddress(address, acctype, iswrite, aligned, size);
+
+  (*Check for aborts or debug exceptions*)
+  if IsFault(memaddrdesc) then
+    AArch64_Abort(address, memaddrdesc.fault);
+
+  passed := IsExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);
+
+  if passed then {
+    ClearExclusiveLocal(ProcessorID());
+    if (memaddrdesc.memattrs).shareable then
+      passed := IsExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);
+  };
+
+  passed;
+}}
+
+(** FUNCTION:aarch64/functions/exclusive/AArch64.IsExclusiveVA *)
+
+(* VA-monitor check.  The monitors are handled by the surrounding concurrency
+   model rather than inside this Sail model, so this always returns true. *)
+function boolean AArch64_IsExclusiveVA((bit[64]) address, (integer) processorid, (uinteger) size) =
+{
+  info("The model does not implement the exclusive monitors explicitly.");
+  true;
+}
+
+(** FUNCTION:aarch64/functions/exclusive/AArch64.MarkExclusiveVA *)
+
+(* VA-monitor update.  No-op for the same reason as AArch64_IsExclusiveVA. *)
+function unit effect pure AArch64_MarkExclusiveVA((bit[64]) address, (integer) processorid, (uinteger) size) =
+{
+  info("The model does not implement the exclusive monitors explicitly.");
+}
+
+(** FUNCTION:aarch64/functions/exclusive/AArch64.SetExclusiveMonitors *)
+
+(* Mark the local (and, for shareable locations, global) exclusive monitors
+   to cover 'size' bytes from 'address', as for a load-exclusive.
+   Fix: 'aligned' previously used '!=', inverting the alignment test; the
+   ARM pseudocode and AArch64_ExclusiveMonitorsPass above both use '=='. *)
+function unit AArch64_SetExclusiveMonitors((bit[64]) address, (uinteger) size) =
+{
+  (AccType) acctype := AccType_ATOMIC;
+  (boolean) iswrite := false;
+  (* aligned iff 'address' is a multiple of 'size' *)
+  (boolean) aligned := (address == Align(address, size));
+
+  (AddressDescriptor) memaddrdesc := AArch64_TranslateAddress(address, acctype, iswrite, aligned, size);
+
+  (* Check for aborts or debug exceptions *)
+  if IsFault(memaddrdesc) then
+    (* return *) ()
+  else {
+
+  if (memaddrdesc.memattrs).shareable then
+    MarkExclusiveGlobal(memaddrdesc.paddress, ProcessorID(), size);
+
+  MarkExclusiveLocal(memaddrdesc.paddress, ProcessorID(), size);
+
+  AArch64_MarkExclusiveVA(address, ProcessorID(), size);
+}}
+
+(** FUNCTION:aarch64/functions/memory/AArch64.CheckAlignment *)
+
+(* Returns whether 'address' is aligned to 'size' bytes; takes an Alignment
+   fault abort for unaligned atomic/ordered accesses, or for any unaligned
+   access when SCTLR.A (strict alignment checking) is set. *)
+function boolean AArch64_CheckAlignment((bit[64]) address, (uinteger) size, (AccType) acctype, (boolean) iswrite) =
+{
+  (boolean) aligned := (address == Align(address, size));
+  (bit) A := (SCTLR'()).A;
+
+  if ~(aligned) & (acctype == AccType_ATOMIC | acctype == AccType_ORDERED | A == 1) then {
+    (* declare 'secondstage' with an explicit type, consistent with the other
+       fault-raising paths in this file (e.g. AArch64_ExclusiveMonitorsPass) *)
+    (boolean) secondstage := false;
+    AArch64_Abort(address, AArch64_AlignmentFault(acctype, iswrite, secondstage));
+  };
+
+  aligned;
+}
+
+(** FUNCTION:// AArch64.MemSingle[] - non-assignment (read) form *)
+
+(* Read one aligned, single-copy-atomic access of 'size' bytes: translate
+   the (already aligned) address, abort on a translation fault, then issue
+   the read through _rMem.  Returns the accumulated read buffer rather than
+   the value itself (the buffer-threading is this model's addition). *)
+function forall Nat 'N, 'N IN {1,2,4,8,16}. (*bit['N*8]*) read_buffer_type effect {rmem} AArch64_rMemSingle((read_buffer_type) read_buffer, (bit[64]) address, ([:'N:]) size, (AccType) acctype, (boolean) wasaligned, (bool) exclusive) =
+{
+  (*assert size IN {1, 2, 4, 8, 16};*)
+  assert((address == Align(address, size)), None);
+
+  (* ARM: AddressDescriptor memaddrdesc; *)
+  (bit['N*8]) value := 0;
+  (boolean) iswrite := false;
+
+  (* MMU or MPU *)
+  (AddressDescriptor) memaddrdesc := AArch64_TranslateAddress(address, acctype, iswrite, wasaligned, size);
+
+  (* Check for aborts or debug exceptions *)
+  if IsFault(memaddrdesc) then
+    AArch64_Abort(address, memaddrdesc.fault);
+
+  (* Memory array access *)
+  _rMem(read_buffer, memaddrdesc, size, acctype, exclusive);
+}
+
+(** FUNCTION:// AArch64.MemSingle[] - assignment (write) form *)
+
+(* Write one aligned, single-copy-atomic access of 'size' bytes: translate
+   the (already aligned) address, abort on a translation fault, clear other
+   PEs' exclusives for shareable locations, then issue the write through
+   _wMem.  Returns the accumulated write buffer (this model's addition). *)
+function forall Nat 'N, 'N IN {1,2,4,8,16}. write_buffer_type effect pure AArch64_wMemSingle((write_buffer_type) write_buffer, (bit[64]) address, ([:'N:]) size, (AccType) acctype, (boolean) wasaligned, (bool) exclusive, (bit['N*8]) value) =
+{
+  (*assert size IN {1, 2, 4, 8, 16};*)
+  assert((address == Align(address, size)), None);
+
+  (* ARM: AddressDescriptor memaddrdesc; *)
+  (boolean) iswrite := true;
+
+  (* MMU or MPU *)
+  (AddressDescriptor) memaddrdesc := AArch64_TranslateAddress(address, acctype, iswrite, wasaligned, size);
+
+  (* Check for aborts or debug exceptions *)
+  if IsFault(memaddrdesc) then
+    AArch64_Abort(address, memaddrdesc.fault);
+
+  (* Effect on exclusives *)
+  if (memaddrdesc.memattrs).shareable then
+    ClearExclusiveByAddress(memaddrdesc.paddress, ProcessorID(), size);
+
+  (* Memory array access *)
+  _wMem(write_buffer, memaddrdesc, size, acctype, exclusive, value)
+}
+
+(** FUNCTION:CheckSPAlignment() *)
+
+(* Raise an SP-alignment fault if the current stack pointer is not 16-byte
+   aligned and stack-alignment checking is enabled (SCTLR_EL1.SA0 at EL0,
+   otherwise SCTLR.SA of the current translation regime). *)
+function unit effect {rreg} CheckSPAlignment() = {
+  (bit[64]) sp := rSP();
+  (bool) stack_align_check := false; (* ARM: this is missing from ARM ARM *)
+
+  if PSTATE_EL == EL0 then
+    stack_align_check := (SCTLR_EL1.SA0 != 0)
+  else
+    stack_align_check := ((SCTLR'()).SA != 0);
+
+  if stack_align_check & sp != Align(sp, 16) then
+    AArch64_SPAlignmentFault();
+}
+
+(** FUNCTION:// Mem[] - non-assignment (read) form *)
+
+(* Virtual-memory read of 'size' bytes at 'address'.  After the alignment
+   check, an access that is not single-copy atomic (unaligned, non-vector,
+   size > 1) is split into byte-sized MemSingle reads; otherwise one
+   MemSingle read of the full size is issued.  Exclusive reads are required
+   to be atomic (asserted).  Returns the threaded read buffer. *)
+function forall Nat 'N, 'N IN {1,2,4,8,16}. (*bit['N*8]*) read_buffer_type effect {rmem} rMem'((read_buffer_type) read_buffer, (bit[64]) address,([:'N:]) size, (AccType) acctype, (bool) exclusive) =
+{
+  (* ARM: assert size IN {1, 2, 4, 8, 16}; *)
+  (read_buffer_type) read_buffer' := read_buffer;
+
+  (uinteger) i := 0; (* ARM: uninitialized *)
+  (boolean) iswrite := false;
+
+  (boolean) aligned := AArch64_CheckAlignment(address, size, acctype, iswrite);
+  (boolean) atomic := ((aligned & ~(acctype == AccType_VEC | acctype == AccType_VECSTREAM)) | size == 1);
+
+  if ~(atomic) then {
+    assert((~(exclusive)), None); (* as far as I can tell this should never happen *)
+
+    assert((size > 1), None);
+    if (~(exclusive)) then {
+      (*value[7..0] :=*)read_buffer' := AArch64_rMemSingle(read_buffer', address, 1, acctype, aligned, false);
+
+      (* For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory *)
+      (* access will generate an Alignment Fault, as to get this far means the first byte did *)
+      (* not, so we must be changing to a new translation page. *)
+      (* FIXME: ???
+      if ~(aligned) then {
+        c = ConstrainUnpredictable();
+        assert c IN {Constraint_FAULT, Constraint_NONE};
+        if c == Constraint_NONE then aligned = TRUE;
+      };*)
+
+      foreach (i from 1 to (size - 1)) {
+        (*value[(8*i+7)..(8*i)] :=*)read_buffer' := AArch64_rMemSingle(read_buffer', address+i, 1, acctype, aligned, false);
+      }}} else
+    (*value :=*)read_buffer' := AArch64_rMemSingle(read_buffer', address, size, acctype, aligned, exclusive);
+
+  (*if BigEndian() then
+    value := BigEndianReverse(value);
+  value;*)
+  read_buffer'
+}
+
+(* Non-exclusive read: rMem' with exclusive = false. *)
+function forall Nat 'N, 'N IN {1,2,4,8,16}. (*bit['N*8]*) read_buffer_type effect {rmem} rMem((read_buffer_type) read_buffer, (bit[64]) address,([:'N:]) size, (AccType) acctype) =
+  rMem'(read_buffer, address, size, acctype, false)
+
+(* Load-exclusive read: rMem' with exclusive = true. *)
+function forall Nat 'N, 'N IN {1,2,4,8,16}. (*bit['N*8]*) read_buffer_type effect {rmem} rMem_exclusive((read_buffer_type) read_buffer, (bit[64]) address,([:'N:]) size, (AccType) acctype) =
+  rMem'(read_buffer, address, size, acctype, true)
+
+(** FUNCTION:// Mem[] - assignment (write) form *)
+
+(* the 'exclusive' and return value are our addition for handling
+   store-exclusive, this function should only be called indirectly by one
+   of the functions that follow it.
+   returns true if the store-exclusive was successful. *)
+(* Virtual-memory write of 'size' bytes: byte-reverses the value on
+   big-endian, checks alignment, then either splits a non-atomic access
+   into byte-sized MemSingle writes or issues one full-size MemSingle
+   write.  Exclusive stores must be atomic (asserted).  Returns the
+   threaded write buffer. *)
+function forall Nat 'N, 'N IN {1,2,4,8,16}. write_buffer_type wMem'((write_buffer_type) write_buffer, (bit[64]) address, ([:'N:]) size, (AccType) acctype, (bit['N*8]) value, (bool) exclusive) =
+{
+  (write_buffer_type) write_buffer' := write_buffer;
+
+  (* sail doesn't allow assignment to function arguments *)
+  (bit['N*8]) value' := value;
+
+  (uinteger) i := 0; (* ARM: uninitialized *)
+  (boolean) iswrite := true;
+
+  if BigEndian() then
+    value' := BigEndianReverse(value');
+  (boolean) aligned := AArch64_CheckAlignment(address, size, acctype, iswrite);
+  (boolean) atomic := ((aligned & ~(acctype == AccType_VEC | acctype == AccType_VECSTREAM)) | size == 1);
+
+  (bool) exclusiveSuccess := false;
+  if ~(atomic) then {
+    assert((~(exclusive)), None); (* as far as I can tell this should never happen *)
+
+    if (~(exclusive)) then {
+      assert((size > 1), None);
+      write_buffer' := AArch64_wMemSingle(write_buffer', address, 1, acctype, aligned, false, value'[7..0]);
+
+      (* For subsequent bytes it is CONSTRAINED UNPREDICTABLE whether an unaligned Device memory *)
+      (* access will generate an Alignment Fault, as to get this far means the first byte did *)
+      (* not, so we must be changing to a new translation page. *)
+      (* FIXME:
+      if !aligned then
+        c = ConstrainUnpredictable();
+        assert c IN {Constraint_FAULT, Constraint_NONE};
+        if c == Constraint_NONE then aligned = TRUE;*)
+
+      foreach (i from 1 to (size - 1))
+        write_buffer' := AArch64_wMemSingle(write_buffer', address+i, 1, acctype, aligned, false, value'[(8*i+7)..(8*i)]);
+    }} else
+    write_buffer' := AArch64_wMemSingle(write_buffer', address, size, acctype, aligned, exclusive, value');
+
+  write_buffer'
+}
+
+(* Ordinary store: wMem' with exclusive = false. *)
+function forall Nat 'N, 'N IN {1,2,4,8,16}. write_buffer_type wMem((write_buffer_type) write_buffer, (bit[64]) address, ([:'N:]) size, (AccType) acctype, (bit['N*8]) value) =
+  wMem'(write_buffer, address, size, acctype, value, false)
+
+(* Store-exclusive: wMem' with exclusive = true. *)
+function forall Nat 'N, 'N IN {1,2,4,8,16}. write_buffer_type wMem_exclusive((write_buffer_type) write_buffer, (bit[64]) address, ([:'N:]) size, (AccType) acctype, (bit['N*8]) value) =
+  wMem'(write_buffer, address, size, acctype, value, true)
+
+(** FUNCTION:aarch64/functions/registers/AArch64.ResetGeneralRegisters *)
+
+(* Set all general-purpose registers X0..X30 to UNKNOWN after reset. *)
+function unit AArch64_ResetGeneralRegisters() =
+{
+  foreach (i from 0 to 30)
+    wX(i) := (bit[64]) UNKNOWN;
+}
+
+(** FUNCTION:aarch64/functions/registers/AArch64.ResetSIMDFPRegisters *)
+
+(* Set all SIMD&FP registers V0..V31 to UNKNOWN after reset. *)
+function unit AArch64_ResetSIMDFPRegisters() =
+{
+  foreach (i from 0 to 31)
+    wV(i) := (bit[128]) UNKNOWN;
+}
+
+(** FUNCTION:aarch64/functions/registers/AArch64.ResetSpecialRegisters *)
+
+(* Set the special-purpose registers (stack pointers, SPSR/ELR per
+   implemented EL, AArch32 banked SPSRs, and the external-debug DLR/DSPSR)
+   to UNKNOWN after reset. *)
+function unit AArch64_ResetSpecialRegisters() =
+{
+  (* AArch64 special registers *)
+  SP_EL0 := (bit[64]) UNKNOWN;
+  SP_EL1 := (bit[64]) UNKNOWN;
+  SPSR_EL1 := (bit[32]) UNKNOWN;
+  ELR_EL1 := (bit[64]) UNKNOWN;
+  if HaveEL(EL2) then {
+    SP_EL2 := (bit[64]) UNKNOWN;
+    SPSR_EL2 := (bit[32]) UNKNOWN;
+    ELR_EL2 := (bit[64]) UNKNOWN;
+  };
+  if HaveEL(EL3) then {
+    SP_EL3 := (bit[64]) UNKNOWN;
+    SPSR_EL3 := (bit[32]) UNKNOWN;
+    ELR_EL3 := (bit[64]) UNKNOWN;
+  };
+
+  (* AArch32 special registers that are not architecturally mapped to AArch64 registers *)
+  if HaveAArch32EL(EL1) then {
+    SPSR_fiq := (bit[32]) UNKNOWN;
+    SPSR_irq := (bit[32]) UNKNOWN;
+    SPSR_abt := (bit[32]) UNKNOWN;
+    SPSR_und := (bit[32]) UNKNOWN;
+  };
+
+  (* External debug special registers *)
+  DLR_EL0 := (bit[64]) UNKNOWN;
+  DSPSR_EL0 := (bit[32]) UNKNOWN;
+}
+
+(** FUNCTION:aarch64/functions/registers/PC *)
+
+(* Read the program counter. *)
+function bit[64] effect {rreg} rPC () = _PC
+
+(** FUNCTION:// SP[] - assignment form *)
+
+(* Write the stack pointer selected by PSTATE.SP/PSTATE.EL, zero-extending
+   a 32-bit value to the full 64-bit register. *)
+function forall Nat 'N, 'N IN {32,64}. unit effect {rreg,wreg} wSP ((),(bit['N]) value) =
+{
+  (*assert width IN {32,64};*)
+  if PSTATE_SP == 0 then
+    SP_EL0 := ZeroExtend(value)
+  else
+    switch PSTATE_EL {
+      case EL0 -> SP_EL0 := ZeroExtend(value)
+      case EL1 -> SP_EL1 := ZeroExtend(value)
+      case EL2 -> SP_EL2 := ZeroExtend(value)
+      case EL3 -> SP_EL3 := ZeroExtend(value)
+    }
+}
+
+(** FUNCTION:// SP[] - non-assignment form *)
+
+(* Read the stack pointer selected by PSTATE.SP/PSTATE.EL, truncated to
+   the requested width via mask. *)
+function bit['N] effect {rreg} rSP () =
+{
+  (*assert width IN {8,16,32,64};*)
+  if PSTATE_SP == 0 then
+    mask(SP_EL0)
+  else
+    switch PSTATE_EL {
+      case EL0 -> mask(SP_EL0)
+      case EL1 -> mask(SP_EL1)
+      case EL2 -> mask(SP_EL2)
+      case EL3 -> mask(SP_EL3)
+    }
+}
+
+(** FUNCTION:// V[] - assignment form *)
+
+(* Write SIMD&FP register V[n], zero-extending narrower values to 128 bits. *)
+function forall Nat 'N, 'N IN {8,16,32,64,128}. unit effect {wreg} wV((SIMD_index) n, (bit['N]) value) = {
+  (*assert n >= 0 && n <= 31;*)
+  (*assert width IN {8,16,32,64,128};*)
+  _V[n] := ZeroExtend(value);
+}
+
+(** FUNCTION:// V[] - non-assignment form *)
+
+(* Read SIMD&FP register V[n], truncated to the requested width. *)
+function forall Nat 'N, 'N IN {8,16,32,64,128}. bit['N] effect {rreg} rV((SIMD_index) n) = mask(_V[n])
+
+(** FUNCTION:// Vpart[] - non-assignment form *)
+
+(* Read part 'part' of V[n]: part 0 is the low bits at the requested width;
+   part 1 is the high 64 bits (only meaningful at width 64, asserted). *)
+function forall Nat 'N, 'N IN {8,16,32,64,128}. bit['N] effect {rreg} rVpart (n,part) = {
+  if part == 0 then
+    ((bit['N]) (mask(_V[n])))
+  else {
+    assert((length((bit['N]) 0) == 64), None);
+    ((bit[64]) ((_V[n])[127..64]));
+  }
+}
+
+(** FUNCTION:// Vpart[] - assignment form *)
+
+(* Write part 'part' of V[n]: part 0 zero-extends into the whole register;
+   part 1 writes the high 64 bits (width must be 64, asserted). *)
+function forall Nat 'N, 'N IN {8,16,32,64,128}. unit effect {wreg} wVpart((SIMD_index) n, ([|1|]) part, (bit['N]) value) = {
+  if part == 0 then
+    _V[n] := ZeroExtend(value)
+  else {
+    assert((length((bit['N]) 0) == 64), None);
+    (_V[n])[127..64] := value;
+  }
+}
+
+(** FUNCTION:// X[] - assignment form *)
+
+(* Write general register X[n]; register 31 is the zero register, so the
+   write is silently discarded.  Narrow values are zero-extended. *)
+function forall Nat 'N, 'N IN {32,64}. unit effect {wreg} wX((reg_index) n, (bit['N]) value) =
+{
+  (*assert n >= 0 && n <= 31;*)
+  (*assert width IN {32,64};*)
+  if n != 31 then
+    _R[n] := ZeroExtend(value);
+}
+
+(** FUNCTION:// X[] - non-assignment form *)
+
+(* Read general register X[n] at the requested width; register 31 reads as
+   zero. *)
+function forall Nat 'N, 'N IN {8,16,32,64}. bit['N] effect {rreg} rX((reg_index) n) =
+{
+  (*assert n >= 0 && n <= 31;*)
+  (*assert width IN {8,16,32,64};*)
+  if n != 31 then
+    mask(_R[n])
+  else
+    Zeros();
+}
+
+(** FUNCTION:bits(64) ELR[bits(2) el] *)
+
+(* Read the Exception Link Register of exception level 'el' (EL1..EL3);
+   EL0 has no ELR and is unreachable here. *)
+function bit[64] rELR((bit[2]) el) =
+{
+  (bit[64]) r := 0; (* ARM: uninitialized *)
+  switch el {
+    case EL1 -> r := ELR_EL1
+    case EL2 -> r := ELR_EL2
+    case EL3 -> r := ELR_EL3
+    case _ -> Unreachable()
+  };
+  r;
+}
+
+(** FUNCTION:bits(64) ELR[] *)
+
+(* Read the ELR of the current exception level (must not be EL0). *)
+function bit[64] rELR'() =
+{
+  assert (PSTATE_EL != EL0, None);
+  rELR(PSTATE_EL);
+}
+
+(** FUNCTION:SCTLRType SCTLR[bits(2) regime] *)
+
+(* Read the System Control Register of the given translation regime
+   (EL1/EL2/EL3); any other value is unreachable. *)
+function SCTLR_type effect {rreg} SCTLR ((bit[2]) regime) = {
+  switch regime {
+    case EL1 -> SCTLR_EL1
+    case EL2 -> SCTLR_EL2
+    case EL3 -> SCTLR_EL3
+    case _ -> {assert(false,Some("SCTLR_type unreachable")); SCTLR_EL1} (* ARM: Unreachable() *)
+  }
+}
+
+(** FUNCTION:SCTLRType SCTLR[] *)
+
+(* Read the SCTLR of the current stage-1 translation regime. *)
+function SCTLR_type effect {rreg} SCTLR' () = SCTLR(S1TranslationRegime())
+
+(** FUNCTION:aarch64/functions/system/AArch64.CheckUnallocatedSystemAccess *)
+
+(* return true if the access is not allowed *)
+function boolean AArch64_CheckUnallocatedSystemAccess ((bit[2]) op0, (bit[3]) op1, (bit[4]) crn, (bit[4]) crm, (bit[3]) op2, (bit) read) =
+{
+ switch (op0,op1,crn,crm,op2, read) {
+ case (0b00,0b000,0b0100,_,0b101, _) -> PSTATE_EL < EL1 (* SPSel *)
+ case (0b00,0b011,0b0100,_,0b110, _) -> false (* DAIFSet *) (* TODO: EL0 Config-RW ??? *)
+ case (0b00,0b011,0b0100,_,0b111, _) -> false (* DAIFClr *) (* TODO: EL0 Config-RW ??? *)
+
+ (* follow the "Usage constraints" of each register *)
+ case (0b11,0b011,0b0100,0b0010,0b000, _) -> false (* NZCV *)
+ case (0b11,0b011,0b0100,0b0010,0b001, _) -> false (* DAIF *) (* TODO: EL0 Config-RW ??? *)
+(* case (0b11,0b000,0b0001,0b0000,0b001, _) -> PSTATE_EL < EL1 (* ACTLR_EL1 *) *)
+ }
+}
+
+(** FUNCTION:aarch64/functions/system/CheckSystemAccess *)
+
+(* Validate an MSR/MRS/SYS encoding at the current EL and security state:
+   derive the minimum EL (and Secure-only requirement) from op1, then raise
+   UnallocatedEncoding on any violation, including per-register checks via
+   AArch64_CheckUnallocatedSystemAccess.  Trap checks (HCR_EL2.TIDCP,
+   SIMD/FP, system-register traps) are left commented out, unimplemented. *)
+function unit CheckSystemAccess((bit[2]) op0, (bit[3]) op1, (bit[4]) crn, (bit[4]) crm, (bit[3]) op2, (bit[5]) rt, (bit) read) =
+{
+  (*Checks if an AArch64 MSR/MRS/SYS instruction is UNALLOCATED or trapped at the current exception*)
+  (*level, security state and configuration, based on the opcode's encoding.*)
+  (boolean) unallocated := false;
+  (boolean) need_secure := false;
+  (bit[2]) min_EL := 0; (* ARM: uninitialized *)
+
+  (*Check for traps by HCR_EL2.TIDCP*)
+  (* TODO: uncomment when implementing traps
+  if HaveEL(EL2) & ~(IsSecure()) & HCR_EL2.TIDCP == 1 & op0[0] == 1 & (switch crn {case [1,_,1,1] -> true case _ -> false}) then {
+    (*At Non-secure EL0, it is IMPLEMENTATION_DEFINED whether attempts to execute system control*)
+    (*register access instructions with reserved encodings are trapped to EL2 or UNDEFINED*)
+    if PSTATE_EL == EL0 (* FIXME: & boolean IMPLEMENTATION_DEFINED "Reserved Control Space EL0 Trapped" *) then
+      AArch64_SystemRegisterTrap(EL2, op0, op2, op1, crn, rt, crm, read)
+    else if PSTATE.EL == EL1 then
+      AArch64_SystemRegisterTrap(EL2, op0, op2, op1, crn, rt, crm, read);
+  };*)
+
+  (*Check for unallocated encodings*)
+  switch op1 {
+    case (0b00:[_]) ->
+      min_EL := EL1
+    case 0b010 ->
+      min_EL := EL1
+    case 0b011 ->
+      min_EL := EL0
+    case 0b100 ->
+      min_EL := EL2
+    case 0b101 ->
+      UnallocatedEncoding()
+    case 0b110 ->
+      min_EL := EL3
+    case 0b111 -> {
+      min_EL := EL1;
+      need_secure := true;
+    }
+  };
+  if PSTATE_EL < min_EL then (* ARM: UInt *)
+    UnallocatedEncoding()
+  else if need_secure & ~(IsSecure()) then
+    UnallocatedEncoding()
+  else if AArch64_CheckUnallocatedSystemAccess(op0, op1, crn, crm, op2, read) then
+    UnallocatedEncoding();
+  (*Check for traps on accesses to SIMD or floating-point registers*)
+  (* TODO: uncomment when implementing SIMD/FP
+  let (take_trap, target_el) = AArch64.CheckAdvSIMDFPSystemRegisterTraps(op0, op1, crn, crm, op2) in {
+    if take_trap then
+      AArch64_AdvSIMDFPAccessTrap(target_el);
+  };*)
+
+  (*Check for traps on access to all other system registers*)
+  (* TODO: uncomment when implementing traps
+  let (take_trap, target_el) = AArch64_CheckSystemRegisterTraps(op0, op1, crn, crm, op2, read) in {
+    if take_trap then
+      AArch64_SystemRegisterTrap(target_el, op0, op2, op1, crn, rt, crm, read);
+  };*)
+}
+
+(** FUNCTION:aarch64/functions/system/SysOp_R *)
+
+(* Stub: SYS-instruction read (not modelled); returns 0 after halting via
+   not_implemented. *)
+function bit[64] SysOp_R((uinteger) op0, (uinteger) op1, (uinteger) crn, (uinteger) crm, (uinteger) op2) =
+{
+  not_implemented("SysOp_R");
+  0;
+}
+
+(** FUNCTION:aarch64/functions/system/SysOp_W *)
+
+(* Stub: SYS-instruction write (not modelled). *)
+function unit SysOp_W((uinteger) op0, (uinteger) op1, (uinteger) crn, (uinteger) crm, (uinteger) op2, (bit[64]) _val) =
+{
+  not_implemented("SysOp_W");
+}
+
+(** FUNCTION:bits(64) System_Get(integer op0, integer op1, integer crn, integer crm, integer op2); *)
+
+(* MRS read dispatch.  NOTE(review): the switch covers only NZCV and DAIF;
+   other encodings will fail to match — presumably rejected earlier by
+   CheckSystemAccess.  Verify against callers. *)
+function bit[64] System_Get((uinteger) op0, (uinteger) op1, (uinteger) crn, (uinteger) crm, (uinteger) op2) = {
+  switch (op0,op1,crn,crm,op2) {
+    case (3,3,4,2,0) -> ZeroExtend(NZCV)
+    case (3,3,4,2,1) -> ZeroExtend(DAIF)
+
+(*  case (3,0,1,0,1) -> ZeroExtend(ACTLR_EL1) *)
+  }
+}
+
+(** FUNCTION:System_Put(integer op0, integer op1, integer crn, integer crm, integer op2, bits(64) val); *)
+
+(* MSR write dispatch; same coverage caveat as System_Get. *)
+function unit effect {wreg} System_Put((uinteger) op0, (uinteger) op1, (uinteger) crn, (uinteger) crm, (uinteger) op2, (bit[64]) _val) = {
+  switch (op0,op1,crn,crm,op2) {
+    case (3,3,4,2,0) -> NZCV := _val[31..0]
+    case (3,3,4,2,1) -> DAIF := _val[31..0]
+
+(*  case (3,0,1,0,1) -> ACTLR_EL1 := _val[31..0] *)
+  }
+}
+
+(** FUNCTION:aarch64/instrs/branch/eret/AArch64.ExceptionReturn *)
+
+(* Stub: ERET exception return (not modelled). *)
+function unit AArch64_ExceptionReturn((bit[64]) new_pc, (bit[32]) spsr) =
+{
+  not_implemented("AArch64_ExceptionReturn");
+}
+
+(** FUNCTION:ExtendType DecodeRegExtend(bits(3) op) *)
+
+(* Decode a 3-bit extend-type field by direct numeric conversion; relies on
+   the ExtendType enumeration ordering matching the encoding. *)
+function ExtendType DecodeRegExtend ((bit[3]) op) = ([|8|]) op
+
+(** FUNCTION:aarch64/instrs/extendreg/ExtendReg *)
+
+(* Read X[_reg], sign- or zero-extend its low 'len' bits per the extend
+   type, then shift left by 'shift' (0..4) — the extended-register operand
+   form used by ADD/SUB (extended register) and load/store addressing. *)
+val forall Nat 'N, Nat 'S, 'N IN {8,16,32,64}, 'S >= 0, 'S <= 4.
+    (implicit<'N>,reg_index,ExtendType,[:'S:]) -> bit['N] effect {rreg} ExtendReg
+function bit['N] ExtendReg ((reg_index) _reg, (ExtendType) type, ([:'S:]) shift) =
+{
+  (*assert( shift >= 0 & shift <= 4 );*)
+  (bit['N]) _val := rX(_reg);
+  (boolean) _unsigned := false;
+  (uinteger) len := 0;
+
+  switch type {
+    case ExtendType_SXTB -> { _unsigned := false; len := 8}
+    case ExtendType_SXTH -> { _unsigned := false; len := 16}
+    case ExtendType_SXTW -> { _unsigned := false; len := 32}
+    case ExtendType_SXTX -> { _unsigned := false; len := 64}
+    case ExtendType_UXTB -> { _unsigned := true;  len := 8}
+    case ExtendType_UXTH -> { _unsigned := true;  len := 16}
+    case ExtendType_UXTW -> { _unsigned := true;  len := 32}
+    case ExtendType_UXTX -> { _unsigned := true;  len := 64}
+  };
+
+  (* clamp so the extracted field plus the shift fits in the result width *)
+  len := uMin(len, length(_val) - shift);
+  Extend((_val[(len - 1)..0]) : ((bit['S]) (Zeros())), _unsigned)
+}
+
+(** FUNCTION:(bits(M), bits(M)) DecodeBitMasks (bit immN, bits(6) imms, bits(6) immr, boolean immediate) *)
+
+(* Decode the (immN, imms, immr) bitmask-immediate encoding into the pair
+   (wmask, tmask): wmask is a repeated, rotated run of S+1 ones of element
+   size 2^len; tmask encodes the S-R difference for bitfield moves.
+   Reserved encodings (len < 1, or all-ones S for logical immediates) raise
+   ReservedValue. *)
+val forall Nat 'M, Nat 'E. (implicit<'M>,bit,bit[6],bit[6],boolean) -> (bit['M],bit['M]) effect {escape} DecodeBitMasks
+function forall Nat 'M, Nat 'E, 'E IN {2,4,8,16,32,64}. (bit['M],bit['M]) DecodeBitMasks((bit) immN, (bit[6]) imms, (bit[6]) immr, (boolean) immediate) = {
+  let M = (length((bit['M]) 0)) in {
+(* ARM: (bit['M]) tmask := 0; (* ARM: uninitialized *) *)
+(* ARM: (bit['M]) wmask := 0; (* ARM: uninitialized *) *)
+  (bit[6]) levels := 0; (* ARM: uninitialized *)
+
+  (* Compute log2 of element size *)
+  (* 2^len must be in range [2, 'M] *)
+  ([|6|]) len := switch HighestSetBit([immN]:(NOT(imms))) { case -1 -> { assert (false,None); 0; } case ([|6|]) c -> c };
+  if len < 1 then ReservedValue();
+  assert((M >= lsl(1, len)),None);
+
+  (* Determine S, R and S - R parameters *)
+  levels := ZeroExtend(0b1 ^^ len);
+
+  (* For logical immediates an all-ones value of S is reserved *)
+  (* since it would generate a useless all-ones result (many times) *)
+  if immediate & ((imms & levels) == levels) then
+    ReservedValue();
+
+  (bit[6]) S := (imms & levels);
+  (bit[6]) R := (immr & levels);
+  (bit[6]) diff := S - R; (* 6-bit subtract with borrow *)
+
+  ([:'E:]) esize := lsl(1, len);
+  (bit[6]) d := diff[(len - 1)..0];
+  (bit['E]) welem := ZeroExtend(0b1 ^^ (S + 1));
+  (bit['E]) telem := ZeroExtend(0b1 ^^ (d + 1));
+  wmask := Replicate(ROR(welem, R));
+  tmask := Replicate(telem);
+  (wmask, tmask);
+}}
+
+(** FUNCTION:ShiftType DecodeShift(bits(2) op) *)
+
+(* Decode a 2-bit shift-type field by direct numeric conversion; relies on
+   the ShiftType enumeration ordering matching the encoding. *)
+function ShiftType DecodeShift((bit[2]) op) = ([|4|]) op
+
+(** FUNCTION:bits(N) ShiftReg(integer reg, ShiftType type, integer amount) *)
+
+(* Read X[_reg] and apply the decoded shift (LSL/LSR/ASR/ROR) by 'amount' —
+   the shifted-register operand form. *)
+val forall Nat 'N. (implicit<'N>,reg_index,ShiftType,nat) -> bit['N] effect {rreg} ShiftReg
+function forall Nat 'N. bit['N] effect {rreg} ShiftReg((reg_index) _reg, (ShiftType) type, (nat) amount) = {
+  (bit['N]) result := rX(_reg);
+  switch type {
+    case ShiftType_LSL -> result := LSL(result, amount)
+    case ShiftType_LSR -> result := LSR(result, amount)
+    case ShiftType_ASR -> result := ASR(result, amount)
+    case ShiftType_ROR -> result := ROR(result, amount)
+  };
+  result;
+}
+
+(** FUNCTION:aarch64/instrs/memory/prefetch/Prefetch *)
+
+(* Decode a PRFM operation field: prfop[4:3] selects the hint (PLD/PLI/PST),
+   prfop[2:1] the target cache level, prfop[0] the streaming flag; the 0b11
+   hint encoding is unallocated and ignored.  Forwards to Hint_Prefetch. *)
+function unit Prefetch((bit[64]) address, (bit[5]) prfop) =
+{
+  (PrefetchHint) hint := 0;
+  (uinteger) target := 0;
+  (boolean) stream := 0;
+
+  (* 'return' is a plain local flag here, not a control-flow keyword *)
+  (bool) return := false;
+  switch prfop[4..3] {
+    case 0b00 -> hint := Prefetch_READ (* PLD: prefetch for load *)
+    case 0b01 -> hint := Prefetch_EXEC (* PLI: preload instructions *)
+    case 0b10 -> hint := Prefetch_WRITE (* PST: prepare for store *)
+    case 0b11 -> return := true (* unallocated hint *)
+  };
+  if ~(return) then {
+    target := prfop[2..1]; (* target cache level *)
+    stream := (prfop[0] != 0); (* streaming (non-temporal) *)
+    Hint_Prefetch(address, hint, target, stream);
+}}
+
+(** FUNCTION:aarch64/translation/faults/AArch64.AlignmentFault *)
+
+(* Build a Fault_Alignment fault record; fields that are irrelevant to an
+   alignment fault are left UNKNOWN, per the ARM pseudocode. *)
+function FaultRecord AArch64_AlignmentFault((AccType) acctype, (boolean) iswrite, (boolean) secondstage) =
+{
+  (bit[48]) ipaddress := (bit[48]) UNKNOWN;
+  (uinteger) level := (uinteger) UNKNOWN;
+  (bit) extflag := (bit) UNKNOWN;
+  (boolean) s2fs1walk := (boolean) UNKNOWN;
+
+  AArch64_CreateFaultRecord(Fault_Alignment, ipaddress, level, acctype, iswrite,
+    extflag, secondstage, s2fs1walk);
+}
+
+(** FUNCTION:aarch64/translation/faults/AArch64.NoFault *)
+
+(* Build the "no fault" record used to mark a successful translation. *)
+function FaultRecord AArch64_NoFault() =
+{
+  (bit[48]) ipaddress := (bit[48]) UNKNOWN;
+  (uinteger) level := (uinteger) UNKNOWN;
+  (AccType) acctype := AccType_NORMAL;
+  (boolean) iswrite := (boolean) UNKNOWN;
+  (bit) extflag := (bit) UNKNOWN;
+  (boolean) secondstage := false;
+  (boolean) s2fs1walk := false;
+
+  AArch64_CreateFaultRecord(Fault_None, ipaddress, level, acctype, iswrite,
+    extflag, secondstage, s2fs1walk);
+}
+
+(** FUNCTION:aarch64/translation/translation/AArch64.TranslateAddress *)
+
+(* Identity "translation": the MMU is not modelled, so the physical address
+   equals the virtual address with no fault, Normal memory type, shareable,
+   non-secure.  The real FullTranslate/CheckDebug path is kept commented
+   out for when a TLB model is added. *)
+function AddressDescriptor AArch64_TranslateAddress((bit[64]) vaddress, (AccType) acctype, (boolean) iswrite,
+    (boolean) wasaligned, (uinteger) size) =
+{
+  info("Translation is not implemented, return same address as the virtual (no fault, normal, shareable, non-secure).");
+  (AddressDescriptor) result := {
+    fault = AArch64_NoFault();
+    memattrs = {type = MemType_Normal; shareable = true};
+    paddress = {physicaladdress = vaddress; NS = 1};
+  };
+  (* ARM: uncomment when implementing TLB and delete the code above
+  (AddressDescriptor) result := AArch64_FullTranslate(vaddress, acctype, iswrite, wasaligned, size);
+
+  if ~(acctype == AccType_PTW | acctype == AccType_IC | acctype == AccType_AT) & ~(IsFault(result)) then
+    result.fault := AArch64_CheckDebug(vaddress, acctype, iswrite, size);
+  *)
+
+  result;
+}
diff --git a/arm/armv8_A64_special_purpose_regs.sail b/arm/armv8_A64_special_purpose_regs.sail
new file mode 100644
index 00000000..c12f0eeb
--- /dev/null
+++ b/arm/armv8_A64_special_purpose_regs.sail
@@ -0,0 +1,101 @@
+(*========================================================================*)
+(* *)
+(* Copyright (c) 2015-2016 Shaked Flur *)
+(* Copyright (c) 2015-2016 Kathyrn Gray *)
+(* All rights reserved. *)
+(* *)
+(* This software was developed by the University of Cambridge Computer *)
+(* Laboratory as part of the Rigorous Engineering of Mainstream Systems *)
+(* (REMS) project, funded by EPSRC grant EP/K008528/1. *)
+(* *)
+(* Redistribution and use in source and binary forms, with or without *)
+(* modification, are permitted provided that the following conditions *)
+(* are met: *)
+(* 1. Redistributions of source code must retain the above copyright *)
+(* notice, this list of conditions and the following disclaimer. *)
+(* 2. Redistributions in binary form must reproduce the above copyright *)
+(* notice, this list of conditions and the following disclaimer in *)
+(* the documentation and/or other materials provided with the *)
+(* distribution. *)
+(* *)
+(* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' *)
+(* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *)
+(* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *)
+(* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR *)
+(* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *)
+(* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *)
+(* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF *)
+(* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *)
+(* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *)
+(* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT *)
+(* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF *)
+(* SUCH DAMAGE. *)
+(*========================================================================*)
+
+typedef CurrentEL_type = register bits [31:0]
+{
+ (*31..4 : RES0;*)
+ 3..2 : EL;
+ (*1..0 : RES0;*)
+}
+register (CurrentEL_type) CurrentEL (* Current Exception Level *)
+
+typedef DAIF_type = register bits [31:0]
+{
+ (*31..10 : RES0;*)
+ 9 : D;
+ 8 : A;
+ 7 : I;
+ 6 : F;
+ (*5..0 : RES0;*)
+}
+register (DAIF_type) DAIF (* Interrupt Mask Bits *)
+
+typedef NZCV_type = register bits [31:0]
+{
+ 31 : N;
+ 30 : Z;
+ 29 : C;
+ 28 : V;
+ (*27..0 : RES0;*)
+}
+register (NZCV_type) NZCV (* Condition Flags *)
+
+register (bit[64]) SP_EL0 (* Stack Pointer (EL0) *)
+register (bit[64]) SP_EL1 (* Stack Pointer (EL1) *)
+register (bit[64]) SP_EL2 (* Stack Pointer (EL2) *)
+register (bit[64]) SP_EL3 (* Stack Pointer (EL3) *)
+
+typedef SPSel_type = register bits [31:0]
+{
+ (*31..1 : RES0;*)
+ 0 : SP;
+}
+register (SPSel_type) SPSel (* Stack Pointer Select *)
+
+typedef SPSR_type = register bits [31:0]
+{
+ 31 : N;
+ 30 : Z;
+ 29 : C;
+ 28 : V;
+ (*27..22 : RES0;*)
+ 21 : SS;
+ 20 : IL;
+ (*19..10 : RES0*)
+ 9 : E;
+ 8 : A;
+ 7 : I;
+ 6 : F;
+ (*5 : RES0;*)
+ 4 : M4;
+ 3..0 : M3_0;
+}
+register (SPSR_type) SPSR_EL1 (* Saved Program Status Register (EL1) *)
+register (SPSR_type) SPSR_EL2 (* Saved Program Status Register (EL2) *)
+register (SPSR_type) SPSR_EL3 (* Saved Program Status Register (EL3) *)
+
+
+register (bit[64]) ELR_EL1 (* Exception Link Register (EL1) *)
+register (bit[64]) ELR_EL2 (* Exception Link Register (EL2) *)
+register (bit[64]) ELR_EL3 (* Exception Link Register (EL3) *)
diff --git a/arm/armv8_A64_sys_regs.sail b/arm/armv8_A64_sys_regs.sail
new file mode 100644
index 00000000..918e2b53
--- /dev/null
+++ b/arm/armv8_A64_sys_regs.sail
@@ -0,0 +1,465 @@
+(*========================================================================*)
+(* *)
+(* Copyright (c) 2015-2016 Shaked Flur *)
+(* Copyright (c) 2015-2016 Kathyrn Gray *)
+(* All rights reserved. *)
+(* *)
+(* This software was developed by the University of Cambridge Computer *)
+(* Laboratory as part of the Rigorous Engineering of Mainstream Systems *)
+(* (REMS) project, funded by EPSRC grant EP/K008528/1. *)
+(* *)
+(* Redistribution and use in source and binary forms, with or without *)
+(* modification, are permitted provided that the following conditions *)
+(* are met: *)
+(* 1. Redistributions of source code must retain the above copyright *)
+(* notice, this list of conditions and the following disclaimer. *)
+(* 2. Redistributions in binary form must reproduce the above copyright *)
+(* notice, this list of conditions and the following disclaimer in *)
+(* the documentation and/or other materials provided with the *)
+(* distribution. *)
+(* *)
+(* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' *)
+(* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *)
+(* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *)
+(* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR *)
+(* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *)
+(* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *)
+(* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF *)
+(* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *)
+(* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *)
+(* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT *)
+(* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF *)
+(* SUCH DAMAGE. *)
+(*========================================================================*)
+
+(*************************************************************************)
+(* General system control registers *)
+
+register (bit[32]) ACTLR_EL1 (* Auxiliary Control Register (EL1) *) (* UNUSED *)
+register (bit[32]) ACTLR_EL2 (* Auxiliary Control Register (EL2) *) (* UNUSED *)
+register (bit[32]) ACTLR_EL3 (* Auxiliary Control Register (EL3) *) (* UNUSED *)
+register (bit[32]) AFSR0_EL1 (* Auxiliary Fault Status Register 0 (EL1) *) (* UNUSED *)
+register (bit[32]) AFSR0_EL2 (* Auxiliary Fault Status Register 0 (EL2) *) (* UNUSED *)
+register (bit[32]) AFSR0_EL3 (* Auxiliary Fault Status Register 0 (EL3) *) (* UNUSED *)
+register (bit[32]) AFSR1_EL1 (* Auxiliary Fault Status Register 1 (EL1) *) (* UNUSED *)
+register (bit[32]) AFSR1_EL2 (* Auxiliary Fault Status Register 1 (EL2) *) (* UNUSED *)
+register (bit[32]) AFSR1_EL3 (* Auxiliary Fault Status Register 1 (EL3) *) (* UNUSED *)
+register (bit[32]) AIDR_EL1 (* Auxiliary ID Register *) (* UNUSED *)
+register (bit[64]) AMAIR_EL1 (* Auxiliary Memory Attribute Indirection Register (EL1) *) (* UNUSED *)
+register (bit[64]) AMAIR_EL2 (* Auxiliary Memory Attribute Indirection Register (EL2) *) (* UNUSED *)
+register (bit[64]) AMAIR_EL3 (* Auxiliary Memory Attribute Indirection Register (EL3) *) (* UNUSED *)
+
+typedef CCSIDR_type = register bits [31:0]
+{
+ 31 : WT;
+ 30 : WB;
+ 29 : RA;
+ 28 : WA;
+ 27..13 : NumSets;
+ 12..3 : Associativity;
+ 2..0 : LineSize;
+}
+register (CCSIDR_type) CCSIDR_EL1 (* Current Cache Size ID Register *) (* UNUSED *)
+
+typedef CLIDR_type = register bits [63:0]
+{
+ (*63..33 : RES0*)
+ 32..30 : ICB;
+ 29..27 : LoUU;
+ 26..24 : LoC;
+ 23..21 : LoUIS;
+ 20..18 : Ctype7;
+ 17..15 : Ctype6;
+ 14..12 : Ctype5;
+ 11..9 : Ctype4;
+ 8..6 : Ctype3;
+ 5..3 : Ctype2;
+ 2..0 : Ctype1;
+}
+register (CLIDR_type) CLIDR_EL1 (* Cache Level ID Register *) (* UNUSED *)
+
+typedef CONTEXTIDR_type = register bits [31:0] { 31..0 : PROCID }
+register (CONTEXTIDR_type) CONTEXTIDR_EL1 (* Context ID Register *) (* UNUSED *)
+
+typedef CPACR_type = register bits [31:0]
+{
+ (*31..29 : RES0;*)
+ 28 : TTA;
+ (*27..22 : RES0;*)
+ 21..20 : FPEN;
+ (*19..0 : RES0;*)
+}
+register (CPACR_type) CPACR_EL1 (* Architectural Feature Access Control Register *) (* UNUSED *)
+
+typedef CPTR_type = register bits [31:0]
+{
+ (* in EL3 all the RES are RES0 *)
+ 31 : TCPAC;
+ (*30..21 : RES0;*)
+ 20 : TTA;
+ (*19..14 : RES0;*)
+ (*13..12 : RES1;*)
+ (*11 : RES0;*)
+ 10 : TFP;
+ (*9..0 : RES1;*)
+}
+register (CPTR_type) CPTR_EL2 (* Architectural Feature Trap Register (EL2) *) (* UNUSED *)
+register (CPTR_type) CPTR_EL3 (* Architectural Feature Trap Register (EL3) *) (* UNUSED *)
+
+typedef CSSELR_type = register bits [31:0]
+{
+ (*31..4 : RES0;*)
+ 3..1 : Level;
+ 0 : InD;
+}
+register (CSSELR_type) CSSELR_EL1 (* Cache Size Selection Register *) (* UNUSED *)
+
+typedef CTR_type = register bits [31:0]
+{
+ (*31 : RES1;*)
+ (*30..28 : RES0;*)
+ 27..24 : CWG;
+ 23..20 : ERG;
+ 19..16 : DminLine;
+ 15..14 : L1Ip;
+ (*13..4 : RES0;*)
+ 3..0 : IminLine;
+}
+register (CTR_type) CTR_EL0 (* Cache Type Register *) (* UNUSED *)
+
+(* AArch32 Domain Access Control Register layout: sixteen 2-bit domain *)
+(* fields D15..D0, packed from bit 31 down to bit 0. *)
+(* Fix: D9 was declared as bits 29..18 (a 12-bit span overlapping D14..D10); *)
+(* the correct 2-bit field between D10 (21..20) and D8 (17..16) is 19..18. *)
+typedef DACR32_type = register bits [31:0]
+{
+ 31..30 : D15;
+ 29..28 : D14;
+ 27..26 : D13;
+ 25..24 : D12;
+ 23..22 : D11;
+ 21..20 : D10;
+ 19..18 : D9;
+ 17..16 : D8;
+ 15..14 : D7;
+ 13..12 : D6;
+ 11..10 : D5;
+ 9..8 : D4;
+ 7..6 : D3;
+ 5..4 : D2;
+ 3..2 : D1;
+ 1..0 : D0;
+}
+register (DACR32_type) DACR32_EL2 (* Domain Access Control Register *) (* UNUSED *)
+
+typedef DCZID_type = register bits [31:0]
+{
+ (*31..5 : RES0;*)
+ 4 : DZP;
+ 3..0 : BS;
+}
+register (DCZID_type) DCZID_EL0 (* Data Cache Zero ID register *) (* UNUSED *)
+
+typedef ESR_type = register bits [31:0]
+{
+ 31..26 : EC;
+ 25 : IL;
+ 24..0 : ISS;
+}
+register (ESR_type) ESR_EL1 (* Exception Syndrome Register (EL1) *) (* UNUSED *)
+register (ESR_type) ESR_EL2 (* Exception Syndrome Register (EL2) *) (* UNUSED *)
+register (ESR_type) ESR_EL3 (* Exception Syndrome Register (EL3) *) (* UNUSED *)
+
+register (bit[64]) FAR_EL1 (* Fault Address Register (EL1) *) (* UNUSED *)
+register (bit[64]) FAR_EL2 (* Fault Address Register (EL2) *) (* UNUSED *)
+register (bit[64]) FAR_EL3 (* Fault Address Register (EL3) *) (* UNUSED *)
+
+typedef FPEXC32_type = register bits [31:0]
+{
+ 31 : EX;
+ 30 : EN;
+ 29 : DEX;
+ 28 : FP2V;
+ 27 : VV;
+ 26 : TFV;
+ (*25..21 : RES0;*)
+ (*20..11 : IMPLEMENTATION DEFINED*)
+ 10..8 : VECITR;
+ 7 : IDF;
+ (*6..5 : IMPLEMENTATION DEFINED*)
+ 4 : IXF;
+ 3 : UFF;
+ 2 : OFF;
+ 1 : DZF;
+ 0 : IOF;
+}
+register (FPEXC32_type) FPEXC32_EL2 (* Floating-point Exception Control register *) (* UNUSED *)
+register (bit[32]) HACR_EL2 (* Hypervisor Auxiliary Control Register *) (* UNUSED *)
+
+typedef HCR_type = register bits [63:0]
+{
+ (*63..34 : RES0;*)
+ 33 : ID;
+ 32 : CD;
+ 31 : RW;
+ 30 : TRVM;
+ 29 : HCD;
+ 28 : TDZ;
+ 27 : TGE;
+ 26 : TVM;
+ 25 : TTLB;
+ 24 : TPU;
+ 23 : TPC;
+ 22 : TSW;
+ 21 : TACR;
+ 20 : TIDCP;
+ 19 : TSC;
+ 18 : TID3;
+ 17 : TID2;
+ 16 : TID1;
+ 15 : TID0;
+ 14 : TWE;
+ 13 : TWI;
+ 12 : DC;
+ 11..10 :BSU;
+ 9 : FB;
+ 8 : VSE;
+ 7 : VI;
+ 6 : VF;
+ 5 : AMO;
+ 4 : IMO;
+ 3 : FMO;
+ 2 : PTW;
+ 1 : SWIO;
+ 0 : VM;
+}
+register (HCR_type) HCR_EL2 (* Hypervisor Configuration Register *)
+
+typedef HPFAR_type = register bits [63:0]
+{
+ (*63..40 : RES0;*)
+ 39..4 : FIPA; (* bits [47:12] of FIPA *)
+ (*3..0 : RES0;*)
+}
+register (HPFAR_type) HPFAR_EL2 (* Hypervisor IPA Fault Address Register *) (* UNUSED *)
+
+typedef HSTR_type = register bits [31:0]
+{
+ (*31..16 : RES0;*)
+ 15 : T15;
+ 14 : T14;
+ 13 : T13;
+ 12 : T12;
+ 11 : T11;
+ 10 : T10;
+ 9 : T9;
+ 8 : T8;
+ 7 : T7;
+ 6 : T6;
+ 5 : T5;
+ 4 : T4;
+ 3 : T3;
+ 2 : T2;
+ 1 : T1;
+ 0 : T0;
+}
+register (HSTR_type) HSTR_EL2 (* Hypervisor System Trap Register *) (* UNUSED *)
+
+typedef ID_AA64MMFR0_type = register bits [63:0]
+{
+ (*63..32 : RES0;*)
+ 31..28 : TGran4;
+ 27..24 : TGran64;
+ 23..20 : TGran16;
+ 19..16 : BigEndEL0;
+ 15..12 : SNSMem;
+ 11..8 : BigEnd;
+ 7..4 : ASIDBits;
+ 3..0 : PARange;
+}
+register (ID_AA64MMFR0_type) ID_AA64MMFR0_EL1 (* AArch64 Memory Model Feature Register 0 *)
+
+register (bit[64]) RVBAR_EL1 (* Reset Vector Base Address Register (if EL2 and EL3 not implemented) *)
+register (bit[64]) RVBAR_EL2 (* Reset Vector Base Address Register (if EL3 not implemented) *)
+register (bit[64]) RVBAR_EL3 (* Reset Vector Base Address Register (if EL3 implemented) *)
+
+typedef SCRType = register bits [31:0]
+{
+ (*31..14 : RES0;*)
+ 13 : TWE;
+ 12 : TWI;
+ 11 : ST;
+ 10 : RW;
+ 9 : SIF;
+ 8 : HCE;
+ 7 : SMD;
+ (*6 : RES0;*)
+ (*5..4 : RES1;*)
+ 3 : EA;
+ 2 : FIQ;
+ 1 : IRQ;
+ 0 : NS;
+}
+register (SCRType) SCR_EL3 (* Secure Configuration Register *)
+
+typedef SCTLR_EL1_type = register bits [31:0]
+{
+ (*31..30 : RES0;*)
+ (*29..28 : RES1;*)
+ (*27 : RES0;*)
+ 26 : UCI;
+ 25 : EE;
+ 24 : E0E;
+ (*23..22 : RES1;*)
+ (*21 : RES0;*)
+ (*20 : RES1;*)
+ 19 : WXN;
+ 18 : nTWE;
+ (*17 : RES0;*)
+ 16 : nTWI;
+ 15 : UCT;
+ 14 : DZE;
+ (*13 : RES0;*)
+ 12 : I;
+ (*11 : RES1;*)
+ (*10 : RES0;*)
+ 9 : UMA;
+ 8 : SED;
+ 7 : ITD;
+ (*6 : RES0;*)
+ 5 : CP15BEN;
+ 4 : SA0;
+ 3 : SA;
+ 2 : C;
+ 1 : A;
+ 0 : M;
+}
+register (SCTLR_EL1_type) SCTLR_EL1 (* System Control Register (EL1) *)
+
+typedef SCTLR_type = register bits [31:0]
+{
+ (*31..30 : RES0;*)
+ (*29..28 : RES1;*)
+ (*27..26 : RES0;*)
+ 25 : EE;
+ (*24 : RES0;*)
+ (*23..22 : RES1;*)
+ (*21..20 : RES0;*)
+ 19 : WXN;
+ (*18 : RES1;*)
+ (*17 : RES0;*)
+ (*16 : RES1;*)
+ (*15..13 : RES0;*)
+ 12 : I;
+ (*11 : RES1;*)
+ (*10..6 : RES0;*)
+ (*5..4 : RES1;*)
+ 3 : SA;
+ 2 : C;
+ 1 : A;
+ 0 : M;
+}
+register (SCTLR_type) SCTLR_EL2 (* System Control Register (EL2) *)
+register (SCTLR_type) SCTLR_EL3 (* System Control Register (EL3) *)
+
+typedef TCR_EL1_type = register bits [63:0]
+{
+ (*63..39 : RES0;*)
+ 38 : TBI1;
+ 37 : TBI0;
+ 36 : AS;
+ (*35 : RES0;*)
+ 34..32 : IPS;
+ 31..30 : TG1;
+ 29..28 : SH1;
+ 27..26 : ORGN1;
+ 25..24 : IRGN1;
+ 23 : EPD1;
+ 22 : A1;
+ 21..16 : T1SZ;
+ 15..14 : TG0;
+ 13..12 : SH0;
+ 11..10 : ORGN0;
+ 9..8 : IRGN0;
+ 7 : EPD0;
+ (*6 : RES0;*)
+ 5..0 : T0SZ;
+}
+register (TCR_EL1_type) TCR_EL1 (* Translation Control Register (EL1) *)
+
+typedef TCR_type = register bits [31:0]
+{
+ (*31 : RES1;*)
+ (*30..24 : RES0;*)
+ (*23 : RES1;*)
+ (*22..21 : RES0;*)
+ 20 : TBI;
+ (*19 : RES0;*)
+ 18..16 : PS;
+ 15..14 : TG0;
+ 13..12 : SH0;
+ 11..10 : ORGN0;
+ 9..8 : IRGN0;
+ (*7..6 : RES0;*)
+ 5..0 : T0SZ;
+}
+register (TCR_type) TCR_EL2 (* Translation Control Register (EL2) *)
+register (TCR_type) TCR_EL3 (* Translation Control Register (EL3) *)
+
+(*************************************************************************)
+(* Debug registers *)
+
+typedef DBGPRCR_type = register bits [31:0]
+{
+ (*31..1 : RES0;*)
+ 0 : CORENPDRQ;
+}
+register (DBGPRCR_type) DBGPRCR_EL1 (* Debug Power Control Register *)
+
+typedef OSDLR_type = register bits [31:0]
+{
+ (*31..1 : RES0;*)
+ 0 : DLK;
+}
+register (OSDLR_type) OSDLR_EL1 (* OS Double Lock Register *)
+
+(*************************************************************************)
+(* Performance Monitors registers *)
+
+(*************************************************************************)
+(* Generic Timer registers *)
+
+(*************************************************************************)
+(* Generic Interrupt Controller CPU interface registers *)
+
+(*************************************************************************)
+(* External Debug Register *)
+
+typedef EDSCR_type = register bits [31:0]
+{
+ (*31 : RES0;*)
+ 30 : RXfull;
+ 29 : TXfull;
+ 28 : ITO;
+ 27 : RXO;
+ 26 : TXU;
+ 25 : PipeAdv;
+ 24 : ITE;
+ 23..22 : INTdis;
+ 21 : TDA;
+ 20 : MA;
+ (*19 : RES0;*)
+ 18 : NS;
+ (*17 : RES0;*)
+ 16 : SDD;
+ (*15 : RES0;*)
+ 14 : HDE;
+ 13..10 : RW;
+ 9..8 : EL;
+ 7 : A;
+ 6 : ERR;
+ 5..0 : STATUS;
+}
+register (EDSCR_type) EDSCR (* External Debug Status and Control Register *)
+
+(* Stub: reset of the control registers is not modelled; `cold_reset` is *)
+(* ignored and no register is touched. *)
+function unit effect pure AArch64_ResetControlRegisters((boolean) cold_reset) =
+{
+ ()
+}
+
diff --git a/arm/armv8_common_lib.sail b/arm/armv8_common_lib.sail
new file mode 100644
index 00000000..b09f9b7c
--- /dev/null
+++ b/arm/armv8_common_lib.sail
@@ -0,0 +1,998 @@
+(*========================================================================*)
+(* *)
+(* Copyright (c) 2015-2016 Shaked Flur *)
+(* Copyright (c) 2015-2016 Kathyrn Gray *)
+(* All rights reserved. *)
+(* *)
+(* This software was developed by the University of Cambridge Computer *)
+(* Laboratory as part of the Rigorous Engineering of Mainstream Systems *)
+(* (REMS) project, funded by EPSRC grant EP/K008528/1. *)
+(* *)
+(* Redistribution and use in source and binary forms, with or without *)
+(* modification, are permitted provided that the following conditions *)
+(* are met: *)
+(* 1. Redistributions of source code must retain the above copyright *)
+(* notice, this list of conditions and the following disclaimer. *)
+(* 2. Redistributions in binary form must reproduce the above copyright *)
+(* notice, this list of conditions and the following disclaimer in *)
+(* the documentation and/or other materials provided with the *)
+(* distribution. *)
+(* *)
+(* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' *)
+(* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *)
+(* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *)
+(* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR *)
+(* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *)
+(* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *)
+(* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF *)
+(* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *)
+(* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *)
+(* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT *)
+(* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF *)
+(* SUCH DAMAGE. *)
+(*========================================================================*)
+
+(** FUNCTION:shared/debug/DoubleLockStatus/DoubleLockStatus *)
+
+(* True when the OS Double Lock is locked (OSDLR.DLK set), no core powerdown *)
+(* request is pending (DBGPRCR.CORENPDRQ clear) and the PE is not halted; *)
+(* selects the AArch32 or AArch64 register view based on EL1's register width. *)
+function boolean DoubleLockStatus() =
+{
+ if ELUsingAArch32(EL1) then
+ (DBGOSDLR.DLK == 1 & DBGPRCR.CORENPDRQ == 0 & ~(Halted()))
+ else
+ (OSDLR_EL1.DLK == 1 & DBGPRCR_EL1.CORENPDRQ == 0 & ~(Halted()));
+}
+
+(** FUNCTION:shared/debug/authentication/Debug_authentication *)
+
+(* TODO: these are external signals *)
+
+(* Two-level value of an external debug-authentication signal. *)
+(* NOTE(review): `LOw` looks like a typo for `LOW`; renaming the constant *)
+(* would change the interface, so confirm all uses before correcting it. *)
+typedef signalValue =
+ enumerate {LOw; HIGH}
+
+(* External authentication signal samplers, provided by the environment. *)
+val extern unit -> signalValue effect pure signalDBGEN
+val extern unit -> signalValue effect pure signalNIDEN
+val extern unit -> signalValue effect pure signalSPIDEN
+val extern unit -> signalValue effect pure signalSPNIDEN
+
+(** FUNCTION:shared/debug/authentication/ExternalInvasiveDebugEnabled *)
+
+(* Invasive (halting) external debug is enabled iff the DBGEN signal is high. *)
+function boolean ExternalInvasiveDebugEnabled() =
+{
+ (* In the recommended interface, ExternalInvasiveDebugEnabled returns the state of the DBGEN *)
+ (* signal. *)
+ signalDBGEN() == HIGH;
+}
+
+(** FUNCTION:shared/debug/authentication/ExternalSecureInvasiveDebugEnabled *)
+
+(* Secure invasive debug requires both DBGEN and SPIDEN; when EL3 is not *)
+(* implemented and the PE is not Secure the answer is always false. *)
+function boolean ExternalSecureInvasiveDebugEnabled() =
+{
+ (* In the recommended interface, ExternalSecureInvasiveDebugEnabled returns the state of the *)
+ (* (DBGEN AND SPIDEN) signal. *)
+ (* CoreSight allows asserting SPIDEN without also asserting DBGEN, but this is not recommended. *)
+ if ~(HaveEL(EL3)) & ~(IsSecure()) then false
+ else {
+ (ExternalInvasiveDebugEnabled() & signalSPIDEN() == HIGH);
+}}
+
+(** FUNCTION:shared/debug/halting/DCPSInstruction *)
+
+(* Debug Change PE State instruction — not modelled; traps to not_implemented. *)
+function unit DCPSInstruction((bit[2]) target_el) =
+{
+ not_implemented("DCPSInstruction");
+}
+
+(** FUNCTION:shared/debug/halting/DRPSInstruction *)
+
+(* Debug Restore PE State instruction — not modelled. *)
+function unit DRPSInstruction() =
+{
+ not_implemented("DRPSInstruction");
+}
+
+(** TYPE:shared/debug/halting/DebugHalt *)
+
+(* Halt-reason encodings written to EDSCR.STATUS (6 bits each). *)
+let DebugHalt_Breakpoint = 0b000111
+let DebugHalt_EDBGRQ = 0b010011
+let DebugHalt_Step_Normal = 0b011011
+let DebugHalt_Step_Exclusive = 0b011111
+let DebugHalt_OSUnlockCatch = 0b100011
+let DebugHalt_ResetCatch = 0b100111
+let DebugHalt_Watchpoint = 0b101011
+let DebugHalt_HaltInstruction = 0b101111
+let DebugHalt_SoftwareAccess = 0b110011
+let DebugHalt_ExceptionCatch = 0b110111
+let DebugHalt_Step_NoSyndrome = 0b111011
+
+(** FUNCTION:shared/debug/halting/Halt *)
+
+(* Enter Debug state with the given halt reason — not modelled. *)
+function unit Halt((bit[6]) reason) =
+{
+ not_implemented("Halt");
+}
+
+(** FUNCTION:shared/debug/halting/Halted *)
+
+(* The PE is halted unless EDSCR.STATUS holds one of the two "not halted" *)
+(* encodings (0b000001, 0b000010). *)
+function boolean Halted() =
+{
+ ~(EDSCR.STATUS == 0b000001 | EDSCR.STATUS == 0b000010); (* Halted *)
+}
+
+(** FUNCTION:shared/debug/halting/HaltingAllowed *)
+
+(* Halting is never allowed when already halted or double-locked; otherwise *)
+(* it is gated by the (secure) external invasive debug enables. *)
+function boolean HaltingAllowed() =
+{
+ if Halted() | DoubleLockStatus() then
+ false
+ else if IsSecure() then
+ ExternalSecureInvasiveDebugEnabled()
+ else
+ ExternalInvasiveDebugEnabled();
+}
+
+(** FUNCTION:shared/exceptions/traps/ReservedValue *)
+
+(* A reserved value was decoded: raise the AArch64 Undefined fault. *)
+(* (The AArch32 path is not modelled yet — see the commented-out code.) *)
+function unit ReservedValue() =
+{
+ (* ARM: uncomment when adding aarch32
+ if UsingAArch32() && !AArch32.GeneralExceptionsToAArch64() then
+ AArch32.TakeUndefInstrException()
+ else*)
+ AArch64_UndefinedFault();
+}
+
+(** FUNCTION:shared/exceptions/traps/UnallocatedEncoding *)
+
+(* An unallocated instruction encoding was decoded: raise the AArch64 *)
+(* Undefined fault. *)
+function unit UnallocatedEncoding() =
+{
+ (* If the unallocated encoding is an AArch32 CP10 or CP11 instruction, FPEXC.DEX must be written *)
+ (* to zero. This is omitted from this code. *)
+ (* ARM: uncomment when implementing aarch32
+ if UsingAArch32() && !AArch32.GeneralExceptionsToAArch64() then
+ AArch32.TakeUndefInstrException();
+ else*)
+ AArch64_UndefinedFault();
+}
+
+(** FUNCTION:shared/functions/aborts/IsFault *)
+
+(* True when the address descriptor records any fault (type /= Fault_None). *)
+function boolean IsFault((AddressDescriptor) addrdesc) =
+{
+ (addrdesc.fault).type != Fault_None;
+}
+
+(** FUNCTION:shared/functions/common/ASR *)
+
+(* Arithmetic shift right by `shift`; shift of 0 returns x unchanged, *)
+(* otherwise the carry-out from ASR_C is discarded. *)
+function forall Nat 'N. bit['N] ASR((bit['N]) x, (uinteger) shift) =
+{
+ (*assert shift >= 0;*)
+ (bit['N]) result := 0;
+ if shift == 0 then
+ result := x
+ else
+ let (result', _) = (ASR_C(x, shift)) in { result := result' };
+ result;
+}
+
+(** FUNCTION:shared/functions/common/ASR_C *)
+
+(* Arithmetic shift right with carry-out: sign-extend then take an 'N-bit *)
+(* window; the carry is the last bit shifted out (extended_x[shift-1]). *)
+function forall Nat 'N, Nat 'S, 'S >= 1. (bit['N], bit) ASR_C((bit['N]) x, ([:'S:]) shift) =
+{
+ (*assert shift > 0;*)
+ (bit['S+'N]) extended_x := SignExtend(x);
+ (bit['N]) result := extended_x[(shift + length(x) - 1)..shift];
+ (bit) carry_out := extended_x[shift - 1];
+ (result, carry_out);
+}
+
+(** FUNCTION:integer Align(integer x, integer y) *)
+
+(* Round x down to the nearest multiple of y. *)
+function uinteger Align'((uinteger) x, (uinteger) y) =
+ y * (x quot y)
+
+(** FUNCTION:bits(N) Align(bits(N) x, integer y) *)
+
+(* Bit-vector version of Align: align the unsigned value and re-narrow. *)
+function forall Nat 'N. bit['N] Align((bit['N]) x, (uinteger) y) =
+ (bit['N]) (Align'(UInt(x), y))
+
+(** FUNCTION:integer CountLeadingSignBits(bits(N) x) *)
+
+(* Count leading bits equal to the sign bit, excluding the sign bit itself: *)
+(* XOR of the two (N-1)-bit halves has leading zeros exactly there. *)
+function forall Nat 'N. [|'N|] CountLeadingSignBits((bit['N]) x) =
+ CountLeadingZeroBits(x[(length(x) - 1)..1] ^ x[(length(x) - 2)..0])
+
+(** FUNCTION:integer CountLeadingZeroBits(bits(N) x) *)
+
+(* N-1-HighestSetBit: yields N when x is all zeros (HighestSetBit = -1). *)
+function forall Nat 'N. [|'N|] CountLeadingZeroBits((bit['N]) x) =
+ length(x) - 1 - HighestSetBit(x)
+
+(** FUNCTION:bits(N) Extend(bits(M) x, integer N, boolean unsigned) *)
+
+(* Zero- or sign-extend x to 'N bits depending on `unsigned`. *)
+val forall Nat 'N, Nat 'M, 'M <= 'N. (implicit<'N>,bit['M],bit) -> bit['N] effect pure Extend
+function forall Nat 'N, Nat 'M. bit['N] Extend (x, unsigned) =
+ if unsigned then ZeroExtend(x) else SignExtend(x)
+
+(** FUNCTION:integer HighestSetBit(bits(N) x) *)
+
+(* Index of the most significant set bit, or -1 when x == 0. The `break` *)
+(* flag emulates early exit, since the foreach has no break construct. *)
+function forall Nat 'N. [|-1:('N + -1)|] HighestSetBit((bit['N]) x) = {
+ let N = (length(x)) in {
+ ([|('N + -1)|]) result := 0;
+ (bool) break := false;
+ foreach (i from (N - 1) downto 0)
+ if ~(break) & x[i] == 1 then {
+ result := i;
+ break := true;
+ };
+
+ if break then result else -1;
+}}
+
+(** FUNCTION:integer Int(bits(N) x, boolean unsigned) *)
+
+(* Interpret x as an unsigned or two's-complement signed integer. *)
+function forall Nat 'N. integer Int((bit['N]) x, (boolean) unsigned) = {
+ result := if unsigned then UInt(x) else SInt(x);
+ result;
+}
+
+(** FUNCTION:boolean IsZero(bits(N) x) *)
+
+(* True iff every bit of x is zero. *)
+function forall Nat 'N. boolean IsZero((bit['N]) x) = x == 0 (* ARM: Zeros(N) *)
+
+(** FUNCTION:bit IsZeroBit(bits(N) x) *)
+
+(* Same test as IsZero, but returned as a single bit. *)
+function forall Nat 'N. bit IsZeroBit((bit['N]) x) = if IsZero(x) then 1 else 0
+
+(** FUNCTION:shared/functions/common/LSL *)
+
+(* Logical shift left; shift of 0 returns x, otherwise LSL_C's carry is dropped. *)
+function forall Nat 'N. bit['N] LSL((bit['N]) x, (uinteger) shift) =
+{
+ (*assert shift >= 0;*)
+ (bit['N]) result := 0;
+ if shift == 0 then
+ result := x
+ else
+ let (result', _) = (LSL_C(x, shift)) in { result := result' };
+ result;
+}
+
+(** FUNCTION:shared/functions/common/LSL_C *)
+
+(* Logical shift left with carry-out: append 'S zero bits, truncate back to *)
+(* 'N bits; the carry is the first bit shifted past the top (extended_x[N]). *)
+function forall Nat 'N, Nat 'S, 'S >= 1. (bit['N], bit) LSL_C((bit['N]) x, ([:'S:]) shift) =
+{
+ (*assert shift > 0;*)
+ (bit['N + 'S]) extended_x := x : (bit['S]) (Zeros());
+ (bit['N]) result := (bit['N]) (mask(extended_x));
+ (bit) carry_out := extended_x[length(x)];
+ (result, carry_out);
+}
+
+(** FUNCTION:shared/functions/common/LSR *)
+
+(* Logical shift right; shift of 0 returns x, otherwise LSR_C's carry is dropped. *)
+function forall Nat 'N. bit['N] LSR((bit['N]) x, (uinteger) shift) =
+{
+ (*assert shift >= 0;*)
+ (bit['N]) result := 0;
+ if shift == 0 then
+ result := x
+ else
+ let (result', _) = (LSR_C(x, shift)) in { result := result' };
+ result;
+}
+
+(** FUNCTION:shared/functions/common/LSR_C *)
+
+(* Logical shift right with carry-out: zero-extend then take an 'N-bit *)
+(* window; the carry is the last bit shifted out (extended_x[shift-1]). *)
+function forall Nat 'N, Nat 'S, 'S >= 1. (bit['N], bit) LSR_C((bit['N]) x, ([:'S:]) shift) =
+{
+ (*assert shift > 0;*)
+ (bit['N + 'S]) extended_x := ZeroExtend(x);
+ (bit['N]) result := extended_x[(shift + length(x) - 1)..shift];
+ (bit) carry_out := extended_x[shift - 1];
+ (result, carry_out);
+}
+
+(** FUNCTION:integer Min(integer a, integer b) *)
+
+(* Return the smaller of two signed integers (a when equal). *)
+function integer Min((integer) a, (integer) b) =
+{
+ if b < a then b else a;
+}
+
+(* Return the smaller of two unsigned integers (a when equal). *)
+function uinteger uMin((uinteger) a, (uinteger) b) =
+{
+ if b < a then b else a;
+}
+
+(** FUNCTION:bits(N) NOT(bits(N) x); *)
+
+(* Bitwise complement, for vectors and for a single bit. *)
+function forall Nat 'N. bit['N] NOT((bit['N]) x) = ~(x)
+function bit NOT'((bit) x) = ~(x)
+
+(** FUNCTION:shared/functions/common/Ones *)
+
+(* An all-ones vector of the (implicit) target width. *)
+function forall Nat 'N. bit['N] Ones() = Replicate([1])
+
+(** FUNCTION:shared/functions/common/ROR *)
+
+(* Rotate right; rotation of 0 returns x, otherwise ROR_C's carry is dropped. *)
+function forall Nat 'N. bit['N] ROR((bit['N]) x, (uinteger) shift) =
+{
+ (*assert shift >= 0;*)
+ (bit['N]) result := 0;
+ if shift == 0 then
+ result := x
+ else
+ let (result', _) = (ROR_C(x, shift)) in { result := result' };
+ result;
+}
+
+(** FUNCTION:shared/functions/common/ROR_C *)
+
+(* Rotate right with carry-out: the rotation amount is reduced mod N and the *)
+(* carry is the MSB of the rotated result. *)
+(* Fix: the original constraint list was `'S >= 1, 'S <= -1`, which no Nat *)
+(* can satisfy; the sibling helpers ASR_C/LSL_C/LSR_C constrain only *)
+(* `'S >= 1`, so ROR_C now matches them. *)
+function forall Nat 'N, Nat 'S, 'S >= 1. (bit['N], bit) ROR_C((bit['N]) x, ([:'S:]) shift) =
+{
+ let N = (length(x)) in {
+ (*assert shift != 0;*)
+ (nat) m := shift mod N;
+ (bit['N]) result := (LSR(x,m) | LSL(x,N - m));
+ (bit) carry_out := result[N - 1];
+ (result, carry_out);
+}}
+
+(** FUNCTION:shared/functions/common/Replicate *)
+
+(* Tile the 'M-bit pattern x across an 'N-bit result ('N must be a multiple *)
+(* of 'M). Each iteration shifts the accumulator left by M and ORs in x *)
+(* (zero-padded to width N via `zeros:x`). *)
+function forall Nat 'N, Nat 'M. bit['N] Replicate((bit['M]) x) = {
+ let (N,M) = (length((bit['N]) 0), length((bit['M]) 0)) in {
+ assert((N mod M == 0),None);
+
+ (bit['N]) result := 0;
+ zeros := (bit['N-'M]) (Zeros());
+ foreach (i from M to N by M) result := ((result << M) | zeros:x);
+
+ result;
+}}
+
+(** FUNCTION:integer SInt(bits(N) x) *)
+
+(* Two's-complement signed value of x, via the builtin `signed`. *)
+(*function forall Nat 'N, Nat 'M, Nat 'K, 'M = 'N + -1, 'K = 2**'M. [|'K * -1:'K + -1|] SInt((bit['N]) x) =*)
+function forall Nat 'N, Nat 'M. [:'M:] SInt((bit['N]) x) =
+{ signed(x)
+ (*let N = (length((bit['N]) 0)) in {
+ (integer) result := (nat) x;
+ if x[N - 1] == 1 then result := result - (2 ** N);
+ result;
+}*)}
+
+
+(** FUNCTION:bits(N) SignExtend(bits(M) x, integer N) *)
+
+(* Sign-extend: replicate the top bit [h] of x across the added high bits. *)
+function forall Nat 'N, Nat 'M. bit['N] SignExtend ((bit['M]) ([h]:remainder as x)) =
+ ((bit[('N - 'M)]) (Replicate([h]))) : x
+
+(** FUNCTION:integer UInt(bits(N) x) *)
+
+(* Unsigned value of x, via the builtin `unsigned`. *)
+(* function forall Nat 'N, Nat 'M, 'M = 2**'N. [|'M + -1|] UInt((bit['N]) x) = ([|'M + -1|]) x *)
+function forall Nat 'N, Nat 'M. [:'M:] UInt((bit['N]) x) = unsigned(x)
+
+(** FUNCTION:bits(N) ZeroExtend(bits(M) x, integer N) *)
+
+(* Zero-extend: prepend 'N-'M zero bits above x. *)
+function forall Nat 'M, Nat 'N. bit['N] ZeroExtend ((bit['M]) x) =
+ ((bit[('N + 'M * -1)]) (Zeros())) : x
+
+(** FUNCTION:shared/functions/common/Zeros *)
+
+(* An all-zeros vector of the (implicit) target width. *)
+function forall Nat 'N. bit['N] Zeros() = (bit['N]) 0 (* ARM: Zeros(N) *)
+
+(** FUNCTION:bits(N) BitReverse(bits(N) data) *)
+
+(* Return `data` with its bit order mirrored (bit i of the input becomes *)
+(* bit N-1-i of the output). *)
+function forall Nat 'N. bit['N] BitReverse((bit['N]) data) = {
+ let N = (length(data)) in {
+ (bit['N]) reversed := 0; (* ARM: uninitialized *)
+ foreach (j from 0 to (N - 1))
+ reversed[j] := data[N - j - 1];
+ reversed;
+}}
+
+(** FUNCTION:shared/functions/crc/HaveCRCExt *)
+
+(* Whether the CRC32 extension is implemented; read from the model's *)
+(* IMPLEMENTATION_DEFINED configuration record. *)
+(* TODO: this should not be hardcoded *)
+function (boolean) HaveCRCExt() = IMPLEMENTATION_DEFINED.HaveCRCExt
+
+(** FUNCTION:bits(32) Poly32Mod2(bits(N) data, bits(32) poly) *)
+
+(* Modulo-2 polynomial division: reduce `data` by the 32-bit polynomial *)
+(* `poly` and return the 32-bit remainder (used by the CRC32 instructions). *)
+(* Requires N > 32. *)
+(* Fix: removed the dead local `(bit['N]) result := data;` — the function *)
+(* computes exclusively with `data'` and never read it. *)
+function forall Nat 'N. bit[32] Poly32Mod2((bit['N]) data, (bit[32]) poly) = {
+ let N = (length(data)) in {
+ assert((N > 32 ),None);
+ (bit['N]) data' := data;
+ (bit['N - 32]) zeros := Zeros();
+ (* long division: whenever the current top bit is set, XOR in the *)
+ (* polynomial shifted so its MSB lines up with bit i *)
+ foreach (i from (N - 1) downto 32) {
+ if data'[i] == 1 then
+ data'[(i - 1)..0] := data'[(i - 1)..0] ^ (poly:zeros[(i - 33)..0]);
+ };
+ data'[31..0];
+}}
+
+(** FUNCTION:shared/functions/exclusive/ClearExclusiveByAddress *)
+
+(* Exclusive-monitor operations are deliberately no-ops / optimistic stubs: *)
+(* the concurrency model outside this spec accounts for exclusives instead. *)
+function unit effect pure ClearExclusiveByAddress((FullAddress) paddress, (integer) processorid, (uinteger) size) =
+{
+ info("The model does not implement the exclusive monitors explicitly.");
+}
+
+(** FUNCTION:shared/functions/exclusive/ClearExclusiveLocal *)
+
+function unit effect pure ClearExclusiveLocal((int) processorid) =
+{
+ info("The model does not implement the exclusive monitors explicitly.");
+}
+
+(** FUNCTION:shared/functions/exclusive/ExclusiveMonitorsStatus *)
+
+(* Must never actually be called in this model; aborts via not_implemented. *)
+function bit effect pure ExclusiveMonitorsStatus() =
+{
+ info("The model does not implement the exclusive monitors explicitly.");
+ not_implemented("ExclusiveMonitorsStatus should not be called");
+ 0;
+}
+
+(** FUNCTION:shared/functions/exclusive/IsExclusiveGlobal *)
+
+(* Always true: the stub assumes the global monitor still holds the address. *)
+function boolean effect pure IsExclusiveGlobal((FullAddress) paddress, (integer) processorid, (uinteger) size) =
+{
+ info("The model does not implement the exclusive monitors explicitly.");
+ true;
+}
+
+(** FUNCTION:shared/functions/exclusive/IsExclusiveLocal *)
+
+(* Always true: the stub assumes the local monitor still holds the address. *)
+function boolean effect pure IsExclusiveLocal((FullAddress) paddress, (integer) processorid, (uinteger) size) =
+{
+ info("The model does not implement the exclusive monitors explicitly.");
+ true;
+}
+
+(** FUNCTION:shared/functions/exclusive/MarkExclusiveGlobal *)
+
+function unit effect pure MarkExclusiveGlobal((FullAddress) paddress, (integer) processorid, (uinteger) size) =
+{
+ info("The model does not implement the exclusive monitors explicitly.");
+}
+
+(** FUNCTION:shared/functions/exclusive/MarkExclusiveLocal *)
+
+function unit effect pure MarkExclusiveLocal((FullAddress) paddress, (integer) processorid, (uinteger) size) =
+{
+ info("The model does not implement the exclusive monitors explicitly.");
+}
+
+(** FUNCTION:shared/functions/exclusive/ProcessorID *)
+
+(* FIXME: return the real number? *)
+function integer ProcessorID() = {0}
+
+(** FUNCTION:(bits(N), bits(4)) AddWithCarry(bits(N) x, bits(N) y, bit carry_in) *)
+
+(* Add x + y + carry_in, returning the 'N-bit result and the NZCV flags: *)
+(* N = sign bit of result, Z = result is zero, C = unsigned overflow *)
+(* (result /= full unsigned sum), V = signed overflow (result /= signed sum). *)
+function forall Nat 'N. (bit['N],bit[4]) AddWithCarry ((bit['N]) x, (bit['N]) y, (bit) carry_in) = {
+ (uinteger) unsigned_sum := UInt(x) + UInt(y) + UInt([carry_in]);
+ (integer) signed_sum := SInt(x) + SInt(y) + UInt([carry_in]);
+ (bit['N]) result := (bit['N]) unsigned_sum; (* same value as signed_sum<N-1:0> *)
+ (bit) n := result[(length(result)) - 1];
+ (bit) z := if IsZero(result) then 1 else 0;
+ (bit) c := if UInt(result) == unsigned_sum then 0 else 1;
+ (bit) v := if SInt(result) == signed_sum then 0 else 1;
+ (result,[n]:[z]:[c]:[v])
+}
+
+(** FUNCTION:boolean BigEndian() *)
+
+(* Current data endianness: PSTATE.E in AArch32, otherwise SCTLR_EL1.E0E at *)
+(* EL0 or the current EL's SCTLR.EE (via SCTLR'()) at higher ELs. *)
+function boolean effect {rreg} BigEndian() = {
+ (boolean) bigend := 0; (* ARM: uninitialized *)
+ if UsingAArch32() then
+ bigend := (PSTATE_E != 0)
+ else if PSTATE_EL == EL0 then
+ bigend := (SCTLR_EL1.E0E != 0)
+ else
+ bigend := ((SCTLR'()).EE != 0);
+ bigend;
+}
+
+(** FUNCTION:shared/functions/memory/BigEndianReverse *)
+(* Reverse the byte order of a power-of-two-width value by recursively *)
+(* swapping halves; an 8-bit value is a single byte and is returned as-is. *)
+val forall Nat 'W, 'W IN {8,16,32,64,128}. bit['W] -> bit['W] effect pure BigEndianReverse
+function rec forall Nat 'W, 'W IN {8, 16, 32, 64, 128}. bit['W] BigEndianReverse ((bit['W]) value) =
+{
+ (uinteger) width := length(value);
+ (uinteger) half := width quot 2;
+ if width == 8 then (*return*) value
+ else (*return*) BigEndianReverse(value[(half - 1)..0]) : BigEndianReverse(value[(width - 1)..half]);
+}
+
+(** FUNCTION:shared/functions/memory/DataMemoryBarrier *)
+
+(* The three DMB flavours (reads/writes/all) are external primitives. *)
+val extern unit -> unit effect {barr} DataMemoryBarrier_Reads
+val extern unit -> unit effect {barr} DataMemoryBarrier_Writes
+val extern unit -> unit effect {barr} DataMemoryBarrier_All
+
+(* Dispatches to the primitive matching 'types'; only the full-system
+   shareability domain is supported by this model. *)
+function unit effect {barr} DataMemoryBarrier((MBReqDomain) domain, (MBReqTypes) types) =
+{
+ if domain != MBReqDomain_FullSystem then
+ not_implemented("DataMemoryBarrier: not MBReqDomain_FullSystem");
+
+ switch types {
+ case MBReqTypes_Reads -> DataMemoryBarrier_Reads()
+ case MBReqTypes_Writes -> DataMemoryBarrier_Writes()
+ case MBReqTypes_All -> DataMemoryBarrier_All()
+ };
+}
+
+(** FUNCTION:shared/functions/memory/DataSynchronizationBarrier *)
+
+(* The three DSB flavours (reads/writes/all) are external primitives. *)
+val extern unit -> unit effect {barr} DataSynchronizationBarrier_Reads
+val extern unit -> unit effect {barr} DataSynchronizationBarrier_Writes
+val extern unit -> unit effect {barr} DataSynchronizationBarrier_All
+
+(* Dispatches to the primitive matching 'types'; only the full-system
+   shareability domain is supported by this model. *)
+function unit effect {barr} DataSynchronizationBarrier((MBReqDomain) domain, (MBReqTypes) types) =
+{
+ if domain != MBReqDomain_FullSystem then
+ not_implemented("DataSynchronizationBarrier: not MBReqDomain_FullSystem");
+
+ switch types {
+ case MBReqTypes_Reads -> DataSynchronizationBarrier_Reads()
+ case MBReqTypes_Writes -> DataSynchronizationBarrier_Writes()
+ case MBReqTypes_All -> DataSynchronizationBarrier_All()
+ };
+}
+
+(** FUNCTION:shared/functions/memory/Hint_Prefetch *)
+(* Prefetch is a pure hint: the model performs no action. *)
+val (bit[64],PrefetchHint,integer,boolean) -> unit effect pure Hint_Prefetch
+function unit effect pure Hint_Prefetch(addr,hint,target,stream) = ()
+
+(** FUNCTION:shared/functions/memory/_Mem *)
+
+(* External read primitives, one per access flavour; 'N is the access
+   size in bytes and the result is 'N*8 bits wide. *)
+(* regular load *)
+val extern forall Nat 'N, 'N IN {1,2,4,8,16}. (bit[64] (*address*),[:'N:] (*size*)) -> bit['N*8] effect {rmem} rMem_NORMAL
+(* non-temporal load (LDNP), see ARM ARM for special exception to normal memory ordering rules *)
+val extern forall Nat 'N, 'N IN {1,2,4,8,16}. (bit[64] (*address*),[:'N:] (*size*)) -> bit['N*8] effect {rmem} rMem_STREAM
+(* load-acquire *)
+val extern forall Nat 'N, 'N IN {1,2,4,8,16}. (bit[64] (*address*),[:'N:] (*size*)) -> bit['N*8] effect {rmem} rMem_ORDERED
+(* load-exclusive *)
+val extern forall Nat 'N, 'N IN {1,2,4,8,16}. (bit[64] (*address*),[:'N:] (*size*)) -> bit['N*8] effect {rmem} rMem_ATOMIC
+(* load-exclusive+acquire *)
+val extern forall Nat 'N, 'N IN {1,2,4,8,16}. (bit[64] (*address*),[:'N:] (*size*)) -> bit['N*8] effect {rmem} rMem_ATOMIC_ORDERED
+
+(* Accumulates the footprint of a (possibly multi-part) read before it is
+   performed by flush_read_buffer. *)
+typedef read_buffer_type = const struct
+{
+ AccType acctype;
+ (bool) exclusive;
+ (bit[64]) address;
+ (uinteger) size;
+}
+
+(* size = 0 marks the buffer as empty; see _rMem. *)
+let empty_read_buffer =
+{ size = 0;
+ (* arbitrary values: *)
+ acctype = AccType_NORMAL;
+ exclusive = false;
+ address = 0;
+}
+
+(* Adds a read of 'size' bytes at desc's physical address to the buffer.
+   An empty buffer (size 0) is (re)initialised; otherwise the new access
+   must have the same kind and be contiguous with the buffered range. *)
+function forall Nat 'N, 'N IN {1,2,4,8,16}. read_buffer_type effect pure _rMem((read_buffer_type) read_buffer, (AddressDescriptor) desc, ([:'N:]) size, (AccType) acctype, (bool) exclusive) =
+{
+ if read_buffer.size == 0 then {
+ { acctype = acctype;
+ exclusive = exclusive;
+ address = (desc.paddress).physicaladdress;
+ size = size;
+ }
+ }
+ else {
+ assert((read_buffer.acctype == acctype), None);
+ assert((read_buffer.exclusive == exclusive), None);
+ assert(((bit[64]) (read_buffer.address + read_buffer.size) == (desc.paddress).physicaladdress), None);
+
+ {read_buffer with size = read_buffer.size + size}
+ }
+}
+
+(* Performs the buffered read with the primitive selected by access type
+   and exclusivity, then byte-reverses the value when big-endian. *)
+function forall Nat 'N, 'N IN {1,2,4,8,16}. bit['N*8] effect {rmem} flush_read_buffer((read_buffer_type) read_buffer, ([:'N:]) size) =
+{
+ assert((read_buffer.size == size), None);
+
+ (bit['N*8]) value := 0;
+
+ if read_buffer.exclusive then {
+ switch read_buffer.acctype {
+ case AccType_ATOMIC -> value := rMem_ATOMIC(read_buffer.address, read_buffer.size)
+ case AccType_ORDERED -> value := rMem_ATOMIC_ORDERED(read_buffer.address, read_buffer.size)
+ case _ -> { not_implemented("unimplemented memory access"); }
+ }
+ } else {
+ switch read_buffer.acctype {
+ case AccType_NORMAL -> value := rMem_NORMAL (read_buffer.address, read_buffer.size)
+ case AccType_STREAM -> value := rMem_STREAM (read_buffer.address, read_buffer.size)
+ case AccType_UNPRIV -> value := rMem_NORMAL (read_buffer.address, read_buffer.size)
+ case AccType_ORDERED -> value := rMem_ORDERED(read_buffer.address, read_buffer.size)
+ case AccType_ATOMIC -> assert(false,Some("Reached AccType_ATOMIC: unreachable when address values are known"))
+ (*(*old code*) value := rMem_NORMAL (read_buffer.address, read_buffer.size) (* the second read of 64-bit LDXP *)*)
+ }
+ };
+
+ if BigEndian() then
+ value := BigEndianReverse(value);
+ value;
+}
+
+(** FUNCTION:_Mem[AddressDescriptor desc, integer size, AccType acctype] = bits(8*size) value; *)
+
+(* Writes are split into an address announcement (wMem_Addr_*) followed by
+   a value write (wMem_Val_*). *)
+(* regular store *)
+val extern forall Nat 'N, 'N IN {1,2,4,8,16}. (bit[64] (*address*), [:'N:] (*size*)) -> unit effect {eamem} wMem_Addr_NORMAL
+(* store-release *)
+val extern forall Nat 'N, 'N IN {1,2,4,8,16}. (bit[64] (*address*), [:'N:] (*size*)) -> unit effect {eamem} wMem_Addr_ORDERED
+(* store-exclusive *)
+val extern forall Nat 'N, 'N IN {1,2,4,8,16}. (bit[64] (*address*), [:'N:] (*size*)) -> unit effect {eamem} wMem_Addr_ATOMIC
+(* store-exclusive+release *)
+val extern forall Nat 'N, 'N IN {1,2,4,8,16}. (bit[64] (*address*), [:'N:] (*size*)) -> unit effect {eamem} wMem_Addr_ATOMIC_ORDERED
+
+(* Announces the address/footprint of a store, selecting the primitive by
+   exclusivity and access type. *)
+function forall Nat 'N, 'N IN {1,2,4,8,16}. unit effect {eamem} wMem_Addr((bit[64]) address, ([:'N:]) size, (AccType) acctype, (boolean) excl) =
+{
+ switch (excl, acctype) {
+ case (false, AccType_NORMAL) -> wMem_Addr_NORMAL(address, size)
+ case (false, AccType_STREAM) -> wMem_Addr_NORMAL(address, size)
+ case (false, AccType_UNPRIV) -> wMem_Addr_NORMAL(address, size)
+ case (false, AccType_ORDERED) -> wMem_Addr_ORDERED(address, size)
+ case (true, AccType_ATOMIC) -> wMem_Addr_ATOMIC(address, size)
+ case (true, AccType_ORDERED) -> wMem_Addr_ATOMIC_ORDERED(address, size)
+ case _ -> not_implemented("unrecognised memory access")
+ };
+}
+
+
+(* regular store *)
+val extern forall Nat 'N, 'N IN {1,2,4,8,16}. ([:'N:] (*size*), bit['N*8] (*value*)) -> unit effect {wmv} wMem_Val_NORMAL
+(* store-exclusive; the bool result reports whether it succeeded *)
+val extern forall Nat 'N, 'N IN {1,2,4,8,16}. ([:'N:] (*size*), bit['N*8] (*value*)) -> bool effect {wmv} wMem_Val_ATOMIC
+
+
+(* Accumulates the footprint and data of a (possibly multi-part) store
+   before it is performed by the flush_write_buffer* functions; 'value'
+   is 128 bits, the largest supported access. *)
+typedef write_buffer_type = const struct
+{
+ AccType acctype;
+ (bool) exclusive;
+ (bit[64]) address;
+ (bit[128]) value;
+ (uinteger) size;
+}
+
+(* size = 0 marks the buffer as empty; see _wMem. *)
+let empty_write_buffer =
+{ size = 0;
+ (* arbitrary values: *)
+ acctype = AccType_NORMAL;
+ exclusive = false;
+ address = 0;
+ value = 0;
+}
+
+(* Adds 'size' bytes of 'value' at desc's physical address to the buffer.
+   An empty buffer (size 0) is (re)initialised; otherwise the new access
+   must have the same kind and be contiguous, and the new bytes are
+   concatenated above the previously buffered ones. *)
+function forall Nat 'N, 'N IN {1,2,4,8,16}. write_buffer_type effect pure _wMem((write_buffer_type) write_buffer, (AddressDescriptor) desc, ([:'N:]) size, (AccType) acctype, (bool) exclusive, (bit['N*8]) value) =
+{
+ if write_buffer.size == 0 then {
+ { acctype = acctype;
+ exclusive = exclusive;
+ address = (desc.paddress).physicaladdress;
+ value = (bit[128]) (ZeroExtend(value));
+ size = size;
+ }
+ } else {
+ assert((write_buffer.acctype == acctype),None);
+ assert((write_buffer.exclusive == exclusive), None);
+ assert(((bit[64]) (write_buffer.address + write_buffer.size) == (desc.paddress).physicaladdress),None);
+
+ { write_buffer with
+ value = (bit[128]) (ZeroExtend(value : (write_buffer.value)[((write_buffer.size * 8) - 1) .. 0]));
+ size = write_buffer.size + size;
+ }
+ }
+}
+
+(* Performs a buffered non-exclusive store.  All handled access types use
+   the NORMAL value write; NOTE(review): presumably release ordering for
+   AccType_ORDERED was already conveyed by the wMem_Addr_ORDERED
+   announcement — confirm against the external primitives. *)
+function unit effect {wmv} flush_write_buffer((write_buffer_type) write_buffer) =
+{
+ assert((write_buffer.exclusive == false), None);
+
+ switch write_buffer.acctype {
+ case AccType_NORMAL -> wMem_Val_NORMAL (write_buffer.size, (write_buffer.value)[((write_buffer.size * 8) - 1) .. 0])
+ case AccType_STREAM -> wMem_Val_NORMAL (write_buffer.size, (write_buffer.value)[((write_buffer.size * 8) - 1) .. 0])
+ case AccType_UNPRIV -> wMem_Val_NORMAL (write_buffer.size, (write_buffer.value)[((write_buffer.size * 8) - 1) .. 0])
+ case AccType_ORDERED -> wMem_Val_NORMAL (write_buffer.size, (write_buffer.value)[((write_buffer.size * 8) - 1) .. 0])
+ case _ -> not_implemented("unrecognised memory access")
+ };
+}
+
+(* Performs a buffered store-exclusive; returns true when the exclusive
+   write succeeded. *)
+function bool effect {wmv} flush_write_buffer_exclusive((write_buffer_type) write_buffer) =
+{
+ assert((write_buffer.exclusive), None);
+
+ switch write_buffer.acctype {
+ case AccType_ATOMIC -> wMem_Val_ATOMIC(write_buffer.size, (write_buffer.value)[((write_buffer.size * 8) - 1) .. 0])
+ case AccType_ORDERED -> wMem_Val_ATOMIC(write_buffer.size, (write_buffer.value)[((write_buffer.size * 8) - 1) .. 0])
+ case _ -> { not_implemented("unrecognised memory access"); false; }
+ };
+}
+
+(** FUNCTION:BranchTo(bits(N) target, BranchType branch_type) *)
+
+(* Writes the branch target to _PC.  For 64-bit targets the tag byte
+   (bits 63..56) is canonicalised according to the TBI controls of the
+   current EL's TCR; EL0 and EL1 both use TCR_EL1 (the EL1&0 regime). *)
+function forall Nat 'N, 'N IN {32,64}. unit effect {rreg,wreg} BranchTo((bit['N]) target, (BranchType) _branch_type) = {
+ (bit['N]) target' := target; (* Sail does not let you change parameter vector *)
+
+ Hint_Branch(_branch_type);
+ if length(target) == 32 then {
+ assert( UsingAArch32(), None );
+ _PC := ZeroExtend(target);
+ } else {
+ assert(( length(target) == 64 & ~(UsingAArch32()) ), None);
+ (* Remove the tag bits from tagged target *)
+ switch PSTATE_EL {
+ case EL0 -> {
+ if target'[55] == 1 & TCR_EL1.TBI1 == 1 then
+ target'[63..56] := 0b11111111;
+ if target'[55] == 0 & TCR_EL1.TBI0 == 1 then
+ target'[63..56] := 0b00000000;
+ }
+ case EL1 -> {
+ if target'[55] == 1 & TCR_EL1.TBI1 == 1 then
+ target'[63..56] := 0b11111111;
+ if target'[55] == 0 & TCR_EL1.TBI0 == 1 then
+ target'[63..56] := 0b00000000;
+ }
+ case EL2 -> {
+ if TCR_EL2.TBI == 1 then
+ target'[63..56] := 0b00000000;
+ }
+ case EL3 -> {
+ if TCR_EL3.TBI == 1 then
+ target'[63..56] := 0b00000000;
+ }
+ };
+ _PC := target';
+ };
+}
+
+(** FUNCTION:shared/functions/registers/Hint_Branch *)
+
+(* Branch hints have no effect on this model. *)
+function unit effect pure Hint_Branch((BranchType) hint) =
+{
+ info("This hint can be used for hardware optimization that has no effect on the model.");
+}
+
+(** FUNCTION:shared/functions/registers/ResetExternalDebugRegisters *)
+val extern boolean -> unit effect pure ResetExternalDebugRegisters
+
+(** FUNCTION:shared/functions/registers/ThisInstrAddr *)
+
+(* Returns the current instruction address at the implicit width N
+   (64 bits, or 32 when in AArch32). *)
+val forall Nat 'N. implicit<'N> -> bit['N] effect {rreg} ThisInstrAddr
+function forall Nat 'N. bit['N] effect {rreg} ThisInstrAddr() =
+{
+ let N = (length((bit['N]) 0)) in {
+ assert((N == 64 | (N == 32 & UsingAArch32())), None);
+ (*return*) mask(rPC());
+}}
+
+(** FUNCTION:// SPSR[] - non-assignment form *)
+
+(* Reads the SPSR of the current exception level; the AArch32 banked
+   SPSRs are not implemented. *)
+function bit[32] rSPSR() =
+{
+ (bit[32]) result := 0; (* ARM: uninitialized *)
+ if UsingAArch32() then {
+ not_implemented("rSPSR UsingAArch32");
+ (* ARM:
+ case PSTATE.M of
+ when M32_FIQ result = SPSR_fiq;
+ when M32_IRQ result = SPSR_irq;
+ when M32_Svc result = SPSR_svc;
+ when M32_Monitor result = SPSR_mon;
+ when M32_Abort result = SPSR_abt;
+ when M32_Hyp result = SPSR_hyp;
+ when M32_Undef result = SPSR_und;
+ otherwise Unreachable();
+ *)
+ } else {
+ switch PSTATE_EL {
+ case EL1 -> result := SPSR_EL1
+ case EL2 -> result := SPSR_EL2
+ case EL3 -> result := SPSR_EL3
+ case _ -> Unreachable()
+ };
+ };
+
+ result;
+}
+
+(** FUNCTION:shared/functions/system/ClearEventRegister *)
+val extern unit -> unit effect pure ClearEventRegister
+
+(** FUNCTION:boolean ConditionHolds(bits(4) cond) *)
+
+(* Evaluates an A64 condition code against the PSTATE flags: bits 3..1
+   select the base condition, bit 0 inverts it (except 0b1111 = AL). *)
+function boolean effect {rreg} ConditionHolds((bit[4]) _cond) =
+{
+ (boolean) result := false; (* ARM: uninitialized *)
+ (* Evaluate base condition *)
+ switch _cond[3..1] {
+ case 0b000 -> result := (PSTATE_Z == 1) (* EQ or NE *)
+ case 0b001 -> result := (PSTATE_C == 1) (* CS or CC *)
+ case 0b010 -> result := (PSTATE_N == 1) (* MI or PL *)
+ case 0b011 -> result := (PSTATE_V == 1) (* VS or VC *)
+ case 0b100 -> result := (PSTATE_C == 1 & PSTATE_Z == 0) (* HI or LS *)
+ case 0b101 -> result := (PSTATE_N == PSTATE_V) (* GE or LT *)
+ case 0b110 -> result := (PSTATE_N == PSTATE_V & PSTATE_Z == 0) (* GT or LE *)
+ case 0b111 -> result := true (* AL *)
+ };
+
+ (* Condition flag values in the set '111x' indicate always true *)
+ (* Otherwise, invert condition if necessary. *)
+ if _cond[0] == 1 & _cond != 0b1111 then
+ result := ~(result);
+
+ result;
+}
+
+(** FUNCTION:boolean ELUsingAArch32(bits(2) el) *)
+
+(* Always false: AArch32 exception levels are not modelled. *)
+function boolean ELUsingAArch32((bit[2]) el) =
+ false (* ARM: ELStateUsingAArch32(el, IsSecureBelowEL3()) *) (* FIXME: implement *)
+
+(** FUNCTION:shared/functions/system/EventRegisterSet *)
+val extern unit -> unit effect pure EventRegisterSet
+
+(** FUNCTION:shared/functions/system/EventRegistered *)
+val extern unit -> boolean effect pure EventRegistered
+
+(** FUNCTION:shared/functions/system/HaveAArch32EL *)
+
+(* Whether exception level 'el' supports AArch32; implementation defined
+   for implemented ELs above EL0. *)
+function boolean HaveAArch32EL((bit[2]) el) =
+{
+ (* Return TRUE if Exception level 'el' supports AArch32 *)
+ if ~(HaveEL(el)) then
+ false
+ else if ~(HaveAnyAArch32()) then
+ false (* No exception level can use AArch32 *)
+ else if HighestELUsingAArch32() then
+ true (* All exception levels must use AArch32 *)
+ else if el == EL0 then
+ true (* EL0 must support using AArch32 *)
+ else
+
+ IMPLEMENTATION_DEFINED.HaveAArch32EL;
+}
+
+(** FUNCTION:boolean HaveAnyAArch32() *)
+
+(* True if any exception level supports AArch32 (implementation defined). *)
+function boolean HaveAnyAArch32() =
+{
+ IMPLEMENTATION_DEFINED.HaveAnyAArch32
+}
+
+(** FUNCTION:boolean HaveEL(bits(2) el) *)
+
+(* True if exception level 'el' is implemented; EL0/EL1 always exist,
+   EL2/EL3 are implementation defined. *)
+function boolean HaveEL((bit[2]) el) =
+{
+ if el == EL1 | el == EL0 then
+ true (*EL1 and EL0 must exist*)
+ else {
+
+ switch el {
+ case EL2 -> IMPLEMENTATION_DEFINED.HaveEL2
+ case EL3 -> IMPLEMENTATION_DEFINED.HaveEL3
+ case _ -> {assert (false,None); false;}
+ };
+}}
+
+(** FUNCTION:boolean HighestELUsingAArch32() *)
+
+(* True if the highest implemented EL runs AArch32 (implementation
+   defined; false when no EL supports AArch32). *)
+function boolean HighestELUsingAArch32() =
+{
+ if ~(HaveAnyAArch32()) then false else
+ IMPLEMENTATION_DEFINED.HighestELUsingAArch32; (* e.g. CFG32SIGNAL == HIGH *)
+}
+
+(** FUNCTION:shared/functions/system/Hint_Yield *)
+
+(* YIELD is a pure hint: no effect on this model. *)
+function unit Hint_Yield() = ()
+
+(** FUNCTION:shared/functions/system/InstructionSynchronizationBarrier *)
+val extern unit -> unit effect {barr} InstructionSynchronizationBarrier
+
+(** FUNCTION:shared/functions/system/InterruptPending *)
+
+val extern unit -> boolean effect pure InterruptPending
+
+(** FUNCTION:boolean IsSecure() *)
+
+(* True when the current exception level is in Secure state: EL3 itself
+   (AArch64), Monitor mode (AArch32), or Secure below EL3. *)
+function boolean IsSecure() =
+{
+ (*Return TRUE if current Exception level is in Secure state.*)
+ if HaveEL(EL3) & ~(UsingAArch32()) & PSTATE_EL == EL3 then
+ true
+ else if HaveEL(EL3) & UsingAArch32() & PSTATE_M == M32_Monitor then
+ true
+ else
+ IsSecureBelowEL3();
+}
+
+(** FUNCTION:boolean IsSecureBelowEL3() *)
+
+(* Security state of the ELs below EL3: SCR.NS when EL3 exists, Non-secure
+   when only EL2 exists, otherwise implementation defined. *)
+function boolean IsSecureBelowEL3() = {
+ if HaveEL(EL3) then
+ ((SCR_GEN()).NS == 0)
+ else if HaveEL(EL2) then
+ false
+ else
+ (*TRUE if processor is Secure or FALSE if Non-secure;*)
+ IMPLEMENTATION_DEFINED.IsSecureBelowEL3;
+}
+
+(** FUNCTION:shared/functions/system/SCR_GEN *)
+
+(* Returns the active Secure Configuration Register: AArch32 SCR when the
+   highest EL uses AArch32, otherwise SCR_EL3.  Requires EL3. *)
+function SCRType SCR_GEN() = {
+ (*AArch32 secure & AArch64 EL3 registers are not architecturally mapped*)
+ assert (HaveEL(EL3),None);
+
+ if HighestELUsingAArch32() then
+ SCR
+ else
+ SCR_EL3;
+}
+
+(** FUNCTION:shared/functions/system/SendEvent *)
+
+(* Event-register signalling is not modelled. *)
+function unit SendEvent() =
+{
+ ()
+ (* TODO: ??? *)
+}
+
+(** FUNCTION:shared/functions/system/Unreachable *)
+
+(* Aborts execution: reaching this point indicates a model bug. *)
+function unit Unreachable() =
+{
+ assert (false,Some("Unreachable reached"));
+}
+
+(** FUNCTION:shared/functions/system/UsingAArch32 *)
+
+(* This model is AArch64-only, so this is always false. *)
+function boolean UsingAArch32() =
+{
+ false;
+ (* ARM: uncomment when implementing aarch32
+ boolean aarch32 = (PSTATE.nRW == '1');
+ if !HaveAnyAArch32() then assert !aarch32;
+ if HighestELUsingAArch32() then assert aarch32;
+ return aarch32;*)
+}
+
+(** FUNCTION:shared/functions/system/WaitForEvent *)
+val extern unit -> unit effect pure WaitForEvent
+
+(** FUNCTION:shared/functions/system/WaitForInterrupt *)
+val extern unit -> unit effect pure WaitForInterrupt
+
+(** FUNCTION:shared/translation/translation/PAMax *)
+
+(* Physical address width in bits, decoded from ID_AA64MMFR0_EL1.PARange;
+   reserved encodings abort via Unreachable. *)
+function uinteger PAMax() =
+{
+ (uinteger) pa_size := 0;
+ switch ID_AA64MMFR0_EL1.PARange {
+ case 0b0000 -> pa_size := 32
+ case 0b0001 -> pa_size := 36
+ case 0b0010 -> pa_size := 40
+ case 0b0011 -> pa_size := 42
+ case 0b0100 -> pa_size := 44
+ case 0b0101 -> pa_size := 48
+ case _ -> Unreachable()
+ };
+
+ (*return*) pa_size;
+}
+
+(** FUNCTION:shared/translation/translation/S1TranslationRegime *)
+
+(* Exception level that controls the current stage-1 translation regime:
+   the current EL, except that EL0 accesses use EL1 (or EL3 when Secure
+   and EL3 is AArch32). *)
+function bit[2] effect {rreg} S1TranslationRegime () =
+{
+ if PSTATE_EL != EL0 then
+ PSTATE_EL
+ else if IsSecure() & HaveEL(EL3) & ELUsingAArch32(EL3) then
+ EL3
+ else
+ EL1
+}
diff --git a/arm/armv8_lib.h.sail b/arm/armv8_lib.h.sail
new file mode 100644
index 00000000..e8e979c8
--- /dev/null
+++ b/arm/armv8_lib.h.sail
@@ -0,0 +1,240 @@
+(*========================================================================*)
+(* *)
+(* Copyright (c) 2015-2016 Shaked Flur *)
+(* Copyright (c) 2015-2016 Kathyrn Gray *)
+(* All rights reserved. *)
+(* *)
+(* This software was developed by the University of Cambridge Computer *)
+(* Laboratory as part of the Rigorous Engineering of Mainstream Systems *)
+(* (REMS) project, funded by EPSRC grant EP/K008528/1. *)
+(* *)
+(* Redistribution and use in source and binary forms, with or without *)
+(* modification, are permitted provided that the following conditions *)
+(* are met: *)
+(* 1. Redistributions of source code must retain the above copyright *)
+(* notice, this list of conditions and the following disclaimer. *)
+(* 2. Redistributions in binary form must reproduce the above copyright *)
+(* notice, this list of conditions and the following disclaimer in *)
+(* the documentation and/or other materials provided with the *)
+(* distribution. *)
+(* *)
+(* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' *)
+(* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *)
+(* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *)
+(* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR *)
+(* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *)
+(* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *)
+(* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF *)
+(* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *)
+(* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *)
+(* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT *)
+(* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF *)
+(* SUCH DAMAGE. *)
+(*========================================================================*)
+
+(*** common lib ***)
+
+(* Memory access kinds shared by loads, stores and maintenance ops. *)
+typedef AccType =
+ enumerate {AccType_NORMAL; AccType_VEC; (* Normal loads and stores *)
+ AccType_STREAM; AccType_VECSTREAM; (* Streaming loads and stores *)
+ AccType_ATOMIC; (* Atomic loads and stores *)
+ AccType_ORDERED; (* Load-Acquire and Store-Release *)
+ AccType_UNPRIV; (* Load and store unprivileged *)
+ AccType_IFETCH; (* Instruction fetch *)
+ AccType_PTW; (* Page table walk *)
+ (* Other operations*)
+ AccType_DC; (* Data cache maintenance *)
+ AccType_IC; (* Instruction cache maintenance *)
+ AccType_AT} (* Address translation *)
+
+(* Barrier shareability domain and access-type operands (DMB/DSB). *)
+typedef MBReqDomain =
+ enumerate {MBReqDomain_Nonshareable; MBReqDomain_InnerShareable;
+ MBReqDomain_OuterShareable; MBReqDomain_FullSystem}
+
+typedef MBReqTypes =
+ enumerate {MBReqTypes_Reads; MBReqTypes_Writes; MBReqTypes_All}
+
+
+typedef BranchType =
+ enumerate {BranchType_CALL; BranchType_ERET; BranchType_DBGEXIT;
+ BranchType_RET; BranchType_JMP; BranchType_EXCEPTION;
+ BranchType_UNKNOWN}
+
+typedef MoveWideOp =
+ enumerate {MoveWideOp_N; MoveWideOp_Z; MoveWideOp_K}
+
+(* shared/functions/system/Mode_Bits *)
+(* AArch32 PSTATE.M mode encodings. *)
+let M32_User = 0b10000
+let M32_FIQ = 0b10001
+let M32_IRQ = 0b10010
+let M32_Svc = 0b10011
+let M32_Monitor = 0b10110
+let M32_Abort = 0b10111
+let M32_Hyp = 0b11010
+let M32_Undef = 0b11011
+let M32_System = 0b11111
+
+(* shared/functions/system/EL0 *)
+(* Exception-level encodings. *)
+let EL3 = 0b11
+let EL2 = 0b10
+let EL1 = 0b01
+let EL0 = 0b00
+
+(* Memory-type and fault bookkeeping used by the translation stubs. *)
+typedef DeviceType = enumerate {DeviceType_GRE; DeviceType_nGRE;
+ DeviceType_nGnRE; DeviceType_nGnRnE}
+
+typedef Fault =
+ enumerate { Fault_None;
+ Fault_AccessFlag;
+ Fault_Alignment;
+ Fault_Background;
+ Fault_Domain;
+ Fault_Permission;
+ Fault_Translation;
+ Fault_AddressSize;
+ Fault_SyncExternal;
+ Fault_SyncExternalOnWalk;
+ Fault_SyncParity;
+ Fault_SyncParityOnWalk;
+ Fault_AsyncParity;
+ Fault_AsyncExternal;
+ Fault_Debug;
+ Fault_TLBConflict;
+ Fault_Lockdown;
+ Fault_Exclusive;
+ Fault_ICacheMaint }
+
+typedef FaultRecord = const struct {
+ Fault type; (* Fault Status *)
+ AccType acctype; (* Type of access that faulted *)
+ bit[48] ipaddress; (* Intermediate physical address *)
+ boolean s2fs1walk; (* Is on a Stage 1 page table walk *)
+ boolean write; (* TRUE for a write, FALSE for a read *)
+ uinteger level; (* For translation, access flag and permission faults *)
+ bit extflag; (* IMPLEMENTATION DEFINED syndrome for external aborts *)
+ boolean secondstage; (* Is a Stage 2 abort *)
+ bit[4] domain; (* Domain number, AArch32 only *)
+ bit[4] debugmoe; (* Debug method of entry, from AArch32 only *)
+}
+
+typedef MemAttrHints = const struct {
+ bit[2] attrs; (* The possible encodings for each attributes field are as below *)
+ bit[2] hints; (* The possible encodings for the hints are below *)
+ boolean transient;
+}
+
+typedef MemType = enumerate {MemType_Normal; MemType_Device}
+
+typedef MemoryAttributes = const struct {
+ MemType type;
+
+(* DeviceType device; (* For Device memory types *) *)
+(* MemAttrHints inner; (* Inner hints and attributes *) *)
+(* MemAttrHints outer; (* Outer hints and attributes *) *)
+
+ boolean shareable;
+(* boolean outershareable; *)
+}
+
+typedef FullAddress = const struct {
+ (* because we don't use TLB this is actually a virtual address and
+ therefore we have to use 64 bits instead of 48 *)
+ bit[64] physicaladdress; (* ARM: bit[48] physicaladdress *)
+ bit NS; (* '0' = Secure, '1' = Non-secure *)
+}
+
+(* Result of address translation: fault info, attributes and the address. *)
+typedef AddressDescriptor = const struct {
+ FaultRecord fault; (* fault.type indicates whether the address is valid *)
+ MemoryAttributes memattrs;
+ FullAddress paddress;
+}
+
+typedef PrefetchHint =
+ enumerate {Prefetch_READ; Prefetch_WRITE; Prefetch_EXEC}
+
+(* Shift/extend and bit-vector helper declarations (defined elsewhere in
+   the model). *)
+val forall Nat 'N, Nat 'S, 'S >= 1. (bit['N],[:'S:]) -> (bit['N], bit) effect pure ASR_C
+val forall Nat 'N, Nat 'S, 'S >= 1. (bit['N],[:'S:]) -> (bit['N], bit) effect pure LSL_C
+val forall Nat 'N, Nat 'S, 'S >= 1. (bit['N],[:'S:]) -> (bit['N], bit) effect pure LSR_C
+(* NOTE(review): the constraint ''S >= 1, 'S <= -1' below is unsatisfiable
+   as written; presumably one of the bounds is a typo — confirm against
+   the ROR_C implementation. *)
+val forall Nat 'N, Nat 'S, 'S >= 1, 'S <= -1. (bit['N],[:'S:]) -> (bit['N], bit) effect pure ROR_C
+val forall Nat 'N. bit['N] -> boolean effect pure IsZero
+val forall Nat 'N, Nat 'M. (implicit<'N>,bit['M]) -> bit['N] effect pure Replicate
+val forall Nat 'N, Nat 'M, 'N >= 'M. (implicit<'N>,bit['M]) -> bit['N] effect pure SignExtend
+val forall Nat 'N, Nat 'M, 'N >= 'M. (implicit<'N>,bit['M]) -> bit['N] effect pure ZeroExtend
+val forall Nat 'N. implicit<'N> -> bit['N] effect pure Zeros
+val forall Nat 'N. implicit<'N> -> bit['N] effect pure Ones
+(* val forall Nat 'N, Nat 'M, 'M = 2**'N. bit['N] -> [|'M + -1|] effect pure UInt *)
+val forall Nat 'N, Nat 'M. bit['N] -> [:'M:] effect pure UInt
+(* val forall Nat 'N, Nat 'M, Nat 'K, 'M = 'N + -1, 'K = 2**'M. bit['N] -> [|'K * -1:'K + -1|] effect pure SInt *)
+val forall Nat 'N, Nat 'M. bit['N] -> [:'M:] effect pure SInt
+val forall Nat 'N. bit['N+1] -> [|-1:'N|] effect pure HighestSetBit
+val forall Nat 'N. bit['N] -> [|'N|] effect pure CountLeadingZeroBits
+val unit -> boolean effect {rreg} IsSecure
+val unit -> boolean effect {rreg} IsSecureBelowEL3
+val unit -> SCRType effect pure SCR_GEN
+val unit -> boolean effect pure UsingAArch32
+val bit[2] -> boolean effect pure ELUsingAArch32
+val unit -> boolean effect {rreg} Halted
+val bit[2] -> boolean effect pure HaveEL
+val unit -> boolean effect pure HaveAnyAArch32
+val unit -> boolean effect pure HighestELUsingAArch32
+val unit -> unit effect pure Unreachable
+val BranchType -> unit effect pure Hint_Branch
+
+(*************************************************************************)
+(*** AArch64 lib ***)
+
+typedef CountOp =
+ enumerate {CountOp_CLZ; CountOp_CLS; CountOp_CNT}
+
+typedef ExtendType =
+ (* the order is important for decoding *)
+ enumerate { ExtendType_UXTB; ExtendType_UXTH; ExtendType_UXTW; ExtendType_UXTX;
+ ExtendType_SXTB; ExtendType_SXTH; ExtendType_SXTW; ExtendType_SXTX }
+
+typedef RevOp =
+ enumerate {RevOp_RBIT; RevOp_REV16; RevOp_REV32; RevOp_REV64}
+
+typedef ShiftType =
+ (* the order is important for decoding *)
+ enumerate {ShiftType_LSL; ShiftType_LSR; ShiftType_ASR; ShiftType_ROR}
+
+typedef LogicalOp =
+ enumerate {LogicalOp_AND; LogicalOp_EOR; LogicalOp_ORR}
+
+typedef MemOp =
+ enumerate {MemOp_LOAD; MemOp_STORE; MemOp_PREFETCH}
+
+typedef MemBarrierOp =
+ enumerate {MemBarrierOp_DSB; MemBarrierOp_DMB; MemBarrierOp_ISB}
+
+typedef SystemHintOp =
+ enumerate {SystemHintOp_NOP; SystemHintOp_YIELD;
+ SystemHintOp_WFE; SystemHintOp_WFI;
+ SystemHintOp_SEV; SystemHintOp_SEVL}
+
+typedef PSTATEField =
+ enumerate {PSTATEField_DAIFSet; PSTATEField_DAIFClr;
+ PSTATEField_SP}
+
+(* Register/PC access and AArch64 helpers (declared here; defined
+   elsewhere in the model). *)
+val unit -> bit[64] effect {rreg} rPC
+val forall Nat 'N, 'N IN {8,16,32,64}. implicit<'N> -> bit['N] effect {rreg} rSP
+val forall Nat 'N, 'N IN {32,64}. (reg_index,bit['N]) -> unit effect {wreg} wX
+val forall Nat 'N, 'N IN {8,16,32,64}. (implicit<'N>,reg_index) -> bit['N] effect {rreg} rX
+val forall Nat 'N, 'N IN {8,16,32,64,128}. (SIMD_index,bit['N]) -> unit effect {wreg} wV
+val forall Nat 'N, 'N IN {8,16,32,64,128}. (implicit<'N>,SIMD_index) -> bit['N] effect {rreg} rV
+val forall Nat 'N, 'N IN {8,16,32,64,128}. (implicit<'N>,SIMD_index,[|1|]) -> bit['N] effect {rreg} rVpart
+val unit -> SCTLR_type effect {rreg} SCTLR'
+val unit -> unit effect {escape} AArch64_UndefinedFault
+val (bit[64],AccType,boolean,boolean,uinteger) -> AddressDescriptor effect pure AArch64_TranslateAddress
+val (bit[2],boolean) -> unit effect {escape} AArch64_WFxTrap
+val (AccType,boolean,boolean) -> FaultRecord effect pure AArch64_AlignmentFault
+val unit -> unit effect {wreg} AArch64_ResetGeneralRegisters
+val unit -> unit effect {wreg} AArch64_ResetSIMDFPRegisters
+val unit -> unit effect {wreg} AArch64_ResetSpecialRegisters
+val (bit[64],integer,uinteger) -> boolean effect pure AArch64_IsExclusiveVA
+
+(*************************************************************************)
+(*** AArch32 lib ***)
+
+
+
diff --git a/arm/armv8_pstate.sail b/arm/armv8_pstate.sail
new file mode 100644
index 00000000..5be50b19
--- /dev/null
+++ b/arm/armv8_pstate.sail
@@ -0,0 +1,73 @@
+(*========================================================================*)
+(* *)
+(* Copyright (c) 2015-2016 Shaked Flur *)
+(* Copyright (c) 2015-2016 Kathyrn Gray *)
+(* All rights reserved. *)
+(* *)
+(* This software was developed by the University of Cambridge Computer *)
+(* Laboratory as part of the Rigorous Engineering of Mainstream Systems *)
+(* (REMS) project, funded by EPSRC grant EP/K008528/1. *)
+(* *)
+(* Redistribution and use in source and binary forms, with or without *)
+(* modification, are permitted provided that the following conditions *)
+(* are met: *)
+(* 1. Redistributions of source code must retain the above copyright *)
+(* notice, this list of conditions and the following disclaimer. *)
+(* 2. Redistributions in binary form must reproduce the above copyright *)
+(* notice, this list of conditions and the following disclaimer in *)
+(* the documentation and/or other materials provided with the *)
+(* distribution. *)
+(* *)
+(* THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' *)
+(* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED *)
+(* TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A *)
+(* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR *)
+(* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, *)
+(* SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT *)
+(* LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF *)
+(* USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND *)
+(* ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, *)
+(* OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT *)
+(* OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF *)
+(* SUCH DAMAGE. *)
+(*========================================================================*)
+
+(* AArch64 PSTATE fields, exposed as aliases into the NZCV / DAIF /
+   CurrentEL / SPSel registers; fields with no 64-bit register home are
+   standalone registers. *)
+register alias PSTATE_N = NZCV.N (* Negative condition flag *)
+register alias PSTATE_Z = NZCV.Z (* Zero condition flag *)
+register alias PSTATE_C = NZCV.C (* Carry condition flag *)
+register alias PSTATE_V = NZCV.V (* oVerflow condition flag *)
+register alias PSTATE_D = DAIF.D (* Debug mask bit [AArch64 only] *)
+register alias PSTATE_A = DAIF.A (* Asynchronous abort mask bit *)
+register alias PSTATE_I = DAIF.I (* IRQ mask bit *)
+register alias PSTATE_F = DAIF.F (* FIQ mask bit *)
+(* register alias PSTATE_SS = (* Software step bit *) *)
+(* register alias PSTATE_IL = (* Illegal execution state bit *) *)
+register alias PSTATE_EL = CurrentEL.EL (* Exception Level *)
+register (bit) PSTATE_nRW (* not Register Width: 0=64, 1=32 *)
+register alias PSTATE_SP = SPSel.SP (* Stack pointer select: 0=SP0, 1=SPx [AArch64 only] *) (* TODO: confirm this *)
+(* register alias PSTATE_Q = (* Cumulative saturation flag [AArch32 only] *) *)
+(* register alias PSTATE_GE = (* Greater than or Equal flags [AArch32 only] *) *)
+(* register alias PSTATE_IT = (* If-then bits, RES0 in CPSR [AArch32 only] *) *)
+(* register alias PSTATE_J = (* J bit, RES0 in CPSR [AArch32 only, RES0 in ARMv8] *) *)
+(* register alias PSTATE_T = (* T32 bit, RES0 in CPSR [AArch32 only] *) *)
+register (bit) PSTATE_E (* Endianness bit [AArch32 only] *)
+register (bit[5]) PSTATE_M (* Mode field [AArch32 only] *)
+
+
+(* this is a convenient way to do "PSTATE.<N,Z,C,V> = nzcv;" *)
+(* The second argument is a 4-bit vector destructured as [n,z,c,v]. *)
+function unit effect {wreg} wPSTATE_NZCV((), [n,z,c,v]) =
+{
+ PSTATE_N := n;
+ PSTATE_Z := z;
+ PSTATE_C := c;
+ PSTATE_V := v;
+}
+
+(* this is a convenient way to do "PSTATE.<D,A,I,F> = daif;" *)
+(* The second argument is a 4-bit vector destructured as [d,a,i,f]. *)
+function unit effect {wreg} wPSTATE_DAIF((), [d,a,i,f]) =
+{
+ PSTATE_D := d;
+ PSTATE_A := a;
+ PSTATE_I := i;
+ PSTATE_F := f;
+}