Updated the MMU to no longer generate a misaligned-access trap on cacheable accesses when Zicclsm is supported.

Updated tests with David's help.
Rose Thompson 2023-10-30 18:26:11 -05:00
parent f13b67b869
commit 2241976d29
9 changed files with 186 additions and 19 deletions

View File

@@ -138,8 +138,8 @@ module mmu import cvw::*; #(parameter cvw_t P,
2'b10: DataMisalignedM = VAdr[1] | VAdr[0]; // lw, sw, flw, fsw, lwu
2'b11: DataMisalignedM = |VAdr[2:0]; // ld, sd, fld, fsd
endcase
assign LoadMisalignedFaultM = DataMisalignedM & ReadNoAmoAccessM;
assign StoreAmoMisalignedFaultM = DataMisalignedM & WriteAccessM;
assign LoadMisalignedFaultM = DataMisalignedM & ReadNoAmoAccessM & ~(P.ZICCLSM_SUPPORTED & Cacheable);
assign StoreAmoMisalignedFaultM = DataMisalignedM & WriteAccessM & ~(P.ZICCLSM_SUPPORTED & Cacheable);
// Specify which type of page fault is occurring
assign InstrPageFaultF = TLBPageFault & ExecuteAccessF;
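
For readers who want the change in isolation, here is a minimal, self-contained SystemVerilog sketch of the misaligned-fault logic as it stands after this commit. Only the two visible case arms and the two assign statements come from the hunk above; the module wrapper, port list, the Size selector name, the 2'b00/2'b01 arms, and the plain ZICCLSM_SUPPORTED parameter (P.ZICCLSM_SUPPORTED in the real mmu) are illustrative assumptions.

// Illustrative sketch only, not the actual mmu module.
module misaligned_fault_sketch #(parameter logic ZICCLSM_SUPPORTED = 1'b1) (
  input  logic [1:0]  Size,                 // assumed encoding: 00 byte, 01 half, 10 word, 11 double
  input  logic [63:0] VAdr,                 // virtual address of the access
  input  logic        ReadNoAmoAccessM,     // non-AMO load in the memory stage
  input  logic        WriteAccessM,         // store or AMO in the memory stage
  input  logic        Cacheable,            // PMA marks the region cacheable
  output logic        LoadMisalignedFaultM,
  output logic        StoreAmoMisalignedFaultM
);
  logic DataMisalignedM;

  always_comb
    case (Size)
      2'b00: DataMisalignedM = 1'b0;               // lb, sb, lbu: single bytes are always aligned
      2'b01: DataMisalignedM = VAdr[0];            // lh, sh, lhu
      2'b10: DataMisalignedM = VAdr[1] | VAdr[0];  // lw, sw, flw, fsw, lwu
      2'b11: DataMisalignedM = |VAdr[2:0];         // ld, sd, fld, fsd
    endcase

  // New in this commit: when Zicclsm is supported, a misaligned access to
  // cacheable memory is handled in hardware instead of trapping, so the
  // fault is suppressed for that case. Uncacheable regions still trap.
  assign LoadMisalignedFaultM     = DataMisalignedM & ReadNoAmoAccessM & ~(ZICCLSM_SUPPORTED & Cacheable);
  assign StoreAmoMisalignedFaultM = DataMisalignedM & WriteAccessM     & ~(ZICCLSM_SUPPORTED & Cacheable);
endmodule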

View File

@@ -28,11 +28,11 @@
# Description: Makefrag for RV64I architectural tests
rv64i_sc_tests = \
WALLY-ADD \
WALLY-ADD \
WALLY-SUB \
WALLY-SLT \
WALLY-SLTU \
WALLY-XOR
WALLY-SLTU \
WALLY-XOR \
rv64i_tests = $(addsuffix .elf, $(rv64i_sc_tests))

View File

@@ -57,6 +57,7 @@ target_tests_nosim = \
WALLY-wfi-01 \
WALLY-cbom-01 \
WALLY-cboz-01 \
WALLY-misaligned-access-01 \
# unclear why status-fp-enabled and wfi aren't simulating ok

View File

@@ -0,0 +1,24 @@
00000000
00000000
00000001
00000000
ffffffff
ffffffff
00000001
00000000
00000002
00000000
00000000
00000000
ffffffff
ffffffff
00000000
00000000
fffffffe
ffffffff
393cb5d1
72ca6f49
7b12609b
245889d8
7f42ac28
af17a2d3

View File

@@ -1,3 +1,4 @@
FFFFFFFF # stimecmp low bits
00000000 # stimecmp high bits
00000000 # menvcfg low bits
@@ -24,7 +25,7 @@ FFFFFFFF # stimecmp low bits
00000000
00000004 # mcause from load address misaligned
00000000
80000411 # mtval of misaligned address (0x80000409)
02000001 # mtval of misaligned address (CLINT base + 1)
00000000
00001880 # masked out mstatus.MPP = 11, mstatus.MPIE = 1, and mstatus.MIE = 0
00000000
@@ -36,7 +37,7 @@ FFFFFFFF # stimecmp low bits
00000000
00000006 # mcause from store misaligned
00000000
80000429 # mtval of address with misaligned store instr (0x80000421)
02000001 # mtval of misaligned address (CLINT base + 1)
00000000
00001880 # masked out mstatus.MPP = 11, mstatus.MPIE = 1, and mstatus.MIE = 0
00000000
@@ -136,7 +137,7 @@ FFFFFFFF # stimecmp low bits
00000000
00000004 # mcause from load address misaligned
00000000
80000411 # mtval of misaligned address (0x80000409)
02000001 # mtval of misaligned address (CLINT base + 1)
00000000
00001880 # masked out mstatus.MPP = 11, mstatus.MPIE = 1, and mstatus.MIE = 0
00000000
@@ -148,7 +149,7 @@ FFFFFFFF # stimecmp low bits
00000000
00000006 # mcause from store misaligned
00000000
80000429 # mtval of address with misaligned store instr (0x80000421)
02000001 # mtval of misaligned address (CLINT base + 1)
00000000
00001880 # masked out mstatus.MPP = 11, mstatus.MPIE = 1, and mstatus.MIE = 0
00000000

View File

@@ -26,7 +26,7 @@
00000000
00000004 # scause from load address misaligned
00000000
80000411 # stval of misaligned address (0x80000409)
02000001 # stval of misaligned address (CLINT base + 1)
00000000
00000800 # masked out mstatus.mpp = 1, mstatus.MPIE = 0, and mstatus.MIE = 0
00000000
@@ -38,7 +38,7 @@
00000000
00000006 # scause from store misaligned
00000000
80000429 # stval of address with misaligned store instr (0x80000421)
02000001 # stval of misaligned address (CLINT base + 1)
00000000
00000800 # masked out mstatus.mpp = 1, mstatus.MPIE = 0, and mstatus.MIE = 0
00000000
@@ -128,7 +128,7 @@
00000000
00000004 # scause from load address misaligned
00000000
80000411 # stval of misaligned address (0x80000409)
02000001 # stval of misaligned address (CLINT base + 1)
00000000
00000120 # masked out sstatus.SPP = 1, sstatus.SPIE = 1, and sstatus.SIE = 0
00000000
@@ -140,7 +140,7 @@
00000000
00000006 # scause from store misaligned
00000000
80000429 # stval of address with misaligned store instr (0x80000421)
02000001 # stval of misaligned address (CLINT base + 1)
00000000
00000120 # masked out sstatus.SPP = 1, sstatus.SPIE = 1, and sstatus.SIE = 0
00000000

View File

@@ -26,7 +26,7 @@
00000000
00000004 # scause from load address misaligned
00000000
80000411 # stval of misaligned address (0x80000409)
02000001 # stval of misaligned address (CLINT base + 1)
00000000
00000000 # masked out mstatus.mpp = 0, mstatus.MPIE = 0, and mstatus.MIE = 0
00000000
@@ -38,7 +38,7 @@
00000000
00000006 # scause from store misaligned
00000000
80000429 # stval of address with misaligned store instr (0x80000421)
02000001 # stval of misaligned address (CLINT base + 1)
00000000
00000000 # masked out mstatus.mpp = 0, mstatus.MPIE = 0, and mstatus.MIE = 0
00000000
@@ -122,7 +122,7 @@
00000000
00000004 # scause from load address misaligned
00000000
80000411 # stval of misaligned address (0x80000409)
02000001 # stval of misaligned address (CLINT base + 1)
00000000
00000020 # masked out sstatus.SPP = 0, sstatus.SPIE = 1, and sstatus.SIE = 0
00000000
@@ -134,7 +134,7 @@
00000000
00000006 # scause from store misaligned
00000000
80000429 # stval of address with misaligned store instr (0x80000421)
02000001 # stval of misaligned address (CLINT base + 1)
00000000
00000020 # masked out sstatus.SPP = 0, sstatus.SPIE = 1, and sstatus.SIE = 0
00000000

View File

@@ -98,7 +98,8 @@ cause_breakpnt:
ret
cause_load_addr_misaligned:
auipc t3, 0 // get current PC, which is aligned
li t3, 0x02000000 // base address of the CLINT (uncacheable); with Zicclsm, a misaligned access to cacheable memory no longer traps
//auipc t3, 0 // get current PC, which is aligned
addi t3, t3, 1
lw t4, 0(t3) // load from a misaligned address
ret
@@ -108,7 +109,8 @@ cause_load_acc:
ret
cause_store_addr_misaligned:
auipc t3, 0 // get current PC, which is aligned
li t3, 0x02000000 // base address of the CLINT (uncacheable); with Zicclsm, a misaligned access to cacheable memory no longer traps
//auipc t3, 0 // get current PC, which is aligned
addi t3, t3, 1
sw t4, 0(t3) // store to a misaligned address
ret

View File

@@ -0,0 +1,139 @@
///////////////////////////////////////////
// ../wally-riscv-arch-test/riscv-test-suite/rv64i_m/I/src/WALLY-SLT.S
// David_Harris@hmc.edu & Katherine Parry
// Created 2022-06-17 22:58:09.916813//
// Copyright (C) 2021 Harvey Mudd College & Oklahoma State University
//
// Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation
// files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy,
// modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software
// is furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
// OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
// BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT
// OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
///////////////////////////////////////////
#include "model_test.h"
#include "arch_test.h"
RVTEST_ISA("RV64I")
.section .text.init
.globl rvtest_entry_point
rvtest_entry_point:
RVMODEL_BOOT
RVTEST_CODE_BEGIN
RVTEST_SIGBASE( x6, wally_signature)
RVTEST_CASE(0,"//check ISA:=regex(.*64.*);check ISA:=regex(.*I.*);def TEST_CASE_1=True;def NO_SAIL=True;",ld)
# Testcase 0: rs1:x18(0x0000000000000000), rs2:x9(0x0000000000000000), result rd:x5(0x0000000000000000)
li x18, MASK_XLEN(0x0000000000000000)
li x9, MASK_XLEN(0x0000000000000000)
SLT x5, x18, x9
sd x5, 0(x6)
# Testcase 1: rs1:x8(0x0000000000000000), rs2:x25(0x0000000000000001), result rd:x31(0x0000000000000001)
li x8, MASK_XLEN(0x0000000000000000)
li x25, MASK_XLEN(0x0000000000000001)
SLT x31, x8, x25
sd x31, 8(x6)
# Testcase 2: rs1:x16(0x0000000000000000), rs2:x12(0xffffffffffffffff), result rd:x20(0x0000000000000000)
li x16, MASK_XLEN(0x0000000000000000)
li x12, MASK_XLEN(0xffffffffffffffff)
SLT x20, x16, x12
sd x20, 16(x6)
# Testcase 3: rs1:x10(0x0000000000000001), rs2:x22(0x0000000000000000), result rd:x12(0x0000000000000000)
li x10, MASK_XLEN(0x0000000000000001)
li x22, MASK_XLEN(0x0000000000000000)
SLT x12, x10, x22
sd x12, 24(x6)
# Testcase 4: rs1:x19(0x0000000000000001), rs2:x31(0x0000000000000001), result rd:x29(0x0000000000000000)
li x19, MASK_XLEN(0x0000000000000001)
li x31, MASK_XLEN(0x0000000000000001)
SLT x29, x19, x31
sd x29, 32(x6)
# Testcase 5: rs1:x21(0x0000000000000001), rs2:x28(0xffffffffffffffff), result rd:x20(0x0000000000000000)
li x21, MASK_XLEN(0x0000000000000001)
li x28, MASK_XLEN(0xffffffffffffffff)
SLT x20, x21, x28
sd x20, 40(x6)
# Testcase 6: rs1:x5(0xffffffffffffffff), rs2:x23(0x0000000000000000), result rd:x10(0x0000000000000001)
li x5, MASK_XLEN(0xffffffffffffffff)
li x23, MASK_XLEN(0x0000000000000000)
SLT x10, x5, x23
sd x10, 48(x6)
# Testcase 7: rs1:x13(0xffffffffffffffff), rs2:x24(0x0000000000000001), result rd:x14(0x0000000000000001)
li x13, MASK_XLEN(0xffffffffffffffff)
li x24, MASK_XLEN(0x0000000000000001)
SLT x14, x13, x24
sd x14, 56(x6)
# Testcase 8: rs1:x27(0xffffffffffffffff), rs2:x21(0xffffffffffffffff), result rd:x3(0x0000000000000000)
li x27, MASK_XLEN(0xffffffffffffffff)
li x21, MASK_XLEN(0xffffffffffffffff)
SLT x3, x27, x21
sd x3, 64(x6)
# Testcase 9: rs1:x8(0x983631890063e42f), rs2:x21(0xb2d650af313b32b7), result rd:x15(0x0000000000000001)
li x8, MASK_XLEN(0x983631890063e42f)
li x21, MASK_XLEN(0xb2d650af313b32b7)
SLT x15, x8, x21
sd x15, 72(x6)
# Testcase 10: rs1:x19(0xb5d97ef760ef1471), rs2:x28(0xac7c8803e01bbf50), result rd:x14(0x0000000000000000)
li x19, MASK_XLEN(0xb5d97ef760ef1471)
li x28, MASK_XLEN(0xac7c8803e01bbf50)
SLT x14, x19, x28
sd x14, 80(x6)
# Testcase 11: rs1:x19(0x66faf98908135d58), rs2:x14(0xb3ab1b2cdf26f517), result rd:x25(0x0000000000000000)
li x19, MASK_XLEN(0x66faf98908135d58)
li x14, MASK_XLEN(0xb3ab1b2cdf26f517)
SLT x25, x19, x14
sd x25, 88(x6)
.EQU NUMTESTS,12
RVTEST_CODE_END
RVMODEL_HALT
RVTEST_DATA_BEGIN
.align 4
rvtest_data:
.word 0x98765432
RVTEST_DATA_END
RVMODEL_DATA_BEGIN
wally_signature:
.fill NUMTESTS*(XLEN/32),4,0xdeadbeef
#ifdef rvtest_mtrap_routine
mtrap_sigptr:
.fill 64*(XLEN/32),4,0xdeadbeef
#endif
#ifdef rvtest_gpr_save
gpr_save:
.fill 32*(XLEN/32),4,0xdeadbeef
#endif
RVMODEL_DATA_END
// ../wally-riscv-arch-test/riscv-test-suite/rv64i_m/I/src/WALLY-SLT.S
// David_Harris@hmc.edu & Katherine Parry