From c17b0c7d4548c8052c17c1f54d62c29568c9e80a Mon Sep 17 00:00:00 2001
From: David Harris
Date: Tue, 24 Dec 2024 05:36:20 -0800
Subject: [PATCH 1/5] Improved naming of signals in TLB

---
 src/mmu/tlb/tlbcontrol.sv | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/src/mmu/tlb/tlbcontrol.sv b/src/mmu/tlb/tlbcontrol.sv
index f957e2cf6..4bc7501f9 100644
--- a/src/mmu/tlb/tlbcontrol.sv
+++ b/src/mmu/tlb/tlbcontrol.sv
@@ -61,7 +61,7 @@ module tlbcontrol import cvw::*; #(parameter cvw_t P, ITLB = 0) (
   logic                    TLBAccess;
   logic                    ImproperPrivilege;
   logic                    BadPBMT, BadNAPOT, BadReserved;
-  logic                    ReservedEncoding;
+  logic                    ReservedRW;
   logic                    InvalidAccess;
   logic                    PreUpdateDA, PrePageFault;
 
@@ -89,7 +89,7 @@ module tlbcontrol import cvw::*; #(parameter cvw_t P, ITLB = 0) (
   assign BadPBMT = ((PTE_PBMT != 0) & ~(P.SVPBMT_SUPPORTED & ENVCFG_PBMTE)) | PTE_PBMT == 3; // PBMT must be zero if not supported; value of 3 is reserved
   assign BadNAPOT = PTE_N & (~P.SVNAPOT_SUPPORTED | ~NAPOT4); // N must be 0 if CVNAPOT is not supported or not 64 KiB contiguous region
   assign BadReserved = PTE_RESERVED; // Reserved bits must be zero
-  assign ReservedEncoding = PTE_W & ~PTE_R; // fault on reserved encoding with R=0, W=1 to match ImperasDV behavior
+  assign ReservedRW = PTE_W & ~PTE_R; // page fault on reserved encoding with R=0, W=1 per Privileged Spec 10.3.1
 
   // Check whether the access is allowed, page faulting if not.
   if (ITLB == 1) begin:itlb // Instruction TLB fault checking
@@ -97,7 +97,7 @@ module tlbcontrol import cvw::*; #(parameter cvw_t P, ITLB = 0) (
     // only execute non-user mode pages.
     assign ImproperPrivilege = ((PrivilegeModeW == P.U_MODE) & ~PTE_U) | ((PrivilegeModeW == P.S_MODE) & PTE_U);
     assign PreUpdateDA = ~PTE_A;
-    assign InvalidAccess = ~PTE_X | ReservedEncoding;
+    assign InvalidAccess = ~PTE_X | ReservedRW;
   end else begin:dtlb // Data TLB fault checking
     logic InvalidRead, InvalidWrite;
     logic InvalidCBOM, InvalidCBOZ;
@@ -114,7 +114,7 @@ module tlbcontrol import cvw::*; #(parameter cvw_t P, ITLB = 0) (
     assign InvalidWrite = WriteAccess & ~PTE_W;
     assign InvalidCBOM = (|CMOpM[2:0]) & (~PTE_R & (~STATUS_MXR | ~PTE_X));
     assign InvalidCBOZ = CMOpM[3] & ~PTE_W;
-    assign InvalidAccess = InvalidRead | InvalidWrite | InvalidCBOM | InvalidCBOZ | ReservedEncoding;
+    assign InvalidAccess = InvalidRead | InvalidWrite | InvalidCBOM | InvalidCBOZ | ReservedRW;
     assign PreUpdateDA = ~PTE_A | WriteAccess & ~PTE_D;
   end
 

From 7d72ca6091a525f58bad604699a4135593e02e8e Mon Sep 17 00:00:00 2001
From: David Harris
Date: Mon, 30 Dec 2024 10:50:19 -0800
Subject: [PATCH 2/5] Enabled Zmmul coverage tests

---
 config/rv32gc/coverage.svh | 2 ++
 config/rv64gc/coverage.svh | 2 ++
 2 files changed, 4 insertions(+)

diff --git a/config/rv32gc/coverage.svh b/config/rv32gc/coverage.svh
index 789f6ae70..20bddd5c0 100644
--- a/config/rv32gc/coverage.svh
+++ b/config/rv32gc/coverage.svh
@@ -28,6 +28,8 @@
 `include "ZfhD_coverage.svh"
 // Note: Zfhmin is a subset of Zfh, so usually only one or the other would be used. When Zfhmin and D are supported, ZfhD should also be enabled
 `include "Zfhmin_coverage.svh"
+// Note: Zmmul is a subset of M, so usually only one or the other would be used.
+`include "Zmmul_coverage.svh"
 `include "Zicond_coverage.svh"
 `include "Zca_coverage.svh"
 `include "Zcb_coverage.svh"
diff --git a/config/rv64gc/coverage.svh b/config/rv64gc/coverage.svh
index f86f0882f..aa4e071fb 100644
--- a/config/rv64gc/coverage.svh
+++ b/config/rv64gc/coverage.svh
@@ -28,6 +28,8 @@
 `include "Zfh_coverage.svh"
 // Note: Zfhmin is a subset of Zfh, so usually only one or the other would be used. When Zfhmin and D are supported, ZfhD should also be enabled
 `include "Zfhmin_coverage.svh"
+// Note: Zmmul is a subset of M, so usually only one or the other would be used.
+`include "Zmmul_coverage.svh"
 `include "Zicond_coverage.svh"
 `include "Zca_coverage.svh"
 `include "Zcb_coverage.svh"

From 029f9e2d55f3846fd78561a46713ce54f1b04ffb Mon Sep 17 00:00:00 2001
From: David Harris
Date: Mon, 30 Dec 2024 19:16:59 -0800
Subject: [PATCH 3/5] Fixed PMPCFG bits 6:5 need to be WARL 00

---
 src/privileged/csrm.sv | 3 ++-
 1 file changed, 2 insertions(+), 1 deletion(-)

diff --git a/src/privileged/csrm.sv b/src/privileged/csrm.sv
index 1cdfd7d85..02ffa8965 100644
--- a/src/privileged/csrm.sv
+++ b/src/privileged/csrm.sv
@@ -103,6 +103,7 @@ module csrm import cvw::*; #(parameter cvw_t P) (
   // when compressed instructions are supported, there can't be misaligned instructions
   localparam MEDELEG_MASK = P.ZCA_SUPPORTED ? 16'hB3FE : 16'hB3FF;
   localparam MIDELEG_MASK = 12'h222; // we choose to not make machine interrupts delegable
+  localparam PMPCFG_MASK = 8'h9F; // bits 6:5 are WARL 00
 
   // There are PMP_ENTRIES = 0, 16, or 64 PMPADDR registers, each of which has its own flop
   genvar i;
@@ -134,7 +135,7 @@ module csrm import cvw::*; #(parameter cvw_t P) (
 
       assign CSRPMPWRLegalizedWriteValM[i] = {(CSRPMPWriteValM[i][1] & CSRPMPWriteValM[i][0]), CSRPMPWriteValM[i][0]}; // legalize WR fields (reserved 10 written as 00)
       assign CSRPMPLegalizedWriteValM[i] = {CSRPMPWriteValM[i][7], 2'b00, CSRPMPWriteValM[i][4:2], CSRPMPWRLegalizedWriteValM[i]};
 
-      flopenr #(8) PMPCFGreg(clk, reset, WritePMPCFGM[i], CSRPMPWriteValM[i], PMPCFG_ARRAY_REGW[i]);
+      flopenr #(8) PMPCFGreg(clk, reset, WritePMPCFGM[i], CSRPMPWriteValM[i] & PMPCFG_MASK, PMPCFG_ARRAY_REGW[i]);
       end
     end

From 55b63ff486b586e5f8a808d7a79401e962eac029 Mon Sep 17 00:00:00 2001
From: David Harris
Date: Mon, 30 Dec 2024 22:05:01 -0800
Subject: [PATCH 4/5] Fixed PMP checking that top of access is still within range

---
 src/mmu/mmu.sv        |  4 ++--
 src/mmu/pmpadrdec.sv  | 19 +++++++++++-------
 src/mmu/pmpchecker.sv | 46 +++++++++++++++++++++++++++++++++++++++++++++++++---------
 3 files changed, 51 insertions(+), 18 deletions(-)

diff --git a/src/mmu/mmu.sv b/src/mmu/mmu.sv
index 85796f060..a53392fc9 100644
--- a/src/mmu/mmu.sv
+++ b/src/mmu/mmu.sv
@@ -118,7 +118,7 @@ module mmu import cvw::*; #(parameter cvw_t P,
   if (P.PMP_ENTRIES > 0) begin : pmp
     pmpchecker #(P) pmpchecker(.PhysicalAddress, .PrivilegeModeW,
                                .PMPCFG_ARRAY_REGW, .PMPADDR_ARRAY_REGW,
-                               .ExecuteAccessF, .WriteAccessM, .ReadAccessM, .CMOpM,
+                               .ExecuteAccessF, .WriteAccessM, .ReadAccessM, .Size, .CMOpM,
                                .PMPInstrAccessFaultF, .PMPLoadAccessFaultM, .PMPStoreAmoAccessFaultM);
   end else begin
     assign PMPInstrAccessFaultF = 1'b0;
@@ -131,7 +131,7 @@ module mmu import cvw::*; #(parameter cvw_t P,
 
   // Misaligned faults
   always_comb // exclusion-tag: immu-wordaccess
-    case(Size[1:0])
+    case(Size)
       2'b00: DataMisalignedM = 1'b0;              // lb, sb, lbu
       2'b01: DataMisalignedM = VAdr[0];           // lh, sh, lhu
       2'b10: DataMisalignedM = VAdr[1] | VAdr[0]; // lw, sw, flw, fsw, lwu
diff --git a/src/mmu/pmpadrdec.sv b/src/mmu/pmpadrdec.sv
index 71a6b890a..fab15b24d 100644
--- a/src/mmu/pmpadrdec.sv
+++ b/src/mmu/pmpadrdec.sv
@@ -35,9 +35,11 @@ module pmpadrdec import cvw::*; #(parameter cvw_t P) (
   input  logic [P.PA_BITS-1:0] PhysicalAddress,
   input  logic [7:0]           PMPCfg,
   input  logic [P.PA_BITS-3:0] PMPAdr,
+  input  logic                 FirstMatch,
   input  logic                 PAgePMPAdrIn,
   output logic                 PAgePMPAdrOut,
   output logic                 Match,
+  output logic [P.PA_BITS-1:0] PMPTop,
   output logic                 L, X, W, R
 );
 
@@ -50,7 +52,8 @@ module pmpadrdec import cvw::*; #(parameter cvw_t P) (
   logic                        PAltPMPAdr;
   logic [P.PA_BITS-1:0]        CurrentAdrFull;
   logic [1:0]                  AdrMode;
-
+  logic [P.PA_BITS-1:0]        PMPTop1;
+
   assign AdrMode = PMPCfg[4:3];
 
   // The two lsb of the physical address don't matter for this checking.
@@ -71,20 +74,22 @@ module pmpadrdec import cvw::*; #(parameter cvw_t P) (
   assign NAMask[P.PA_BITS-1:2] = (PMPAdr + {{(P.PA_BITS-3){1'b0}}, (AdrMode == NAPOT)}) ^ PMPAdr;
   // form a mask where the bottom k bits are 1, corresponding to a size of 2^k bytes for this memory region.
   // This assumes we're using at least an NA4 region, but works for any size NAPOT region.
-  assign NABase = {(PMPAdr & ~NAMask[P.PA_BITS-1:2]), 2'b00}; // base physical address of the pmp.
-
+  assign NABase = {(PMPAdr & ~NAMask[P.PA_BITS-1:2]), 2'b00}; // base physical address of the pmp region
   assign NAMatch = &((NABase ~^ PhysicalAddress) | NAMask); // check if upper bits of base address match, ignore lower bits corresponding to inside the memory range
+
   // finally pick the appropriate match for the access type
   assign Match = (AdrMode == TOR) ? TORMatch :
                  (AdrMode == NA4 | AdrMode == NAPOT) ? NAMatch : 1'b0;
 
+  // Report top of region for first matching region
+  assign PMPTop1 = {PMPAdr,2'b00} | NAMask; // top of the pmp region. All 1s in the lower bits. Used to check the address doesn't pass the top
+  assign PMPTop = FirstMatch ? PMPTop1 : '0; // AND portion of distributed AND-OR mux (OR portion in pmpchecker)
+
+  // PMP should match but fail if the size is too big (8-byte accesses spanning to TOR or NA4 region)
   assign L = PMPCfg[7];
   assign X = PMPCfg[2];
   assign W = PMPCfg[1];
   assign R = PMPCfg[0];
 
-  // known bug: The size of the access is not yet checked. For example, if an NA4 entry matches 0xC-0xF and the system
-  // attempts an 8-byte access to 0x8, the access should fail (see page 60 of privileged specification 20211203). This
-  // implementation will not detect the failure.
 
-endmodule
+endmodule
diff --git a/src/mmu/pmpchecker.sv b/src/mmu/pmpchecker.sv
index a55e137ef..1b3701c42 100644
--- a/src/mmu/pmpchecker.sv
+++ b/src/mmu/pmpchecker.sv
@@ -43,6 +43,7 @@ module pmpchecker import cvw::*; #(parameter cvw_t P) (
   input  var logic [7:0]           PMPCFG_ARRAY_REGW[P.PMP_ENTRIES-1:0],
   input  var logic [P.PA_BITS-3:0] PMPADDR_ARRAY_REGW [P.PMP_ENTRIES-1:0],
   input  logic                     ExecuteAccessF, WriteAccessM, ReadAccessM,
+  input  logic [1:0]               Size,
   input  logic [3:0]               CMOpM,
   output logic                     PMPInstrAccessFaultF,
   output logic                     PMPLoadAccessFaultM,
@@ -55,29 +56,56 @@ module pmpchecker import cvw::*; #(parameter cvw_t P) (
   logic [P.PMP_ENTRIES-1:0]        FirstMatch; // onehot encoding for the first pmpaddr to match the current address.
   logic [P.PMP_ENTRIES-1:0]        L, X, W, R; // PMP matches and has flag set
   logic [P.PMP_ENTRIES-1:0]        PAgePMPAdr; // for TOR PMP matching, PhysicalAddress > PMPAdr[i]
+  logic [P.PA_BITS-1:0]            PMPTop[P.PMP_ENTRIES-1:0]; // Upper end of each region, for checking that the access is fully within the region
   logic                            PMPCMOAccessFault, PMPCBOMAccessFault, PMPCBOZAccessFault;
-
+  logic [2:0]                      SizeBytesMinus1;
+  logic                            MatchingR, MatchingW, MatchingX, MatchingL;
+  logic [P.PA_BITS-1:0]            MatchingPMPTop, PhysicalAddressTop;
+  logic                            TooBig;
 
   if (P.PMP_ENTRIES > 0) begin: pmp // prevent complaints about array of no elements when PMP_ENTRIES = 0
     pmpadrdec #(P) pmpadrdecs[P.PMP_ENTRIES-1:0](
       .PhysicalAddress, .PMPCfg(PMPCFG_ARRAY_REGW), .PMPAdr(PMPADDR_ARRAY_REGW),
+      .FirstMatch,
       .PAgePMPAdrIn({PAgePMPAdr[P.PMP_ENTRIES-2:0], 1'b1}), .PAgePMPAdrOut(PAgePMPAdr),
-      .Match, .L, .X, .W, .R);
+      .Match, .PMPTop, .L, .X, .W, .R);
   end
 
   priorityonehot #(P.PMP_ENTRIES) pmppriority(.a(Match), .y(FirstMatch)); // combine the match signal from all the address decoders to find the first one that matches.
 
-  // Only enforce PMP checking for S and U modes or in Machine mode when L bit is set in selected region
-  assign EnforcePMP = (PrivilegeModeW != P.M_MODE) | (|(L & FirstMatch));
+  // Distributed AND-OR mux to select the first matching results
+  // If the access does not match all bytes of the PMP region, it is too big and the matches are disabled
+  assign MatchingR = |(R & FirstMatch) & ~TooBig;
+  assign MatchingW = |(W & FirstMatch) & ~TooBig;
+  assign MatchingX = |(X & FirstMatch) & ~TooBig;
+  assign MatchingL = |(L & FirstMatch);
+  or_rows #(P.PMP_ENTRIES, P.PA_BITS) PTEOr(PMPTop, MatchingPMPTop);
 
-  assign PMPCBOMAccessFault = EnforcePMP & (|CMOpM[2:0]) & ~|((R|W) & FirstMatch) ; // exclusion-tag: immu-pmpcbom
-  assign PMPCBOZAccessFault = EnforcePMP & CMOpM[3] & ~|(W & FirstMatch) ; // exclusion-tag: immu-pmpcboz
+  // Matching PMP entry must match all bytes of an access, or the access fails (Priv Spec 3.7.1.3)
+  // *** not fully implemented
+  // *** check R=0,W=1 WARL is enforced
+  // First find the size of the access in terms of the offset to the most significant byte
+  always_comb
+    case (Size)
+      2'b00: SizeBytesMinus1 = 3'd0;
+      2'b01: SizeBytesMinus1 = 3'd1;
+      2'b10: SizeBytesMinus1 = 3'd3;
+      2'b11: SizeBytesMinus1 = 3'd7;
+    endcase
+  assign PhysicalAddressTop = PhysicalAddress + {{P.PA_BITS-3{1'b0}}, SizeBytesMinus1}; // top of the access range
+  assign TooBig = PhysicalAddressTop > MatchingPMPTop; // check if the access goes beyond the top of the PMP region
+
+  // Only enforce PMP checking for S and U modes or in Machine mode when L bit is set in selected region
+  assign EnforcePMP = (PrivilegeModeW != P.M_MODE) | MatchingL;
+
+  assign PMPCBOMAccessFault = EnforcePMP & (|CMOpM[2:0]) & ~MatchingR ; // checking R is sufficient because W implies R in PMP // exclusion-tag: immu-pmpcbom
+  assign PMPCBOZAccessFault = EnforcePMP & CMOpM[3] & ~MatchingW ; // exclusion-tag: immu-pmpcboz
   assign PMPCMOAccessFault = PMPCBOZAccessFault | PMPCBOMAccessFault; // exclusion-tag: immu-pmpcboaccess
 
-  assign PMPInstrAccessFaultF = EnforcePMP & ExecuteAccessF & ~|(X & FirstMatch) ;
-  assign PMPStoreAmoAccessFaultM = (EnforcePMP & WriteAccessM & ~|(W & FirstMatch)) | PMPCMOAccessFault; // exclusion-tag: immu-pmpstoreamoaccessfault
-  assign PMPLoadAccessFaultM = EnforcePMP & ReadAccessM & ~WriteAccessM & ~|(R & FirstMatch) ;
+  assign PMPInstrAccessFaultF = EnforcePMP & ExecuteAccessF & ~MatchingX ;
+  assign PMPStoreAmoAccessFaultM = (EnforcePMP & WriteAccessM & ~MatchingW) | PMPCMOAccessFault; // exclusion-tag: immu-pmpstoreamoaccessfault
+  assign PMPLoadAccessFaultM = EnforcePMP & ReadAccessM & ~WriteAccessM & ~MatchingR;
 endmodule

From c40450cc3a728e4d54c5be9851a0b72e45f05c19 Mon Sep 17 00:00:00 2001
From: David Harris
Date: Mon, 30 Dec 2024 22:22:29 -0800
Subject: [PATCH 5/5] Fixed size bug and cleaned up PMP csr logic

---
 src/mmu/pmpchecker.sv  | 5 ++---
 src/privileged/csrm.sv | 5 ++---
 2 files changed, 4 insertions(+), 6 deletions(-)

diff --git a/src/mmu/pmpchecker.sv b/src/mmu/pmpchecker.sv
index 1b3701c42..27f8745fe 100644
--- a/src/mmu/pmpchecker.sv
+++ b/src/mmu/pmpchecker.sv
@@ -85,8 +85,6 @@ module pmpchecker import cvw::*; #(parameter cvw_t P) (
   or_rows #(P.PMP_ENTRIES, P.PA_BITS) PTEOr(PMPTop, MatchingPMPTop);
 
   // Matching PMP entry must match all bytes of an access, or the access fails (Priv Spec 3.7.1.3)
-  // *** not fully implemented
-  // *** check R=0,W=1 WARL is enforced
   // First find the size of the access in terms of the offset to the most significant byte
   always_comb
     case (Size)
@@ -95,6 +93,7 @@ module pmpchecker import cvw::*; #(parameter cvw_t P) (
       2'b10: SizeBytesMinus1 = 3'd3;
       2'b11: SizeBytesMinus1 = 3'd7;
     endcase
+  // Then find the top of the access and see if it is beyond the top of the region
   assign PhysicalAddressTop = PhysicalAddress + {{P.PA_BITS-3{1'b0}}, SizeBytesMinus1}; // top of the access range
   assign TooBig = PhysicalAddressTop > MatchingPMPTop; // check if the access goes beyond the top of the PMP region
 
@@ -104,7 +103,7 @@ module pmpchecker import cvw::*; #(parameter cvw_t P) (
   assign PMPCBOMAccessFault = EnforcePMP & (|CMOpM[2:0]) & ~MatchingR ; // checking R is sufficient because W implies R in PMP // exclusion-tag: immu-pmpcbom
   assign PMPCBOZAccessFault = EnforcePMP & CMOpM[3] & ~MatchingW ; // exclusion-tag: immu-pmpcboz
   assign PMPCMOAccessFault = PMPCBOZAccessFault | PMPCBOMAccessFault; // exclusion-tag: immu-pmpcboaccess
-
+
   assign PMPInstrAccessFaultF = EnforcePMP & ExecuteAccessF & ~MatchingX ;
   assign PMPStoreAmoAccessFaultM = (EnforcePMP & WriteAccessM & ~MatchingW) | PMPCMOAccessFault; // exclusion-tag: immu-pmpstoreamoaccessfault
   assign PMPLoadAccessFaultM = EnforcePMP & ReadAccessM & ~WriteAccessM & ~MatchingR;
diff --git a/src/privileged/csrm.sv b/src/privileged/csrm.sv
index 02ffa8965..8716ce63c 100644
--- a/src/privileged/csrm.sv
+++ b/src/privileged/csrm.sv
@@ -103,7 +103,6 @@ module csrm import cvw::*; #(parameter cvw_t P) (
   // when compressed instructions are supported, there can't be misaligned instructions
   localparam MEDELEG_MASK = P.ZCA_SUPPORTED ? 16'hB3FE : 16'hB3FF;
   localparam MIDELEG_MASK = 12'h222; // we choose to not make machine interrupts delegable
-  localparam PMPCFG_MASK = 8'h9F; // bits 6:5 are WARL 00
 
   // There are PMP_ENTRIES = 0, 16, or 64 PMPADDR registers, each of which has its own flop
   genvar i;
@@ -122,7 +121,7 @@ module csrm import cvw::*; #(parameter cvw_t P) (
         assign ADDRLocked[i] = PMPCFG_ARRAY_REGW[i][7];
       else
         assign ADDRLocked[i] = PMPCFG_ARRAY_REGW[i][7] | (PMPCFG_ARRAY_REGW[i+1][7] & PMPCFG_ARRAY_REGW[i+1][4:3] == 2'b01);
-
+
       assign WritePMPADDRM[i] = (CSRMWriteM & (CSRAdrM == (PMPADDR0+i))) & ~ADDRLocked[i];
       flopenr #(P.PA_BITS-2) PMPADDRreg(clk, reset, WritePMPADDRM[i], CSRWriteValM[P.PA_BITS-3:0], PMPADDR_ARRAY_REGW[i]);
       if (P.XLEN==64) begin
@@ -135,7 +134,7 @@ module csrm import cvw::*; #(parameter cvw_t P) (
 
      assign CSRPMPWRLegalizedWriteValM[i] = {(CSRPMPWriteValM[i][1] & CSRPMPWriteValM[i][0]), CSRPMPWriteValM[i][0]}; // legalize WR fields (reserved 10 written as 00)
      assign CSRPMPLegalizedWriteValM[i] = {CSRPMPWriteValM[i][7], 2'b00, CSRPMPWriteValM[i][4:2], CSRPMPWRLegalizedWriteValM[i]};
 
-      flopenr #(8) PMPCFGreg(clk, reset, WritePMPCFGM[i], CSRPMPWriteValM[i] & PMPCFG_MASK, PMPCFG_ARRAY_REGW[i]);
+      flopenr #(8) PMPCFGreg(clk, reset, WritePMPCFGM[i], CSRPMPLegalizedWriteValM[i], PMPCFG_ARRAY_REGW[i]);
       end
     end
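
The size check added in PATCH 4 and tightened in PATCH 5 can be exercised on its own. Below is a minimal, illustrative sketch, not part of the patches: the module name pmp_size_check_example, the 56-bit address width, and the chosen NA4 region are all hypothetical. It reuses the same formulas as pmpadrdec.sv/pmpchecker.sv: the region top is the base OR'd with the low mask bits, the access top is the address plus (bytes - 1), and an access whose top passes the region top is flagged TooBig so the matching R/W/X permissions are squashed.

module pmp_size_check_example;
  localparam PA_BITS = 56;                        // physical address width assumed for this sketch

  // Hypothetical NA4 region covering bytes 0xC-0xF: pmpaddr holds 0xC >> 2 = 0x3
  localparam logic [PA_BITS-3:0] PMPAdr = 'h3;
  localparam logic [PA_BITS-1:0] NAMask = 'h3;    // NA4: only the bottom two address bits lie inside the region

  logic [PA_BITS-1:0] PhysicalAddress;
  logic [2:0]         SizeBytesMinus1;
  logic [PA_BITS-1:0] PMPTop, PhysicalAddressTop;
  logic               TooBig;

  assign PMPTop             = {PMPAdr, 2'b00} | NAMask;                                  // top byte of the region (0xF)
  assign PhysicalAddressTop = PhysicalAddress + {{(PA_BITS-3){1'b0}}, SizeBytesMinus1};  // top byte of the access
  assign TooBig             = PhysicalAddressTop > PMPTop;                               // access spills past the region

  initial begin
    PhysicalAddress = 'hC; SizeBytesMinus1 = 3'd7;  // 8-byte access at 0xC spans 0xC-0x13: expect TooBig = 1
    #1 $display("8-byte @ 0x%0h: access top 0x%0h, region top 0x%0h, TooBig=%b", PhysicalAddress, PhysicalAddressTop, PMPTop, TooBig);
    PhysicalAddress = 'hC; SizeBytesMinus1 = 3'd3;  // 4-byte access at 0xC fits exactly: expect TooBig = 0
    #1 $display("4-byte @ 0x%0h: access top 0x%0h, region top 0x%0h, TooBig=%b", PhysicalAddress, PhysicalAddressTop, PMPTop, TooBig);
  end
endmodule

An access that starts below the region, such as the 0x8 case in the deleted known-bug comment, does not match this entry at its base address in the first place, so the TooBig term only needs to cover accesses that start inside the region and spill past its top.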
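
Similarly, the pmpcfg write legalization that PATCH 3 introduces with PMPCFG_MASK and PATCH 5 folds into CSRPMPLegalizedWriteValM can be summarized in a few lines. The following is an illustrative, standalone sketch with hypothetical module and function names, mirroring the intent of csrm.sv: reserved bits 6:5 are WARL 00, and the reserved R=0/W=1 combination is written back as R=0/W=0.

module pmpcfg_legalize_example;
  // Legalize one pmpcfg byte the way csrm.sv does after PATCH 5
  function automatic logic [7:0] legalize_pmpcfg(input logic [7:0] wval);
    logic r_legal, w_legal;
    r_legal = wval[0];
    w_legal = wval[1] & wval[0];                            // W=1 with R=0 is reserved, so W is squashed
    return {wval[7], 2'b00, wval[4:2], w_legal, r_legal};   // bits 6:5 always read as 00
  endfunction

  initial begin
    $display("write 8'hFF -> %02h", legalize_pmpcfg(8'hFF)); // 9f: bits 6:5 cleared, L/A/X/W/R kept
    $display("write 8'h02 -> %02h", legalize_pmpcfg(8'h02)); // 00: reserved R=0/W=1 squashed to 00
  end
endmodule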