18 #ifndef __CORE_FEATURE_PMP_H__
19 #define __CORE_FEATURE_PMP_H__
35 #include "core_feature_base.h"
36 #include "core_compatiable.h"
38 #if defined(__PMP_PRESENT) && (__PMP_PRESENT == 1)
53 #ifndef __PMP_ENTRY_NUM
55 #error "__PMP_ENTRY_NUM is not defined, please check!"
58 typedef struct PMP_CONFIG {
/* X-macro expansion list for RV32 pmpcfg CSRs 0-7. Each 32-bit pmpcfg CSR packs
 * four 8-bit entry configs, so pmpcfg0..pmpcfg7 cover PMP entries 0-31.
 * X(n) must be defined by the user of the list before expanding it. */
#define PMPCFG_LIST_RV32_0_7 \
X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7)
/* X-macro expansion list for RV32 pmpcfg CSRs 8-15, covering PMP entries 32-63. */
#define PMPCFG_LIST_RV32_8_15 \
X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15)
101 #define PMPCFG_LIST_RV64_0_6 \
/* X-macro expansion list for RV64 pmpcfg CSRs 8-14. On RV64 only even-numbered
 * pmpcfg CSRs are implemented (see the `& ~1` index rounding in the accessors
 * below); each 64-bit CSR packs eight entry configs, so these cover entries 32-63. */
#define PMPCFG_LIST_RV64_8_14 \
X(8) X(10) X(12) X(14)
/* Select the PMPCFG X-macro list that matches the core's XLEN and the configured
 * number of PMP entries (__PMP_ENTRY_NUM). */
#if __RISCV_XLEN == 32
#if __PMP_ENTRY_NUM <= 32
#define PMPCFG_LIST PMPCFG_LIST_RV32_0_7
#elif __PMP_ENTRY_NUM <= 64
#define PMPCFG_LIST PMPCFG_LIST_RV32_0_7 PMPCFG_LIST_RV32_8_15
/* NOTE(review): an #else appears to be missing before this #error in the
 * extracted text — verify against the full source. */
#error "Unsupported PMP_ENTRY_NUM value for RV32"
#elif __RISCV_XLEN == 64
#if __PMP_ENTRY_NUM <= 32
#define PMPCFG_LIST PMPCFG_LIST_RV64_0_6
#elif __PMP_ENTRY_NUM <= 64
#define PMPCFG_LIST PMPCFG_LIST_RV64_0_6 PMPCFG_LIST_RV64_8_14
/* NOTE(review): #else likely missing here as well — verify against full source. */
#error "Unsupported PMP_ENTRY_NUM value for RV64"
/* NOTE(review): the closing #else/#endif lines of this chain are absent from
 * this extract. */
#error "Unsupported RISC-V architecture"
/* X(n) expands to a switch case returning pmpcfgN when csr_idx == n (expanded via PMPCFG_LIST). */
#define X(n) case n: return __RV_CSR_READ(CSR_PMPCFG##n);
/* X(n) expands to a switch case writing `pmpcfg` into pmpcfgN when csr_idx == n. */
#define X(n) case n: __RV_CSR_WRITE(CSR_PMPCFG##n, pmpcfg); break;
/* Fragment of __get_PMPxCFG (fetch the 8-bit config byte of one PMP entry).
 * NOTE(review): several original lines are missing from this extract — notably
 * the assignments to csr_cfg_num and the read of pmpcfgx — verify against the
 * full source. */
uint8_t csr_cfg_num = 0;   /* entry configs packed per pmpcfg CSR (assigned in missing lines; 4 on RV32, presumably 8 on RV64) */
uint16_t csr_idx = 0;      /* index of the pmpcfg CSR holding this entry's config */
uint16_t cfg_shift = 0;    /* bit offset of the entry's 8-bit config within the CSR */
if (entry_idx >= __PMP_ENTRY_NUM)
    return 0;              /* out-of-range entry index: return an all-zero config */
#if __RISCV_XLEN == 32
csr_idx = entry_idx >> 2;          /* four entry configs per 32-bit pmpcfg CSR */
#elif __RISCV_XLEN == 64
csr_idx = (entry_idx >> 2) & ~1;   /* RV64 implements only even-numbered pmpcfg CSRs */
cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3; /* (entry mod per-CSR count) * 8 bits */
return (uint8_t)(__RV_EXTRACT_FIELD(pmpcfgx, 0xFF << cfg_shift));
/* Fragment of __set_PMPxCFG (write the 8-bit config byte of one PMP entry).
 * NOTE(review): lines are missing from this extract — the csr_cfg_num setup and
 * the pmpcfgx read/write-back are not visible — verify against the full source. */
uint8_t csr_cfg_num = 0;   /* entry configs packed per pmpcfg CSR (assigned in missing lines) */
uint16_t csr_idx = 0;      /* index of the pmpcfg CSR holding this entry's config */
uint16_t cfg_shift = 0;    /* bit offset of the entry's 8-bit config within the CSR */
if (entry_idx >= __PMP_ENTRY_NUM)
    return;                /* silently ignore an out-of-range entry index */
#if __RISCV_XLEN == 32
csr_idx = entry_idx >> 2;          /* four entry configs per 32-bit pmpcfg CSR */
#elif __RISCV_XLEN == 64
csr_idx = (entry_idx >> 2) & ~1;   /* RV64 implements only even-numbered pmpcfg CSRs */
cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3; /* (entry mod per-CSR count) * 8 bits */
pmpcfgx = __RV_INSERT_FIELD(pmpcfgx, 0xFFUL << cfg_shift, pmpxcfg); /* splice new 8-bit config into CSR image */
/* X-macro list for pmpaddr CSRs 0-7 (one pmpaddr CSR per PMP entry). */
#define PMPADDR_LIST_BASE \
X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7)
/* X-macro list for pmpaddr CSRs 8-15. */
#define PMPADDR_LIST_8_15 \
X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15)
/* X-macro list for pmpaddr CSRs 16-31. */
#define PMPADDR_LIST_16_31 \
X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) \
X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31)
/* X-macro list for pmpaddr CSRs 32-63. */
#define PMPADDR_LIST_32_63 \
X(32) X(33) X(34) X(35) X(36) X(37) X(38) X(39) \
X(40) X(41) X(42) X(43) X(44) X(45) X(46) X(47) \
X(48) X(49) X(50) X(51) X(52) X(53) X(54) X(55) \
X(56) X(57) X(58) X(59) X(60) X(61) X(62) X(63)
/* Select the PMPADDR X-macro list sized to the configured number of PMP entries. */
#if __PMP_ENTRY_NUM <= 8
#define PMPADDR_LIST PMPADDR_LIST_BASE
#elif __PMP_ENTRY_NUM <= 16
#define PMPADDR_LIST PMPADDR_LIST_BASE PMPADDR_LIST_8_15
#elif __PMP_ENTRY_NUM <= 32
#define PMPADDR_LIST PMPADDR_LIST_BASE PMPADDR_LIST_8_15 PMPADDR_LIST_16_31
#elif __PMP_ENTRY_NUM <= 64
#define PMPADDR_LIST PMPADDR_LIST_BASE PMPADDR_LIST_8_15 PMPADDR_LIST_16_31 PMPADDR_LIST_32_63
/* NOTE(review): an #else appears to be missing before this #error (and the
 * closing #endif is absent) in this extract — verify against the full source. */
#error "Unsupported PMP_ENTRY_NUM value"
/* X(n) expands to a switch case returning pmpaddrN when csr_idx == n (expanded via PMPADDR_LIST). */
#define X(n) case n: return __RV_CSR_READ(CSR_PMPADDR##n);
/* X(n) expands to a switch case writing `pmpaddr` into pmpaddrN when csr_idx == n. */
#define X(n) case n: __RV_CSR_WRITE(CSR_PMPADDR##n, pmpaddr); break;
/* Fragment of __set_PMPENTRYx (program one PMP entry from a pmp_config).
 * NOTE(review): many original lines are missing from this extract (bounds check,
 * csr_cfg_num setup, pmpcfg/pmpaddr reads, addrmask derivation, CSR write-backs)
 * — verify against the full source. */
unsigned int cfg_shift, cfg_csr_idx, addr_csr_idx = 0;
unsigned long cfgmask, addrmask = 0;
unsigned long pmpcfg, pmpaddr = 0;
unsigned long protection, csr_cfg_num = 0;
#if __RISCV_XLEN == 32
cfg_csr_idx = (entry_idx >> 2);          /* four entry configs per 32-bit pmpcfg CSR */
#elif __RISCV_XLEN == 64
cfg_csr_idx = ((entry_idx >> 2)) & ~1;   /* RV64 implements only even-numbered pmpcfg CSRs */
cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3; /* (entry mod per-CSR count) * 8 bits */
addr_csr_idx = entry_idx;                /* one pmpaddr CSR per PMP entry */
protection = (unsigned long)pmp_cfg->protection; /* caller-supplied L/A/X/W/R config byte */
cfgmask = ~(0xFFUL << cfg_shift);        /* mask that clears this entry's 8-bit slot */
pmpcfg |= ((protection << cfg_shift) & ~cfgmask); /* splice new config into CSR image */
pmpaddr |= (addrmask >> 1);  /* presumably NAPOT low-bit encoding of region size — TODO confirm addrmask derivation (missing lines) */
/* Fragment of __get_PMPENTRYx (read back one PMP entry into *pmp_cfg, returning
 * 0 on success / -1 on bad arguments per the extracted API description).
 * NOTE(review): many original lines are missing from this extract (csr_cfg_num
 * setup, pmpcfg/pmpaddr reads, len computation, base_addr store, return) —
 * verify against the full source. */
unsigned int cfg_shift, cfg_csr_idx, addr_csr_idx = 0;
unsigned long cfgmask, pmpcfg, prot = 0;
unsigned long t1, addr, pmpaddr, len = 0;
uint8_t csr_cfg_num = 0;
if (entry_idx >= __PMP_ENTRY_NUM || !pmp_cfg)
    return -1;   /* out-of-range entry index or NULL output pointer */
#if __RISCV_XLEN == 32
cfg_csr_idx = entry_idx >> 2;            /* four entry configs per 32-bit pmpcfg CSR */
#elif __RISCV_XLEN == 64
cfg_csr_idx = (entry_idx >> 2) & ~1;     /* RV64 implements only even-numbered pmpcfg CSRs */
cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3; /* (entry mod per-CSR count) * 8 bits */
addr_csr_idx = entry_idx;                /* one pmpaddr CSR per PMP entry */
cfgmask = (0xFFUL << cfg_shift);         /* mask selecting this entry's 8-bit slot */
prot = pmpcfg >> cfg_shift;              /* align the entry's config byte to bit 0 */
t1 = __CTZ(~pmpaddr);                    /* count of trailing 1s in pmpaddr = NAPOT size bits */
addr = (pmpaddr & ~((1UL << t1) - 1)) << PMP_SHIFT; /* strip size bits, rescale to a byte address */
pmp_cfg->order = len;                    /* report region size as power of 2 */
__STATIC_INLINE unsigned long __CTZ(unsigned long data)
Count trailing zeros.
#define __STATIC_INLINE
Define a static function that may be inlined by the compiler.
__STATIC_INLINE void __set_PMPCFGx(uint32_t csr_idx, rv_csr_t pmpcfg)
Set PMPCFGx by CSR index.
__STATIC_INLINE void __set_PMPxCFG(uint32_t entry_idx, uint8_t pmpxcfg)
Set the 8-bit PMPxCFG field by PMP entry index.
#define PMPCFG_LIST
Select appropriate PMPCFG list based on architecture and PMP entry count.
#define PMPADDR_LIST
Select appropriate PMPADDR list based on PMP_ENTRY_NUM.
__STATIC_INLINE rv_csr_t __get_PMPCFGx(uint32_t csr_idx)
Get PMPCFGx Register by CSR index.
__STATIC_INLINE rv_csr_t __get_PMPADDRx(uint32_t csr_idx)
Get PMPADDRx Register by CSR index.
__STATIC_INLINE void __set_PMPADDRx(uint32_t csr_idx, rv_csr_t pmpaddr)
Set PMPADDRx by CSR index.
__STATIC_INLINE int __get_PMPENTRYx(unsigned int entry_idx, pmp_config *pmp_cfg)
Get PMP entry by entry idx.
__STATIC_INLINE uint8_t __get_PMPxCFG(uint32_t entry_idx)
Get the 8-bit PMPxCFG register field by PMP entry index.
__STATIC_INLINE void __set_PMPENTRYx(uint32_t entry_idx, const pmp_config *pmp_cfg)
Set PMP entry by entry idx.
unsigned long rv_csr_t
Type of Control and Status Register(CSR), depends on the XLEN defined in RISC-V.
#define __RISCV_XLEN
Refers to the width of an integer register in bits (either 32 or 64).
unsigned long order
Size of the memory region as a power of 2; it must be at least 2 and at most __RISCV_XLEN according to the ...
unsigned long base_addr
Base address of the memory region. It must be a 2^order-aligned address.
unsigned int protection
set locking bit, addressing mode, read, write, and instruction execution permissions,...