NMSIS-Core  Version 1.5.0
NMSIS-Core support for Nuclei processor-based devices
core_feature_pmp.h
1 /*
2  * Copyright (c) 2019 Nuclei Limited. All rights reserved.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Licensed under the Apache License, Version 2.0 (the License); you may
7  * not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  * www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18 #ifndef __CORE_FEATURE_PMP_H__
19 #define __CORE_FEATURE_PMP_H__
24 /*
25  * PMP Feature Configuration Macro:
26  * 1. __PMP_PRESENT: Define whether Physical Memory Protection(PMP) is present or not
27  * * 0: Not present
28  * * 1: Present
 29  * 2. __PMP_ENTRY_NUM: Define the number of PMP entries (typically 8 or 16; the lists below support up to 64).
30  */
31 #ifdef __cplusplus
32 extern "C" {
33 #endif
34 
35 #include "core_feature_base.h"
36 #include "core_compatiable.h"
37 
38 #if defined(__PMP_PRESENT) && (__PMP_PRESENT == 1)
39 /* ===== PMP Operations ===== */
53 #ifndef __PMP_ENTRY_NUM
54 /* numbers of PMP entries(__PMP_ENTRY_NUM) should be defined in <Device.h> */
55 #error "__PMP_ENTRY_NUM is not defined, please check!"
56 #endif
57 
/**
 * \brief PMP entry description used by __set_PMPENTRYx / __get_PMPENTRYx.
 */
typedef struct PMP_CONFIG {
    /* pmpxcfg byte: lock bit, addressing mode (A field) and R/W/X permissions,
     * encoded exactly as in the pmpcfg CSR. */
    unsigned int protection;
    /* Region size as a power of 2; must be >= PMP_SHIFT (NA4) and
     * <= __RISCV_XLEN (checked by __set_PMPENTRYx). */
    unsigned long order;
    /* Base address of the region; must be 2^order aligned. */
    unsigned long base_addr;
} pmp_config;
76 
/* X-macro list of pmpcfg CSR indices 0..7 (RV32: 4 entries per CSR). */
#define PMPCFG_LIST_RV32_0_7 \
    X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7)

/* X-macro list of pmpcfg CSR indices 8..15 (RV32, entries 32..63). */
#define PMPCFG_LIST_RV32_8_15 \
    X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15)

/* RV64 uses only even-numbered pmpcfg CSRs; each holds 8 entries. */
#define PMPCFG_LIST_RV64_0_6 \
    X(0) X(2) X(4) X(6)

/* RV64 even pmpcfg CSRs for entries 32..63. */
#define PMPCFG_LIST_RV64_8_14 \
    X(8) X(10) X(12) X(14)
/* Select the pmpcfg CSR list matching XLEN and the configured entry count.
 * RV32 packs 4 pmpxcfg bytes per CSR; RV64 packs 8 and uses even CSRs only. */
#if __RISCV_XLEN == 32
    #if __PMP_ENTRY_NUM <= 32
        #define PMPCFG_LIST PMPCFG_LIST_RV32_0_7
    #elif __PMP_ENTRY_NUM <= 64
        #define PMPCFG_LIST PMPCFG_LIST_RV32_0_7 PMPCFG_LIST_RV32_8_15
    #else
        #error "Unsupported PMP_ENTRY_NUM value for RV32"
    #endif
#elif __RISCV_XLEN == 64
    #if __PMP_ENTRY_NUM <= 32
        #define PMPCFG_LIST PMPCFG_LIST_RV64_0_6
    #elif __PMP_ENTRY_NUM <= 64
        #define PMPCFG_LIST PMPCFG_LIST_RV64_0_6 PMPCFG_LIST_RV64_8_14
    #else
        #error "Unsupported PMP_ENTRY_NUM value for RV64"
    #endif
#else
    #error "Unsupported RISC-V architecture"
#endif
147 
167 {
168  switch (csr_idx) {
169  #define X(n) case n: return __RV_CSR_READ(CSR_PMPCFG##n);
171  #undef X
172  default: return 0;
173  }
174 }
175 
194 __STATIC_INLINE void __set_PMPCFGx(uint32_t csr_idx, rv_csr_t pmpcfg)
195 {
196  switch (csr_idx) {
197  #define X(n) case n: __RV_CSR_WRITE(CSR_PMPCFG##n, pmpcfg); break;
199  #undef X
200  default: return;
201  }
202 }
203 
210 __STATIC_INLINE uint8_t __get_PMPxCFG(uint32_t entry_idx)
211 {
212  rv_csr_t pmpcfgx = 0;
213  uint8_t csr_cfg_num = 0;
214  uint16_t csr_idx = 0;
215  uint16_t cfg_shift = 0;
216 
217  if (entry_idx >= __PMP_ENTRY_NUM) return 0;
218 
219 #if __RISCV_XLEN == 32
220  csr_cfg_num = 4;
221  csr_idx = entry_idx >> 2;
222 #elif __RISCV_XLEN == 64
223  csr_cfg_num = 8;
224  /* For RV64, each PMPCFG register (pmpcfg0, pmpcfg2, etc.) holds 8 PMP entries */
225  /* Only even-numbered CSRs are used, so we align the index by clearing the LSB */
226  csr_idx = (entry_idx >> 2) & ~1;
227 #else
228  // TODO Add RV128 Handling
229  return 0;
230 #endif
231  pmpcfgx = __get_PMPCFGx(csr_idx);
232  /*
233  * first get specific pmpxcfg's order in one CSR composed of csr_cfg_num pmpxcfgs,
234  * then get pmpxcfg's bit position in one CSR by left shift 3(each pmpxcfg size is one byte)
235  */
236  cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
237 
238  /* read specific pmpxcfg register value */
239  return (uint8_t)(__RV_EXTRACT_FIELD(pmpcfgx, 0xFF << cfg_shift));
240 }
241 
251 __STATIC_INLINE void __set_PMPxCFG(uint32_t entry_idx, uint8_t pmpxcfg)
252 {
253  rv_csr_t pmpcfgx = 0;
254  uint8_t csr_cfg_num = 0;
255  uint16_t csr_idx = 0;
256  uint16_t cfg_shift = 0;
257  if (entry_idx >= __PMP_ENTRY_NUM) return;
258 
259 #if __RISCV_XLEN == 32
260  csr_cfg_num = 4;
261  csr_idx = entry_idx >> 2;
262 #elif __RISCV_XLEN == 64
263  csr_cfg_num = 8;
264  /* For RV64, pmpcfg0 and pmpcfg2 each hold 8 PMP entries, align by 2 */
265  csr_idx = (entry_idx >> 2) & ~1;
266 #else
267  // TODO Add RV128 Handling
268  return;
269 #endif
270  /* read specific pmpcfgx register value */
271  pmpcfgx = __get_PMPCFGx(csr_idx);
272  /*
273  * first get specific pmpxcfg's order in one CSR composed of csr_cfg_num pmpxcfgs,
274  * then get pmpxcfg's bit position in one CSR by left shift 3(each pmpxcfg size is one byte)
275  */
276  cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
277 
278  pmpcfgx = __RV_INSERT_FIELD(pmpcfgx, 0xFFUL << cfg_shift, pmpxcfg);
279  __set_PMPCFGx(csr_idx, pmpcfgx);
280 }
281 
/* X-macro list of pmpaddr CSR indices 0..7 (always present). */
#define PMPADDR_LIST_BASE \
    X(0) X(1) X(2) X(3) X(4) X(5) X(6) X(7)

/* X-macro list of pmpaddr CSR indices 8..15. */
#define PMPADDR_LIST_8_15 \
    X(8) X(9) X(10) X(11) X(12) X(13) X(14) X(15)

/* X-macro list of pmpaddr CSR indices 16..31. */
#define PMPADDR_LIST_16_31 \
    X(16) X(17) X(18) X(19) X(20) X(21) X(22) X(23) \
    X(24) X(25) X(26) X(27) X(28) X(29) X(30) X(31)

/* X-macro list of pmpaddr CSR indices 32..63. */
#define PMPADDR_LIST_32_63 \
    X(32) X(33) X(34) X(35) X(36) X(37) X(38) X(39) \
    X(40) X(41) X(42) X(43) X(44) X(45) X(46) X(47) \
    X(48) X(49) X(50) X(51) X(52) X(53) X(54) X(55) \
    X(56) X(57) X(58) X(59) X(60) X(61) X(62) X(63)
/* Select the pmpaddr CSR list matching the configured entry count
 * (one pmpaddr CSR per PMP entry, regardless of XLEN). */
#if __PMP_ENTRY_NUM <= 8
#define PMPADDR_LIST PMPADDR_LIST_BASE
#elif __PMP_ENTRY_NUM <= 16
#define PMPADDR_LIST PMPADDR_LIST_BASE PMPADDR_LIST_8_15
#elif __PMP_ENTRY_NUM <= 32
#define PMPADDR_LIST PMPADDR_LIST_BASE PMPADDR_LIST_8_15 PMPADDR_LIST_16_31
#elif __PMP_ENTRY_NUM <= 64
#define PMPADDR_LIST PMPADDR_LIST_BASE PMPADDR_LIST_8_15 PMPADDR_LIST_16_31 PMPADDR_LIST_32_63
#else
#error "Unsupported PMP_ENTRY_NUM value"
#endif
333 
341 {
342  switch (csr_idx) {
343  #define X(n) case n: return __RV_CSR_READ(CSR_PMPADDR##n);
345  #undef X
346  default: return 0;
347  }
348 }
349 
356 __STATIC_INLINE void __set_PMPADDRx(uint32_t csr_idx, rv_csr_t pmpaddr)
357 {
358  switch (csr_idx) {
359  #define X(n) case n: __RV_CSR_WRITE(CSR_PMPADDR##n, pmpaddr); break;
361  #undef X
362  default: return;
363  }
364 }
365 
377 __STATIC_INLINE void __set_PMPENTRYx(uint32_t entry_idx, const pmp_config *pmp_cfg)
378 {
379  unsigned int cfg_shift, cfg_csr_idx, addr_csr_idx = 0;
380  unsigned long cfgmask, addrmask = 0;
381  unsigned long pmpcfg, pmpaddr = 0;
382  unsigned long protection, csr_cfg_num = 0;
383  /* check parameters */
384  if (entry_idx >= __PMP_ENTRY_NUM || pmp_cfg->order > __RISCV_XLEN || pmp_cfg->order < PMP_SHIFT) return;
385 
386  /* calculate PMP register and offset */
387 #if __RISCV_XLEN == 32
388  csr_cfg_num = 4;
389  cfg_csr_idx = (entry_idx >> 2);
390 #elif __RISCV_XLEN == 64
391  csr_cfg_num = 8;
392  cfg_csr_idx = ((entry_idx >> 2)) & ~1;
393 #else
394  // TODO Add RV128 Handling
395  return;
396 #endif
397  /*
398  * first get specific pmpxcfg's order in one CSR composed of csr_cfg_num pmpxcfgs,
399  * then get pmpxcfg's bit position in one CSR by left shift 3, each pmpxcfg size is one byte
400  */
401  cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
402  addr_csr_idx = entry_idx;
403 
404  /* encode PMP config */
405  protection = (unsigned long)pmp_cfg->protection;
406  protection |= (PMP_SHIFT == pmp_cfg->order) ? PMP_A_NA4 : PMP_A_NAPOT;
407  cfgmask = ~(0xFFUL << cfg_shift);
408  pmpcfg = (__get_PMPCFGx(cfg_csr_idx) & cfgmask);
409  pmpcfg |= ((protection << cfg_shift) & ~cfgmask);
410 
411  /* encode PMP address */
412  if (PMP_SHIFT == pmp_cfg->order) { /* NA4 */
413  pmpaddr = (pmp_cfg->base_addr >> PMP_SHIFT);
414  } else { /* NAPOT */
415  addrmask = (1UL << (pmp_cfg->order - PMP_SHIFT)) - 1;
416  pmpaddr = ((pmp_cfg->base_addr >> PMP_SHIFT) & ~addrmask);
417  pmpaddr |= (addrmask >> 1);
418  }
419  /*
420  * write csrs, update the address first, in case the entry is locked that
421  * we won't be able to modify it after we set the config csr.
422  */
423  __set_PMPADDRx(addr_csr_idx, pmpaddr);
424  __set_PMPCFGx(cfg_csr_idx, pmpcfg);
425 }
426 
/**
 * \brief Read back one PMP entry (config byte + decoded address/order).
 * \param [in]  entry_idx  PMP entry index (0 .. __PMP_ENTRY_NUM-1).
 * \param [out] pmp_cfg    filled with protection bits, base address and order.
 * \return 0 on success, -1 on bad index / NULL pointer / unsupported XLEN.
 */
__STATIC_INLINE int __get_PMPENTRYx(unsigned int entry_idx, pmp_config *pmp_cfg)
{
    unsigned int cfg_shift, cfg_csr_idx, addr_csr_idx = 0;
    unsigned long cfgmask, pmpcfg, prot = 0;
    unsigned long t1, addr, pmpaddr, len = 0;
    uint8_t csr_cfg_num = 0;
    /* check parameters */
    if (entry_idx >= __PMP_ENTRY_NUM || !pmp_cfg) return -1;

    /* calculate PMP register and offset */
#if __RISCV_XLEN == 32
    /* RV32: 4 pmpxcfg bytes per pmpcfg CSR. */
    csr_cfg_num = 4;
    cfg_csr_idx = entry_idx >> 2;
#elif __RISCV_XLEN == 64
    /* RV64: 8 bytes per CSR, even-numbered pmpcfg CSRs only. */
    csr_cfg_num = 8;
    cfg_csr_idx = (entry_idx>> 2) & ~1;
#else
    // TODO Add RV128 Handling
    return -1;
#endif

    /* Byte position of this entry inside the pmpcfg CSR (8 bits per entry). */
    cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
    addr_csr_idx = entry_idx;

    /* decode PMP config: isolate and right-align the entry's pmpxcfg byte */
    cfgmask = (0xFFUL << cfg_shift);
    pmpcfg = (__get_PMPCFGx(cfg_csr_idx) & cfgmask);
    prot = pmpcfg >> cfg_shift;

    /* decode PMP address */
    pmpaddr = __get_PMPADDRx(addr_csr_idx);
    if (PMP_A_NAPOT == (prot & PMP_A)) {
        /* NAPOT: the count of trailing one-bits encodes the region size.
         * NOTE(review): if pmpaddr is all ones (full-range region),
         * __CTZ(~pmpaddr) counts trailing zeros of 0 — confirm __CTZ's
         * behavior for a zero argument on this core. */
        t1 = __CTZ(~pmpaddr);
        addr = (pmpaddr & ~((1UL << t1) - 1)) << PMP_SHIFT;
        len = (t1 + PMP_SHIFT + 1);
    } else {
        /* NA4 (or OFF/TOR): address is pmpaddr << 2, minimum order */
        addr = pmpaddr << PMP_SHIFT;
        len = PMP_SHIFT;
    }

    /* return details */
    pmp_cfg->protection = prot;
    pmp_cfg->base_addr = addr;
    pmp_cfg->order = len;

    return 0;
}
485  /* End of Doxygen Group NMSIS_Core_PMP_Functions */
487 #endif /* defined(__PMP_PRESENT) && (__PMP_PRESENT == 1) */
488 
489 #ifdef __cplusplus
490 }
491 #endif
492 #endif /* __CORE_FEATURE_PMP_H__ */
__STATIC_INLINE unsigned long __CTZ(unsigned long data)
Count trailing zeros.
#define PMP_A
#define PMP_A_NAPOT
#define PMP_A_NA4
#define PMP_SHIFT
#define __STATIC_INLINE
Define a static function that may be inlined by the compiler.
Definition: nmsis_gcc.h:65
__STATIC_INLINE void __set_PMPCFGx(uint32_t csr_idx, rv_csr_t pmpcfg)
Set PMPCFGx by CSR index.
__STATIC_INLINE void __set_PMPxCFG(uint32_t entry_idx, uint8_t pmpxcfg)
Set 8bit PMPxCFG by pmp entry index.
#define PMPCFG_LIST
Select appropriate PMPCFG list based on architecture and PMP entry count.
#define PMPADDR_LIST
Select appropriate PMPADDR list based on PMP_ENTRY_NUM.
__STATIC_INLINE rv_csr_t __get_PMPCFGx(uint32_t csr_idx)
Get PMPCFGx Register by CSR index.
__STATIC_INLINE rv_csr_t __get_PMPADDRx(uint32_t csr_idx)
Get PMPADDRx Register by CSR index.
__STATIC_INLINE void __set_PMPADDRx(uint32_t csr_idx, rv_csr_t pmpaddr)
Set PMPADDRx by CSR index.
__STATIC_INLINE int __get_PMPENTRYx(unsigned int entry_idx, pmp_config *pmp_cfg)
Get PMP entry by entry idx.
__STATIC_INLINE uint8_t __get_PMPxCFG(uint32_t entry_idx)
Get 8bit PMPxCFG Register by PMP entry index.
__STATIC_INLINE void __set_PMPENTRYx(uint32_t entry_idx, const pmp_config *pmp_cfg)
Set PMP entry by entry idx.
unsigned long rv_csr_t
Type of Control and Status Register(CSR), depends on the XLEN defined in RISC-V.
#define __RISCV_XLEN
Refer to the width of an integer register in bits(either 32 or 64)
unsigned long order
Size of memory region as power of 2, it has to be minimum 2 and maximum __RISCV_XLEN according to the ...
unsigned long base_addr
Base address of memory region It must be 2^order aligned address.
unsigned int protection
set locking bit, addressing mode, read, write, and instruction execution permissions,...