NMSIS-Core  Version 1.3.1
NMSIS-Core support for Nuclei processor-based devices
core_feature_pmp.h
1 /*
2  * Copyright (c) 2019 Nuclei Limited. All rights reserved.
3  *
4  * SPDX-License-Identifier: Apache-2.0
5  *
6  * Licensed under the Apache License, Version 2.0 (the License); you may
7  * not use this file except in compliance with the License.
8  * You may obtain a copy of the License at
9  *
10  * www.apache.org/licenses/LICENSE-2.0
11  *
12  * Unless required by applicable law or agreed to in writing, software
13  * distributed under the License is distributed on an AS IS BASIS, WITHOUT
14  * WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
15  * See the License for the specific language governing permissions and
16  * limitations under the License.
17  */
18 #ifndef __CORE_FEATURE_PMP_H__
19 #define __CORE_FEATURE_PMP_H__
20 
24 /*
25  * PMP Feature Configuration Macro:
 * 1. __PMP_PRESENT: Define whether Physical Memory Protection (PMP) is present or not
27  * * 0: Not present
28  * * 1: Present
29  * 2. __PMP_ENTRY_NUM: Define the number of PMP entries, only 8 or 16 is configurable.
30  */
31 #ifdef __cplusplus
32 extern "C" {
33 #endif
34 
35 #include "core_feature_base.h"
36 #include "core_compatiable.h"
37 
38 #if defined(__PMP_PRESENT) && (__PMP_PRESENT == 1)
39 /* ===== PMP Operations ===== */
53 #ifndef __PMP_ENTRY_NUM
54 /* numbers of PMP entries(__PMP_ENTRY_NUM) should be defined in <Device.h> */
55 #error "__PMP_ENTRY_NUM is not defined, please check!"
56 #endif
57 
/**
 * \brief  PMP configuration structure, describes one PMP entry.
 */
typedef struct PMP_CONFIG {
    unsigned int protection;    /*!< Lock bit, addressing mode (A field), and R/W/X
                                     permission bits packed as an 8-bit pmpxcfg value */
    unsigned long order;        /*!< Size of memory region as power of 2; minimum is
                                     PMP_SHIFT, maximum is __RISCV_XLEN */
    unsigned long base_addr;    /*!< Base address of memory region; must be
                                     2^order aligned */
} pmp_config;
76 
90 {
91  switch (csr_idx) {
92  case 0: return __RV_CSR_READ(CSR_PMPCFG0);
93  case 1: return __RV_CSR_READ(CSR_PMPCFG1);
94  case 2: return __RV_CSR_READ(CSR_PMPCFG2);
95  case 3: return __RV_CSR_READ(CSR_PMPCFG3);
96  default: return 0;
97  }
98 }
99 
112 __STATIC_INLINE void __set_PMPCFGx(uint32_t csr_idx, rv_csr_t pmpcfg)
113 {
114  switch (csr_idx) {
115  case 0: __RV_CSR_WRITE(CSR_PMPCFG0, pmpcfg); break;
116  case 1: __RV_CSR_WRITE(CSR_PMPCFG1, pmpcfg); break;
117  case 2: __RV_CSR_WRITE(CSR_PMPCFG2, pmpcfg); break;
118  case 3: __RV_CSR_WRITE(CSR_PMPCFG3, pmpcfg); break;
119  default: return;
120  }
121 }
122 
129 __STATIC_INLINE uint8_t __get_PMPxCFG(uint32_t entry_idx)
130 {
131  rv_csr_t pmpcfgx = 0;
132  uint8_t csr_cfg_num = 0;
133  uint16_t csr_idx = 0;
134  uint16_t cfg_shift = 0;
135 
136  if (entry_idx >= __PMP_ENTRY_NUM) return 0;
137 
138 #if __RISCV_XLEN == 32
139  csr_cfg_num = 4;
140  csr_idx = entry_idx >> 2;
141 #elif __RISCV_XLEN == 64
142  csr_cfg_num = 8;
143  /* For RV64, pmpcfg0 and pmpcfg2 each hold 8 PMP entries, align by 2 */
144  csr_idx = (entry_idx >> 2) & ~1;
145 #else
146  // TODO Add RV128 Handling
147  return 0;
148 #endif
149  pmpcfgx = __get_PMPCFGx(csr_idx);
150  /*
151  * first get specific pmpxcfg's order in one CSR composed of csr_cfg_num pmpxcfgs,
152  * then get pmpxcfg's bit position in one CSR by left shift 3(each pmpxcfg size is one byte)
153  */
154  cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
155 
156  /* read specific pmpxcfg register value */
157  return (uint8_t)(__RV_EXTRACT_FIELD(pmpcfgx, 0xFF << cfg_shift));
158 }
159 
169 __STATIC_INLINE void __set_PMPxCFG(uint32_t entry_idx, uint8_t pmpxcfg)
170 {
171  rv_csr_t pmpcfgx = 0;
172  uint8_t csr_cfg_num = 0;
173  uint16_t csr_idx = 0;
174  uint16_t cfg_shift = 0;
175  if (entry_idx >= __PMP_ENTRY_NUM) return;
176 
177 #if __RISCV_XLEN == 32
178  csr_cfg_num = 4;
179  csr_idx = entry_idx >> 2;
180 #elif __RISCV_XLEN == 64
181  csr_cfg_num = 8;
182  /* For RV64, pmpcfg0 and pmpcfg2 each hold 8 PMP entries, align by 2 */
183  csr_idx = (entry_idx >> 2) & ~1;
184 #else
185  // TODO Add RV128 Handling
186  return;
187 #endif
188  /* read specific pmpcfgx register value */
189  pmpcfgx = __get_PMPCFGx(csr_idx);
190  /*
191  * first get specific pmpxcfg's order in one CSR composed of csr_cfg_num pmpxcfgs,
192  * then get pmpxcfg's bit position in one CSR by left shift 3(each pmpxcfg size is one byte)
193  */
194  cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
195 
196  pmpcfgx = __RV_INSERT_FIELD(pmpcfgx, 0xFFUL << cfg_shift, pmpxcfg);
197  __set_PMPCFGx(csr_idx, pmpcfgx);
198 }
199 
207 {
208  switch (csr_idx) {
209  case 0: return __RV_CSR_READ(CSR_PMPADDR0);
210  case 1: return __RV_CSR_READ(CSR_PMPADDR1);
211  case 2: return __RV_CSR_READ(CSR_PMPADDR2);
212  case 3: return __RV_CSR_READ(CSR_PMPADDR3);
213  case 4: return __RV_CSR_READ(CSR_PMPADDR4);
214  case 5: return __RV_CSR_READ(CSR_PMPADDR5);
215  case 6: return __RV_CSR_READ(CSR_PMPADDR6);
216  case 7: return __RV_CSR_READ(CSR_PMPADDR7);
217  case 8: return __RV_CSR_READ(CSR_PMPADDR8);
218  case 9: return __RV_CSR_READ(CSR_PMPADDR9);
219  case 10: return __RV_CSR_READ(CSR_PMPADDR10);
220  case 11: return __RV_CSR_READ(CSR_PMPADDR11);
221  case 12: return __RV_CSR_READ(CSR_PMPADDR12);
222  case 13: return __RV_CSR_READ(CSR_PMPADDR13);
223  case 14: return __RV_CSR_READ(CSR_PMPADDR14);
224  case 15: return __RV_CSR_READ(CSR_PMPADDR15);
225  default: return 0;
226  }
227 }
228 
/**
 * \brief   Set PMPADDRx by CSR index
 * \details Write \c pmpaddr into the PMPADDRx CSR selected by \c csr_idx.
 *          CSR numbers must be compile-time constants, so each index is
 *          dispatched to its own csrw; out-of-range indices are ignored.
 * \param [in]  csr_idx   PMPADDR CSR index (0..15)
 * \param [in]  pmpaddr   encoded PMP address value to write
 *                        (callers such as __set_PMPENTRYx pass base_addr >> PMP_SHIFT)
 */
__STATIC_INLINE void __set_PMPADDRx(uint32_t csr_idx, rv_csr_t pmpaddr)
{
    switch (csr_idx) {
    case 0: __RV_CSR_WRITE(CSR_PMPADDR0, pmpaddr); break;
    case 1: __RV_CSR_WRITE(CSR_PMPADDR1, pmpaddr); break;
    case 2: __RV_CSR_WRITE(CSR_PMPADDR2, pmpaddr); break;
    case 3: __RV_CSR_WRITE(CSR_PMPADDR3, pmpaddr); break;
    case 4: __RV_CSR_WRITE(CSR_PMPADDR4, pmpaddr); break;
    case 5: __RV_CSR_WRITE(CSR_PMPADDR5, pmpaddr); break;
    case 6: __RV_CSR_WRITE(CSR_PMPADDR6, pmpaddr); break;
    case 7: __RV_CSR_WRITE(CSR_PMPADDR7, pmpaddr); break;
    case 8: __RV_CSR_WRITE(CSR_PMPADDR8, pmpaddr); break;
    case 9: __RV_CSR_WRITE(CSR_PMPADDR9, pmpaddr); break;
    case 10: __RV_CSR_WRITE(CSR_PMPADDR10, pmpaddr); break;
    case 11: __RV_CSR_WRITE(CSR_PMPADDR11, pmpaddr); break;
    case 12: __RV_CSR_WRITE(CSR_PMPADDR12, pmpaddr); break;
    case 13: __RV_CSR_WRITE(CSR_PMPADDR13, pmpaddr); break;
    case 14: __RV_CSR_WRITE(CSR_PMPADDR14, pmpaddr); break;
    case 15: __RV_CSR_WRITE(CSR_PMPADDR15, pmpaddr); break;
    default: return;
    }
}
257 
269 __STATIC_INLINE void __set_PMPENTRYx(uint32_t entry_idx, const pmp_config *pmp_cfg)
270 {
271  unsigned int cfg_shift, cfg_csr_idx, addr_csr_idx = 0;
272  unsigned long cfgmask, addrmask = 0;
273  unsigned long pmpcfg, pmpaddr = 0;
274  unsigned long protection, csr_cfg_num = 0;
275  /* check parameters */
276  if (entry_idx >= __PMP_ENTRY_NUM || pmp_cfg->order > __RISCV_XLEN || pmp_cfg->order < PMP_SHIFT) return;
277 
278  /* calculate PMP register and offset */
279 #if __RISCV_XLEN == 32
280  csr_cfg_num = 4;
281  cfg_csr_idx = (entry_idx >> 2);
282 #elif __RISCV_XLEN == 64
283  csr_cfg_num = 8;
284  cfg_csr_idx = ((entry_idx >> 2)) & ~1;
285 #else
286  // TODO Add RV128 Handling
287  return;
288 #endif
289  /*
290  * first get specific pmpxcfg's order in one CSR composed of csr_cfg_num pmpxcfgs,
291  * then get pmpxcfg's bit position in one CSR by left shift 3, each pmpxcfg size is one byte
292  */
293  cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
294  addr_csr_idx = entry_idx;
295 
296  /* encode PMP config */
297  protection = (unsigned long)pmp_cfg->protection;
298  protection |= (PMP_SHIFT == pmp_cfg->order) ? PMP_A_NA4 : PMP_A_NAPOT;
299  cfgmask = ~(0xFFUL << cfg_shift);
300  pmpcfg = (__get_PMPCFGx(cfg_csr_idx) & cfgmask);
301  pmpcfg |= ((protection << cfg_shift) & ~cfgmask);
302 
303  /* encode PMP address */
304  if (PMP_SHIFT == pmp_cfg->order) { /* NA4 */
305  pmpaddr = (pmp_cfg->base_addr >> PMP_SHIFT);
306  } else { /* NAPOT */
307  addrmask = (1UL << (pmp_cfg->order - PMP_SHIFT)) - 1;
308  pmpaddr = ((pmp_cfg->base_addr >> PMP_SHIFT) & ~addrmask);
309  pmpaddr |= (addrmask >> 1);
310  }
311  /*
312  * write csrs, update the address first, in case the entry is locked that
313  * we won't be able to modify it after we set the config csr.
314  */
315  __set_PMPADDRx(addr_csr_idx, pmpaddr);
316  __set_PMPCFGx(cfg_csr_idx, pmpcfg);
317 }
318 
/**
 * \brief   Get PMP entry by entry idx
 * \details Read back one PMP entry and decode it into pmp_cfg: the 8-bit
 *          pmpxcfg protection byte and the region (base_addr, order) recovered
 *          from the pmpaddr CSR encoding.
 * \param [in]   entry_idx   PMP entry index (0 .. __PMP_ENTRY_NUM-1)
 * \param [out]  pmp_cfg     decoded entry configuration; must not be NULL
 * \return  0 on success, -1 on bad parameters or unsupported XLEN
 */
__STATIC_INLINE int __get_PMPENTRYx(unsigned int entry_idx, pmp_config *pmp_cfg)
{
    unsigned int cfg_shift, cfg_csr_idx, addr_csr_idx = 0;
    unsigned long cfgmask, pmpcfg, prot = 0;
    unsigned long t1, addr, pmpaddr, len = 0;
    uint8_t csr_cfg_num = 0;
    /* check parameters */
    if (entry_idx >= __PMP_ENTRY_NUM || !pmp_cfg) return -1;

    /* calculate PMP register and offset */
#if __RISCV_XLEN == 32
    csr_cfg_num = 4;
    cfg_csr_idx = entry_idx >> 2;
#elif __RISCV_XLEN == 64
    csr_cfg_num = 8;
    /* For RV64, pmpcfg0 and pmpcfg2 each hold 8 PMP entries, align by 2 */
    cfg_csr_idx = (entry_idx>> 2) & ~1;
#else
// TODO Add RV128 Handling
    return -1;
#endif

    /* bit position of this entry's pmpxcfg byte inside the CSR */
    cfg_shift = (entry_idx & (csr_cfg_num - 1)) << 3;
    addr_csr_idx = entry_idx;

    /* decode PMP config: isolate this entry's byte and shift it down */
    cfgmask = (0xFFUL << cfg_shift);
    pmpcfg = (__get_PMPCFGx(cfg_csr_idx) & cfgmask);
    prot = pmpcfg >> cfg_shift;

    /* decode PMP address */
    pmpaddr = __get_PMPADDRx(addr_csr_idx);
    if (PMP_A_NAPOT == (prot & PMP_A)) {
        /* NAPOT: number of trailing 1-bits (t1) encodes the region size,
         * order = t1 + PMP_SHIFT + 1; clear those bits to recover the base */
        t1 = __CTZ(~pmpaddr);
        addr = (pmpaddr & ~((1UL << t1) - 1)) << PMP_SHIFT;
        len = (t1 + PMP_SHIFT + 1);
    } else {
        /* NA4 (and other modes): address is pmpaddr << PMP_SHIFT,
         * minimum region order */
        addr = pmpaddr << PMP_SHIFT;
        len = PMP_SHIFT;
    }

    /* return details */
    pmp_cfg->protection = prot;
    pmp_cfg->base_addr = addr;
    pmp_cfg->order = len;

    return 0;
}
377  /* End of Doxygen Group NMSIS_Core_PMP_Functions */
379 #endif /* defined(__PMP_PRESENT) && (__PMP_PRESENT == 1) */
380 
381 #ifdef __cplusplus
382 }
383 #endif
384 #endif /* __CORE_FEATURE_PMP_H__ */
CSR_PMPADDR8
#define CSR_PMPADDR8
Definition: riscv_encoding.h:641
__get_PMPxCFG
__STATIC_INLINE uint8_t __get_PMPxCFG(uint32_t entry_idx)
Get 8bit PMPxCFG Register by PMP entry index.
Definition: core_feature_pmp.h:129
CSR_PMPADDR4
#define CSR_PMPADDR4
Definition: riscv_encoding.h:637
PMP_CONFIG::base_addr
unsigned long base_addr
Base address of memory region It must be 2^order aligned address.
Definition: core_feature_pmp.h:74
PMP_A_NA4
#define PMP_A_NA4
Definition: riscv_encoding.h:411
CSR_PMPADDR10
#define CSR_PMPADDR10
Definition: riscv_encoding.h:643
PMP_CONFIG::order
unsigned long order
Size of memory region as power of 2, it has to be minimum 2 and maxium __RISCV_XLEN according to the ...
Definition: core_feature_pmp.h:69
__set_PMPxCFG
__STATIC_INLINE void __set_PMPxCFG(uint32_t entry_idx, uint8_t pmpxcfg)
Set 8bit PMPxCFG by pmp entry index.
Definition: core_feature_pmp.h:169
__CTZ
__STATIC_FORCEINLINE unsigned long __CTZ(unsigned long data)
Count tailing zero.
Definition: core_compatiable.h:255
PMP_CONFIG
Definition: core_feature_pmp.h:58
__RV_CSR_WRITE
#define __RV_CSR_WRITE(csr, val)
CSR operation Macro for csrw instruction.
Definition: core_feature_base.h:532
PMP_A_NAPOT
#define PMP_A_NAPOT
Definition: riscv_encoding.h:412
CSR_PMPADDR3
#define CSR_PMPADDR3
Definition: riscv_encoding.h:636
CSR_PMPADDR13
#define CSR_PMPADDR13
Definition: riscv_encoding.h:646
__STATIC_INLINE
#define __STATIC_INLINE
Define a static function that may be inlined by the compiler.
Definition: nmsis_gcc.h:65
CSR_PMPCFG2
#define CSR_PMPCFG2
Definition: riscv_encoding.h:619
CSR_PMPADDR1
#define CSR_PMPADDR1
Definition: riscv_encoding.h:634
CSR_PMPADDR14
#define CSR_PMPADDR14
Definition: riscv_encoding.h:647
CSR_PMPADDR0
#define CSR_PMPADDR0
Definition: riscv_encoding.h:633
__get_PMPADDRx
__STATIC_INLINE rv_csr_t __get_PMPADDRx(uint32_t csr_idx)
Get PMPADDRx Register by CSR index.
Definition: core_feature_pmp.h:206
CSR_PMPCFG3
#define CSR_PMPCFG3
Definition: riscv_encoding.h:620
CSR_PMPADDR12
#define CSR_PMPADDR12
Definition: riscv_encoding.h:645
CSR_PMPADDR2
#define CSR_PMPADDR2
Definition: riscv_encoding.h:635
PMP_A
#define PMP_A
Definition: riscv_encoding.h:409
PMP_CONFIG::protection
unsigned int protection
set locking bit, addressing mode, read, write, and instruction execution permissions,...
Definition: core_feature_pmp.h:63
__get_PMPCFGx
__STATIC_INLINE rv_csr_t __get_PMPCFGx(uint32_t csr_idx)
Get PMPCFGx Register by csr index.
Definition: core_feature_pmp.h:89
__set_PMPENTRYx
__STATIC_INLINE void __set_PMPENTRYx(uint32_t entry_idx, const pmp_config *pmp_cfg)
Set PMP entry by entry idx.
Definition: core_feature_pmp.h:269
CSR_PMPADDR5
#define CSR_PMPADDR5
Definition: riscv_encoding.h:638
__RV_CSR_READ
#define __RV_CSR_READ(csr)
CSR operation Macro for csrr instruction.
Definition: core_feature_base.h:514
__get_PMPENTRYx
__STATIC_INLINE int __get_PMPENTRYx(unsigned int entry_idx, pmp_config *pmp_cfg)
Get PMP entry by entry idx.
Definition: core_feature_pmp.h:330
PMP_SHIFT
#define PMP_SHIFT
Definition: riscv_encoding.h:415
CSR_PMPCFG1
#define CSR_PMPCFG1
Definition: riscv_encoding.h:618
CSR_PMPADDR15
#define CSR_PMPADDR15
Definition: riscv_encoding.h:648
CSR_PMPCFG0
#define CSR_PMPCFG0
Definition: riscv_encoding.h:617
CSR_PMPADDR7
#define CSR_PMPADDR7
Definition: riscv_encoding.h:640
__set_PMPADDRx
__STATIC_INLINE void __set_PMPADDRx(uint32_t csr_idx, rv_csr_t pmpaddr)
Set PMPADDRx by CSR index.
Definition: core_feature_pmp.h:235
__RISCV_XLEN
#define __RISCV_XLEN
Refer to the width of an integer register in bits(either 32 or 64)
Definition: core_feature_base.h:48
CSR_PMPADDR11
#define CSR_PMPADDR11
Definition: riscv_encoding.h:644
CSR_PMPADDR6
#define CSR_PMPADDR6
Definition: riscv_encoding.h:639
rv_csr_t
unsigned long rv_csr_t
Type of Control and Status Register(CSR), depends on the XLEN defined in RISC-V.
Definition: core_feature_base.h:55
CSR_PMPADDR9
#define CSR_PMPADDR9
Definition: riscv_encoding.h:642
__set_PMPCFGx
__STATIC_INLINE void __set_PMPCFGx(uint32_t csr_idx, rv_csr_t pmpcfg)
Set PMPCFGx by csr index.
Definition: core_feature_pmp.h:112