keos/mm/page_table.rs
//! Page table entries and their permissions.
use crate::{
    addressing::{Pa, Va},
    mm::{Page, tlb::TlbIpi},
};
use abyss::x86_64::Cr3;
use alloc::boxed::Box;
use core::ops::Deref;

bitflags::bitflags! {
    /// Flags for pml4e.
    pub struct Pml4eFlags: usize {
        /// Present; must be 1 to reference a page-directory-pointer table
        const P = 1 << 0;
        /// Read/write; if 0, writes may not be allowed to the 512-GByte region controlled by this entry (see Section 4.6)
        const RW = 1 << 1;
        /// User/supervisor; if 0, user-mode accesses are not allowed to the 512-GByte region controlled by this entry (see Section 4.6)
        const US = 1 << 2;
        /// Page-level write-through; indirectly determines the memory type used to access the page-directory-pointer table referenced by this entry (see Section 4.9.2)
        const PWT = 1 << 3;
        /// Page-level cache disable; indirectly determines the memory type used to access the page-directory-pointer table referenced by this entry (see Section 4.9.2)
        const PCD = 1 << 4;
        /// Accessed; indicates whether this entry has been used for linear-address translation (see Section 4.8)
        const A = 1 << 5;
        #[doc(hidden)] const _IGN_6 = 1 << 6;
        #[doc(hidden)] const _REV_0 = 1 << 7;
        #[doc(hidden)] const _IGN_8 = 1 << 8;
        #[doc(hidden)] const _IGN_9 = 1 << 9;
        #[doc(hidden)] const _IGN_10 = 1 << 10;
        /// For ordinary paging, ignored; for HLAT paging, restart (if 1, linear-address translation is restarted with ordinary paging)
        const R = 1 << 11;
        #[doc(hidden)] const _IGN_52 = 1 << 52;
        #[doc(hidden)] const _IGN_53 = 1 << 53;
        #[doc(hidden)] const _IGN_54 = 1 << 54;
        #[doc(hidden)] const _IGN_55 = 1 << 55;
        #[doc(hidden)] const _IGN_56 = 1 << 56;
        #[doc(hidden)] const _IGN_57 = 1 << 57;
        #[doc(hidden)] const _IGN_58 = 1 << 58;
        #[doc(hidden)] const _IGN_59 = 1 << 59;
        #[doc(hidden)] const _IGN_60 = 1 << 60;
        #[doc(hidden)] const _IGN_61 = 1 << 61;
        #[doc(hidden)] const _IGN_62 = 1 << 62;
        /// If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not allowed from the 512-GByte region controlled by this entry; see Section 4.6); otherwise, reserved (must be 0)
        const XD = 1 << 63;
    }
}

/// Page Map Level 4 Entry (PML4E).
///
/// This struct represents a **Page Map Level 4 Entry** (PML4E), the top-level
/// entry in the 4-level page table hierarchy of the x86_64 architecture. A
/// PML4E points to a **Page Directory Pointer Table** (PDP), which holds the
/// next level of mappings from virtual to physical memory.
///
/// The [`Pml4e`] struct provides methods for working with the physical address
/// and flags associated with a PML4E, allowing manipulation of page tables in
/// the virtual memory system.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Pml4e(pub usize);

impl core::fmt::Debug for Pml4e {
    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
        if let Some(pa) = self.pa() {
            write!(f, "Pml4e({:016x}, {:?})", pa.into_usize(), self.flags())
        } else {
            write!(f, ".")
        }
    }
}

impl Pml4e {
    /// Get the physical address pointed to by this entry.
    ///
    /// This function checks whether the PML4 entry is **present** (i.e., if
    /// the "P" flag is set in the entry). If the entry is present, it extracts
    /// the physical address by clearing the flags from the entry.
    ///
    /// # Returns
    /// - `Some(Pa)` if the PML4E is present, containing the physical address.
    /// - `None` if the PML4E is not present (i.e., the "P" flag is not set).
    #[inline]
    pub const fn pa(&self) -> Option<Pa> {
        if self.flags().contains(Pml4eFlags::P) {
            Pa::new(self.0 & !Pml4eFlags::all().bits())
        } else {
            None
        }
    }

    /// Get the flags associated with this entry.
    ///
    /// This function extracts the flags from the PML4E, which may indicate
    /// whether the page map level entry is present, writable,
    /// user-accessible, etc.
    ///
    /// # Returns
    /// A [`Pml4eFlags`] value representing the flags associated with this
    /// entry.
    #[inline]
    pub const fn flags(&self) -> Pml4eFlags {
        Pml4eFlags::from_bits_truncate(self.0)
    }

    /// Set the physical address for this entry.
    ///
    /// This method updates the physical address of the PML4E while preserving
    /// the current flags (e.g., read/write permissions). It ensures that
    /// the provided physical address is aligned to a 4K boundary (the page
    /// size), as required by the architecture.
    ///
    /// # Parameters
    /// - `pa`: The new physical address to set for the entry.
    ///
    /// # Returns
    /// - `Ok(&mut Self)` if the address is valid and the update is successful.
    /// - `Err(PageTableMappingError::Unaligned)` if the provided physical
    ///   address is not aligned.
    ///
    /// # Warning
    /// This operation does not modify the flags of the entry.
    #[inline]
    pub fn set_pa(&mut self, pa: Pa) -> Result<&mut Self, PageTableMappingError> {
        let pa = pa.into_usize();
        if pa & 0xfff != 0 {
            Err(PageTableMappingError::Unaligned)
        } else {
            self.0 = pa | self.flags().bits() | Pml4eFlags::P.bits();
            Ok(self)
        }
    }

    /// Set the flags for this entry.
    ///
    /// This method allows you to update the flags associated with the PML4E
    /// without modifying the physical address. It combines the current
    /// physical address with the new flags and sets the updated value back
    /// into the entry.
    ///
    /// # Parameters
    /// - `perm`: The new set of flags to assign to the entry.
    ///
    /// # Returns
    /// A mutable reference to `self`, allowing for method chaining.
    #[inline]
    pub fn set_flags(&mut self, perm: Pml4eFlags) -> &mut Self {
        self.0 = self.pa().map(|n| n.into_usize()).unwrap_or(0) | perm.bits();
        self
    }

    /// Clears the entry.
    ///
    /// This method removes any previously set physical address and flags from
    /// the entry. If the entry contained a valid physical address before
    /// being cleared, that address is returned.
    ///
    /// # Returns
    /// - `Some(Pa)`: The physical address that was previously stored in the
    ///   entry, if it existed.
    /// - `None`: If the entry did not contain a valid physical address.
    #[inline]
    pub fn clear(&mut self) -> Option<Pa> {
        self.pa().inspect(|_| {
            self.0 = 0;
        })
    }

    /// Get a mutable reference to the page directory pointer table pointed to
    /// by this entry.
    ///
    /// This method retrieves a mutable reference to the page directory pointer
    /// table (PDP) that this PML4E points to, assuming that the entry is
    /// present (i.e., the "P" flag is set).
    ///
    /// # Returns
    /// - `Ok(&mut [Pdpe])` if the page directory pointer table is valid,
    ///   represented as a mutable slice of `Pdpe` (page directory pointer
    ///   entries).
    /// - `Err(PageTableMappingError::NotExist)` if the PML4E is not present or
    ///   invalid.
    ///
    /// # Safety
    /// This operation assumes that the physical address of the page directory
    /// pointer table is valid and properly aligned.
    #[inline]
    pub fn into_pdp_mut(&mut self) -> Result<&mut [Pdpe], PageTableMappingError> {
        let pa = self.pa().ok_or(PageTableMappingError::NotExist)?;
        if !self.flags().contains(Pml4eFlags::P) {
            return Err(PageTableMappingError::NotExist);
        }
        unsafe {
            Ok(core::slice::from_raw_parts_mut(
                pa.into_kva().into_usize() as *mut Pdpe,
                512,
            ))
        }
    }

    /// Get a reference to the page directory pointer table pointed to by this
    /// entry.
    ///
    /// This method retrieves an immutable reference to the page directory
    /// pointer table (PDP) that this PML4E points to, assuming that the
    /// entry is present (i.e., the "P" flag is set).
    ///
    /// # Returns
    /// - `Ok(&[Pdpe])` if the page directory pointer table is valid,
    ///   represented as an immutable slice of `Pdpe` (page directory pointer
    ///   entries).
    /// - `Err(PageTableMappingError::NotExist)` if the PML4E is not present or
    ///   invalid.
    ///
    /// # Safety
    /// This operation assumes that the physical address of the page directory
    /// pointer table is valid and properly aligned.
    #[inline]
    pub fn into_pdp(&self) -> Result<&[Pdpe], PageTableMappingError> {
        let pa = self.pa().ok_or(PageTableMappingError::NotExist)?;
        if !self.flags().contains(Pml4eFlags::P) {
            return Err(PageTableMappingError::NotExist);
        }
        unsafe {
            Ok(core::slice::from_raw_parts(
                pa.into_kva().into_usize() as *const Pdpe,
                512,
            ))
        }
    }
}
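
// NOTE (illustrative sketch, not part of the original KeOS sources): the
// function below shows how the `Pml4e` accessors above compose. The physical
// address 0x5000 is an arbitrary page-aligned value chosen only for the
// example; a real caller would pass the address of a freshly allocated,
// zeroed page serving as the PDP table.
#[allow(dead_code)]
fn pml4e_usage_sketch() {
    let mut e = Pml4e(0);
    // `set_pa` stores the address and sets the P bit; `set_flags` then
    // replaces the flag bits wholesale, so P must be included again.
    e.set_pa(Pa::new(0x5000).unwrap())
        .unwrap()
        .set_flags(Pml4eFlags::P | Pml4eFlags::RW | Pml4eFlags::US);
    assert!(e.flags().contains(Pml4eFlags::RW));
    assert!(e.pa().is_some());
    // `clear` resets the entry and hands back the old physical address.
    assert!(e.clear().is_some());
}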

bitflags::bitflags! {
    /// Flags for pdpe.
    pub struct PdpeFlags: usize {
        /// Present; must be 1 to reference a page directory
        const P = 1 << 0;
        /// Read/write; if 0, writes may not be allowed to the 1-GByte region controlled by this entry (see Section 4.6)
        const RW = 1 << 1;
        /// User/supervisor; if 0, user-mode accesses are not allowed to the 1-GByte region controlled by this entry (see Section 4.6)
        const US = 1 << 2;
        /// Page-level write-through; indirectly determines the memory type used to access the page directory referenced by this entry (see Section 4.9.2)
        const PWT = 1 << 3;
        /// Page-level cache disable; indirectly determines the memory type used to access the page directory referenced by this entry (see Section 4.9.2)
        const PCD = 1 << 4;
        /// Accessed; indicates whether this entry has been used for linear-address translation (see Section 4.8)
        const A = 1 << 5;
        #[doc(hidden)] const _IGN_6 = 1 << 6;
        #[doc(hidden)] const _REV_0 = 1 << 7;
        #[doc(hidden)] const _IGN_8 = 1 << 8;
        #[doc(hidden)] const _IGN_9 = 1 << 9;
        #[doc(hidden)] const _IGN_10 = 1 << 10;
        /// For ordinary paging, ignored; for HLAT paging, restart (if 1, linear-address translation is restarted with ordinary paging)
        const R = 1 << 11;
        #[doc(hidden)] const _IGN_52 = 1 << 52;
        #[doc(hidden)] const _IGN_53 = 1 << 53;
        #[doc(hidden)] const _IGN_54 = 1 << 54;
        #[doc(hidden)] const _IGN_55 = 1 << 55;
        #[doc(hidden)] const _IGN_56 = 1 << 56;
        #[doc(hidden)] const _IGN_57 = 1 << 57;
        #[doc(hidden)] const _IGN_58 = 1 << 58;
        #[doc(hidden)] const _IGN_59 = 1 << 59;
        #[doc(hidden)] const _IGN_60 = 1 << 60;
        #[doc(hidden)] const _IGN_61 = 1 << 61;
        #[doc(hidden)] const _IGN_62 = 1 << 62;
        /// If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not allowed from the 1-GByte region controlled by this entry; see Section 4.6); otherwise, reserved (must be 0)
        const XD = 1 << 63;
    }
}

/// Page Directory Pointer Table Entry (PDPE).
///
/// This struct represents a **Page Directory Pointer Table Entry** (PDPE),
/// the second-level entry in the 4-level page table hierarchy of the x86_64
/// architecture. A PDPE points to a **Page Directory** (PD), which holds the
/// next level of mappings from virtual to physical memory.
///
/// The [`Pdpe`] struct provides methods for working with the physical address
/// and flags associated with a PDPE, allowing the manipulation of page tables
/// in the virtual memory system.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Pdpe(pub usize);

impl Pdpe {
    /// Get the physical address pointed to by this entry.
    ///
    /// This function checks whether the page directory pointer table entry is
    /// **present** (i.e., if the "P" flag is set in the entry).
    /// If the entry is present, it extracts the physical address by clearing
    /// the flags from the entry.
    ///
    /// # Returns
    /// - `Some(Pa)` if the PDPE is present, containing the physical address.
    /// - `None` if the PDPE is not present (i.e., the "P" flag is not set).
    #[inline]
    pub const fn pa(&self) -> Option<Pa> {
        if self.flags().contains(PdpeFlags::P) {
            Pa::new(self.0 & !PdpeFlags::all().bits())
        } else {
            None
        }
    }

    /// Get the flags associated with this entry.
    ///
    /// This function extracts the flags from the PDPE, which may indicate
    /// whether the page directory pointer table entry is present, writable,
    /// user-accessible, etc.
    ///
    /// # Returns
    /// A [`PdpeFlags`] value representing the flags associated with this entry.
    #[inline]
    pub const fn flags(&self) -> PdpeFlags {
        PdpeFlags::from_bits_truncate(self.0)
    }

    /// Set the physical address for this entry.
    ///
    /// This method updates the physical address of the PDPE while preserving
    /// the current flags (e.g., read/write permissions). It ensures that
    /// the provided physical address is aligned to a 4K boundary (the page
    /// size), as required by the architecture.
    ///
    /// # Parameters
    /// - `pa`: The new physical address to set for the entry.
    ///
    /// # Returns
    /// - `Ok(&mut Self)` if the address is valid and the update is successful.
    /// - `Err(PageTableMappingError::Unaligned)` if the provided physical
    ///   address is not aligned.
    ///
    /// # Warning
    /// This operation does not modify the flags of the entry.
    #[inline]
    pub fn set_pa(&mut self, pa: Pa) -> Result<&mut Self, PageTableMappingError> {
        let pa = pa.into_usize();
        if pa & 0xfff != 0 {
            Err(PageTableMappingError::Unaligned)
        } else {
            self.0 = pa | self.flags().bits() | PdpeFlags::P.bits();
            Ok(self)
        }
    }

    /// Set the flags for this entry.
    ///
    /// This method allows you to update the flags associated with the PDPE
    /// without modifying the physical address. It combines the current
    /// physical address with the new flags and sets the updated value back
    /// into the entry.
    ///
    /// # Parameters
    /// - `perm`: The new set of flags to assign to the entry.
    ///
    /// # Returns
    /// A mutable reference to `self`, allowing for method chaining.
    #[inline]
    pub fn set_flags(&mut self, perm: PdpeFlags) -> &mut Self {
        self.0 = self.pa().map(|n| n.into_usize()).unwrap_or(0) | perm.bits();
        self
    }

    /// Clears the entry.
    ///
    /// This method removes any previously set physical address and flags from
    /// the entry. If the entry contained a valid physical address before
    /// being cleared, that address is returned.
    ///
    /// # Returns
    /// - `Some(Pa)`: The physical address that was previously stored in the
    ///   entry, if it existed.
    /// - `None`: If the entry did not contain a valid physical address.
    #[inline]
    pub fn clear(&mut self) -> Option<Pa> {
        self.pa().inspect(|_| {
            self.0 = 0;
        })
    }

    /// Get a mutable reference to the page directory pointed to by this entry.
    ///
    /// This method retrieves a mutable reference to the page directory that
    /// this PDPE points to, assuming that the entry is present (i.e., the
    /// "P" flag is set).
    ///
    /// # Returns
    /// - `Ok(&mut [Pde])` if the page directory is valid, represented as a
    ///   mutable slice of `Pde` (page directory entries).
    /// - `Err(PageTableMappingError::NotExist)` if the PDPE is not present or
    ///   invalid.
    ///
    /// # Safety
    /// This operation assumes that the physical address of the page directory
    /// is valid and properly aligned.
    #[inline]
    pub fn into_pd_mut(&mut self) -> Result<&mut [Pde], PageTableMappingError> {
        let pa = self.pa().ok_or(PageTableMappingError::NotExist)?;
        if !self.flags().contains(PdpeFlags::P) {
            return Err(PageTableMappingError::NotExist);
        }
        unsafe {
            Ok(core::slice::from_raw_parts_mut(
                pa.into_kva().into_usize() as *mut Pde,
                512,
            ))
        }
    }

    /// Get a reference to the page directory pointed to by this entry.
    ///
    /// This method retrieves an immutable reference to the page directory that
    /// this PDPE points to, assuming that the entry is present (i.e., the
    /// "P" flag is set).
    ///
    /// # Returns
    /// - `Ok(&[Pde])` if the page directory is valid, represented as an
    ///   immutable slice of `Pde` (page directory entries).
    /// - `Err(PageTableMappingError::NotExist)` if the PDPE is not present or
    ///   invalid.
    ///
    /// # Safety
    /// This operation assumes that the physical address of the page directory
    /// is valid and properly aligned.
    #[inline]
    pub fn into_pd(&self) -> Result<&[Pde], PageTableMappingError> {
        let pa = self.pa().ok_or(PageTableMappingError::NotExist)?;
        if !self.flags().contains(PdpeFlags::P) {
            return Err(PageTableMappingError::NotExist);
        }
        unsafe {
            Ok(core::slice::from_raw_parts(
                pa.into_kva().into_usize() as *const Pde,
                512,
            ))
        }
    }
}

/// Page Directory Entry (PDE).
///
/// This struct represents a **Page Directory Entry** (PDE), the third-level
/// entry in the 4-level page table hierarchy of the x86_64 architecture.
/// A page directory entry typically holds the physical address of a page
/// table, along with various flags. In a paging system, a PDE points to a
/// page table, which in turn contains the actual page table entries (PTEs)
/// that map virtual addresses to physical addresses.
///
/// The [`Pde`] struct in this code provides access to these fields and
/// operations on them. Each entry corresponds to a page table in the
/// virtual memory hierarchy and is used by the kernel to map higher-level
/// virtual addresses to lower-level page table entries.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Pde(pub usize);

impl Pde {
    /// Get the physical address pointed to by this entry.
    ///
    /// This function checks whether the page directory entry is **present**
    /// (i.e., if the "P" flag is set in the entry). If the page directory
    /// entry is present, it extracts the physical address by clearing the
    /// flags from the entry.
    ///
    /// # Returns
    /// - `Some(Pa)` if the page directory entry is present, containing the
    ///   physical address.
    /// - `None` if the page directory entry is not present (i.e., the "P" flag
    ///   is not set).
    #[inline]
    pub const fn pa(&self) -> Option<Pa> {
        if self.flags().contains(PdeFlags::P) {
            Pa::new(self.0 & !PdeFlags::all().bits())
        } else {
            None
        }
    }

    /// Get the flags associated with this entry.
    ///
    /// This function extracts the flags from the page directory entry, which
    /// may indicate whether the page directory is present,
    /// writable, user-accessible, etc.
    ///
    /// # Returns
    /// A [`PdeFlags`] value representing the flags associated with this entry.
    #[inline]
    pub const fn flags(&self) -> PdeFlags {
        PdeFlags::from_bits_truncate(self.0)
    }

    /// Set the physical address for this entry.
    ///
    /// This method updates the physical address of the page directory entry
    /// while preserving the current flags (e.g., read/write permissions).
    /// It checks that the provided physical address is aligned to a 4K boundary
    /// (the page size), as required by the architecture.
    ///
    /// # Parameters
    /// - `pa`: The new physical address to set for the entry.
    ///
    /// # Returns
    /// - `Ok(&mut Self)` if the address is valid and the update is successful.
    /// - `Err(PageTableMappingError::Unaligned)` if the provided physical
    ///   address is not aligned.
    ///
    /// # Warning
    /// This operation does not modify the flags of the entry.
    #[inline]
    pub fn set_pa(&mut self, pa: Pa) -> Result<&mut Self, PageTableMappingError> {
        let pa = pa.into_usize();
        if pa & 0xfff != 0 {
            Err(PageTableMappingError::Unaligned)
        } else {
            self.0 = pa | self.flags().bits() | PdeFlags::P.bits();
            Ok(self)
        }
    }

    /// Set the flags for this entry.
    ///
    /// This method allows you to update the flags associated with the page
    /// directory entry without modifying the physical address. It combines
    /// the current physical address with the new flags and sets the updated
    /// value back into the entry.
    ///
    /// # Parameters
    /// - `perm`: The new set of flags to assign to the entry.
    ///
    /// # Returns
    /// A mutable reference to `self`, allowing for method chaining.
    #[inline]
    pub fn set_flags(&mut self, perm: PdeFlags) -> &mut Self {
        self.0 = self.pa().map(|n| n.into_usize()).unwrap_or(0) | perm.bits();
        self
    }

    /// Clears the entry.
    ///
    /// This method removes any previously set physical address and flags from
    /// the entry. If the entry contained a valid physical address before
    /// being cleared, that address is returned.
    ///
    /// # Returns
    /// - `Some(Pa)`: The physical address that was previously stored in the
    ///   entry, if it existed.
    /// - `None`: If the entry did not contain a valid physical address.
    #[inline]
    pub fn clear(&mut self) -> Option<Pa> {
        self.pa().inspect(|_| {
            self.0 = 0;
        })
    }

    /// Get a mutable reference to the page table pointed to by this entry.
    ///
    /// This method retrieves a mutable reference to the page table that this
    /// page directory entry points to, assuming that the entry is present
    /// (i.e., the "P" flag is set).
    ///
    /// # Returns
    /// - `Ok(&mut [Pte])` if the page table is valid, represented as a mutable
    ///   slice of `Pte` (page table entries).
    /// - `Err(PageTableMappingError::NotExist)` if the page directory entry is
    ///   not present or invalid.
    ///
    /// # Safety
    /// This operation assumes that the physical address of the page table is
    /// valid and properly aligned.
    #[inline]
    pub fn into_pt_mut(&mut self) -> Result<&mut [Pte], PageTableMappingError> {
        let pa = self.pa().ok_or(PageTableMappingError::NotExist)?;
        if !self.flags().contains(PdeFlags::P) {
            return Err(PageTableMappingError::NotExist);
        }
        unsafe {
            Ok(core::slice::from_raw_parts_mut(
                pa.into_kva().into_usize() as *mut Pte,
                512,
            ))
        }
    }

    /// Get a reference to the page table pointed to by this entry.
    ///
    /// This method retrieves an immutable reference to the page table that this
    /// page directory entry points to, assuming that the entry is present
    /// (i.e., the "P" flag is set).
    ///
    /// # Returns
    /// - `Ok(&[Pte])` if the page table is valid, represented as an immutable
    ///   slice of `Pte` (page table entries).
    /// - `Err(PageTableMappingError::NotExist)` if the page directory entry is
    ///   not present or invalid.
    ///
    /// # Safety
    /// This operation assumes that the physical address of the page table is
    /// valid and properly aligned.
    #[inline]
    pub fn into_pt(&self) -> Result<&[Pte], PageTableMappingError> {
        let pa = self.pa().ok_or(PageTableMappingError::NotExist)?;
        if !self.flags().contains(PdeFlags::P) {
            return Err(PageTableMappingError::NotExist);
        }
        unsafe {
            Ok(core::slice::from_raw_parts(
                pa.into_kva().into_usize() as *const Pte,
                512,
            ))
        }
    }
}

bitflags::bitflags! {
    /// Flags for pde.
    pub struct PdeFlags: usize {
        /// Present; must be 1 to reference a page table
        const P = 1 << 0;
        /// Read/write; if 0, writes may not be allowed to the 2-MByte region controlled by this entry (see Section 4.6)
        const RW = 1 << 1;
        /// User/supervisor; if 0, user-mode accesses are not allowed to the 2-MByte region controlled by this entry (see Section 4.6)
        const US = 1 << 2;
        /// Page-level write-through; indirectly determines the memory type used to access the page table referenced by this entry (see Section 4.9.2)
        const PWT = 1 << 3;
        /// Page-level cache disable; indirectly determines the memory type used to access the page table referenced by this entry (see Section 4.9.2)
        const PCD = 1 << 4;
        /// Accessed; indicates whether this entry has been used for linear-address translation (see Section 4.8)
        const A = 1 << 5;
        #[doc(hidden)] const _IGN_6 = 1 << 6;
        /// Page size; if 1, this entry maps a 2-MByte page (otherwise it references a page table)
        const PS = 1 << 7;
        #[doc(hidden)] const _IGN_8 = 1 << 8;
        #[doc(hidden)] const _IGN_9 = 1 << 9;
        #[doc(hidden)] const _IGN_10 = 1 << 10;
        /// For ordinary paging, ignored; for HLAT paging, restart (if 1, linear-address translation is restarted with ordinary paging)
        const R = 1 << 11;
        #[doc(hidden)] const _IGN_52 = 1 << 52;
        #[doc(hidden)] const _IGN_53 = 1 << 53;
        #[doc(hidden)] const _IGN_54 = 1 << 54;
        #[doc(hidden)] const _IGN_55 = 1 << 55;
        #[doc(hidden)] const _IGN_56 = 1 << 56;
        #[doc(hidden)] const _IGN_57 = 1 << 57;
        #[doc(hidden)] const _IGN_58 = 1 << 58;
        #[doc(hidden)] const _IGN_59 = 1 << 59;
        #[doc(hidden)] const _IGN_60 = 1 << 60;
        #[doc(hidden)] const _IGN_61 = 1 << 61;
        #[doc(hidden)] const _IGN_62 = 1 << 62;
        /// If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not allowed from the 2-MByte region controlled by this entry; see Section 4.6); otherwise, reserved (must be 0)
        const XD = 1 << 63;
    }
}

/// Page Table Entry (PTE).
///
/// This struct represents a Page Table Entry (PTE), the last-level entry in
/// the 4-level page table hierarchy of the x86_64 architecture.
/// A page table entry typically holds the physical address of a page and
/// various control bits, such as flags indicating whether the page is
/// present, read/write, etc.
///
/// The [`Pte`] struct in this code provides access to these fields and
/// operations on them. Each entry corresponds to a single page in memory and
/// is used by the kernel to map virtual addresses to physical addresses in
/// the page table.
#[derive(Clone, Copy)]
#[repr(transparent)]
pub struct Pte(pub usize);

impl Pte {
    /// Get the physical address pointed to by this entry.
    ///
    /// This function checks whether the page is present (i.e., if the "P" flag
    /// is set in the entry). If the page is present, it extracts the
    /// physical address from the entry by clearing the flags bits.
    ///
    /// # Returns
    /// - `Some(Pa)` if the page is present, containing the physical address.
    /// - `None` if the page is not present (i.e., the "P" flag is not set).
    #[inline]
    pub const fn pa(&self) -> Option<Pa> {
        if self.flags().contains(PteFlags::P) {
            Pa::new(self.0 & !PteFlags::all().bits())
        } else {
            None
        }
    }

    /// Get the flags associated with this page table entry.
    ///
    /// This function extracts the flags from the entry. The flags represent
    /// various properties of the page, such as whether the page is present,
    /// read-only, user-accessible, etc.
    ///
    /// # Returns
    /// A [`PteFlags`] value representing the flags associated with this entry.
    #[inline]
    pub const fn flags(&self) -> PteFlags {
        PteFlags::from_bits_truncate(self.0)
    }

    /// Set the physical address for this entry.
    ///
    /// This method updates the physical address of the entry, preserving the
    /// current flags (e.g., read/write permissions). It checks that the
    /// physical address is aligned to a 4K boundary (the page size), as
    /// required by the architecture.
    ///
    /// # Safety
    /// You must invalidate the corresponding TLB Entry.
    ///
    /// # Parameters
    /// - `pa`: The new physical address to set for the entry.
    ///
    /// # Returns
    /// - `Ok(&mut Self)` if the address is valid and the update is successful.
    /// - `Err(PageTableMappingError::Unaligned)` if the provided physical
    ///   address is not aligned.
    ///
    /// # Warning
    /// This operation does not modify the flags of the entry.
    #[inline]
    pub fn set_pa(&mut self, pa: Pa) -> Result<&mut Self, PageTableMappingError> {
        let pa = pa.into_usize();
        if pa & 0xfff != 0 {
            Err(PageTableMappingError::Unaligned)
        } else {
            self.0 = pa | self.flags().bits() | PteFlags::P.bits();
            Ok(self)
        }
    }

    /// Set the flags for this entry.
    ///
    /// This method allows you to update the flags associated with the page.
    /// The physical address remains unchanged, but the permission settings
    /// (e.g., read/write, user/kernel) can be updated.
    ///
    /// # Parameters
    /// - `perm`: The new set of flags to assign to the entry.
    ///
    /// # Returns
    /// A mutable reference to `self`, allowing for method chaining.
    ///
    /// # Safety
    /// You must invalidate the corresponding TLB Entry.
    #[inline]
    pub unsafe fn set_flags(&mut self, perm: PteFlags) -> &mut Self {
        self.0 = self.pa().map(|n| n.into_usize()).unwrap_or(0) | perm.bits();
        self
    }

    /// Clears the entry.
    ///
    /// This method removes any previously set physical address and flags from
    /// the entry. If the entry contained a valid physical address before
    /// being cleared, that address is returned.
    ///
    /// # Returns
    /// - `Some(Pa)`: The physical address that was previously stored in the
    ///   entry, if it existed.
    /// - `None`: If the entry did not contain a valid physical address.
    ///
    /// # Safety
    /// You must invalidate the corresponding TLB Entry.
    #[inline]
    pub unsafe fn clear(&mut self) -> Option<Pa> {
        self.pa().inspect(|_| {
            self.0 = 0;
        })
    }
}
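
// Hedged sketch (not part of the original module): a read-only 4-KByte page
// walk built only from the accessors defined above. It assumes `pml4` is the
// 512-entry PML4 slice of the address space being inspected, uses the
// standard x86_64 9-9-9-9-12 index split, and does not handle 2-MByte
// mappings (`PdeFlags::PS`).
#[allow(dead_code)]
fn walk_4k_sketch(pml4: &[Pml4e], va: Va) -> Result<Pa, PageTableMappingError> {
    let v = va.into_usize();
    let pdp = pml4[(v >> 39) & 0x1ff].into_pdp()?;
    let pd = pdp[(v >> 30) & 0x1ff].into_pd()?;
    let pt = pd[(v >> 21) & 0x1ff].into_pt()?;
    pt[(v >> 12) & 0x1ff]
        .pa()
        .ok_or(PageTableMappingError::NotExist)
}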

bitflags::bitflags! {
    /// Flags for pte.
    pub struct PteFlags: usize {
        /// Present; must be 1 to map a 4-KByte page
        const P = 1 << 0;
        /// Read/write; if 0, writes may not be allowed to the 4-KByte page referenced by this entry (see Section 4.6)
        const RW = 1 << 1;
        /// User/supervisor; if 0, user-mode accesses are not allowed to the 4-KByte page referenced by this entry (see Section 4.6)
        const US = 1 << 2;
        /// Page-level write-through; indirectly determines the memory type used to access the 4-KByte page referenced by this entry (see Section 4.9.2)
        const PWT = 1 << 3;
        /// Page-level cache disable; indirectly determines the memory type used to access the 4-KByte page referenced by this entry (see Section 4.9.2)
        const PCD = 1 << 4;
        /// Accessed; indicates whether software has accessed the 4-KByte page referenced by this entry (see Section 4.8)
        const A = 1 << 5;
        /// Dirty; indicates whether software has written to the 4-KByte page referenced by this entry (see Section 4.8)
        const D = 1 << 6;
        /// Indirectly determines the memory type used to access the 4-KByte page referenced by this entry (see Section 4.9.2)
        const PAT = 1 << 7;
        /// Global; if CR4.PGE = 1, determines whether the translation is global (see Section 4.10); ignored otherwise
        const G = 1 << 8;
        #[doc(hidden)] const _IGN_9 = 1 << 9;
        #[doc(hidden)] const _IGN_10 = 1 << 10;
        /// For ordinary paging, ignored; for HLAT paging, restart (if 1, linear-address translation is restarted with ordinary paging)
        const R = 1 << 11;
        #[doc(hidden)] const _IGN_52 = 1 << 52;
        #[doc(hidden)] const _IGN_53 = 1 << 53;
        #[doc(hidden)] const _IGN_54 = 1 << 54;
        #[doc(hidden)] const _IGN_55 = 1 << 55;
        #[doc(hidden)] const _IGN_56 = 1 << 56;
        #[doc(hidden)] const _IGN_57 = 1 << 57;
        #[doc(hidden)] const _IGN_58 = 1 << 58;
        /// Protection key bit 0; if CR4.PKE = 1 or CR4.PKS = 1, this may control the page’s access rights (see Section 4.6.2); otherwise, it is ignored and not used to control access rights.
        const PK_0 = 1 << 59;
        /// Protection key bit 1; if CR4.PKE = 1 or CR4.PKS = 1, this may control the page’s access rights (see Section 4.6.2); otherwise, it is ignored and not used to control access rights.
        const PK_1 = 1 << 60;
        /// Protection key bit 2; if CR4.PKE = 1 or CR4.PKS = 1, this may control the page’s access rights (see Section 4.6.2); otherwise, it is ignored and not used to control access rights.
        const PK_2 = 1 << 61;
        /// Protection key bit 3; if CR4.PKE = 1 or CR4.PKS = 1, this may control the page’s access rights (see Section 4.6.2); otherwise, it is ignored and not used to control access rights.
        const PK_3 = 1 << 62;
        /// If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not allowed from the 4-KByte page controlled by this entry; see Section 4.6); otherwise, reserved (must be 0)
        const XD = 1 << 63;
    }
}

/// Struct for invalidating a TLB (Translation Lookaside Buffer) entry.
///
/// This struct is responsible for invalidating the TLB entry associated with a
/// specific virtual address (`Va`). TLB entries are cached mappings between
/// virtual addresses and physical addresses, and they need to be invalidated
/// when the corresponding page table entries are modified or removed.
///
/// This struct provides methods for invalidating the TLB entry
/// and safely forgetting the modification. It internally holds the page
/// backing the mapping, delaying the free until the TLB entry is invalidated.
pub struct StaleTLBEntry(Va, Page);

impl core::ops::Deref for StaleTLBEntry {
    type Target = Page;
    fn deref(&self) -> &Self::Target {
        &self.1
    }
}

impl core::ops::DerefMut for StaleTLBEntry {
    fn deref_mut(&mut self) -> &mut Self::Target {
        &mut self.1
    }
}

impl StaleTLBEntry {
    /// Create a new StaleTLBEntry.
    pub fn new(va: Va, page: Page) -> Self {
        Self(va, page)
    }

    /// Invalidate the underlying virtual address.
    ///
    /// This method issues an assembly instruction to invalidate the TLB entry
    /// corresponding to the given virtual address. The invalidation ensures
    /// that any cached translations are cleared and that the system will use
    /// the updated page table entries for subsequent address lookups.
    pub fn invalidate(self) -> Page {
        let va = self.0;
        let page = unsafe { core::ptr::read(&core::mem::ManuallyDrop::new(self).1) };

        unsafe {
            core::arch::asm!(
                "invlpg [{0}]",
                in(reg) va.into_usize(),
                options(nostack)
            );
        }

        TlbIpi::send(Cr3::current(), Some(va));
        page
    }
}

impl Drop for StaleTLBEntry {
    fn drop(&mut self) {
        panic!(
            "TLB entry for {:?} is not invalidated. You must call `.invalidate()`.",
            self.0,
        );
    }
}
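
// Hedged sketch (not part of the original module): one plausible shape of an
// unmap path using `StaleTLBEntry`. The caller is assumed to own the `Page`
// that backed the mapping and the `Pte` that mapped it; the crate's real
// unmap routine may differ.
#[allow(dead_code)]
fn unmap_one_sketch(pte: &mut Pte, va: Va, page: Page) -> Page {
    // Remove the translation first so no new TLB fills can occur, then
    // invalidate the stale entry on this CPU and notify the other CPUs.
    let _old_pa = unsafe { pte.clear() };
    StaleTLBEntry::new(va, page).invalidate()
}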

/// Flush the TLB of the current CPU (TLB shootdown).
///
/// This function reloads CR3 to invalidate the TLB entries of the current
/// CPU. The invalidation ensures that any cached translations are cleared and
/// that the system will use the updated page table entries for subsequent
/// address lookups. It then notifies the other CPUs via [`TlbIpi`].
pub fn tlb_shutdown() {
    unsafe {
        core::arch::asm! {
            "mov rax, cr3",
            "mov cr3, rax",
            out("rax") _,
            options(nostack)
        }

        TlbIpi::send(Cr3::current(), None);
    }
}

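
// Hedged sketch (not part of the original module): when many entries change
// at once, a single full flush via `tlb_shutdown` can stand in for per-page
// `StaleTLBEntry` invalidation. `ptes` is a hypothetical slice of entries the
// caller already owns exclusively.
#[allow(dead_code)]
fn bulk_clear_sketch(ptes: &mut [Pte]) {
    for pte in ptes.iter_mut() {
        // The safety contract of `Pte::clear` (invalidate the corresponding
        // TLB entry) is discharged by the full flush after the loop.
        let _ = unsafe { pte.clear() };
    }
    tlb_shutdown();
}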
/// Page Table Mapping Error.
///
/// This enum represents errors that can occur when working with page table
/// mappings in the virtual memory system. It is used to indicate specific
/// issues that arise during memory address mapping operations, such as setting
/// up or updating page tables.
#[derive(Debug, PartialEq, Eq)]
pub enum PageTableMappingError {
    /// Unaligned address.
    ///
    /// This error is returned when an address provided for a page table entry
    /// is not properly aligned to the required page size. For example, the
    /// address might not be a multiple of 4KB (on x86_64 systems).
    Unaligned,

    /// Not exist.
    ///
    /// This error is returned when a requested page table entry does not exist
    /// or is invalid. For instance, it could occur when trying to access an
    /// entry that is not present or has not been mapped yet.
    NotExist,

    /// Duplicated mapping.
    ///
    /// This error is returned when an attempt is made to create a duplicate
    /// mapping for an address that already has an existing mapping.
    Duplicated,

    /// Invalid permission.
    ///
    /// This error is returned when an attempt is made to create a mapping with
    /// an invalid permission.
    InvalidPermission,
}

bitflags::bitflags! {
    /// Possible memory permissions for a page.
    ///
    /// This defines the various permissions that can be assigned
    /// to memory pages in a page table. Each permission is represented by a single bit,
    /// allowing for efficient bitwise operations to check or modify permissions.
    ///
    /// The [`Permission`] allows you to specify memory access permissions such as:
    /// - Whether a page is readable.
    /// - Whether a page is writable.
    /// - Whether a page is executable.
    /// - Whether a page can be accessed by user applications.
    pub struct Permission: usize {
        /// Page is readable.
        ///
        /// This permission allows read access to the page. The page can be
        /// accessed for reading data.
        const READ = 1 << 0;

        /// Page is writable.
        ///
        /// This permission allows write access to the page. The page can be
        /// modified by a process.
        const WRITE = 1 << 1;

        /// Page is executable.
        ///
        /// This permission allows the page to be executed. The page can contain
        /// code that is executed by the CPU, such as instructions.
        const EXECUTABLE = 1 << 2;

        /// Page can be referred to by user applications.
        ///
        /// This permission allows the page to be accessed by user-mode applications.
        /// Typically, the kernel uses this flag to differentiate between user-mode and
        /// kernel-mode access.
        const USER = 1 << 3;
    }
}

impl Permission {
    /// All possible permissions.
    pub const ALL_CASES: [Permission; 16] = [
        Permission::from_bits_truncate(0),
        Permission::from_bits_truncate(1),
        Permission::from_bits_truncate(2),
        Permission::from_bits_truncate(3),
        Permission::from_bits_truncate(4),
        Permission::from_bits_truncate(5),
        Permission::from_bits_truncate(6),
        Permission::from_bits_truncate(7),
        Permission::from_bits_truncate(8),
        Permission::from_bits_truncate(9),
        Permission::from_bits_truncate(10),
        Permission::from_bits_truncate(11),
        Permission::from_bits_truncate(12),
        Permission::from_bits_truncate(13),
        Permission::from_bits_truncate(14),
        Permission::from_bits_truncate(15),
    ];
}
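
// Hedged sketch (not part of the original module): one plausible translation
// from the architecture-neutral `Permission` bits to `PteFlags`. This is not
// the crate's actual mapping routine; it only illustrates how the two flag
// sets relate (WRITE -> RW, USER -> US, absence of EXECUTABLE -> XD).
#[allow(dead_code)]
fn pte_flags_for_sketch(perm: Permission) -> Result<PteFlags, PageTableMappingError> {
    if !perm.contains(Permission::READ) {
        // x86_64 4-KByte mappings cannot express write-only or execute-only pages.
        return Err(PageTableMappingError::InvalidPermission);
    }
    let mut flags = PteFlags::P;
    if perm.contains(Permission::WRITE) {
        flags |= PteFlags::RW;
    }
    if perm.contains(Permission::USER) {
        flags |= PteFlags::US;
    }
    if !perm.contains(Permission::EXECUTABLE) {
        flags |= PteFlags::XD;
    }
    Ok(flags)
}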

/// A page table root.
///
/// It wraps the [`Pml4e`] array to ensure the page table is aligned to 4096
/// bytes. Note that entries at indices [`Self::KBASE`] and above must not be
/// modified, as they are reserved for kernel addresses.
#[repr(align(4096))]
#[derive(Debug)]
pub struct PageTableRoot([Pml4e; 512]);

impl Deref for PageTableRoot {
    type Target = [Pml4e; 512];
    fn deref(&self) -> &Self::Target {
        &self.0
    }
}

impl core::ops::Index<usize> for PageTableRoot {
    type Output = Pml4e;
    fn index(&self, index: usize) -> &Self::Output {
        &self.0[index]
    }
}

impl core::ops::IndexMut<usize> for PageTableRoot {
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        if index >= Self::KBASE {
            let kernel_pt = unsafe {
                (Pa::new({
                    unsafe extern "C" {
                        static mut boot_pml4e: u64;
                    }
                    boot_pml4e as usize
                })
                .unwrap()
                .into_kva()
                .into_usize() as *const [Pml4e; 512])
                    .as_ref()
                    .unwrap()
            };
            if kernel_pt[index].pa().is_some() && kernel_pt[index].pa() == self.0[index].pa() {
                panic!(
                    "Trying to modify entries for kernel page table: {} (limit: {}).",
                    index,
                    Self::KBASE
                );
            }
        }
        &mut self.0[index]
    }
}

impl PageTableRoot {
    /// Base of the PML4 indices occupied by kernel addresses.
    pub const KBASE: usize = 256;

    /// Create an empty [`PageTableRoot`].
    pub fn new_boxed() -> Box<Self> {
        Box::new(PageTableRoot([Pml4e(0); 512]))
    }

    /// Create a new [`PageTableRoot`] that is allowed to access the kernel
    /// addresses.
    pub fn new_boxed_with_kernel_addr() -> Box<Self> {
        let kernel_pt = unsafe {
            (Pa::new({
                unsafe extern "C" {
                    static mut boot_pml4e: u64;
                }
                boot_pml4e as usize
            })
            .unwrap()
            .into_kva()
            .into_usize() as *const [Pml4e; 512])
                .as_ref()
                .unwrap()
        };
        let mut this = Self::new_boxed();
        this.0[Self::KBASE..512].copy_from_slice(&kernel_pt[Self::KBASE..512]);
        this
    }
}
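
// Hedged sketch (not part of the original module): building a fresh user
// address space that shares the kernel half, then pointing its first user
// PML4 slot at a new PDP table. `pdp_table_pa` is a hypothetical, page-aligned
// physical address of a zeroed page supplied by the caller's frame allocator.
#[allow(dead_code)]
fn new_user_root_sketch(pdp_table_pa: Pa) -> Result<Box<PageTableRoot>, PageTableMappingError> {
    let mut root = PageTableRoot::new_boxed_with_kernel_addr();
    // Index 0 is below `PageTableRoot::KBASE`, so `IndexMut` permits the write.
    root[0]
        .set_pa(pdp_table_pa)?
        .set_flags(Pml4eFlags::P | Pml4eFlags::RW | Pml4eFlags::US);
    Ok(root)
}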