keos/mm/
page_table.rs

//! Entries of Page Table and their permissions.
2use crate::{
3    addressing::{Pa, Va},
4    mm::{Page, tlb::TlbIpi},
5    sync::atomic::AtomicUsize,
6};
7use abyss::{MAX_CPU, x86_64::Cr3};
8use alloc::boxed::Box;
9use core::ops::Deref;
10
bitflags::bitflags! {
    /// Flags for pml4e.
    ///
    /// Bit layout follows the x86_64 4-level paging format (Intel SDM Vol. 3A,
    /// Section 4.5). Bits 12..=51 are deliberately absent: they hold the
    /// physical address of the referenced page-directory-pointer table, and
    /// [`Pml4e::pa`] recovers that address by masking out `all()` of these
    /// flag bits.
    pub struct Pml4eFlags: usize {
        /// Present; must be 1 to reference a page-directory-pointer table
        const P = 1 << 0;
        /// Read/write; if 0, writes may not be allowed to the 512-GByte region controlled by this entry (see Section 4.6).
        const RW = 1 << 1;
        /// User/supervisor; if 0, user-mode accesses are not allowed to the 512-GByte region controlled by this entry (see Section 4.6)
        const US = 1 << 2;
        /// Page-level write-through; indirectly determines the memory type used to access the page-directory-pointer table referenced by this entry (see Section 4.9.2)
        const PWT = 1 << 3;
        /// Page-level cache disable; indirectly determines the memory type used to access the page-directory-pointer table referenced by this entry (see Section 4.9.2)
        const PCD = 1 << 4;
        /// Accessed; indicates whether this entry has been used for linear-address translation (see Section 4.8)
        const A = 1 << 5;
        // Bit 6 and bits 8..=10 are ignored by the MMU; bit 7 is reserved
        // (must be 0) in a PML4E.
        #[doc(hidden)] const _IGN_6 = 1 << 6;
        #[doc(hidden)] const _REV_0 = 1 << 7;
        #[doc(hidden)] const _IGN_8 = 1 << 8;
        #[doc(hidden)] const _IGN_9 = 1 << 9;
        #[doc(hidden)] const _IGN_10 = 1 << 10;
        /// For ordinary paging, ignored; for HLAT paging, restart (if 1, linear-address translation is restarted with ordinary paging)
        const R = 1 << 11;
        // Bits 52..=62 are ignored by the MMU but are declared here so that
        // `all()` covers every non-address bit (see `Pml4e::pa`).
        #[doc(hidden)] const _IGN_52 = 1 << 52;
        #[doc(hidden)] const _IGN_53 = 1 << 53;
        #[doc(hidden)] const _IGN_54 = 1 << 54;
        #[doc(hidden)] const _IGN_55 = 1 << 55;
        #[doc(hidden)] const _IGN_56 = 1 << 56;
        #[doc(hidden)] const _IGN_57 = 1 << 57;
        #[doc(hidden)] const _IGN_58 = 1 << 58;
        #[doc(hidden)] const _IGN_59 = 1 << 59;
        #[doc(hidden)] const _IGN_60 = 1 << 60;
        #[doc(hidden)] const _IGN_61 = 1 << 61;
        #[doc(hidden)] const _IGN_62 = 1 << 62;
        /// If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not allowed from the 512-GByte region controlled by this entry; see Section 4.6); otherwise, reserved (must be 0)
        const XD = 1 << 63;
    }
}
48
49/// Page Map Level 4 Entry (PML4E).
50///
51/// This struct represents a **Page Map Level 4 Entry** (PML4E), which is the
52/// top-level entry in the 4-level page table system used in x86_64
53/// architecture. A PML4E is the highest-level entry in the virtual memory
54/// hierarchy and points to a **Page Directory Pointer Table** (PDP) or a
55/// higher-level page table that contains further mappings for virtual to
56/// physical memory.
57///
58/// The [`Pml4e`] struct provides methods for working with the physical address
59/// and flags associated with a PML4E, allowing manipulation of page tables in
60/// the virtual memory system.
#[derive(Clone, Copy)]
// repr(transparent): a `Pml4e` has exactly the in-memory layout of the raw
// `usize` table entry, so hardware page-table memory can be reinterpreted as
// entries (see `Pml4e::into_pdp`, which builds a 512-entry slice this way).
#[repr(transparent)]
pub struct Pml4e(pub usize);
64
65impl core::fmt::Debug for Pml4e {
66    fn fmt(&self, f: &mut core::fmt::Formatter<'_>) -> core::fmt::Result {
67        if let Some(pa) = self.pa() {
68            write!(f, "Pml4e({:016x}, {:?})", pa.into_usize(), self.flags())
69        } else {
70            write!(f, ".")
71        }
72    }
73}
74
75impl Pml4e {
76    /// Get the physical address pointed to by this entry.
77    ///
78    /// This function checks whether the PML4 entry is **present** (i.e., if the
79    /// "P" flag is set in the entry). If the entry is present, it extracts
80    /// the physical address by clearing the flags from the entry.
81    ///
82    /// # Returns
83    /// - `Some(Pa)` if the PML4E is present, containing the physical address.
84    /// - `None` if the PML4E is not present (i.e., the "P" flag is not set).
85    #[inline]
86    pub const fn pa(&self) -> Option<Pa> {
87        if self.flags().contains(Pml4eFlags::P) {
88            Pa::new(self.0 & !Pml4eFlags::all().bits())
89        } else {
90            None
91        }
92    }
93
94    /// Get the flags associated with this entry.
95    ///
96    /// This function extracts the flags from the PML4E, which may indicate
97    /// whether the page map level entry is present,
98    /// writable, user-accessible, etc.
99    ///
100    /// # Returns
101    /// A [`Pml4eFlags`] value representing the flags associated with this
102    /// entry.
103    #[inline]
104    pub const fn flags(&self) -> Pml4eFlags {
105        Pml4eFlags::from_bits_truncate(self.0)
106    }
107
108    /// Set the physical address for this entry.
109    ///
110    /// This method updates the physical address of the PML4E while preserving
111    /// the current flags (e.g., read/write permissions). It ensures that
112    /// the provided physical address is aligned to a 4K boundary (the page
113    /// size), as required by the architecture.
114    ///
115    /// # Parameters
116    /// - `pa`: The new physical address to set for the entry.
117    ///
118    /// # Returns
119    /// - `Ok(&mut Self)` if the address is valid and the update is successful.
120    /// - `Err(PageTableMappingError::Unaligned)` if the provided physical
121    ///   address is not aligned.
122    ///
123    /// # Warning
124    /// This operation does not modify the flags of the entry.
125    #[inline]
126    pub fn set_pa(&mut self, pa: Pa) -> Result<&mut Self, PageTableMappingError> {
127        let pa = { pa.into_usize() };
128        if pa & 0xfff != 0 {
129            Err(PageTableMappingError::Unaligned)
130        } else {
131            self.0 = pa | self.flags().bits() | Pml4eFlags::P.bits();
132            Ok(self)
133        }
134    }
135
136    /// Set the flags for this entry.
137    ///
138    /// This method allows you to update the flags associated with the PML4E
139    /// without modifying the physical address. It combines the current
140    /// physical address with the new flags and sets the updated value back into
141    /// the entry.
142    ///
143    /// # Parameters
144    /// - `perm`: The new set of flags to assign to the entry.
145    ///
146    /// # Returns
147    /// A mutable reference to `self`, allowing for method chaining.
148    #[inline]
149    pub fn set_flags(&mut self, perm: Pml4eFlags) -> &mut Self {
150        self.0 = self.pa().map(|n| n.into_usize()).unwrap_or(0) | perm.bits();
151        self
152    }
153
154    /// Clears the entry.
155    ///
156    /// This method removes any previously set physical address and flags from
157    /// the entry. If the entry contained a valid physical address before
158    /// being cleared, that address is returned.
159    ///
160    /// # Returns
161    /// - `Some(Pa)`: The physical address that was previously stored in the
162    ///   entry, if it existed.
163    /// - `None`: If the entry did not contain a valid physical address.
164    #[inline]
165    pub fn clear(&mut self) -> Option<Pa> {
166        self.pa().inspect(|_| {
167            self.0 = 0;
168        })
169    }
170
171    /// Get a mutable reference to the page directory pointer table pointed to
172    /// by this entry.
173    ///
174    /// This method retrieves a mutable reference to the page directory pointer
175    /// table (PDP) that this PML4E points to, assuming that the entry is
176    /// present (i.e., the "P" flag is set).
177    ///
178    /// # Returns
179    /// - `Ok(&mut [Pdpe])` if the page directory pointer table is valid,
180    ///   represented as a mutable slice of `Pdpe` (page directory pointer
181    ///   entries).
182    /// - `Err(PageTableMappingError::NotExist)` if the PML4E is not present or
183    ///   invalid.
184    ///
185    /// # Safety
186    /// This operation assumes that the physical address of the page directory
187    /// pointer table is valid and properly aligned.
188    #[inline]
189    pub fn into_pdp_mut(&mut self) -> Result<&mut [Pdpe], PageTableMappingError> {
190        let pa = self.pa().ok_or(PageTableMappingError::NotExist)?;
191        if !self.flags().contains(Pml4eFlags::P) {
192            return Err(PageTableMappingError::NotExist);
193        }
194        unsafe {
195            Ok(core::slice::from_raw_parts_mut(
196                pa.into_kva().into_usize() as *mut Pdpe,
197                512,
198            ))
199        }
200    }
201
202    /// Get a reference to the page directory pointer table pointed to by this
203    /// entry.
204    ///
205    /// This method retrieves an immutable reference to the page directory
206    /// pointer table (PDP) that this PML4E points to, assuming that the
207    /// entry is present (i.e., the "P" flag is set).
208    ///
209    /// # Returns
210    /// - `Ok(&[Pdpe])` if the page directory pointer table is valid,
211    ///   represented as an immutable slice of `Pdpe` (page directory pointer
212    ///   entries).
213    /// - `Err(PageTableMappingError::NotExist)` if the PML4E is not present or
214    ///   invalid.
215    ///
216    /// # Safety
217    /// This operation assumes that the physical address of the page directory
218    /// pointer table is valid and properly aligned.
219    #[inline]
220    pub fn into_pdp(&self) -> Result<&[Pdpe], PageTableMappingError> {
221        let pa = self.pa().ok_or(PageTableMappingError::NotExist)?;
222        if !self.flags().contains(Pml4eFlags::P) {
223            return Err(PageTableMappingError::NotExist);
224        }
225        unsafe {
226            Ok(core::slice::from_raw_parts(
227                pa.into_kva().into_usize() as *const Pdpe,
228                512,
229            ))
230        }
231    }
232}
233
bitflags::bitflags! {
    /// Flags for pdpe.
    ///
    /// Bit layout follows the x86_64 4-level paging format (Intel SDM Vol. 3A,
    /// Section 4.5). Bits 12..=51 are deliberately absent: they hold the
    /// physical address of the referenced page directory, and [`Pdpe::pa`]
    /// recovers that address by masking out `all()` of these flag bits.
    pub struct PdpeFlags: usize {
        /// Present; must be 1 to reference a page directory
        const P = 1 << 0;
        /// Read/write; if 0, writes may not be allowed to the 1-GByte region controlled by this entry (see Section 4.6)
        const RW = 1 << 1;
        /// User/supervisor; if 0, user-mode accesses are not allowed to the 1-GByte region controlled by this entry (see Section 4.6)
        const US = 1 << 2;
        /// Page-level write-through; indirectly determines the memory type used to access the page directory referenced by this entry (see Section 4.9.2)
        const PWT = 1 << 3;
        /// Page-level cache disable; indirectly determines the memory type used to access the page directory referenced by this entry (see Section 4.9.2)
        const PCD = 1 << 4;
        /// Accessed; indicates whether this entry has been used for linear-address translation (see Section 4.8)
        const A = 1 << 5;
        // Bit 6 and bits 8..=10 are ignored by the MMU.
        // NOTE(review): architecturally, bit 7 of a PDPTE is PS (1-GByte
        // page) rather than reserved; this kernel appears to map only via
        // lower-level tables here — confirm 1-GByte pages are unused.
        #[doc(hidden)] const _IGN_6 = 1 << 6;
        #[doc(hidden)] const _REV_0 = 1 << 7;
        #[doc(hidden)] const _IGN_8 = 1 << 8;
        #[doc(hidden)] const _IGN_9 = 1 << 9;
        #[doc(hidden)] const _IGN_10 = 1 << 10;
        /// For ordinary paging, ignored; for HLAT paging, restart (if 1, linear-address translation is restarted with ordinary paging)
        const R = 1 << 11;
        // Bits 52..=62 are ignored by the MMU but are declared here so that
        // `all()` covers every non-address bit (see `Pdpe::pa`).
        #[doc(hidden)] const _IGN_52 = 1 << 52;
        #[doc(hidden)] const _IGN_53 = 1 << 53;
        #[doc(hidden)] const _IGN_54 = 1 << 54;
        #[doc(hidden)] const _IGN_55 = 1 << 55;
        #[doc(hidden)] const _IGN_56 = 1 << 56;
        #[doc(hidden)] const _IGN_57 = 1 << 57;
        #[doc(hidden)] const _IGN_58 = 1 << 58;
        #[doc(hidden)] const _IGN_59 = 1 << 59;
        #[doc(hidden)] const _IGN_60 = 1 << 60;
        #[doc(hidden)] const _IGN_61 = 1 << 61;
        #[doc(hidden)] const _IGN_62 = 1 << 62;
        /// If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not allowed from the 1-GByte region controlled by this entry; see Section 4.6); otherwise, reserved (must be 0)
        const XD = 1 << 63;
    }
}
271
272/// Page Directory Pointer Table Entry (PDPE).
273///
274/// This struct represents a **Page Directory Pointer Table Entry** (PDPE), the
275/// entry of second-level table, in the 4-level page table system for x86_64
276/// architecture. A PDPE is the second-level entry in the virtual memory
277/// hierarchy, directly pointing to a **Page Directory** (PDE) or a higher-level
278/// page table that contains further mappings for virtual to physical memory.
279///
280/// The [`Pdpe`] struct provides methods for working with the physical address
281/// and flags associated with a PDPE, allowing the manipulation of page tables
282/// in the virtual memory system.
#[derive(Clone, Copy)]
// repr(transparent): a `Pdpe` has exactly the in-memory layout of the raw
// `usize` table entry, so hardware page-table memory can be reinterpreted as
// entries (see `Pml4e::into_pdp`, which builds a 512-entry slice this way).
#[repr(transparent)]
pub struct Pdpe(pub usize);
286
287impl Pdpe {
288    /// Get the physical address pointed to by this entry.
289    ///
290    /// This function checks whether the page directory pointer table entry is
291    /// **present** (i.e., if the "P" flag is set in the entry).
292    /// If the entry is present, it extracts the physical address by clearing
293    /// the flags from the entry.
294    ///
295    /// # Returns
296    /// - `Some(Pa)` if the PDPE is present, containing the physical address.
297    /// - `None` if the PDPE is not present (i.e., the "P" flag is not set).
298    #[inline]
299    pub const fn pa(&self) -> Option<Pa> {
300        if self.flags().contains(PdpeFlags::P) {
301            Pa::new(self.0 & !PdpeFlags::all().bits())
302        } else {
303            None
304        }
305    }
306
307    /// Get the flags associated with this entry.
308    ///
309    /// This function extracts the flags from the PDPE, which may indicate
310    /// whether the page directory pointer table entry is present, writable,
311    /// user-accessible, etc.
312    ///
313    /// # Returns
314    /// A [`PdpeFlags`] value representing the flags associated with this entry.
315    #[inline]
316    pub const fn flags(&self) -> PdpeFlags {
317        PdpeFlags::from_bits_truncate(self.0)
318    }
319
320    /// Set the physical address for this entry.
321    ///
322    /// This method updates the physical address of the PDPE while preserving
323    /// the current flags (e.g., read/write permissions). It ensures that
324    /// the provided physical address is aligned to a 4K boundary (the page
325    /// size), as required by the architecture.
326    ///
327    /// # Parameters
328    /// - `pa`: The new physical address to set for the entry.
329    ///
330    /// # Returns
331    /// - `Ok(&mut Self)` if the address is valid and the update is successful.
332    /// - `Err(PageTableMappingError::Unaligned)` if the provided physical
333    ///   address is not aligned.
334    ///
335    /// # Warning
336    /// This operation does not modify the flags of the entry.
337    #[inline]
338    pub fn set_pa(&mut self, pa: Pa) -> Result<&mut Self, PageTableMappingError> {
339        let pa = { pa.into_usize() };
340        if pa & 0xfff != 0 {
341            Err(PageTableMappingError::Unaligned)
342        } else {
343            self.0 = pa | self.flags().bits() | PdpeFlags::P.bits();
344            Ok(self)
345        }
346    }
347
348    /// Set the flags for this entry.
349    ///
350    /// This method allows you to update the flags associated with the PDPE
351    /// without modifying the physical address. It combines the current
352    /// physical address with the new flags and sets the updated value back into
353    /// the entry.
354    ///
355    /// # Parameters
356    /// - `perm`: The new set of flags to assign to the entry.
357    ///
358    /// # Returns
359    /// A mutable reference to `self`, allowing for method chaining.
360    #[inline]
361    pub fn set_flags(&mut self, perm: PdpeFlags) -> &mut Self {
362        self.0 = self.pa().map(|n| n.into_usize()).unwrap_or(0) | perm.bits();
363        self
364    }
365
366    /// Clears the entry.
367    ///
368    /// This method removes any previously set physical address and flags from
369    /// the entry. If the entry contained a valid physical address before
370    /// being cleared, that address is returned.
371    ///
372    /// # Returns
373    /// - `Some(Pa)`: The physical address that was previously stored in the
374    ///   entry, if it existed.
375    /// - `None`: If the entry did not contain a valid physical address.
376    #[inline]
377    pub fn clear(&mut self) -> Option<Pa> {
378        self.pa().inspect(|_| {
379            self.0 = 0;
380        })
381    }
382
383    /// Get a mutable reference to the page directory pointed to by this entry.
384    ///
385    /// This method retrieves a mutable reference to the page directory that
386    /// this PDPE points to, assuming that the entry is present (i.e., the
387    /// "P" flag is set).
388    ///
389    /// # Returns
390    /// - `Ok(&mut [Pde])` if the page directory is valid, represented as a
391    ///   mutable slice of `Pde` (page directory entries).
392    /// - `Err(PageTableMappingError::NotExist)` if the PDPE is not present or
393    ///   invalid.
394    ///
395    /// # Safety
396    /// This operation assumes that the physical address of the page directory
397    /// is valid and properly aligned.
398    #[inline]
399    pub fn into_pd_mut(&mut self) -> Result<&mut [Pde], PageTableMappingError> {
400        let pa = self.pa().ok_or(PageTableMappingError::NotExist)?;
401        if !self.flags().contains(PdpeFlags::P) {
402            return Err(PageTableMappingError::NotExist);
403        }
404        unsafe {
405            Ok(core::slice::from_raw_parts_mut(
406                pa.into_kva().into_usize() as *mut Pde,
407                512,
408            ))
409        }
410    }
411
412    /// Get a reference to the page directory pointed to by this entry.
413    ///
414    /// This method retrieves an immutable reference to the page directory that
415    /// this PDPE points to, assuming that the entry is present (i.e., the
416    /// "P" flag is set).
417    ///
418    /// # Returns
419    /// - `Ok(&[Pde])` if the page directory is valid, represented as an
420    ///   immutable slice of `Pde` (page directory entries).
421    /// - `Err(PageTableMappingError::NotExist)` if the PDPE is not present or
422    ///   invalid.
423    ///
424    /// # Safety
425    /// This operation assumes that the physical address of the page directory
426    /// is valid and properly aligned.
427    #[inline]
428    pub fn into_pd(&self) -> Result<&[Pde], PageTableMappingError> {
429        let pa = self.pa().ok_or(PageTableMappingError::NotExist)?;
430        if !self.flags().contains(PdpeFlags::P) {
431            return Err(PageTableMappingError::NotExist);
432        }
433        unsafe {
434            Ok(core::slice::from_raw_parts(
435                pa.into_kva().into_usize() as *const Pde,
436                512,
437            ))
438        }
439    }
440}
441
442/// Page Directory Entry (PDE).
443///
444/// This struct represents a **Page Directory Entry** (PDE), entry of
445/// third-level table, in the 4-level page table system for x86_64 architecture.
446/// A page directory entry typically holds the information about the physical
447/// address of a page directory or a page table, along with various flags.
448/// In a paging system, a PDE points to a page table, which in turn contains the
449/// actual page table entries (PTEs) that map virtual addresses to physical
450/// addresses.
451///
452/// The [`Pde`] struct in this code provides access to these fields and
453/// operations on them. Each entry corresponds to a page directory in the
454/// virtual memory hierarchy and is used by the kernel to map higher-level
455/// virtual addresses to lower-level page table entries.
#[derive(Clone, Copy)]
// repr(transparent): a `Pde` has exactly the in-memory layout of the raw
// `usize` table entry, so hardware page-table memory can be reinterpreted as
// entries (see `Pdpe::into_pd`, which builds a 512-entry slice this way).
#[repr(transparent)]
pub struct Pde(pub usize);
459
460impl Pde {
461    /// Get the physical address pointed to by this entry.
462    ///
463    /// This function checks whether the page directory entry is **present**
464    /// (i.e., if the "P" flag is set in the entry). If the page directory
465    /// entry is present, it extracts the physical address by clearing the flags
466    /// from the entry.
467    ///
468    /// # Returns
469    /// - `Some(Pa)` if the page directory entry is present, containing the
470    ///   physical address.
471    /// - `None` if the page directory entry is not present (i.e., the "P" flag
472    ///   is not set).
473    #[inline]
474    pub const fn pa(&self) -> Option<Pa> {
475        if self.flags().contains(PdeFlags::P) {
476            Pa::new(self.0 & !PdeFlags::all().bits())
477        } else {
478            None
479        }
480    }
481
482    /// Get the flags associated with this entry.
483    ///
484    /// This function extracts the flags from the page directory entry, which
485    /// may indicate whether the page directory is present,
486    /// writable, user-accessible, etc.
487    ///
488    /// # Returns
489    /// A [`PdeFlags`] value representing the flags associated with this entry.
490    #[inline]
491    pub const fn flags(&self) -> PdeFlags {
492        PdeFlags::from_bits_truncate(self.0)
493    }
494
495    /// Set the physical address for this entry.
496    ///
497    /// This method updates the physical address of the page directory entry
498    /// while preserving the current flags (e.g., read/write permissions).
499    /// It checks that the provided physical address is aligned to a 4K boundary
500    /// (the page size), as required by the architecture.
501    ///
502    /// # Parameters
503    /// - `pa`: The new physical address to set for the entry.
504    ///
505    /// # Returns
506    /// - `Ok(&mut Self)` if the address is valid and the update is successful.
507    /// - `Err(PageTableMappingError::Unaligned)` if the provided physical
508    ///   address is not aligned.
509    ///
510    /// # Warning
511    /// This operation does not modify the flags of the entry.
512    #[inline]
513    pub fn set_pa(&mut self, pa: Pa) -> Result<&mut Self, PageTableMappingError> {
514        let pa = pa.into_usize();
515        if pa & 0xfff != 0 {
516            Err(PageTableMappingError::Unaligned)
517        } else {
518            self.0 = pa | self.flags().bits() | PdeFlags::P.bits();
519            Ok(self)
520        }
521    }
522
523    /// Set the flags for this entry.
524    ///
525    /// This method allows you to update the flags associated with the page
526    /// directory entry without modifying the physical address. It combines
527    /// the current physical address with the new flags and sets the updated
528    /// value back into the entry.
529    ///
530    /// # Parameters
531    /// - `perm`: The new set of flags to assign to the entry.
532    ///
533    /// # Returns
534    /// A mutable reference to `self`, allowing for method chaining.
535    #[inline]
536    pub fn set_flags(&mut self, perm: PdeFlags) -> &mut Self {
537        self.0 = self.pa().map(|n| n.into_usize()).unwrap_or(0) | perm.bits();
538        self
539    }
540
541    /// Clears the entry.
542    ///
543    /// This method removes any previously set physical address and flags from
544    /// the entry. If the entry contained a valid physical address before
545    /// being cleared, that address is returned.
546    ///
547    /// # Returns
548    /// - `Some(Pa)`: The physical address that was previously stored in the
549    ///   entry, if it existed.
550    /// - `None`: If the entry did not contain a valid physical address.
551    #[inline]
552    pub fn clear(&mut self) -> Option<Pa> {
553        self.pa().inspect(|_| {
554            self.0 = 0;
555        })
556    }
557
558    /// Get a mutable reference to the page table pointed to by this entry.
559    ///
560    /// This method retrieves a mutable reference to the page table that this
561    /// page directory entry points to, assuming that the entry is present
562    /// (i.e., the "P" flag is set).
563    ///
564    /// # Returns
565    /// - `Ok(&mut [Pte])` if the page table is valid, represented as a mutable
566    ///   slice of `Pte` (page table entries).
567    /// - `Err(PageTableMappingError::NotExist)` if the page directory entry is
568    ///   not present or invalid.
569    ///
570    /// # Safety
571    /// This operation assumes that the physical address of the page table is
572    /// valid and properly aligned.
573    #[inline]
574    pub fn into_pt_mut(&mut self) -> Result<&mut [Pte], PageTableMappingError> {
575        let pa = self.pa().ok_or(PageTableMappingError::NotExist)?;
576        if !self.flags().contains(PdeFlags::P) {
577            return Err(PageTableMappingError::NotExist);
578        }
579        unsafe {
580            Ok(core::slice::from_raw_parts_mut(
581                pa.into_kva().into_usize() as *mut Pte,
582                512,
583            ))
584        }
585    }
586
587    /// Get a reference to the page table pointed to by this entry.
588    ///
589    /// This method retrieves an immutable reference to the page table that this
590    /// page directory entry points to, assuming that the entry is present
591    /// (i.e., the "P" flag is set).
592    ///
593    /// # Returns
594    /// - `Ok(&[Pte])` if the page table is valid, represented as an immutable
595    ///   slice of `Pte` (page table entries).
596    /// - `Err(PageTableMappingError::NotExist)` if the page directory entry is
597    ///   not present or invalid.
598    ///
599    /// # Safety
600    /// This operation assumes that the physical address of the page table is
601    /// valid and properly aligned.
602    #[inline]
603    pub fn into_pt(&self) -> Result<&[Pte], PageTableMappingError> {
604        let pa = self.pa().ok_or(PageTableMappingError::NotExist)?;
605        if !self.flags().contains(PdeFlags::P) {
606            return Err(PageTableMappingError::NotExist);
607        }
608        unsafe {
609            Ok(core::slice::from_raw_parts(
610                pa.into_kva().into_usize() as *const Pte,
611                512,
612            ))
613        }
614    }
615}
616
617bitflags::bitflags! {
618    /// Flags for pde.
619    pub struct PdeFlags: usize {
620        /// Present; must be 1 to reference a page table
621        const P = 1 << 0;
622        /// Read/write; if 0, writes may not be allowed to the 2-MByte region controlled by this entry (see Section 4.6)
623        const RW = 1 << 1;
624        /// User/supervisor; if 0, user-mode accesses are not allowed to the 2-MByte region controlled by this entry (see Section 4.6)
625        const US = 1 << 2;
626        /// Page-level write-through; indirectly determines the memory type used to access the page table referenced by this entry (see Section 4.9.2)
627        const PWT = 1 << 3;
628        /// Page-level cache disable; indirectly determines the memory type used to access the page table referenced by this entry (see Section 4.9.2)
629        const PCD = 1 << 4;
630        /// Accessed; indicates whether this entry has been used for linear-address translation (see Section 4.8)
631        const A = 1 << 5;
632        /// Page size; indicates whether this entry is 2M page.
633        const PS = 1 << 7;
634        #[doc(hidden)] const _IGN_6 = 1 << 6;
635        #[doc(hidden)] const _REV_0 = 1 << 7;
636        #[doc(hidden)] const _IGN_8 = 1 << 8;
637        #[doc(hidden)] const _IGN_9 = 1 << 9;
638        #[doc(hidden)] const _IGN_10 = 1 << 10;
639        /// For ordinary paging, ignored; for HLAT paging, restart (if 1, linear-address translation is restarted with ordinary paging)
640        const R = 1 << 11;
641        #[doc(hidden)] const _IGN_52 = 1 << 52;
642        #[doc(hidden)] const _IGN_53 = 1 << 53;
643        #[doc(hidden)] const _IGN_54 = 1 << 54;
644        #[doc(hidden)] const _IGN_55 = 1 << 55;
645        #[doc(hidden)] const _IGN_56 = 1 << 56;
646        #[doc(hidden)] const _IGN_57 = 1 << 57;
647        #[doc(hidden)] const _IGN_58 = 1 << 58;
648        #[doc(hidden)] const _IGN_59 = 1 << 59;
649        #[doc(hidden)] const _IGN_60 = 1 << 60;
650        #[doc(hidden)] const _IGN_61 = 1 << 61;
651        #[doc(hidden)] const _IGN_62 = 1 << 62;
652        /// If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not allowed from the 2-MByte region controlled by this entry; see Section 4.6); otherwise, reserved (must be 0)
653        const XD = 1 << 63;
654    }
655}
656
657/// Page Table Entry (PTE).
658///
659/// This struct represents a Page Table Entry (PTE), the entry of last-level
660/// table, in the 4-level page table system for x86_64 architecture.
661/// A page table entry typically holds the information about the physical
662/// address of a page and various control bits, such as flags indicating whether
663/// the page is present, read/write, etc.
664///
665/// The [`Pte`] struct in this code provides access to these fields and
666/// operations on them. Each entry corresponds to a single page in memory and is
667/// used by the kernel to map virtual addresses to physical addresses in the
668/// page table.
#[derive(Clone, Copy)]
// repr(transparent): a `Pte` has exactly the in-memory layout of the raw
// `usize` table entry, so hardware page-table memory can be reinterpreted as
// entries (see `Pde::into_pt`, which builds a 512-entry slice this way).
#[repr(transparent)]
pub struct Pte(pub usize);
672
673impl Pte {
674    /// Get the physical address pointed to by this entry.
675    ///
676    /// This function checks whether the page is present (i.e., if the "P" flag
677    /// is set in the entry). If the page is present, it extracts the
678    /// physical address from the entry by clearing the flags bits.
679    ///
680    /// # Returns
681    /// - `Some(Pa)` if the page is present, containing the physical address.
682    /// - `None` if the page is not present (i.e., the "P" flag is not set).
683    #[inline]
684    pub const fn pa(&self) -> Option<Pa> {
685        if self.flags().contains(PteFlags::P) {
686            Pa::new(self.0 & !PteFlags::all().bits())
687        } else {
688            None
689        }
690    }
691
692    /// Get the flags associated with this page table entry.
693    ///
694    /// This function extracts the flags from the entry. The flags represent
695    /// various properties of the page, such as whether the page is present,
696    /// read-only, user-accessible, etc.
697    ///
698    /// # Returns
699    /// A [`PteFlags`] value representing the flags associated with this entry.
700    #[inline]
701    pub const fn flags(&self) -> PteFlags {
702        PteFlags::from_bits_truncate(self.0)
703    }
704
705    /// Set the physical address for this entry.
706    ///
707    /// This method updates the physical address of the entry, preserving the
708    /// current flags (e.g., read/write permissions). It checks that the
709    /// physical address is aligned to a 4K boundary (the page size), as
710    /// required by the architecture.
711    ///
712    /// # Safety
713    /// You must invalidate the corresponding TLB Entry.
714    ///
715    /// # Parameters
716    /// - `pa`: The new physical address to set for the entry.
717    ///
718    /// # Returns
719    /// - `Ok(&mut Self)` if the address is valid and the update is successful.
720    /// - `Err(PageTableMappingError::Unaligned)` if the provided physical
721    ///   address is not aligned.
722    ///
723    /// # Warning
724    /// This operation does not modify the flags of the entry.
725    #[inline]
726    pub fn set_pa(&mut self, pa: Pa) -> Result<&mut Self, PageTableMappingError> {
727        let pa = pa.into_usize();
728        if pa & 0xfff != 0 {
729            Err(PageTableMappingError::Unaligned)
730        } else {
731            self.0 = pa | self.flags().bits() | PteFlags::P.bits();
732            Ok(self)
733        }
734    }
735
736    /// Set the flags for this entry.
737    ///
738    /// This method allows you to update the flags associated with the page.
739    /// The physical address remains unchanged, but the permission settings
740    /// (e.g., read/write, user/kernel) can be updated.
741    ///  
742    /// # Parameters
743    /// - `perm`: The new set of flags to assign to the entry.
744    ///
745    /// # Returns
746    /// A mutable reference to `self`, allowing for method chaining.
747    ///   
748    ///  # Safety
749    /// You must invalidate the corresponding TLB Entry.
750    #[inline]
751    pub unsafe fn set_flags(&mut self, perm: PteFlags) -> &mut Self {
752        self.0 = self.pa().map(|n| n.into_usize()).unwrap_or(0) | perm.bits();
753        self
754    }
755
756    /// Clears the entry.
757    ///
758    /// This method removes any previously set physical address and flags from
759    /// the entry. If the entry contained a valid physical address before
760    /// being cleared, that address is returned.
761    ///
762    /// # Returns
763    /// - `Some(Pa)`: The physical address that was previously stored in the
764    ///   entry, if it existed.
765    /// - `None`: If the entry did not contain a valid physical address.
766    ///
767    /// # Safety
768    /// You must invalidate the corresponding TLB Entry.
769    #[inline]
770    pub unsafe fn clear(&mut self) -> Option<Pa> {
771        self.pa().inspect(|_| {
772            self.0 = 0;
773        })
774    }
775}
776
bitflags::bitflags! {
    /// Flags for pte (a page-table entry, the lowest level of the 4-level
    /// paging hierarchy, mapping a single 4-KByte page).
    ///
    /// The section references in each flag's doc point into the Intel SDM's
    /// paging chapter.
    pub struct PteFlags: usize {
        /// Present; must be 1 to map a 4-KByte page
        const P = 1 << 0;
        /// Read/write; if 0, writes may not be allowed to the 4-KByte page referenced by this entry (see Section 4.6)
        const RW = 1 << 1;
        /// User/supervisor; if 0, user-mode accesses are not allowed to the 4-KByte page referenced by this entry (see Section 4.6)
        const US = 1 << 2;
        /// Page-level write-through; indirectly determines the memory type used to access the 4-KByte page referenced by this entry (see Section 4.9.2)
        const PWT = 1 << 3;
        /// Page-level cache disable; indirectly determines the memory type used to access the 4-KByte page referenced by this entry (see Section 4.9.2)
        const PCD = 1 << 4;
        /// Accessed; indicates whether software has accessed the 4-KByte page referenced by this entry (see Section 4.8)
        const A = 1 << 5;
        /// Dirty; indicates whether software has written to the 4-KByte page referenced by this entry (see Section 4.8)
        const D = 1 << 6;
        /// Indirectly determines the memory type used to access the 4-KByte page referenced by this entry (see Section 4.9.2)
        const PAT = 1 << 7;
        /// Global; if CR4.PGE = 1, determines whether the translation is global (see Section 4.10); ignored otherwise
        const G = 1 << 8;
        #[doc(hidden)] const _IGN_9 = 1 << 9;
        #[doc(hidden)] const _IGN_10 = 1 << 10;
        /// For ordinary paging, ignored; for HLAT paging, restart (if 1, linear-address translation is restarted with ordinary paging)
        const R = 1 << 11;
        #[doc(hidden)] const _IGN_52 = 1 << 52;
        #[doc(hidden)] const _IGN_53 = 1 << 53;
        #[doc(hidden)] const _IGN_54 = 1 << 54;
        #[doc(hidden)] const _IGN_55 = 1 << 55;
        #[doc(hidden)] const _IGN_56 = 1 << 56;
        #[doc(hidden)] const _IGN_57 = 1 << 57;
        #[doc(hidden)] const _IGN_58 = 1 << 58;
        /// Protection key bit 0; if CR4.PKE = 1 or CR4.PKS = 1, this may control the page’s access rights (see Section 4.6.2); otherwise, it is ignored and not used to control access rights.
        const PK_0 = 1 << 59;
        /// Protection key bit 1; if CR4.PKE = 1 or CR4.PKS = 1, this may control the page’s access rights (see Section 4.6.2); otherwise, it is ignored and not used to control access rights.
        const PK_1 = 1 << 60;
        /// Protection key bit 2; if CR4.PKE = 1 or CR4.PKS = 1, this may control the page’s access rights (see Section 4.6.2); otherwise, it is ignored and not used to control access rights.
        const PK_2 = 1 << 61;
        /// Protection key bit 3; if CR4.PKE = 1 or CR4.PKS = 1, this may control the page’s access rights (see Section 4.6.2); otherwise, it is ignored and not used to control access rights.
        const PK_3 = 1 << 62;
        /// If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not allowed from the 4-KByte page controlled by this entry; see Section 4.6); otherwise, reserved (must be 0)
        const XD = 1 << 63;
    }
}
821
/// Struct for invalidating the TLB (Translation Lookaside Buffer) entry.
///
/// This struct is responsible for invalidating a TLB entry associated with a
/// specific virtual address (`Va`). TLB entries are cached mappings between
/// virtual addresses and physical addresses, and they need to be invalidated
/// when the corresponding page table entries are modified or removed.
///
/// This struct provides methods for invalidating the TLB entry
/// and safely forgetting the modification. This internally holds the page
/// to be invalidated, to delay the free until the tlb entry is invalidated.
///
/// Field 0 is the virtual address whose cached translation is stale; field 1
/// is the detached [`Page`] whose release is deferred until `invalidate` is
/// called. Dropping this type without calling `invalidate` panics (see the
/// `Drop` impl below).
pub struct StaleTLBEntry(Va, Page);
833
834impl core::ops::Deref for StaleTLBEntry {
835    type Target = Page;
836    fn deref(&self) -> &Self::Target {
837        &self.1
838    }
839}
840
841impl core::ops::DerefMut for StaleTLBEntry {
842    fn deref_mut(&mut self) -> &mut Self::Target {
843        &mut self.1
844    }
845}
846
impl StaleTLBEntry {
    /// Create a new StaleTLBEntry.
    ///
    /// # Parameters
    /// - `va`: The virtual address whose cached translation is now stale.
    /// - `page`: The page backing `va`; held here so that its release is
    ///   delayed until the TLB entry is invalidated.
    pub fn new(va: Va, page: Page) -> Self {
        Self(va, page)
    }

    /// Invalidate the underlying virtual address.
    ///
    /// This method issues an assembly instruction to invalidate the TLB entry
    /// corresponding to the given virtual address. The invalidation ensures
    /// that any cached translations are cleared and that the system will use
    /// the updated page table entries for subsequent address lookups.
    pub fn invalidate(self) -> Page {
        let va = self.0;
        // Move the `Page` out without running `Drop for StaleTLBEntry`
        // (which would panic): wrap `self` in `ManuallyDrop` to suppress the
        // destructor, then bitwise-copy the page out with `ptr::read`.
        // SAFETY: `self` is consumed here and never used again, so the page
        // is read exactly once and no double-drop can occur.
        let page = unsafe { core::ptr::read(&core::mem::ManuallyDrop::new(self).1) };

        // SAFETY: `invlpg` only drops the local CPU's cached translation for
        // `va`; it does not access memory (`nostack`: no stack use either).
        unsafe {
            core::arch::asm!(
                "invlpg [{0}]",
                in(reg) va.into_usize(),
                options(nostack)
            );
        }

        // Ask the other CPUs to flush their cached translation for `va` too.
        TlbIpi::send(Cr3::current(), Some(va));
        page
    }
}
875
876impl Drop for StaleTLBEntry {
877    fn drop(&mut self) {
878        panic!(
879            "TLB entry for {:?} is not invalidated. You must call `.invalidate()`.",
880            self.0,
881        );
882    }
883}
884
/// Shutdown the TLB.
///
/// This method issues an assembly instruction to invalidate all TLB
/// entries of the current CPU. The invalidation ensures that any cached
/// translations are cleared and that the system will use the updated
/// page table entries for subsequent address lookups.
///
/// # Parameters
/// - `pgtbl`: The page table root whose cached translations must be flushed.
pub fn tlb_shutdown(pgtbl: &PageTableRoot) {
    let pgtbl_pa = pgtbl.pa().into_usize();
    let curr_cr3 = Cr3::current();

    // Flush the local TLB only when the target table is the one currently
    // loaded on this CPU: rewriting CR3 with its own value invalidates the
    // non-global TLB entries without changing the address space.
    if pgtbl_pa == curr_cr3.into_usize() {
        // SAFETY: CR3 is written back with the exact value it already
        // holds, so the active address space is unchanged.
        unsafe {
            core::arch::asm! {
                "mov rax, cr3",
                "mov cr3, rax",
                out("rax") _,
                options(nostack)
            }
        }
    }

    // Notify other CPUs that may have this page table loaded; `None` means
    // a full flush rather than a single address.
    TlbIpi::send(Cr3(pgtbl_pa as u64), None);
}
908
/// Page Table Mapping Error.
///
/// This enum represents errors that can occur when working with page table
/// mappings in the virtual memory system. It is used to indicate specific
/// issues that arise during memory address mapping operations, such as setting
/// up or updating page tables.
///
/// The derived `PartialEq`/`Eq` allow callers to compare returned errors
/// directly (e.g., in tests or match guards).
#[derive(Debug, PartialEq, Eq)]
pub enum PageTableMappingError {
    /// Unaligned address.
    ///
    /// This error is returned when an address provided for a page table entry
    /// is not properly aligned to the required page size. For example, the
    /// address might not be a multiple of 4KB (on x86_64 systems).
    Unaligned,

    /// Not exist.
    ///
    /// This error is returned when a requested page table entry does not exist
    /// or is invalid. For instance, it could occur when trying to access an
    /// entry that is not present or has not been mapped yet.
    NotExist,

    /// Duplicated mapping.
    ///
    /// This error is returned when an attempt is made to create a duplicate
    /// mapping for an address that already has an existing mapping.
    Duplicated,

    /// Invalid permission.
    ///
    /// This error is returned when an attempt is made to create a mapping with
    /// an invalid permission.
    InvalidPermission,
}
943
bitflags::bitflags! {
    /// Possible memory permissions for a page.
    ///
    /// This defines the various permissions that can be assigned
    /// to memory pages in a page table. Each permission is represented by a single bit,
    /// allowing for efficient bitwise operations to check or modify permissions.
    ///
    /// The [`Permission`] allows you to specify memory access permissions such as:
    /// - Whether a page is readable.
    /// - Whether a page is writable.
    /// - Whether a page is executable.
    /// - Whether a page can be accessed by user applications.
    ///
    /// Every combination of these four bits is enumerated in
    /// [`Permission::ALL_CASES`].
    pub struct Permission: usize {
        /// Page is readable.
        ///
        /// This permission allows read access to the page. The page can be
        /// accessed for reading data.
        const READ = 1 << 0;

        /// Page is writable.
        ///
        /// This permission allows write access to the page. The page can be
        /// modified by a process.
        const WRITE = 1 << 1;

        /// Page is executable.
        ///
        /// This permission allows the page to be executed. The page can contain
        /// code that is executed by the CPU, such as instructions.
        const EXECUTABLE = 1 << 2;

        /// Page can be referred by user application.
        ///
        /// This permission allows the page to be accessed by user-mode applications.
        /// Typically, the kernel uses this flag to differentiate between user-mode and
        /// kernel-mode access.
        const USER = 1 << 3;
    }
}
983
984impl Permission {
985    /// All possible permissions.
986    pub const ALL_CASES: [Permission; 16] = [
987        Permission::from_bits_truncate(0),
988        Permission::from_bits_truncate(1),
989        Permission::from_bits_truncate(2),
990        Permission::from_bits_truncate(3),
991        Permission::from_bits_truncate(4),
992        Permission::from_bits_truncate(5),
993        Permission::from_bits_truncate(6),
994        Permission::from_bits_truncate(7),
995        Permission::from_bits_truncate(8),
996        Permission::from_bits_truncate(9),
997        Permission::from_bits_truncate(10),
998        Permission::from_bits_truncate(11),
999        Permission::from_bits_truncate(12),
1000        Permission::from_bits_truncate(13),
1001        Permission::from_bits_truncate(14),
1002        Permission::from_bits_truncate(15),
1003    ];
1004}
1005
/// A page table root.
///
/// It wraps the Pml4e array to ensure the page table aligns to 4096 bytes.
/// Note that it is not allowed to modify entries at indices greater than or
/// equal to [`Self::KBASE`], which are reserved for kernel addresses; the
/// `IndexMut` implementation checks this at runtime.
#[repr(align(4096))]
#[derive(Debug)]
pub struct PageTableRoot([Pml4e; 512]);
1014
1015impl Deref for PageTableRoot {
1016    type Target = [Pml4e; 512];
1017    fn deref(&self) -> &Self::Target {
1018        &self.0
1019    }
1020}
1021
1022impl core::ops::Index<usize> for PageTableRoot {
1023    type Output = Pml4e;
1024    fn index(&self, index: usize) -> &Self::Output {
1025        &self.0[index]
1026    }
1027}
1028
impl core::ops::IndexMut<usize> for PageTableRoot {
    /// Mutable access to the PML4 entry at `index`.
    ///
    /// Guards the kernel-reserved range: for `index >= KBASE`, if this
    /// table's entry still aliases the corresponding entry of the boot
    /// (kernel) page table, mutating it would corrupt the shared kernel
    /// mapping, so this panics. An entry in that range that no longer
    /// matches the boot table may be modified.
    fn index_mut(&mut self, index: usize) -> &mut Self::Output {
        if index >= Self::KBASE {
            // Locate the boot-time kernel PML4 via the `boot_pml4e` symbol.
            // Its value (not its address) appears to be the physical address
            // of the kernel page table — TODO confirm against the boot code.
            let kernel_pt = unsafe {
                (Pa::new({
                    unsafe extern "C" {
                        static mut boot_pml4e: u64;
                    }
                    boot_pml4e as usize
                })
                .unwrap()
                .into_kva()
                .into_usize() as *const [Pml4e; 512])
                    .as_ref()
                    .unwrap()
            };
            // Panic only when the entry is still shared with the kernel
            // table: a present entry with the same backing address means the
            // kernel mapping itself would be mutated through this table.
            if kernel_pt[index].pa().is_some() && kernel_pt[index].pa() == self.0[index].pa() {
                panic!(
                    "Trying to modify entries for kernel page table: {} (limit: {}).",
                    index,
                    Self::KBASE
                );
            }
        }
        &mut self.0[index]
    }
}
1057
impl PageTableRoot {
    /// Base of pml4 index occupied for kernel address.
    ///
    /// PML4 indices `KBASE..512` (the upper half of the virtual address
    /// space) belong to the kernel and are shared across page tables.
    pub const KBASE: usize = 256;

    /// Create a empty [`PageTableRoot`].
    ///
    /// All 512 entries start zeroed (not present); the table maps nothing.
    pub fn new_boxed() -> Box<Self> {
        Box::new(PageTableRoot([Pml4e(0); 512]))
    }

    /// Create a new [`PageTableRoot`] that allowed to access the kernel
    /// addresses.
    ///
    /// Starts from an empty table and copies the kernel half (`KBASE..512`)
    /// of the boot page table into it, so the kernel mappings are shared
    /// with every table built this way.
    pub fn new_boxed_with_kernel_addr() -> Box<Self> {
        // Locate the boot-time kernel PML4 via the `boot_pml4e` symbol.
        // Its value (not its address) appears to be the physical address of
        // the kernel page table — TODO confirm against the boot code.
        let kernel_pt = unsafe {
            (Pa::new({
                unsafe extern "C" {
                    static mut boot_pml4e: u64;
                }
                boot_pml4e as usize
            })
            .unwrap()
            .into_kva()
            .into_usize() as *const [Pml4e; 512])
                .as_ref()
                .unwrap()
        };
        let mut this = Self::new_boxed();
        // Copy through `this.0` directly (not `IndexMut`) so the
        // kernel-range guard in `index_mut` is not triggered.
        this.0[Self::KBASE..512].copy_from_slice(&kernel_pt[Self::KBASE..512]);
        this
    }

    /// Get the physical address of this page table root.
    ///
    /// Converts the table's kernel virtual address back into the physical
    /// address suitable for loading into CR3.
    pub fn pa(&self) -> Pa {
        crate::mm::Kva::new(self.as_ptr() as usize)
            .unwrap()
            .into_pa()
    }
}
1095
#[doc(hidden)]
/// Per-CPU record of the physical address of the currently loaded page
/// table; written by [`load_pt`] and cross-checked in [`get_current_pt_pa`].
pub(crate) static ACTIVE_PAGE_TABLES: [AtomicUsize; MAX_CPU] =
    [const { AtomicUsize::new(0) }; MAX_CPU];
1099
/// Load page table by given physical address.
///
/// Skips the reload when `pa` already matches the active CR3 value, since
/// rewriting CR3 would needlessly flush the TLB. The per-CPU bookkeeping in
/// [`ACTIVE_PAGE_TABLES`] is updated before CR3 is switched.
#[inline]
pub fn load_pt(pa: Pa) {
    if abyss::x86_64::Cr3::current().into_usize() != pa.into_usize() {
        ACTIVE_PAGE_TABLES[abyss::x86_64::intrinsics::cpuid()].store(pa.into_usize());
        // SAFETY: presumably `pa` is the physical address of a valid page
        // table root whose kernel half is mapped — TODO confirm that all
        // callers guarantee this.
        unsafe { abyss::x86_64::Cr3(pa.into_usize() as u64).apply() }
    }
}
1109
1110/// Get current page table's physical address.
1111#[inline]
1112pub fn get_current_pt_pa() -> Pa {
1113    let addr = abyss::x86_64::Cr3::current().into_usize();
1114    assert_eq!(
1115        ACTIVE_PAGE_TABLES[abyss::x86_64::intrinsics::cpuid()].load(),
1116        addr
1117    );
1118    Pa::new(addr).unwrap()
1119}