Implementation for Virtual Memory Manager

This commit is contained in:
lucic71 2022-06-28 19:40:51 +03:00
parent ab93ad2de1
commit 585fe5b9cb
6 changed files with 305 additions and 48 deletions

20
kernel/vmm/asm/paging.S Normal file
View File

@ -0,0 +1,20 @@
.global enable_paging
/*
 * enable_paging:
 * --------------
 *
 * C prototype: void enable_paging(uint32_t pd_phys_addr); (cdecl, i386)
 *
 * Move the argument in cr3 and enable PE and PG bits in cr0.
 * PE = bit 0 (protected mode enable), PG = bit 31 (paging enable);
 * together they form the 0x80000001 mask OR-ed in below.  Paging is
 * active starting with the instruction after the write to %cr0.
 *
 * Clobbers %eax only, which is caller-saved under cdecl.
 */
enable_paging:
mov 0x04(%esp), %eax    # first stack argument: physical addr of page directory
mov %eax, %cr3          # CR3 <- page directory base (must be 4K aligned)
mov %cr0, %eax
or $0x80000001, %eax    # set PG (0x80000000) and PE (0x00000001)
mov %eax, %cr0          # translation through the page tables starts here
ret

62
kernel/vmm/include/pde.h Normal file
View File

@ -0,0 +1,62 @@
#ifndef PDE_H_
#define PDE_H_

#include <stdint.h>

/* A Page Directory Entry is a raw 32-bit word (i386 paging). */
typedef uint32_t pde_t;

/* Macros for working with Page Directories. */
#define PD_TABLE_SIZE 1024
/* Top 10 bits of a linear address select the PD entry. */
#define PD_INDEX(x) (((x) >> 22) & 0x3FF)

/* Page Directory Entry flag bits. */
enum PDE_FLAGS {
    PDE_PRESENT    = 0x01,
    PDE_WRITABLE   = 0x02,
    PDE_USER       = 0x04,
    PDE_PWT        = 0x08,
    PDE_PCD        = 0x10,
    PDE_ACCESSED   = 0x20,
    PDE_DIRTY      = 0x40,
    PDE_4MB        = 0x80,
    PDE_CPU_GLOBAL = 0x100,
    PDE_LV4_GLOBAL = 0x200,
    PDE_FRAME      = 0xFFFFF000  /* bits 31..12: physical frame address */
};

/*
 * pde_set_attr / pde_unset_attr:
 *  Set/clear attribute bits in a Page Directory Entry.
 *
 *  @param e    - PD entry
 *  @param attr - OR-mask of PDE_FLAGS values
 */
static inline void pde_set_attr(pde_t *e, uint32_t attr) { *e |= attr; }
static inline void pde_unset_attr(pde_t *e, uint32_t attr) { *e &= ~attr; }

/*
 * pde_set_frame / pde_get_frame:
 *  Set/get the frame the PDE points to.  The physical address must be
 *  4K aligned; the value is masked with PDE_FRAME anyway, so stray
 *  low-order bits can never corrupt the flag bits of the entry
 *  (the previous version OR-ed phys_addr in unmasked).
 *
 *  @param e         - PD entry
 *  @param phys_addr - 4K-aligned physical address of the frame
 */
static inline void pde_set_frame(pde_t *e, uint32_t phys_addr) {
    *e = (*e & ~PDE_FRAME) | (phys_addr & PDE_FRAME);
}
static inline uint32_t pde_get_frame(pde_t e) { return e & PDE_FRAME; }

/* Checkers for flags. */
static inline int pde_is_present(pde_t e) { return e & PDE_PRESENT; }

#endif

View File

@ -1,48 +0,0 @@
#ifndef PE_H_
#define PE_H_

#include <stdint.h>

/*
 * The following routines serve as a common interface for Page Directory and
 * Page Table entries (raw 32-bit i386 paging entries).
 */

/* Page Entry flag bits. */
enum PE_FLAGS {
    PE_PRESENT    = 0x01,
    PE_WRITABLE   = 0x02,
    PE_USER       = 0x04,
    PE_PWT        = 0x08,
    PE_PCD        = 0x10,
    PE_ACCESSED   = 0x20,
    PE_DIRTY      = 0x40,
    PE_4MB        = 0x80,
    PE_CPU_GLOBAL = 0x100,
    PE_LV4_GLOBAL = 0x200,
    PE_FRAME      = 0xFFFFF000  /* bits 31..12: physical frame address */
};

/* Set/clear attribute bits in a page entry. */
static inline void pe_set_attr(uint32_t *e, uint32_t attr) { *e |= attr; }
static inline void pe_unset_attr(uint32_t *e, uint32_t attr) { *e &= ~attr; }

/*
 * pe_set_frame:
 *  Point the entry at physical frame number `addr` (i.e. phys >> 12).
 *  The shifted value is masked with PE_FRAME so it cannot spill into the
 *  flag bits.  Defined `static inline` -- the previous plain definition
 *  in a header caused multiple-definition link errors as soon as the
 *  header was included from more than one translation unit.
 */
static inline void pe_set_frame(uint32_t *e, uint32_t addr) {
    *e = (*e & ~PE_FRAME) | ((addr << 12) & PE_FRAME);
}

static inline uint32_t pe_get_frame(uint32_t e) { return e & PE_FRAME; }

/* Checkers for flags. */
static inline int pe_is_present(uint32_t e)  { return e & PE_PRESENT; }
static inline int pe_is_writable(uint32_t e) { return e & PE_WRITABLE; }
static inline int pe_is_user(uint32_t e)     { return e & PE_USER; }
static inline int pe_is_4mb(uint32_t e)      { return e & PE_4MB; }

#endif

61
kernel/vmm/include/pte.h Normal file
View File

@ -0,0 +1,61 @@
#ifndef PTE_H_
#define PTE_H_

#include <stdint.h>

/* A Page Table Entry is a raw 32-bit word (i386 paging). */
typedef uint32_t pte_t;

/* Macros for working with Page Tables. */
#define PT_TABLE_SIZE 1024
/* Bits 21..12 of a linear address select the PT entry. */
#define PT_INDEX(x) (((x) >> 12) & 0x3FF)

/* Page Table Entry flag bits. */
enum PTE_FLAGS {
    PTE_PRESENT       = 0x01,
    PTE_WRITABLE      = 0x02,
    PTE_USER          = 0x04,
    PTE_WRITETHOUGH   = 0x08,   /* original misspelling, kept for callers */
    PTE_WRITETHROUGH  = 0x08,   /* correctly spelled alias, same bit */
    PTE_NOT_CACHEABLE = 0x10,
    PTE_ACCESSED      = 0x20,
    PTE_DIRTY         = 0x40,
    PTE_PAT           = 0x80,
    PTE_CPU_GLOBAL    = 0x100,
    PTE_LV4_GLOBAL    = 0x200,
    PTE_FRAME         = 0xFFFFF000  /* bits 31..12: physical frame address */
};

/*
 * pte_set_attr / pte_unset_attr:
 *  Set/clear attribute bits in a Page Table Entry.
 *
 *  @param e    - PT entry
 *  @param attr - OR-mask of PTE_FLAGS values
 */
static inline void pte_set_attr(pte_t *e, uint32_t attr) { *e |= attr; }
static inline void pte_unset_attr(pte_t *e, uint32_t attr) { *e &= ~attr; }

/*
 * pte_set_frame / pte_get_frame:
 *  Set/get the frame the PTE points to.  The physical address must be
 *  4K aligned; the value is masked with PTE_FRAME anyway, so stray
 *  low-order bits can never corrupt the flag bits of the entry
 *  (the previous version OR-ed phys_addr in unmasked).
 *
 *  @param e         - PT entry
 *  @param phys_addr - 4K-aligned physical address of the frame
 */
static inline void pte_set_frame(pte_t *e, uint32_t phys_addr) {
    *e = (*e & ~PTE_FRAME) | (phys_addr & PTE_FRAME);
}
static inline uint32_t pte_get_frame(pte_t e) { return e & PTE_FRAME; }

/* Checkers for flags. */
static inline int pte_is_present(pte_t e) { return e & PTE_PRESENT; }

#endif

View File

@ -0,0 +1,29 @@
#ifndef VMM_INTERNALS_H_
#define VMM_INTERNALS_H_

#include "pte.h"
#include "pde.h"

/*
 * pd  - Page directory, statically allocated and 4K-aligned (CR3 takes a
 *       4K-aligned physical address).
 * kpd - Kernel page directory placed in the last 4KB of the virtual address
 *       space: thanks to the recursive mapping set up in vmm_init() (the
 *       last PDE points at pd itself), the directory is reachable at
 *       0xFFFFF000 once paging is enabled.
 *
 * NOTE(review): defining `static` data in a header gives every translation
 * unit that includes it a private copy of pd/kpd.  Safe only while vmm.c
 * is the sole includer -- confirm, or move the definitions into a .c file
 * and declare them `extern` here.
 */
static pde_t pd[1024] __attribute__((aligned (4096)));
static pde_t *kpd = (pde_t *) 0xFFFFF000;

/* Start of the virtual window over the page tables once paging is enabled:
 * via the recursive mapping, page table i appears at
 * VIRT_PT_START + i * 0x1000. */
#define VIRT_PT_START 0xFFC00000

/*
 * enable_paging: (implemented in asm/paging.S)
 *
 * Loads CR3 and sets the PE/PG bits in CR0.
 *
 * @param pd_phys_addr - Physical address of Page Directory.
 */
void enable_paging(uint32_t pd_phys_addr);

#endif

133
kernel/vmm/src/vmm.c Normal file
View File

@ -0,0 +1,133 @@
#include "kernel/vmm.h"
#include "vmm_internals.h"
#include "kernel/pmm.h"
#include <string.h>
/*
* vmm_init:
* ---------
*
* Allocate the first page table and put in in Page Directory on the
* first position (because this routine wants to map the first 4MB of
* physical space).
*
* Next, point the last entry of pd to pd. This is called Recursive Mapping.
* Its advantage is that the kernel can map pages even after the paging
* was enabled. TODO: maybe look for other solutions.
*
* The first page table is then filled with its corresponding addresses and
* paging is enabled.
*
*/
void vmm_init(void) {
pte_t *first_pt = pmm_alloc_block();
pde_set_frame(pd, (uint32_t) first_pt);
pde_set_attr(pd, PDE_PRESENT | PDE_WRITABLE);
pde_set_frame(pd + 1023, (uint32_t) pd);
pde_set_attr(pd + 1023, PDE_PRESENT | PDE_WRITABLE);
memset(first_pt, sizeof(pte_t) * 1024, 0x00);
for (int x = 0; x < 1024; x++) {
pte_set_frame(first_pt + x, x * 4096);
pte_set_attr(first_pt + x, PTE_PRESENT | PDE_WRITABLE);
}
enable_paging((uint32_t) pd);
}
/*
* vmmap_map_addr:
* ---------------
*
* Paging is now enabled and this routine works with kpd instead of pd.
*
* It checks if there is a corresponding entry for virt in PD. If there
* is an entry, it further checks if there is an PT entry for virt. If
* there is no entry then it creates one, else it returns -1.
*
* If there is no PD entry, it creates one and also creates a PT entry.
*
*/
int vmm_map_addr(uint32_t phys, uint32_t virt) {
int pd_index = PD_INDEX(virt);
int pt_index = PT_INDEX(virt);
if (pde_is_present(kpd[pd_index])) {
uint32_t *pt = (uint32_t *) (VIRT_PT_START + pd_index * 0x1000);
if (!pte_is_present(pt[pt_index])) {
pte_set_frame(pt + pt_index, phys);
pte_set_attr(pt + pt_index, PTE_PRESENT | PTE_WRITABLE);
} else
return -1;
} else {
pte_t *new_pt_phys = pmm_alloc_block();
pde_set_frame(kpd + pd_index, (uint32_t) new_pt_phys);
pde_set_attr(kpd + pd_index, PDE_PRESENT | PDE_WRITABLE);
pte_t *new_pt_virt = (pte_t *) (VIRT_PT_START + pd_index * 0x1000);
pte_set_frame(new_pt_virt, phys);
pte_set_attr(new_pt_virt + pt_index, PTE_PRESENT | PTE_WRITABLE);
}
return 0;
}
/*
 * vmm_unmap_addr:
 * ---------------
 *
 * Finds the corresponding PD and PT entries for virt and clears the PT
 * entry.  It then probes all 1024 PTEs of that page table: if none is
 * present any more, the page table itself is released back to the physical
 * memory manager and its PD entry is cleared.
 *
 * @param virt - Virtual address whose mapping is removed.  A no-op when
 *               the address has no page table at all.
 */
void vmm_unmap_addr(uint32_t virt) {
    int pd_index = PD_INDEX(virt);
    int pt_index = PT_INDEX(virt);

    if (!pde_is_present(kpd[pd_index]))
        return;

    pte_t *pt_virt = (pte_t *) (VIRT_PT_START + pd_index * 0x1000);

    if (pte_is_present(pt_virt[pt_index]))
        pt_virt[pt_index] = 0x00;

    /* BUGFIX: the scan previously re-tested pt_virt[pt_index] -- the entry
     * just cleared -- on every iteration, so it never broke out and freed
     * the page table even while other mappings in it were still live. */
    int i;
    for (i = 0; i < 1024; i++)
        if (pte_is_present(pt_virt[i]))
            break;

    if (i == 1024) {
        pmm_free_block((void *) pde_get_frame(kpd[pd_index]));
        kpd[pd_index] = 0x00;
        /* NOTE(review): the stale translation may still sit in the TLB;
         * confirm an invlpg/CR3 reload happens before virt is reused. */
    }
}