git.dujemihanovic.xyz Git - nameless-os.git/commitdiff
Merge branch 'elf' into mm
author Duje Mihanović <duje.mihanovic@skole.hr>
Mon, 27 Jun 2022 19:29:50 +0000 (21:29 +0200)
committer Duje Mihanović <duje.mihanovic@skole.hr>
Mon, 27 Jun 2022 19:29:50 +0000 (21:29 +0200)
boot/x86/stage3/loader.s
boot/x86/stage3/paging.s [new file with mode: 0644]
include/arch/x86/mm/paging.h [new file with mode: 0644]

diff --git a/boot/x86/stage3/loader.s b/boot/x86/stage3/loader.s
index 87598f4b0c93a6d95cbb1aa21dcf6a87723ff1da..bd2fb9b912fa31d65b95f25dbc093e0264527f21 100644
--- a/boot/x86/stage3/loader.s
+++ b/boot/x86/stage3/loader.s
@@ -210,3 +210,5 @@ ss_s: db "SS: ", 0
 space: db " ", 0
 hex_delm: db "0x", 0
 newline: db 0xd, 0xa, 0
+
+%include "paging.s"
diff --git a/boot/x86/stage3/paging.s b/boot/x86/stage3/paging.s
new file mode 100644
index 0000000..004da84
--- /dev/null
+++ b/boot/x86/stage3/paging.s
@@ -0,0 +1,80 @@
+; Code for enabling paging before calling the kernel. Identity-maps the first
+; 1 MiB (VGA framebuffer included) and maps the kernel into the higher half.
+bits 32
+
+section .text
+
+; The problem with this code is that it hard-codes how many pages the kernel's
+; sections occupy. As of this writing the numbers are correct, but as the
+; kernel grows this code may stop mapping everything it needs to. The fix is to
+; load the kernel as an ELF image instead of a flat binary. That was not done
+; earlier because it would require paging, but now that paging works, switching
+; to ELF is the obvious next step.
+
+enable_paging:
+       push eax
+       mov eax, cr0
+       or eax, 0x80000000 ; set CR0.PG (bit 31) to turn on paging
+       mov cr0, eax
+       pop eax
+       ret
+
+load_paging_structs: ; build the page tables and point CR3 at the directory
+       push eax
+       push ebx
+       xor ebx, ebx
+.pt_low_loop: ; identity-map pages 0x00-0xff (the first 1 MiB)
+       mov eax, ebx
+       shl eax, 12
+       or eax, 1|2 ; P and R/W flags
+       mov [page_table_low+ebx*4], eax
+       inc ebx
+       cmp ebx, 0x100
+       jl .pt_low_loop
+
+       xor ebx, ebx
+.pt_high_ro_loop: ; map the first 2 kernel pages (phys 0x100000+) read-only
+       mov eax, ebx
+       add eax, 0x100
+       shl eax, 12
+       or eax, 1 ; P flag
+       mov [page_table_high+ebx*4], eax
+       inc ebx
+       cmp ebx, 0x2
+       jl .pt_high_ro_loop
+
+       mov ebx, 0x2
+.pt_high_rw_loop: ; map kernel pages 2-8 (phys 0x102000+) read/write
+       mov eax, ebx
+       add eax, 0x100
+       shl eax, 12
+       or eax, 1|2 ; P and R/W flags
+       mov [page_table_high+ebx*4], eax
+       inc ebx
+       cmp ebx, 0x9
+       jl .pt_high_rw_loop
+
+       mov eax, page_table_low
+       and eax, 0xfffff000
+       or eax, 1|2
+       mov [page_directory], eax ; PDE 0: identity map of the first 1 MiB
+
+       mov eax, page_table_high
+       and eax, 0xfffff000
+       or eax, 1|2
+       mov [page_directory+768*4], eax ; PDE 768: kernel at virtual 0xc0000000
+
+       mov eax, page_directory
+       mov cr3, eax ; point CR3 at the page directory
+       pop ebx
+       pop eax
+       ret
+
+section .data
+align 4096 ; paging structures must be 4 KiB-aligned
+page_table_low:
+       times 1024 dd 0
+page_table_high:
+       times 1024 dd 0
+page_directory:
+       times 1024 dd 0
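
A quick sketch of what the constants above mean, assuming the standard 32-bit
two-level paging scheme (the C snippet below is illustrative only and not part
of the tree): each page directory entry covers 4 MiB, so entry 768 covers the
virtual range starting at 768 * 4 MiB = 0xc0000000, and page_table_high entry
i points at physical frame 0x100 + i, i.e. the kernel loaded at 1 MiB.

#include <stdint.h>
#include <stdio.h>

/* Split a 32-bit virtual address the way the MMU does: bits 31-22 index the
 * page directory, bits 21-12 index the page table, bits 11-0 are the offset
 * within the page. */
int main(void)
{
	uint32_t virt = 0xc0001234; /* somewhere in the higher-half kernel mapping */
	uint32_t pde_index = virt >> 22;           /* 768 */
	uint32_t pte_index = (virt >> 12) & 0x3ff; /* 1 */
	uint32_t offset    = virt & 0xfff;         /* 0x234 */

	/* page_table_high[1] holds frame 0x100 + 1 = 0x101, so: */
	uint32_t phys = ((0x100 + pte_index) << 12) | offset; /* 0x101234 */

	printf("PDE %u, PTE %u, offset 0x%x -> phys 0x%x\n",
	       pde_index, pte_index, offset, phys);
	return 0;
}
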
diff --git a/include/arch/x86/mm/paging.h b/include/arch/x86/mm/paging.h
new file mode 100644
index 0000000..a4bdf22
--- /dev/null
+++ b/include/arch/x86/mm/paging.h
@@ -0,0 +1,35 @@
+#ifndef X86_PAGING_H
+#define X86_PAGING_H
+
+struct page_directory_entry {
+       unsigned p: 1,
+                rw: 1,
+                us: 1,
+                pwt: 1,
+                pcd: 1,
+                a: 1,
+                ignored: 1,
+                ps: 1,
+                ignored2: 4,
+                page_table_addr: 20;
+} __attribute__((packed));
+
+struct page_table_entry {
+       unsigned p: 1,
+                rw: 1,
+                us: 1,
+                pwt: 1,
+                pcd: 1,
+                a: 1,
+                d: 1,
+                pat: 1,
+                g: 1,
+                ignored: 3,
+                page_frame_addr: 20;
+} __attribute__((packed));
+
+struct pf_errcode {
+       unsigned p: 1, wr: 1, us: 1, rsvd: 1, id: 1, pk: 1, ss: 1, hlat: 1, reserved: 7, sgx: 1, reserved2: 16;
+} __attribute__((packed));
+
+#endif
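
As an illustration of how these structs line up with what load_paging_structs
writes by hand, here is a small standalone sketch; fill_low_table is a
hypothetical name and none of this is part of the commit. It assumes the
include path is set so that "arch/x86/mm/paging.h" resolves to the new header
(e.g. -Iinclude).

#include "arch/x86/mm/paging.h"

/* Rebuild the identity-mapping entries from paging.s using the new structs.
 * The table must stay 4 KiB-aligned, just like in the assembly version. */
static struct page_table_entry page_table_low[1024]
	__attribute__((aligned(4096)));

static void fill_low_table(void)
{
	for (unsigned i = 0; i < 0x100; i++) {
		page_table_low[i] = (struct page_table_entry){
			.p = 1,               /* present */
			.rw = 1,              /* writable */
			.page_frame_addr = i, /* identity map: frame i backs page i */
		};
	}
}

int main(void)
{
	fill_low_table();
	return 0;
}

Whether the bitfield layout actually matches the hardware format is up to the
compiler; GCC on x86 allocates these bitfields starting from the least
significant bit, which is what the hardware expects, and that is presumably
why the structs are declared __attribute__((packed)).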