fix (kernel): rewrote the complete scheduler so that it has no max_threads limit and is faster

This commit is contained in:
antifallobst 2023-02-19 00:05:45 +01:00
parent f6ee1e572f
commit f550660e21
6 changed files with 160 additions and 131 deletions

View File

@ -23,18 +23,8 @@
#define SCHEDULER_MAX_THREADS_PER_PROCESS 16 #define SCHEDULER_MAX_THREADS_PER_PROCESS 16
typedef struct { typedef struct {
uint32_t max_threads;
uint32_t num_threads; uint32_t num_threads;
thread_descriptor_T* running_thread;
thread_T* threads;
bitmap_T threads_bitmap;
bitmap_T running_threads_bitmap;
thread_T** queue;
uint32_t queue_index;
uint32_t queue_length;
uint32_t running_thread;
bool blocked; bool blocked;
bool initialized; bool initialized;
@ -44,15 +34,13 @@ void scheduler_init ();
cpu_state_T* scheduler_start (cpu_state_T* state); cpu_state_T* scheduler_start (cpu_state_T* state);
bool scheduler_is_initialized (); bool scheduler_is_initialized ();
uint32_t scheduler_register_thread (thread_T* thread); thread_t scheduler_register_thread (thread_T* thread);
void scheduler_pause_thread (uint32_t id); void scheduler_pause_thread (thread_t thread_descriptor);
void scheduler_start_thread (uint32_t id); void scheduler_start_thread (thread_t thread_descriptor);
void scheduler_kill_thread (uint32_t id); void scheduler_kill_thread (thread_t thread_descriptor);
thread_T* scheduler_get_thread (uint32_t id);
uint32_t scheduler_get_current_thread (); thread_t scheduler_get_current_thread ();
void scheduler_calculate_queue ();
cpu_state_T* scheduler_switch_context (cpu_state_T* state); cpu_state_T* scheduler_switch_context (cpu_state_T* state);
#endif //NOX_SCHEDULER_H #endif //NOX_SCHEDULER_H

View File

@ -23,10 +23,27 @@ typedef enum {
THREAD_NONE = -1 THREAD_NONE = -1
} thread_standard_E; } thread_standard_E;
typedef struct thread_descriptor_T thread_descriptor_T;
typedef const thread_descriptor_T* thread_t;
typedef struct { typedef struct {
uint32_t id;
cpu_state_T state; cpu_state_T state;
uint64_t cpu_time; uint64_t cpu_time;
void* stack;
uint32_t stack_size;
thread_descriptor_T* descriptor;
} thread_T; } thread_T;
struct thread_descriptor_T{
thread_descriptor_T* prev;
thread_descriptor_T* next;
thread_T* thread;
};
thread_t thread_spawn (void* function);
thread_t thread_spawn_from_state (cpu_state_T* state);
void thread_start (thread_t thread_descriptor);
void thread_pause (thread_t thread_descriptor);
void thread_kill (thread_t thread_descriptor);
#endif //NOX_THREAD_H #endif //NOX_THREAD_H

View File

@ -56,6 +56,12 @@ void test_b() {
} }
} }
void test_c() {
while (true) {
io_out_byte(LOG_PORT, 'C');
}
}
void kmain(boot_info_T boot_info) { void kmain(boot_info_T boot_info) {
limine_terminal_print(&boot_info, "Booting NoxOS...\n"); limine_terminal_print(&boot_info, "Booting NoxOS...\n");
@ -66,27 +72,11 @@ void kmain(boot_info_T boot_info) {
limine_terminal_print(&boot_info, "Kernel initialized\n"); limine_terminal_print(&boot_info, "Kernel initialized\n");
log(LOG_INFO, "!=====[ Kernel Initialized ]=====!\n"); log(LOG_INFO, "!=====[ Kernel Initialized ]=====!\n");
void* stack = memory_allocate(PFRAME_SIZE * 10); thread_t thread = thread_spawn(test_b);
thread_T test; thread_start(thread);
test.state = (cpu_state_T){
.cr3 = (uint64_t)g_kernel_page_map, thread_t thread1 = thread_spawn(test_c);
.rax = 0, thread_start(thread1);
.rbx = 0,
.rcx = 0,
.rdx = 0,
.rsi = 0,
.rdi = 0,
.rbp = (uint64_t)stack + (PFRAME_SIZE * 10),
.interrupt_id = 0,
.error_code = 0,
.rip = (uint64_t)test_b,
.cs = GDT_SELECTOR_KERNEL_CODE,
.flags = 1 << CPU_FLAG_INTERRUPT_ENABLE,
.rsp = (uint64_t)stack + (PFRAME_SIZE * 10),
.ss = GDT_SELECTOR_KERNEL_DATA
};
scheduler_register_thread(&test);
scheduler_start_thread(test.id);
test_a(); test_a();

View File

@ -248,7 +248,6 @@ cpu_state_T* irq_handle(cpu_state_T* state, pic_irq_E irq) {
} }
cpu_state_T* interrupts_handle(cpu_state_T* state) { cpu_state_T* interrupts_handle(cpu_state_T* state) {
log(LOG_DEBUG, "INT 0x%xb", state->interrupt_id);
if (state->interrupt_id < EXCEPTIONS_ENUM_END) { if (state->interrupt_id < EXCEPTIONS_ENUM_END) {
return exception_handle(state); return exception_handle(state);
} }

View File

@ -22,29 +22,21 @@
scheduler_T g_scheduler; scheduler_T g_scheduler;
void scheduler_init() { void scheduler_init() {
g_scheduler.max_threads = SCHEDULER_MAX_THREADS_PER_PROCESS * SCHEDULER_MAX_PROCESSES;
g_scheduler.num_threads = 0; g_scheduler.num_threads = 0;
g_scheduler.threads = memory_allocate(g_scheduler.max_threads * sizeof(thread_T)); g_scheduler.running_thread = NULL;
g_scheduler.threads_bitmap = bitmap_init(g_scheduler.max_threads);
g_scheduler.running_threads_bitmap = bitmap_init(g_scheduler.max_threads);
g_scheduler.queue = memory_allocate(g_scheduler.max_threads * sizeof(thread_T*));
g_scheduler.queue_index = 0;
g_scheduler.queue_length = 0;
g_scheduler.running_thread = 0;
g_scheduler.blocked = false; g_scheduler.blocked = false;
syscall_perform(SYSCALL_KERNEL_SCHEDULER_START); syscall_perform(SYSCALL_KERNEL_SCHEDULER_START);
} }
cpu_state_T* scheduler_start(cpu_state_T* state) { cpu_state_T* scheduler_start(cpu_state_T* state) {
thread_T thread; thread_descriptor_T* thread = thread_spawn_from_state(state);
thread->prev = thread;
thread->next = thread;
g_scheduler.running_thread = thread;
memory_copy(state, &thread.state, sizeof(cpu_state_T));
thread.id = 0;
thread.cpu_time = 0;
scheduler_register_thread(&thread); scheduler_start_thread(thread);
scheduler_start_thread(thread.id);
g_scheduler.initialized = true; g_scheduler.initialized = true;
@ -55,113 +47,75 @@ bool scheduler_is_initialized() {
return g_scheduler.initialized; return g_scheduler.initialized;
} }
uint32_t scheduler_request_thread_id() { void scheduler_queue_add_thread_descriptor(thread_descriptor_T* descriptor) {
for (int i = 0; i < g_scheduler.max_threads; i++) { if (g_scheduler.running_thread == NULL) { return; }
if (!bitmap_get(&g_scheduler.threads_bitmap, i)) {
return i; descriptor->prev = g_scheduler.running_thread;
} descriptor->next = g_scheduler.running_thread->next;
} g_scheduler.running_thread->next->prev = descriptor;
return THREAD_NONE; g_scheduler.running_thread->next = descriptor;
} }
uint32_t scheduler_register_thread(thread_T* thread) { void scheduler_queue_remove_thread_descriptor(thread_descriptor_T* descriptor) {
if (g_scheduler.num_threads >= g_scheduler.max_threads) { if (descriptor->prev == NULL || descriptor->next == NULL) { return; }
log(LOG_WARNING, "<Scheduler> Failed to register Thread (max thread num reached)");
return THREAD_NONE;
}
descriptor->prev->next = descriptor->next;
descriptor->next->prev = descriptor->prev;
descriptor->prev = NULL;
descriptor->next = NULL;
}
thread->id = scheduler_request_thread_id(); thread_t scheduler_register_thread(thread_T* thread) {
if (thread->id == THREAD_NONE) { thread->descriptor = memory_allocate(sizeof(thread_descriptor_T));
log(LOG_WARNING, "<Scheduler> Failed to register Thread (generation of thread id failed)"); thread->descriptor->thread = thread;
return THREAD_NONE;
}
bitmap_set(&g_scheduler.threads_bitmap, thread->id, true);
memory_copy(thread, &g_scheduler.threads[thread->id], sizeof(thread_T));
g_scheduler.num_threads++; g_scheduler.num_threads++;
log(LOG_INFO, "<Scheduler> Registered thread %d", thread->id); log(LOG_INFO, "<Scheduler> Registered thread");
return thread->id; return thread->descriptor;
} }
void scheduler_pause_thread(uint32_t id) { void scheduler_pause_thread(thread_t thread_descriptor) {
bitmap_set(&g_scheduler.running_threads_bitmap, id, false); scheduler_queue_remove_thread_descriptor((thread_descriptor_T*)thread_descriptor);
log(LOG_INFO, "<Scheduler> Paused thread %d", id); log(LOG_INFO, "<Scheduler> Paused thread");
scheduler_calculate_queue();
} }
void scheduler_start_thread(uint32_t id) { void scheduler_start_thread(thread_t thread_descriptor) {
bitmap_set(&g_scheduler.running_threads_bitmap, id, true); scheduler_queue_add_thread_descriptor((thread_descriptor_T*)thread_descriptor);
log(LOG_INFO, "<Scheduler> Started thread %d", id); log(LOG_INFO, "<Scheduler> Started thread");
scheduler_calculate_queue();
} }
void scheduler_kill_thread(uint32_t id) { void scheduler_kill_thread(thread_t thread_descriptor) {
scheduler_pause_thread(id); scheduler_queue_remove_thread_descriptor((thread_descriptor_T*)thread_descriptor);
bitmap_set(&g_scheduler.threads_bitmap, id, false);
memory_free((void*)thread_descriptor);
g_scheduler.num_threads--; g_scheduler.num_threads--;
log(LOG_INFO, "<Scheduler> Killed thread %d", id); log(LOG_INFO, "<Scheduler> Killed thread");
} }
thread_T* scheduler_get_thread(uint32_t id) { thread_t scheduler_get_current_thread() {
if (!bitmap_get(&g_scheduler.threads_bitmap, id)) {
return NULL;
}
return &g_scheduler.threads[id];
}
uint32_t scheduler_get_current_thread() {
return g_scheduler.running_thread; return g_scheduler.running_thread;
} }
void scheduler_calculate_queue() {
int index = 0;
log(LOG_INFO, "<Scheduler> Calculating queue:");
for (int i = 0; i < g_scheduler.max_threads; i++) {
if (!bitmap_get(&g_scheduler.running_threads_bitmap, i)) { continue; }
log(LOG_NONE, " > [%d] Added thread %d", index, i);
g_scheduler.queue[index] = &g_scheduler.threads[i];
index++;
}
g_scheduler.queue_length = index;
}
cpu_state_T* scheduler_switch_context(cpu_state_T* state) { cpu_state_T* scheduler_switch_context(cpu_state_T* state) {
if (!g_scheduler.initialized) { if (!g_scheduler.initialized) {
log(LOG_WARNING, "Failed to switch context (scheduler not initialized) INT[0x%xb]", state->interrupt_id);
return state; return state;
} }
CORE_HALT_WHILE(g_scheduler.blocked) CORE_HALT_WHILE(g_scheduler.blocked)
g_scheduler.blocked = true; g_scheduler.blocked = true;
thread_T* old_thread = &g_scheduler.threads[g_scheduler.running_thread]; thread_T* old_thread = g_scheduler.running_thread->thread;
thread_T* new_thread = g_scheduler.queue [g_scheduler.queue_index]; thread_T* new_thread = g_scheduler.running_thread->next->thread;
log(LOG_DEBUG, "Switching Thread OLD[%d] NEW[%d]", old_thread->id, new_thread->id);
if (old_thread->cpu_time > 0) { if (old_thread->cpu_time > 0) {
memory_copy(state, &old_thread->state, sizeof(cpu_state_T)); memory_copy(state, &old_thread->state, sizeof(cpu_state_T));
} }
old_thread->cpu_time += 1; old_thread->cpu_time += 1;
g_scheduler.running_thread = new_thread->id; g_scheduler.running_thread = g_scheduler.running_thread->next;
g_scheduler.queue_index++;
if (g_scheduler.queue_index == g_scheduler.queue_length) {
g_scheduler.queue_index = 0;
}
g_scheduler.blocked = false; g_scheduler.blocked = false;
return &new_thread->state; return &new_thread->state;

81
kernel/src/proc/thread.c Normal file
View File

@ -0,0 +1,81 @@
/* Copyright (C) Antifallobst <antifallobst@systemausfall.org>
*
* NoxOS is free software:
* you can redistribute it and/or modify it under the terms of the GNU General Public License
* as published by the Free Software Foundation, either version 3 of the License,
* or (at your option) any later version.
*
* NoxOS is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY;
* without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License along with this program.
* If not, see <https://www.gnu.org/licenses/>.
*/
#include "proc/thread.h"
#include "proc/scheduler.h"
#include "mm/page_map.h"
#include "mm/page_frame.h"
#include "platform/gdt.h"
#include "utils/memory.h"
/* Creates a new kernel-mode thread that begins executing at `function`.
 * A fresh 4-page stack is allocated and an initial CPU state is built
 * so the scheduler can context-switch into the thread. The thread is
 * only registered here — it is NOT started; callers use thread_start().
 * Returns the opaque descriptor handle for the new thread. */
thread_t thread_spawn(void* function) {
    thread_T* thread = memory_allocate(sizeof(thread_T));

    thread->cpu_time   = 0;
    thread->stack_size = PFRAME_SIZE * 4;
    thread->stack      = memory_allocate(thread->stack_size);

    // Stacks grow downward, so rsp and rbp both start at the top of the
    // freshly allocated region (one past the last byte, never written).
    uint64_t stack_top = (uint64_t)thread->stack + thread->stack_size;

    thread->state = (cpu_state_T){
        .cr3          = (uint64_t)g_kernel_page_map,   // kernel address space
        .rax          = 0,
        .rbx          = 0,
        .rcx          = 0,
        .rdx          = 0,
        .rsi          = 0,
        .rdi          = 0,
        .rbp          = stack_top,
        .rsp          = stack_top,
        .rip          = (uint64_t)function,            // entry point
        .cs           = GDT_SELECTOR_KERNEL_CODE,
        .ss           = GDT_SELECTOR_KERNEL_DATA,
        .flags        = 1 << CPU_FLAG_INTERRUPT_ENABLE,
        .interrupt_id = 0,
        .error_code   = 0
    };

    return scheduler_register_thread(thread);
}
/* Wraps an already-running execution context (`state`) in a thread object
 * so the scheduler can manage it — used to adopt the boot flow as thread 0.
 * No stack is allocated: the context keeps running on whatever stack
 * `state` already refers to, so stack/stack_size stay NULL/0.
 * Returns the opaque descriptor handle for the adopted thread. */
thread_t thread_spawn_from_state(cpu_state_T* state) {
    thread_T* thread = memory_allocate(sizeof(thread_T));

    thread->stack      = NULL;
    thread->stack_size = 0;
    thread->cpu_time   = 0;

    // Snapshot the caller-provided register state into the thread object.
    memory_copy(state, &thread->state, sizeof(cpu_state_T));

    return scheduler_register_thread(thread);
}
/* Makes a previously spawned thread runnable by inserting its descriptor
 * into the scheduler's run queue. Thin public wrapper around the
 * scheduler so callers only need the thread API. */
void thread_start(thread_t thread_descriptor) {
scheduler_start_thread(thread_descriptor);
}
/* Removes the thread's descriptor from the scheduler's run queue so it
 * stops receiving CPU time; the thread object itself stays allocated and
 * can be resumed later with thread_start(). */
void thread_pause(thread_t thread_descriptor) {
scheduler_pause_thread(thread_descriptor);
}
/* Permanently destroys a thread: unqueues it from the scheduler and
 * releases its stack and thread object.
 *
 * Ordering matters: `thread` is read out of the descriptor BEFORE
 * scheduler_kill_thread(), because the scheduler frees the descriptor.
 *
 * NOTE(review): killing the currently running thread looks hazardous —
 * its stack is freed while possibly still in use, and the scheduler may
 * still hold a pointer to the freed descriptor. Confirm callers only
 * kill non-running threads. */
void thread_kill(thread_t thread_descriptor) {
// Grab the thread object first; the descriptor is freed below.
thread_T* thread = thread_descriptor->thread;
scheduler_kill_thread(thread_descriptor);
// Threads adopted via thread_spawn_from_state() own no stack (NULL).
if (thread->stack != NULL) {
memory_free(thread->stack);
}
memory_free(thread);
}