/*
 * Based on arch/arm/kernel/process.c
 *
 * Original Copyright (C) 1995  Linus Torvalds
 * Copyright (C) 1996-2000 Russell King - Converted to ARM.
 * Copyright (C) 2012 ARM Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <stdarg.h>

#include <linux/compat.h>
#include <linux/efi.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task.h>
#include <linux/sched/task_stack.h>
#include <linux/kernel.h>
#include <linux/kallsyms.h>
#include <linux/mm.h>
#include <linux/stddef.h>
#include <linux/unistd.h>
#include <linux/user.h>
#include <linux/delay.h>
#include <linux/reboot.h>
#include <linux/interrupt.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/elfcore.h>
#include <linux/pm.h>
#include <linux/tick.h>
#include <linux/utsname.h>
#include <linux/uaccess.h>
#include <linux/random.h>
#include <linux/hw_breakpoint.h>
#include <linux/personality.h>
#include <linux/notifier.h>
#include <trace/events/power.h>
#include <linux/percpu.h>
#include <linux/thread_info.h>
#include <linux/debug-snapshot.h>	/* dbg_snapshot_* vendor hooks */

#include <asm/alternative.h>
#include <asm/compat.h>
#include <asm/cacheflush.h>
#include <asm/exec.h>
#include <asm/fpsimd.h>
#include <asm/mmu_context.h>
#include <asm/processor.h>
#include <asm/stacktrace.h>

#ifdef CONFIG_CC_STACKPROTECTOR
#include <linux/stackprotector.h>
unsigned long __stack_chk_guard __read_mostly;
EXPORT_SYMBOL(__stack_chk_guard);
#endif

/*
 * Function pointers to optional machine specific functions
 */
void (*pm_power_off)(void);
EXPORT_SYMBOL_GPL(pm_power_off);

void (*arm_pm_restart)(enum reboot_mode reboot_mode, const char *cmd);

/*
 * This is our default idle handler.
 */
void arch_cpu_idle(void)
{
	/*
	 * This should do all the clock switching and wait for interrupt
	 * tricks
	 */
	trace_cpu_idle_rcuidle(1, smp_processor_id());
	cpu_do_idle();
	local_irq_enable();
	trace_cpu_idle_rcuidle(PWR_EVENT_EXIT, smp_processor_id());
}

#ifdef CONFIG_HOTPLUG_CPU
void arch_cpu_idle_dead(void)
{
	cpu_die();
}
#endif

/*
 * Called by kexec, immediately prior to machine_kexec().
 *
 * This must completely disable all secondary CPUs; simply causing those CPUs
 * to execute e.g. a RAM-based pin loop is not sufficient. This allows the
 * kexec'd kernel to use any and all RAM as it sees fit, without having to
 * avoid any code or data used by any SW CPU pin loop. The CPU hotplug
 * functionality embodied in disable_nonboot_cpus() achieves this.
 */
void machine_shutdown(void)
{
	disable_nonboot_cpus();
}

/*
 * Halting simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this.
 */
void machine_halt(void)
{
	local_irq_disable();
	smp_send_stop();
	while (1);
}

/*
 * Power-off simply requires that the secondary CPUs stop performing any
 * activity (executing tasks, handling interrupts). smp_send_stop()
 * achieves this. When the system power is turned off, it will take all CPUs
 * with it.
 */
void machine_power_off(void)
{
	local_irq_disable();
	smp_send_stop();
	if (pm_power_off)
		pm_power_off();
}
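#if 0	/* illustration only -- not built as part of this file */
/*
 * A minimal sketch of how a platform driver might install the
 * pm_power_off hook used above. mydev_power_off and mydev_probe are
 * hypothetical names; the pattern of assigning pm_power_off once the
 * device that can cut system power has been probed is the conventional
 * one (e.g. PMIC and gpio-poweroff drivers do the same).
 */
#include <linux/pm.h>
#include <linux/platform_device.h>

static void mydev_power_off(void)
{
	/* poke the PMIC/system-controller register that removes power */
}

static int mydev_probe(struct platform_device *pdev)
{
	/* only claim the hook if nobody else has registered one */
	if (!pm_power_off)
		pm_power_off = mydev_power_off;
	return 0;
}
#endif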
/*
 * Restart requires that the secondary CPUs stop performing any activity
 * while the primary CPU resets the system. Systems with a single CPU can
 * use soft_restart() as their machine descriptor's .restart hook, since that
 * will cause the only available CPU to reset. Systems with multiple CPUs must
 * provide a HW restart implementation, to ensure that all CPUs reset at once.
 * This is required so that any code running after reset on the primary CPU
 * doesn't have to co-ordinate with other CPUs to ensure they aren't still
 * executing pre-reset code, and using RAM that the primary CPU's code wishes
 * to use. Implementing such co-ordination would be essentially impossible.
 */
void machine_restart(char *cmd)
{
	/* Disable interrupts first */
	local_irq_disable();
	smp_send_stop();

	/*
	 * UpdateCapsule() depends on the system being reset via
	 * ResetSystem().
	 */
	if (efi_enabled(EFI_RUNTIME_SERVICES))
		efi_reboot(reboot_mode, NULL);

	dbg_snapshot_post_reboot(cmd);

	/* Now call the architecture specific reboot code. */
	if (arm_pm_restart)
		arm_pm_restart(reboot_mode, cmd);
	else
		do_kernel_restart(cmd);

	/*
	 * Whoops - the architecture was unable to reboot.
	 */
	printk("Reboot failed -- System halted\n");
	while (1);
}

#ifdef CONFIG_SEC_DEBUG_AVOID_UNNECESSARY_TRAP
extern unsigned long long incorrect_addr;
#endif

/*
 * dump a block of kernel memory from around the given address
 */
static void show_data(unsigned long addr, int nbytes, const char *name)
{
	int	i, j;
	int	nlines;
#ifdef CONFIG_SEC_DEBUG_AVOID_UNNECESSARY_TRAP
	int	nbytes_offset = nbytes;
#endif
	u32	*p;

	/*
	 * don't attempt to dump non-kernel addresses or
	 * values that are probably just small negative numbers
	 */
	if (addr < PAGE_OFFSET || addr > -256UL) {
		/*
		 * With KASLR enabled the kernel image may be located in
		 * the vmalloc region, so also accept addresses that fall
		 * within the image itself.
		 */
		if (IS_ENABLED(CONFIG_RANDOMIZE_BASE)) {
			if (addr < (unsigned long)KERNEL_START ||
			    addr > (unsigned long)KERNEL_END)
				return;
		} else {
			return;
		}
	}

	printk("\n%s: %#lx:\n", name, addr);

	/*
	 * round address down to a 32 bit boundary
	 * and always dump a multiple of 32 bytes
	 */
	p = (u32 *)(addr & ~(sizeof(u32) - 1));
	nbytes += (addr & (sizeof(u32) - 1));
	nlines = (nbytes + 31) / 32;

	for (i = 0; i < nlines; i++) {
		/*
		 * just display low 16 bits of address to keep
		 * each line of the dump < 80 characters
		 */
		printk("%04lx :", (unsigned long)p & 0xffff);
		for (j = 0; j < 8; j++) {
			u32	data;

#ifdef CONFIG_SEC_DEBUG_AVOID_UNNECESSARY_TRAP
			if ((incorrect_addr != 0) &&
			    (((unsigned long long)p >= (incorrect_addr - nbytes_offset)) &&
			     ((unsigned long long)p <= (incorrect_addr + nbytes_offset)))) {
				if (j == 7)
					pr_cont(" ********\n");
				else
					pr_cont(" ********");
			} else if (probe_kernel_address(p, data)) {
#else
			if (probe_kernel_address(p, data)) {
#endif
				if (j == 7)
					pr_cont(" ********\n");
				else
					pr_cont(" ********");
			} else {
				if (j == 7)
					pr_cont(" %08X\n", data);
				else
					pr_cont(" %08X", data);
			}
			++p;
		}
	}
}

static void show_extra_register_data(struct pt_regs *regs, int nbytes)
{
	mm_segment_t fs;
	unsigned int i;

	fs = get_fs();
	set_fs(KERNEL_DS);
	show_data(regs->pc - nbytes, nbytes * 2, "PC");
	show_data(regs->regs[30] - nbytes, nbytes * 2, "LR");
	show_data(regs->sp - nbytes, nbytes * 2, "SP");
	for (i = 0; i < 30; i++) {
		char name[4];

		snprintf(name, sizeof(name), "X%u", i);
		show_data(regs->regs[i] - nbytes, nbytes * 2, name);
	}
	set_fs(fs);
}
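#if 0	/* illustration only -- not built as part of this file */
/*
 * A standalone sketch of the rounding arithmetic show_data() performs
 * above: the start address is rounded down to a 4-byte boundary, the
 * dropped low bits are added back into the byte count, and the dump is
 * emitted as whole 32-byte lines of eight u32 words. The sample address
 * and byte count are made up; nothing is dereferenced here.
 */
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	unsigned long addr = 0xffffff80089a1236UL;	/* arbitrary example */
	int nbytes = 128;

	uint32_t *p = (uint32_t *)(addr & ~(sizeof(uint32_t) - 1));
	nbytes += addr & (sizeof(uint32_t) - 1);	/* start ...1234, 130 bytes */
	int nlines = (nbytes + 31) / 32;		/* -> 5 lines of 8 words */

	printf("start=%p lines=%d\n", (void *)p, nlines);
	return 0;
}
#endif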
void __show_regs(struct pt_regs *regs)
{
	int i, top_reg;
	u64 lr, sp;

	if (compat_user_mode(regs)) {
		lr = regs->compat_lr;
		sp = regs->compat_sp;
		top_reg = 12;
	} else {
		lr = regs->regs[30];
		sp = regs->sp;
		top_reg = 29;
	}

	if (!user_mode(regs)) {
		dbg_snapshot_save_context(regs);
		/*
		 * To keep logging kernel events after a panic, change the
		 * second argument of dbg_snapshot_set_enable() below to true.
		 */
		dbg_snapshot_set_enable("log_kevents", false);
	}

	pr_info("TIF_FOREIGN_FPSTATE: %d, FP/SIMD depth %d, cpu: %d\n",
		test_thread_flag(TIF_FOREIGN_FPSTATE),
		atomic_read(&current->thread.fpsimd_kernel_state.depth),
		current->thread.fpsimd_kernel_state.cpu);

	show_regs_print_info(KERN_DEFAULT);
	print_symbol("PC is at %s\n", instruction_pointer(regs));
	print_symbol("LR is at %s\n", lr);
	printk("pc : [<%016llx>] lr : [<%016llx>] pstate: %08llx\n",
	       regs->pc, lr, regs->pstate);
	printk("sp : %016llx\n", sp);

	i = top_reg;

	while (i >= 0) {
		printk("x%-2d: %016llx ", i, regs->regs[i]);
		i--;

		if (i % 2 == 0) {
			pr_cont("x%-2d: %016llx ", i, regs->regs[i]);
			i--;
		}

		pr_cont("\n");
	}
	if (!user_mode(regs))
		show_extra_register_data(regs, 256);
	printk("\n");
}

void show_regs(struct pt_regs *regs)
{
	__show_regs(regs);
	dump_backtrace(regs, NULL);
}

#ifdef CONFIG_SEC_DEBUG_AUTO_COMMENT
void show_regs_auto_comment(struct pt_regs *regs, bool comm)
{
	__show_regs(regs);
	if (comm)
		dump_backtrace_auto_summary(regs, NULL);
	else
		dump_backtrace(regs, NULL);
}
#endif

static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);

	if (is_compat_task()) {
		current->thread.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}

void flush_thread(void)
{
	fpsimd_flush_thread();
	tls_thread_flush();
	flush_ptrace_hw_breakpoint(current);
}

void release_thread(struct task_struct *dead_task)
{
}

int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
{
	if (current->mm)
		fpsimd_preserve_current_state();
	*dst = *src;
	return 0;
}

asmlinkage void ret_from_fork(void) asm("ret_from_fork");

int copy_thread(unsigned long clone_flags, unsigned long stack_start,
		unsigned long stk_sz, struct task_struct *p)
{
	struct pt_regs *childregs = task_pt_regs(p);

	memset(&p->thread.cpu_context, 0, sizeof(struct cpu_context));

	/*
	 * In case p was allocated the same task_struct pointer as some
	 * other recently-exited task, make sure p is disassociated from
	 * any cpu that may have run that now-exited task recently.
	 * Otherwise we could erroneously skip reloading the FPSIMD
	 * registers for p.
	 */
	fpsimd_flush_task_state(p);

	if (likely(!(p->flags & PF_KTHREAD))) {
		*childregs = *current_pt_regs();
		childregs->regs[0] = 0;

		/*
		 * Read the current TLS pointer from tpidr_el0 as it may be
		 * out-of-sync with the saved value.
		 */
		*task_user_tls(p) = read_sysreg(tpidr_el0);

		if (stack_start) {
			if (is_compat_thread(task_thread_info(p)))
				childregs->compat_sp = stack_start;
			else
				childregs->sp = stack_start;
		}

		/*
		 * If a TLS pointer was passed to clone (4th argument), use it
		 * for the new thread.
		 */
		if (clone_flags & CLONE_SETTLS)
			p->thread.tp_value = childregs->regs[3];
	} else {
		memset(childregs, 0, sizeof(struct pt_regs));
		childregs->pstate = PSR_MODE_EL1h;
		if (IS_ENABLED(CONFIG_ARM64_UAO) &&
		    cpus_have_const_cap(ARM64_HAS_UAO))
			childregs->pstate |= PSR_UAO_BIT;
		p->thread.cpu_context.x19 = stack_start;
		p->thread.cpu_context.x20 = stk_sz;
	}
	p->thread.cpu_context.pc = (unsigned long)ret_from_fork;
	p->thread.cpu_context.sp = (unsigned long)childregs;

	ptrace_hw_copy_thread(p);

	return 0;
}
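#if 0	/* illustration only -- not built as part of this file */
/*
 * A standalone AArch64 user-space sketch: TPIDR_EL0 is readable at EL0,
 * so the per-thread TLS pointer that copy_thread() seeds above (and
 * that tls_thread_switch() below swaps on context switch) can be
 * observed directly. Build and run on an arm64 Linux box.
 */
#include <stdio.h>

int main(void)
{
	unsigned long tp;

	asm("mrs %0, tpidr_el0" : "=r" (tp));
	printf("tpidr_el0 = %#lx\n", tp);	/* this thread's TLS pointer */
	return 0;
}
#endif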
void tls_preserve_current_state(void)
{
	*task_user_tls(current) = read_sysreg(tpidr_el0);
}

static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
}

/* Restore the UAO state depending on next's addr_limit */
void uao_thread_switch(struct task_struct *next)
{
	if (IS_ENABLED(CONFIG_ARM64_UAO)) {
		if (task_thread_info(next)->addr_limit == KERNEL_DS)
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(1), ARM64_HAS_UAO));
		else
			asm(ALTERNATIVE("nop", SET_PSTATE_UAO(0), ARM64_HAS_UAO));
	}
}

/*
 * We store our current task in sp_el0, which is clobbered by userspace. Keep a
 * shadow copy so that we can restore this upon entry from userspace.
 *
 * This is *only* for exception entry from EL0, and is not valid until we
 * __switch_to() a user task.
 */
DEFINE_PER_CPU(struct task_struct *, __entry_task);

static void entry_task_switch(struct task_struct *next)
{
	__this_cpu_write(__entry_task, next);
}

/*
 * Thread switching.
 */
__notrace_funcgraph struct task_struct *__switch_to(struct task_struct *prev,
				struct task_struct *next)
{
	struct task_struct *last;

	fpsimd_thread_switch(next);
	tls_thread_switch(next);
	hw_breakpoint_thread_switch(next);
	contextidr_thread_switch(next);
	entry_task_switch(next);
	uao_thread_switch(next);

	/*
	 * Complete any pending TLB or cache maintenance on this CPU in case
	 * the thread migrates to a different CPU.
	 * This full barrier is also required by the membarrier system
	 * call.
	 */
	dsb(ish);

	/* the actual thread switch */
	last = cpu_switch_to(prev, next);

	return last;
}

unsigned long get_wchan(struct task_struct *p)
{
	struct stackframe frame;
	unsigned long stack_page, ret = 0;
	int count = 0;

	if (!p || p == current || p->state == TASK_RUNNING)
		return 0;

	stack_page = (unsigned long)try_get_task_stack(p);
	if (!stack_page)
		return 0;

	frame.fp = thread_saved_fp(p);
	frame.pc = thread_saved_pc(p);
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
	frame.graph = p->curr_ret_stack;
#endif
	do {
		if (unwind_frame(p, &frame))
			goto out;
		if (!in_sched_functions(frame.pc)) {
			ret = frame.pc;
			goto out;
		}
	} while (count++ < 16);

out:
	put_task_stack(p);
	return ret;
}

unsigned long arch_align_stack(unsigned long sp)
{
	if (!(current->personality & ADDR_NO_RANDOMIZE) && randomize_va_space)
		sp -= get_random_int() & ~PAGE_MASK;
	return sp & ~0xf;
}

unsigned long arch_randomize_brk(struct mm_struct *mm)
{
	if (is_compat_task())
		return randomize_page(mm->brk, SZ_32M);
	else
		return randomize_page(mm->brk, SZ_1G);
}

/*
 * Called from setup_new_exec() after (COMPAT_)SET_PERSONALITY.
 */
void arch_setup_new_exec(void)
{
	current->mm->context.flags = is_compat_task() ? MMCF_AARCH32 : 0;
}
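#if 0	/* illustration only -- not built as part of this file */
/*
 * A standalone sketch of the arithmetic in arch_align_stack() above:
 * subtract a random sub-page offset, then align down to 16 bytes as
 * the AArch64 procedure call standard requires of sp. The 4 KiB
 * PAGE_MASK here is an assumption of the sketch, not of the kernel.
 */
#include <stdio.h>
#include <stdlib.h>

#define PAGE_MASK (~(4096UL - 1))

int main(void)
{
	unsigned long sp = 0x0000fffffffff000UL;	/* example stack top */

	sp -= (unsigned long)rand() & ~PAGE_MASK;	/* random 0..4095 */
	sp &= ~0xfUL;					/* 16-byte align */
	printf("randomized sp = %#lx\n", sp);
	return 0;
}
#endif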