/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012 Regents of the University of California
 */

| 6 | #ifndef _ASM_RISCV_PROCESSOR_H |
| 7 | #define _ASM_RISCV_PROCESSOR_H |
| 8 | |
| 9 | #include <linux/const.h> |
| 10 | |
| 11 | #include <asm/ptrace.h> |
| 12 | |
| 13 | /* |
| 14 | * This decides where the kernel will search for a free chunk of vm |
| 15 | * space during mmap's. |
| 16 | */ |
Alexandre Ghiti | ae662ee | 2018-12-10 06:21:46 +0000 | [diff] [blame] | 17 | #define TASK_UNMAPPED_BASE PAGE_ALIGN(TASK_SIZE / 3) |
Palmer Dabbelt | 7db91e5 | 2017-07-10 18:04:30 -0700 | [diff] [blame] | 18 | |
| 19 | #define STACK_TOP TASK_SIZE |
| 20 | #define STACK_TOP_MAX STACK_TOP |
| 21 | #define STACK_ALIGN 16 |
| 22 | |
| 23 | #ifndef __ASSEMBLY__ |
| 24 | |
| 25 | struct task_struct; |
| 26 | struct pt_regs; |
| 27 | |
Palmer Dabbelt | 7db91e5 | 2017-07-10 18:04:30 -0700 | [diff] [blame] | 28 | /* CPU-specific state of a task */ |
| 29 | struct thread_struct { |
| 30 | /* Callee-saved registers */ |
| 31 | unsigned long ra; |
| 32 | unsigned long sp; /* Kernel mode stack */ |
| 33 | unsigned long s[12]; /* s[0]: frame pointer */ |
| 34 | struct __riscv_d_ext_state fstate; |
| 35 | }; |
| 36 | |
| 37 | #define INIT_THREAD { \ |
| 38 | .sp = sizeof(init_stack) + (long)&init_stack, \ |
| 39 | } |
| 40 | |
| 41 | #define task_pt_regs(tsk) \ |
| 42 | ((struct pt_regs *)(task_stack_page(tsk) + THREAD_SIZE \ |
| 43 | - ALIGN(sizeof(struct pt_regs), STACK_ALIGN))) |
| 44 | |
Christoph Hellwig | a4c3733 | 2019-10-28 13:10:32 +0100 | [diff] [blame] | 45 | #define KSTK_EIP(tsk) (task_pt_regs(tsk)->epc) |
Palmer Dabbelt | 7db91e5 | 2017-07-10 18:04:30 -0700 | [diff] [blame] | 46 | #define KSTK_ESP(tsk) (task_pt_regs(tsk)->sp) |
| 47 | |
| 48 | |
| 49 | /* Do necessary setup to start up a newly executed thread. */ |
| 50 | extern void start_thread(struct pt_regs *regs, |
| 51 | unsigned long pc, unsigned long sp); |
| 52 | |
| 53 | /* Free all resources held by a thread. */ |
| 54 | static inline void release_thread(struct task_struct *dead_task) |
| 55 | { |
| 56 | } |
| 57 | |
| 58 | extern unsigned long get_wchan(struct task_struct *p); |
| 59 | |
| 60 | |
| 61 | static inline void cpu_relax(void) |
| 62 | { |
| 63 | #ifdef __riscv_muldiv |
| 64 | int dummy; |
| 65 | /* In lieu of a halt instruction, induce a long-latency stall. */ |
| 66 | __asm__ __volatile__ ("div %0, %0, zero" : "=r" (dummy)); |
| 67 | #endif |
| 68 | barrier(); |
| 69 | } |
| 70 | |
| 71 | static inline void wait_for_interrupt(void) |
| 72 | { |
| 73 | __asm__ __volatile__ ("wfi"); |
| 74 | } |
| 75 | |
| 76 | struct device_node; |
Palmer Dabbelt | b2f8cfa7 | 2018-10-02 12:15:00 -0700 | [diff] [blame] | 77 | int riscv_of_processor_hartid(struct device_node *node); |
Anup Patel | d175d69 | 2020-06-01 14:45:39 +0530 | [diff] [blame^] | 78 | int riscv_of_parent_hartid(struct device_node *node); |
Palmer Dabbelt | 7db91e5 | 2017-07-10 18:04:30 -0700 | [diff] [blame] | 79 | |
| 80 | extern void riscv_fill_hwcap(void); |
| 81 | |
| 82 | #endif /* __ASSEMBLY__ */ |
| 83 | |
| 84 | #endif /* _ASM_RISCV_PROCESSOR_H */ |