/*
 * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com)
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * vineetg: March 2009
 * -Implemented task_pt_regs( )
 *
 * Amit Bhor, Sameer Dhavale, Ashwin Chaugule: Codito Technologies 2004
 */

#ifndef __ASM_ARC_PROCESSOR_H
#define __ASM_ARC_PROCESSOR_H

#ifdef __KERNEL__

#ifndef __ASSEMBLY__

#include <asm/arcregs.h>	/* for STATUS_E1_MASK et al. */
#include <asm/ptrace.h>

/* Arch specific stuff which needs to be saved per task.
 * However, these items are not important enough to earn a place in
 * struct thread_info
 */
struct thread_struct {
	unsigned long ksp;	/* kernel mode stack pointer */
	unsigned long callee_reg;	/* pointer to callee regs */
	unsigned long fault_address;	/* doubles as breakpoint holder as well */
	unsigned long cause_code;	/* Exception Cause Code (ECR) */
#ifdef CONFIG_ARC_CURR_IN_REG
	unsigned long user_r25;
#endif
#ifdef CONFIG_ARC_FPU_SAVE_RESTORE
	struct arc_fpu fpu;
#endif
};

#define INIT_THREAD  {				\
	.ksp = sizeof(init_stack) + (unsigned long) init_stack, \
}

/* Forward declaration, a strange C thing */
struct task_struct;

/*
 * Return saved PC of a blocked thread.
 */
unsigned long thread_saved_pc(struct task_struct *t);

#define task_pt_regs(p) \
	((struct pt_regs *)(THREAD_SIZE + (void *)task_stack_page(p)) - 1)
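
/*
 * Illustrative note (a sketch, not an additional API): the user-mode pt_regs
 * are saved at the very top of the task's kernel-mode stack, so the macro
 * above simply backs up one struct pt_regs from the THREAD_SIZE boundary.
 * A blocked task's saved user return address can then be read as:
 *
 *	unsigned long user_pc = task_pt_regs(tsk)->ret;
 *
 * which is exactly what KSTK_EIP() below does.
 */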

/* Free all resources held by a thread. */
#define release_thread(thread) do { } while (0)

/* Prepare to copy thread state - unlazy all lazy status */
#define prepare_to_copy(tsk)    do { } while (0)

/*
 * Many SMP busy-wait loops spin on non-volatile data; the compiler barrier
 * below keeps them from being optimised away by gcc.
 */
#ifdef CONFIG_SMP
#define cpu_relax()	__asm__ __volatile__ ("" : : : "memory")
#else
#define cpu_relax()	do { } while (0)
#endif
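
/*
 * Illustrative example (not part of this header): a typical wait loop that
 * relies on the "memory" clobber above so that flag is re-read on each pass:
 *
 *	while (!flag)
 *		cpu_relax();
 */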

#define copy_segments(tsk, mm)      do { } while (0)
#define release_segments(mm)        do { } while (0)

#define KSTK_EIP(tsk)   (task_pt_regs(tsk)->ret)

/*
 * Whereabouts of a task's sp, fp, blink when it was last seen in kernel mode.
 * See process.c for details of the kernel stack layout.
 */
#define KSTK_ESP(tsk)   (tsk->thread.ksp)

#define KSTK_REG(tsk, off)	(*((unsigned int *)(KSTK_ESP(tsk) + \
					sizeof(struct callee_regs) + off)))

#define KSTK_BLINK(tsk) KSTK_REG(tsk, 4)
#define KSTK_FP(tsk)    KSTK_REG(tsk, 0)
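
/*
 * A rough sketch of the slice of a sleeping task's kernel stack implied by
 * the accessors above (process.c has the authoritative layout):
 *
 *	thread.ksp                             -> saved struct callee_regs
 *	thread.ksp + sizeof(callee_regs) + 0   -> saved fp     (KSTK_FP)
 *	thread.ksp + sizeof(callee_regs) + 4   -> saved blink  (KSTK_BLINK)
 */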

/*
 * Do necessary setup to start up a newly executed thread.
 *
 * E1, E2 set so that interrupts are enabled in user mode
 * L set, so zero-overhead loops are inhibited to begin with
 * lp_start and lp_end seeded with bogus non-zero values to easily catch
 * the ARC700 "sr to lp_start" hardware bug
 */
#define start_thread(_regs, _pc, _usp)				\
do {								\
	set_fs(USER_DS); /* reads from user space */		\
	(_regs)->ret = (_pc);					\
	/* Interrupts enabled in User Mode */			\
	(_regs)->status32 = STATUS_U_MASK | STATUS_L_MASK	\
		| STATUS_E1_MASK | STATUS_E2_MASK;		\
	(_regs)->sp = (_usp);					\
	/* bogus seed values for debugging */			\
	(_regs)->lp_start = 0x10;				\
	(_regs)->lp_end = 0x80;					\
} while (0)

extern unsigned int get_wchan(struct task_struct *p);

/*
 * Default implementation of macro that returns current
 * instruction pointer ("program counter").
 * Should the PC register be read instead? This macro does not seem to
 * be used in many places, so this won't be all that bad.
 */
#define current_text_addr() ({ __label__ _l; _l: &&_l; })
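
/*
 * Purely illustrative usage (not from this file): the gcc local-label trick
 * above yields the address of the expression itself, e.g.
 *
 *	pr_debug("executing near %p\n", current_text_addr());
 */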

#endif /* !__ASSEMBLY__ */

/* Kernel's virtual memory area.
 * Unlike other architectures (MIPS, sh, cris), ARC 700 does not have a
 * "kernel translated" region (like KSEG2 in MIPS). So we use an upper part
 * of the translated bottom 2GB for kernel virtual memory and protect
 * these pages from user accesses by disabling the Ru, Eu and Wu permissions.
 */
#define VMALLOC_SIZE	(0x10000000)	/* 256M */
#define VMALLOC_START	(PAGE_OFFSET - VMALLOC_SIZE)
#define VMALLOC_END	(PAGE_OFFSET)

/* Most architectures seem to keep some kind of padding between the userspace
 * TASK_SIZE and PAGE_OFFSET, i.e. TASK_SIZE != PAGE_OFFSET.
 */
#define USER_KERNEL_GUTTER    0x10000000

/* User address space:
 * On ARC700, the CPU allows the entire lower half of the 32-bit address space
 * to be translated, so potentially 2G (0:0x7FFF_FFFF) could be user vaddr
 * space. However we steal 256M for kernel vaddr (0x7000_0000:0x7FFF_FFFF) and
 * another 256M (0x6000_0000:0x6FFF_FFFF) is a gutter between the user and
 * kernel spaces. Thus total user vaddr space is (0:0x5FFF_FFFF).
 */
#define TASK_SIZE	(PAGE_OFFSET - VMALLOC_SIZE - USER_KERNEL_GUTTER)

#define STACK_TOP       TASK_SIZE
#define STACK_TOP_MAX   STACK_TOP
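
/*
 * Putting the numbers above together, and assuming the default PAGE_OFFSET
 * of 0x8000_0000 (a configuration assumption, not mandated by this header),
 * the virtual address map works out to:
 *
 *	0x8000_0000 .. 0xFFFF_FFFF : untranslated kernel space (PAGE_OFFSET up)
 *	0x7000_0000 .. 0x7FFF_FFFF : VMALLOC region (256M)
 *	0x6000_0000 .. 0x6FFF_FFFF : user/kernel gutter (256M)
 *	0x0000_0000 .. 0x5FFF_FFFF : user address space (TASK_SIZE, STACK_TOP)
 */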

/* This decides where the kernel will search for a free chunk of vm
 * space during mmap's.
 */
#define TASK_UNMAPPED_BASE      (TASK_SIZE / 3)
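
/*
 * With the 0x6000_0000 TASK_SIZE implied above, this evaluates to
 * 0x2000_0000, i.e. mmap() searches begin 512M into the user address space
 * (again assuming the default PAGE_OFFSET; purely an illustrative note).
 */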

#endif /* __KERNEL__ */

#endif /* __ASM_ARC_PROCESSOR_H */