/*
 * Copyright 2014 IBM Corp.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/workqueue.h>
#include <linux/sched.h>
#include <linux/pid.h>
#include <linux/mm.h>
#include <linux/moduleparam.h>

#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "cxl" "."
#include <asm/current.h>
#include <asm/copro.h>
#include <asm/mmu.h>

#include "cxl.h"

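/*
 * Find an invalid entry in the given eight-entry segment table group, or
 * select one to cast out round-robin if every entry is valid.
 */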
static struct cxl_sste *find_free_sste(struct cxl_sste *primary_group,
                                       unsigned int *lru)
{
        unsigned int entry;
        struct cxl_sste *sste, *group = primary_group;

        for (entry = 0; entry < 8; entry++) {
                sste = group + entry;
                if (!(be64_to_cpu(sste->esid_data) & SLB_ESID_V))
                        return sste;
        }
        /* Nothing free, select an entry to cast out */
        sste = primary_group + *lru;
        *lru = (*lru + 1) & 0x7;

        return sste;
}

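/*
 * Hash the ESID into the context's segment table, claim (or cast out) an
 * entry in that group and fill in the VSID/ESID data under the SST lock.
 */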
static void cxl_load_segment(struct cxl_context *ctx, struct copro_slb *slb)
{
        /* mask is the group index, we search primary and secondary here. */
        unsigned int mask = (ctx->sst_size >> 7) - 1; /* SSTP0[SegTableSize] */
        struct cxl_sste *sste;
        unsigned int hash;
        unsigned long flags;

        if (slb->vsid & SLB_VSID_B_1T)
                hash = (slb->esid >> SID_SHIFT_1T) & mask;
        else /* 256M */
                hash = (slb->esid >> SID_SHIFT) & mask;

        spin_lock_irqsave(&ctx->sste_lock, flags);
        sste = find_free_sste(ctx->sstp + (hash << 3), &ctx->sst_lru);

        pr_devel("CXL Populating SST[%li]: %#llx %#llx\n",
                 sste - ctx->sstp, slb->vsid, slb->esid);

        sste->vsid_data = cpu_to_be64(slb->vsid);
        sste->esid_data = cpu_to_be64(slb->esid);
        spin_unlock_irqrestore(&ctx->sste_lock, flags);
}

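/* Calculate the SLB for an effective address and load it into the SST. */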
static int cxl_fault_segment(struct cxl_context *ctx, struct mm_struct *mm,
                             u64 ea)
{
        struct copro_slb slb = {0, 0};
        int rc;

        if (!(rc = copro_calculate_slb(mm, ea, &slb)))
                cxl_load_segment(ctx, &slb);

        return rc;
}

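/*
 * Ack the translation fault with an address error and record the fault
 * details on the context so anyone waiting on it can be woken.
 */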
static void cxl_ack_ae(struct cxl_context *ctx)
{
        unsigned long flags;

        cxl_ack_irq(ctx, CXL_PSL_TFC_An_AE, 0);

        spin_lock_irqsave(&ctx->lock, flags);
        ctx->pending_fault = true;
        ctx->fault_addr = ctx->dar;
        ctx->fault_dsisr = ctx->dsisr;
        spin_unlock_irqrestore(&ctx->lock, flags);

        wake_up_all(&ctx->wq);
}

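/* Resolve a segment miss, or ack with an address error if that fails. */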
static int cxl_handle_segment_miss(struct cxl_context *ctx,
                                   struct mm_struct *mm, u64 ea)
{
        int rc;

        pr_devel("CXL interrupt: Segment fault pe: %i ea: %#llx\n", ctx->pe, ea);

        if ((rc = cxl_fault_segment(ctx, mm, ea)))
                cxl_ack_ae(ctx);
        else {
                mb(); /* Order seg table write to TFC MMIO write */
                cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
        }

        return IRQ_HANDLED;
}

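/*
 * Resolve a page fault on behalf of the AFU: fault the page in through the
 * mm, preload the hash page table entry and restart the translation, or ack
 * with an address error if the fault cannot be handled.
 */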
static void cxl_handle_page_fault(struct cxl_context *ctx,
                                  struct mm_struct *mm, u64 dsisr, u64 dar)
{
        unsigned flt = 0;
        int result;
        unsigned long access, flags;

        if ((result = copro_handle_mm_fault(mm, dar, dsisr, &flt))) {
                pr_devel("copro_handle_mm_fault failed: %#x\n", result);
                return cxl_ack_ae(ctx);
        }

        /*
         * update_mmu_cache() will not have loaded the hash since current->trap
         * is not a 0x400 or 0x300, so just call hash_page_mm() here.
         */
        access = _PAGE_PRESENT;
        if (dsisr & CXL_PSL_DSISR_An_S)
                access |= _PAGE_RW;
        if ((!ctx->kernel) || !(dar & (1ULL << 63)))
                access |= _PAGE_USER;
        local_irq_save(flags);
        hash_page_mm(mm, dar, access, 0x300);
        local_irq_restore(flags);

        pr_devel("Page fault successfully handled for pe: %i!\n", ctx->pe);
        cxl_ack_irq(ctx, CXL_PSL_TFC_An_R, 0);
}

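/*
 * Bottom half of the translation fault handling, run from the context's
 * fault_work workqueue: re-check that the fault registers still match what
 * was recorded, take a reference on the task's mm and dispatch to the
 * segment miss or page fault handlers.
 */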
void cxl_handle_fault(struct work_struct *fault_work)
{
        struct cxl_context *ctx =
                container_of(fault_work, struct cxl_context, fault_work);
        u64 dsisr = ctx->dsisr;
        u64 dar = ctx->dar;
        struct task_struct *task;
        struct mm_struct *mm;

        if (cxl_p2n_read(ctx->afu, CXL_PSL_DSISR_An) != dsisr ||
            cxl_p2n_read(ctx->afu, CXL_PSL_DAR_An) != dar ||
            cxl_p2n_read(ctx->afu, CXL_PSL_PEHandle_An) != ctx->pe) {
                /*
                 * Most likely explanation is harmless - a dedicated process
                 * has detached and these were cleared by the PSL purge, but
                 * warn about it just in case.
                 */
                dev_notice(&ctx->afu->dev, "cxl_handle_fault: Translation fault regs changed\n");
                return;
        }

        pr_devel("CXL BOTTOM HALF handling fault for afu pe: %i. "
                 "DSISR: %#llx DAR: %#llx\n", ctx->pe, dsisr, dar);

        if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
                pr_devel("cxl_handle_fault unable to get task %i\n",
                         pid_nr(ctx->pid));
                cxl_ack_ae(ctx);
                return;
        }
        if (!(mm = get_task_mm(task))) {
                pr_devel("cxl_handle_fault unable to get mm %i\n",
                         pid_nr(ctx->pid));
                cxl_ack_ae(ctx);
                goto out;
        }

        if (dsisr & CXL_PSL_DSISR_An_DS)
                cxl_handle_segment_miss(ctx, mm, dar);
        else if (dsisr & CXL_PSL_DSISR_An_DM)
                cxl_handle_page_fault(ctx, mm, dsisr, dar);
        else
                WARN(1, "cxl_handle_fault has nothing to handle\n");

        mmput(mm);
out:
        put_task_struct(task);
}

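/* Pre-load the segment covering a single effective address (eg. the WED). */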
static void cxl_prefault_one(struct cxl_context *ctx, u64 ea)
{
        int rc;
        struct task_struct *task;
        struct mm_struct *mm;

        if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
                pr_devel("cxl_prefault_one unable to get task %i\n",
                         pid_nr(ctx->pid));
                return;
        }
        if (!(mm = get_task_mm(task))) {
                pr_devel("cxl_prefault_one unable to get mm %i\n",
                         pid_nr(ctx->pid));
                put_task_struct(task);
                return;
        }

        rc = cxl_fault_segment(ctx, mm, ea);

        mmput(mm);
        put_task_struct(task);
}

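/* Round ea up to the start of the next 256M or 1T segment. */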
static u64 next_segment(u64 ea, u64 vsid)
{
        if (vsid & SLB_VSID_B_1T)
                ea |= (1ULL << 40) - 1;
        else
                ea |= (1ULL << 28) - 1;

        return ea + 1;
}

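/*
 * Walk every VMA in the task's address space and load a segment table entry
 * for each segment it touches, skipping repeats of the last ESID seen.
 */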
static void cxl_prefault_vma(struct cxl_context *ctx)
{
        u64 ea, last_esid = 0;
        struct copro_slb slb = {0, 0};
        struct vm_area_struct *vma;
        int rc;
        struct task_struct *task;
        struct mm_struct *mm;

        if (!(task = get_pid_task(ctx->pid, PIDTYPE_PID))) {
                pr_devel("cxl_prefault_vma unable to get task %i\n",
                         pid_nr(ctx->pid));
                return;
        }
        if (!(mm = get_task_mm(task))) {
                pr_devel("cxl_prefault_vma unable to get mm %i\n",
                         pid_nr(ctx->pid));
                goto out1;
        }

        down_read(&mm->mmap_sem);
        for (vma = mm->mmap; vma; vma = vma->vm_next) {
                for (ea = vma->vm_start; ea < vma->vm_end;
                     ea = next_segment(ea, slb.vsid)) {
                        rc = copro_calculate_slb(mm, ea, &slb);
                        if (rc)
                                continue;

                        if (last_esid == slb.esid)
                                continue;

                        cxl_load_segment(ctx, &slb);
                        last_esid = slb.esid;
                }
        }
        up_read(&mm->mmap_sem);

        mmput(mm);
out1:
        put_task_struct(task);
}

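/*
 * Optionally pre-load segment table entries for a context, depending on the
 * AFU's prefault_mode: just the segment containing the WED, or every segment
 * mapped by the task.
 */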
void cxl_prefault(struct cxl_context *ctx, u64 wed)
{
        switch (ctx->afu->prefault_mode) {
        case CXL_PREFAULT_WED:
                cxl_prefault_one(ctx, wed);
                break;
        case CXL_PREFAULT_ALL:
                cxl_prefault_vma(ctx);
                break;
        default:
                break;
        }
}