/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"

/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

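/* serializes mei_probe() so that only one device can claim the mei_pdev singleton */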
static DEFINE_MUTEX(mei_mutex);


/**
 * mei_clear_list - removes all callbacks associated with file
 *		from mei_cb_list
 *
 * @dev: device structure.
 * @file: file structure
 * @mei_cb_list: callbacks list
 *
 * mei_clear_list is called to clear resources associated with file
 * when the application calls the close function or Ctrl-C is pressed
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_list(struct mei_device *dev,
		struct file *file, struct list_head *mei_cb_list)
{
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb_next = NULL;
	struct file *file_temp;
	bool removed = false;

	/* iterate over the list members */
	list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, cb_list) {
		file_temp = (struct file *)cb_pos->file_object;
		/* check if the list member is associated with the file */
		if (file_temp == file) {
			/* remove member from the list */
			list_del(&cb_pos->cb_list);
			/* check if cb is the current iamthif cb */
			if (dev->iamthif_current_cb == cb_pos) {
				dev->iamthif_current_cb = NULL;
				/* send flow control to iamthif client */
				mei_send_flow_control(dev, &dev->iamthif_cl);
			}
			/* free all allocated buffers */
			mei_free_cb_private(cb_pos);
			cb_pos = NULL;
			removed = true;
		}
	}
	return removed;
}

/**
 * mei_clear_lists - removes all callbacks associated with file
 *
 * @dev: device structure
 * @file: file structure
 *
 * mei_clear_lists is called to clear resources associated with file
 * when the application calls the close function or Ctrl-C is pressed
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_lists(struct mei_device *dev, struct file *file)
{
	bool removed = false;

	/* remove callbacks associated with a file */
	mei_clear_list(dev, file, &dev->amthi_cmd_list.mei_cb.cb_list);
	if (mei_clear_list(dev, file,
			   &dev->amthi_read_complete_list.mei_cb.cb_list))
		removed = true;

	mei_clear_list(dev, file, &dev->ctrl_rd_list.mei_cb.cb_list);

	if (mei_clear_list(dev, file, &dev->ctrl_wr_list.mei_cb.cb_list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_waiting_list.mei_cb.cb_list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_list.mei_cb.cb_list))
		removed = true;

	/* check if iamthif_current_cb not NULL */
	if (dev->iamthif_current_cb && !removed) {
		/* check file and iamthif current cb association */
		if (dev->iamthif_current_cb->file_object == file) {
			/* remove cb */
			mei_free_cb_private(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;
			removed = true;
		}
	}
	return removed;
}
/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
		struct mei_device *dev,
		struct mei_cl *cl)
{
	struct mei_cl_cb *pos = NULL;
	struct mei_cl_cb *next = NULL;

	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
	list_for_each_entry_safe(pos, next,
			&dev->read_list.mei_cb.cb_list, cb_list) {
		struct mei_cl *cl_temp;
		cl_temp = (struct mei_cl *)pos->file_private;

		if (mei_cl_cmp_id(cl, cl_temp))
			return pos;
	}
	return NULL;
}

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	if (!mei_pdev)
		goto out;

	dev = pci_get_drvdata(mei_pdev);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		goto out_unlock;
	}
	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		goto out_unlock;
	}

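	/*
	 * Pick the first free host client id from the bitmap; it is later
	 * used as the host address in MEI message headers for this client.
	 */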
	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "client_id exceeded %d",
			MEI_CLIENTS_MAX);
		goto out_unlock;
	}

	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;
	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl != &dev->iamthif_cl) {
		if (cl->state == MEI_FILE_CONNECTED) {
			cl->state = MEI_FILE_DISCONNECTING;
			dev_dbg(&dev->pdev->dev,
				"disconnecting client host client = %d, "
				"ME client = %d\n",
				cl->host_client_id,
				cl->me_client_id);
			rets = mei_disconnect_host_client(dev, cl);
		}
		mei_cl_flush_queues(cl);
		dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
			cl->host_client_id,
			cl->me_client_id);

		if (dev->open_handle_count > 0) {
			clear_bit(cl->host_client_id, dev->host_clients_map);
			dev->open_handle_count--;
		}
		mei_remove_client_from_file_list(dev, cl->host_client_id);

		/* free read cb */
		cb = NULL;
		if (cl->read_cb) {
			cb = find_read_list_entry(dev, cl);
			/* Remove entry from read list */
			if (cb)
				list_del(&cb->cb_list);

			cb = cl->read_cb;
			cl->read_cb = NULL;
		}

		file->private_data = NULL;

		if (cb) {
			mei_free_cb_private(cb);
			cb = NULL;
		}

		kfree(cl);
	} else {
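		/*
		 * The iamthif (AMT host interface) client is shared and is
		 * not disconnected here; only the work queued on behalf of
		 * this file is cancelled below.
		 */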
		if (dev->open_handle_count > 0)
			dev->open_handle_count--;

		if (dev->iamthif_file_object == file &&
		    dev->iamthif_state != MEI_IAMTHIF_IDLE) {

			dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
			    dev->iamthif_state);
			dev->iamthif_canceled = true;
			if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
				dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
				mei_run_next_iamthif_cmd(dev);
			}
		}

		if (mei_clear_lists(dev, file))
			dev->iamthif_state = MEI_IAMTHIF_IDLE;

	}
	mutex_unlock(&dev->device_lock);
	return rets;
}


/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];
			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		rets = amthi_read(dev, file, ubuf, length, offset);
		goto out;
	}

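	/*
	 * A previously completed read may still be attached to the client:
	 * - unread data past *offset: copy it out below,
	 * - data fully consumed: free the cb and return 0 (end of message),
	 * - no data but a stale non-zero offset: reset it for a fresh read.
	 */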
	if (cl->read_cb && cl->read_cb->buf_idx > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->buf_idx > 0 &&
		   cl->read_cb->buf_idx <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->buf_idx) && *offset > 0) {
		/* Offset needs to be cleaned for contiguous reads */
		*offset = 0;
		rets = 0;
		goto out;
	}

	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

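	/* unless O_NONBLOCK is set, block until the read completes or the
	 * client leaves the connected state */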
	if (MEI_READ_COMPLETE != cl->reading_state &&
	    !waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
			(MEI_READ_COMPLETE == cl->reading_state ||
			 MEI_FILE_INITIALIZING == cl->state ||
			 MEI_FILE_DISCONNECTED == cl->state ||
			 MEI_FILE_DISCONNECTING == cl->state))) {
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
		cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->buf_idx - %lu\n", cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->cb_list);
	mei_free_cb_private(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * returns mei_cl_cb pointer or NULL;
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
	struct mei_cl_cb *cb;
	struct mei_device *dev;

	dev = cl->dev;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->cb_list);

	cb->file_object = fp;
	cb->file_private = cl;
	cb->buf_idx = 0;
	return cb;
}


/**
 * mei_io_cb_alloc_req_buf - allocate request buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
static int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->request_buffer.data)
		return -ENOMEM;
	cb->request_buffer.size = length;
	return 0;
}
/**
 * mei_io_cb_alloc_resp_buf - allocate response buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
static int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->response_buffer.data)
		return -ENOMEM;
	cb->response_buffer.size = length;
	return 0;
}

/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		mutex_unlock(&dev->device_lock);
		return -ENODEV;
	}

	if (cl == &dev->iamthif_cl) {
		write_cb = find_amthi_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
				msecs_to_jiffies(IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->cb_list);
				mei_free_cb_private(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = find_read_list_entry(dev, cl);
		if (write_cb) {
			list_del(&write_cb->cb_list);
			mei_free_cb_private(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
			cl->read_pending = 0;
		}
	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
		*offset = 0;


	write_cb = mei_io_cb_init(cl, file);
	if (!write_cb) {
		dev_err(&dev->pdev->dev, "write cb allocation failed\n");
		rets = -ENOMEM;
		goto unlock_dev;
	}
	rets = mei_io_cb_alloc_req_buf(write_cb, length);
	if (rets)
		goto unlock_dev;

	dev_dbg(&dev->pdev->dev, "cb request size = %zd\n", length);

	rets = -EFAULT;
	if (copy_from_user(write_cb->request_buffer.data, ubuf, length))
		goto unlock_dev;

	cl->sm_state = 0;
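	/* a 4-byte watchdog "independence" message arms a one-shot flag that
	 * permits the next read() to target the watchdog client */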
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
		     write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	if (cl == &dev->iamthif_cl) {
		if (dev->dev_state != MEI_DEV_ENABLED) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		i = mei_me_cl_by_id(dev, dev->iamthif_cl.me_client_id);
		if (i < 0) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		if (length > dev->me_clients[i].props.max_msg_length ||
		    length <= 0) {
			rets = -EMSGSIZE;
			goto unlock_dev;
		}
		rets = mei_io_cb_alloc_resp_buf(write_cb, dev->iamthif_mtu);
		if (rets)
			goto unlock_dev;

		write_cb->major_file_operations = MEI_IOCTL;
		if (dev->iamthif_cl.state != MEI_FILE_CONNECTED) {
			rets = -ENODEV;
			goto unlock_dev;
		}

		if (!list_empty(&dev->amthi_cmd_list.mei_cb.cb_list) ||
		    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
			dev_dbg(&dev->pdev->dev, "amthi_state = %d\n",
				(int) dev->iamthif_state);
			dev_dbg(&dev->pdev->dev, "add amthi cb to amthi cmd waiting list\n");
			list_add_tail(&write_cb->cb_list,
				      &dev->amthi_cmd_list.mei_cb.cb_list);
			rets = length;
		} else {
			dev_dbg(&dev->pdev->dev, "call amthi write\n");
			rets = amthi_write(dev, write_cb);

			if (rets) {
				dev_dbg(&dev->pdev->dev, "amthi write failed with status = %d\n",
					rets);
				goto unlock_dev;
			}
			rets = length;
		}
		mutex_unlock(&dev->device_lock);
		return rets;
	}

	write_cb->major_file_operations = MEI_WRITE;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
		cl->host_client_id, cl->me_client_id);
	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_dbg(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id,
			cl->me_client_id);
		goto unlock_dev;
	}
	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		rets = -ENODEV;
		goto unlock_dev;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EINVAL;
		goto unlock_dev;
	}

	rets = mei_flow_ctrl_creds(dev, cl);
	if (rets < 0)
		goto unlock_dev;

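	/*
	 * If a flow control credit is available and the host buffer is
	 * empty, the first fragment is written immediately; otherwise the
	 * cb is only queued on the write list to be sent later.
	 */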
	if (rets && dev->mei_host_buffer_is_empty) {
		rets = 0;
		dev->mei_host_buffer_is_empty = false;
		if (length > mei_hbuf_max_data(dev)) {
			mei_hdr.length = mei_hbuf_max_data(dev);
			mei_hdr.msg_complete = 0;
		} else {
			mei_hdr.length = length;
			mei_hdr.msg_complete = 1;
		}
		mei_hdr.host_addr = cl->host_client_id;
		mei_hdr.me_addr = cl->me_client_id;
		mei_hdr.reserved = 0;
		dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
			*((u32 *) &mei_hdr));
		if (mei_write_message(dev, &mei_hdr,
			(unsigned char *) (write_cb->request_buffer.data),
			mei_hdr.length)) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		cl->writing_state = MEI_WRITING;
		write_cb->buf_idx = mei_hdr.length;
		if (mei_hdr.msg_complete) {
			if (mei_flow_ctrl_reduce(dev, cl)) {
				rets = -ENODEV;
				goto unlock_dev;
			}
			list_add_tail(&write_cb->cb_list,
				      &dev->write_waiting_list.mei_cb.cb_list);
		} else {
			list_add_tail(&write_cb->cb_list,
				      &dev->write_list.mei_cb.cb_list);
		}

	} else {

		write_cb->buf_idx = 0;
		cl->writing_state = MEI_WRITING;
		list_add_tail(&write_cb->cb_list,
			      &dev->write_list.mei_cb.cb_list);
	}
	mutex_unlock(&dev->device_lock);
	return length;

unlock_dev:
	mutex_unlock(&dev->device_lock);
	mei_free_cb_private(write_cb);
	return rets;
}


/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
			       GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}
	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is ok, copying the data back to user. */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			     unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif


/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return mask;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;


	if (cl == &dev->iamthif_cl) {
		mutex_unlock(&dev->device_lock);
		poll_wait(file, &dev->iamthif_cl.wait, wait);
		mutex_lock(&dev->device_lock);
		if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
		    dev->iamthif_file_object == file) {
			mask |= (POLLIN | POLLRDNORM);
			dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
			mei_run_next_iamthif_cmd(dev);
		}
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	poll_wait(file, &cl->tx_wait, wait);
	mutex_lock(&dev->device_lock);
	if (MEI_WRITE_COMPLETE == cl->writing_state)
		mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};


/*
 * Misc Device Struct
 */
static struct miscdevice mei_misc_device = {
	.name = "mei",
	.fops = &mei_fops,
	.minor = MISC_DYNAMIC_MINOR,
};

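/*
 * Typical userspace usage of the /dev/mei character device (registered in
 * mei_probe() below) looks roughly like the following minimal sketch; error
 * handling is omitted and the client UUID is a placeholder for the ME client
 * the application wants to talk to:
 *
 *	struct mei_connect_client_data data = {};
 *	int fd = open("/dev/mei", O_RDWR);
 *
 *	data.in_client_uuid = <uuid of the desired ME client>;
 *	ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data);
 *	// data.out_client_properties.max_msg_length bounds write() sizes
 *	write(fd, req, req_len);
 *	read(fd, rsp, sizeof(rsp));
 */
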
/**
 * mei_quirk_probe - probe for devices that don't have a valid ME interface
 * @pdev: PCI device structure
 * @ent: entry into pci_device_table
 *
 * returns true if ME Interface is valid, false otherwise
 */
static bool __devinit mei_quirk_probe(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	u32 reg;
	if (ent->device == MEI_DEV_ID_PBG_1) {
		pci_read_config_dword(pdev, 0x48, &reg);
		/* make sure that bit 9 is up and bit 10 is down */
		if ((reg & 0x600) == 0x200) {
			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
			return false;
		}
	}
	return true;
}
/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
			       const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);

	if (!mei_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	if (mei_pdev) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
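	/* with MSI the vector is exclusively ours, so only the threaded
	 * handler is installed (IRQF_ONESHOT); on a shared legacy line the
	 * quick handler runs first so interrupts from other devices on the
	 * line can be filtered out */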
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}
	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_pdev = pdev;
	pci_set_drvdata(pdev, dev);


	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void __devexit mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	if (mei_pdev != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	mei_wd_stop(dev);

	mei_pdev = NULL;

	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
	mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	misc_deregister(&mei_misc_device);
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	/* Stop watchdog if exists */
	err = mei_wd_stop(dev);
	/* Set new mei state */
	if (dev->dev_state == MEI_DEV_ENABLED ||
	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
		dev->dev_state = MEI_DEV_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->dev_state = MEI_DEV_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}
static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS	(&mei_pm_ops)
#else
#define MEI_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 *  PCI driver structure
 */
static struct pci_driver mei_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = __devexit_p(mei_remove),
	.shutdown = __devexit_p(mei_remove),
	.driver.pm = MEI_PM_OPS,
};

module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");