/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "interface.h"

/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

static DEFINE_MUTEX(mei_mutex);


/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client structure
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
		struct mei_device *dev,
		struct mei_cl *cl)
{
	struct mei_cl_cb *pos = NULL;
	struct mei_cl_cb *next = NULL;

	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
	list_for_each_entry_safe(pos, next, &dev->read_list.list, list)
		if (mei_cl_cmp_id(cl, pos->cl))
			return pos;
	return NULL;
}

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	if (!mei_pdev)
		goto out;

	dev = pci_get_drvdata(mei_pdev);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		goto out_unlock;
	}
	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		goto out_unlock;
	}

	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "client_id exceeded %d",
			MEI_CLIENTS_MAX);
		goto out_unlock;
	}

	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;
	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_release(dev, file);
		goto out;
	}
	if (cl->state == MEI_FILE_CONNECTED) {
		cl->state = MEI_FILE_DISCONNECTING;
		dev_dbg(&dev->pdev->dev,
			"disconnecting client host client = %d, "
			"ME client = %d\n",
			cl->host_client_id,
			cl->me_client_id);
		rets = mei_disconnect_host_client(dev, cl);
	}
	mei_cl_flush_queues(cl);
	dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
		cl->host_client_id,
		cl->me_client_id);

	if (dev->open_handle_count > 0) {
		clear_bit(cl->host_client_id, dev->host_clients_map);
		dev->open_handle_count--;
	}
	mei_me_cl_unlink(dev, cl);

	/* free read cb */
	cb = NULL;
	if (cl->read_cb) {
		cb = find_read_list_entry(dev, cl);
		/* Remove entry from read list */
		if (cb)
			list_del(&cb->list);

		cb = cl->read_cb;
		cl->read_cb = NULL;
	}

	file->private_data = NULL;

	if (cb) {
		mei_io_cb_free(cb);
		cb = NULL;
	}

	kfree(cl);
out:
	mutex_unlock(&dev->device_lock);
	return rets;
}


/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];
			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_read(dev, file, ubuf, length, offset);
		goto out;
	}

	if (cl->read_cb && cl->read_cb->buf_idx > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->buf_idx > 0 &&
		   cl->read_cb->buf_idx <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->buf_idx) && *offset > 0) {
		/* Offset needs to be cleaned for contiguous reads */
		*offset = 0;
		rets = 0;
		goto out;
	}

	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
			!waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
			(MEI_READ_COMPLETE == cl->reading_state ||
			 MEI_FILE_INITIALIZING == cl->state ||
			 MEI_FILE_DISCONNECTED == cl->state ||
			 MEI_FILE_DISCONNECTING == cl->state))) {
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
		cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->buf_idx - %lu\n", cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}
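
/*
 * Illustrative sketch (not part of this driver): once a client is connected
 * (see mei_ioctl() further down), user space typically receives one firmware
 * message per read() call, provided its buffer is at least as large as the
 * max_msg_length reported at connect time. Names below are hypothetical and
 * error handling is elided:
 *
 *	unsigned char buf[MAX_MSG_LEN];		// assumed >= max_msg_length
 *	ssize_t len = read(fd, buf, sizeof(buf));
 *	if (len > 0)
 *		handle_message(buf, len);	// hypothetical helper
 */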

/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto err;
	}

	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		rets = -ENODEV;
		goto err;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EMSGSIZE;
		goto err;
	}

	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_err(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id, cl->me_client_id);
		goto err;
	}
	if (cl == &dev->iamthif_cl) {
		write_cb = mei_amthif_find_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
				mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->list);
				mei_io_cb_free(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = find_read_list_entry(dev, cl);
		if (write_cb) {
			list_del(&write_cb->list);
			mei_io_cb_free(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
			cl->read_pending = 0;
		}
	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
		*offset = 0;


	write_cb = mei_io_cb_init(cl, file);
	if (!write_cb) {
		dev_err(&dev->pdev->dev, "write cb allocation failed\n");
		rets = -ENOMEM;
		goto err;
	}
	rets = mei_io_cb_alloc_req_buf(write_cb, length);
	if (rets)
		goto err;

	dev_dbg(&dev->pdev->dev, "cb request size = %zd\n", length);

	rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
	if (rets)
		goto err;

	cl->sm_state = 0;
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
		     write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	if (cl == &dev->iamthif_cl) {
		rets = mei_amthif_write(dev, write_cb);

		if (rets) {
			dev_err(&dev->pdev->dev,
				"amthif write failed with status = %d\n", rets);
			goto err;
		}
		mutex_unlock(&dev->device_lock);
		return length;
	}

	write_cb->fop_type = MEI_FOP_WRITE;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
		cl->host_client_id, cl->me_client_id);
	rets = mei_flow_ctrl_creds(dev, cl);
	if (rets < 0)
		goto err;

	if (rets == 0 || dev->mei_host_buffer_is_empty == false) {
		write_cb->buf_idx = 0;
		mei_hdr.msg_complete = 0;
		cl->writing_state = MEI_WRITING;
		goto out;
	}

	dev->mei_host_buffer_is_empty = false;
	if (length > mei_hbuf_max_data(dev)) {
		mei_hdr.length = mei_hbuf_max_data(dev);
		mei_hdr.msg_complete = 0;
	} else {
		mei_hdr.length = length;
		mei_hdr.msg_complete = 1;
	}
	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;

	dev_dbg(&dev->pdev->dev, "write " MEI_HDR_FMT "\n",
		MEI_HDR_PRM(&mei_hdr));
	if (mei_write_message(dev, &mei_hdr, write_cb->request_buffer.data)) {
		rets = -ENODEV;
		goto err;
	}
	cl->writing_state = MEI_WRITING;
	write_cb->buf_idx = mei_hdr.length;

out:
	if (mei_hdr.msg_complete) {
		if (mei_flow_ctrl_reduce(dev, cl)) {
			rets = -ENODEV;
			goto err;
		}
		list_add_tail(&write_cb->list, &dev->write_waiting_list.list);
	} else {
		list_add_tail(&write_cb->list, &dev->write_list.list);
	}

	mutex_unlock(&dev->device_lock);
	return length;

err:
	mutex_unlock(&dev->device_lock);
	mei_io_cb_free(write_cb);
	return rets;
}
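
/*
 * Illustrative sketch (not part of this driver): a message must be handed to
 * the driver in a single write() call; lengths of zero or above the connected
 * client's max_msg_length are rejected with -EMSGSIZE. Names below are
 * hypothetical:
 *
 *	if (msg_len > 0 && msg_len <= max_msg_length)
 *		ret = write(fd, msg, msg_len);	// returns msg_len on success
 */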


/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
			       GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
			   sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}
	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is ok, copying the data back to user. */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
			 sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}
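
/*
 * Illustrative sketch (not part of this driver): how user space would connect
 * to an ME client through this ioctl. SOME_CLIENT_UUID is a placeholder for
 * the uuid_le of the firmware client the application wants to reach; error
 * handling is elided:
 *
 *	struct mei_connect_client_data data = { 0 };
 *	int fd = open("/dev/mei", O_RDWR);
 *
 *	data.in_client_uuid = SOME_CLIENT_UUID;
 *	if (ioctl(fd, IOCTL_MEI_CONNECT_CLIENT, &data) == 0)
 *		max_msg_length = data.out_client_properties.max_msg_length;
 */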

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			     unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif


/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return mask;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;


	if (cl == &dev->iamthif_cl) {
		mask = mei_amthif_poll(dev, file, wait);
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	poll_wait(file, &cl->tx_wait, wait);
	mutex_lock(&dev->device_lock);
	if (MEI_WRITE_COMPLETE == cl->writing_state)
		mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};


/*
 * Misc Device Struct
 */
static struct miscdevice mei_misc_device = {
	.name = "mei",
	.fops = &mei_fops,
	.minor = MISC_DYNAMIC_MINOR,
};

/**
 * mei_quirk_probe - probe for devices that don't have a valid ME interface
 *
 * @pdev: PCI device structure
 * @ent: entry into pci_device_table
 *
 * returns true if ME Interface is valid, false otherwise
 */
static bool mei_quirk_probe(struct pci_dev *pdev,
			    const struct pci_device_id *ent)
{
	u32 reg;
	if (ent->device == MEI_DEV_ID_PBG_1) {
		pci_read_config_dword(pdev, 0x48, &reg);
		/* make sure that bit 9 is up and bit 10 is down */
		if ((reg & 0x600) == 0x200) {
			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
			return false;
		}
	}
	return true;
}

/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in mei_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_probe(struct pci_dev *pdev,
		     const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);

	if (!mei_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	if (mei_pdev) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}
	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	INIT_WORK(&dev->init_work, mei_host_client_init);

	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_pdev = pdev;
	pci_set_drvdata(pdev, dev);


	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	if (mei_pdev != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	mei_wd_stop(dev);

	mei_pdev = NULL;

	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_me_cl_unlink(dev, &dev->wd_cl);
	mei_me_cl_unlink(dev, &dev->iamthif_cl);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	misc_deregister(&mei_misc_device);
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	/* Stop watchdog if exists */
	err = mei_wd_stop(dev);
	/* Set new mei state */
	if (dev->dev_state == MEI_DEV_ENABLED ||
	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
		dev->dev_state = MEI_DEV_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->dev_state = MEI_DEV_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}

static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS (&mei_pm_ops)
#else
#define MEI_PM_OPS NULL
#endif /* CONFIG_PM */
/*
 * PCI driver structure
 */
static struct pci_driver mei_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = mei_remove,
	.shutdown = mei_remove,
	.driver.pm = MEI_PM_OPS,
};

module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");