/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>
#include <linux/fs.h>
#include <linux/errno.h>
#include <linux/types.h>
#include <linux/fcntl.h>
#include <linux/aio.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/init.h>
#include <linux/ioctl.h>
#include <linux/cdev.h>
#include <linux/sched.h>
#include <linux/uuid.h>
#include <linux/compat.h>
#include <linux/jiffies.h>
#include <linux/interrupt.h>
#include <linux/miscdevice.h>

#include "mei_dev.h"
#include <linux/mei.h>
#include "interface.h"

/* AMT device is a singleton on the platform */
static struct pci_dev *mei_pdev;

/* mei_pci_tbl - PCI Device ID Table */
static DEFINE_PCI_DEVICE_TABLE(mei_pci_tbl) = {
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82946GZ)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82Q965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82G965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GM965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_82GME965)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q35)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82G33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82Q33)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_82X38)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_3200)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_6)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_7)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_8)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_9)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9_10)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH9M_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_ICH10_4)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_IBXPK_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_CPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PBG_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_1)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_2)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_PPT_3)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT)},
	{PCI_DEVICE(PCI_VENDOR_ID_INTEL, MEI_DEV_ID_LPT_LP)},

	/* required last entry */
	{0, }
};

MODULE_DEVICE_TABLE(pci, mei_pci_tbl);

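/* serializes mei_probe and guards the mei_pdev singleton assignment */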
static DEFINE_MUTEX(mei_mutex);


/**
 * mei_clear_list - removes all callbacks associated with file
 *		from mei_cb_list
 *
 * @dev: device structure.
 * @file: file structure
 * @mei_cb_list: callbacks list
 *
 * mei_clear_list is called to clear resources associated with file
 * when application calls close function or Ctrl-C was pressed
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_list(struct mei_device *dev,
		struct file *file, struct list_head *mei_cb_list)
{
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb_next = NULL;
	struct file *file_temp;
	bool removed = false;

	/* walk all list members */
	list_for_each_entry_safe(cb_pos, cb_next, mei_cb_list, list) {
		file_temp = (struct file *)cb_pos->file_object;
		/* check if list member is associated with the file */
		if (file_temp == file) {
			/* remove member from the list */
			list_del(&cb_pos->list);
			/* check if cb is equal to the current iamthif cb */
			if (dev->iamthif_current_cb == cb_pos) {
				dev->iamthif_current_cb = NULL;
				/* send flow control to iamthif client */
				mei_send_flow_control(dev, &dev->iamthif_cl);
			}
			/* free all allocated buffers */
			mei_io_cb_free(cb_pos);
			cb_pos = NULL;
			removed = true;
		}
	}
	return removed;
}

/**
 * mei_clear_lists - removes all callbacks associated with file
 *
 * @dev: device structure
 * @file: file structure
 *
 * mei_clear_lists is called to clear resources associated with file
 * when application calls close function or Ctrl-C was pressed
 *
 * returns true if callback removed from the list, false otherwise
 */
static bool mei_clear_lists(struct mei_device *dev, struct file *file)
{
	bool removed = false;

	/* remove callbacks associated with a file */
	mei_clear_list(dev, file, &dev->amthi_cmd_list.list);
	if (mei_clear_list(dev, file,
			   &dev->amthi_read_complete_list.list))
		removed = true;

	mei_clear_list(dev, file, &dev->ctrl_rd_list.list);

	if (mei_clear_list(dev, file, &dev->ctrl_wr_list.list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_waiting_list.list))
		removed = true;

	if (mei_clear_list(dev, file, &dev->write_list.list))
		removed = true;

	/* check if iamthif_current_cb is not NULL */
	if (dev->iamthif_current_cb && !removed) {
		/* check file and iamthif current cb association */
		if (dev->iamthif_current_cb->file_object == file) {
			/* remove cb */
			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;
			removed = true;
		}
	}
	return removed;
}
/**
 * find_read_list_entry - find read list entry
 *
 * @dev: device structure
 * @cl: host client structure
 *
 * returns cb on success, NULL on error
 */
static struct mei_cl_cb *find_read_list_entry(
		struct mei_device *dev,
		struct mei_cl *cl)
{
	struct mei_cl_cb *pos = NULL;
	struct mei_cl_cb *next = NULL;

	dev_dbg(&dev->pdev->dev, "remove read_list CB\n");
	list_for_each_entry_safe(pos, next, &dev->read_list.list, list) {
		struct mei_cl *cl_temp;
		cl_temp = (struct mei_cl *)pos->file_private;

		if (mei_cl_cmp_id(cl, cl_temp))
			return pos;
	}
	return NULL;
}

/**
 * mei_open - the open function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_open(struct inode *inode, struct file *file)
{
	struct mei_cl *cl;
	struct mei_device *dev;
	unsigned long cl_id;
	int err;

	err = -ENODEV;
	if (!mei_pdev)
		goto out;

	dev = pci_get_drvdata(mei_pdev);
	if (!dev)
		goto out;

	mutex_lock(&dev->device_lock);
	err = -ENOMEM;
	cl = mei_cl_allocate(dev);
	if (!cl)
		goto out_unlock;

	err = -ENODEV;
	if (dev->dev_state != MEI_DEV_ENABLED) {
		dev_dbg(&dev->pdev->dev, "dev_state != MEI_ENABLED dev_state = %s\n",
			mei_dev_state_str(dev->dev_state));
		goto out_unlock;
	}
	err = -EMFILE;
	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(&dev->pdev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		goto out_unlock;
	}

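	/* pick the lowest free host client id from the clients bitmap */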
	cl_id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (cl_id >= MEI_CLIENTS_MAX) {
		dev_err(&dev->pdev->dev, "client_id exceeded %d",
			MEI_CLIENTS_MAX);
		goto out_unlock;
	}

	cl->host_client_id = cl_id;

	dev_dbg(&dev->pdev->dev, "client_id = %d\n", cl->host_client_id);

	dev->open_handle_count++;

	list_add_tail(&cl->link, &dev->file_list);

	set_bit(cl->host_client_id, dev->host_clients_map);
	cl->state = MEI_FILE_INITIALIZING;
	cl->sm_state = 0;

	file->private_data = cl;
	mutex_unlock(&dev->device_lock);

	return nonseekable_open(inode, file);

out_unlock:
	mutex_unlock(&dev->device_lock);
	kfree(cl);
out:
	return err;
}

/**
 * mei_release - the release function
 *
 * @inode: pointer to inode structure
 * @file: pointer to file structure
 *
 * returns 0 on success, <0 on error
 */
static int mei_release(struct inode *inode, struct file *file)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb;
	struct mei_device *dev;
	int rets = 0;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (cl != &dev->iamthif_cl) {
		if (cl->state == MEI_FILE_CONNECTED) {
			cl->state = MEI_FILE_DISCONNECTING;
			dev_dbg(&dev->pdev->dev,
				"disconnecting client host client = %d, "
				"ME client = %d\n",
				cl->host_client_id,
				cl->me_client_id);
			rets = mei_disconnect_host_client(dev, cl);
		}
		mei_cl_flush_queues(cl);
		dev_dbg(&dev->pdev->dev, "remove client host client = %d, ME client = %d\n",
			cl->host_client_id,
			cl->me_client_id);

		if (dev->open_handle_count > 0) {
			clear_bit(cl->host_client_id, dev->host_clients_map);
			dev->open_handle_count--;
		}
		mei_remove_client_from_file_list(dev, cl->host_client_id);

		/* free read cb */
		cb = NULL;
		if (cl->read_cb) {
			cb = find_read_list_entry(dev, cl);
			/* Remove entry from read list */
			if (cb)
				list_del(&cb->list);

			cb = cl->read_cb;
			cl->read_cb = NULL;
		}

		file->private_data = NULL;

		if (cb) {
			mei_io_cb_free(cb);
			cb = NULL;
		}

		kfree(cl);
	} else {
		if (dev->open_handle_count > 0)
			dev->open_handle_count--;

		if (dev->iamthif_file_object == file &&
		    dev->iamthif_state != MEI_IAMTHIF_IDLE) {

			dev_dbg(&dev->pdev->dev, "amthi canceled iamthif state %d\n",
				dev->iamthif_state);
			dev->iamthif_canceled = true;
			if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE) {
				dev_dbg(&dev->pdev->dev, "run next amthi iamthif cb\n");
				mei_run_next_iamthif_cmd(dev);
			}
		}

		if (mei_clear_lists(dev, file))
			dev->iamthif_state = MEI_IAMTHIF_IDLE;

	}
	mutex_unlock(&dev->device_lock);
	return rets;
}


/**
 * mei_read - the read function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_read(struct file *file, char __user *ubuf,
			size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb = NULL;
	struct mei_device *dev;
	int i;
	int rets;
	int err;


	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	if ((cl->sm_state & MEI_WD_STATE_INDEPENDENCE_MSG_SENT) == 0) {
		/* Do not allow to read watchdog client */
		i = mei_me_cl_by_uuid(dev, &mei_wd_guid);
		if (i >= 0) {
			struct mei_me_client *me_client = &dev->me_clients[i];
			if (cl->me_client_id == me_client->client_id) {
				rets = -EBADF;
				goto out;
			}
		}
	} else {
		cl->sm_state &= ~MEI_WD_STATE_INDEPENDENCE_MSG_SENT;
	}

	if (cl == &dev->iamthif_cl) {
		rets = amthi_read(dev, file, ubuf, length, offset);
		goto out;
	}

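	/*
	 * A completed read callback is copied out starting at *offset;
	 * an offset at or past the end of the buffer frees the callback,
	 * and a stale offset with no buffered data is reset to zero.
	 */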
	if (cl->read_cb && cl->read_cb->buf_idx > *offset) {
		cb = cl->read_cb;
		goto copy_buffer;
	} else if (cl->read_cb && cl->read_cb->buf_idx > 0 &&
		   cl->read_cb->buf_idx <= *offset) {
		cb = cl->read_cb;
		rets = 0;
		goto free;
	} else if ((!cl->read_cb || !cl->read_cb->buf_idx) && *offset > 0) {
		/* Offset needs to be cleaned for contiguous reads */
		*offset = 0;
		rets = 0;
		goto out;
	}

	err = mei_start_read(dev, cl);
	if (err && err != -EBUSY) {
		dev_dbg(&dev->pdev->dev,
			"mei start read failure with status = %d\n", err);
		rets = err;
		goto out;
	}

	if (MEI_READ_COMPLETE != cl->reading_state &&
	    !waitqueue_active(&cl->rx_wait)) {
		if (file->f_flags & O_NONBLOCK) {
			rets = -EAGAIN;
			goto out;
		}

		mutex_unlock(&dev->device_lock);

		if (wait_event_interruptible(cl->rx_wait,
			(MEI_READ_COMPLETE == cl->reading_state ||
			 MEI_FILE_INITIALIZING == cl->state ||
			 MEI_FILE_DISCONNECTED == cl->state ||
			 MEI_FILE_DISCONNECTING == cl->state))) {
			if (signal_pending(current))
				return -EINTR;
			return -ERESTARTSYS;
		}

		mutex_lock(&dev->device_lock);
		if (MEI_FILE_INITIALIZING == cl->state ||
		    MEI_FILE_DISCONNECTED == cl->state ||
		    MEI_FILE_DISCONNECTING == cl->state) {
			rets = -EBUSY;
			goto out;
		}
	}

	cb = cl->read_cb;

	if (!cb) {
		rets = -ENODEV;
		goto out;
	}
	if (cl->reading_state != MEI_READ_COMPLETE) {
		rets = 0;
		goto out;
	}
	/* now copy the data to user space */
copy_buffer:
	dev_dbg(&dev->pdev->dev, "cb->response_buffer size - %d\n",
		cb->response_buffer.size);
	dev_dbg(&dev->pdev->dev, "cb->buf_idx - %lu\n", cb->buf_idx);
	if (length == 0 || ubuf == NULL || *offset > cb->buf_idx) {
		rets = -EMSGSIZE;
		goto free;
	}

	/* length is being truncated to PAGE_SIZE,
	 * however buf_idx may point beyond that */
	length = min_t(size_t, length, cb->buf_idx - *offset);

	if (copy_to_user(ubuf, cb->response_buffer.data + *offset, length)) {
		rets = -EFAULT;
		goto free;
	}

	rets = length;
	*offset += length;
	if ((unsigned long)*offset < cb->buf_idx)
		goto out;

free:
	cb_pos = find_read_list_entry(dev, cl);
	/* Remove entry from read list */
	if (cb_pos)
		list_del(&cb_pos->list);
	mei_io_cb_free(cb);
	cl->reading_state = MEI_IDLE;
	cl->read_cb = NULL;
	cl->read_pending = 0;
out:
	dev_dbg(&dev->pdev->dev, "end mei read rets= %d\n", rets);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @fp: pointer to file structure
 *
 * returns mei_cl_cb pointer or NULL;
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl, struct file *fp)
{
	struct mei_cl_cb *cb;
	struct mei_device *dev;

	dev = cl->dev;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	mei_io_list_init(cb);

	cb->file_object = fp;
	cb->file_private = cl;
	cb->buf_idx = 0;
	return cb;
}


/**
 * mei_io_cb_alloc_req_buf - allocate request buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
static int mei_io_cb_alloc_req_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->request_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->request_buffer.data)
		return -ENOMEM;
	cb->request_buffer.size = length;
	return 0;
}
/**
 * mei_io_cb_alloc_resp_buf - allocate response buffer
 *
 * @cb: io callback structure
 * @length: size of the buffer
 *
 * returns 0 on success
 *         -EINVAL if cb is NULL
 *         -ENOMEM if allocation failed
 */
static int mei_io_cb_alloc_resp_buf(struct mei_cl_cb *cb, size_t length)
{
	if (!cb)
		return -EINVAL;

	if (length == 0)
		return 0;

	cb->response_buffer.data = kmalloc(length, GFP_KERNEL);
	if (!cb->response_buffer.data)
		return -ENOMEM;
	cb->response_buffer.size = length;
	return 0;
}

/**
 * mei_write - the write function.
 *
 * @file: pointer to file structure
 * @ubuf: pointer to user buffer
 * @length: buffer length
 * @offset: data offset in buffer
 *
 * returns >=0 data length on success, <0 on error
 */
static ssize_t mei_write(struct file *file, const char __user *ubuf,
			 size_t length, loff_t *offset)
{
	struct mei_cl *cl = file->private_data;
	struct mei_cl_cb *write_cb = NULL;
	struct mei_msg_hdr mei_hdr;
	struct mei_device *dev;
	unsigned long timeout = 0;
	int rets;
	int i;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto unlock_dev;
	}

	i = mei_me_cl_by_id(dev, cl->me_client_id);
	if (i < 0) {
		rets = -ENODEV;
		goto unlock_dev;
	}
	if (length > dev->me_clients[i].props.max_msg_length || length <= 0) {
		rets = -EMSGSIZE;
		goto unlock_dev;
	}

	if (cl->state != MEI_FILE_CONNECTED) {
		rets = -ENODEV;
		dev_err(&dev->pdev->dev, "host client = %d, is not connected to ME client = %d",
			cl->host_client_id, cl->me_client_id);
		goto unlock_dev;
	}
	if (cl == &dev->iamthif_cl) {
		write_cb = find_amthi_read_list_entry(dev, file);

		if (write_cb) {
			timeout = write_cb->read_time +
				msecs_to_jiffies(IAMTHIF_READ_TIMER);

			if (time_after(jiffies, timeout) ||
			    cl->reading_state == MEI_READ_COMPLETE) {
				*offset = 0;
				list_del(&write_cb->list);
				mei_io_cb_free(write_cb);
				write_cb = NULL;
			}
		}
	}

	/* free entry used in read */
	if (cl->reading_state == MEI_READ_COMPLETE) {
		*offset = 0;
		write_cb = find_read_list_entry(dev, cl);
		if (write_cb) {
			list_del(&write_cb->list);
			mei_io_cb_free(write_cb);
			write_cb = NULL;
			cl->reading_state = MEI_IDLE;
			cl->read_cb = NULL;
			cl->read_pending = 0;
		}
	} else if (cl->reading_state == MEI_IDLE && !cl->read_pending)
		*offset = 0;


	write_cb = mei_io_cb_init(cl, file);
	if (!write_cb) {
		dev_err(&dev->pdev->dev, "write cb allocation failed\n");
		rets = -ENOMEM;
		goto unlock_dev;
	}
	rets = mei_io_cb_alloc_req_buf(write_cb, length);
	if (rets)
		goto unlock_dev;

	dev_dbg(&dev->pdev->dev, "cb request size = %zd\n", length);

	rets = copy_from_user(write_cb->request_buffer.data, ubuf, length);
	if (rets)
		goto unlock_dev;

	cl->sm_state = 0;
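	/*
	 * A 4-byte message matching one of the watchdog state-independence
	 * messages flags this client, which permits the otherwise blocked
	 * read from the watchdog client in mei_read().
	 */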
	if (length == 4 &&
	    ((memcmp(mei_wd_state_independence_msg[0],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[1],
		     write_cb->request_buffer.data, 4) == 0) ||
	     (memcmp(mei_wd_state_independence_msg[2],
		     write_cb->request_buffer.data, 4) == 0)))
		cl->sm_state |= MEI_WD_STATE_INDEPENDENCE_MSG_SENT;

	if (cl == &dev->iamthif_cl) {
		rets = mei_io_cb_alloc_resp_buf(write_cb, dev->iamthif_mtu);
		if (rets)
			goto unlock_dev;

		write_cb->major_file_operations = MEI_IOCTL;

		if (!list_empty(&dev->amthi_cmd_list.list) ||
		    dev->iamthif_state != MEI_IAMTHIF_IDLE) {
			dev_dbg(&dev->pdev->dev, "amthi_state = %d\n",
				(int) dev->iamthif_state);
			dev_dbg(&dev->pdev->dev, "add amthi cb to amthi cmd waiting list\n");
			list_add_tail(&write_cb->list, &dev->amthi_cmd_list.list);
		} else {
			dev_dbg(&dev->pdev->dev, "call amthi write\n");
			rets = amthi_write(dev, write_cb);

			if (rets) {
				dev_err(&dev->pdev->dev, "amthi write failed with status = %d\n",
					rets);
				goto unlock_dev;
			}
		}
		mutex_unlock(&dev->device_lock);
		return length;
	}

	write_cb->major_file_operations = MEI_WRITE;

	dev_dbg(&dev->pdev->dev, "host client = %d, ME client = %d\n",
		cl->host_client_id, cl->me_client_id);
	rets = mei_flow_ctrl_creds(dev, cl);
	if (rets < 0)
		goto unlock_dev;

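	/*
	 * With a flow control credit and an empty host buffer the first
	 * fragment is written immediately; otherwise the callback is only
	 * queued on the write list for the interrupt path to pick up.
	 */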
	if (rets && dev->mei_host_buffer_is_empty) {
		rets = 0;
		dev->mei_host_buffer_is_empty = false;
		if (length > mei_hbuf_max_data(dev)) {
			mei_hdr.length = mei_hbuf_max_data(dev);
			mei_hdr.msg_complete = 0;
		} else {
			mei_hdr.length = length;
			mei_hdr.msg_complete = 1;
		}
		mei_hdr.host_addr = cl->host_client_id;
		mei_hdr.me_addr = cl->me_client_id;
		mei_hdr.reserved = 0;
		dev_dbg(&dev->pdev->dev, "call mei_write_message header=%08x.\n",
			*((u32 *) &mei_hdr));
		if (mei_write_message(dev, &mei_hdr,
			(unsigned char *) (write_cb->request_buffer.data),
			mei_hdr.length)) {
			rets = -ENODEV;
			goto unlock_dev;
		}
		cl->writing_state = MEI_WRITING;
		write_cb->buf_idx = mei_hdr.length;
		if (mei_hdr.msg_complete) {
			if (mei_flow_ctrl_reduce(dev, cl)) {
				rets = -ENODEV;
				goto unlock_dev;
			}
			list_add_tail(&write_cb->list, &dev->write_waiting_list.list);
		} else {
			list_add_tail(&write_cb->list, &dev->write_list.list);
		}

	} else {

		write_cb->buf_idx = 0;
		cl->writing_state = MEI_WRITING;
		list_add_tail(&write_cb->list, &dev->write_list.list);
	}
	mutex_unlock(&dev->device_lock);
	return length;

unlock_dev:
	mutex_unlock(&dev->device_lock);
	mei_io_cb_free(write_cb);
	return rets;
}


/**
 * mei_ioctl - the IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
static long mei_ioctl(struct file *file, unsigned int cmd, unsigned long data)
{
	struct mei_device *dev;
	struct mei_cl *cl = file->private_data;
	struct mei_connect_client_data *connect_data = NULL;
	int rets;

	if (cmd != IOCTL_MEI_CONNECT_CLIENT)
		return -EINVAL;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	dev_dbg(&dev->pdev->dev, "IOCTL cmd = 0x%x", cmd);

	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		rets = -ENODEV;
		goto out;
	}

	dev_dbg(&dev->pdev->dev, ": IOCTL_MEI_CONNECT_CLIENT.\n");

	connect_data = kzalloc(sizeof(struct mei_connect_client_data),
			       GFP_KERNEL);
	if (!connect_data) {
		rets = -ENOMEM;
		goto out;
	}
	dev_dbg(&dev->pdev->dev, "copy connect data from user\n");
	if (copy_from_user(connect_data, (char __user *)data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data from userland\n");
		rets = -EFAULT;
		goto out;
	}
	rets = mei_ioctl_connect_client(file, connect_data);

	/* if all is ok, copy the data back to user. */
	if (rets)
		goto out;

	dev_dbg(&dev->pdev->dev, "copy connect data to user\n");
	if (copy_to_user((char __user *)data, connect_data,
				sizeof(struct mei_connect_client_data))) {
		dev_dbg(&dev->pdev->dev, "failed to copy data to userland\n");
		rets = -EFAULT;
		goto out;
	}

out:
	kfree(connect_data);
	mutex_unlock(&dev->device_lock);
	return rets;
}

/**
 * mei_compat_ioctl - the compat IOCTL function
 *
 * @file: pointer to file structure
 * @cmd: ioctl command
 * @data: pointer to mei message structure
 *
 * returns 0 on success, <0 on error
 */
#ifdef CONFIG_COMPAT
static long mei_compat_ioctl(struct file *file,
			unsigned int cmd, unsigned long data)
{
	return mei_ioctl(file, cmd, (unsigned long)compat_ptr(data));
}
#endif


/**
 * mei_poll - the poll function
 *
 * @file: pointer to file structure
 * @wait: pointer to poll_table structure
 *
 * returns poll mask
 */
static unsigned int mei_poll(struct file *file, poll_table *wait)
{
	struct mei_cl *cl = file->private_data;
	struct mei_device *dev;
	unsigned int mask = 0;

	if (WARN_ON(!cl || !cl->dev))
		return mask;

	dev = cl->dev;

	mutex_lock(&dev->device_lock);

	if (dev->dev_state != MEI_DEV_ENABLED)
		goto out;


	if (cl == &dev->iamthif_cl) {
		mutex_unlock(&dev->device_lock);
		poll_wait(file, &dev->iamthif_cl.wait, wait);
		mutex_lock(&dev->device_lock);
		if (dev->iamthif_state == MEI_IAMTHIF_READ_COMPLETE &&
		    dev->iamthif_file_object == file) {
			mask |= (POLLIN | POLLRDNORM);
			dev_dbg(&dev->pdev->dev, "run next amthi cb\n");
			mei_run_next_iamthif_cmd(dev);
		}
		goto out;
	}

	mutex_unlock(&dev->device_lock);
	poll_wait(file, &cl->tx_wait, wait);
	mutex_lock(&dev->device_lock);
	if (MEI_WRITE_COMPLETE == cl->writing_state)
		mask |= (POLLIN | POLLRDNORM);

out:
	mutex_unlock(&dev->device_lock);
	return mask;
}

/*
 * file operations structure will be used for mei char device.
 */
static const struct file_operations mei_fops = {
	.owner = THIS_MODULE,
	.read = mei_read,
	.unlocked_ioctl = mei_ioctl,
#ifdef CONFIG_COMPAT
	.compat_ioctl = mei_compat_ioctl,
#endif
	.open = mei_open,
	.release = mei_release,
	.write = mei_write,
	.poll = mei_poll,
	.llseek = no_llseek
};


/*
 * Misc Device Struct
 */
static struct miscdevice mei_misc_device = {
	.name = "mei",
	.fops = &mei_fops,
	.minor = MISC_DYNAMIC_MINOR,
};

/**
 * mei_quirk_probe - probe for devices that don't have a valid ME interface
 * @pdev: PCI device structure
 * @ent: entry into pci_device_table
 *
 * returns true if ME Interface is valid, false otherwise
 */
static bool __devinit mei_quirk_probe(struct pci_dev *pdev,
				      const struct pci_device_id *ent)
{
	u32 reg;
	if (ent->device == MEI_DEV_ID_PBG_1) {
		pci_read_config_dword(pdev, 0x48, &reg);
		/* make sure that bit 9 is up and bit 10 is down */
		if ((reg & 0x600) == 0x200) {
			dev_info(&pdev->dev, "Device doesn't have valid ME Interface\n");
			return false;
		}
	}
	return true;
}
/**
 * mei_probe - Device Initialization Routine
 *
 * @pdev: PCI device structure
 * @ent: entry in kcs_pci_tbl
 *
 * returns 0 on success, <0 on failure.
 */
static int __devinit mei_probe(struct pci_dev *pdev,
				const struct pci_device_id *ent)
{
	struct mei_device *dev;
	int err;

	mutex_lock(&mei_mutex);

	if (!mei_quirk_probe(pdev, ent)) {
		err = -ENODEV;
		goto end;
	}

	if (mei_pdev) {
		err = -EEXIST;
		goto end;
	}
	/* enable pci dev */
	err = pci_enable_device(pdev);
	if (err) {
		dev_err(&pdev->dev, "failed to enable pci device.\n");
		goto end;
	}
	/* set PCI host mastering */
	pci_set_master(pdev);
	/* pci request regions for mei driver */
	err = pci_request_regions(pdev, KBUILD_MODNAME);
	if (err) {
		dev_err(&pdev->dev, "failed to get pci regions.\n");
		goto disable_device;
	}
	/* allocates and initializes the mei dev structure */
	dev = mei_device_init(pdev);
	if (!dev) {
		err = -ENOMEM;
		goto release_regions;
	}
	/* mapping IO device memory */
	dev->mem_addr = pci_iomap(pdev, 0, 0);
	if (!dev->mem_addr) {
		dev_err(&pdev->dev, "mapping I/O device memory failure.\n");
		err = -ENOMEM;
		goto free_device;
	}
	pci_enable_msi(pdev);

	/* request and enable interrupt */
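	/*
	 * MSI interrupts are never shared, so the quick handler is only
	 * needed on the legacy, possibly shared, interrupt line.
	 */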
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failure. irq = %d\n",
			pdev->irq);
		goto disable_msi;
	}
	INIT_DELAYED_WORK(&dev->timer_work, mei_timer);
	if (mei_hw_init(dev)) {
		dev_err(&pdev->dev, "init hw failure.\n");
		err = -ENODEV;
		goto release_irq;
	}

	err = misc_register(&mei_misc_device);
	if (err)
		goto release_irq;

	mei_pdev = pdev;
	pci_set_drvdata(pdev, dev);


	schedule_delayed_work(&dev->timer_work, HZ);

	mutex_unlock(&mei_mutex);

	pr_debug("initialization successful.\n");

	return 0;

release_irq:
	/* disable interrupts */
	dev->host_hw_state = mei_hcsr_read(dev);
	mei_disable_interrupts(dev);
	flush_scheduled_work();
	free_irq(pdev->irq, dev);
disable_msi:
	pci_disable_msi(pdev);
	pci_iounmap(pdev, dev->mem_addr);
free_device:
	kfree(dev);
release_regions:
	pci_release_regions(pdev);
disable_device:
	pci_disable_device(pdev);
end:
	mutex_unlock(&mei_mutex);
	dev_err(&pdev->dev, "initialization failed.\n");
	return err;
}

/**
 * mei_remove - Device Removal Routine
 *
 * @pdev: PCI device structure
 *
 * mei_remove is called by the PCI subsystem to alert the driver
 * that it should release a PCI device.
 */
static void __devexit mei_remove(struct pci_dev *pdev)
{
	struct mei_device *dev;

	if (mei_pdev != pdev)
		return;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return;

	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	mei_wd_stop(dev);

	mei_pdev = NULL;

	if (dev->iamthif_cl.state == MEI_FILE_CONNECTED) {
		dev->iamthif_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->iamthif_cl);
	}
	if (dev->wd_cl.state == MEI_FILE_CONNECTED) {
		dev->wd_cl.state = MEI_FILE_DISCONNECTING;
		mei_disconnect_host_client(dev, &dev->wd_cl);
	}

	/* Unregistering watchdog device */
	mei_watchdog_unregister(dev);

	/* remove entry if already in list */
	dev_dbg(&pdev->dev, "list del iamthif and wd file list.\n");
	mei_remove_client_from_file_list(dev, dev->wd_cl.host_client_id);
	mei_remove_client_from_file_list(dev, dev->iamthif_cl.host_client_id);

	dev->iamthif_current_cb = NULL;
	dev->me_clients_num = 0;

	mutex_unlock(&dev->device_lock);

	flush_scheduled_work();

	/* disable interrupts */
	mei_disable_interrupts(dev);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);
	pci_set_drvdata(pdev, NULL);

	if (dev->mem_addr)
		pci_iounmap(pdev, dev->mem_addr);

	kfree(dev);

	pci_release_regions(pdev);
	pci_disable_device(pdev);

	misc_deregister(&mei_misc_device);
}
#ifdef CONFIG_PM
static int mei_pci_suspend(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev = pci_get_drvdata(pdev);
	int err;

	if (!dev)
		return -ENODEV;
	mutex_lock(&dev->device_lock);

	cancel_delayed_work(&dev->timer_work);

	/* Stop watchdog if exists */
	err = mei_wd_stop(dev);
	/* Set new mei state */
	if (dev->dev_state == MEI_DEV_ENABLED ||
	    dev->dev_state == MEI_DEV_RECOVERING_FROM_RESET) {
		dev->dev_state = MEI_DEV_POWER_DOWN;
		mei_reset(dev, 0);
	}
	mutex_unlock(&dev->device_lock);

	free_irq(pdev->irq, dev);
	pci_disable_msi(pdev);

	return err;
}

static int mei_pci_resume(struct device *device)
{
	struct pci_dev *pdev = to_pci_dev(device);
	struct mei_device *dev;
	int err;

	dev = pci_get_drvdata(pdev);
	if (!dev)
		return -ENODEV;

	pci_enable_msi(pdev);

	/* request and enable interrupt */
	if (pci_dev_msi_enabled(pdev))
		err = request_threaded_irq(pdev->irq,
			NULL,
			mei_interrupt_thread_handler,
			IRQF_ONESHOT, KBUILD_MODNAME, dev);
	else
		err = request_threaded_irq(pdev->irq,
			mei_interrupt_quick_handler,
			mei_interrupt_thread_handler,
			IRQF_SHARED, KBUILD_MODNAME, dev);

	if (err) {
		dev_err(&pdev->dev, "request_threaded_irq failed: irq = %d.\n",
			pdev->irq);
		return err;
	}

	mutex_lock(&dev->device_lock);
	dev->dev_state = MEI_DEV_POWER_UP;
	mei_reset(dev, 1);
	mutex_unlock(&dev->device_lock);

	/* Start timer if stopped in suspend */
	schedule_delayed_work(&dev->timer_work, HZ);

	return err;
}
static SIMPLE_DEV_PM_OPS(mei_pm_ops, mei_pci_suspend, mei_pci_resume);
#define MEI_PM_OPS	(&mei_pm_ops)
#else
#define MEI_PM_OPS	NULL
#endif /* CONFIG_PM */
/*
 *  PCI driver structure
 */
static struct pci_driver mei_driver = {
	.name = KBUILD_MODNAME,
	.id_table = mei_pci_tbl,
	.probe = mei_probe,
	.remove = __devexit_p(mei_remove),
	.shutdown = __devexit_p(mei_remove),
	.driver.pm = MEI_PM_OPS,
};

module_pci_driver(mei_driver);

MODULE_AUTHOR("Intel Corporation");
MODULE_DESCRIPTION("Intel(R) Management Engine Interface");
MODULE_LICENSE("GPL v2");