/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */


#include <linux/export.h>
#include <linux/pci.h>
#include <linux/kthread.h>
#include <linux/interrupt.h>
#include <linux/fs.h>
#include <linux/jiffies.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "hw-me.h"
#include "client.h"


/**
 * mei_cl_complete_handler - processes completed operation for a client
 *
 * @cl: private data of the file object.
 * @cb: callback block.
 */
static void mei_cl_complete_handler(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	if (cb->fop_type == MEI_FOP_WRITE) {
		mei_io_cb_free(cb);
		cb = NULL;
		cl->writing_state = MEI_WRITE_COMPLETE;
		if (waitqueue_active(&cl->tx_wait))
			wake_up_interruptible(&cl->tx_wait);

	} else if (cb->fop_type == MEI_FOP_READ &&
			MEI_READING == cl->reading_state) {
		cl->reading_state = MEI_READ_COMPLETE;
		if (waitqueue_active(&cl->rx_wait))
			wake_up_interruptible(&cl->rx_wait);
		else
			mei_cl_bus_rx_event(cl);

	}
}

/**
 * mei_irq_compl_handler - dispatch complete handlers
 *	for the completed callbacks
 *
 * @dev: mei device
 * @compl_list: list of completed cbs
 */
void mei_irq_compl_handler(struct mei_device *dev, struct mei_cl_cb *compl_list)
{
	struct mei_cl_cb *cb, *next;
	struct mei_cl *cl;

	list_for_each_entry_safe(cb, next, &compl_list->list, list) {
		cl = cb->cl;
		list_del(&cb->list);
		if (!cl)
			continue;

		dev_dbg(&dev->pdev->dev, "completing call back.\n");
		if (cl == &dev->iamthif_cl)
			mei_amthif_complete(dev, cb);
		else
			mei_cl_complete_handler(cl, cb);
	}
}
EXPORT_SYMBOL_GPL(mei_irq_compl_handler);

/**
 * mei_cl_hbm_equal - check if hbm is addressed to the client
 *
 * @cl: host client
 * @mei_hdr: header of mei client message
 *
 * returns true if matches, false otherwise
 */
static inline int mei_cl_hbm_equal(struct mei_cl *cl,
			struct mei_msg_hdr *mei_hdr)
{
	return cl->host_client_id == mei_hdr->host_addr &&
		cl->me_client_id == mei_hdr->me_addr;
}

/**
 * mei_cl_is_reading - checks if the client
 *	is the one to read this message
 *
 * @cl: mei client
 * @mei_hdr: header of mei message
 *
 * returns true on match and false otherwise
 */
static bool mei_cl_is_reading(struct mei_cl *cl, struct mei_msg_hdr *mei_hdr)
{
	return mei_cl_hbm_equal(cl, mei_hdr) &&
		cl->state == MEI_FILE_CONNECTED &&
		cl->reading_state != MEI_READ_COMPLETE;
}

/**
 * mei_cl_irq_read_msg - process client message
 *
 * @dev: the device structure
 * @mei_hdr: header of mei client message
 * @complete_list: An instance of our list structure
 *
 * returns 0 on success, <0 on failure.
 */
static int mei_cl_irq_read_msg(struct mei_device *dev,
			       struct mei_msg_hdr *mei_hdr,
			       struct mei_cl_cb *complete_list)
{
	struct mei_cl *cl;
	struct mei_cl_cb *cb, *next;
	unsigned char *buffer = NULL;

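	/*
	 * Find the pending read callback this header is addressed to and
	 * copy the payload slots from the hardware buffer into its response
	 * buffer. If no matching reader is found, the message is drained and
	 * discarded below.
	 */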
	list_for_each_entry_safe(cb, next, &dev->read_list.list, list) {
		cl = cb->cl;
		if (!cl || !mei_cl_is_reading(cl, mei_hdr))
			continue;

		cl->reading_state = MEI_READING;

		if (cb->response_buffer.size == 0 ||
		    cb->response_buffer.data == NULL) {
			dev_err(&dev->pdev->dev, "response buffer is not allocated.\n");
			list_del(&cb->list);
			return -ENOMEM;
		}

		if (cb->response_buffer.size < mei_hdr->length + cb->buf_idx) {
			dev_warn(&dev->pdev->dev, "message overflow.\n");
			list_del(&cb->list);
			return -ENOMEM;
		}

		buffer = cb->response_buffer.data + cb->buf_idx;
		mei_read_slots(dev, buffer, mei_hdr->length);

		cb->buf_idx += mei_hdr->length;
		if (mei_hdr->msg_complete) {
			cl->status = 0;
			list_del(&cb->list);
			dev_dbg(&dev->pdev->dev, "completed read H cl = %d, ME cl = %d, length = %lu\n",
					cl->host_client_id,
					cl->me_client_id,
					cb->buf_idx);
			list_add_tail(&cb->list, &complete_list->list);
		}
		break;
	}

	dev_dbg(&dev->pdev->dev, "message read\n");
	if (!buffer) {
		mei_read_slots(dev, dev->rd_msg_buf, mei_hdr->length);
		dev_dbg(&dev->pdev->dev, "discarding message " MEI_HDR_FMT "\n",
				MEI_HDR_PRM(mei_hdr));
	}

	return 0;
}

/**
 * _mei_irq_thread_close - processes close related operation.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb_pos: callback block.
 * @cl: private data of the file object.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int _mei_irq_thread_close(struct mei_device *dev, s32 *slots,
				struct mei_cl_cb *cb_pos,
				struct mei_cl *cl,
				struct mei_cl_cb *cmpl_list)
{
	u32 msg_slots =
		mei_data2slots(sizeof(struct hbm_client_connect_request));

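	/*
	 * Reserve the slots needed for the disconnect request up front; if
	 * the host buffer cannot hold it, the callback stays queued on the
	 * control write list and is retried on a later interrupt.
	 */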
	if (*slots < msg_slots)
		return -EMSGSIZE;

	*slots -= msg_slots;

	if (mei_hbm_cl_disconnect_req(dev, cl)) {
		cl->status = 0;
		cb_pos->buf_idx = 0;
		list_move_tail(&cb_pos->list, &cmpl_list->list);
		return -EIO;
	}

	cl->state = MEI_FILE_DISCONNECTING;
	cl->status = 0;
	cb_pos->buf_idx = 0;
	list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;

	return 0;
}


/**
 * _mei_irq_thread_read - processes read related operation.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb_pos: callback block.
 * @cl: private data of the file object.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int _mei_irq_thread_read(struct mei_device *dev, s32 *slots,
			struct mei_cl_cb *cb_pos,
			struct mei_cl *cl,
			struct mei_cl_cb *cmpl_list)
{
	u32 msg_slots = mei_data2slots(sizeof(struct hbm_flow_control));

	if (*slots < msg_slots) {
		/* return the cancel routine */
		list_del(&cb_pos->list);
		return -EMSGSIZE;
	}

	*slots -= msg_slots;

	if (mei_hbm_cl_flow_control_req(dev, cl)) {
		cl->status = -ENODEV;
		cb_pos->buf_idx = 0;
		list_move_tail(&cb_pos->list, &cmpl_list->list);
		return -ENODEV;
	}
	list_move_tail(&cb_pos->list, &dev->read_list.list);

	return 0;
}


/**
 * _mei_irq_thread_ioctl - processes ioctl related operation.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb_pos: callback block.
 * @cl: private data of the file object.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int _mei_irq_thread_ioctl(struct mei_device *dev, s32 *slots,
			struct mei_cl_cb *cb_pos,
			struct mei_cl *cl,
			struct mei_cl_cb *cmpl_list)
{
	u32 msg_slots =
		mei_data2slots(sizeof(struct hbm_client_connect_request));

	if (*slots < msg_slots) {
		/* return the cancel routine */
		list_del(&cb_pos->list);
		return -EMSGSIZE;
	}

	*slots -= msg_slots;

	cl->state = MEI_FILE_CONNECTING;

	if (mei_hbm_cl_connect_req(dev, cl)) {
		cl->status = -ENODEV;
		cb_pos->buf_idx = 0;
		list_del(&cb_pos->list);
		return -ENODEV;
	} else {
		list_move_tail(&cb_pos->list, &dev->ctrl_rd_list.list);
		cl->timer_count = MEI_CONNECT_TIMEOUT;
	}
	return 0;
}

/**
 * mei_irq_thread_write_complete - write messages to device.
 *
 * @dev: the device structure.
 * @slots: free slots.
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * returns 0, OK; otherwise, error.
 */
static int mei_irq_thread_write_complete(struct mei_device *dev, s32 *slots,
			struct mei_cl_cb *cb, struct mei_cl_cb *cmpl_list)
{
	struct mei_msg_hdr mei_hdr;
	struct mei_cl *cl = cb->cl;
	size_t len = cb->request_buffer.size - cb->buf_idx;
	u32 msg_slots = mei_data2slots(len);

	mei_hdr.host_addr = cl->host_client_id;
	mei_hdr.me_addr = cl->me_client_id;
	mei_hdr.reserved = 0;

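	/*
	 * Three cases for the remaining payload:
	 * - it fits in the free slots: send it as the final fragment,
	 * - the host buffer is completely empty but still too small: send a
	 *   partial fragment that fills the buffer,
	 * - otherwise: keep the callback queued and wait for an empty buffer.
	 */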
	if (*slots >= msg_slots) {
		mei_hdr.length = len;
		mei_hdr.msg_complete = 1;
	/* Split the message only if we can write the whole host buffer */
	} else if (*slots == dev->hbuf_depth) {
		msg_slots = *slots;
		len = (*slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
		mei_hdr.length = len;
		mei_hdr.msg_complete = 0;
	} else {
		/* wait for next time the host buffer is empty */
		return 0;
	}

	dev_dbg(&dev->pdev->dev, "buf: size = %d idx = %lu\n",
			cb->request_buffer.size, cb->buf_idx);
	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(&mei_hdr));

	*slots -= msg_slots;
	if (mei_write_message(dev, &mei_hdr,
			cb->request_buffer.data + cb->buf_idx)) {
		cl->status = -ENODEV;
		list_move_tail(&cb->list, &cmpl_list->list);
		return -ENODEV;
	}

	if (mei_cl_flow_ctrl_reduce(cl))
		return -ENODEV;

	cl->status = 0;
	cb->buf_idx += mei_hdr.length;
	if (mei_hdr.msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list.list);

	return 0;
}

/**
 * mei_irq_read_handler - bottom half read routine after ISR to
 *	handle the read processing.
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 * @slots: slots to read.
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_read_handler(struct mei_device *dev,
		struct mei_cl_cb *cmpl_list, s32 *slots)
{
	struct mei_msg_hdr *mei_hdr;
	struct mei_cl *cl_pos = NULL;
	struct mei_cl *cl_next = NULL;
	int ret = 0;

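	/*
	 * The message header is read from the hardware only once and cached
	 * in dev->rd_msg_hdr; it is cleared further down once the whole
	 * message body has been consumed.
	 */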
	if (!dev->rd_msg_hdr) {
		dev->rd_msg_hdr = mei_read_hdr(dev);
		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
		(*slots)--;
		dev_dbg(&dev->pdev->dev, "slots =%08x.\n", *slots);
	}
	mei_hdr = (struct mei_msg_hdr *) &dev->rd_msg_hdr;
	dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

	if (mei_hdr->reserved || !dev->rd_msg_hdr) {
		dev_dbg(&dev->pdev->dev, "corrupted message header.\n");
		ret = -EBADMSG;
		goto end;
	}

	if (mei_hdr->host_addr || mei_hdr->me_addr) {
		list_for_each_entry_safe(cl_pos, cl_next,
					&dev->file_list, link) {
			dev_dbg(&dev->pdev->dev,
					"list_for_each_entry_safe read host"
					" client = %d, ME client = %d\n",
					cl_pos->host_client_id,
					cl_pos->me_client_id);
			if (mei_cl_hbm_equal(cl_pos, mei_hdr))
				break;
		}

		if (&cl_pos->link == &dev->file_list) {
			dev_dbg(&dev->pdev->dev, "corrupted message header\n");
			ret = -EBADMSG;
			goto end;
		}
	}
	if (((*slots) * sizeof(u32)) < mei_hdr->length) {
		dev_err(&dev->pdev->dev,
				"we can't read the message slots =%08x.\n",
				*slots);
		/* we can't read the message */
		ret = -ERANGE;
		goto end;
	}

	/* decide where to read the message to */
	if (!mei_hdr->host_addr) {
		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_bus_message.\n");
		mei_hbm_dispatch(dev, mei_hdr);
		dev_dbg(&dev->pdev->dev, "end mei_irq_thread_read_bus_message.\n");
	} else if (mei_hdr->host_addr == dev->iamthif_cl.host_client_id &&
		   (MEI_FILE_CONNECTED == dev->iamthif_cl.state) &&
		   (dev->iamthif_state == MEI_IAMTHIF_READING)) {

		dev_dbg(&dev->pdev->dev, "call mei_irq_thread_read_iamthif_message.\n");
		dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));

		ret = mei_amthif_irq_read_msg(dev, mei_hdr, cmpl_list);
		if (ret)
			goto end;
	} else {
		dev_dbg(&dev->pdev->dev, "call mei_cl_irq_read_msg.\n");
		dev_dbg(&dev->pdev->dev, MEI_HDR_FMT, MEI_HDR_PRM(mei_hdr));
		ret = mei_cl_irq_read_msg(dev, mei_hdr, cmpl_list);
		if (ret)
			goto end;
	}

	/* reset the number of slots and header */
	*slots = mei_count_full_read_slots(dev);
	dev->rd_msg_hdr = 0;

	if (*slots == -EOVERFLOW) {
		/* overflow - reset */
		dev_err(&dev->pdev->dev, "resetting due to slots overflow.\n");
		/* set the event since message has been read */
		ret = -ERANGE;
		goto end;
	}
end:
	return ret;
}
EXPORT_SYMBOL_GPL(mei_irq_read_handler);


/**
 * mei_irq_write_handler - dispatch write requests
 *	after irq received
 *
 * @dev: the device structure
 * @cmpl_list: An instance of our list structure
 *
 * returns 0 on success, <0 on failure.
 */
int mei_irq_write_handler(struct mei_device *dev, struct mei_cl_cb *cmpl_list)
{

	struct mei_cl *cl;
	struct mei_cl_cb *pos = NULL, *next = NULL;
	struct mei_cl_cb *list;
	s32 slots;
	int ret;

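	/*
	 * As long as the host buffer has free slots, walk the pending lists
	 * in order: complete the write-waiting callbacks, service the
	 * watchdog and any queued extra message, emit control requests
	 * (disconnect, flow control, connect), and finally push client
	 * write data to the device.
	 */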
	if (!mei_hbuf_is_ready(dev)) {
		dev_dbg(&dev->pdev->dev, "host buffer is not empty.\n");
		return 0;
	}
	slots = mei_hbuf_empty_slots(dev);
	if (slots <= 0)
		return -EMSGSIZE;

	/* complete all waiting for write CB */
	dev_dbg(&dev->pdev->dev, "complete all waiting for write cb.\n");

	list = &dev->write_waiting_list;
	list_for_each_entry_safe(pos, next, &list->list, list) {
		cl = pos->cl;
		if (cl == NULL)
			continue;

		cl->status = 0;
		list_del(&pos->list);
		if (MEI_WRITING == cl->writing_state &&
		    pos->fop_type == MEI_FOP_WRITE &&
		    cl != &dev->iamthif_cl) {
			dev_dbg(&dev->pdev->dev, "MEI WRITE COMPLETE\n");
			cl->writing_state = MEI_WRITE_COMPLETE;
			list_add_tail(&pos->list, &cmpl_list->list);
		}
		if (cl == &dev->iamthif_cl) {
			dev_dbg(&dev->pdev->dev, "check iamthif flow control.\n");
			if (dev->iamthif_flow_control_pending) {
				ret = mei_amthif_irq_read(dev, &slots);
				if (ret)
					return ret;
			}
		}
	}

	if (dev->wd_state == MEI_WD_STOPPING) {
		dev->wd_state = MEI_WD_IDLE;
		wake_up_interruptible(&dev->wait_stop_wd);
	}

	if (dev->wr_ext_msg.hdr.length) {
		mei_write_message(dev, &dev->wr_ext_msg.hdr,
				dev->wr_ext_msg.data);
		slots -= mei_data2slots(dev->wr_ext_msg.hdr.length);
		dev->wr_ext_msg.hdr.length = 0;
	}
	if (dev->dev_state == MEI_DEV_ENABLED) {
		if (dev->wd_pending &&
		    mei_cl_flow_ctrl_creds(&dev->wd_cl) > 0) {
			if (mei_wd_send(dev))
				dev_dbg(&dev->pdev->dev, "wd send failed.\n");
			else if (mei_cl_flow_ctrl_reduce(&dev->wd_cl))
				return -ENODEV;

			dev->wd_pending = false;

			if (dev->wd_state == MEI_WD_RUNNING)
				slots -= mei_data2slots(MEI_WD_START_MSG_SIZE);
			else
				slots -= mei_data2slots(MEI_WD_STOP_MSG_SIZE);
		}
	}

	/* complete control write list CB */
	dev_dbg(&dev->pdev->dev, "complete control write list cb.\n");
	list_for_each_entry_safe(pos, next, &dev->ctrl_wr_list.list, list) {
		cl = pos->cl;
		if (!cl) {
			list_del(&pos->list);
			return -ENODEV;
		}
		switch (pos->fop_type) {
		case MEI_FOP_CLOSE:
			/* send disconnect message */
			ret = _mei_irq_thread_close(dev, &slots, pos,
						cl, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_READ:
			/* send flow control message */
			ret = _mei_irq_thread_read(dev, &slots, pos,
						cl, cmpl_list);
			if (ret)
				return ret;

			break;
		case MEI_FOP_IOCTL:
			/* connect message */
			if (mei_cl_is_other_connecting(cl))
				continue;
			ret = _mei_irq_thread_ioctl(dev, &slots, pos,
						cl, cmpl_list);
			if (ret)
				return ret;

			break;

		default:
			BUG();
		}

	}
	/* complete write list CB */
	dev_dbg(&dev->pdev->dev, "complete write list cb.\n");
	list_for_each_entry_safe(pos, next, &dev->write_list.list, list) {
		cl = pos->cl;
		if (cl == NULL)
			continue;
		if (mei_cl_flow_ctrl_creds(cl) <= 0) {
			dev_dbg(&dev->pdev->dev,
				"No flow control credentials for client %d, not sending.\n",
				cl->host_client_id);
			continue;
		}

		if (cl == &dev->iamthif_cl)
			ret = mei_amthif_irq_write_complete(dev, &slots,
						pos, cmpl_list);
		else
			ret = mei_irq_thread_write_complete(dev, &slots, pos,
						cmpl_list);
		if (ret)
			return ret;

	}
	return 0;
}
EXPORT_SYMBOL_GPL(mei_irq_write_handler);



/**
 * mei_timer - timer function.
 *
 * @work: pointer to the work_struct structure
 *
 * NOTE: This function is called by timer interrupt work
 */
void mei_timer(struct work_struct *work)
{
	unsigned long timeout;
	struct mei_cl *cl_pos = NULL;
	struct mei_cl *cl_next = NULL;
	struct mei_cl_cb *cb_pos = NULL;
	struct mei_cl_cb *cb_next = NULL;

	struct mei_device *dev = container_of(work,
					struct mei_device, timer_work.work);


	mutex_lock(&dev->device_lock);
	if (dev->dev_state != MEI_DEV_ENABLED) {
		if (dev->dev_state == MEI_DEV_INIT_CLIENTS) {
			if (dev->init_clients_timer) {
				if (--dev->init_clients_timer == 0) {
					dev_err(&dev->pdev->dev, "reset: init clients timeout hbm_state = %d.\n",
						dev->hbm_state);
					mei_reset(dev, 1);
				}
			}
		}
		goto out;
	}
	/*** connect/disconnect timeouts ***/
	list_for_each_entry_safe(cl_pos, cl_next, &dev->file_list, link) {
		if (cl_pos->timer_count) {
			if (--cl_pos->timer_count == 0) {
				dev_err(&dev->pdev->dev, "reset: connect/disconnect timeout.\n");
				mei_reset(dev, 1);
				goto out;
			}
		}
	}

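	/*
	 * An AMTHIF request has been stuck for too long: reset the device
	 * and drop the current AMTHIF state so the next queued command can
	 * run.
	 */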
	if (dev->iamthif_stall_timer) {
		if (--dev->iamthif_stall_timer == 0) {
			dev_err(&dev->pdev->dev, "reset: amthif hanged.\n");
			mei_reset(dev, 1);
			dev->iamthif_msg_buf_size = 0;
			dev->iamthif_msg_buf_index = 0;
			dev->iamthif_canceled = false;
			dev->iamthif_ioctl = true;
			dev->iamthif_state = MEI_IAMTHIF_IDLE;
			dev->iamthif_timer = 0;

			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object = NULL;
			mei_amthif_run_next_cmd(dev);
		}
	}

	if (dev->iamthif_timer) {

		timeout = dev->iamthif_timer +
			mei_secs_to_jiffies(MEI_IAMTHIF_READ_TIMER);

		dev_dbg(&dev->pdev->dev, "dev->iamthif_timer = %ld\n",
				dev->iamthif_timer);
		dev_dbg(&dev->pdev->dev, "timeout = %ld\n", timeout);
		dev_dbg(&dev->pdev->dev, "jiffies = %ld\n", jiffies);
		if (time_after(jiffies, timeout)) {
			/*
			 * User didn't read the AMTHI data on time (15sec)
			 * freeing AMTHI for other requests
			 */

			dev_dbg(&dev->pdev->dev, "freeing AMTHI for other requests\n");

			list_for_each_entry_safe(cb_pos, cb_next,
				&dev->amthif_rd_complete_list.list, list) {

				cl_pos = cb_pos->file_object->private_data;

				/* Finding the AMTHI entry. */
				if (cl_pos == &dev->iamthif_cl)
					list_del(&cb_pos->list);
			}
			mei_io_cb_free(dev->iamthif_current_cb);
			dev->iamthif_current_cb = NULL;

			dev->iamthif_file_object->private_data = NULL;
			dev->iamthif_file_object = NULL;
			dev->iamthif_timer = 0;
			mei_amthif_run_next_cmd(dev);

		}
	}
out:
	schedule_delayed_work(&dev->timer_work, 2 * HZ);
	mutex_unlock(&dev->device_lock);
}