blob: 276021f99666be1b9435ccbce3b1a9d738d310b7 [file] [log] [blame]
Tomas Winkler9fff0422019-03-12 00:10:41 +02001// SPDX-License-Identifier: GPL-2.0
Tomas Winkler9ca90502013-01-08 23:07:13 +02002/*
Tomas Winkler4b40b222020-07-23 17:59:25 +03003 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
Tomas Winkler9ca90502013-01-08 23:07:13 +02004 * Intel Management Engine Interface (Intel MEI) Linux driver
Tomas Winkler9ca90502013-01-08 23:07:13 +02005 */
6
Ingo Molnar174cd4b2017-02-02 19:15:33 +01007#include <linux/sched/signal.h>
Tomas Winkler9ca90502013-01-08 23:07:13 +02008#include <linux/wait.h>
9#include <linux/delay.h>
Tomas Winkler1f180352014-09-29 16:31:46 +030010#include <linux/slab.h>
Tomas Winkler04bb1392014-03-18 22:52:04 +020011#include <linux/pm_runtime.h>
Tomas Winkler9ca90502013-01-08 23:07:13 +020012
13#include <linux/mei.h>
14
15#include "mei_dev.h"
16#include "hbm.h"
Tomas Winkler90e0b5f2013-01-08 23:07:14 +020017#include "client.h"
18
/**
 * mei_me_cl_init - initialize me client
 *
 * Prepares a freshly allocated me client for insertion into the
 * device list: the list head is set empty and the reference count
 * starts at 1 (owned by the caller / the list).
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}
29
/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client with an additional reference, or NULL when
 *         @me_cl is NULL or its refcount already dropped to zero
 *         (i.e. the client is being released concurrently)
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	/* kref_get_unless_zero() refuses to resurrect a dying object */
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}
46
/**
 * mei_me_cl_release - free me client
 *
 * kref release callback, invoked by kref_put() when the last
 * reference to the me client is dropped.
 *
 * @ref: me_client refcount
 *
 * Locking: called under "dev->device_lock" lock
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}
Tomas Winklerb7d88512015-02-10 10:39:31 +020061
Tomas Winkler79563db2015-01-11 00:07:16 +020062/**
63 * mei_me_cl_put - decrease me client refcount and free client if necessary
64 *
65 * Locking: called under "dev->device_lock" lock
66 *
67 * @me_cl: me client
68 */
69void mei_me_cl_put(struct mei_me_client *me_cl)
70{
71 if (me_cl)
72 kref_put(&me_cl->refcnt, mei_me_cl_release);
73}
74
/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client (may be NULL; treated as a no-op)
 *
 * Locking: dev->me_clients_rwsem must be held for write by the caller
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	/* drop the list's reference; the object survives while other
	 * holders (lookups) still have references */
	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}
92
/**
 * mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * Takes the clients rwsem for write around __mei_me_cl_del().
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}
106
/**
 * mei_me_cl_add - add me client to the list
 *
 * Inserts at the head of dev->me_clients; takes the clients rwsem
 * for write internally, so callers must not already hold it.
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}
119
/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *     increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client with an additional reference, or NULL if not found
 *
 * Locking: dev->me_clients_rwsem must be held by the caller
 *          (read or write; enforced with a WARN_ON)
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}
147
148/**
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300149 * mei_me_cl_by_uuid - locate me client by uuid
Tomas Winkler79563db2015-01-11 00:07:16 +0200150 * increases ref count
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200151 *
152 * @dev: mei device
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300153 * @uuid: me client uuid
Alexander Usyskina27a76d2014-02-17 15:13:22 +0200154 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300155 * Return: me client or NULL if not found
Tomas Winklerb7d88512015-02-10 10:39:31 +0200156 *
157 * Locking: dev->me_clients_rwsem
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200158 */
Tomas Winklerb7d88512015-02-10 10:39:31 +0200159struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
Tomas Winklerd3208322014-08-24 12:08:55 +0300160 const uuid_le *uuid)
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200161{
Tomas Winkler5ca2d382014-08-21 14:29:13 +0300162 struct mei_me_client *me_cl;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200163
Tomas Winklerb7d88512015-02-10 10:39:31 +0200164 down_read(&dev->me_clients_rwsem);
165 me_cl = __mei_me_cl_by_uuid(dev, uuid);
166 up_read(&dev->me_clients_rwsem);
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200167
Tomas Winklerb7d88512015-02-10 10:39:31 +0200168 return me_cl;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200169}
170
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200171/**
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300172 * mei_me_cl_by_id - locate me client by client id
Tomas Winkler79563db2015-01-11 00:07:16 +0200173 * increases ref count
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200174 *
175 * @dev: the device structure
176 * @client_id: me client id
177 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300178 * Return: me client or NULL if not found
Tomas Winklerb7d88512015-02-10 10:39:31 +0200179 *
180 * Locking: dev->me_clients_rwsem
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200181 */
Tomas Winklerd3208322014-08-24 12:08:55 +0300182struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200183{
Alexander Usyskina27a76d2014-02-17 15:13:22 +0200184
Tomas Winklerb7d88512015-02-10 10:39:31 +0200185 struct mei_me_client *__me_cl, *me_cl = NULL;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200186
Tomas Winklerb7d88512015-02-10 10:39:31 +0200187 down_read(&dev->me_clients_rwsem);
188 list_for_each_entry(__me_cl, &dev->me_clients, list) {
189 if (__me_cl->client_id == client_id) {
190 me_cl = mei_me_cl_get(__me_cl);
191 break;
192 }
193 }
194 up_read(&dev->me_clients_rwsem);
195
196 return me_cl;
197}
198
/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *     increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client with an additional reference, or NULL if not found
 *
 * Locking: dev->me_clients_rwsem must be held by the caller
 *          (enforced with a WARN_ON)
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		/* both the uuid and the client id must match */
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}
Tomas Winkler9ca90502013-01-08 23:07:13 +0200228
Tomas Winklerb7d88512015-02-10 10:39:31 +0200229
/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *     increases ref count
 *
 * Read-locked wrapper around __mei_me_cl_by_uuid_id().
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client with an additional reference, or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}
251
/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	/* drop the extra reference taken by the lookup above;
	 * __mei_me_cl_del() released only the list's reference */
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}
272
/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	/* drop the extra reference taken by the lookup above;
	 * __mei_me_cl_del() released only the list's reference */
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}
294
/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * Unlinks every me client from the device list, dropping the list's
 * reference on each one.
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}
311
Tomas Winkler9ca90502013-01-08 23:07:13 +0200312/**
Tomas Winkler9ca90502013-01-08 23:07:13 +0200313 * mei_io_cb_free - free mei_cb_private related memory
314 *
315 * @cb: mei callback struct
316 */
317void mei_io_cb_free(struct mei_cl_cb *cb)
318{
319 if (cb == NULL)
320 return;
321
Tomas Winkler928fa662015-02-10 10:39:45 +0200322 list_del(&cb->list);
Tomas Winkler5db75142015-02-10 10:39:42 +0200323 kfree(cb->buf.data);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200324 kfree(cb);
325}
326
/**
 * mei_tx_cb_enqueue - queue tx callback
 *
 * Appends the callback to @head and bumps the owning client's
 * queued-tx counter (paired with mei_tx_cb_dequeue()).
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct
 * @head: an instance of list to queue on
 */
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
				     struct list_head *head)
{
	list_add_tail(&cb->list, head);
	cb->cl->tx_cb_queued++;
}
341
/**
 * mei_tx_cb_dequeue - dequeue tx callback
 *
 * Decrements the owning client's queued-tx counter (warning on
 * underflow) and frees the callback.
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct to dequeue and free
 */
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
		cb->cl->tx_cb_queued--;

	mei_io_cb_free(cb);
}
356
/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * The returned callback has no data buffer attached; callers that
 * need one allocate it separately (see mei_cl_alloc_cb()).
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL on allocation failure
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	cb->vtag = 0;

	return cb;
}
385
/**
 * mei_io_list_flush_cl - removes cbs belonging to the cl.
 *
 * Unlinks every callback owned by @cl from @head. Only MEI_FOP_READ
 * callbacks are freed here; other types are merely unlinked —
 * presumably their memory is reclaimed elsewhere (TODO: confirm
 * against the completion paths).
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_list_flush_cl(struct list_head *head,
				 const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl) {
			list_del_init(&cb->list);
			if (cb->fop_type == MEI_FOP_READ)
				mei_io_cb_free(cb);
		}
	}
}
405
/**
 * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
 *
 * Dequeues (adjusting the client's tx_cb_queued counter) and frees
 * every callback in @head that belongs to @cl.
 *
 * @head: An instance of our list structure
 * @cl: host client
 */
static void mei_io_tx_list_free_cl(struct list_head *head,
				   const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl)
			mei_tx_cb_dequeue(cb);
	}
}
422
423/**
424 * mei_io_list_free_fp - free cb from a list that matches file pointer
425 *
426 * @head: io list
427 * @fp: file pointer (matching cb file object), may be NULL
428 */
Alexander Usyskin394a77d2017-03-20 15:04:03 +0200429static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
Tomas Winklerf0461922017-01-27 16:32:46 +0200430{
431 struct mei_cl_cb *cb, *next;
432
433 list_for_each_entry_safe(cb, next, head, list)
434 if (!fp || fp == cb->fp)
435 mei_io_cb_free(cb);
Tomas Winkler928fa662015-02-10 10:39:45 +0200436}
437
/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer; 0 means no data buffer is attached
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	/* round the buffer up to a whole number of HW slots */
	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	/* advertised size stays the requested length, not the rounded one */
	cb->buf.size = length;

	return cb;
}
470
/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *     and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}
500
/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *     for a specific file
 *
 * The rd_completed list is protected by its own spinlock since
 * completion and readers may touch it concurrently.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL to match any
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;
	struct mei_cl_cb *ret_cb = NULL;

	spin_lock(&cl->rd_completed_lock);
	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp) {
			ret_cb = cb;
			break;
		}
	spin_unlock(&cl->rd_completed_lock);
	return ret_cb;
}
524
/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * Removes the client's callbacks from the device tx/ctrl lists and
 * frees its pending/completed read callbacks matching @fp.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL to flush all
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&cl->dev->write_list, cl);
	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_free_fp(&cl->rd_pending, fp);
	/* rd_completed is additionally guarded by its spinlock */
	spin_lock(&cl->rd_completed_lock);
	mei_io_list_free_fp(&cl->rd_completed, fp);
	spin_unlock(&cl->rd_completed_lock);

	return 0;
}
554
/**
 * mei_cl_init - initializes cl.
 *
 * Zeroes the structure and sets up wait queues, lists, the
 * rd_completed spinlock and the initial (UNINITIALIZED/IDLE) state.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(*cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	spin_lock_init(&cl->rd_completed_lock);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}
576
577/**
578 * mei_cl_allocate - allocates cl structure and sets it up.
579 *
580 * @dev: mei device
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300581 * Return: The allocated file or NULL on failure
Tomas Winkler9ca90502013-01-08 23:07:13 +0200582 */
583struct mei_cl *mei_cl_allocate(struct mei_device *dev)
584{
585 struct mei_cl *cl;
586
Tomas Winkler4b40b222020-07-23 17:59:25 +0300587 cl = kmalloc(sizeof(*cl), GFP_KERNEL);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200588 if (!cl)
589 return NULL;
590
591 mei_cl_init(cl, dev);
592
593 return cl;
594}
595
/**
 * mei_cl_link - allocate host id in the host map
 *
 * Finds a free host client id, accounts the open handle, links the
 * client onto the device file list and marks it INITIALIZING.
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	/* bit 0 is reserved, so a full map yields id >= MEI_CLIENTS_MAX */
	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}
Tomas Winkler781d0d82013-01-08 23:07:22 +0200639
/**
 * mei_cl_unlink - remove host client from the list
 *
 * Reverses mei_cl_link(): releases the host id bit, drops the open
 * handle count and resets the client state. Tolerates NULL and
 * partially initialized clients so error paths can call it freely.
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	/* the client must be fully flushed before unlinking */
	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}
680
/**
 * mei_host_client_init - post-enumeration host setup
 *
 * Marks the device enabled, resets the reset counter, kicks the bus
 * rescan work and requests runtime-PM autosuspend.
 *
 * @dev: the device structure
 */
void mei_host_client_init(struct mei_device *dev)
{
	mei_set_devstate(dev, MEI_DEV_ENABLED);
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}
692
/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * Fails when the device is power gated (or transitioning) or when
 * the buffer is not ready; on success clears hbuf_is_ready so the
 * caller owns the buffer until it is released.
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}
Tomas Winkler9ca90502013-01-08 23:07:13 +0200716
/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *     they can be interrupted
 *
 * The bare waitqueue_active() checks are safe only because both the
 * waiters and this waker run under the device mutex.
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}
748
/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *     associated states and resources
 *
 * Frees/flushes the client's queued I/O, wakes all waiters, zeroes
 * flow-control credits and drops the reference on the bound me
 * client (clearing its credits when its last connection goes away).
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* nothing to tear down if never connected or already disconnected */
	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_tx_list_free_cl(&dev->write_list, cl);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	/* last connection gone - reset the me client's tx credits */
	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}
785
Alexander Usyskina03d77f62015-05-04 09:43:55 +0300786static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
787{
Alexander Usyskin1df629e2015-05-04 09:43:56 +0300788 if (!mei_me_cl_get(me_cl))
Alexander Usyskina03d77f62015-05-04 09:43:55 +0300789 return -ENOENT;
790
Alexander Usyskin1df629e2015-05-04 09:43:56 +0300791 /* only one connection is allowed for fixed address clients */
792 if (me_cl->props.fixed_address) {
793 if (me_cl->connect_count) {
794 mei_me_cl_put(me_cl);
795 return -EBUSY;
796 }
797 }
798
799 cl->me_cl = me_cl;
Alexander Usyskina03d77f62015-05-04 09:43:55 +0300800 cl->state = MEI_FILE_CONNECTING;
801 cl->me_cl->connect_count++;
802
803 return 0;
804}
805
/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	/* request is out: park the cb on the control read list to
	 * await the firmware reply, and arm the stall timer
	 */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}
834
835/**
836 * mei_cl_irq_disconnect - processes close related operation from
837 * interrupt thread context - send disconnect request
838 *
839 * @cl: client
840 * @cb: callback block.
841 * @cmpl_list: complete list.
842 *
843 * Return: 0, OK; otherwise, error.
844 */
845int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
Alexander Usyskin962ff7b2017-01-27 16:32:45 +0200846 struct list_head *cmpl_list)
Tomas Winkler3c666182015-05-04 09:43:52 +0300847{
848 struct mei_device *dev = cl->dev;
849 u32 msg_slots;
850 int slots;
851 int ret;
852
Tomas Winkler98e70862018-07-31 09:35:33 +0300853 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
Tomas Winkler3c666182015-05-04 09:43:52 +0300854 slots = mei_hbuf_empty_slots(dev);
Tomas Winklerde877432018-07-12 17:10:08 +0300855 if (slots < 0)
856 return -EOVERFLOW;
Tomas Winkler3c666182015-05-04 09:43:52 +0300857
Tomas Winklerde877432018-07-12 17:10:08 +0300858 if ((u32)slots < msg_slots)
Tomas Winkler3c666182015-05-04 09:43:52 +0300859 return -EMSGSIZE;
860
861 ret = mei_cl_send_disconnect(cl, cb);
862 if (ret)
Alexander Usyskin962ff7b2017-01-27 16:32:45 +0200863 list_move_tail(&cb->list, cmpl_list);
Tomas Winkler3c666182015-05-04 09:43:52 +0300864
865 return ret;
866}
867
/**
 * __mei_cl_disconnect - disconnect host client from the me one
 *     internal function runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 * while waiting for the firmware reply.
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* if the host buffer is busy, the request will be sent later
	 * from the interrupt thread off the ctrl_wr_list
	 */
	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}
923
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	/* fixed address clients have no FW connection to tear down:
	 * only local state needs to be cleared
	 */
	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}
974
975
976/**
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200977 * mei_cl_is_other_connecting - checks if other
978 * client with the same me client id is connecting
Tomas Winkler9ca90502013-01-08 23:07:13 +0200979 *
Tomas Winkler9ca90502013-01-08 23:07:13 +0200980 * @cl: private data of the file object
981 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300982 * Return: true if other client is connected, false - otherwise.
Tomas Winkler9ca90502013-01-08 23:07:13 +0200983 */
Tomas Winkler0c533572015-05-04 09:43:53 +0300984static bool mei_cl_is_other_connecting(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200985{
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200986 struct mei_device *dev;
Tomas Winkler0c533572015-05-04 09:43:53 +0300987 struct mei_cl_cb *cb;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200988
989 dev = cl->dev;
990
Alexander Usyskin962ff7b2017-01-27 16:32:45 +0200991 list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
Tomas Winkler0c533572015-05-04 09:43:53 +0300992 if (cb->fop_type == MEI_FOP_CONNECT &&
Alexander Usyskind49ed642015-05-04 09:43:54 +0300993 mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200994 return true;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200995 }
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200996
997 return false;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200998}
999
/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	/* request is out: park the cb on the control read list to
	 * await the firmware reply, and arm the stall timer
	 */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}
1027
1028/**
1029 * mei_cl_irq_connect - send connect request in irq_thread context
1030 *
1031 * @cl: host client
1032 * @cb: callback block
1033 * @cmpl_list: complete list
1034 *
1035 * Return: 0, OK; otherwise, error.
1036 */
1037int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001038 struct list_head *cmpl_list)
Tomas Winkler0c533572015-05-04 09:43:53 +03001039{
1040 struct mei_device *dev = cl->dev;
1041 u32 msg_slots;
1042 int slots;
1043 int rets;
1044
Tomas Winkler0c533572015-05-04 09:43:53 +03001045 if (mei_cl_is_other_connecting(cl))
1046 return 0;
1047
Tomas Winkler98e70862018-07-31 09:35:33 +03001048 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
Tomas Winklerde877432018-07-12 17:10:08 +03001049 slots = mei_hbuf_empty_slots(dev);
1050 if (slots < 0)
1051 return -EOVERFLOW;
1052
1053 if ((u32)slots < msg_slots)
Tomas Winkler0c533572015-05-04 09:43:53 +03001054 return -EMSGSIZE;
1055
1056 rets = mei_cl_send_connect(cl, cb);
1057 if (rets)
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001058 list_move_tail(&cb->list, cmpl_list);
Tomas Winkler0c533572015-05-04 09:43:53 +03001059
1060 return rets;
1061}
1062
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 * while waiting for the firmware reply.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	/* fixed address clients connect without a FW handshake */
	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}
1156
1157/**
Tomas Winkler03b8d342015-02-10 10:39:44 +02001158 * mei_cl_alloc_linked - allocate and link host client
1159 *
1160 * @dev: the device structure
Tomas Winkler03b8d342015-02-10 10:39:44 +02001161 *
1162 * Return: cl on success ERR_PTR on failure
1163 */
Alexander Usyskin7851e002016-02-07 23:35:40 +02001164struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
Tomas Winkler03b8d342015-02-10 10:39:44 +02001165{
1166 struct mei_cl *cl;
1167 int ret;
1168
1169 cl = mei_cl_allocate(dev);
1170 if (!cl) {
1171 ret = -ENOMEM;
1172 goto err;
1173 }
1174
Alexander Usyskin7851e002016-02-07 23:35:40 +02001175 ret = mei_cl_link(cl);
Tomas Winkler03b8d342015-02-10 10:39:44 +02001176 if (ret)
1177 goto err;
1178
1179 return cl;
1180err:
1181 kfree(cl);
1182 return ERR_PTR(ret);
1183}
1184
Tomas Winkler03b8d342015-02-10 10:39:44 +02001185/**
Tomas Winkler4034b812016-07-26 01:06:04 +03001186 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
Tomas Winkler9ca90502013-01-08 23:07:13 +02001187 *
Alexander Usyskin06ee5362016-02-07 23:35:37 +02001188 * @cl: host client
Tomas Winkler9ca90502013-01-08 23:07:13 +02001189 *
Tomas Winkler4034b812016-07-26 01:06:04 +03001190 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
Tomas Winkler9ca90502013-01-08 23:07:13 +02001191 */
Tomas Winkler4034b812016-07-26 01:06:04 +03001192static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +02001193{
Alexander Usyskind49ed642015-05-04 09:43:54 +03001194 if (WARN_ON(!cl || !cl->me_cl))
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001195 return -EINVAL;
1196
Tomas Winkler4034b812016-07-26 01:06:04 +03001197 if (cl->tx_flow_ctrl_creds > 0)
Tomas Winkler9ca90502013-01-08 23:07:13 +02001198 return 1;
1199
Alexander Usyskina808c802016-06-16 17:58:58 +03001200 if (mei_cl_is_fixed_address(cl))
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001201 return 1;
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001202
Alexander Usyskind49ed642015-05-04 09:43:54 +03001203 if (mei_cl_is_single_recv_buf(cl)) {
Tomas Winkler4034b812016-07-26 01:06:04 +03001204 if (cl->me_cl->tx_flow_ctrl_creds > 0)
Alexander Usyskind49ed642015-05-04 09:43:54 +03001205 return 1;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001206 }
Alexander Usyskind49ed642015-05-04 09:43:54 +03001207 return 0;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001208}
1209
1210/**
Tomas Winkler4034b812016-07-26 01:06:04 +03001211 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
1212 * for a client
Tomas Winkler9ca90502013-01-08 23:07:13 +02001213 *
Tomas Winkler4034b812016-07-26 01:06:04 +03001214 * @cl: host client
Masanari Iida393b1482013-04-05 01:05:05 +09001215 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001216 * Return:
Tomas Winkler9ca90502013-01-08 23:07:13 +02001217 * 0 on success
Tomas Winkler9ca90502013-01-08 23:07:13 +02001218 * -EINVAL when ctrl credits are <= 0
1219 */
Tomas Winkler4034b812016-07-26 01:06:04 +03001220static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +02001221{
Alexander Usyskind49ed642015-05-04 09:43:54 +03001222 if (WARN_ON(!cl || !cl->me_cl))
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001223 return -EINVAL;
1224
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001225 if (mei_cl_is_fixed_address(cl))
1226 return 0;
1227
Alexander Usyskind49ed642015-05-04 09:43:54 +03001228 if (mei_cl_is_single_recv_buf(cl)) {
Tomas Winkler4034b812016-07-26 01:06:04 +03001229 if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
Alexander Usyskind49ed642015-05-04 09:43:54 +03001230 return -EINVAL;
Tomas Winkler4034b812016-07-26 01:06:04 +03001231 cl->me_cl->tx_flow_ctrl_creds--;
Alexander Usyskin12d00662014-02-17 15:13:23 +02001232 } else {
Tomas Winkler4034b812016-07-26 01:06:04 +03001233 if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
Alexander Usyskind49ed642015-05-04 09:43:54 +03001234 return -EINVAL;
Tomas Winkler4034b812016-07-26 01:06:04 +03001235 cl->tx_flow_ctrl_creds--;
Alexander Usyskin12d00662014-02-17 15:13:23 +02001236 }
Alexander Usyskind49ed642015-05-04 09:43:54 +03001237 return 0;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001238}
1239
/**
 * mei_cl_add_rd_completed - add read completed callback to list with lock
 *
 * @cl: host client
 * @cb: callback block
 */
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	spin_lock(&cl->rd_completed_lock);
	list_add_tail(&cb->list, &cl->rd_completed);
	spin_unlock(&cl->rd_completed_lock);
}
1253
/**
 * mei_cl_del_rd_completed - free read completed callback with lock
 *
 * @cl: host client
 * @cb: callback block
 */
void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	spin_lock(&cl->rd_completed_lock);
	/* mei_io_cb_free unlinks and frees the cb under the list lock */
	mei_io_cb_free(cb);
	spin_unlock(&cl->rd_completed_lock);
}
1267
1268/**
Tomas Winkler51678cc2015-07-26 09:54:18 +03001269 * mei_cl_notify_fop2req - convert fop to proper request
1270 *
1271 * @fop: client notification start response command
1272 *
1273 * Return: MEI_HBM_NOTIFICATION_START/STOP
1274 */
1275u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
1276{
1277 if (fop == MEI_FOP_NOTIFY_START)
1278 return MEI_HBM_NOTIFICATION_START;
1279 else
1280 return MEI_HBM_NOTIFICATION_STOP;
1281}
1282
1283/**
1284 * mei_cl_notify_req2fop - convert notification request top file operation type
1285 *
1286 * @req: hbm notification request type
1287 *
1288 * Return: MEI_FOP_NOTIFY_START/STOP
1289 */
1290enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
1291{
1292 if (req == MEI_HBM_NOTIFICATION_START)
1293 return MEI_FOP_NOTIFY_START;
1294 else
1295 return MEI_FOP_NOTIFY_STOP;
1296}
1297
/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	/* defer if the host buffer cannot hold the request now */
	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	/* await the firmware reply on the control read list */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}
1335
/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 * while waiting for the firmware reply.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* if the host buffer is busy, the request will be sent later
	 * from the interrupt thread off the ctrl_wr_list
	 */
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request ||
			   cl->status ||
			   !mei_cl_is_connected(cl),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	/* no reply within the timeout: report failure */
	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
1411
/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_cl_notify(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	/* client did not enable notifications: nothing to raise */
	if (!cl->notify_en)
		return;

	cl_dbg(dev, cl, "notify event");
	cl->notify_ev = true;
	/* deliver over the bus first; wake char-dev waiters otherwise */
	if (!mei_cl_bus_notify_event(cl))
		wake_up_interruptible(&cl->ev_wait);

	if (cl->ev_async)
		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);

}
1440
/**
 * mei_cl_notify_get - get or wait for notification event
 *
 * @cl: host client
 * @block: this request is blocking
 * @notify_ev: true if notification event was received
 *
 * Locking: called under "dev->device_lock" lock; the lock is dropped
 * while waiting for an event in the blocking case.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
{
	struct mei_device *dev;
	int rets;

	*notify_ev = false;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	/* an event is already pending: consume it without waiting */
	if (cl->notify_ev)
		goto out;

	if (!block)
		return -EAGAIN;

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;

out:
	/* hand the event to the caller and clear the pending flag */
	*notify_ev = cl->notify_ev;
	cl->notify_ev = false;
	return 0;
}
1490
/**
 * mei_cl_read_start - the start read client message function.
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	/* fixed address clients do not use the read flow control path */
	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds)
		return -EBUSY;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	/* if the host buffer is busy, the flow control request will be
	 * sent later from the interrupt thread off the ctrl_wr_list
	 */
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
1557
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001558static inline u8 mei_ext_hdr_set_vtag(struct mei_ext_hdr *ext, u8 vtag)
Tomas Winklera1c4d082018-07-23 13:21:24 +03001559{
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001560 ext->type = MEI_EXT_HDR_VTAG;
1561 ext->ext_payload[0] = vtag;
1562 ext->length = mei_data2slots(sizeof(*ext));
1563 return ext->length;
1564}
1565
1566/**
1567 * mei_msg_hdr_init - allocate and initialize mei message header
1568 *
1569 * @cb: message callback structure
1570 *
1571 * Return: a pointer to initialized header
1572 */
1573static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
1574{
1575 size_t hdr_len;
1576 struct mei_ext_meta_hdr *meta;
1577 struct mei_ext_hdr *ext;
1578 struct mei_msg_hdr *mei_hdr;
1579 bool is_ext, is_vtag;
1580
1581 if (!cb)
1582 return ERR_PTR(-EINVAL);
1583
1584 /* Extended header for vtag is attached only on the first fragment */
1585 is_vtag = (cb->vtag && cb->buf_idx == 0);
1586 is_ext = is_vtag;
1587
1588 /* Compute extended header size */
1589 hdr_len = sizeof(*mei_hdr);
1590
1591 if (!is_ext)
1592 goto setup_hdr;
1593
1594 hdr_len += sizeof(*meta);
1595 if (is_vtag)
1596 hdr_len += sizeof(*ext);
1597
1598setup_hdr:
1599 mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
1600 if (!mei_hdr)
1601 return ERR_PTR(-ENOMEM);
1602
Tomas Winklera1c4d082018-07-23 13:21:24 +03001603 mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
1604 mei_hdr->me_addr = mei_cl_me_id(cb->cl);
Tomas Winklera1c4d082018-07-23 13:21:24 +03001605 mei_hdr->internal = cb->internal;
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001606 mei_hdr->extended = is_ext;
1607
1608 if (!is_ext)
1609 goto out;
1610
1611 meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
1612 if (is_vtag) {
1613 meta->count++;
1614 meta->size += mei_ext_hdr_set_vtag(meta->hdrs, cb->vtag);
1615 }
1616out:
1617 mei_hdr->length = hdr_len - sizeof(*mei_hdr);
1618 return mei_hdr;
Tomas Winklera1c4d082018-07-23 13:21:24 +03001619}
1620
/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Sends at most one chunk of the callback's buffer, either inline in the
 * host buffer or via the DMA ring when the message does not fit.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	int rets;
	bool first_chunk;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	/* flow control credits are consumed once per message (first chunk) */
	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	/* remaining payload and its position within the caller's buffer */
	buf_len = buf->size - cb->buf_idx;
	data = buf->data + cb->buf_idx;
	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto err;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
	       mei_hdr->extended, cb->vtag);

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	/*
	 * Split the message only if we can write the whole host buffer
	 * otherwise wait for next time the host buffer is empty.
	 */
	if (hdr_len + buf_len <= hbuf_len) {
		/* whole remainder fits inline in the host buffer */
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		/* payload goes to the DMA ring; the host buffer carries
		 * only the payload length
		 */
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
		/* host buffer is completely empty: send a partial chunk */
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	} else {
		/* wait for the host buffer to drain before sending */
		kfree(mei_hdr);
		return 0;
	}
	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring)
		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);

	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += buf_len;

	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	/* fully sent messages wait for the write-complete event */
	if (mei_hdr->msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	kfree(mei_hdr);
	return 0;

err:
	kfree(mei_hdr);
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}
1746
1747/**
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001748 * mei_cl_write - submit a write cb to mei device
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001749 * assumes device_lock is locked
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001750 *
1751 * @cl: host client
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001752 * @cb: write callback with filled data
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001753 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001754 * Return: number of bytes sent on success, <0 on failure.
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001755 */
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001756ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001757{
1758 struct mei_device *dev;
1759 struct mei_msg_data *buf;
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001760 struct mei_msg_hdr *mei_hdr = NULL;
1761 size_t hdr_len;
1762 size_t hbuf_len, dr_len;
1763 size_t buf_len;
1764 size_t data_len;
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001765 int hbuf_slots;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001766 u32 dr_slots;
1767 u32 dma_len;
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001768 ssize_t rets;
Alexander Usyskine0cb6b22016-11-08 18:26:08 +02001769 bool blocking;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001770 const void *data;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001771
1772 if (WARN_ON(!cl || !cl->dev))
1773 return -ENODEV;
1774
1775 if (WARN_ON(!cb))
1776 return -EINVAL;
1777
1778 dev = cl->dev;
1779
Tomas Winkler5db75142015-02-10 10:39:42 +02001780 buf = &cb->buf;
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001781 buf_len = buf->size;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001782
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001783 cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001784
Tomas Winklerc30362c2018-11-22 13:11:40 +02001785 blocking = cb->blocking;
1786 data = buf->data;
1787
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001788 rets = pm_runtime_get(dev->dev);
Tomas Winkler04bb1392014-03-18 22:52:04 +02001789 if (rets < 0 && rets != -EINPROGRESS) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001790 pm_runtime_put_noidle(dev->dev);
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001791 cl_err(dev, cl, "rpm: get failed %zd\n", rets);
Alexander Usyskin6cbb0972016-02-10 23:57:26 +02001792 goto free;
Tomas Winkler04bb1392014-03-18 22:52:04 +02001793 }
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001794
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001795 cb->buf_idx = 0;
1796 cl->writing_state = MEI_IDLE;
1797
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001798
Tomas Winkler4034b812016-07-26 01:06:04 +03001799 rets = mei_cl_tx_flow_ctrl_creds(cl);
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001800 if (rets < 0)
1801 goto err;
1802
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001803 mei_hdr = mei_msg_hdr_init(cb);
1804 if (IS_ERR(mei_hdr)) {
1805 rets = -PTR_ERR(mei_hdr);
1806 mei_hdr = NULL;
1807 goto err;
1808 }
1809
1810 cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
1811 mei_hdr->extended, cb->vtag);
1812
1813 hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
Tomas Winklera1c4d082018-07-23 13:21:24 +03001814
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001815 if (rets == 0) {
1816 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001817 rets = buf_len;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001818 goto out;
1819 }
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001820
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001821 if (!mei_hbuf_acquire(dev)) {
1822 cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001823 rets = buf_len;
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001824 goto out;
1825 }
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001826
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001827 hbuf_slots = mei_hbuf_empty_slots(dev);
1828 if (hbuf_slots < 0) {
1829 rets = -EOVERFLOW;
1830 goto out;
1831 }
1832
Tomas Winkler3aef0212020-02-11 18:05:22 +02001833 hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001834 dr_slots = mei_dma_ring_empty_slots(dev);
1835 dr_len = mei_slots2data(dr_slots);
Tomas Winkler98e70862018-07-31 09:35:33 +03001836
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001837 if (hdr_len + buf_len <= hbuf_len) {
1838 data_len = buf_len;
1839 mei_hdr->msg_complete = 1;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001840 } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001841 mei_hdr->dma_ring = 1;
1842 if (buf_len > dr_len)
1843 buf_len = dr_len;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001844 else
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001845 mei_hdr->msg_complete = 1;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001846
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001847 data_len = sizeof(dma_len);
1848 dma_len = buf_len;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001849 data = &dma_len;
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001850 } else {
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001851 buf_len = hbuf_len - hdr_len;
1852 data_len = buf_len;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001853 }
1854
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001855 mei_hdr->length += data_len;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001856
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001857 if (mei_hdr->dma_ring)
1858 mei_dma_ring_write(dev, buf->data, buf_len);
1859 rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
1860
Tomas Winkler2ebf8c92013-09-16 23:44:43 +03001861 if (rets)
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001862 goto err;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001863
Tomas Winkler4034b812016-07-26 01:06:04 +03001864 rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001865 if (rets)
1866 goto err;
1867
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001868 cl->writing_state = MEI_WRITING;
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001869 cb->buf_idx = buf_len;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001870 /* restore return value */
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001871 buf_len = buf->size;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001872
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001873out:
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001874 if (mei_hdr->msg_complete)
Alexander Usyskinaf336ca2018-02-25 20:07:05 +02001875 mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001876 else
Alexander Usyskinaf336ca2018-02-25 20:07:05 +02001877 mei_tx_cb_enqueue(cb, &dev->write_list);
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001878
Alexander Usyskin23253c32015-07-23 10:43:11 +03001879 cb = NULL;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001880 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
1881
1882 mutex_unlock(&dev->device_lock);
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02001883 rets = wait_event_interruptible(cl->tx_wait,
Alexander Usyskin0faf6a32016-02-07 23:35:34 +02001884 cl->writing_state == MEI_WRITE_COMPLETE ||
1885 (!mei_cl_is_connected(cl)));
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001886 mutex_lock(&dev->device_lock);
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02001887 /* wait_event_interruptible returns -ERESTARTSYS */
1888 if (rets) {
1889 if (signal_pending(current))
1890 rets = -EINTR;
1891 goto err;
1892 }
Alexander Usyskin0faf6a32016-02-07 23:35:34 +02001893 if (cl->writing_state != MEI_WRITE_COMPLETE) {
1894 rets = -EFAULT;
1895 goto err;
1896 }
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001897 }
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02001898
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001899 rets = buf_len;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001900err:
Tomas Winkler04bb1392014-03-18 22:52:04 +02001901 cl_dbg(dev, cl, "rpm: autosuspend\n");
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001902 pm_runtime_mark_last_busy(dev->dev);
1903 pm_runtime_put_autosuspend(dev->dev);
Alexander Usyskin6cbb0972016-02-10 23:57:26 +02001904free:
1905 mei_io_cb_free(cb);
Tomas Winkler04bb1392014-03-18 22:52:04 +02001906
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001907 kfree(mei_hdr);
1908
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001909 return rets;
1910}
1911
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001912/**
1913 * mei_cl_complete - processes completed operation for a client
1914 *
1915 * @cl: private data of the file object.
1916 * @cb: callback block.
1917 */
1918void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1919{
Alexander Usyskina1809d32015-05-07 15:53:59 +03001920 struct mei_device *dev = cl->dev;
1921
Tomas Winkler3c666182015-05-04 09:43:52 +03001922 switch (cb->fop_type) {
1923 case MEI_FOP_WRITE:
Alexander Usyskinaf336ca2018-02-25 20:07:05 +02001924 mei_tx_cb_dequeue(cb);
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001925 cl->writing_state = MEI_WRITE_COMPLETE;
Alexander Usyskina1809d32015-05-07 15:53:59 +03001926 if (waitqueue_active(&cl->tx_wait)) {
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001927 wake_up_interruptible(&cl->tx_wait);
Alexander Usyskina1809d32015-05-07 15:53:59 +03001928 } else {
1929 pm_runtime_mark_last_busy(dev->dev);
1930 pm_request_autosuspend(dev->dev);
1931 }
Tomas Winkler3c666182015-05-04 09:43:52 +03001932 break;
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001933
Tomas Winkler3c666182015-05-04 09:43:52 +03001934 case MEI_FOP_READ:
Alexander Usyskind1376f32020-08-18 14:51:40 +03001935 mei_cl_add_rd_completed(cl, cb);
Alexander Usyskin46978ad2016-07-26 01:06:03 +03001936 if (!mei_cl_is_fixed_address(cl) &&
1937 !WARN_ON(!cl->rx_flow_ctrl_creds))
1938 cl->rx_flow_ctrl_creds--;
Tomas Winklera1f9ae22016-02-07 23:35:30 +02001939 if (!mei_cl_bus_rx_event(cl))
1940 wake_up_interruptible(&cl->rx_wait);
Tomas Winkler3c666182015-05-04 09:43:52 +03001941 break;
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001942
Tomas Winkler3c666182015-05-04 09:43:52 +03001943 case MEI_FOP_CONNECT:
1944 case MEI_FOP_DISCONNECT:
Tomas Winkler51678cc2015-07-26 09:54:18 +03001945 case MEI_FOP_NOTIFY_STOP:
1946 case MEI_FOP_NOTIFY_START:
Tomas Winkler3c666182015-05-04 09:43:52 +03001947 if (waitqueue_active(&cl->wait))
1948 wake_up(&cl->wait);
1949
1950 break;
Alexander Usyskin6a8d6482016-04-17 12:16:03 -04001951 case MEI_FOP_DISCONNECT_RSP:
1952 mei_io_cb_free(cb);
1953 mei_cl_set_disconnected(cl);
1954 break;
Tomas Winkler3c666182015-05-04 09:43:52 +03001955 default:
1956 BUG_ON(0);
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001957 }
1958}
1959
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001960
1961/**
Tomas Winkler074b4c02013-02-06 14:06:44 +02001962 * mei_cl_all_disconnect - disconnect forcefully all connected clients
1963 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001964 * @dev: mei device
Tomas Winkler074b4c02013-02-06 14:06:44 +02001965 */
Tomas Winkler074b4c02013-02-06 14:06:44 +02001966void mei_cl_all_disconnect(struct mei_device *dev)
1967{
Tomas Winkler31f88f52014-02-17 15:13:25 +02001968 struct mei_cl *cl;
Tomas Winkler074b4c02013-02-06 14:06:44 +02001969
Tomas Winkler3c666182015-05-04 09:43:52 +03001970 list_for_each_entry(cl, &dev->file_list, link)
1971 mei_cl_set_disconnected(cl);
Tomas Winkler074b4c02013-02-06 14:06:44 +02001972}