blob: 3904fce182610bb289526c1b73b2b325eedc1d1d [file] [log] [blame]
Tomas Winkler9fff0422019-03-12 00:10:41 +02001// SPDX-License-Identifier: GPL-2.0
Tomas Winkler9ca90502013-01-08 23:07:13 +02002/*
Tomas Winkler4b40b222020-07-23 17:59:25 +03003 * Copyright (c) 2003-2020, Intel Corporation. All rights reserved.
Tomas Winkler9ca90502013-01-08 23:07:13 +02004 * Intel Management Engine Interface (Intel MEI) Linux driver
Tomas Winkler9ca90502013-01-08 23:07:13 +02005 */
6
Ingo Molnar174cd4b2017-02-02 19:15:33 +01007#include <linux/sched/signal.h>
Tomas Winkler9ca90502013-01-08 23:07:13 +02008#include <linux/wait.h>
9#include <linux/delay.h>
Tomas Winkler1f180352014-09-29 16:31:46 +030010#include <linux/slab.h>
Tomas Winkler04bb1392014-03-18 22:52:04 +020011#include <linux/pm_runtime.h>
Tomas Winkler9ca90502013-01-08 23:07:13 +020012
13#include <linux/mei.h>
14
15#include "mei_dev.h"
16#include "hbm.h"
Tomas Winkler90e0b5f2013-01-08 23:07:14 +020017#include "client.h"
18
/**
 * mei_me_cl_init - initialize me client
 *
 * Sets up the list linkage and the reference counter (initial refcount 1).
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}
29
30/**
31 * mei_me_cl_get - increases me client refcount
32 *
33 * @me_cl: me client
34 *
35 * Locking: called under "dev->device_lock" lock
36 *
37 * Return: me client or NULL
38 */
39struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
40{
Tomas Winklerb7d88512015-02-10 10:39:31 +020041 if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
42 return me_cl;
Tomas Winkler79563db2015-01-11 00:07:16 +020043
Tomas Winklerb7d88512015-02-10 10:39:31 +020044 return NULL;
Tomas Winkler79563db2015-01-11 00:07:16 +020045}
46
/**
 * mei_me_cl_release - free me client
 *
 * kref release callback: invoked when the last reference is dropped.
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}
Tomas Winklerb7d88512015-02-10 10:39:31 +020061
Tomas Winkler79563db2015-01-11 00:07:16 +020062/**
63 * mei_me_cl_put - decrease me client refcount and free client if necessary
64 *
65 * Locking: called under "dev->device_lock" lock
66 *
67 * @me_cl: me client
68 */
69void mei_me_cl_put(struct mei_me_client *me_cl)
70{
71 if (me_cl)
72 kref_put(&me_cl->refcnt, mei_me_cl_release);
73}
74
/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * Drops the reference that the dev->me_clients list held on the client.
 *
 * @dev: mei device (unused here; kept for API symmetry with callers)
 * @me_cl: me client, may be NULL
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}
92
/**
 * mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * Locked wrapper around __mei_me_cl_del().
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}
106
/**
 * mei_me_cl_add - add me client to the list
 *
 * The list takes over the caller's reference on @me_cl.
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}
119
/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * Returns the first list entry whose protocol_name matches @uuid.
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client (with an extra reference) or NULL if not found
 *
 * Locking: dev->me_clients_rwsem must be held by the caller (asserted)
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}
147
/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * Locked wrapper around __mei_me_cl_by_uuid().
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client (with an extra reference) or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}
170
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200171/**
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300172 * mei_me_cl_by_id - locate me client by client id
Tomas Winkler79563db2015-01-11 00:07:16 +0200173 * increases ref count
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200174 *
175 * @dev: the device structure
176 * @client_id: me client id
177 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300178 * Return: me client or NULL if not found
Tomas Winklerb7d88512015-02-10 10:39:31 +0200179 *
180 * Locking: dev->me_clients_rwsem
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200181 */
Tomas Winklerd3208322014-08-24 12:08:55 +0300182struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200183{
Alexander Usyskina27a76d2014-02-17 15:13:22 +0200184
Tomas Winklerb7d88512015-02-10 10:39:31 +0200185 struct mei_me_client *__me_cl, *me_cl = NULL;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200186
Tomas Winklerb7d88512015-02-10 10:39:31 +0200187 down_read(&dev->me_clients_rwsem);
188 list_for_each_entry(__me_cl, &dev->me_clients, list) {
189 if (__me_cl->client_id == client_id) {
190 me_cl = mei_me_cl_get(__me_cl);
191 break;
192 }
193 }
194 up_read(&dev->me_clients_rwsem);
195
196 return me_cl;
197}
198
/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * Both the uuid and the client id must match the same entry.
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client (with an extra reference) or NULL if not found
 *
 * Locking: dev->me_clients_rwsem must be held by the caller (asserted)
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}
Tomas Winkler9ca90502013-01-08 23:07:13 +0200228
Tomas Winklerb7d88512015-02-10 10:39:31 +0200229
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300230/**
231 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
Tomas Winkler79563db2015-01-11 00:07:16 +0200232 * increases ref count
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300233 *
234 * @dev: the device structure
235 * @uuid: me client uuid
236 * @client_id: me client id
237 *
Tomas Winklerb7d88512015-02-10 10:39:31 +0200238 * Return: me client or null if not found
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300239 */
Tomas Winklerd880f322014-08-21 14:29:15 +0300240struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
241 const uuid_le *uuid, u8 client_id)
242{
243 struct mei_me_client *me_cl;
244
Tomas Winklerb7d88512015-02-10 10:39:31 +0200245 down_read(&dev->me_clients_rwsem);
246 me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
247 up_read(&dev->me_clients_rwsem);
Tomas Winkler79563db2015-01-11 00:07:16 +0200248
Tomas Winklerb7d88512015-02-10 10:39:31 +0200249 return me_cl;
Tomas Winklerd880f322014-08-21 14:29:15 +0300250}
251
Tomas Winkler25ca6472014-08-21 14:29:14 +0300252/**
Tomas Winkler79563db2015-01-11 00:07:16 +0200253 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
Tomas Winkler25ca6472014-08-21 14:29:14 +0300254 *
255 * @dev: the device structure
256 * @uuid: me client uuid
Tomas Winkler79563db2015-01-11 00:07:16 +0200257 *
258 * Locking: called under "dev->device_lock" lock
Tomas Winkler25ca6472014-08-21 14:29:14 +0300259 */
Tomas Winkler79563db2015-01-11 00:07:16 +0200260void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
Tomas Winkler25ca6472014-08-21 14:29:14 +0300261{
Tomas Winklerb7d88512015-02-10 10:39:31 +0200262 struct mei_me_client *me_cl;
Tomas Winkler25ca6472014-08-21 14:29:14 +0300263
Tomas Winkler79563db2015-01-11 00:07:16 +0200264 dev_dbg(dev->dev, "remove %pUl\n", uuid);
Tomas Winklerb7d88512015-02-10 10:39:31 +0200265
266 down_write(&dev->me_clients_rwsem);
267 me_cl = __mei_me_cl_by_uuid(dev, uuid);
268 __mei_me_cl_del(dev, me_cl);
Alexander Usyskinfc9c03c2020-05-13 01:31:40 +0300269 mei_me_cl_put(me_cl);
Tomas Winklerb7d88512015-02-10 10:39:31 +0200270 up_write(&dev->me_clients_rwsem);
Tomas Winkler79563db2015-01-11 00:07:16 +0200271}
272
/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	/* drop the list's reference, then the lookup's reference */
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}
294
Tomas Winkler79563db2015-01-11 00:07:16 +0200295/**
296 * mei_me_cl_rm_all - remove all me clients
297 *
298 * @dev: the device structure
299 *
300 * Locking: called under "dev->device_lock" lock
301 */
302void mei_me_cl_rm_all(struct mei_device *dev)
303{
304 struct mei_me_client *me_cl, *next;
305
Tomas Winklerb7d88512015-02-10 10:39:31 +0200306 down_write(&dev->me_clients_rwsem);
Tomas Winkler79563db2015-01-11 00:07:16 +0200307 list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
Tomas Winklerb7d88512015-02-10 10:39:31 +0200308 __mei_me_cl_del(dev, me_cl);
309 up_write(&dev->me_clients_rwsem);
Tomas Winkler79563db2015-01-11 00:07:16 +0200310}
311
Tomas Winkler9ca90502013-01-08 23:07:13 +0200312/**
Tomas Winkler9ca90502013-01-08 23:07:13 +0200313 * mei_io_cb_free - free mei_cb_private related memory
314 *
315 * @cb: mei callback struct
316 */
317void mei_io_cb_free(struct mei_cl_cb *cb)
318{
319 if (cb == NULL)
320 return;
321
Tomas Winkler928fa662015-02-10 10:39:45 +0200322 list_del(&cb->list);
Tomas Winkler5db75142015-02-10 10:39:42 +0200323 kfree(cb->buf.data);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200324 kfree(cb);
325}
326
/**
 * mei_tx_cb_enqueue - queue tx callback
 *
 * Appends @cb to @head and bumps the owning client's queued-tx counter.
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct
 * @head: an instance of list to queue on
 */
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
				     struct list_head *head)
{
	list_add_tail(&cb->list, head);
	cb->cl->tx_cb_queued++;
}
341
/**
 * mei_tx_cb_dequeue - dequeue tx callback
 *
 * Decrements the owning client's queued-tx counter (warning on
 * underflow) and frees the callback.
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct to dequeue and free
 */
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
		cb->cl->tx_cb_queued--;

	mei_io_cb_free(cb);
}
356
357/**
Alexander Usyskinf35fe5f2020-08-18 14:51:41 +0300358 * mei_cl_set_read_by_fp - set pending_read flag to vtag struct for given fp
359 *
360 * Locking: called under "dev->device_lock" lock
361 *
362 * @cl: mei client
363 * @fp: pointer to file structure
364 */
365static void mei_cl_set_read_by_fp(const struct mei_cl *cl,
366 const struct file *fp)
367{
368 struct mei_cl_vtag *cl_vtag;
369
370 list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
371 if (cl_vtag->fp == fp) {
372 cl_vtag->pending_read = true;
373 return;
374 }
375 }
376}
377
/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * Allocates a zeroed callback and records the owning client, the
 * originating file and the operation type. No data buffer is
 * allocated here (buf_idx and vtag start at 0).
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL on allocation failure
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(*cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	cb->vtag = 0;

	return cb;
}
406
/**
 * mei_io_list_flush_cl - removes cbs belonging to the cl.
 *
 * Unlinks every callback on @head owned by @cl. Note the asymmetry:
 * only MEI_FOP_READ callbacks are freed here; other types are merely
 * removed from the list and remain owned elsewhere.
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_list_flush_cl(struct list_head *head,
				 const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl) {
			list_del_init(&cb->list);
			if (cb->fop_type == MEI_FOP_READ)
				mei_io_cb_free(cb);
		}
	}
}
426
/**
 * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
 *
 * Dequeues (and frees) every tx callback on @head owned by @cl,
 * keeping the client's tx_cb_queued counter in sync.
 *
 * @head: An instance of our list structure
 * @cl: host client
 */
static void mei_io_tx_list_free_cl(struct list_head *head,
				   const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl)
			mei_tx_cb_dequeue(cb);
	}
}
443
444/**
445 * mei_io_list_free_fp - free cb from a list that matches file pointer
446 *
447 * @head: io list
448 * @fp: file pointer (matching cb file object), may be NULL
449 */
Alexander Usyskin394a77d2017-03-20 15:04:03 +0200450static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
Tomas Winklerf0461922017-01-27 16:32:46 +0200451{
452 struct mei_cl_cb *cb, *next;
453
454 list_for_each_entry_safe(cb, next, head, list)
455 if (!fp || fp == cb->fp)
456 mei_io_cb_free(cb);
Tomas Winkler928fa662015-02-10 10:39:45 +0200457}
458
/**
 * mei_cl_free_pending - free pending cb
 *
 * Frees the first entry of the rd_pending list, if any.
 * NOTE(review): only the head entry is released — presumably
 * rd_pending holds at most one cb per client; confirm against callers.
 *
 * @cl: host client
 */
static void mei_cl_free_pending(struct mei_cl *cl)
{
	struct mei_cl_cb *cb;

	cb = list_first_entry_or_null(&cl->rd_pending, struct mei_cl_cb, list);
	mei_io_cb_free(cb);
}
471
/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * Allocates a callback and, for a non-zero @length, a data buffer
 * rounded up to a whole number of hardware slots (MEI_SLOT_SIZE);
 * buf.size still records the requested length.
 *
 * @cl: host client
 * @length: size of the buffer (0 means no data buffer)
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}
504
/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *     and enqueuing of the control commands cb
 *
 * Allocates a cb (growing a non-zero length to at least the client's
 * mtu) and appends it to the device's ctrl_wr_list.
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}
534
535/**
Tomas Winklera9bed612015-02-10 10:39:46 +0200536 * mei_cl_read_cb - find this cl's callback in the read list
537 * for a specific file
538 *
539 * @cl: host client
540 * @fp: file pointer (matching cb file object), may be NULL
541 *
542 * Return: cb on success, NULL if cb is not found
543 */
Alexander Usyskind1376f32020-08-18 14:51:40 +0300544struct mei_cl_cb *mei_cl_read_cb(struct mei_cl *cl, const struct file *fp)
Tomas Winklera9bed612015-02-10 10:39:46 +0200545{
546 struct mei_cl_cb *cb;
Alexander Usyskind1376f32020-08-18 14:51:40 +0300547 struct mei_cl_cb *ret_cb = NULL;
Tomas Winklera9bed612015-02-10 10:39:46 +0200548
Alexander Usyskind1376f32020-08-18 14:51:40 +0300549 spin_lock(&cl->rd_completed_lock);
Tomas Winklera9bed612015-02-10 10:39:46 +0200550 list_for_each_entry(cb, &cl->rd_completed, list)
Alexander Usyskind1376f32020-08-18 14:51:40 +0300551 if (!fp || fp == cb->fp) {
552 ret_cb = cb;
553 break;
554 }
555 spin_unlock(&cl->rd_completed_lock);
556 return ret_cb;
Tomas Winklera9bed612015-02-10 10:39:46 +0200557}
558
/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * Drops the client's tx callbacks, unlinks its control callbacks,
 * and frees completed reads matching @fp. Pending reads are freed
 * only on the final flush (@fp == NULL).
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&cl->dev->write_list, cl);
	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
	/* free pending cb only in final flush */
	if (!fp)
		mei_cl_free_pending(cl);
	spin_lock(&cl->rd_completed_lock);
	mei_io_list_free_fp(&cl->rd_completed, fp);
	spin_unlock(&cl->rd_completed_lock);

	return 0;
}
590
Tomas Winkler9ca90502013-01-08 23:07:13 +0200591/**
Alexander Usyskin83ce0742014-01-08 22:31:46 +0200592 * mei_cl_init - initializes cl.
Tomas Winkler9ca90502013-01-08 23:07:13 +0200593 *
594 * @cl: host client to be initialized
595 * @dev: mei device
596 */
Alexander Usyskin394a77d2017-03-20 15:04:03 +0200597static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200598{
Tomas Winkler4b40b222020-07-23 17:59:25 +0300599 memset(cl, 0, sizeof(*cl));
Tomas Winkler9ca90502013-01-08 23:07:13 +0200600 init_waitqueue_head(&cl->wait);
601 init_waitqueue_head(&cl->rx_wait);
602 init_waitqueue_head(&cl->tx_wait);
Tomas Winklerb38a3622015-07-26 09:54:19 +0300603 init_waitqueue_head(&cl->ev_wait);
Alexander Usyskinf35fe5f2020-08-18 14:51:41 +0300604 INIT_LIST_HEAD(&cl->vtag_map);
Alexander Usyskind1376f32020-08-18 14:51:40 +0300605 spin_lock_init(&cl->rd_completed_lock);
Tomas Winklera9bed612015-02-10 10:39:46 +0200606 INIT_LIST_HEAD(&cl->rd_completed);
607 INIT_LIST_HEAD(&cl->rd_pending);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200608 INIT_LIST_HEAD(&cl->link);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200609 cl->writing_state = MEI_IDLE;
Alexander Usyskinbd47b522016-11-16 22:51:27 +0200610 cl->state = MEI_FILE_UNINITIALIZED;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200611 cl->dev = dev;
612}
613
614/**
615 * mei_cl_allocate - allocates cl structure and sets it up.
616 *
617 * @dev: mei device
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300618 * Return: The allocated file or NULL on failure
Tomas Winkler9ca90502013-01-08 23:07:13 +0200619 */
620struct mei_cl *mei_cl_allocate(struct mei_device *dev)
621{
622 struct mei_cl *cl;
623
Tomas Winkler4b40b222020-07-23 17:59:25 +0300624 cl = kmalloc(sizeof(*cl), GFP_KERNEL);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200625 if (!cl)
626 return NULL;
627
628 mei_cl_init(cl, dev);
629
630 return cl;
631}
632
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200633/**
Alexander Usyskin3908be62015-02-10 10:39:35 +0200634 * mei_cl_link - allocate host id in the host map
Tomas Winkler9ca90502013-01-08 23:07:13 +0200635 *
Alexander Usyskin3908be62015-02-10 10:39:35 +0200636 * @cl: host client
Masanari Iida393b1482013-04-05 01:05:05 +0900637 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300638 * Return: 0 on success
Tomas Winkler9ca90502013-01-08 23:07:13 +0200639 * -EINVAL on incorrect values
Tomas Winkler03b8d342015-02-10 10:39:44 +0200640 * -EMFILE if open count exceeded.
Tomas Winkler9ca90502013-01-08 23:07:13 +0200641 */
Alexander Usyskin7851e002016-02-07 23:35:40 +0200642int mei_cl_link(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200643{
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200644 struct mei_device *dev;
Alexander Usyskin7851e002016-02-07 23:35:40 +0200645 int id;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200646
Tomas Winkler781d0d82013-01-08 23:07:22 +0200647 if (WARN_ON(!cl || !cl->dev))
Tomas Winkler9ca90502013-01-08 23:07:13 +0200648 return -EINVAL;
649
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200650 dev = cl->dev;
651
Alexander Usyskin7851e002016-02-07 23:35:40 +0200652 id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
Tomas Winkler781d0d82013-01-08 23:07:22 +0200653 if (id >= MEI_CLIENTS_MAX) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300654 dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
Tomas Winklere036cc52013-09-16 23:44:46 +0300655 return -EMFILE;
656 }
657
Alexander Usyskin394a77d2017-03-20 15:04:03 +0200658 if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300659 dev_err(dev->dev, "open_handle_count exceeded %d",
Tomas Winklere036cc52013-09-16 23:44:46 +0300660 MEI_MAX_OPEN_HANDLE_COUNT);
661 return -EMFILE;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200662 }
663
Tomas Winkler781d0d82013-01-08 23:07:22 +0200664 dev->open_handle_count++;
665
666 cl->host_client_id = id;
667 list_add_tail(&cl->link, &dev->file_list);
668
669 set_bit(id, dev->host_clients_map);
670
671 cl->state = MEI_FILE_INITIALIZING;
672
Alexander Usyskinc0abffb2013-09-15 18:11:07 +0300673 cl_dbg(dev, cl, "link cl\n");
Tomas Winkler781d0d82013-01-08 23:07:22 +0200674 return 0;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200675}
Tomas Winkler781d0d82013-01-08 23:07:22 +0200676
Tomas Winkler9ca90502013-01-08 23:07:13 +0200677/**
Alexander Usyskind49ed642015-05-04 09:43:54 +0300678 * mei_cl_unlink - remove host client from the list
Tomas Winkler9ca90502013-01-08 23:07:13 +0200679 *
Masanari Iida393b1482013-04-05 01:05:05 +0900680 * @cl: host client
Alexander Usyskince231392014-09-29 16:31:50 +0300681 *
682 * Return: always 0
Tomas Winkler9ca90502013-01-08 23:07:13 +0200683 */
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200684int mei_cl_unlink(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200685{
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200686 struct mei_device *dev;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200687
Tomas Winkler781d0d82013-01-08 23:07:22 +0200688 /* don't shout on error exit path */
689 if (!cl)
690 return 0;
691
Alexander Usyskin394a77d2017-03-20 15:04:03 +0200692 if (WARN_ON(!cl->dev))
Tomas Winkler8e9a4a92013-01-10 17:32:14 +0200693 return 0;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200694
695 dev = cl->dev;
696
Tomas Winklera14c44d2013-09-16 23:44:45 +0300697 cl_dbg(dev, cl, "unlink client");
698
Tomas Winkler22f96a02013-09-16 23:44:47 +0300699 if (dev->open_handle_count > 0)
700 dev->open_handle_count--;
701
702 /* never clear the 0 bit */
703 if (cl->host_client_id)
704 clear_bit(cl->host_client_id, dev->host_clients_map);
705
706 list_del_init(&cl->link);
707
Alexander Usyskinbd47b522016-11-16 22:51:27 +0200708 cl->state = MEI_FILE_UNINITIALIZED;
Alexander Usyskin7c7a6072016-11-16 22:51:29 +0200709 cl->writing_state = MEI_IDLE;
710
711 WARN_ON(!list_empty(&cl->rd_completed) ||
712 !list_empty(&cl->rd_pending) ||
713 !list_empty(&cl->link));
Tomas Winkler22f96a02013-09-16 23:44:47 +0300714
Tomas Winkler90e0b5f2013-01-08 23:07:14 +0200715 return 0;
Tomas Winkler9ca90502013-01-08 23:07:13 +0200716}
717
Alexander Usyskin025fb792016-02-07 23:35:43 +0200718void mei_host_client_init(struct mei_device *dev)
Tomas Winkler9ca90502013-01-08 23:07:13 +0200719{
Alexander Usyskin43b8a7e2019-04-22 09:51:07 +0300720 mei_set_devstate(dev, MEI_DEV_ENABLED);
Tomas Winkler6adb8ef2014-01-12 00:36:10 +0200721 dev->reset_count = 0;
Tomas Winkler04bb1392014-03-18 22:52:04 +0200722
Alexander Usyskin025fb792016-02-07 23:35:43 +0200723 schedule_work(&dev->bus_rescan_work);
Tomas Winkler60095952015-07-23 15:08:47 +0300724
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300725 pm_runtime_mark_last_busy(dev->dev);
726 dev_dbg(dev->dev, "rpm: autosuspend\n");
Alexander Usyskind5f8e162016-11-24 13:34:02 +0200727 pm_request_autosuspend(dev->dev);
Tomas Winkler9ca90502013-01-08 23:07:13 +0200728}
729
Tomas Winkler6aae48f2014-02-19 17:35:47 +0200730/**
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300731 * mei_hbuf_acquire - try to acquire host buffer
Tomas Winkler6aae48f2014-02-19 17:35:47 +0200732 *
733 * @dev: the device structure
Alexander Usyskina8605ea2014-09-29 16:31:49 +0300734 * Return: true if host buffer was acquired
Tomas Winkler6aae48f2014-02-19 17:35:47 +0200735 */
736bool mei_hbuf_acquire(struct mei_device *dev)
737{
Tomas Winkler04bb1392014-03-18 22:52:04 +0200738 if (mei_pg_state(dev) == MEI_PG_ON ||
Alexander Usyskin3dc196e2015-06-13 08:51:17 +0300739 mei_pg_in_transition(dev)) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300740 dev_dbg(dev->dev, "device is in pg\n");
Tomas Winkler04bb1392014-03-18 22:52:04 +0200741 return false;
742 }
743
Tomas Winkler6aae48f2014-02-19 17:35:47 +0200744 if (!dev->hbuf_is_ready) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300745 dev_dbg(dev->dev, "hbuf is not ready\n");
Tomas Winkler6aae48f2014-02-19 17:35:47 +0200746 return false;
747 }
748
749 dev->hbuf_is_ready = false;
750
751 return true;
752}
Tomas Winkler9ca90502013-01-08 23:07:13 +0200753
/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *     they can be interrupted
 *
 * The waitqueue_active() checks are safe without the queue locks
 * because the waiters synchronize under the device mutex (see the
 * per-queue comments below).
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}
785
/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *	associated states and resources
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock.
 * Drops all queued I/O for the client, wakes every waiter and
 * releases the reference to the ME client, if any.
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* nothing to tear down for clients that never connected */
	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	/* purge pending and in-flight I/O before waking waiters */
	mei_io_tx_list_free_cl(&dev->write_list, cl);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	/* last connection gone: shared ME-side credits are stale */
	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}
822
Alexander Usyskina03d77f62015-05-04 09:43:55 +0300823static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
824{
Alexander Usyskin1df629e2015-05-04 09:43:56 +0300825 if (!mei_me_cl_get(me_cl))
Alexander Usyskina03d77f62015-05-04 09:43:55 +0300826 return -ENOENT;
827
Alexander Usyskin1df629e2015-05-04 09:43:56 +0300828 /* only one connection is allowed for fixed address clients */
829 if (me_cl->props.fixed_address) {
830 if (me_cl->connect_count) {
831 mei_me_cl_put(me_cl);
832 return -EBUSY;
833 }
834 }
835
836 cl->me_cl = me_cl;
Alexander Usyskina03d77f62015-05-04 09:43:55 +0300837 cl->state = MEI_FILE_CONNECTING;
838 cl->me_cl->connect_count++;
839
840 return 0;
841}
842
/**
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	/* park the cb on the control read list to await the FW reply
	 * and arm the stall timer in case the reply never arrives
	 */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}
871
872/**
873 * mei_cl_irq_disconnect - processes close related operation from
874 * interrupt thread context - send disconnect request
875 *
876 * @cl: client
877 * @cb: callback block.
878 * @cmpl_list: complete list.
879 *
880 * Return: 0, OK; otherwise, error.
881 */
882int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
Alexander Usyskin962ff7b2017-01-27 16:32:45 +0200883 struct list_head *cmpl_list)
Tomas Winkler3c666182015-05-04 09:43:52 +0300884{
885 struct mei_device *dev = cl->dev;
886 u32 msg_slots;
887 int slots;
888 int ret;
889
Tomas Winkler98e70862018-07-31 09:35:33 +0300890 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
Tomas Winkler3c666182015-05-04 09:43:52 +0300891 slots = mei_hbuf_empty_slots(dev);
Tomas Winklerde877432018-07-12 17:10:08 +0300892 if (slots < 0)
893 return -EOVERFLOW;
Tomas Winkler3c666182015-05-04 09:43:52 +0300894
Tomas Winklerde877432018-07-12 17:10:08 +0300895 if ((u32)slots < msg_slots)
Tomas Winkler3c666182015-05-04 09:43:52 +0300896 return -EMSGSIZE;
897
898 ret = mei_cl_send_disconnect(cl, cb);
899 if (ret)
Alexander Usyskin962ff7b2017-01-27 16:32:45 +0200900 list_move_tail(&cb->list, cmpl_list);
Tomas Winkler3c666182015-05-04 09:43:52 +0300901
902 return ret;
903}
904
/**
 * __mei_cl_disconnect - disconnect host client from the me one
 *     internal function runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock; the lock is
 * dropped while waiting for the FW reply and reacquired after.
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	/* control cb that will carry the disconnect request */
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* if the buffer is busy the request is sent later from the
	 * interrupt thread via the ctrl_wr list
	 */
	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}
960
/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	/* fixed address clients have no HBM connection to tear down */
	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	/* keep the device awake for the duration of the handshake */
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}
1011
1012
1013/**
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001014 * mei_cl_is_other_connecting - checks if other
1015 * client with the same me client id is connecting
Tomas Winkler9ca90502013-01-08 23:07:13 +02001016 *
Tomas Winkler9ca90502013-01-08 23:07:13 +02001017 * @cl: private data of the file object
1018 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001019 * Return: true if other client is connected, false - otherwise.
Tomas Winkler9ca90502013-01-08 23:07:13 +02001020 */
Tomas Winkler0c533572015-05-04 09:43:53 +03001021static bool mei_cl_is_other_connecting(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +02001022{
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001023 struct mei_device *dev;
Tomas Winkler0c533572015-05-04 09:43:53 +03001024 struct mei_cl_cb *cb;
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001025
1026 dev = cl->dev;
1027
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001028 list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
Tomas Winkler0c533572015-05-04 09:43:53 +03001029 if (cb->fop_type == MEI_FOP_CONNECT &&
Alexander Usyskind49ed642015-05-04 09:43:54 +03001030 mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001031 return true;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001032 }
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001033
1034 return false;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001035}
1036
/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	/* park the cb on the control read list to await the FW reply
	 * and arm the stall timer in case the reply never arrives
	 */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}
1064
1065/**
1066 * mei_cl_irq_connect - send connect request in irq_thread context
1067 *
1068 * @cl: host client
1069 * @cb: callback block
1070 * @cmpl_list: complete list
1071 *
1072 * Return: 0, OK; otherwise, error.
1073 */
1074int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001075 struct list_head *cmpl_list)
Tomas Winkler0c533572015-05-04 09:43:53 +03001076{
1077 struct mei_device *dev = cl->dev;
1078 u32 msg_slots;
1079 int slots;
1080 int rets;
1081
Tomas Winkler0c533572015-05-04 09:43:53 +03001082 if (mei_cl_is_other_connecting(cl))
1083 return 0;
1084
Tomas Winkler98e70862018-07-31 09:35:33 +03001085 msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
Tomas Winklerde877432018-07-12 17:10:08 +03001086 slots = mei_hbuf_empty_slots(dev);
1087 if (slots < 0)
1088 return -EOVERFLOW;
1089
1090 if ((u32)slots < msg_slots)
Tomas Winkler0c533572015-05-04 09:43:53 +03001091 return -EMSGSIZE;
1092
1093 rets = mei_cl_send_connect(cl, cb);
1094 if (rets)
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001095 list_move_tail(&cb->list, cmpl_list);
Tomas Winkler0c533572015-05-04 09:43:53 +03001096
1097 return rets;
1098}
1099
/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock; the lock is
 * dropped while waiting for the FW reply and reacquired after.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	/* fixed address clients need no HBM handshake */
	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	/* undo the connection accounting on any failure path */
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}
1193
1194/**
Tomas Winkler03b8d342015-02-10 10:39:44 +02001195 * mei_cl_alloc_linked - allocate and link host client
1196 *
1197 * @dev: the device structure
Tomas Winkler03b8d342015-02-10 10:39:44 +02001198 *
1199 * Return: cl on success ERR_PTR on failure
1200 */
Alexander Usyskin7851e002016-02-07 23:35:40 +02001201struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
Tomas Winkler03b8d342015-02-10 10:39:44 +02001202{
1203 struct mei_cl *cl;
1204 int ret;
1205
1206 cl = mei_cl_allocate(dev);
1207 if (!cl) {
1208 ret = -ENOMEM;
1209 goto err;
1210 }
1211
Alexander Usyskin7851e002016-02-07 23:35:40 +02001212 ret = mei_cl_link(cl);
Tomas Winkler03b8d342015-02-10 10:39:44 +02001213 if (ret)
1214 goto err;
1215
1216 return cl;
1217err:
1218 kfree(cl);
1219 return ERR_PTR(ret);
1220}
1221
Tomas Winkler03b8d342015-02-10 10:39:44 +02001222/**
Tomas Winkler4034b812016-07-26 01:06:04 +03001223 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
Tomas Winkler9ca90502013-01-08 23:07:13 +02001224 *
Alexander Usyskin06ee5362016-02-07 23:35:37 +02001225 * @cl: host client
Tomas Winkler9ca90502013-01-08 23:07:13 +02001226 *
Tomas Winkler4034b812016-07-26 01:06:04 +03001227 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
Tomas Winkler9ca90502013-01-08 23:07:13 +02001228 */
Tomas Winkler4034b812016-07-26 01:06:04 +03001229static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +02001230{
Alexander Usyskind49ed642015-05-04 09:43:54 +03001231 if (WARN_ON(!cl || !cl->me_cl))
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001232 return -EINVAL;
1233
Tomas Winkler4034b812016-07-26 01:06:04 +03001234 if (cl->tx_flow_ctrl_creds > 0)
Tomas Winkler9ca90502013-01-08 23:07:13 +02001235 return 1;
1236
Alexander Usyskina808c802016-06-16 17:58:58 +03001237 if (mei_cl_is_fixed_address(cl))
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001238 return 1;
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001239
Alexander Usyskind49ed642015-05-04 09:43:54 +03001240 if (mei_cl_is_single_recv_buf(cl)) {
Tomas Winkler4034b812016-07-26 01:06:04 +03001241 if (cl->me_cl->tx_flow_ctrl_creds > 0)
Alexander Usyskind49ed642015-05-04 09:43:54 +03001242 return 1;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001243 }
Alexander Usyskind49ed642015-05-04 09:43:54 +03001244 return 0;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001245}
1246
1247/**
Tomas Winkler4034b812016-07-26 01:06:04 +03001248 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
1249 * for a client
Tomas Winkler9ca90502013-01-08 23:07:13 +02001250 *
Tomas Winkler4034b812016-07-26 01:06:04 +03001251 * @cl: host client
Masanari Iida393b1482013-04-05 01:05:05 +09001252 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001253 * Return:
Tomas Winkler9ca90502013-01-08 23:07:13 +02001254 * 0 on success
Tomas Winkler9ca90502013-01-08 23:07:13 +02001255 * -EINVAL when ctrl credits are <= 0
1256 */
Tomas Winkler4034b812016-07-26 01:06:04 +03001257static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
Tomas Winkler9ca90502013-01-08 23:07:13 +02001258{
Alexander Usyskind49ed642015-05-04 09:43:54 +03001259 if (WARN_ON(!cl || !cl->me_cl))
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001260 return -EINVAL;
1261
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001262 if (mei_cl_is_fixed_address(cl))
1263 return 0;
1264
Alexander Usyskind49ed642015-05-04 09:43:54 +03001265 if (mei_cl_is_single_recv_buf(cl)) {
Tomas Winkler4034b812016-07-26 01:06:04 +03001266 if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
Alexander Usyskind49ed642015-05-04 09:43:54 +03001267 return -EINVAL;
Tomas Winkler4034b812016-07-26 01:06:04 +03001268 cl->me_cl->tx_flow_ctrl_creds--;
Alexander Usyskin12d00662014-02-17 15:13:23 +02001269 } else {
Tomas Winkler4034b812016-07-26 01:06:04 +03001270 if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
Alexander Usyskind49ed642015-05-04 09:43:54 +03001271 return -EINVAL;
Tomas Winkler4034b812016-07-26 01:06:04 +03001272 cl->tx_flow_ctrl_creds--;
Alexander Usyskin12d00662014-02-17 15:13:23 +02001273 }
Alexander Usyskind49ed642015-05-04 09:43:54 +03001274 return 0;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001275}
1276
Tomas Winkler9ca90502013-01-08 23:07:13 +02001277/**
Alexander Usyskinf35fe5f2020-08-18 14:51:41 +03001278 * mei_cl_vtag_alloc - allocate and fill the vtag structure
1279 *
1280 * @fp: pointer to file structure
1281 * @vtag: vm tag
1282 *
1283 * Return:
1284 * * Pointer to allocated struct - on success
1285 * * ERR_PTR(-ENOMEM) on memory allocation failure
1286 */
1287struct mei_cl_vtag *mei_cl_vtag_alloc(struct file *fp, u8 vtag)
1288{
1289 struct mei_cl_vtag *cl_vtag;
1290
1291 cl_vtag = kzalloc(sizeof(*cl_vtag), GFP_KERNEL);
1292 if (!cl_vtag)
1293 return ERR_PTR(-ENOMEM);
1294
1295 INIT_LIST_HEAD(&cl_vtag->list);
1296 cl_vtag->vtag = vtag;
1297 cl_vtag->fp = fp;
1298
1299 return cl_vtag;
1300}
1301
1302/**
1303 * mei_cl_fp_by_vtag - obtain the file pointer by vtag
1304 *
1305 * @cl: host client
1306 * @vtag: vm tag
1307 *
1308 * Return:
1309 * * A file pointer - on success
1310 * * ERR_PTR(-ENOENT) if vtag is not found in the client vtag list
1311 */
1312const struct file *mei_cl_fp_by_vtag(const struct mei_cl *cl, u8 vtag)
1313{
1314 struct mei_cl_vtag *vtag_l;
1315
1316 list_for_each_entry(vtag_l, &cl->vtag_map, list)
1317 if (vtag_l->vtag == vtag)
1318 return vtag_l->fp;
1319
1320 return ERR_PTR(-ENOENT);
1321}
1322
1323/**
1324 * mei_cl_reset_read_by_vtag - reset pending_read flag by given vtag
1325 *
1326 * @cl: host client
1327 * @vtag: vm tag
1328 */
1329static void mei_cl_reset_read_by_vtag(const struct mei_cl *cl, u8 vtag)
1330{
1331 struct mei_cl_vtag *vtag_l;
1332
1333 list_for_each_entry(vtag_l, &cl->vtag_map, list) {
1334 if (vtag_l->vtag == vtag) {
1335 vtag_l->pending_read = false;
1336 break;
1337 }
1338 }
1339}
1340
/**
 * mei_cl_read_vtag_add_fc - add flow control for next pending reader
 *                           in the vtag list
 *
 * @cl: host client
 *
 * Queues a read control cb for the first vtag entry with a pending
 * read and accounts an rx flow-control credit for it.
 */
static void mei_cl_read_vtag_add_fc(struct mei_cl *cl)
{
	struct mei_cl_vtag *cl_vtag;

	list_for_each_entry(cl_vtag, &cl->vtag_map, list) {
		if (cl_vtag->pending_read) {
			/* non-NULL return means the cb was queued;
			 * grant the credit only in that case
			 */
			if (mei_cl_enqueue_ctrl_wr_cb(cl,
						      mei_cl_mtu(cl),
						      MEI_FOP_READ,
						      cl_vtag->fp))
				cl->rx_flow_ctrl_creds++;
			/* only one reader is serviced per call */
			break;
		}
	}
}
1362
1363/**
1364 * mei_cl_vt_support_check - check if client support vtags
1365 *
1366 * @cl: host client
1367 *
1368 * Return:
1369 * * 0 - supported, or not connected at all
1370 * * -EOPNOTSUPP - vtags are not supported by client
1371 */
1372int mei_cl_vt_support_check(const struct mei_cl *cl)
1373{
1374 struct mei_device *dev = cl->dev;
1375
1376 if (!dev->hbm_f_vt_supported)
1377 return -EOPNOTSUPP;
1378
1379 if (!cl->me_cl)
1380 return 0;
1381
1382 return cl->me_cl->props.vt_supported ? 0 : -EOPNOTSUPP;
1383}
1384
/**
 * mei_cl_add_rd_completed - add read completed callback to list with lock
 *                           and vtag check
 *
 * @cl: host client
 * @cb: callback block
 *
 * For vtag-capable clients, routes the cb to the file that owns the
 * cb's vtag and re-arms flow control for the next pending reader;
 * the cb is dropped if no owner is found. Takes ownership of @cb.
 */
void mei_cl_add_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	const struct file *fp;

	if (!mei_cl_vt_support_check(cl)) {
		fp = mei_cl_fp_by_vtag(cl, cb->vtag);
		if (IS_ERR(fp)) {
			/* client already disconnected, discarding */
			mei_io_cb_free(cb);
			return;
		}
		cb->fp = fp;
		mei_cl_reset_read_by_vtag(cl, cb->vtag);
		mei_cl_read_vtag_add_fc(cl);
	}

	/* rd_completed is also consumed without the device mutex,
	 * hence the dedicated spinlock
	 */
	spin_lock(&cl->rd_completed_lock);
	list_add_tail(&cb->list, &cl->rd_completed);
	spin_unlock(&cl->rd_completed_lock);
}
1413
/**
 * mei_cl_del_rd_completed - free read completed callback with lock
 *
 * @cl: host client
 * @cb: callback block
 *
 * The free is performed under rd_completed_lock so the cb cannot be
 * observed on the rd_completed list while it is being torn down.
 */
void mei_cl_del_rd_completed(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	spin_lock(&cl->rd_completed_lock);
	mei_io_cb_free(cb);
	spin_unlock(&cl->rd_completed_lock);
}
1427
1428/**
Tomas Winkler51678cc2015-07-26 09:54:18 +03001429 * mei_cl_notify_fop2req - convert fop to proper request
1430 *
1431 * @fop: client notification start response command
1432 *
1433 * Return: MEI_HBM_NOTIFICATION_START/STOP
1434 */
1435u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
1436{
1437 if (fop == MEI_FOP_NOTIFY_START)
1438 return MEI_HBM_NOTIFICATION_START;
1439 else
1440 return MEI_HBM_NOTIFICATION_STOP;
1441}
1442
1443/**
1444 * mei_cl_notify_req2fop - convert notification request top file operation type
1445 *
1446 * @req: hbm notification request type
1447 *
1448 * Return: MEI_FOP_NOTIFY_START/STOP
1449 */
1450enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
1451{
1452 if (req == MEI_HBM_NOTIFICATION_START)
1453 return MEI_FOP_NOTIFY_START;
1454 else
1455 return MEI_FOP_NOTIFY_STOP;
1456}
1457
/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	/* make sure the whole request fits into the host buffer */
	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		/* on failure hand the cb over for completion */
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	/* await the FW reply on the control read list */
	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}
1495
/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock; the lock is
 * dropped while waiting for the FW reply and reacquired after.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	/* keep the device awake for the duration of the handshake */
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* if the buffer is busy the request is sent later from the
	 * interrupt thread via the ctrl_wr list
	 */
	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request ||
			   cl->status ||
			   !mei_cl_is_connected(cl),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	/* timed out without the FW flipping notify_en */
	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
1571
/**
 * mei_cl_notify - raise notification
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Marks the notification event and wakes whichever consumer is
 * attached: the bus client, a sleeping waiter, or a fasync owner.
 */
void mei_cl_notify(struct mei_cl *cl)
{
	struct mei_device *dev;

	if (!cl || !cl->dev)
		return;

	dev = cl->dev;

	/* drop events the client did not ask for */
	if (!cl->notify_en)
		return;

	cl_dbg(dev, cl, "notify event");
	cl->notify_ev = true;
	/* the bus consumes the event first; wake waiters otherwise */
	if (!mei_cl_bus_notify_event(cl))
		wake_up_interruptible(&cl->ev_wait);

	if (cl->ev_async)
		kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);

}
1600
1601/**
Tomas Winklerb38a3622015-07-26 09:54:19 +03001602 * mei_cl_notify_get - get or wait for notification event
1603 *
1604 * @cl: host client
1605 * @block: this request is blocking
1606 * @notify_ev: true if notification event was received
1607 *
1608 * Locking: called under "dev->device_lock" lock
1609 *
1610 * Return: 0 on such and error otherwise.
1611 */
1612int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
1613{
1614 struct mei_device *dev;
1615 int rets;
1616
1617 *notify_ev = false;
1618
1619 if (WARN_ON(!cl || !cl->dev))
1620 return -ENODEV;
1621
1622 dev = cl->dev;
1623
Alexander Usyskin6c0d6702017-01-27 16:32:42 +02001624 if (!dev->hbm_f_ev_supported) {
1625 cl_dbg(dev, cl, "notifications not supported\n");
1626 return -EOPNOTSUPP;
1627 }
1628
Tomas Winklerb38a3622015-07-26 09:54:19 +03001629 if (!mei_cl_is_connected(cl))
1630 return -ENODEV;
1631
1632 if (cl->notify_ev)
1633 goto out;
1634
1635 if (!block)
1636 return -EAGAIN;
1637
1638 mutex_unlock(&dev->device_lock);
1639 rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
1640 mutex_lock(&dev->device_lock);
1641
1642 if (rets < 0)
1643 return rets;
1644
1645out:
1646 *notify_ev = cl->notify_ev;
1647 cl->notify_ev = false;
1648 return 0;
1649}
1650
/**
 * mei_cl_read_start - start a read request for a client message
 *
 * @cl: host client
 * @length: number of bytes to read
 * @fp: pointer to file structure
 *
 * Queues a read callback and, if the host buffer is available, sends
 * the flow control request to the firmware immediately; otherwise the
 * request is left queued and sent later from the interrupt path.
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	if (!mei_me_cl_is_active(cl->me_cl)) {
		cl_err(dev, cl, "no such me client\n");
		return -ENOTTY;
	}

	/* fixed address clients receive data without an explicit read request */
	if (mei_cl_is_fixed_address(cl))
		return 0;

	/* HW currently supports only one pending read */
	if (cl->rx_flow_ctrl_creds) {
		mei_cl_set_read_by_fp(cl, fp);
		return -EBUSY;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
	if (!cb)
		return -ENOMEM;

	mei_cl_set_read_by_fp(cl, fp);

	/* keep the device awake while the read request is in flight */
	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	rets = 0;
	/* if the host buffer is free send the flow control request now;
	 * otherwise the queued cb is picked up later from the irq path
	 */
	if (mei_hbuf_acquire(dev)) {
		rets = mei_hbm_cl_flow_control_req(dev, cl);
		if (rets < 0)
			goto out;

		list_move_tail(&cb->list, &cl->rd_pending);
	}
	/* account the rx credit in both cases (sent now or deferred) */
	cl->rx_flow_ctrl_creds++;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);
nortpm:
	/* on any failure the callback is ours to free */
	if (rets)
		mei_io_cb_free(cb);

	return rets;
}
1721
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001722static inline u8 mei_ext_hdr_set_vtag(struct mei_ext_hdr *ext, u8 vtag)
Tomas Winklera1c4d082018-07-23 13:21:24 +03001723{
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001724 ext->type = MEI_EXT_HDR_VTAG;
1725 ext->ext_payload[0] = vtag;
1726 ext->length = mei_data2slots(sizeof(*ext));
1727 return ext->length;
1728}
1729
1730/**
1731 * mei_msg_hdr_init - allocate and initialize mei message header
1732 *
1733 * @cb: message callback structure
1734 *
1735 * Return: a pointer to initialized header
1736 */
1737static struct mei_msg_hdr *mei_msg_hdr_init(const struct mei_cl_cb *cb)
1738{
1739 size_t hdr_len;
1740 struct mei_ext_meta_hdr *meta;
1741 struct mei_ext_hdr *ext;
1742 struct mei_msg_hdr *mei_hdr;
1743 bool is_ext, is_vtag;
1744
1745 if (!cb)
1746 return ERR_PTR(-EINVAL);
1747
1748 /* Extended header for vtag is attached only on the first fragment */
1749 is_vtag = (cb->vtag && cb->buf_idx == 0);
1750 is_ext = is_vtag;
1751
1752 /* Compute extended header size */
1753 hdr_len = sizeof(*mei_hdr);
1754
1755 if (!is_ext)
1756 goto setup_hdr;
1757
1758 hdr_len += sizeof(*meta);
1759 if (is_vtag)
1760 hdr_len += sizeof(*ext);
1761
1762setup_hdr:
1763 mei_hdr = kzalloc(hdr_len, GFP_KERNEL);
1764 if (!mei_hdr)
1765 return ERR_PTR(-ENOMEM);
1766
Tomas Winklera1c4d082018-07-23 13:21:24 +03001767 mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
1768 mei_hdr->me_addr = mei_cl_me_id(cb->cl);
Tomas Winklera1c4d082018-07-23 13:21:24 +03001769 mei_hdr->internal = cb->internal;
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001770 mei_hdr->extended = is_ext;
1771
1772 if (!is_ext)
1773 goto out;
1774
1775 meta = (struct mei_ext_meta_hdr *)mei_hdr->extension;
1776 if (is_vtag) {
1777 meta->count++;
1778 meta->size += mei_ext_hdr_set_vtag(meta->hdrs, cb->vtag);
1779 }
1780out:
1781 mei_hdr->length = hdr_len - sizeof(*mei_hdr);
1782 return mei_hdr;
Tomas Winklera1c4d082018-07-23 13:21:24 +03001783}
1784
/**
 * mei_cl_irq_write - write a message to device
 *	from the interrupt thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Sends the next fragment of the message held in @cb: either inline in
 * the host buffer, via the DMA ring, or as a partial chunk; completed
 * messages are moved to the write-waiting list.
 *
 * Return: 0, OK; otherwise error.
 */
int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
		     struct list_head *cmpl_list)
{
	struct mei_device *dev;
	struct mei_msg_data *buf;
	struct mei_msg_hdr *mei_hdr = NULL;
	size_t hdr_len;
	size_t hbuf_len, dr_len;
	size_t buf_len;
	size_t data_len;
	int hbuf_slots;
	u32 dr_slots;
	u32 dma_len;
	int rets;
	bool first_chunk;
	const void *data;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	buf = &cb->buf;

	first_chunk = cb->buf_idx == 0;

	/* a tx flow control credit is needed only for the first fragment;
	 * rets: >0 credit available, 0 none, <0 error
	 */
	rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
	if (rets < 0)
		goto err;

	if (rets == 0) {
		/* no credit yet: leave the cb queued for a later retry */
		cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
		return 0;
	}

	/* remaining payload and its position inside the source buffer */
	buf_len = buf->size - cb->buf_idx;
	data = buf->data + cb->buf_idx;
	hbuf_slots = mei_hbuf_empty_slots(dev);
	if (hbuf_slots < 0) {
		rets = -EOVERFLOW;
		goto err;
	}

	hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
	dr_slots = mei_dma_ring_empty_slots(dev);
	dr_len = mei_slots2data(dr_slots);

	mei_hdr = mei_msg_hdr_init(cb);
	if (IS_ERR(mei_hdr)) {
		rets = PTR_ERR(mei_hdr);
		mei_hdr = NULL;
		goto err;
	}

	cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
	       mei_hdr->extended, cb->vtag);

	hdr_len = sizeof(*mei_hdr) + mei_hdr->length;

	/**
	 * Split the message only if we can write the whole host buffer
	 * otherwise wait for next time the host buffer is empty.
	 */
	if (hdr_len + buf_len <= hbuf_len) {
		/* whole remainder fits inline in the host buffer */
		data_len = buf_len;
		mei_hdr->msg_complete = 1;
	} else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
		/* DMA ring path: the inline payload carries only the
		 * dma length; the data itself goes through the ring
		 */
		mei_hdr->dma_ring = 1;
		if (buf_len > dr_len)
			buf_len = dr_len;
		else
			mei_hdr->msg_complete = 1;

		data_len = sizeof(dma_len);
		dma_len = buf_len;
		data = &dma_len;
	} else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
		/* buffer completely empty but still too small:
		 * send whatever partial fragment fits
		 */
		buf_len = hbuf_len - hdr_len;
		data_len = buf_len;
	} else {
		/* not enough room now and buffer not yet drained: retry later */
		kfree(mei_hdr);
		return 0;
	}
	mei_hdr->length += data_len;

	if (mei_hdr->dma_ring)
		mei_dma_ring_write(dev, buf->data + cb->buf_idx, buf_len);
	rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);

	if (rets)
		goto err;

	cl->status = 0;
	cl->writing_state = MEI_WRITING;
	cb->buf_idx += buf_len;

	/* one credit is consumed per message, on its first fragment */
	if (first_chunk) {
		if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
			rets = -EIO;
			goto err;
		}
	}

	if (mei_hdr->msg_complete)
		list_move_tail(&cb->list, &dev->write_waiting_list);

	kfree(mei_hdr);
	return 0;

err:
	/* record the error on the client and hand the cb to the
	 * completion list
	 */
	kfree(mei_hdr);
	cl->status = rets;
	list_move_tail(&cb->list, cmpl_list);
	return rets;
}
1910
1911/**
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001912 * mei_cl_write - submit a write cb to mei device
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001913 * assumes device_lock is locked
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001914 *
1915 * @cl: host client
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001916 * @cb: write callback with filled data
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001917 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001918 * Return: number of bytes sent on success, <0 on failure.
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001919 */
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001920ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001921{
1922 struct mei_device *dev;
1923 struct mei_msg_data *buf;
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001924 struct mei_msg_hdr *mei_hdr = NULL;
1925 size_t hdr_len;
1926 size_t hbuf_len, dr_len;
1927 size_t buf_len;
1928 size_t data_len;
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001929 int hbuf_slots;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001930 u32 dr_slots;
1931 u32 dma_len;
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001932 ssize_t rets;
Alexander Usyskine0cb6b22016-11-08 18:26:08 +02001933 bool blocking;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001934 const void *data;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001935
1936 if (WARN_ON(!cl || !cl->dev))
1937 return -ENODEV;
1938
1939 if (WARN_ON(!cb))
1940 return -EINVAL;
1941
1942 dev = cl->dev;
1943
Tomas Winkler5db75142015-02-10 10:39:42 +02001944 buf = &cb->buf;
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001945 buf_len = buf->size;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001946
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001947 cl_dbg(dev, cl, "buf_len=%zd\n", buf_len);
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001948
Tomas Winklerc30362c2018-11-22 13:11:40 +02001949 blocking = cb->blocking;
1950 data = buf->data;
1951
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001952 rets = pm_runtime_get(dev->dev);
Tomas Winkler04bb1392014-03-18 22:52:04 +02001953 if (rets < 0 && rets != -EINPROGRESS) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001954 pm_runtime_put_noidle(dev->dev);
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001955 cl_err(dev, cl, "rpm: get failed %zd\n", rets);
Alexander Usyskin6cbb0972016-02-10 23:57:26 +02001956 goto free;
Tomas Winkler04bb1392014-03-18 22:52:04 +02001957 }
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001958
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001959 cb->buf_idx = 0;
1960 cl->writing_state = MEI_IDLE;
1961
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001962
Tomas Winkler4034b812016-07-26 01:06:04 +03001963 rets = mei_cl_tx_flow_ctrl_creds(cl);
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001964 if (rets < 0)
1965 goto err;
1966
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001967 mei_hdr = mei_msg_hdr_init(cb);
1968 if (IS_ERR(mei_hdr)) {
1969 rets = -PTR_ERR(mei_hdr);
1970 mei_hdr = NULL;
1971 goto err;
1972 }
1973
1974 cl_dbg(dev, cl, "Extended Header %d vtag = %d\n",
1975 mei_hdr->extended, cb->vtag);
1976
1977 hdr_len = sizeof(*mei_hdr) + mei_hdr->length;
Tomas Winklera1c4d082018-07-23 13:21:24 +03001978
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001979 if (rets == 0) {
1980 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001981 rets = buf_len;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001982 goto out;
1983 }
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001984
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001985 if (!mei_hbuf_acquire(dev)) {
1986 cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
Tomas Winkler0cd7c012020-08-18 14:51:38 +03001987 rets = buf_len;
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001988 goto out;
1989 }
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001990
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001991 hbuf_slots = mei_hbuf_empty_slots(dev);
1992 if (hbuf_slots < 0) {
1993 rets = -EOVERFLOW;
1994 goto out;
1995 }
1996
Tomas Winkler3aef0212020-02-11 18:05:22 +02001997 hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001998 dr_slots = mei_dma_ring_empty_slots(dev);
1999 dr_len = mei_slots2data(dr_slots);
Tomas Winkler98e70862018-07-31 09:35:33 +03002000
Tomas Winkler0cd7c012020-08-18 14:51:38 +03002001 if (hdr_len + buf_len <= hbuf_len) {
2002 data_len = buf_len;
2003 mei_hdr->msg_complete = 1;
Tomas Winklerc30362c2018-11-22 13:11:40 +02002004 } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
Tomas Winkler0cd7c012020-08-18 14:51:38 +03002005 mei_hdr->dma_ring = 1;
2006 if (buf_len > dr_len)
2007 buf_len = dr_len;
Tomas Winklerc30362c2018-11-22 13:11:40 +02002008 else
Tomas Winkler0cd7c012020-08-18 14:51:38 +03002009 mei_hdr->msg_complete = 1;
Tomas Winklerc30362c2018-11-22 13:11:40 +02002010
Tomas Winkler0cd7c012020-08-18 14:51:38 +03002011 data_len = sizeof(dma_len);
2012 dma_len = buf_len;
Tomas Winklerc30362c2018-11-22 13:11:40 +02002013 data = &dma_len;
Tomas Winkler8c8d9642018-07-23 13:21:23 +03002014 } else {
Tomas Winkler0cd7c012020-08-18 14:51:38 +03002015 buf_len = hbuf_len - hdr_len;
2016 data_len = buf_len;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03002017 }
2018
Tomas Winkler0cd7c012020-08-18 14:51:38 +03002019 mei_hdr->length += data_len;
Tomas Winklerc30362c2018-11-22 13:11:40 +02002020
Tomas Winkler0cd7c012020-08-18 14:51:38 +03002021 if (mei_hdr->dma_ring)
2022 mei_dma_ring_write(dev, buf->data, buf_len);
2023 rets = mei_write_message(dev, mei_hdr, hdr_len, data, data_len);
2024
Tomas Winkler2ebf8c92013-09-16 23:44:43 +03002025 if (rets)
Tomas Winkler4234a6d2013-04-08 21:56:37 +03002026 goto err;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03002027
Tomas Winkler4034b812016-07-26 01:06:04 +03002028 rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03002029 if (rets)
2030 goto err;
2031
Tomas Winkler4234a6d2013-04-08 21:56:37 +03002032 cl->writing_state = MEI_WRITING;
Tomas Winkler0cd7c012020-08-18 14:51:38 +03002033 cb->buf_idx = buf_len;
Tomas Winklerc30362c2018-11-22 13:11:40 +02002034 /* restore return value */
Tomas Winkler0cd7c012020-08-18 14:51:38 +03002035 buf_len = buf->size;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03002036
Tomas Winkler4234a6d2013-04-08 21:56:37 +03002037out:
Tomas Winkler0cd7c012020-08-18 14:51:38 +03002038 if (mei_hdr->msg_complete)
Alexander Usyskinaf336ca2018-02-25 20:07:05 +02002039 mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03002040 else
Alexander Usyskinaf336ca2018-02-25 20:07:05 +02002041 mei_tx_cb_enqueue(cb, &dev->write_list);
Tomas Winkler4234a6d2013-04-08 21:56:37 +03002042
Alexander Usyskin23253c32015-07-23 10:43:11 +03002043 cb = NULL;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03002044 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
2045
2046 mutex_unlock(&dev->device_lock);
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02002047 rets = wait_event_interruptible(cl->tx_wait,
Alexander Usyskin0faf6a32016-02-07 23:35:34 +02002048 cl->writing_state == MEI_WRITE_COMPLETE ||
2049 (!mei_cl_is_connected(cl)));
Tomas Winkler4234a6d2013-04-08 21:56:37 +03002050 mutex_lock(&dev->device_lock);
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02002051 /* wait_event_interruptible returns -ERESTARTSYS */
2052 if (rets) {
2053 if (signal_pending(current))
2054 rets = -EINTR;
2055 goto err;
2056 }
Alexander Usyskin0faf6a32016-02-07 23:35:34 +02002057 if (cl->writing_state != MEI_WRITE_COMPLETE) {
2058 rets = -EFAULT;
2059 goto err;
2060 }
Tomas Winkler4234a6d2013-04-08 21:56:37 +03002061 }
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02002062
Tomas Winkler0cd7c012020-08-18 14:51:38 +03002063 rets = buf_len;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03002064err:
Tomas Winkler04bb1392014-03-18 22:52:04 +02002065 cl_dbg(dev, cl, "rpm: autosuspend\n");
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03002066 pm_runtime_mark_last_busy(dev->dev);
2067 pm_runtime_put_autosuspend(dev->dev);
Alexander Usyskin6cbb0972016-02-10 23:57:26 +02002068free:
2069 mei_io_cb_free(cb);
Tomas Winkler04bb1392014-03-18 22:52:04 +02002070
Tomas Winkler0cd7c012020-08-18 14:51:38 +03002071 kfree(mei_hdr);
2072
Tomas Winkler4234a6d2013-04-08 21:56:37 +03002073 return rets;
2074}
2075
Tomas Winklerdb086fa2013-05-12 15:34:45 +03002076/**
2077 * mei_cl_complete - processes completed operation for a client
2078 *
2079 * @cl: private data of the file object.
2080 * @cb: callback block.
2081 */
2082void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
2083{
Alexander Usyskina1809d32015-05-07 15:53:59 +03002084 struct mei_device *dev = cl->dev;
2085
Tomas Winkler3c666182015-05-04 09:43:52 +03002086 switch (cb->fop_type) {
2087 case MEI_FOP_WRITE:
Alexander Usyskinaf336ca2018-02-25 20:07:05 +02002088 mei_tx_cb_dequeue(cb);
Tomas Winklerdb086fa2013-05-12 15:34:45 +03002089 cl->writing_state = MEI_WRITE_COMPLETE;
Alexander Usyskina1809d32015-05-07 15:53:59 +03002090 if (waitqueue_active(&cl->tx_wait)) {
Tomas Winklerdb086fa2013-05-12 15:34:45 +03002091 wake_up_interruptible(&cl->tx_wait);
Alexander Usyskina1809d32015-05-07 15:53:59 +03002092 } else {
2093 pm_runtime_mark_last_busy(dev->dev);
2094 pm_request_autosuspend(dev->dev);
2095 }
Tomas Winkler3c666182015-05-04 09:43:52 +03002096 break;
Tomas Winklerdb086fa2013-05-12 15:34:45 +03002097
Tomas Winkler3c666182015-05-04 09:43:52 +03002098 case MEI_FOP_READ:
Alexander Usyskind1376f32020-08-18 14:51:40 +03002099 mei_cl_add_rd_completed(cl, cb);
Alexander Usyskin46978ad2016-07-26 01:06:03 +03002100 if (!mei_cl_is_fixed_address(cl) &&
2101 !WARN_ON(!cl->rx_flow_ctrl_creds))
2102 cl->rx_flow_ctrl_creds--;
Tomas Winklera1f9ae22016-02-07 23:35:30 +02002103 if (!mei_cl_bus_rx_event(cl))
2104 wake_up_interruptible(&cl->rx_wait);
Tomas Winkler3c666182015-05-04 09:43:52 +03002105 break;
Tomas Winklerdb086fa2013-05-12 15:34:45 +03002106
Tomas Winkler3c666182015-05-04 09:43:52 +03002107 case MEI_FOP_CONNECT:
2108 case MEI_FOP_DISCONNECT:
Tomas Winkler51678cc2015-07-26 09:54:18 +03002109 case MEI_FOP_NOTIFY_STOP:
2110 case MEI_FOP_NOTIFY_START:
Tomas Winkler3c666182015-05-04 09:43:52 +03002111 if (waitqueue_active(&cl->wait))
2112 wake_up(&cl->wait);
2113
2114 break;
Alexander Usyskin6a8d6482016-04-17 12:16:03 -04002115 case MEI_FOP_DISCONNECT_RSP:
2116 mei_io_cb_free(cb);
2117 mei_cl_set_disconnected(cl);
2118 break;
Tomas Winkler3c666182015-05-04 09:43:52 +03002119 default:
2120 BUG_ON(0);
Tomas Winklerdb086fa2013-05-12 15:34:45 +03002121 }
2122}
2123
Tomas Winkler4234a6d2013-04-08 21:56:37 +03002124
2125/**
Tomas Winkler074b4c02013-02-06 14:06:44 +02002126 * mei_cl_all_disconnect - disconnect forcefully all connected clients
2127 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03002128 * @dev: mei device
Tomas Winkler074b4c02013-02-06 14:06:44 +02002129 */
Tomas Winkler074b4c02013-02-06 14:06:44 +02002130void mei_cl_all_disconnect(struct mei_device *dev)
2131{
Tomas Winkler31f88f52014-02-17 15:13:25 +02002132 struct mei_cl *cl;
Tomas Winkler074b4c02013-02-06 14:06:44 +02002133
Tomas Winkler3c666182015-05-04 09:43:52 +03002134 list_for_each_entry(cl, &dev->file_list, link)
2135 mei_cl_set_disconnected(cl);
Tomas Winkler074b4c02013-02-06 14:06:44 +02002136}