// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2003-2019, Intel Corporation. All rights reserved.
 * Intel Management Engine Interface (Intel MEI) Linux driver
 */

#include <linux/sched/signal.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}

/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
						 const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{

	struct mei_me_client *__me_cl, *me_cl = NULL;

	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or null if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
						    const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}


/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or null if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	mei_me_cl_put(me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb);
}

/**
 * mei_tx_cb_enqueue - queue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct
 * @head: an instance of list to queue on
 */
static inline void mei_tx_cb_enqueue(struct mei_cl_cb *cb,
				     struct list_head *head)
{
	list_add_tail(&cb->list, head);
	cb->cl->tx_cb_queued++;
}

/**
 * mei_tx_cb_dequeue - dequeue tx callback
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @cb: mei callback struct to dequeue and free
 */
static inline void mei_tx_cb_dequeue(struct mei_cl_cb *cb)
{
	if (!WARN_ON(cb->cl->tx_cb_queued == 0))
		cb->cl->tx_cb_queued--;

	mei_io_cb_free(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL;
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	return cb;
}

/**
 * mei_io_list_flush_cl - removes cbs belonging to the cl.
 *
 * @head: an instance of our list structure
 * @cl: host client
 */
static void mei_io_list_flush_cl(struct list_head *head,
				 const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl) {
			list_del_init(&cb->list);
			if (cb->fop_type == MEI_FOP_READ)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_tx_list_free_cl - removes cb belonging to the cl and free them
 *
 * @head: An instance of our list structure
 * @cl: host client
 */
static void mei_io_tx_list_free_cl(struct list_head *head,
				   const struct mei_cl *cl)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list) {
		if (cl == cb->cl)
			mei_tx_cb_dequeue(cb);
	}
}

/**
 * mei_io_list_free_fp - free cb from a list that matches file pointer
 *
 * @head: io list
 * @fp: file pointer (matching cb file object), may be NULL
 */
static void mei_io_list_free_fp(struct list_head *head, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, head, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);
}

/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	cb->buf.data = kmalloc(roundup(length, MEI_SLOT_SIZE), GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}

/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *     and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *     for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp)
			return cb;

	return NULL;
}

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_tx_list_free_cl(&cl->dev->write_list, cl);
	mei_io_tx_list_free_cl(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush_cl(&cl->dev->ctrl_rd_list, cl);
	mei_io_list_free_fp(&cl->rd_pending, fp);
	mei_io_list_free_fp(&cl->rd_completed, fp);

	return 0;
}

/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
static void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * Return: The allocated file or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	if (dev->open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	if (WARN_ON(!cl->dev))
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}

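/**
 * mei_host_client_init - set the device to enabled state,
 *	reset the reset counter, schedule a bus rescan
 *	and request runtime autosuspend
 *
 * @dev: the device structure
 */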
void mei_host_client_init(struct mei_device *dev)
{
	mei_set_devstate(dev, MEI_DEV_ENABLED);
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *     they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}

/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *     associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_tx_list_free_cl(&dev->write_list, cl);
	mei_io_tx_list_free_cl(&dev->write_waiting_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
	mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

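/**
 * mei_cl_set_connecting - take a reference on the me client and bind
 *	it to the host client in the connecting state
 *
 * @cl: host client
 * @me_cl: me client
 *
 * Return: 0 on success, -ENOENT if the me client cannot be referenced,
 *	-EBUSY if a fixed address me client is already connected
 */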
static int mei_cl_set_connecting(struct mei_cl *cl, struct mei_me_client *me_cl)
{
	if (!mei_me_cl_get(me_cl))
		return -ENOENT;

	/* only one connection is allowed for fixed address clients */
	if (me_cl->props.fixed_address) {
		if (me_cl->connect_count) {
			mei_me_cl_put(me_cl);
			return -EBUSY;
		}
	}

	cl->me_cl = me_cl;
	cl->state = MEI_FILE_CONNECTING;
	cl->me_cl->connect_count++;

	return 0;
}

/*
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			  struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_cl_send_disconnect(cl, cb);
	if (ret)
		list_move_tail(&cb->list, cmpl_list);

	return ret;
}

/**
 * __mei_cl_disconnect - disconnect host client from the me one
 *     internal function runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	if (dev->dev_state == MEI_DEV_POWER_DOWN) {
		cl_dbg(dev, cl, "Device is powering down, don't bother with disconnection\n");
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}


/**
 * mei_cl_is_other_connecting - checks if other
 *    client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if other client is connected, false - otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;

	dev = cl->dev;

	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
		if (cb->fop_type == MEI_FOP_CONNECT &&
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
			return true;
	}

	return false;
}

/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int rets;

	if (mei_cl_is_other_connecting(cl))
		return 0;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	rets = mei_cl_send_connect(cl, cb);
	if (rets)
		list_move_tail(&cb->list, cmpl_list);

	return rets;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush_cl(&dev->ctrl_rd_list, cl);
			mei_io_list_flush_cl(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}

/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}

/**
 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 *
 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
 */
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->tx_flow_ctrl_creds > 0)
		return 1;

	if (mei_cl_is_fixed_address(cl))
		return 1;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->tx_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}

/**
 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
 *   for a client
 *
 * @cl: host client
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->tx_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->tx_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start response command
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
	if (req == MEI_HBM_NOTIFICATION_START)
		return MEI_FOP_NOTIFY_START;
	else
		return MEI_FOP_NOTIFY_STOP;
}

/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	msg_slots = mei_hbm2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);
	if (slots < 0)
		return -EOVERFLOW;

	if ((u32)slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
1309 */
Tomas Winklerf23e2cc2016-02-07 23:35:23 +02001310int mei_cl_notify_request(struct mei_cl *cl,
Tomas Winkler3030dc02016-07-26 01:06:05 +03001311 const struct file *fp, u8 request)
Tomas Winkler51678cc2015-07-26 09:54:18 +03001312{
1313 struct mei_device *dev;
1314 struct mei_cl_cb *cb;
1315 enum mei_cb_file_ops fop_type;
1316 int rets;
1317
1318 if (WARN_ON(!cl || !cl->dev))
1319 return -ENODEV;
1320
1321 dev = cl->dev;
1322
1323 if (!dev->hbm_f_ev_supported) {
1324 cl_dbg(dev, cl, "notifications not supported\n");
1325 return -EOPNOTSUPP;
1326 }
1327
Alexander Usyskin7c47d2c2017-01-27 16:32:41 +02001328 if (!mei_cl_is_connected(cl))
1329 return -ENODEV;
1330
Tomas Winkler51678cc2015-07-26 09:54:18 +03001331 rets = pm_runtime_get(dev->dev);
1332 if (rets < 0 && rets != -EINPROGRESS) {
1333 pm_runtime_put_noidle(dev->dev);
1334 cl_err(dev, cl, "rpm: get failed %d\n", rets);
1335 return rets;
1336 }
1337
1338 fop_type = mei_cl_notify_req2fop(request);
Tomas Winkler3030dc02016-07-26 01:06:05 +03001339 cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
Tomas Winkler51678cc2015-07-26 09:54:18 +03001340 if (!cb) {
1341 rets = -ENOMEM;
1342 goto out;
1343 }
1344
1345 if (mei_hbuf_acquire(dev)) {
1346 if (mei_hbm_cl_notify_req(dev, cl, request)) {
1347 rets = -ENODEV;
1348 goto out;
1349 }
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001350 list_move_tail(&cb->list, &dev->ctrl_rd_list);
Tomas Winkler51678cc2015-07-26 09:54:18 +03001351 }
1352
1353 mutex_unlock(&dev->device_lock);
Alexander Usyskin7ff4bdd2016-04-20 11:03:55 -04001354 wait_event_timeout(cl->wait,
Alexander Usyskina19bf052018-11-06 12:04:40 +02001355 cl->notify_en == request ||
1356 cl->status ||
1357 !mei_cl_is_connected(cl),
Alexander Usyskin7ff4bdd2016-04-20 11:03:55 -04001358 mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
Tomas Winkler51678cc2015-07-26 09:54:18 +03001359 mutex_lock(&dev->device_lock);
1360
Alexander Usyskin4a8eaa92016-04-20 11:03:54 -04001361 if (cl->notify_en != request && !cl->status)
1362 cl->status = -EFAULT;
Tomas Winkler51678cc2015-07-26 09:54:18 +03001363
1364 rets = cl->status;
1365
1366out:
1367 cl_dbg(dev, cl, "rpm: autosuspend\n");
1368 pm_runtime_mark_last_busy(dev->dev);
1369 pm_runtime_put_autosuspend(dev->dev);
1370
1371 mei_io_cb_free(cb);
1372 return rets;
1373}
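
/*
 * Usage sketch (illustrative only, not part of the driver): arming event
 * notifications for a connected host client the way the ioctl path would.
 * The wrapper name is an assumption; the locking follows the kernel-doc
 * above (device_lock held around the call).
 */
#if 0
static int example_notify_ctrl(struct mei_cl *cl, const struct file *fp,
			       bool enable)
{
	struct mei_device *dev = cl->dev;
	int ret;

	mutex_lock(&dev->device_lock);
	/* request: 1 starts notifications, 0 stops them */
	ret = mei_cl_notify_request(cl, fp, enable ? 1 : 0);
	mutex_unlock(&dev->device_lock);

	return ret;
}
#endif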
1374
1375/**
Tomas Winkler237092b2015-07-26 09:54:22 +03001376 * mei_cl_notify - raise notification
1377 *
1378 * @cl: host client
1379 *
1380 * Locking: called under "dev->device_lock" lock
1381 */
1382void mei_cl_notify(struct mei_cl *cl)
1383{
1384 struct mei_device *dev;
1385
1386 if (!cl || !cl->dev)
1387 return;
1388
1389 dev = cl->dev;
1390
1391 if (!cl->notify_en)
1392 return;
1393
1394 cl_dbg(dev, cl, "notify event");
1395 cl->notify_ev = true;
Tomas Winkler850f8942016-02-07 23:35:31 +02001396 if (!mei_cl_bus_notify_event(cl))
1397 wake_up_interruptible(&cl->ev_wait);
Tomas Winkler237092b2015-07-26 09:54:22 +03001398
1399 if (cl->ev_async)
1400 kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
Alexander Usyskinbb2ef9c2015-07-26 09:54:23 +03001401
Tomas Winkler237092b2015-07-26 09:54:22 +03001402}
1403
1404/**
Tomas Winklerb38a3622015-07-26 09:54:19 +03001405 * mei_cl_notify_get - get or wait for notification event
1406 *
1407 * @cl: host client
1408 * @block: this request is blocking
1409 * @notify_ev: true if notification event was received
1410 *
1411 * Locking: called under "dev->device_lock" lock
1412 *
1413 * Return: 0 on success and error otherwise.
1414 */
1415int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
1416{
1417 struct mei_device *dev;
1418 int rets;
1419
1420 *notify_ev = false;
1421
1422 if (WARN_ON(!cl || !cl->dev))
1423 return -ENODEV;
1424
1425 dev = cl->dev;
1426
Alexander Usyskin6c0d6702017-01-27 16:32:42 +02001427 if (!dev->hbm_f_ev_supported) {
1428 cl_dbg(dev, cl, "notifications not supported\n");
1429 return -EOPNOTSUPP;
1430 }
1431
Tomas Winklerb38a3622015-07-26 09:54:19 +03001432 if (!mei_cl_is_connected(cl))
1433 return -ENODEV;
1434
1435 if (cl->notify_ev)
1436 goto out;
1437
1438 if (!block)
1439 return -EAGAIN;
1440
1441 mutex_unlock(&dev->device_lock);
1442 rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
1443 mutex_lock(&dev->device_lock);
1444
1445 if (rets < 0)
1446 return rets;
1447
1448out:
1449 *notify_ev = cl->notify_ev;
1450 cl->notify_ev = false;
1451 return 0;
1452}
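
/*
 * Usage sketch (illustrative only): consuming a notification event.  With
 * block == false the call returns -EAGAIN when no event is pending; with
 * block == true it sleeps on cl->ev_wait until mei_cl_notify() wakes it.
 * The wrapper name is an assumption.
 */
#if 0
static int example_wait_for_event(struct mei_cl *cl, bool block)
{
	struct mei_device *dev = cl->dev;
	bool notify_ev = false;
	int ret;

	mutex_lock(&dev->device_lock);
	ret = mei_cl_notify_get(cl, block, &notify_ev);
	mutex_unlock(&dev->device_lock);
	if (ret)
		return ret;

	return notify_ev ? 0 : -EAGAIN;
}
#endif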
1453
1454/**
Masanari Iida393b1482013-04-05 01:05:05 +09001455 * mei_cl_read_start - start reading a client message
Tomas Winkler9ca90502013-01-08 23:07:13 +02001456 *
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001457 * @cl: host client
Alexander Usyskince231392014-09-29 16:31:50 +03001458 * @length: number of bytes to read
Tomas Winklerbca67d62015-02-10 10:39:43 +02001459 * @fp: pointer to file structure
Tomas Winkler9ca90502013-01-08 23:07:13 +02001460 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001461 * Return: 0 on success, <0 on failure.
Tomas Winkler9ca90502013-01-08 23:07:13 +02001462 */
Tomas Winklerf23e2cc2016-02-07 23:35:23 +02001463int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
Tomas Winkler9ca90502013-01-08 23:07:13 +02001464{
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001465 struct mei_device *dev;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001466 struct mei_cl_cb *cb;
1467 int rets;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001468
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001469 if (WARN_ON(!cl || !cl->dev))
1470 return -ENODEV;
1471
1472 dev = cl->dev;
1473
Tomas Winklerb950ac12013-07-25 20:15:53 +03001474 if (!mei_cl_is_connected(cl))
Tomas Winkler9ca90502013-01-08 23:07:13 +02001475 return -ENODEV;
1476
Alexander Usyskind49ed642015-05-04 09:43:54 +03001477 if (!mei_me_cl_is_active(cl->me_cl)) {
1478 cl_err(dev, cl, "no such me client\n");
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02001479 return -ENOTTY;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001480 }
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001481
Alexander Usyskin394a77d2017-03-20 15:04:03 +02001482 if (mei_cl_is_fixed_address(cl))
Alexander Usyskine51dfa52016-07-26 01:06:02 +03001483 return 0;
1484
Alexander Usyskin46978ad2016-07-26 01:06:03 +03001485 /* HW currently supports only one pending read */
1486 if (cl->rx_flow_ctrl_creds)
1487 return -EBUSY;
1488
Tomas Winkler3030dc02016-07-26 01:06:05 +03001489 cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001490 if (!cb)
1491 return -ENOMEM;
1492
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001493 rets = pm_runtime_get(dev->dev);
Tomas Winkler04bb1392014-03-18 22:52:04 +02001494 if (rets < 0 && rets != -EINPROGRESS) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001495 pm_runtime_put_noidle(dev->dev);
Tomas Winkler04bb1392014-03-18 22:52:04 +02001496 cl_err(dev, cl, "rpm: get failed %d\n", rets);
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001497 goto nortpm;
Tomas Winkler04bb1392014-03-18 22:52:04 +02001498 }
1499
Alexander Usyskin46978ad2016-07-26 01:06:03 +03001500 rets = 0;
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001501 if (mei_hbuf_acquire(dev)) {
Alexander Usyskin86113502014-03-31 17:59:24 +03001502 rets = mei_hbm_cl_flow_control_req(dev, cl);
1503 if (rets < 0)
Tomas Winkler04bb1392014-03-18 22:52:04 +02001504 goto out;
Tomas Winkler04bb1392014-03-18 22:52:04 +02001505
Alexander Usyskin46978ad2016-07-26 01:06:03 +03001506 list_move_tail(&cb->list, &cl->rd_pending);
Tomas Winkler9ca90502013-01-08 23:07:13 +02001507 }
Alexander Usyskin46978ad2016-07-26 01:06:03 +03001508 cl->rx_flow_ctrl_creds++;
Chao Biaccb8842014-02-12 21:27:25 +02001509
Tomas Winkler04bb1392014-03-18 22:52:04 +02001510out:
1511 cl_dbg(dev, cl, "rpm: autosuspend\n");
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001512 pm_runtime_mark_last_busy(dev->dev);
1513 pm_runtime_put_autosuspend(dev->dev);
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001514nortpm:
Tomas Winkler04bb1392014-03-18 22:52:04 +02001515 if (rets)
1516 mei_io_cb_free(cb);
1517
Tomas Winkler9ca90502013-01-08 23:07:13 +02001518 return rets;
1519}
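
/*
 * Usage sketch (illustrative only): queueing a read.  Only one read may be
 * pending per client (rx_flow_ctrl_creds), so -EBUSY is expected when a
 * previous read has not completed; completed data is later delivered on
 * cl->rd_completed and cl->rx_wait is woken (see mei_cl_complete() below).
 * The wrapper name is an assumption.
 */
#if 0
static int example_queue_read(struct mei_cl *cl, const struct file *fp,
			      size_t len)
{
	struct mei_device *dev = cl->dev;
	int ret;

	mutex_lock(&dev->device_lock);
	ret = mei_cl_read_start(cl, len, fp);
	mutex_unlock(&dev->device_lock);

	return ret;
}
#endif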
1520
Tomas Winkler074b4c02013-02-06 14:06:44 +02001521/**
Tomas Winklera1c4d082018-07-23 13:21:24 +03001522 * mei_msg_hdr_init - initialize mei message header
1523 *
1524 * @mei_hdr: mei message header
1525 * @cb: message callback structure
1526 */
1527static void mei_msg_hdr_init(struct mei_msg_hdr *mei_hdr, struct mei_cl_cb *cb)
1528{
1529 mei_hdr->host_addr = mei_cl_host_addr(cb->cl);
1530 mei_hdr->me_addr = mei_cl_me_id(cb->cl);
1531 mei_hdr->length = 0;
1532 mei_hdr->reserved = 0;
1533 mei_hdr->msg_complete = 0;
Tomas Winkler9d89ddf2018-07-31 09:35:35 +03001534 mei_hdr->dma_ring = 0;
Tomas Winklera1c4d082018-07-23 13:21:24 +03001535 mei_hdr->internal = cb->internal;
1536}
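
/*
 * Illustrative sketch (not driver code): how the send paths below use the
 * header.  mei_msg_hdr_init() fills the addressing and clears the flags;
 * the caller then sets the fragment length and, on the last fragment,
 * msg_complete.  The helper name is an assumption.
 */
#if 0
static void example_fill_hdr(struct mei_msg_hdr *hdr, struct mei_cl_cb *cb,
			     u32 payload_len, bool last_fragment)
{
	mei_msg_hdr_init(hdr, cb);
	hdr->length = payload_len;
	hdr->msg_complete = last_fragment ? 1 : 0;
}
#endif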
1537
1538/**
Tomas Winkler9d098192014-02-19 17:35:48 +02001539 * mei_cl_irq_write - write a message to device
Tomas Winkler21767542013-06-23 09:36:59 +03001540 * from the interrupt thread context
1541 *
1542 * @cl: client
1543 * @cb: callback block.
Tomas Winkler21767542013-06-23 09:36:59 +03001544 * @cmpl_list: complete list.
1545 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001546 * Return: 0 on success; otherwise an error code.
Tomas Winkler21767542013-06-23 09:36:59 +03001547 */
Tomas Winkler9d098192014-02-19 17:35:48 +02001548int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001549 struct list_head *cmpl_list)
Tomas Winkler21767542013-06-23 09:36:59 +03001550{
Tomas Winkler136698e2013-09-16 23:44:44 +03001551 struct mei_device *dev;
1552 struct mei_msg_data *buf;
Tomas Winkler21767542013-06-23 09:36:59 +03001553 struct mei_msg_hdr mei_hdr;
Tomas Winkler98e70862018-07-31 09:35:33 +03001554 size_t hdr_len = sizeof(mei_hdr);
Tomas Winkler136698e2013-09-16 23:44:44 +03001555 size_t len;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001556 size_t hbuf_len, dr_len;
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001557 int hbuf_slots;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001558 u32 dr_slots;
1559 u32 dma_len;
Tomas Winkler2ebf8c92013-09-16 23:44:43 +03001560 int rets;
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001561 bool first_chunk;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001562 const void *data;
Tomas Winkler21767542013-06-23 09:36:59 +03001563
Tomas Winkler136698e2013-09-16 23:44:44 +03001564 if (WARN_ON(!cl || !cl->dev))
1565 return -ENODEV;
1566
1567 dev = cl->dev;
1568
Tomas Winkler5db75142015-02-10 10:39:42 +02001569 buf = &cb->buf;
Tomas Winkler136698e2013-09-16 23:44:44 +03001570
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001571 first_chunk = cb->buf_idx == 0;
1572
Tomas Winkler4034b812016-07-26 01:06:04 +03001573 rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
Tomas Winkler136698e2013-09-16 23:44:44 +03001574 if (rets < 0)
Alexander Usyskine09ee852016-12-14 17:56:52 +02001575 goto err;
Tomas Winkler136698e2013-09-16 23:44:44 +03001576
1577 if (rets == 0) {
Tomas Winkler04bb1392014-03-18 22:52:04 +02001578 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
Tomas Winkler136698e2013-09-16 23:44:44 +03001579 return 0;
1580 }
1581
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001582 len = buf->size - cb->buf_idx;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001583 data = buf->data + cb->buf_idx;
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001584 hbuf_slots = mei_hbuf_empty_slots(dev);
1585 if (hbuf_slots < 0) {
1586 rets = -EOVERFLOW;
1587 goto err;
1588 }
Tomas Winkler98e70862018-07-31 09:35:33 +03001589
Tomas Winkler3aef0212020-02-11 18:05:22 +02001590 hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001591 dr_slots = mei_dma_ring_empty_slots(dev);
1592 dr_len = mei_slots2data(dr_slots);
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001593
Tomas Winklera1c4d082018-07-23 13:21:24 +03001594 mei_msg_hdr_init(&mei_hdr, cb);
1595
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001596 /**
1597 * Split the message only if we can write the whole host buffer,
1598 * otherwise wait for the next time the host buffer is empty.
1599 */
Tomas Winkler98e70862018-07-31 09:35:33 +03001600 if (len + hdr_len <= hbuf_len) {
Tomas Winkler21767542013-06-23 09:36:59 +03001601 mei_hdr.length = len;
1602 mei_hdr.msg_complete = 1;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001603 } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
1604 mei_hdr.dma_ring = 1;
1605 if (len > dr_len)
1606 len = dr_len;
1607 else
1608 mei_hdr.msg_complete = 1;
1609
1610 mei_hdr.length = sizeof(dma_len);
1611 dma_len = len;
1612 data = &dma_len;
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001613 } else if ((u32)hbuf_slots == mei_hbuf_depth(dev)) {
Tomas Winklerc30362c2018-11-22 13:11:40 +02001614 len = hbuf_len - hdr_len;
1615 mei_hdr.length = len;
Tomas Winkler21767542013-06-23 09:36:59 +03001616 } else {
Tomas Winkler21767542013-06-23 09:36:59 +03001617 return 0;
1618 }
1619
Tomas Winklerc30362c2018-11-22 13:11:40 +02001620 if (mei_hdr.dma_ring)
1621 mei_dma_ring_write(dev, buf->data + cb->buf_idx, len);
Tomas Winkler21767542013-06-23 09:36:59 +03001622
Tomas Winklerc30362c2018-11-22 13:11:40 +02001623 rets = mei_write_message(dev, &mei_hdr, hdr_len, data, mei_hdr.length);
Alexander Usyskine09ee852016-12-14 17:56:52 +02001624 if (rets)
1625 goto err;
Tomas Winkler21767542013-06-23 09:36:59 +03001626
1627 cl->status = 0;
Tomas Winkler4dfaa9f2013-06-23 09:37:00 +03001628 cl->writing_state = MEI_WRITING;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001629 cb->buf_idx += len;
Tomas Winkler4dfaa9f2013-06-23 09:37:00 +03001630
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001631 if (first_chunk) {
Alexander Usyskine09ee852016-12-14 17:56:52 +02001632 if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
1633 rets = -EIO;
1634 goto err;
1635 }
Tomas Winkler21767542013-06-23 09:36:59 +03001636 }
1637
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001638 if (mei_hdr.msg_complete)
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001639 list_move_tail(&cb->list, &dev->write_waiting_list);
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001640
Tomas Winkler21767542013-06-23 09:36:59 +03001641 return 0;
Alexander Usyskine09ee852016-12-14 17:56:52 +02001642
1643err:
1644 cl->status = rets;
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001645 list_move_tail(&cb->list, cmpl_list);
Alexander Usyskine09ee852016-12-14 17:56:52 +02001646 return rets;
Tomas Winkler21767542013-06-23 09:36:59 +03001647}
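
/*
 * Illustrative summary (not driver code) of the fragmentation decision made
 * in mei_cl_irq_write() above; mei_cl_write() below is similar but, having
 * just acquired the host buffer, sends a partial chunk instead of waiting.
 * The names below are assumptions introduced for the sketch.
 */
#if 0
enum example_tx_path { TX_COMPLETE, TX_VIA_DMA_RING, TX_PARTIAL, TX_WAIT };

static enum example_tx_path example_pick_path(size_t len, size_t hdr_len,
					      size_t hbuf_len, u32 dr_slots,
					      bool hbuf_is_empty)
{
	if (len + hdr_len <= hbuf_len)
		return TX_COMPLETE;	/* whole payload fits inline */
	if (dr_slots && hbuf_len >= hdr_len + sizeof(u32))
		return TX_VIA_DMA_RING;	/* header carries only dma_len */
	if (hbuf_is_empty)
		return TX_PARTIAL;	/* send what fits, keep cb queued */
	return TX_WAIT;			/* retry when the host buffer drains */
}
#endif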
1648
1649/**
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001650 * mei_cl_write - submit a write cb to mei device
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001651 * assumes device_lock is locked
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001652 *
1653 * @cl: host client
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001654 * @cb: write callback with filled data
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001655 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001656 * Return: number of bytes sent on success, <0 on failure.
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001657 */
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001658ssize_t mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001659{
1660 struct mei_device *dev;
1661 struct mei_msg_data *buf;
1662 struct mei_msg_hdr mei_hdr;
Tomas Winkler98e70862018-07-31 09:35:33 +03001663 size_t hdr_len = sizeof(mei_hdr);
Tomas Winklerc30362c2018-11-22 13:11:40 +02001664 size_t len, hbuf_len, dr_len;
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001665 int hbuf_slots;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001666 u32 dr_slots;
1667 u32 dma_len;
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001668 ssize_t rets;
Alexander Usyskine0cb6b22016-11-08 18:26:08 +02001669 bool blocking;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001670 const void *data;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001671
1672 if (WARN_ON(!cl || !cl->dev))
1673 return -ENODEV;
1674
1675 if (WARN_ON(!cb))
1676 return -EINVAL;
1677
1678 dev = cl->dev;
1679
Tomas Winkler5db75142015-02-10 10:39:42 +02001680 buf = &cb->buf;
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001681 len = buf->size;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001682
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001683 cl_dbg(dev, cl, "len=%zd\n", len);
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001684
Tomas Winklerc30362c2018-11-22 13:11:40 +02001685 blocking = cb->blocking;
1686 data = buf->data;
1687
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001688 rets = pm_runtime_get(dev->dev);
Tomas Winkler04bb1392014-03-18 22:52:04 +02001689 if (rets < 0 && rets != -EINPROGRESS) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001690 pm_runtime_put_noidle(dev->dev);
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001691 cl_err(dev, cl, "rpm: get failed %zd\n", rets);
Alexander Usyskin6cbb0972016-02-10 23:57:26 +02001692 goto free;
Tomas Winkler04bb1392014-03-18 22:52:04 +02001693 }
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001694
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001695 cb->buf_idx = 0;
1696 cl->writing_state = MEI_IDLE;
1697
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001698
Tomas Winkler4034b812016-07-26 01:06:04 +03001699 rets = mei_cl_tx_flow_ctrl_creds(cl);
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001700 if (rets < 0)
1701 goto err;
1702
Tomas Winklera1c4d082018-07-23 13:21:24 +03001703 mei_msg_hdr_init(&mei_hdr, cb);
1704
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001705 if (rets == 0) {
1706 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001707 rets = len;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001708 goto out;
1709 }
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001710
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001711 if (!mei_hbuf_acquire(dev)) {
1712 cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001713 rets = len;
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001714 goto out;
1715 }
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001716
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001717 hbuf_slots = mei_hbuf_empty_slots(dev);
1718 if (hbuf_slots < 0) {
1719 rets = -EOVERFLOW;
1720 goto out;
1721 }
1722
Tomas Winkler3aef0212020-02-11 18:05:22 +02001723 hbuf_len = mei_slots2data(hbuf_slots) & MEI_MSG_MAX_LEN_MASK;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001724 dr_slots = mei_dma_ring_empty_slots(dev);
1725 dr_len = mei_slots2data(dr_slots);
Tomas Winkler98e70862018-07-31 09:35:33 +03001726
1727 if (len + hdr_len <= hbuf_len) {
Tomas Winkler5151e2b2018-07-12 17:10:10 +03001728 mei_hdr.length = len;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001729 mei_hdr.msg_complete = 1;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001730 } else if (dr_slots && hbuf_len >= hdr_len + sizeof(dma_len)) {
1731 mei_hdr.dma_ring = 1;
1732 if (len > dr_len)
1733 len = dr_len;
1734 else
1735 mei_hdr.msg_complete = 1;
1736
1737 mei_hdr.length = sizeof(dma_len);
1738 dma_len = len;
1739 data = &dma_len;
Tomas Winkler8c8d9642018-07-23 13:21:23 +03001740 } else {
Tomas Winklerc30362c2018-11-22 13:11:40 +02001741 len = hbuf_len - hdr_len;
1742 mei_hdr.length = len;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001743 }
1744
Tomas Winklerc30362c2018-11-22 13:11:40 +02001745 if (mei_hdr.dma_ring)
1746 mei_dma_ring_write(dev, buf->data, len);
1747
Tomas Winkler98e70862018-07-31 09:35:33 +03001748 rets = mei_write_message(dev, &mei_hdr, hdr_len,
Tomas Winklerc30362c2018-11-22 13:11:40 +02001749 data, mei_hdr.length);
Tomas Winkler2ebf8c92013-09-16 23:44:43 +03001750 if (rets)
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001751 goto err;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001752
Tomas Winkler4034b812016-07-26 01:06:04 +03001753 rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001754 if (rets)
1755 goto err;
1756
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001757 cl->writing_state = MEI_WRITING;
Tomas Winklerc30362c2018-11-22 13:11:40 +02001758 cb->buf_idx = len;
1759 /* restore return value */
1760 len = buf->size;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001761
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001762out:
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001763 if (mei_hdr.msg_complete)
Alexander Usyskinaf336ca2018-02-25 20:07:05 +02001764 mei_tx_cb_enqueue(cb, &dev->write_waiting_list);
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001765 else
Alexander Usyskinaf336ca2018-02-25 20:07:05 +02001766 mei_tx_cb_enqueue(cb, &dev->write_list);
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001767
Alexander Usyskin23253c32015-07-23 10:43:11 +03001768 cb = NULL;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001769 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
1770
1771 mutex_unlock(&dev->device_lock);
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02001772 rets = wait_event_interruptible(cl->tx_wait,
Alexander Usyskin0faf6a32016-02-07 23:35:34 +02001773 cl->writing_state == MEI_WRITE_COMPLETE ||
1774 (!mei_cl_is_connected(cl)));
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001775 mutex_lock(&dev->device_lock);
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02001776 /* wait_event_interruptible returns -ERESTARTSYS */
1777 if (rets) {
1778 if (signal_pending(current))
1779 rets = -EINTR;
1780 goto err;
1781 }
Alexander Usyskin0faf6a32016-02-07 23:35:34 +02001782 if (cl->writing_state != MEI_WRITE_COMPLETE) {
1783 rets = -EFAULT;
1784 goto err;
1785 }
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001786 }
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02001787
John Hubbardc1a214a2018-08-23 09:16:58 +03001788 rets = len;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001789err:
Tomas Winkler04bb1392014-03-18 22:52:04 +02001790 cl_dbg(dev, cl, "rpm: autosuspend\n");
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001791 pm_runtime_mark_last_busy(dev->dev);
1792 pm_runtime_put_autosuspend(dev->dev);
Alexander Usyskin6cbb0972016-02-10 23:57:26 +02001793free:
1794 mei_io_cb_free(cb);
Tomas Winkler04bb1392014-03-18 22:52:04 +02001795
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001796 return rets;
1797}
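
/*
 * Usage sketch (illustrative only): a blocking write roughly as the
 * character-device path issues it.  mei_cl_alloc_cb() and the wrapper name
 * are assumptions here; mei_cl_write() consumes the cb on every path, so
 * the caller must not free it afterwards.
 */
#if 0
static ssize_t example_blocking_write(struct mei_cl *cl,
				      const struct file *fp,
				      const void *data, size_t len)
{
	struct mei_cl_cb *cb;
	ssize_t written;

	cb = mei_cl_alloc_cb(cl, len, MEI_FOP_WRITE, fp);
	if (!cb)
		return -ENOMEM;

	memcpy(cb->buf.data, data, len);
	cb->blocking = 1;

	mutex_lock(&cl->dev->device_lock);
	written = mei_cl_write(cl, cb);	/* bytes sent or negative errno */
	mutex_unlock(&cl->dev->device_lock);

	return written;
}
#endif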
1798
1799
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001800/**
1801 * mei_cl_complete - processes completed operation for a client
1802 *
1803 * @cl: private data of the file object.
1804 * @cb: callback block.
1805 */
1806void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1807{
Alexander Usyskina1809d32015-05-07 15:53:59 +03001808 struct mei_device *dev = cl->dev;
1809
Tomas Winkler3c666182015-05-04 09:43:52 +03001810 switch (cb->fop_type) {
1811 case MEI_FOP_WRITE:
Alexander Usyskinaf336ca2018-02-25 20:07:05 +02001812 mei_tx_cb_dequeue(cb);
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001813 cl->writing_state = MEI_WRITE_COMPLETE;
Alexander Usyskina1809d32015-05-07 15:53:59 +03001814 if (waitqueue_active(&cl->tx_wait)) {
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001815 wake_up_interruptible(&cl->tx_wait);
Alexander Usyskina1809d32015-05-07 15:53:59 +03001816 } else {
1817 pm_runtime_mark_last_busy(dev->dev);
1818 pm_request_autosuspend(dev->dev);
1819 }
Tomas Winkler3c666182015-05-04 09:43:52 +03001820 break;
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001821
Tomas Winkler3c666182015-05-04 09:43:52 +03001822 case MEI_FOP_READ:
Tomas Winklera9bed612015-02-10 10:39:46 +02001823 list_add_tail(&cb->list, &cl->rd_completed);
Alexander Usyskin46978ad2016-07-26 01:06:03 +03001824 if (!mei_cl_is_fixed_address(cl) &&
1825 !WARN_ON(!cl->rx_flow_ctrl_creds))
1826 cl->rx_flow_ctrl_creds--;
Tomas Winklera1f9ae22016-02-07 23:35:30 +02001827 if (!mei_cl_bus_rx_event(cl))
1828 wake_up_interruptible(&cl->rx_wait);
Tomas Winkler3c666182015-05-04 09:43:52 +03001829 break;
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001830
Tomas Winkler3c666182015-05-04 09:43:52 +03001831 case MEI_FOP_CONNECT:
1832 case MEI_FOP_DISCONNECT:
Tomas Winkler51678cc2015-07-26 09:54:18 +03001833 case MEI_FOP_NOTIFY_STOP:
1834 case MEI_FOP_NOTIFY_START:
Tomas Winkler3c666182015-05-04 09:43:52 +03001835 if (waitqueue_active(&cl->wait))
1836 wake_up(&cl->wait);
1837
1838 break;
Alexander Usyskin6a8d6482016-04-17 12:16:03 -04001839 case MEI_FOP_DISCONNECT_RSP:
1840 mei_io_cb_free(cb);
1841 mei_cl_set_disconnected(cl);
1842 break;
Tomas Winkler3c666182015-05-04 09:43:52 +03001843 default:
1844 BUG_ON(0);
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001845 }
1846}
1847
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001848
1849/**
Tomas Winkler074b4c02013-02-06 14:06:44 +02001850 * mei_cl_all_disconnect - disconnect forcefully all connected clients
1851 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001852 * @dev: mei device
Tomas Winkler074b4c02013-02-06 14:06:44 +02001853 */
Tomas Winkler074b4c02013-02-06 14:06:44 +02001854void mei_cl_all_disconnect(struct mei_device *dev)
1855{
Tomas Winkler31f88f52014-02-17 15:13:25 +02001856 struct mei_cl *cl;
Tomas Winkler074b4c02013-02-06 14:06:44 +02001857
Tomas Winkler3c666182015-05-04 09:43:52 +03001858 list_for_each_entry(cl, &dev->file_list, link)
1859 mei_cl_set_disconnected(cl);
Tomas Winkler074b4c02013-02-06 14:06:44 +02001860}