/*
 *
 * Intel Management Engine Interface (Intel MEI) Linux driver
 * Copyright (c) 2003-2012, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 */

#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/delay.h>
#include <linux/slab.h>
#include <linux/pm_runtime.h>

#include <linux/mei.h>

#include "mei_dev.h"
#include "hbm.h"
#include "client.h"

/**
 * mei_me_cl_init - initialize me client
 *
 * @me_cl: me client
 */
void mei_me_cl_init(struct mei_me_client *me_cl)
{
	INIT_LIST_HEAD(&me_cl->list);
	kref_init(&me_cl->refcnt);
}

/**
 * mei_me_cl_get - increases me client refcount
 *
 * @me_cl: me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: me client or NULL
 */
struct mei_me_client *mei_me_cl_get(struct mei_me_client *me_cl)
{
	if (me_cl && kref_get_unless_zero(&me_cl->refcnt))
		return me_cl;

	return NULL;
}

/**
 * mei_me_cl_release - free me client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @ref: me_client refcount
 */
static void mei_me_cl_release(struct kref *ref)
{
	struct mei_me_client *me_cl =
		container_of(ref, struct mei_me_client, refcnt);

	kfree(me_cl);
}

/**
 * mei_me_cl_put - decrease me client refcount and free client if necessary
 *
 * Locking: called under "dev->device_lock" lock
 *
 * @me_cl: me client
 */
void mei_me_cl_put(struct mei_me_client *me_cl)
{
	if (me_cl)
		kref_put(&me_cl->refcnt, mei_me_cl_release);
}

/**
 * __mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 *
 * Locking: dev->me_clients_rwsem
 */
static void __mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	if (!me_cl)
		return;

	list_del_init(&me_cl->list);
	mei_me_cl_put(me_cl);
}

/**
 * mei_me_cl_del - delete me client from the list and decrease
 *     reference counter
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_del(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_add - add me client to the list
 *
 * @dev: mei device
 * @me_cl: me client
 */
void mei_me_cl_add(struct mei_device *dev, struct mei_me_client *me_cl)
{
	down_write(&dev->me_clients_rwsem);
	list_add(&me_cl->list, &dev->me_clients);
	up_write(&dev->me_clients_rwsem);
}

/**
 * __mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}

/**
 * mei_me_cl_by_uuid - locate me client by uuid
 *	increases ref count
 *
 * @dev: mei device
 * @uuid: me client uuid
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_uuid(struct mei_device *dev,
					const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_by_id - locate me client by client id
 *	increases ref count
 *
 * @dev: the device structure
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
struct mei_me_client *mei_me_cl_by_id(struct mei_device *dev, u8 client_id)
{

	struct mei_me_client *__me_cl, *me_cl = NULL;

	down_read(&dev->me_clients_rwsem);
	list_for_each_entry(__me_cl, &dev->me_clients, list) {
		if (__me_cl->client_id == client_id) {
			me_cl = mei_me_cl_get(__me_cl);
			break;
		}
	}
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * __mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 *
 * Locking: dev->me_clients_rwsem
 */
static struct mei_me_client *__mei_me_cl_by_uuid_id(struct mei_device *dev,
					const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;
	const uuid_le *pn;

	WARN_ON(!rwsem_is_locked(&dev->me_clients_rwsem));

	list_for_each_entry(me_cl, &dev->me_clients, list) {
		pn = &me_cl->props.protocol_name;
		if (uuid_le_cmp(*uuid, *pn) == 0 &&
		    me_cl->client_id == client_id)
			return mei_me_cl_get(me_cl);
	}

	return NULL;
}


/**
 * mei_me_cl_by_uuid_id - locate me client by client id and uuid
 *	increases ref count
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @client_id: me client id
 *
 * Return: me client or NULL if not found
 */
struct mei_me_client *mei_me_cl_by_uuid_id(struct mei_device *dev,
					   const uuid_le *uuid, u8 client_id)
{
	struct mei_me_client *me_cl;

	down_read(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, client_id);
	up_read(&dev->me_clients_rwsem);

	return me_cl;
}

/**
 * mei_me_cl_rm_by_uuid - remove all me clients matching uuid
 *
 * @dev: the device structure
 * @uuid: me client uuid
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid(struct mei_device *dev, const uuid_le *uuid)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl\n", uuid);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid(dev, uuid);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_by_uuid_id - remove all me clients matching client id
 *
 * @dev: the device structure
 * @uuid: me client uuid
 * @id: me client id
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_by_uuid_id(struct mei_device *dev, const uuid_le *uuid, u8 id)
{
	struct mei_me_client *me_cl;

	dev_dbg(dev->dev, "remove %pUl %d\n", uuid, id);

	down_write(&dev->me_clients_rwsem);
	me_cl = __mei_me_cl_by_uuid_id(dev, uuid, id);
	__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_me_cl_rm_all - remove all me clients
 *
 * @dev: the device structure
 *
 * Locking: called under "dev->device_lock" lock
 */
void mei_me_cl_rm_all(struct mei_device *dev)
{
	struct mei_me_client *me_cl, *next;

	down_write(&dev->me_clients_rwsem);
	list_for_each_entry_safe(me_cl, next, &dev->me_clients, list)
		__mei_me_cl_del(dev, me_cl);
	up_write(&dev->me_clients_rwsem);
}

/**
 * mei_cl_cmp_id - tells if the clients are the same
 *
 * @cl1: host client 1
 * @cl2: host client 2
 *
 * Return: true  - if the clients have the same host and me ids
 *         false - otherwise
 */
static inline bool mei_cl_cmp_id(const struct mei_cl *cl1,
				 const struct mei_cl *cl2)
{
	return cl1 && cl2 &&
		(cl1->host_client_id == cl2->host_client_id) &&
		(mei_cl_me_id(cl1) == mei_cl_me_id(cl2));
}

/**
 * mei_io_cb_free - free mei_cb_private related memory
 *
 * @cb: mei callback struct
 */
void mei_io_cb_free(struct mei_cl_cb *cb)
{
	if (cb == NULL)
		return;

	list_del(&cb->list);
	kfree(cb->buf.data);
	kfree(cb);
}

/**
 * mei_io_cb_init - allocate and initialize io callback
 *
 * @cl: mei client
 * @type: operation type
 * @fp: pointer to file structure
 *
 * Return: mei_cl_cb pointer or NULL;
 */
static struct mei_cl_cb *mei_io_cb_init(struct mei_cl *cl,
					enum mei_cb_file_ops type,
					const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = kzalloc(sizeof(struct mei_cl_cb), GFP_KERNEL);
	if (!cb)
		return NULL;

	INIT_LIST_HEAD(&cb->list);
	cb->fp = fp;
	cb->cl = cl;
	cb->buf_idx = 0;
	cb->fop_type = type;
	return cb;
}

/**
 * __mei_io_list_flush - removes and frees cbs belonging to cl.
 *
 * @head: an instance of our list structure
 * @cl: host client, can be NULL for flushing the whole list
 * @free: whether to free the cbs
 */
static void __mei_io_list_flush(struct list_head *head,
				struct mei_cl *cl, bool free)
{
	struct mei_cl_cb *cb, *next;

	/* enable removing everything if no cl is specified */
	list_for_each_entry_safe(cb, next, head, list) {
		if (!cl || mei_cl_cmp_id(cl, cb->cl)) {
			list_del_init(&cb->list);
			if (free)
				mei_io_cb_free(cb);
		}
	}
}

/**
 * mei_io_list_flush - removes list entry belonging to cl.
 *
 * @head: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_flush(struct list_head *head, struct mei_cl *cl)
{
	__mei_io_list_flush(head, cl, false);
}

/**
 * mei_io_list_free - removes cb belonging to cl and free them
 *
 * @head: An instance of our list structure
 * @cl: host client
 */
static inline void mei_io_list_free(struct list_head *head, struct mei_cl *cl)
{
	__mei_io_list_flush(head, cl, true);
}

/**
 * mei_cl_alloc_cb - a convenient wrapper for allocating read cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 */
struct mei_cl_cb *mei_cl_alloc_cb(struct mei_cl *cl, size_t length,
				  enum mei_cb_file_ops fop_type,
				  const struct file *fp)
{
	struct mei_cl_cb *cb;

	cb = mei_io_cb_init(cl, fop_type, fp);
	if (!cb)
		return NULL;

	if (length == 0)
		return cb;

	cb->buf.data = kmalloc(length, GFP_KERNEL);
	if (!cb->buf.data) {
		mei_io_cb_free(cb);
		return NULL;
	}
	cb->buf.size = length;

	return cb;
}

/**
 * mei_cl_enqueue_ctrl_wr_cb - a convenient wrapper for allocating
 *     and enqueuing of the control commands cb
 *
 * @cl: host client
 * @length: size of the buffer
 * @fop_type: operation type
 * @fp: associated file pointer (might be NULL)
 *
 * Return: cb on success and NULL on failure
 * Locking: called under "dev->device_lock" lock
 */
struct mei_cl_cb *mei_cl_enqueue_ctrl_wr_cb(struct mei_cl *cl, size_t length,
					    enum mei_cb_file_ops fop_type,
					    const struct file *fp)
{
	struct mei_cl_cb *cb;

	/* for RX always allocate at least client's mtu */
	if (length)
		length = max_t(size_t, length, mei_cl_mtu(cl));

	cb = mei_cl_alloc_cb(cl, length, fop_type, fp);
	if (!cb)
		return NULL;

	list_add_tail(&cb->list, &cl->dev->ctrl_wr_list);
	return cb;
}

/**
 * mei_cl_read_cb - find this cl's callback in the read list
 *     for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: cb on success, NULL if cb is not found
 */
struct mei_cl_cb *mei_cl_read_cb(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb;

	list_for_each_entry(cb, &cl->rd_completed, list)
		if (!fp || fp == cb->fp)
			return cb;

	return NULL;
}

/**
 * mei_cl_read_cb_flush - free client's read pending and completed cbs
 *   for a specific file
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 */
void mei_cl_read_cb_flush(const struct mei_cl *cl, const struct file *fp)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, &cl->rd_completed, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);

	list_for_each_entry_safe(cb, next, &cl->rd_pending, list)
		if (!fp || fp == cb->fp)
			mei_io_cb_free(cb);
}

/**
 * mei_cl_flush_queues - flushes queue lists belonging to cl.
 *
 * @cl: host client
 * @fp: file pointer (matching cb file object), may be NULL
 *
 * Return: 0 on success, -EINVAL if cl or cl->dev is NULL.
 */
int mei_cl_flush_queues(struct mei_cl *cl, const struct file *fp)
{
	struct mei_device *dev;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	cl_dbg(dev, cl, "remove list entry belonging to cl\n");
	mei_io_list_free(&cl->dev->write_list, cl);
	mei_io_list_free(&cl->dev->write_waiting_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_wr_list, cl);
	mei_io_list_flush(&cl->dev->ctrl_rd_list, cl);

	mei_cl_read_cb_flush(cl, fp);

	return 0;
}


/**
 * mei_cl_init - initializes cl.
 *
 * @cl: host client to be initialized
 * @dev: mei device
 */
void mei_cl_init(struct mei_cl *cl, struct mei_device *dev)
{
	memset(cl, 0, sizeof(struct mei_cl));
	init_waitqueue_head(&cl->wait);
	init_waitqueue_head(&cl->rx_wait);
	init_waitqueue_head(&cl->tx_wait);
	init_waitqueue_head(&cl->ev_wait);
	INIT_LIST_HEAD(&cl->rd_completed);
	INIT_LIST_HEAD(&cl->rd_pending);
	INIT_LIST_HEAD(&cl->link);
	cl->writing_state = MEI_IDLE;
	cl->state = MEI_FILE_UNINITIALIZED;
	cl->dev = dev;
}

/**
 * mei_cl_allocate - allocates cl structure and sets it up.
 *
 * @dev: mei device
 * Return: The allocated file or NULL on failure
 */
struct mei_cl *mei_cl_allocate(struct mei_device *dev)
{
	struct mei_cl *cl;

	cl = kmalloc(sizeof(struct mei_cl), GFP_KERNEL);
	if (!cl)
		return NULL;

	mei_cl_init(cl, dev);

	return cl;
}

/**
 * mei_cl_link - allocate host id in the host map
 *
 * @cl: host client
 *
 * Return: 0 on success
 *	-EINVAL on incorrect values
 *	-EMFILE if open count exceeded.
 */
int mei_cl_link(struct mei_cl *cl)
{
	struct mei_device *dev;
	long open_handle_count;
	int id;

	if (WARN_ON(!cl || !cl->dev))
		return -EINVAL;

	dev = cl->dev;

	id = find_first_zero_bit(dev->host_clients_map, MEI_CLIENTS_MAX);
	if (id >= MEI_CLIENTS_MAX) {
		dev_err(dev->dev, "id exceeded %d", MEI_CLIENTS_MAX);
		return -EMFILE;
	}

	open_handle_count = dev->open_handle_count + dev->iamthif_open_count;
	if (open_handle_count >= MEI_MAX_OPEN_HANDLE_COUNT) {
		dev_err(dev->dev, "open_handle_count exceeded %d",
			MEI_MAX_OPEN_HANDLE_COUNT);
		return -EMFILE;
	}

	dev->open_handle_count++;

	cl->host_client_id = id;
	list_add_tail(&cl->link, &dev->file_list);

	set_bit(id, dev->host_clients_map);

	cl->state = MEI_FILE_INITIALIZING;

	cl_dbg(dev, cl, "link cl\n");
	return 0;
}

/**
 * mei_cl_unlink - remove host client from the list
 *
 * @cl: host client
 *
 * Return: always 0
 */
int mei_cl_unlink(struct mei_cl *cl)
{
	struct mei_device *dev;

	/* don't shout on error exit path */
	if (!cl)
		return 0;

	/* amthif might not be initialized */
	if (!cl->dev)
		return 0;

	dev = cl->dev;

	cl_dbg(dev, cl, "unlink client");

	if (dev->open_handle_count > 0)
		dev->open_handle_count--;

	/* never clear the 0 bit */
	if (cl->host_client_id)
		clear_bit(cl->host_client_id, dev->host_clients_map);

	list_del_init(&cl->link);

	cl->state = MEI_FILE_UNINITIALIZED;
	cl->writing_state = MEI_IDLE;

	WARN_ON(!list_empty(&cl->rd_completed) ||
		!list_empty(&cl->rd_pending) ||
		!list_empty(&cl->link));

	return 0;
}

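/**
 * mei_host_client_init - mark the device as enabled, schedule a rescan
 *	of the clients on the bus and request runtime autosuspend
 *
 * @dev: the device structure
 */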
void mei_host_client_init(struct mei_device *dev)
{
	dev->dev_state = MEI_DEV_ENABLED;
	dev->reset_count = 0;

	schedule_work(&dev->bus_rescan_work);

	pm_runtime_mark_last_busy(dev->dev);
	dev_dbg(dev->dev, "rpm: autosuspend\n");
	pm_request_autosuspend(dev->dev);
}

/**
 * mei_hbuf_acquire - try to acquire host buffer
 *
 * @dev: the device structure
 * Return: true if host buffer was acquired
 */
bool mei_hbuf_acquire(struct mei_device *dev)
{
	if (mei_pg_state(dev) == MEI_PG_ON ||
	    mei_pg_in_transition(dev)) {
		dev_dbg(dev->dev, "device is in pg\n");
		return false;
	}

	if (!dev->hbuf_is_ready) {
		dev_dbg(dev->dev, "hbuf is not ready\n");
		return false;
	}

	dev->hbuf_is_ready = false;

	return true;
}

/**
 * mei_cl_wake_all - wake up readers, writers and event waiters so
 *	they can be interrupted
 *
 * @cl: host client
 */
static void mei_cl_wake_all(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	/* synchronized under device mutex */
	if (waitqueue_active(&cl->rx_wait)) {
		cl_dbg(dev, cl, "Waking up reading client!\n");
		wake_up_interruptible(&cl->rx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->tx_wait)) {
		cl_dbg(dev, cl, "Waking up writing client!\n");
		wake_up_interruptible(&cl->tx_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->ev_wait)) {
		cl_dbg(dev, cl, "Waking up waiting for event clients!\n");
		wake_up_interruptible(&cl->ev_wait);
	}
	/* synchronized under device mutex */
	if (waitqueue_active(&cl->wait)) {
		cl_dbg(dev, cl, "Waking up ctrl write clients!\n");
		wake_up(&cl->wait);
	}
}

/**
 * mei_cl_set_disconnected - set disconnected state and clear
 *	associated states and resources
 *
 * @cl: host client
 */
static void mei_cl_set_disconnected(struct mei_cl *cl)
{
	struct mei_device *dev = cl->dev;

	if (cl->state == MEI_FILE_DISCONNECTED ||
	    cl->state <= MEI_FILE_INITIALIZING)
		return;

	cl->state = MEI_FILE_DISCONNECTED;
	mei_io_list_free(&dev->write_list, cl);
	mei_io_list_free(&dev->write_waiting_list, cl);
	mei_io_list_flush(&dev->ctrl_rd_list, cl);
	mei_io_list_flush(&dev->ctrl_wr_list, cl);
	mei_io_list_free(&dev->amthif_cmd_list, cl);
	mei_cl_wake_all(cl);
	cl->rx_flow_ctrl_creds = 0;
	cl->tx_flow_ctrl_creds = 0;
	cl->timer_count = 0;

	mei_cl_bus_module_put(cl);

	if (!cl->me_cl)
		return;

	if (!WARN_ON(cl->me_cl->connect_count == 0))
		cl->me_cl->connect_count--;

	if (cl->me_cl->connect_count == 0)
		cl->me_cl->tx_flow_ctrl_creds = 0;

	mei_me_cl_put(cl->me_cl);
	cl->me_cl = NULL;
}

793{
Alexander Usyskin1df629e2015-05-04 09:43:56 +0300794 if (!mei_me_cl_get(me_cl))
Alexander Usyskina03d77f62015-05-04 09:43:55 +0300795 return -ENOENT;
796
Alexander Usyskin1df629e2015-05-04 09:43:56 +0300797 /* only one connection is allowed for fixed address clients */
798 if (me_cl->props.fixed_address) {
799 if (me_cl->connect_count) {
800 mei_me_cl_put(me_cl);
801 return -EBUSY;
802 }
803 }
804
805 cl->me_cl = me_cl;
Alexander Usyskina03d77f62015-05-04 09:43:55 +0300806 cl->state = MEI_FILE_CONNECTING;
807 cl->me_cl->connect_count++;
808
809 return 0;
810}

/*
 * mei_cl_send_disconnect - send disconnect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_disconnect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);

	return 0;
}

/**
 * mei_cl_irq_disconnect - processes close related operation from
 *	interrupt thread context - send disconnect request
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_disconnect(struct mei_cl *cl, struct mei_cl_cb *cb,
			  struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	ret = mei_cl_send_disconnect(cl, cb);
	if (ret)
		list_move_tail(&cb->list, cmpl_list);

	return ret;
}

/**
 * __mei_cl_disconnect - disconnect host client from the me one;
 *	internal function, runtime pm has to be already acquired
 *
 * @cl: host client
 *
 * Return: 0 on success, <0 on failure.
 */
static int __mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	dev = cl->dev;

	cl->state = MEI_FILE_DISCONNECTING;

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_DISCONNECT, NULL);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_disconnect(cl, cb);
		if (rets) {
			cl_err(dev, cl, "failed to disconnect.\n");
			goto out;
		}
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->state == MEI_FILE_DISCONNECT_REPLY ||
			   cl->state == MEI_FILE_DISCONNECTED,
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	rets = cl->status;
	if (cl->state != MEI_FILE_DISCONNECT_REPLY &&
	    cl->state != MEI_FILE_DISCONNECTED) {
		cl_dbg(dev, cl, "timeout on disconnect from FW client.\n");
		rets = -ETIME;
	}

out:
	/* we disconnect also on error */
	mei_cl_set_disconnected(cl);
	if (!rets)
		cl_dbg(dev, cl, "successfully disconnected from FW client.\n");

	mei_io_cb_free(cb);
	return rets;
}

/**
 * mei_cl_disconnect - disconnect host client from the me one
 *
 * @cl: host client
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_disconnect(struct mei_cl *cl)
{
	struct mei_device *dev;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	cl_dbg(dev, cl, "disconnecting");

	if (!mei_cl_is_connected(cl))
		return 0;

	if (mei_cl_is_fixed_address(cl)) {
		mei_cl_set_disconnected(cl);
		return 0;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	rets = __mei_cl_disconnect(cl);

	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	return rets;
}


/**
 * mei_cl_is_other_connecting - checks if another
 *	client with the same me client id is connecting
 *
 * @cl: private data of the file object
 *
 * Return: true if another client is connecting, false - otherwise.
 */
static bool mei_cl_is_other_connecting(struct mei_cl *cl)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;

	dev = cl->dev;

	list_for_each_entry(cb, &dev->ctrl_rd_list, list) {
		if (cb->fop_type == MEI_FOP_CONNECT &&
		    mei_cl_me_id(cl) == mei_cl_me_id(cb->cl))
			return true;
	}

	return false;
}

/**
 * mei_cl_send_connect - send connect request
 *
 * @cl: host client
 * @cb: callback block
 *
 * Return: 0, OK; otherwise, error.
 */
static int mei_cl_send_connect(struct mei_cl *cl, struct mei_cl_cb *cb)
{
	struct mei_device *dev;
	int ret;

	dev = cl->dev;

	ret = mei_hbm_cl_connect_req(dev, cl);
	cl->status = ret;
	if (ret) {
		cl->state = MEI_FILE_DISCONNECT_REPLY;
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	cl->timer_count = MEI_CONNECT_TIMEOUT;
	mei_schedule_stall_timer(dev);
	return 0;
}

/**
 * mei_cl_irq_connect - send connect request in irq_thread context
 *
 * @cl: host client
 * @cb: callback block
 * @cmpl_list: complete list
 *
 * Return: 0, OK; otherwise, error.
 */
int mei_cl_irq_connect(struct mei_cl *cl, struct mei_cl_cb *cb,
		       struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int rets;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	if (mei_cl_is_other_connecting(cl))
		return 0;

	if (slots < msg_slots)
		return -EMSGSIZE;

	rets = mei_cl_send_connect(cl, cb);
	if (rets)
		list_move_tail(&cb->list, cmpl_list);

	return rets;
}

/**
 * mei_cl_connect - connect host client to the me one
 *
 * @cl: host client
 * @me_cl: me client
 * @fp: pointer to file structure
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success, <0 on failure.
 */
int mei_cl_connect(struct mei_cl *cl, struct mei_me_client *me_cl,
		   const struct file *fp)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	int rets;

	if (WARN_ON(!cl || !cl->dev || !me_cl))
		return -ENODEV;

	dev = cl->dev;

	if (!mei_cl_bus_module_get(cl))
		return -ENODEV;

	rets = mei_cl_set_connecting(cl, me_cl);
	if (rets)
		goto nortpm;

	if (mei_cl_is_fixed_address(cl)) {
		cl->state = MEI_FILE_CONNECTED;
		rets = 0;
		goto nortpm;
	}

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		goto nortpm;
	}

	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, MEI_FOP_CONNECT, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	/* run hbuf acquire last so we don't have to undo */
	if (!mei_cl_is_other_connecting(cl) && mei_hbuf_acquire(dev)) {
		rets = mei_cl_send_connect(cl, cb);
		if (rets)
			goto out;
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   (cl->state == MEI_FILE_CONNECTED ||
			    cl->state == MEI_FILE_DISCONNECTED ||
			    cl->state == MEI_FILE_DISCONNECT_REQUIRED ||
			    cl->state == MEI_FILE_DISCONNECT_REPLY),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (!mei_cl_is_connected(cl)) {
		if (cl->state == MEI_FILE_DISCONNECT_REQUIRED) {
			mei_io_list_flush(&dev->ctrl_rd_list, cl);
			mei_io_list_flush(&dev->ctrl_wr_list, cl);
			/* ignore disconnect return value;
			 * in case of failure reset will be invoked
			 */
			__mei_cl_disconnect(cl);
			rets = -EFAULT;
			goto out;
		}

		/* timeout or something went really wrong */
		if (!cl->status)
			cl->status = -EFAULT;
	}

	rets = cl->status;
out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);

nortpm:
	if (!mei_cl_is_connected(cl))
		mei_cl_set_disconnected(cl);

	return rets;
}

/**
 * mei_cl_alloc_linked - allocate and link host client
 *
 * @dev: the device structure
 *
 * Return: cl on success ERR_PTR on failure
 */
struct mei_cl *mei_cl_alloc_linked(struct mei_device *dev)
{
	struct mei_cl *cl;
	int ret;

	cl = mei_cl_allocate(dev);
	if (!cl) {
		ret = -ENOMEM;
		goto err;
	}

	ret = mei_cl_link(cl);
	if (ret)
		goto err;

	return cl;
err:
	kfree(cl);
	return ERR_PTR(ret);
}

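/*
 * TX flow control summary (see the two helpers below): credits are
 * normally tracked per host client in cl->tx_flow_ctrl_creds; for me
 * clients with a single receive buffer the counter lives on the me
 * client and is shared by all its connections; fixed address clients
 * are not flow controlled.
 */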
/**
 * mei_cl_tx_flow_ctrl_creds - checks flow_control credits for cl.
 *
 * @cl: host client
 *
 * Return: 1 if tx_flow_ctrl_creds >0, 0 - otherwise.
 */
static int mei_cl_tx_flow_ctrl_creds(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (cl->tx_flow_ctrl_creds > 0)
		return 1;

	if (mei_cl_is_fixed_address(cl))
		return 1;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (cl->me_cl->tx_flow_ctrl_creds > 0)
			return 1;
	}
	return 0;
}

/**
 * mei_cl_tx_flow_ctrl_creds_reduce - reduces transmit flow control credits
 *	for a client
 *
 * @cl: host client
 *
 * Return:
 *	0 on success
 *	-EINVAL when ctrl credits are <= 0
 */
static int mei_cl_tx_flow_ctrl_creds_reduce(struct mei_cl *cl)
{
	if (WARN_ON(!cl || !cl->me_cl))
		return -EINVAL;

	if (mei_cl_is_fixed_address(cl))
		return 0;

	if (mei_cl_is_single_recv_buf(cl)) {
		if (WARN_ON(cl->me_cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->me_cl->tx_flow_ctrl_creds--;
	} else {
		if (WARN_ON(cl->tx_flow_ctrl_creds <= 0))
			return -EINVAL;
		cl->tx_flow_ctrl_creds--;
	}
	return 0;
}

/**
 * mei_cl_notify_fop2req - convert fop to proper request
 *
 * @fop: client notification start response command
 *
 * Return: MEI_HBM_NOTIFICATION_START/STOP
 */
u8 mei_cl_notify_fop2req(enum mei_cb_file_ops fop)
{
	if (fop == MEI_FOP_NOTIFY_START)
		return MEI_HBM_NOTIFICATION_START;
	else
		return MEI_HBM_NOTIFICATION_STOP;
}

/**
 * mei_cl_notify_req2fop - convert notification request to file operation type
 *
 * @req: hbm notification request type
 *
 * Return: MEI_FOP_NOTIFY_START/STOP
 */
enum mei_cb_file_ops mei_cl_notify_req2fop(u8 req)
{
	if (req == MEI_HBM_NOTIFICATION_START)
		return MEI_FOP_NOTIFY_START;
	else
		return MEI_FOP_NOTIFY_STOP;
}

/**
 * mei_cl_irq_notify - send notification request in irq_thread context
 *
 * @cl: client
 * @cb: callback block.
 * @cmpl_list: complete list.
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_irq_notify(struct mei_cl *cl, struct mei_cl_cb *cb,
		      struct list_head *cmpl_list)
{
	struct mei_device *dev = cl->dev;
	u32 msg_slots;
	int slots;
	int ret;
	bool request;

	msg_slots = mei_data2slots(sizeof(struct hbm_client_connect_request));
	slots = mei_hbuf_empty_slots(dev);

	if (slots < msg_slots)
		return -EMSGSIZE;

	request = mei_cl_notify_fop2req(cb->fop_type);
	ret = mei_hbm_cl_notify_req(dev, cl, request);
	if (ret) {
		cl->status = ret;
		list_move_tail(&cb->list, cmpl_list);
		return ret;
	}

	list_move_tail(&cb->list, &dev->ctrl_rd_list);
	return 0;
}

/**
 * mei_cl_notify_request - send notification stop/start request
 *
 * @cl: host client
 * @fp: associate request with file
 * @request: 1 for start or 0 for stop
 *
 * Locking: called under "dev->device_lock" lock
 *
 * Return: 0 on success and error otherwise.
 */
int mei_cl_notify_request(struct mei_cl *cl,
			  const struct file *fp, u8 request)
{
	struct mei_device *dev;
	struct mei_cl_cb *cb;
	enum mei_cb_file_ops fop_type;
	int rets;

	if (WARN_ON(!cl || !cl->dev))
		return -ENODEV;

	dev = cl->dev;

	if (!dev->hbm_f_ev_supported) {
		cl_dbg(dev, cl, "notifications not supported\n");
		return -EOPNOTSUPP;
	}

	if (!mei_cl_is_connected(cl))
		return -ENODEV;

	rets = pm_runtime_get(dev->dev);
	if (rets < 0 && rets != -EINPROGRESS) {
		pm_runtime_put_noidle(dev->dev);
		cl_err(dev, cl, "rpm: get failed %d\n", rets);
		return rets;
	}

	fop_type = mei_cl_notify_req2fop(request);
	cb = mei_cl_enqueue_ctrl_wr_cb(cl, 0, fop_type, fp);
	if (!cb) {
		rets = -ENOMEM;
		goto out;
	}

	if (mei_hbuf_acquire(dev)) {
		if (mei_hbm_cl_notify_req(dev, cl, request)) {
			rets = -ENODEV;
			goto out;
		}
		list_move_tail(&cb->list, &dev->ctrl_rd_list);
	}

	mutex_unlock(&dev->device_lock);
	wait_event_timeout(cl->wait,
			   cl->notify_en == request || !mei_cl_is_connected(cl),
			   mei_secs_to_jiffies(MEI_CL_CONNECT_TIMEOUT));
	mutex_lock(&dev->device_lock);

	if (cl->notify_en != request && !cl->status)
		cl->status = -EFAULT;

	rets = cl->status;

out:
	cl_dbg(dev, cl, "rpm: autosuspend\n");
	pm_runtime_mark_last_busy(dev->dev);
	pm_runtime_put_autosuspend(dev->dev);

	mei_io_cb_free(cb);
	return rets;
}
1379/**
Tomas Winkler237092b2015-07-26 09:54:22 +03001380 * mei_cl_notify - raise notification
1381 *
1382 * @cl: host client
1383 *
1384 * Locking: called under "dev->device_lock" lock
1385 */
1386void mei_cl_notify(struct mei_cl *cl)
1387{
1388 struct mei_device *dev;
1389
1390 if (!cl || !cl->dev)
1391 return;
1392
1393 dev = cl->dev;
1394
1395 if (!cl->notify_en)
1396 return;
1397
1398	cl_dbg(dev, cl, "notify event\n");
1399 cl->notify_ev = true;
Tomas Winkler850f8942016-02-07 23:35:31 +02001400 if (!mei_cl_bus_notify_event(cl))
1401 wake_up_interruptible(&cl->ev_wait);
Tomas Winkler237092b2015-07-26 09:54:22 +03001402
1403 if (cl->ev_async)
1404 kill_fasync(&cl->ev_async, SIGIO, POLL_PRI);
Alexander Usyskinbb2ef9c2015-07-26 09:54:23 +03001405
Tomas Winkler237092b2015-07-26 09:54:22 +03001406}
1407
1408/**
Tomas Winklerb38a3622015-07-26 09:54:19 +03001409 * mei_cl_notify_get - get or wait for notification event
1410 *
1411 * @cl: host client
1412 * @block: this request is blocking
1413 * @notify_ev: true if notification event was received
1414 *
1415 * Locking: called under "dev->device_lock" lock
1416 *
1417 * Return: 0 on success, <0 on failure.
1418 */
1419int mei_cl_notify_get(struct mei_cl *cl, bool block, bool *notify_ev)
1420{
1421 struct mei_device *dev;
1422 int rets;
1423
1424 *notify_ev = false;
1425
1426 if (WARN_ON(!cl || !cl->dev))
1427 return -ENODEV;
1428
1429 dev = cl->dev;
1430
Alexander Usyskin6c0d6702017-01-27 16:32:42 +02001431 if (!dev->hbm_f_ev_supported) {
1432 cl_dbg(dev, cl, "notifications not supported\n");
1433 return -EOPNOTSUPP;
1434 }
1435
Tomas Winklerb38a3622015-07-26 09:54:19 +03001436 if (!mei_cl_is_connected(cl))
1437 return -ENODEV;
1438
1439 if (cl->notify_ev)
1440 goto out;
1441
1442 if (!block)
1443 return -EAGAIN;
1444
1445 mutex_unlock(&dev->device_lock);
1446 rets = wait_event_interruptible(cl->ev_wait, cl->notify_ev);
1447 mutex_lock(&dev->device_lock);
1448
1449 if (rets < 0)
1450 return rets;
1451
1452out:
1453 *notify_ev = cl->notify_ev;
1454 cl->notify_ev = false;
1455 return 0;
1456}
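/*
 * Illustrative sketch only, not part of the driver: a non-blocking poll
 * built on mei_cl_notify_get(). -EAGAIN means no event is pending yet;
 * a blocking caller would pass block=true and sleep on cl->ev_wait
 * inside mei_cl_notify_get() instead. The helper name is hypothetical
 * and dev->device_lock is assumed to be held, as documented above.
 */
static int __maybe_unused example_cl_poll_notify(struct mei_cl *cl)
{
	bool notify_ev;
	int ret;

	ret = mei_cl_notify_get(cl, false, &notify_ev);
	if (ret == -EAGAIN)
		return 0;		/* nothing pending yet */
	if (ret)
		return ret;

	return notify_ev ? 1 : 0;
}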
1457
1458/**
Masanari Iida393b1482013-04-05 01:05:05 +09001459 * mei_cl_read_start - start a read request for a client message
Tomas Winkler9ca90502013-01-08 23:07:13 +02001460 *
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001461 * @cl: host client
Alexander Usyskince231392014-09-29 16:31:50 +03001462 * @length: number of bytes to read
Tomas Winklerbca67d62015-02-10 10:39:43 +02001463 * @fp: pointer to file structure
Tomas Winkler9ca90502013-01-08 23:07:13 +02001464 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001465 * Return: 0 on success, <0 on failure.
Tomas Winkler9ca90502013-01-08 23:07:13 +02001466 */
Tomas Winklerf23e2cc2016-02-07 23:35:23 +02001467int mei_cl_read_start(struct mei_cl *cl, size_t length, const struct file *fp)
Tomas Winkler9ca90502013-01-08 23:07:13 +02001468{
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001469 struct mei_device *dev;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001470 struct mei_cl_cb *cb;
1471 int rets;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001472
Tomas Winkler90e0b5f2013-01-08 23:07:14 +02001473 if (WARN_ON(!cl || !cl->dev))
1474 return -ENODEV;
1475
1476 dev = cl->dev;
1477
Tomas Winklerb950ac12013-07-25 20:15:53 +03001478 if (!mei_cl_is_connected(cl))
Tomas Winkler9ca90502013-01-08 23:07:13 +02001479 return -ENODEV;
1480
Alexander Usyskind49ed642015-05-04 09:43:54 +03001481 if (!mei_me_cl_is_active(cl->me_cl)) {
1482 cl_err(dev, cl, "no such me client\n");
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02001483 return -ENOTTY;
Tomas Winkler9ca90502013-01-08 23:07:13 +02001484 }
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001485
Alexander Usyskin9d27e732016-07-26 01:06:07 +03001486 if (mei_cl_is_fixed_address(cl) || cl == &dev->iamthif_cl)
Alexander Usyskine51dfa52016-07-26 01:06:02 +03001487 return 0;
1488
Alexander Usyskin46978ad2016-07-26 01:06:03 +03001489 /* HW currently supports only one pending read */
1490 if (cl->rx_flow_ctrl_creds)
1491 return -EBUSY;
1492
Tomas Winkler3030dc02016-07-26 01:06:05 +03001493 cb = mei_cl_enqueue_ctrl_wr_cb(cl, length, MEI_FOP_READ, fp);
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001494 if (!cb)
1495 return -ENOMEM;
1496
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001497 rets = pm_runtime_get(dev->dev);
Tomas Winkler04bb1392014-03-18 22:52:04 +02001498 if (rets < 0 && rets != -EINPROGRESS) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001499 pm_runtime_put_noidle(dev->dev);
Tomas Winkler04bb1392014-03-18 22:52:04 +02001500 cl_err(dev, cl, "rpm: get failed %d\n", rets);
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001501 goto nortpm;
Tomas Winkler04bb1392014-03-18 22:52:04 +02001502 }
1503
Alexander Usyskin46978ad2016-07-26 01:06:03 +03001504 rets = 0;
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001505 if (mei_hbuf_acquire(dev)) {
Alexander Usyskin86113502014-03-31 17:59:24 +03001506 rets = mei_hbm_cl_flow_control_req(dev, cl);
1507 if (rets < 0)
Tomas Winkler04bb1392014-03-18 22:52:04 +02001508 goto out;
Tomas Winkler04bb1392014-03-18 22:52:04 +02001509
Alexander Usyskin46978ad2016-07-26 01:06:03 +03001510 list_move_tail(&cb->list, &cl->rd_pending);
Tomas Winkler9ca90502013-01-08 23:07:13 +02001511 }
Alexander Usyskin46978ad2016-07-26 01:06:03 +03001512 cl->rx_flow_ctrl_creds++;
Chao Biaccb8842014-02-12 21:27:25 +02001513
Tomas Winkler04bb1392014-03-18 22:52:04 +02001514out:
1515 cl_dbg(dev, cl, "rpm: autosuspend\n");
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001516 pm_runtime_mark_last_busy(dev->dev);
1517 pm_runtime_put_autosuspend(dev->dev);
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001518nortpm:
Tomas Winkler04bb1392014-03-18 22:52:04 +02001519 if (rets)
1520 mei_io_cb_free(cb);
1521
Tomas Winkler9ca90502013-01-08 23:07:13 +02001522 return rets;
1523}
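/*
 * Illustrative sketch only, not part of the driver: queue a read with
 * mei_cl_read_start() and wait until the completion path moves a
 * callback to cl->rd_completed and wakes cl->rx_wait (see
 * mei_cl_complete() below). The helper is hypothetical; it assumes
 * dev->device_lock is held on entry and mirrors the lock drop/retake
 * pattern used elsewhere in this file.
 */
static int __maybe_unused example_cl_wait_read(struct mei_cl *cl, size_t length,
					       const struct file *fp)
{
	struct mei_device *dev = cl->dev;
	int rets;

	rets = mei_cl_read_start(cl, length, fp);
	if (rets && rets != -EBUSY)
		return rets;	/* -EBUSY: a read is already pending, just wait */

	mutex_unlock(&dev->device_lock);
	rets = wait_event_interruptible(cl->rx_wait,
					!list_empty(&cl->rd_completed) ||
					!mei_cl_is_connected(cl));
	mutex_lock(&dev->device_lock);

	if (rets < 0)
		return rets;	/* interrupted */

	return mei_cl_is_connected(cl) ? 0 : -ENODEV;
}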
1524
Tomas Winkler074b4c02013-02-06 14:06:44 +02001525/**
Tomas Winkler9d098192014-02-19 17:35:48 +02001526 * mei_cl_irq_write - write a message to device
Tomas Winkler21767542013-06-23 09:36:59 +03001527 * from the interrupt thread context
1528 *
1529 * @cl: client
1530 * @cb: callback block.
Tomas Winkler21767542013-06-23 09:36:59 +03001531 * @cmpl_list: complete list.
1532 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001533 * Return: 0 on success, <0 on failure.
Tomas Winkler21767542013-06-23 09:36:59 +03001534 */
Tomas Winkler9d098192014-02-19 17:35:48 +02001535int mei_cl_irq_write(struct mei_cl *cl, struct mei_cl_cb *cb,
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001536 struct list_head *cmpl_list)
Tomas Winkler21767542013-06-23 09:36:59 +03001537{
Tomas Winkler136698e2013-09-16 23:44:44 +03001538 struct mei_device *dev;
1539 struct mei_msg_data *buf;
Tomas Winkler21767542013-06-23 09:36:59 +03001540 struct mei_msg_hdr mei_hdr;
Tomas Winkler136698e2013-09-16 23:44:44 +03001541 size_t len;
1542 u32 msg_slots;
Tomas Winkler9d098192014-02-19 17:35:48 +02001543 int slots;
Tomas Winkler2ebf8c92013-09-16 23:44:43 +03001544 int rets;
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001545 bool first_chunk;
Tomas Winkler21767542013-06-23 09:36:59 +03001546
Tomas Winkler136698e2013-09-16 23:44:44 +03001547 if (WARN_ON(!cl || !cl->dev))
1548 return -ENODEV;
1549
1550 dev = cl->dev;
1551
Tomas Winkler5db75142015-02-10 10:39:42 +02001552 buf = &cb->buf;
Tomas Winkler136698e2013-09-16 23:44:44 +03001553
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001554 first_chunk = cb->buf_idx == 0;
1555
Tomas Winkler4034b812016-07-26 01:06:04 +03001556 rets = first_chunk ? mei_cl_tx_flow_ctrl_creds(cl) : 1;
Tomas Winkler136698e2013-09-16 23:44:44 +03001557 if (rets < 0)
Alexander Usyskine09ee852016-12-14 17:56:52 +02001558 goto err;
Tomas Winkler136698e2013-09-16 23:44:44 +03001559
1560 if (rets == 0) {
Tomas Winkler04bb1392014-03-18 22:52:04 +02001561 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
Tomas Winkler136698e2013-09-16 23:44:44 +03001562 return 0;
1563 }
1564
Tomas Winkler9d098192014-02-19 17:35:48 +02001565 slots = mei_hbuf_empty_slots(dev);
Tomas Winkler136698e2013-09-16 23:44:44 +03001566 len = buf->size - cb->buf_idx;
1567 msg_slots = mei_data2slots(len);
1568
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001569 mei_hdr.host_addr = mei_cl_host_addr(cl);
Alexander Usyskind49ed642015-05-04 09:43:54 +03001570 mei_hdr.me_addr = mei_cl_me_id(cl);
Tomas Winkler21767542013-06-23 09:36:59 +03001571 mei_hdr.reserved = 0;
Tomas Winkler479327f2013-12-17 15:56:56 +02001572 mei_hdr.internal = cb->internal;
Tomas Winkler21767542013-06-23 09:36:59 +03001573
Tomas Winkler9d098192014-02-19 17:35:48 +02001574 if (slots >= msg_slots) {
Tomas Winkler21767542013-06-23 09:36:59 +03001575 mei_hdr.length = len;
1576 mei_hdr.msg_complete = 1;
1577 /* Split the message only if we can write the whole host buffer */
Tomas Winkler9d098192014-02-19 17:35:48 +02001578 } else if (slots == dev->hbuf_depth) {
1579 msg_slots = slots;
1580 len = (slots * sizeof(u32)) - sizeof(struct mei_msg_hdr);
Tomas Winkler21767542013-06-23 09:36:59 +03001581 mei_hdr.length = len;
1582 mei_hdr.msg_complete = 0;
1583 } else {
1584 /* wait for next time the host buffer is empty */
1585 return 0;
1586 }
1587
Alexander Usyskin35bf7692016-02-17 18:27:34 +02001588 cl_dbg(dev, cl, "buf: size = %zu idx = %zu\n",
Tomas Winkler5db75142015-02-10 10:39:42 +02001589 cb->buf.size, cb->buf_idx);
Tomas Winkler21767542013-06-23 09:36:59 +03001590
Tomas Winkler136698e2013-09-16 23:44:44 +03001591 rets = mei_write_message(dev, &mei_hdr, buf->data + cb->buf_idx);
Alexander Usyskine09ee852016-12-14 17:56:52 +02001592 if (rets)
1593 goto err;
Tomas Winkler21767542013-06-23 09:36:59 +03001594
1595 cl->status = 0;
Tomas Winkler4dfaa9f2013-06-23 09:37:00 +03001596 cl->writing_state = MEI_WRITING;
Tomas Winkler21767542013-06-23 09:36:59 +03001597 cb->buf_idx += mei_hdr.length;
Tomas Winkler86601722015-02-10 10:39:40 +02001598 cb->completed = mei_hdr.msg_complete == 1;
Tomas Winkler4dfaa9f2013-06-23 09:37:00 +03001599
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001600 if (first_chunk) {
Alexander Usyskine09ee852016-12-14 17:56:52 +02001601 if (mei_cl_tx_flow_ctrl_creds_reduce(cl)) {
1602 rets = -EIO;
1603 goto err;
1604 }
Tomas Winkler21767542013-06-23 09:36:59 +03001605 }
1606
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001607 if (mei_hdr.msg_complete)
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001608 list_move_tail(&cb->list, &dev->write_waiting_list);
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001609
Tomas Winkler21767542013-06-23 09:36:59 +03001610 return 0;
Alexander Usyskine09ee852016-12-14 17:56:52 +02001611
1612err:
1613 cl->status = rets;
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001614 list_move_tail(&cb->list, cmpl_list);
Alexander Usyskine09ee852016-12-14 17:56:52 +02001615 return rets;
Tomas Winkler21767542013-06-23 09:36:59 +03001616}
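/*
 * Illustrative sketch only, not part of the driver: the fragmentation
 * arithmetic used by mei_cl_irq_write() above. A fragment carries at
 * most slots * sizeof(u32) - sizeof(struct mei_msg_hdr) payload bytes;
 * note that the function above splits a message only when the whole
 * host buffer (dev->hbuf_depth slots) is free. The helper name is
 * hypothetical.
 */
static size_t __maybe_unused example_irq_write_chunk(int slots, size_t remaining)
{
	size_t max_payload;

	if (slots <= 0)
		return 0;

	max_payload = slots * sizeof(u32) - sizeof(struct mei_msg_hdr);
	return min(remaining, max_payload);
}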
1617
1618/**
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001619 * mei_cl_write - submit a write cb to mei device
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001620 * assumes device_lock is locked
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001621 *
1622 * @cl: host client
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001623 * @cb: write callback with filled data
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001624 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001625 * Return: number of bytes sent on success, <0 on failure.
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001626 */
Alexander Usyskine0cb6b22016-11-08 18:26:08 +02001627int mei_cl_write(struct mei_cl *cl, struct mei_cl_cb *cb)
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001628{
1629 struct mei_device *dev;
1630 struct mei_msg_data *buf;
1631 struct mei_msg_hdr mei_hdr;
Alexander Usyskin23253c32015-07-23 10:43:11 +03001632 int size;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001633 int rets;
Alexander Usyskine0cb6b22016-11-08 18:26:08 +02001634 bool blocking;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001635
1636 if (WARN_ON(!cl || !cl->dev))
1637 return -ENODEV;
1638
1639 if (WARN_ON(!cb))
1640 return -EINVAL;
1641
1642 dev = cl->dev;
1643
Tomas Winkler5db75142015-02-10 10:39:42 +02001644 buf = &cb->buf;
Alexander Usyskin23253c32015-07-23 10:43:11 +03001645 size = buf->size;
Alexander Usyskine0cb6b22016-11-08 18:26:08 +02001646 blocking = cb->blocking;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001647
Alexander Usyskin23253c32015-07-23 10:43:11 +03001648 cl_dbg(dev, cl, "size=%d\n", size);
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001649
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001650 rets = pm_runtime_get(dev->dev);
Tomas Winkler04bb1392014-03-18 22:52:04 +02001651 if (rets < 0 && rets != -EINPROGRESS) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001652 pm_runtime_put_noidle(dev->dev);
Tomas Winkler04bb1392014-03-18 22:52:04 +02001653 cl_err(dev, cl, "rpm: get failed %d\n", rets);
Alexander Usyskin6cbb0972016-02-10 23:57:26 +02001654 goto free;
Tomas Winkler04bb1392014-03-18 22:52:04 +02001655 }
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001656
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001657 cb->buf_idx = 0;
1658 cl->writing_state = MEI_IDLE;
1659
Alexander Usyskin1df629e2015-05-04 09:43:56 +03001660 mei_hdr.host_addr = mei_cl_host_addr(cl);
Alexander Usyskind49ed642015-05-04 09:43:54 +03001661 mei_hdr.me_addr = mei_cl_me_id(cl);
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001662 mei_hdr.reserved = 0;
1663 mei_hdr.msg_complete = 0;
1664 mei_hdr.internal = cb->internal;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001665
Tomas Winkler4034b812016-07-26 01:06:04 +03001666 rets = mei_cl_tx_flow_ctrl_creds(cl);
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001667 if (rets < 0)
1668 goto err;
1669
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001670 if (rets == 0) {
1671 cl_dbg(dev, cl, "No flow control credentials: not sending.\n");
Alexander Usyskin23253c32015-07-23 10:43:11 +03001672 rets = size;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001673 goto out;
1674 }
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001675 if (!mei_hbuf_acquire(dev)) {
1676 cl_dbg(dev, cl, "Cannot acquire the host buffer: not sending.\n");
Alexander Usyskin23253c32015-07-23 10:43:11 +03001677 rets = size;
Tomas Winkler6aae48f2014-02-19 17:35:47 +02001678 goto out;
1679 }
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001680
1681 /* Check for a maximum length */
Alexander Usyskin23253c32015-07-23 10:43:11 +03001682 if (size > mei_hbuf_max_len(dev)) {
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001683 mei_hdr.length = mei_hbuf_max_len(dev);
1684 mei_hdr.msg_complete = 0;
1685 } else {
Alexander Usyskin23253c32015-07-23 10:43:11 +03001686 mei_hdr.length = size;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001687 mei_hdr.msg_complete = 1;
1688 }
1689
Tomas Winkler2ebf8c92013-09-16 23:44:43 +03001690 rets = mei_write_message(dev, &mei_hdr, buf->data);
1691 if (rets)
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001692 goto err;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001693
Tomas Winkler4034b812016-07-26 01:06:04 +03001694 rets = mei_cl_tx_flow_ctrl_creds_reduce(cl);
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001695 if (rets)
1696 goto err;
1697
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001698 cl->writing_state = MEI_WRITING;
1699 cb->buf_idx = mei_hdr.length;
Tomas Winkler86601722015-02-10 10:39:40 +02001700 cb->completed = mei_hdr.msg_complete == 1;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001701
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001702out:
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001703 if (mei_hdr.msg_complete)
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001704 list_add_tail(&cb->list, &dev->write_waiting_list);
Alexander Usyskinb8b7303572015-05-07 15:53:58 +03001705 else
Alexander Usyskin962ff7b2017-01-27 16:32:45 +02001706 list_add_tail(&cb->list, &dev->write_list);
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001707
Alexander Usyskin23253c32015-07-23 10:43:11 +03001708 cb = NULL;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001709 if (blocking && cl->writing_state != MEI_WRITE_COMPLETE) {
1710
1711 mutex_unlock(&dev->device_lock);
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02001712 rets = wait_event_interruptible(cl->tx_wait,
Alexander Usyskin0faf6a32016-02-07 23:35:34 +02001713 cl->writing_state == MEI_WRITE_COMPLETE ||
1714 (!mei_cl_is_connected(cl)));
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001715 mutex_lock(&dev->device_lock);
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02001716 /* wait_event_interruptible returns -ERESTARTSYS */
1717 if (rets) {
1718 if (signal_pending(current))
1719 rets = -EINTR;
1720 goto err;
1721 }
Alexander Usyskin0faf6a32016-02-07 23:35:34 +02001722 if (cl->writing_state != MEI_WRITE_COMPLETE) {
1723 rets = -EFAULT;
1724 goto err;
1725 }
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001726 }
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +02001727
Alexander Usyskin23253c32015-07-23 10:43:11 +03001728 rets = size;
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001729err:
Tomas Winkler04bb1392014-03-18 22:52:04 +02001730 cl_dbg(dev, cl, "rpm: autosuspend\n");
Tomas Winkler2bf94cab2014-09-29 16:31:42 +03001731 pm_runtime_mark_last_busy(dev->dev);
1732 pm_runtime_put_autosuspend(dev->dev);
Alexander Usyskin6cbb0972016-02-10 23:57:26 +02001733free:
1734 mei_io_cb_free(cb);
Tomas Winkler04bb1392014-03-18 22:52:04 +02001735
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001736 return rets;
1737}
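/*
 * Illustrative sketch only, not part of the driver: a blocking write
 * issued by a caller that holds dev->device_lock. The callback is
 * assumed to come from mei_cl_alloc_cb(), defined earlier in this
 * file; the wrapper name and error policy are hypothetical.
 * mei_cl_write() consumes the callback on both success and failure.
 */
static ssize_t __maybe_unused example_cl_write_blocking(struct mei_cl *cl,
							const void *data,
							size_t length)
{
	struct mei_cl_cb *cb;

	cb = mei_cl_alloc_cb(cl, length, MEI_FOP_WRITE, NULL);
	if (!cb)
		return -ENOMEM;

	memcpy(cb->buf.data, data, length);
	cb->blocking = true;

	/* returns the number of bytes queued for sending or a negative errno */
	return mei_cl_write(cl, cb);
}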
1738
1739
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001740/**
1741 * mei_cl_complete - processes completed operation for a client
1742 *
1743 * @cl: private data of the file object.
1744 * @cb: callback block.
1745 */
1746void mei_cl_complete(struct mei_cl *cl, struct mei_cl_cb *cb)
1747{
Alexander Usyskina1809d32015-05-07 15:53:59 +03001748 struct mei_device *dev = cl->dev;
1749
Tomas Winkler3c666182015-05-04 09:43:52 +03001750 switch (cb->fop_type) {
1751 case MEI_FOP_WRITE:
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001752 mei_io_cb_free(cb);
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001753 cl->writing_state = MEI_WRITE_COMPLETE;
Alexander Usyskina1809d32015-05-07 15:53:59 +03001754 if (waitqueue_active(&cl->tx_wait)) {
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001755 wake_up_interruptible(&cl->tx_wait);
Alexander Usyskina1809d32015-05-07 15:53:59 +03001756 } else {
1757 pm_runtime_mark_last_busy(dev->dev);
1758 pm_request_autosuspend(dev->dev);
1759 }
Tomas Winkler3c666182015-05-04 09:43:52 +03001760 break;
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001761
Tomas Winkler3c666182015-05-04 09:43:52 +03001762 case MEI_FOP_READ:
Tomas Winklera9bed612015-02-10 10:39:46 +02001763 list_add_tail(&cb->list, &cl->rd_completed);
Alexander Usyskin46978ad2016-07-26 01:06:03 +03001764 if (!mei_cl_is_fixed_address(cl) &&
1765 !WARN_ON(!cl->rx_flow_ctrl_creds))
1766 cl->rx_flow_ctrl_creds--;
Tomas Winklera1f9ae22016-02-07 23:35:30 +02001767 if (!mei_cl_bus_rx_event(cl))
1768 wake_up_interruptible(&cl->rx_wait);
Tomas Winkler3c666182015-05-04 09:43:52 +03001769 break;
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001770
Tomas Winkler3c666182015-05-04 09:43:52 +03001771 case MEI_FOP_CONNECT:
1772 case MEI_FOP_DISCONNECT:
Tomas Winkler51678cc2015-07-26 09:54:18 +03001773 case MEI_FOP_NOTIFY_STOP:
1774 case MEI_FOP_NOTIFY_START:
Tomas Winkler3c666182015-05-04 09:43:52 +03001775 if (waitqueue_active(&cl->wait))
1776 wake_up(&cl->wait);
1777
1778 break;
Alexander Usyskin6a8d6482016-04-17 12:16:03 -04001779 case MEI_FOP_DISCONNECT_RSP:
1780 mei_io_cb_free(cb);
1781 mei_cl_set_disconnected(cl);
1782 break;
Tomas Winkler3c666182015-05-04 09:43:52 +03001783 default:
1784		WARN_ON(1);
Tomas Winklerdb086fa2013-05-12 15:34:45 +03001785 }
1786}
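/*
 * Illustrative sketch only, not part of the driver: how a completion
 * list filled by the interrupt handlers could be drained, calling
 * mei_cl_complete() for each callback with dev->device_lock held.
 * The back-pointer cb->cl and the helper name are assumptions here;
 * the real dispatch lives in the interrupt handling code.
 */
static void __maybe_unused example_drain_compl_list(struct list_head *cmpl_list)
{
	struct mei_cl_cb *cb, *next;

	list_for_each_entry_safe(cb, next, cmpl_list, list) {
		struct mei_cl *cl = cb->cl;

		/* detach first: mei_cl_complete() may re-queue read callbacks */
		list_del_init(&cb->list);
		mei_cl_complete(cl, cb);
	}
}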
1787
Tomas Winkler4234a6d2013-04-08 21:56:37 +03001788
1789/**
Tomas Winkler074b4c02013-02-06 14:06:44 +02001790 * mei_cl_all_disconnect - disconnect forcefully all connected clients
1791 *
Alexander Usyskina8605ea2014-09-29 16:31:49 +03001792 * @dev: mei device
Tomas Winkler074b4c02013-02-06 14:06:44 +02001793 */
Tomas Winkler074b4c02013-02-06 14:06:44 +02001794void mei_cl_all_disconnect(struct mei_device *dev)
1795{
Tomas Winkler31f88f52014-02-17 15:13:25 +02001796 struct mei_cl *cl;
Tomas Winkler074b4c02013-02-06 14:06:44 +02001797
Tomas Winkler3c666182015-05-04 09:43:52 +03001798 list_for_each_entry(cl, &dev->file_list, link)
1799 mei_cl_set_disconnected(cl);
Tomas Winkler074b4c02013-02-06 14:06:44 +02001800}