blob: 678531c08c6d1e710644b08fb3b808b7c9b09da6 [file] [log] [blame]
Oren Weil3ce72722011-05-15 13:43:43 +03001/*
2 *
3 * Intel Management Engine Interface (Intel MEI) Linux driver
Tomas Winkler733ba91c2012-02-09 19:25:53 +02004 * Copyright (c) 2003-2012, Intel Corporation.
Oren Weil3ce72722011-05-15 13:43:43 +03005 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 */
16
17#include <linux/pci.h>
Tomas Winkler06ecd642013-02-06 14:06:42 +020018
19#include <linux/kthread.h>
20#include <linux/interrupt.h>
Tomas Winkler47a73802012-12-25 19:06:03 +020021
22#include "mei_dev.h"
Tomas Winkler06ecd642013-02-06 14:06:42 +020023#include "hbm.h"
24
Tomas Winkler6e4cd272014-03-11 14:49:23 +020025#include "hw-me.h"
26#include "hw-me-regs.h"
Tomas Winkler06ecd642013-02-06 14:06:42 +020027
Tomas Winkler3a65dd42012-12-25 19:06:06 +020028/**
Tomas Winklerb68301e2013-03-27 16:58:29 +020029 * mei_me_reg_read - Reads 32bit data from the mei device
Tomas Winkler3a65dd42012-12-25 19:06:06 +020030 *
31 * @dev: the device structure
32 * @offset: offset from which to read the data
33 *
34 * returns register value (u32)
35 */
Tomas Winklerb68301e2013-03-27 16:58:29 +020036static inline u32 mei_me_reg_read(const struct mei_me_hw *hw,
Tomas Winkler3a65dd42012-12-25 19:06:06 +020037 unsigned long offset)
38{
Tomas Winkler52c34562013-02-06 14:06:40 +020039 return ioread32(hw->mem_addr + offset);
Tomas Winkler3a65dd42012-12-25 19:06:06 +020040}
Oren Weil3ce72722011-05-15 13:43:43 +030041
42
43/**
Tomas Winklerb68301e2013-03-27 16:58:29 +020044 * mei_me_reg_write - Writes 32bit data to the mei device
Tomas Winkler3a65dd42012-12-25 19:06:06 +020045 *
46 * @dev: the device structure
47 * @offset: offset from which to write the data
48 * @value: register value to write (u32)
49 */
Tomas Winklerb68301e2013-03-27 16:58:29 +020050static inline void mei_me_reg_write(const struct mei_me_hw *hw,
Tomas Winkler3a65dd42012-12-25 19:06:06 +020051 unsigned long offset, u32 value)
52{
Tomas Winkler52c34562013-02-06 14:06:40 +020053 iowrite32(value, hw->mem_addr + offset);
Tomas Winkler3a65dd42012-12-25 19:06:06 +020054}
55
56/**
Tomas Winklerb68301e2013-03-27 16:58:29 +020057 * mei_me_mecbrw_read - Reads 32bit data from ME circular buffer
Tomas Winklerd0252842013-01-08 23:07:24 +020058 * read window register
Tomas Winkler3a65dd42012-12-25 19:06:06 +020059 *
60 * @dev: the device structure
61 *
Tomas Winklerd0252842013-01-08 23:07:24 +020062 * returns ME_CB_RW register value (u32)
Tomas Winkler3a65dd42012-12-25 19:06:06 +020063 */
Tomas Winkler827eef52013-02-06 14:06:41 +020064static u32 mei_me_mecbrw_read(const struct mei_device *dev)
Tomas Winkler3a65dd42012-12-25 19:06:06 +020065{
Tomas Winklerb68301e2013-03-27 16:58:29 +020066 return mei_me_reg_read(to_me_hw(dev), ME_CB_RW);
Tomas Winkler3a65dd42012-12-25 19:06:06 +020067}
68/**
Tomas Winklerb68301e2013-03-27 16:58:29 +020069 * mei_me_mecsr_read - Reads 32bit data from the ME CSR
Tomas Winkler3a65dd42012-12-25 19:06:06 +020070 *
71 * @dev: the device structure
72 *
73 * returns ME_CSR_HA register value (u32)
74 */
Tomas Winklerb68301e2013-03-27 16:58:29 +020075static inline u32 mei_me_mecsr_read(const struct mei_me_hw *hw)
Tomas Winkler3a65dd42012-12-25 19:06:06 +020076{
Tomas Winklerb68301e2013-03-27 16:58:29 +020077 return mei_me_reg_read(hw, ME_CSR_HA);
Tomas Winkler3a65dd42012-12-25 19:06:06 +020078}
79
80/**
Tomas Winklerd0252842013-01-08 23:07:24 +020081 * mei_hcsr_read - Reads 32bit data from the host CSR
82 *
83 * @dev: the device structure
84 *
85 * returns H_CSR register value (u32)
86 */
Tomas Winkler52c34562013-02-06 14:06:40 +020087static inline u32 mei_hcsr_read(const struct mei_me_hw *hw)
Tomas Winklerd0252842013-01-08 23:07:24 +020088{
Tomas Winklerb68301e2013-03-27 16:58:29 +020089 return mei_me_reg_read(hw, H_CSR);
Tomas Winklerd0252842013-01-08 23:07:24 +020090}
91
92/**
93 * mei_hcsr_set - writes H_CSR register to the mei device,
Oren Weil3ce72722011-05-15 13:43:43 +030094 * and ignores the H_IS bit for it is write-one-to-zero.
95 *
96 * @dev: the device structure
97 */
Tomas Winkler52c34562013-02-06 14:06:40 +020098static inline void mei_hcsr_set(struct mei_me_hw *hw, u32 hcsr)
Oren Weil3ce72722011-05-15 13:43:43 +030099{
Tomas Winkler88eb99f2013-01-08 23:07:30 +0200100 hcsr &= ~H_IS;
Tomas Winklerb68301e2013-03-27 16:58:29 +0200101 mei_me_reg_write(hw, H_CSR, hcsr);
Oren Weil3ce72722011-05-15 13:43:43 +0300102}
103
Tomas Winkler1bd30b62014-09-29 16:31:43 +0300104/**
105 * mei_me_fw_status - read fw status register from pci config space
106 *
107 * @dev: mei device
108 * @fw_status: fw status register values
109 */
110static int mei_me_fw_status(struct mei_device *dev,
111 struct mei_fw_status *fw_status)
112{
113 const struct mei_fw_status *fw_src = &dev->cfg->fw_status;
114 struct pci_dev *pdev = to_pci_dev(dev->dev);
115 int ret;
116 int i;
117
118 if (!fw_status)
119 return -EINVAL;
120
121 fw_status->count = fw_src->count;
122 for (i = 0; i < fw_src->count && i < MEI_FW_STATUS_MAX; i++) {
123 ret = pci_read_config_dword(pdev,
124 fw_src->status[i], &fw_status->status[i]);
125 if (ret)
126 return ret;
127 }
128
129 return 0;
130}
Tomas Winklere7e0c232013-01-08 23:07:31 +0200131
132/**
Masanari Iida393b1482013-04-05 01:05:05 +0900133 * mei_me_hw_config - configure hw dependent settings
Tomas Winklere7e0c232013-01-08 23:07:31 +0200134 *
135 * @dev: mei device
136 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200137static void mei_me_hw_config(struct mei_device *dev)
Tomas Winklere7e0c232013-01-08 23:07:31 +0200138{
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200139 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winkler52c34562013-02-06 14:06:40 +0200140 u32 hcsr = mei_hcsr_read(to_me_hw(dev));
Tomas Winklere7e0c232013-01-08 23:07:31 +0200141 /* Doesn't change in runtime */
142 dev->hbuf_depth = (hcsr & H_CBD) >> 24;
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200143
144 hw->pg_state = MEI_PG_OFF;
Tomas Winklere7e0c232013-01-08 23:07:31 +0200145}
Tomas Winkler964a2332014-03-18 22:51:59 +0200146
147/**
148 * mei_me_pg_state - translate internal pg state
149 * to the mei power gating state
150 *
151 * @hw - me hardware
152 * returns: MEI_PG_OFF if aliveness is on and MEI_PG_ON otherwise
153 */
154static inline enum mei_pg_state mei_me_pg_state(struct mei_device *dev)
155{
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200156 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300157
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200158 return hw->pg_state;
Tomas Winkler964a2332014-03-18 22:51:59 +0200159}
160
Oren Weil3ce72722011-05-15 13:43:43 +0300161/**
Tomas Winklerd0252842013-01-08 23:07:24 +0200162 * mei_clear_interrupts - clear and stop interrupts
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200163 *
164 * @dev: the device structure
165 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200166static void mei_me_intr_clear(struct mei_device *dev)
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200167{
Tomas Winkler52c34562013-02-06 14:06:40 +0200168 struct mei_me_hw *hw = to_me_hw(dev);
169 u32 hcsr = mei_hcsr_read(hw);
Tomas Winkler92db1552014-09-29 16:31:37 +0300170
Tomas Winkler9ea73dd2013-01-08 23:07:28 +0200171 if ((hcsr & H_IS) == H_IS)
Tomas Winklerb68301e2013-03-27 16:58:29 +0200172 mei_me_reg_write(hw, H_CSR, hcsr);
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200173}
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200174/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200175 * mei_me_intr_enable - enables mei device interrupts
Oren Weil3ce72722011-05-15 13:43:43 +0300176 *
177 * @dev: the device structure
178 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200179static void mei_me_intr_enable(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300180{
Tomas Winkler52c34562013-02-06 14:06:40 +0200181 struct mei_me_hw *hw = to_me_hw(dev);
182 u32 hcsr = mei_hcsr_read(hw);
Tomas Winkler92db1552014-09-29 16:31:37 +0300183
Tomas Winkler9ea73dd2013-01-08 23:07:28 +0200184 hcsr |= H_IE;
Tomas Winkler52c34562013-02-06 14:06:40 +0200185 mei_hcsr_set(hw, hcsr);
Oren Weil3ce72722011-05-15 13:43:43 +0300186}
187
188/**
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200189 * mei_disable_interrupts - disables mei device interrupts
Oren Weil3ce72722011-05-15 13:43:43 +0300190 *
191 * @dev: the device structure
192 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200193static void mei_me_intr_disable(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300194{
Tomas Winkler52c34562013-02-06 14:06:40 +0200195 struct mei_me_hw *hw = to_me_hw(dev);
196 u32 hcsr = mei_hcsr_read(hw);
Tomas Winkler92db1552014-09-29 16:31:37 +0300197
Tomas Winkler9ea73dd2013-01-08 23:07:28 +0200198 hcsr &= ~H_IE;
Tomas Winkler52c34562013-02-06 14:06:40 +0200199 mei_hcsr_set(hw, hcsr);
Oren Weil3ce72722011-05-15 13:43:43 +0300200}
201
Tomas Winkleradfba322013-01-08 23:07:27 +0200202/**
Tomas Winkler68f8ea12013-03-10 13:56:07 +0200203 * mei_me_hw_reset_release - release device from the reset
204 *
205 * @dev: the device structure
206 */
207static void mei_me_hw_reset_release(struct mei_device *dev)
208{
209 struct mei_me_hw *hw = to_me_hw(dev);
210 u32 hcsr = mei_hcsr_read(hw);
211
212 hcsr |= H_IG;
213 hcsr &= ~H_RST;
214 mei_hcsr_set(hw, hcsr);
Tomas Winklerb04ada92014-05-12 12:19:39 +0300215
216 /* complete this write before we set host ready on another CPU */
217 mmiowb();
Tomas Winkler68f8ea12013-03-10 13:56:07 +0200218}
219/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200220 * mei_me_hw_reset - resets fw via mei csr register.
Tomas Winkleradfba322013-01-08 23:07:27 +0200221 *
222 * @dev: the device structure
Masanari Iida393b1482013-04-05 01:05:05 +0900223 * @intr_enable: if interrupt should be enabled after reset.
Tomas Winkleradfba322013-01-08 23:07:27 +0200224 */
Tomas Winklerc20c68d2013-06-23 10:42:49 +0300225static int mei_me_hw_reset(struct mei_device *dev, bool intr_enable)
Tomas Winkleradfba322013-01-08 23:07:27 +0200226{
Tomas Winkler52c34562013-02-06 14:06:40 +0200227 struct mei_me_hw *hw = to_me_hw(dev);
228 u32 hcsr = mei_hcsr_read(hw);
Tomas Winkleradfba322013-01-08 23:07:27 +0200229
Tomas Winklerff960662013-07-30 14:11:51 +0300230 hcsr |= H_RST | H_IG | H_IS;
Tomas Winkleradfba322013-01-08 23:07:27 +0200231
232 if (intr_enable)
233 hcsr |= H_IE;
234 else
Tomas Winklerff960662013-07-30 14:11:51 +0300235 hcsr &= ~H_IE;
Tomas Winkleradfba322013-01-08 23:07:27 +0200236
Tomas Winkler07cd7be2014-05-12 12:19:40 +0300237 dev->recvd_hw_ready = false;
Tomas Winklerff960662013-07-30 14:11:51 +0300238 mei_me_reg_write(hw, H_CSR, hcsr);
Tomas Winkleradfba322013-01-08 23:07:27 +0200239
Tomas Winklerc40765d2014-05-12 12:19:41 +0300240 /*
241 * Host reads the H_CSR once to ensure that the
242 * posted write to H_CSR completes.
243 */
244 hcsr = mei_hcsr_read(hw);
245
246 if ((hcsr & H_RST) == 0)
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300247 dev_warn(dev->dev, "H_RST is not set = 0x%08X", hcsr);
Tomas Winklerc40765d2014-05-12 12:19:41 +0300248
249 if ((hcsr & H_RDY) == H_RDY)
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300250 dev_warn(dev->dev, "H_RDY is not cleared 0x%08X", hcsr);
Tomas Winklerc40765d2014-05-12 12:19:41 +0300251
Tomas Winkler33ec0822014-01-12 00:36:09 +0200252 if (intr_enable == false)
Tomas Winkler68f8ea12013-03-10 13:56:07 +0200253 mei_me_hw_reset_release(dev);
Tomas Winkleradfba322013-01-08 23:07:27 +0200254
Tomas Winklerc20c68d2013-06-23 10:42:49 +0300255 return 0;
Tomas Winkleradfba322013-01-08 23:07:27 +0200256}
257
Tomas Winkler115ba282013-01-08 23:07:29 +0200258/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200259 * mei_me_host_set_ready - enable device
Tomas Winkler115ba282013-01-08 23:07:29 +0200260 *
261 * @dev - mei device
262 * returns bool
263 */
264
Tomas Winkler827eef52013-02-06 14:06:41 +0200265static void mei_me_host_set_ready(struct mei_device *dev)
Tomas Winkler115ba282013-01-08 23:07:29 +0200266{
Tomas Winkler52c34562013-02-06 14:06:40 +0200267 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300268
Tomas Winklerb04ada92014-05-12 12:19:39 +0300269 hw->host_hw_state = mei_hcsr_read(hw);
Tomas Winkler52c34562013-02-06 14:06:40 +0200270 hw->host_hw_state |= H_IE | H_IG | H_RDY;
271 mei_hcsr_set(hw, hw->host_hw_state);
Tomas Winkler115ba282013-01-08 23:07:29 +0200272}
273/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200274 * mei_me_host_is_ready - check whether the host has turned ready
Tomas Winkler115ba282013-01-08 23:07:29 +0200275 *
276 * @dev - mei device
277 * returns bool
278 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200279static bool mei_me_host_is_ready(struct mei_device *dev)
Tomas Winkler115ba282013-01-08 23:07:29 +0200280{
Tomas Winkler52c34562013-02-06 14:06:40 +0200281 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300282
Tomas Winkler52c34562013-02-06 14:06:40 +0200283 hw->host_hw_state = mei_hcsr_read(hw);
284 return (hw->host_hw_state & H_RDY) == H_RDY;
Tomas Winkler115ba282013-01-08 23:07:29 +0200285}
286
287/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200288 * mei_me_hw_is_ready - check whether the me(hw) has turned ready
Tomas Winkler115ba282013-01-08 23:07:29 +0200289 *
290 * @dev - mei device
291 * returns bool
292 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200293static bool mei_me_hw_is_ready(struct mei_device *dev)
Tomas Winkler115ba282013-01-08 23:07:29 +0200294{
Tomas Winkler52c34562013-02-06 14:06:40 +0200295 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300296
Tomas Winklerb68301e2013-03-27 16:58:29 +0200297 hw->me_hw_state = mei_me_mecsr_read(hw);
Tomas Winkler52c34562013-02-06 14:06:40 +0200298 return (hw->me_hw_state & ME_RDY_HRA) == ME_RDY_HRA;
Tomas Winkler115ba282013-01-08 23:07:29 +0200299}
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200300
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200301static int mei_me_hw_ready_wait(struct mei_device *dev)
302{
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200303 mutex_unlock(&dev->device_lock);
Alexander Usyskin2c2b93e2014-08-12 20:16:03 +0300304 wait_event_timeout(dev->wait_hw_ready,
Tomas Winklerdab9bf42013-07-17 15:13:17 +0300305 dev->recvd_hw_ready,
Tomas Winkler7d93e582014-01-14 23:10:10 +0200306 mei_secs_to_jiffies(MEI_HW_READY_TIMEOUT));
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200307 mutex_lock(&dev->device_lock);
Alexander Usyskin2c2b93e2014-08-12 20:16:03 +0300308 if (!dev->recvd_hw_ready) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300309 dev_err(dev->dev, "wait hw ready failed\n");
Alexander Usyskin2c2b93e2014-08-12 20:16:03 +0300310 return -ETIME;
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200311 }
312
313 dev->recvd_hw_ready = false;
314 return 0;
315}
316
317static int mei_me_hw_start(struct mei_device *dev)
318{
319 int ret = mei_me_hw_ready_wait(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300320
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200321 if (ret)
322 return ret;
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300323 dev_dbg(dev->dev, "hw is ready\n");
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200324
325 mei_me_host_set_ready(dev);
326 return ret;
327}
328
329
Tomas Winkler3a65dd42012-12-25 19:06:06 +0200330/**
Tomas Winkler726917f2012-06-25 23:46:28 +0300331 * mei_hbuf_filled_slots - gets number of device filled buffer slots
Oren Weil3ce72722011-05-15 13:43:43 +0300332 *
Sedat Dilek7353f852013-01-17 19:54:15 +0100333 * @dev: the device structure
Oren Weil3ce72722011-05-15 13:43:43 +0300334 *
335 * returns number of filled slots
336 */
Tomas Winkler726917f2012-06-25 23:46:28 +0300337static unsigned char mei_hbuf_filled_slots(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300338{
Tomas Winkler52c34562013-02-06 14:06:40 +0200339 struct mei_me_hw *hw = to_me_hw(dev);
Oren Weil3ce72722011-05-15 13:43:43 +0300340 char read_ptr, write_ptr;
341
Tomas Winkler52c34562013-02-06 14:06:40 +0200342 hw->host_hw_state = mei_hcsr_read(hw);
Tomas Winkler726917f2012-06-25 23:46:28 +0300343
Tomas Winkler52c34562013-02-06 14:06:40 +0200344 read_ptr = (char) ((hw->host_hw_state & H_CBRP) >> 8);
345 write_ptr = (char) ((hw->host_hw_state & H_CBWP) >> 16);
Oren Weil3ce72722011-05-15 13:43:43 +0300346
347 return (unsigned char) (write_ptr - read_ptr);
348}
349
350/**
Masanari Iida393b1482013-04-05 01:05:05 +0900351 * mei_me_hbuf_is_empty - checks if host buffer is empty.
Oren Weil3ce72722011-05-15 13:43:43 +0300352 *
353 * @dev: the device structure
354 *
Tomas Winkler726917f2012-06-25 23:46:28 +0300355 * returns true if empty, false - otherwise.
Oren Weil3ce72722011-05-15 13:43:43 +0300356 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200357static bool mei_me_hbuf_is_empty(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300358{
Tomas Winkler726917f2012-06-25 23:46:28 +0300359 return mei_hbuf_filled_slots(dev) == 0;
Oren Weil3ce72722011-05-15 13:43:43 +0300360}
361
362/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200363 * mei_me_hbuf_empty_slots - counts write empty slots.
Oren Weil3ce72722011-05-15 13:43:43 +0300364 *
365 * @dev: the device structure
366 *
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +0200367 * returns -EOVERFLOW if overflow, otherwise empty slots count
Oren Weil3ce72722011-05-15 13:43:43 +0300368 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200369static int mei_me_hbuf_empty_slots(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300370{
Tomas Winkler24aadc82012-06-25 23:46:27 +0300371 unsigned char filled_slots, empty_slots;
Oren Weil3ce72722011-05-15 13:43:43 +0300372
Tomas Winkler726917f2012-06-25 23:46:28 +0300373 filled_slots = mei_hbuf_filled_slots(dev);
Tomas Winkler24aadc82012-06-25 23:46:27 +0300374 empty_slots = dev->hbuf_depth - filled_slots;
Oren Weil3ce72722011-05-15 13:43:43 +0300375
376 /* check for overflow */
Tomas Winkler24aadc82012-06-25 23:46:27 +0300377 if (filled_slots > dev->hbuf_depth)
Oren Weil3ce72722011-05-15 13:43:43 +0300378 return -EOVERFLOW;
379
380 return empty_slots;
381}
382
Tomas Winkler827eef52013-02-06 14:06:41 +0200383static size_t mei_me_hbuf_max_len(const struct mei_device *dev)
384{
385 return dev->hbuf_depth * sizeof(u32) - sizeof(struct mei_msg_hdr);
386}
387
388
Oren Weil3ce72722011-05-15 13:43:43 +0300389/**
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +0200390 * mei_me_write_message - writes a message to mei device.
Oren Weil3ce72722011-05-15 13:43:43 +0300391 *
392 * @dev: the device structure
Sedat Dilek7353f852013-01-17 19:54:15 +0100393 * @header: mei HECI header of message
Tomas Winkler438763f2012-12-25 19:05:59 +0200394 * @buf: message payload will be written
Oren Weil3ce72722011-05-15 13:43:43 +0300395 *
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200396 * This function returns -EIO if write has failed
Oren Weil3ce72722011-05-15 13:43:43 +0300397 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200398static int mei_me_write_message(struct mei_device *dev,
399 struct mei_msg_hdr *header,
400 unsigned char *buf)
Oren Weil3ce72722011-05-15 13:43:43 +0300401{
Tomas Winkler52c34562013-02-06 14:06:40 +0200402 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winklerc8c8d082013-03-11 18:27:02 +0200403 unsigned long rem;
Tomas Winkler438763f2012-12-25 19:05:59 +0200404 unsigned long length = header->length;
Tomas Winkler169d1332012-06-19 09:13:35 +0300405 u32 *reg_buf = (u32 *)buf;
Tomas Winkler88eb99f2013-01-08 23:07:30 +0200406 u32 hcsr;
Tomas Winklerc8c8d082013-03-11 18:27:02 +0200407 u32 dw_cnt;
Tomas Winkler169d1332012-06-19 09:13:35 +0300408 int i;
409 int empty_slots;
Oren Weil3ce72722011-05-15 13:43:43 +0300410
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300411 dev_dbg(dev->dev, MEI_HDR_FMT, MEI_HDR_PRM(header));
Oren Weil3ce72722011-05-15 13:43:43 +0300412
Tomas Winkler726917f2012-06-25 23:46:28 +0300413 empty_slots = mei_hbuf_empty_slots(dev);
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300414 dev_dbg(dev->dev, "empty slots = %hu.\n", empty_slots);
Oren Weil3ce72722011-05-15 13:43:43 +0300415
Tomas Winkler7bdf72d2012-07-04 19:24:52 +0300416 dw_cnt = mei_data2slots(length);
Tomas Winkler169d1332012-06-19 09:13:35 +0300417 if (empty_slots < 0 || dw_cnt > empty_slots)
Tomas Winkler9d098192014-02-19 17:35:48 +0200418 return -EMSGSIZE;
Oren Weil3ce72722011-05-15 13:43:43 +0300419
Tomas Winklerb68301e2013-03-27 16:58:29 +0200420 mei_me_reg_write(hw, H_CB_WW, *((u32 *) header));
Oren Weil3ce72722011-05-15 13:43:43 +0300421
Tomas Winkler169d1332012-06-19 09:13:35 +0300422 for (i = 0; i < length / 4; i++)
Tomas Winklerb68301e2013-03-27 16:58:29 +0200423 mei_me_reg_write(hw, H_CB_WW, reg_buf[i]);
Tomas Winkler169d1332012-06-19 09:13:35 +0300424
425 rem = length & 0x3;
426 if (rem > 0) {
427 u32 reg = 0;
Tomas Winkler92db1552014-09-29 16:31:37 +0300428
Tomas Winkler169d1332012-06-19 09:13:35 +0300429 memcpy(&reg, &buf[length - rem], rem);
Tomas Winklerb68301e2013-03-27 16:58:29 +0200430 mei_me_reg_write(hw, H_CB_WW, reg);
Oren Weil3ce72722011-05-15 13:43:43 +0300431 }
432
Tomas Winkler52c34562013-02-06 14:06:40 +0200433 hcsr = mei_hcsr_read(hw) | H_IG;
434 mei_hcsr_set(hw, hcsr);
Tomas Winkler827eef52013-02-06 14:06:41 +0200435 if (!mei_me_hw_is_ready(dev))
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200436 return -EIO;
Oren Weil3ce72722011-05-15 13:43:43 +0300437
Tomas Winkler1ccb7b62012-03-14 14:39:42 +0200438 return 0;
Oren Weil3ce72722011-05-15 13:43:43 +0300439}
440
441/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200442 * mei_me_count_full_read_slots - counts read full slots.
Oren Weil3ce72722011-05-15 13:43:43 +0300443 *
444 * @dev: the device structure
445 *
Alexander Usyskin7ca96aa2014-02-19 17:35:49 +0200446 * returns -EOVERFLOW if overflow, otherwise filled slots count
Oren Weil3ce72722011-05-15 13:43:43 +0300447 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200448static int mei_me_count_full_read_slots(struct mei_device *dev)
Oren Weil3ce72722011-05-15 13:43:43 +0300449{
Tomas Winkler52c34562013-02-06 14:06:40 +0200450 struct mei_me_hw *hw = to_me_hw(dev);
Oren Weil3ce72722011-05-15 13:43:43 +0300451 char read_ptr, write_ptr;
452 unsigned char buffer_depth, filled_slots;
453
Tomas Winklerb68301e2013-03-27 16:58:29 +0200454 hw->me_hw_state = mei_me_mecsr_read(hw);
Tomas Winkler52c34562013-02-06 14:06:40 +0200455 buffer_depth = (unsigned char)((hw->me_hw_state & ME_CBD_HRA) >> 24);
456 read_ptr = (char) ((hw->me_hw_state & ME_CBRP_HRA) >> 8);
457 write_ptr = (char) ((hw->me_hw_state & ME_CBWP_HRA) >> 16);
Oren Weil3ce72722011-05-15 13:43:43 +0300458 filled_slots = (unsigned char) (write_ptr - read_ptr);
459
460 /* check for overflow */
461 if (filled_slots > buffer_depth)
462 return -EOVERFLOW;
463
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300464 dev_dbg(dev->dev, "filled_slots =%08x\n", filled_slots);
Oren Weil3ce72722011-05-15 13:43:43 +0300465 return (int)filled_slots;
466}
467
468/**
Tomas Winkler827eef52013-02-06 14:06:41 +0200469 * mei_me_read_slots - reads a message from mei device.
Oren Weil3ce72722011-05-15 13:43:43 +0300470 *
471 * @dev: the device structure
472 * @buffer: message buffer will be written
473 * @buffer_length: message size will be read
474 */
Tomas Winkler827eef52013-02-06 14:06:41 +0200475static int mei_me_read_slots(struct mei_device *dev, unsigned char *buffer,
Tomas Winkleredf1eed2012-02-09 19:25:54 +0200476 unsigned long buffer_length)
Oren Weil3ce72722011-05-15 13:43:43 +0300477{
Tomas Winkler52c34562013-02-06 14:06:40 +0200478 struct mei_me_hw *hw = to_me_hw(dev);
Tomas Winkleredf1eed2012-02-09 19:25:54 +0200479 u32 *reg_buf = (u32 *)buffer;
Tomas Winkler88eb99f2013-01-08 23:07:30 +0200480 u32 hcsr;
Oren Weil3ce72722011-05-15 13:43:43 +0300481
Tomas Winkleredf1eed2012-02-09 19:25:54 +0200482 for (; buffer_length >= sizeof(u32); buffer_length -= sizeof(u32))
Tomas Winkler827eef52013-02-06 14:06:41 +0200483 *reg_buf++ = mei_me_mecbrw_read(dev);
Oren Weil3ce72722011-05-15 13:43:43 +0300484
485 if (buffer_length > 0) {
Tomas Winkler827eef52013-02-06 14:06:41 +0200486 u32 reg = mei_me_mecbrw_read(dev);
Tomas Winkler92db1552014-09-29 16:31:37 +0300487
Tomas Winkleredf1eed2012-02-09 19:25:54 +0200488 memcpy(reg_buf, &reg, buffer_length);
Oren Weil3ce72722011-05-15 13:43:43 +0300489 }
490
Tomas Winkler52c34562013-02-06 14:06:40 +0200491 hcsr = mei_hcsr_read(hw) | H_IG;
492 mei_hcsr_set(hw, hcsr);
Tomas Winkler827eef52013-02-06 14:06:41 +0200493 return 0;
Oren Weil3ce72722011-05-15 13:43:43 +0300494}
495
Tomas Winkler06ecd642013-02-06 14:06:42 +0200496/**
Tomas Winkler152de902014-09-29 16:31:36 +0300497 * mei_me_pg_enter - write pg enter register
Tomas Winklerb16c3572014-03-18 22:51:57 +0200498 *
499 * @dev: the device structure
500 */
501static void mei_me_pg_enter(struct mei_device *dev)
502{
503 struct mei_me_hw *hw = to_me_hw(dev);
504 u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
Tomas Winkler92db1552014-09-29 16:31:37 +0300505
Tomas Winklerb16c3572014-03-18 22:51:57 +0200506 reg |= H_HPG_CSR_PGI;
507 mei_me_reg_write(hw, H_HPG_CSR, reg);
508}
509
510/**
Tomas Winkler152de902014-09-29 16:31:36 +0300511 * mei_me_pg_exit - write pg exit register
Tomas Winklerb16c3572014-03-18 22:51:57 +0200512 *
513 * @dev: the device structure
514 */
515static void mei_me_pg_exit(struct mei_device *dev)
516{
517 struct mei_me_hw *hw = to_me_hw(dev);
518 u32 reg = mei_me_reg_read(hw, H_HPG_CSR);
519
520 WARN(!(reg & H_HPG_CSR_PGI), "PGI is not set\n");
521
522 reg |= H_HPG_CSR_PGIHEXR;
523 mei_me_reg_write(hw, H_HPG_CSR, reg);
524}
525
526/**
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200527 * mei_me_pg_set_sync - perform pg entry procedure
528 *
529 * @dev: the device structure
530 *
531 * returns 0 on success an error code otherwise
532 */
533int mei_me_pg_set_sync(struct mei_device *dev)
534{
535 struct mei_me_hw *hw = to_me_hw(dev);
536 unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
537 int ret;
538
539 dev->pg_event = MEI_PG_EVENT_WAIT;
540
541 ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_ENTRY_REQ_CMD);
542 if (ret)
543 return ret;
544
545 mutex_unlock(&dev->device_lock);
546 wait_event_timeout(dev->wait_pg,
547 dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
548 mutex_lock(&dev->device_lock);
549
550 if (dev->pg_event == MEI_PG_EVENT_RECEIVED) {
551 mei_me_pg_enter(dev);
552 ret = 0;
553 } else {
554 ret = -ETIME;
555 }
556
557 dev->pg_event = MEI_PG_EVENT_IDLE;
558 hw->pg_state = MEI_PG_ON;
559
560 return ret;
561}
562
563/**
564 * mei_me_pg_unset_sync - perform pg exit procedure
565 *
566 * @dev: the device structure
567 *
568 * returns 0 on success an error code otherwise
569 */
570int mei_me_pg_unset_sync(struct mei_device *dev)
571{
572 struct mei_me_hw *hw = to_me_hw(dev);
573 unsigned long timeout = mei_secs_to_jiffies(MEI_PGI_TIMEOUT);
574 int ret;
575
576 if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
577 goto reply;
578
579 dev->pg_event = MEI_PG_EVENT_WAIT;
580
581 mei_me_pg_exit(dev);
582
583 mutex_unlock(&dev->device_lock);
584 wait_event_timeout(dev->wait_pg,
585 dev->pg_event == MEI_PG_EVENT_RECEIVED, timeout);
586 mutex_lock(&dev->device_lock);
587
588reply:
589 if (dev->pg_event == MEI_PG_EVENT_RECEIVED)
590 ret = mei_hbm_pg(dev, MEI_PG_ISOLATION_EXIT_RES_CMD);
591 else
592 ret = -ETIME;
593
594 dev->pg_event = MEI_PG_EVENT_IDLE;
595 hw->pg_state = MEI_PG_OFF;
596
597 return ret;
598}
599
600/**
Tomas Winkleree7e5af2014-03-18 22:51:58 +0200601 * mei_me_pg_is_enabled - detect if PG is supported by HW
602 *
603 * @dev: the device structure
604 *
605 * returns: true is pg supported, false otherwise
606 */
607static bool mei_me_pg_is_enabled(struct mei_device *dev)
608{
609 struct mei_me_hw *hw = to_me_hw(dev);
610 u32 reg = mei_me_reg_read(hw, ME_CSR_HA);
611
612 if ((reg & ME_PGIC_HRA) == 0)
613 goto notsupported;
614
Tomas Winklerbae1cc72014-08-21 14:29:21 +0300615 if (!dev->hbm_f_pg_supported)
Tomas Winkleree7e5af2014-03-18 22:51:58 +0200616 goto notsupported;
617
618 return true;
619
620notsupported:
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300621 dev_dbg(dev->dev, "pg: not supported: HGP = %d hbm version %d.%d ?= %d.%d\n",
Tomas Winkleree7e5af2014-03-18 22:51:58 +0200622 !!(reg & ME_PGIC_HRA),
623 dev->version.major_version,
624 dev->version.minor_version,
625 HBM_MAJOR_VERSION_PGI,
626 HBM_MINOR_VERSION_PGI);
627
628 return false;
629}
630
631/**
Tomas Winkler06ecd642013-02-06 14:06:42 +0200632 * mei_me_irq_quick_handler - The ISR of the MEI device
633 *
634 * @irq: The irq number
635 * @dev_id: pointer to the device structure
636 *
637 * returns irqreturn_t
638 */
639
640irqreturn_t mei_me_irq_quick_handler(int irq, void *dev_id)
641{
642 struct mei_device *dev = (struct mei_device *) dev_id;
643 struct mei_me_hw *hw = to_me_hw(dev);
644 u32 csr_reg = mei_hcsr_read(hw);
645
646 if ((csr_reg & H_IS) != H_IS)
647 return IRQ_NONE;
648
649 /* clear H_IS bit in H_CSR */
Tomas Winklerb68301e2013-03-27 16:58:29 +0200650 mei_me_reg_write(hw, H_CSR, csr_reg);
Tomas Winkler06ecd642013-02-06 14:06:42 +0200651
652 return IRQ_WAKE_THREAD;
653}
654
655/**
656 * mei_me_irq_thread_handler - function called after ISR to handle the interrupt
657 * processing.
658 *
659 * @irq: The irq number
660 * @dev_id: pointer to the device structure
661 *
662 * returns irqreturn_t
663 *
664 */
665irqreturn_t mei_me_irq_thread_handler(int irq, void *dev_id)
666{
667 struct mei_device *dev = (struct mei_device *) dev_id;
668 struct mei_cl_cb complete_list;
Tomas Winkler06ecd642013-02-06 14:06:42 +0200669 s32 slots;
Tomas Winkler544f9462014-01-08 20:19:21 +0200670 int rets = 0;
Tomas Winkler06ecd642013-02-06 14:06:42 +0200671
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300672 dev_dbg(dev->dev, "function called after ISR to handle the interrupt processing.\n");
Tomas Winkler06ecd642013-02-06 14:06:42 +0200673 /* initialize our complete list */
674 mutex_lock(&dev->device_lock);
675 mei_io_list_init(&complete_list);
676
677 /* Ack the interrupt here
678 * In case of MSI we don't go through the quick handler */
679 if (pci_dev_msi_enabled(dev->pdev))
680 mei_clear_interrupts(dev);
681
682 /* check if ME wants a reset */
Tomas Winkler33ec0822014-01-12 00:36:09 +0200683 if (!mei_hw_is_ready(dev) && dev->dev_state != MEI_DEV_RESETTING) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300684 dev_warn(dev->dev, "FW not ready: resetting.\n");
Tomas Winkler544f9462014-01-08 20:19:21 +0200685 schedule_work(&dev->reset_work);
686 goto end;
Tomas Winkler06ecd642013-02-06 14:06:42 +0200687 }
688
689 /* check if we need to start the dev */
690 if (!mei_host_is_ready(dev)) {
691 if (mei_hw_is_ready(dev)) {
Tomas Winklerb04ada92014-05-12 12:19:39 +0300692 mei_me_hw_reset_release(dev);
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300693 dev_dbg(dev->dev, "we need to start the dev.\n");
Tomas Winkler06ecd642013-02-06 14:06:42 +0200694
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200695 dev->recvd_hw_ready = true;
Alexander Usyskin2c2b93e2014-08-12 20:16:03 +0300696 wake_up(&dev->wait_hw_ready);
Tomas Winkler06ecd642013-02-06 14:06:42 +0200697 } else {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300698 dev_dbg(dev->dev, "Spurious Interrupt\n");
Tomas Winkler06ecd642013-02-06 14:06:42 +0200699 }
Tomas Winkler544f9462014-01-08 20:19:21 +0200700 goto end;
Tomas Winkler06ecd642013-02-06 14:06:42 +0200701 }
702 /* check slots available for reading */
703 slots = mei_count_full_read_slots(dev);
704 while (slots > 0) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300705 dev_dbg(dev->dev, "slots to read = %08x\n", slots);
Tomas Winkler06ecd642013-02-06 14:06:42 +0200706 rets = mei_irq_read_handler(dev, &complete_list, &slots);
Tomas Winklerb1b94b52014-03-03 00:21:28 +0200707 /* There is a race between ME write and interrupt delivery:
708 * Not all data is always available immediately after the
709 * interrupt, so try to read again on the next interrupt.
710 */
711 if (rets == -ENODATA)
712 break;
713
Tomas Winkler33ec0822014-01-12 00:36:09 +0200714 if (rets && dev->dev_state != MEI_DEV_RESETTING) {
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300715 dev_err(dev->dev, "mei_irq_read_handler ret = %d.\n",
Tomas Winklerb1b94b52014-03-03 00:21:28 +0200716 rets);
Tomas Winkler544f9462014-01-08 20:19:21 +0200717 schedule_work(&dev->reset_work);
Tomas Winkler06ecd642013-02-06 14:06:42 +0200718 goto end;
Tomas Winkler544f9462014-01-08 20:19:21 +0200719 }
Tomas Winkler06ecd642013-02-06 14:06:42 +0200720 }
Tomas Winkler06ecd642013-02-06 14:06:42 +0200721
Tomas Winkler6aae48f2014-02-19 17:35:47 +0200722 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
723
Tomas Winklerba9cdd02014-03-18 22:52:00 +0200724 /*
725 * During PG handshake only allowed write is the replay to the
726 * PG exit message, so block calling write function
727 * if the pg state is not idle
728 */
729 if (dev->pg_event == MEI_PG_EVENT_IDLE) {
730 rets = mei_irq_write_handler(dev, &complete_list);
731 dev->hbuf_is_ready = mei_hbuf_is_ready(dev);
732 }
Tomas Winkler06ecd642013-02-06 14:06:42 +0200733
Tomas Winkler4c6e22b2013-03-17 11:41:20 +0200734 mei_irq_compl_handler(dev, &complete_list);
Tomas Winkler06ecd642013-02-06 14:06:42 +0200735
Tomas Winkler544f9462014-01-08 20:19:21 +0200736end:
Tomas Winkler2bf94cab2014-09-29 16:31:42 +0300737 dev_dbg(dev->dev, "interrupt thread end ret = %d\n", rets);
Tomas Winkler544f9462014-01-08 20:19:21 +0200738 mutex_unlock(&dev->device_lock);
Tomas Winkler06ecd642013-02-06 14:06:42 +0200739 return IRQ_HANDLED;
740}
Alexander Usyskin04dd3662014-03-31 17:59:23 +0300741
Tomas Winkler827eef52013-02-06 14:06:41 +0200742static const struct mei_hw_ops mei_me_hw_ops = {
743
Tomas Winkler1bd30b62014-09-29 16:31:43 +0300744 .fw_status = mei_me_fw_status,
Tomas Winkler964a2332014-03-18 22:51:59 +0200745 .pg_state = mei_me_pg_state,
746
Tomas Winkler827eef52013-02-06 14:06:41 +0200747 .host_is_ready = mei_me_host_is_ready,
748
749 .hw_is_ready = mei_me_hw_is_ready,
750 .hw_reset = mei_me_hw_reset,
Tomas Winkleraafae7e2013-03-11 18:27:03 +0200751 .hw_config = mei_me_hw_config,
752 .hw_start = mei_me_hw_start,
Tomas Winkler827eef52013-02-06 14:06:41 +0200753
Tomas Winkleree7e5af2014-03-18 22:51:58 +0200754 .pg_is_enabled = mei_me_pg_is_enabled,
755
Tomas Winkler827eef52013-02-06 14:06:41 +0200756 .intr_clear = mei_me_intr_clear,
757 .intr_enable = mei_me_intr_enable,
758 .intr_disable = mei_me_intr_disable,
759
760 .hbuf_free_slots = mei_me_hbuf_empty_slots,
761 .hbuf_is_ready = mei_me_hbuf_is_empty,
762 .hbuf_max_len = mei_me_hbuf_max_len,
763
764 .write = mei_me_write_message,
765
766 .rdbuf_full_slots = mei_me_count_full_read_slots,
767 .read_hdr = mei_me_mecbrw_read,
768 .read = mei_me_read_slots
769};
770
Tomas Winklerc9199512014-05-13 01:30:54 +0300771static bool mei_me_fw_type_nm(struct pci_dev *pdev)
772{
773 u32 reg;
Tomas Winkler92db1552014-09-29 16:31:37 +0300774
Tomas Winklerc9199512014-05-13 01:30:54 +0300775 pci_read_config_dword(pdev, PCI_CFG_HFS_2, &reg);
776 /* make sure that bit 9 (NM) is up and bit 10 (DM) is down */
777 return (reg & 0x600) == 0x200;
778}
779
/*
 * Attach the Node Manager firmware probe as the per-device quirk hook
 * (used by mei_me_pch_cpt_pbg_cfg for Node Manager exclusion).
 */
#define MEI_CFG_FW_NM \
	.quirk_probe = mei_me_fw_type_nm
782
783static bool mei_me_fw_type_sps(struct pci_dev *pdev)
784{
785 u32 reg;
786 /* Read ME FW Status check for SPS Firmware */
787 pci_read_config_dword(pdev, PCI_CFG_HFS_1, &reg);
788 /* if bits [19:16] = 15, running SPS Firmware */
789 return (reg & 0xf0000) == 0xf0000;
790}
791
/*
 * Attach the SPS firmware probe as the per-device quirk hook
 * (used by mei_me_lpt_cfg for SPS firmware exclusion).
 */
#define MEI_CFG_FW_SPS \
	.quirk_probe = mei_me_fw_type_sps
794
795
Alexander Usyskin8d929d42014-05-13 01:30:53 +0300796#define MEI_CFG_LEGACY_HFS \
797 .fw_status.count = 0
798
799#define MEI_CFG_ICH_HFS \
800 .fw_status.count = 1, \
801 .fw_status.status[0] = PCI_CFG_HFS_1
802
803#define MEI_CFG_PCH_HFS \
804 .fw_status.count = 2, \
805 .fw_status.status[0] = PCI_CFG_HFS_1, \
806 .fw_status.status[1] = PCI_CFG_HFS_2
807
808
/* ICH Legacy devices: no FW status registers */
const struct mei_cfg mei_me_legacy_cfg = {
	MEI_CFG_LEGACY_HFS,
};

/* ICH devices: one FW status register */
const struct mei_cfg mei_me_ich_cfg = {
	MEI_CFG_ICH_HFS,
};

/* PCH devices: two FW status registers */
const struct mei_cfg mei_me_pch_cfg = {
	MEI_CFG_PCH_HFS,
};


/* PCH Cougar Point and Patsburg with quirk for Node Manager exclusion */
const struct mei_cfg mei_me_pch_cpt_pbg_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_NM,
};

/* PCH Lynx Point with quirk for SPS Firmware exclusion */
const struct mei_cfg mei_me_lpt_cfg = {
	MEI_CFG_PCH_HFS,
	MEI_CFG_FW_SPS,
};
836
Tomas Winkler52c34562013-02-06 14:06:40 +0200837/**
Masanari Iida393b1482013-04-05 01:05:05 +0900838 * mei_me_dev_init - allocates and initializes the mei device structure
Tomas Winkler52c34562013-02-06 14:06:40 +0200839 *
840 * @pdev: The pci device structure
Alexander Usyskin8d929d42014-05-13 01:30:53 +0300841 * @cfg: per device generation config
Tomas Winkler52c34562013-02-06 14:06:40 +0200842 *
843 * returns The mei_device_device pointer on success, NULL on failure.
844 */
Alexander Usyskin8d929d42014-05-13 01:30:53 +0300845struct mei_device *mei_me_dev_init(struct pci_dev *pdev,
846 const struct mei_cfg *cfg)
Tomas Winkler52c34562013-02-06 14:06:40 +0200847{
848 struct mei_device *dev;
849
850 dev = kzalloc(sizeof(struct mei_device) +
851 sizeof(struct mei_me_hw), GFP_KERNEL);
852 if (!dev)
853 return NULL;
854
Tomas Winkler3a7e9b62014-09-29 16:31:41 +0300855 mei_device_init(dev, &pdev->dev, &mei_me_hw_ops);
856 dev->cfg = cfg;
Tomas Winkler52c34562013-02-06 14:06:40 +0200857 dev->pdev = pdev;
858 return dev;
859}
Tomas Winkler06ecd642013-02-06 14:06:42 +0200860