/*
 * Copyright (c) 2015-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

/*
 * PFK Key Cache
 *
 * Key Cache used internally in PFK.
 * The purpose of the cache is to save access time to QSEE when loading keys.
 * Currently the cache is the same size as the total number of keys that can
 * be loaded to ICE. Since this number is relatively small, the algorithms for
 * cache eviction are simple, linear and based on the last usage timestamp,
 * i.e. the node that will be evicted is the one with the oldest timestamp.
 * Empty entries always have the oldest timestamp.
 */
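
/*
 * Illustrative note (not part of the driver logic): on a cache miss the
 * first FREE slot is taken; if none is FREE, the INACTIVE entry with the
 * smallest (oldest) time_stamp is evicted and reused. Entries in the
 * ACTIVE_* or INACTIVE_INVALIDATING states are never evicted.
 */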

#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <crypto/ice.h>
#include <linux/errno.h>
#include <linux/string.h>
#include <linux/jiffies.h>
#include <linux/slab.h>
#include <linux/printk.h>
#include <linux/sched.h>

#include "pfk_kc.h"
#include "pfk_ice.h"


/** The first available index in the ICE engine */
#define PFK_KC_STARTING_INDEX 2

/** Currently the only supported key and salt sizes */
#define PFK_KC_KEY_SIZE 32
#define PFK_KC_SALT_SIZE 32

/** Table size */
/* TODO replace by some constant from ice.h */
#define PFK_KC_TABLE_SIZE ((32) - (PFK_KC_STARTING_INDEX))

/** The maximum key and salt size */
#define PFK_MAX_KEY_SIZE PFK_KC_KEY_SIZE
#define PFK_MAX_SALT_SIZE PFK_KC_SALT_SIZE
#define PFK_UFS "ufs"

static DEFINE_SPINLOCK(kc_lock);
static unsigned long flags;
static bool kc_ready;
static char *s_type = "sdcc";

/**
 * enum pfk_kc_entry_state - state of the entry inside kc table
 *
 * @FREE: entry is free
 * @ACTIVE_ICE_PRELOAD: entry is actively used by ICE engine
 *                      and cannot be used by others. SCM call
 *                      to load key to ICE is pending to be performed
 * @ACTIVE_ICE_LOADED: entry is actively used by ICE engine and
 *                     cannot be used by others. SCM call to load the
 *                     key to ICE was successfully executed and key is
 *                     now loaded
 * @INACTIVE_INVALIDATING: entry is being invalidated during file close
 *                         and cannot be used by others until invalidation
 *                         is complete
 * @INACTIVE: entry's key is already loaded, but is not
 *            currently being used. It can be re-used for
 *            optimization and to avoid SCM call cost, or
 *            it can be taken by another key if there are
 *            no FREE entries
 * @SCM_ERROR: error occurred while the SCM call was performed to
 *             load the key to ICE
 */
enum pfk_kc_entry_state {
	FREE,
	ACTIVE_ICE_PRELOAD,
	ACTIVE_ICE_LOADED,
	INACTIVE_INVALIDATING,
	INACTIVE,
	SCM_ERROR
};
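
/*
 * Entry lifecycle as implemented below (informational sketch only):
 *
 *   FREE ---kc_update_entry()---> ACTIVE_ICE_PRELOAD
 *   ACTIVE_ICE_PRELOAD --SCM ok--> ACTIVE_ICE_LOADED (SCM_ERROR on failure)
 *   ACTIVE_ICE_LOADED --pfk_kc_load_key_end(), ref cnt hits 0--> INACTIVE
 *   INACTIVE --cache hit--> ACTIVE_ICE_LOADED
 *   INACTIVE --key removal--> INACTIVE_INVALIDATING ---> FREE
 */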

struct kc_entry {
	unsigned char key[PFK_MAX_KEY_SIZE];
	size_t key_size;

	unsigned char salt[PFK_MAX_SALT_SIZE];
	size_t salt_size;

	u64 time_stamp;
	u32 key_index;

	struct task_struct *thread_pending;

	enum pfk_kc_entry_state state;

	/* ref count for the number of requests in the HW queue for this key */
	int loaded_ref_cnt;
	int scm_error;
};

static struct kc_entry kc_table[PFK_KC_TABLE_SIZE];

/**
 * kc_is_ready() - driver is initialized and ready.
 *
 * Return: true if the key cache is ready.
 */
static inline bool kc_is_ready(void)
{
	return kc_ready;
}

static inline void kc_spin_lock(void)
{
	spin_lock_irqsave(&kc_lock, flags);
}

static inline void kc_spin_unlock(void)
{
	spin_unlock_irqrestore(&kc_lock, flags);
}

/**
 * kc_entry_is_available() - checks whether the entry is available
 *
 * Return true if it is, false otherwise or if the entry is invalid.
 * Should be invoked under spinlock.
 */
static bool kc_entry_is_available(const struct kc_entry *entry)
{
	if (!entry)
		return false;

	return (entry->state == FREE || entry->state == INACTIVE);
}

/**
 * kc_entry_wait_till_available() - waits till entry is available
 *
 * Returns 0 in case of success or -ERESTARTSYS if the wait was interrupted
 * by a signal.
 *
 * Should be invoked under spinlock.
 */
static int kc_entry_wait_till_available(struct kc_entry *entry)
{
	int res = 0;

	while (!kc_entry_is_available(entry)) {
		set_current_state(TASK_INTERRUPTIBLE);
		if (signal_pending(current)) {
			res = -ERESTARTSYS;
			break;
		}
		/* assuming only one thread can try to invalidate
		 * the same entry
		 */
		entry->thread_pending = current;
		kc_spin_unlock();
		schedule();
		kc_spin_lock();
	}
	set_current_state(TASK_RUNNING);

	return res;
}

/**
 * kc_entry_start_invalidating() - moves entry to state
 *                                 INACTIVE_INVALIDATING.
 *                                 If the entry is in use, waits till
 *                                 it becomes available
 * @entry: pointer to entry
 *
 * Return 0 in case of success, otherwise error.
 * Should be invoked under spinlock.
 */
static int kc_entry_start_invalidating(struct kc_entry *entry)
{
	int res;

	res = kc_entry_wait_till_available(entry);
	if (res)
		return res;

	entry->state = INACTIVE_INVALIDATING;

	return 0;
}

/**
 * kc_entry_finish_invalidating() - moves entry to state FREE once its
 *                                  invalidation is complete
 *
 * @entry: pointer to entry
 *
 * Should be invoked under spinlock.
 */
static void kc_entry_finish_invalidating(struct kc_entry *entry)
{
	if (!entry)
		return;

	if (entry->state != INACTIVE_INVALIDATING)
		return;

	entry->state = FREE;
}

/**
 * kc_min_entry() - compare two entries to find the one with the minimal time
 * @a: pointer to the first entry. If NULL, the other entry will be returned
 * @b: pointer to the second entry
 *
 * Return the entry whose timestamp is minimal, or b if a is NULL.
 */
static inline struct kc_entry *kc_min_entry(struct kc_entry *a,
		struct kc_entry *b)
{
	if (!a)
		return b;

	if (time_before64(b->time_stamp, a->time_stamp))
		return b;

	return a;
}

/**
 * kc_entry_at_index() - return entry at specific index
 * @index: index of entry to be accessed
 *
 * Return entry.
 * Should be invoked under spinlock.
 */
static struct kc_entry *kc_entry_at_index(int index)
{
	return &(kc_table[index]);
}

/**
 * kc_find_key_at_index() - find kc entry starting at specific index
 * @key: key to look for
 * @key_size: the key size
 * @salt: salt to look for
 * @salt_size: the salt size
 * @starting_index: index to start the search from; if an entry is found,
 *                  updated with the index of that entry
 *
 * Return entry or NULL in case of error.
 * Should be invoked under spinlock.
 */
static struct kc_entry *kc_find_key_at_index(const unsigned char *key,
	size_t key_size, const unsigned char *salt, size_t salt_size,
	int *starting_index)
{
	struct kc_entry *entry = NULL;
	int i = 0;

	for (i = *starting_index; i < PFK_KC_TABLE_SIZE; i++) {
		entry = kc_entry_at_index(i);

		if (salt != NULL) {
			if (entry->salt_size != salt_size)
				continue;

			if (memcmp(entry->salt, salt, salt_size) != 0)
				continue;
		}

		if (entry->key_size != key_size)
			continue;

		if (memcmp(entry->key, key, key_size) == 0) {
			*starting_index = i;
			return entry;
		}
	}

	return NULL;
}
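
/*
 * Usage sketch (illustrative only): because @starting_index is updated in
 * place, duplicate entries holding the same key can be enumerated by
 * resuming the search one slot past the previous hit, e.g.:
 *
 *	int idx = 0;
 *	struct kc_entry *e;
 *
 *	while ((e = kc_find_key_at_index(key, key_size, NULL, 0, &idx))) {
 *		// handle e, which lives at kc_table[idx]
 *		idx++;
 *	}
 *
 * pfk_kc_remove_key() below follows this pattern.
 */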

/**
 * kc_find_key() - find kc entry
 * @key: key to look for
 * @key_size: the key size
 * @salt: salt to look for
 * @salt_size: the salt size
 *
 * Return entry or NULL in case of error
 * Should be invoked under spinlock
 */
static struct kc_entry *kc_find_key(const unsigned char *key, size_t key_size,
		const unsigned char *salt, size_t salt_size)
{
	int index = 0;

	return kc_find_key_at_index(key, key_size, salt, salt_size, &index);
}

/**
 * kc_find_oldest_entry_non_locked() - finds the entry with minimal timestamp
 * that is not locked
 *
 * Returns entry with minimal timestamp. Empty entries have timestamp
 * of 0, therefore they are returned first.
 * If all the entries are locked, will return NULL
 * Should be invoked under spinlock
 */
static struct kc_entry *kc_find_oldest_entry_non_locked(void)
{
	struct kc_entry *curr_min_entry = NULL;
	struct kc_entry *entry = NULL;
	int i = 0;

	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
		entry = kc_entry_at_index(i);

		if (entry->state == FREE)
			return entry;

		if (entry->state == INACTIVE)
			curr_min_entry = kc_min_entry(curr_min_entry, entry);
	}

	return curr_min_entry;
}

/**
 * kc_update_timestamp() - updates timestamp of entry to current
 *
 * @entry: entry to update
 *
 */
static void kc_update_timestamp(struct kc_entry *entry)
{
	if (!entry)
		return;

	entry->time_stamp = get_jiffies_64();
}

/**
 * kc_clear_entry() - clear the key from entry and mark entry not in use
 *
 * @entry: pointer to entry
 *
 * Should be invoked under spinlock
 */
static void kc_clear_entry(struct kc_entry *entry)
{
	if (!entry)
		return;

	memset(entry->key, 0, entry->key_size);
	memset(entry->salt, 0, entry->salt_size);

	entry->key_size = 0;
	entry->salt_size = 0;

	entry->time_stamp = 0;
	entry->scm_error = 0;

	entry->state = FREE;

	entry->loaded_ref_cnt = 0;
	entry->thread_pending = NULL;
}

/**
 * kc_update_entry() - replaces the key in given entry and
 *                     loads the new key to ICE
 *
 * @entry: entry to replace key in
 * @key: key
 * @key_size: key_size
 * @salt: salt
 * @salt_size: salt_size
 *
 * The previous key is securely released and wiped, the new one is loaded
 * to ICE.
 * Should be invoked under spinlock
 */
static int kc_update_entry(struct kc_entry *entry, const unsigned char *key,
	size_t key_size, const unsigned char *salt, size_t salt_size)
{
	int ret;

	kc_clear_entry(entry);

	memcpy(entry->key, key, key_size);
	entry->key_size = key_size;

	memcpy(entry->salt, salt, salt_size);
	entry->salt_size = salt_size;

	/* Mark entry as no longer free before releasing the lock */
	entry->state = ACTIVE_ICE_PRELOAD;
	kc_spin_unlock();

	ret = qti_pfk_ice_set_key(entry->key_index, entry->key,
			entry->salt, s_type);

	kc_spin_lock();
	return ret;
}

/**
 * pfk_kc_init() - init function
 *
 * Return 0 in case of success, error otherwise
 */
int pfk_kc_init(void)
{
	int i = 0;
	struct kc_entry *entry = NULL;

	kc_spin_lock();
	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
		entry = kc_entry_at_index(i);
		entry->key_index = PFK_KC_STARTING_INDEX + i;
	}
	kc_ready = true;
	kc_spin_unlock();
	return 0;
}

/**
 * pfk_kc_deinit() - deinit function
 *
 * Return 0 in case of success, error otherwise
 */
int pfk_kc_deinit(void)
{
	int res = pfk_kc_clear();

	kc_ready = false;
	return res;
}

/**
 * pfk_kc_load_key_start() - retrieve the key from cache or add it if it's
 * not there, and return the ICE hw key index in @key_index.
 * @key: pointer to the key
 * @key_size: the size of the key
 * @salt: pointer to the salt
 * @salt_size: the size of the salt
 * @key_index: pointer to where the resulting key_index will be stored
 * @async: whether SCM calls are allowed in the caller context
 *
 * If the key is present in the cache, then the key_index will be retrieved
 * from the cache.
 * If it is not present, the oldest entry from the kc table will be evicted,
 * and the key will be loaded to ICE via QSEE at the index of the evicted
 * entry and stored in the cache.
 * The entry that is going to be used is marked as being in use; it will be
 * marked as not being in use when ICE finishes using it and
 * pfk_kc_load_key_end() is invoked.
 * As QSEE calls can only be done from a non-atomic context, @async set to
 * 'false' specifies that it is ok to make the calls in the current context.
 * Otherwise, when @async is set, -EAGAIN will be returned and the caller
 * should retry the call from a different context.
 *
 * Return 0 in case of success, error otherwise
 */
int pfk_kc_load_key_start(const unsigned char *key, size_t key_size,
		const unsigned char *salt, size_t salt_size, u32 *key_index,
		bool async)
{
	int ret = 0;
	struct kc_entry *entry = NULL;
	bool entry_exists = false;

	if (!kc_is_ready())
		return -ENODEV;

	if (!key || !salt || !key_index) {
		pr_err("%s key/salt/key_index NULL\n", __func__);
		return -EINVAL;
	}

	if (key_size != PFK_KC_KEY_SIZE) {
		pr_err("unsupported key size %zu\n", key_size);
		return -EINVAL;
	}

	if (salt_size != PFK_KC_SALT_SIZE) {
		pr_err("unsupported salt size %zu\n", salt_size);
		return -EINVAL;
	}

	kc_spin_lock();

	entry = kc_find_key(key, key_size, salt, salt_size);
	if (!entry) {
		if (async) {
			pr_debug("%s task will populate entry\n", __func__);
			kc_spin_unlock();
			return -EAGAIN;
		}

		entry = kc_find_oldest_entry_non_locked();
		if (!entry) {
			/* could not find a single non locked entry,
			 * return EBUSY to upper layers so that the
			 * request will be rescheduled
			 */
			kc_spin_unlock();
			return -EBUSY;
		}
	} else {
		entry_exists = true;
	}

	pr_debug("entry with index %d is in state %d\n",
		entry->key_index, entry->state);

	switch (entry->state) {
	case (INACTIVE):
		if (entry_exists) {
			kc_update_timestamp(entry);
			entry->state = ACTIVE_ICE_LOADED;

			if (!strcmp(s_type, (char *)PFK_UFS)) {
				if (async)
					entry->loaded_ref_cnt++;
			} else {
				entry->loaded_ref_cnt++;
			}
			break;
		}
		/* fall through: evicted INACTIVE entry is reloaded below */
	case (FREE):
		ret = kc_update_entry(entry, key, key_size, salt, salt_size);
		if (ret) {
			entry->state = SCM_ERROR;
			entry->scm_error = ret;
			pr_err("%s: key load error (%d)\n", __func__, ret);
		} else {
			kc_update_timestamp(entry);
			entry->state = ACTIVE_ICE_LOADED;

			/*
			 * In case of UFS only increase ref cnt for async calls,
			 * sync calls from within work thread do not pass
			 * requests further to HW
			 */
			if (!strcmp(s_type, (char *)PFK_UFS)) {
				if (async)
					entry->loaded_ref_cnt++;
			} else {
				entry->loaded_ref_cnt++;
			}
		}
		break;
	case (ACTIVE_ICE_PRELOAD):
	case (INACTIVE_INVALIDATING):
		ret = -EAGAIN;
		break;
	case (ACTIVE_ICE_LOADED):
		kc_update_timestamp(entry);

		if (!strcmp(s_type, (char *)PFK_UFS)) {
			if (async)
				entry->loaded_ref_cnt++;
		} else {
			entry->loaded_ref_cnt++;
		}
		break;
	case (SCM_ERROR):
		ret = entry->scm_error;
		kc_clear_entry(entry);
		entry->state = FREE;
		break;
	default:
		pr_err("invalid state %d for entry with key index %d\n",
			entry->state, entry->key_index);
		ret = -EINVAL;
	}

	*key_index = entry->key_index;
	kc_spin_unlock();

	return ret;
}
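
/*
 * Caller-usage sketch (hypothetical caller code, not part of this driver):
 * a PFK caller is expected to bracket ICE usage of a key with the start/end
 * calls and to handle -EAGAIN/-EBUSY by retrying from a context where SCM
 * calls are allowed, e.g.:
 *
 *	u32 key_index;
 *	int ret;
 *
 *	ret = pfk_kc_load_key_start(key, PFK_KC_KEY_SIZE,
 *				    salt, PFK_KC_SALT_SIZE,
 *				    &key_index, false);
 *	if (ret)
 *		return ret;	// -EAGAIN/-EBUSY mean "retry later"
 *
 *	// ... program key_index into the request submitted to ICE ...
 *
 *	pfk_kc_load_key_end(key, PFK_KC_KEY_SIZE, salt, PFK_KC_SALT_SIZE);
 */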

/**
 * pfk_kc_load_key_end() - finish the process of key loading that was started
 *                         by pfk_kc_load_key_start(), by marking the entry as
 *                         not being in use
 * @key: pointer to the key
 * @key_size: the size of the key
 * @salt: pointer to the salt
 * @salt_size: the size of the salt
 *
 */
void pfk_kc_load_key_end(const unsigned char *key, size_t key_size,
		const unsigned char *salt, size_t salt_size)
{
	struct kc_entry *entry = NULL;
	struct task_struct *tmp_pending = NULL;
	int ref_cnt = 0;

	if (!kc_is_ready())
		return;

	if (!key || !salt)
		return;

	if (key_size != PFK_KC_KEY_SIZE)
		return;

	if (salt_size != PFK_KC_SALT_SIZE)
		return;

	kc_spin_lock();

	entry = kc_find_key(key, key_size, salt, salt_size);
	if (!entry) {
		kc_spin_unlock();
		pr_err("internal error, there should be an entry to unlock\n");

		return;
	}
	ref_cnt = --entry->loaded_ref_cnt;

	if (ref_cnt < 0)
		pr_err("internal error, ref count should never be negative\n");

	if (!ref_cnt) {
		entry->state = INACTIVE;
		/*
		 * wake-up invalidation if it's waiting
		 * for the entry to be released
		 */
		if (entry->thread_pending) {
			tmp_pending = entry->thread_pending;
			entry->thread_pending = NULL;

			kc_spin_unlock();
			wake_up_process(tmp_pending);
			return;
		}
	}

	kc_spin_unlock();
}

/**
 * pfk_kc_remove_key_with_salt() - remove the key from cache and from ICE
 *                                 engine
 * @key: pointer to the key
 * @key_size: the size of the key
 * @salt: pointer to the salt
 * @salt_size: the size of the salt
 *
 * Return 0 in case of success, error otherwise (also in case of a
 * non-existing key)
 */
int pfk_kc_remove_key_with_salt(const unsigned char *key, size_t key_size,
		const unsigned char *salt, size_t salt_size)
{
	struct kc_entry *entry = NULL;
	int res = 0;

	if (!kc_is_ready())
		return -ENODEV;

	if (!key)
		return -EINVAL;

	if (!salt)
		return -EINVAL;

	if (key_size != PFK_KC_KEY_SIZE)
		return -EINVAL;

	if (salt_size != PFK_KC_SALT_SIZE)
		return -EINVAL;

	kc_spin_lock();

	entry = kc_find_key(key, key_size, salt, salt_size);
	if (!entry) {
		pr_debug("%s: key does not exist\n", __func__);
		kc_spin_unlock();
		return -EINVAL;
	}

	res = kc_entry_start_invalidating(entry);
	if (res != 0) {
		kc_spin_unlock();
		return res;
	}
	kc_clear_entry(entry);

	kc_spin_unlock();

	qti_pfk_ice_invalidate_key(entry->key_index, s_type);

	kc_spin_lock();
	kc_entry_finish_invalidating(entry);
	kc_spin_unlock();

	return 0;
}

/**
 * pfk_kc_remove_key() - remove the key from cache and from ICE engine
 *                       when no salt is available. Only the key part is
 *                       matched; if several entries match, all of them
 *                       will be removed.
 *
 * @key: pointer to the key
 * @key_size: the size of the key
 *
 * Return 0 in case of success, error otherwise (also for a non-existing key)
 */
int pfk_kc_remove_key(const unsigned char *key, size_t key_size)
{
	struct kc_entry *entry = NULL;
	int index = 0;
	int temp_indexes[PFK_KC_TABLE_SIZE] = {0};
	int temp_indexes_size = 0;
	int i = 0;
	int res = 0;

	if (!kc_is_ready())
		return -ENODEV;

	if (!key)
		return -EINVAL;

	if (key_size != PFK_KC_KEY_SIZE)
		return -EINVAL;

	memset(temp_indexes, -1, sizeof(temp_indexes));

	kc_spin_lock();

	entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
	if (!entry) {
		pr_err("%s: key does not exist\n", __func__);
		kc_spin_unlock();
		return -EINVAL;
	}

	res = kc_entry_start_invalidating(entry);
	if (res != 0) {
		kc_spin_unlock();
		return res;
	}

	temp_indexes[temp_indexes_size++] = index;
	kc_clear_entry(entry);

	/* let's clean additional entries with the same key if there are any */
	do {
		index++;
		entry = kc_find_key_at_index(key, key_size, NULL, 0, &index);
		if (!entry)
			break;

		res = kc_entry_start_invalidating(entry);
		if (res != 0) {
			kc_spin_unlock();
			goto out;
		}

		temp_indexes[temp_indexes_size++] = index;

		kc_clear_entry(entry);

	} while (true);

	kc_spin_unlock();

	for (i = temp_indexes_size - 1; i >= 0; i--)
		qti_pfk_ice_invalidate_key(
			kc_entry_at_index(temp_indexes[i])->key_index,
			s_type);

	/* fall through */
	res = 0;

out:
	kc_spin_lock();
	/* only walk the indexes that were actually recorded */
	for (i = temp_indexes_size - 1; i >= 0; i--)
		kc_entry_finish_invalidating(
				kc_entry_at_index(temp_indexes[i]));
	kc_spin_unlock();

	return res;
}

/**
 * pfk_kc_clear() - clear the table and remove all keys from ICE
 *
 * Return 0 on success, error otherwise
 *
 */
int pfk_kc_clear(void)
{
	struct kc_entry *entry = NULL;
	int i = 0;
	int res = 0;

	if (!kc_is_ready())
		return -ENODEV;

	kc_spin_lock();
	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
		entry = kc_entry_at_index(i);
		res = kc_entry_start_invalidating(entry);
		if (res != 0) {
			kc_spin_unlock();
			goto out;
		}
		kc_clear_entry(entry);
	}
	kc_spin_unlock();

	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
		qti_pfk_ice_invalidate_key(kc_entry_at_index(i)->key_index,
				s_type);

	/* fall through */
	res = 0;
out:
	kc_spin_lock();
	for (i = 0; i < PFK_KC_TABLE_SIZE; i++)
		kc_entry_finish_invalidating(kc_entry_at_index(i));
	kc_spin_unlock();

	return res;
}

/**
 * pfk_kc_clear_on_reset() - clear the key cache table after an ICE reset
 * The assumption is that at this point there are no pending transactions,
 * so there is also no need to remove the keys from ICE itself.
 *
 * Has no return value.
 *
 */
void pfk_kc_clear_on_reset(void)
{
	struct kc_entry *entry = NULL;
	int i = 0;

	if (!kc_is_ready())
		return;

	kc_spin_lock();
	for (i = 0; i < PFK_KC_TABLE_SIZE; i++) {
		entry = kc_entry_at_index(i);
		kc_clear_entry(entry);
	}
	kc_spin_unlock();
}

static int pfk_kc_find_storage_type(char **device)
{
	char boot[20] = {'\0'};
	char *match = (char *)strnstr(saved_command_line,
				"androidboot.bootdevice=",
				strlen(saved_command_line));
	if (match) {
		memcpy(boot, (match + strlen("androidboot.bootdevice=")),
			sizeof(boot) - 1);
		if (strnstr(boot, PFK_UFS, strlen(boot)))
			*device = PFK_UFS;

		return 0;
	}
	return -EINVAL;
}
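
/*
 * Example (the device name is only illustrative): a kernel command line
 * containing "androidboot.bootdevice=1d84000.ufshc" makes the strnstr()
 * check above match PFK_UFS, so s_type switches from the default "sdcc" to
 * "ufs"; any other boot device value leaves s_type as "sdcc".
 */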

static int __init pfk_kc_pre_init(void)
{
	return pfk_kc_find_storage_type(&s_type);
}

static void __exit pfk_kc_exit(void)
{
	s_type = NULL;
}

module_init(pfk_kc_pre_init);
module_exit(pfk_kc_exit);

MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Per-File-Key-KC driver");