/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "EncryptInplace.h"

#include <stdio.h>
#include <stdint.h>
#include <stdbool.h>
#include <inttypes.h>
#include <time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <unistd.h>
#include <ext4_utils/ext4.h>
#include <ext4_utils/ext4_utils.h>
#include <f2fs_sparseblock.h>

#include <algorithm>

#include "cutils/properties.h"
#define LOG_TAG "EncryptInplace"
#include "cutils/log.h"
#include "CheckBattery.h"

// HORRIBLE HACK, FIXME
#include "cryptfs.h"

// FIXME horrible cut-and-paste code
static inline int unix_read(int fd, void* buff, int len)
{
    return TEMP_FAILURE_RETRY(read(fd, buff, len));
}

static inline int unix_write(int fd, const void* buff, int len)
{
    return TEMP_FAILURE_RETRY(write(fd, buff, len));
}

#define CRYPT_SECTORS_PER_BUFSIZE (CRYPT_INPLACE_BUFSIZE / CRYPT_SECTOR_SIZE)

/* Aligned 32K writes tend to make flash happy;
 * the SD Card Association recommends it.
 */
#ifndef CONFIG_HW_DISK_ENCRYPTION
#define BLOCKS_AT_A_TIME 8
#else
#define BLOCKS_AT_A_TIME 1024
#endif
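
/* With the common 4 KiB ext4 block size, 8 blocks per batch works out to the
 * 32 KiB writes mentioned above; the hardware-encryption build batches 1024
 * blocks per write instead. The actual block size comes from the filesystem
 * superblock at runtime, so batch sizes can differ on other configurations.
 */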

struct encryptGroupsData
{
    int realfd;
    int cryptofd;
    off64_t numblocks;
    off64_t one_pct, cur_pct, new_pct;
    off64_t blocks_already_done, tot_numblocks;
    off64_t used_blocks_already_done, tot_used_blocks;
    char* real_blkdev, * crypto_blkdev;
    int count;
    off64_t offset;
    char* buffer;
    off64_t last_written_sector;
    int completed;
    time_t time_started;
    int remaining_time;
};

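/*
 * Record that one more block has been processed and publish progress to
 * userspace: percent complete via the vold.encrypt_progress property and,
 * once past 5%, an estimated time remaining via vold.encrypt_time_remaining.
 * Progress is measured against used blocks when the filesystem reports them,
 * otherwise against total blocks.
 */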
static void update_progress(struct encryptGroupsData* data, int is_used)
{
    data->blocks_already_done++;

    if (is_used) {
        data->used_blocks_already_done++;
    }
    if (data->tot_used_blocks) {
        data->new_pct = data->used_blocks_already_done / data->one_pct;
    } else {
        data->new_pct = data->blocks_already_done / data->one_pct;
    }

    if (data->new_pct > data->cur_pct) {
        char buf[8];
        data->cur_pct = data->new_pct;
        snprintf(buf, sizeof(buf), "%" PRId64, data->cur_pct);
        property_set("vold.encrypt_progress", buf);
    }

    if (data->cur_pct >= 5) {
        struct timespec time_now;
        if (clock_gettime(CLOCK_MONOTONIC, &time_now)) {
            SLOGW("Error getting time");
        } else {
            double elapsed_time = difftime(time_now.tv_sec, data->time_started);
            off64_t remaining_blocks = data->tot_used_blocks
                                       - data->used_blocks_already_done;
            int remaining_time = (int)(elapsed_time * remaining_blocks
                                       / data->used_blocks_already_done);

            // Change time only if not yet set, lower, or a lot higher for
            // best user experience
            if (data->remaining_time == -1
                || remaining_time < data->remaining_time
                || remaining_time > data->remaining_time + 60) {
                char buf[8];
                snprintf(buf, sizeof(buf), "%d", remaining_time);
                property_set("vold.encrypt_time_remaining", buf);
                data->remaining_time = remaining_time;
            }
        }
    }
}

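/*
 * Log encryption progress, coalescing contiguous flushes into a single
 * "Encrypting from" / "Encrypted to" pair so the log is not flooded with
 * one line per block. Call with completed == true to close out the final
 * range; data is not read in that case.
 */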
static void log_progress(struct encryptGroupsData const* data, bool completed)
{
    // Precondition: when completed is true, data is ignored (and may be
    // NULL); otherwise data must be non-NULL.

    // Track progress so we can skip logging blocks
    static off64_t offset = -1;

    // Need to close existing 'Encrypting from' log?
    if (completed || (offset != -1 && data->offset != offset)) {
        SLOGI("Encrypted to sector %" PRId64,
              offset / info.block_size * CRYPT_SECTOR_SIZE);
        offset = -1;
    }

    // Need to start new 'Encrypting from' log?
    if (!completed && offset != data->offset) {
        SLOGI("Encrypting from sector %" PRId64,
              data->offset / info.block_size * CRYPT_SECTOR_SIZE);
    }

    // Update offset
    if (!completed) {
        offset = data->offset + (off64_t)data->count * info.block_size;
    }
}

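/*
 * Copy the run of blocks accumulated in data (data->count blocks starting at
 * data->offset) from the real block device to the crypto block device, then
 * reset the run. Returns 0 on success, -1 on I/O error.
 */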
static int flush_outstanding_data(struct encryptGroupsData* data)
{
    if (data->count == 0) {
        return 0;
    }

    SLOGV("Copying %d blocks at offset %" PRIx64, data->count, data->offset);

    if (pread64(data->realfd, data->buffer,
                info.block_size * data->count, data->offset)
        <= 0) {
        SLOGE("Error reading real_blkdev %s for inplace encrypt",
              data->real_blkdev);
        return -1;
    }

    if (pwrite64(data->cryptofd, data->buffer,
                 info.block_size * data->count, data->offset)
        <= 0) {
        SLOGE("Error writing crypto_blkdev %s for inplace encrypt",
              data->crypto_blkdev);
        return -1;
    } else {
        log_progress(data, false);
    }

    data->count = 0;
    data->last_written_sector = (data->offset + data->count)
                                / info.block_size * CRYPT_SECTOR_SIZE - 1;
    return 0;
}

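/*
 * Walk every ext4 block group, read its block allocation bitmap, and encrypt
 * only the blocks that are in use (blocks in groups flagged
 * EXT4_BG_BLOCK_UNINIT are treated as unused). Runs of used blocks are
 * batched up to BLOCKS_AT_A_TIME before being flushed. Returns 0 on success
 * or on a clean low-battery stop, -1 on error; data->completed is set only
 * after a full pass.
 */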
static int encrypt_groups(struct encryptGroupsData* data)
{
    unsigned int i;
    u8 *block_bitmap = 0;
    unsigned int block;
    off64_t ret;
    int rc = -1;

    data->buffer = (char*) malloc(info.block_size * BLOCKS_AT_A_TIME);
    if (!data->buffer) {
        SLOGE("Failed to allocate crypto buffer");
        goto errout;
    }

    block_bitmap = (u8*) malloc(info.block_size);
    if (!block_bitmap) {
        SLOGE("failed to allocate block bitmap");
        goto errout;
    }

    for (i = 0; i < aux_info.groups; ++i) {
        SLOGI("Encrypting group %d", i);

        u32 first_block = aux_info.first_data_block + i * info.blocks_per_group;
        u32 block_count = std::min(info.blocks_per_group,
                                   (u32)(aux_info.len_blocks - first_block));

        off64_t offset = (u64)info.block_size
                         * aux_info.bg_desc[i].bg_block_bitmap;

        ret = pread64(data->realfd, block_bitmap, info.block_size, offset);
        if (ret != (int)info.block_size) {
            SLOGE("failed to read all of block group bitmap %d", i);
            goto errout;
        }

        offset = (u64)info.block_size * first_block;

        data->count = 0;

        for (block = 0; block < block_count; block++) {
            int used = (aux_info.bg_desc[i].bg_flags & EXT4_BG_BLOCK_UNINIT) ?
                    0 : bitmap_get_bit(block_bitmap, block);
            update_progress(data, used);
            if (used) {
                if (data->count == 0) {
                    data->offset = offset;
                }
                data->count++;
            } else {
                if (flush_outstanding_data(data)) {
                    goto errout;
                }
            }

            offset += info.block_size;

            /* Write data if we are aligned or buffer size reached */
            if (offset % (info.block_size * BLOCKS_AT_A_TIME) == 0
                || data->count == BLOCKS_AT_A_TIME) {
                if (flush_outstanding_data(data)) {
                    goto errout;
                }
            }

            if (!is_battery_ok_to_continue()) {
                SLOGE("Stopping encryption due to low battery");
                rc = 0;
                goto errout;
            }

        }
        if (flush_outstanding_data(data)) {
            goto errout;
        }
    }

    data->completed = 1;
    rc = 0;

errout:
    log_progress(0, true);
    free(data->buffer);
    free(block_bitmap);
    return rc;
}

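/*
 * In-place encrypt an ext4 filesystem: open both block devices, parse the
 * ext4 superblock and group descriptors via read_ext(), then hand off to
 * encrypt_groups() so that only allocated blocks are copied through the
 * crypto block device. On success, *size_already_done is advanced by the
 * amount actually encrypted.
 */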
static int cryptfs_enable_inplace_ext4(char *crypto_blkdev,
                                       char *real_blkdev,
                                       off64_t size,
                                       off64_t *size_already_done,
                                       off64_t tot_size,
                                       off64_t previously_encrypted_upto)
{
    u32 i;
    struct encryptGroupsData data;
    int rc; // Can't initialize without causing warning -Wclobbered
    int retries = RETRY_MOUNT_ATTEMPTS;
    struct timespec time_started = {0};

    if (previously_encrypted_upto > *size_already_done) {
        SLOGD("Not fast encrypting since resuming part way through");
        return -1;
    }

    memset(&data, 0, sizeof(data));
    data.real_blkdev = real_blkdev;
    data.crypto_blkdev = crypto_blkdev;
    // Initialize to -1 so the errout path never close()s fd 0 if an open
    // below fails (mirrors the f2fs variant).
    data.realfd = -1;
    data.cryptofd = -1;

    if ( (data.realfd = open(real_blkdev, O_RDWR|O_CLOEXEC)) < 0) {
        SLOGE("Error opening real_blkdev %s for inplace encrypt. err=%d(%s)\n",
              real_blkdev, errno, strerror(errno));
        rc = -1;
        goto errout;
    }

    // Wait until the block device appears. Reuse the mount retry values
    // since they are reasonable here too.
    while ((data.cryptofd = open(crypto_blkdev, O_WRONLY|O_CLOEXEC)) < 0) {
        if (--retries) {
            SLOGE("Error opening crypto_blkdev %s for ext4 inplace encrypt. err=%d(%s), retrying\n",
                  crypto_blkdev, errno, strerror(errno));
            sleep(RETRY_MOUNT_DELAY_SECONDS);
        } else {
            SLOGE("Error opening crypto_blkdev %s for ext4 inplace encrypt. err=%d(%s)\n",
                  crypto_blkdev, errno, strerror(errno));
            rc = ENABLE_INPLACE_ERR_DEV;
            goto errout;
        }
    }

    if (setjmp(setjmp_env)) { // NOLINT
        SLOGE("Reading ext4 extent caused an exception\n");
        rc = -1;
        goto errout;
    }

    if (read_ext(data.realfd, 0) != 0) {
        SLOGE("Failed to read ext4 extent\n");
        rc = -1;
        goto errout;
    }

    data.numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
    data.tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
    data.blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;

    SLOGI("Encrypting ext4 filesystem in place...");

    data.tot_used_blocks = data.numblocks;
    for (i = 0; i < aux_info.groups; ++i) {
        data.tot_used_blocks -= aux_info.bg_desc[i].bg_free_blocks_count;
    }

    data.one_pct = data.tot_used_blocks / 100;
    data.cur_pct = 0;

    if (clock_gettime(CLOCK_MONOTONIC, &time_started)) {
        SLOGW("Error getting time at start");
        // Note - continue anyway - we'll run with 0
    }
    data.time_started = time_started.tv_sec;
    data.remaining_time = -1;

    rc = encrypt_groups(&data);
    if (rc) {
        SLOGE("Error encrypting groups");
        goto errout;
    }

    *size_already_done += data.completed ? size : data.last_written_sector;
    rc = 0;

errout:
    close(data.realfd);
    close(data.cryptofd);

    return rc;
}

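/*
 * f2fs counterpart of log_progress(): coalesce consecutive block numbers
 * into one "Encrypting from" / "Encrypted to" log pair. Call with
 * completed == true to close out the final range.
 */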
static void log_progress_f2fs(u64 block, bool completed)
{
    // When completed is true the block argument is ignored; we only close
    // out whatever range is currently open.

    // Track progress so we can skip logging blocks
    static u64 last_block = (u64)-1;

    // Need to close existing 'Encrypting from' log?
    if (completed || (last_block != (u64)-1 && block != last_block + 1)) {
        SLOGI("Encrypted to block %" PRId64, last_block);
        last_block = -1;
    }

    // Need to start new 'Encrypting from' log?
    if (!completed && (last_block == (u64)-1 || block != last_block + 1)) {
        SLOGI("Encrypting from block %" PRId64, block);
    }

    // Update offset
    if (!completed) {
        last_block = block;
    }
}

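/*
 * Callback invoked by run_on_used_blocks() for every in-use f2fs block:
 * read one block from the real device at the given position, write it back
 * through the crypto device, and update progress.
 */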
static int encrypt_one_block_f2fs(u64 pos, void *data)
{
    struct encryptGroupsData *priv_dat = (struct encryptGroupsData *)data;

    priv_dat->blocks_already_done = pos - 1;
    update_progress(priv_dat, 1);

    off64_t offset = pos * CRYPT_INPLACE_BUFSIZE;

    if (pread64(priv_dat->realfd, priv_dat->buffer, CRYPT_INPLACE_BUFSIZE, offset) <= 0) {
        SLOGE("Error reading real_blkdev %s for f2fs inplace encrypt", priv_dat->real_blkdev);
        return -1;
    }

    if (pwrite64(priv_dat->cryptofd, priv_dat->buffer, CRYPT_INPLACE_BUFSIZE, offset) <= 0) {
        SLOGE("Error writing crypto_blkdev %s for f2fs inplace encrypt", priv_dat->crypto_blkdev);
        return -1;
    } else {
        log_progress_f2fs(pos, false);
    }

    return 0;
}

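/*
 * In-place encrypt an f2fs filesystem: parse its metadata with
 * generate_f2fs_info() and let run_on_used_blocks() drive
 * encrypt_one_block_f2fs() over every allocated block, skipping free space.
 */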
static int cryptfs_enable_inplace_f2fs(char *crypto_blkdev,
                                       char *real_blkdev,
                                       off64_t size,
                                       off64_t *size_already_done,
                                       off64_t tot_size,
                                       off64_t previously_encrypted_upto)
{
    struct encryptGroupsData data;
    struct f2fs_info *f2fs_info = NULL;
    int rc = ENABLE_INPLACE_ERR_OTHER;
    if (previously_encrypted_upto > *size_already_done) {
        SLOGD("Not fast encrypting since resuming part way through");
        return ENABLE_INPLACE_ERR_OTHER;
    }
    memset(&data, 0, sizeof(data));
    data.real_blkdev = real_blkdev;
    data.crypto_blkdev = crypto_blkdev;
    data.realfd = -1;
    data.cryptofd = -1;
    if ( (data.realfd = open64(real_blkdev, O_RDWR|O_CLOEXEC)) < 0) {
        SLOGE("Error opening real_blkdev %s for f2fs inplace encrypt\n",
              real_blkdev);
        goto errout;
    }
    if ( (data.cryptofd = open64(crypto_blkdev, O_WRONLY|O_CLOEXEC)) < 0) {
        SLOGE("Error opening crypto_blkdev %s for f2fs inplace encrypt. err=%d(%s)\n",
              crypto_blkdev, errno, strerror(errno));
        rc = ENABLE_INPLACE_ERR_DEV;
        goto errout;
    }

    f2fs_info = generate_f2fs_info(data.realfd);
    if (!f2fs_info)
        goto errout;

    data.numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
    data.tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
    data.blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;

    data.tot_used_blocks = get_num_blocks_used(f2fs_info);

    data.one_pct = data.tot_used_blocks / 100;
    data.cur_pct = 0;
    data.time_started = time(NULL);
    data.remaining_time = -1;

    data.buffer = (char*) malloc(f2fs_info->block_size);
    if (!data.buffer) {
        SLOGE("Failed to allocate crypto buffer");
        goto errout;
    }

    data.count = 0;

    /* Currently, this either runs to completion, or hits a nonrecoverable error */
    rc = run_on_used_blocks(data.blocks_already_done, f2fs_info, &encrypt_one_block_f2fs, &data);

    if (rc) {
        SLOGE("Error in running over f2fs blocks");
        rc = ENABLE_INPLACE_ERR_OTHER;
        goto errout;
    }

    *size_already_done += size;
    rc = 0;

errout:
    if (rc)
        SLOGE("Failed to encrypt f2fs filesystem on %s", real_blkdev);

    log_progress_f2fs(0, true);
    free(f2fs_info);
    free(data.buffer);
    close(data.realfd);
    close(data.cryptofd);

    return rc;
}

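/*
 * Filesystem-agnostic fallback: copy every sector of the partition through
 * the crypto block device, used blocks and free space alike. Slowest path,
 * but it works regardless of filesystem type.
 */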
static int cryptfs_enable_inplace_full(char *crypto_blkdev, char *real_blkdev,
                                       off64_t size, off64_t *size_already_done,
                                       off64_t tot_size,
                                       off64_t previously_encrypted_upto)
{
    int realfd, cryptofd;
    char buf[CRYPT_INPLACE_BUFSIZE];
    int rc = ENABLE_INPLACE_ERR_OTHER;
    off64_t numblocks, i, remainder;
    off64_t one_pct, cur_pct, new_pct;
    off64_t blocks_already_done, tot_numblocks;

    if ( (realfd = open(real_blkdev, O_RDONLY|O_CLOEXEC)) < 0) {
        SLOGE("Error opening real_blkdev %s for inplace encrypt\n", real_blkdev);
        return ENABLE_INPLACE_ERR_OTHER;
    }

    if ( (cryptofd = open(crypto_blkdev, O_WRONLY|O_CLOEXEC)) < 0) {
        SLOGE("Error opening crypto_blkdev %s for inplace encrypt. err=%d(%s)\n",
              crypto_blkdev, errno, strerror(errno));
        close(realfd);
        return ENABLE_INPLACE_ERR_DEV;
    }

    /* This is pretty much a simple loop of reading 4K, and writing 4K.
     * The size passed in is the number of 512 byte sectors in the filesystem.
     * So compute the number of whole 4K blocks we should read/write,
     * and the remainder.
     */
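    /* Per the comment above (4 KiB buffers, 512-byte sectors),
     * CRYPT_SECTORS_PER_BUFSIZE is 8; e.g. size = 1003 sectors gives
     * numblocks = 125 and remainder = 3.
     */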
    numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
    remainder = size % CRYPT_SECTORS_PER_BUFSIZE;
    tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
    blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;

    SLOGE("Encrypting filesystem in place...");

    i = previously_encrypted_upto + 1 - *size_already_done;

    if (lseek64(realfd, i * CRYPT_SECTOR_SIZE, SEEK_SET) < 0) {
        SLOGE("Cannot seek to previously encrypted point on %s", real_blkdev);
        goto errout;
    }

    if (lseek64(cryptofd, i * CRYPT_SECTOR_SIZE, SEEK_SET) < 0) {
        SLOGE("Cannot seek to previously encrypted point on %s", crypto_blkdev);
        goto errout;
    }

    for (;i < size && i % CRYPT_SECTORS_PER_BUFSIZE != 0; ++i) {
        if (unix_read(realfd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            SLOGE("Error reading initial sectors from real_blkdev %s for "
                  "inplace encrypt\n", real_blkdev);
            goto errout;
        }
        if (unix_write(cryptofd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            SLOGE("Error writing initial sectors to crypto_blkdev %s for "
                  "inplace encrypt\n", crypto_blkdev);
            goto errout;
        } else {
            SLOGI("Encrypted 1 block at %" PRId64, i);
        }
    }

    one_pct = tot_numblocks / 100;
    cur_pct = 0;
    /* process the majority of the filesystem in blocks */
    for (i/=CRYPT_SECTORS_PER_BUFSIZE; i<numblocks; i++) {
        new_pct = (i + blocks_already_done) / one_pct;
        if (new_pct > cur_pct) {
            char buf[8];

            cur_pct = new_pct;
            snprintf(buf, sizeof(buf), "%" PRId64, cur_pct);
            property_set("vold.encrypt_progress", buf);
        }
        if (unix_read(realfd, buf, CRYPT_INPLACE_BUFSIZE) <= 0) {
            SLOGE("Error reading real_blkdev %s for inplace encrypt", real_blkdev);
            goto errout;
        }
        if (unix_write(cryptofd, buf, CRYPT_INPLACE_BUFSIZE) <= 0) {
            SLOGE("Error writing crypto_blkdev %s for inplace encrypt", crypto_blkdev);
            goto errout;
        } else {
            SLOGD("Encrypted %d block at %" PRId64,
                  CRYPT_SECTORS_PER_BUFSIZE,
                  i * CRYPT_SECTORS_PER_BUFSIZE);
        }

        if (!is_battery_ok_to_continue()) {
            SLOGE("Stopping encryption due to low battery");
            *size_already_done += (i + 1) * CRYPT_SECTORS_PER_BUFSIZE - 1;
            rc = 0;
            goto errout;
        }
    }

    /* Do any remaining sectors */
    for (i=0; i<remainder; i++) {
        if (unix_read(realfd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            SLOGE("Error reading final sectors from real_blkdev %s for inplace encrypt", real_blkdev);
            goto errout;
        }
        if (unix_write(cryptofd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            SLOGE("Error writing final sectors to crypto_blkdev %s for inplace encrypt", crypto_blkdev);
            goto errout;
        } else {
            SLOGI("Encrypted 1 block at next location");
        }
    }

    *size_already_done += size;
    rc = 0;

errout:
    close(realfd);
    close(cryptofd);

    return rc;
}

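/*
 * Top-level entry point: try the filesystem-aware encrypters first (ext4,
 * then f2fs) so that only allocated blocks are copied, and fall back to a
 * full raw copy of the device if neither recognizes the filesystem.
 */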
/* Returns one of the ENABLE_INPLACE_* return codes. */
int cryptfs_enable_inplace(char *crypto_blkdev, char *real_blkdev,
                           off64_t size, off64_t *size_already_done,
                           off64_t tot_size,
                           off64_t previously_encrypted_upto)
{
    int rc_ext4, rc_f2fs, rc_full;
    if (previously_encrypted_upto) {
        SLOGD("Continuing encryption from %" PRId64, previously_encrypted_upto);
    }

    if (*size_already_done + size < previously_encrypted_upto) {
        *size_already_done += size;
        return 0;
    }

    /* TODO: identify filesystem type.
     * As is, cryptfs_enable_inplace_ext4 will fail on an f2fs partition, and
     * then we will drop down to cryptfs_enable_inplace_f2fs.
     */
    if ((rc_ext4 = cryptfs_enable_inplace_ext4(crypto_blkdev, real_blkdev,
                                               size, size_already_done,
                                               tot_size, previously_encrypted_upto)) == 0) {
        return 0;
    }
    SLOGD("cryptfs_enable_inplace_ext4()=%d\n", rc_ext4);

    if ((rc_f2fs = cryptfs_enable_inplace_f2fs(crypto_blkdev, real_blkdev,
                                               size, size_already_done,
                                               tot_size, previously_encrypted_upto)) == 0) {
        return 0;
    }
    SLOGD("cryptfs_enable_inplace_f2fs()=%d\n", rc_f2fs);

    rc_full = cryptfs_enable_inplace_full(crypto_blkdev, real_blkdev,
                                          size, size_already_done, tot_size,
                                          previously_encrypted_upto);
    SLOGD("cryptfs_enable_inplace_full()=%d\n", rc_full);

    /* Hack for b/17898962, the following is the symptom... */
    if (rc_ext4 == ENABLE_INPLACE_ERR_DEV
        && rc_f2fs == ENABLE_INPLACE_ERR_DEV
        && rc_full == ENABLE_INPLACE_ERR_DEV) {
        return ENABLE_INPLACE_ERR_DEV;
    }
    return rc_full;
}