blob: 16a108c6391bdfc2f959046af499a610b74d4efe [file] [log] [blame]
Paul Crowleyf71ace32016-06-02 11:01:19 -07001/*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17#include "EncryptInplace.h"
18
19#include <stdio.h>
20#include <stdint.h>
Paul Crowleyf71ace32016-06-02 11:01:19 -070021#include <inttypes.h>
22#include <time.h>
23#include <sys/types.h>
24#include <sys/stat.h>
25#include <fcntl.h>
26#include <ext4_utils/ext4.h>
27#include <ext4_utils/ext4_utils.h>
28#include <f2fs_sparseblock.h>
29
30#include <algorithm>
31
32#include "cutils/properties.h"
33#define LOG_TAG "EncryptInplace"
34#include "cutils/log.h"
Paul Crowleyf71ace32016-06-02 11:01:19 -070035
36// HORRIBLE HACK, FIXME
37#include "cryptfs.h"
38
/* read(2) wrapper that retries the call when it is interrupted by a
 * signal (EINTR), so callers never observe a spurious short failure. */
static inline int unix_read(int fd, void* buff, int len)
{
    int n;
    do {
        n = read(fd, buff, len);
    } while (n < 0 && errno == EINTR);
    return n;
}
44
/* write(2) wrapper that retries the call when it is interrupted by a
 * signal (EINTR), mirroring unix_read above. */
static inline int unix_write(int fd, const void* buff, int len)
{
    int n;
    do {
        n = write(fd, buff, len);
    } while (n < 0 && errno == EINTR);
    return n;
}
49
50#define CRYPT_SECTORS_PER_BUFSIZE (CRYPT_INPLACE_BUFSIZE / CRYPT_SECTOR_SIZE)
51
52/* aligned 32K writes tends to make flash happy.
53 * SD card association recommends it.
54 */
55#ifndef CONFIG_HW_DISK_ENCRYPTION
56#define BLOCKS_AT_A_TIME 8
57#else
58#define BLOCKS_AT_A_TIME 1024
59#endif
60
/* Shared bookkeeping for the in-place encryption passes (ext4, f2fs and
 * the full-device fallback).  One instance is zeroed and filled in per run. */
struct encryptGroupsData
{
    int realfd;                 /* fd of the plaintext (real) block device */
    int cryptofd;               /* fd of the dm-crypt block device */
    off64_t numblocks;          /* buffer-sized blocks covered by this invocation */
    off64_t one_pct, cur_pct, new_pct;  /* progress-property bookkeeping (percent) */
    off64_t blocks_already_done, tot_numblocks;
    off64_t used_blocks_already_done, tot_used_blocks; /* only in-use blocks, for ETA */
    char* real_blkdev, * crypto_blkdev; /* device paths, used in log messages */
    int count;                  /* blocks currently staged in buffer, pending flush */
    off64_t offset;             /* byte offset of the first staged block */
    char* buffer;               /* staging buffer for read-then-write copies */
    off64_t last_written_sector;/* resume point recorded by flush_outstanding_data() */
    int completed;              /* set to 1 when the whole pass finished */
    time_t time_started;        /* start time: CLOCK_MONOTONIC secs in the ext4 path,
                                 * time(NULL) in the f2fs path */
    int remaining_time;         /* last ETA pushed to vold.encrypt_time_remaining; -1 = unset */
};
78
79static void update_progress(struct encryptGroupsData* data, int is_used)
80{
81 data->blocks_already_done++;
82
83 if (is_used) {
84 data->used_blocks_already_done++;
85 }
86 if (data->tot_used_blocks) {
87 data->new_pct = data->used_blocks_already_done / data->one_pct;
88 } else {
89 data->new_pct = data->blocks_already_done / data->one_pct;
90 }
91
92 if (data->new_pct > data->cur_pct) {
93 char buf[8];
94 data->cur_pct = data->new_pct;
95 snprintf(buf, sizeof(buf), "%" PRId64, data->cur_pct);
96 property_set("vold.encrypt_progress", buf);
97 }
98
99 if (data->cur_pct >= 5) {
100 struct timespec time_now;
101 if (clock_gettime(CLOCK_MONOTONIC, &time_now)) {
102 SLOGW("Error getting time");
103 } else {
104 double elapsed_time = difftime(time_now.tv_sec, data->time_started);
105 off64_t remaining_blocks = data->tot_used_blocks
106 - data->used_blocks_already_done;
107 int remaining_time = (int)(elapsed_time * remaining_blocks
108 / data->used_blocks_already_done);
109
110 // Change time only if not yet set, lower, or a lot higher for
111 // best user experience
112 if (data->remaining_time == -1
113 || remaining_time < data->remaining_time
114 || remaining_time > data->remaining_time + 60) {
115 char buf[8];
116 snprintf(buf, sizeof(buf), "%d", remaining_time);
117 property_set("vold.encrypt_time_remaining", buf);
118 data->remaining_time = remaining_time;
119 }
120 }
121 }
122}
123
/* Coalesce per-flush progress logging for the ext4 path: a run of
 * contiguous flushes produces a single "Encrypting from" line and a
 * single "Encrypted to" line instead of one line per flush.
 *
 * Precondition - if completed data = 0 else data != 0
 * (with completed == true only the static state is read, so passing
 * NULL for data is safe).
 */
static void log_progress(struct encryptGroupsData const* data, bool completed)
{
    // Track progress so we can skip logging blocks.
    // Byte offset one past the last logged range; -1 = no open run.
    static off64_t offset = -1;

    // Need to close existing 'Encrypting from' log?
    // (Finished, or the new range is not adjacent to the previous one.)
    if (completed || (offset != -1 && data->offset != offset)) {
        SLOGI("Encrypted to sector %" PRId64,
              offset / info.block_size * CRYPT_SECTOR_SIZE);
        offset = -1;
    }

    // Need to start new 'Encrypting from' log?
    if (!completed && offset != data->offset) {
        SLOGI("Encrypting from sector %" PRId64,
              data->offset / info.block_size * CRYPT_SECTOR_SIZE);
    }

    // Update offset to one past the range being written now.
    if (!completed) {
        offset = data->offset + (off64_t)data->count * info.block_size;
    }
}
149
150static int flush_outstanding_data(struct encryptGroupsData* data)
151{
152 if (data->count == 0) {
153 return 0;
154 }
155
156 SLOGV("Copying %d blocks at offset %" PRIx64, data->count, data->offset);
157
158 if (pread64(data->realfd, data->buffer,
159 info.block_size * data->count, data->offset)
160 <= 0) {
161 SLOGE("Error reading real_blkdev %s for inplace encrypt",
162 data->real_blkdev);
163 return -1;
164 }
165
166 if (pwrite64(data->cryptofd, data->buffer,
167 info.block_size * data->count, data->offset)
168 <= 0) {
169 SLOGE("Error writing crypto_blkdev %s for inplace encrypt",
170 data->crypto_blkdev);
171 return -1;
172 } else {
173 log_progress(data, false);
174 }
175
176 data->count = 0;
177 data->last_written_sector = (data->offset + data->count)
178 / info.block_size * CRYPT_SECTOR_SIZE - 1;
179 return 0;
180}
181
/* Walk every ext4 block group, copying only the blocks marked in-use by
 * the group's block bitmap from the real device to the crypto device.
 * Relies on the libext4_utils globals info / aux_info populated by a
 * prior read_ext() call.  Returns 0 on success, -1 on failure. */
static int encrypt_groups(struct encryptGroupsData* data)
{
    unsigned int i;
    u8 *block_bitmap = 0;
    unsigned int block;
    off64_t ret;
    int rc = -1;

    /* Staging buffer large enough for BLOCKS_AT_A_TIME filesystem blocks. */
    data->buffer = (char*) malloc(info.block_size * BLOCKS_AT_A_TIME);
    if (!data->buffer) {
        SLOGE("Failed to allocate crypto buffer");
        goto errout;
    }

    block_bitmap = (u8*) malloc(info.block_size);
    if (!block_bitmap) {
        SLOGE("failed to allocate block bitmap");
        goto errout;
    }

    for (i = 0; i < aux_info.groups; ++i) {
        SLOGI("Encrypting group %d", i);

        u32 first_block = aux_info.first_data_block + i * info.blocks_per_group;
        /* The final group may contain fewer than blocks_per_group blocks. */
        u32 block_count = std::min(info.blocks_per_group,
                                   (u32)(aux_info.len_blocks - first_block));

        /* Read this group's block bitmap from disk. */
        off64_t offset = (u64)info.block_size
                         * aux_info.bg_desc[i].bg_block_bitmap;

        ret = pread64(data->realfd, block_bitmap, info.block_size, offset);
        if (ret != (int)info.block_size) {
            SLOGE("failed to read all of block group bitmap %d", i);
            goto errout;
        }

        offset = (u64)info.block_size * first_block;

        data->count = 0;

        for (block = 0; block < block_count; block++) {
            /* A group flagged BLOCK_UNINIT has no initialized bitmap:
             * all of its blocks are treated as free. */
            int used = (aux_info.bg_desc[i].bg_flags & EXT4_BG_BLOCK_UNINIT) ?
                       0 : bitmap_get_bit(block_bitmap, block);
            update_progress(data, used);
            if (used) {
                if (data->count == 0) {
                    data->offset = offset;
                }
                data->count++;
            } else {
                /* Free block ends the current contiguous run: flush it. */
                if (flush_outstanding_data(data)) {
                    goto errout;
                }
            }

            offset += info.block_size;

            /* Write data if we are aligned or buffer size reached */
            if (offset % (info.block_size * BLOCKS_AT_A_TIME) == 0
                || data->count == BLOCKS_AT_A_TIME) {
                if (flush_outstanding_data(data)) {
                    goto errout;
                }
            }
        }
        /* Flush whatever is still staged at the end of the group. */
        if (flush_outstanding_data(data)) {
            goto errout;
        }
    }

    data->completed = 1;
    rc = 0;

errout:
    /* Emit the closing "Encrypted to" log line (NULL is allowed here). */
    log_progress(0, true);
    free(data->buffer);
    free(block_bitmap);
    return rc;
}
261
262static int cryptfs_enable_inplace_ext4(char *crypto_blkdev,
263 char *real_blkdev,
264 off64_t size,
265 off64_t *size_already_done,
266 off64_t tot_size,
267 off64_t previously_encrypted_upto)
268{
269 u32 i;
270 struct encryptGroupsData data;
271 int rc; // Can't initialize without causing warning -Wclobbered
272 int retries = RETRY_MOUNT_ATTEMPTS;
273 struct timespec time_started = {0};
274
275 if (previously_encrypted_upto > *size_already_done) {
276 SLOGD("Not fast encrypting since resuming part way through");
277 return -1;
278 }
279
280 memset(&data, 0, sizeof(data));
281 data.real_blkdev = real_blkdev;
282 data.crypto_blkdev = crypto_blkdev;
283
284 if ( (data.realfd = open(real_blkdev, O_RDWR|O_CLOEXEC)) < 0) {
285 SLOGE("Error opening real_blkdev %s for inplace encrypt. err=%d(%s)\n",
286 real_blkdev, errno, strerror(errno));
287 rc = -1;
288 goto errout;
289 }
290
291 // Wait until the block device appears. Re-use the mount retry values since it is reasonable.
292 while ((data.cryptofd = open(crypto_blkdev, O_WRONLY|O_CLOEXEC)) < 0) {
293 if (--retries) {
294 SLOGE("Error opening crypto_blkdev %s for ext4 inplace encrypt. err=%d(%s), retrying\n",
295 crypto_blkdev, errno, strerror(errno));
296 sleep(RETRY_MOUNT_DELAY_SECONDS);
297 } else {
298 SLOGE("Error opening crypto_blkdev %s for ext4 inplace encrypt. err=%d(%s)\n",
299 crypto_blkdev, errno, strerror(errno));
300 rc = ENABLE_INPLACE_ERR_DEV;
301 goto errout;
302 }
303 }
304
305 if (setjmp(setjmp_env)) { // NOLINT
306 SLOGE("Reading ext4 extent caused an exception\n");
307 rc = -1;
308 goto errout;
309 }
310
311 if (read_ext(data.realfd, 0) != 0) {
312 SLOGE("Failed to read ext4 extent\n");
313 rc = -1;
314 goto errout;
315 }
316
317 data.numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
318 data.tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
319 data.blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;
320
321 SLOGI("Encrypting ext4 filesystem in place...");
322
323 data.tot_used_blocks = data.numblocks;
324 for (i = 0; i < aux_info.groups; ++i) {
325 data.tot_used_blocks -= aux_info.bg_desc[i].bg_free_blocks_count;
326 }
327
328 data.one_pct = data.tot_used_blocks / 100;
329 data.cur_pct = 0;
330
331 if (clock_gettime(CLOCK_MONOTONIC, &time_started)) {
332 SLOGW("Error getting time at start");
333 // Note - continue anyway - we'll run with 0
334 }
335 data.time_started = time_started.tv_sec;
336 data.remaining_time = -1;
337
338 rc = encrypt_groups(&data);
339 if (rc) {
340 SLOGE("Error encrypting groups");
341 goto errout;
342 }
343
344 *size_already_done += data.completed ? size : data.last_written_sector;
345 rc = 0;
346
347errout:
348 close(data.realfd);
349 close(data.cryptofd);
350
351 return rc;
352}
353
354static void log_progress_f2fs(u64 block, bool completed)
355{
356 // Precondition - if completed data = 0 else data != 0
357
358 // Track progress so we can skip logging blocks
359 static u64 last_block = (u64)-1;
360
361 // Need to close existing 'Encrypting from' log?
362 if (completed || (last_block != (u64)-1 && block != last_block + 1)) {
363 SLOGI("Encrypted to block %" PRId64, last_block);
364 last_block = -1;
365 }
366
367 // Need to start new 'Encrypting from' log?
368 if (!completed && (last_block == (u64)-1 || block != last_block + 1)) {
369 SLOGI("Encrypting from block %" PRId64, block);
370 }
371
372 // Update offset
373 if (!completed) {
374 last_block = block;
375 }
376}
377
378static int encrypt_one_block_f2fs(u64 pos, void *data)
379{
380 struct encryptGroupsData *priv_dat = (struct encryptGroupsData *)data;
381
382 priv_dat->blocks_already_done = pos - 1;
383 update_progress(priv_dat, 1);
384
385 off64_t offset = pos * CRYPT_INPLACE_BUFSIZE;
386
387 if (pread64(priv_dat->realfd, priv_dat->buffer, CRYPT_INPLACE_BUFSIZE, offset) <= 0) {
388 SLOGE("Error reading real_blkdev %s for f2fs inplace encrypt", priv_dat->crypto_blkdev);
389 return -1;
390 }
391
392 if (pwrite64(priv_dat->cryptofd, priv_dat->buffer, CRYPT_INPLACE_BUFSIZE, offset) <= 0) {
393 SLOGE("Error writing crypto_blkdev %s for f2fs inplace encrypt", priv_dat->crypto_blkdev);
394 return -1;
395 } else {
396 log_progress_f2fs(pos, false);
397 }
398
399 return 0;
400}
401
/* In-place encrypt an f2fs filesystem by visiting only its used blocks
 * via libf2fs_sparseblock.  Returns 0 on success,
 * ENABLE_INPLACE_ERR_DEV if the crypto device cannot be opened, or
 * ENABLE_INPLACE_ERR_OTHER on any other failure. */
static int cryptfs_enable_inplace_f2fs(char *crypto_blkdev,
                                       char *real_blkdev,
                                       off64_t size,
                                       off64_t *size_already_done,
                                       off64_t tot_size,
                                       off64_t previously_encrypted_upto)
{
    struct encryptGroupsData data;
    struct f2fs_info *f2fs_info = NULL;
    int rc = ENABLE_INPLACE_ERR_OTHER;
    /* Resuming part-way through is not supported by this fast path. */
    if (previously_encrypted_upto > *size_already_done) {
        SLOGD("Not fast encrypting since resuming part way through");
        return ENABLE_INPLACE_ERR_OTHER;
    }
    memset(&data, 0, sizeof(data));
    data.real_blkdev = real_blkdev;
    data.crypto_blkdev = crypto_blkdev;
    /* Pre-set to -1 so the errout path never close()s fd 0. */
    data.realfd = -1;
    data.cryptofd = -1;
    if ( (data.realfd = open64(real_blkdev, O_RDWR|O_CLOEXEC)) < 0) {
        SLOGE("Error opening real_blkdev %s for f2fs inplace encrypt\n",
              real_blkdev);
        goto errout;
    }
    if ( (data.cryptofd = open64(crypto_blkdev, O_WRONLY|O_CLOEXEC)) < 0) {
        SLOGE("Error opening crypto_blkdev %s for f2fs inplace encrypt. err=%d(%s)\n",
              crypto_blkdev, errno, strerror(errno));
        rc = ENABLE_INPLACE_ERR_DEV;
        goto errout;
    }

    /* Parse the f2fs superblock / metadata from the real device. */
    f2fs_info = generate_f2fs_info(data.realfd);
    if (!f2fs_info)
        goto errout;

    data.numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
    data.tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
    data.blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;

    data.tot_used_blocks = get_num_blocks_used(f2fs_info);

    data.one_pct = data.tot_used_blocks / 100;
    data.cur_pct = 0;
    data.time_started = time(NULL);
    data.remaining_time = -1;

    /* NOTE(review): buffer is sized f2fs_info->block_size, but
     * encrypt_one_block_f2fs copies CRYPT_INPLACE_BUFSIZE bytes per
     * block — this assumes block_size == CRYPT_INPLACE_BUFSIZE (4K);
     * confirm against f2fs_sparseblock. */
    data.buffer = (char*) malloc(f2fs_info->block_size);
    if (!data.buffer) {
        SLOGE("Failed to allocate crypto buffer");
        goto errout;
    }

    data.count = 0;

    /* Currently, this either runs to completion, or hits a nonrecoverable error */
    rc = run_on_used_blocks(data.blocks_already_done, f2fs_info, &encrypt_one_block_f2fs, &data);

    if (rc) {
        SLOGE("Error in running over f2fs blocks");
        rc = ENABLE_INPLACE_ERR_OTHER;
        goto errout;
    }

    *size_already_done += size;
    rc = 0;

errout:
    if (rc)
        SLOGE("Failed to encrypt f2fs filesystem on %s", real_blkdev);

    /* Flush the final progress log line; free(NULL)/close(-1) are benign. */
    log_progress_f2fs(0, true);
    free(f2fs_info);
    free(data.buffer);
    close(data.realfd);
    close(data.cryptofd);

    return rc;
}
480
481static int cryptfs_enable_inplace_full(char *crypto_blkdev, char *real_blkdev,
482 off64_t size, off64_t *size_already_done,
483 off64_t tot_size,
484 off64_t previously_encrypted_upto)
485{
486 int realfd, cryptofd;
487 char *buf[CRYPT_INPLACE_BUFSIZE];
488 int rc = ENABLE_INPLACE_ERR_OTHER;
489 off64_t numblocks, i, remainder;
490 off64_t one_pct, cur_pct, new_pct;
491 off64_t blocks_already_done, tot_numblocks;
492
493 if ( (realfd = open(real_blkdev, O_RDONLY|O_CLOEXEC)) < 0) {
494 SLOGE("Error opening real_blkdev %s for inplace encrypt\n", real_blkdev);
495 return ENABLE_INPLACE_ERR_OTHER;
496 }
497
498 if ( (cryptofd = open(crypto_blkdev, O_WRONLY|O_CLOEXEC)) < 0) {
499 SLOGE("Error opening crypto_blkdev %s for inplace encrypt. err=%d(%s)\n",
500 crypto_blkdev, errno, strerror(errno));
501 close(realfd);
502 return ENABLE_INPLACE_ERR_DEV;
503 }
504
505 /* This is pretty much a simple loop of reading 4K, and writing 4K.
506 * The size passed in is the number of 512 byte sectors in the filesystem.
507 * So compute the number of whole 4K blocks we should read/write,
508 * and the remainder.
509 */
510 numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
511 remainder = size % CRYPT_SECTORS_PER_BUFSIZE;
512 tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
513 blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;
514
515 SLOGE("Encrypting filesystem in place...");
516
517 i = previously_encrypted_upto + 1 - *size_already_done;
518
519 if (lseek64(realfd, i * CRYPT_SECTOR_SIZE, SEEK_SET) < 0) {
520 SLOGE("Cannot seek to previously encrypted point on %s", real_blkdev);
521 goto errout;
522 }
523
524 if (lseek64(cryptofd, i * CRYPT_SECTOR_SIZE, SEEK_SET) < 0) {
525 SLOGE("Cannot seek to previously encrypted point on %s", crypto_blkdev);
526 goto errout;
527 }
528
529 for (;i < size && i % CRYPT_SECTORS_PER_BUFSIZE != 0; ++i) {
530 if (unix_read(realfd, buf, CRYPT_SECTOR_SIZE) <= 0) {
531 SLOGE("Error reading initial sectors from real_blkdev %s for "
532 "inplace encrypt\n", crypto_blkdev);
533 goto errout;
534 }
535 if (unix_write(cryptofd, buf, CRYPT_SECTOR_SIZE) <= 0) {
536 SLOGE("Error writing initial sectors to crypto_blkdev %s for "
537 "inplace encrypt\n", crypto_blkdev);
538 goto errout;
539 } else {
540 SLOGI("Encrypted 1 block at %" PRId64, i);
541 }
542 }
543
544 one_pct = tot_numblocks / 100;
545 cur_pct = 0;
546 /* process the majority of the filesystem in blocks */
547 for (i/=CRYPT_SECTORS_PER_BUFSIZE; i<numblocks; i++) {
548 new_pct = (i + blocks_already_done) / one_pct;
549 if (new_pct > cur_pct) {
550 char buf[8];
551
552 cur_pct = new_pct;
553 snprintf(buf, sizeof(buf), "%" PRId64, cur_pct);
554 property_set("vold.encrypt_progress", buf);
555 }
556 if (unix_read(realfd, buf, CRYPT_INPLACE_BUFSIZE) <= 0) {
557 SLOGE("Error reading real_blkdev %s for inplace encrypt", crypto_blkdev);
558 goto errout;
559 }
560 if (unix_write(cryptofd, buf, CRYPT_INPLACE_BUFSIZE) <= 0) {
561 SLOGE("Error writing crypto_blkdev %s for inplace encrypt", crypto_blkdev);
562 goto errout;
563 } else {
564 SLOGD("Encrypted %d block at %" PRId64,
565 CRYPT_SECTORS_PER_BUFSIZE,
566 i * CRYPT_SECTORS_PER_BUFSIZE);
567 }
Paul Crowleyf71ace32016-06-02 11:01:19 -0700568 }
569
570 /* Do any remaining sectors */
571 for (i=0; i<remainder; i++) {
572 if (unix_read(realfd, buf, CRYPT_SECTOR_SIZE) <= 0) {
573 SLOGE("Error reading final sectors from real_blkdev %s for inplace encrypt", crypto_blkdev);
574 goto errout;
575 }
576 if (unix_write(cryptofd, buf, CRYPT_SECTOR_SIZE) <= 0) {
577 SLOGE("Error writing final sectors to crypto_blkdev %s for inplace encrypt", crypto_blkdev);
578 goto errout;
579 } else {
580 SLOGI("Encrypted 1 block at next location");
581 }
582 }
583
584 *size_already_done += size;
585 rc = 0;
586
587errout:
588 close(realfd);
589 close(cryptofd);
590
591 return rc;
592}
593
594/* returns on of the ENABLE_INPLACE_* return codes */
595int cryptfs_enable_inplace(char *crypto_blkdev, char *real_blkdev,
596 off64_t size, off64_t *size_already_done,
597 off64_t tot_size,
598 off64_t previously_encrypted_upto)
599{
600 int rc_ext4, rc_f2fs, rc_full;
601 if (previously_encrypted_upto) {
602 SLOGD("Continuing encryption from %" PRId64, previously_encrypted_upto);
603 }
604
605 if (*size_already_done + size < previously_encrypted_upto) {
606 *size_already_done += size;
607 return 0;
608 }
609
610 /* TODO: identify filesystem type.
611 * As is, cryptfs_enable_inplace_ext4 will fail on an f2fs partition, and
612 * then we will drop down to cryptfs_enable_inplace_f2fs.
613 * */
614 if ((rc_ext4 = cryptfs_enable_inplace_ext4(crypto_blkdev, real_blkdev,
615 size, size_already_done,
616 tot_size, previously_encrypted_upto)) == 0) {
617 return 0;
618 }
619 SLOGD("cryptfs_enable_inplace_ext4()=%d\n", rc_ext4);
620
621 if ((rc_f2fs = cryptfs_enable_inplace_f2fs(crypto_blkdev, real_blkdev,
622 size, size_already_done,
623 tot_size, previously_encrypted_upto)) == 0) {
624 return 0;
625 }
626 SLOGD("cryptfs_enable_inplace_f2fs()=%d\n", rc_f2fs);
627
628 rc_full = cryptfs_enable_inplace_full(crypto_blkdev, real_blkdev,
629 size, size_already_done, tot_size,
630 previously_encrypted_upto);
631 SLOGD("cryptfs_enable_inplace_full()=%d\n", rc_full);
632
633 /* Hack for b/17898962, the following is the symptom... */
634 if (rc_ext4 == ENABLE_INPLACE_ERR_DEV
635 && rc_f2fs == ENABLE_INPLACE_ERR_DEV
636 && rc_full == ENABLE_INPLACE_ERR_DEV) {
637 return ENABLE_INPLACE_ERR_DEV;
638 }
639 return rc_full;
640}