/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

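/*
 * EncryptInplace: copies the contents of a partition from its raw block
 * device (real_blkdev) onto the crypto block device stacked on top of it
 * (crypto_blkdev, typically a dm-crypt mapping), so that existing data ends
 * up encrypted "in place". There are filesystem-aware passes for ext4 and
 * f2fs that copy only in-use blocks, plus a filesystem-agnostic pass that
 * copies every sector.
 */
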
#include "EncryptInplace.h"

#include <stdio.h>
#include <stdint.h>
#include <inttypes.h>
#include <time.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <ext4_utils/ext4.h>
#include <ext4_utils/ext4_utils.h>
#include <f2fs_sparseblock.h>

#include <algorithm>

#include <android-base/logging.h>
#include <android-base/properties.h>

// HORRIBLE HACK, FIXME
#include "cryptfs.h"

// FIXME horrible cut-and-paste code
static inline int unix_read(int fd, void* buff, int len)
{
    return TEMP_FAILURE_RETRY(read(fd, buff, len));
}

static inline int unix_write(int fd, const void* buff, int len)
{
    return TEMP_FAILURE_RETRY(write(fd, buff, len));
}

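/* Number of 512-byte sectors covered by one copy buffer; with the 4K buffer
 * described in cryptfs_enable_inplace_full() below this works out to 8. */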
#define CRYPT_SECTORS_PER_BUFSIZE (CRYPT_INPLACE_BUFSIZE / CRYPT_SECTOR_SIZE)

/* Aligned 32K writes tend to make flash happy.
 * The SD card association recommends it.
 */
#ifndef CONFIG_HW_DISK_ENCRYPTION
#define BLOCKS_AT_A_TIME 8
#else
#define BLOCKS_AT_A_TIME 1024
#endif

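/* Shared state for one in-place encryption pass: file descriptors for the raw
 * and crypto block devices, progress/ETA bookkeeping, and a buffer used to
 * coalesce runs of adjacent in-use blocks into larger writes. */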
struct encryptGroupsData
{
    int realfd;
    int cryptofd;
    off64_t numblocks;
    off64_t one_pct, cur_pct, new_pct;
    off64_t blocks_already_done, tot_numblocks;
    off64_t used_blocks_already_done, tot_used_blocks;
    char* real_blkdev, * crypto_blkdev;
    int count;
    off64_t offset;
    char* buffer;
    off64_t last_written_sector;
    int completed;
    time_t time_started;
    int remaining_time;
};

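/* Called once per block visited. Bumps the progress counters, publishes the
 * completed percentage through the vold.encrypt_progress property, and once
 * past 5% extrapolates a remaining-time estimate into
 * vold.encrypt_time_remaining. */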
static void update_progress(struct encryptGroupsData* data, int is_used)
{
    data->blocks_already_done++;

    if (is_used) {
        data->used_blocks_already_done++;
    }
    if (data->tot_used_blocks) {
        data->new_pct = data->used_blocks_already_done / data->one_pct;
    } else {
        data->new_pct = data->blocks_already_done / data->one_pct;
    }

    if (data->new_pct > data->cur_pct) {
        char buf[8];
        data->cur_pct = data->new_pct;
        snprintf(buf, sizeof(buf), "%" PRId64, data->cur_pct);
        android::base::SetProperty("vold.encrypt_progress", buf);
    }

    if (data->cur_pct >= 5) {
        struct timespec time_now;
        if (clock_gettime(CLOCK_MONOTONIC, &time_now)) {
            LOG(WARNING) << "Error getting time";
        } else {
            double elapsed_time = difftime(time_now.tv_sec, data->time_started);
            off64_t remaining_blocks = data->tot_used_blocks
                                       - data->used_blocks_already_done;
            int remaining_time = (int)(elapsed_time * remaining_blocks
                                       / data->used_blocks_already_done);

            // Change time only if not yet set, lower, or a lot higher for
            // best user experience
            if (data->remaining_time == -1
                || remaining_time < data->remaining_time
                || remaining_time > data->remaining_time + 60) {
                char buf[8];
                snprintf(buf, sizeof(buf), "%d", remaining_time);
                android::base::SetProperty("vold.encrypt_time_remaining", buf);
                data->remaining_time = remaining_time;
            }
        }
    }
}

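/* Coalesces per-batch logging: a run of consecutive copies produces a single
 * "Encrypting from sector X" / "Encrypted to sector Y" pair instead of one
 * log line per flushed batch. */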
static void log_progress(struct encryptGroupsData const* data, bool completed)
{
    // Precondition: when completed is true, data is null; otherwise data is non-null.

    // Track progress so we can skip logging blocks
    static off64_t offset = -1;

    // Need to close existing 'Encrypting from' log?
    if (completed || (offset != -1 && data->offset != offset)) {
        LOG(INFO) << "Encrypted to sector " << offset / info.block_size * CRYPT_SECTOR_SIZE;
        offset = -1;
    }

    // Need to start new 'Encrypting from' log?
    if (!completed && offset != data->offset) {
        LOG(INFO) << "Encrypting from sector " << data->offset / info.block_size * CRYPT_SECTOR_SIZE;
    }

    // Update offset
    if (!completed) {
        offset = data->offset + (off64_t)data->count * info.block_size;
    }
}

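/* Writes the currently batched run of in-use blocks: reads data->count blocks
 * at data->offset from the raw device and writes them to the crypto device,
 * then resets the batch. Returns 0 on success, -1 on I/O error. */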
static int flush_outstanding_data(struct encryptGroupsData* data)
{
    if (data->count == 0) {
        return 0;
    }

    LOG(VERBOSE) << "Copying " << data->count << " blocks at offset " << data->offset;

    if (pread64(data->realfd, data->buffer, info.block_size * data->count, data->offset) <= 0) {
        LOG(ERROR) << "Error reading real_blkdev " << data->real_blkdev << " for inplace encrypt";
        return -1;
    }

    if (pwrite64(data->cryptofd, data->buffer, info.block_size * data->count, data->offset) <= 0) {
        LOG(ERROR) << "Error writing crypto_blkdev " << data->crypto_blkdev
                   << " for inplace encrypt";
        return -1;
    } else {
        log_progress(data, false);
    }

    data->count = 0;
    data->last_written_sector = (data->offset + data->count)
                                / info.block_size * CRYPT_SECTOR_SIZE - 1;
    return 0;
}

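/* ext4-aware copy loop: for each block group, read its block allocation
 * bitmap and copy only blocks that are marked in use (groups flagged
 * EXT4_BG_BLOCK_UNINIT are skipped entirely). Adjacent used blocks are
 * batched, up to BLOCKS_AT_A_TIME, before being flushed. */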
static int encrypt_groups(struct encryptGroupsData* data)
{
    unsigned int i;
    u8 *block_bitmap = 0;
    unsigned int block;
    off64_t ret;
    int rc = -1;

    data->buffer = (char*) malloc(info.block_size * BLOCKS_AT_A_TIME);
    if (!data->buffer) {
        LOG(ERROR) << "Failed to allocate crypto buffer";
        goto errout;
    }

    block_bitmap = (u8*) malloc(info.block_size);
    if (!block_bitmap) {
        LOG(ERROR) << "failed to allocate block bitmap";
        goto errout;
    }

    for (i = 0; i < aux_info.groups; ++i) {
        LOG(INFO) << "Encrypting group " << i;

        u32 first_block = aux_info.first_data_block + i * info.blocks_per_group;
        u32 block_count = std::min(info.blocks_per_group,
                                   (u32)(aux_info.len_blocks - first_block));

        off64_t offset = (u64)info.block_size
                         * aux_info.bg_desc[i].bg_block_bitmap;

        ret = pread64(data->realfd, block_bitmap, info.block_size, offset);
        if (ret != (int)info.block_size) {
            LOG(ERROR) << "failed to read all of block group bitmap " << i;
            goto errout;
        }

        offset = (u64)info.block_size * first_block;

        data->count = 0;

        for (block = 0; block < block_count; block++) {
            int used = (aux_info.bg_desc[i].bg_flags & EXT4_BG_BLOCK_UNINIT) ?
                       0 : bitmap_get_bit(block_bitmap, block);
            update_progress(data, used);
            if (used) {
                if (data->count == 0) {
                    data->offset = offset;
                }
                data->count++;
            } else {
                if (flush_outstanding_data(data)) {
                    goto errout;
                }
            }

            offset += info.block_size;

            /* Write data if we are aligned or buffer size reached */
            if (offset % (info.block_size * BLOCKS_AT_A_TIME) == 0
                || data->count == BLOCKS_AT_A_TIME) {
                if (flush_outstanding_data(data)) {
                    goto errout;
                }
            }
        }
        if (flush_outstanding_data(data)) {
            goto errout;
        }
    }

    data->completed = 1;
    rc = 0;

errout:
    log_progress(0, true);
    free(data->buffer);
    free(block_bitmap);
    return rc;
}

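/* Encrypt an ext4 filesystem in place by parsing its on-disk metadata with
 * libext4_utils and copying only allocated blocks. `size` and `tot_size` are
 * in 512-byte sectors; progress is accumulated into *size_already_done.
 * Returns 0 on success, ENABLE_INPLACE_ERR_DEV if the crypto device could not
 * be opened, or -1 on other failures. */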
static int cryptfs_enable_inplace_ext4(char* crypto_blkdev, char* real_blkdev, off64_t size,
                                       off64_t* size_already_done, off64_t tot_size,
                                       off64_t previously_encrypted_upto) {
    u32 i;
    struct encryptGroupsData data;
    int rc; // Can't initialize without causing warning -Wclobbered
    int retries = RETRY_MOUNT_ATTEMPTS;
    struct timespec time_started = {0};

    if (previously_encrypted_upto > *size_already_done) {
        LOG(DEBUG) << "Not fast encrypting since resuming part way through";
        return -1;
    }

    memset(&data, 0, sizeof(data));
    data.real_blkdev = real_blkdev;
    data.crypto_blkdev = crypto_blkdev;

    if ((data.realfd = open(real_blkdev, O_RDWR|O_CLOEXEC)) < 0) {
        PLOG(ERROR) << "Error opening real_blkdev " << real_blkdev << " for inplace encrypt";
        rc = -1;
        goto errout;
    }

    // Wait until the block device appears. Re-use the mount retry values since that is reasonable.
    while ((data.cryptofd = open(crypto_blkdev, O_WRONLY|O_CLOEXEC)) < 0) {
        if (--retries) {
            PLOG(ERROR) << "Error opening crypto_blkdev " << crypto_blkdev
                        << " for ext4 inplace encrypt, retrying";
            sleep(RETRY_MOUNT_DELAY_SECONDS);
        } else {
            PLOG(ERROR) << "Error opening crypto_blkdev " << crypto_blkdev
                        << " for ext4 inplace encrypt";
            rc = ENABLE_INPLACE_ERR_DEV;
            goto errout;
        }
    }

    if (setjmp(setjmp_env)) { // NOLINT
        LOG(ERROR) << "Reading ext4 extent caused an exception";
        rc = -1;
        goto errout;
    }

    if (read_ext(data.realfd, 0) != 0) {
        LOG(ERROR) << "Failed to read ext4 extent";
        rc = -1;
        goto errout;
    }

    data.numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
    data.tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
    data.blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;

    LOG(INFO) << "Encrypting ext4 filesystem in place...";

    data.tot_used_blocks = data.numblocks;
    for (i = 0; i < aux_info.groups; ++i) {
        data.tot_used_blocks -= aux_info.bg_desc[i].bg_free_blocks_count;
    }

    data.one_pct = data.tot_used_blocks / 100;
    data.cur_pct = 0;

    if (clock_gettime(CLOCK_MONOTONIC, &time_started)) {
        LOG(WARNING) << "Error getting time at start";
        // Note - continue anyway - we'll run with 0
    }
    data.time_started = time_started.tv_sec;
    data.remaining_time = -1;

    rc = encrypt_groups(&data);
    if (rc) {
        LOG(ERROR) << "Error encrypting groups";
        goto errout;
    }

    *size_already_done += data.completed ? size : data.last_written_sector;
    rc = 0;

errout:
    close(data.realfd);
    close(data.cryptofd);

    return rc;
}

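/* f2fs counterpart of log_progress(): collapses runs of consecutively
 * encrypted blocks into a single "Encrypting from block" / "Encrypted to
 * block" pair of log lines. */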
static void log_progress_f2fs(u64 block, bool completed)
{
    // When completed is true, the block argument is ignored.

    // Track progress so we can skip logging blocks
    static u64 last_block = (u64)-1;

    // Need to close existing 'Encrypting from' log?
    if (completed || (last_block != (u64)-1 && block != last_block + 1)) {
        LOG(INFO) << "Encrypted to block " << last_block;
        last_block = -1;
    }

    // Need to start new 'Encrypting from' log?
    if (!completed && (last_block == (u64)-1 || block != last_block + 1)) {
        LOG(INFO) << "Encrypting from block " << block;
    }

    // Update offset
    if (!completed) {
        last_block = block;
    }
}

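/* Callback passed to run_on_used_blocks(): copies the single in-use f2fs
 * block at position `pos` from the raw device to the crypto device and
 * updates progress. Returns 0 on success, -1 on I/O error. */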
static int encrypt_one_block_f2fs(u64 pos, void *data)
{
    struct encryptGroupsData *priv_dat = (struct encryptGroupsData *)data;

    priv_dat->blocks_already_done = pos - 1;
    update_progress(priv_dat, 1);

    off64_t offset = pos * CRYPT_INPLACE_BUFSIZE;

    if (pread64(priv_dat->realfd, priv_dat->buffer, CRYPT_INPLACE_BUFSIZE, offset) <= 0) {
        LOG(ERROR) << "Error reading real_blkdev " << priv_dat->real_blkdev
                   << " for f2fs inplace encrypt";
        return -1;
    }

    if (pwrite64(priv_dat->cryptofd, priv_dat->buffer, CRYPT_INPLACE_BUFSIZE, offset) <= 0) {
        LOG(ERROR) << "Error writing crypto_blkdev " << priv_dat->crypto_blkdev
                   << " for f2fs inplace encrypt";
        return -1;
    } else {
        log_progress_f2fs(pos, false);
    }

    return 0;
}

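/* Encrypt an f2fs filesystem in place: parse its metadata with
 * f2fs_sparseblock, then walk only the in-use blocks via run_on_used_blocks(),
 * copying each one with encrypt_one_block_f2fs(). Sizes are in 512-byte
 * sectors. Returns 0 on success, ENABLE_INPLACE_ERR_DEV if the crypto device
 * cannot be opened, ENABLE_INPLACE_ERR_OTHER otherwise. */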
static int cryptfs_enable_inplace_f2fs(char* crypto_blkdev, char* real_blkdev, off64_t size,
                                       off64_t* size_already_done, off64_t tot_size,
                                       off64_t previously_encrypted_upto) {
    struct encryptGroupsData data;
    struct f2fs_info *f2fs_info = NULL;
    int rc = ENABLE_INPLACE_ERR_OTHER;
    if (previously_encrypted_upto > *size_already_done) {
        LOG(DEBUG) << "Not fast encrypting since resuming part way through";
        return ENABLE_INPLACE_ERR_OTHER;
    }
    memset(&data, 0, sizeof(data));
    data.real_blkdev = real_blkdev;
    data.crypto_blkdev = crypto_blkdev;
    data.realfd = -1;
    data.cryptofd = -1;
    if ((data.realfd = open64(real_blkdev, O_RDWR|O_CLOEXEC)) < 0) {
        PLOG(ERROR) << "Error opening real_blkdev " << real_blkdev << " for f2fs inplace encrypt";
        goto errout;
    }
    if ((data.cryptofd = open64(crypto_blkdev, O_WRONLY|O_CLOEXEC)) < 0) {
        PLOG(ERROR) << "Error opening crypto_blkdev " << crypto_blkdev
                    << " for f2fs inplace encrypt";
        rc = ENABLE_INPLACE_ERR_DEV;
        goto errout;
    }

    f2fs_info = generate_f2fs_info(data.realfd);
    if (!f2fs_info)
        goto errout;

    data.numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
    data.tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
    data.blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;

    data.tot_used_blocks = get_num_blocks_used(f2fs_info);

    data.one_pct = data.tot_used_blocks / 100;
    data.cur_pct = 0;
    data.time_started = time(NULL);
    data.remaining_time = -1;

    data.buffer = (char*) malloc(f2fs_info->block_size);
    if (!data.buffer) {
        LOG(ERROR) << "Failed to allocate crypto buffer";
        goto errout;
    }

    data.count = 0;

    /* Currently, this either runs to completion, or hits a nonrecoverable error */
    rc = run_on_used_blocks(data.blocks_already_done, f2fs_info, &encrypt_one_block_f2fs, &data);

    if (rc) {
        LOG(ERROR) << "Error in running over f2fs blocks";
        rc = ENABLE_INPLACE_ERR_OTHER;
        goto errout;
    }

    *size_already_done += size;
    rc = 0;

errout:
    if (rc) LOG(ERROR) << "Failed to encrypt f2fs filesystem on " << real_blkdev;

    log_progress_f2fs(0, true);
    free(f2fs_info);
    free(data.buffer);
    close(data.realfd);
    close(data.cryptofd);

    return rc;
}

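/* Filesystem-agnostic fallback: copy every 512-byte sector from the raw
 * device to the crypto device, 4K at a time where possible, resuming from
 * previously_encrypted_upto. Used when neither the ext4 nor the f2fs pass
 * could handle the partition. */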
static int cryptfs_enable_inplace_full(char* crypto_blkdev, char* real_blkdev, off64_t size,
                                       off64_t* size_already_done, off64_t tot_size,
                                       off64_t previously_encrypted_upto) {
    int realfd, cryptofd;
    char buf[CRYPT_INPLACE_BUFSIZE];
    int rc = ENABLE_INPLACE_ERR_OTHER;
    off64_t numblocks, i, remainder;
    off64_t one_pct, cur_pct, new_pct;
    off64_t blocks_already_done, tot_numblocks;

    if ((realfd = open(real_blkdev, O_RDONLY|O_CLOEXEC)) < 0) {
        PLOG(ERROR) << "Error opening real_blkdev " << real_blkdev << " for inplace encrypt";
        return ENABLE_INPLACE_ERR_OTHER;
    }

    if ((cryptofd = open(crypto_blkdev, O_WRONLY|O_CLOEXEC)) < 0) {
        PLOG(ERROR) << "Error opening crypto_blkdev " << crypto_blkdev << " for inplace encrypt";
        close(realfd);
        return ENABLE_INPLACE_ERR_DEV;
    }

    /* This is pretty much a simple loop of reading 4K, and writing 4K.
     * The size passed in is the number of 512 byte sectors in the filesystem.
     * So compute the number of whole 4K blocks we should read/write,
     * and the remainder.
     */
    numblocks = size / CRYPT_SECTORS_PER_BUFSIZE;
    remainder = size % CRYPT_SECTORS_PER_BUFSIZE;
    tot_numblocks = tot_size / CRYPT_SECTORS_PER_BUFSIZE;
    blocks_already_done = *size_already_done / CRYPT_SECTORS_PER_BUFSIZE;

    LOG(ERROR) << "Encrypting filesystem in place...";

    i = previously_encrypted_upto + 1 - *size_already_done;

    if (lseek64(realfd, i * CRYPT_SECTOR_SIZE, SEEK_SET) < 0) {
        PLOG(ERROR) << "Cannot seek to previously encrypted point on " << real_blkdev;
        goto errout;
    }

    if (lseek64(cryptofd, i * CRYPT_SECTOR_SIZE, SEEK_SET) < 0) {
        PLOG(ERROR) << "Cannot seek to previously encrypted point on " << crypto_blkdev;
        goto errout;
    }

    for (; i < size && i % CRYPT_SECTORS_PER_BUFSIZE != 0; ++i) {
        if (unix_read(realfd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            PLOG(ERROR) << "Error reading initial sectors from real_blkdev " << real_blkdev
                        << " for inplace encrypt";
            goto errout;
        }
        if (unix_write(cryptofd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            PLOG(ERROR) << "Error writing initial sectors to crypto_blkdev " << crypto_blkdev
                        << " for inplace encrypt";
            goto errout;
        } else {
            LOG(INFO) << "Encrypted 1 block at " << i;
        }
    }

    one_pct = tot_numblocks / 100;
    cur_pct = 0;
    /* process the majority of the filesystem in blocks */
    for (i /= CRYPT_SECTORS_PER_BUFSIZE; i < numblocks; i++) {
        new_pct = (i + blocks_already_done) / one_pct;
        if (new_pct > cur_pct) {
            char pct_buf[8];

            cur_pct = new_pct;
            snprintf(pct_buf, sizeof(pct_buf), "%" PRId64, cur_pct);
            android::base::SetProperty("vold.encrypt_progress", pct_buf);
        }
        if (unix_read(realfd, buf, CRYPT_INPLACE_BUFSIZE) <= 0) {
            PLOG(ERROR) << "Error reading real_blkdev " << real_blkdev << " for inplace encrypt";
            goto errout;
        }
        if (unix_write(cryptofd, buf, CRYPT_INPLACE_BUFSIZE) <= 0) {
            PLOG(ERROR) << "Error writing crypto_blkdev " << crypto_blkdev << " for inplace encrypt";
            goto errout;
        } else {
            LOG(DEBUG) << "Encrypted " << CRYPT_SECTORS_PER_BUFSIZE << " blocks at "
                       << i * CRYPT_SECTORS_PER_BUFSIZE;
        }
    }

    /* Do any remaining sectors */
    for (i = 0; i < remainder; i++) {
        if (unix_read(realfd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            LOG(ERROR) << "Error reading final sectors from real_blkdev " << real_blkdev
                       << " for inplace encrypt";
            goto errout;
        }
        if (unix_write(cryptofd, buf, CRYPT_SECTOR_SIZE) <= 0) {
            LOG(ERROR) << "Error writing final sectors to crypto_blkdev " << crypto_blkdev
                       << " for inplace encrypt";
            goto errout;
        } else {
            LOG(INFO) << "Encrypted 1 block at next location";
        }
    }

    *size_already_done += size;
    rc = 0;

errout:
    close(realfd);
    close(cryptofd);

    return rc;
}

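// Hypothetical caller sketch (illustration only; the real callers live
// elsewhere in vold and pass actual dm-crypt device paths and sector counts):
//
//     off64_t size_done = 0;
//     int rc = cryptfs_enable_inplace(
//             const_cast<char*>("/dev/block/dm-0"),               // assumed crypto_blkdev
//             const_cast<char*>("/dev/block/by-name/userdata"),   // assumed real_blkdev
//             nr_sectors, &size_done, nr_sectors,
//             /*previously_encrypted_upto=*/0);
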
/* Returns one of the ENABLE_INPLACE_* return codes. */
int cryptfs_enable_inplace(char* crypto_blkdev, char* real_blkdev, off64_t size,
                           off64_t* size_already_done, off64_t tot_size,
                           off64_t previously_encrypted_upto) {
    int rc_ext4, rc_f2fs, rc_full;
    if (previously_encrypted_upto) {
        LOG(DEBUG) << "Continuing encryption from " << previously_encrypted_upto;
    }

    if (*size_already_done + size < previously_encrypted_upto) {
        *size_already_done += size;
        return 0;
    }

    /* TODO: identify the filesystem type.
     * As is, cryptfs_enable_inplace_ext4 will fail on an f2fs partition, and
     * then we will drop down to cryptfs_enable_inplace_f2fs.
     */
    if ((rc_ext4 = cryptfs_enable_inplace_ext4(crypto_blkdev, real_blkdev,
                                               size, size_already_done,
                                               tot_size, previously_encrypted_upto)) == 0) {
        return 0;
    }
    LOG(DEBUG) << "cryptfs_enable_inplace_ext4()=" << rc_ext4;

    if ((rc_f2fs = cryptfs_enable_inplace_f2fs(crypto_blkdev, real_blkdev,
                                               size, size_already_done,
                                               tot_size, previously_encrypted_upto)) == 0) {
        return 0;
    }
    LOG(DEBUG) << "cryptfs_enable_inplace_f2fs()=" << rc_f2fs;

    rc_full = cryptfs_enable_inplace_full(crypto_blkdev, real_blkdev,
                                          size, size_already_done, tot_size,
                                          previously_encrypted_upto);
    LOG(DEBUG) << "cryptfs_enable_inplace_full()=" << rc_full;

    /* Hack for b/17898962, the following is the symptom... */
    if (rc_ext4 == ENABLE_INPLACE_ERR_DEV
        && rc_f2fs == ENABLE_INPLACE_ERR_DEV
        && rc_full == ENABLE_INPLACE_ERR_DEV) {
        return ENABLE_INPLACE_ERR_DEV;
    }
    return rc_full;
}