# Copyright (C) 2014 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import bisect
import os
import struct
import threading
from hashlib import sha1

import rangelib


class SparseImage(object):
  """Wraps a sparse image file into an image object.

  Wraps a sparse image file (and optional file map and clobbered_blocks) into
  an image object suitable for passing to BlockImageDiff. file_map contains
  the mapping between files and their blocks. clobbered_blocks contains the
  set of blocks that should always be written to the target regardless of the
  old contents (i.e. copying instead of patching). clobbered_blocks should be
  in the form of a string like "0" or "0 1-5 8".
  """
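
  # A minimal usage sketch (the image and map filenames here are hypothetical,
  # purely for illustration):
  #
  #   simg = SparseImage("system.img", file_map_fn="system.map",
  #                      clobbered_blocks="0 1-5 8")
  #   print(simg.care_map)      # RangeSet of blocks that carry data
  #   print(simg.TotalSha1())   # SHA-1 over all 'care' data minus clobbered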

  def __init__(self, simg_fn, file_map_fn=None, clobbered_blocks=None,
               mode="rb", build_map=True, allow_shared_blocks=False):
    self.simg_f = f = open(simg_fn, mode)

    header_bin = f.read(28)
    header = struct.unpack("<I4H4I", header_bin)

    magic = header[0]
    major_version = header[1]
    minor_version = header[2]
    file_hdr_sz = header[3]
    chunk_hdr_sz = header[4]
    self.blocksize = blk_sz = header[5]
    self.total_blocks = total_blks = header[6]
    self.total_chunks = total_chunks = header[7]

    if magic != 0xED26FF3A:
      raise ValueError("Magic should be 0xED26FF3A but is 0x%08X" % (magic,))
    if major_version != 1 or minor_version != 0:
      raise ValueError("I know about version 1.0, but this is version %u.%u" %
                       (major_version, minor_version))
    if file_hdr_sz != 28:
      raise ValueError("File header size was expected to be 28, but is %u." %
                       (file_hdr_sz,))
    if chunk_hdr_sz != 12:
      raise ValueError("Chunk header size was expected to be 12, but is %u." %
                       (chunk_hdr_sz,))

    print("Total of %u %u-byte output blocks in %u input chunks."
          % (total_blks, blk_sz, total_chunks))

    if not build_map:
      return

    pos = 0   # in blocks
    care_data = []
    self.offset_map = offset_map = []
    self.clobbered_blocks = rangelib.RangeSet(data=clobbered_blocks)

    for i in range(total_chunks):
      header_bin = f.read(12)
      header = struct.unpack("<2H2I", header_bin)
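      # Chunk header fields: chunk_type, reserved (header[1], unused here),
      # chunk_sz (chunk size in output blocks), total_sz (chunk header plus
      # data, in bytes).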
      chunk_type = header[0]
      chunk_sz = header[2]
      total_sz = header[3]
      data_sz = total_sz - 12

      if chunk_type == 0xCAC1:
        if data_sz != (chunk_sz * blk_sz):
          raise ValueError(
              "Raw chunk input size (%u) does not match output size (%u)" %
              (data_sz, chunk_sz * blk_sz))
        else:
          care_data.append(pos)
          care_data.append(pos + chunk_sz)
          offset_map.append((pos, chunk_sz, f.tell(), None))
          pos += chunk_sz
          f.seek(data_sz, os.SEEK_CUR)

      elif chunk_type == 0xCAC2:
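        # A fill chunk carries a single 4-byte pattern that expands to
        # chunk_sz blocks of that repeated pattern when the image is
        # unsparsed.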
        fill_data = f.read(4)
        care_data.append(pos)
        care_data.append(pos + chunk_sz)
        offset_map.append((pos, chunk_sz, None, fill_data))
        pos += chunk_sz

      elif chunk_type == 0xCAC3:
        if data_sz != 0:
          raise ValueError("Don't care chunk input size is non-zero (%u)" %
                           (data_sz))
        else:
          pos += chunk_sz

      elif chunk_type == 0xCAC4:
        raise ValueError("CRC32 chunks are not supported")

      else:
        raise ValueError("Unknown chunk type 0x%04X not supported" %
                         (chunk_type,))

    self.generator_lock = threading.Lock()

    self.care_map = rangelib.RangeSet(care_data)
    self.offset_index = [i[0] for i in offset_map]

    # Bug: 20881595
    # Introduce extended blocks as a workaround for the bug. dm-verity may
    # touch blocks that are not in the care_map due to block device
    # read-ahead. It will fail if such blocks contain non-zeroes. We zero out
    # the extended blocks explicitly to avoid dm-verity failures. 512 blocks
    # are the maximum read-ahead we configure for dm-verity block devices.
    extended = self.care_map.extend(512)
    all_blocks = rangelib.RangeSet(data=(0, self.total_blocks))
    extended = extended.intersect(all_blocks).subtract(self.care_map)
    self.extended = extended

    if file_map_fn:
      self.LoadFileBlockMap(file_map_fn, self.clobbered_blocks,
                            allow_shared_blocks)
    else:
      self.file_map = {"__DATA": self.care_map}

  def AppendFillChunk(self, data, blocks):
    f = self.simg_f

    # Append a fill chunk
    f.seek(0, os.SEEK_END)
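    # "<2H3I" packs chunk_type 0xCAC2 (fill), a reserved 0, chunk_sz in
    # blocks, total_sz of 16 (12-byte chunk header + 4-byte fill pattern),
    # and finally the 4-byte fill value itself.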
    f.write(struct.pack("<2H3I", 0xCAC2, 0, blocks, 16, data))

    # Update the sparse header
    self.total_blocks += blocks
    self.total_chunks += 1

    f.seek(16, os.SEEK_SET)
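    # total_blks and total_chunks live at byte offsets 16 and 20 of the
    # 28-byte sparse image header parsed in __init__.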
    f.write(struct.pack("<2I", self.total_blocks, self.total_chunks))

  def RangeSha1(self, ranges):
    h = sha1()
    for data in self._GetRangeData(ranges):
      h.update(data)
    return h.hexdigest()

  def ReadRangeSet(self, ranges):
    return [d for d in self._GetRangeData(ranges)]

  def TotalSha1(self, include_clobbered_blocks=False):
    """Return the SHA-1 hash of all data in the 'care' regions.

    If include_clobbered_blocks is True, it returns the hash including the
    clobbered_blocks."""
    ranges = self.care_map
    if not include_clobbered_blocks:
      ranges = ranges.subtract(self.clobbered_blocks)
    return self.RangeSha1(ranges)

  def WriteRangeDataToFd(self, ranges, fd):
    for data in self._GetRangeData(ranges):
      fd.write(data)

  def _GetRangeData(self, ranges):
    """Generator that produces all the image data in 'ranges'. The
    number of individual pieces returned is arbitrary (and in
    particular is not necessarily equal to the number of ranges in
    'ranges').

    Use a lock to protect the generator so that we will not run two
    instances of this generator on the same object simultaneously."""

    f = self.simg_f
    with self.generator_lock:
      for s, e in ranges:
        to_read = e - s
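        # offset_index holds the starting block of each chunk, so this bisect
        # finds the chunk that contains block s.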
        idx = bisect.bisect_right(self.offset_index, s) - 1
        chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]

        # for the first chunk we may be starting partway through it.
        remain = chunk_len - (s - chunk_start)
        this_read = min(remain, to_read)
        if filepos is not None:
          p = filepos + ((s - chunk_start) * self.blocksize)
          f.seek(p, os.SEEK_SET)
          yield f.read(this_read * self.blocksize)
        else:
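          # fill_data is the chunk's 4-byte pattern; repeating it
          # blocksize / 4 times per block reproduces the filled data.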
          yield fill_data * (this_read * (self.blocksize >> 2))
        to_read -= this_read

        while to_read > 0:
          # continue with following chunks if this range spans multiple chunks.
          idx += 1
          chunk_start, chunk_len, filepos, fill_data = self.offset_map[idx]
          this_read = min(chunk_len, to_read)
          if filepos is not None:
            f.seek(filepos, os.SEEK_SET)
            yield f.read(this_read * self.blocksize)
          else:
            yield fill_data * (this_read * (self.blocksize >> 2))
          to_read -= this_read

  def LoadFileBlockMap(self, fn, clobbered_blocks, allow_shared_blocks):
    """Loads the given block map file.

    Args:
      fn: The filename of the block map file.
      clobbered_blocks: A RangeSet instance for the clobbered blocks.
      allow_shared_blocks: Whether having shared blocks is allowed.
    """
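    # Each line of the map file is a filename followed by the blocks that the
    # file occupies, in RangeSet notation, e.g. (a hypothetical entry):
    #
    #   /system/app/Example.apk 8450-8461 9000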
    remaining = self.care_map
    self.file_map = out = {}

    with open(fn) as f:
      for line in f:
        fn, ranges = line.split(None, 1)
        ranges = rangelib.RangeSet.parse(ranges)

        if allow_shared_blocks:
          # Find the shared blocks that have been claimed by others.
          shared_blocks = ranges.subtract(remaining)
          if shared_blocks:
            ranges = ranges.subtract(shared_blocks)
            if not ranges:
              continue

            # Tag the entry so that we can skip applying imgdiff on this file.
            ranges.extra['uses_shared_blocks'] = True

        out[fn] = ranges
        assert ranges.size() == ranges.intersect(remaining).size()

        # Currently we assume that blocks in clobbered_blocks are not part of
        # any file.
        assert not clobbered_blocks.overlaps(ranges)
        remaining = remaining.subtract(ranges)

    remaining = remaining.subtract(clobbered_blocks)

    # For all the remaining blocks in the care_map (i.e., those that aren't
    # part of the data for any file nor part of the clobbered_blocks), divide
    # them into blocks that are all zero and blocks that aren't. (Zero blocks
    # are handled specially because (1) there are usually a lot of them and
    # (2) bsdiff handles files with long sequences of repeated bytes
    # especially poorly.)

    zero_blocks = []
    nonzero_blocks = []
    reference = '\0' * self.blocksize

    # Workaround for bug 23227672. For squashfs we don't have a system.map,
    # so the whole system image will be treated as a single file. But due to
    # an unknown bug, the updater gets killed by OOM when writing the patched
    # image back to flash (observed on lenok-userdebug MEA49). Until there is
    # a real fix, we evenly divide the non-zero blocks into smaller groups
    # (currently 1024 blocks, or 4MB, per group).
    # Bug: 23227672
    MAX_BLOCKS_PER_GROUP = 1024
    nonzero_groups = []

    f = self.simg_f
    for s, e in remaining:
      for b in range(s, e):
        idx = bisect.bisect_right(self.offset_index, b) - 1
        chunk_start, _, filepos, fill_data = self.offset_map[idx]
        if filepos is not None:
          filepos += (b - chunk_start) * self.blocksize
          f.seek(filepos, os.SEEK_SET)
          data = f.read(self.blocksize)
        else:
          if fill_data == reference[:4]:  # fill with all zeros
            data = reference
          else:
            data = None

        if data == reference:
          zero_blocks.append(b)
          zero_blocks.append(b+1)
        else:
          nonzero_blocks.append(b)
          nonzero_blocks.append(b+1)

        if len(nonzero_blocks) >= MAX_BLOCKS_PER_GROUP:
          nonzero_groups.append(nonzero_blocks)
          # Clear the list.
          nonzero_blocks = []

    if nonzero_blocks:
      nonzero_groups.append(nonzero_blocks)
      nonzero_blocks = []

    assert zero_blocks or nonzero_groups or clobbered_blocks
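    # The synthetic "__ZERO", "__NONZERO-%d" and "__COPY" entries assigned
    # below cover the care_map blocks that no real file claims.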

    if zero_blocks:
      out["__ZERO"] = rangelib.RangeSet(data=zero_blocks)
    if nonzero_groups:
      for i, blocks in enumerate(nonzero_groups):
        out["__NONZERO-%d" % i] = rangelib.RangeSet(data=blocks)
    if clobbered_blocks:
      out["__COPY"] = clobbered_blocks

  def ResetFileMap(self):
    """Throw away the file map and treat the entire image as
    undifferentiated data."""
    self.file_map = {"__DATA": self.care_map}