/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "OpReorderer.h"

#include "utils/PaintUtils.h"
#include "RenderNode.h"

#include "SkCanvas.h"
#include "utils/Trace.h"

namespace android {
namespace uirenderer {

class BatchBase {

public:
    BatchBase(batchid_t batchId, BakedOpState* op, bool merging)
            : mBatchId(batchId)
            , mMerging(merging) {
        mBounds = op->computedState.clippedBounds;
        mOps.push_back(op);
    }

    bool intersects(const Rect& rect) const {
        if (!rect.intersects(mBounds)) return false;

        for (const BakedOpState* op : mOps) {
            if (rect.intersects(op->computedState.clippedBounds)) {
                return true;
            }
        }
        return false;
    }

    batchid_t getBatchId() const { return mBatchId; }
    bool isMerging() const { return mMerging; }

    const std::vector<BakedOpState*>& getOps() const { return mOps; }

    void dump() const {
        ALOGD("    Batch %p, id %d, merging %d, count %zu, bounds " RECT_STRING,
                this, mBatchId, mMerging, mOps.size(), RECT_ARGS(mBounds));
    }
protected:
    batchid_t mBatchId;
    Rect mBounds;
    std::vector<BakedOpState*> mOps;
    bool mMerging;
};

class OpBatch : public BatchBase {
public:
    static void* operator new(size_t size, LinearAllocator& allocator) {
        return allocator.alloc(size);
    }

    OpBatch(batchid_t batchId, BakedOpState* op)
            : BatchBase(batchId, op, false) {
    }

    void batchOp(BakedOpState* op) {
        mBounds.unionWith(op->computedState.clippedBounds);
        mOps.push_back(op);
    }
};

class MergingOpBatch : public BatchBase {
public:
    static void* operator new(size_t size, LinearAllocator& allocator) {
        return allocator.alloc(size);
    }

    MergingOpBatch(batchid_t batchId, BakedOpState* op)
            : BatchBase(batchId, op, true) {
    }

    /*
     * Helper for determining if a new op can merge with a MergingOpBatch based on their bounds
     * and clip side flags. A positive bounds delta for a side means the new op's bounds extend
     * beyond the batch's bounds on that side.
     */
    static inline bool checkSide(const int currentFlags, const int newFlags, const int side,
            float boundsDelta) {
        bool currentClipExists = currentFlags & side;
        bool newClipExists = newFlags & side;

        // if current is clipped, we must be able to fit new bounds in current
        if (boundsDelta > 0 && currentClipExists) return false;

        // if new is clipped, we must be able to fit current bounds in new
        if (boundsDelta < 0 && newClipExists) return false;

        return true;
    }
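
    // A worked example of checkSide(): for the Left side, canMergeWith() passes
    // boundsDelta = mBounds.left - opBounds.left. A positive value means the new op reaches
    // further left than this batch; if the batch is clipped on its left edge (so its bounds
    // equal its clip there), the merged draw's shared clip would cut the new op, so the merge
    // is rejected. The boundsDelta < 0 branch is the symmetric check against a clipped new op.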

    static bool paintIsDefault(const SkPaint& paint) {
        return paint.getAlpha() == 255
                && paint.getColorFilter() == nullptr
                && paint.getShader() == nullptr;
    }

    static bool paintsAreEquivalent(const SkPaint& a, const SkPaint& b) {
        return a.getAlpha() == b.getAlpha()
                && a.getColorFilter() == b.getColorFilter()
                && a.getShader() == b.getShader();
    }

    /*
     * Checks whether a (mergeable) op can be merged into this batch.
     *
     * If this returns true, the op's multiDraw must be guaranteed to handle both ops
     * simultaneously, so it is important to consider all paint attributes used in the draw calls
     * when deciding both a) whether an op tries to merge at all, and b) whether the op can merge
     * with another set of ops.
     *
     * False positives can lead to information from the paints of subsequently merged operations
     * being dropped, so we make simplifying qualifications on the ops that can merge, per op type.
     */
    bool canMergeWith(BakedOpState* op) const {
        bool isTextBatch = getBatchId() == OpBatchType::Text
                || getBatchId() == OpBatchType::ColorText;

        // Overlapping other operations is only allowed for text without shadow. For other ops,
        // multiDraw isn't guaranteed to overdraw correctly
        if (!isTextBatch || PaintUtils::hasTextShadow(op->op->paint)) {
            if (intersects(op->computedState.clippedBounds)) return false;
        }

        const BakedOpState* lhs = op;
        const BakedOpState* rhs = mOps[0];

        if (!MathUtils::areEqual(lhs->alpha, rhs->alpha)) return false;

        // Identical round rect clip state means both ops will clip in the same way, or not at all.
        // As the state objects are const, we can compare their pointers to determine mergeability
        if (lhs->roundRectClipState != rhs->roundRectClipState) return false;
        if (lhs->projectionPathMask != rhs->projectionPathMask) return false;

        /* Clipping compatibility check
         *
         * Exploits the fact that if an op or batch is clipped on a side, its bounds will equal its
         * clip for that side.
         */
        const int currentFlags = mClipSideFlags;
        const int newFlags = op->computedState.clipSideFlags;
        if (currentFlags != OpClipSideFlags::None || newFlags != OpClipSideFlags::None) {
            const Rect& opBounds = op->computedState.clippedBounds;
            float boundsDelta = mBounds.left - opBounds.left;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Left, boundsDelta)) return false;
            boundsDelta = mBounds.top - opBounds.top;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Top, boundsDelta)) return false;

            // right and bottom delta calculation reversed to account for direction
            boundsDelta = opBounds.right - mBounds.right;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Right, boundsDelta)) return false;
            boundsDelta = opBounds.bottom - mBounds.bottom;
            if (!checkSide(currentFlags, newFlags, OpClipSideFlags::Bottom, boundsDelta)) return false;
        }

        const SkPaint* newPaint = op->op->paint;
        const SkPaint* oldPaint = mOps[0]->op->paint;

        if (newPaint == oldPaint) {
            // if paints are equal, then modifiers + paint attribs don't need to be compared
            return true;
        } else if (newPaint && !oldPaint) {
            return paintIsDefault(*newPaint);
        } else if (!newPaint && oldPaint) {
            return paintIsDefault(*oldPaint);
        }
        return paintsAreEquivalent(*newPaint, *oldPaint);
    }
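
    // Note on the paint checks in canMergeWith() above: a null paint and a default paint
    // (alpha 255, no color filter, no shader) are treated as equivalent, since a missing
    // paint behaves like a default one for the attributes that matter when merging.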

    void mergeOp(BakedOpState* op) {
        mBounds.unionWith(op->computedState.clippedBounds);
        mOps.push_back(op);

        const int newClipSideFlags = op->computedState.clipSideFlags;
        mClipSideFlags |= newClipSideFlags;

        const Rect& opClip = op->computedState.clipRect;
        if (newClipSideFlags & OpClipSideFlags::Left) mClipRect.left = opClip.left;
        if (newClipSideFlags & OpClipSideFlags::Top) mClipRect.top = opClip.top;
        if (newClipSideFlags & OpClipSideFlags::Right) mClipRect.right = opClip.right;
        if (newClipSideFlags & OpClipSideFlags::Bottom) mClipRect.bottom = opClip.bottom;
    }

private:
    int mClipSideFlags = 0;
    Rect mClipRect;
};

// iterate back toward the target batch to see if anything drawn since it overlaps the new op
// if there is no target, merging ops still iterate to find a similar batch to insert after
void OpReorderer::LayerReorderer::locateInsertIndex(int batchId, const Rect& clippedBounds,
        BatchBase** targetBatch, size_t* insertBatchIndex) const {
    for (int i = mBatches.size() - 1; i >= 0; i--) {
        BatchBase* overBatch = mBatches[i];

        if (overBatch == *targetBatch) break;

        // TODO: also consider shader shared between batch types
        if (batchId == overBatch->getBatchId()) {
            *insertBatchIndex = i + 1;
            if (!*targetBatch) break; // found insert position, quit
        }

        if (overBatch->intersects(clippedBounds)) {
            // NOTE: it may be possible to optimize for special cases where two operations
            // of the same batch/paint could swap order, such as with a non-mergeable
            // (clipped) and a mergeable text operation
            *targetBatch = nullptr;
            break;
        }
    }
}
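
// Illustrative scenario for the rule above: with batches [Bitmap, Rect] already deferred, a new
// bitmap op whose clipped bounds overlap the Rect batch cannot be moved back into the existing
// Bitmap batch (that would change which op ends up on top), so targetBatch is cleared and the op
// starts a new batch at the insertion index instead.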

void OpReorderer::LayerReorderer::deferUnmergeableOp(LinearAllocator& allocator,
        BakedOpState* op, batchid_t batchId) {
    OpBatch* targetBatch = mBatchLookup[batchId];

    size_t insertBatchIndex = mBatches.size();
    if (targetBatch) {
        locateInsertIndex(batchId, op->computedState.clippedBounds,
                (BatchBase**)(&targetBatch), &insertBatchIndex);
    }

    if (targetBatch) {
        targetBatch->batchOp(op);
    } else {
        // new non-merging batch
        targetBatch = new (allocator) OpBatch(batchId, op);
        mBatchLookup[batchId] = targetBatch;
        mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
    }
}

// the insertion point of a new batch should ideally be immediately after a similar batch
// (generally, one using a similar shader)
void OpReorderer::LayerReorderer::deferMergeableOp(LinearAllocator& allocator,
        BakedOpState* op, batchid_t batchId, mergeid_t mergeId) {
    MergingOpBatch* targetBatch = nullptr;

    // Try to merge with any existing batch with same mergeId
    auto getResult = mMergingBatchLookup[batchId].find(mergeId);
    if (getResult != mMergingBatchLookup[batchId].end()) {
        targetBatch = getResult->second;
        if (!targetBatch->canMergeWith(op)) {
            targetBatch = nullptr;
        }
    }

    size_t insertBatchIndex = mBatches.size();
    locateInsertIndex(batchId, op->computedState.clippedBounds,
            (BatchBase**)(&targetBatch), &insertBatchIndex);

    if (targetBatch) {
        targetBatch->mergeOp(op);
    } else {
        // new merging batch
        targetBatch = new (allocator) MergingOpBatch(batchId, op);
        mMergingBatchLookup[batchId].insert(std::make_pair(mergeId, targetBatch));

        mBatches.insert(mBatches.begin() + insertBatchIndex, targetBatch);
    }
}
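
// Note: the mergeId scopes which ops may share a multi-draw within a batch type; for example,
// onBitmapOp() below uses the bitmap's generation ID as its mergeId, so repeated draws of the
// same bitmap can collapse into a single merging batch, subject to MergingOpBatch::canMergeWith().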

void OpReorderer::LayerReorderer::replayBakedOpsImpl(void* arg, BakedOpReceiver* receivers) const {
    for (const BatchBase* batch : mBatches) {
        // TODO: different behavior based on batch->isMerging()
        for (const BakedOpState* op : batch->getOps()) {
            receivers[op->op->opId](arg, *op->op, *op);
        }
    }
}

void OpReorderer::LayerReorderer::dump() const {
    for (const BatchBase* batch : mBatches) {
        batch->dump();
    }
}

OpReorderer::OpReorderer()
        : mCanvasState(*this) {
    mLayerReorderers.emplace_back();
    mLayerStack.push_back(0);
}

void OpReorderer::onViewportInitialized() {}

void OpReorderer::onSnapshotRestored(const Snapshot& removed, const Snapshot& restored) {}

void OpReorderer::defer(const SkRect& clip, int viewportWidth, int viewportHeight,
        const std::vector< sp<RenderNode> >& nodes) {
    mCanvasState.initializeSaveStack(viewportWidth, viewportHeight,
            clip.fLeft, clip.fTop, clip.fRight, clip.fBottom,
            Vector3());
    for (const sp<RenderNode>& node : nodes) {
        if (node->nothingToDraw()) continue;

        // TODO: dedupe this code with onRenderNodeOp()
        mCanvasState.save(SkCanvas::kClip_SaveFlag | SkCanvas::kMatrix_SaveFlag);
        if (node->applyViewProperties(mCanvasState)) {
            // if node not rejected based on properties, do ops...
            const DisplayList& displayList = node->getDisplayList();
            deferImpl(displayList);
        }
        mCanvasState.restore();
    }
}

void OpReorderer::defer(int viewportWidth, int viewportHeight, const DisplayList& displayList) {
    ATRACE_NAME("prepare drawing commands");
    mCanvasState.initializeSaveStack(viewportWidth, viewportHeight,
            0, 0, viewportWidth, viewportHeight, Vector3());
    deferImpl(displayList);
}

/**
 * Used to define a list of lambdas referencing private OpReorderer::onXXXXOp() methods.
 *
 * This allows opIds embedded in the RecordedOps to be used for dispatching to these lambdas,
 * e.g. a BitmapOp would be dispatched to OpReorderer::onBitmapOp(const BitmapOp&).
 */
#define OP_RECEIVER(Type) \
        [](OpReorderer& reorderer, const RecordedOp& op) { reorderer.on##Type(static_cast<const Type&>(op)); },
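// For reference, OP_RECEIVER(BitmapOp) expands (modulo line breaks) to:
//     [](OpReorderer& reorderer, const RecordedOp& op) {
//         reorderer.onBitmapOp(static_cast<const BitmapOp&>(op));
//     },
// so MAP_OPS(OP_RECEIVER) populates the receivers array below with one lambda per RecordedOp
// type, indexed by opId.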
void OpReorderer::deferImpl(const DisplayList& displayList) {
    static std::function<void(OpReorderer& reorderer, const RecordedOp&)> receivers[] = {
        MAP_OPS(OP_RECEIVER)
    };
    for (const DisplayList::Chunk& chunk : displayList.getChunks()) {
        for (size_t opIndex = chunk.beginOpIndex; opIndex < chunk.endOpIndex; opIndex++) {
            const RecordedOp* op = displayList.getOps()[opIndex];
            receivers[op->opId](*this, *op);
        }
    }
}

void OpReorderer::replayBakedOpsImpl(void* arg, BakedOpReceiver* receivers) {
    ATRACE_NAME("flush drawing commands");
    // Replay through layers in reverse order, since layers
    // later in the list will be drawn by earlier ones
    for (int i = mLayerReorderers.size() - 1; i >= 0; i--) {
        mLayerReorderers[i].replayBakedOpsImpl(arg, receivers);
    }
}

void OpReorderer::onRenderNodeOp(const RenderNodeOp& op) {
    if (op.renderNode->nothingToDraw()) {
        return;
    }
    int count = mCanvasState.save(SkCanvas::kClip_SaveFlag | SkCanvas::kMatrix_SaveFlag);

    // apply state from RecordedOp
    mCanvasState.concatMatrix(op.localMatrix);
    mCanvasState.clipRect(op.localClipRect.left, op.localClipRect.top,
            op.localClipRect.right, op.localClipRect.bottom, SkRegion::kIntersect_Op);

    // apply RenderProperties state
    if (op.renderNode->applyViewProperties(mCanvasState)) {
        // if node not rejected based on properties, do ops...
        deferImpl(op.renderNode->getDisplayList());
    }
    mCanvasState.restoreToCount(count);
}

static batchid_t tessellatedBatchId(const SkPaint& paint) {
    return paint.getPathEffect()
            ? OpBatchType::AlphaMaskTexture
            : (paint.isAntiAlias() ? OpBatchType::AlphaVertices : OpBatchType::Vertices);
}

void OpReorderer::onBitmapOp(const BitmapOp& op) {
    BakedOpState* bakedStateOp = tryBakeOpState(op);
    if (!bakedStateOp) return; // quick rejected

    mergeid_t mergeId = (mergeid_t) op.bitmap->getGenerationID();
    // TODO: AssetAtlas
    currentLayer().deferMergeableOp(mAllocator, bakedStateOp, OpBatchType::Bitmap, mergeId);
}

void OpReorderer::onRectOp(const RectOp& op) {
    BakedOpState* bakedStateOp = tryBakeOpState(op);
    if (!bakedStateOp) return; // quick rejected
    currentLayer().deferUnmergeableOp(mAllocator, bakedStateOp, tessellatedBatchId(*op.paint));
}

void OpReorderer::onSimpleRectsOp(const SimpleRectsOp& op) {
    BakedOpState* bakedStateOp = tryBakeOpState(op);
    if (!bakedStateOp) return; // quick rejected
    currentLayer().deferUnmergeableOp(mAllocator, bakedStateOp, OpBatchType::Vertices);
}

// TODO: test rejection at defer time, where the bounds become empty
void OpReorderer::onBeginLayerOp(const BeginLayerOp& op) {
    mCanvasState.save(SkCanvas::kClip_SaveFlag | SkCanvas::kMatrix_SaveFlag);
    mCanvasState.writableSnapshot()->transform->loadIdentity();
    mCanvasState.writableSnapshot()->initializeViewport(
            (int) op.unmappedBounds.getWidth(), (int) op.unmappedBounds.getHeight());
    mCanvasState.writableSnapshot()->roundRectClipState = nullptr;

    // create a new layer, and push its index on the stack
    mLayerStack.push_back(mLayerReorderers.size());
    mLayerReorderers.emplace_back();
    mLayerReorderers.back().beginLayerOp = &op;
}
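
// Ops recorded between a BeginLayerOp and its matching EndLayerOp are deferred into the layer
// pushed above, since it is now on top of mLayerStack; replayBakedOpsImpl() later walks the
// layers in reverse order, so a layer's content is rendered before the parent layer draws it.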

void OpReorderer::onEndLayerOp(const EndLayerOp& /* ignored */) {
    mCanvasState.restore();

    const BeginLayerOp& beginLayerOp = *currentLayer().beginLayerOp;

    // pop finished layer off of the stack
    int finishedLayerIndex = mLayerStack.back();
    mLayerStack.pop_back();

    // record the draw operation into the previous layer's list of draw commands
    // uses state from the associated beginLayerOp, since it has all the state needed for drawing
    LayerOp* drawLayerOp = new (mAllocator) LayerOp(
            beginLayerOp.unmappedBounds,
            beginLayerOp.localMatrix,
            beginLayerOp.localClipRect,
            beginLayerOp.paint);
    BakedOpState* bakedOpState = tryBakeOpState(*drawLayerOp);

    if (bakedOpState) {
        // Layer will be drawn into parent layer (which is now current, since we popped mLayerStack)
        currentLayer().deferUnmergeableOp(mAllocator, bakedOpState, OpBatchType::Bitmap);
    } else {
        // Layer won't be drawn - delete its drawing batches to prevent it from doing any work
        mLayerReorderers[finishedLayerIndex].clear();
        return;
    }
}

void OpReorderer::onLayerOp(const LayerOp& op) {
    LOG_ALWAYS_FATAL("unsupported");
}

} // namespace uirenderer
} // namespace android