Merge "Move the nested struct inside VkNativeBufferANDROID outside."
diff --git a/cmds/dumpstate/utils.cpp b/cmds/dumpstate/utils.cpp
index 4cbf577..e08c806 100644
--- a/cmds/dumpstate/utils.cpp
+++ b/cmds/dumpstate/utils.cpp
@@ -725,7 +725,11 @@
         return -1;
     }
     fcntl(s, F_SETFD, FD_CLOEXEC);
-    if (listen(s, 4) < 0) {
+
+    // Set the backlog to 0 to keep the connection queue at its minimum size.
+    // On Linux the effective minimum backlog is 1, so connect() will block if another
+    // client has already called connect() and its connection request has not been accepted yet.
+    if (listen(s, 0) < 0) {
         MYLOGE("listen(control socket): %s\n", strerror(errno));
         return -1;
     }
@@ -736,6 +740,9 @@
 
     // Close socket just after accept(), to make sure that connect() by client will get error
     // when the socket is used by the other services.
+    // There is still a possible race condition between accept() and close(), but there is no way
+    // to close-on-accept atomically.
+    // See b/123306389#comment25 for details.
     close(s);
 
     if (fd < 0) {
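
The change above leans on two things: the backlog semantics spelled out in the new comment, and closing the listening socket immediately after accept(). A minimal sketch of that pattern in plain POSIX C++ (a hypothetical helper, not dumpstate code):

#include <sys/socket.h>
#include <sys/un.h>
#include <unistd.h>
#include <cstring>

// Hypothetical helper: accept exactly one client on a UNIX-domain control
// socket, then close the listener so later connect() attempts fail fast
// instead of queueing.
int accept_single_client(const char* path) {
    int s = socket(AF_UNIX, SOCK_STREAM, 0);
    if (s < 0) return -1;

    sockaddr_un addr = {};
    addr.sun_family = AF_UNIX;
    strncpy(addr.sun_path, path, sizeof(addr.sun_path) - 1);

    // backlog 0: keep the pending-connection queue as small as the kernel allows.
    if (bind(s, reinterpret_cast<sockaddr*>(&addr), sizeof(addr)) < 0 || listen(s, 0) < 0) {
        close(s);
        return -1;
    }

    int fd = accept(s, nullptr, nullptr);
    // Close the listener right away; there is still a small accept()/close()
    // race, as noted in b/123306389#comment25.
    close(s);
    return fd;  // -1 if accept() failed
}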
diff --git a/libs/binder/BpBinder.cpp b/libs/binder/BpBinder.cpp
index f35803c..5ceb218 100644
--- a/libs/binder/BpBinder.cpp
+++ b/libs/binder/BpBinder.cpp
@@ -148,6 +148,10 @@
     IPCThreadState::self()->incWeakHandle(handle, this);
 }
 
+int32_t BpBinder::handle() const {
+    return mHandle;
+}
+
 bool BpBinder::isDescriptorCached() const {
     Mutex::Autolock _l(mLock);
     return mDescriptorCache.size() ? true : false;
diff --git a/libs/binder/IPCThreadState.cpp b/libs/binder/IPCThreadState.cpp
index 0e21054..cfb86f0 100644
--- a/libs/binder/IPCThreadState.cpp
+++ b/libs/binder/IPCThreadState.cpp
@@ -999,7 +999,7 @@
     if (err >= NO_ERROR) {
         if (bwr.write_consumed > 0) {
             if (bwr.write_consumed < mOut.dataSize())
-                mOut.remove(0, bwr.write_consumed);
+                LOG_ALWAYS_FATAL("Driver did not consume write buffer");
             else {
                 mOut.setDataSize(0);
                 processPostWriteDerefs();
diff --git a/libs/binder/Parcel.cpp b/libs/binder/Parcel.cpp
index badf6cb..b5fbf42 100644
--- a/libs/binder/Parcel.cpp
+++ b/libs/binder/Parcel.cpp
@@ -1394,11 +1394,6 @@
     return status.writeToParcel(this);
 }
 
-void Parcel::remove(size_t /*start*/, size_t /*amt*/)
-{
-    LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
-}
-
 status_t Parcel::validateReadData(size_t upperBound) const
 {
     // Don't allow non-object reads on object data
diff --git a/libs/binder/include/binder/BpBinder.h b/libs/binder/include/binder/BpBinder.h
index 78f2e1d..b3a1d0b 100644
--- a/libs/binder/include/binder/BpBinder.h
+++ b/libs/binder/include/binder/BpBinder.h
@@ -34,7 +34,7 @@
 public:
     static BpBinder*    create(int32_t handle);
 
-    inline  int32_t     handle() const { return mHandle; }
+    int32_t             handle() const;
 
     virtual const String16&    getInterfaceDescriptor() const;
     virtual bool        isBinderAlive() const;
diff --git a/libs/binder/include/binder/Parcel.h b/libs/binder/include/binder/Parcel.h
index de13d81..b65d456 100644
--- a/libs/binder/include/binder/Parcel.h
+++ b/libs/binder/include/binder/Parcel.h
@@ -235,8 +235,6 @@
     // Currently the native implementation doesn't do any of the StrictMode
     // stack gathering and serialization that the Java implementation does.
     status_t            writeNoException();
-
-    void                remove(size_t start, size_t amt);
     
     status_t            read(void* outData, size_t len) const;
     const void*         readInplace(size_t len) const;
diff --git a/libs/gui/LayerState.cpp b/libs/gui/LayerState.cpp
index 6066421..aa07cbe 100644
--- a/libs/gui/LayerState.cpp
+++ b/libs/gui/LayerState.cpp
@@ -267,8 +267,9 @@
     }
     if (other.what & eFlagsChanged) {
         what |= eFlagsChanged;
-        flags = other.flags;
-        mask = other.mask;
+        flags &= ~other.mask;
+        flags |= (other.flags & other.mask);
+        mask |= other.mask;
     }
     if (other.what & eLayerStackChanged) {
         what |= eLayerStackChanged;
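
The new masked merge above only overwrites the bits covered by other.mask and accumulates the mask, instead of clobbering the whole flags word. A standalone worked example, using bit values in the spirit of layer_state_t's eLayerHidden and eLayerSecure:

#include <cassert>
#include <cstdint>

int main() {
    const uint32_t eLayerHidden = 0x01, eLayerSecure = 0x80;

    // Pending state: a hide() has already set the hidden bit.
    uint32_t flags = eLayerHidden, mask = eLayerHidden;

    // Incoming state: setFlags(0, eLayerSecure) only owns the secure bit.
    uint32_t otherFlags = 0, otherMask = eLayerSecure;

    flags &= ~otherMask;                // clear only the bits the incoming state owns
    flags |= (otherFlags & otherMask);  // take the incoming value for those bits
    mask |= otherMask;                  // both bit ranges are now being changed

    assert(flags == eLayerHidden);                  // the hidden bit survives the merge
    assert(mask == (eLayerHidden | eLayerSecure));  // the old code would have left mask == eLayerSecure
    return 0;
}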
diff --git a/services/surfaceflinger/Scheduler/Scheduler.cpp b/services/surfaceflinger/Scheduler/Scheduler.cpp
index 9435193..7acb470 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.cpp
+++ b/services/surfaceflinger/Scheduler/Scheduler.cpp
@@ -385,11 +385,17 @@
 }
 
 void Scheduler::setChangeRefreshRateCallback(
-        const ChangeRefreshRateCallback& changeRefreshRateCallback) {
+        const ChangeRefreshRateCallback&& changeRefreshRateCallback) {
     std::lock_guard<std::mutex> lock(mCallbackLock);
     mChangeRefreshRateCallback = changeRefreshRateCallback;
 }
 
+void Scheduler::setGetCurrentRefreshRateTypeCallback(
+        const GetCurrentRefreshRateTypeCallback&& getCurrentRefreshRateTypeCallback) {
+    std::lock_guard<std::mutex> lock(mCallbackLock);
+    mGetCurrentRefreshRateTypeCallback = getCurrentRefreshRateTypeCallback;
+}
+
 void Scheduler::setGetVsyncPeriodCallback(const GetVsyncPeriod&& getVsyncPeriod) {
     std::lock_guard<std::mutex> lock(mCallbackLock);
     mGetVsyncPeriod = getVsyncPeriod;
@@ -447,8 +453,13 @@
 void Scheduler::resetKernelTimerCallback() {
     ATRACE_INT("ExpiredKernelIdleTimer", 0);
     std::lock_guard<std::mutex> lock(mCallbackLock);
-    if (mGetVsyncPeriod) {
-        resyncToHardwareVsync(true, mGetVsyncPeriod());
+    if (mGetVsyncPeriod && mGetCurrentRefreshRateTypeCallback) {
+        // If we're not in performance mode then the kernel timer shouldn't do
+        // anything, as the refresh rate during DPU power collapse will be the
+        // same.
+        if (mGetCurrentRefreshRateTypeCallback() == Scheduler::RefreshRateType::PERFORMANCE) {
+            resyncToHardwareVsync(true, mGetVsyncPeriod());
+        }
     }
 }
 
@@ -478,10 +489,16 @@
 }
 
 void Scheduler::expiredKernelTimerCallback() {
+    std::lock_guard<std::mutex> lock(mCallbackLock);
     ATRACE_INT("ExpiredKernelIdleTimer", 1);
-    // Disable HW Vsync if the timer expired, as we don't need it
-    // enabled if we're not pushing frames.
-    disableHardwareVsync(false);
+    if (mGetCurrentRefreshRateTypeCallback) {
+        if (mGetCurrentRefreshRateTypeCallback() != Scheduler::RefreshRateType::PERFORMANCE) {
+            // Disable HW Vsync if the timer expired, as we don't need it
+            // enabled if we're not pushing frames, and if we're in PERFORMANCE
+            // mode then we'll need to update the DispSync model again anyway.
+            disableHardwareVsync(false);
+        }
+    }
 }
 
 std::string Scheduler::doDump() {
diff --git a/services/surfaceflinger/Scheduler/Scheduler.h b/services/surfaceflinger/Scheduler/Scheduler.h
index 33f96b6..123036e 100644
--- a/services/surfaceflinger/Scheduler/Scheduler.h
+++ b/services/surfaceflinger/Scheduler/Scheduler.h
@@ -49,6 +49,7 @@
     }
 
     using RefreshRateType = scheduler::RefreshRateConfigs::RefreshRateType;
+    using GetCurrentRefreshRateTypeCallback = std::function<RefreshRateType()>;
     using ChangeRefreshRateCallback = std::function<void(RefreshRateType, ConfigEvent)>;
     using GetVsyncPeriod = std::function<nsecs_t()>;
 
@@ -165,7 +166,9 @@
     // Updates FPS based on the most content presented.
     void updateFpsBasedOnContent();
     // Callback that gets invoked when Scheduler wants to change the refresh rate.
-    void setChangeRefreshRateCallback(const ChangeRefreshRateCallback& changeRefreshRateCallback);
+    void setChangeRefreshRateCallback(const ChangeRefreshRateCallback&& changeRefreshRateCallback);
+    void setGetCurrentRefreshRateTypeCallback(
+            const GetCurrentRefreshRateTypeCallback&& getCurrentRefreshRateType);
     void setGetVsyncPeriodCallback(const GetVsyncPeriod&& getVsyncPeriod);
 
     // Returns whether idle timer is enabled or not
@@ -294,6 +297,7 @@
     std::unique_ptr<scheduler::OneShotTimer> mDisplayPowerTimer;
 
     std::mutex mCallbackLock;
+    GetCurrentRefreshRateTypeCallback mGetCurrentRefreshRateTypeCallback GUARDED_BY(mCallbackLock);
     ChangeRefreshRateCallback mChangeRefreshRateCallback GUARDED_BY(mCallbackLock);
     GetVsyncPeriod mGetVsyncPeriod GUARDED_BY(mCallbackLock);
 
diff --git a/services/surfaceflinger/SurfaceFlinger.cpp b/services/surfaceflinger/SurfaceFlinger.cpp
index 9333de3..86ed5fb 100644
--- a/services/surfaceflinger/SurfaceFlinger.cpp
+++ b/services/surfaceflinger/SurfaceFlinger.cpp
@@ -713,6 +713,24 @@
                 Mutex::Autolock lock(mStateLock);
                 setRefreshRateTo(type, event);
             });
+    mScheduler->setGetCurrentRefreshRateTypeCallback([this] {
+        Mutex::Autolock lock(mStateLock);
+        const auto display = getDefaultDisplayDeviceLocked();
+        if (!display) {
+            // If we don't have a default display, then fall back to the default
+            // refresh rate type.
+            return RefreshRateType::DEFAULT;
+        }
+
+        const int configId = display->getActiveConfig();
+        for (const auto& [type, refresh] : mRefreshRateConfigs.getRefreshRates()) {
+            if (refresh && refresh->configId == configId) {
+                return type;
+            }
+        }
+        // This should never happen, but just gracefully fall back to the default.
+        return RefreshRateType::DEFAULT;
+    });
     mScheduler->setGetVsyncPeriodCallback([this] {
         Mutex::Autolock lock(mStateLock);
         return getVsyncPeriod();
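
The new GetCurrentRefreshRateTypeCallback wiring (the alias and GUARDED_BY member in Scheduler.h, the setter in Scheduler.cpp, the registration above, and the null checks in the kernel-timer callbacks) follows a store-a-std::function-under-a-mutex pattern. A minimal, self-contained sketch of that pattern, not the actual Scheduler class:

#include <functional>
#include <mutex>

enum class RefreshRateType { DEFAULT, PERFORMANCE };

class TimerOwner {
public:
    using GetCurrentRefreshRateTypeCallback = std::function<RefreshRateType()>;

    void setGetCurrentRefreshRateTypeCallback(GetCurrentRefreshRateTypeCallback cb) {
        std::lock_guard<std::mutex> lock(mCallbackLock);
        mGetCurrentRefreshRateTypeCallback = std::move(cb);
    }

    void onKernelTimerExpired() {
        std::lock_guard<std::mutex> lock(mCallbackLock);
        // Do nothing if the owner never registered a callback, mirroring the
        // null checks in the Scheduler hunks above.
        if (mGetCurrentRefreshRateTypeCallback &&
            mGetCurrentRefreshRateTypeCallback() != RefreshRateType::PERFORMANCE) {
            // e.g. disable hardware vsync here
        }
    }

private:
    std::mutex mCallbackLock;
    GetCurrentRefreshRateTypeCallback mGetCurrentRefreshRateTypeCallback;
};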
@@ -1045,6 +1063,7 @@
         desiredActiveConfigChangeDone();
         return false;
     }
+
     mUpcomingActiveConfig = desiredActiveConfig;
     const auto displayId = display->getId();
     LOG_ALWAYS_FATAL_IF(!displayId);
@@ -5413,7 +5432,12 @@
                 return NO_ERROR;
             }
             case 1023: { // Set native mode
+                int32_t colorMode;
+
                 mDisplayColorSetting = static_cast<DisplayColorSetting>(data.readInt32());
+                if (data.readInt32(&colorMode) == NO_ERROR) {
+                    mForceColorMode = static_cast<ColorMode>(colorMode);
+                }
                 invalidateHwcGeometry();
                 repaintEverything();
                 return NO_ERROR;
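
Case 1023 now reads an optional second int32 and, if present, forces a ColorMode. As a hedged illustration of how a test or tool might drive the extended command; the helper name and the client-side plumbing are assumptions, not part of this change:

#include <binder/Parcel.h>
#include <gui/ISurfaceComposer.h>
#include <private/gui/ComposerService.h>

using namespace android;

// Hypothetical helper: issue backdoor transaction 1023 with the new optional
// color-mode argument appended.
status_t setNativeModeAndColorMode(int32_t displayColorSetting, int32_t colorMode) {
    sp<ISurfaceComposer> composer = ComposerService::getComposerService();
    if (composer == nullptr) return NO_INIT;

    Parcel data, reply;
    data.writeInterfaceToken(ISurfaceComposer::descriptor);
    data.writeInt32(displayColorSetting);  // consumed as mDisplayColorSetting
    data.writeInt32(colorMode);            // new optional argument: forced ColorMode
    return IInterface::asBinder(composer)->transact(1023, data, &reply);
}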
diff --git a/services/surfaceflinger/tests/Transaction_test.cpp b/services/surfaceflinger/tests/Transaction_test.cpp
index d5f6534..aed7b40 100644
--- a/services/surfaceflinger/tests/Transaction_test.cpp
+++ b/services/surfaceflinger/tests/Transaction_test.cpp
@@ -4427,6 +4427,30 @@
     }
 }
 
+TEST_F(LayerUpdateTest, MergingTransactionFlags) {
+    Transaction().hide(mFGSurfaceControl).apply();
+    std::unique_ptr<ScreenCapture> sc;
+    {
+        SCOPED_TRACE("before merge");
+        ScreenCapture::captureScreen(&sc);
+        sc->expectBGColor(0, 12);
+        sc->expectBGColor(75, 75);
+        sc->expectBGColor(145, 145);
+    }
+
+    Transaction t1, t2;
+    t1.show(mFGSurfaceControl);
+    t2.setFlags(mFGSurfaceControl, 0 /* flags */, layer_state_t::eLayerSecure /* mask */);
+    t1.merge(std::move(t2));
+    t1.apply();
+
+    {
+        SCOPED_TRACE("after merge");
+        ScreenCapture::captureScreen(&sc);
+        sc->expectFGColor(75, 75);
+    }
+}
+
 class ChildLayerTest : public LayerUpdateTest {
 protected:
     void SetUp() override {