mirror of
https://github.com/godotengine/godot-angle-static.git
synced 2026-01-03 14:09:33 +03:00
Add "angle_enable_context_mutex_recursion" build option
Option is for Android Vulkan.
Fixes the recursion problem:
#00 pc 000000000004d69c /apex/com.android.runtime/lib64/bionic/libc.so (syscall+28) (BuildId: dda896312b8ac9c639f6aeb0175b8a0a)
#01 pc 0000000000051e84 /apex/com.android.runtime/lib64/bionic/libc.so (__futex_wait_ex(void volatile*, bool, int, bool, timespec const*)+144) (BuildId: dda896312b8ac9c639f6aeb0175b8a0a)
#02 pc 00000000000b41b4 /apex/com.android.runtime/lib64/bionic/libc.so (NonPI::MutexLockWithTimeout(pthread_mutex_internal_t*, bool, timespec const*)+236) (BuildId: dda896312b8ac9c639f6aeb0175b8a0a)
#03 pc 00000000003d8274 /vendor/lib64/egl/libGLESv2_angle.so (std::__Cr::mutex::lock()+12) (BuildId: 203fee8f1cfe9c18)
#04 pc 0000000000336494 /vendor/lib64/egl/libGLESv2_angle.so (egl::SharedContextMutex<std::__Cr::mutex>::doLock()+132) (BuildId: 203fee8f1cfe9c18)
#05 pc 000000000033668c /vendor/lib64/egl/libGLESv2_angle.so (egl::SharedContextMutex<std::__Cr::mutex>::lock()+124) (BuildId: 203fee8f1cfe9c18)
#06 pc 00000000001d6e08 /vendor/lib64/egl/libGLESv2_angle.so (EGL_ClientWaitSyncKHR+100) (BuildId: 203fee8f1cfe9c18)
#07 pc 00000000000674f0 /system/lib64/libEGL.so (android::eglClientWaitSyncKHRImpl(void*, void*, int, unsigned long)+112) (BuildId: c4698b8b366f6519dbd7bd3a5f6239f2)
#08 pc 0000000000086648 /system/lib64/libgui.so (android::BufferQueueProducer::dequeueBuffer(int*, android::sp<android::Fence>*, unsigned int, unsigned int, int, unsigned long, unsigned long*, android::FrameEventHistoryDelta*)+3240) (BuildId: ff35b91736084ee7c49efe908fb9f8f3)
#09 pc 00000000000e24fc /system/lib64/libgui.so (android::Surface::dequeueBuffer(ANativeWindowBuffer**, int*)+392) (BuildId: ff35b91736084ee7c49efe908fb9f8f3)
#10 pc 0000000000755770 /system/lib64/libhwui.so (android::uirenderer::renderthread::ReliableSurface::hook_dequeueBuffer(ANativeWindow*, int (*)(ANativeWindow*, ANativeWindowBuffer**, int*), void*, ANativeWindowBuffer**, int*)+80) (BuildId: 0ab7d7584ac800860c4d180557441d1b)
#11 pc 00000000000e0b48 /system/lib64/libgui.so (android::Surface::hook_dequeueBuffer(ANativeWindow*, ANativeWindowBuffer**, int*)+92) (BuildId: ff35b91736084ee7c49efe908fb9f8f3)
#12 pc 000000000002cc8c /system/lib64/libvulkan.so (vulkan::driver::AcquireNextImageKHR(VkDevice_T*, VkSwapchainKHR_T*, unsigned long, VkSemaphore_T*, VkFence_T*, unsigned int*)+280) (BuildId: 0c72f8685858f73fbb13fd68d401bba5)
#13 pc 0000000000266f08 /vendor/lib64/egl/libGLESv2_angle.so (rx::(anonymous namespace)::TryAcquireNextImageUnlocked(VkDevice_T*, VkSwapchainKHR_T*, rx::impl::ImageAcquireOperation*)+128) (BuildId: 203fee8f1cfe9c18)
#14 pc 000000000026495c /vendor/lib64/egl/libGLESv2_angle.so (rx::WindowSurfaceVk::acquireNextSwapchainImage(rx::vk::Context*)+140) (BuildId: 203fee8f1cfe9c18)
#15 pc 0000000000265800 /vendor/lib64/egl/libGLESv2_angle.so (rx::WindowSurfaceVk::doDeferredAcquireNextImageWithUsableSwapchain(gl::Context const*)+148) (BuildId: 203fee8f1cfe9c18)
#16 pc 0000000000267bb4 /vendor/lib64/egl/libGLESv2_angle.so (rx::WindowSurfaceVk::getBufferAge(gl::Context const*, int*)+196) (BuildId: 203fee8f1cfe9c18)
#17 pc 000000000034143c /vendor/lib64/egl/libGLESv2_angle.so (egl::Surface::getBufferAge(gl::Context const*, int*)+36) (BuildId: 203fee8f1cfe9c18)
#18 pc 000000000036c354 /vendor/lib64/egl/libGLESv2_angle.so (egl::QuerySurfaceAttrib(egl::Display const*, gl::Context const*, egl::Surface*, int, int*)+812) (BuildId: 203fee8f1cfe9c18)
#19 pc 00000000001d1674 /vendor/lib64/egl/libGLESv2_angle.so (egl::QuerySurface(egl::Thread*, egl::Display*, egl::SurfaceID, int, int*)+176) (BuildId: 203fee8f1cfe9c18)
#20 pc 00000000001d3658 /vendor/lib64/egl/libGLESv2_angle.so (EGL_QuerySurface+200) (BuildId: 203fee8f1cfe9c18)
Happens only on platforms that use "EGL_KHR_fence_sync" instead of
"EGL_ANDROID_native_fence_sync" for synchronization. Happens only on
specific Surface types that use `EGLConsumer`/`GLConsumer`, such as
`SurfaceTexture`.
Android's "testDrawingHardwareBitmapNotLeaking" test may be used for
testing the issue. Note: in order to reproduce, `SingleContextMutex`
must not be used by context. The "Fix ExternalImageTarget EGLImage race"
CL will achieve that.
To check what extension is used, call:
adb shell "dumpsys SurfaceFlinger | grep 'Sync configuration'"
Possible results:
Sync configuration: [using: EGL_KHR_fence_sync EGL_KHR_wait_sync]
Sync configuration: [using: EGL_ANDROID_native_fence_sync EGL_KHR_wait_sync]
Bug: angleproject:4354
Change-Id: I915a2c026b59af0a2098ae6b3300f773b29cbfbb
Reviewed-on: https://chromium-review.googlesource.com/c/angle/angle/+/4733831
Commit-Queue: Igor Nazarov <i.nazarov@samsung.com>
Reviewed-by: Shahbaz Youssefi <syoussefi@chromium.org>
This commit is contained in:
committed by
Angle LUCI CQ
parent
11cef17b53
commit
d61a50c155
14
BUILD.gn
14
BUILD.gn
@@ -84,6 +84,16 @@ declare_args() {
|
||||
angle_enable_shared_context_mutex = true
|
||||
}
|
||||
|
||||
declare_args() {
|
||||
# May need to enable to fix recursion when vkAcquireNextImageKHR() returns back to ANGLE from
|
||||
# eglClientWaitSyncKHR(). May happen if Android Presentation Engine uses
|
||||
# EGL_KHR_fence_sync instead of EGL_ANDROID_native_fence_sync for synchronization.
|
||||
# To check call: adb shell "dumpsys SurfaceFlinger | grep 'Sync configuration'"
|
||||
# Android's "testDrawingHardwareBitmapNotLeaking" test may be used for testing.
|
||||
# Disable this option to save performance on platforms that do not require recursion.
|
||||
angle_enable_context_mutex_recursion = angle_enable_global_mutex_recursion
|
||||
}
|
||||
|
||||
if (angle_build_all) {
|
||||
group("all") {
|
||||
testonly = true
|
||||
@@ -182,6 +192,10 @@ config("internal_config") {
|
||||
defines += [ "ANGLE_ENABLE_SHARED_CONTEXT_MUTEX=1" ]
|
||||
}
|
||||
|
||||
if (angle_enable_context_mutex_recursion) {
|
||||
defines += [ "ANGLE_ENABLE_CONTEXT_MUTEX_RECURSION=1" ]
|
||||
}
|
||||
|
||||
# Enables debug/trace-related functionality, including logging every GLES/EGL API command to the
|
||||
# "angle_debug.txt" file on desktop. Enables debug markers for AGI, but must also set
|
||||
# angle_enable_annotator_run_time_checks to improve performance.
|
||||
|
||||
@@ -15,16 +15,16 @@ namespace egl
|
||||
|
||||
namespace
|
||||
{
|
||||
bool CheckThreadIdCurrent(const std::atomic<angle::ThreadId> &threadId,
|
||||
angle::ThreadId *currentThreadIdOut)
|
||||
[[maybe_unused]] bool CheckThreadIdCurrent(const std::atomic<angle::ThreadId> &threadId,
|
||||
angle::ThreadId *currentThreadIdOut)
|
||||
{
|
||||
*currentThreadIdOut = angle::GetCurrentThreadId();
|
||||
return (threadId.load(std::memory_order_relaxed) == *currentThreadIdOut);
|
||||
}
|
||||
|
||||
bool TryUpdateThreadId(std::atomic<angle::ThreadId> *threadId,
|
||||
angle::ThreadId oldThreadId,
|
||||
angle::ThreadId newThreadId)
|
||||
[[maybe_unused]] bool TryUpdateThreadId(std::atomic<angle::ThreadId> *threadId,
|
||||
angle::ThreadId oldThreadId,
|
||||
angle::ThreadId newThreadId)
|
||||
{
|
||||
const bool ok = (threadId->load(std::memory_order_relaxed) == oldThreadId);
|
||||
if (ok)
|
||||
@@ -90,15 +90,31 @@ bool SingleContextMutex::try_lock()
|
||||
return false;
|
||||
}
|
||||
|
||||
#if defined(ANGLE_ENABLE_CONTEXT_MUTEX_RECURSION)
|
||||
void SingleContextMutex::lock()
|
||||
{
|
||||
const int oldValue = mState.fetch_add(1, std::memory_order_relaxed);
|
||||
ASSERT(oldValue >= 0);
|
||||
}
|
||||
|
||||
void SingleContextMutex::unlock()
|
||||
{
|
||||
const int oldValue = mState.fetch_sub(1, std::memory_order_release);
|
||||
ASSERT(oldValue > 0);
|
||||
}
|
||||
#else
|
||||
void SingleContextMutex::lock()
|
||||
{
|
||||
ASSERT(!isLocked(std::memory_order_relaxed));
|
||||
mState.store(1, std::memory_order_relaxed);
|
||||
}
|
||||
|
||||
void SingleContextMutex::unlock()
|
||||
{
|
||||
ASSERT(isLocked(std::memory_order_relaxed));
|
||||
mState.store(0, std::memory_order_release);
|
||||
}
|
||||
#endif
|
||||
|
||||
// SharedContextMutex
|
||||
template <class Mutex>
|
||||
@@ -124,6 +140,81 @@ void SharedContextMutex<Mutex>::unlock()
|
||||
root->doUnlock();
|
||||
}
|
||||
|
||||
#if defined(ANGLE_ENABLE_CONTEXT_MUTEX_RECURSION)
|
||||
template <class Mutex>
|
||||
ANGLE_INLINE SharedContextMutex<Mutex> *SharedContextMutex<Mutex>::doTryLock()
|
||||
{
|
||||
const angle::ThreadId threadId = angle::GetCurrentThreadId();
|
||||
if (ANGLE_UNLIKELY(!mMutex.try_lock()))
|
||||
{
|
||||
if (ANGLE_UNLIKELY(mOwnerThreadId.load(std::memory_order_relaxed) == threadId))
|
||||
{
|
||||
ASSERT(this == getRoot());
|
||||
ASSERT(mLockLevel > 0);
|
||||
++mLockLevel;
|
||||
return this;
|
||||
}
|
||||
return nullptr;
|
||||
}
|
||||
ASSERT(mOwnerThreadId.load(std::memory_order_relaxed) == angle::InvalidThreadId());
|
||||
ASSERT(mLockLevel == 0);
|
||||
SharedContextMutex *const root = getRoot();
|
||||
if (ANGLE_UNLIKELY(this != root))
|
||||
{
|
||||
// Unlock, so only the "stable root" mutex remains locked
|
||||
mMutex.unlock();
|
||||
SharedContextMutex *const lockedRoot = root->doTryLock();
|
||||
ASSERT(lockedRoot == nullptr || lockedRoot == getRoot());
|
||||
return lockedRoot;
|
||||
}
|
||||
mOwnerThreadId.store(threadId, std::memory_order_relaxed);
|
||||
mLockLevel = 1;
|
||||
return this;
|
||||
}
|
||||
|
||||
template <class Mutex>
|
||||
ANGLE_INLINE SharedContextMutex<Mutex> *SharedContextMutex<Mutex>::doLock()
|
||||
{
|
||||
const angle::ThreadId threadId = angle::GetCurrentThreadId();
|
||||
if (ANGLE_UNLIKELY(!mMutex.try_lock()))
|
||||
{
|
||||
if (ANGLE_UNLIKELY(mOwnerThreadId.load(std::memory_order_relaxed) == threadId))
|
||||
{
|
||||
ASSERT(this == getRoot());
|
||||
ASSERT(mLockLevel > 0);
|
||||
++mLockLevel;
|
||||
return this;
|
||||
}
|
||||
mMutex.lock();
|
||||
}
|
||||
ASSERT(mOwnerThreadId.load(std::memory_order_relaxed) == angle::InvalidThreadId());
|
||||
ASSERT(mLockLevel == 0);
|
||||
SharedContextMutex *const root = getRoot();
|
||||
if (ANGLE_UNLIKELY(this != root))
|
||||
{
|
||||
// Unlock, so only the "stable root" mutex remains locked
|
||||
mMutex.unlock();
|
||||
SharedContextMutex *const lockedRoot = root->doLock();
|
||||
ASSERT(lockedRoot == getRoot());
|
||||
return lockedRoot;
|
||||
}
|
||||
mOwnerThreadId.store(threadId, std::memory_order_relaxed);
|
||||
mLockLevel = 1;
|
||||
return this;
|
||||
}
|
||||
|
||||
template <class Mutex>
|
||||
ANGLE_INLINE void SharedContextMutex<Mutex>::doUnlock()
|
||||
{
|
||||
ASSERT(mOwnerThreadId.load(std::memory_order_relaxed) == angle::GetCurrentThreadId());
|
||||
ASSERT(mLockLevel > 0);
|
||||
if (ANGLE_LIKELY(--mLockLevel == 0))
|
||||
{
|
||||
mOwnerThreadId.store(angle::InvalidThreadId(), std::memory_order_relaxed);
|
||||
mMutex.unlock();
|
||||
}
|
||||
}
|
||||
#else
|
||||
template <class Mutex>
|
||||
ANGLE_INLINE SharedContextMutex<Mutex> *SharedContextMutex<Mutex>::doTryLock()
|
||||
{
|
||||
@@ -172,15 +263,17 @@ ANGLE_INLINE void SharedContextMutex<Mutex>::doUnlock()
|
||||
TryUpdateThreadId(&mOwnerThreadId, angle::GetCurrentThreadId(), angle::InvalidThreadId()));
|
||||
mMutex.unlock();
|
||||
}
|
||||
#endif
|
||||
|
||||
template <class Mutex>
|
||||
SharedContextMutex<Mutex>::SharedContextMutex()
|
||||
: mRoot(this), mRank(0), mOwnerThreadId(angle::InvalidThreadId())
|
||||
: mOwnerThreadId(angle::InvalidThreadId()), mLockLevel(0), mRoot(this), mRank(0)
|
||||
{}
|
||||
|
||||
template <class Mutex>
|
||||
SharedContextMutex<Mutex>::~SharedContextMutex()
|
||||
{
|
||||
ASSERT(mLockLevel == 0);
|
||||
ASSERT(this == getRoot());
|
||||
ASSERT(mOldRoots.empty());
|
||||
ASSERT(mLeaves.empty());
|
||||
@@ -227,6 +320,7 @@ void SharedContextMutex<Mutex>::Merge(SharedContextMutex *lockedMutex,
|
||||
}
|
||||
// Lock was unsuccessful - unlock and retry...
|
||||
// May use "doUnlock()" because lockedRoot is a "stable root" mutex.
|
||||
// Note: lock will be preserved in case of the recursive lock.
|
||||
lockedRoot->doUnlock();
|
||||
// Sleep a random amount to allow one of the threads to acquire the lock next time...
|
||||
std::this_thread::sleep_for(std::chrono::microseconds(rand() % 91 + 10));
|
||||
@@ -241,28 +335,36 @@ void SharedContextMutex<Mutex>::Merge(SharedContextMutex *lockedMutex,
|
||||
|
||||
// Decide the new "root". See mRank comment for more details...
|
||||
|
||||
// Make "otherLockedRoot" the root of the "merged" mutex
|
||||
if (lockedRoot->mRank > otherLockedRoot->mRank)
|
||||
SharedContextMutex *oldRoot = lockedRoot;
|
||||
SharedContextMutex *newRoot = otherLockedRoot;
|
||||
|
||||
if (oldRoot->mRank > newRoot->mRank)
|
||||
{
|
||||
// So the "lockedRoot" is lower rank.
|
||||
std::swap(lockedRoot, otherLockedRoot);
|
||||
std::swap(oldRoot, newRoot);
|
||||
}
|
||||
else if (lockedRoot->mRank == otherLockedRoot->mRank)
|
||||
else if (oldRoot->mRank == newRoot->mRank)
|
||||
{
|
||||
++otherLockedRoot->mRank;
|
||||
++newRoot->mRank;
|
||||
}
|
||||
|
||||
// Update the structure
|
||||
for (SharedContextMutex *const leaf : lockedRoot->mLeaves)
|
||||
for (SharedContextMutex *const leaf : oldRoot->mLeaves)
|
||||
{
|
||||
ASSERT(leaf->getRoot() == lockedRoot);
|
||||
leaf->setNewRoot(otherLockedRoot);
|
||||
ASSERT(leaf->getRoot() == oldRoot);
|
||||
leaf->setNewRoot(newRoot);
|
||||
}
|
||||
lockedRoot->mLeaves.clear();
|
||||
lockedRoot->setNewRoot(otherLockedRoot);
|
||||
oldRoot->mLeaves.clear();
|
||||
oldRoot->setNewRoot(newRoot);
|
||||
|
||||
// Leave only the "merged" mutex locked. "lockedRoot" already merged, need to use "doUnlock()"
|
||||
lockedRoot->doUnlock();
|
||||
// Leave only the "merged" mutex locked. "oldRoot" already merged, need to use "doUnlock()"
|
||||
oldRoot->doUnlock();
|
||||
|
||||
// Merge from recursive lock is unexpected. Handle such cases anyway to be safe.
|
||||
while (oldRoot->mLockLevel > 0)
|
||||
{
|
||||
newRoot->doLock();
|
||||
oldRoot->doUnlock();
|
||||
}
|
||||
}
|
||||
|
||||
template <class Mutex>
|
||||
|
||||
@@ -144,7 +144,7 @@ class [[nodiscard]] ScopedContextMutexLock final
|
||||
class SingleContextMutex final : public ContextMutex
|
||||
{
|
||||
public:
|
||||
ANGLE_INLINE bool isLocked(std::memory_order order) const { return mState.load(order) != 0; }
|
||||
ANGLE_INLINE bool isLocked(std::memory_order order) const { return mState.load(order) > 0; }
|
||||
|
||||
// ContextMutex
|
||||
bool try_lock() override;
|
||||
@@ -198,6 +198,10 @@ class SharedContextMutex final : public ContextMutex
|
||||
|
||||
private:
|
||||
Mutex mMutex;
|
||||
// Used when ASSERT() and/or recursion are/is enabled.
|
||||
std::atomic<angle::ThreadId> mOwnerThreadId;
|
||||
// Used only when recursion is enabled.
|
||||
uint32_t mLockLevel;
|
||||
|
||||
// mRoot and mLeaves tree structure details:
|
||||
// - used to implement primary functionality of this class;
|
||||
@@ -253,9 +257,6 @@ class SharedContextMutex final : public ContextMutex
|
||||
// - no mOldRoots grows at all;
|
||||
// - minumum number of mutexes to reach mOldRoots size of N => 2^(N+1).
|
||||
uint32_t mRank;
|
||||
|
||||
// Only used when ASSERT() is enabled.
|
||||
std::atomic<angle::ThreadId> mOwnerThreadId;
|
||||
};
|
||||
|
||||
class ContextMutexManager
|
||||
|
||||
Reference in New Issue
Block a user