Mutex: Rollback requeuing waiters as LIFO

PiperOrigin-RevId: 567415671
Change-Id: I59bfcb5ac9fbde227a4cdb3b497b0bd5969b0770
diff --git a/absl/synchronization/mutex.cc b/absl/synchronization/mutex.cc
index 70a1737..27fd6a5 100644
--- a/absl/synchronization/mutex.cc
+++ b/absl/synchronization/mutex.cc
@@ -991,24 +991,6 @@
       if (MuEquivalentWaiter(s, s->next)) {  // s->may_skip is known to be true
         s->skip = s->next;                   // s may skip to its successor
       }
-    } else if ((flags & kMuHasBlocked) &&
-               (s->priority >= head->next->priority) &&
-               (!head->maybe_unlocking ||
-                (waitp->how == kExclusive &&
-                 Condition::GuaranteedEqual(waitp->cond, nullptr)))) {
-      // This thread has already waited, then was woken, then failed to acquire
-      // the mutex and now tries to requeue. Try to requeue it at head,
-      // otherwise it can suffer bad latency (wait whole qeueue several times).
-      // However, we need to be conservative. First, we need to ensure that we
-      // respect priorities. Then, we need to be careful to not break wait
-      // queue invariants: we require either that unlocker is not scanning
-      // the queue or that the current thread is a writer with no condition
-      // (unlocker will recheck the queue for such waiters).
-      s->next = head->next;
-      head->next = s;
-      if (MuEquivalentWaiter(s, s->next)) {  // s->may_skip is known to be true
-        s->skip = s->next;                   // s may skip to its successor
-      }
     } else {  // enqueue not done any other way, so
               // we're inserting s at the back
       // s will become new head; copy data from head into it