Add absl::chunked_queue

This change introduces absl::chunked_queue, a sequence container
optimized for use as a FIFO (First-In, First-Out) queue. It is similar
in purpose to std::deque but with different performance trade-offs and
features.

absl::chunked_queue stores elements in a series of
exponentially-growing chunks of memory.

absl::chunked_queue is often a better choice than std::deque in the
following situations:
  * Large queues: For very large numbers of elements, the exponential
    growth strategy of absl::chunked_queue can lead to fewer, larger
    memory allocations compared to std::deque, which can be a
    performance advantage.
  * Strict FIFO processing: When you only need to add elements to the
    back (push_back) and remove them from the front (pop_front).

std::deque should be preferred in the following cases:
  * Operations at both ends: std::deque is designed for efficient
    insertions and deletions at both the front and the
    back. absl::chunked_queue is optimized for push_back and pop_front
    and does not offer a pop_back method.
  * Random access: std::deque provides O(1) random access to
    elements via operator[]. absl::chunked_queue does not support
    random access.
PiperOrigin-RevId: 850999629
Change-Id: Ie71737c10b6125b9e498109267cac87a4ca2f9e8
diff --git a/CMake/AbseilDll.cmake b/CMake/AbseilDll.cmake
index 10e6cd1..47d0efd 100644
--- a/CMake/AbseilDll.cmake
+++ b/CMake/AbseilDll.cmake
@@ -69,6 +69,7 @@
   "cleanup/internal/cleanup.h"
   "container/btree_map.h"
   "container/btree_set.h"
+  "container/chunked_queue.h"
   "container/hash_container_defaults.h"
   "container/fixed_array.h"
   "container/flat_hash_map.h"
@@ -76,6 +77,7 @@
   "container/inlined_vector.h"
   "container/internal/btree.h"
   "container/internal/btree_container.h"
+  "container/internal/chunked_queue.h"
   "container/internal/common.h"
   "container/internal/common_policy_traits.h"
   "container/internal/compressed_tuple.h"
diff --git a/absl/container/BUILD.bazel b/absl/container/BUILD.bazel
index 15fb0a5..e90aaec 100644
--- a/absl/container/BUILD.bazel
+++ b/absl/container/BUILD.bazel
@@ -1349,3 +1349,46 @@
         "@google_benchmark//:benchmark_main",
     ],
 )
+
+cc_library(
+    name = "chunked_queue",
+    srcs = ["internal/chunked_queue.h"],
+    hdrs = ["chunked_queue.h"],
+    deps = [
+        ":layout",
+        "//absl/base:config",
+        "//absl/base:core_headers",
+        "//absl/base:iterator_traits_internal",
+    ],
+)
+
+cc_test(
+    name = "chunked_queue_test",
+    size = "small",
+    srcs = ["chunked_queue_test.cc"],
+    deps = [
+        ":chunked_queue",
+        ":test_allocator",
+        "//absl/base:core_headers",
+        "//absl/strings",
+        "@googletest//:gtest",
+        "@googletest//:gtest_main",
+    ],
+)
+
+cc_binary(
+    name = "chunked_queue_benchmark",
+    testonly = True,
+    srcs = ["chunked_queue_benchmark.cc"],
+    copts = ABSL_TEST_COPTS,
+    linkopts = ABSL_DEFAULT_LINKOPTS,
+    tags = ["benchmark"],
+    visibility = ["//visibility:private"],
+    deps = [
+        ":chunked_queue",
+        "//absl/random",
+        "//absl/status",
+        "//absl/strings:cord",
+        "@google_benchmark//:benchmark_main",
+    ],
+)
diff --git a/absl/container/CMakeLists.txt b/absl/container/CMakeLists.txt
index f1ea9e2..365c6ea 100644
--- a/absl/container/CMakeLists.txt
+++ b/absl/container/CMakeLists.txt
@@ -1202,3 +1202,38 @@
     absl::unordered_set_modifiers_test
     GTest::gmock_main
 )
+
+absl_cc_library(
+  NAME
+    chunked_queue
+  HDRS
+    "chunked_queue.h"
+    "internal/chunked_queue.h"
+  COPTS
+    ${ABSL_DEFAULT_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+     absl::config
+     absl::core_headers
+     absl::iterator_traits_internal
+     absl::layout
+)
+
+absl_cc_test(
+  NAME
+    chunked_queue_test
+  SRCS
+    "chunked_queue_test.cc"
+  COPTS
+    ${ABSL_TEST_COPTS}
+  LINKOPTS
+    ${ABSL_DEFAULT_LINKOPTS}
+  DEPS
+    absl::chunked_queue
+    absl::config
+    absl::core_headers
+    absl::strings
+    absl::test_allocator
+    GTest::gmock_main
+)
diff --git a/absl/container/chunked_queue.h b/absl/container/chunked_queue.h
new file mode 100644
index 0000000..d5b1184
--- /dev/null
+++ b/absl/container/chunked_queue.h
@@ -0,0 +1,755 @@
+// Copyright 2025 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+//
+// -----------------------------------------------------------------------------
+// File: chunked_queue.h
+// -----------------------------------------------------------------------------
+//
+// `std::deque` provides random access and fast push/pop back/front. It is
+// implemented as an array of fixed blocks. It provides no control of block size
+// and implementations differ; libstdc++ tries to allocate blocks of ~512 bytes
+// and libc++ tries for blocks of ~4k bytes.
+//
+// `absl::chunked_queue` provides the same minus random access. It is
+// implemented as a doubly-linked list of fixed- or variable-sized blocks.
+//
+// `absl::chunked_queue` is useful when memory usage is paramount as it provides
+// fine-grained and configurable block sizing.
+//
+// The interface supported by this class is limited to:
+//
+//   empty()
+//   size()
+//   max_size()
+//   shrink_to_fit()
+//   resize()
+//   assign()
+//   push_back()
+//   emplace_back()
+//   pop_front()
+//   front()
+//   back()
+//   swap()
+//   clear()
+//   begin(), end()
+//   cbegin(), cend()
+//
+// === ADVANCED USAGE
+//
+// == clear()
+//
+// As an optimization clear() leaves the first block of the chunked_queue
+// allocated (but empty). So clear will not delete all memory of the container.
+// In order to do so, call shrink_to_fit() or swap the container with an empty
+// one.
+//
+//   absl::chunked_queue<int64_t> q = {1, 2, 3};
+//   q.clear();
+//   q.shrink_to_fit();
+//
+// == block size customization
+//
+// chunked_queue allows customization of the block size for each block. By
+// default the block size is set to 1 element and the size doubles for the next
+// block until it reaches the default max block size, which is 128 elements.
+//
+// = fixed size
+//
+// When only the first block size parameter is specified, it sets a fixed block
+// size for all blocks:
+//
+//   chunked_queue<T, 32>: 32 elements per block
+//
+// The smaller the block size, the less the memory usage for small queues at the
+// cost of performance. Caveat: For large queues, a smaller block size will
+// increase memory usage, and reduce performance.
+//
+// = variable size
+//
+// When both block size parameters are specified, they set the min and max block
+// sizes for the blocks. Initially the queue starts with the min block size and
+// as it grows, the size of each block grows until it reaches the max block
+// size.
+// New blocks are double the size of the tail block (so they at least
+// double the size of the queue).
+//
+//   chunked_queue<T, 4, 64>: first block 4 elements, second block 8 elements,
+//                            third block 16 elements, fourth block 32 elements,
+//                            all other blocks 64 elements
+//
+// One can specify a min and max such that small queues will not waste memory
+// and large queues will not have too many blocks.
+
+#ifndef ABSL_CONTAINER_CHUNKED_QUEUE_H_
+#define ABSL_CONTAINER_CHUNKED_QUEUE_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <new>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/internal/iterator_traits.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/chunked_queue.h"
+#include "absl/container/internal/layout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+
+template <typename T, size_t BLo = 0, size_t BHi = BLo,
+          typename Allocator = std::allocator<T>>
+class chunked_queue {
+ public:
+  static constexpr size_t kBlockSizeMin = (BLo == 0 && BHi == 0) ? 1 : BLo;
+  static constexpr size_t kBlockSizeMax = (BLo == 0 && BHi == 0) ? 128 : BHi;
+
+ private:
+  static_assert(kBlockSizeMin > 0, "Min block size cannot be zero");
+  static_assert(kBlockSizeMin <= kBlockSizeMax, "Invalid block size bounds");
+
+  using Block = container_internal::ChunkedQueueBlock<T, Allocator>;
+  using AllocatorTraits = std::allocator_traits<Allocator>;
+
+  class iterator_common {
+   public:
+    friend bool operator==(const iterator_common& a, const iterator_common& b) {
+      return a.ptr == b.ptr;
+    }
+
+    friend bool operator!=(const iterator_common& a, const iterator_common& b) {
+      return !(a == b);
+    }
+
+   protected:
+    iterator_common() = default;
+    explicit iterator_common(Block* b)
+        : block(b), ptr(b->start()), limit(b->limit()) {}
+
+    void Incr() {
+      // If we do not have a next block, make ptr point one past the end of this
+      // block. If we do have a next block, make ptr point to the first element
+      // of the next block.
+      ++ptr;
+      if (ptr == limit && block->next()) *this = iterator_common(block->next());
+    }
+
+    void IncrBy(size_t n) {
+      while (ptr + n > limit) {
+        n -= limit - ptr;
+        *this = iterator_common(block->next());
+      }
+      ptr += n;
+    }
+
+    Block* block = nullptr;
+    T* ptr = nullptr;
+    T* limit = nullptr;
+  };
+
+  // CT can be either T or const T.
+  template <typename CT>
+  class basic_iterator : public iterator_common {
+   public:
+    using iterator_category = std::forward_iterator_tag;
+    using value_type = typename AllocatorTraits::value_type;
+    using difference_type = typename AllocatorTraits::difference_type;
+    using pointer =
+        typename std::conditional<std::is_const<CT>::value,
+                                  typename AllocatorTraits::const_pointer,
+                                  typename AllocatorTraits::pointer>::type;
+    using reference = CT&;
+
+    basic_iterator() = default;
+
+    // Copy ctor if CT is T.
+    // Otherwise it's a conversion of iterator to const_iterator.
+    basic_iterator(const basic_iterator<T>& it)  // NOLINT(runtime/explicit)
+        : iterator_common(it) {}
+
+    basic_iterator& operator=(const basic_iterator& other) = default;
+
+    reference operator*() const { return *this->ptr; }
+    pointer operator->() const { return this->ptr; }
+    basic_iterator& operator++() {
+      this->Incr();
+      return *this;
+    }
+    basic_iterator operator++(int) {
+      basic_iterator t = *this;
+      ++*this;
+      return t;
+    }
+
+   private:
+    explicit basic_iterator(Block* b) : iterator_common(b) {}
+
+    friend chunked_queue;
+  };
+
+ public:
+  using allocator_type = typename AllocatorTraits::allocator_type;
+  using value_type = typename AllocatorTraits::value_type;
+  using size_type = typename AllocatorTraits::size_type;
+  using difference_type = typename AllocatorTraits::difference_type;
+  using reference = value_type&;
+  using const_reference = const value_type&;
+  using iterator = basic_iterator<T>;
+  using const_iterator = basic_iterator<const T>;
+
+  // Constructs an empty queue.
+  chunked_queue() : chunked_queue(allocator_type()) {}
+
+  // Constructs an empty queue with a custom allocator.
+  explicit chunked_queue(const allocator_type& alloc)
+      : alloc_and_size_(alloc) {}
+
+  // Constructs a queue with `count` default-inserted elements.
+  explicit chunked_queue(size_type count,
+                         const allocator_type& alloc = allocator_type())
+      : alloc_and_size_(alloc) {
+    resize(count);
+  }
+
+  // Constructs a queue with `count` copies of `value`.
+  chunked_queue(size_type count, const T& value,
+                const allocator_type& alloc = allocator_type())
+      : alloc_and_size_(alloc) {
+    assign(count, value);
+  }
+
+  // Constructs a queue with the contents of the range [first, last).
+  template <typename Iter,
+            typename = std::enable_if_t<
+                base_internal::IsAtLeastInputIterator<Iter>::value>>
+  chunked_queue(Iter first, Iter last,
+                const allocator_type& alloc = allocator_type())
+      : alloc_and_size_(alloc) {
+    using Tag = typename std::iterator_traits<Iter>::iterator_category;
+    RangeInit(first, last, Tag());
+  }
+
+  // Constructs a queue with the contents of the initializer list `list`.
+  chunked_queue(std::initializer_list<T> list,
+                const allocator_type& alloc = allocator_type())
+      : chunked_queue(list.begin(), list.end(), alloc) {}
+
+  ~chunked_queue();
+
+  // Copy constructor.
+  chunked_queue(const chunked_queue& other)
+      : chunked_queue(other,
+                      AllocatorTraits::select_on_container_copy_construction(
+                          other.alloc_and_size_.allocator())) {}
+
+  // Copy constructor with specific allocator.
+  chunked_queue(const chunked_queue& other, const allocator_type& alloc)
+      : alloc_and_size_(alloc) {
+    for (const_reference item : other) {
+      push_back(item);
+    }
+  }
+
+  // Move constructor.
+  chunked_queue(chunked_queue&& other) noexcept
+      : head_(other.head_),
+        tail_(other.tail_),
+        alloc_and_size_(std::move(other.alloc_and_size_)) {
+    other.head_ = {};
+    other.tail_ = {};
+    other.alloc_and_size_.size = 0;
+  }
+
+  // Replaces contents with those from initializer list `il`.
+  chunked_queue& operator=(std::initializer_list<T> il) {
+    assign(il.begin(), il.end());
+    return *this;
+  }
+
+  // Copy assignment operator.
+  chunked_queue& operator=(const chunked_queue& other) {
+    if (this == &other) {
+      return *this;
+    }
+    if (AllocatorTraits::propagate_on_container_copy_assignment::value &&
+        (alloc_and_size_.allocator() != other.alloc_and_size_.allocator())) {
+      // Destroy all current elements and blocks with the current allocator,
+      // before switching this to use the allocator propagated from "other".
+      DestroyAndDeallocateAll();
+      alloc_and_size_ = AllocatorAndSize(other.alloc_and_size_.allocator());
+    }
+    assign(other.begin(), other.end());
+    return *this;
+  }
+
+  // Move assignment operator.
+  chunked_queue& operator=(chunked_queue&& other) noexcept;
+
+  // Returns true if the queue contains no elements.
+  bool empty() const { return alloc_and_size_.size == 0; }
+
+  // Returns the number of elements in the queue.
+  size_t size() const { return alloc_and_size_.size; }
+
+  // Returns the maximum number of elements the queue is able to hold.
+  size_type max_size() const noexcept {
+    return AllocatorTraits::max_size(alloc_and_size_.allocator());
+  }
+
+  // Resizes the container to contain `new_size` elements.
+  // If `new_size > size()`, additional default-inserted elements are appended.
+  // If `new_size < size()`, elements are removed from the end.
+  void resize(size_t new_size);
+
+  // Resizes the container to contain `new_size` elements.
+  // If `new_size > size()`, additional copies of `value` are appended.
+  // If `new_size < size()`, elements are removed from the end.
+  void resize(size_type new_size, const T& value) {
+    if (new_size > size()) {
+      size_t to_add = new_size - size();
+      for (size_t i = 0; i < to_add; ++i) {
+        push_back(value);
+      }
+    } else {
+      resize(new_size);
+    }
+  }
+
+  // Requests the removal of unused capacity.
+  void shrink_to_fit() {
+    // As an optimization clear() leaves the first block of the chunked_queue
+    // allocated (but empty). When empty, shrink_to_fit() deallocates the first
+    // block by swapping it with a newly constructed container that has no
+    // block.
+    if (empty()) {
+      chunked_queue(alloc_and_size_.allocator()).swap(*this);
+    }
+  }
+
+  // Replaces the contents with copies of those in the range [first, last).
+  template <typename Iter,
+            typename = std::enable_if_t<
+                base_internal::IsAtLeastInputIterator<Iter>::value>>
+  void assign(Iter first, Iter last) {
+    auto out = begin();
+    Block* prev_block = nullptr;
+
+    // Overwrite existing elements.
+    for (; out != end() && first != last; ++first) {
+      // Track the previous block so we can correctly update tail_ if we stop
+      // exactly at a block boundary.
+      if (out.ptr + 1 == out.block->limit()) {
+        prev_block = out.block;
+      }
+      *out = *first;
+      ++out;
+    }
+
+    // If we stopped exactly at the start of a block (meaning the previous block
+    // was full), we must ensure tail_ points to the end of the previous block,
+    // not the start of the current (now empty and to be deleted) block.
+    // This maintains the invariant required by back() which assumes tail_
+    // never points to the start of a block (unless it's the only block).
+    if (!empty() && out.block != nullptr && out.ptr == out.block->start() &&
+        prev_block != nullptr) {
+      // Delete the current block and all subsequent blocks.
+      //
+      // NOTE: Calling EraseAllFrom on an iterator that points to the limit of
+      // the previous block will not delete any element from the previous block.
+      iterator prev_block_end(prev_block);
+      prev_block_end.ptr = prev_block->limit();
+      EraseAllFrom(prev_block_end);
+
+      // Update tail_ to point to the end of the previous block.
+      tail_ = prev_block_end;
+      prev_block->set_next(nullptr);
+    } else {
+      // Standard erase from the current position to the end.
+      EraseAllFrom(out);
+    }
+
+    // Append any remaining new elements.
+    for (; first != last; ++first) {
+      push_back(*first);
+    }
+  }
+
+  // Replaces the contents with `count` copies of `value`.
+  void assign(size_type count, const T& value) {
+    clear();
+    for (size_type i = 0; i < count; ++i) {
+      push_back(value);
+    }
+  }
+
+  // Replaces the contents with the elements from the initializer list `il`.
+  void assign(std::initializer_list<T> il) { assign(il.begin(), il.end()); }
+
+  // Appends the given element value to the end of the container.
+  // Invalidates `end()` iterator. References to other elements remain valid.
+  void push_back(const T& val) { emplace_back(val); }
+  void push_back(T&& val) { emplace_back(std::move(val)); }
+
+  // Appends a new element to the end of the container.
+  // The element is constructed in-place with `args`.
+  // Returns a reference to the new element.
+  // Invalidates `end()` iterator. References to other elements remain valid.
+  template <typename... A>
+  T& emplace_back(A&&... args) {
+    T* storage = AllocateBack();
+    AllocatorTraits::construct(alloc_and_size_.allocator(), storage,
+                               std::forward<A>(args)...);
+    return *storage;
+  }
+
+  // Removes the first element of the container.
+  // Invalidates iterators to the removed element.
+  // REQUIRES: !empty()
+  void pop_front();
+
+  // Returns a reference to the first element in the container.
+  // REQUIRES: !empty()
+  T& front() {
+    ABSL_HARDENING_ASSERT(!empty());
+    return *head_;
+  }
+  const T& front() const {
+    ABSL_HARDENING_ASSERT(!empty());
+    return *head_;
+  }
+
+  // Returns a reference to the last element in the container.
+  // REQUIRES: !empty()
+  T& back() {
+    ABSL_HARDENING_ASSERT(!empty());
+    return *(&*tail_ - 1);
+  }
+  const T& back() const {
+    ABSL_HARDENING_ASSERT(!empty());
+    return *(&*tail_ - 1);
+  }
+
+  // Swaps the contents of this queue with `other`.
+  void swap(chunked_queue& other) noexcept {
+    using std::swap;
+    swap(head_, other.head_);
+    swap(tail_, other.tail_);
+    if (AllocatorTraits::propagate_on_container_swap::value) {
+      swap(alloc_and_size_, other.alloc_and_size_);
+    } else {
+      // Swap only the sizes; each object keeps its allocator.
+      //
+      // (It is undefined behavior to swap between two containers with unequal
+      // allocators if propagate_on_container_swap is false, so we don't have to
+      // handle that here like we do in the move-assignment operator.)
+      ABSL_HARDENING_ASSERT(get_allocator() == other.get_allocator());
+      swap(alloc_and_size_.size, other.alloc_and_size_.size);
+    }
+  }
+
+  // Erases all elements from the container.
+  // Note: Leaves one empty block allocated as an optimization.
+  // To free all memory, call shrink_to_fit() after calling clear().
+  void clear();
+
+  iterator begin() { return head_; }
+  iterator end() { return tail_; }
+
+  const_iterator begin() const { return head_; }
+  const_iterator end() const { return tail_; }
+
+  const_iterator cbegin() const { return head_; }
+  const_iterator cend() const { return tail_; }
+
+  // Returns the allocator associated with the container.
+  allocator_type get_allocator() const { return alloc_and_size_.allocator(); }
+
+ private:
+  // Empty base-class optimization: bundle storage for our allocator together
+  // with a field we had to store anyway (size), via inheriting from the
+  // allocator, so this allocator instance doesn't consume any storage
+  // when its type has no data members.
+  struct AllocatorAndSize : private allocator_type {
+    explicit AllocatorAndSize(const allocator_type& alloc)
+        : allocator_type(alloc) {}
+    const allocator_type& allocator() const { return *this; }
+    allocator_type& allocator() { return *this; }
+    size_t size = 0;
+  };
+
+  template <typename Iter>
+  void RangeInit(Iter first, Iter last, std::input_iterator_tag) {
+    while (first != last) {
+      AddTailBlock();
+      for (; first != last && tail_.ptr != tail_.limit;
+           ++alloc_and_size_.size, ++tail_.ptr, ++first) {
+        AllocatorTraits::construct(alloc_and_size_.allocator(), tail_.ptr,
+                                   *first);
+      }
+    }
+  }
+
+  void Construct(T* start, T* limit) {
+    ABSL_ASSERT(start <= limit);
+    for (; start != limit; ++start) {
+      AllocatorTraits::construct(alloc_and_size_.allocator(), start);
+    }
+  }
+
+  size_t Destroy(T* start, T* limit) {
+    ABSL_ASSERT(start <= limit);
+    const size_t n = limit - start;
+    for (; start != limit; ++start) {
+      AllocatorTraits::destroy(alloc_and_size_.allocator(), start);
+    }
+    return n;
+  }
+
+  T* block_begin(Block* b) const {
+    return b == head_.block ? head_.ptr : b->start();
+  }
+  T* block_end(Block* b) const {
+    // We have the choice of !b->next or b == tail_.block to determine if b is
+    // the tail or not. !b->next is usually faster because the caller of
+    // block_end() is most likely traversing the list of blocks so b->next is
+    // already fetched into some register.
+    return !b->next() ? tail_.ptr : b->limit();
+  }
+
+  void AddTailBlock();
+  size_t NewBlockSize() {
+    // Double the last block size and bound to [kBlockSizeMin, kBlockSizeMax].
+    if (!tail_.block) return kBlockSizeMin;
+    return (std::min)(kBlockSizeMax, 2 * tail_.block->size());
+  }
+
+  T* AllocateBack();
+  void EraseAllFrom(iterator i);
+
+  // Destroys any contained elements and destroys all allocated storage.
+  // (Like clear(), except this doesn't leave any empty blocks behind.)
+  void DestroyAndDeallocateAll();
+
+  // The set of elements in the queue is the following:
+  //
+  // (1) When we have just one block:
+  //      [head_.ptr .. tail_.ptr-1]
+  // (2) When we have multiple blocks:
+  //      [head_.ptr .. head_.limit-1]
+  //      ... concatenation of all elements from interior blocks ...
+  //      [tail_.ptr .. tail_.limit-1]
+  //
+  // Rep invariants:
+  // When we have just one block:
+  //   head_.limit == tail_.limit == head_.block->limit()
+  // Always:
+  //   head_.ptr <= head_.limit
+  //   tail_.ptr <= tail_.limit
+
+  iterator head_;
+  iterator tail_;
+  AllocatorAndSize alloc_and_size_;
+};
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+constexpr size_t chunked_queue<T, BLo, BHi, Allocator>::kBlockSizeMin;
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+constexpr size_t chunked_queue<T, BLo, BHi, Allocator>::kBlockSizeMax;
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline void swap(chunked_queue<T, BLo, BHi, Allocator>& a,
+                 chunked_queue<T, BLo, BHi, Allocator>& b) noexcept {
+  a.swap(b);
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+chunked_queue<T, BLo, BHi, Allocator>&
+chunked_queue<T, BLo, BHi, Allocator>::operator=(
+    chunked_queue&& other) noexcept {
+  if (this == &other) {
+    return *this;
+  }
+  DestroyAndDeallocateAll();
+
+  if constexpr (AllocatorTraits::propagate_on_container_move_assignment::
+                    value) {
+    // Take over the storage of "other", along with its allocator.
+    head_ = other.head_;
+    tail_ = other.tail_;
+    alloc_and_size_ = std::move(other.alloc_and_size_);
+    other.head_ = {};
+    other.tail_ = {};
+    other.alloc_and_size_.size = 0;
+  } else if (get_allocator() == other.get_allocator()) {
+    // Take over the storage of "other", with which we share an allocator.
+    head_ = other.head_;
+    tail_ = other.tail_;
+    alloc_and_size_.size = other.alloc_and_size_.size;
+    other.head_ = {};
+    other.tail_ = {};
+    other.alloc_and_size_.size = 0;
+  } else {
+    // We cannot take over the storage from "other", since it has a different
+    // allocator; we're stuck move-assigning elements individually.
+    for (auto& elem : other) {
+      push_back(std::move(elem));
+    }
+  }
+  return *this;
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline chunked_queue<T, BLo, BHi, Allocator>::~chunked_queue() {
+  Block* b = head_.block;
+  while (b) {
+    Block* next = b->next();
+    Destroy(block_begin(b), block_end(b));
+    Block::Delete(b, &alloc_and_size_.allocator());
+    b = next;
+  }
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+void chunked_queue<T, BLo, BHi, Allocator>::resize(size_t new_size) {
+  while (new_size > size()) {
+    ptrdiff_t to_add = new_size - size();
+    if (tail_.ptr == tail_.limit) {
+      AddTailBlock();
+    }
+    T* start = tail_.ptr;
+    T* limit = (std::min)(tail_.limit, start + to_add);
+    Construct(start, limit);
+    tail_.ptr = limit;
+    alloc_and_size_.size += limit - start;
+  }
+  if (size() == new_size) {
+    return;
+  }
+  ABSL_ASSERT(new_size < size());
+  auto new_end = begin();
+  new_end.IncrBy(new_size);
+  ABSL_ASSERT(new_end != end());
+  EraseAllFrom(new_end);
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline void chunked_queue<T, BLo, BHi, Allocator>::AddTailBlock() {
+  ABSL_ASSERT(tail_.ptr == tail_.limit);
+  auto* b = Block::New(NewBlockSize(), &alloc_and_size_.allocator());
+  if (!head_.block) {
+    ABSL_ASSERT(!tail_.block);
+    head_ = iterator(b);
+  } else {
+    ABSL_ASSERT(tail_.block);
+    tail_.block->set_next(b);
+  }
+  tail_ = iterator(b);
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline T* chunked_queue<T, BLo, BHi, Allocator>::AllocateBack() {
+  if (tail_.ptr == tail_.limit) {
+    AddTailBlock();
+  }
+  ++alloc_and_size_.size;
+  return tail_.ptr++;
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline void chunked_queue<T, BLo, BHi, Allocator>::EraseAllFrom(iterator i) {
+  if (!i.block) {
+    return;
+  }
+  ABSL_ASSERT(i.ptr);
+  ABSL_ASSERT(i.limit);
+  alloc_and_size_.size -= Destroy(i.ptr, block_end(i.block));
+  Block* b = i.block->next();
+  while (b) {
+    Block* next = b->next();
+    alloc_and_size_.size -= Destroy(b->start(), block_end(b));
+    Block::Delete(b, &alloc_and_size_.allocator());
+    b = next;
+  }
+  tail_ = i;
+  tail_.block->set_next(nullptr);
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline void chunked_queue<T, BLo, BHi, Allocator>::DestroyAndDeallocateAll() {
+  Block* b = head_.block;
+  while (b) {
+    Block* next = b->next();
+    Destroy(block_begin(b), block_end(b));
+    Block::Delete(b, &alloc_and_size_.allocator());
+    b = next;
+  }
+  head_ = iterator();
+  tail_ = iterator();
+  alloc_and_size_.size = 0;
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+inline void chunked_queue<T, BLo, BHi, Allocator>::pop_front() {
+  ABSL_HARDENING_ASSERT(!empty());
+  ABSL_ASSERT(head_.block);
+  AllocatorTraits::destroy(alloc_and_size_.allocator(), head_.ptr);
+  ++head_.ptr;
+  --alloc_and_size_.size;
+  if (empty()) {
+    // Reset head and tail to the start of the (only) block.
+    ABSL_ASSERT(head_.block == tail_.block);
+    head_.ptr = tail_.ptr = head_.block->start();
+    return;
+  }
+  if (head_.ptr == head_.limit) {
+    Block* n = head_.block->next();
+    Block::Delete(head_.block, &alloc_and_size_.allocator());
+    head_ = iterator(n);
+  }
+}
+
+template <typename T, size_t BLo, size_t BHi, typename Allocator>
+void chunked_queue<T, BLo, BHi, Allocator>::clear() {
+  // NOTE: As an optimization we leave one block allocated.
+  Block* b = head_.block;
+  if (!b) {
+    ABSL_ASSERT(empty());
+    return;
+  }
+  while (b) {
+    Block* next = b->next();
+    Destroy(block_begin(b), block_end(b));
+    if (head_.block != b) {
+      Block::Delete(b, &alloc_and_size_.allocator());
+    }
+    b = next;
+  }
+  b = head_.block;
+  b->set_next(nullptr);
+  head_ = tail_ = iterator(b);
+  alloc_and_size_.size = 0;
+}
+
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_CHUNKED_QUEUE_H_
diff --git a/absl/container/chunked_queue_benchmark.cc b/absl/container/chunked_queue_benchmark.cc
new file mode 100644
index 0000000..ee4d3c1
--- /dev/null
+++ b/absl/container/chunked_queue_benchmark.cc
@@ -0,0 +1,386 @@
+// Copyright 2025 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include <cstddef>
+#include <cstdint>
+#include <deque>
+#include <forward_list>
+#include <list>
+#include <random>
+
+#include "absl/container/chunked_queue.h"
+#include "absl/random/random.h"
+#include "absl/status/status.h"
+#include "absl/strings/cord.h"
+#include "benchmark/benchmark.h"
+
+namespace {
+
+// Queue implementation using std::forward_list. Used to benchmark
+// absl::chunked_queue against another plausible implementation.
+// FIFO adapter over std::forward_list.  The tail iterator is tracked
+// explicitly because std::forward_list only supports O(1) insertion *after*
+// a known position.
+template <typename T>
+class forward_list_queue {
+ public:
+  using iterator = typename std::forward_list<T>::iterator;
+
+  forward_list_queue() = default;
+  ~forward_list_queue() = default;
+
+  // Constructs a new element at the back of the queue.
+  template <typename... Args>
+  void emplace_back(Args&&... args) {
+    if (!list_.empty()) {
+      // emplace_after returns an iterator to the newly inserted element,
+      // which becomes the new tail.
+      tail_ = list_.emplace_after(tail_, std::forward<Args>(args)...);
+    } else {
+      list_.emplace_front(std::forward<Args>(args)...);
+      tail_ = list_.begin();
+    }
+  }
+
+  void push_back(const T& value) { emplace_back(value); }
+  iterator begin() { return list_.begin(); }
+  iterator end() { return list_.end(); }
+  T& front() { return list_.front(); }
+  const T& front() const { return list_.front(); }
+  void pop_front() { list_.pop_front(); }
+  bool empty() const { return list_.empty(); }
+  void clear() { list_.clear(); }
+
+ private:
+  std::forward_list<T> list_;
+  // Iterator to the last element; only meaningful while !list_.empty().
+  typename std::forward_list<T>::iterator tail_;
+};
+
+// Short aliases for the container types compared by these benchmarks.
+template <class T>
+using Deque = std::deque<T>;
+template <class T>
+using List = std::list<T>;
+template <class T>
+using FwdList = forward_list_queue<T>;
+template <class T>
+using Chunked = absl::chunked_queue<T>;
+// chunked_queue with non-default block-size template arguments (2, 64).
+template <class T>
+using ExpChunked = absl::chunked_queue<T, 2, 64>;
+
+// A larger, non-trivially-destructible element type (an int tag plus a Cord
+// and a Status) used to benchmark containers with heavier elements than a
+// plain int64_t.
+class Element {
+ public:
+  Element() : Element(-1) {}
+  Element(int type) : type_(type) {}      // NOLINT
+  operator int() const { return type_; }  // NOLINT
+
+ private:
+  int type_;
+  absl::Cord item_;
+  absl::Status status_;
+};
+
+// Builds and returns a queue of type Q holding the values 0..num_elements-1.
+template <class Q>
+Q MakeQueue(int64_t num_elements) {
+  Q result;
+  for (int64_t value = 0; value < num_elements; ++value) {
+    result.push_back(static_cast<int>(value));
+  }
+  return result;
+}
+
+// Registers the element counts each benchmark runs at: 16, 1024, and 131072.
+void CustomArgs(benchmark::internal::Benchmark* b) {
+  b->Arg(1 << 4);
+  b->Arg(1 << 10);
+  b->Arg(1 << 17);
+}
+
+// Measures default construction (and destruction) of an empty container.
+template <class Q>
+void BM_construct(benchmark::State& state) {
+  for (auto s : state) {
+    Q queue;
+    benchmark::DoNotOptimize(queue);
+  }
+}
+
+BENCHMARK_TEMPLATE(BM_construct, Deque<int64_t>);
+BENCHMARK_TEMPLATE(BM_construct, List<int64_t>);
+BENCHMARK_TEMPLATE(BM_construct, FwdList<int64_t>);
+BENCHMARK_TEMPLATE(BM_construct, Chunked<int64_t>);
+BENCHMARK_TEMPLATE(BM_construct, ExpChunked<int64_t>);
+BENCHMARK_TEMPLATE(BM_construct, Deque<Element>);
+BENCHMARK_TEMPLATE(BM_construct, List<Element>);
+BENCHMARK_TEMPLATE(BM_construct, FwdList<Element>);
+BENCHMARK_TEMPLATE(BM_construct, Chunked<Element>);
+BENCHMARK_TEMPLATE(BM_construct, ExpChunked<Element>);
+
+// Measures destruction of a container holding `num_elements` elements.  The
+// timer is paused while the queue is built and resumed just before the inner
+// scope closes, so only the destructor runs while timing is active.
+template <class Q>
+void BM_destroy(benchmark::State& state) {
+  const int64_t num_elements = state.range(0);
+
+  for (auto s : state) {
+    state.PauseTiming();
+    {
+      Q q = MakeQueue<Q>(num_elements);
+      benchmark::DoNotOptimize(q);
+      state.ResumeTiming();
+    }  // `q` is destroyed here, with the timer running.
+  }
+  state.SetItemsProcessed(state.iterations() * num_elements);
+}
+
+BENCHMARK_TEMPLATE(BM_destroy, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_destroy, ExpChunked<Element>)->Apply(CustomArgs);
+
+// Measures appending `count` values to an initially empty container;
+// container construction is excluded from the timed region.
+template <class Q>
+void BM_push_back(benchmark::State& state) {
+  const int64_t count = state.range(0);
+
+  state.SetItemsProcessed(state.max_iterations * count);
+  for (auto s : state) {
+    state.PauseTiming();
+    Q queue;
+    state.ResumeTiming();
+    for (int i = 0; i < count; i++) {
+      queue.push_back(i);
+    }
+    benchmark::DoNotOptimize(queue);
+  }
+}
+
+BENCHMARK_TEMPLATE(BM_push_back, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_back, ExpChunked<Element>)->Apply(CustomArgs);
+
+// Measures removing every element from the front of a pre-filled container;
+// queue construction is excluded from the timed region.
+template <class Q>
+void BM_pop_front(benchmark::State& state) {
+  const int64_t count = state.range(0);
+
+  state.SetItemsProcessed(state.max_iterations * count);
+  for (auto s : state) {
+    state.PauseTiming();
+    Q queue = MakeQueue<Q>(count);
+    state.ResumeTiming();
+    for (int i = 0; i < count; i++) {
+      queue.pop_front();
+    }
+    benchmark::DoNotOptimize(queue);
+  }
+}
+
+BENCHMARK_TEMPLATE(BM_pop_front, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_pop_front, ExpChunked<Element>)->Apply(CustomArgs);
+
+// Measures clear() on a container pre-filled with `count` elements.
+template <class Q>
+void BM_clear(benchmark::State& state) {
+  const int64_t count = state.range(0);
+
+  state.SetItemsProcessed(state.max_iterations * count);
+  for (auto s : state) {
+    state.PauseTiming();
+    Q queue = MakeQueue<Q>(count);
+    state.ResumeTiming();
+    queue.clear();
+    benchmark::DoNotOptimize(queue);
+  }
+}
+
+BENCHMARK_TEMPLATE(BM_clear, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_clear, ExpChunked<Element>)->Apply(CustomArgs);
+
+// Measures forward iteration over a container of `num_elements` elements.
+template <class Q>
+void BM_iter(benchmark::State& state) {
+  const int64_t num_elements = state.range(0);
+
+  state.SetItemsProcessed(state.max_iterations * num_elements);
+  for (auto s : state) {
+    state.PauseTiming();
+    // BUG FIX: size the queue by the benchmark argument `num_elements`, not
+    // by `state.max_iterations`.  Previously the argument had no effect on
+    // the measured work, while SetItemsProcessed claimed that it did.
+    Q q = MakeQueue<Q>(num_elements);
+    // int64_t accumulator: the containers hold int64_t values, and an `int`
+    // sum would silently truncate.
+    int64_t sum = 0;
+    state.ResumeTiming();
+    for (const auto& v : q) sum += v;
+    benchmark::DoNotOptimize(sum);
+  }
+}
+
+BENCHMARK_TEMPLATE(BM_iter, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_iter, ExpChunked<Element>)->Apply(CustomArgs);
+
+// Measures resize() shrinking a container from 2*count down to count.
+template <class Q>
+void BM_resize_shrink(benchmark::State& state) {
+  const int64_t count = state.range(0);
+
+  state.SetItemsProcessed(state.max_iterations * count);
+  for (auto s : state) {
+    state.PauseTiming();
+    Q queue = MakeQueue<Q>(count * 2);
+    state.ResumeTiming();
+    queue.resize(count);
+    benchmark::DoNotOptimize(queue);
+  }
+}
+
+// FwdList does not support resize.
+BENCHMARK_TEMPLATE(BM_resize_shrink, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, List<int64_t>)->Apply(CustomArgs);
+// BENCHMARK_TEMPLATE(BM_resize_shrink, FwdList<int64>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, List<Element>)->Apply(CustomArgs);
+// BENCHMARK_TEMPLATE(BM_resize_shrink, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_shrink, ExpChunked<Element>)->Apply(CustomArgs);
+
+// Measures resize() growing a container from count up to 2*count
+// (default-constructing the new elements).
+template <class Q>
+void BM_resize_grow(benchmark::State& state) {
+  const int64_t count = state.range(0);
+
+  state.SetItemsProcessed(state.max_iterations * count);
+  for (auto s : state) {
+    state.PauseTiming();
+    Q queue = MakeQueue<Q>(count);
+    state.ResumeTiming();
+    queue.resize(static_cast<size_t>(count) * 2);
+    benchmark::DoNotOptimize(queue);
+  }
+}
+
+// FwdList does not support resize.
+BENCHMARK_TEMPLATE(BM_resize_grow, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, List<int64_t>)->Apply(CustomArgs);
+// BENCHMARK_TEMPLATE(BM_resize_grow, FwdList<int64>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, List<Element>)->Apply(CustomArgs);
+// BENCHMARK_TEMPLATE(BM_resize_grow, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_resize_grow, ExpChunked<Element>)->Apply(CustomArgs);
+
+// Measures copy-assignment where the destination starts out larger than the
+// source.
+template <class Q>
+void BM_assign_shrink(benchmark::State& state) {
+  const int64_t count = state.range(0);
+
+  state.SetItemsProcessed(state.max_iterations * count);
+  for (auto s : state) {
+    state.PauseTiming();
+    const Q source = MakeQueue<Q>(count);
+    Q dest = MakeQueue<Q>(count * 2);
+    state.ResumeTiming();
+    dest = source;
+    benchmark::DoNotOptimize(dest);
+  }
+}
+
+BENCHMARK_TEMPLATE(BM_assign_shrink, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_shrink, ExpChunked<Element>)->Apply(CustomArgs);
+
+// Measures copy-assignment where the destination starts out smaller than the
+// source.
+template <class Q>
+void BM_assign_grow(benchmark::State& state) {
+  const int64_t count = state.range(0);
+
+  state.SetItemsProcessed(state.max_iterations * count);
+  for (auto s : state) {
+    state.PauseTiming();
+    const Q source = MakeQueue<Q>(count * 2);
+    Q dest = MakeQueue<Q>(count);
+    state.ResumeTiming();
+    dest = source;
+    benchmark::DoNotOptimize(dest);
+  }
+}
+
+BENCHMARK_TEMPLATE(BM_assign_grow, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_assign_grow, ExpChunked<Element>)->Apply(CustomArgs);
+
+// Measures a mixed workload: each step either pushes to the back or (when
+// non-empty) pops from the front with equal probability.
+template <class Q>
+void BM_push_pop(benchmark::State& state) {
+  const int64_t num_elements = state.range(0);
+
+  state.SetItemsProcessed(state.max_iterations * num_elements);
+
+  // Default-seeded mt19937: the push/pop sequence is identical across runs
+  // and across the container types being compared.
+  std::mt19937 rnd;
+  for (auto s : state) {
+    state.PauseTiming();
+    Q q;
+    state.ResumeTiming();
+    for (int j = 0; j < num_elements; j++) {
+      if (q.empty() || absl::Bernoulli(rnd, 0.5)) {
+        q.push_back(state.iterations());
+      } else {
+        q.pop_front();
+      }
+    }
+    benchmark::DoNotOptimize(q);
+  }
+}
+
+BENCHMARK_TEMPLATE(BM_push_pop, Deque<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, List<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, FwdList<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, Chunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, ExpChunked<int64_t>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, Deque<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, List<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, FwdList<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, Chunked<Element>)->Apply(CustomArgs);
+BENCHMARK_TEMPLATE(BM_push_pop, ExpChunked<Element>)->Apply(CustomArgs);
+
+}  // namespace
diff --git a/absl/container/chunked_queue_test.cc b/absl/container/chunked_queue_test.cc
new file mode 100644
index 0000000..d394ec4
--- /dev/null
+++ b/absl/container/chunked_queue_test.cc
@@ -0,0 +1,768 @@
+// Copyright 2025 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#include "absl/container/chunked_queue.h"
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <deque>
+#include <forward_list>
+#include <iterator>
+#include <list>
+#include <memory>
+#include <string>
+#include <type_traits>
+#include <utility>
+#include <vector>
+
+#include "gmock/gmock.h"
+#include "gtest/gtest.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/test_allocator.h"
+#include "absl/strings/str_cat.h"
+
+using ::testing::ElementsAre;
+using ::testing::Pair;
+using ::testing::Pointee;
+using ::testing::SizeIs;
+
+// Hide in a namespace to make sure swap is found via ADL.
+namespace adl_namespace {
+namespace {
+// Verifies that the non-member swap is found via argument-dependent lookup
+// from a namespace other than absl.
+TEST(ChunkedQueueADLTest, Swap) {
+  absl::chunked_queue<int64_t> q1;
+  absl::chunked_queue<int64_t> q2;
+  q1.push_back(4);
+  q2.push_back(5);
+  q2.push_back(6);
+  swap(q1, q2);
+  EXPECT_THAT(q1, ElementsAre(5, 6));
+  EXPECT_THAT(q2, ElementsAre(4));
+}
+}  // namespace
+}  // namespace adl_namespace
+
+namespace {
+
+// Alias for the internal block type with the default allocator.
+template <class T>
+using ChunkedQueueBlock =
+    absl::container_internal::ChunkedQueueBlock<T, std::allocator<T>>;
+
+// block_size_from_bytes() never yields fewer than one element and accounts
+// for the block header when converting a byte budget to an element count.
+TEST(Internal, elements_in_bytes) {
+  EXPECT_EQ(size_t{1}, ChunkedQueueBlock<int>::block_size_from_bytes(0));
+  EXPECT_EQ(size_t{1}, ChunkedQueueBlock<int>::block_size_from_bytes(
+                           sizeof(ChunkedQueueBlock<int>)));
+  EXPECT_EQ(size_t{1},
+            ChunkedQueueBlock<int>::block_size_from_bytes(sizeof(int)));
+  EXPECT_EQ(size_t{2}, ChunkedQueueBlock<int>::block_size_from_bytes(
+                           sizeof(ChunkedQueueBlock<int>) + 2 * sizeof(int)));
+}
+
+// New/Delete round-trip for a block of a padded (non-power-of-two) element.
+TEST(Internal, BlockSizedDelete) {
+  struct Item {
+    int i;
+    char c;
+  };
+  std::allocator<Item> allocator;
+  auto* block = ChunkedQueueBlock<Item>::New(3, &allocator);
+  ChunkedQueueBlock<Item>::Delete(block, &allocator);
+}
+
+// Allocating n elements may round the block size up, but never down.
+template <size_t elem_size>
+void BlockSizeRounding() {
+  struct Elem {
+    char data[elem_size];
+  };
+  typedef ChunkedQueueBlock<Elem> Block;
+  for (size_t n = 1; n < 100; ++n) {
+    SCOPED_TRACE(n);
+    std::allocator<Elem> allocator;
+    Block* b = Block::New(n, &allocator);
+    EXPECT_GE(b->size(), n);
+    Block::Delete(b, &allocator);
+  }
+}
+
+// Exercise rounding with a variety of element sizes.
+TEST(Internal, BlockSizeRounding1) { BlockSizeRounding<1>(); }
+TEST(Internal, BlockSizeRounding17) { BlockSizeRounding<17>(); }
+TEST(Internal, BlockSizeRounding101) { BlockSizeRounding<101>(); }
+TEST(Internal, BlockSizeRounding528) { BlockSizeRounding<528>(); }
+
+// A queue constrained to the smallest allowed block sizes still works.
+TEST(ChunkedQueue, MinMaxBlockSize) {
+  absl::chunked_queue<int64_t, 1, 2> q = {1, 2, 3};
+  EXPECT_THAT(q, ElementsAre(1, 2, 3));
+}
+
+// empty() transitions: default -> push -> pop -> clear.
+TEST(ChunkedQueue, Empty) {
+  absl::chunked_queue<int64_t> q;
+  EXPECT_TRUE(q.empty());
+  q.push_back(10);
+  EXPECT_FALSE(q.empty());
+  EXPECT_EQ(q.front(), 10);
+  EXPECT_EQ(q.back(), 10);
+  q.pop_front();
+  EXPECT_TRUE(q.empty());
+  q.clear();
+  EXPECT_TRUE(q.empty());
+}
+
+TEST(ChunkedQueue, CopyConstruct) {
+  absl::chunked_queue<int64_t> q;
+  q.push_back(1);
+  absl::chunked_queue<int64_t> r(q);
+  EXPECT_THAT(r, ElementsAre(1));
+  EXPECT_EQ(1, r.size());
+}
+
+// Copy construction must follow the source's block chain, not just the
+// first block.
+TEST(ChunkedQueue, CopyConstructMultipleChunks) {
+  absl::chunked_queue<int64_t, 2> q;
+  q.push_back(1);
+  q.push_back(2);
+  q.push_back(3);
+  absl::chunked_queue<int64_t, 2> r(q);
+  EXPECT_THAT(r, ElementsAre(1, 2, 3));
+  EXPECT_EQ(3, r.size());
+}
+
+TEST(ChunkedQueue, BeginEndConstruct) {
+  std::vector<int64_t> src = {1, 2, 3, 4, 5};
+  absl::chunked_queue<int64_t, 2> q(src.begin(), src.end());
+  EXPECT_THAT(q, ElementsAre(1, 2, 3, 4, 5));
+  EXPECT_EQ(5, q.size());
+}
+
+TEST(ChunkedQueue, InitializerListConstruct) {
+  absl::chunked_queue<int64_t, 2> q = {1, 2, 3, 4, 5};
+  EXPECT_THAT(q, ElementsAre(1, 2, 3, 4, 5));
+  EXPECT_EQ(5, q.size());
+}
+
+// (count) constructor value-initializes its elements.
+TEST(ChunkedQueue, CountConstruct) {
+  absl::chunked_queue<int64_t> q(3);
+  EXPECT_THAT(q, ElementsAre(0, 0, 0));
+  EXPECT_EQ(3, q.size());
+}
+
+TEST(ChunkedQueue, CountValueConstruct) {
+  absl::chunked_queue<int64_t> q(3, 10);
+  EXPECT_THAT(q, ElementsAre(10, 10, 10));
+  EXPECT_EQ(3, q.size());
+}
+
+TEST(ChunkedQueue, InitializerListAssign) {
+  absl::chunked_queue<int64_t, 2> q;
+  q = {1, 2, 3, 4, 5};
+  EXPECT_THAT(q, ElementsAre(1, 2, 3, 4, 5));
+  EXPECT_EQ(5, q.size());
+}
+
+TEST(ChunkedQueue, CopyAssign) {
+  absl::chunked_queue<int64_t> q;
+  q.push_back(1);
+  absl::chunked_queue<int64_t> r = q;
+  EXPECT_THAT(r, ElementsAre(1));
+}
+
+// Self-assignment must be a no-op, not a corruption.
+TEST(ChunkedQueue, CopyAssignSelf) {
+  absl::chunked_queue<int64_t> q;
+  q.push_back(1);
+  q = *&q;  // Avoid -Wself-assign.
+  EXPECT_THAT(q, ElementsAre(1));
+  EXPECT_EQ(1, q.size());
+}
+
+// Assignment from a smaller source must discard the destination's excess.
+TEST(ChunkedQueue, CopyAssignDestinationBigger) {
+  absl::chunked_queue<int64_t> q;
+  q.push_back(1);
+  absl::chunked_queue<int64_t> r;
+  r.push_back(9);
+  r.push_back(9);
+  r.push_back(9);
+  r = q;
+  EXPECT_THAT(r, ElementsAre(1));
+  EXPECT_EQ(1, r.size());
+}
+
+// Assignment across block boundaries: source spans multiple blocks.
+TEST(ChunkedQueue, CopyAssignSourceBiggerMultipleChunks) {
+  absl::chunked_queue<int64_t, 2> q;
+  q.push_back(1);
+  q.push_back(2);
+  q.push_back(3);
+  absl::chunked_queue<int64_t, 2> r;
+  r.push_back(9);
+  r = q;
+  EXPECT_THAT(r, ElementsAre(1, 2, 3));
+  EXPECT_EQ(3, r.size());
+}
+
+// Assignment across block boundaries: destination spans multiple blocks.
+TEST(ChunkedQueue, CopyAssignDestinationBiggerMultipleChunks) {
+  absl::chunked_queue<int64_t, 2> q;
+  q.push_back(1);
+  absl::chunked_queue<int64_t, 2> r;
+  r.push_back(9);
+  r.push_back(9);
+  r.push_back(9);
+  r = q;
+  EXPECT_THAT(r, ElementsAre(1));
+  EXPECT_EQ(1, r.size());
+}
+
+// assign(count, value) replaces contents, growing or shrinking as needed.
+TEST(ChunkedQueue, AssignCountValue) {
+  absl::chunked_queue<int64_t> q;
+  q.assign(3, 10);
+  EXPECT_THAT(q, ElementsAre(10, 10, 10));
+  EXPECT_EQ(3, q.size());
+
+  q.assign(2, 20);
+  EXPECT_THAT(q, ElementsAre(20, 20));
+  EXPECT_EQ(2, q.size());
+}
+
+TEST(ChunkedQueue, MoveConstruct) {
+  absl::chunked_queue<int64_t> q;
+  q.push_back(1);
+  absl::chunked_queue<int64_t> r(std::move(q));
+  EXPECT_THAT(r, ElementsAre(1));
+  EXPECT_EQ(1, r.size());
+}
+
+TEST(ChunkedQueue, MoveAssign) {
+  absl::chunked_queue<int64_t> q;
+  q.push_back(1);
+  absl::chunked_queue<int64_t> r;
+  r = std::move(q);
+  EXPECT_THAT(r, ElementsAre(1));
+  EXPECT_EQ(1, r.size());
+}
+
+// Move assignment must transfer block ownership rather than moving elements:
+// it works even for a type with all copy/move operations deleted.
+TEST(ChunkedQueue, MoveAssignImmovable) {
+  struct Immovable {
+    Immovable() = default;
+
+    Immovable(const Immovable&) = delete;
+    Immovable& operator=(const Immovable&) = delete;
+    Immovable(Immovable&&) = delete;
+    Immovable& operator=(Immovable&&) = delete;
+  };
+  absl::chunked_queue<Immovable> q;
+  q.emplace_back();
+  absl::chunked_queue<Immovable> r;
+  r = std::move(q);
+  EXPECT_THAT(r, SizeIs(1));
+}
+
+// Move self-assignment (through a reference) must not lose the contents.
+TEST(ChunkedQueue, MoveAssignSelf) {
+  absl::chunked_queue<int64_t> q;
+  absl::chunked_queue<int64_t>& q2 = q;
+  q.push_back(1);
+  q = std::move(q2);
+  EXPECT_THAT(q, ElementsAre(1));
+  EXPECT_EQ(1, q.size());
+}
+
+TEST(ChunkedQueue, MoveAssignDestinationBigger) {
+  absl::chunked_queue<int64_t> q;
+  q.push_back(1);
+  absl::chunked_queue<int64_t> r;
+  r.push_back(9);
+  r.push_back(9);
+  r.push_back(9);
+  r = std::move(q);
+  EXPECT_THAT(r, ElementsAre(1));
+  EXPECT_EQ(1, r.size());
+}
+
+// Same, but with the destination spanning multiple small blocks.
+TEST(ChunkedQueue, MoveAssignDestinationBiggerMultipleChunks) {
+  absl::chunked_queue<int64_t, 2> q;
+  q.push_back(1);
+  absl::chunked_queue<int64_t, 2> r;
+  r.push_back(9);
+  r.push_back(9);
+  r.push_back(9);
+  r = std::move(q);
+  EXPECT_THAT(r, ElementsAre(1));
+  EXPECT_EQ(1, r.size());
+}
+
+// front()/back() access, both mutable and const, including across block
+// boundaries as the queue grows.
+TEST(ChunkedQueue, ConstFrontBack) {
+  absl::chunked_queue<int64_t> q;
+  q.push_back(10);
+  EXPECT_EQ(q.front(), 10);
+  EXPECT_EQ(q.back(), 10);
+  q.front() = 12;
+  EXPECT_EQ(q.front(), 12);
+  EXPECT_EQ(q.back(), 12);
+
+  const absl::chunked_queue<int64_t>& qref = q;
+  EXPECT_EQ(qref.front(), 12);
+  EXPECT_EQ(qref.back(), 12);
+
+  q.pop_front();
+
+  // Test at block boundary and beyond.
+  for (int i = 0; i < 64; ++i) q.push_back(i + 10);
+  EXPECT_EQ(q.front(), 10);
+  EXPECT_EQ(q.back(), 73);
+
+  for (int i = 64; i < 128; ++i) q.push_back(i + 10);
+  EXPECT_EQ(q.front(), 10);
+  EXPECT_EQ(q.back(), 137);
+  q.clear();
+  EXPECT_TRUE(q.empty());
+}
+
+// FIFO ordering and size() accounting over many elements (crossing several
+// block boundaries).
+TEST(ChunkedQueue, PushAndPop) {
+  absl::chunked_queue<int64_t> q;
+  EXPECT_TRUE(q.empty());
+  EXPECT_EQ(0, q.size());
+  for (int i = 0; i < 10000; i++) {
+    q.push_back(i);
+    EXPECT_EQ(q.front(), 0) << ": iteration " << i;
+    EXPECT_FALSE(q.empty());
+    EXPECT_EQ(i + 1, q.size());
+  }
+  for (int i = 0; i < 10000; i++) {
+    EXPECT_FALSE(q.empty());
+    EXPECT_EQ(10000 - i, q.size());
+    EXPECT_EQ(q.front(), i);
+    q.pop_front();
+  }
+  EXPECT_TRUE(q.empty());
+  EXPECT_EQ(0, q.size());
+}
+
+// Member swap exchanges contents, including repeated swaps after pops.
+TEST(ChunkedQueue, Swap) {
+  absl::chunked_queue<int64_t> q1;
+  absl::chunked_queue<int64_t> q2;
+  q1.push_back(4);
+  q2.push_back(5);
+  q2.push_back(6);
+  q2.swap(q1);
+  EXPECT_EQ(2, q1.size());
+  EXPECT_EQ(5, q1.front());
+  EXPECT_EQ(1, q2.size());
+  EXPECT_EQ(4, q2.front());
+  q1.pop_front();
+  q1.swap(q2);
+  EXPECT_EQ(1, q1.size());
+  EXPECT_EQ(4, q1.front());
+  EXPECT_EQ(1, q2.size());
+  EXPECT_EQ(6, q2.front());
+  q1.pop_front();
+  q1.swap(q2);
+  EXPECT_EQ(1, q1.size());
+  EXPECT_EQ(6, q1.front());
+  EXPECT_EQ(0, q2.size());
+  q1.clear();
+  EXPECT_TRUE(q1.empty());
+}
+
+TEST(ChunkedQueue, ShrinkToFit) {
+  absl::chunked_queue<int64_t> q;
+  q.shrink_to_fit();  // Should work on empty
+  EXPECT_TRUE(q.empty());
+
+  q.push_back(1);
+  q.shrink_to_fit();  // Should work on non-empty
+  EXPECT_THAT(q, ElementsAre(1));
+
+  q.clear();
+  // We know clear leaves a block and shrink_to_fit should remove it.
+  // Hard to test internal memory state without mocks or inspection.
+  // But at least we verify it doesn't crash or corrupt.
+  q.shrink_to_fit();
+  EXPECT_TRUE(q.empty());
+}
+
+// resize() growing value-initializes the new elements.
+TEST(ChunkedQueue, ResizeExtends) {
+  absl::chunked_queue<int64_t> q;
+  q.resize(2);
+  EXPECT_THAT(q, ElementsAre(0, 0));
+  EXPECT_EQ(2, q.size());
+}
+
+// resize() shrinking drops elements from the back.
+TEST(ChunkedQueue, ResizeShrinks) {
+  absl::chunked_queue<int64_t> q;
+  q.push_back(1);
+  q.push_back(2);
+  q.resize(1);
+  EXPECT_THAT(q, ElementsAre(1));
+  EXPECT_EQ(1, q.size());
+}
+
+TEST(ChunkedQueue, ResizeExtendsMultipleBlocks) {
+  absl::chunked_queue<int64_t, 2> q;
+  q.resize(3);
+  EXPECT_THAT(q, ElementsAre(0, 0, 0));
+  EXPECT_EQ(3, q.size());
+}
+
+TEST(ChunkedQueue, ResizeShrinksMultipleBlocks) {
+  absl::chunked_queue<int64_t, 2> q;
+  q.push_back(1);
+  q.push_back(2);
+  q.push_back(3);
+  q.resize(1);
+  EXPECT_THAT(q, ElementsAre(1));
+  EXPECT_EQ(1, q.size());
+}
+
+// resize(count, value) only uses `value` for newly appended elements.
+TEST(ChunkedQueue, ResizeValue) {
+  absl::chunked_queue<int64_t> q;
+  q.resize(3, 10);
+  EXPECT_THAT(q, ElementsAre(10, 10, 10));
+  EXPECT_EQ(3, q.size());
+
+  q.resize(5, 20);
+  EXPECT_THAT(q, ElementsAre(10, 10, 10, 20, 20));
+  EXPECT_EQ(5, q.size());
+
+  q.resize(2, 30);
+  EXPECT_THAT(q, ElementsAre(10, 10));
+  EXPECT_EQ(2, q.size());
+}
+
+// max_size() should be within a small factor of the address-space bound.
+TEST(ChunkedQueue, MaxSize) {
+  absl::chunked_queue<int64_t> q;
+  EXPECT_GE(q.max_size(),
+            size_t{1} << (sizeof(size_t) * 8 - sizeof(int64_t) - 4));
+}
+
+// assign(first, last) growing the queue across block boundaries.
+TEST(ChunkedQueue, AssignExtends) {
+  absl::chunked_queue<int64_t, 2> q;
+  std::vector<int64_t> v = {1, 2, 3, 4, 5};
+  q.assign(v.begin(), v.end());
+  EXPECT_THAT(q, ElementsAre(1, 2, 3, 4, 5));
+  EXPECT_EQ(5, q.size());
+}
+
+// assign(first, last) shrinking the queue, releasing surplus blocks.
+TEST(ChunkedQueue, AssignShrinks) {
+  absl::chunked_queue<int64_t, 2> q = {1, 2, 3, 4, 5};
+  std::vector<int64_t> v = {1};
+  q.assign(v.begin(), v.end());
+  EXPECT_THAT(q, ElementsAre(1));
+  EXPECT_EQ(1, q.size());
+}
+
+TEST(ChunkedQueue, AssignBoundaryCondition) {
+  // Create a queue with fixed block size of 4.
+  // 3 blocks: [1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]
+  absl::chunked_queue<int, 4> q = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12};
+
+  // Assign a range that fills exactly the first block (4 elements).
+  // This triggers the boundary condition where the assignment loop ends
+  // exactly at the limit of the first block.
+  std::vector<int> v = {101, 102, 103, 104};
+  q.assign(v.begin(), v.end());
+
+  EXPECT_EQ(q.size(), 4);
+  EXPECT_EQ(q.front(), 101);
+  // Verify back() is valid. If tail_ was incorrectly pointing to the start
+  // of the (now deleted) second block, this might access invalid memory
+  // or fail assertions.
+  EXPECT_EQ(q.back(), 104);
+
+  // Verify we can continue to push elements correctly.
+  q.push_back(105);
+  EXPECT_EQ(q.size(), 5);
+  EXPECT_EQ(q.back(), 105);
+}
+
+// Basic iteration, equality against end(), and iterator copying.
+TEST(ChunkedQueue, Iterator) {
+  absl::chunked_queue<int64_t> q;
+  EXPECT_TRUE(q.begin() == q.end());
+
+  q.push_back(1);
+  absl::chunked_queue<int64_t>::const_iterator iter = q.begin();
+  ASSERT_FALSE(iter == q.end());
+  ASSERT_EQ(*iter, 1);
+  ++iter;
+  ASSERT_TRUE(iter == q.end());
+
+  q.push_back(2);
+  iter = q.begin();
+  ASSERT_EQ(*iter, 1);
+  ++iter;
+  absl::chunked_queue<int64_t>::const_iterator copy_iter = iter;
+  ASSERT_FALSE(copy_iter == q.end());
+  ASSERT_EQ(*copy_iter, 2);
+  ++copy_iter;
+  ASSERT_TRUE(copy_iter == q.end());
+
+  // A copied iterator advances independently of the original.
+  copy_iter = iter;
+  ASSERT_FALSE(iter == q.end());
+  ASSERT_EQ(*iter, 2);
+  ++iter;
+  ASSERT_TRUE(iter == q.end());
+
+  ASSERT_FALSE(copy_iter == q.end());
+  ASSERT_EQ(*copy_iter, 2);
+  ++copy_iter;
+  ASSERT_TRUE(copy_iter == q.end());
+}
+
+// Default-constructed iterators compare equal to each other.
+TEST(ChunkedQueue, IteratorDefaultConstructor) {
+  using ConstIter = absl::chunked_queue<int64_t>::const_iterator;
+  using Iter = absl::chunked_queue<int64_t>::iterator;
+  ConstIter const_iter;
+  EXPECT_TRUE(const_iter == ConstIter());
+  Iter iter;
+  EXPECT_TRUE(iter == Iter());
+}
+
+// iterator converts to const_iterator but not the other way around.
+TEST(ChunkedQueue, IteratorConversion) {
+  using ConstIter = absl::chunked_queue<int64_t>::const_iterator;
+  using Iter = absl::chunked_queue<int64_t>::iterator;
+  EXPECT_FALSE((std::is_convertible<ConstIter, Iter>::value));
+  EXPECT_TRUE((std::is_convertible<Iter, ConstIter>::value));
+  absl::chunked_queue<int64_t> q;
+  ConstIter it1 = q.begin();
+  ConstIter it2 = q.cbegin();
+  Iter it3 = q.begin();
+  it1 = q.end();
+  it2 = q.cend();
+  it3 = q.end();
+  EXPECT_FALSE((std::is_assignable<Iter, ConstIter>::value));
+}
+
+// Simple aggregate used to exercise operator-> on iterators.
+struct TestEntry {
+  int x, y;
+};
+
+// operator-> works on struct elements.
+TEST(ChunkedQueue, Iterator2) {
+  absl::chunked_queue<TestEntry> q;
+  TestEntry e;
+  e.x = 1;
+  e.y = 2;
+  q.push_back(e);
+  e.x = 3;
+  e.y = 4;
+  q.push_back(e);
+
+  absl::chunked_queue<TestEntry>::const_iterator iter = q.begin();
+  EXPECT_EQ(iter->x, 1);
+  EXPECT_EQ(iter->y, 2);
+  ++iter;
+  EXPECT_EQ(iter->x, 3);
+  EXPECT_EQ(iter->y, 4);
+  ++iter;
+  EXPECT_TRUE(iter == q.end());
+}
+
+// Full traversal is correct at every queue length up to several blocks,
+// both while growing and while shrinking.
+TEST(ChunkedQueue, Iterator_MultipleBlocks) {
+  absl::chunked_queue<int64_t> q;
+  for (int i = 0; i < 130; ++i) {
+    absl::chunked_queue<int64_t>::const_iterator iter = q.begin();
+    for (int j = 0; j < i; ++j) {
+      ASSERT_FALSE(iter == q.end());
+      EXPECT_EQ(*iter, j);
+      ++iter;
+    }
+    ASSERT_TRUE(iter == q.end());
+    q.push_back(i);
+  }
+
+  for (int i = 0; i < 130; ++i) {
+    absl::chunked_queue<int64_t>::const_iterator iter = q.begin();
+    for (int j = i; j < 130; ++j) {
+      ASSERT_FALSE(iter == q.end());
+      EXPECT_EQ(*iter, j);
+      ++iter;
+    }
+    q.pop_front();
+  }
+  EXPECT_TRUE(q.empty());
+  EXPECT_TRUE(q.begin() == q.end());
+}
+
+// An iterator that stays ahead of the popped prefix remains usable as
+// elements (and whole blocks) are released behind it.
+TEST(ChunkedQueue, Iterator_PopFrontInvalidate) {
+  absl::chunked_queue<int64_t> q;
+  for (int i = 0; i < 130; ++i) {
+    q.push_back(i);
+  }
+
+  auto iter = q.begin();
+  for (int i = 0; i < 130; ++i) {
+    auto prev = iter++;
+    ASSERT_FALSE(prev == q.end());
+    EXPECT_EQ(*prev, i);
+    q.pop_front();
+  }
+  ASSERT_TRUE(q.empty());
+}
+
+// An iterator can be advanced onto elements pushed after it was obtained,
+// including across newly allocated blocks.
+TEST(ChunkedQueue, Iterator_PushBackInvalidate) {
+  absl::chunked_queue<int64_t, 2> q;
+  q.push_back(0);
+  auto i = q.begin();
+  EXPECT_EQ(*i, 0);
+  q.push_back(1);
+  EXPECT_EQ(*++i, 1);
+  q.push_back(2);
+  EXPECT_EQ(*++i, 2);
+}
+
+// Instrumented type that counts constructions and destructions so element
+// lifetime management can be verified exactly.
+struct MyType {
+  static int constructor_calls;
+  static int destructor_calls;
+
+  explicit MyType(int x) : val(x) { constructor_calls++; }
+  MyType(const MyType& t) : val(t.val) { constructor_calls++; }
+  ~MyType() { destructor_calls++; }
+
+  int val;
+};
+
+int MyType::constructor_calls = 0;
+int MyType::destructor_calls = 0;
+
+// Every element constructed inside the queue is destroyed exactly once,
+// whether removed by pop_front, clear, or the queue's destructor.
+TEST(ChunkedQueue, ConstructorDestructorCalls) {
+  for (int i = 0; i < 100; i++) {
+    std::vector<MyType> vals;
+    for (int j = 0; j < i; j++) {
+      vals.push_back(MyType(j));
+    }
+    // Reset counters after building the fixture vector so only queue
+    // activity is counted below.
+    MyType::constructor_calls = 0;
+    MyType::destructor_calls = 0;
+    {
+      absl::chunked_queue<MyType> q;
+      for (int j = 0; j < i; j++) {
+        q.push_back(vals[j]);
+      }
+      if (i % 10 == 0) {
+        q.clear();
+      } else {
+        for (int j = 0; j < i; j++) {
+          EXPECT_EQ(q.front().val, j);
+          q.pop_front();
+        }
+      }
+    }
+    EXPECT_EQ(MyType::constructor_calls, i);
+    EXPECT_EQ(MyType::destructor_calls, i);
+  }
+}
+
+// Move-only element types are supported: push_back accepts rvalues, and the
+// stored unique_ptrs come back out in FIFO order.
+TEST(ChunkedQueue, MoveObjects) {
+  absl::chunked_queue<std::unique_ptr<int>> q;
+  for (int value : {10, 11}) {
+    q.push_back(std::make_unique<int>(value));
+  }
+  for (int expected : {10, 11}) {
+    EXPECT_EQ(expected, *q.front());
+    q.pop_front();
+  }
+}
+
+// emplace_back forwards its arguments to the element constructor and returns
+// a reference to the newly created element, which is also back().
+TEST(ChunkedQueue, EmplaceBack1) {
+  absl::chunked_queue<std::pair<int, int>> q;
+  std::pair<int, int>& element = q.emplace_back(1, 2);
+  EXPECT_THAT(element, Pair(1, 2));
+  EXPECT_THAT(q.front(), Pair(1, 2));
+  EXPECT_EQ(&element, &q.back());
+}
+
+// emplace_back works with move-only argument types: the unique_ptr argument
+// is moved into the stored pair rather than copied.
+TEST(ChunkedQueue, EmplaceBack2) {
+  absl::chunked_queue<std::pair<std::unique_ptr<int>, std::string>> q;
+  auto& v = q.emplace_back(std::make_unique<int>(11), "val12");
+  EXPECT_THAT(v, Pair(Pointee(11), "val12"));
+  EXPECT_THAT(q.front(), Pair(Pointee(11), "val12"));
+}
+
+// Elements with extended alignment (64 bytes here) must be stored at
+// correctly aligned addresses within each chunk.
+TEST(ChunkedQueue, OveralignmentEmplaceBack) {
+  struct alignas(64) Overaligned {
+    int x;
+    int y;
+  };
+  // Small chunk parameters (initial size 1, growth factor 8) force several
+  // block allocations within 10 insertions.
+  absl::chunked_queue<Overaligned, 1, 8> q;
+  for (int i = 0; i < 10; ++i) {
+    auto& v = q.emplace_back(Overaligned{i, i});
+    EXPECT_EQ(reinterpret_cast<uintptr_t>(&v) % 64, 0);
+  }
+}
+
+TEST(ChunkedQueue, StatelessAllocatorDoesntAffectObjectSizes) {
+  // When a stateless allocator type is used -- such as when no explicit
+  // allocator type is given, and the stateless default is used -- it does not
+  // increase the object sizes from what they used to be before allocator
+  // support was added.  (In practice this verifies that allocator support makes
+  // use of the empty base-class optimization.)
+  //
+  // These "Mock*" structs model the data members of absl::chunked_queue<> and
+  // its internal ChunkedQueueBlock<> type, without any extra storage for
+  // allocator state.  (We use these to generate expected stateless-allocator
+  // object sizes in a portable way.)
+  struct MockQueue {
+    struct MockIterator {
+      void* block;
+      void* ptr;
+      void* limit;
+    };
+    MockIterator head;  // models the queue's front position
+    MockIterator tail;  // models the queue's push position
+    size_t size;        // models the element count
+  };
+  struct MockBlock {
+    void* next;   // models ChunkedQueueBlock::next_
+    void* limit;  // models ChunkedQueueBlock::limit_
+  };
+  using TestQueueType = absl::chunked_queue<int64_t, 1, 16>;
+  EXPECT_EQ(sizeof(TestQueueType), sizeof(MockQueue));
+  EXPECT_EQ(sizeof(absl::container_internal::ChunkedQueueBlock<
+                   TestQueueType::value_type, TestQueueType::allocator_type>),
+            sizeof(MockBlock));
+}
+
+// Verifies that custom allocators receive exactly the requested allocation
+// size: the block capacity must not be opportunistically rounded up.
+TEST(ChunkedQueue, DoesNotRoundBlockSizesUpWithNonDefaultAllocator) {
+  using OneByte = uint8_t;
+  using CustomAllocator = absl::container_internal::CountingAllocator<OneByte>;
+  using Block =
+      absl::container_internal::ChunkedQueueBlock<OneByte, CustomAllocator>;
+  int64_t allocator_live_bytes = 0;
+  CustomAllocator allocator(&allocator_live_bytes);
+  // Create a Block big enough to accommodate at least 1 OneByte.
+  Block* b = Block::New(1, &allocator);
+  ASSERT_TRUE(b != nullptr);
+  // With a non-default allocator in play, the resulting block should have
+  // capacity for exactly 1 element -- the implementation should not round the
+  // allocation size up, which may be inappropriate for non-default allocators.
+  //
+  // (Note that we don't always round up even with the default allocator in use,
+  // e.g. when compiling for ASAN analysis.)
+  EXPECT_EQ(b->size(), 1);
+  Block::Delete(b, &allocator);
+}
+
+// In hardened builds, front()/back()/pop_front() on an empty queue must
+// trigger an assertion failure (death).
+TEST(ChunkedQueue, Hardening) {
+  // Detect at runtime whether ABSL_HARDENING_ASSERT evaluates its argument in
+  // this build configuration; if not, the death expectations below would
+  // never fire, so the test is skipped.
+  bool hardened = false;
+  ABSL_HARDENING_ASSERT([&hardened]() {
+    hardened = true;
+    return true;
+  }());
+  if (!hardened) {
+    GTEST_SKIP() << "Not a hardened build";
+  }
+
+  absl::chunked_queue<int> q;
+  EXPECT_DEATH(q.front(), "");
+  EXPECT_DEATH(q.back(), "");
+  EXPECT_DEATH(q.pop_front(), "");
+
+  // Also exercise the const overloads.
+  const absl::chunked_queue<int> cq;
+  EXPECT_DEATH(cq.front(), "");
+  EXPECT_DEATH(cq.back(), "");
+}
+
+}  // namespace
diff --git a/absl/container/internal/chunked_queue.h b/absl/container/internal/chunked_queue.h
new file mode 100644
index 0000000..c3718ac
--- /dev/null
+++ b/absl/container/internal/chunked_queue.h
@@ -0,0 +1,173 @@
+// Copyright 2025 The Abseil Authors.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      https://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+#ifndef ABSL_CONTAINER_INTERNAL_CHUNKED_QUEUE_H_
+#define ABSL_CONTAINER_INTERNAL_CHUNKED_QUEUE_H_
+
+#include <algorithm>
+#include <cstddef>
+#include <cstdint>
+#include <initializer_list>
+#include <iterator>
+#include <memory>
+#include <new>
+#include <tuple>
+#include <type_traits>
+#include <utility>
+
+#include "absl/base/attributes.h"
+#include "absl/base/config.h"
+#include "absl/base/macros.h"
+#include "absl/container/internal/layout.h"
+
+namespace absl {
+ABSL_NAMESPACE_BEGIN
+namespace container_internal {
+
+// ChunkedQueueBlock defines a node in a forward list of uninitialized storage
+// of size T's. The user is responsible for constructing and destroying T's in
+// said storage.
+//
+// ChunkedQueueBlock::New(size_hint, alloc) returns said node, with at least
+// size_hint T's of uninitialized storage.
+template <typename T, typename Allocator>
+class ChunkedQueueBlock {
+ private:
+  // The user's allocator rebound to ChunkedQueueBlock, used only to
+  // construct()/destroy() the block header object itself.
+  using ChunkedQueueBlockAllocator = typename std::allocator_traits<
+      Allocator>::template rebind_alloc<ChunkedQueueBlock>;
+  // The user's allocator rebound to char, used for the raw combined
+  // header-plus-elements buffer (when not using the default allocator).
+  using ByteAllocator =
+      typename std::allocator_traits<Allocator>::template rebind_alloc<char>;
+
+ public:
+  // NB, instances of this must not be created or destroyed directly, only via
+  // the New() and Delete() methods.  (This notionally-private constructor is
+  // public only to allow access from allocator types used by New().)
+  explicit ChunkedQueueBlock(size_t size)
+      : next_(nullptr), limit_(start() + size) {}
+
+  // Allocates a block with capacity for at least `size_hint` (>= 1) elements;
+  // the element storage itself is left uninitialized.
+  // Must be deleted by ChunkedQueueBlock::Delete.
+  static ChunkedQueueBlock* New(size_t size_hint, Allocator* alloc) {  // NOLINT
+    ABSL_ASSERT(size_hint >= size_t{1});
+    size_t allocation_bytes = AllocSize(size_hint);
+    void* mem;
+    std::tie(mem, allocation_bytes) = Allocate(allocation_bytes, alloc);
+    // Derive the actual capacity from the (possibly larger) size the
+    // allocation path reported back.
+    const size_t element_count =
+        (allocation_bytes - start_offset()) / sizeof(T);
+    ChunkedQueueBlock* as_block = static_cast<ChunkedQueueBlock*>(mem);
+    ChunkedQueueBlockAllocator block_alloc(*alloc);
+    std::allocator_traits<ChunkedQueueBlockAllocator>::construct(
+        block_alloc, as_block, element_count);
+    return as_block;
+  }
+
+  // Destroys the block header created by New() and releases its storage.  The
+  // caller is responsible for having destroyed any T's still alive in it.
+  static void Delete(ChunkedQueueBlock* ptr, Allocator* alloc) {
+    const size_t allocation_bytes = AllocSize(ptr->size());
+    ChunkedQueueBlockAllocator block_alloc(*alloc);
+    std::allocator_traits<ChunkedQueueBlockAllocator>::destroy(block_alloc,
+                                                               ptr);
+    // Deallocation must mirror the strategy in Allocate(): operator delete
+    // (aligned form where required) for the default allocator, otherwise the
+    // user's allocator rebound to char.
+    if constexpr (std::is_same_v<ByteAllocator, std::allocator<char>>) {
+#ifdef __STDCPP_DEFAULT_NEW_ALIGNMENT__
+      if (alignment() > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
+        ::operator delete(ptr
+#ifdef __cpp_sized_deallocation
+                          ,
+                          allocation_bytes
+#endif
+                          ,
+                          std::align_val_t(alignment()));
+        return;
+      }
+#endif
+      ::operator delete(ptr);
+    } else {
+      void* mem = ptr;
+      ByteAllocator byte_alloc(*alloc);
+      std::allocator_traits<ByteAllocator>::deallocate(
+          byte_alloc, static_cast<char*>(mem), allocation_bytes);
+    }
+  }
+
+  // Next block in the singly linked list, or nullptr at the tail.
+  ChunkedQueueBlock* next() const { return next_; }
+  void set_next(ChunkedQueueBlock* next) { next_ = next; }
+  // First slot of the element storage, which follows this header at
+  // start_offset() bytes (accounting for T's alignment).
+  T* start() {
+    return reinterpret_cast<T*>(reinterpret_cast<uintptr_t>(this) +
+                                start_offset());
+  }
+  // One past the last element slot.
+  T* limit() { return limit_; }
+  // Capacity of this block, in elements.
+  size_t size() { return limit() - start(); }
+
+  // Translates a total block byte size into an element count: the number of
+  // elements covered by the bytes remaining after the header, rounded up, and
+  // at least 1 even when `bytes` does not exceed the header size.
+  static constexpr size_t block_size_from_bytes(size_t bytes) {
+    return bytes <= static_cast<size_t>(start_offset())
+               ? size_t{1}
+               : elements_in_bytes(bytes - start_offset());
+  }
+
+ private:
+  ChunkedQueueBlock(const ChunkedQueueBlock&) = delete;
+  ChunkedQueueBlock& operator=(const ChunkedQueueBlock&) = delete;
+
+  // The byte size to allocate to ensure space for `min_element_count` elements.
+  static constexpr size_t AllocSize(size_t min_element_count) {
+    return absl::container_internal::Layout<ChunkedQueueBlock, T>(
+               1, min_element_count)
+        .AllocSize();
+  }
+
+  // Byte offset from the start of the allocation (i.e. from `this`) to the
+  // first element slot.
+  static constexpr ptrdiff_t start_offset() {
+    return absl::container_internal::Layout<ChunkedQueueBlock, T>(1, 1)
+        .template Offset<1>();
+  }
+
+  // Alignment required for the combined header-plus-elements allocation.
+  static constexpr size_t alignment() {
+    return absl::container_internal::Layout<ChunkedQueueBlock, T>(1, 1)
+        .Alignment();
+  }
+
+  // Number of T's covered by `bytes` bytes, rounded up.
+  static constexpr size_t elements_in_bytes(size_t bytes) {
+    return (bytes + sizeof(T) - 1) / sizeof(T);
+  }
+
+  // Allocates at least `allocation_bytes` bytes of raw storage, returning the
+  // pointer and the byte size actually obtained.
+  static std::pair<void*, size_t> Allocate(size_t allocation_bytes,
+                                           Allocator* alloc) {
+    // If we're using the default allocator, then we can use new.
+    void* mem;
+    if constexpr (std::is_same_v<ByteAllocator, std::allocator<char>>) {
+      // Older GCC versions have an unused variable warning on `alloc` inside
+      // this constexpr branch.
+      static_cast<void>(alloc);
+#ifdef __STDCPP_DEFAULT_NEW_ALIGNMENT__
+      if (alignment() > __STDCPP_DEFAULT_NEW_ALIGNMENT__) {
+        // Align the allocation to respect alignof(T).
+        mem = ::operator new(allocation_bytes, std::align_val_t(alignment()));
+        return {mem, allocation_bytes};
+      }
+#endif
+      mem = ::operator new(allocation_bytes);
+    } else {
+      ByteAllocator byte_alloc(*alloc);
+      mem = std::allocator_traits<ByteAllocator>::allocate(byte_alloc,
+                                                           allocation_bytes);
+    }
+    return {mem, allocation_bytes};
+  }
+
+  ChunkedQueueBlock* next_;  // Next block in the list; nullptr at the tail.
+  T* limit_;                 // One past the last element slot of this block.
+};
+
+}  // namespace container_internal
+ABSL_NAMESPACE_END
+}  // namespace absl
+
+#endif  // ABSL_CONTAINER_INTERNAL_CHUNKED_QUEUE_H_