From 74a6c444abb8bb491c5a2078db02bed28849b636 Mon Sep 17 00:00:00 2001 From: pdrobnjak Date: Thu, 5 Feb 2026 14:54:13 +0100 Subject: [PATCH 1/2] perf(store): lazy-init sortedCache in cachekv.Store MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Each cachekv.NewStore() was eagerly allocating a MemDB (btree + FreeList) for its sortedCache, even though sortedCache is only used when iterators are requested. During OCC parallel execution, each block creates ~42,000 cachekv stores (1000 txs * ~2 snapshots * 21 module stores), and for EVM transfer workloads, none of them ever iterate — they only use Get/Set/Delete which operate on the sync.Map cache, not sortedCache. This was the single largest source of allocation pressure: btree.NewFreeListG (57 GB/30s) + tm-db.NewMemDB (70 GB/30s) = 127 GB of the 286 GB total allocation rate. All 24 OCC workers contending on the heap allocator lock (runtime.mheap.lock) made this the #1 CPU bottleneck at 23%. The fix defers MemDB allocation to the first iterator call via ensureSortedCache(). For the common case (no iteration), the cost is zero. For the rare case (EndBlock operations, Cosmos native txs), the MemDB is allocated on demand with identical behavior. 
Benchmark results (M4 Max, EVM transfer workload): - TPS: ~7,800 -> ~8,200 median (+5%) - runtime.lock2 (heap contention): 23.0% -> 15.4% CPU - runtime.gcDrain: 21.6% -> 19.0% CPU - runtime.(*mheap).allocSpan: 18.4% -> 11.4% CPU - btree/MemDB/FreeList allocation: essentially eliminated - Total CPU samples over 30s: 142.2s -> 124.9s (-12%) Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> --- sei-cosmos/store/cachekv/store.go | 11 +++++++++-- 1 file changed, 9 insertions(+), 2 deletions(-) diff --git a/sei-cosmos/store/cachekv/store.go b/sei-cosmos/store/cachekv/store.go index 54b36a667a..e233d45bdf 100644 --- a/sei-cosmos/store/cachekv/store.go +++ b/sei-cosmos/store/cachekv/store.go @@ -33,7 +33,7 @@ func NewStore(parent types.KVStore, storeKey types.StoreKey, cacheSize int) *Sto cache: &sync.Map{}, deleted: &sync.Map{}, unsortedCache: &sync.Map{}, - sortedCache: dbm.NewMemDB(), + sortedCache: nil, parent: parent, storeKey: storeKey, cacheSize: cacheSize, @@ -120,7 +120,7 @@ func (store *Store) Write() { store.cache = &sync.Map{} store.deleted = &sync.Map{} store.unsortedCache = &sync.Map{} - store.sortedCache = dbm.NewMemDB() + store.sortedCache = nil } // CacheWrap implements CacheWrapper. 
@@ -146,6 +146,12 @@ func (store *Store) ReverseIterator(start, end []byte) types.Iterator { return store.iterator(start, end, false) } +func (store *Store) ensureSortedCache() { + if store.sortedCache == nil { + store.sortedCache = dbm.NewMemDB() + } +} + func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator { store.mtx.Lock() defer store.mtx.Unlock() @@ -167,6 +173,7 @@ func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator { panic(err) } }() + store.ensureSortedCache() store.dirtyItems(start, end) cache = newMemIterator(start, end, store.sortedCache, store.deleted, ascending) return NewCacheMergeIterator(parent, cache, ascending, store.storeKey) From 4a4adcb472f9107c5880ccb7be74aa0af9706bfd Mon Sep 17 00:00:00 2001 From: pdrobnjak Date: Thu, 5 Feb 2026 15:42:22 +0100 Subject: [PATCH 2/2] refactor(store): use self-initializing getter for sortedCache Replace ensureSortedCache() + direct field access with a getOrInitSortedCache() getter at all 3 access sites, preventing possible nil dereference if sortedCache is reached outside iterator(). 
Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com> --- sei-cosmos/store/cachekv/store.go | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/sei-cosmos/store/cachekv/store.go b/sei-cosmos/store/cachekv/store.go index e233d45bdf..51d583d2e6 100644 --- a/sei-cosmos/store/cachekv/store.go +++ b/sei-cosmos/store/cachekv/store.go @@ -146,10 +146,11 @@ func (store *Store) ReverseIterator(start, end []byte) types.Iterator { return store.iterator(start, end, false) } -func (store *Store) ensureSortedCache() { +func (store *Store) getOrInitSortedCache() *dbm.MemDB { if store.sortedCache == nil { store.sortedCache = dbm.NewMemDB() } + return store.sortedCache } func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator { @@ -173,9 +174,8 @@ func (store *Store) iterator(start, end []byte, ascending bool) types.Iterator { panic(err) } }() - store.ensureSortedCache() store.dirtyItems(start, end) - cache = newMemIterator(start, end, store.sortedCache, store.deleted, ascending) + cache = newMemIterator(start, end, store.getOrInitSortedCache(), store.deleted, ascending) return NewCacheMergeIterator(parent, cache, ascending, store.storeKey) } @@ -308,13 +308,13 @@ func (store *Store) clearUnsortedCacheSubset(unsorted []*kv.Pair, sortState sort if item.Value == nil { // deleted element, tracked by store.deleted // setting arbitrary value - if err := store.sortedCache.Set(item.Key, []byte{}); err != nil { + if err := store.getOrInitSortedCache().Set(item.Key, []byte{}); err != nil { panic(err) } continue } - if err := store.sortedCache.Set(item.Key, item.Value); err != nil { + if err := store.getOrInitSortedCache().Set(item.Key, item.Value); err != nil { panic(err) } }