-- HOT-update micro-benchmark for a wide table.
-- Setup: done once per candidate; cluster re-cloned for each iteration.
/*
BEGIN;

DROP TABLE IF EXISTS test_wide;

-- Focus is on column comparison costs in heap_update, so use UNLOGGED.
CREATE UNLOGGED TABLE test_wide (
    k text,
    v int
)
-- Fill two tuples per page, leaving space for a HOT update of each.
WITH (fillfactor = 50);

-- Suppress TOAST activity.
ALTER TABLE test_wide ALTER k SET STORAGE PLAIN;

-- Make a bunch of tuples of ~2000 bytes.
INSERT INTO test_wide
SELECT repeat(md5(n::text), 60), n
FROM generate_series(1, 100000) t(n);

ALTER TABLE test_wide ADD UNIQUE (k);

COMMIT;
*/

BEGIN;

-- Benchmark workload: intentionally a whole-table UPDATE (no WHERE) —
-- every row gets a HOT update of the narrow column v.
-- Timings of this UPDATE, three runs each:
--   master times:  18863.307, 18501.506, 18752.469
--   patched times: 17591.783, 18297.841, 17204.112
UPDATE test_wide SET v = v + 1;

SELECT pg_sleep(1);  -- time for stats to transfer

COMMIT;

-- Confirm that we got HOT updates (check n_tup_hot_upd in the stats row).
\x on
SELECT * FROM pg_stat_all_tables WHERE relid = 'test_wide'::regclass;
\x off

-- Table should not have grown if updates were HOT.
SELECT pg_size_pretty(pg_relation_size('test_wide'));