diff --git a/doc/src/sgml/mvcc.sgml b/doc/src/sgml/mvcc.sgml index 14614c5..3ee1e38 100644 *** a/doc/src/sgml/mvcc.sgml --- b/doc/src/sgml/mvcc.sgml *************** COMMIT; *** 386,409 **** behave the same as SELECT in terms of searching for target rows: they will only find target rows that were committed as of the transaction start time. However, such a ! target ! row might have already been updated (or deleted or locked) by ! another concurrent transaction by the time it is found. In this case, the ! serializable transaction will wait for the first updating transaction to commit or ! roll back (if it is still in progress). If the first updater rolls back, ! then its effects are negated and the serializable transaction can proceed ! with updating the originally found row. But if the first updater commits ! (and actually updated or deleted the row, not just locked it) ! then the serializable transaction will be rolled back with the message ! ! ! ERROR: could not serialize access due to concurrent update ! ! because a serializable transaction cannot modify or lock rows changed by ! other transactions after the serializable transaction began. ! When an application receives this error message, it should abort the current transaction and retry the whole transaction from --- 386,409 ---- behave the same as SELECT in terms of searching for target rows: they will only find target rows that were committed as of the transaction start time. However, such a ! target row might have already been subject to a concurrent ! UPDATE, DELETE, SELECT ! FOR UPDATE, or SELECT FOR SHARE. In this case, ! the serializable transaction will wait for the other transaction to commit ! or roll back (if it is still in progress). If it rolls back then its effects ! are negated and the serializable transaction can proceed with modifying ! or locking the originally found row. If it commits, and the two commands ! conflict according to , ! the serializable transaction is rolled back with the message ! ! ERROR: could not serialize access due to concurrent update ! ! ! since serializable transaction cannot simply proceed with the newer row ! version like read committed ones do. ! When an application receives this error message, it should abort the current transaction and retry the whole transaction from *************** ERROR: could not serialize access due t *** 418,423 **** --- 418,463 ---- transactions will never have serialization conflicts. + + Serialization Conflicts + + + + + + + Serializable Transaction + Concurrent Transaction + + + UPDATE, DELETE + SELECT FOR UPDATE + SELECT FOR SHARE + + + + + UPDATE, DELETE + X + X + X + + + SELECT FOR UPDATE + X + X + X + + + SELECT FOR SHARE + X + + + + + +
+ The Serializable mode provides a rigorous guarantee that each transaction sees a wholly consistent view of the database. However, *************** SELECT SUM(value) FROM mytab WHERE class *** 921,926 **** --- 961,974 ---- + Serializable transactions are affected by concurrent + SELECT FOR SHARE and SELECT FOR UPDATE + for longer than those locks are actually held, and may be aborted + when trying to obtain a conflicting lock. For details, + see + + + PostgreSQL doesn't remember any information about modified rows in memory, so there is no limit on the number of rows locked at one time. However, locking a row diff --git a/src/backend/access/heap/heapam.c b/src/backend/access/heap/heapam.c index 80d2351..b711b58 100644 *** a/src/backend/access/heap/heapam.c --- b/src/backend/access/heap/heapam.c *************** static XLogRecPtr log_heap_update(Relati *** 83,88 **** --- 83,89 ---- bool all_visible_cleared, bool new_all_visible_cleared); static bool HeapSatisfiesHOTUpdate(Relation relation, Bitmapset *hot_attrs, HeapTuple oldtup, HeapTuple newtup); + static bool HeapSatisfiesLockersVisible(HeapTupleHeader tuple, Snapshot snapshot); /* ---------------------------------------------------------------- *************** simple_heap_insert(Relation relation, He *** 2033,2040 **** * update_xmax - output parameter, used only for failure case (see below) * cid - delete command ID (used for visibility test, and stored into * cmax if successful) - * crosscheck - if not InvalidSnapshot, also check tuple against this * wait - true if should wait for any conflicting update to commit/abort * * Normal, successful return value is HeapTupleMayBeUpdated, which * actually means we did delete it. Failure return codes are --- 2034,2042 ---- * update_xmax - output parameter, used only for failure case (see below) * cid - delete command ID (used for visibility test, and stored into * cmax if successful) * wait - true if should wait for any conflicting update to commit/abort + * lockcheck_snapshot - if not NULL, report the tuple as updated if it + * was locked by a transaction not visible under this snapshot * * Normal, successful return value is HeapTupleMayBeUpdated, which * actually means we did delete it. Failure return codes are *************** simple_heap_insert(Relation relation, He *** 2049,2055 **** HTSU_Result heap_delete(Relation relation, ItemPointer tid, ItemPointer ctid, TransactionId *update_xmax, ! CommandId cid, Snapshot crosscheck, bool wait) { HTSU_Result result; TransactionId xid = GetCurrentTransactionId(); --- 2051,2057 ---- HTSU_Result heap_delete(Relation relation, ItemPointer tid, ItemPointer ctid, TransactionId *update_xmax, ! CommandId cid, bool wait, Snapshot lockcheck_snapshot) { HTSU_Result result; TransactionId xid = GetCurrentTransactionId(); *************** l1: *** 2170,2181 **** else result = HeapTupleUpdated; } ! ! if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated) { ! /* Perform additional check for serializable RI updates */ ! if (!HeapTupleSatisfiesVisibility(&tp, crosscheck, buffer)) ! result = HeapTupleUpdated; } if (result != HeapTupleMayBeUpdated) --- 2172,2183 ---- else result = HeapTupleUpdated; } ! ! /* Verify visibility of locking transactions. */ ! if ((result == HeapTupleMayBeUpdated) && ! !HeapSatisfiesLockersVisible(tp.t_data, lockcheck_snapshot)) { ! 
result = HeapTupleUpdated; } if (result != HeapTupleMayBeUpdated) *************** l1: *** 2183,2189 **** Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated || result == HeapTupleBeingUpdated); ! Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID)); *ctid = tp.t_data->t_ctid; *update_xmax = HeapTupleHeaderGetXmax(tp.t_data); UnlockReleaseBuffer(buffer); --- 2185,2192 ---- Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated || result == HeapTupleBeingUpdated); ! Assert(!(tp.t_data->t_infomask & HEAP_XMAX_INVALID) || ! (tp.t_data->t_infomask & HEAP_IS_LOCKED)); *ctid = tp.t_data->t_ctid; *update_xmax = HeapTupleHeaderGetXmax(tp.t_data); UnlockReleaseBuffer(buffer); *************** simple_heap_delete(Relation relation, It *** 2313,2320 **** result = heap_delete(relation, tid, &update_ctid, &update_xmax, ! GetCurrentCommandId(true), InvalidSnapshot, ! true /* wait for commit */ ); switch (result) { case HeapTupleSelfUpdated: --- 2316,2324 ---- result = heap_delete(relation, tid, &update_ctid, &update_xmax, ! GetCurrentCommandId(true), ! true /* wait for commit */ , ! InvalidSnapshot); switch (result) { case HeapTupleSelfUpdated: *************** simple_heap_delete(Relation relation, It *** 2349,2355 **** * update_xmax - output parameter, used only for failure case (see below) * cid - update command ID (used for visibility test, and stored into * cmax/cmin if successful) ! * crosscheck - if not InvalidSnapshot, also check old tuple against this * wait - true if should wait for any conflicting update to commit/abort * * Normal, successful return value is HeapTupleMayBeUpdated, which --- 2353,2360 ---- * update_xmax - output parameter, used only for failure case (see below) * cid - update command ID (used for visibility test, and stored into * cmax/cmin if successful) ! * lockcheck_snapshot - if not NULL, report the tuple as updated if it ! * was locked by a transaction not visible under this snapshot * wait - true if should wait for any conflicting update to commit/abort * * Normal, successful return value is HeapTupleMayBeUpdated, which *************** simple_heap_delete(Relation relation, It *** 2371,2377 **** HTSU_Result heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, ItemPointer ctid, TransactionId *update_xmax, ! CommandId cid, Snapshot crosscheck, bool wait) { HTSU_Result result; TransactionId xid = GetCurrentTransactionId(); --- 2376,2382 ---- HTSU_Result heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, ItemPointer ctid, TransactionId *update_xmax, ! CommandId cid, bool wait, Snapshot lockcheck_snapshot) { HTSU_Result result; TransactionId xid = GetCurrentTransactionId(); *************** l2: *** 2523,2541 **** result = HeapTupleUpdated; } ! if (crosscheck != InvalidSnapshot && result == HeapTupleMayBeUpdated) { ! /* Perform additional check for serializable RI updates */ ! if (!HeapTupleSatisfiesVisibility(&oldtup, crosscheck, buffer)) ! result = HeapTupleUpdated; } if (result != HeapTupleMayBeUpdated) { Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated || result == HeapTupleBeingUpdated); ! Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID)); *ctid = oldtup.t_data->t_ctid; *update_xmax = HeapTupleHeaderGetXmax(oldtup.t_data); UnlockReleaseBuffer(buffer); --- 2528,2548 ---- result = HeapTupleUpdated; } ! /* Verify visibility of locking transactions. */ ! if ((result == HeapTupleMayBeUpdated) && ! !HeapSatisfiesLockersVisible(oldtup.t_data, lockcheck_snapshot)) { ! 
result = HeapTupleUpdated; } + if (result != HeapTupleMayBeUpdated) { Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated || result == HeapTupleBeingUpdated); ! Assert(!(oldtup.t_data->t_infomask & HEAP_XMAX_INVALID) || ! (oldtup.t_data->t_infomask & HEAP_IS_LOCKED)); *ctid = oldtup.t_data->t_ctid; *update_xmax = HeapTupleHeaderGetXmax(oldtup.t_data); UnlockReleaseBuffer(buffer); *************** HeapSatisfiesHOTUpdate(Relation relation *** 2961,2966 **** --- 2968,3058 ---- return true; } + + /* + * Returns false if one of the tuple's lockers committed but + * aren't visible according to lockcheck_snapshot, excluding subtransactions + * of the current transaction. + * Assumes that all locking transaction either committed or aborted, + * but aren't still in progress. + */ + static bool + HeapSatisfiesLockersVisible(HeapTupleHeader tuple, Snapshot lockcheck_snapshot) + { + if (lockcheck_snapshot == InvalidSnapshot) + return true; + + if (tuple->t_infomask & HEAP_IS_LOCKED) + { + /* + * If the tuple was locked, we now check whether the locking + * transaction(s) are visible under lockcheck_snapshot. If + * they aren't, we pretend that the tuple was updated. + */ + + if (tuple->t_infomask & HEAP_XMAX_IS_MULTI) + { + TransactionId* xids; + int xids_l = GetMultiXactIdMembers(HeapTupleHeaderGetXmax(tuple), &xids); + + if (xids_l < 1) { + /* + * The multi xact either is too old to be inspected or doesn't contain members. + * The second case is probably impossible, but even if not it doesn't pose + * any problem. + * In the first case, we have to trust that all xids that were contained in + * the xact are in fact visible under lockcheck_snapshot. Currently this + * is always the case, since lockcheck_snapshot is always the transaction's + * serializable snapshot, and we call MultiXactIdSetOldestVisible() before + * acquireing that snapshot. + */ + return true; + } + else + { + int i; + for (i = 0; i < xids_l; ++i) + { + /* Ignore our own subtransactions */ + if (TransactionIdIsCurrentTransactionId(xids[i])) + continue; + + /* We expect to be called after the locking transactions' fates have been decided */ + Assert(!TransactionIdIsInProgress(xids[i])); + + if (!TransactionIdDidAbort(xids[i]) && + XidInMVCCSnapshot(xids[i], lockcheck_snapshot)) + { + /* Non-aborted, invisible locker */ + return false; + } + } + return true; + } + } + else + { + TransactionId xid = HeapTupleHeaderGetXmax(tuple); + + /* Ignore our own subtransactions */ + if (TransactionIdIsCurrentTransactionId(xid)) + return true; + + /* We expect to be called after the locking transactions' fates have been decided */ + Assert(!TransactionIdIsInProgress(xid)); + + /* Locker must either be visible or have aborted */ + return TransactionIdDidAbort(xid) || + !XidInMVCCSnapshot(xid, lockcheck_snapshot); + } + } + else + { + /* Tuple wasn't locked */ + return true; + } + } + /* * simple_heap_update - replace a tuple * *************** simple_heap_update(Relation relation, It *** 2978,2985 **** result = heap_update(relation, otid, tup, &update_ctid, &update_xmax, ! GetCurrentCommandId(true), InvalidSnapshot, ! true /* wait for commit */ ); switch (result) { case HeapTupleSelfUpdated: --- 3070,3077 ---- result = heap_update(relation, otid, tup, &update_ctid, &update_xmax, ! GetCurrentCommandId(true), ! 
true /* wait for commit */, InvalidSnapshot); switch (result) { case HeapTupleSelfUpdated: *************** simple_heap_update(Relation relation, It *** 3013,3018 **** --- 3105,3113 ---- * tuple's cmax if lock is successful) * mode: indicates if shared or exclusive tuple lock is desired * nowait: if true, ereport rather than blocking if lock not available + * lockcheck_snapshot: if not NULL, report the tuple as updated if it + * was locked by a transaction not visible under + * this snapshot * * Output parameters: * *tuple: all fields filled in *************** simple_heap_update(Relation relation, It *** 3066,3072 **** HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer *buffer, ItemPointer ctid, TransactionId *update_xmax, ! CommandId cid, LockTupleMode mode, bool nowait) { HTSU_Result result; ItemPointer tid = &(tuple->t_self); --- 3161,3168 ---- HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer *buffer, ItemPointer ctid, TransactionId *update_xmax, ! CommandId cid, LockTupleMode mode, bool nowait, ! Snapshot lockcheck_snapshot) { HTSU_Result result; ItemPointer tid = &(tuple->t_self); *************** l3: *** 3246,3256 **** else result = HeapTupleUpdated; } if (result != HeapTupleMayBeUpdated) { Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated); ! Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID)); *ctid = tuple->t_data->t_ctid; *update_xmax = HeapTupleHeaderGetXmax(tuple->t_data); LockBuffer(*buffer, BUFFER_LOCK_UNLOCK); --- 3342,3360 ---- else result = HeapTupleUpdated; } + + /* Verify visibility of locking transactions */ + if ((result == HeapTupleMayBeUpdated) && + !HeapSatisfiesLockersVisible(tuple->t_data, lockcheck_snapshot)) + { + result = HeapTupleUpdated; + } if (result != HeapTupleMayBeUpdated) { Assert(result == HeapTupleSelfUpdated || result == HeapTupleUpdated); ! Assert(!(tuple->t_data->t_infomask & HEAP_XMAX_INVALID) || ! (tuple->t_data->t_infomask & HEAP_IS_LOCKED)); *ctid = tuple->t_data->t_ctid; *update_xmax = HeapTupleHeaderGetXmax(tuple->t_data); LockBuffer(*buffer, BUFFER_LOCK_UNLOCK); diff --git a/src/backend/access/transam/multixact.c b/src/backend/access/transam/multixact.c index b5bb311..47ff5de 100644 *** a/src/backend/access/transam/multixact.c --- b/src/backend/access/transam/multixact.c *************** static MemoryContext MXactContext = NULL *** 211,217 **** #endif /* internal MultiXactId management */ - static void MultiXactIdSetOldestVisible(void); static MultiXactId CreateMultiXactId(int nxids, TransactionId *xids); static void RecordNewMultiXact(MultiXactId multi, MultiXactOffset offset, int nxids, TransactionId *xids); --- 211,216 ---- *************** MultiXactIdSetOldestMember(void) *** 531,537 **** * there is no live transaction, now or later, that can be a member of any * MultiXactId older than the OldestVisibleMXactId we compute here. */ ! static void MultiXactIdSetOldestVisible(void) { if (!MultiXactIdIsValid(OldestVisibleMXactId[MyBackendId])) --- 530,536 ---- * there is no live transaction, now or later, that can be a member of any * MultiXactId older than the OldestVisibleMXactId we compute here. */ ! 
void MultiXactIdSetOldestVisible(void) { if (!MultiXactIdIsValid(OldestVisibleMXactId[MyBackendId])) diff --git a/src/backend/commands/copy.c b/src/backend/commands/copy.c index 906d547..5ae3c62 100644 *** a/src/backend/commands/copy.c --- b/src/backend/commands/copy.c *************** DoCopy(const CopyStmt *stmt, const char *** 1088,1094 **** /* Create a QueryDesc requesting no output */ cstate->queryDesc = CreateQueryDesc(plan, queryString, GetActiveSnapshot(), - InvalidSnapshot, dest, NULL, 0); /* --- 1088,1093 ---- diff --git a/src/backend/commands/explain.c b/src/backend/commands/explain.c index b776ad1..643c777 100644 *** a/src/backend/commands/explain.c --- b/src/backend/commands/explain.c *************** ExplainOnePlan(PlannedStmt *plannedstmt, *** 369,375 **** /* Create a QueryDesc requesting no output */ queryDesc = CreateQueryDesc(plannedstmt, queryString, ! GetActiveSnapshot(), InvalidSnapshot, None_Receiver, params, instrument_option); INSTR_TIME_SET_CURRENT(starttime); --- 369,375 ---- /* Create a QueryDesc requesting no output */ queryDesc = CreateQueryDesc(plannedstmt, queryString, ! GetActiveSnapshot(), None_Receiver, params, instrument_option); INSTR_TIME_SET_CURRENT(starttime); diff --git a/src/backend/commands/trigger.c b/src/backend/commands/trigger.c index 210f6b8..3816efc 100644 *** a/src/backend/commands/trigger.c --- b/src/backend/commands/trigger.c *************** GetTupleForTrigger(EState *estate, *** 2350,2363 **** Assert(epqstate != NULL); /* ! * lock tuple for update */ ltrmark:; tuple.t_self = *tid; test = heap_lock_tuple(relation, &tuple, &buffer, &update_ctid, &update_xmax, estate->es_output_cid, ! LockTupleExclusive, false); switch (test) { case HeapTupleSelfUpdated: --- 2350,2369 ---- Assert(epqstate != NULL); /* ! * lock tuple for update. ! * ! * Serializable transactions pass their snapshot as the lockcheck_snapshot. ! * This lets heap_lock_tuple report concurrently FOR SHARE or FOR UPDATE ! * locked tuples as HeapTupleUpdated. */ ltrmark:; tuple.t_self = *tid; test = heap_lock_tuple(relation, &tuple, &buffer, &update_ctid, &update_xmax, estate->es_output_cid, ! LockTupleExclusive, false, ! IsXactIsoLevelSerializable ? estate->es_snapshot : ! InvalidSnapshot); switch (test) { case HeapTupleSelfUpdated: diff --git a/src/backend/executor/README b/src/backend/executor/README index dc86822..3798a48 100644 *** a/src/backend/executor/README --- b/src/backend/executor/README *************** is no explicit prohibition on SRFs in UP *** 195,197 **** --- 195,226 ---- that only the first result row of an SRF counts, because all subsequent rows will result in attempts to re-update an already updated target row. This is historical behavior and seems not worth changing.) + + Row Locks and Serializable Transactions + --------------------------------------- + + In READ COMMITTED mode, a transaction that encounters a locked row during + an UPDATE, DELETE, SELECT FOR UPDATE or SELECT FOR SHARE simply blocks + until the locking transaction commits or rolls back, and in the former case + then re-executes the statement using the new row version, as described above. + + For SERIALIZABLE transactions this is not satisfactory. The RI triggers, + for example, take a FOR SHARE lock on a parent row before allowing a child + row to be inserted, and verify that deleting a parent row leaves no orphaned + children behind before allowing the delete to occur.
From within READ COMMITTED + transactions, blocking upon a delete of a parent row until all lockers have + finished is sufficient to guarantee that this check finds any potential orphan, + since the check will be executed with an up-to-date snapshot to which the locking + transaction's changes are visible. This, however, is not true for SERIALIZABLE + transactions since these will continue to use their old snapshot and hence miss + newly inserted rows. + + Serializable transactions therefore treat a FOR SHARE or FOR UPDATE lock on a + tuple the same as an actual update during UPDATE, DELETE and SELECT FOR UPDATE. They are + thus aborted when trying to UPDATE, DELETE or FOR UPDATE lock a row that was FOR SHARE + or FOR UPDATE locked by a concurrent transaction. + + This is implemented by the lockcheck_snapshot parameter of heap_update, heap_delete + and heap_lock_tuple. If such a snapshot is provided to one of these functions, + they return HeapTupleUpdated if the tuple was locked (but not necessarily updated) + by any transaction invisible to the snapshot. diff --git a/src/backend/executor/execMain.c b/src/backend/executor/execMain.c index 8cd3ae9..049ebd8 100644 *** a/src/backend/executor/execMain.c --- b/src/backend/executor/execMain.c *************** standard_ExecutorStart(QueryDesc *queryD *** 183,189 **** * Copy other important information into the EState */ estate->es_snapshot = RegisterSnapshot(queryDesc->snapshot); - estate->es_crosscheck_snapshot = RegisterSnapshot(queryDesc->crosscheck_snapshot); estate->es_instrument = queryDesc->instrument_options; /* --- 183,188 ---- *************** standard_ExecutorEnd(QueryDesc *queryDes *** 348,354 **** /* do away with our snapshots */ UnregisterSnapshot(estate->es_snapshot); - UnregisterSnapshot(estate->es_crosscheck_snapshot); /* * Must switch out of context before destroying it --- 347,352 ---- *************** EvalPlanQualFetch(EState *estate, Relati *** 1533,1543 **** /* * This is a live tuple, so now try to lock it. */ test = heap_lock_tuple(relation, &tuple, &buffer, &update_ctid, &update_xmax, estate->es_output_cid, ! lockmode, false); /* We now have two pins on the buffer, get rid of one */ ReleaseBuffer(buffer); --- 1531,1548 ---- /* * This is a live tuple, so now try to lock it. + * + * Serializable transactions pass their snapshot as the lockcheck_snapshot. + * This lets heap_lock_tuple report concurrently FOR SHARE or FOR UPDATE + * locked tuples as HeapTupleUpdated. */ + Assert(!IsXactIsoLevelSerializable || (estate->es_snapshot != InvalidSnapshot)); test = heap_lock_tuple(relation, &tuple, &buffer, &update_ctid, &update_xmax, estate->es_output_cid, ! lockmode, false, ! IsXactIsoLevelSerializable ? estate->es_snapshot : ! 
InvalidSnapshot); /* We now have two pins on the buffer, get rid of one */ ReleaseBuffer(buffer); *************** EvalPlanQualStart(EPQState *epqstate, ES *** 1906,1912 **** */ estate->es_direction = ForwardScanDirection; estate->es_snapshot = parentestate->es_snapshot; - estate->es_crosscheck_snapshot = parentestate->es_crosscheck_snapshot; estate->es_range_table = parentestate->es_range_table; estate->es_plannedstmt = parentestate->es_plannedstmt; estate->es_junkFilter = parentestate->es_junkFilter; --- 1911,1916 ---- diff --git a/src/backend/executor/execUtils.c b/src/backend/executor/execUtils.c index 877abf4..a70221a 100644 *** a/src/backend/executor/execUtils.c --- b/src/backend/executor/execUtils.c *************** CreateExecutorState(void) *** 109,115 **** */ estate->es_direction = ForwardScanDirection; estate->es_snapshot = SnapshotNow; - estate->es_crosscheck_snapshot = InvalidSnapshot; /* no crosscheck */ estate->es_range_table = NIL; estate->es_plannedstmt = NULL; --- 109,114 ---- diff --git a/src/backend/executor/functions.c b/src/backend/executor/functions.c index bacfe9a..0f77b58 100644 *** a/src/backend/executor/functions.c --- b/src/backend/executor/functions.c *************** postquel_start(execution_state *es, SQLF *** 415,421 **** if (IsA(es->stmt, PlannedStmt)) es->qd = CreateQueryDesc((PlannedStmt *) es->stmt, fcache->src, ! snapshot, InvalidSnapshot, dest, fcache->paramLI, 0); else --- 415,421 ---- if (IsA(es->stmt, PlannedStmt)) es->qd = CreateQueryDesc((PlannedStmt *) es->stmt, fcache->src, ! snapshot, dest, fcache->paramLI, 0); else diff --git a/src/backend/executor/nodeLockRows.c b/src/backend/executor/nodeLockRows.c index 562ee6e..d809a62 100644 *** a/src/backend/executor/nodeLockRows.c --- b/src/backend/executor/nodeLockRows.c *************** lnext: *** 71,76 **** --- 71,77 ---- ItemPointerData update_ctid; TransactionId update_xmax; LockTupleMode lockmode; + Snapshot lockcheck_snapshot = InvalidSnapshot; HTSU_Result test; HeapTuple copyTuple; *************** lnext: *** 110,123 **** /* okay, try to lock the tuple */ if (erm->markType == ROW_MARK_EXCLUSIVE) lockmode = LockTupleExclusive; else lockmode = LockTupleShared; test = heap_lock_tuple(erm->relation, &tuple, &buffer, &update_ctid, &update_xmax, estate->es_output_cid, ! lockmode, erm->noWait); ReleaseBuffer(buffer); switch (test) { --- 111,137 ---- /* okay, try to lock the tuple */ if (erm->markType == ROW_MARK_EXCLUSIVE) + { lockmode = LockTupleExclusive; + + /* + * Serializable transactions pass their snapshot as the logcheck_snapshot. + * This lets heap_lock_tuple report concurrently FOR SHARE or FOR UPDATE + * locked tuples as HeapTupleUpdated. + */ + if (IsXactIsoLevelSerializable) + lockcheck_snapshot = estate->es_snapshot; + } else + { lockmode = LockTupleShared; + } test = heap_lock_tuple(erm->relation, &tuple, &buffer, &update_ctid, &update_xmax, estate->es_output_cid, ! lockmode, erm->noWait, ! lockcheck_snapshot); ReleaseBuffer(buffer); switch (test) { diff --git a/src/backend/executor/nodeModifyTable.c b/src/backend/executor/nodeModifyTable.c index 8619ce3..2dad85d 100644 *** a/src/backend/executor/nodeModifyTable.c --- b/src/backend/executor/nodeModifyTable.c *************** ExecDelete(ItemPointer tupleid, *** 307,323 **** /* * delete the tuple * ! * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that ! * the row to be deleted is visible to that snapshot, and throw a can't- ! * serialize error if not. This is a special-case behavior needed for ! 
* referential integrity updates in serializable transactions. */ ldelete:; result = heap_delete(resultRelationDesc, tupleid, &update_ctid, &update_xmax, estate->es_output_cid, ! estate->es_crosscheck_snapshot, ! true /* wait for commit */ ); switch (result) { case HeapTupleSelfUpdated: --- 307,323 ---- /* * delete the tuple * ! * Serializable transactions pass their snapshot as the lockcheck_snapshot. ! * This lets heap_delete report concurrently FOR SHARE or FOR UPDATE ! * locked tuples as HeapTupleUpdated. */ ldelete:; result = heap_delete(resultRelationDesc, tupleid, &update_ctid, &update_xmax, estate->es_output_cid, ! true, /* wait for commit */ ! IsXactIsoLevelSerializable ? estate->es_snapshot : ! InvalidSnapshot); switch (result) { case HeapTupleSelfUpdated: *************** lreplace:; *** 496,511 **** /* * replace the heap tuple * ! * Note: if es_crosscheck_snapshot isn't InvalidSnapshot, we check that ! * the row to be updated is visible to that snapshot, and throw a can't- ! * serialize error if not. This is a special-case behavior needed for ! * referential integrity updates in serializable transactions. */ result = heap_update(resultRelationDesc, tupleid, tuple, &update_ctid, &update_xmax, estate->es_output_cid, ! estate->es_crosscheck_snapshot, ! true /* wait for commit */ ); switch (result) { case HeapTupleSelfUpdated: --- 496,511 ---- /* * replace the heap tuple * ! * Serializable transactions pass their snapshot as the lockcheck_snapshot. ! * This lets heap_update report concurrently FOR SHARE or FOR UPDATE ! * locked tuples as HeapTupleUpdated. */ result = heap_update(resultRelationDesc, tupleid, tuple, &update_ctid, &update_xmax, estate->es_output_cid, ! true, /* wait for commit */ ! IsXactIsoLevelSerializable ? estate->es_snapshot : ! InvalidSnapshot); switch (result) { case HeapTupleSelfUpdated: diff --git a/src/backend/executor/spi.c b/src/backend/executor/spi.c index 7f0b5e4..c53a3e1 100644 *** a/src/backend/executor/spi.c --- b/src/backend/executor/spi.c *************** static void _SPI_prepare_plan(const char *** 51,57 **** ParamListInfo boundParams); static int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, ! Snapshot snapshot, Snapshot crosscheck_snapshot, bool read_only, bool fire_triggers, long tcount); static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes, --- 51,57 ---- ParamListInfo boundParams); static int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, ! Snapshot snapshot, bool read_only, bool fire_triggers, long tcount); static ParamListInfo _SPI_convert_params(int nargs, Oid *argtypes, *************** SPI_execute(const char *src, bool read_o *** 357,363 **** _SPI_prepare_plan(src, &plan, NULL); res = _SPI_execute_plan(&plan, NULL, ! InvalidSnapshot, InvalidSnapshot, read_only, true, tcount); _SPI_end_call(true); --- 357,363 ---- _SPI_prepare_plan(src, &plan, NULL); res = _SPI_execute_plan(&plan, NULL, ! InvalidSnapshot, read_only, true, tcount); _SPI_end_call(true); *************** SPI_execute_plan(SPIPlanPtr plan, Datum *** 392,398 **** _SPI_convert_params(plan->nargs, plan->argtypes, Values, Nulls, 0), ! InvalidSnapshot, InvalidSnapshot, read_only, true, tcount); _SPI_end_call(true); --- 392,398 ---- _SPI_convert_params(plan->nargs, plan->argtypes, Values, Nulls, 0), ! InvalidSnapshot, read_only, true, tcount); _SPI_end_call(true); *************** SPI_execute_plan_with_paramlist(SPIPlanP *** 421,427 **** return res; res = _SPI_execute_plan(plan, params, ! 
InvalidSnapshot, InvalidSnapshot, read_only, true, tcount); _SPI_end_call(true); --- 421,427 ---- return res; res = _SPI_execute_plan(plan, params, ! InvalidSnapshot, read_only, true, tcount); _SPI_end_call(true); *************** SPI_execute_plan_with_paramlist(SPIPlanP *** 444,450 **** int SPI_execute_snapshot(SPIPlanPtr plan, Datum *Values, const char *Nulls, ! Snapshot snapshot, Snapshot crosscheck_snapshot, bool read_only, bool fire_triggers, long tcount) { int res; --- 444,450 ---- int SPI_execute_snapshot(SPIPlanPtr plan, Datum *Values, const char *Nulls, ! Snapshot snapshot, bool read_only, bool fire_triggers, long tcount) { int res; *************** SPI_execute_snapshot(SPIPlanPtr plan, *** 463,469 **** _SPI_convert_params(plan->nargs, plan->argtypes, Values, Nulls, 0), ! snapshot, crosscheck_snapshot, read_only, fire_triggers, tcount); _SPI_end_call(true); --- 463,469 ---- _SPI_convert_params(plan->nargs, plan->argtypes, Values, Nulls, 0), ! snapshot, read_only, fire_triggers, tcount); _SPI_end_call(true); *************** SPI_execute_with_args(const char *src, *** 516,522 **** /* We don't need to copy the plan since it will be thrown away anyway */ res = _SPI_execute_plan(&plan, paramLI, ! InvalidSnapshot, InvalidSnapshot, read_only, true, tcount); _SPI_end_call(true); --- 516,522 ---- /* We don't need to copy the plan since it will be thrown away anyway */ res = _SPI_execute_plan(&plan, paramLI, ! InvalidSnapshot, read_only, true, tcount); _SPI_end_call(true); *************** _SPI_prepare_plan(const char *src, SPIPl *** 1752,1758 **** * * snapshot: query snapshot to use, or InvalidSnapshot for the normal * behavior of taking a new snapshot for each query. - * crosscheck_snapshot: for RI use, all others pass InvalidSnapshot * read_only: TRUE for read-only execution (no CommandCounterIncrement) * fire_triggers: TRUE to fire AFTER triggers at end of query (normal case); * FALSE means any AFTER triggers are postponed to end of outer query --- 1752,1757 ---- *************** _SPI_prepare_plan(const char *src, SPIPl *** 1760,1766 **** */ static int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, ! Snapshot snapshot, Snapshot crosscheck_snapshot, bool read_only, bool fire_triggers, long tcount) { int my_res = 0; --- 1759,1765 ---- */ static int _SPI_execute_plan(SPIPlanPtr plan, ParamListInfo paramLI, ! Snapshot snapshot, bool read_only, bool fire_triggers, long tcount) { int my_res = 0; *************** _SPI_execute_plan(SPIPlanPtr plan, Param *** 1903,1909 **** qdesc = CreateQueryDesc((PlannedStmt *) stmt, plansource->query_string, ! snap, crosscheck_snapshot, dest, paramLI, 0); res = _SPI_pquery(qdesc, fire_triggers, --- 1902,1908 ---- qdesc = CreateQueryDesc((PlannedStmt *) stmt, plansource->query_string, ! 
snap, dest, paramLI, 0); res = _SPI_pquery(qdesc, fire_triggers, diff --git a/src/backend/tcop/pquery.c b/src/backend/tcop/pquery.c index 8ad4915..6c05510 100644 *** a/src/backend/tcop/pquery.c --- b/src/backend/tcop/pquery.c *************** QueryDesc * *** 64,70 **** CreateQueryDesc(PlannedStmt *plannedstmt, const char *sourceText, Snapshot snapshot, - Snapshot crosscheck_snapshot, DestReceiver *dest, ParamListInfo params, int instrument_options) --- 64,69 ---- *************** CreateQueryDesc(PlannedStmt *plannedstmt *** 76,83 **** qd->utilitystmt = plannedstmt->utilityStmt; /* in case DECLARE CURSOR */ qd->sourceText = sourceText; /* query text */ qd->snapshot = RegisterSnapshot(snapshot); /* snapshot */ - /* RI check snapshot */ - qd->crosscheck_snapshot = RegisterSnapshot(crosscheck_snapshot); qd->dest = dest; /* output dest */ qd->params = params; /* parameter values passed into query */ qd->instrument_options = instrument_options; /* instrumentation --- 75,80 ---- *************** CreateUtilityQueryDesc(Node *utilitystmt *** 109,115 **** qd->utilitystmt = utilitystmt; /* utility command */ qd->sourceText = sourceText; /* query text */ qd->snapshot = RegisterSnapshot(snapshot); /* snapshot */ - qd->crosscheck_snapshot = InvalidSnapshot; /* RI check snapshot */ qd->dest = dest; /* output dest */ qd->params = params; /* parameter values passed into query */ qd->instrument_options = false; /* uninteresting for utilities */ --- 106,111 ---- *************** FreeQueryDesc(QueryDesc *qdesc) *** 134,140 **** /* forget our snapshots */ UnregisterSnapshot(qdesc->snapshot); - UnregisterSnapshot(qdesc->crosscheck_snapshot); /* Only the QueryDesc itself need be freed */ pfree(qdesc); --- 130,135 ---- *************** ProcessQuery(PlannedStmt *plan, *** 178,184 **** * Create the QueryDesc object */ queryDesc = CreateQueryDesc(plan, sourceText, ! GetActiveSnapshot(), InvalidSnapshot, dest, params, 0); /* --- 173,179 ---- * Create the QueryDesc object */ queryDesc = CreateQueryDesc(plan, sourceText, ! 
GetActiveSnapshot(), dest, params, 0); /* *************** PortalStart(Portal portal, ParamListInfo *** 514,520 **** queryDesc = CreateQueryDesc((PlannedStmt *) linitial(portal->stmts), portal->sourceText, GetActiveSnapshot(), - InvalidSnapshot, None_Receiver, params, 0); --- 509,514 ---- diff --git a/src/backend/utils/adt/ri_triggers.c b/src/backend/utils/adt/ri_triggers.c index aa06302..02946fa 100644 *** a/src/backend/utils/adt/ri_triggers.c --- b/src/backend/utils/adt/ri_triggers.c *************** static SPIPlanPtr ri_PlanCheck(const cha *** 230,236 **** static bool ri_PerformCheck(RI_QueryKey *qkey, SPIPlanPtr qplan, Relation fk_rel, Relation pk_rel, HeapTuple old_tuple, HeapTuple new_tuple, - bool detectNewRows, int expect_OK, const char *constrname); static void ri_ExtractValues(RI_QueryKey *qkey, int key_idx, Relation rel, HeapTuple tuple, --- 230,235 ---- *************** RI_FKey_check(PG_FUNCTION_ARGS) *** 357,363 **** ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, NULL, NULL, - false, SPI_OK_SELECT, NameStr(riinfo.conname)); --- 356,361 ---- *************** RI_FKey_check(PG_FUNCTION_ARGS) *** 500,506 **** ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, NULL, new_row, - false, SPI_OK_SELECT, NameStr(riinfo.conname)); --- 498,503 ---- *************** ri_Check_Pk_Match(Relation pk_rel, Relat *** 661,667 **** result = ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, old_row, NULL, - true, /* treat like update */ SPI_OK_SELECT, NULL); if (SPI_finish() != SPI_OK_FINISH) --- 658,663 ---- *************** RI_FKey_noaction_del(PG_FUNCTION_ARGS) *** 818,824 **** ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, old_row, NULL, - true, /* must detect new rows */ SPI_OK_SELECT, NameStr(riinfo.conname)); --- 814,819 ---- *************** RI_FKey_noaction_upd(PG_FUNCTION_ARGS) *** 1006,1012 **** ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, old_row, NULL, - true, /* must detect new rows */ SPI_OK_SELECT, NameStr(riinfo.conname)); --- 1001,1006 ---- *************** RI_FKey_cascade_del(PG_FUNCTION_ARGS) *** 1168,1174 **** ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, old_row, NULL, - true, /* must detect new rows */ SPI_OK_DELETE, NameStr(riinfo.conname)); --- 1162,1167 ---- *************** RI_FKey_cascade_upd(PG_FUNCTION_ARGS) *** 1356,1362 **** ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, old_row, new_row, - true, /* must detect new rows */ SPI_OK_UPDATE, NameStr(riinfo.conname)); --- 1349,1354 ---- *************** RI_FKey_restrict_del(PG_FUNCTION_ARGS) *** 1527,1533 **** ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, old_row, NULL, - true, /* must detect new rows */ SPI_OK_SELECT, NameStr(riinfo.conname)); --- 1519,1524 ---- *************** RI_FKey_restrict_upd(PG_FUNCTION_ARGS) *** 1710,1716 **** ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, old_row, NULL, - true, /* must detect new rows */ SPI_OK_SELECT, NameStr(riinfo.conname)); --- 1701,1706 ---- *************** RI_FKey_setnull_del(PG_FUNCTION_ARGS) *** 1881,1887 **** ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, old_row, NULL, - true, /* must detect new rows */ SPI_OK_UPDATE, NameStr(riinfo.conname)); --- 1871,1876 ---- *************** RI_FKey_setnull_upd(PG_FUNCTION_ARGS) *** 2097,2103 **** ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, old_row, NULL, - true, /* must detect new rows */ SPI_OK_UPDATE, NameStr(riinfo.conname)); --- 2086,2091 ---- *************** RI_FKey_setdefault_del(PG_FUNCTION_ARGS) *** 2269,2275 **** ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, old_row, NULL, - true, /* must detect new rows */ SPI_OK_UPDATE, 
NameStr(riinfo.conname)); --- 2257,2262 ---- *************** RI_FKey_setdefault_upd(PG_FUNCTION_ARGS) *** 2472,2478 **** ri_PerformCheck(&qkey, qplan, fk_rel, pk_rel, old_row, NULL, - true, /* must detect new rows */ SPI_OK_UPDATE, NameStr(riinfo.conname)); --- 2459,2464 ---- *************** RI_Initial_Check(Trigger *trigger, Relat *** 2792,2798 **** spi_result = SPI_execute_snapshot(qplan, NULL, NULL, GetLatestSnapshot(), - InvalidSnapshot, true, false, 1); /* Check result */ --- 2778,2783 ---- *************** static bool *** 3271,3284 **** ri_PerformCheck(RI_QueryKey *qkey, SPIPlanPtr qplan, Relation fk_rel, Relation pk_rel, HeapTuple old_tuple, HeapTuple new_tuple, - bool detectNewRows, int expect_OK, const char *constrname) { Relation query_rel, source_rel; int key_idx; Snapshot test_snapshot; - Snapshot crosscheck_snapshot; int limit; int spi_result; Oid save_userid; --- 3256,3267 ---- *************** ri_PerformCheck(RI_QueryKey *qkey, SPIPl *** 3330,3359 **** } /* - * In READ COMMITTED mode, we just need to use an up-to-date regular - * snapshot, and we will see all rows that could be interesting. But in - * SERIALIZABLE mode, we can't change the transaction snapshot. If the - * caller passes detectNewRows == false then it's okay to do the query - * with the transaction snapshot; otherwise we use a current snapshot, and - * tell the executor to error out if it finds any rows under the current - * snapshot that wouldn't be visible per the transaction snapshot. Note - * that SPI_execute_snapshot will register the snapshots, so we don't need - * to bother here. - */ - if (IsXactIsoLevelSerializable && detectNewRows) - { - CommandCounterIncrement(); /* be sure all my own work is visible */ - test_snapshot = GetLatestSnapshot(); - crosscheck_snapshot = GetTransactionSnapshot(); - } - else - { - /* the default SPI behavior is okay */ - test_snapshot = InvalidSnapshot; - crosscheck_snapshot = InvalidSnapshot; - } - - /* * If this is a select query (e.g., for a 'no action' or 'restrict' * trigger), we only need to see if there is a single row in the table, * matching the key. Otherwise, limit = 0 - because we want the query to --- 3313,3318 ---- *************** ri_PerformCheck(RI_QueryKey *qkey, SPIPl *** 3369,3375 **** /* Finally we can run the query. */ spi_result = SPI_execute_snapshot(qplan, vals, nulls, ! test_snapshot, crosscheck_snapshot, false, false, limit); /* Restore UID and security context */ --- 3328,3334 ---- /* Finally we can run the query. */ spi_result = SPI_execute_snapshot(qplan, vals, nulls, ! InvalidSnapshot, false, false, limit); /* Restore UID and security context */ diff --git a/src/backend/utils/time/snapmgr.c b/src/backend/utils/time/snapmgr.c index 59a27de..099e67b 100644 *** a/src/backend/utils/time/snapmgr.c --- b/src/backend/utils/time/snapmgr.c *************** *** 27,32 **** --- 27,33 ---- #include "access/transam.h" #include "access/xact.h" + #include "access/multixact.h" #include "storage/proc.h" #include "storage/procarray.h" #include "utils/memutils.h" *************** GetTransactionSnapshot(void) *** 125,130 **** --- 126,141 ---- if (!FirstSnapshotSet) { Assert(RegisteredSnapshots == 0); + + /* + * We must store the oldest visible multi xact *before* taking the + * serializable snapshot. Otherwise HeapSatisfiesLockersVisible in + * heapam.c will be in trouble, since it depends on being able to + * inspect all multi xact ids which might contain xids invisible to + * the serializable snapshot. 
+ */ + if (IsXactIsoLevelSerializable) + MultiXactIdSetOldestVisible(); CurrentSnapshot = GetSnapshotData(&CurrentSnapshotData); FirstSnapshotSet = true; diff --git a/src/backend/utils/time/tqual.c b/src/backend/utils/time/tqual.c index 4f3630c..302a571 100644 *** a/src/backend/utils/time/tqual.c --- b/src/backend/utils/time/tqual.c *************** SnapshotData SnapshotSelfData = {HeapTup *** 72,80 **** SnapshotData SnapshotAnyData = {HeapTupleSatisfiesAny}; SnapshotData SnapshotToastData = {HeapTupleSatisfiesToast}; - /* local functions */ - static bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot); - /* * SetHintBits() --- 72,77 ---- *************** HeapTupleSatisfiesVacuum(HeapTupleHeader *** 1253,1259 **** * by this function. This is OK for current uses, because we actually only * apply this for known-committed XIDs. */ ! static bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot) { uint32 i; --- 1250,1256 ---- * by this function. This is OK for current uses, because we actually only * apply this for known-committed XIDs. */ ! bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot) { uint32 i; diff --git a/src/include/access/heapam.h b/src/include/access/heapam.h index f963146..1d51dfb 100644 *** a/src/include/access/heapam.h --- b/src/include/access/heapam.h *************** extern Oid heap_insert(Relation relation *** 98,112 **** int options, BulkInsertState bistate); extern HTSU_Result heap_delete(Relation relation, ItemPointer tid, ItemPointer ctid, TransactionId *update_xmax, ! CommandId cid, Snapshot crosscheck, bool wait); extern HTSU_Result heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, ItemPointer ctid, TransactionId *update_xmax, ! CommandId cid, Snapshot crosscheck, bool wait); extern HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer *buffer, ItemPointer ctid, TransactionId *update_xmax, CommandId cid, ! LockTupleMode mode, bool nowait); extern void heap_inplace_update(Relation relation, HeapTuple tuple); extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, Buffer buf); --- 98,113 ---- int options, BulkInsertState bistate); extern HTSU_Result heap_delete(Relation relation, ItemPointer tid, ItemPointer ctid, TransactionId *update_xmax, ! CommandId cid, bool wait, Snapshot lockcheck_snapshot); extern HTSU_Result heap_update(Relation relation, ItemPointer otid, HeapTuple newtup, ItemPointer ctid, TransactionId *update_xmax, ! CommandId cid, bool wait, Snapshot lockcheck_snapshot); extern HTSU_Result heap_lock_tuple(Relation relation, HeapTuple tuple, Buffer *buffer, ItemPointer ctid, TransactionId *update_xmax, CommandId cid, ! LockTupleMode mode, bool nowait, ! 
Snapshot lockcheck_snapshot); extern void heap_inplace_update(Relation relation, HeapTuple tuple); extern bool heap_freeze_tuple(HeapTupleHeader tuple, TransactionId cutoff_xid, Buffer buf); diff --git a/src/include/access/multixact.h b/src/include/access/multixact.h index 5910465..4769bca 100644 *** a/src/include/access/multixact.h --- b/src/include/access/multixact.h *************** extern bool MultiXactIdIsCurrent(MultiXa *** 49,54 **** --- 49,55 ---- extern void MultiXactIdWait(MultiXactId multi); extern bool ConditionalMultiXactIdWait(MultiXactId multi); extern void MultiXactIdSetOldestMember(void); + extern void MultiXactIdSetOldestVisible(void); extern int GetMultiXactIdMembers(MultiXactId multi, TransactionId **xids); extern void AtEOXact_MultiXact(void); diff --git a/src/include/executor/execdesc.h b/src/include/executor/execdesc.h index 749b83b..2cccf38 100644 *** a/src/include/executor/execdesc.h --- b/src/include/executor/execdesc.h *************** typedef struct QueryDesc *** 39,45 **** Node *utilitystmt; /* utility statement, or null */ const char *sourceText; /* source text of the query */ Snapshot snapshot; /* snapshot to use for query */ - Snapshot crosscheck_snapshot; /* crosscheck for RI update/delete */ DestReceiver *dest; /* the destination for tuple output */ ParamListInfo params; /* param values being passed in */ int instrument_options; /* OR of InstrumentOption flags */ --- 39,44 ---- *************** typedef struct QueryDesc *** 57,63 **** extern QueryDesc *CreateQueryDesc(PlannedStmt *plannedstmt, const char *sourceText, Snapshot snapshot, - Snapshot crosscheck_snapshot, DestReceiver *dest, ParamListInfo params, int instrument_options); --- 56,61 ---- diff --git a/src/include/executor/spi.h b/src/include/executor/spi.h index 2b05564..541de92 100644 *** a/src/include/executor/spi.h --- b/src/include/executor/spi.h *************** extern int SPI_execp(SPIPlanPtr plan, Da *** 82,88 **** extern int SPI_execute_snapshot(SPIPlanPtr plan, Datum *Values, const char *Nulls, Snapshot snapshot, - Snapshot crosscheck_snapshot, bool read_only, bool fire_triggers, long tcount); extern int SPI_execute_with_args(const char *src, int nargs, Oid *argtypes, --- 82,87 ---- diff --git a/src/include/nodes/execnodes.h b/src/include/nodes/execnodes.h index 7442d2d..86721c8 100644 *** a/src/include/nodes/execnodes.h --- b/src/include/nodes/execnodes.h *************** typedef struct EState *** 337,343 **** /* Basic state for all query types: */ ScanDirection es_direction; /* current scan direction */ Snapshot es_snapshot; /* time qual to use */ - Snapshot es_crosscheck_snapshot; /* crosscheck time qual for RI */ List *es_range_table; /* List of RangeTblEntry */ PlannedStmt *es_plannedstmt; /* link to top of plan tree */ --- 337,342 ---- diff --git a/src/include/utils/tqual.h b/src/include/utils/tqual.h index e85e820..fa386e7 100644 *** a/src/include/utils/tqual.h --- b/src/include/utils/tqual.h *************** extern PGDLLIMPORT SnapshotData Snapshot *** 41,46 **** --- 41,48 ---- #define IsMVCCSnapshot(snapshot) \ ((snapshot)->satisfies == HeapTupleSatisfiesMVCC) + bool XidInMVCCSnapshot(TransactionId xid, Snapshot snapshot); + /* * HeapTupleSatisfiesVisibility * True iff heap tuple satisfies a time qual.
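
Illustrative examples (not part of the patch)
---------------------------------------------

The following psql-style sessions sketch the behavior described by the
mvcc.sgml and executor README changes above, assuming the patch is applied.
The table names, values, and the session labels A and B are made up for
illustration. This first session shows the basic conflict from the
"Serialization Conflicts" table: a serializable UPDATE that runs into a row
concurrently locked by SELECT FOR SHARE is aborted once the locker commits.

    A: BEGIN ISOLATION LEVEL SERIALIZABLE;
    A: SELECT 1;                          -- A's snapshot is taken at its first query
    B: BEGIN;
    B: SELECT * FROM accounts WHERE id = 1 FOR SHARE;
    A: UPDATE accounts SET balance = 0 WHERE id = 1;   -- blocks on B's row lock
    B: COMMIT;
    A: ERROR: could not serialize access due to concurrent update
       -- A is aborted even though B only locked, and never modified, the row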
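
The note added to mvcc.sgml, that serializable transactions are affected by
concurrent SELECT FOR SHARE and SELECT FOR UPDATE "for longer than those locks
are actually held", can be sketched the same way (same hypothetical accounts
table). Because HeapSatisfiesLockersVisible compares the locker against A's
snapshot rather than against the lock itself, the conflict is still reported
after B has released its lock:

    A: BEGIN ISOLATION LEVEL SERIALIZABLE;
    A: SELECT 1;                          -- A's snapshot is taken here
    B: BEGIN;
    B: SELECT * FROM accounts WHERE id = 1 FOR SHARE;
    B: COMMIT;                            -- B's row lock is released
    A: UPDATE accounts SET balance = 0 WHERE id = 1;
    A: ERROR: could not serialize access due to concurrent update
       -- fails without blocking: the committed locker is not visible to A's snapshot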
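
The RI scenario that motivates the README section can be sketched with a
hypothetical parent/child foreign-key pair. B's INSERT makes the RI trigger
take a FOR SHARE lock on the referenced parent row; if A's serializable DELETE
did not treat that lock as a conflict, it would re-check with its old snapshot,
miss the new child row, and leave an orphan behind:

    -- setup, in any session
    CREATE TABLE parent (id int PRIMARY KEY);
    CREATE TABLE child  (id int PRIMARY KEY, parent_id int REFERENCES parent(id));
    INSERT INTO parent VALUES (1);

    A: BEGIN ISOLATION LEVEL SERIALIZABLE;
    A: SELECT count(*) FROM child;        -- A's snapshot is taken here
    B: BEGIN;
    B: INSERT INTO child VALUES (1, 1);   -- RI trigger locks parent row 1 FOR SHARE
    A: DELETE FROM parent WHERE id = 1;   -- blocks on B's FOR SHARE lock
    B: COMMIT;
    A: ERROR: could not serialize access due to concurrent update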
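
Finally, note the asymmetry in the "Serialization Conflicts" table: a
serializable SELECT FOR SHARE passes no lockcheck_snapshot (see the
nodeLockRows.c hunk above), so it conflicts only with an actual concurrent
UPDATE or DELETE, not with a transaction that merely locked the row. Again a
sketch with the hypothetical accounts table:

    A: BEGIN ISOLATION LEVEL SERIALIZABLE;
    A: SELECT 1;
    B: BEGIN;
    B: SELECT * FROM accounts WHERE id = 1 FOR UPDATE;
    A: SELECT * FROM accounts WHERE id = 1 FOR SHARE;  -- blocks on B's lock
    B: COMMIT;                            -- B locked but never modified the row
    A: -- the FOR SHARE lock is granted; had B updated or deleted the row,
       -- A would instead have received the serialization error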