/*-------------------------------------------------------------------------
*
* proc.c
* routines to manage per-process shared memory data structure
* Portions Copyright (c) 1996-2014, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
*
*-------------------------------------------------------------------------
*/
/*
* Interface (a):
* ProcSleep(), ProcWakeup(),
* ProcQueueAlloc() -- create a shm queue for sleeping processes
* ProcQueueInit() -- create a queue without allocing memory
 *
 * Waiting for a lock causes the backend to be put to sleep.  Whoever releases
 * the lock wakes the process up again (and gives it an error code so it knows
 * whether it was awoken on an error condition).
*
* Interface (b):
*
* ProcReleaseLocks -- frees the locks associated with current transaction
*
* ProcKill -- destroys the shared memory state (and locks)
 *		associated with the process.
 */
#include "postgres.h"
#include <signal.h>
#include <unistd.h>
#include <sys/time.h>
#include "access/transam.h"
#include "access/twophase.h"
#include "access/xact.h"
#include "postmaster/autovacuum.h"
#include "replication/syncrep.h"
#include "storage/ipc.h"
#include "storage/lmgr.h"
#include "storage/pmsignal.h"
#include "storage/proc.h"
#include "storage/procarray.h"
#include "storage/procsignal.h"
#include "storage/spin.h"
#include "utils/timestamp.h"
/* GUC variables */
int StatementTimeout = 0;
bool log_lock_waits = false;
/* Pointer to this process's PGPROC and PGXACT structs, if any */
PGPROC	   *MyProc = NULL;
PGXACT	   *MyPgXact = NULL;
/*
 * This spinlock protects the freelist of recycled PGPROC structures.
 * We cannot use an LWLock because the LWLock manager depends on already
 * having a PGPROC and a wait semaphore!  But these structures are touched
 * relatively infrequently (only at backend startup or shutdown) and not for
 * very long, so a spinlock is okay.
 */
NON_EXEC_STATIC slock_t *ProcStructLock = NULL;
/* Pointers to shared-memory structures */
PROC_HDR *ProcGlobal = NULL;
NON_EXEC_STATIC PGPROC *AuxiliaryProcs = NULL;
PGPROC *PreparedXactProcs = NULL;
/* If we are waiting for a lock, this points to the associated LOCALLOCK */
static LOCALLOCK *lockAwaited = NULL;
/* Mark this volatile because it can be changed by signal handler */
static volatile DeadLockState deadlock_state = DS_NOT_YET_CHECKED;
static void RemoveProcFromArray(int code, Datum arg);
static void ProcKill(int code, Datum arg);
static void AuxiliaryProcKill(int code, Datum arg);
/*
* Report shared-memory space needed by InitProcGlobal.
*/
Size
ProcGlobalShmemSize(void)
{
Size size = 0;
/* ProcGlobal */
size = add_size(size, sizeof(PROC_HDR));
/* MyProcs, including autovacuum workers and launcher */
size = add_size(size, mul_size(MaxBackends, sizeof(PGPROC)));
/* AuxiliaryProcs */
size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGPROC)));
/* Prepared xacts */
size = add_size(size, mul_size(max_prepared_xacts, sizeof(PGPROC)));
/* ProcStructLock */
size = add_size(size, sizeof(slock_t));
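	/* PGXACTs, one per PGPROC slot estimated above */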
size = add_size(size, mul_size(MaxBackends, sizeof(PGXACT)));
size = add_size(size, mul_size(NUM_AUXILIARY_PROCS, sizeof(PGXACT)));
size = add_size(size, mul_size(max_prepared_xacts, sizeof(PGXACT)));
return size;
}
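
/*
 * Illustrative sketch only (not part of the original file): the estimate
 * computed above is equivalent to the plain expression below, except that
 * add_size()/mul_size() ereport() on overflow instead of silently wrapping.
 * ProcGlobalShmemSizeUnchecked is a hypothetical name.
 */
#ifdef NOT_USED
static Size
ProcGlobalShmemSizeUnchecked(void)
{
	Size		nprocs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;

	return sizeof(PROC_HDR) + sizeof(slock_t) +
		nprocs * (sizeof(PGPROC) + sizeof(PGXACT));
}
#endif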
/*
* Report number of semaphores needed by InitProcGlobal.
*/
int
ProcGlobalSemas(void)
{
/*
* We need a sema per backend (including autovacuum), plus one for each
* auxiliary process.
*/
	return MaxBackends + NUM_AUXILIARY_PROCS;
}
/*
* InitProcGlobal -
* Initialize the global process table during postmaster or standalone
* backend startup.
* We also create all the per-process semaphores we will need to support
* the requested number of backends. We used to allocate semaphores
* only when backends were actually started up, but that is bad because
* it lets Postgres fail under load --- a lot of Unix systems are
* (mis)configured with small limits on the number of semaphores, and
* running out when trying to start another backend is a common failure.
* So, now we grab enough semaphores to support the desired max number
* of backends immediately at initialization --- if the sysadmin has set
* MaxConnections, max_worker_processes, or autovacuum_max_workers higher
* than his kernel will support, he'll find out sooner rather than later.
*
* Another reason for creating semaphores here is that the semaphore
* implementation typically requires us to create semaphores in the
* postmaster, not in backends.
*
* Note: this is NOT called by individual backends under a postmaster,
* not even in the EXEC_BACKEND case. The ProcGlobal and AuxiliaryProcs
 *		pointers must be propagated specially for EXEC_BACKEND operation.
 */
void
InitProcGlobal(void)
{
PGPROC *procs;
PGXACT *pgxacts;
int i,
j;
bool found;
uint32 TotalProcs = MaxBackends + NUM_AUXILIARY_PROCS + max_prepared_xacts;
/* Create the ProcGlobal shared structure */
ProcGlobal = (PROC_HDR *)
ShmemInitStruct("Proc Header", sizeof(PROC_HDR), &found);
Assert(!found);
/*
* Initialize the data structures.
*/
ProcGlobal->spins_per_delay = DEFAULT_SPINS_PER_DELAY;
ProcGlobal->freeProcs = NULL;
ProcGlobal->autovacFreeProcs = NULL;
ProcGlobal->startupProc = NULL;
ProcGlobal->startupProcPid = 0;
ProcGlobal->startupBufferPinWaitBufId = -1;
ProcGlobal->walwriterLatch = NULL;
ProcGlobal->checkpointerLatch = NULL;
	/*
	 * Create and initialize all the PGPROC structures we'll need.  There are
* five separate consumers: (1) normal backends, (2) autovacuum workers
* and the autovacuum launcher, (3) background workers, (4) auxiliary
* processes, and (5) prepared transactions. Each PGPROC structure is
* dedicated to exactly one of these purposes, and they do not move
	 * between groups.
	 */
procs = (PGPROC *) ShmemAlloc(TotalProcs * sizeof(PGPROC));
ProcGlobal->allProcs = procs;
/* XXX allProcCount isn't really all of them; it excludes prepared xacts */
ProcGlobal->allProcCount = MaxBackends + NUM_AUXILIARY_PROCS;
if (!procs)
ereport(FATAL,
(errcode(ERRCODE_OUT_OF_MEMORY),
errmsg("out of shared memory")));
MemSet(procs, 0, TotalProcs * sizeof(PGPROC));
/*
* Also allocate a separate array of PGXACT structures. This is separate
* from the main PGPROC array so that the most heavily accessed data is
* stored contiguously in memory in as few cache lines as possible. This
* provides significant performance benefits, especially on a
* multiprocessor system. There is one PGXACT structure for every PGPROC
* structure.
*/
pgxacts = (PGXACT *) ShmemAlloc(TotalProcs * sizeof(PGXACT));
MemSet(pgxacts, 0, TotalProcs * sizeof(PGXACT));
ProcGlobal->allPgXact = pgxacts;
	for (i = 0; i < TotalProcs; i++)
	{
		/* Common initialization for all PGPROCs, regardless of type. */

		/*
		 * Set up per-PGPROC semaphore, latch, and backendLock. Prepared xact
* dummy PGPROCs don't need these though - they're never associated
* with a real process
*/
if (i < MaxBackends + NUM_AUXILIARY_PROCS)
{
PGSemaphoreCreate(&(procs[i].sem));
InitSharedLatch(&(procs[i].procLatch));
procs[i].backendLock = LWLockAssign();
}
procs[i].pgprocno = i;

		/*
		 * Newly created PGPROCs for normal backends, autovacuum and bgworkers
* must be queued up on the appropriate free list. Because there can
* only ever be a small, fixed number of auxiliary processes, no free
* list is used in that case; InitAuxiliaryProcess() instead uses a
* linear search. PGPROCs for prepared transactions are added to a
* free list by TwoPhaseShmemInit().
*/
if (i < MaxConnections)
{
/* PGPROC for normal backend, add to freeProcs list */
procs[i].links.next = (SHM_QUEUE *) ProcGlobal->freeProcs;
ProcGlobal->freeProcs = &procs[i];
}
else if (i < MaxConnections + autovacuum_max_workers + 1)
{
/* PGPROC for AV launcher/worker, add to autovacFreeProcs list */
procs[i].links.next = (SHM_QUEUE *) ProcGlobal->autovacFreeProcs;
ProcGlobal->autovacFreeProcs = &procs[i];
}
else if (i < MaxBackends)
{
/* PGPROC for bgworker, add to bgworkerFreeProcs list */
procs[i].links.next = (SHM_QUEUE *) ProcGlobal->bgworkerFreeProcs;
ProcGlobal->bgworkerFreeProcs = &procs[i];
}
/* Initialize myProcLocks[] shared memory queues. */
for (j = 0; j < NUM_LOCK_PARTITIONS; j++)
SHMQueueInit(&(procs[i].myProcLocks[j]));
}
	/*
	 * Save pointers to the blocks of PGPROC structures reserved for auxiliary
	 * processes and prepared transactions.
	 */
	AuxiliaryProcs = &procs[MaxBackends];
	PreparedXactProcs = &procs[MaxBackends + NUM_AUXILIARY_PROCS];
/* Create ProcStructLock spinlock, too */
ProcStructLock = (slock_t *) ShmemAlloc(sizeof(slock_t));
	SpinLockInit(ProcStructLock);
}

/*
 * InitProcess -- initialize a per-process data structure for this backend
 */
void
InitProcess(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile PROC_HDR *procglobal = ProcGlobal;
/*
* ProcGlobal should be set up already (if we are a backend, we inherit
	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
	 */
	if (procglobal == NULL)
		elog(PANIC, "proc header uninitialized");

	if (MyProc != NULL)
		elog(ERROR, "you already exist");

	/*
	 * Initialize process-local latch support.  This could fail if the kernel
* is low on resources, and if so we want to exit cleanly before acquiring
* any shared-memory resources.
*/
InitializeLatchSupport();
	/*
	 * Try to get a proc struct from the free list. If this fails, we must be
	 * out of PGPROC structures (not to mention semaphores).
	 *
	 * While we are holding the ProcStructLock, also copy the current shared
	 * estimate of spins_per_delay to local storage.
	 */
SpinLockAcquire(ProcStructLock);
set_spins_per_delay(procglobal->spins_per_delay);
if (IsAnyAutoVacuumProcess())
MyProc = procglobal->autovacFreeProcs;
else if (IsBackgroundWorker)
MyProc = procglobal->bgworkerFreeProcs;
else
MyProc = procglobal->freeProcs;
	if (MyProc != NULL)
	{
if (IsAnyAutoVacuumProcess())
procglobal->autovacFreeProcs = (PGPROC *) MyProc->links.next;
else if (IsBackgroundWorker)
procglobal->bgworkerFreeProcs = (PGPROC *) MyProc->links.next;
else
procglobal->freeProcs = (PGPROC *) MyProc->links.next;
SpinLockRelease(ProcStructLock);
}
else
{
/*
* If we reach here, all the PGPROCs are in use. This is one of the
* possible places to detect "too many backends", so give the standard
* error message. XXX do we need to give a different failure message
		 * in the autovacuum case?
		 */
SpinLockRelease(ProcStructLock);
ereport(FATAL,
(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
errmsg("sorry, too many clients already")));
MyPgXact = &ProcGlobal->allPgXact[MyProc->pgprocno];
/*
* Now that we have a PGPROC, mark ourselves as an active postmaster
* child; this is so that the postmaster can detect it if we exit without
* cleaning up. (XXX autovac launcher currently doesn't participate in
	 * this; it probably should.)
	 */
if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
MarkPostmasterChildActive();
	/*
	 * Initialize all fields of MyProc, except for those previously
	 * initialized by InitProcGlobal.
	 */
SHMQueueElemInit(&(MyProc->links));
MyProc->waitStatus = STATUS_OK;
MyProc->lxid = InvalidLocalTransactionId;
MyProc->fpVXIDLock = false;
MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
MyPgXact->xid = InvalidTransactionId;
MyPgXact->xmin = InvalidTransactionId;
MyProc->pid = MyProcPid;
/* backendId, databaseId and roleId will be filled in later */
MyProc->backendId = InvalidBackendId;
MyProc->databaseId = InvalidOid;
MyProc->roleId = InvalidOid;
MyPgXact->delayChkpt = false;
MyPgXact->vacuumFlags = 0;
/* NB -- autovac launcher intentionally does not set IS_AUTOVACUUM */
if (IsAutoVacuumWorkerProcess())
MyPgXact->vacuumFlags |= PROC_IS_AUTOVACUUM;
MyProc->lwWaiting = false;
MyProc->lwWaitLink = NULL;
MyProc->waitLock = NULL;
MyProc->waitProcLock = NULL;
#ifdef USE_ASSERT_CHECKING
if (assert_enabled)
	{
		int			i;

		/* Last process should have released all locks. */
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
}
#endif
MyProc->recoveryConflictPending = false;
/* Initialize fields for sync rep */
MyProc->waitLSN = 0;
MyProc->syncRepState = SYNC_REP_NOT_WAITING;
SHMQueueElemInit(&(MyProc->syncRepLinks));
/*
* Acquire ownership of the PGPROC's latch, so that we can use WaitLatch.
* Note that there's no particular need to do ResetLatch here.
*/
OwnLatch(&MyProc->procLatch);
	/*
	 * We might be reusing a semaphore that belonged to a failed process. So
	 * be careful and reinitialize its value here.  (This is not strictly
	 * necessary anymore, but seems like a good idea for cleanliness.)
	 */
PGSemaphoreReset(&MyProc->sem);
	/*
	 * Arrange to clean up at backend exit.
	 */
on_shmem_exit(ProcKill, 0);
/*
* Now that we have a PGPROC, we could try to acquire locks, so initialize
* the deadlock checker.
*/
	InitDeadLockChecking();
}
/*
* InitProcessPhase2 -- make MyProc visible in the shared ProcArray.
*
* This is separate from InitProcess because we can't acquire LWLocks until
* we've created a PGPROC, but in the EXEC_BACKEND case ProcArrayAdd won't
* work until after we've done CreateSharedMemoryAndSemaphores.
*/
void
InitProcessPhase2(void)
{
Assert(MyProc != NULL);
/*
* Add our PGPROC to the PGPROC array in shared memory.
*/
ProcArrayAdd(MyProc);
/*
* Arrange to clean that up at backend exit.
*/
on_shmem_exit(RemoveProcFromArray, 0);
}
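
/*
 * Hedged illustration (not part of the original file): the split between
 * InitProcess() and InitProcessPhase2() means a backend's startup path looks
 * roughly like the sketch below.  BackendStartupSketch is a hypothetical
 * name, and the middle step stands in for whatever work (notably
 * CreateSharedMemoryAndSemaphores() in the EXEC_BACKEND case) must happen
 * between the two calls.
 */
#ifdef NOT_USED
static void
BackendStartupSketch(void)
{
	InitProcess();				/* acquire a PGPROC, semaphore, and latch */

	/* ... attach to shared memory, finish local setup, etc ... */

	InitProcessPhase2();		/* advertise ourselves in the shared ProcArray */
}
#endif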
/*
 * InitAuxiliaryProcess -- create a per-auxiliary-process data structure
 *
* This is called by bgwriter and similar processes so that they will have a
* MyProc value that's real enough to let them wait for LWLocks. The PGPROC
* and sema that are assigned are one of the extra ones created during
* InitProcGlobal.
* Auxiliary processes are presently not expected to wait for real (lockmgr)
* locks, so we need not set up the deadlock checker. They are never added
* to the ProcArray or the sinval messaging mechanism, either. They also
* don't get a VXID assigned, since this is only useful when we actually
* hold lockmgr locks.
*
* Startup process however uses locks but never waits for them in the
* normal backend sense. Startup process also takes part in sinval messaging
* as a sendOnly process, so never reads messages from sinval queue. So
* Startup process does have a VXID and does show up in pg_locks.
*/
void
InitAuxiliaryProcess(void)
{
PGPROC *auxproc;
int proctype;
	/*
	 * ProcGlobal should be set up already (if we are a backend, we inherit
	 * this by fork() or EXEC_BACKEND mechanism from the postmaster).
	 */
	if (ProcGlobal == NULL || AuxiliaryProcs == NULL)
		elog(PANIC, "proc header uninitialized");

	if (MyProc != NULL)
		elog(ERROR, "you already exist");

	/*
	 * Initialize process-local latch support.  This could fail if the kernel
* is low on resources, and if so we want to exit cleanly before acquiring
* any shared-memory resources.
*/
InitializeLatchSupport();
	/*
	 * We use the ProcStructLock to protect assignment and releasing of
* AuxiliaryProcs entries.
* While we are holding the ProcStructLock, also copy the current shared
* estimate of spins_per_delay to local storage.
*/
SpinLockAcquire(ProcStructLock);
set_spins_per_delay(ProcGlobal->spins_per_delay);
	/*
	 * Find a free auxproc ... *big* trouble if there isn't one ...
	 */
	for (proctype = 0; proctype < NUM_AUXILIARY_PROCS; proctype++)
	{
		auxproc = &AuxiliaryProcs[proctype];
if (auxproc->pid == 0)
break;
}
if (proctype >= NUM_AUXILIARY_PROCS)
{
SpinLockRelease(ProcStructLock);
		elog(FATAL, "all AuxiliaryProcs are in use");
	}

/* Mark auxiliary proc as in use by me */
/* use volatile pointer to prevent code rearrangement */
((volatile PGPROC *) auxproc)->pid = MyProcPid;
MyProc = auxproc;
MyPgXact = &ProcGlobal->allPgXact[auxproc->pgprocno];
SpinLockRelease(ProcStructLock);
	/*
	 * Initialize all fields of MyProc, except for those previously
* initialized by InitProcGlobal.
*/
SHMQueueElemInit(&(MyProc->links));
MyProc->waitStatus = STATUS_OK;
MyProc->lxid = InvalidLocalTransactionId;
MyProc->fpVXIDLock = false;
MyProc->fpLocalTransactionId = InvalidLocalTransactionId;
MyPgXact->xid = InvalidTransactionId;
MyPgXact->xmin = InvalidTransactionId;
MyProc->backendId = InvalidBackendId;
MyProc->databaseId = InvalidOid;
MyProc->roleId = InvalidOid;
MyPgXact->delayChkpt = false;
MyPgXact->vacuumFlags = 0;
MyProc->lwWaiting = false;
MyProc->lwWaitLink = NULL;
MyProc->waitLock = NULL;
MyProc->waitProcLock = NULL;
#ifdef USE_ASSERT_CHECKING
if (assert_enabled)
	{
		int			i;

		/* Last process should have released all locks. */
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
}
#endif
/*
* Acquire ownership of the PGPROC's latch, so that we can use WaitLatch.
* Note that there's no particular need to do ResetLatch here.
*/
OwnLatch(&MyProc->procLatch);
	/*
	 * We might be reusing a semaphore that belonged to a failed process. So
	 * be careful and reinitialize its value here.  (This is not strictly
	 * necessary anymore, but seems like a good idea for cleanliness.)
	 */
PGSemaphoreReset(&MyProc->sem);
/*
* Arrange to clean up at process exit.
*/
	on_shmem_exit(AuxiliaryProcKill, Int32GetDatum(proctype));
}
/*
* Record the PID and PGPROC structures for the Startup process, for use in
* ProcSendSignal(). See comments there for further explanation.
*/
void
PublishStartupProcessInformation(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile PROC_HDR *procglobal = ProcGlobal;
SpinLockAcquire(ProcStructLock);
procglobal->startupProc = MyProc;
procglobal->startupProcPid = MyProcPid;
SpinLockRelease(ProcStructLock);
}
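
/*
 * Hedged sketch (not part of the original file): a consumer such as
 * ProcSendSignal() can use the fields published above roughly as below.
 * Since the startup process is an auxiliary process and is not in the
 * ProcArray, it must be recognized by PID here rather than looked up there.
 * StartupSignalSketch is a hypothetical name.
 */
#ifdef NOT_USED
static void
StartupSignalSketch(int pid)
{
	PGPROC	   *proc = NULL;

	if (pid == ProcGlobal->startupProcPid)
		proc = ProcGlobal->startupProc;

	if (proc != NULL)
		PGSemaphoreUnlock(&proc->sem);
}
#endif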
/*
 * Used from bufmgr to share the value of the buffer that Startup waits on,
 * or to reset the value to "not waiting" (-1). This allows processing
 * of recovery conflicts for buffer pins. The set is made before backends look
 * at this value, so locking is not required, especially since the set is
 * an atomic integer set operation.
*/
void
SetStartupBufferPinWaitBufId(int bufid)
{
/* use volatile pointer to prevent code rearrangement */
volatile PROC_HDR *procglobal = ProcGlobal;
procglobal->startupBufferPinWaitBufId = bufid;
}
/*
* Used by backends when they receive a request to check for buffer pin waits.
*/
int
GetStartupBufferPinWaitBufId(void)
{
/* use volatile pointer to prevent code rearrangement */
volatile PROC_HDR *procglobal = ProcGlobal;
	return procglobal->startupBufferPinWaitBufId;
}
/*
* Check whether there are at least N free PGPROC objects.
*
* Note: this is designed on the assumption that N will generally be small.
*/
bool
HaveNFreeProcs(int n)
{
PGPROC *proc;
/* use volatile pointer to prevent code rearrangement */
volatile PROC_HDR *procglobal = ProcGlobal;
SpinLockAcquire(ProcStructLock);
proc = procglobal->freeProcs;
	while (n > 0 && proc != NULL)
	{
		proc = (PGPROC *) proc->links.next;
		n--;
}
SpinLockRelease(ProcStructLock);
return (n <= 0);
}
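
/*
 * Hedged usage sketch (not part of the original file): connection startup
 * code can use HaveNFreeProcs() to hold back some slots for superusers,
 * along the lines below.  CheckReservedSlotsSketch is a hypothetical name,
 * and ReservedBackends is assumed to be the configured reservation count.
 */
#ifdef NOT_USED
static void
CheckReservedSlotsSketch(bool am_superuser)
{
	if (!am_superuser && ReservedBackends > 0 &&
		!HaveNFreeProcs(ReservedBackends))
		ereport(FATAL,
				(errcode(ERRCODE_TOO_MANY_CONNECTIONS),
				 errmsg("remaining connection slots are reserved")));
}
#endif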
/*
* Check if the current process is awaiting a lock.
*/
bool
IsWaitingForLock(void)
{
if (lockAwaited == NULL)
return false;
return true;
}
/*
 * Cancel any pending wait for lock, when aborting a transaction, and revert
 * any strong lock count acquisition for a lock being acquired.
 *
 * (Normally, this would only happen if we accept a cancel/die
 * interrupt while waiting; but an ereport(ERROR) before or during the lock
 * wait is within the realm of possibility, too.)
 */
void
LockErrorCleanup(void)
{
	LWLock	   *partitionLock;
	DisableTimeoutParams timeouts[2];

AbortStrongLockAcquire();
/* Nothing to do if we weren't waiting for a lock */
if (lockAwaited == NULL)
return;
/*
* Turn off the deadlock and lock timeout timers, if they are still
* running (see ProcSleep). Note we must preserve the LOCK_TIMEOUT
* indicator flag, since this function is executed before
* ProcessInterrupts when responding to SIGINT; else we'd lose the
* knowledge that the SIGINT came from a lock timeout and not an external
* source.
*/
timeouts[0].id = DEADLOCK_TIMEOUT;
timeouts[0].keep_indicator = false;
timeouts[1].id = LOCK_TIMEOUT;
timeouts[1].keep_indicator = true;
disable_timeouts(timeouts, 2);
/* Unlink myself from the wait queue, if on it (might not be anymore!) */
partitionLock = LockHashPartitionLock(lockAwaited->hashcode);
LWLockAcquire(partitionLock, LW_EXCLUSIVE);
if (MyProc->links.next != NULL)
{
/* We could not have been granted the lock yet */
RemoveFromWaitQueue(MyProc, lockAwaited->hashcode);
}
else
{
/*
* Somebody kicked us off the lock queue already. Perhaps they
* granted us the lock, or perhaps they detected a deadlock. If they
* did grant us the lock, we'd better remember it in our local lock
		 * table.
		 */
		if (MyProc->waitStatus == STATUS_OK)
GrantAwaitedLock();
}
lockAwaited = NULL;
LWLockRelease(partitionLock);
	/*
	 * We used to do PGSemaphoreReset() here to ensure that our proc's wait
	 * semaphore is reset to zero. This prevented a leftover wakeup signal
	 * from remaining in the semaphore if someone else had granted us the lock
	 * we wanted before we were able to remove ourselves from the wait-list.
	 * However, now that ProcSleep loops until waitStatus changes, a leftover
	 * wakeup signal isn't harmful, and it seems not worth expending cycles to
	 * get rid of a signal that most likely isn't there.
	 */
}


/*
 * ProcReleaseLocks() -- release locks associated with current transaction
* at main transaction commit or abort
* At main transaction commit, we release standard locks except session locks.
* At main transaction abort, we release all locks including session locks.
* Advisory locks are released only if they are transaction-level;
* session-level holds remain, whether this is a commit or not.
*
* At subtransaction commit, we don't release any locks (so this func is not
* needed at all); we will defer the releasing to the parent transaction.
* At subtransaction abort, we release all locks held by the subtransaction;
* this is implemented by retail releasing of the locks under control of
 * the ResourceOwner mechanism.
 */
void
ProcReleaseLocks(bool isCommit)
{
	if (!MyProc)
return;
	/* If waiting, get off wait queue (should only be needed after error) */
	LockErrorCleanup();

	/* Release standard locks, including session-level if aborting */
	LockReleaseAll(DEFAULT_LOCKMETHOD, !isCommit);

	/* Release transaction-level advisory locks */
	LockReleaseAll(USER_LOCKMETHOD, false);
}
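
/*
 * Hedged illustration (not part of the original file): main-transaction
 * cleanup is expected to call ProcReleaseLocks() with isCommit reflecting how
 * the transaction ended, roughly as sketched below.  FinishTransactionSketch
 * is a hypothetical name.
 */
#ifdef NOT_USED
static void
FinishTransactionSketch(bool committed)
{
	/* ... write the commit or abort record, run callbacks, etc ... */

	/* regular locks go away; session locks survive only on commit */
	ProcReleaseLocks(committed);

	/* ... remaining resources are released via the ResourceOwner machinery ... */
}
#endif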
/*
* RemoveProcFromArray() -- Remove this process from the shared ProcArray.
*/
static void
RemoveProcFromArray(int code, Datum arg)
{
Assert(MyProc != NULL);
	ProcArrayRemove(MyProc, InvalidTransactionId);
}
/*
* ProcKill() -- Destroy the per-proc data structure for
 *		this process. Release any of its held LW locks.
 */
static void
ProcKill(int code, Datum arg)
{
/* use volatile pointer to prevent code rearrangement */
volatile PROC_HDR *procglobal = ProcGlobal;
PGPROC *proc;
Assert(MyProc != NULL);
/* Make sure we're out of the sync rep lists */
SyncRepCleanupAtProcExit();
#ifdef USE_ASSERT_CHECKING
if (assert_enabled)
	{
		int			i;

		/* Last process should have released all locks. */
for (i = 0; i < NUM_LOCK_PARTITIONS; i++)
Assert(SHMQueueEmpty(&(MyProc->myProcLocks[i])));
}
#endif
	/*
	 * Release any LW locks I am holding.  There really shouldn't be any, but
	 * it's cheap to check again before we cut the knees off the LWLock
	 * facility by releasing our PGPROC ...
	 */
LWLockReleaseAll();
/* Make sure active replication slots are released */
if (MyReplicationSlot != NULL)
ReplicationSlotRelease();
/*
* Clear MyProc first; then disown the process latch. This is so that
* signal handlers won't try to clear the process latch after it's no
* longer ours.
*/
proc = MyProc;
MyProc = NULL;
DisownLatch(&proc->procLatch);
SpinLockAcquire(ProcStructLock);
/* Return PGPROC structure (and semaphore) to appropriate freelist */
if (IsAnyAutoVacuumProcess())
{
proc->links.next = (SHM_QUEUE *) procglobal->autovacFreeProcs;
procglobal->autovacFreeProcs = proc;
}
	else if (IsBackgroundWorker)
	{
		proc->links.next = (SHM_QUEUE *) procglobal->bgworkerFreeProcs;
		procglobal->bgworkerFreeProcs = proc;
	}
else
{
proc->links.next = (SHM_QUEUE *) procglobal->freeProcs;
procglobal->freeProcs = proc;
}
/* Update shared estimate of spins_per_delay */
procglobal->spins_per_delay = update_spins_per_delay(procglobal->spins_per_delay);
SpinLockRelease(ProcStructLock);
/*
* This process is no longer present in shared memory in any meaningful
* way, so tell the postmaster we've cleaned up acceptably well. (XXX
	 * autovac launcher should be included here someday)
	 */
if (IsUnderPostmaster && !IsAutoVacuumLauncherProcess())
MarkPostmasterChildInactive();
/* wake autovac launcher if needed -- see comments in FreeWorkerInfo */
if (AutovacuumLauncherPid != 0)
kill(AutovacuumLauncherPid, SIGUSR2);
}
/*
* AuxiliaryProcKill() -- Cut-down version of ProcKill for auxiliary
* processes (bgwriter, etc). The PGPROC and sema are not released, only
* marked as not-in-use.
*/
static void
AuxiliaryProcKill(int code, Datum arg)
{
	int			proctype = DatumGetInt32(arg);
	PGPROC	   *auxproc PG_USED_FOR_ASSERTS_ONLY;
	PGPROC	   *proc;

Assert(proctype >= 0 && proctype < NUM_AUXILIARY_PROCS);
auxproc = &AuxiliaryProcs[proctype];
Assert(MyProc == auxproc);
/* Release any LW locks I am holding (see notes above) */
LWLockReleaseAll();
/*
* Clear MyProc first; then disown the process latch. This is so that
* signal handlers won't try to clear the process latch after it's no
* longer ours.
*/
proc = MyProc;
MyProc = NULL;
DisownLatch(&proc->procLatch);
SpinLockAcquire(ProcStructLock);
/* Mark auxiliary proc no longer in use */
proc->pid = 0;
/* Update shared estimate of spins_per_delay */
ProcGlobal->spins_per_delay = update_spins_per_delay(ProcGlobal->spins_per_delay);
	SpinLockRelease(ProcStructLock);
}
/*
* ProcQueue package: routines for putting processes to sleep
* and waking them up
*/
/*
* ProcQueueAlloc -- alloc/attach to a shared memory process queue
*
* Returns: a pointer to the queue
 * Side Effects: Initializes the queue if it wasn't there before
 */
#ifdef NOT_USED
PROC_QUEUE *
ProcQueueAlloc(const char *name)
{
	PROC_QUEUE *queue;
bool found;
queue = (PROC_QUEUE *)
ShmemInitStruct(name, sizeof(PROC_QUEUE), &found);
if (!found)
ProcQueueInit(queue);

	return queue;
}
#endif
/*
* ProcQueueInit -- initialize a shared memory process queue
*/
void
ProcQueueInit(PROC_QUEUE *queue)
{
	SHMQueueInit(&(queue->links));
	queue->size = 0;
}
/*
 * ProcSleep -- put a process to sleep on the specified lock
 *
* Caller must have set MyProc->heldLocks to reflect locks already held
* on the lockable object by this process (under all XIDs).
* The lock table's partition lock must be held at entry, and will be held
* at exit.
* Result: STATUS_OK if we acquired the lock, STATUS_ERROR if not (deadlock).
* ASSUME: that no one will fiddle with the queue until after
* we release the partition lock.
*
* NOTES: The process queue is now a priority queue for locking.
*
* P() on the semaphore should put us to sleep. The process
 * semaphore is normally zero, so when we try to acquire it, we sleep.
 */
int
ProcSleep(LOCALLOCK *locallock, LockMethod lockMethodTable)
{
LOCKMODE lockmode = locallock->tag.mode;
LOCK *lock = locallock->lock;
PROCLOCK *proclock = locallock->proclock;
uint32 hashcode = locallock->hashcode;
LWLock *partitionLock = LockHashPartitionLock(hashcode);
PROC_QUEUE *waitQueue = &(lock->waitProcs);
LOCKMASK myHeldLocks = MyProc->heldLocks;
bool early_deadlock = false;
	int			myWaitStatus;
	PGPROC	   *proc;
	int			i;
/*
* Determine where to add myself in the wait queue.
*
* Normally I should go at the end of the queue. However, if I already
* hold locks that conflict with the request of any previous waiter, put
* myself in the queue just in front of the first such waiter. This is not
* a necessary step, since deadlock detection would move me to before that
* waiter anyway; but it's relatively cheap to detect such a conflict
* immediately, and avoid delaying till deadlock timeout.
* Special case: if I find I should go in front of some waiter, check to
* see if I conflict with already-held locks or the requests before that
* waiter. If not, then just grant myself the requested lock immediately.
* This is the same as the test for immediate grant in LockAcquire, except
* we are only considering the part of the wait queue before my insertion
* point.
*/
	if (myHeldLocks != 0)
	{
		LOCKMASK	aheadRequests = 0;

		proc = (PGPROC *) waitQueue->links.next;
		for (i = 0; i < waitQueue->size; i++)
		{
			/* Must he wait for me? */
			if (lockMethodTable->conflictTab[proc->waitLockMode] & myHeldLocks)
			{
				/* Must I wait for him ? */
				if (lockMethodTable->conflictTab[lockmode] & proc->heldLocks)
				{
					/*
					 * Yes, so we have a deadlock.  Easiest way to clean up
* correctly is to call RemoveFromWaitQueue(), but we