From 72cdf2044056b138e7f7a4a513d34e48d7f22961 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut
Date: Wed, 7 Aug 2024 08:44:08 +0200
Subject: [PATCH] Remove TRACE_SORT macro

The TRACE_SORT macro guarded the availability of the trace_sort GUC
setting.  But it has been enabled by default ever since it was
introduced in PostgreSQL 8.1, and there have been no reports that
someone wanted to disable it.  So just remove the macro to simplify
things.
---
 doc/src/sgml/config.sgml                   |  3 --
 src/backend/utils/adt/mac.c                |  6 ----
 src/backend/utils/adt/network.c            |  6 ----
 src/backend/utils/adt/numeric.c            |  6 ----
 src/backend/utils/adt/uuid.c               |  6 ----
 src/backend/utils/adt/varlena.c            |  4 ---
 src/backend/utils/misc/guc_tables.c        |  2 --
 src/backend/utils/sort/tuplesort.c         | 39 ----------------------
 src/backend/utils/sort/tuplesortvariants.c | 14 --------
 src/include/pg_config_manual.h             |  6 ----
 src/include/utils/guc.h                    |  3 --
 11 files changed, 95 deletions(-)

diff --git a/doc/src/sgml/config.sgml b/doc/src/sgml/config.sgml
index a1a1d58a436..2937384b001 100644
--- a/doc/src/sgml/config.sgml
+++ b/doc/src/sgml/config.sgml
@@ -11711,9 +11711,6 @@ Developer Options
         If on, emit information about resource usage during sort operations.
-        This parameter is only available if the TRACE_SORT macro
-        was defined when PostgreSQL was compiled.
-        (However, TRACE_SORT is currently defined by default.)

diff --git a/src/backend/utils/adt/mac.c b/src/backend/utils/adt/mac.c
index ae4caedef50..ce0fbe7a49b 100644
--- a/src/backend/utils/adt/mac.c
+++ b/src/backend/utils/adt/mac.c
@@ -430,13 +430,11 @@ macaddr_abbrev_abort(int memtupcount, SortSupport ssup)
      */
     if (abbr_card > 100000.0)
     {
-#ifdef TRACE_SORT
         if (trace_sort)
             elog(LOG,
                  "macaddr_abbrev: estimation ends at cardinality %f"
                  " after " INT64_FORMAT " values (%d rows)",
                  abbr_card, uss->input_count, memtupcount);
-#endif
         uss->estimating = false;
         return false;
     }
@@ -449,23 +447,19 @@ macaddr_abbrev_abort(int memtupcount, SortSupport ssup)
      */
     if (abbr_card < uss->input_count / 2000.0 + 0.5)
     {
-#ifdef TRACE_SORT
         if (trace_sort)
             elog(LOG,
                  "macaddr_abbrev: aborting abbreviation at cardinality %f"
                  " below threshold %f after " INT64_FORMAT " values (%d rows)",
                  abbr_card, uss->input_count / 2000.0 + 0.5,
                  uss->input_count, memtupcount);
-#endif
         return true;
     }

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG,
              "macaddr_abbrev: cardinality %f after " INT64_FORMAT " values (%d rows)",
              abbr_card, uss->input_count, memtupcount);
-#endif

     return false;
 }
diff --git a/src/backend/utils/adt/network.c b/src/backend/utils/adt/network.c
index 640fc37dc83..450dacd031c 100644
--- a/src/backend/utils/adt/network.c
+++ b/src/backend/utils/adt/network.c
@@ -503,13 +503,11 @@ network_abbrev_abort(int memtupcount, SortSupport ssup)
      */
     if (abbr_card > 100000.0)
     {
-#ifdef TRACE_SORT
         if (trace_sort)
             elog(LOG,
                  "network_abbrev: estimation ends at cardinality %f"
                  " after " INT64_FORMAT " values (%d rows)",
                  abbr_card, uss->input_count, memtupcount);
-#endif
         uss->estimating = false;
         return false;
     }
@@ -522,23 +520,19 @@ network_abbrev_abort(int memtupcount, SortSupport ssup)
      */
     if (abbr_card < uss->input_count / 2000.0 + 0.5)
     {
-#ifdef TRACE_SORT
         if (trace_sort)
             elog(LOG,
                  "network_abbrev: aborting abbreviation at cardinality %f"
                  " below threshold %f after " INT64_FORMAT " values (%d rows)",
                  abbr_card, uss->input_count / 2000.0 + 0.5,
                  uss->input_count, memtupcount);
-#endif
         return true;
     }

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG,
              "network_abbrev: cardinality %f after " INT64_FORMAT " values (%d rows)",
              abbr_card, uss->input_count, memtupcount);
-#endif

     return false;
 }
diff --git a/src/backend/utils/adt/numeric.c b/src/backend/utils/adt/numeric.c
index d0f09237100..763a7f4be0f 100644
--- a/src/backend/utils/adt/numeric.c
+++ b/src/backend/utils/adt/numeric.c
@@ -2127,13 +2127,11 @@ numeric_abbrev_abort(int memtupcount, SortSupport ssup)
      */
     if (abbr_card > 100000.0)
     {
-#ifdef TRACE_SORT
         if (trace_sort)
             elog(LOG,
                  "numeric_abbrev: estimation ends at cardinality %f"
                  " after " INT64_FORMAT " values (%d rows)",
                  abbr_card, nss->input_count, memtupcount);
-#endif
         nss->estimating = false;
         return false;
     }
@@ -2149,24 +2147,20 @@ numeric_abbrev_abort(int memtupcount, SortSupport ssup)
      */
     if (abbr_card < nss->input_count / 10000.0 + 0.5)
     {
-#ifdef TRACE_SORT
         if (trace_sort)
             elog(LOG,
                  "numeric_abbrev: aborting abbreviation at cardinality %f"
                  " below threshold %f after " INT64_FORMAT " values (%d rows)",
                  abbr_card, nss->input_count / 10000.0 + 0.5,
                  nss->input_count, memtupcount);
-#endif
         return true;
     }

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG,
              "numeric_abbrev: cardinality %f"
              " after " INT64_FORMAT " values (%d rows)",
              abbr_card, nss->input_count, memtupcount);
-#endif

     return false;
 }
diff --git a/src/backend/utils/adt/uuid.c b/src/backend/utils/adt/uuid.c
index 45eb1b2fea9..5284d23dcc4 100644
--- a/src/backend/utils/adt/uuid.c
+++ b/src/backend/utils/adt/uuid.c
@@ -307,13 +307,11 @@ uuid_abbrev_abort(int memtupcount, SortSupport ssup)
      */
     if (abbr_card > 100000.0)
     {
-#ifdef TRACE_SORT
         if (trace_sort)
             elog(LOG,
                  "uuid_abbrev: estimation ends at cardinality %f"
                  " after " INT64_FORMAT " values (%d rows)",
                  abbr_card, uss->input_count, memtupcount);
-#endif
         uss->estimating = false;
         return false;
     }
@@ -326,23 +324,19 @@ uuid_abbrev_abort(int memtupcount, SortSupport ssup)
      */
     if (abbr_card < uss->input_count / 2000.0 + 0.5)
     {
-#ifdef TRACE_SORT
         if (trace_sort)
             elog(LOG,
                  "uuid_abbrev: aborting abbreviation at cardinality %f"
                  " below threshold %f after " INT64_FORMAT " values (%d rows)",
                  abbr_card, uss->input_count / 2000.0 + 0.5,
                  uss->input_count, memtupcount);
-#endif
         return true;
     }

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG,
              "uuid_abbrev: cardinality %f after " INT64_FORMAT " values (%d rows)",
              abbr_card, uss->input_count, memtupcount);
-#endif

     return false;
 }
diff --git a/src/backend/utils/adt/varlena.c b/src/backend/utils/adt/varlena.c
index 52ab8c43c66..4f9a676c939 100644
--- a/src/backend/utils/adt/varlena.c
+++ b/src/backend/utils/adt/varlena.c
@@ -2455,7 +2455,6 @@ varstr_abbrev_abort(int memtupcount, SortSupport ssup)
      * time there are differences within full key strings not captured in
      * abbreviations.
      */
-#ifdef TRACE_SORT
     if (trace_sort)
     {
         double      norm_abbrev_card = abbrev_distinct / (double) memtupcount;
@@ -2465,7 +2464,6 @@ varstr_abbrev_abort(int memtupcount, SortSupport ssup)
              memtupcount, abbrev_distinct, key_distinct, norm_abbrev_card,
              sss->prop_card);
     }
-#endif

     /*
      * If the number of distinct abbreviated keys approximately matches the
@@ -2527,12 +2525,10 @@ varstr_abbrev_abort(int memtupcount, SortSupport ssup)
      * of moderately high to high abbreviated cardinality. There is little to
      * lose but much to gain, which our strategy reflects.
      */
-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG,
              "varstr_abbrev: aborted abbreviation at %d "
              "(abbrev_distinct: %f, key_distinct: %f, prop_card: %f)",
              memtupcount, abbrev_distinct, key_distinct, sss->prop_card);
-#endif

     return true;
 }
diff --git a/src/backend/utils/misc/guc_tables.c b/src/backend/utils/misc/guc_tables.c
index c0a52cdcc3e..92aa537442c 100644
--- a/src/backend/utils/misc/guc_tables.c
+++ b/src/backend/utils/misc/guc_tables.c
@@ -1698,7 +1698,6 @@ struct config_bool ConfigureNamesBool[] =
         NULL, NULL, NULL
     },

-#ifdef TRACE_SORT
     {
         {"trace_sort", PGC_USERSET, DEVELOPER_OPTIONS,
             gettext_noop("Emit information about resource usage in sorting."),
@@ -1709,7 +1708,6 @@ struct config_bool ConfigureNamesBool[] =
         false,
         NULL, NULL, NULL
     },
-#endif

 #ifdef TRACE_SYNCSCAN
     /* this is undocumented because not exposed in a standard build */
diff --git a/src/backend/utils/sort/tuplesort.c b/src/backend/utils/sort/tuplesort.c
index b0bd3b366bb..e2a0bfcffa7 100644
--- a/src/backend/utils/sort/tuplesort.c
+++ b/src/backend/utils/sort/tuplesort.c
@@ -121,9 +121,7 @@
     ALLOCSET_SEPARATE_THRESHOLD / sizeof(SortTuple) + 1)

 /* GUC variables */
-#ifdef TRACE_SORT
 bool        trace_sort = false;
-#endif

 #ifdef DEBUG_BOUNDED_SORT
 bool        optimize_bounded_sort = true;
@@ -337,9 +335,7 @@ struct Tuplesortstate
     /*
      * Resource snapshot for time of sort start.
      */
-#ifdef TRACE_SORT
     PGRUsage    ru_start;
-#endif
 };

 /*
@@ -685,10 +681,8 @@ tuplesort_begin_common(int workMem, SortCoordinate coordinate, int sortopt)

     state = (Tuplesortstate *) palloc0(sizeof(Tuplesortstate));

-#ifdef TRACE_SORT
     if (trace_sort)
         pg_rusage_init(&state->ru_start);
-#endif

     state->base.sortopt = sortopt;
     state->base.tuples = true;
@@ -904,22 +898,16 @@ tuplesort_free(Tuplesortstate *state)
 {
     /* context swap probably not needed, but let's be safe */
     MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);
-
-#ifdef TRACE_SORT
     int64       spaceUsed;

     if (state->tapeset)
         spaceUsed = LogicalTapeSetBlocks(state->tapeset);
     else
         spaceUsed = (state->allowedMem - state->availMem + 1023) / 1024;
-#endif

     /*
      * Delete temporary "tape" files, if any.
      *
-     * Note: want to include this in reported total cost of sort, hence need
-     * for two #ifdef TRACE_SORT sections.
-     *
      * We don't bother to destroy the individual tapes here. They will go away
      * with the sortcontext. (In TSS_FINALMERGE state, we have closed
      * finished tapes already.)
@@ -927,7 +915,6 @@ tuplesort_free(Tuplesortstate *state)
     if (state->tapeset)
         LogicalTapeSetClose(state->tapeset);

-#ifdef TRACE_SORT
     if (trace_sort)
     {
         if (state->tapeset)
@@ -941,14 +928,6 @@ tuplesort_free(Tuplesortstate *state)
     }

     TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, spaceUsed);
-#else
-
-    /*
-     * If you disabled TRACE_SORT, you can still probe sort__done, but you
-     * ain't getting space-used stats.
-     */
-    TRACE_POSTGRESQL_SORT_DONE(state->tapeset != NULL, 0L);
-#endif

     FREESTATE(state);
     MemoryContextSwitchTo(oldcontext);
@@ -1263,12 +1242,10 @@ tuplesort_puttuple_common(Tuplesortstate *state, SortTuple *tuple,
             (state->memtupcount > state->bound * 2 ||
              (state->memtupcount > state->bound && LACKMEM(state))))
         {
-#ifdef TRACE_SORT
             if (trace_sort)
                 elog(LOG, "switching to bounded heapsort at %d tuples: %s",
                      state->memtupcount,
                      pg_rusage_show(&state->ru_start));
-#endif
             make_bounded_heap(state);
             MemoryContextSwitchTo(oldcontext);
             return;
@@ -1387,11 +1364,9 @@ tuplesort_performsort(Tuplesortstate *state)
 {
     MemoryContext oldcontext = MemoryContextSwitchTo(state->base.sortcontext);

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG, "performsort of worker %d starting: %s",
              state->worker, pg_rusage_show(&state->ru_start));
-#endif

     switch (state->status)
     {
@@ -1470,7 +1445,6 @@ tuplesort_performsort(Tuplesortstate *state)
             break;
     }

-#ifdef TRACE_SORT
     if (trace_sort)
     {
         if (state->status == TSS_FINALMERGE)
@@ -1481,7 +1455,6 @@ tuplesort_performsort(Tuplesortstate *state)
             elog(LOG, "performsort of worker %d done: %s",
                  state->worker, pg_rusage_show(&state->ru_start));
     }
-#endif

     MemoryContextSwitchTo(oldcontext);
 }
@@ -1905,11 +1878,9 @@ inittapes(Tuplesortstate *state, bool mergeruns)
         state->maxTapes = MINORDER;
     }

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG, "worker %d switching to external sort with %d tapes: %s",
              state->worker, state->maxTapes, pg_rusage_show(&state->ru_start));
-#endif

     /* Create the tape set */
     inittapestate(state, state->maxTapes);
@@ -2118,11 +2089,9 @@ mergeruns(Tuplesortstate *state)
      */
     state->tape_buffer_mem = state->availMem;
     USEMEM(state, state->tape_buffer_mem);
-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG, "worker %d using %zu KB of memory for tape buffers",
              state->worker, state->tape_buffer_mem / 1024);
-#endif

     for (;;)
     {
@@ -2167,12 +2136,10 @@ mergeruns(Tuplesortstate *state)
                                                     state->nInputRuns,
                                                     state->maxTapes);

-#ifdef TRACE_SORT
         if (trace_sort)
             elog(LOG, "starting merge pass of %d input runs on %d tapes, " INT64_FORMAT " KB of memory for each input tape: %s",
                  state->nInputRuns, state->nInputTapes, input_buffer_size / 1024,
                  pg_rusage_show(&state->ru_start));
-#endif

         /* Prepare the new input tapes for merge pass. */
         for (tapenum = 0; tapenum < state->nInputTapes; tapenum++)
@@ -2378,12 +2345,10 @@ dumptuples(Tuplesortstate *state, bool alltuples)

     state->currentRun++;

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG, "worker %d starting quicksort of run %d: %s",
              state->worker, state->currentRun,
              pg_rusage_show(&state->ru_start));
-#endif

     /*
      * Sort all tuples accumulated within the allowed amount of memory for
@@ -2391,12 +2356,10 @@ dumptuples(Tuplesortstate *state, bool alltuples)
      */
     tuplesort_sort_memtuples(state);

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG, "worker %d finished quicksort of run %d: %s",
              state->worker, state->currentRun,
              pg_rusage_show(&state->ru_start));
-#endif

     memtupwrite = state->memtupcount;
     for (i = 0; i < memtupwrite; i++)
@@ -2426,12 +2389,10 @@ dumptuples(Tuplesortstate *state, bool alltuples)

     markrunend(state->destTape);

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG, "worker %d finished writing run %d to tape %d: %s",
              state->worker, state->currentRun,
              (state->currentRun - 1) % state->nOutputTapes + 1,
              pg_rusage_show(&state->ru_start));
-#endif
 }

 /*
diff --git a/src/backend/utils/sort/tuplesortvariants.c b/src/backend/utils/sort/tuplesortvariants.c
index 05a853caa36..558309c9850 100644
--- a/src/backend/utils/sort/tuplesortvariants.c
+++ b/src/backend/utils/sort/tuplesortvariants.c
@@ -181,12 +181,10 @@ tuplesort_begin_heap(TupleDesc tupDesc,

     Assert(nkeys > 0);

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG,
              "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
              nkeys, workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif

     base->nKeys = nkeys;

@@ -258,13 +256,11 @@ tuplesort_begin_cluster(TupleDesc tupDesc,
     oldcontext = MemoryContextSwitchTo(base->maincontext);
     arg = (TuplesortClusterArg *) palloc0(sizeof(TuplesortClusterArg));

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG,
              "begin tuple sort: nkeys = %d, workMem = %d, randomAccess = %c",
              RelationGetNumberOfAttributes(indexRel),
              workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif

     base->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel);

@@ -368,13 +364,11 @@ tuplesort_begin_index_btree(Relation heapRel,
     oldcontext = MemoryContextSwitchTo(base->maincontext);
     arg = (TuplesortIndexBTreeArg *) palloc(sizeof(TuplesortIndexBTreeArg));

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG,
              "begin index sort: unique = %c, workMem = %d, randomAccess = %c",
              enforceUnique ? 't' : 'f',
              workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif

     base->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel);

@@ -452,7 +446,6 @@ tuplesort_begin_index_hash(Relation heapRel,
     oldcontext = MemoryContextSwitchTo(base->maincontext);
     arg = (TuplesortIndexHashArg *) palloc(sizeof(TuplesortIndexHashArg));

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG,
              "begin index sort: high_mask = 0x%x, low_mask = 0x%x, "
@@ -462,7 +455,6 @@ tuplesort_begin_index_hash(Relation heapRel,
              max_buckets,
              workMem,
              sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif

     base->nKeys = 1;            /* Only one sort column, the hash code */

@@ -503,12 +495,10 @@ tuplesort_begin_index_gist(Relation heapRel,
     oldcontext = MemoryContextSwitchTo(base->maincontext);
     arg = (TuplesortIndexBTreeArg *) palloc(sizeof(TuplesortIndexBTreeArg));

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG,
              "begin index sort: workMem = %d, randomAccess = %c",
              workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif

     base->nKeys = IndexRelationGetNumberOfKeyAttributes(indexRel);

@@ -560,13 +550,11 @@ tuplesort_begin_index_brin(int workMem,
                                                    sortopt);
     TuplesortPublic *base = TuplesortstateGetPublic(state);

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG,
              "begin index sort: workMem = %d, randomAccess = %c",
              workMem,
              sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif

     base->nKeys = 1;            /* Only one sort column, the block number */

@@ -596,12 +584,10 @@ tuplesort_begin_datum(Oid datumType, Oid sortOperator, Oid sortCollation,
     oldcontext = MemoryContextSwitchTo(base->maincontext);
     arg = (TuplesortDatumArg *) palloc(sizeof(TuplesortDatumArg));

-#ifdef TRACE_SORT
     if (trace_sort)
         elog(LOG,
              "begin datum sort: workMem = %d, randomAccess = %c",
              workMem, sortopt & TUPLESORT_RANDOMACCESS ? 't' : 'f');
-#endif

     base->nKeys = 1;            /* always a one-column sort */

diff --git a/src/include/pg_config_manual.h b/src/include/pg_config_manual.h
index b769030d8fa..e799c2989b8 100644
--- a/src/include/pg_config_manual.h
+++ b/src/include/pg_config_manual.h
@@ -358,12 +358,6 @@
  */
 /* #define WAL_DEBUG */

-/*
- * Enable tracing of resource consumption during sort operations;
- * see also the trace_sort GUC var. For 8.1 this is enabled by default.
- */
-#define TRACE_SORT 1
-
 /*
  * Enable tracing of syncscan operations (see also the trace_syncscan GUC var).
  */
diff --git a/src/include/utils/guc.h b/src/include/utils/guc.h
index 4129ea37eec..b7dd403fb24 100644
--- a/src/include/utils/guc.h
+++ b/src/include/utils/guc.h
@@ -294,10 +294,7 @@ extern PGDLLIMPORT int tcp_user_timeout;
 extern PGDLLIMPORT char *role_string;
 extern PGDLLIMPORT bool in_hot_standby_guc;

-
-#ifdef TRACE_SORT
 extern PGDLLIMPORT bool trace_sort;
-#endif

 #ifdef DEBUG_BOUNDED_SORT
 extern PGDLLIMPORT bool optimize_bounded_sort;
-- 
2.46.0
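
For anyone who wants to see the now always-available trace_sort output while reviewing this, here is a minimal psql sketch (not part of the patch, and the row count and LIMIT are arbitrary). It assumes a default configuration; the exact LOG lines you get ("begin tuple sort ...", "performsort ... starting/done", possibly "switching to bounded heapsort ...") depend on which sort path the executor takes.

    -- Enable trace_sort for this session.  LOG output goes to the server log;
    -- lowering client_min_messages to 'log' also echoes it to the client.
    SET client_min_messages = log;
    SET trace_sort = on;

    -- Any query that actually performs a sort will do.
    SELECT i
    FROM generate_series(1, 100000) AS g(i)
    ORDER BY i DESC
    LIMIT 10;

    SET trace_sort = off;

Index builds go through the tuplesort_begin_index_* paths touched above and emit the same kind of LOG lines, so CREATE INDEX is another easy way to exercise this.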