From b07075d68181df37e5d2d2d8f0435ac4c44ef80b Mon Sep 17 00:00:00 2001
From: "Andrei V. Lepikhov"
Date: Tue, 22 Oct 2024 09:02:02 +0400
Subject: [PATCH 1/4] Stabilise incremental sort cost calculation.

Carefully identify the column/expression that represents a path key in the
cost calculation of a specific sort operator. Columns may have different
numbers of distinct values, so the order of columns in a sort operation may
affect the number of comparison function calls.

Sort cost estimation has only the pathkeys as input. Instead of blindly
choosing the first equivalence class member, this patch attempts to find the
expression with the smallest ndistinct value.

TODO: Filtering out EC members that are external to this sort operator is not
a big deal, but it would require passing the underlying relids to the cost
calculation routine, which would change its API. So, for now, we stay as
simple as possible.

Add the number of distinct values, em_ndistinct, to EquivalenceMember. It may
also be used later in group number estimations.
---
 src/backend/optimizer/path/costsize.c        | 72 +++++++++++++++++--
 src/backend/optimizer/path/equivclass.c      |  1 +
 src/include/nodes/pathnodes.h                |  2 +
 .../regress/expected/incremental_sort.out    | 51 +++++++++++++
 src/test/regress/sql/incremental_sort.sql    | 31 ++++++++
 5 files changed, 152 insertions(+), 5 deletions(-)

diff --git a/src/backend/optimizer/path/costsize.c b/src/backend/optimizer/path/costsize.c
index 79991b1980..325acdacbf 100644
--- a/src/backend/optimizer/path/costsize.c
+++ b/src/backend/optimizer/path/costsize.c
@@ -192,6 +192,8 @@ static int32 get_expr_width(PlannerInfo *root, const Node *expr);
 static double relation_byte_size(double tuples, int width);
 static double page_size(double tuples, int width);
 static double get_parallel_divisor(Path *path);
+static EquivalenceMember *identify_sort_ecmember(PlannerInfo *root,
+                                                 EquivalenceClass *ec);
 
 
 /*
@@ -2016,22 +2018,21 @@ cost_incremental_sort(Path *path,
 	 */
 	foreach(l, pathkeys)
 	{
-		PathKey    *key = (PathKey *) lfirst(l);
-		EquivalenceMember *member = (EquivalenceMember *)
-			linitial(key->pk_eclass->ec_members);
+		PathKey    *key = (PathKey *) lfirst(l);
+		EquivalenceMember *em = identify_sort_ecmember(root, key->pk_eclass);
 
 		/*
 		 * Check if the expression contains Var with "varno 0" so that we
 		 * don't call estimate_num_groups in that case.
 		 */
-		if (bms_is_member(0, pull_varnos(root, (Node *) member->em_expr)))
+		if (bms_is_member(0, pull_varnos(root, (Node *) em->em_expr)))
 		{
 			unknown_varno = true;
 			break;
 		}
 
 		/* expression not containing any Vars with "varno 0" */
-		presortedExprs = lappend(presortedExprs, member->em_expr);
+		presortedExprs = lappend(presortedExprs, em->em_expr);
 
 		if (foreach_current_index(l) + 1 >= presorted_keys)
 			break;
@@ -6491,3 +6492,64 @@ compute_gather_rows(Path *path)
 
 	return clamp_row_est(path->rows * get_parallel_divisor(path));
 }
+
+/*
+ * Find a suitable member of the equivalence class.
+ * Walk through the list of EC members and find the one with the minimum
+ * number of distinct values. Cache the estimated ndistinct value in the
+ * em_ndistinct field of each member.
+ */
+static EquivalenceMember *
+identify_sort_ecmember(PlannerInfo *root, EquivalenceClass *ec)
+{
+	EquivalenceMember *candidate = linitial(ec->ec_members);
+
+	if (root == NULL)
+		/* Fast path */
+		return candidate;
+
+	foreach_node(EquivalenceMember, em, ec->ec_members)
+	{
+		VariableStatData vardata;
+
+		if (em->em_ndistinct == 0.)
+			/* Nothing helpful */
+			continue;
+
+		if (em->em_is_child || em->em_is_const || bms_is_empty(em->em_relids) ||
+			bms_is_member(0, em->em_relids))
+		{
+			em->em_ndistinct = 0.;
+			continue;
+		}
+
+		if (em->em_ndistinct < 0.0)
+		{
+			bool		isdefault = true;
+			double		ndist = 0.0;
+
+			/* Check the ndistinct estimate of this member's expression */
+			examine_variable(root, (Node *) em->em_expr, 0, &vardata);
+			if (HeapTupleIsValid(vardata.statsTuple))
+				ndist = get_variable_numdistinct(&vardata, &isdefault);
+			ReleaseVariableStats(vardata);
+
+			if (isdefault)
+			{
+				em->em_ndistinct = 0.;
+				continue;
+			}
+
+			em->em_ndistinct = ndist;
+		}
+
+		Assert(em->em_ndistinct > 0.);
+
+		if (candidate->em_ndistinct == 0. ||
+			em->em_ndistinct < candidate->em_ndistinct)
+			candidate = em;
+	}
+
+	Assert(candidate != NULL);
+	return candidate;
+}
\ No newline at end of file
diff --git a/src/backend/optimizer/path/equivclass.c b/src/backend/optimizer/path/equivclass.c
index 51d806326e..2894919b24 100644
--- a/src/backend/optimizer/path/equivclass.c
+++ b/src/backend/optimizer/path/equivclass.c
@@ -526,6 +526,7 @@ add_eq_member(EquivalenceClass *ec, Expr *expr, Relids relids,
 	em->em_datatype = datatype;
 	em->em_jdomain = jdomain;
 	em->em_parent = parent;
+	em->em_ndistinct = -1.0;
 
 	if (bms_is_empty(relids))
 	{
diff --git a/src/include/nodes/pathnodes.h b/src/include/nodes/pathnodes.h
index 14ccfc1ac1..3cb6a8a44f 100644
--- a/src/include/nodes/pathnodes.h
+++ b/src/include/nodes/pathnodes.h
@@ -1439,6 +1439,8 @@ typedef struct EquivalenceMember
 	JoinDomain *em_jdomain;		/* join domain containing the source clause */
 	/* if em_is_child is true, this links to corresponding EM for top parent */
 	struct EquivalenceMember *em_parent pg_node_attr(read_write_ignore);
+
+	double		em_ndistinct;	/* cached ndistinct: -1 - not computed yet; 0 - default or unknown */
 } EquivalenceMember;
 
 /*
diff --git a/src/test/regress/expected/incremental_sort.out b/src/test/regress/expected/incremental_sort.out
index 5fd54a10b1..77ac213910 100644
--- a/src/test/regress/expected/incremental_sort.out
+++ b/src/test/regress/expected/incremental_sort.out
@@ -1698,3 +1698,54 @@ explain (costs off) select a, b, a <-> point(5, 5) dist from point_table order b
    Order By: (a <-> '(5,5)'::point)
 (6 rows)
 
+-- Check:
+-- commuting the sides of an expression doesn't influence cost estimation
+CREATE TABLE sort_ndist_t1 (x numeric, y numeric);
+CREATE TABLE sort_ndist_t2 (x numeric, y numeric);
+INSERT INTO sort_ndist_t1 (x,y)
+  SELECT gs%10, gs%10 FROM generate_series(1,1E4) AS gs;
+INSERT INTO sort_ndist_t2 (x,y)
+  SELECT gs, gs FROM generate_series(1,1E4) AS gs;
+CREATE INDEX t1_idx ON sort_ndist_t1 (x);
+CREATE INDEX t2_idx ON sort_ndist_t2 (x);
+VACUUM ANALYZE sort_ndist_t1, sort_ndist_t2;
+SET enable_hashjoin = 'off';
+-- With lots of duplicates after the join it is more effective to use a plain
+-- Sort instead of an incremental sort: with a small number of groups we do
+-- the same work as Sort, but pay an extra penalty.
+EXPLAIN (COSTS OFF)
+SELECT t1.x, t1.y FROM sort_ndist_t1 t1, sort_ndist_t2 t2
+WHERE t1.x=t2.x
+ORDER BY t1.x,t1.y;
+                             QUERY PLAN
+--------------------------------------------------------------------
+ Sort
+   Sort Key: t1.x, t1.y
+   ->  Nested Loop
+         ->  Seq Scan on sort_ndist_t1 t1
+         ->  Memoize
+               Cache Key: t1.x
+               Cache Mode: logical
+               ->  Index Only Scan using t2_idx on sort_ndist_t2 t2
+                     Index Cond: (x = t1.x)
+(9 rows)
+
+EXPLAIN (COSTS OFF) -- the plan must be the same as above
+SELECT t1.x, t1.y FROM sort_ndist_t1 t1, sort_ndist_t2 t2
+WHERE t2.x=t1.x
+ORDER BY t1.x,t1.y;
+                             QUERY PLAN
+--------------------------------------------------------------------
+ Sort
+   Sort Key: t1.x, t1.y
+   ->  Nested Loop
+         ->  Seq Scan on sort_ndist_t1 t1
+         ->  Memoize
+               Cache Key: t1.x
+               Cache Mode: logical
+               ->  Index Only Scan using t2_idx on sort_ndist_t2 t2
+                     Index Cond: (x = t1.x)
+(9 rows)
+
+RESET enable_hashjoin;
+DROP TABLE sort_ndist_t1, sort_ndist_t2;
diff --git a/src/test/regress/sql/incremental_sort.sql b/src/test/regress/sql/incremental_sort.sql
index ab471bdfff..1334371e88 100644
--- a/src/test/regress/sql/incremental_sort.sql
+++ b/src/test/regress/sql/incremental_sort.sql
@@ -292,3 +292,34 @@ create index point_table_a_idx on point_table using gist(a);
 -- Ensure we get an incremental sort plan for both of the following queries
 explain (costs off) select a, b, a <-> point(5, 5) dist from point_table order by dist, b limit 1;
 explain (costs off) select a, b, a <-> point(5, 5) dist from point_table order by dist, b desc limit 1;
+
+-- Check:
+-- commuting the sides of an expression doesn't influence cost estimation
+CREATE TABLE sort_ndist_t1 (x numeric, y numeric);
+CREATE TABLE sort_ndist_t2 (x numeric, y numeric);
+
+INSERT INTO sort_ndist_t1 (x,y)
+  SELECT gs%10, gs%10 FROM generate_series(1,1E4) AS gs;
+INSERT INTO sort_ndist_t2 (x,y)
+  SELECT gs, gs FROM generate_series(1,1E4) AS gs;
+CREATE INDEX t1_idx ON sort_ndist_t1 (x);
+CREATE INDEX t2_idx ON sort_ndist_t2 (x);
+VACUUM ANALYZE sort_ndist_t1, sort_ndist_t2;
+
+SET enable_hashjoin = 'off';
+
+-- With lots of duplicates after the join it is more effective to use a plain
+-- Sort instead of an incremental sort: with a small number of groups we do
+-- the same work as Sort, but pay an extra penalty.
+EXPLAIN (COSTS OFF)
+SELECT t1.x, t1.y FROM sort_ndist_t1 t1, sort_ndist_t2 t2
+WHERE t1.x=t2.x
+ORDER BY t1.x,t1.y;
+
+EXPLAIN (COSTS OFF) -- the plan must be the same as above
+SELECT t1.x, t1.y FROM sort_ndist_t1 t1, sort_ndist_t2 t2
+WHERE t2.x=t1.x
+ORDER BY t1.x,t1.y;
+
+RESET enable_hashjoin;
+DROP TABLE sort_ndist_t1, sort_ndist_t2;
-- 
2.47.0
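Reviewer note, not part of the patch: the regression test above intentionally uses COSTS OFF, so it only pins down the plan shape. To look at the estimates themselves, a quick manual check along the following lines can be run against the same tables; the exact cost figures depend on the installation and collected statistics, so this is only an illustrative sketch.

-- Illustrative check only: with the patch applied, the estimated cost of the
-- top Sort node should come out the same regardless of which side of the
-- join qual is written first (EXPLAIN shows costs by default).
SET enable_hashjoin = 'off';

EXPLAIN
SELECT t1.x, t1.y FROM sort_ndist_t1 t1, sort_ndist_t2 t2
WHERE t1.x = t2.x
ORDER BY t1.x, t1.y;

EXPLAIN
SELECT t1.x, t1.y FROM sort_ndist_t1 t1, sort_ndist_t2 t2
WHERE t2.x = t1.x
ORDER BY t1.x, t1.y;

RESET enable_hashjoin;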