diff --git a/contrib/file_fdw/file_fdw.c b/contrib/file_fdw/file_fdw.c
index e890770..f84a01f 100644
*** a/contrib/file_fdw/file_fdw.c
--- b/contrib/file_fdw/file_fdw.c
***************
*** 20,25 ****
--- 20,26 ----
#include "commands/copy.h"
#include "commands/defrem.h"
#include "commands/explain.h"
+ #include "commands/vacuum.h"
#include "foreign/fdwapi.h"
#include "foreign/foreign.h"
#include "miscadmin.h"
*************** static void fileBeginForeignScan(Foreign
*** 123,128 ****
--- 124,132 ----
static TupleTableSlot *fileIterateForeignScan(ForeignScanState *node);
static void fileReScanForeignScan(ForeignScanState *node);
static void fileEndForeignScan(ForeignScanState *node);
+ static void fileAnalyzeForeignTable(Relation onerel,
+ VacuumStmt *vacstmt,
+ int elevel);
/*
* Helper functions
*************** static void estimate_size(PlannerInfo *r
*** 136,142 ****
static void estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
FileFdwPlanState *fdw_private,
Cost *startup_cost, Cost *total_cost);
!
/*
* Foreign-data wrapper handler function: return a struct with pointers
--- 140,149 ----
static void estimate_costs(PlannerInfo *root, RelOptInfo *baserel,
FileFdwPlanState *fdw_private,
Cost *startup_cost, Cost *total_cost);
! static int acquire_sample_rows(Relation onerel,
! HeapTuple *rows, int targrows,
! double *totalrows, double *totaldeadrows,
! BlockNumber *totalpages, int elevel);
/*
* Foreign-data wrapper handler function: return a struct with pointers
*************** file_fdw_handler(PG_FUNCTION_ARGS)
*** 155,160 ****
--- 162,168 ----
fdwroutine->IterateForeignScan = fileIterateForeignScan;
fdwroutine->ReScanForeignScan = fileReScanForeignScan;
fdwroutine->EndForeignScan = fileEndForeignScan;
+ fdwroutine->AnalyzeForeignTable = fileAnalyzeForeignTable;
PG_RETURN_POINTER(fdwroutine);
}
*************** estimate_size(PlannerInfo *root, RelOptI
*** 662,693 ****
double nrows;
/*
! * Get size of the file. It might not be there at plan time, though, in
! * which case we have to use a default estimate.
*/
! if (stat(fdw_private->filename, &stat_buf) < 0)
! stat_buf.st_size = 10 * BLCKSZ;
! /*
! * Convert size to pages for use in I/O cost estimate later.
! */
! pages = (stat_buf.st_size + (BLCKSZ - 1)) / BLCKSZ;
! if (pages < 1)
! pages = 1;
! fdw_private->pages = pages;
! /*
! * Estimate the number of tuples in the file. We back into this estimate
! * using the planner's idea of the relation width; which is bogus if not
! * all columns are being read, not to mention that the text representation
! * of a row probably isn't the same size as its internal representation.
! * FIXME later.
! */
! tuple_width = MAXALIGN(baserel->width) + MAXALIGN(sizeof(HeapTupleHeaderData));
! ntuples = clamp_row_est((double) stat_buf.st_size / (double) tuple_width);
fdw_private->ntuples = ntuples;
/*
--- 670,716 ----
double nrows;
/*
! * If pg_class already has statistics for the relation, use them as is.
! * Otherwise, estimate them from the file size and average tuple width.
*/
! if (baserel->pages > 0)
! {
! pages = baserel->pages;
! ntuples = baserel->tuples;
! }
! else
! {
! /*
! * Get size of the file. It might not be there at plan time, though,
! * in which case we have to use a default estimate.
! */
! if (stat(fdw_private->filename, &stat_buf) < 0)
! stat_buf.st_size = 10 * BLCKSZ;
! /*
! * Convert size to pages for use in I/O cost estimate later.
! */
! pages = (stat_buf.st_size + (BLCKSZ - 1)) / BLCKSZ;
! if (pages < 1)
! pages = 1;
! /*
! * Estimate the number of tuples in the file. We back into this
! * estimate using the planner's idea of the relation width; which is
! * bogus if not all columns are being read, not to mention that the
! * text representation of a row probably isn't the same size as its
! * internal representation. FIXME later.
! */
! tuple_width = MAXALIGN(baserel->width) +
! MAXALIGN(sizeof(HeapTupleHeaderData));
! ntuples = clamp_row_est((double) stat_buf.st_size /
! (double) tuple_width);
! }
+ /* Pass estimates to subsequent functions via FileFdwPlanState. */
+ fdw_private->pages = pages;
fdw_private->ntuples = ntuples;
/*
*************** estimate_size(PlannerInfo *root, RelOptI
*** 709,714 ****
--- 732,747 ----
}
/*
+ * fileAnalyzeForeignTable
+ * Analyze foreign table
+ */
+ static void
+ fileAnalyzeForeignTable(Relation onerel, VacuumStmt *vacstmt, int elevel)
+ {
+ do_analyze_rel(onerel, vacstmt, elevel, false, acquire_sample_rows);
+ }
+
+ /*
* Estimate costs of scanning a foreign table.
*
* Results are returned in *startup_cost and *total_cost.
*************** estimate_costs(PlannerInfo *root, RelOpt
*** 736,738 ****
--- 769,957 ----
run_cost += cpu_per_tuple * ntuples;
*total_cost = *startup_cost + run_cost;
}
+
+ /*
+ * acquire_sample_rows -- acquire a random sample of rows from the table
+ *
+ * Selected rows are returned in the caller-allocated array rows[], which must
+ * have at least targrows entries. The actual number of rows selected is
+ * returned as the function result. We also count the number of valid rows in
+ * the table, and return it into *totalrows.
+ *
+ * The returned list of tuples is in order by physical position in the table.
+ * (We will rely on this later to derive correlation estimates.)
+ */
+ static int
+ acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
+ double *totalrows, double *totaldeadrows,
+ BlockNumber *totalpages, int elevel)
+ {
+ int numrows = 0;
+ int invalrows = 0; /* total # rows violating
+ the NOT NULL constraints */
+ double validrows = 0; /* total # rows collected */
+ double rowstoskip = -1; /* -1 means not set yet */
+ double rstate;
+ HeapTuple tuple;
+ TupleDesc tupDesc;
+ TupleConstr *constr;
+ int natts;
+ int attrChk;
+ Datum *values;
+ bool *nulls;
+ bool found;
+ bool sample_it = false;
+ char *filename;
+ struct stat stat_buf;
+ List *options;
+ CopyState cstate;
+ ErrorContextCallback errcontext;
+
+ Assert(onerel);
+ Assert(targrows > 0);
+
+ tupDesc = RelationGetDescr(onerel);
+ constr = tupDesc->constr;
+ natts = tupDesc->natts;
+ values = (Datum *) palloc(tupDesc->natts * sizeof(Datum));
+ nulls = (bool *) palloc(tupDesc->natts * sizeof(bool));
+
+ /* Fetch options of foreign table */
+ fileGetOptions(RelationGetRelid(onerel), &filename, &options);
+
+ /*
+ * Get size of the file.
+ */
+ if (stat(filename, &stat_buf) < 0)
+ ereport(ERROR,
+ (errcode_for_file_access(),
+ errmsg("could not stat file \"%s\": %m",
+ filename)));
+
+ /*
+ * Convert size to pages for use in I/O cost estimate.
+ */
+ *totalpages = (stat_buf.st_size + (BLCKSZ - 1)) / BLCKSZ;
+ if (*totalpages < 1)
+ *totalpages = 1;
+
+ /*
+ * Create CopyState from FDW options. We always acquire all columns, so
+ * as to match the expected ScanTupleSlot signature.
+ */
+ cstate = BeginCopyFrom(onerel, filename, NIL, options);
+
+ /* Prepare for sampling rows */
+ rstate = init_selection_state(targrows);
+
+ /* Set up callback to identify error line number. */
+ errcontext.callback = CopyFromErrorCallback;
+ errcontext.arg = (void *) cstate;
+ errcontext.previous = error_context_stack;
+ error_context_stack = &errcontext;
+
+ for (;;)
+ {
+ sample_it = true;
+
+ /*
+ * Check for user-requested abort.
+ */
+ CHECK_FOR_INTERRUPTS();
+
+ found = NextCopyFrom(cstate, NULL, values, nulls, NULL);
+
+ if (!found)
+ break;
+
+ tuple = heap_form_tuple(tupDesc, values, nulls);
+
+ if (constr && constr->has_not_null)
+ {
+ for (attrChk = 1; attrChk <= natts; attrChk++)
+ {
+ if (onerel->rd_att->attrs[attrChk - 1]->attnotnull &&
+ heap_attisnull(tuple, attrChk))
+ {
+ sample_it = false;
+ break;
+ }
+ }
+ }
+
+ if (!sample_it)
+ {
+ invalrows += 1;
+ heap_freetuple(tuple);
+ continue;
+ }
+
+ /*
+ * The first targrows sample rows are simply copied into the
+ * reservoir. Then we start replacing tuples in the sample
+ * until we reach the end of the relation. This algorithm is
+ * from Jeff Vitter's paper (see the full citation in analyze.c). It
+ * works by repeatedly computing the number of tuples to skip
+ * before selecting a tuple, which replaces a randomly chosen
+ * element of the reservoir (current set of tuples). At all
+ * times the reservoir is a true random sample of the tuples
+ * we've passed over so far, so when we fall off the end of
+ * the relation we're done.
+ */
+ if (numrows < targrows)
+ rows[numrows++] = heap_copytuple(tuple);
+ else
+ {
+ /*
+ * t in Vitter's paper is the number of records already
+ * processed. If we need to compute a new S value, we
+ * must use the not-yet-incremented value of validrows as
+ * t.
+ */
+ if (rowstoskip < 0)
+ rowstoskip = get_next_S(validrows, targrows, &rstate);
+
+ if (rowstoskip <= 0)
+ {
+ /*
+ * Found a suitable tuple, so save it, replacing one
+ * old tuple at random
+ */
+ int k = (int) (targrows * random_fract());
+
+ Assert(k >= 0 && k < targrows);
+ heap_freetuple(rows[k]);
+ rows[k] = heap_copytuple(tuple);
+ }
+
+ rowstoskip -= 1;
+ }
+
+ validrows += 1;
+ heap_freetuple(tuple);
+ }
+
+ /* Remove error callback. */
+ error_context_stack = errcontext.previous;
+
+ *totalrows = validrows;
+ *totaldeadrows = 0;
+
+ EndCopyFrom(cstate);
+
+ pfree(values);
+ pfree(nulls);
+
+ /*
+ * Emit some interesting relation info
+ */
+ ereport(elevel,
+ (errmsg("\"%s\": scanned, "
+ "containing %d valid rows and %d invalid rows; "
+ "%d rows in sample, %d total rows",
+ RelationGetRelationName(onerel),
+ (int) validrows, invalrows,
+ numrows, (int) *totalrows)));
+
+ return numrows;
+ }
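
The sampling loop above follows Vitter's reservoir technique: the first targrows tuples fill the reservoir, after which each later tuple replaces a randomly chosen slot with steadily decreasing probability. Purely for comparison, here is a minimal self-contained sketch of the simpler Algorithm R variant in plain C. It is not the backend code: it uses rand() instead of random_fract()/get_next_S() (i.e. it does not implement Vitter's skip-based Algorithm Z) and samples integers rather than HeapTuples.

#include <stdio.h>
#include <stdlib.h>
#include <time.h>

#define TARGROWS 5

int
main(void)
{
	int		reservoir[TARGROWS];
	long	seen = 0;
	int		value;

	srand((unsigned) time(NULL));

	/* Pretend each iteration reads one more row from the data source. */
	for (value = 0; value < 1000; value++)
	{
		if (seen < TARGROWS)
		{
			/* The first TARGROWS rows simply fill the reservoir. */
			reservoir[seen] = value;
		}
		else
		{
			/* Keep this row with probability TARGROWS / (seen + 1). */
			long	k = rand() % (seen + 1);

			if (k < TARGROWS)
				reservoir[k] = value;	/* replace a random slot */
		}
		seen++;
	}

	for (int i = 0; i < TARGROWS; i++)
		printf("sample[%d] = %d\n", i, reservoir[i]);

	return 0;
}

At all times the reservoir is a true random sample of the rows seen so far, which is exactly the property the FDW sampling functions rely on.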
diff --git a/contrib/file_fdw/input/file_fdw.source b/contrib/file_fdw/input/file_fdw.source
index 8e3d553..21b6fb4 100644
*** a/contrib/file_fdw/input/file_fdw.source
--- b/contrib/file_fdw/input/file_fdw.source
*************** EXECUTE st(100);
*** 111,116 ****
--- 111,121 ----
EXECUTE st(100);
DEALLOCATE st;
+ -- statistics collection tests
+ ANALYZE agg_csv;
+ SELECT relpages, reltuples FROM pg_class WHERE relname = 'agg_csv';
+ SELECT * FROM pg_stats WHERE tablename = 'agg_csv';
+
-- tableoid
SELECT tableoid::regclass, b FROM agg_csv;
diff --git a/contrib/file_fdw/output/file_fdw.source b/contrib/file_fdw/output/file_fdw.source
index 84f0750..fe0d67f 100644
*** a/contrib/file_fdw/output/file_fdw.source
--- b/contrib/file_fdw/output/file_fdw.source
*************** EXECUTE st(100);
*** 174,179 ****
--- 174,194 ----
(1 row)
DEALLOCATE st;
+ -- statistics collection tests
+ ANALYZE agg_csv;
+ SELECT relpages, reltuples FROM pg_class WHERE relname = 'agg_csv';
+ relpages | reltuples
+ ----------+-----------
+ 1 | 3
+ (1 row)
+
+ SELECT * FROM pg_stats WHERE tablename = 'agg_csv';
+ schemaname | tablename | attname | inherited | null_frac | avg_width | n_distinct | most_common_vals | most_common_freqs | histogram_bounds | correlation
+ ------------+-----------+---------+-----------+-----------+-----------+------------+------------------+-------------------+-------------------------+-------------
+ public | agg_csv | a | f | 0 | 2 | -1 | | | {0,42,100} | -0.5
+ public | agg_csv | b | f | 0 | 4 | -1 | | | {0.09561,99.097,324.78} | 0.5
+ (2 rows)
+
-- tableoid
SELECT tableoid::regclass, b FROM agg_csv;
tableoid | b
diff --git a/contrib/pgsql_fdw/deparse.c b/contrib/pgsql_fdw/deparse.c
index 6f8f753..f4a422c 100644
*** a/contrib/pgsql_fdw/deparse.c
--- b/contrib/pgsql_fdw/deparse.c
*************** typedef struct foreign_executable_cxt
*** 46,53 ****
* Get string representation which can be used in SQL statement from a node.
*/
static void deparseExpr(StringInfo buf, Expr *expr, PlannerInfo *root);
! static void deparseRelation(StringInfo buf, Oid relid, PlannerInfo *root,
! bool need_prefix);
static void deparseVar(StringInfo buf, Var *node, PlannerInfo *root,
bool need_prefix);
static void deparseConst(StringInfo buf, Const *node, PlannerInfo *root);
--- 46,52 ----
* Get string representation which can be used in SQL statement from a node.
*/
static void deparseExpr(StringInfo buf, Expr *expr, PlannerInfo *root);
! static void deparseRelation(StringInfo buf, Oid relid, bool need_prefix);
static void deparseVar(StringInfo buf, Var *node, PlannerInfo *root,
bool need_prefix);
static void deparseConst(StringInfo buf, Const *node, PlannerInfo *root);
*************** static bool is_builtin(Oid procid);
*** 77,89 ****
* Deparse query representation into SQL statement which suits for remote
* PostgreSQL server. This function basically creates simple query string
* which consists of only SELECT, FROM clauses.
*/
void
deparseSimpleSql(StringInfo buf,
Oid relid,
PlannerInfo *root,
! RelOptInfo *baserel,
! ForeignTable *table)
{
StringInfoData foreign_relname;
bool first;
--- 76,90 ----
* Deparse query representation into SQL statement which suits for remote
* PostgreSQL server. This function basically creates simple query string
* which consists of only SELECT, FROM clauses.
+ *
+ * Parameters root and baserel are optional; when either of them is NULL,
+ * the SELECT clause is simply "SELECT *".
*/
void
deparseSimpleSql(StringInfo buf,
Oid relid,
PlannerInfo *root,
! RelOptInfo *baserel)
{
StringInfoData foreign_relname;
bool first;
*************** deparseSimpleSql(StringInfo buf,
*** 101,107 ****
* evaluated on local, can be replaced with literal "NULL" in the SELECT
* clause to reduce overhead of tuple handling tuple and data transfer.
*/
! if (baserel->baserestrictinfo != NIL)
{
ListCell *lc;
--- 102,108 ----
* evaluated on local, can be replaced with literal "NULL" in the SELECT
* clause to reduce overhead of tuple handling tuple and data transfer.
*/
! if (baserel != NULL)
{
ListCell *lc;
*************** deparseSimpleSql(StringInfo buf,
*** 134,179 ****
* function requires entries for dropped columns. Such entries must be
* initialized with NULL before calling tuple constructor.
*/
! appendStringInfo(buf, "SELECT ");
! attr_used = list_union(attr_used, baserel->reltargetlist);
! first = true;
! for (attr = 1; attr <= baserel->max_attr; attr++)
{
! RangeTblEntry *rte = root->simple_rte_array[baserel->relid];
! Var *var = NULL;
! ListCell *lc;
! /* Ignore dropped attributes. */
! if (get_rte_attribute_is_dropped(rte, attr))
! continue;
! if (!first)
! appendStringInfo(buf, ", ");
! first = false;
! /*
! * We use linear search here, but it wouldn't be problem since
! * attr_used seems to not become so large.
! */
! foreach (lc, attr_used)
! {
! var = lfirst(lc);
! if (var->varattno == attr)
! break;
! var = NULL;
}
! if (var != NULL)
! deparseVar(buf, var, root, false);
! else
! appendStringInfo(buf, "NULL");
}
- appendStringInfoChar(buf, ' ');
/*
* deparse FROM clause, including alias if any
*/
appendStringInfo(buf, "FROM ");
! deparseRelation(buf, table->relid, root, true);
elog(DEBUG3, "Remote SQL: %s", buf->data);
}
--- 135,187 ----
* function requires entries for dropped columns. Such entries must be
* initialized with NULL before calling tuple constructor.
*/
! if (root == NULL || baserel == NULL)
{
! appendStringInfo(buf, "SELECT * ");
! }
! else
! {
! appendStringInfo(buf, "SELECT ");
! attr_used = list_union(attr_used, baserel->reltargetlist);
! first = true;
! for (attr = 1; attr <= baserel->max_attr; attr++)
! {
! RangeTblEntry *rte = root->simple_rte_array[baserel->relid];
! Var *var = NULL;
! ListCell *lc;
! /* Ignore dropped attributes. */
! if (get_rte_attribute_is_dropped(rte, attr))
! continue;
! if (!first)
! appendStringInfo(buf, ", ");
! first = false;
! /*
! * We use a linear search here, but it shouldn't be a problem since
! * attr_used is not expected to grow very large.
! */
! foreach (lc, attr_used)
! {
! var = lfirst(lc);
! if (var->varattno == attr)
! break;
! var = NULL;
! }
! if (var != NULL)
! deparseVar(buf, var, root, false);
! else
! appendStringInfo(buf, "NULL");
}
! appendStringInfoChar(buf, ' ');
}
/*
* deparse FROM clause, including alias if any
*/
appendStringInfo(buf, "FROM ");
! deparseRelation(buf, relid, true);
elog(DEBUG3, "Remote SQL: %s", buf->data);
}
*************** sortConditions(PlannerInfo *root,
*** 219,224 ****
--- 227,298 ----
}
/*
+ * Deparse SELECT statement to acquire sample rows of given relation into buf.
+ */
+ void
+ deparseAnalyzeSql(StringInfo buf, Relation rel)
+ {
+ Oid relid = RelationGetRelid(rel);
+ TupleDesc tupdesc = RelationGetDescr(rel);
+ int i;
+ char *colname;
+ List *options;
+ ListCell *lc;
+ bool first = true;
+ char *nspname;
+ char *relname;
+ ForeignTable *table;
+
+ /* Deparse SELECT clause, use attribute name or colname option. */
+ appendStringInfo(buf, "SELECT ");
+ for (i = 0; i < tupdesc->natts; i++)
+ {
+ if (tupdesc->attrs[i]->attisdropped)
+ continue;
+
+ colname = NameStr(tupdesc->attrs[i]->attname);
+ options = GetForeignColumnOptions(relid, tupdesc->attrs[i]->attnum);
+
+ foreach(lc, options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "colname") == 0)
+ {
+ colname = defGetString(def);
+ break;
+ }
+ }
+
+ if (!first)
+ appendStringInfo(buf, ", ");
+ appendStringInfo(buf, "%s", quote_identifier(colname));
+ first = false;
+ }
+
+ /*
+ * Deparse FROM clause, using the namespace and relation name, or the
+ * nspname and relname options respectively if they are set.
+ */
+ nspname = get_namespace_name(get_rel_namespace(relid));
+ relname = get_rel_name(relid);
+ table = GetForeignTable(relid);
+ foreach(lc, table->options)
+ {
+ DefElem *def = (DefElem *) lfirst(lc);
+
+ if (strcmp(def->defname, "nspname") == 0)
+ nspname = defGetString(def);
+ else if (strcmp(def->defname, "relname") == 0)
+ relname = defGetString(def);
+ }
+
+ appendStringInfo(buf, " FROM %s.%s", quote_identifier(nspname),
+ quote_identifier(relname));
+ }
+
+
+ /*
* Deparse given expression into buf. Actual string operation is delegated to
* node-type-specific functions.
*
*************** deparseVar(StringInfo buf,
*** 353,387 ****
* option overrides schema name.
*/
static void
! deparseRelation(StringInfo buf,
! Oid relid,
! PlannerInfo *root,
! bool need_prefix)
{
- int i;
- RangeTblEntry *rte = NULL;
ListCell *lc;
const char *nspname = NULL; /* plain namespace name */
const char *relname = NULL; /* plain relation name */
const char *q_nspname; /* quoted namespace name */
const char *q_relname; /* quoted relation name */
- /* Find RangeTblEntry for the relation from PlannerInfo. */
- for (i = 1; i < root->simple_rel_array_size; i++)
- {
- if (root->simple_rte_array[i]->relid == relid)
- {
- rte = root->simple_rte_array[i];
- break;
- }
- }
- if (rte == NULL)
- elog(ERROR, "relation with OID %u is not used in the query", relid);
-
/* If target is a foreign table, obtain additional catalog information. */
! if (rte->relkind == RELKIND_FOREIGN_TABLE)
{
! ForeignTable *table = GetForeignTable(rte->relid);
/*
* Use value of FDW options if any, instead of the name of object
--- 427,444 ----
* option overrides schema name.
*/
static void
! deparseRelation(StringInfo buf, Oid relid, bool need_prefix)
{
ListCell *lc;
const char *nspname = NULL; /* plain namespace name */
const char *relname = NULL; /* plain relation name */
const char *q_nspname; /* quoted namespace name */
const char *q_relname; /* quoted relation name */
/* If target is a foreign table, obtain additional catalog information. */
! if (get_rel_relkind(relid) == RELKIND_FOREIGN_TABLE)
{
! ForeignTable *table = GetForeignTable(relid);
/*
* Use value of FDW options if any, instead of the name of object
diff --git a/contrib/pgsql_fdw/expected/pgsql_fdw.out b/contrib/pgsql_fdw/expected/pgsql_fdw.out
index 8e50614..7a6001b 100644
*** a/contrib/pgsql_fdw/expected/pgsql_fdw.out
--- b/contrib/pgsql_fdw/expected/pgsql_fdw.out
*************** INSERT INTO "S 1"."T 2"
*** 75,80 ****
--- 75,81 ----
'AAA' || to_char(id, 'FM000')
FROM generate_series(1, 100) id;
COMMIT;
+ ANALYZE;
-- ===================================================================
-- tests for pgsql_fdw_validator
-- ===================================================================
*************** ALTER FOREIGN TABLE ft2 ALTER COLUMN c1
*** 169,174 ****
--- 170,198 ----
(2 rows)
-- ===================================================================
+ -- ANALYZE
+ -- ===================================================================
+ ANALYZE ft1;
+ ANALYZE ft2;
+ SELECT relpages, reltuples from pg_class where oid = 'ft1'::regclass;
+ relpages | reltuples
+ ----------+-----------
+ 0 | 1000
+ (1 row)
+
+ SELECT staattnum, count(*) FROM pg_statistic WHERE starelid = 'ft1'::regclass GROUP BY staattnum ORDER BY staattnum;
+ staattnum | count
+ -----------+-------
+ 2 | 1
+ 3 | 1
+ 4 | 1
+ 5 | 1
+ 6 | 1
+ 7 | 1
+ 8 | 1
+ (7 rows)
+
+ -- ===================================================================
-- simple queries
-- ===================================================================
-- single table, with/without alias
*************** EXECUTE st1(101, 101);
*** 459,478 ****
-- subquery using stable function (can't be pushed down)
PREPARE st2(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND EXTRACT(dow FROM c4) = 6) ORDER BY c1;
EXPLAIN (COSTS false) EXECUTE st2(10, 20);
! QUERY PLAN
! -----------------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: t1.c1
! -> Hash Join
! Hash Cond: (t1.c3 = t2.c3)
-> Foreign Scan on ft1 t1
Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7 FROM "S 1"."T 1" WHERE (("C 1" OPERATOR(pg_catalog.<) 20))
! -> Hash
! -> HashAggregate
! -> Foreign Scan on ft2 t2
! Filter: (date_part('dow'::text, c4) = 6::double precision)
! Remote SQL: SELECT "C 1", NULL, c3, c4, NULL, NULL, NULL FROM "S 1"."T 1" WHERE (("C 1" OPERATOR(pg_catalog.>) 10))
! (11 rows)
EXECUTE st2(10, 20);
c1 | c2 | c3 | c4 | c5 | c6 | c7
--- 483,501 ----
-- subquery using stable function (can't be pushed down)
PREPARE st2(int) AS SELECT * FROM ft1 t1 WHERE t1.c1 < $2 AND t1.c3 IN (SELECT c3 FROM ft2 t2 WHERE c1 > $1 AND EXTRACT(dow FROM c4) = 6) ORDER BY c1;
EXPLAIN (COSTS false) EXECUTE st2(10, 20);
! QUERY PLAN
! -----------------------------------------------------------------------------------------------------------------------------------------
Sort
Sort Key: t1.c1
! -> Nested Loop Semi Join
! Join Filter: (t1.c3 = t2.c3)
-> Foreign Scan on ft1 t1
Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7 FROM "S 1"."T 1" WHERE (("C 1" OPERATOR(pg_catalog.<) 20))
! -> Materialize
! -> Foreign Scan on ft2 t2
! Filter: (date_part('dow'::text, c4) = 6::double precision)
! Remote SQL: SELECT "C 1", NULL, c3, c4, NULL, NULL, NULL FROM "S 1"."T 1" WHERE (("C 1" OPERATOR(pg_catalog.>) 10))
! (10 rows)
EXECUTE st2(10, 20);
c1 | c2 | c3 | c4 | c5 | c6 | c7
*************** EXPLAIN (COSTS false) EXECUTE st4(1);
*** 552,561 ****
(2 rows)
EXPLAIN (COSTS false) EXECUTE st4(1);
! QUERY PLAN
! ---------------------------------------------------------------------------------------------------------------
Foreign Scan on ft1 t1
! Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7 FROM "S 1"."T 1" WHERE (("C 1" OPERATOR(pg_catalog.=) $1))
(2 rows)
-- cleanup
--- 575,584 ----
(2 rows)
EXPLAIN (COSTS false) EXECUTE st4(1);
! QUERY PLAN
! --------------------------------------------------------------------------------------------------------------
Foreign Scan on ft1 t1
! Remote SQL: SELECT "C 1", c2, c3, c4, c5, c6, c7 FROM "S 1"."T 1" WHERE (("C 1" OPERATOR(pg_catalog.=) 1))
(2 rows)
-- cleanup
diff --git a/contrib/pgsql_fdw/pgsql_fdw.c b/contrib/pgsql_fdw/pgsql_fdw.c
index 8975955..0412c2c 100644
*** a/contrib/pgsql_fdw/pgsql_fdw.c
--- b/contrib/pgsql_fdw/pgsql_fdw.c
***************
*** 17,22 ****
--- 17,23 ----
#include "catalog/pg_foreign_table.h"
#include "commands/defrem.h"
#include "commands/explain.h"
+ #include "commands/vacuum.h"
#include "funcapi.h"
#include "miscadmin.h"
#include "optimizer/cost.h"
*************** static TupleTableSlot *pgsqlIterateForei
*** 166,171 ****
--- 167,176 ----
static void pgsqlReScanForeignScan(ForeignScanState *node);
static void pgsqlEndForeignScan(ForeignScanState *node);
+ static void pgsqlAnalyzeForeignTable(Relation relation,
+ VacuumStmt *vacstmt,
+ int elevel);
+
/*
* Helper functions
*/
*************** static void execute_query(ForeignScanSta
*** 181,186 ****
--- 186,195 ----
static PGresult *fetch_result(ForeignScanState *node);
static void store_result(ForeignScanState *node, PGresult *res);
static void pgsql_fdw_error_callback(void *arg);
+ static int pgsql_acquire_sample_rows(Relation onerel, HeapTuple *rows,
+ int targrows, double *totalrows,
+ double *totaldeadrows,
+ BlockNumber *totalpages, int elevel);
/* Exported functions, but not written in pgsql_fdw.h. */
void _PG_init(void);
*************** static FdwRoutine fdwroutine = {
*** 222,227 ****
--- 231,237 ----
pgsqlEndForeignScan,
/* Optional handler functions. */
+ pgsqlAnalyzeForeignTable,
};
/*
*************** pgsqlGetForeignRelSize(PlannerInfo *root
*** 288,294 ****
* appended later.
*/
sortConditions(root, baserel, &remote_conds, ¶m_conds, &local_conds);
! deparseSimpleSql(sql, foreigntableid, root, baserel, table);
if (list_length(remote_conds) > 0)
{
appendWhereClause(sql, fpstate->has_where, remote_conds, root);
--- 298,304 ----
* appended later.
*/
sortConditions(root, baserel, &remote_conds, ¶m_conds, &local_conds);
! deparseSimpleSql(sql, foreigntableid, root, baserel);
if (list_length(remote_conds) > 0)
{
appendWhereClause(sql, fpstate->has_where, remote_conds, root);
*************** pgsqlEndForeignScan(ForeignScanState *no
*** 761,766 ****
--- 771,786 ----
}
/*
+ * Collect statistics of a foreign table, and store the result in system
+ * catalogs.
+ */
+ static void
+ pgsqlAnalyzeForeignTable(Relation relation, VacuumStmt *vacstmt, int elevel)
+ {
+ do_analyze_rel(relation, vacstmt, elevel, false, pgsql_acquire_sample_rows);
+ }
+
+ /*
* Estimate costs of executing given SQL statement.
*/
static void
*************** pgsql_fdw_error_callback(void *arg)
*** 1111,1113 ****
--- 1131,1344 ----
errcontext("column %s of foreign table %s",
quote_identifier(colname), quote_identifier(relname));
}
+
+ /*
+ * Acquire a random sample of rows from a foreign table managed by pgsql_fdw.
+ *
+ * pgsql_fdw doesn't provide direct access to remote buffers, so we execute a
+ * simple SELECT statement which retrieves all rows from the remote side, and
+ * pick samples from them.
+ */
+ static int
+ pgsql_acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
+ double *totalrows, double *totaldeadrows,
+ BlockNumber *totalpages, int elevel)
+ {
+ StringInfoData sql;
+ StringInfoData buf;
+ ForeignTable *table;
+ ForeignServer *server;
+ UserMapping *user;
+ int fetch_count;
+ PGconn *conn = NULL;
+ PGresult *res = NULL;
+ int row;
+ int i;
+ int j;
+ /* variables for tuple creation */
+ TupleDesc tupdesc;
+ AttInMetadata *attinmeta;
+ HeapTuple tuple;
+ char **values;
+ /* variables for sampling */
+ int numrows = 0;
+ double samplerows = 0;
+ double rowstoskip = -1;
+ double rstate;
+
+ /* Prepare for sampling rows */
+ rstate = init_selection_state(targrows);
+
+ /* Prepare tuple construction. */
+ tupdesc = onerel->rd_att;
+ attinmeta = TupleDescGetAttInMetadata(tupdesc);
+ values = (char **) palloc(sizeof(char *) * tupdesc->natts);
+
+ /*
+ * Construct a SELECT statement which retrieves all rows from the remote
+ * side. We can't avoid a sequential scan on the remote side to get
+ * practical statistics, so this seems a reasonable compromise.
+ */
+ initStringInfo(&sql);
+ deparseAnalyzeSql(&sql, onerel);
+ initStringInfo(&buf);
+ appendStringInfo(&buf, "DECLARE pgsql_fdw_cur CURSOR FOR %s", sql.data);
+
+ table = GetForeignTable(onerel->rd_id);
+ server = GetForeignServer(table->serverid);
+ user = GetUserMapping(GetOuterUserId(), server->serverid);
+ conn = GetConnection(server, user, true);
+
+ /*
+ * Acquire sample rows from the result set.
+ */
+ PG_TRY();
+ {
+ /* Declare non-scrollable cursor for analyze. */
+ res = PQexec(conn, buf.data);
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ ereport(ERROR,
+ (errmsg("could not declare cursor"),
+ errdetail("%s", PQerrorMessage(conn)),
+ errhint("%s", buf.data)));
+ PQclear(res);
+ res = NULL;
+
+ /* Execute FETCH statement until all rows have been retrieved. */
+ resetStringInfo(&buf);
+ fetch_count = GetFetchCountOption(table, server);
+ appendStringInfo(&buf, "FETCH %d FROM pgsql_fdw_cur", fetch_count);
+ while (true)
+ {
+ /*
+ * ANALYZE on a foreign table is not done as part of a vacuum
+ * process, so we use CHECK_FOR_INTERRUPTS() here instead of
+ * vacuum_delay_point().
+ */
+ CHECK_FOR_INTERRUPTS();
+
+ /* Fetch next bunch of results from remote side. */
+ res = PQexec(conn, buf.data);
+ if (PQresultStatus(res) != PGRES_TUPLES_OK)
+ ereport(ERROR,
+ (errmsg("could not fetch rows from foreign server"),
+ errdetail("%s", PQerrorMessage(conn)),
+ errhint("%s", buf.data)));
+ if (PQntuples(res) == 0)
+ break;
+
+ /* Loop through the result set and pick up sample rows. */
+ for (row = 0; row < PQntuples(res); row++)
+ {
+ if (numrows < targrows)
+ {
+ /*
+ * Create a sample tuple from the result, and append it to the
+ * tuple buffer. Note that i and j index entries in the
+ * catalog and in the PGresult respectively.
+ */
+ for (i = 0, j = 0; i < tupdesc->natts; i++)
+ {
+ if (tupdesc->attrs[i]->attisdropped)
+ continue;
+
+ if (PQgetisnull(res, row, j))
+ values[i] = NULL;
+ else
+ values[i] = PQgetvalue(res, row, j);
+ j++;
+ }
+ tuple = BuildTupleFromCStrings(attinmeta, values);
+ rows[numrows++] = tuple;
+ }
+ else
+ {
+ /*
+ * The first targrows sample rows are simply copied into
+ * the reservoir. Then we start replacing tuples in the
+ * sample until we reach the end of the relation. This
+ * algorithm is from Jeff Vitter's paper, just like
+ * acquire_sample_rows in analyze.c.
+ *
+ * We don't have block-wise access here, so every row in
+ * the PGresult is a candidate for the sample.
+ */
+ if (rowstoskip < 0)
+ rowstoskip = get_next_S(samplerows, targrows, &rstate);
+
+ if (rowstoskip <= 0)
+ {
+ int k = (int) (targrows * random_fract());
+
+ Assert(k >= 0 && k < targrows);
+
+ /*
+ * Create a sample tuple from the result, and use it to
+ * replace a randomly chosen tuple in the reservoir.
+ */
+ heap_freetuple(rows[k]);
+ for (i = 0, j = 0; i < tupdesc->natts; i++)
+ {
+ if (tupdesc->attrs[i]->attisdropped)
+ continue;
+
+ if (PQgetisnull(res, row, j))
+ values[i] = NULL;
+ else
+ values[i] = PQgetvalue(res, row, j);
+ j++;
+ }
+ tuple = BuildTupleFromCStrings(attinmeta, values);
+ rows[k] = tuple;
+ }
+
+ rowstoskip -= 1;
+ }
+
+ samplerows += 1;
+ }
+
+ PQclear(res);
+ res = NULL;
+ }
+
+ /* Close the cursor. */
+ res = PQexec(conn, "CLOSE pgsql_fdw_cur");
+ if (PQresultStatus(res) != PGRES_COMMAND_OK)
+ ereport(ERROR,
+ (errmsg("could not close cursor"),
+ errdetail("%s", PQerrorMessage(conn))));
+ }
+ PG_CATCH();
+ {
+ PQclear(res);
+ PG_RE_THROW();
+ }
+ PG_END_TRY();
+
+ ReleaseConnection(conn);
+
+ /* We assume that there are no dead tuples. */
+ *totaldeadrows = 0.0;
+
+ /* We've retrieved all live tuples from the foreign server. */
+ *totalrows = samplerows;
+
+ /*
+ * We don't update pg_class.relpages because pgsql_fdw doesn't use it
+ * for planning at all.
+ */
+
+ /*
+ * Emit some interesting relation info
+ */
+ ereport(elevel,
+ (errmsg("\"%s\": scanned with \"%s\", "
+ "containing %.0f live rows and %.0f dead rows; "
+ "%d rows in sample, %.0f estimated total rows",
+ RelationGetRelationName(onerel), sql.data,
+ samplerows, 0.0,
+ numrows, samplerows)));
+
+ return numrows;
+ }
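
The cursor handling in pgsql_acquire_sample_rows (DECLARE ... CURSOR, repeated FETCH, then CLOSE) is the standard libpq pattern for reading a large result set in batches. Below is a standalone client-side sketch of that pattern; the connection string, cursor name, query, and fetch size are arbitrary illustration values, not anything defined by this patch, and unlike pgsql_fdw it manages its own transaction block.

#include <stdio.h>
#include <libpq-fe.h>

int
main(void)
{
	PGconn	   *conn = PQconnectdb("dbname=postgres");	/* illustration only */
	PGresult   *res;

	if (PQstatus(conn) != CONNECTION_OK)
	{
		fprintf(stderr, "connection failed: %s", PQerrorMessage(conn));
		PQfinish(conn);
		return 1;
	}

	/* Cursors live only inside a transaction block. */
	PQclear(PQexec(conn, "BEGIN"));
	res = PQexec(conn,
				 "DECLARE sample_cur CURSOR FOR SELECT relname FROM pg_class");
	if (PQresultStatus(res) != PGRES_COMMAND_OK)
		fprintf(stderr, "could not declare cursor: %s", PQerrorMessage(conn));
	PQclear(res);

	/* Fetch in batches until the cursor is exhausted. */
	for (;;)
	{
		res = PQexec(conn, "FETCH 100 FROM sample_cur");
		if (PQresultStatus(res) != PGRES_TUPLES_OK || PQntuples(res) == 0)
		{
			PQclear(res);
			break;
		}

		for (int row = 0; row < PQntuples(res); row++)
			printf("%s\n", PQgetvalue(res, row, 0));

		PQclear(res);
	}

	PQclear(PQexec(conn, "CLOSE sample_cur"));
	PQclear(PQexec(conn, "COMMIT"));
	PQfinish(conn);
	return 0;
}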
diff --git a/contrib/pgsql_fdw/pgsql_fdw.h b/contrib/pgsql_fdw/pgsql_fdw.h
index 5487d52..8ca58d4 100644
*** a/contrib/pgsql_fdw/pgsql_fdw.h
--- b/contrib/pgsql_fdw/pgsql_fdw.h
*************** int GetFetchCountOption(ForeignTable *ta
*** 29,36 ****
void deparseSimpleSql(StringInfo buf,
Oid relid,
PlannerInfo *root,
! RelOptInfo *baserel,
! ForeignTable *table);
void appendWhereClause(StringInfo buf,
bool has_where,
List *exprs,
--- 29,35 ----
void deparseSimpleSql(StringInfo buf,
Oid relid,
PlannerInfo *root,
! RelOptInfo *baserel);
void appendWhereClause(StringInfo buf,
bool has_where,
List *exprs,
*************** void sortConditions(PlannerInfo *root,
*** 40,44 ****
--- 39,44 ----
List **remote_conds,
List **param_conds,
List **local_conds);
+ void deparseAnalyzeSql(StringInfo buf, Relation rel);
#endif /* PGSQL_FDW_H */
diff --git a/contrib/pgsql_fdw/sql/pgsql_fdw.sql b/contrib/pgsql_fdw/sql/pgsql_fdw.sql
index 6692af7..f0c282a 100644
*** a/contrib/pgsql_fdw/sql/pgsql_fdw.sql
--- b/contrib/pgsql_fdw/sql/pgsql_fdw.sql
*************** INSERT INTO "S 1"."T 2"
*** 85,90 ****
--- 85,91 ----
'AAA' || to_char(id, 'FM000')
FROM generate_series(1, 100) id;
COMMIT;
+ ANALYZE;
-- ===================================================================
-- tests for pgsql_fdw_validator
*************** ALTER FOREIGN TABLE ft2 ALTER COLUMN c1
*** 141,146 ****
--- 142,155 ----
\det+
-- ===================================================================
+ -- ANALYZE
+ -- ===================================================================
+ ANALYZE ft1;
+ ANALYZE ft2;
+ SELECT relpages, reltuples from pg_class where oid = 'ft1'::regclass;
+ SELECT staattnum, count(*) FROM pg_statistic WHERE starelid = 'ft1'::regclass GROUP BY staattnum ORDER BY staattnum;
+
+ -- ===================================================================
-- simple queries
-- ===================================================================
-- single table, with/without alias
diff --git a/doc/src/sgml/fdwhandler.sgml b/doc/src/sgml/fdwhandler.sgml
index f7bf3d8..4f962b7 100644
*** a/doc/src/sgml/fdwhandler.sgml
--- b/doc/src/sgml/fdwhandler.sgml
*************** EndForeignScan (ForeignScanState *node);
*** 277,282 ****
--- 277,305 ----
+
+ void
+ AnalyzeForeignTable (Relation onerel,
+ VacuumStmt *vacstmt,
+ int elevel);
+
+
+ Collect statistics on a foreign table and store the results in the
+ pg_class and pg_statistic system catalogs.
+ This function is optional; if it is implemented, it is called when the
+ ANALYZE> command is run. The statistics are used by the query planner
+ in order to make good choices of query plans.
+
+
+
+ This function can be implemented by writing a sampling function that
+ acquires a random sample of rows from the external data source and
+ then calling do_analyze_rel>, passing the sampling function
+ as an argument.
+ Set this field to NULL if the function is not implemented.
+
+
+
The FdwRoutine> struct type is declared in
src/include/foreign/fdwapi.h>, which see for additional
details.
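
To make the callback contract concrete, the following is a schematic sketch of the shape both FDWs in this patch use. The my_* names are hypothetical; only the do_analyze_rel call and the sample-acquisition signature (SampleRowAcquireFunc in src/include/commands/vacuum.h) come from the patch, and the skeleton compiles only against a backend carrying these changes.

#include "postgres.h"

#include "commands/vacuum.h"
#include "foreign/fdwapi.h"
#include "utils/rel.h"

/* Hypothetical sampling function matching SampleRowAcquireFunc. */
static int
my_acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
					   double *totalrows, double *totaldeadrows,
					   BlockNumber *totalpages, int elevel)
{
	/*
	 * Read rows from the external source, build HeapTuples into rows[]
	 * (at most targrows of them), and report the totals.  Omitted here.
	 */
	*totalrows = 0;
	*totaldeadrows = 0;
	if (totalpages)				/* NULL when sampling an inheritance tree */
		*totalpages = 1;
	return 0;
}

/* The AnalyzeForeignTable callback itself just delegates to the core code. */
static void
myAnalyzeForeignTable(Relation onerel, VacuumStmt *vacstmt, int elevel)
{
	do_analyze_rel(onerel, vacstmt, elevel, false, my_acquire_sample_rows);
}

/*
 * In the FDW's handler function, the callback is wired up the same way the
 * required handlers are, e.g.:
 *     fdwroutine->AnalyzeForeignTable = myAnalyzeForeignTable;
 * or left as NULL if the FDW cannot support ANALYZE.
 */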
diff --git a/doc/src/sgml/maintenance.sgml b/doc/src/sgml/maintenance.sgml
index 93c3ff5..54d0838 100644
*** a/doc/src/sgml/maintenance.sgml
--- b/doc/src/sgml/maintenance.sgml
***************
*** 284,289 ****
--- 284,293 ----
ANALYZE> strictly as a function of the number of rows
inserted or updated; it has no knowledge of whether that will lead
to meaningful statistical changes.
+ Note that the autovacuum daemon does not issue ANALYZE>
+ commands on foreign tables. It is recommended to run ANALYZE>
+ on foreign tables manually as needed, typically on a schedule
+ maintained by cron or Task Scheduler scripts.
diff --git a/doc/src/sgml/ref/alter_foreign_table.sgml b/doc/src/sgml/ref/alter_foreign_table.sgml
index c4cdaa8..af5c0a8 100644
*** a/doc/src/sgml/ref/alter_foreign_table.sgml
--- b/doc/src/sgml/ref/alter_foreign_table.sgml
*************** ALTER FOREIGN TABLE [ IF EXISTS ] column [ RESTRICT | CASCADE ]
ALTER [ COLUMN ] column [ SET DATA ] TYPE type
ALTER [ COLUMN ] column { SET | DROP } NOT NULL
+ ALTER [ COLUMN ] column SET STATISTICS integer
+ ALTER [ COLUMN ] column SET ( attribute_option = value [, ... ] )
+ ALTER [ COLUMN ] column RESET ( attribute_option [, ... ] )
ALTER [ COLUMN ] column OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ])
OWNER TO new_owner
OPTIONS ( [ ADD | SET | DROP ] option ['value'] [, ... ])
*************** ALTER FOREIGN TABLE [ IF EXISTS ]
+ SET STATISTICS
+
+
+ This form
+ sets the per-column statistics-gathering target for subsequent
+ ANALYZE> operations.
+ The target can be set in the range 0 to 10000; alternatively, set it
+ to -1 to revert to using the system default statistics
+ target ().
+
+
+
+
+
+ SET ( attribute_option = value [, ... ] )
+ RESET ( attribute_option [, ... ] )
+
+
+ This form
+ sets or resets a per-attribute option. Currently, the only defined
+ per-attribute option is n_distinct>, which overrides
+ the number-of-distinct-values estimates made by subsequent
+ ANALYZE> operations.
+ When set to a positive value, ANALYZE> will assume that
+ the column contains exactly the specified number of distinct nonnull
+ values.
+ When set to a negative value, which must be greater than or equal
+ to -1, ANALYZE> will assume that the number of distinct
+ nonnull values in the column is linear in the size of the foreign
+ table; the exact count is to be computed by multiplying the estimated
+ foreign table size by the absolute value of the given number.
+ For example,
+ a value of -1 implies that all values in the column are distinct,
+ while a value of -0.5 implies that each value appears twice on the
+ average.
+ This can be useful when the size of the foreign table changes over
+ time, since the multiplication by the number of rows in the foreign
+ table is not performed until query planning time. Specify a value
+ of 0 to revert to estimating the number of distinct values normally.
+
+
+
+
+
OWNER
diff --git a/doc/src/sgml/ref/analyze.sgml b/doc/src/sgml/ref/analyze.sgml
index 8c9057b..524a1c9 100644
*** a/doc/src/sgml/ref/analyze.sgml
--- b/doc/src/sgml/ref/analyze.sgml
*************** ANALYZE [ VERBOSE ] [
With no parameter, ANALYZE examines every table in the
! current database. With a parameter, ANALYZE examines
! only that table. It is further possible to give a list of column names,
! in which case only the statistics for those columns are collected.
--- 39,49 ----
With no parameter, ANALYZE examines every table in the
! current database except for foreign tables. With a parameter,
! ANALYZE examines only that table. A foreign table is analyzed
! only when it is explicitly named. It is further possible to
! give a list of column names, in which case only the statistics for those
! columns are collected.
*************** ANALYZE [ VERBOSE ] [
The name (possibly schema-qualified) of a specific table to
! analyze. Defaults to all tables in the current database.
--- 65,72 ----
The name (possibly schema-qualified) of a specific table to
! analyze. Defaults to all tables in the current database except
! for foreign tables.
*************** ANALYZE [ VERBOSE ] [ ANALYZE is run.
To avoid this, raise the amount of statistics collected by
! ANALYZE, as described below.
--- 140,148 ----
In rare situations, this non-determinism will cause the planner's
choices of query plans to change after ANALYZE is run.
To avoid this, raise the amount of statistics collected by
! ANALYZE, as described below. Note that the time
! needed to analyze on foreign tables depends on the implementation of
! the foreign data wrapper via which such tables are attached.
diff --git a/src/backend/commands/analyze.c b/src/backend/commands/analyze.c
index 9cd6e67..5579dee 100644
*** a/src/backend/commands/analyze.c
--- b/src/backend/commands/analyze.c
***************
*** 23,28 ****
--- 23,29 ----
#include "access/xact.h"
#include "catalog/index.h"
#include "catalog/indexing.h"
+ #include "catalog/pg_class.h"
#include "catalog/pg_collation.h"
#include "catalog/pg_inherits_fn.h"
#include "catalog/pg_namespace.h"
***************
*** 30,35 ****
--- 31,38 ----
#include "commands/tablecmds.h"
#include "commands/vacuum.h"
#include "executor/executor.h"
+ #include "foreign/foreign.h"
+ #include "foreign/fdwapi.h"
#include "miscadmin.h"
#include "nodes/nodeFuncs.h"
#include "parser/parse_oper.h"
*************** typedef struct AnlIndexData
*** 78,91 ****
int default_statistics_target = 100;
/* A few variables that don't seem worth passing around as parameters */
- static int elevel = -1;
-
static MemoryContext anl_context = NULL;
static BufferAccessStrategy vac_strategy;
- static void do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, bool inh);
static void BlockSampler_Init(BlockSampler bs, BlockNumber nblocks,
int samplesize);
static bool BlockSampler_HasMore(BlockSampler bs);
--- 81,91 ----
*************** static void compute_index_stats(Relation
*** 96,112 ****
MemoryContext col_context);
static VacAttrStats *examine_attribute(Relation onerel, int attnum,
Node *index_expr);
! static int acquire_sample_rows(Relation onerel, HeapTuple *rows,
! int targrows, double *totalrows, double *totaldeadrows);
! static double random_fract(void);
! static double init_selection_state(int n);
! static double get_next_S(double t, int n, double *stateptr);
! static int compare_rows(const void *a, const void *b);
static int acquire_inherited_sample_rows(Relation onerel,
HeapTuple *rows, int targrows,
! double *totalrows, double *totaldeadrows);
static void update_attstats(Oid relid, bool inh,
int natts, VacAttrStats **vacattrstats);
static Datum std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
--- 96,112 ----
MemoryContext col_context);
static VacAttrStats *examine_attribute(Relation onerel, int attnum,
Node *index_expr);
! static int acquire_sample_rows(Relation onerel,
! HeapTuple *rows, int targrows,
! double *totalrows, double *totaldeadrows,
! BlockNumber *totalpages, int elevel);
static int acquire_inherited_sample_rows(Relation onerel,
HeapTuple *rows, int targrows,
! double *totalrows, double *totaldeadrows,
! BlockNumber *totalpages, int elevel);
static void update_attstats(Oid relid, bool inh,
int natts, VacAttrStats **vacattrstats);
+ static int compare_rows(const void *a, const void *b);
static Datum std_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
static Datum ind_fetch_func(VacAttrStatsP stats, int rownum, bool *isNull);
*************** static Datum ind_fetch_func(VacAttrStats
*** 117,123 ****
--- 117,125 ----
void
analyze_rel(Oid relid, VacuumStmt *vacstmt, BufferAccessStrategy bstrategy)
{
+ int elevel;
Relation onerel;
+ FdwRoutine *fdwroutine;
/* Set up static variables */
if (vacstmt->options & VACOPT_VERBOSE)
*************** analyze_rel(Oid relid, VacuumStmt *vacst
*** 182,191 ****
}
/*
! * Check that it's a plain table; we used to do this in get_rel_oids() but
! * seems safer to check after we've locked the relation.
*/
! if (onerel->rd_rel->relkind != RELKIND_RELATION)
{
/* No need for a WARNING if we already complained during VACUUM */
if (!(vacstmt->options & VACOPT_VACUUM))
--- 184,195 ----
}
/*
! * Check that it's a plain table or foreign table; we used to do this
! * in get_rel_oids() but seems safer to check after we've locked the
! * relation.
*/
! if (onerel->rd_rel->relkind != RELKIND_RELATION &&
! onerel->rd_rel->relkind != RELKIND_FOREIGN_TABLE)
{
/* No need for a WARNING if we already complained during VACUUM */
if (!(vacstmt->options & VACOPT_VACUUM))
*************** analyze_rel(Oid relid, VacuumStmt *vacst
*** 209,215 ****
}
/*
! * We can ANALYZE any table except pg_statistic. See update_attstats
*/
if (RelationGetRelid(onerel) == StatisticRelationId)
{
--- 213,221 ----
}
/*
! * We can ANALYZE any table except pg_statistic. See update_attstats.
! * In addition, we can ANALYZE foreign tables if AnalyzeForeignTable
! * callback routines of underlying foreign-data wrappers are implemented.
*/
if (RelationGetRelid(onerel) == StatisticRelationId)
{
*************** analyze_rel(Oid relid, VacuumStmt *vacst
*** 217,222 ****
--- 223,242 ----
return;
}
+ if (onerel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+ {
+ fdwroutine = GetFdwRoutineByRelId(RelationGetRelid(onerel));
+
+ if (fdwroutine->AnalyzeForeignTable == NULL)
+ {
+ ereport(WARNING,
+ (errmsg("skipping \"%s\" --- underlying foreign-data wrapper cannot analyze it",
+ RelationGetRelationName(onerel))));
+ relation_close(onerel, ShareUpdateExclusiveLock);
+ return;
+ }
+ }
+
/*
* OK, let's do it. First let other backends know I'm in ANALYZE.
*/
*************** analyze_rel(Oid relid, VacuumStmt *vacst
*** 224,239 ****
MyPgXact->vacuumFlags |= PROC_IN_ANALYZE;
LWLockRelease(ProcArrayLock);
! /*
! * Do the normal non-recursive ANALYZE.
! */
! do_analyze_rel(onerel, vacstmt, false);
! /*
! * If there are child tables, do recursive ANALYZE.
! */
! if (onerel->rd_rel->relhassubclass)
! do_analyze_rel(onerel, vacstmt, true);
/*
* Close source relation now, but keep lock so that no one deletes it
--- 244,281 ----
MyPgXact->vacuumFlags |= PROC_IN_ANALYZE;
LWLockRelease(ProcArrayLock);
! if (onerel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
! {
! ereport(elevel,
! (errmsg("analyzing \"%s.%s\"",
! get_namespace_name(RelationGetNamespace(onerel)),
! RelationGetRelationName(onerel))));
! fdwroutine->AnalyzeForeignTable(onerel, vacstmt, elevel);
! }
! else
! {
! /*
! * Do the normal non-recursive ANALYZE.
! */
! ereport(elevel,
! (errmsg("analyzing \"%s.%s\"",
! get_namespace_name(RelationGetNamespace(onerel)),
! RelationGetRelationName(onerel))));
! do_analyze_rel(onerel, vacstmt, elevel, false, acquire_sample_rows);
! /*
! * If there are child tables, do recursive ANALYZE.
! */
! if (onerel->rd_rel->relhassubclass)
! {
! ereport(elevel,
! (errmsg("analyzing \"%s.%s\" inheritance tree",
! get_namespace_name(RelationGetNamespace(onerel)),
! RelationGetRelationName(onerel))));
! do_analyze_rel(onerel, vacstmt, elevel, true,
! acquire_inherited_sample_rows);
! }
! }
/*
* Close source relation now, but keep lock so that no one deletes it
*************** analyze_rel(Oid relid, VacuumStmt *vacst
*** 255,262 ****
/*
* do_analyze_rel() -- analyze one relation, recursively or not
*/
! static void
! do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, bool inh)
{
int attr_cnt,
tcnt,
--- 297,305 ----
/*
* do_analyze_rel() -- analyze one relation, recursively or not
*/
! void
! do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, int elevel,
! bool inh, SampleRowAcquireFunc acquirefunc)
{
int attr_cnt,
tcnt,
*************** do_analyze_rel(Relation onerel, VacuumSt
*** 271,276 ****
--- 314,320 ----
numrows;
double totalrows,
totaldeadrows;
+ BlockNumber totalpages;
HeapTuple *rows;
PGRUsage ru0;
TimestampTz starttime = 0;
*************** do_analyze_rel(Relation onerel, VacuumSt
*** 279,295 ****
int save_sec_context;
int save_nestlevel;
- if (inh)
- ereport(elevel,
- (errmsg("analyzing \"%s.%s\" inheritance tree",
- get_namespace_name(RelationGetNamespace(onerel)),
- RelationGetRelationName(onerel))));
- else
- ereport(elevel,
- (errmsg("analyzing \"%s.%s\"",
- get_namespace_name(RelationGetNamespace(onerel)),
- RelationGetRelationName(onerel))));
-
/*
* Set up a working context so that we can easily free whatever junk gets
* created.
--- 323,328 ----
*************** do_analyze_rel(Relation onerel, VacuumSt
*** 447,457 ****
*/
rows = (HeapTuple *) palloc(targrows * sizeof(HeapTuple));
if (inh)
! numrows = acquire_inherited_sample_rows(onerel, rows, targrows,
! &totalrows, &totaldeadrows);
else
! numrows = acquire_sample_rows(onerel, rows, targrows,
! &totalrows, &totaldeadrows);
/*
* Compute the statistics. Temporary results during the calculations for
--- 480,492 ----
*/
rows = (HeapTuple *) palloc(targrows * sizeof(HeapTuple));
if (inh)
! numrows = acquirefunc(onerel, rows, targrows,
! &totalrows, &totaldeadrows,
! NULL, elevel);
else
! numrows = acquirefunc(onerel, rows, targrows,
! &totalrows, &totaldeadrows,
! &totalpages, elevel);
/*
* Compute the statistics. Temporary results during the calculations for
*************** do_analyze_rel(Relation onerel, VacuumSt
*** 532,538 ****
*/
if (!inh)
vac_update_relstats(onerel,
! RelationGetNumberOfBlocks(onerel),
totalrows,
visibilitymap_count(onerel),
hasindex,
--- 567,573 ----
*/
if (!inh)
vac_update_relstats(onerel,
! totalpages,
totalrows,
visibilitymap_count(onerel),
hasindex,
*************** BlockSampler_Next(BlockSampler bs)
*** 1015,1021 ****
*/
static int
acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
! double *totalrows, double *totaldeadrows)
{
int numrows = 0; /* # rows now in reservoir */
double samplerows = 0; /* total # rows collected */
--- 1050,1057 ----
*/
static int
acquire_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
! double *totalrows, double *totaldeadrows,
! BlockNumber *totalpages, int elevel)
{
int numrows = 0; /* # rows now in reservoir */
double samplerows = 0; /* total # rows collected */
*************** acquire_sample_rows(Relation onerel, Hea
*** 1030,1035 ****
--- 1066,1073 ----
Assert(targrows > 0);
totalblocks = RelationGetNumberOfBlocks(onerel);
+ if (totalpages)
+ *totalpages = totalblocks;
/* Need a cutoff xmin for HeapTupleSatisfiesVacuum */
OldestXmin = GetOldestXmin(onerel->rd_rel->relisshared, true);
*************** acquire_sample_rows(Relation onerel, Hea
*** 1252,1258 ****
}
/* Select a random value R uniformly distributed in (0 - 1) */
! static double
random_fract(void)
{
return ((double) random() + 1) / ((double) MAX_RANDOM_VALUE + 2);
--- 1290,1296 ----
}
/* Select a random value R uniformly distributed in (0 - 1) */
! double
random_fract(void)
{
return ((double) random() + 1) / ((double) MAX_RANDOM_VALUE + 2);
*************** random_fract(void)
*** 1272,1285 ****
* determines the number of records to skip before the next record is
* processed.
*/
! static double
init_selection_state(int n)
{
/* Initial value of W (for use when Algorithm Z is first applied) */
return exp(-log(random_fract()) / n);
}
! static double
get_next_S(double t, int n, double *stateptr)
{
double S;
--- 1310,1323 ----
* determines the number of records to skip before the next record is
* processed.
*/
! double
init_selection_state(int n)
{
/* Initial value of W (for use when Algorithm Z is first applied) */
return exp(-log(random_fract()) / n);
}
! double
get_next_S(double t, int n, double *stateptr)
{
double S;
*************** compare_rows(const void *a, const void *
*** 1395,1401 ****
*/
static int
acquire_inherited_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
! double *totalrows, double *totaldeadrows)
{
List *tableOIDs;
Relation *rels;
--- 1433,1440 ----
*/
static int
acquire_inherited_sample_rows(Relation onerel, HeapTuple *rows, int targrows,
! double *totalrows, double *totaldeadrows,
! BlockNumber *totalpages, int elevel)
{
List *tableOIDs;
Relation *rels;
*************** acquire_inherited_sample_rows(Relation o
*** 1458,1463 ****
--- 1497,1504 ----
totalblocks += relblocks[nrels];
nrels++;
}
+ if (totalpages)
+ *totalpages = totalblocks;
/*
* Now sample rows from each relation, proportionally to its fraction of
*************** acquire_inherited_sample_rows(Relation o
*** 1491,1497 ****
rows + numrows,
childtargrows,
&trows,
! &tdrows);
/* We may need to convert from child's rowtype to parent's */
if (childrows > 0 &&
--- 1532,1540 ----
rows + numrows,
childtargrows,
&trows,
! &tdrows,
! NULL,
! elevel);
/* We may need to convert from child's rowtype to parent's */
if (childrows > 0 &&
diff --git a/src/backend/commands/tablecmds.c b/src/backend/commands/tablecmds.c
index 9853686..3031496 100644
*** a/src/backend/commands/tablecmds.c
--- b/src/backend/commands/tablecmds.c
*************** static void ATPrepSetStatistics(Relation
*** 320,325 ****
--- 320,327 ----
Node *newValue, LOCKMODE lockmode);
static void ATExecSetStatistics(Relation rel, const char *colName,
Node *newValue, LOCKMODE lockmode);
+ static void ATPrepSetOptions(Relation rel, const char *colName,
+ Node *options, LOCKMODE lockmode);
static void ATExecSetOptions(Relation rel, const char *colName,
Node *options, bool isReset, LOCKMODE lockmode);
static void ATExecSetStorage(Relation rel, const char *colName,
*************** ATPrepCmd(List **wqueue, Relation rel, A
*** 3021,3027 ****
break;
case AT_SetOptions: /* ALTER COLUMN SET ( options ) */
case AT_ResetOptions: /* ALTER COLUMN RESET ( options ) */
! ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX);
/* This command never recurses */
pass = AT_PASS_MISC;
break;
--- 3023,3030 ----
break;
case AT_SetOptions: /* ALTER COLUMN SET ( options ) */
case AT_ResetOptions: /* ALTER COLUMN RESET ( options ) */
! ATSimplePermissions(rel, ATT_TABLE | ATT_INDEX | ATT_FOREIGN_TABLE);
! ATPrepSetOptions(rel, cmd->name, cmd->def, lockmode);
/* This command never recurses */
pass = AT_PASS_MISC;
break;
*************** ATPrepSetStatistics(Relation rel, const
*** 4999,5008 ****
* allowSystemTableMods to be turned on.
*/
if (rel->rd_rel->relkind != RELKIND_RELATION &&
! rel->rd_rel->relkind != RELKIND_INDEX)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
! errmsg("\"%s\" is not a table or index",
RelationGetRelationName(rel))));
/* Permissions checks */
--- 5002,5012 ----
* allowSystemTableMods to be turned on.
*/
if (rel->rd_rel->relkind != RELKIND_RELATION &&
! rel->rd_rel->relkind != RELKIND_INDEX &&
! rel->rd_rel->relkind != RELKIND_FOREIGN_TABLE)
ereport(ERROR,
(errcode(ERRCODE_WRONG_OBJECT_TYPE),
! errmsg("\"%s\" is not a table, index or foreign table",
RelationGetRelationName(rel))));
/* Permissions checks */
*************** ATExecSetStatistics(Relation rel, const
*** 5071,5076 ****
--- 5075,5100 ----
}
static void
+ ATPrepSetOptions(Relation rel, const char *colName, Node *options,
+ LOCKMODE lockmode)
+ {
+ if (rel->rd_rel->relkind == RELKIND_FOREIGN_TABLE)
+ {
+ ListCell *cell;
+
+ foreach(cell, (List *) options)
+ {
+ DefElem *def = (DefElem *) lfirst(cell);
+
+ if (pg_strcasecmp(def->defname, "n_distinct_inherited") == 0)
+ ereport(ERROR,
+ (errcode(ERRCODE_FEATURE_NOT_SUPPORTED),
+ errmsg("cannot set \"n_distinct_inherited\" for foreign tables")));
+ }
+ }
+ }
+
+ static void
ATExecSetOptions(Relation rel, const char *colName, Node *options,
bool isReset, LOCKMODE lockmode)
{
diff --git a/src/bin/psql/describe.c b/src/bin/psql/describe.c
index dc2248b..b3d2078 100644
*** a/src/bin/psql/describe.c
--- b/src/bin/psql/describe.c
*************** describeOneTableDetails(const char *sche
*** 1104,1110 ****
bool printTableInitialized = false;
int i;
char *view_def = NULL;
! char *headers[6];
char **seq_values = NULL;
char **modifiers = NULL;
char **ptr;
--- 1104,1110 ----
bool printTableInitialized = false;
int i;
char *view_def = NULL;
! char *headers[7];
char **seq_values = NULL;
char **modifiers = NULL;
char **ptr;
*************** describeOneTableDetails(const char *sche
*** 1395,1401 ****
if (verbose)
{
headers[cols++] = gettext_noop("Storage");
! if (tableinfo.relkind == 'r')
headers[cols++] = gettext_noop("Stats target");
/* Column comments, if the relkind supports this feature. */
if (tableinfo.relkind == 'r' || tableinfo.relkind == 'v' ||
--- 1395,1401 ----
if (verbose)
{
headers[cols++] = gettext_noop("Storage");
! if (tableinfo.relkind == 'r' || tableinfo.relkind == 'f')
headers[cols++] = gettext_noop("Stats target");
/* Column comments, if the relkind supports this feature. */
if (tableinfo.relkind == 'r' || tableinfo.relkind == 'v' ||
*************** describeOneTableDetails(const char *sche
*** 1498,1504 ****
false, false);
/* Statistics target, if the relkind supports this feature */
! if (tableinfo.relkind == 'r')
{
printTableAddCell(&cont, PQgetvalue(res, i, firstvcol + 1),
false, false);
--- 1498,1504 ----
false, false);
/* Statistics target, if the relkind supports this feature */
! if (tableinfo.relkind == 'r' || tableinfo.relkind == 'f')
{
printTableAddCell(&cont, PQgetvalue(res, i, firstvcol + 1),
false, false);
diff --git a/src/bin/psql/tab-complete.c b/src/bin/psql/tab-complete.c
index 975d655..d113adf 100644
*** a/src/bin/psql/tab-complete.c
--- b/src/bin/psql/tab-complete.c
*************** static const SchemaQuery Query_for_list_
*** 409,414 ****
--- 409,429 ----
NULL
};
+ static const SchemaQuery Query_for_list_of_tf = {
+ /* catname */
+ "pg_catalog.pg_class c",
+ /* selcondition */
+ "c.relkind IN ('r', 'f')",
+ /* viscondition */
+ "pg_catalog.pg_table_is_visible(c.oid)",
+ /* namespace */
+ "c.relnamespace",
+ /* result */
+ "pg_catalog.quote_ident(c.relname)",
+ /* qualresult */
+ NULL
+ };
+
static const SchemaQuery Query_for_list_of_views = {
/* catname */
"pg_catalog.pg_class c",
*************** psql_completion(char *text, int start, i
*** 2833,2839 ****
/* ANALYZE */
/* If the previous word is ANALYZE, produce list of tables */
else if (pg_strcasecmp(prev_wd, "ANALYZE") == 0)
! COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tables, NULL);
/* WHERE */
/* Simple case of the word before the where being the table name */
--- 2848,2854 ----
/* ANALYZE */
/* If the previous word is ANALYZE, produce list of tables */
else if (pg_strcasecmp(prev_wd, "ANALYZE") == 0)
! COMPLETE_WITH_SCHEMA_QUERY(Query_for_list_of_tf, NULL);
/* WHERE */
/* Simple case of the word before the where being the table name */
diff --git a/src/include/commands/vacuum.h b/src/include/commands/vacuum.h
index 3deee66..3e89e82 100644
*** a/src/include/commands/vacuum.h
--- b/src/include/commands/vacuum.h
*************** extern void lazy_vacuum_rel(Relation one
*** 170,174 ****
--- 170,183 ----
extern void analyze_rel(Oid relid, VacuumStmt *vacstmt,
BufferAccessStrategy bstrategy);
extern bool std_typanalyze(VacAttrStats *stats);
+ typedef int (*SampleRowAcquireFunc) (Relation onerel, HeapTuple *rows,
+ int targrows, double *totalrows,
+ double *totaldeadrows,
+ BlockNumber *totalpages, int elevel);
+ extern void do_analyze_rel(Relation onerel, VacuumStmt *vacstmt, int elevel,
+ bool inh, SampleRowAcquireFunc acquirefunc);
+ extern double random_fract(void);
+ extern double init_selection_state(int n);
+ extern double get_next_S(double t, int n, double *stateptr);
#endif /* VACUUM_H */
diff --git a/src/include/foreign/fdwapi.h b/src/include/foreign/fdwapi.h
index 854f177..d7181c7 100644
*** a/src/include/foreign/fdwapi.h
--- b/src/include/foreign/fdwapi.h
***************
*** 12,19 ****
--- 12,21 ----
#ifndef FDWAPI_H
#define FDWAPI_H
+ #include "foreign/foreign.h"
#include "nodes/execnodes.h"
#include "nodes/relation.h"
+ #include "utils/rel.h"
/* To avoid including explain.h here, reference ExplainState thus: */
struct ExplainState;
*************** typedef void (*ReScanForeignScan_functio
*** 50,55 ****
--- 52,60 ----
typedef void (*EndForeignScan_function) (ForeignScanState *node);
+ typedef void (*AnalyzeForeignTable_function) (Relation relation,
+ VacuumStmt *vacstmt,
+ int elevel);
/*
* FdwRoutine is the struct returned by a foreign-data wrapper's handler
*************** typedef struct FdwRoutine
*** 64,69 ****
--- 69,78 ----
{
NodeTag type;
+ /*
+ * These handlers are required to execute a scan on a foreign table. If
+ * any of them is NULL, scans on foreign tables managed by the FDW fail.
+ */
GetForeignRelSize_function GetForeignRelSize;
GetForeignPaths_function GetForeignPaths;
GetForeignPlan_function GetForeignPlan;
*************** typedef struct FdwRoutine
*** 72,77 ****
--- 81,92 ----
IterateForeignScan_function IterateForeignScan;
ReScanForeignScan_function ReScanForeignScan;
EndForeignScan_function EndForeignScan;
+
+ /*
+ * The handlers below are optional. Set any of them to NULL to tell the
+ * PostgreSQL backend that the FDW doesn't provide that capability.
+ */
+ AnalyzeForeignTable_function AnalyzeForeignTable;
} FdwRoutine;
diff --git a/src/test/regress/expected/foreign_data.out b/src/test/regress/expected/foreign_data.out
index ba86883..f1379a6 100644
*** a/src/test/regress/expected/foreign_data.out
--- b/src/test/regress/expected/foreign_data.out
*************** CREATE FOREIGN TABLE ft1 (
*** 679,690 ****
COMMENT ON FOREIGN TABLE ft1 IS 'ft1';
COMMENT ON COLUMN ft1.c1 IS 'ft1.c1';
\d+ ft1
! Foreign table "public.ft1"
! Column | Type | Modifiers | FDW Options | Storage | Description
! --------+---------+-----------+--------------------------------+----------+-------------
! c1 | integer | not null | ("param 1" 'val1') | plain | ft1.c1
! c2 | text | | (param2 'val2', param3 'val3') | extended |
! c3 | date | | | plain |
Server: s0
FDW Options: (delimiter ',', quote '"', "be quoted" 'value')
Has OIDs: no
--- 679,690 ----
COMMENT ON FOREIGN TABLE ft1 IS 'ft1';
COMMENT ON COLUMN ft1.c1 IS 'ft1.c1';
\d+ ft1
! Foreign table "public.ft1"
! Column | Type | Modifiers | FDW Options | Storage | Stats target | Description
! --------+---------+-----------+--------------------------------+----------+--------------+-------------
! c1 | integer | not null | ("param 1" 'val1') | plain | | ft1.c1
! c2 | text | | (param2 'val2', param3 'val3') | extended | |
! c3 | date | | | plain | |
Server: s0
FDW Options: (delimiter ',', quote '"', "be quoted" 'value')
Has OIDs: no
*************** ERROR: cannot alter system column "xmin
*** 730,748 ****
ALTER FOREIGN TABLE ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'),
ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2');
ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1);
\d+ ft1
! Foreign table "public.ft1"
! Column | Type | Modifiers | FDW Options | Storage | Description
! --------+---------+-----------+--------------------------------+----------+-------------
! c1 | integer | not null | ("param 1" 'val1') | plain |
! c2 | text | | (param2 'val2', param3 'val3') | extended |
! c3 | date | | | plain |
! c4 | integer | | | plain |
! c6 | integer | not null | | plain |
! c7 | integer | | (p1 'v1', p2 'v2') | plain |
! c8 | text | | (p2 'V2') | extended |
! c9 | integer | | | plain |
! c10 | integer | | (p1 'v1') | plain |
Server: s0
FDW Options: (delimiter ',', quote '"', "be quoted" 'value')
Has OIDs: no
--- 730,753 ----
ALTER FOREIGN TABLE ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'),
ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2');
ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1);
+ ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET STATISTICS 10000;
+ ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET (n_distinct = 100);
+ ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET (n_distinct_inherited = 100); -- ERROR
+ ERROR: cannot set "n_distinct_inherited" for foreign tables
+ ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET STATISTICS -1;
\d+ ft1
! Foreign table "public.ft1"
! Column | Type | Modifiers | FDW Options | Storage | Stats target | Description
! --------+---------+-----------+--------------------------------+----------+--------------+-------------
! c1 | integer | not null | ("param 1" 'val1') | plain | 10000 |
! c2 | text | | (param2 'val2', param3 'val3') | extended | |
! c3 | date | | | plain | |
! c4 | integer | | | plain | |
! c6 | integer | not null | | plain | |
! c7 | integer | | (p1 'v1', p2 'v2') | plain | |
! c8 | text | | (p2 'V2') | extended | |
! c9 | integer | | | plain | |
! c10 | integer | | (p1 'v1') | plain | |
Server: s0
FDW Options: (delimiter ',', quote '"', "be quoted" 'value')
Has OIDs: no
diff --git a/src/test/regress/sql/foreign_data.sql b/src/test/regress/sql/foreign_data.sql
index 0c95672..03b5680 100644
*** a/src/test/regress/sql/foreign_data.sql
--- b/src/test/regress/sql/foreign_data.sql
*************** ALTER FOREIGN TABLE ft1 ALTER COLUMN xmi
*** 307,312 ****
--- 307,316 ----
ALTER FOREIGN TABLE ft1 ALTER COLUMN c7 OPTIONS (ADD p1 'v1', ADD p2 'v2'),
ALTER COLUMN c8 OPTIONS (ADD p1 'v1', ADD p2 'v2');
ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 OPTIONS (SET p2 'V2', DROP p1);
+ ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET STATISTICS 10000;
+ ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET (n_distinct = 100);
+ ALTER FOREIGN TABLE ft1 ALTER COLUMN c1 SET (n_distinct_inherited = 100); -- ERROR
+ ALTER FOREIGN TABLE ft1 ALTER COLUMN c8 SET STATISTICS -1;
\d+ ft1
-- can't change the column type if it's used elsewhere
CREATE TABLE use_ft1_column_type (x ft1);