From 990329d7a8e6110ecf099162680f6c0eaf38ea14 Mon Sep 17 00:00:00 2001
From: Peter Eisentraut
Date: Mon, 23 Sep 2024 13:13:18 +0200
Subject: [PATCH] fixup! Standardize syntax in internal documentation

---
 contrib/start-scripts/macos/README        |  6 ++--
 src/backend/access/gin/README             |  6 ++--
 src/backend/access/gist/README            |  8 ++---
 src/backend/access/hash/README            |  2 +-
 src/backend/lib/README                    | 22 ++++++------
 src/backend/optimizer/plan/README         |  6 ++--
 src/backend/parser/README                 | 42 +++++++++++------------
 src/backend/storage/lmgr/README-SSI       |  6 ++--
 src/backend/utils/fmgr/README             |  1 -
 src/backend/utils/misc/README             |  8 ++---
 src/interfaces/ecpg/preproc/README.parser | 20 +++++------
 src/port/README                           |  8 ++---
 src/test/recovery/README                  |  4 +--
 src/test/ssl/README                       | 14 ++++----
 src/tools/pgindent/README                 | 24 ++++++-------
 15 files changed, 88 insertions(+), 89 deletions(-)

diff --git a/contrib/start-scripts/macos/README b/contrib/start-scripts/macos/README
index 8fe6efb657d..c71f98880ca 100644
--- a/contrib/start-scripts/macos/README
+++ b/contrib/start-scripts/macos/README
@@ -15,11 +15,11 @@ if you plan to run the Postgres server under some user name other than
 "postgres", adjust the UserName parameter value for that.
 
 4. Copy the modified org.postgresql.postgres.plist file into
- /Library/LaunchDaemons/. You must do this as root:
+/Library/LaunchDaemons/. You must do this as root:
 
- sudo cp org.postgresql.postgres.plist /Library/LaunchDaemons
+ sudo cp org.postgresql.postgres.plist /Library/LaunchDaemons
 
- because the file will be ignored if it is not root-owned.
+ because the file will be ignored if it is not root-owned.
 
 At this point a reboot should launch the server. But if you want to
 test it without rebooting, you can do
diff --git a/src/backend/access/gin/README b/src/backend/access/gin/README
index 08faa944105..ccff6896513 100644
--- a/src/backend/access/gin/README
+++ b/src/backend/access/gin/README
@@ -124,9 +124,9 @@ know are:
 or IndexInfoFindDataOffset + sizeof(int2)) there is a byte indicating
 the "category" of the null entry. These are the possible categories:
 
- 1 = ordinary null key value extracted from an indexable item
- 2 = placeholder for zero-key indexable item
- 3 = placeholder for null indexable item
+ 1 = ordinary null key value extracted from an indexable item
+ 2 = placeholder for zero-key indexable item
+ 3 = placeholder for null indexable item
 
 Placeholder null entries are inserted into the index because otherwise
 there would be no index entry at all for an empty or null indexable item,
diff --git a/src/backend/access/gist/README b/src/backend/access/gist/README
index af082fc2bb0..62818b09d79 100644
--- a/src/backend/access/gist/README
+++ b/src/backend/access/gist/README
@@ -477,7 +477,7 @@ value. The page is not recycled, until that XID is no longer visible to
 anyone. That's much more conservative than necessary, but let's keep it
 simple.
 
-
-Authors:
- Teodor Sigaev
- Oleg Bartunov
+Authors
+-------
+* Teodor Sigaev
+* Oleg Bartunov
diff --git a/src/backend/access/hash/README b/src/backend/access/hash/README
index a5df13a68a2..5b4c758df62 100644
--- a/src/backend/access/hash/README
+++ b/src/backend/access/hash/README
@@ -255,7 +255,7 @@ The reader algorithm is:
 release the buffer content lock on old bucket, but not pin
 retake the buffer content lock on new bucket
 arrange to scan the old bucket normally and the new bucket for
- tuples which are not moved-by-split
+ tuples which are not moved-by-split
 -- then, per read request:
 reacquire content lock on current page
 step to next page if necessary (no chaining of content locks, but keep
diff --git a/src/backend/lib/README b/src/backend/lib/README
index fc8e1aa1f7c..735e3790513 100644
--- a/src/backend/lib/README
+++ b/src/backend/lib/README
@@ -1,27 +1,27 @@
 This directory contains a general purpose data structures, for use
 anywhere in the backend:
 
- binaryheap.c - a binary heap
+* binaryheap.c - a binary heap
 
- bipartite_match.c - Hopcroft-Karp maximum cardinality algorithm for bipartite graphs
+* bipartite_match.c - Hopcroft-Karp maximum cardinality algorithm for bipartite graphs
 
- bloomfilter.c - probabilistic, space-efficient set membership testing
+* bloomfilter.c - probabilistic, space-efficient set membership testing
 
- dshash.c - concurrent hash tables backed by dynamic shared memory areas
+* dshash.c - concurrent hash tables backed by dynamic shared memory areas
 
- hyperloglog.c - a streaming cardinality estimator
+* hyperloglog.c - a streaming cardinality estimator
 
- ilist.c - single and double-linked lists
+* ilist.c - single and double-linked lists
 
- integerset.c - a data structure for holding large set of integers
+* integerset.c - a data structure for holding large set of integers
 
- knapsack.c - knapsack problem solver
+* knapsack.c - knapsack problem solver
 
- pairingheap.c - a pairing heap
+* pairingheap.c - a pairing heap
 
- rbtree.c - a red-black tree
+* rbtree.c - a red-black tree
 
- stringinfo.c - an extensible string type
+* stringinfo.c - an extensible string type
 
 Aside from the inherent characteristics of the data structures, there are a
diff --git a/src/backend/optimizer/plan/README b/src/backend/optimizer/plan/README
index 93dd422dc15..04d9c50c51e 100644
--- a/src/backend/optimizer/plan/README
+++ b/src/backend/optimizer/plan/README
@@ -91,9 +91,9 @@ on each call)
 ExecReScan() now supports most of Plan types...
 
 Explanation of EXPLAIN.
 
-vac=> explain select * from tmp where x >= (select max(x2) from test2
-where y2 = y and exists (select * from tempx where tx = x));
-NOTICE: QUERY PLAN:
+ vac=> explain select * from tmp where x >= (select max(x2) from test2
+ where y2 = y and exists (select * from tempx where tx = x));
+ NOTICE: QUERY PLAN:
 Seq Scan on tmp (cost=40.03 size=101 width=8)
 SubPlan
diff --git a/src/backend/parser/README b/src/backend/parser/README
index e6016fa4301..9ac8026c73a 100644
--- a/src/backend/parser/README
+++ b/src/backend/parser/README
@@ -7,27 +7,27 @@ This directory does more than tokenize and parse SQL queries. It also
 creates Query structures for the various complex queries that are passed
 to the optimizer and then executor.
 
- parser.c things start here
- scan.l break query into tokens
- scansup.c handle escapes in input strings
- gram.y parse the tokens and produce a "raw" parse tree
- analyze.c top level of parse analysis for optimizable queries
- parse_agg.c handle aggregates, like SUM(col1), AVG(col2), ...
- parse_clause.c handle clauses like WHERE, ORDER BY, GROUP BY, ...
- parse_coerce.c handle coercing expressions to different data types
- parse_collate.c assign collation information in completed expressions
- parse_cte.c handle Common Table Expressions (WITH clauses)
- parse_expr.c handle expressions like col, col + 3, x = 3 or x = 4
- parse_enr.c handle ephemeral named rels (trigger transition tables, ...)
- parse_func.c handle functions, table.column and column identifiers
- parse_merge.c handle MERGE
- parse_node.c create nodes for various structures
- parse_oper.c handle operators in expressions
- parse_param.c handle Params (for the cases used in the core backend)
- parse_relation.c support routines for tables and column handling
- parse_target.c handle the result list of the query
- parse_type.c support routines for data type handling
- parse_utilcmd.c parse analysis for utility commands (done at execution time)
+* parser.c things start here
+* scan.l break query into tokens
+* scansup.c handle escapes in input strings
+* gram.y parse the tokens and produce a "raw" parse tree
+* analyze.c top level of parse analysis for optimizable queries
+* parse_agg.c handle aggregates, like SUM(col1), AVG(col2), ...
+* parse_clause.c handle clauses like WHERE, ORDER BY, GROUP BY, ...
+* parse_coerce.c handle coercing expressions to different data types
+* parse_collate.c assign collation information in completed expressions
+* parse_cte.c handle Common Table Expressions (WITH clauses)
+* parse_expr.c handle expressions like col, col + 3, x = 3 or x = 4
+* parse_enr.c handle ephemeral named rels (trigger transition tables, ...)
+* parse_func.c handle functions, table.column and column identifiers
+* parse_merge.c handle MERGE
+* parse_node.c create nodes for various structures
+* parse_oper.c handle operators in expressions
+* parse_param.c handle Params (for the cases used in the core backend)
+* parse_relation.c support routines for tables and column handling
+* parse_target.c handle the result list of the query
+* parse_type.c support routines for data type handling
+* parse_utilcmd.c parse analysis for utility commands (done at execution time)
 
 See also src/common/keywords.c, which contains the table of standard
 keywords and the keyword lookup function. We separated that out because
diff --git a/src/backend/storage/lmgr/README-SSI b/src/backend/storage/lmgr/README-SSI
index 48d6e025d83..be9a6f96cfe 100644
--- a/src/backend/storage/lmgr/README-SSI
+++ b/src/backend/storage/lmgr/README-SSI
@@ -414,14 +414,14 @@ to be added from scratch.
 2. The existing in-memory lock structures were not suitable for
 tracking SIREAD locks.
 
-- In PostgreSQL, tuple level locks are not held in RAM for
+ - In PostgreSQL, tuple level locks are not held in RAM for
 any length of time; lock information is written to the tuples
 involved in the transactions.
-- In PostgreSQL, existing lock structures have pointers to
+ - In PostgreSQL, existing lock structures have pointers to
 memory which is related to a session. SIREAD locks need to
 persist past the end of the originating transaction and even
 the session which ran it.
-- PostgreSQL needs to be able to tolerate a large number of
+ - PostgreSQL needs to be able to tolerate a large number of
 transactions executing while one long-running transaction stays
 open -- the in-RAM techniques discussed in the papers wouldn't
 support that.
diff --git a/src/backend/utils/fmgr/README b/src/backend/utils/fmgr/README
index ee14362b8e1..3edfbe933db 100644
--- a/src/backend/utils/fmgr/README
+++ b/src/backend/utils/fmgr/README
@@ -171,7 +171,6 @@ For float4, float8, and int8, the PG_GETARG macros will hide whether the
 types are pass-by-value or pass-by-reference. For example, if float8 is
 pass-by-reference then PG_GETARG_FLOAT8 expands to
-
 
 (* (float8 *) DatumGetPointer(fcinfo->args[number].value))
 
 and would typically be called like this:
diff --git a/src/backend/utils/misc/README b/src/backend/utils/misc/README
index abfa4737577..eaedfbc6df3 100644
--- a/src/backend/utils/misc/README
+++ b/src/backend/utils/misc/README
@@ -35,21 +35,21 @@ OK or false if not. The function can optionally do a few other things:
 additional information to the generic "invalid value for parameter FOO"
 complaint that guc.c will emit. To do that, call
 
- void GUC_check_errdetail(const char *format, ...)
+ void GUC_check_errdetail(const char *format, ...)
 
 where the format string and additional arguments follow the rules for
 errdetail() arguments. The resulting string will be emitted as the
 DETAIL line of guc.c's error report, so it should follow the message
 style guidelines for DETAIL messages. There is also
 
- void GUC_check_errhint(const char *format, ...)
+ void GUC_check_errhint(const char *format, ...)
 
 which can be used in the same way to append a HINT message.
 
 Occasionally it may even be appropriate to override guc.c's generic
 primary message or error code, which can be done with
 
- void GUC_check_errcode(int sqlerrcode)
- void GUC_check_errmsg(const char *format, ...)
+ void GUC_check_errcode(int sqlerrcode)
+ void GUC_check_errmsg(const char *format, ...)
 
 In general, check_hooks should avoid throwing errors directly if possible,
 though this may be impractical to avoid for some corner cases such as
diff --git a/src/interfaces/ecpg/preproc/README.parser b/src/interfaces/ecpg/preproc/README.parser
index ec517a3b5a3..b647e248aba 100644
--- a/src/interfaces/ecpg/preproc/README.parser
+++ b/src/interfaces/ecpg/preproc/README.parser
@@ -25,24 +25,24 @@ rules concatenated together. e.g. if gram.y has this:
 then "dumpedtokens" is "ruleAtokenAtokenBtokenC".
 "postfix" above can be:
-a) "block" - the automatic rule created by parse.pl is completely
+1) "block" - the automatic rule created by parse.pl is completely
 overridden, the code block has to be written completely as
 it were in a plain bison grammar
-b) "rule" - the automatic rule is extended on, so new syntaxes
+2) "rule" - the automatic rule is extended on, so new syntaxes
 are accepted for "ruleA". E.g.:
 
- ECPG: ruleAtokenAtokenBtokenC rule
- | tokenD tokenE { action_code; }
- ...
+ ECPG: ruleAtokenAtokenBtokenC rule
+ | tokenD tokenE { action_code; }
+ ...
 
 It will be substituted with:
 
- ruleA:
- | tokenD tokenE { action_code; }
- ...
+ ruleA:
+ | tokenD tokenE { action_code; }
+ ...
 
-c) "addon" - the automatic action for the rule (SQL syntax constructed
+3) "addon" - the automatic action for the rule (SQL syntax constructed
 from the tokens concatenated together) is prepended with
 a new action code part. This code part is written as is's already inside
 the { ... }
diff --git a/src/port/README b/src/port/README
index e5aeed07b61..6a8594aba8d 100644
--- a/src/port/README
+++ b/src/port/README
@@ -14,15 +14,15 @@ libraries.
 This is done by removing -lpgport from the link line:
 
 # Need to recompile any libpgport object files
 LIBS := $(filter-out -lpgport, $(LIBS))
 
- and adding infrastructure to recompile the object files:
+ and adding infrastructure to recompile the object files:
 
 OBJS= execute.o typename.o descriptor.o data.o error.o prepare.o memory.o \
 connect.o misc.o path.o exec.o \
 $(filter strlcat.o, $(LIBOBJS))
 
-The problem is that there is no testing of which object files need to be
-added, but missing functions usually show up when linking user
-applications.
+ The problem is that there is no testing of which object files need to be
+ added, but missing functions usually show up when linking user
+ applications.
 
 2) For applications, we use -lpgport before -lpq, so the static files
 from libpgport are linked first. This avoids having applications
diff --git a/src/test/recovery/README b/src/test/recovery/README
index c18fe7e21e9..d13156a140d 100644
--- a/src/test/recovery/README
+++ b/src/test/recovery/README
@@ -15,11 +15,11 @@ in addition to the core code.
 
 Run
 
- make check
+ make check
 
 or
 
- make installcheck
+ make installcheck
 
 You can use "make installcheck" if you previously did "make install".
 In that case, the code in the installation tree is tested. With
diff --git a/src/test/ssl/README b/src/test/ssl/README
index 957694c6892..c29f636c42e 100644
--- a/src/test/ssl/README
+++ b/src/test/ssl/README
@@ -43,19 +43,19 @@ Certificates
 The test suite needs a set of public/private key pairs and certificates to
 run:
 
-* root_ca
+* root_ca:
 root CA, use to sign the server and client CA certificates.
 
-* server_ca
+* server_ca:
 CA used to sign server certificates.
 
-* client_ca
+* client_ca:
 CA used to sign client certificates.
 
-* server-cn-only
-server-cn-and-alt-names
-server-single-alt-name
-server-multiple-alt-names
+* server-cn-only,
+server-cn-and-alt-names,
+server-single-alt-name,
+server-multiple-alt-names,
 server-no-names:
 server certificates, with small variations in the hostnames present in
 the certificate. Signed by server_ca.
diff --git a/src/tools/pgindent/README b/src/tools/pgindent/README
index d984af8f1db..ac533481c01 100644
--- a/src/tools/pgindent/README
+++ b/src/tools/pgindent/README
@@ -23,11 +23,11 @@ PREREQUISITES:
 ("man perlmodinstall" explains it). Or, if you have cpan installed, this
 should work:
 
- cpan SHANCOCK/Perl-Tidy-20230309.tar.gz
+ cpan SHANCOCK/Perl-Tidy-20230309.tar.gz
 
 Or if you have cpanm installed, you can just use:
 
- cpanm https://cpan.metacpan.org/authors/id/S/SH/SHANCOCK/Perl-Tidy-20230309.tar.gz
+ cpanm https://cpan.metacpan.org/authors/id/S/SH/SHANCOCK/Perl-Tidy-20230309.tar.gz
 
 DOING THE INDENT RUN BEFORE A NORMAL COMMIT:
@@ -36,7 +36,7 @@ DOING THE INDENT RUN BEFORE A NORMAL COMMIT:
 2) Run pgindent on the C files:
 
- src/tools/pgindent/pgindent .
+ src/tools/pgindent/pgindent .
 
 If any files generate errors, restore their original versions with
 "git checkout", and see below for cleanup ideas.
@@ -55,9 +55,9 @@ DOING THE INDENT RUN BEFORE A NORMAL COMMIT:
 6) Do a full test build:
 
- make -s clean
- make -s all # look for unexpected warnings, and errors of course
- make check-world
+ make -s clean
+ make -s all # look for unexpected warnings, and errors of course
+ make check-world
 
 Your configure switches should include at least --enable-tap-tests or else
 much of the Perl code won't get exercised.
@@ -70,7 +70,7 @@ AT LEAST ONCE PER RELEASE CYCLE:
 --------------------------------
 
 1) Download the latest typedef file from the buildfarm:
 
- wget -O src/tools/pgindent/typedefs.list https://buildfarm.postgresql.org/cgi-bin/typedefs.pl
+ wget -O src/tools/pgindent/typedefs.list https://buildfarm.postgresql.org/cgi-bin/typedefs.pl
 
 This step resolves any differences between the incrementally updated
 version of the file and a clean, autogenerated one.
@@ -81,17 +81,17 @@
 3) Indent the Perl code using perltidy:
 
- src/tools/pgindent/pgperltidy .
+ src/tools/pgindent/pgperltidy .
 
 If you want to use some perltidy version that's not in your PATH, first
 set the PERLTIDY environment variable to point to it.
 
 4) Reformat the bootstrap catalog data files:
 
- ./configure # "make" will not work in an unconfigured tree
- cd src/include/catalog
- make reformat-dat-files
- cd ../../..
+ ./configure # "make" will not work in an unconfigured tree
+ cd src/include/catalog
+ make reformat-dat-files
+ cd ../../..
 
 5) When you're done, "git commit" everything including the typedefs.list
 file you used.
-- 
2.46.0