1: 9fc1df7509 = 1: 76da087e0c Revert ECPG's use of pnstrdup()

2: fdd89bdee0 ! 2: 6b6d48e001 Remove fe_memutils from libpgcommon_shlib
@@ Metadata
## Commit message ##
Remove fe_memutils from libpgcommon_shlib

- libpq appears to have no need for this, and the exit() references cause
- our libpq-refs-stamp test to fail if the linker doesn't strip out the
- unused code.
+ libpq must not use palloc/pfree. It's not allowed to exit on allocation
+ failure, and mixing the frontend pfree with malloc is architecturally
+ unsound.
+
+ Remove fe_memutils from the shlib build entirely, to keep devs from
+ accidentally depending on it in the future.

## src/common/Makefile ##
@@ src/common/Makefile: endif
- # libraries such as libpq to report errors directly.
+ # A few files are currently only built for frontend, not server.
+ # logging.c is excluded from OBJS_FRONTEND_SHLIB (shared library) as
+ # a matter of policy, because it is not appropriate for general purpose
+-# libraries such as libpq to report errors directly.
++# libraries such as libpq to report errors directly. fe_memutils.c is
++# excluded because libpq must not exit() on allocation failure.
OBJS_FRONTEND_SHLIB = \
$(OBJS_COMMON) \
- fe_memutils.o \
@@ src/common/Makefile: endif

## src/common/meson.build ##
@@ src/common/meson.build: common_sources_cflags = {
+ # A few files are currently only built for frontend, not server.
+ # logging.c is excluded from OBJS_FRONTEND_SHLIB (shared library) as
+ # a matter of policy, because it is not appropriate for general purpose
+-# libraries such as libpq to report errors directly.
++# libraries such as libpq to report errors directly. fe_memutils.c is
++# excluded because libpq must not exit() on allocation failure.
common_sources_frontend_shlib = common_sources
common_sources_frontend_shlib += files(

3: ae3ae1cfaa = 3: faf3707623 common/jsonapi: support libpq as a client

4: 92b257643e ! 4: 4017611c19 libpq: add OAUTHBEARER SASL mechanism
@@ Commit message
Thomas Munro wrote the kqueue() implementation for oauth-curl; thanks!

+ = Debug Mode =
+
+ A "dangerous debugging mode" may be enabled in libpq, by setting the
+ environment variable PGOAUTHDEBUG=UNSAFE. This will do several things
+ that you will not want in a production system:
+
+ - permits the use of plaintext HTTP in the OAuth provider exchange
+ - sprays HTTP traffic, containing several critical secrets, to stderr
+ - permits the use of zero-second retry intervals, which can DoS the
+ client
+
= PQauthDataHook =

Clients may override two pieces of OAuth handling using the new
@@ Commit message
condition?)
- support require_auth
- fill in documentation stubs
+ - support protocol "variants" implemented by major providers
- ...and more.

Co-authored-by: Daniel Gustafsson

@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ struct provider provider;
+ struct device_authz authz;
+
++ int running; /* is asynchronous work in progress? */
+ bool user_prompted; /* have we already sent the authz prompt? */
++ bool debugging; /* can we give unsafe developer assistance? */
+};
+
+/*
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ * RFC 8628 is pretty silent on sanity checks for the interval. As a matter of
+ * practicality, round any fractional intervals up to the next second, and clamp
+ * the result at a minimum of one. (Zero-second intervals would result in an
-+ * expensive network polling loop.)
++ * expensive network polling loop.) Tests may remove the lower bound with
++ * PGOAUTHDEBUG, for improved performance.
+ *
+ * TODO: maybe clamp the upper bound too, based on the libpq timeout and/or the
+ * code expiration time?
+ */
+static int
-+parse_interval(const char *interval_str)
++parse_interval(struct async_ctx *actx, const char *interval_str)
+{
+ double parsed;
+ int cnt;
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ parsed = ceil(parsed);
+
+ if (parsed < 1)
-+ return 1; /* TODO this slows down the tests
-+ * considerably... */
++ return actx->debugging ? 0 : 1;
++
+ else if (INT_MAX <= parsed)
+ return INT_MAX;
+
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ {"verification_uri", JSON_TOKEN_STRING, {&authz->verification_uri}, REQUIRED},
+
+ /*
++ * Some services (Google, Azure) spell verification_uri differently. We
++ * accept either.
++ */
++ {"verification_url", JSON_TOKEN_STRING, {&authz->verification_uri}, REQUIRED},
++
+ /*
+ * The following fields are technically REQUIRED, but we don't use
+ * them anywhere yet:
+ *
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ * we at least know they're valid JSON numbers.
+ */
+ if (authz->interval_str)
-+ authz->interval = parse_interval(authz->interval_str);
++ authz->interval = parse_interval(actx, authz->interval_str);
+ else
+ {
+ /*
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+}
+
+/*
-+ * Adds or removes timeouts from the multiplexer set, as directed by the
-+ * libcurl multi handle. Rather than continually adding and removing the timer,
-+ * we keep it in the set at all times and just disarm it when it's not needed.
++ * Enables or disables the timer in the multiplexer set. The timeout value is
++ * in milliseconds (negative values disable the timer). Rather than continually
++ * adding and removing the timer, we keep it in the set at all times and just
++ * disarm it when it's not needed.
+ */
-+static int
-+register_timer(CURLM *curlm, long timeout, void *ctx)
++static bool
++set_timer(struct async_ctx *actx, long timeout)
+{
+#if HAVE_SYS_EPOLL_H
-+ struct async_ctx *actx = ctx;
+ struct itimerspec spec = {0};
+
+ if (timeout < 0)
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ * A zero timeout means libcurl wants us to call back immediately.
+ * That's not technically an option for timerfd, but we can make the
+ * timeout ridiculously short.
-+ *
-+ * TODO: maybe just signal drive_request() to immediately call back in
-+ * this case?
+ */
+ spec.it_value.tv_nsec = 1;
+ }
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ if (timerfd_settime(actx->timerfd, 0 /* no flags */ , &spec, NULL) < 0)
+ {
+ actx_error(actx, "setting timerfd to %ld: %m", timeout);
-+ return -1;
++ return false;
+ }
+#endif
+#ifdef HAVE_SYS_EVENT_H
-+ struct async_ctx *actx = ctx;
+ struct kevent ev;
+
+ EV_SET(&ev, 1, EVFILT_TIMER, timeout < 0 ? EV_DELETE : EV_ADD,
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ if (kevent(actx->mux, &ev, 1, NULL, 0, NULL) < 0 && errno != ENOENT)
+ {
+ actx_error(actx, "setting kqueue timer to %ld: %m", timeout);
-+ return -1;
++ return false;
+ }
+#endif
+
++ return true;
++}
++
++/*
++ * Adds or removes timeouts from the multiplexer set, as directed by the
++ * libcurl multi handle.
++ */
++static int
++register_timer(CURLM *curlm, long timeout, void *ctx)
++{
++ struct async_ctx *actx = ctx;
++
++ /*
++ * TODO: maybe just signal drive_request() to immediately call back in
++ * the (timeout == 0) case?
++ */
++ if (!set_timer(actx, timeout))
++ return -1; /* actx_error already called */
++
++ return 0;
++}
++
++/*
++ * Prints Curl request debugging information to stderr.
++ *
++ * Note that this will expose a number of critical secrets, so users have to opt
++ * into this (see PGOAUTHDEBUG).
++ */
++static int
++debug_callback(CURL *handle, curl_infotype type, char *data, size_t size,
++ void *clientp)
++{
++ const char * const end = data + size;
++ const char *prefix;
++
++ /* Prefixes are modeled off of the default libcurl debug output. */
++ switch (type)
++ {
++ case CURLINFO_TEXT:
++ prefix = "*";
++ break;
++
++ case CURLINFO_HEADER_IN: /* fall through */
++ case CURLINFO_DATA_IN:
++ prefix = "<";
++ break;
++
++ case CURLINFO_HEADER_OUT: /* fall through */
++ case CURLINFO_DATA_OUT:
++ prefix = ">";
++ break;
++
++ default:
++ return 0;
++ }
++
++ /*
++ * Split the output into lines for readability; sometimes multiple headers
++ * are included in a single call.
++ */
++ while (data < end)
++ {
++ size_t len = end - data;
++ char *eol = memchr(data, '\n', len);
++
++ if (eol)
++ len = eol - data + 1;
++
++ /* TODO: handle unprintables */
++ fprintf(stderr, "%s %.*s%s", prefix, (int) len, data,
++ eol ? "" : "\n");
++
++ data += len;
++ }
++
+ return 0;
+}
+
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ /* No alternative resolver, TODO: warn about timeouts */
+ }
+
-+ /* TODO investigate using conn->Pfdebug and CURLOPT_DEBUGFUNCTION here */
++ if (actx->debugging)
++ {
++ /*
++ * Set a callback for retrieving error information from libcurl, the
++ * function only takes effect when CURLOPT_VERBOSE has been set so make
++ * sure the order is kept.
++ */
++ CHECK_SETOPT(actx, CURLOPT_DEBUGFUNCTION, debug_callback, return false);
+ CHECK_SETOPT(actx, CURLOPT_VERBOSE, 1L, return false);
++ }
++
+ CHECK_SETOPT(actx, CURLOPT_ERRORBUFFER, actx->curl_err, return false);
+
+ /*
-+ * Only HTTP[S] is allowed. TODO: disallow HTTP without user opt-in
++ * Only HTTPS is allowed. (Debug mode additionally allows HTTP; this is
++ * intended for testing only.)
++ *
++ * There's a bit of unfortunate complexity around the choice of CURLoption.
++ * CURLOPT_PROTOCOLS is deprecated in modern Curls, but its replacement
++ * didn't show up until relatively recently.
+ */
-+ CHECK_SETOPT(actx, CURLOPT_PROTOCOLS, CURLPROTO_HTTP | CURLPROTO_HTTPS, return false);
++ {
++#if CURL_AT_LEAST_VERSION(7, 85, 0)
++ const CURLoption popt = CURLOPT_PROTOCOLS_STR;
++ const char *protos = "https";
++ const char * const unsafe = "https,http";
++#else
++ const CURLoption popt = CURLOPT_PROTOCOLS;
++ long protos = CURLPROTO_HTTPS;
++ const long unsafe = CURLPROTO_HTTPS | CURLPROTO_HTTP;
++#endif
++
++ if (actx->debugging)
++ protos = unsafe;
++
++ CHECK_SETOPT(actx, popt, protos, return false);
++ }
+
+ /*
+ * Suppress the Accept header to make our request as minimal as possible.
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ * anything important there across this call).
+ *
+ * Once a request is queued, it can be driven to completion via drive_request().
++ * If actx->running is zero upon return, the request has already finished and
++ * drive_request() can be called without returning control to the client.
+ */
+static bool
+start_request(struct async_ctx *actx)
+{
+ CURLMcode err;
-+ int running;
+
+ resetPQExpBuffer(&actx->work_data);
+ CHECK_SETOPT(actx, CURLOPT_WRITEFUNCTION, append_data, return false);
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ return false;
+ }
+
-+ err = curl_multi_socket_action(actx->curlm, CURL_SOCKET_TIMEOUT, 0, &running);
-+ if (err)
-+ {
-+ actx_error(actx, "asynchronous HTTP request failed: %s",
-+ curl_multi_strerror(err));
-+ return false;
-+ }
-+
+ /*
-+ * Sanity check.
++ * actx->running tracks the number of running handles, so we can immediately
++ * call back if no waiting is needed.
+ *
-+ * TODO: even though this is nominally an asynchronous process, there are
-+ * apparently operations that can synchronously fail by this point, such
-+ * as connections to closed local ports. Maybe we need to let this case
-+ * fall through to drive_request instead, or else perform a
-+ * curl_multi_info_read immediately.
++ * Even though this is nominally an asynchronous process, there are some
++ * operations that can synchronously fail by this point (e.g. connections
++ * to closed local ports) or even synchronously succeed if the stars align
++ * (all the libcurl connection caches hit and the server is fast).
+ */
-+ if (running != 1)
++ err = curl_multi_socket_action(actx->curlm, CURL_SOCKET_TIMEOUT, 0, &actx->running);
++ if (err)
+ {
-+ actx_error(actx, "failed to queue HTTP request");
++ actx_error(actx, "asynchronous HTTP request failed: %s",
++ curl_multi_strerror(err));
+ return false;
+ }
+
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+}
+
+/*
++ * CURL_IGNORE_DEPRECATION was added in 7.87.0. If it's not defined, we can make
++ * it a no-op.
++ */
++#ifndef CURL_IGNORE_DEPRECATION
++#define CURL_IGNORE_DEPRECATION(x) x
++#endif
++
++/*
+ * Drives the multi handle towards completion. The caller should have already
+ * set up an asynchronous request via start_request().
+ */
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+drive_request(struct async_ctx *actx)
+{
+ CURLMcode err;
-+ int running;
+ CURLMsg *msg;
+ int msgs_left;
+ bool done;
+
-+ err = curl_multi_socket_all(actx->curlm, &running);
++ if (actx->running)
++ {
++ /*
++ * There's an async request in progress. Pump the multi handle.
++ *
++ * TODO: curl_multi_socket_all() is deprecated, presumably because it's
++ * inefficient and pointless if your event loop has already handed you
++ * the exact sockets that are ready. But that's not our use case --
++ * our client has no way to tell us which sockets are ready. (They don't
++ * even know there are sockets to begin with.)
++ *
++ * We can grab the list of triggered events from the multiplexer
++ * ourselves, but that's effectively what curl_multi_socket_all() is
++ * going to do... so it appears to be exactly the API we need.
++ *
++ * Ignore the deprecation for now. This needs a followup on
++ * curl-library@, to make sure we're not shooting ourselves in the foot
++ * in some other way.
++ */
++ CURL_IGNORE_DEPRECATION(
++ err = curl_multi_socket_all(actx->curlm, &actx->running);
++ )
++
+ if (err)
+ {
+ actx_error(actx, "asynchronous HTTP request failed: %s",
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ return PGRES_POLLING_FAILED;
+ }
+
-+ if (running)
++ if (actx->running)
+ {
+ /* We'll come back again. */
+ return PGRES_POLLING_READING;
+ }
++ }
+
+ done = false;
+ while ((msg = curl_multi_info_read(actx->curlm, &msgs_left)) != NULL)
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ return true;
+}
+
++/*
++ * Finishes the token request and examines the response. If the flow has
++ * completed, a valid token will be returned via the parameter list. Otherwise,
++ * the token parameter remains unchanged, and the caller needs to wait for
++ * another interval (which will have been increased in response to a slow_down
++ * message from the server) before starting a new token request.
++ *
++ * False is returned only for permanent error conditions.
++ */
++static bool
++handle_token_response(struct async_ctx *actx, char **token)
++{
++ bool success = false;
++ struct token tok = {0};
++ const struct token_error *err;
++
++ if (!finish_token_request(actx, &tok))
++ goto token_cleanup;
++
++ if (tok.access_token)
++ {
++ /* Construct our Bearer token. */
++ resetPQExpBuffer(&actx->work_data);
++ appendPQExpBuffer(&actx->work_data, "Bearer %s", tok.access_token);
++
++ if (PQExpBufferDataBroken(actx->work_data))
++ {
++ actx_error(actx, "out of memory");
++ goto token_cleanup;
++ }
++
++ *token = strdup(actx->work_data.data);
++ if (!*token)
++ {
++ actx_error(actx, "out of memory");
++ goto token_cleanup;
++ }
++
++ success = true;
++ goto token_cleanup;
++ }
++
++ /*
++ * authorization_pending and slow_down are the only
++ * acceptable errors; anything else and we bail.
++ */
++ err = &tok.err;
++ if (!err->error)
++ {
++ /* TODO test */
++ actx_error(actx, "unknown error");
++ goto token_cleanup;
++ }
++
++ if (strcmp(err->error, "authorization_pending") != 0 &&
++ strcmp(err->error, "slow_down") != 0)
++ {
++ if (err->error_description)
++ appendPQExpBuffer(&actx->errbuf, "%s ", err->error_description);
++
++ appendPQExpBuffer(&actx->errbuf, "(%s)", err->error);
++ goto token_cleanup;
++ }
++
++ /*
++ * A slow_down error requires us to permanently increase
++ * our retry interval by five seconds. RFC 8628, Sec. 3.5.
++ */
++ if (strcmp(err->error, "slow_down") == 0)
++ {
++ int prev_interval = actx->authz.interval;
++
++ actx->authz.interval += 5;
++ if (actx->authz.interval < prev_interval)
++ {
++ actx_error(actx, "slow_down interval overflow");
++ goto token_cleanup;
++ }
++ }
++
++ success = true;
++
++token_cleanup:
++ free_token(&tok);
++ return success;
++}
++
++/*
++ * Displays a device authorization prompt for action by the end user, either via
++ * the PQauthDataHook, or by a message on standard error if no hook is set.
++ */
++static bool
++prompt_user(struct async_ctx *actx, PGconn *conn)
++{
++ int res;
++ PQpromptOAuthDevice prompt = {
++ .verification_uri = actx->authz.verification_uri,
++ .user_code = actx->authz.user_code,
++ /* TODO: optional fields */
++ };
++
++ res = PQauthDataHook(PQAUTHDATA_PROMPT_OAUTH_DEVICE, conn, &prompt);
++
++ if (!res)
++ {
++ fprintf(stderr, "Visit %s and enter the code: %s",
++ prompt.verification_uri, prompt.user_code);
++ }
++ else if (res < 0)
++ {
++ actx_error(actx, "device prompt failed");
++ return false;
++ }
++
++ return true;
++}
++
+
+/*
+ * The top-level, nonblocking entry point for the libcurl implementation. This
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ fe_oauth_state *state = conn->sasl_state;
+ struct async_ctx *actx;
+
-+ struct token tok = {0};
-+
+ /*
+ * XXX This is not safe. libcurl has stringent requirements for the thread
+ * context in which you call curl_global_init(), because it's going to try
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+
+ if (!state->async_ctx)
+ {
++ const char *env;
++
+ /*
+ * Create our asynchronous state, and hook it into the upper-level
+ * OAuth state immediately, so any failures below won't leak the
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ actx->timerfd = -1;
+#endif
+
++ /* Should we enable unsafe features? */
++ env = getenv("PGOAUTHDEBUG");
++ if (env && strcmp(env, "UNSAFE") == 0)
++ actx->debugging = true;
++
+ state->async_ctx = actx;
+ state->free_async_ctx = free_curl_async_ctx;
+
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+
+ actx = state->async_ctx;
+
++ do
++ {
+ /* By default, the multiplexer is the altsock. Reassign as desired. */
+ *altsock = actx->mux;
+
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ else if (status != PGRES_POLLING_OK)
+ {
+ /* not done yet */
-+ free_token(&tok);
+ return status;
+ }
+ }
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ break;
+ }
+
++ /*
++ * Each case here must ensure that actx->running is set while we're
++ * waiting on some asynchronous work. Most cases rely on start_request()
++ * to do that for them.
++ */
+ switch (actx->step)
+ {
+ case OAUTH_STEP_INIT:
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ break;
+
+ case OAUTH_STEP_TOKEN_REQUEST:
-+ {
-+ const struct token_error *err;
-+#ifdef HAVE_SYS_EPOLL_H
-+ struct itimerspec spec = {0};
-+#endif
-+#ifdef HAVE_SYS_EVENT_H
-+ struct kevent ev = {0};
-+#endif
-+
-+ if (!finish_token_request(actx, &tok))
++ if (!handle_token_response(actx, &state->token))
+ goto error_return;
+
+ if (!actx->user_prompted)
+ {
-+ int res;
-+ PQpromptOAuthDevice prompt = {
-+ .verification_uri = actx->authz.verification_uri,
-+ .user_code = actx->authz.user_code,
-+ /* TODO: optional fields */
-+ };
-+
+ /*
+ * Now that we know the token endpoint isn't broken, give
+ * the user the login instructions.
+ */
-+ res = PQauthDataHook(PQAUTHDATA_PROMPT_OAUTH_DEVICE, conn,
-+ &prompt);
-+
-+ if (!res)
-+ {
-+ fprintf(stderr, "Visit %s and enter the code: %s",
-+ prompt.verification_uri, prompt.user_code);
-+ }
-+ else if (res < 0)
-+ {
-+ actx_error(actx, "device prompt failed");
++ if (!prompt_user(actx, conn))
+ goto error_return;
-+ }
+
+ actx->user_prompted = true;
+ }
+
-+ if (tok.access_token)
-+ {
-+ /* Construct our Bearer token. */
-+ resetPQExpBuffer(&actx->work_data);
-+ appendPQExpBuffer(&actx->work_data, "Bearer %s",
-+ tok.access_token);
-+
-+ if (PQExpBufferDataBroken(actx->work_data))
-+ {
-+ actx_error(actx, "out of memory");
-+ goto error_return;
-+ }
-+
-+ state->token = strdup(actx->work_data.data);
-+ break;
-+ }
-+
-+ /*
-+ * authorization_pending and slow_down are the only acceptable
-+ * errors; anything else and we bail.
-+ */
-+ err = &tok.err;
-+ if (!err->error || (strcmp(err->error, "authorization_pending")
-+ && strcmp(err->error, "slow_down")))
-+ {
-+ /* TODO handle !err->error */
-+ if (err->error_description)
-+ appendPQExpBuffer(&actx->errbuf, "%s ",
-+ err->error_description);
-+
-+ appendPQExpBuffer(&actx->errbuf, "(%s)", err->error);
-+
-+ goto error_return;
-+ }
-+
-+ /*
-+ * A slow_down error requires us to permanently increase our
-+ * retry interval by five seconds. RFC 8628, Sec. 3.5.
-+ */
-+ if (strcmp(err->error, "slow_down") == 0)
-+ {
-+ int prev_interval = actx->authz.interval;
-+
-+ actx->authz.interval += 5;
-+ if (actx->authz.interval < prev_interval)
-+ {
-+ actx_error(actx, "slow_down interval overflow");
-+ goto error_return;
-+ }
-+ }
++ if (state->token)
++ break; /* done! */
+
+ /*
+ * Wait for the required interval before issuing the next
+ * request.
+ */
-+ Assert(actx->authz.interval > 0);
-+#ifdef HAVE_SYS_EPOLL_H
-+ spec.it_value.tv_sec = actx->authz.interval;
-+
-+ if (timerfd_settime(actx->timerfd, 0 /* no flags */ , &spec, NULL) < 0)
-+ {
-+ actx_error(actx, "failed to set timerfd: %m");
++ if (!set_timer(actx, actx->authz.interval * 1000))
+ goto error_return;
-+ }
+
++#ifdef HAVE_SYS_EPOLL_H
++ /*
++ * No Curl requests are running, so we can simplify by
++ * having the client wait directly on the timerfd rather
++ * than the multiplexer. (This isn't possible for kqueue.)
++ */
+ *altsock = actx->timerfd;
+#endif
-+#ifdef HAVE_SYS_EVENT_H
-+ /* XXX: I guess this wants to be hidden in a routine */
-+ EV_SET(&ev, 1, EVFILT_TIMER, EV_ADD, 0,
-+ actx->authz.interval * 1000, 0);
-+ if (kevent(actx->mux, &ev, 1, NULL, 0, NULL) < 0)
-+ {
-+ actx_error(actx, "failed to set kqueue timer: %m");
-+ goto error_return;
-+ }
-+ /* XXX: why did we change the altsock in the epoll version? */
-+#endif
++
+ actx->step = OAUTH_STEP_WAIT_INTERVAL;
++ actx->running = 1;
+ break;
-+ }
+
+ case OAUTH_STEP_WAIT_INTERVAL:
+ actx->errctx = "failed to obtain access token";
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+ break;
+ }
+
-+ free_token(&tok);
++ /*
++ * The vast majority of the time, if we don't have a token at this
++ * point, actx->running will be set. But there are some corner cases
++ * where we can immediately loop back around; see start_request().
++ */
++ } while (!state->token && !actx->running);
+
+ /* If we've stored a token, we're done. Otherwise come back later. */
+ return state->token ? PGRES_POLLING_OK : PGRES_POLLING_READING;
@@ src/interfaces/libpq/fe-auth-oauth-curl.c (new)
+
+ appendPQExpBufferStr(&conn->errorMessage, "\n");
+
-+ free_token(&tok);
+ return PGRES_POLLING_FAILED;
+}

5: 16994b449d ! 5: eaff43dc27 backend: add OAUTHBEARER SASL mechanism
@@ Commit message
further checks are done. Several TODOs:
- - port to platforms other than "modern Linux/BSD"
- implement more helpful handling of HBA misconfigurations
- use logdetail during auth failures
- allow passing the configured issuer to the oauth_validator_command, to
@@ .cirrus.tasks.yml: task:
+ libcurl4-openssl-dev:i386 \
matrix:
- - name: Linux - Debian Bullseye - Autoconf
+ - name: Linux - Debian Bookworm - Autoconf
@@ .cirrus.tasks.yml: task:
folder: $CCACHE_DIR
@@ src/backend/utils/misc/guc_tables.c
#include "nodes/queryjumble.h"
#include "optimizer/cost.h"
@@ src/backend/utils/misc/guc_tables.c: struct config_string ConfigureNamesString[] =
- check_synchronized_standby_slots, assign_synchronized_standby_slots, NULL
+ check_restrict_nonsystem_relation_kind, assign_restrict_nonsystem_relation_kind, NULL
},
+ {
@@ src/interfaces/libpq/fe-auth-oauth-curl.c: free_token(struct token *tok)
OAUTH_STEP_DISCOVERY,
OAUTH_STEP_DEVICE_AUTHORIZATION,
OAUTH_STEP_TOKEN_REQUEST,
+@@ src/interfaces/libpq/fe-auth-oauth-curl.c: handle_token_response(struct async_ctx *actx, char **token)
+ if (!finish_token_request(actx, &tok))
+ goto token_cleanup;
+
++ /* A successful token request gives either a token or an in-band error. */
++ Assert(tok.access_token || tok.err.error);
++
+ if (tok.access_token)
+ {
+ /* Construct our Bearer token. */
+@@ src/interfaces/libpq/fe-auth-oauth-curl.c: handle_token_response(struct async_ctx *actx, char **token)
+ * acceptable errors; anything else and we bail.
+ */
+ err = &tok.err;
+- if (!err->error)
+- {
+- /* TODO test */
+- actx_error(actx, "unknown error");
+- goto token_cleanup;
+- }
+-
+ if (strcmp(err->error, "authorization_pending") != 0 &&
+ strcmp(err->error, "slow_down") != 0)
+ {

## src/test/modules/Makefile ##
@@ src/test/modules/Makefile: SUBDIRS = \

@@ src/test/modules/oauth_validator/t/001_server.pl (new)
+my ($log_start, $log_end);
+$log_start = $node->wait_for_log(qr/reloading configuration files/);
+
++
++# To test against HTTP rather than HTTPS, we need to enable PGOAUTHDEBUG. But
++# first, check to make sure the client refuses such connections by default.
++$node->connect_fails("user=test dbname=postgres oauth_client_id=f02c6361-0635",
++ "HTTPS is required without debug mode",
++ expected_stderr => qr/failed to fetch OpenID discovery document: Unsupported protocol/);
++
++$ENV{PGOAUTHDEBUG} = "UNSAFE";
++
+my $user = "test";
+if ($node->connect_ok("user=$user dbname=postgres oauth_client_id=f02c6361-0635", "connect",
+ expected_stderr => qr@Visit https://example\.com/ and enter the code: postgresuser@))
@@ src/test/modules/oauth_validator/t/001_server.pl (new)
+ "content type with charset (whitespace)",
+ expected_stderr => qr@Visit https://example\.com/ and enter the code: postgresuser@
+);
++$node->connect_ok(
++ connstr(stage => 'device', uri_spelling => "verification_url"),
++ "alternative spelling of verification_uri",
++ expected_stderr => qr@Visit https://example\.com/ and enter the code: postgresuser@
++);
+
+$node->connect_fails(
+ connstr(stage => 'device', content_type => 'text/plain'),
@@ src/test/modules/oauth_validator/t/oauth_server.py (new)
+
+ return "authorization_pending"
+
++ def _uri_spelling(self) -> str:
++ """
++ Returns "verification_uri" unless the test has requested something
++ different.
++ """
++ if self._should_modify() and "uri_spelling" in self._test_params:
++ return self._test_params["uri_spelling"]
++
++ return "verification_uri"
++
+ def _send_json(self, js: JsonObject) -> None:
+ """
+ Sends the provided JSON dict as an application/json response.
@@ src/test/modules/oauth_validator/t/oauth_server.py (new)
+ resp = {
+ "device_code": "postgres",
+ "user_code": "postgresuser",
-+ "verification_uri": uri,
++ self._uri_spelling(): uri,
+ "expires-in": 5,
+ }
+
@@ src/test/perl/PostgreSQL/Test/Cluster.pm: sub connect_ok
- is($stderr, "", "$test_name: no stderr");
+ if (defined($params{expected_stderr}))
+ {
-+ like($stderr, $params{expected_stderr}, "$test_name: stderr matches");
++ if (like($stderr, $params{expected_stderr}, "$test_name: stderr matches")
++ && ($ret != 0))
++ {
++ # In this case (failing test but matching stderr) we'll have
++ # swallowed the output needed to debug. Put it back into the logs.
++ diag("$test_name: full stderr:\n" . $stderr);
++ }
+ }
+ else
+ {

6: d163d2ca0a ! 6: 5253f7190a Review comments
@@ src/interfaces/libpq/fe-auth-oauth-curl.c
/*
* Parsed JSON Representations
*
-@@ src/interfaces/libpq/fe-auth-oauth-curl.c: struct async_ctx
- struct device_authz authz;
-
- bool user_prompted; /* have we already sent the authz prompt? */
-+
-+ int running;
- };
-
- /*
@@ src/interfaces/libpq/fe-auth-oauth-curl.c: parse_oauth_json(struct async_ctx *actx, const struct json_field *fields)
return false;
}
@@ src/interfaces/libpq/fe-auth-oauth-curl.c: append_data(char *buf, size_t size, s
return len;
}
-@@ src/interfaces/libpq/fe-auth-oauth-curl.c: static bool
- start_request(struct async_ctx *actx)
- {
- CURLMcode err;
-- int running;
-
- resetPQExpBuffer(&actx->work_data);
- CHECK_SETOPT(actx, CURLOPT_WRITEFUNCTION, append_data, return false);
-@@ src/interfaces/libpq/fe-auth-oauth-curl.c: start_request(struct async_ctx *actx)
- return false;
- }
-
-- err = curl_multi_socket_action(actx->curlm, CURL_SOCKET_TIMEOUT, 0, &running);
-+ err = curl_multi_socket_action(actx->curlm, CURL_SOCKET_TIMEOUT, 0, &actx->running);
- if (err)
- {
- actx_error(actx, "asynchronous HTTP request failed: %s",
-@@ src/interfaces/libpq/fe-auth-oauth-curl.c: start_request(struct async_ctx *actx)
- }
-
- /*
-- * Sanity check.
-- *
-- * TODO: even though this is nominally an asynchronous process, there are
-- * apparently operations that can synchronously fail by this point, such
-- * as connections to closed local ports. Maybe we need to let this case
-- * fall through to drive_request instead, or else perform a
-- * curl_multi_info_read immediately.
-+ * Even though this is nominally an asynchronous process, there are some
-+ * operations that can synchronously fail by this point like connections
-+ * to closed local ports. Fall through and leave the sanity check for the
-+ * next state consuming actx.
- */
-- if (running != 1)
-- {
-- actx_error(actx, "failed to queue HTTP request");
-- return false;
-- }
-
- return true;
- }
-@@ src/interfaces/libpq/fe-auth-oauth-curl.c: static PostgresPollingStatusType
- drive_request(struct async_ctx *actx)
- {
- CURLMcode err;
-- int running;
- CURLMsg *msg;
- int msgs_left;
- bool done;
-
-- err = curl_multi_socket_all(actx->curlm, &running);
-+ /* Sanity check the previous operation */
-+ if (actx->running != 1)
-+ {
-+ actx_error(actx, "failed to queue HTTP request");
-+ return false;
-+ }
-+
-+ err = curl_multi_socket_all(actx->curlm, &actx->running);
- if (err)
- {
- actx_error(actx, "asynchronous HTTP request failed: %s",
-@@ src/interfaces/libpq/fe-auth-oauth-curl.c: drive_request(struct async_ctx *actx)
- return PGRES_POLLING_FAILED;
- }
-
-- if (running)
-+ if (actx->running)
- {
- /* We'll come back again. */
- return PGRES_POLLING_READING;
@@ src/interfaces/libpq/fe-auth-oauth-curl.c: start_device_authz(struct async_ctx *actx, PGconn *conn)
appendPQExpBuffer(work_buffer, "client_id=%s", conn->oauth_client_id);
if (conn->oauth_scope)
@@ src/interfaces/libpq/fe-auth-oauth-curl.c: finish_token_request(struct async_ctx
+ return false;
}
-
-@@ src/interfaces/libpq/fe-auth-oauth-curl.c: pg_fe_run_oauth_flow(PGconn *conn, pgsocket *altsock)
- * errors; anything else and we bail.
- */
- err = &tok.err;
-- if (!err->error || (strcmp(err->error, "authorization_pending")
-- && strcmp(err->error, "slow_down")))
-+ if (!err->error)
-+ {
-+ actx_error(actx, "unknown error");
-+ goto error_return;
-+ }
-+
-+ if (strcmp(err->error, "authorization_pending") != 0 &&
-+ strcmp(err->error, "slow_down") != 0)
- {
-- /* TODO handle !err->error */
- if (err->error_description)
- appendPQExpBuffer(&actx->errbuf, "%s ",
- err->error_description);
-
- appendPQExpBuffer(&actx->errbuf, "(%s)", err->error);
--
- goto error_return;
- }
-
+ /*

## src/interfaces/libpq/fe-auth-oauth.c ##
@@ src/interfaces/libpq/fe-auth-oauth.c: handle_oauth_sasl_error(PGconn *conn, char *msg, int msglen)

7: 9117bf8be2 ! 7: 8b9641b122 DO NOT MERGE: Add pytest suite for OAuth
@@ .cirrus.tasks.yml: task:
+ python3-venv \
matrix:
- - name: Linux - Debian Bullseye - Autoconf
+ - name: Linux - Debian Bookworm - Autoconf
@@ .cirrus.tasks.yml: task:
# Also build & test in a 32bit build - it's gotten rare to test that
@@ .cirrus.tasks.yml: task:
@@ .cirrus.tasks.yml: task:
-Dllvm=disabled \
--pkg-config-path /usr/lib/i386-linux-gnu/pkgconfig/ \
- -DPERL=perl5.32-i386-linux-gnu \
+ -DPERL=perl5.36-i386-linux-gnu \
- -DPG_TEST_EXTRA="$PG_TEST_EXTRA" \
+ -DPG_TEST_EXTRA="${PG_TEST_EXTRA//"python"}" \
build-32
@@ src/test/python/client/conftest.py (new)
+# SPDX-License-Identifier: PostgreSQL
+#
+
++import contextlib
+import socket
+import sys
+import threading
@@ src/test/python/client/conftest.py (new)
+ def run(self):
+ try:
+ conn = psycopg2.connect(host="127.0.0.1", **self._kwargs)
++ with contextlib.closing(conn):
+ self._pump_async(conn)
-+ conn.close()
+ except Exception as e:
+ self.exception = e
+
@@ src/test/python/client/test_oauth.py (new)
+ self._handle(params=params)
+
+
++@pytest.fixture(autouse=True)
++def enable_client_oauth_debugging(monkeypatch):
++ """
++ HTTP providers aren't allowed by default; enable them via envvar.
++ """
++ monkeypatch.setenv("PGOAUTHDEBUG", "UNSAFE")
++
++
+@pytest.fixture
+def openid_provider(unused_tcp_port_factory):
+ """
@@ src/test/python/client/test_oauth.py (new)
+ pytest.param("application/json \t;\t charset=utf-8", id="charset (whitespace)"),
+ ],
+)
++@pytest.mark.parametrize("uri_spelling", ["verification_url", "verification_uri"])
+@pytest.mark.parametrize(
+ "asynchronous",
+ [
@@ src/test/python/client/test_oauth.py (new)
+ accept,
+ openid_provider,
+ asynchronous,
++ uri_spelling,
+ content_type,
+ retries,
+ scope,
@@ src/test/python/client/test_oauth.py (new)
+ "device_code": device_code,
+ "user_code": user_code,
+ "interval": 0,
-+ "verification_uri": verification_url,
++ uri_spelling: verification_url,
+ "expires_in": 5,
+ }
+
@@ src/test/python/client/test_oauth.py (new)
+
+ expected_error = "slow_down interval overflow"
+ with pytest.raises(psycopg2.OperationalError, match=expected_error):
++ client.check_completed()
++
++
++def test_oauth_refuses_http(accept, openid_provider, monkeypatch):
++ """
++ HTTP must be refused without PGOAUTHDEBUG.
++ """
++ monkeypatch.delenv("PGOAUTHDEBUG")
++ sock, client = accept(
++ oauth_client_id=secrets.token_hex(),
++ )
++
++ # No provider callbacks necessary; we should fail immediately.
++
++ with sock:
++ with pq3.wrap(sock, debug_stream=sys.stdout) as conn:
++ initial = start_oauth_handshake(conn)
++
++ # Fail the SASL exchange and link to the HTTP provider.
++ discovery_uri = f"{openid_provider.issuer}/.well-known/openid-configuration"
++ resp = json.dumps(
++ {
++ "status": "invalid_token",
++ "openid-configuration": discovery_uri,
++ }
++ )
++
++ pq3.send(
++ conn,
++ pq3.types.AuthnRequest,
++ type=pq3.authn.SASLContinue,
++ body=resp.encode("ascii"),
++ )
++
++ # Per RFC, the client is required to send a dummy ^A response.
++ pkt = pq3.recv1(conn)
++ assert pkt.type == pq3.types.PasswordMessage
++ assert pkt.payload == b"\x01"
++
++ # Now fail the SASL exchange.
++ pq3.send(
++ conn,
++ pq3.types.ErrorResponse,
++ fields=[
++ b"SFATAL",
++ b"C28000",
++ b"Mdoesn't matter",
++ b"",
++ ],
++ )
++
++ # FIXME: We'll get a second connection, but it won't do anything.
++ sock, _ = accept()
++ expect_disconnected_handshake(sock)
++
++ expected_error = "failed to fetch OpenID discovery document: Unsupported protocol"
++ with pytest.raises(psycopg2.OperationalError, match=expected_error):
+ client.check_completed()

## src/test/python/conftest.py (new) ##