-rw-r--r-- | INSTALL                              | 211
-rw-r--r-- | INSTALL-CI-DEV                       |   2
-rw-r--r-- | INSTALL-DEV                          |   1
-rw-r--r-- | INSTALL-GITHUB-DEV                   |  33
-rw-r--r-- | INSTALL-PROXY                        |   2
-rw-r--r-- | LICENSE                              |   2
-rw-r--r-- | etc/brep-module.conf                 |   8
-rw-r--r-- | etc/private/install/brep-module.conf |   8
-rw-r--r-- | libbrep/utility.hxx                  |   5
-rw-r--r-- | manifest                             |   6
-rw-r--r-- | mod/hmac.cxx                         |  13
-rw-r--r-- | mod/mod-build-configs.cxx            |   6
-rw-r--r-- | mod/mod-build-result.cxx             |  30
-rw-r--r-- | mod/mod-build-task.cxx               |  27
-rw-r--r-- | mod/mod-builds.cxx                   |   6
-rw-r--r-- | mod/mod-ci-github-gh.cxx             |  52
-rw-r--r-- | mod/mod-ci-github-gh.hxx             |  52
-rw-r--r-- | mod/mod-ci-github-gq.cxx             |  94
-rw-r--r-- | mod/mod-ci-github-gq.hxx             |  36
-rw-r--r-- | mod/mod-ci-github-service-data.cxx   |  10
-rw-r--r-- | mod/mod-ci-github-service-data.hxx   |  13
-rw-r--r-- | mod/mod-ci-github.cxx                | 955
-rw-r--r-- | mod/mod-ci-github.hxx                |  34
-rw-r--r-- | mod/mod-ci.cxx                       |  55
-rw-r--r-- | mod/mod-ci.hxx                       |   4
-rw-r--r-- | mod/mod-submit.cxx                   |  10
-rw-r--r-- | mod/module.cli                       |  10
-rw-r--r-- | mod/tenant-service.cxx               |  18
-rw-r--r-- | mod/tenant-service.hxx               |  12
29 files changed, 1217 insertions, 498 deletions
@@ -1,7 +1,8 @@ -This guide shows how to install and configure brep on a "deployment" machine as -opposed to a "development" one (see INSTALL-DEV for the latter). Here we assume -you are using a systemd-based distribution. If not, then you will need to -replace systemctl commands with the equivalent init.d ones. +This guide describes how to install and configure brep on a "deployment" +machine as opposed to a "development" one (see INSTALL-DEV for the +latter). Here we assume you are using a systemd-based distribution. If not, +then you will need to replace systemctl commands with the equivalent init.d +ones. The below instructions include steps for setting up brep as the build2 build bot controller, package submission, and CI request services. All these @@ -233,6 +234,61 @@ $ psql -d brep_build -c 'SELECT DISTINCT name FROM build_package' $ cp install/share/brep/etc/brep-module.conf config/ $ edit config/brep-module.conf # Adjust default values if required. +See the following sub-sections for details on configuring various optional +brep functionality. + +Once the brep module configuration is ready, the next step is to enable +it in the Apache2 configuration file. Here we assume you have setup an +appropriate Apache2 virtual server. Open the corresponding Apache2 .conf +file and add the contents of brep/etc/brep-apache2.conf into the +<VirtualHost> section. + +The output content types of the brep module are application/xhtml+xml, +text/manifest and text/plain. If you would like to make sure they get +compressed (along with linked CSS), also add the following lines: + + # Compress brep output (xhtml+xml) and CSS. + # + AddOutputFilterByType DEFLATE application/xhtml+xml + AddOutputFilterByType DEFLATE text/manifest + AddOutputFilterByType DEFLATE text/plain + AddOutputFilterByType DEFLATE text/css + +Then restart Apache2: + +$ sudo systemctl restart apache2 + +To verify, visit the repository root. To troubleshoot, see Apache logs. + +Now that Apache2 loads the brep module which requires PostgreSQL, it is a good +idea to make the Apache2 service depend on PostgreSQL so that they are started +in proper order. Here is how we can do it with systemd (with newer versions +you can use 'systemctl edit' instead of mkdir and cat): + +# mkdir -p /etc/systemd/system/apache2.service.d/ +# cat >/etc/systemd/system/apache2.service.d/postgresql.conf +[Unit] +Requires=postgresql.service +After=postgresql.service +^D + +# mkdir -p /etc/systemd/system/postgresql.service.d/ +# cat >/etc/systemd/system/postgresql.service.d/apache2.conf +[Unit] +Wants=apache2.service +^D + +# systemctl daemon-reload +# systemctl cat apache2 # Verify override is listed. +# systemctl cat postgresql # Verify override is listed. +# systemctl stop postgresql +# systemctl status apache2 # Verify stopped. +# systemctl start postgresql +# systemctl status apache2 # Verify started. + + +6.1 Enabling build bot controller functionality + To enable the build2 build bot controller functionality you will need to set the build-config option in brep-module.conf. To also enable the build artifacts upload functionality you will need to specify the upload-data @@ -250,6 +306,9 @@ $ setfacl -m g:www-data:rwx /home/brep/bindist-data For sample upload handler implementations see brep/handler/upload/. + +6.2 Enabling package submission functionality + To enable the package submission functionality you will need to specify the submit-data and submit-temp directories in brep-module.conf. 
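For example, assuming the submission directories are kept under the brep
user's home directory (the locations below are purely illustrative; adjust
them to your deployment layout), the corresponding brep-module.conf entries
could look along these lines:

  submit-data /home/brep/submit-data
  submit-temp /home/brep/submit-temp
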
Note that these directories must exist and have read, write, and execute permissions granted @@ -272,6 +331,9 @@ $ edit config/submit.xhtml # Add custom form fields, adjust CSS style, etc. For sample submission handler implementations see brep/handler/submit/. + +6.3 Enabling CI request functionality + To enable the CI request functionality you will need to specify the ci-data directory in brep-module.conf. Note that this directory must exist and have read, write, and execute permissions granted to the www-data user. This, for @@ -291,52 +353,119 @@ $ edit config/ci.xhtml # Add custom form fields, adjust CSS style, etc. For sample CI request handler implementations see brep/handler/ci/. -Here we assume you have setup an appropriate Apache2 virtual server. Open the -corresponding Apache2 .conf file and add the contents of -brep/etc/brep-apache2.conf into the <VirtualHost> section. -The output content types of the brep module are application/xhtml+xml, -text/manifest and text/plain. If you would like to make sure they get -compressed (along with linked CSS), also add the following lines: +6.4 Enabling GitHub CI integration - # Compress brep output (xhtml+xml) and CSS. - # - AddOutputFilterByType DEFLATE application/xhtml+xml - AddOutputFilterByType DEFLATE text/manifest - AddOutputFilterByType DEFLATE text/plain - AddOutputFilterByType DEFLATE text/css +6.4.1 Background -Restart Apache2: +The GitHub CI integration has one user-configurable setting: +warning=<success|failure> (whether or not to fail on warnings). -$ sudo systemctl restart apache2 +In order not to have to support repository configuration files, a deployment +will consist of two registered GitHub Apps with the same webhook URL (i.e., +the same brep instance) but different query parameters: one with +warning=success and the other with warning=failure. The App id is passed (as a +query parameter) so that we know which private key to use (the key cannot be +shared between Apps). -To verify, visit the repository root. To troubleshoot, see Apache logs. +We will call the warning=success App the "Default App" and the warning=failure +App the "Werror App". -Now that Apache2 loads the brep module which requires PostgreSQL, it is a good -idea to make the Apache2 service depend on PostgreSQL so that they are started -in proper order. Here is how we can do it with systemd (with newer versions -you can use 'systemctl edit' instead of mkdir and cat): +6.4.2 Create the GitHub Apps -# mkdir -p /etc/systemd/system/apache2.service.d/ -# cat >/etc/systemd/system/apache2.service.d/postgresql.conf -[Unit] -Requires=postgresql.service -After=postgresql.service -^D +To create a GitHub App under the <org> organization, visit +https://github.com/organizations/<org>/settings/apps (Settings -> Developer +settings -> GitHub Apps). Then click on New GitHub App. -# mkdir -p /etc/systemd/system/postgresql.service.d/ -# cat >/etc/systemd/system/postgresql.service.d/apache2.conf -[Unit] -Wants=apache2.service -^D +App names (note: 34 character limit): -# systemctl daemon-reload -# systemctl cat apache2 # Verify override is listed. -# systemctl cat postgresql # Verify override is listed. -# systemctl stop postgresql -# systemctl status apache2 # Verify stopped. -# systemctl start postgresql -# systemctl status apache2 # Verify started. + Default App: "<org> CI" + Werror App: "<org> CI - warnings as errors" + +App description: + + Default App: "Trigger <org> CI on branch push and pull request." + Werror App: "Trigger <org> CI on branch push and pull request. 
Warnings are + treated as errors". + +App homepage: + + https://ci.<org>.org/ + +Skip the "Identifying and authorizing users" and "Post installation" sections. + +Leave webhooks active. + +Webhook URL: + + Default App: https://ci.<org>.org/?ci-github&app-id=XXX&warning=success + Werror App: https://ci.<org>.org/?ci-github&app-id=XXX&warning=failure + +Note that the App id only becomes available once the App has been registered +so we update it later in both URLs. + +Webhook secret: Use the same random 64-character string for both Apps. + + echo `tr -dc -- A-Za-z0-9 </dev/urandom | head -c 64` + +Note that GitHub says only that the secret should be "a random string with +high entropy." However lots of sources say 32 bytes should be secure enough +for HMAC-SHA256, while other sources recommend 64 bytes for maximal security +at an insignificant performance cost. (Keys longer than 64 bytes are hashed to +match the internal block size and are therefore not recommended.) + +Repository permissions: + - Checks: RW + - Contents: RO (for Push events) + - Metadata (mandatory): RO + - Pull requests: RO + +Subscribed events: + - Check suite + - Pull request + - Push + +Note that GitHub Apps with write access to the "Checks" permission are +automatically subscribed to check_suite(requested|rerequested) and check_run +events so no need to subscribe explicitly. However in order to receive +check_suite(completed) events, which we need, one does have to subscribe to +Check suite. + +Select "Any account" under "Where can this GitHub App be installed?". + +Click "Create GitHub App". + +When the page reloads (should be the General tab), note the App id and replace +the XXX in the webhook URL with it. + +Still in the General tab, scroll to Private keys and generate a private key. +The file will be downloaded by the browser. + +@@ TODO Logo +@@ TODO Create Marketplace listing + +6.4.3 Configure brep + +Assume the following configuration values: + +- Webhook secret: abcdefg +- Default App id: 12345 +- Werror App id: 67890 + +In brep-module.conf: + +Set the webhook secret from the GitHub App settings: + + ci-github-app-webhook-secret abcdefg + +Associate each GitHub App id with the App's private key: + + ci-github-app-id-private-key 12345=path/to/default-app-private-key.pem + ci-github-app-id-private-key 67890=path/to/werror-app-private-key.pem + +Now brep should be ready to handle the webhook event requests triggered by +branch pushes and pull requests in repositories into which one of these Apps +has been installed. 7. Optimize CSS diff --git a/INSTALL-CI-DEV b/INSTALL-CI-DEV index b8502d8..c1bd8ec 100644 --- a/INSTALL-CI-DEV +++ b/INSTALL-CI-DEV @@ -1,4 +1,4 @@ -This guide shows how to configure the brep module for serving the CI and +This guide describes how to configure the brep module for serving the CI and build2 build bot requests and how to smoke-test it. Note that during the testing both the user and CI submission handler (executed diff --git a/INSTALL-DEV b/INSTALL-DEV index f023962..c197b7b 100644 --- a/INSTALL-DEV +++ b/INSTALL-DEV @@ -1,6 +1,7 @@ The goal of this setup is to run the brep Apache2 modules from the development build while still being as close to the real deployment as possible. To this end, we use default, system-wide installations of both Apache2 and Postgres. +See also INSTALL-CI-DEV and INSTALL-GITHUB-DEV. 
In the below instructions replace <user> with your login and www-data with the user under which Apache2 is running (See the "User" directive in the Apache2 diff --git a/INSTALL-GITHUB-DEV b/INSTALL-GITHUB-DEV index 244d26c..602b65d 100644 --- a/INSTALL-GITHUB-DEV +++ b/INSTALL-GITHUB-DEV @@ -1,4 +1,4 @@ -This document explains how to get GitHub webhooks (a notification that an +This guide describes how to get GitHub webhooks (a notification that an event such as a push has occurred on a repository) delivered to a locally-running instance of brep (currently to initiate a CI job). @@ -63,9 +63,17 @@ At this stage the only settings we need to update are: - Checks: RW - Metadata (mandatory): RO - Pull requests: RO + - Contents: RO (for Push events) - Subscribed events - Check suite - Pull request + - Push + + Note that GitHub apps with write access to the "Checks" permission are + automatically subscribed to check_suite(requested|rerequested) and check_run + events so no need to subscribe explicitly. However in order to receive + check_suite(completed) events, which we do, one does have to subscribe to + check_suite. Click "Create GitHub App" button. When the page reloads: @@ -126,10 +134,17 @@ aspects but can't redeliver webhooks. - Branch push (BP). - - Success (also observe check run state transitions). - - Failure (also observe check run state transitions). + - Success (observe check runs state transitions). Test with 2 build configs. + - Failure (observe check runs state transitions). + - Push new commit to branch. - Re-requested check suite. - - Re-requested check run. + - Re-requested check run (observe check run state transitions). + - Re-requested check run but tenant archived. + - Cancel previous check suite on forced push. + - Cancel previous check suite on branch delete. + - Head commit shared with another BP. + - Cancel previous check suite on forced push with shared previous commit. + - Cancel previous check suite on branch delete with shared previous commit. - Pull request (PR). @@ -137,16 +152,20 @@ aspects but can't redeliver webhooks. - Success. - Failure. + - Push new commit to head. - Re-requested check suite. - Re-requested check run. - - Head shared with BP. - - Not meargeable/head behind base. + - Head shared with BP (pull_request is received after check_suite) + - Not meargeable. + - Head behind base. + - Head commit has changed while testing manageability. - Remote PR. - Success. - Failure. + - Push new commit to head. + - Cancel previous check suite on head move. - Re-requested check suite. - Re-requested check run. - Head shared with another remote PR. - - Cancel previous check suite on head move. diff --git a/INSTALL-PROXY b/INSTALL-PROXY index 418846a..2a3f3f4 100644 --- a/INSTALL-PROXY +++ b/INSTALL-PROXY @@ -1,4 +1,4 @@ -This guide shows how to configure the Apache2-based HTTP proxy server for +This guide describes how to configure the Apache2-based HTTP proxy server for proxying HTTP(S) requests and caching the responses. Note that for security reasons most clients (curl, wget, etc) perform HTTPS @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2014-2024 the build2 authors (see the AUTHORS and LEGAL files). +Copyright (c) 2014-2025 the build2 authors (see the AUTHORS and LEGAL files). 
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal diff --git a/etc/brep-module.conf b/etc/brep-module.conf index fd6ba67..cdf028a 100644 --- a/etc/brep-module.conf +++ b/etc/brep-module.conf @@ -454,13 +454,15 @@ menu About=?about # The GitHub App's configured webhook secret. If not set, then the GitHub CI -# service is disabled. Note: make sure to choose a strong (random) secret. +# service is disabled. Note that the path must be absolute. Note: make sure to +# choose a strong (random) secret. # -# ci-github-app-webhook-secret +# ci-github-app-webhook-secret <path> # The private key used during GitHub API authentication for the specified -# GitHub App ID. Both vales are found in the GitHub App's settings. +# GitHub App ID. Both vales are found in the GitHub App's settings. Note that +# the paths must be absolute. # # ci-github-app-id-private-key <id>=<path> diff --git a/etc/private/install/brep-module.conf b/etc/private/install/brep-module.conf index 07db881..2545a87 100644 --- a/etc/private/install/brep-module.conf +++ b/etc/private/install/brep-module.conf @@ -462,13 +462,15 @@ submit-handler-timeout 120 # The GitHub App's configured webhook secret. If not set, then the GitHub CI -# service is disabled. Note: make sure to choose a strong (random) secret. +# service is disabled. Note that the path must be absolute. Note: make sure to +# choose a strong (random) secret. # -# ci-github-app-webhook-secret +# ci-github-app-webhook-secret <path> # The private key used during GitHub API authentication for the specified -# GitHub App ID. Both vales are found in the GitHub App's settings. +# GitHub App ID. Both vales are found in the GitHub App's settings. Note that +# the paths must be absolute. # # ci-github-app-id-private-key <id>=<path> diff --git a/libbrep/utility.hxx b/libbrep/utility.hxx index fce8fb5..1925d01 100644 --- a/libbrep/utility.hxx +++ b/libbrep/utility.hxx @@ -12,7 +12,7 @@ #include <algorithm> // * #include <libbutl/utility.hxx> // icasecmp(), reverse_iterate(), - // operator<<(ostream, exception) + // operator<<(ostream, exception), etc namespace brep { @@ -28,6 +28,9 @@ namespace brep // <libbutl/utility.hxx> // using butl::utf8; + using butl::trim; + using butl::trim_left; + using butl::trim_right; using butl::icasecmp; using butl::reverse_iterate; } @@ -24,9 +24,9 @@ depends: libapr1 depends: libapreq2 depends: libcmark-gfm == 0.29.0-a.4 depends: libcmark-gfm-extensions == 0.29.0-a.4 -depends: libstudxml ^1.1.0-b.10 -depends: libodb ^2.5.0-b.27 -depends: libodb-pgsql ^2.5.0-b.27 +depends: libstudxml ^1.1.0 +depends: libodb ^2.5.0 +depends: libodb-pgsql ^2.5.0 depends: libbutl [0.18.0-a.0.1 0.18.0-a.1) depends: libbpkg [0.18.0-a.0.1 0.18.0-a.1) depends: libbbot [0.18.0-a.0.1 0.18.0-a.1) diff --git a/mod/hmac.cxx b/mod/hmac.cxx index 1a78b4c..cfb0e23 100644 --- a/mod/hmac.cxx +++ b/mod/hmac.cxx @@ -16,6 +16,12 @@ compute_hmac (const options::openssl_options& o, // To compute an HMAC over stdin with the key <secret>: // + // openssl dgst -sha256 -hmac <secret> + // + // Note that since openssl 3.0 the `mac` command is the preferred method + // for generating HMACs. For future reference, the equivalent command + // would be: + // // openssl mac -digest SHA256 -macopt "key:<secret>" HMAC // // Note that here we assume both output and diagnostics will fit into pipe @@ -25,10 +31,9 @@ compute_hmac (const options::openssl_options& o, path ("-"), // Write output to openssl::in. 
process::pipe (errp.in.get (), move (errp.out)), process_env (o.openssl (), o.openssl_envvar ()), - "mac", o.openssl_option (), - "-digest", "SHA256", - "-macopt", string ("key:") + k, - "HMAC"); + "dgst", o.openssl_option (), + "-sha256", + "-hmac", k); ifdstream err (move (errp.in)); diff --git a/mod/mod-build-configs.cxx b/mod/mod-build-configs.cxx index ce79edb..2754f95 100644 --- a/mod/mod-build-configs.cxx +++ b/mod/mod-build-configs.cxx @@ -34,10 +34,12 @@ init (scanner& s) s, unknown_mode::fail, unknown_mode::fail); if (options_->build_config_specified ()) + { build_config_module::init (*options_); - if (options_->root ().empty ()) - options_->root (dir_path ("/")); + if (options_->root ().empty ()) + options_->root (dir_path ("/")); + } } bool brep::build_configs:: diff --git a/mod/mod-build-result.cxx b/mod/mod-build-result.cxx index cc058b5..b303386 100644 --- a/mod/mod-build-result.cxx +++ b/mod/mod-build-result.cxx @@ -565,7 +565,7 @@ handle (request& rq, response&) { assert (tss); // Wouldn't be here otherwise. - const tenant_service& ss (tss->first); + tenant_service& ss (tss->first); const build& b (*tss->second); // Release the database connection since build_built() notification can @@ -576,7 +576,33 @@ handle (request& rq, response&) if (auto f = tsb->build_built (b.tenant, ss, b, log_writer_)) { conn = build_db_->connection (); - update_tenant_service_state (conn, ss.type, ss.id, f); + + bool build_completed (false); + + if (optional<string> data = + update_tenant_service_state ( + conn, ss.type, ss.id, + [&f, &build_completed] (const string& tid, + const tenant_service& ts) + { + auto r (f (tid, ts)); + build_completed = r.second; + return move (r.first); + })) + { + ss.data = move (data); + } + + if (build_completed) + { + // Release the database connection since the build_completed() + // notification can potentially be time-consuming (e.g., it may + // perform an HTTP request). + // + conn.reset (); + + tsb->build_completed (b.tenant, ss, log_writer_); + } } } diff --git a/mod/mod-build-task.cxx b/mod/mod-build-task.cxx index c8b1bb2..88e5618 100644 --- a/mod/mod-build-task.cxx +++ b/mod/mod-build-task.cxx @@ -2407,7 +2407,7 @@ handle (request& rq, response& rs) } // If a third-party service needs to be notified about the package - // build, then call the tenant_service_build_built::build_building() + // build, then call the tenant_service_build_building::build_building() // callback function and, if requested, update the tenant-associated // service state. // @@ -2556,9 +2556,32 @@ handle (request& rq, response& rs) { conn = build_db_->connection (); + bool build_completed (false); + if (optional<string> data = - update_tenant_service_state (conn, ss.type, ss.id, f)) + update_tenant_service_state ( + conn, ss.type, ss.id, + [&f, &build_completed] (const string& tid, + const tenant_service& ts) + { + auto r (f (tid, ts)); + build_completed = r.second; + return move (r.first); + })) + { ss.data = move (data); + } + + if (build_completed) + { + // Release the database connection since the build_completed() + // notification can potentially be time-consuming (e.g., it may + // perform an HTTP request). 
+ // + conn.reset (); + + tsb->build_completed (b.tenant, ss, log_writer_); + } } } } diff --git a/mod/mod-builds.cxx b/mod/mod-builds.cxx index 0155c2e..b11b3d7 100644 --- a/mod/mod-builds.cxx +++ b/mod/mod-builds.cxx @@ -58,10 +58,10 @@ init (scanner& s) { database_module::init (*options_, options_->build_db_retry ()); build_config_module::init (*options_); - } - if (options_->root ().empty ()) - options_->root (dir_path ("/")); + if (options_->root ().empty ()) + options_->root (dir_path ("/")); + } } template <typename T, typename C> diff --git a/mod/mod-ci-github-gh.cxx b/mod/mod-ci-github-gh.cxx index 021ff6b..2e886ac 100644 --- a/mod/mod-ci-github-gh.cxx +++ b/mod/mod-ci-github-gh.cxx @@ -656,6 +656,58 @@ namespace brep return os; } + // gh_push_event + // + gh_push_event:: + gh_push_event (json::parser& p) + { + p.next_expect (event::begin_object); + + bool rf (false), bf (false), af (false), fd (false), dl (false), + rp (false), in (false); + + // Skip unknown/uninteresting members. + // + while (p.next_expect (event::name, event::end_object)) + { + auto c = [&p] (bool& v, const char* s) + { + return p.name () == s ? (v = true) : false; + }; + + if (c (rf, "ref")) ref = p.next_expect_string (); + else if (c (bf, "before")) before = p.next_expect_string (); + else if (c (af, "after")) after = p.next_expect_string (); + else if (c (fd, "forced")) forced = p.next_expect_boolean<bool> (); + else if (c (dl, "deleted")) deleted = p.next_expect_boolean<bool> (); + else if (c (rp, "repository")) repository = gh_repository (p); + else if (c (in, "installation")) installation = gh_installation (p); + else p.next_expect_value_skip (); + } + + if (!rf) missing_member (p, "gh_push_event", "ref"); + if (!bf) missing_member (p, "gh_push_event", "before"); + if (!af) missing_member (p, "gh_push_event", "after"); + if (!fd) missing_member (p, "gh_push_event", "forced"); + if (!dl) missing_member (p, "gh_push_event", "deleted"); + if (!rp) missing_member (p, "gh_push_event", "repository"); + if (!in) missing_member (p, "gh_push_event", "installation"); + } + + ostream& + operator<< (ostream& os, const gh_push_event& p) + { + os << "ref: " << p.ref + << ", before: " << p.before + << ", after: " << p.after + << ", forced: " << p.forced + << ", deleted: " << p.deleted + << ", repository { " << p.repository << " }" + << ", installation { " << p.installation << " }"; + + return os; + } + // gh_installation_access_token // // Example JSON: diff --git a/mod/mod-ci-github-gh.hxx b/mod/mod-ci-github-gh.hxx index ab6dbaa..91f5bfe 100644 --- a/mod/mod-ci-github-gh.hxx +++ b/mod/mod-ci-github-gh.hxx @@ -215,6 +215,55 @@ namespace brep gh_pull_request_event () = default; }; + // The push webhook event. + // + struct gh_push_event + { + // The full git ref that was pushed. Example: refs/heads/main or + // refs/tags/v3.14.1. + // + string ref; + + // The SHA of the most recent commit on ref before the push. + // + // The GitHub API reference says this member is always present and + // non-null. Testing shows that an absent before commit is represented by + // a value of "0000000000000000000000000000000000000000". + // + string before; + + // The SHA of the most recent commit on ref after the push. + // + string after; + + // True if this was a forced push of the ref. I.e., history was + // overwritten. + // + bool forced; + + // True if this was a branch deletion. 
+ // + bool deleted; + + gh_repository repository; + gh_installation installation; + + // Note: not received from GitHub but set from the app-id webhook query + // parameter instead. + // + // For some reason, unlike the check_suite and check_run webhooks, the + // push webhook does not contain the app id. For the sake of simplicity we + // emulate check_suite and check_run by storing the app-id webhook query + // parameter here. + // + string app_id; + + explicit + gh_push_event (json::parser&); + + gh_push_event () = default; + }; + // Installation access token (IAT) returned when we authenticate as a GitHub // app installation. // @@ -297,6 +346,9 @@ namespace brep operator<< (ostream&, const gh_pull_request_event&); ostream& + operator<< (ostream&, const gh_push_event&); + + ostream& operator<< (ostream&, const gh_installation_access_token&); } diff --git a/mod/mod-ci-github-gq.cxx b/mod/mod-ci-github-gq.cxx index db69f0c..37d944c 100644 --- a/mod/mod-ci-github-gq.cxx +++ b/mod/mod-ci-github-gq.cxx @@ -380,9 +380,12 @@ namespace brep assert (cr.state != build_state::built); // Not supported. - // Ensure details URL is non-empty if present. + // Ensure details URL and output are non-empty if present. // assert (!cr.details_url || !cr.details_url->empty ()); + assert (!cr.description || + (!cr.description->title.empty () && + !cr.description->summary.empty ())); string al ("cr" + to_string (i)); // Field alias. @@ -396,6 +399,13 @@ namespace brep os << '\n'; os << " detailsUrl: " << gq_str (*cr.details_url); } + if (cr.description) + { + os << " output: {" << '\n' + << " title: " << gq_str (cr.description->title) << '\n' + << " summary: " << gq_str (cr.description->summary) << '\n' + << " }"; + } os << "})" << '\n' // Specify the selection set (fields to be returned). Note that we // rename `id` to `node_id` (using a field alias) for consistency with @@ -417,9 +427,9 @@ namespace brep // Serialize a `createCheckRun` mutation for a build to GraphQL. // - // The build result argument (`br`) is required if the build_state is built - // because GitHub does not allow a check run status of completed without a - // conclusion. + // The conclusion argument (`co`) is required if the check run status is + // completed because GitHub does not allow a check run status of completed + // without a conclusion. // // The details URL argument (`du`) can be empty for queued but not for the // other states. @@ -433,12 +443,18 @@ namespace brep const optional<string>& du, // Details URL. const check_run& cr, const string& st, // Check run status. - optional<gq_built_result> br = nullopt) + const string& ti, // Output title. + const string& su, // Output summary. + optional<string> co = nullopt) // Conclusion. { // Ensure details URL is non-empty if present. // assert (!du || !du->empty ()); + // Ensure we have conclusion if the status is completed. + // + assert (st != "COMPLETED" || co); + ostringstream os; os << "mutation {" << '\n'; @@ -455,15 +471,13 @@ namespace brep os << '\n'; os << " detailsUrl: " << gq_str (*du); } - if (br) - { - os << '\n'; - os << " conclusion: " << gq_enum (br->conclusion) << '\n' - << " output: {" << '\n' - << " title: " << gq_str (br->title) << '\n' - << " summary: " << gq_str (br->summary) << '\n' - << " }"; - } + os << '\n'; + if (co) + os << " conclusion: " << gq_enum (*co) << '\n'; + os << " output: {" << '\n' + << " title: " << gq_str (ti) << '\n' + << " summary: " << gq_str (su) << '\n' + << " }"; os << "})" << '\n' // Specify the selection set (fields to be returned). 
Note that we // rename `id` to `node_id` (using a field alias) for consistency with @@ -485,7 +499,7 @@ namespace brep // Serialize an `updateCheckRun` mutation for one build to GraphQL. // - // The `co` (conclusion) argument is required if the build_state is built + // The `br` argument is required if the check run status is completed // because GitHub does not allow updating a check run to completed without a // conclusion. // @@ -495,14 +509,11 @@ namespace brep static string gq_mutation_update_check_run (const string& ri, // Repository ID. const string& ni, // Node ID. - const optional<string>& du, // Details URL. const string& st, // Check run status. optional<timestamp> sa, // Started at. optional<gq_built_result> br) { - // Ensure details URL is non-empty if present. - // - assert (!du || !du->empty ()); + assert (st != "COMPLETED" || br); ostringstream os; @@ -527,11 +538,6 @@ namespace brep ": " + e.what ()); } } - if (du) - { - os << '\n'; - os << " detailsUrl: " << gq_str (*du); - } if (br) { os << '\n'; @@ -586,11 +592,11 @@ namespace brep const string& hs, const optional<string>& du, build_state st, - optional<gq_built_result> br) + string ti, string su) { - // Must have a result if state is built. + // State cannot be built without a conclusion. // - assert (st != build_state::built || br); + assert (st != build_state::built && !ti.empty () && !su.empty ()); string rq ( gq_serialize_request ( @@ -599,7 +605,8 @@ namespace brep du, cr, gh_to_status (st), - move (br)))); + move (ti), move (su), + nullopt /* conclusion */))); vector<check_run> crs {move (cr)}; crs[0].state = st; @@ -612,12 +619,40 @@ namespace brep } bool + gq_create_check_run (const basic_mark& error, + check_run& cr, + const string& iat, + const string& rid, + const string& hs, + const optional<string>& du, + gq_built_result br) + { + string rq ( + gq_serialize_request ( + gq_mutation_create_check_run (rid, + hs, + du, + cr, + gh_to_status (build_state::built), + move (br.title), move (br.summary), + move (br.conclusion)))); + + vector<check_run> crs {move (cr)}; + crs[0].state = build_state::built; + + bool r (gq_mutate_check_runs (error, crs, iat, move (rq))); + + cr = move (crs[0]); + + return r; + } + + bool gq_update_check_run (const basic_mark& error, check_run& cr, const string& iat, const string& rid, const string& nid, - const optional<string>& du, build_state st, optional<gq_built_result> br) { @@ -636,7 +671,6 @@ namespace brep gq_serialize_request ( gq_mutation_update_check_run (rid, nid, - du, gh_to_status (st), sa, move (br)))); diff --git a/mod/mod-ci-github-gq.hxx b/mod/mod-ci-github-gq.hxx index 50950d4..0fc3817 100644 --- a/mod/mod-ci-github-gq.hxx +++ b/mod/mod-ci-github-gq.hxx @@ -20,7 +20,7 @@ namespace brep // // Create a new check run on GitHub for each build with the build state, - // name, and details_url taken from each check_run object. Update + // name, details_url, and output taken from each check_run object. Update // `check_runs` with the new data (node id and state_synced). Return false // and issue diagnostics if the request failed. // @@ -39,18 +39,32 @@ namespace brep const string& repository_id, const string& head_sha); - // Create a new check run on GitHub for a build. Update `cr` with the new - // data (node id, state, and state_synced). Return false and issue - // diagnostics if the request failed. + // Create a new check run on GitHub for a build in the queued or building + // state. Note that the state cannot be built because in that case a + // conclusion is required. 
+ // + // Update `cr` with the new data (node id, state, and state_synced). Return + // false and issue diagnostics if the request failed. // // Throw invalid_argument if the passed data is invalid, missing, or // inconsistent. // - // If the details_url is absent GitHub will use the app's homepage. + // If the details_url is absent GitHub will use the app's homepage. Title + // and summary are required and cannot be empty. // - // The gq_built_result is required if the build_state is built because - // GitHub does not allow a check run status of `completed` without at least - // a conclusion. + bool + gq_create_check_run (const basic_mark& error, + check_run& cr, + const string& installation_access_token, + const string& repository_id, + const string& head_sha, + const optional<string>& details_url, + build_state, + string title, + string summary); + + // As above but create a check run in the built state (which requires a + // conclusion). // struct gq_built_result { @@ -66,8 +80,7 @@ namespace brep const string& repository_id, const string& head_sha, const optional<string>& details_url, - build_state, - optional<gq_built_result> = nullopt); + gq_built_result); // Update a check run on GitHub. Update `cr` with the new data (state and // state_synced). Return false and issue diagnostics if the request failed. @@ -79,8 +92,6 @@ namespace brep // built to built is allowed). The latter case is signalled by setting the // check_run state_synced member to false and the state member to built. // - // If the details_url is absent GitHub will use the app's homepage. - // // The gq_built_result is required if the build_state is built because // GitHub does not allow a check run status of `completed` without at least // a conclusion. @@ -91,7 +102,6 @@ namespace brep const string& installation_access_token, const string& repository_id, const string& node_id, - const optional<string>& details_url, build_state, optional<gq_built_result> = nullopt); diff --git a/mod/mod-ci-github-service-data.cxx b/mod/mod-ci-github-service-data.cxx index 9f66a6c..c51f791 100644 --- a/mod/mod-ci-github-service-data.cxx +++ b/mod/mod-ci-github-service-data.cxx @@ -113,7 +113,15 @@ namespace brep } } - check_runs.emplace_back (move (bid), move (nm), move (nid), s, ss, rs); + check_runs.push_back ( + check_run {move (bid), + move (nm), + move (nid), + s, + ss, + rs, + nullopt, /* details_url */ + nullopt /* description */}); p.next_expect (event::end_object); } diff --git a/mod/mod-ci-github-service-data.hxx b/mod/mod-ci-github-service-data.hxx index 50bb49d..5d36696 100644 --- a/mod/mod-ci-github-service-data.hxx +++ b/mod/mod-ci-github-service-data.hxx @@ -33,10 +33,17 @@ namespace brep optional<result_status> status; // Only if state is built & synced. - // Note: never serialized (only used to pass information to the GraphQL - // functions). + // Note: these are never serialized (only used to pass information to the + // GraphQL functions). // - optional<string> details_url; + struct description_type + { + string title; + string summary; + }; + + optional<string> details_url; + optional<description_type> description; string state_string () const diff --git a/mod/mod-ci-github.cxx b/mod/mod-ci-github.cxx index 5bcec98..c451154 100644 --- a/mod/mod-ci-github.cxx +++ b/mod/mod-ci-github.cxx @@ -77,12 +77,45 @@ namespace brep // Prepare for the CI requests handling, if configured. 
// - if (options_->build_config_specified () && - options_->ci_github_app_webhook_secret_specified ()) + if (options_->ci_github_app_webhook_secret_specified ()) { + if (!options_->build_config_specified ()) + fail << "package building functionality must be enabled"; + if (!options_->ci_github_app_id_private_key_specified ()) fail << "no app id/private key mappings configured"; + for (const auto& pr: options_->ci_github_app_id_private_key ()) + { + if (pr.second.relative ()) + fail << "ci-github-app-id-private-key path must be absolute"; + } + + // Read the webhook secret from the configured path. + // + { + const path& p (options_->ci_github_app_webhook_secret ()); + + if (p.relative ()) + fail << "ci-github-app-webhook-secret path must be absolute"; + + try + { + ifdstream is (p); + getline (is, webhook_secret_, '\0'); + + // Trim leading/trailing whitespaces (presumably GitHub does the + // same in its web UI). + // + if (trim (webhook_secret_).empty ()) + fail << "empty webhook secret in " << p; + } + catch (const io_error& e) + { + fail << "unable to read webhook secret from " << p << ": " << e; + } + } + ci_start::init (make_shared<options::ci_start> (*options_)); database_module::init (*options_, options_->build_db_retry ()); @@ -207,10 +240,10 @@ namespace brep // try { - string h ( - compute_hmac (*options_, - body.data (), body.size (), - options_->ci_github_app_webhook_secret ().c_str ())); + string h (compute_hmac (*options_, + body.data (), + body.size (), + webhook_secret_.c_str ())); if (!icasecmp (h, hmac)) { @@ -277,7 +310,7 @@ namespace brep // Note: "GitHub continues to add new event types and new actions to // existing event types." As a result we ignore known actions that we are // not interested in and log and ignore unknown actions. The thinking here - // is that we want be "notified" of new actions at which point we can + // is that we want to be "notified" of new actions at which point we can // decide whether to ignore them or to handle. // if (event == "check_suite") @@ -307,14 +340,17 @@ namespace brep if (cs.action == "requested") { - return handle_check_suite_request (move (cs), warning_success); + // Branch pushes are handled in handle_branch_push() so ignore this + // event. + // + return true; } else if (cs.action == "rerequested") { // Someone manually requested to re-run all the check runs in this // check suite. Treat as a new request. // - return handle_check_suite_request (move (cs), warning_success); + return handle_check_suite_rerequest (move (cs), warning_success); } else if (cs.action == "completed") { @@ -404,7 +440,7 @@ namespace brep throw invalid_request (400, move (m)); } - // Store the app-id webhook query parameter in the gh_pull_request + // Store the app-id webhook query parameter in the gh_pull_request_event // object (see gh_pull_request for an explanation). // // When we receive the other webhooks we do check that the app ids in @@ -494,6 +530,40 @@ namespace brep return true; } } + else if (event == "push") + { + // Push events are triggered by branch pushes, branch creation, and + // branch deletion. 
+ // + gh_push_event ps; + try + { + json::parser p (body.data (), body.size (), "push event"); + + ps = gh_push_event (p); + } + catch (const json::invalid_json_input& e) + { + string m ("malformed JSON in " + e.name + " request body"); + + error << m << ", line: " << e.line << ", column: " << e.column + << ", byte offset: " << e.position << ", error: " << e; + + throw invalid_request (400, move (m)); + } + + // Store the app-id webhook query parameter in the gh_push_event + // object (see gh_push_event for an explanation). + // + // When we receive the other webhooks we do check that the app ids in + // the payload and query match but here we have to assume it is valid. + // + ps.app_id = app_id; + + // Note that the push request event has no action. + // + return handle_branch_push (move (ps), warning_success); + } else { // Log to investigate. @@ -509,52 +579,292 @@ namespace brep // static string conclusion_check_run_name ("CONCLUSION"); - // Return the colored circle corresponding to a result_status. - // - static string - circle (result_status rs) + static check_run::description_type conclusion_check_run_building_description { + "\U000026AA IN PROGRESS", // "Medium white" circle. + "Waiting for all builds to complete"}; + + bool ci_github:: + handle_branch_push (gh_push_event ps, bool warning_success) { - switch (rs) + HANDLER_DIAG; + + l3 ([&]{trace << "push event { " << ps << " }";}); + + // Cancel the CI tenant associated with the overwritten/deleted previous + // head commit if this is a forced push or a branch deletion. + // + if (ps.forced || ps.deleted) { - case result_status::success: return "\U0001F7E2"; // Green circle. - case result_status::warning: return "\U0001F7E0"; // Orange circle. - case result_status::error: - case result_status::abort: - case result_status::abnormal: return "\U0001F534"; // Red circle. + // Service id that will uniquely identify the CI tenant. + // + string sid (ps.repository.node_id + ':' + ps.before); - // Valid values we should never encounter. + // Note that it's possible this commit still exists in another branch so + // we do refcount-aware cancel. // - case result_status::skip: - case result_status::interrupt: - throw invalid_argument ("unexpected result_status value: " + - to_string (rs)); + if (optional<tenant_service> ts = cancel (error, warn, + verb_ ? &trace : nullptr, + *build_db_, retry_, + "ci-github", sid, + true /* ref_count */)) + { + l3 ([&]{trace << (ps.forced ? "forced push " + ps.after + " to " + : "deletion of ") + << ps.ref << ": attempted to cancel CI of previous" + << " head commit with tenant_service id " << sid + << " (ref_count: " << ts->ref_count << ')';}); + } + else + { + // It's possible that there was no CI for the previous commit for + // various reasons (e.g., CI was not enabled). + // + l3 ([&]{trace << (ps.forced ? "forced push " + ps.after + " to " + : "deletion of ") + << ps.ref << ": failed to cancel CI of previous" + << " head commit with tenant_service id " << sid;}); + } } - return ""; // Should never reach. + if (ps.deleted) + return true; // Do nothing further if this was a branch deletion. + + // While we don't need the installation access token in this request, + // let's obtain it to flush out any permission issues early. Also, it is + // valid for an hour so we will most likely make use of it. 
+ // + optional<string> jwt (generate_jwt (ps.app_id, trace, error)); + if (!jwt) + throw server_error (); + + optional<gh_installation_access_token> iat ( + obtain_installation_access_token (ps.installation.id, + move (*jwt), + error)); + if (!iat) + throw server_error (); + + l3 ([&]{trace << "installation_access_token { " << *iat << " }";}); + + // While it would have been nice to cancel CIs of PRs with this branch as + // base not to waste resources, there are complications: Firstly, we can + // only do this for remote PRs (since local PRs will most likely share the + // result with branch push). Secondly, we try to do our best even if the + // branch protection rule for head behind is not enabled. In this case, it + // would be good to complete the CI. So maybe/later. See also the head + // case in handle_pull_request(), where we do cancel remote PRs that are + // not shared. + + // Service id that uniquely identifies the CI tenant. + // + string sid (ps.repository.node_id + ':' + ps.after); + + service_data sd (warning_success, + iat->token, + iat->expires_at, + ps.app_id, + ps.installation.id, + move (ps.repository.node_id), + move (ps.repository.clone_url), + service_data::local, + false /* pre_check */, + false /* re_requested */, + ps.after /* check_sha */, + ps.after /* report_sha */); + + // Create an unloaded CI tenant, doing nothing if one already exists + // (which could've been created by handle_pull_request() or by us as a + // result of a push to another branch). Note that the tenant's reference + // count is incremented in all cases. + // + // Note: use no delay since we need to (re)create the synthetic conclusion + // check run as soon as possible. + // + // Note that we use the create() API instead of start() since duplicate + // management is not available in start(). + // + // After this call we will start getting the build_unloaded() + // notifications until (1) we load the tenant, (2) we cancel it, or (3) + // it gets archived after some timeout. + // + if (!create (error, warn, verb_ ? &trace : nullptr, + *build_db_, retry_, + tenant_service (sid, "ci-github", sd.json ()), + chrono::seconds (30) /* interval */, + chrono::seconds (0) /* delay */, + duplicate_tenant_mode::ignore)) + { + fail << "push " + ps.after + " to " + ps.ref + << ": unable to create unloaded CI tenant"; + } + + return true; } - // Make a check run summary from a CI start_result. + // Miscellaneous pull request facts // - static string - to_check_run_summary (const optional<ci_start::start_result>& r) + // - Although some of the GitHub documentation makes it sound like they + // expect check runs to be added to both the PR head commit and the merge + // commit, the PR UI does not react to the merge commit's check runs + // consistently. It actually seems to be quite broken. The only thing it + // does seem to do reliably is blocking the PR merge if the merge commit's + // check runs are not successful (i.e, overriding the PR head commit's + // check runs). But the UI looks quite messed up generally in this state. + // + // - When new commits are added to a PR base branch, pull_request.base.sha + // does not change, but the test merge commit will be updated to include + // the new commits to the base branch. + // + // - When new commits are added to a PR head branch, pull_request.head.sha + // gets updated with the head commit's SHA and check_suite.pull_requests[] + // will contain all PRs with this branch as head. 
+ // + bool ci_github:: + handle_pull_request (gh_pull_request_event pr, bool warning_success) { - string s; + HANDLER_DIAG; - s = "```\n"; - if (r) s += r->message; - else s += "Internal service error"; - s += "\n```"; + l3 ([&]{trace << "pull_request event { " << pr << " }";}); - return s; + // While we don't need the installation access token in this request, + // let's obtain it to flush out any permission issues early. Also, it is + // valid for an hour so we will most likely make use of it. + // + optional<string> jwt (generate_jwt (pr.pull_request.app_id, trace, error)); + if (!jwt) + throw server_error (); + + optional<gh_installation_access_token> iat ( + obtain_installation_access_token (pr.installation.id, + move (*jwt), + error)); + if (!iat) + throw server_error (); + + l3 ([&]{trace << "installation_access_token { " << *iat << " }";}); + + // Distinguish between local and remote PRs by comparing the head and base + // repositories' paths. + // + service_data::kind_type kind ( + pr.pull_request.head_path == pr.pull_request.base_path + ? service_data::local + : service_data::remote); + + // Note that similar to the branch push case above, while it would have + // been nice to cancel the previous CI job once the PR head moves (the + // "synchronize" event), due to the head sharing problem the previous CI + // job might actually still be relevant (in both local and remote PR + // cases). So we only do it for the remote PRs and only if the head is not + // shared (via tenant reference counting). + // + if (kind == service_data::remote && pr.action == "synchronize") + { + if (pr.before) + { + // Service id that will uniquely identify the CI tenant. + // + string sid (pr.repository.node_id + ':' + *pr.before); + + if (optional<tenant_service> ts = cancel (error, warn, + verb_ ? &trace : nullptr, + *build_db_, retry_, + "ci-github", sid, + true /* ref_count */)) + { + l3 ([&]{trace << "pull request " << pr.pull_request.node_id + << ": attempted to cancel CI of previous head commit" + << " (ref_count: " << ts->ref_count << ')';}); + } + else + { + // It's possible that there was no CI for the previous commit for + // various reasons (e.g., CI was not enabled). + // + l3 ([&]{trace << "pull request " << pr.pull_request.node_id + << ": failed to cancel CI of previous head commit " + << "with tenant_service id " << sid;}); + } + } + else + { + error << "pull request " << pr.pull_request.node_id + << ": before commit is missing in synchronize event"; + } + } + + // Note: for remote PRs the check_sha will be set later, in + // build_unloaded_pre_check(), to test merge commit id. + // + string check_sha (kind == service_data::local + ? pr.pull_request.head_sha + : ""); + + // Note that PR rebuilds (re-requested) are handled by + // handle_check_suite_rerequest(). + // + // Note that, in the case of a remote PR, GitHub will copy the PR head + // commit from the head (forked) repository into the base repository. So + // the check runs must always be added to the base repository, whether the + // PR is local or remote. The head commit refs are located at + // refs/pull/<PR-number>/head. 
+ // + service_data sd (warning_success, + move (iat->token), + iat->expires_at, + pr.pull_request.app_id, + pr.installation.id, + move (pr.repository.node_id), + move (pr.repository.clone_url), + kind, true /* pre_check */, false /* re_request */, + move (check_sha), + move (pr.pull_request.head_sha) /* report_sha */, + pr.pull_request.node_id, + pr.pull_request.number); + + // Create an unloaded CI tenant for the pre-check phase (during which we + // wait for the PR's merge commit and behindness to become available). + // + // Create with an empty service id so that the generated tenant id is used + // instead during the pre-check phase (so as not to clash with a proper + // service id for this head commit, potentially created in + // handle_branch_push() or as another PR). + // + tenant_service ts ("", "ci-github", sd.json ()); + + // Note: use no delay since we need to start the actual CI (which in turn + // (re)creates the synthetic conclusion check run) as soon as possible. + // + // After this call we will start getting the build_unloaded() + // notifications -- which will be routed to build_unloaded_pre_check() -- + // until we cancel the tenant or it gets archived after some timeout. + // (Note that we never actually load this request, we always cancel it; + // see build_unloaded_pre_check() for details.) + // + if (!create (error, + warn, + verb_ ? &trace : nullptr, + *build_db_, retry_, + move (ts), + chrono::seconds (30) /* interval */, + chrono::seconds (0) /* delay */)) + { + fail << "pull request " << pr.pull_request.node_id + << ": unable to create unloaded pre-check tenant"; + } + + return true; } bool ci_github:: - handle_check_suite_request (gh_check_suite_event cs, bool warning_success) + handle_check_suite_rerequest (gh_check_suite_event cs, bool warning_success) { HANDLER_DIAG; l3 ([&]{trace << "check_suite event { " << cs << " }";}); + assert (cs.action == "rerequested"); + // While we don't need the installation access token in this request, // let's obtain it to flush out any permission issues early. Also, it is // valid for an hour so we will most likely make use of it. @@ -572,15 +882,6 @@ namespace brep l3 ([&]{trace << "installation_access_token { " << *iat << " }";}); - // While it would have been nice to cancel CIs of PRs with this branch as - // base not to waste resources, there are complications: Firstly, we can - // only do this for remote PRs (since local PRs will most likely share the - // result with branch push). Secondly, we try to do our best even if the - // branch protection rule for head behind is not enabled. In this case, it - // would be good to complete the CI. So maybe/later. See also the head - // case in handle_pull_request(), where we do cancel remote PRs that are - // not shared. - // Service id that uniquely identifies the CI tenant. // string sid (cs.repository.node_id + ':' + cs.check_suite.head_sha); @@ -604,9 +905,10 @@ namespace brep string check_sha; service_data::kind_type kind; - bool re_requested (cs.action == "rerequested"); - if (re_requested && !cs.check_suite.head_branch) + if (!cs.check_suite.head_branch) { + // Rebuild of remote PR. + // kind = service_data::remote; if (optional<tenant_data> d = find (*build_db_, "ci-github", sid)) @@ -634,7 +936,7 @@ namespace brep } else { - // Branch push or local PR rebuild. + // Rebuild of branch push or local PR. 
// kind = service_data::local; check_sha = cs.check_suite.head_sha; @@ -647,20 +949,15 @@ namespace brep cs.installation.id, move (cs.repository.node_id), move (cs.repository.clone_url), - kind, false /* pre_check */, re_requested, + kind, false /* pre_check */, true /* re_requested */, move (check_sha), move (cs.check_suite.head_sha) /* report_sha */); - // If this check suite is being re-run, replace the existing CI tenant if - // it exists; otherwise create a new one, doing nothing if a tenant - // already exists (which could've been created by handle_pull_request()). + // Replace the existing CI tenant if it exists. // // Note that GitHub UI does not allow re-running the entire check suite // until all the check runs are completed. // - duplicate_tenant_mode dtm (re_requested - ? duplicate_tenant_mode::replace - : duplicate_tenant_mode::ignore); // Create an unloaded CI tenant. // @@ -681,7 +978,7 @@ namespace brep tenant_service (sid, "ci-github", sd.json ()), chrono::seconds (30) /* interval */, chrono::seconds (0) /* delay */, - dtm)); + duplicate_tenant_mode::replace)); if (!pr) { @@ -689,8 +986,7 @@ namespace brep << ": unable to create unloaded CI tenant"; } - if (dtm == duplicate_tenant_mode::replace && - pr->second == duplicate_tenant_result::created) + if (pr->second == duplicate_tenant_result::created) { error << "check suite " << cs.check_suite.node_id << ": re-requested but tenant_service with id " << sid @@ -819,6 +1115,45 @@ namespace brep return true; } + // Return the colored circle corresponding to a result_status. + // + static string + circle (result_status rs) + { + switch (rs) + { + case result_status::success: return "\U0001F7E2"; // Green circle. + case result_status::warning: return "\U0001F7E0"; // Orange circle. + case result_status::error: + case result_status::abort: + case result_status::abnormal: return "\U0001F534"; // Red circle. + + // Valid values we should never encounter. + // + case result_status::skip: + case result_status::interrupt: + throw invalid_argument ("unexpected result_status value: " + + to_string (rs)); + } + + return ""; // Should never reach. + } + + // Make a check run summary from a CI start_result. + // + static string + to_check_run_summary (const optional<ci_start::start_result>& r) + { + string s; + + s = "```\n"; + if (r) s += r->message; + else s += "Internal service error"; + s += "\n```"; + + return s; + } + // Create a gq_built_result. // // Throw invalid_argument in case of invalid result_status. @@ -826,8 +1161,14 @@ namespace brep static gq_built_result make_built_result (result_status rs, bool warning_success, string message) { + string title (circle (rs == result_status::warning && !warning_success + ? result_status::error + : rs)); + title += ' '; + title += ucase (to_string (rs)); + return {gh_to_conclusion (rs, warning_success), - circle (rs) + ' ' + ucase (to_string (rs)), + move (title), move (message)}; } @@ -920,6 +1261,9 @@ namespace brep ccr.name = conclusion_check_run_name; + const gh_installation_access_token* iat (nullptr); + optional<gh_installation_access_token> new_iat; + // Load the service data, failing the check runs if the tenant has been // archived. // @@ -930,108 +1274,111 @@ namespace brep // string sid (repo_node_id + ':' + head_sha); - if (optional<tenant_data> d = find (*build_db_, "ci-github", sid)) + optional<tenant_data> d (find (*build_db_, "ci-github", sid)); + if (!d) { - if (d->archived) // Tenant is archived - { - // Fail (re-create) the check runs. 
- // - optional<gh_installation_access_token> iat (get_iat ()); - if (!iat) - throw server_error (); + // No such tenant. + // + fail << "check run " << cr.check_run.node_id + << " re-requested but tenant_service with id " << sid + << " does not exist"; + } - gq_built_result br ( - make_built_result ( - result_status::error, warning_success, - "Unable to rebuild individual configuration: build has " - "been archived")); + tenant_service& ts (d->service); - // Try to update the conclusion check run even if the first update - // fails. - // - bool f (false); // Failed. + try + { + sd = service_data (*ts.data); + } + catch (const invalid_argument& e) + { + fail << "failed to parse service data: " << e; + } - if (gq_create_check_run (error, bcr, iat->token, - repo_node_id, head_sha, - cr.check_run.details_url, - build_state::built, br)) - { - l3 ([&]{trace << "created check_run { " << bcr << " }";}); - } - else - { - error << "check_run " << cr.check_run.node_id - << ": unable to re-create check run"; - f = true; - } + if (!sd.conclusion_node_id) + fail << "no conclusion node id for check run " << cr.check_run.node_id; - if (gq_create_check_run (error, ccr, iat->token, - repo_node_id, head_sha, - nullopt /* details_url */, - build_state::built, move (br))) - { - l3 ([&]{trace << "created conclusion check_run { " << ccr << " }";}); - } - else - { - error << "check_run " << cr.check_run.node_id - << ": unable to re-create conclusion check run"; - f = true; - } + tenant_id = d->tenant_id; - // Fail the handler if either of the check runs could not be - // updated. - // - if (f) - throw server_error (); + // Get a new IAT if the one from the service data has expired. + // + if (system_clock::now () > sd.installation_access.expires_at) + { + if ((new_iat = get_iat ())) + iat = &*new_iat; + else + throw server_error (); + } + else + iat = &sd.installation_access; - return true; - } + if (d->archived) // Tenant is archived + { + // Fail (update) the check runs. + // + gq_built_result br ( + make_built_result ( + result_status::error, warning_success, + "Unable to rebuild individual configuration: build has " + "been archived")); + + // Try to update the conclusion check run even if the first update + // fails. + // + bool f (false); // Failed. - tenant_service& ts (d->service); + if (gq_update_check_run (error, bcr, iat->token, + repo_node_id, cr.check_run.node_id, + build_state::built, br)) + { + l3 ([&]{trace << "updated check_run { " << bcr << " }";}); + } + else + { + error << "check_run " << cr.check_run.node_id + << ": unable to update check run"; + f = true; + } - try + if (gq_update_check_run (error, ccr, iat->token, + repo_node_id, *sd.conclusion_node_id, + build_state::built, move (br))) { - sd = service_data (*ts.data); + l3 ([&]{trace << "updated conclusion check_run { " << ccr << " }";}); } - catch (const invalid_argument& e) + else { - fail << "failed to parse service data: " << e; + error << "check_run " << cr.check_run.node_id + << ": unable to update conclusion check run"; + f = true; } - tenant_id = d->tenant_id; - } - else - { - // No such tenant. + // Fail the handler if either of the check runs could not be + // updated. // - fail << "check run " << cr.check_run.node_id - << " re-requested but tenant_service with id " << sid - << " does not exist"; - } - } + if (f) + throw server_error (); - // Get a new IAT if the one from the service data has expired. 
-    //
-    const gh_installation_access_token* iat (nullptr);
-    optional<gh_installation_access_token> new_iat;
-
-    if (system_clock::now () > sd.installation_access.expires_at)
-    {
-      if ((new_iat = get_iat ()))
-        iat = &*new_iat;
-      else
-        throw server_error ();
+      return true;
+    }
    }
-    else
-      iat = &sd.installation_access;

    // Fail if it's the conclusion check run that is being re-requested.
    //
+    // Expect that if the user selects re-run all failed checks we will
+    // receive multiple check runs, one of which will be the conclusion. And
+    // if we fail it while it happens to arrive last, then we will end up in
+    // the wrong overall state (real check run is building while conclusion is
+    // failed). It seems the best we can do is to ignore it: if the user did
+    // request a rebuild of the conclusion check run explicitly, there will be
+    // no change, which is not ideal but is still an indication that this
+    // operation is not supported.
+    //
    if (cr.check_run.name == conclusion_check_run_name)
    {
      l3 ([&]{trace << "re-requested conclusion check_run";});

+#if 0
      if (!sd.conclusion_node_id)
        fail << "no conclusion node id for check run " << cr.check_run.node_id;
@@ -1043,7 +1390,6 @@ namespace brep
      //
      if (gq_update_check_run (error, ccr, iat->token,
                               repo_node_id, *sd.conclusion_node_id,
-                               nullopt /* details_url */,
                               build_state::built, move (br)))
      {
        l3 ([&]{trace << "updated conclusion check_run { " << ccr << " }";});
@@ -1054,6 +1400,7 @@ namespace brep
              << ": unable to update conclusion check run "
              << *sd.conclusion_node_id;
      }
+#endif

      return true;
    }
@@ -1123,6 +1470,8 @@ namespace brep
    ccr.state = build_state::building;
    ccr.state_synced = false;
+    ccr.details_url = details_url (tenant_id);
+    ccr.description = conclusion_check_run_building_description;

    if (gq_create_check_runs (error, check_runs, iat->token,
                              repo_node_id, head_sha))
@@ -1273,7 +1622,6 @@ namespace brep
      //
      if (gq_update_check_run (error, bcr, iat->token,
                               repo_node_id, *bcr.node_id,
-                               nullopt /* details_url */,
                               build_state::built, br))
      {
        l3 ([&]{trace << "updated check_run { " << bcr << " }";});
@@ -1290,7 +1638,6 @@ namespace brep
      //
      if (gq_update_check_run (error, ccr, iat->token,
                               repo_node_id, *ccr.node_id,
-                               nullopt /* details_url */,
                               build_state::built, move (br)))
      {
        l3 ([&]{trace << "updated conclusion check_run { " << ccr << " }";});
@@ -1310,160 +1657,6 @@ namespace brep
    return true;
  }

-  // Miscellaneous pull request facts
-  //
-  // - Although some of the GitHub documentation makes it sound like they
-  //   expect check runs to be added to both the PR head commit and the merge
-  //   commit, the PR UI does not react to the merge commit's check runs
-  //   consistently. It actually seems to be quite broken. The only thing it
-  //   does seem to do reliably is blocking the PR merge if the merge commit's
-  //   check runs are not successful (i.e, overriding the PR head commit's
-  //   check runs). But the UI looks quite messed up generally in this state.
-  //
-  // - When new commits are added to a PR base branch, pull_request.base.sha
-  //   does not change, but the test merge commit will be updated to include
-  //   the new commits to the base branch.
-  //
-  // - When new commits are added to a PR head branch, pull_request.head.sha
-  //   gets updated with the head commit's SHA and check_suite.pull_requests[]
-  //   will contain all PRs with this branch as head.
-  //
-  bool ci_github::
-  handle_pull_request (gh_pull_request_event pr, bool warning_success)
-  {
-    HANDLER_DIAG;
-
-    l3 ([&]{trace << "pull_request event { " << pr << " }";});
-
-    // While we don't need the installation access token in this request,
-    // let's obtain it to flush out any permission issues early. Also, it is
-    // valid for an hour so we will most likely make use of it.
-    //
-    optional<string> jwt (generate_jwt (pr.pull_request.app_id, trace, error));
-    if (!jwt)
-      throw server_error ();
-
-    optional<gh_installation_access_token> iat (
-      obtain_installation_access_token (pr.installation.id,
-                                        move (*jwt),
-                                        error));
-    if (!iat)
-      throw server_error ();
-
-    l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
-
-    // Distinguish between local and remote PRs by comparing the head and base
-    // repositories' paths.
-    //
-    service_data::kind_type kind (
-      pr.pull_request.head_path == pr.pull_request.base_path
-      ? service_data::local
-      : service_data::remote);
-
-    // Note that similar to the branch push case above, while it would have
-    // been nice to cancel the previous CI job once the PR head moves (the
-    // "synchronize" event), due to the head sharing problem the previous CI
-    // job might actually still be relevant (in both local and remote PR
-    // cases). So we only do it for the remote PRs and only if the head is not
-    // shared (via tenant reference counting).
-    //
-    if (kind == service_data::remote && pr.action == "synchronize")
-    {
-      if (pr.before)
-      {
-        // Service id that will uniquely identify the CI tenant.
-        //
-        string sid (pr.repository.node_id + ':' + *pr.before);
-
-        if (optional<tenant_service> ts = cancel (error, warn,
-                                                  verb_ ? &trace : nullptr,
-                                                  *build_db_, retry_,
-                                                  "ci-github", sid,
-                                                  true /* ref_count */))
-        {
-          l3 ([&]{trace << "pull request " << pr.pull_request.node_id
-                        << ": attempted to cancel CI of previous head commit"
-                        << " (ref_count: " << ts->ref_count << ')';});
-        }
-        else
-        {
-          // It's possible that there was no CI for the previous commit for
-          // various reasons (e.g., CI was not enabled).
-          //
-          l3 ([&]{trace << "pull request " << pr.pull_request.node_id
-                        << ": failed to cancel CI of previous head commit "
-                        << "with tenant_service id " << sid;});
-        }
-      }
-      else
-      {
-        error << "pull request " << pr.pull_request.node_id
-              << ": before commit is missing in synchronize event";
-      }
-    }
-
-    // Note: for remote PRs the check_sha will be set later, in
-    // build_unloaded_pre_check(), to test merge commit id.
-    //
-    string check_sha (kind == service_data::local
-                      ? pr.pull_request.head_sha
-                      : "");
-
-    // Note that PR rebuilds (re-requested) are handled by check_suite().
-    //
-    // Note that, in the case of a remote PR, GitHub will copy the PR head
-    // commit from the head (forked) repository into the base repository. So
-    // the check runs must always be added to the base repository, whether the
-    // PR is local or remote. The head commit refs are located at
-    // refs/pull/<PR-number>/head.
-    //
-    service_data sd (warning_success,
-                     move (iat->token),
-                     iat->expires_at,
-                     pr.pull_request.app_id,
-                     pr.installation.id,
-                     move (pr.repository.node_id),
-                     move (pr.repository.clone_url),
-                     kind, true /* pre_check */, false /* re_request */,
-                     move (check_sha),
-                     move (pr.pull_request.head_sha) /* report_sha */,
-                     pr.pull_request.node_id,
-                     pr.pull_request.number);
-
-    // Create an unloaded CI tenant for the pre-check phase (during which we
-    // wait for the PR's merge commit and behindness to become available).
-    //
-    // Create with an empty service id so that the generated tenant id is used
-    // instead during the pre-check phase (so as not to clash with a proper
-    // service id for this head commit, potentially created in
-    // handle_check_suite() or as another PR).
-    //
-    tenant_service ts ("", "ci-github", sd.json ());
-
-    // Note: use no delay since we need to start the actual CI (which in turn
-    // (re)creates the synthetic conclusion check run) as soon as possible.
-    //
-    // After this call we will start getting the build_unloaded()
-    // notifications -- which will be routed to build_unloaded_pre_check() --
-    // until we cancel the tenant or it gets archived after some timeout.
-    // (Note that we never actually load this request, we always cancel it;
-    // see build_unloaded_pre_check() for details.)
-    //
-    if (!create (error,
-                 warn,
-                 verb_ ? &trace : nullptr,
-                 *build_db_, retry_,
-                 move (ts),
-                 chrono::seconds (30) /* interval */,
-                 chrono::seconds (0) /* delay */))
-    {
-      fail << "pull request " << pr.pull_request.node_id
-           << ": unable to create unloaded pre-check tenant";
-    }
-
-    return true;
-  }
-
  function<optional<string> (const string&, const tenant_service&)> ci_github::
  build_unloaded (const string& ti,
                  tenant_service&& ts,
@@ -1577,7 +1770,8 @@ namespace brep

    // Create an unloaded CI tenant, doing nothing if one already exists
    // (which could've been created by a head branch push or another PR
-    // sharing the same head commit).
+    // sharing the same head commit). Note that the tenant's reference count
+    // is incremented in all cases.
    //
    // Note: use no delay since we need to (re)create the synthetic
    // conclusion check run as soon as possible.
@@ -1609,7 +1803,7 @@ namespace brep
      // local PR, or another remote PR) which in turn means the CI
      // result may end up being for head, not merge commit. There is
      // nothing we can do about it on our side (the user can enable the
-      // head-behind- base protection on their side).
+      // head-behind-base protection on their side).
      //
      if (sd.kind == service_data::remote)
      {
@@ -1742,9 +1936,13 @@ namespace brep
    // Create a synthetic check run with an in-progress state. Return the
    // check run on success or nullopt on failure.
    //
-    auto create_synthetic_cr = [iat,
+    auto create_synthetic_cr = [&tenant_id,
+                                iat,
                                &sd,
-                                &error] (string name) -> optional<check_run>
+                                &error,
+                                this] (string name,
+                                       const check_run::description_type& output)
+      -> optional<check_run>
    {
      check_run cr;
      cr.name = move (name);
@@ -1756,8 +1954,9 @@ namespace brep
                               iat->token,
                               sd.repository_node_id,
                               sd.report_sha,
-                               nullopt /* details_url */,
-                               build_state::building))
+                               details_url (tenant_id),
+                               build_state::building,
+                               output.title, output.summary))
      {
        return cr;
      }
@@ -1792,7 +1991,6 @@ namespace brep
                               iat->token,
                               sd.repository_node_id,
                               node_id,
-                               nullopt /* details_url */,
                               build_state::built,
                               move (br)))
      {
@@ -1803,19 +2001,21 @@ namespace brep
      return nullopt;
    };

+    // (Re)create the synthetic conclusion check run first in order to convert
+    // a potentially completed check suite to building as early as possible.
+    //
    // Note that there is a window between receipt of a check_suite or
    // pull_request event and the first bot/worker asking for a task, which
    // could be substantial. We could probably (also) try to (re)create the
    // conclusion checkrun in the webhook handler. @@ Maybe/later.
-
-    // (Re)create the synthetic conclusion check run first in order to convert
-    // a potentially completed check suite to building as early as possible.
    //
    string conclusion_node_id; // Conclusion check run node ID.

    if (!sd.conclusion_node_id)
    {
-      if (auto cr = create_synthetic_cr (conclusion_check_run_name))
+      if (auto cr =
+            create_synthetic_cr (conclusion_check_run_name,
+                                 conclusion_check_run_building_description))
      {
        l3 ([&]{trace << "created check_run { " << *cr << " }";});
@@ -2109,11 +2309,15 @@ namespace brep
          //
          bs.push_back (b);

-          crs.emplace_back (move (bid),
-                            gh_check_run_name (b, &hs),
-                            nullopt, /* node_id */
-                            build_state::queued,
-                            false /* state_synced */);
+          crs.push_back (
+            check_run {move (bid),
+                       gh_check_run_name (b, &hs),
+                       nullopt, /* node_id */
+                       build_state::queued,
+                       false /* state_synced */,
+                       nullopt /* status */,
+                       details_url (b),
+                       nullopt /* description */});
        }
      }
@@ -2317,7 +2521,6 @@ namespace brep
                                iat->token,
                                sd.repository_node_id,
                                *cr->node_id,
-                                details_url (b),
                                build_state::building))
      {
        // Do nothing further if the state was already built on GitHub (note
@@ -2393,7 +2596,8 @@ namespace brep
    return nullptr;
  }

-  function<optional<string> (const string&, const tenant_service&)> ci_github::
+  function<pair<optional<string>, bool> (const string&,
+                                         const tenant_service&)> ci_github::
  build_built (const string& tenant_id,
               const tenant_service& ts,
               const build& b,
@@ -2432,8 +2636,57 @@ namespace brep
    //
    optional<result_status> conclusion (*b.status);

+    // Conclusion check run summary. Will include the success/warning/failure
+    // count breakdown.
+    //
+    string summary;
+
    check_run cr; // Updated check run.
    {
+      // The success/warning/failure counts.
+      //
+      // Note that the warning count will be included in the success or
+      // failure count (depending on the value of sd.warning_success).
+      //
+      size_t succ_count (0), warn_count (0), fail_count (0);
+
+      // Count a result_status under the appropriate category.
+      //
+      auto count = [&succ_count,
+                    &warn_count,
+                    &fail_count,
+                    ws = sd.warning_success] (result_status rs)
+      {
+        switch (rs)
+        {
+        case result_status::success: ++succ_count; break;
+
+        case result_status::error:
+        case result_status::abort:
+        case result_status::abnormal: ++fail_count; break;
+
+        case result_status::warning:
+          {
+            ++warn_count;
+
+            if (ws)
+              ++succ_count;
+            else
+              ++fail_count;
+
+            break;
+          }
+
+        case result_status::skip:
+        case result_status::interrupt:
+          {
+            assert (false);
+          }
+        }
+      };
+
+      count (*b.status);
+
      string bid (gh_check_run_name (b)); // Full build id.

      optional<check_run> scr;
@@ -2452,6 +2705,8 @@ namespace brep

          if (conclusion)
            *conclusion |= *cr.status;
+
+          count (*cr.status);
        }
        else
          conclusion = nullopt;
@@ -2489,6 +2744,29 @@ namespace brep
      }

      cr.state_synced = false;
+
+      // Construct the conclusion check run summary if all check runs are
+      // built.
+      //
+      if (conclusion)
+      {
+        ostringstream os;
+
+        // Note: the warning count has already been included in the success or
+        // failure count.
+        //
+        os << fail_count << " failed";
+        if (!sd.warning_success && warn_count != 0)
+          os << " (" << warn_count << " due to warnings)";
+
+        os << ", " << succ_count << " succeeded";
+        if (sd.warning_success && warn_count != 0)
+          os << " (" << warn_count << " with warnings)";
+
+        os << ", " << (succ_count + fail_count) << " total";
+
+        summary = os.str ();
+      }
    }

    // Get a new installation access token if the current one has expired.
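// A minimal standalone sketch (hypothetical, not code from this commit) that
// approximates the conclusion summary string assembled in the hunk above, so
// the format can be exercised in isolation. The function and parameter names
// here are assumptions; the counts are expected to already have the warning
// count folded into the success or failure count, as in the code above.

#include <cstddef>
#include <sstream>
#include <string>

static std::string
summarize (std::size_t succ, std::size_t warn, std::size_t fail,
           bool warning_success)
{
  std::ostringstream os;

  os << fail << " failed";
  if (!warning_success && warn != 0)
    os << " (" << warn << " due to warnings)";

  os << ", " << succ << " succeeded";
  if (warning_success && warn != 0)
    os << " (" << warn << " with warnings)";

  os << ", " << (succ + fail) << " total";
  return os.str ();
}

// For example, summarize (3, 1, 1, true) yields
// "1 failed, 3 succeeded (1 with warnings), 4 total".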
@@ -2633,7 +2911,6 @@ namespace brep
                               iat->token,
                               sd.repository_node_id,
                               *cr.node_id,
-                               details_url (b),
                               build_state::built, move (br)))
      {
@@ -2657,7 +2934,6 @@ namespace brep
                               sd.repository_node_id, sd.report_sha,
                               details_url (b),
-
                               build_state::built, move (br)))
      {
        assert (cr.state == build_state::built);
@@ -2681,8 +2957,7 @@ namespace brep
      result_status rs (*conclusion);

      gq_built_result br (
-        make_built_result (rs, sd.warning_success,
-                           "All configurations are built"));
+        make_built_result (rs, sd.warning_success, move (summary)));

      check_run cr;
@@ -2698,7 +2973,6 @@ namespace brep
                               iat->token,
                               sd.repository_node_id,
                               *sd.conclusion_node_id,
-                               nullopt /* details_url */,
                               build_state::built, move (br)))
      {
@@ -2725,13 +2999,15 @@ namespace brep
            completed = completed,
            error = move (error),
            warn = move (warn)] (const string& ti,
-                                 const tenant_service& ts) -> optional<string>
+                                 const tenant_service& ts)
    {
      // NOTE: this lambda may be called repeatedly (e.g., due to transaction
      // being aborted) and so should not move out of its captures.

+      // Do nothing if the tenant has been replaced.
+      //
      if (tenant_id != ti)
-        return nullopt; // Do nothing if the tenant has been replaced.
+        return make_pair (optional<string> (), false);

      service_data sd;
      try
@@ -2741,7 +3017,7 @@ namespace brep
      catch (const invalid_argument& e)
      {
        error << "failed to parse service data: " << e;
-        return nullopt;
+        return make_pair (optional<string> (), false);
      }

      if (iat)
@@ -2799,7 +3075,7 @@ namespace brep
        }
      }

-      return sd.json ();
+      return make_pair (optional<string> (sd.json ()), false);
    };
  }
  catch (const std::exception& e)
@@ -2818,10 +3094,11 @@ namespace brep
  {
    // This code is based on build_force_url() in mod/build.cxx.
    //
-    return options_->host () +
-      "/@" + b.tenant +
+    return
+      options_->host () +
+      tenant_dir (options_->root (), b.tenant).string () +
      "?builds=" + mime_url_encode (b.package_name.string ()) +
-      "&pv=" + b.package_version.string () +
+      "&pv=" + mime_url_encode (b.package_version.string ()) +
      "&tg=" + mime_url_encode (b.target.string ()) +
      "&tc=" + mime_url_encode (b.target_config_name) +
      "&pc=" + mime_url_encode (b.package_config_name) +
@@ -2829,6 +3106,15 @@ namespace brep
      b.toolchain_version.string ();
  }

+  string ci_github::
+  details_url (const string& t) const
+  {
+    return
+      options_->host () +
+      tenant_dir (options_->root (), t).string () +
+      "?builds";
+  }
+
  static optional<build_id>
  parse_details_url (const string& details_url)
  try
@@ -2841,12 +3127,21 @@ namespace brep
    // Extract the tenant from the URL path.
    //
-    // Example path: @d2586f57-21dc-40b7-beb2-6517ad7917dd
+    // Example paths:
    //
-    if (!u.path || u.path->size () != 37 || (*u.path)[0] != '@')
+    //   @d2586f57-21dc-40b7-beb2-6517ad7917dd (37 characters)
+    //   <brep-root>/@d2586f57-21dc-40b7-beb2-6517ad7917dd
+    //
+    if (!u.path)
      return nullopt;

-    r.package.tenant = u.path->substr (1);
+    {
+      size_t p (u.path->find ('@'));
+      if (p == string::npos || u.path->size () - p != 37)
+        return nullopt; // Tenant not found or too short.
+
+      r.package.tenant = u.path->substr (p + 1);
+    }

    // Extract the rest of the build_id members from the URL query.
    //
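// A small standalone sketch (hypothetical, not code from this commit) of the
// tenant extraction scheme shown above: the details URL path ends with
// "@<36-character-uuid>", optionally preceded by the brep root directory.
// The function name is an assumption; this is not the brep parser itself.

#include <cstddef>
#include <optional>
#include <string>

static std::optional<std::string>
extract_tenant (const std::string& path)
{
  std::size_t p (path.find ('@'));

  // Expect exactly 36 characters (a UUID such as
  // d2586f57-21dc-40b7-beb2-6517ad7917dd) after the '@'.
  //
  if (p == std::string::npos || path.size () - p != 37)
    return std::nullopt;

  return path.substr (p + 1);
}

// For example, extract_tenant ("/pkg/@d2586f57-21dc-40b7-beb2-6517ad7917dd")
// returns the 36-character UUID while extract_tenant ("/pkg/") returns
// nullopt.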
@@ -2889,7 +3184,7 @@ namespace brep
    };

    if      (c (pn, "builds")) r.package.name = package_name (decval ());
-    else if (c (pv, "pv")) r.package.version = make_version (rawval ());
+    else if (c (pv, "pv")) r.package.version = make_version (decval ());
    else if (c (tg, "tg")) r.target = target_triplet (decval ());
    else if (c (tc, "tc")) r.target_config_name = decval ();
    else if (c (pc, "pc")) r.package_config_name = decval ();
@@ -2901,8 +3196,8 @@ namespace brep
      // Note: parsing code based on mod/mod-builds.cxx.
      //
-      size_t p (v.find_first_of ('-'));
-      if (p >= v.size () - 1)
+      size_t p (v.find ('-'));
+      if (p == string::npos || p >= v.size () - 1)
        return nullopt; // Invalid format.

      r.toolchain_name = v.substr (0, p);
diff --git a/mod/mod-ci-github.hxx b/mod/mod-ci-github.hxx
index 059801a..4d306bb 100644
--- a/mod/mod-ci-github.hxx
+++ b/mod/mod-ci-github.hxx
@@ -72,7 +72,8 @@ namespace brep
                    const build&,
                    const diag_epilogue& log_writer) const noexcept override;

-    virtual function<optional<string> (const string&, const tenant_service&)>
+    virtual function<pair<optional<string>, bool> (const string&,
+                                                   const tenant_service&)>
    build_built (const string& tenant_id,
                 const tenant_service&,
                 const build&,
@@ -82,43 +83,56 @@ namespace brep
    virtual void
    init (cli::scanner&);

-    // Handle the check_suite event `requested` and `rerequested` actions.
+    // Handle push events (branch push).
    //
    // If warning_success is true, then map result_status::warning to SUCCESS
    // and to FAILURE otherwise.
    //
    bool
-    handle_check_suite_request (gh_check_suite_event, bool warning_success);
+    handle_branch_push (gh_push_event, bool warning_success);

-    // Handle the check_suite event `completed` action.
+    // Handle the pull_request event `opened` and `synchronize` actions.
    //
    // If warning_success is true, then map result_status::warning to SUCCESS
    // and to FAILURE otherwise.
    //
    bool
-    handle_check_suite_completed (gh_check_suite_event, bool warning_success);
+    handle_pull_request (gh_pull_request_event, bool warning_success);

-    // Handle the check_run event `rerequested` action.
+    // Handle the check_suite event `rerequested` action.
    //
    // If warning_success is true, then map result_status::warning to SUCCESS
    // and to FAILURE otherwise.
    //
    bool
-    handle_check_run_rerequest (const gh_check_run_event&, bool warning_success);
+    handle_check_suite_rerequest (gh_check_suite_event, bool warning_success);

-    // Handle the pull_request event `opened` and `synchronize` actions.
+    // Handle the check_suite event `completed` action.
    //
    // If warning_success is true, then map result_status::warning to SUCCESS
    // and to FAILURE otherwise.
    //
    bool
-    handle_pull_request (gh_pull_request_event, bool warning_success);
+    handle_check_suite_completed (gh_check_suite_event, bool warning_success);
+
+    // Handle the check_run event `rerequested` action.
+    //
+    // If warning_success is true, then map result_status::warning to SUCCESS
+    // and to FAILURE otherwise.
+    //
+    bool
+    handle_check_run_rerequest (const gh_check_run_event&, bool warning_success);

    // Build a check run details_url for a build.
    //
    string
    details_url (const build&) const;

+    // Build a check run details_url for a tenant.
+    //
+    string
+    details_url (const string& tenant) const;
+
    optional<string>
    generate_jwt (const string& app_id,
                  const basic_mark& trace,
@@ -137,6 +151,8 @@ namespace brep
    shared_ptr<options::ci_github> options_;

    tenant_service_map& tenant_service_map_;
+
+    string webhook_secret_;
  };
}
diff --git a/mod/mod-ci.cxx b/mod/mod-ci.cxx
index 46fbf6a..85c00c6 100644
--- a/mod/mod-ci.cxx
+++ b/mod/mod-ci.cxx
@@ -105,17 +105,17 @@ init (scanner& s)
        fail << "unable to read ci-form file '" << ci_form << "': " << e;
      }
    }
-  }

#ifdef BREP_CI_TENANT_SERVICE_UNLOADED
-  if (!options_->build_config_specified ())
-    fail << "package building functionality must be enabled";
+    if (!options_->build_config_specified ())
+      fail << "package building functionality must be enabled";

-  database_module::init (*options_, options_->build_db_retry ());
+    database_module::init (*options_, options_->build_db_retry ());
#endif

-  if (options_->root ().empty ())
-    options_->root (dir_path ("/"));
+    if (options_->root ().empty ())
+      options_->root (dir_path ("/"));
+  }
}

bool brep::ci::
@@ -131,8 +131,6 @@ handle (request& rq, response& rs)

  HANDLER_DIAG;

-  const dir_path& root (options_->root ());
-
  // We will respond with the manifest to the CI request submission protocol
  // violations and with a plain text message on the internal errors. In the
  // latter case we will always respond with the same neutral message for
@@ -180,6 +178,8 @@ handle (request& rq, response& rs)
  if (!options_->ci_data_specified ())
    return respond_manifest (404, "CI request submission disabled");

+  const dir_path& root (options_->root ());
+
  // Parse the request form data and verify the submission size limit.
  //
  // Note that the submission may include the overrides upload that we don't
@@ -387,18 +387,19 @@ handle (request& rq, response& rs)

  optional<start_result> r;

-  if (optional<string> ref = create (error,
-                                     warn,
-                                     verb_ ? &trace : nullptr,
-                                     *build_db_,
-                                     tenant_service ("", "ci", rl.string ()),
-                                     chrono::seconds (40),
-                                     chrono::seconds (10)))
+  if (optional<pair<string, duplicate_tenant_result>> ref =
+        create (error,
+                warn,
+                verb_ ? &trace : nullptr,
+                *build_db_, retry_,
+                tenant_service ("", "ci", rl.string ()),
+                chrono::seconds (40),
+                chrono::seconds (10)))
  {
    string msg ("unloaded CI request is created: " +
-                options_->host () + tenant_dir (root, *ref).string ());
+                options_->host () + tenant_dir (root, ref->first).string ());

-    r = start_result {200, move (msg), move (*ref), {}};
+    r = start_result {200, move (msg), move (ref->first), {}};
  }
#endif
@@ -495,8 +496,8 @@ build_building (const string& /*tenant_id*/,
  };
}

-function<optional<string> (const string& tenant_id,
-                           const brep::tenant_service&)> brep::ci::
+function<pair<optional<string>, bool> (const string& tenant_id,
+                                       const brep::tenant_service&)> brep::ci::
build_built (const string& /*tenant_id*/,
             const tenant_service&,
             const build& b,
@@ -514,13 +515,16 @@ build_built (const string& /*tenant_id*/,
              b.toolchain_name + '/' +
              b.toolchain_version.string ());

-    return ts.data ? *ts.data + ", " + s : s;
+    return make_pair (
+      optional<string> (ts.data ? *ts.data + ", " + s : s), false);
  };
}

#ifdef BREP_CI_TENANT_SERVICE_UNLOADED
-function<optional<string> (const brep::tenant_service&)> brep::ci::
-build_unloaded (tenant_service&& ts,
+function<optional<string> (const string& tenant_id,
+                           const brep::tenant_service&)> brep::ci::
+build_unloaded (const string& /* tenant_id */,
+                tenant_service&& ts,
                const diag_epilogue& log_writer) const noexcept
{
  NOTIFICATION_DIAG (log_writer);
@@ -532,7 +536,7 @@ build_unloaded (tenant_service&& ts,
    repository_location rl (*ts.data);

    if (!load (error, warn, verb_ ? &trace : nullptr,
-               *build_db_,
+               *build_db_, retry_,
               move (ts), rl))
      return nullptr; // The diagnostics is already issued.
@@ -545,7 +549,10 @@ build_unloaded (tenant_service&& ts,
    return nullptr;
  }

-  return [] (const tenant_service& ts) {return "loaded " + *ts.data;};
+  return [] (const string& tenant_id, const tenant_service& ts)
+  {
+    return "loaded " + tenant_id + ' ' + *ts.data;
+  };
}
#endif
#endif
diff --git a/mod/mod-ci.hxx b/mod/mod-ci.hxx
index 132b5b0..54532e6 100644
--- a/mod/mod-ci.hxx
+++ b/mod/mod-ci.hxx
@@ -87,8 +87,8 @@ namespace brep
                  const build&,
                  const diag_epilogue& log_writer) const noexcept override;

-    virtual function<optional<string> (const string& tenant_id,
-                                       const tenant_service&)>
+    virtual function<pair<optional<string>, bool> (const string& tenant_id,
+                                                   const tenant_service&)>
    build_built (const string& tenant_id,
                 const tenant_service&,
                 const build&,
diff --git a/mod/mod-submit.cxx b/mod/mod-submit.cxx
index 5ee358a..6c767cb 100644
--- a/mod/mod-submit.cxx
+++ b/mod/mod-submit.cxx
@@ -93,10 +93,10 @@ init (scanner& s)
    if (options_->submit_handler_specified () &&
        options_->submit_handler ().relative ())
      fail << "submit-handler path must be absolute";
-  }

-  if (options_->root ().empty ())
-    options_->root (dir_path ("/"));
+    if (options_->root ().empty ())
+      options_->root (dir_path ("/"));
+  }
}

bool brep::submit::
@@ -109,8 +109,6 @@ handle (request& rq, response& rs)

  HANDLER_DIAG;

-  const dir_path& root (options_->root ());
-
  // We will respond with the manifest to the submission protocol violations
  // and with a plain text message on the internal errors. In the latter case
  // we will always respond with the same neutral message for security reason,
@@ -163,6 +161,8 @@ handle (request& rq, response& rs)
  if (!options_->submit_data_specified ())
    return respond_manifest (404, "submission disabled");

+  const dir_path& root (options_->root ());
+
  // Parse the request form data and verify the submission size limit.
  //
  // Note that if it is exceeded, then there are parameters and this is the
diff --git a/mod/module.cli b/mod/module.cli
index 1273bf4..ba2b986 100644
--- a/mod/module.cli
+++ b/mod/module.cli
@@ -850,12 +850,12 @@ namespace brep
    // GitHub CI-specific options.
    //
-    string ci-github-app-webhook-secret
+    path ci-github-app-webhook-secret
    {
-      "<secret>",
+      "<path>",
      "The GitHub App's configured webhook secret. If not set, then the
-       GitHub CI service is disabled. Note: make sure to choose a strong
-       (random) secret."
+       GitHub CI service is disabled. Note that the path must be absolute.
+       Note: make sure to choose a strong (random) secret."
    }

    std::map<string, dir_path> ci-github-app-id-private-key
    {
      "<id>=<path>",
      "The private key used during GitHub API authentication for the
       specified GitHub App ID. Both values are found in the GitHub App's
-       settings."
+       settings. Note that the paths must be absolute."
    }
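// An illustrative, hypothetical sketch of how a GitHub webhook payload can be
// checked against the configured webhook secret referenced by the
// ci-github-app-webhook-secret option above. GitHub sends an
// X-Hub-Signature-256 header of the form "sha256=<hex>" where the hex value
// is the HMAC-SHA256 of the raw request body keyed by the secret. This sketch
// uses OpenSSL directly and made-up names; it is not brep's actual
// implementation.

#include <cstdio>
#include <string>

#include <openssl/evp.h>
#include <openssl/hmac.h>

static bool
verify_webhook_signature (const std::string& body,
                          const std::string& secret,
                          const std::string& header) // "sha256=<hex>"
{
  unsigned char md[EVP_MAX_MD_SIZE];
  unsigned int n (0);

  // Compute HMAC-SHA256 of the request body keyed by the webhook secret.
  //
  HMAC (EVP_sha256 (),
        secret.data (), static_cast<int> (secret.size ()),
        reinterpret_cast<const unsigned char*> (body.data ()), body.size (),
        md, &n);

  std::string expected ("sha256=");
  char hex[3];
  for (unsigned int i (0); i != n; ++i)
  {
    std::snprintf (hex, sizeof (hex), "%02x", md[i]);
    expected += hex;
  }

  // Note: a production implementation should use a constant-time comparison.
  //
  return expected == header;
}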

    uint16_t ci-github-jwt-validity-period = 600
diff --git a/mod/tenant-service.cxx b/mod/tenant-service.cxx
new file mode 100644
index 0000000..2c1f3bc
--- /dev/null
+++ b/mod/tenant-service.cxx
@@ -0,0 +1,18 @@
+// file    : mod/tenant-service.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <mod/tenant-service.hxx>
+
+namespace brep
+{
+  void tenant_service_build_built::
+  build_completed (const string& /* tenant_id */,
+                   const tenant_service&,
+                   const diag_epilogue& /* log_writer */) const noexcept
+  {
+    // If this notification is requested, then this function needs to be
+    // overridden by the tenant service implementation.
+    //
+    assert (false);
+  }
+}
diff --git a/mod/tenant-service.hxx b/mod/tenant-service.hxx
index 5564a56..d909eaa 100644
--- a/mod/tenant-service.hxx
+++ b/mod/tenant-service.hxx
@@ -127,12 +127,20 @@ namespace brep
  class tenant_service_build_built: public virtual tenant_service_base
  {
  public:
-    virtual function<optional<string> (const string& tenant_id,
-                                       const tenant_service&)>
+    // The second half of the pair signals whether to call the
+    // build_completed() notification.
+    //
+    virtual function<pair<optional<string>, bool> (const string& tenant_id,
+                                                   const tenant_service&)>
    build_built (const string& tenant_id,
                 const tenant_service&,
                 const build&,
                 const diag_epilogue& log_writer) const noexcept = 0;
+
+    virtual void
+    build_completed (const string& tenant_id,
+                     const tenant_service&,
+                     const diag_epilogue& log_writer) const noexcept;
  };

  // This notification is only made on unloaded CI requests created with the
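// A hypothetical sketch (not part of the commit above) of how a tenant
// service implementation might use the new build_built() return type: the
// second half of the returned pair requests the build_completed()
// notification added to tenant-service.hxx above. The class name and the
// service data handling here are assumptions; brep's type aliases (string,
// optional, pair, function) are assumed to come from the included header.

#include <mod/tenant-service.hxx>

namespace brep
{
  class example_service: public tenant_service_build_built
  {
  public:
    virtual function<pair<optional<string>, bool> (const string&,
                                                   const tenant_service&)>
    build_built (const string& /* tenant_id */,
                 const tenant_service&,
                 const build&,
                 const diag_epilogue& /* log_writer */) const noexcept override
    {
      return [] (const string&, const tenant_service& ts)
      {
        // Keep the service data unchanged and ask for build_completed() to
        // be called once all the builds in this tenant are done.
        //
        return make_pair (optional<string> (ts.data), true);
      };
    }

    virtual void
    build_completed (const string& /* tenant_id */,
                     const tenant_service&,
                     const diag_epilogue& /* log_writer */) const noexcept
                     override
    {
      // React to the completion of the whole CI request here.
    }
  };
}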