-rw-r--r--  INSTALL                                | 211
-rw-r--r--  INSTALL-CI-DEV                         |   2
-rw-r--r--  INSTALL-DEV                            |   1
-rw-r--r--  INSTALL-GITHUB-DEV                     | 135
-rw-r--r--  INSTALL-PROXY                          |   2
-rw-r--r--  LICENSE                                |   2
-rw-r--r--  etc/brep-module.conf                   |  17
-rw-r--r--  etc/private/install/brep-module.conf   |  17
-rw-r--r--  libbrep/utility.hxx                    |   5
-rw-r--r--  manifest                               |   6
-rw-r--r--  mod/hmac.cxx                           |  13
-rw-r--r--  mod/mod-build-configs.cxx              |   6
-rw-r--r--  mod/mod-builds.cxx                     |   6
-rw-r--r--  mod/mod-ci-github-gh.cxx               | 183
-rw-r--r--  mod/mod-ci-github-gh.hxx               | 169
-rw-r--r--  mod/mod-ci-github-gq.cxx               |   2
-rw-r--r--  mod/mod-ci-github-service-data.cxx     |  26
-rw-r--r--  mod/mod-ci-github-service-data.hxx     |   9
-rw-r--r--  mod/mod-ci-github.cxx                  | 897
-rw-r--r--  mod/mod-ci-github.hxx                  |  32
-rw-r--r--  mod/mod-ci.cxx                         |  48
-rw-r--r--  mod/mod-submit.cxx                     |  10
-rw-r--r--  mod/module.cli                         |  23
23 files changed, 1279 insertions, 543 deletions
diff --git a/INSTALL b/INSTALL
index 3476de0..7986b84 100644
--- a/INSTALL
+++ b/INSTALL
@@ -1,7 +1,8 @@
-This guide shows how to install and configure brep on a "deployment" machine as
-opposed to a "development" one (see INSTALL-DEV for the latter). Here we assume
-you are using a systemd-based distribution. If not, then you will need to
-replace systemctl commands with the equivalent init.d ones.
+This guide describes how to install and configure brep on a "deployment"
+machine as opposed to a "development" one (see INSTALL-DEV for the
+latter). Here we assume you are using a systemd-based distribution. If not,
+then you will need to replace systemctl commands with the equivalent init.d
+ones.
The below instructions include steps for setting up brep as the build2 build
bot controller, package submission, and CI request services. All these
@@ -233,6 +234,61 @@ $ psql -d brep_build -c 'SELECT DISTINCT name FROM build_package'
$ cp install/share/brep/etc/brep-module.conf config/
$ edit config/brep-module.conf # Adjust default values if required.
+See the following sub-sections for details on configuring various optional
+brep functionality.
+
+Once the brep module configuration is ready, the next step is to enable
+it in the Apache2 configuration file. Here we assume you have set up an
+appropriate Apache2 virtual server. Open the corresponding Apache2 .conf
+file and add the contents of brep/etc/brep-apache2.conf into the
+<VirtualHost> section.
+
+The output content types of the brep module are application/xhtml+xml,
+text/manifest and text/plain. If you would like to make sure they get
+compressed (along with linked CSS), also add the following lines:
+
+ # Compress brep output (xhtml+xml) and CSS.
+ #
+ AddOutputFilterByType DEFLATE application/xhtml+xml
+ AddOutputFilterByType DEFLATE text/manifest
+ AddOutputFilterByType DEFLATE text/plain
+ AddOutputFilterByType DEFLATE text/css
+
+Then restart Apache2:
+
+$ sudo systemctl restart apache2
+
+To verify, visit the repository root. To troubleshoot, see Apache logs.
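+
+For example (here <host> stands for your virtual server name and the log path
+assumes a stock Debian/Ubuntu Apache2 layout; adjust both to your setup):
+
+$ curl -I https://<host>/
+$ sudo tail /var/log/apache2/error.log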
+
+Now that Apache2 loads the brep module, which requires PostgreSQL, it is a
+good idea to make the Apache2 service depend on PostgreSQL so that they are
+started in the proper order. Here is how we can do it with systemd (with newer
+versions you can use 'systemctl edit' instead of mkdir and cat):
+
+# mkdir -p /etc/systemd/system/apache2.service.d/
+# cat >/etc/systemd/system/apache2.service.d/postgresql.conf
+[Unit]
+Requires=postgresql.service
+After=postgresql.service
+^D
+
+# mkdir -p /etc/systemd/system/postgresql.service.d/
+# cat >/etc/systemd/system/postgresql.service.d/apache2.conf
+[Unit]
+Wants=apache2.service
+^D
+
+# systemctl daemon-reload
+# systemctl cat apache2 # Verify override is listed.
+# systemctl cat postgresql # Verify override is listed.
+# systemctl stop postgresql
+# systemctl status apache2 # Verify stopped.
+# systemctl start postgresql
+# systemctl status apache2 # Verify started.
+
+
+6.1 Enabling build bot controller functionality
+
To enable the build2 build bot controller functionality you will need to set
the build-config option in brep-module.conf. To also enable the build
artifacts upload functionality you will need to specify the upload-data
@@ -250,6 +306,9 @@ $ setfacl -m g:www-data:rwx /home/brep/bindist-data
For sample upload handler implementations see brep/handler/upload/.
+
+6.2 Enabling package submission functionality
+
To enable the package submission functionality you will need to specify the
submit-data and submit-temp directories in brep-module.conf. Note that these
directories must exist and have read, write, and execute permissions granted
@@ -272,6 +331,9 @@ $ edit config/submit.xhtml # Add custom form fields, adjust CSS style, etc.
For sample submission handler implementations see brep/handler/submit/.
+
+6.3 Enabling CI request functionality
+
To enable the CI request functionality you will need to specify the ci-data
directory in brep-module.conf. Note that this directory must exist and have
read, write, and execute permissions granted to the www-data user. This, for
@@ -291,52 +353,119 @@ $ edit config/ci.xhtml # Add custom form fields, adjust CSS style, etc.
For sample CI request handler implementations see brep/handler/ci/.
-Here we assume you have setup an appropriate Apache2 virtual server. Open the
-corresponding Apache2 .conf file and add the contents of
-brep/etc/brep-apache2.conf into the <VirtualHost> section.
-The output content types of the brep module are application/xhtml+xml,
-text/manifest and text/plain. If you would like to make sure they get
-compressed (along with linked CSS), also add the following lines:
+6.4 Enabling GitHub CI integration
- # Compress brep output (xhtml+xml) and CSS.
- #
- AddOutputFilterByType DEFLATE application/xhtml+xml
- AddOutputFilterByType DEFLATE text/manifest
- AddOutputFilterByType DEFLATE text/plain
- AddOutputFilterByType DEFLATE text/css
+6.4.1 Background
-Restart Apache2:
+The GitHub CI integration has one user-configurable setting:
+warning=<success|failure> (whether or not to fail on warnings).
-$ sudo systemctl restart apache2
+In order not to have to support repository configuration files, a deployment
+will consist of two registered GitHub Apps with the same webhook URL (i.e.,
+the same brep instance) but different query parameters: one with
+warning=success and the other with warning=failure. The App id is passed (as a
+query parameter) so that we know which private key to use (the key cannot be
+shared between Apps).
-To verify, visit the repository root. To troubleshoot, see Apache logs.
+We will call the warning=success App the "Default App" and the warning=failure
+App the "Werror App".
-Now that Apache2 loads the brep module which requires PostgreSQL, it is a good
-idea to make the Apache2 service depend on PostgreSQL so that they are started
-in proper order. Here is how we can do it with systemd (with newer versions
-you can use 'systemctl edit' instead of mkdir and cat):
+6.4.2 Create the GitHub Apps
-# mkdir -p /etc/systemd/system/apache2.service.d/
-# cat >/etc/systemd/system/apache2.service.d/postgresql.conf
-[Unit]
-Requires=postgresql.service
-After=postgresql.service
-^D
+To create a GitHub App under the <org> organization, visit
+https://github.com/organizations/<org>/settings/apps (Settings -> Developer
+settings -> GitHub Apps). Then click on New GitHub App.
-# mkdir -p /etc/systemd/system/postgresql.service.d/
-# cat >/etc/systemd/system/postgresql.service.d/apache2.conf
-[Unit]
-Wants=apache2.service
-^D
+App names (note: 34 character limit):
-# systemctl daemon-reload
-# systemctl cat apache2 # Verify override is listed.
-# systemctl cat postgresql # Verify override is listed.
-# systemctl stop postgresql
-# systemctl status apache2 # Verify stopped.
-# systemctl start postgresql
-# systemctl status apache2 # Verify started.
+ Default App: "<org> CI"
+ Werror App: "<org> CI - warnings as errors"
+
+App description:
+
+ Default App: "Trigger <org> CI on branch push and pull request."
+ Werror App: "Trigger <org> CI on branch push and pull request. Warnings are
+  treated as errors."
+
+App homepage:
+
+ https://ci.<org>.org/
+
+Skip the "Identifying and authorizing users" and "Post installation" sections.
+
+Leave webhooks active.
+
+Webhook URL:
+
+ Default App: https://ci.<org>.org/?ci-github&app-id=XXX&warning=success
+ Werror App: https://ci.<org>.org/?ci-github&app-id=XXX&warning=failure
+
+Note that the App id only becomes available once the App has been registered,
+so we will update it later in both URLs.
+
+Webhook secret: Use the same random 64-character string for both Apps.
+
+ echo `tr -dc -- A-Za-z0-9 </dev/urandom | head -c 64`
+
+Note that GitHub says only that the secret should be "a random string with
+high entropy." However, many sources say 32 bytes should be secure enough
+for HMAC-SHA256, while other sources recommend 64 bytes for maximal security
+at an insignificant performance cost. (Keys longer than 64 bytes are hashed to
+match the internal block size and are therefore not recommended.)
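+
+For reference, brep authenticates each delivery by computing an HMAC-SHA256
+over the request body with this secret (see mod/hmac.cxx). If you ever need to
+verify a signature manually for a saved payload (payload.json below is just a
+placeholder), compare the hex digest printed by
+
+  openssl dgst -sha256 -hmac '<secret>' <payload.json
+
+with the value after "sha256=" in the delivery's X-Hub-Signature-256 header.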
+
+Repository permissions:
+ - Checks: RW
+ - Contents: RO (for Push events)
+ - Metadata (mandatory): RO
+ - Pull requests: RO
+
+Subscribed events:
+ - Check suite
+ - Pull request
+ - Push
+
+Note that GitHub Apps with write access to the "Checks" permission are
+automatically subscribed to check_suite(requested|rerequested) and check_run
+events, so there is no need to subscribe to them explicitly. However, in
+order to receive check_suite(completed) events, which we need, one does have
+to subscribe to Check suite.
+
+Select "Any account" under "Where can this GitHub App be installed?".
+
+Click "Create GitHub App".
+
+When the page reloads (should be the General tab), note the App id and replace
+the XXX in the webhook URL with it.
+
+Still in the General tab, scroll to Private keys and generate a private key.
+The file will be downloaded by the browser.
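+
+To sanity-check the downloaded key (path/to/private-key.pem below is just a
+placeholder), you can ask openssl to parse it:
+
+  openssl rsa -in path/to/private-key.pem -noout -check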
+
+@@ TODO Logo
+@@ TODO Create Marketplace listing
+
+6.4.3 Configure brep
+
+Assume the following configuration values:
+
+- Webhook secret: abcdefg
+- Default App id: 12345
+- Werror App id: 67890
+
+In brep-module.conf:
+
+Set the webhook secret from the GitHub App settings:
+
+ ci-github-app-webhook-secret abcdefg
+
+Associate each GitHub App id with the App's private key:
+
+ ci-github-app-id-private-key 12345=path/to/default-app-private-key.pem
+ ci-github-app-id-private-key 67890=path/to/werror-app-private-key.pem
+
+Now brep should be ready to handle the webhook event requests triggered by
+branch pushes and pull requests in repositories in which one of these Apps
+has been installed.
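+
+To pick up the new configuration, restart Apache2 and check its error log for
+ci-github startup diagnostics (for example, the module refuses to initialize
+if one of the configured paths is not absolute or if the secret file is
+empty):
+
+$ sudo systemctl restart apache2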
7. Optimize CSS
diff --git a/INSTALL-CI-DEV b/INSTALL-CI-DEV
index b8502d8..c1bd8ec 100644
--- a/INSTALL-CI-DEV
+++ b/INSTALL-CI-DEV
@@ -1,4 +1,4 @@
-This guide shows how to configure the brep module for serving the CI and
+This guide describes how to configure the brep module for serving the CI and
build2 build bot requests and how to smoke-test it.
Note that during the testing both the user and CI submission handler (executed
diff --git a/INSTALL-DEV b/INSTALL-DEV
index f023962..c197b7b 100644
--- a/INSTALL-DEV
+++ b/INSTALL-DEV
@@ -1,6 +1,7 @@
The goal of this setup is to run the brep Apache2 modules from the development
build while still being as close to the real deployment as possible. To this
end, we use default, system-wide installations of both Apache2 and Postgres.
+See also INSTALL-CI-DEV and INSTALL-GITHUB-DEV.
In the below instructions replace <user> with your login and www-data with the
user under which Apache2 is running (See the "User" directive in the Apache2
diff --git a/INSTALL-GITHUB-DEV b/INSTALL-GITHUB-DEV
index 45f4b9b..602b65d 100644
--- a/INSTALL-GITHUB-DEV
+++ b/INSTALL-GITHUB-DEV
@@ -1,4 +1,4 @@
-This document explains how to get GitHub webhooks (a notification that an
+This guide describes how to get GitHub webhooks (a notification that an
event such as a push has occurred on a repository) delivered to a
locally-running instance of brep (currently to initiate a CI job).
@@ -22,33 +22,67 @@ instance. This is achieved by setting the GitHub app's webhook URL to that of
the webhook proxy smee.io (as recommended by GitHub) and connecting it to our
local brep instance via the locally-run smee client (a Node application).
+0.0 User configuration
+
+This GitHub CI integration has only one user-configurable option:
+warning=<success|failure> (whether or not to fail on warnings).
+
+In order not to have to support repository configuration files, the live
+deployment will consist of two registered GitHub apps with the same webhook
+URL (i.e., the same brep instance) but different query parameters: one with
+warning=success and the other with warning=failure. The app id is passed so
+that we know which private key to use (the key cannot be shared between apps).
+
+Only a single GitHub app is required during development, however.
+
1. Follow the instructions in INSTALL-DEV to get brep set up.
-2. Register the GitHub app
+2. Set up the webhook proxy
-GitHub doc: Registering a GitHub App (note that that doc is a bit out of date)
-https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app
+Go to https://smee.io/ and start a new channel. Note the webhook proxy URL,
+which will look something like
+
+ https://smee.io/7stvNqVgyQRlIhbY
-Skip the steps marked "optional" and leave authorization-related settings at
-their defaults.
+This will be used in the GitHub app's webhook URL below.
-@@ TODO Update authentication-related info once better understood.
+3. Register the GitHub app
+
+GitHub reference: Registering a GitHub App (note: somewhat out of date)
+https://docs.github.com/en/apps/creating-github-apps/registering-a-github-app/registering-a-github-app
-At this stage the only settings important to us are:
+At this stage the only settings we need to update are:
- App name
-- Webhook URL (updated later -- leave webhooks deactivated for now)
+- Homepage URL (https://build2.org)
+- Webhook
+ - URL: set to the webhook proxy URL
+ - Secret (e.g. "deadbeef")
+ - Leave SSL verification enabled
- Repository permissions
- Checks: RW
+ - Metadata (mandatory): RO
- Pull requests: RO
- - Contents: RO
- - Metadata: RO
+ - Contents: RO (for Push events)
- Subscribed events
- Check suite
- - Check run
- Pull request
+ - Push
-3. Install the GitHub app
+  Note that GitHub apps with write access to the "Checks" permission are
+  automatically subscribed to check_suite(requested|rerequested) and check_run
+  events, so there is no need to subscribe to them explicitly. However, in
+  order to receive check_suite(completed) events, which we need, one does have
+  to subscribe to check_suite.
+
+Click "Create GitHub App" button. When the page reloads:
+
+- Note the app id (e.g. 12345).
+- Append "?app-id=12345&warning=failure" to the webhook URL.
+- Scroll to Private keys and generate a private key. The file will be
+ downloaded by the browser.
+
+4. Install the GitHub app
GitHub doc: Installing your own GitHub App
https://docs.github.com/en/apps/using-github-apps/installing-your-own-github-app
@@ -56,14 +90,20 @@ https://docs.github.com/en/apps/using-github-apps/installing-your-own-github-app
It would probably make sense to install it to your own user account and
restrict its access to a test repository.
-4. Forward GitHub webhooks to a local brep instance
+5. Configure brep
-Go to https://smee.io/ and start a new channel. Note the webhook proxy URL,
-which will look something like
+In brep-module.conf:
- https://smee.io/7stvNqVgyQRlIhbY
+- Set the webhook secret from the GitHub app settings:
-Set the GitHub app's webhook URL to this proxy URL.
+ ci-github-app-webhook-secret "deadbeef"
+
+- Associate the GitHub app id with the path of the private key downloaded
+ above:
+
+ ci-github-app-id-private-key 12345=path/to/private-key.pem
+
+6. Forward GitHub webhooks to a local brep instance
Install the smee client:
@@ -79,12 +119,53 @@ GitHub CI endpoint's URL with --target:
Trigger a webhook delivery from GitHub by pushing a commit to a repository the
GitHub app is installed in. You should see the webhook delivery on the smee.io
-channel page and the smee client will also print something to terminal.
-
-Any webhook delivery can be redelivered by clicking a button on the smee.io
-channel page (or the app's advanced settings page on GitHub) so no need to
-repeatedly push to the repository.
-
-You can also see the HTTP headers and JSON payload of delivered webhooks on
-both the GitHub app's advanced settings page and the smee.io channel page, but
-smee.io's presentation is much better. (There's also wireshark of course.)
+channel page.
+
+A webhook can be redelivered from the smee.io channel page or the app's
+advanced settings page on GitHub, so there is no need to push to the
+repository repeatedly.
+
+Both the smee.io channel and the GitHub app's advanced settings show the JSON
+payloads of delivered webhooks. smee.io's presentation is better but the
+GitHub app page also shows the HTTP headers. Wireshark might be better in
+both respects but can't redeliver webhooks.
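+
+For the initial smoke test an empty commit is a convenient way to trigger a
+delivery without touching the repository contents (assuming the app is
+installed in an already-cloned test repository):
+
+  git commit --allow-empty -m "Trigger CI" && git push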
+
+7. Test scenarios
+
+- Branch push (BP).
+
+ - Success (observe check runs state transitions). Test with 2 build configs.
+ - Failure (observe check runs state transitions).
+ - Push new commit to branch.
+ - Re-requested check suite.
+ - Re-requested check run (observe check run state transitions).
+ - Re-requested check run but tenant archived.
+ - Cancel previous check suite on forced push.
+ - Cancel previous check suite on branch delete.
+ - Head commit shared with another BP.
+ - Cancel previous check suite on forced push with shared previous commit.
+ - Cancel previous check suite on branch delete with shared previous commit.
+
+- Pull request (PR).
+
+ - Local PR.
+
+ - Success.
+ - Failure.
+ - Push new commit to head.
+ - Re-requested check suite.
+ - Re-requested check run.
+    - Head shared with BP (pull_request is received after check_suite).
+    - Not mergeable.
+ - Head behind base.
+    - Head commit has changed while testing mergeability.
+
+ - Remote PR.
+
+ - Success.
+ - Failure.
+ - Push new commit to head.
+ - Cancel previous check suite on head move.
+ - Re-requested check suite.
+ - Re-requested check run.
+ - Head shared with another remote PR.
diff --git a/INSTALL-PROXY b/INSTALL-PROXY
index 418846a..2a3f3f4 100644
--- a/INSTALL-PROXY
+++ b/INSTALL-PROXY
@@ -1,4 +1,4 @@
-This guide shows how to configure the Apache2-based HTTP proxy server for
+This guide describes how to configure the Apache2-based HTTP proxy server for
proxying HTTP(S) requests and caching the responses.
Note that for security reasons most clients (curl, wget, etc) perform HTTPS
diff --git a/LICENSE b/LICENSE
index 5a25163..a1e9de0 100644
--- a/LICENSE
+++ b/LICENSE
@@ -1,6 +1,6 @@
MIT License
-Copyright (c) 2014-2024 the build2 authors (see the AUTHORS and LEGAL files).
+Copyright (c) 2014-2025 the build2 authors (see the AUTHORS and LEGAL files).
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
diff --git a/etc/brep-module.conf b/etc/brep-module.conf
index 0143434..cdf028a 100644
--- a/etc/brep-module.conf
+++ b/etc/brep-module.conf
@@ -453,21 +453,18 @@ menu About=?about
# ci-handler-timeout
-# The GitHub App ID. Found in the app's settings on GitHub.
-#
-# ci-github-app-id
-
-
# The GitHub App's configured webhook secret. If not set, then the GitHub CI
-# service is disabled. Note: make sure to choose a strong (random) secret.
+# service is disabled. Note that the path must be absolute. Note: make sure to
+# choose a strong (random) secret.
#
-# ci-github-app-webhook-secret
+# ci-github-app-webhook-secret <path>
-# The private key used during GitHub API authentication. Created in the GitHub
-# App's settings.
+# The private key used during GitHub API authentication for the specified
+# GitHub App ID. Both values are found in the GitHub App's settings. Note that
+# the paths must be absolute.
#
-# ci-github-app-private-key
+# ci-github-app-id-private-key <id>=<path>
# The number of seconds a JWT (authentication token) should be valid for. The
diff --git a/etc/private/install/brep-module.conf b/etc/private/install/brep-module.conf
index e1f921d..2545a87 100644
--- a/etc/private/install/brep-module.conf
+++ b/etc/private/install/brep-module.conf
@@ -461,21 +461,18 @@ submit-handler-timeout 120
# ci-handler-timeout
-# The GitHub App ID. Found in the app's settings on GitHub.
-#
-# ci-github-app-id
-
-
# The GitHub App's configured webhook secret. If not set, then the GitHub CI
-# service is disabled. Note: make sure to choose a strong (random) secret.
+# service is disabled. Note that the path must be absolute. Note: make sure to
+# choose a strong (random) secret.
#
-# ci-github-app-webhook-secret
+# ci-github-app-webhook-secret <path>
-# The private key used during GitHub API authentication. Created in the GitHub
-# App's settings.
+# The private key used during GitHub API authentication for the specified
+# GitHub App ID. Both values are found in the GitHub App's settings. Note that
+# the paths must be absolute.
#
-# ci-github-app-private-key
+# ci-github-app-id-private-key <id>=<path>
# The number of seconds a JWT (authentication token) should be valid for. The
diff --git a/libbrep/utility.hxx b/libbrep/utility.hxx
index fce8fb5..1925d01 100644
--- a/libbrep/utility.hxx
+++ b/libbrep/utility.hxx
@@ -12,7 +12,7 @@
#include <algorithm> // *
#include <libbutl/utility.hxx> // icasecmp(), reverse_iterate(),
- // operator<<(ostream, exception)
+ // operator<<(ostream, exception), etc
namespace brep
{
@@ -28,6 +28,9 @@ namespace brep
// <libbutl/utility.hxx>
//
using butl::utf8;
+ using butl::trim;
+ using butl::trim_left;
+ using butl::trim_right;
using butl::icasecmp;
using butl::reverse_iterate;
}
diff --git a/manifest b/manifest
index 720f35e..7681a23 100644
--- a/manifest
+++ b/manifest
@@ -24,9 +24,9 @@ depends: libapr1
depends: libapreq2
depends: libcmark-gfm == 0.29.0-a.4
depends: libcmark-gfm-extensions == 0.29.0-a.4
-depends: libstudxml ^1.1.0-b.10
-depends: libodb ^2.5.0-b.27
-depends: libodb-pgsql ^2.5.0-b.27
+depends: libstudxml ^1.1.0
+depends: libodb ^2.5.0
+depends: libodb-pgsql ^2.5.0
depends: libbutl [0.18.0-a.0.1 0.18.0-a.1)
depends: libbpkg [0.18.0-a.0.1 0.18.0-a.1)
depends: libbbot [0.18.0-a.0.1 0.18.0-a.1)
diff --git a/mod/hmac.cxx b/mod/hmac.cxx
index 1a78b4c..cfb0e23 100644
--- a/mod/hmac.cxx
+++ b/mod/hmac.cxx
@@ -16,6 +16,12 @@ compute_hmac (const options::openssl_options& o,
// To compute an HMAC over stdin with the key <secret>:
//
+ // openssl dgst -sha256 -hmac <secret>
+ //
+ // Note that since openssl 3.0 the `mac` command is the preferred method
+ // for generating HMACs. For future reference, the equivalent command
+ // would be:
+ //
// openssl mac -digest SHA256 -macopt "key:<secret>" HMAC
//
// Note that here we assume both output and diagnostics will fit into pipe
@@ -25,10 +31,9 @@ compute_hmac (const options::openssl_options& o,
path ("-"), // Write output to openssl::in.
process::pipe (errp.in.get (), move (errp.out)),
process_env (o.openssl (), o.openssl_envvar ()),
- "mac", o.openssl_option (),
- "-digest", "SHA256",
- "-macopt", string ("key:") + k,
- "HMAC");
+ "dgst", o.openssl_option (),
+ "-sha256",
+ "-hmac", k);
ifdstream err (move (errp.in));
diff --git a/mod/mod-build-configs.cxx b/mod/mod-build-configs.cxx
index ce79edb..2754f95 100644
--- a/mod/mod-build-configs.cxx
+++ b/mod/mod-build-configs.cxx
@@ -34,10 +34,12 @@ init (scanner& s)
s, unknown_mode::fail, unknown_mode::fail);
if (options_->build_config_specified ())
+ {
build_config_module::init (*options_);
- if (options_->root ().empty ())
- options_->root (dir_path ("/"));
+ if (options_->root ().empty ())
+ options_->root (dir_path ("/"));
+ }
}
bool brep::build_configs::
diff --git a/mod/mod-builds.cxx b/mod/mod-builds.cxx
index 0155c2e..b11b3d7 100644
--- a/mod/mod-builds.cxx
+++ b/mod/mod-builds.cxx
@@ -58,10 +58,10 @@ init (scanner& s)
{
database_module::init (*options_, options_->build_db_retry ());
build_config_module::init (*options_);
- }
- if (options_->root ().empty ())
- options_->root (dir_path ("/"));
+ if (options_->root ().empty ())
+ options_->root (dir_path ("/"));
+ }
}
template <typename T, typename C>
diff --git a/mod/mod-ci-github-gh.cxx b/mod/mod-ci-github-gh.cxx
index a25e52c..2e886ac 100644
--- a/mod/mod-ci-github-gh.cxx
+++ b/mod/mod-ci-github-gh.cxx
@@ -123,7 +123,52 @@ namespace brep
{
p.next_expect (event::begin_object);
- bool ni (false), hb (false), hs (false), cc (false), co (false);
+ bool ni (false), hb (false), hs (false);
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ auto c = [&p] (bool& v, const char* s)
+ {
+ return p.name () == s ? (v = true) : false;
+ };
+
+ if (c (ni, "node_id")) node_id = p.next_expect_string ();
+ else if (c (hb, "head_branch"))
+ {
+ string* v (p.next_expect_string_null ());
+ if (v != nullptr)
+ head_branch = *v;
+ }
+ else if (c (hs, "head_sha")) head_sha = p.next_expect_string ();
+ else p.next_expect_value_skip ();
+ }
+
+ if (!ni) missing_member (p, "gh_check_suite", "node_id");
+ if (!hb) missing_member (p, "gh_check_suite", "head_branch");
+ if (!hs) missing_member (p, "gh_check_suite", "head_sha");
+ }
+
+ ostream&
+ operator<< (ostream& os, const gh_check_suite& cs)
+ {
+ os << "node_id: " << cs.node_id
+ << ", head_branch: " << (cs.head_branch ? *cs.head_branch : "null")
+ << ", head_sha: " << cs.head_sha;
+
+ return os;
+ }
+
+ // gh_check_suite_ex
+ //
+ gh_check_suite_ex::
+ gh_check_suite_ex (json::parser& p)
+ {
+ p.next_expect (event::begin_object);
+
+ bool ni (false), hb (false), hs (false), cc (false), co (false),
+ ap (false);
// Skip unknown/uninteresting members.
//
@@ -150,24 +195,54 @@ namespace brep
if (v != nullptr)
conclusion = *v;
}
+ else if (c (ap, "app"))
+ {
+ p.next_expect (event::begin_object);
+
+ bool ai (false);
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ if (c (ai, "id"))
+ {
+ // Note: unlike the check_run webhook's app.id, the check_suite
+ // one can be null. It's unclear under what circumstances, but it
+ // shouldn't happen unless something is broken.
+ //
+ string* v (p.next_expect_number_null ());
+
+ if (v == nullptr)
+ throw_json (p, "check_suite.app.id is null");
+
+ app_id = *v;
+ }
+ else p.next_expect_value_skip ();
+ }
+
+ if (!ai) missing_member (p, "gh_check_suite_ex.app", "id");
+ }
else p.next_expect_value_skip ();
}
- if (!ni) missing_member (p, "gh_check_suite", "node_id");
- if (!hb) missing_member (p, "gh_check_suite", "head_branch");
- if (!hs) missing_member (p, "gh_check_suite", "head_sha");
- if (!cc) missing_member (p, "gh_check_suite", "latest_check_runs_count");
- if (!co) missing_member (p, "gh_check_suite", "conclusion");
+ if (!ni) missing_member (p, "gh_check_suite_ex", "node_id");
+ if (!hb) missing_member (p, "gh_check_suite_ex", "head_branch");
+ if (!hs) missing_member (p, "gh_check_suite_ex", "head_sha");
+ if (!cc) missing_member (p, "gh_check_suite_ex", "latest_check_runs_count");
+ if (!co) missing_member (p, "gh_check_suite_ex", "conclusion");
+ if (!ap) missing_member (p, "gh_check_suite_ex", "app");
}
ostream&
- operator<< (ostream& os, const gh_check_suite& cs)
+ operator<< (ostream& os, const gh_check_suite_ex& cs)
{
os << "node_id: " << cs.node_id
<< ", head_branch: " << (cs.head_branch ? *cs.head_branch : "null")
<< ", head_sha: " << cs.head_sha
<< ", latest_check_runs_count: " << cs.check_runs_count
- << ", conclusion: " << (cs.conclusion ? *cs.conclusion : "null");
+ << ", conclusion: " << (cs.conclusion ? *cs.conclusion : "null")
+ << ", app_id: " << cs.app_id;
return os;
}
@@ -208,7 +283,8 @@ namespace brep
{
p.next_expect (event::begin_object);
- bool ni (false), nm (false), st (false), du (false), cs (false);
+ bool ni (false), nm (false), st (false), du (false), cs (false),
+ ap (false);
// Skip unknown/uninteresting members.
//
@@ -224,14 +300,31 @@ namespace brep
else if (c (st, "status")) status = p.next_expect_string ();
else if (c (du, "details_url")) details_url = p.next_expect_string ();
else if (c (cs, "check_suite")) check_suite = gh_check_suite (p);
+ else if (c (ap, "app"))
+ {
+ p.next_expect (event::begin_object);
+
+ bool ai (false);
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ if (c (ai, "id")) app_id = p.next_expect_number ();
+ else p.next_expect_value_skip ();
+ }
+
+ if (!ai) missing_member (p, "gh_check_run_ex.app", "id");
+ }
else p.next_expect_value_skip ();
}
- if (!ni) missing_member (p, "gh_check_run", "node_id");
- if (!nm) missing_member (p, "gh_check_run", "name");
- if (!st) missing_member (p, "gh_check_run", "status");
- if (!du) missing_member (p, "gh_check_run", "details_url");
- if (!cs) missing_member (p, "gh_check_run", "check_suite");
+ if (!ni) missing_member (p, "gh_check_run_ex", "node_id");
+ if (!nm) missing_member (p, "gh_check_run_ex", "name");
+ if (!st) missing_member (p, "gh_check_run_ex", "status");
+ if (!du) missing_member (p, "gh_check_run_ex", "details_url");
+ if (!cs) missing_member (p, "gh_check_run_ex", "check_suite");
+ if (!ap) missing_member (p, "gh_check_run_ex", "app");
}
@@ -250,7 +343,8 @@ namespace brep
{
os << static_cast<const gh_check_run&> (cr)
<< ", details_url: " << cr.details_url
- << ", check_suite: { " << cr.check_suite << " }";
+ << ", check_suite: { " << cr.check_suite << " }"
+ << ", app_id: " << cr.app_id;
return os;
}
@@ -356,7 +450,8 @@ namespace brep
<< "path: " << pr.head_path
<< ", ref: " << pr.head_ref
<< ", sha: " << pr.head_sha
- << " }";
+ << " }"
+ << ", app_id: " << pr.app_id;
return os;
}
@@ -418,7 +513,7 @@ namespace brep
return p.name () == s ? (v = true) : false;
};
- if (c (i, "id")) id = p.next_expect_number<uint64_t> ();
+ if (c (i, "id")) id = p.next_expect_number ();
else p.next_expect_value_skip ();
}
@@ -452,7 +547,7 @@ namespace brep
};
if (c (ac, "action")) action = p.next_expect_string ();
- else if (c (cs, "check_suite")) check_suite = gh_check_suite (p);
+ else if (c (cs, "check_suite")) check_suite = gh_check_suite_ex (p);
else if (c (rp, "repository")) repository = gh_repository (p);
else if (c (in, "installation")) installation = gh_installation (p);
else p.next_expect_value_skip ();
@@ -561,6 +656,58 @@ namespace brep
return os;
}
+ // gh_push_event
+ //
+ gh_push_event::
+ gh_push_event (json::parser& p)
+ {
+ p.next_expect (event::begin_object);
+
+ bool rf (false), bf (false), af (false), fd (false), dl (false),
+ rp (false), in (false);
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ auto c = [&p] (bool& v, const char* s)
+ {
+ return p.name () == s ? (v = true) : false;
+ };
+
+ if (c (rf, "ref")) ref = p.next_expect_string ();
+ else if (c (bf, "before")) before = p.next_expect_string ();
+ else if (c (af, "after")) after = p.next_expect_string ();
+ else if (c (fd, "forced")) forced = p.next_expect_boolean<bool> ();
+ else if (c (dl, "deleted")) deleted = p.next_expect_boolean<bool> ();
+ else if (c (rp, "repository")) repository = gh_repository (p);
+ else if (c (in, "installation")) installation = gh_installation (p);
+ else p.next_expect_value_skip ();
+ }
+
+ if (!rf) missing_member (p, "gh_push_event", "ref");
+ if (!bf) missing_member (p, "gh_push_event", "before");
+ if (!af) missing_member (p, "gh_push_event", "after");
+ if (!fd) missing_member (p, "gh_push_event", "forced");
+ if (!dl) missing_member (p, "gh_push_event", "deleted");
+ if (!rp) missing_member (p, "gh_push_event", "repository");
+ if (!in) missing_member (p, "gh_push_event", "installation");
+ }
+
+ ostream&
+ operator<< (ostream& os, const gh_push_event& p)
+ {
+ os << "ref: " << p.ref
+ << ", before: " << p.before
+ << ", after: " << p.after
+ << ", forced: " << p.forced
+ << ", deleted: " << p.deleted
+ << ", repository { " << p.repository << " }"
+ << ", installation { " << p.installation << " }";
+
+ return os;
+ }
+
// gh_installation_access_token
//
// Example JSON:
diff --git a/mod/mod-ci-github-gh.hxx b/mod/mod-ci-github-gh.hxx
index 05c289e..91f5bfe 100644
--- a/mod/mod-ci-github-gh.hxx
+++ b/mod/mod-ci-github-gh.hxx
@@ -26,9 +26,10 @@ namespace brep
// GitHub request/response types (all start with gh_).
//
// Note that the GitHub REST and GraphQL APIs use different id types and
- // values. In the REST API they are usually integers (but sometimes
- // strings!) whereas in GraphQL they are always strings (note:
- // base64-encoded and opaque, not just the REST id value as a string).
+ // values. In the REST API they are usually integers (but check the API
+ // reference for the object in question) whereas in GraphQL they are always
+ // strings (note: base64-encoded and opaque, not just the REST id value as a
+ // string).
//
// In both APIs the id field is called `id`, but REST responses and webhook
// events also contain the corresponding GraphQL object's id in the
@@ -43,7 +44,7 @@ namespace brep
//
namespace json = butl::json;
- // The "check_suite" object within a check_suite webhook event request.
+ // The check_suite member of a check_run webhook event (gh_check_run_event).
//
struct gh_check_suite
{
@@ -51,15 +52,32 @@ namespace brep
optional<string> head_branch;
string head_sha;
+ explicit
+ gh_check_suite (json::parser&);
+
+ gh_check_suite () = default;
+ };
+
+ // The check_suite member of a check_suite webhook event
+ // (gh_check_suite_event).
+ //
+ struct gh_check_suite_ex: gh_check_suite
+ {
size_t check_runs_count;
optional<string> conclusion;
+ string app_id;
+
explicit
- gh_check_suite (json::parser&);
+ gh_check_suite_ex (json::parser&);
- gh_check_suite () = default;
+ gh_check_suite_ex () = default;
};
+ // The check_run object returned in response to GraphQL requests
+  // (e.g. create or update check run). Note that we specify the set of
+ // members to return in the GraphQL request.
+ //
struct gh_check_run
{
string node_id;
@@ -72,17 +90,24 @@ namespace brep
gh_check_run () = default;
};
+ // The check_run member of a check_run webhook event (gh_check_run_event).
+ //
struct gh_check_run_ex: gh_check_run
{
string details_url;
gh_check_suite check_suite;
+ string app_id;
+
explicit
gh_check_run_ex (json::parser&);
gh_check_run_ex () = default;
};
+ // The pull_request member of a pull_request webhook event
+ // (gh_pull_request_event).
+ //
struct gh_pull_request
{
string node_id;
@@ -96,38 +121,24 @@ namespace brep
string head_ref;
string head_sha;
+ // Note: not received from GitHub but set from the app-id webhook query
+ // parameter instead.
+ //
+ // For some reason, unlike the check_suite and check_run webhooks, the
+ // pull_request webhook does not contain the app id. For the sake of
+ // simplicity we emulate check_suite and check_run by storing the app-id
+ // webhook query parameter here.
+ //
+ string app_id;
+
explicit
gh_pull_request (json::parser&);
gh_pull_request () = default;
};
- // Return the GitHub check run status corresponding to a build_state.
+ // The repository member of various webhook events.
//
- string
- gh_to_status (build_state);
-
- // Return the build_state corresponding to a GitHub check run status
- // string. Throw invalid_argument if the passed status was invalid.
- //
- build_state
- gh_from_status (const string&);
-
- // If warning_success is true, then map result_status::warning to `SUCCESS`
- // and to `FAILURE` otherwise.
- //
- // Throw invalid_argument in case of unsupported result_status value
- // (currently skip, interrupt).
- //
- string
- gh_to_conclusion (result_status, bool warning_success);
-
- // Create a check_run name from a build. If the second argument is not
- // NULL, return an abbreviated id if possible.
- //
- string
- gh_check_run_name (const build&, const build_queued_hints* = nullptr);
-
struct gh_repository
{
string node_id;
@@ -140,9 +151,11 @@ namespace brep
gh_repository () = default;
};
+ // The installation member of various webhook events.
+ //
struct gh_installation
{
- uint64_t id; // Note: used for installation access token (REST API).
+ string id; // Note: used for installation access token (REST API).
explicit
gh_installation (json::parser&);
@@ -150,12 +163,12 @@ namespace brep
gh_installation () = default;
};
- // The check_suite webhook event request.
+ // The check_suite webhook event.
//
struct gh_check_suite_event
{
string action;
- gh_check_suite check_suite;
+ gh_check_suite_ex check_suite;
gh_repository repository;
gh_installation installation;
@@ -165,6 +178,8 @@ namespace brep
gh_check_suite_event () = default;
};
+ // The check_run webhook event.
+ //
struct gh_check_run_event
{
string action;
@@ -178,6 +193,8 @@ namespace brep
gh_check_run_event () = default;
};
+ // The pull_request webhook event.
+ //
struct gh_pull_request_event
{
string action;
@@ -198,6 +215,58 @@ namespace brep
gh_pull_request_event () = default;
};
+ // The push webhook event.
+ //
+ struct gh_push_event
+ {
+ // The full git ref that was pushed. Example: refs/heads/main or
+ // refs/tags/v3.14.1.
+ //
+ string ref;
+
+ // The SHA of the most recent commit on ref before the push.
+ //
+ // The GitHub API reference says this member is always present and
+ // non-null. Testing shows that an absent before commit is represented by
+ // a value of "0000000000000000000000000000000000000000".
+ //
+ string before;
+
+ // The SHA of the most recent commit on ref after the push.
+ //
+ string after;
+
+ // True if this was a forced push of the ref. I.e., history was
+ // overwritten.
+ //
+ bool forced;
+
+ // True if this was a branch deletion.
+ //
+ bool deleted;
+
+ gh_repository repository;
+ gh_installation installation;
+
+ // Note: not received from GitHub but set from the app-id webhook query
+ // parameter instead.
+ //
+ // For some reason, unlike the check_suite and check_run webhooks, the
+ // push webhook does not contain the app id. For the sake of simplicity we
+ // emulate check_suite and check_run by storing the app-id webhook query
+ // parameter here.
+ //
+ string app_id;
+
+ explicit
+ gh_push_event (json::parser&);
+
+ gh_push_event () = default;
+ };
+
+ // Installation access token (IAT) returned when we authenticate as a GitHub
+ // app installation.
+ //
struct gh_installation_access_token
{
string token;
@@ -211,6 +280,32 @@ namespace brep
gh_installation_access_token () = default;
};
+ // Return the GitHub check run status corresponding to a build_state.
+ //
+ string
+ gh_to_status (build_state);
+
+ // Return the build_state corresponding to a GitHub check run status
+ // string. Throw invalid_argument if the passed status was invalid.
+ //
+ build_state
+ gh_from_status (const string&);
+
+ // If warning_success is true, then map result_status::warning to `SUCCESS`
+ // and to `FAILURE` otherwise.
+ //
+ // Throw invalid_argument in case of unsupported result_status value
+ // (currently skip, interrupt).
+ //
+ string
+ gh_to_conclusion (result_status, bool warning_success);
+
+ // Create a check_run name from a build. If the second argument is not
+ // NULL, return an abbreviated id if possible.
+ //
+ string
+ gh_check_run_name (const build&, const build_queued_hints* = nullptr);
+
// Throw system_error if the conversion fails due to underlying operating
// system errors.
//
@@ -227,6 +322,9 @@ namespace brep
operator<< (ostream&, const gh_check_suite&);
ostream&
+ operator<< (ostream&, const gh_check_suite_ex&);
+
+ ostream&
operator<< (ostream&, const gh_check_run&);
ostream&
@@ -248,6 +346,9 @@ namespace brep
operator<< (ostream&, const gh_pull_request_event&);
ostream&
+ operator<< (ostream&, const gh_push_event&);
+
+ ostream&
operator<< (ostream&, const gh_installation_access_token&);
}
diff --git a/mod/mod-ci-github-gq.cxx b/mod/mod-ci-github-gq.cxx
index 774eeed..db69f0c 100644
--- a/mod/mod-ci-github-gq.cxx
+++ b/mod/mod-ci-github-gq.cxx
@@ -740,7 +740,7 @@ namespace brep
{
if (ma == "CONFLICTING")
r = {move (hs), false, nullopt};
- if (ma == "UNKNOWN")
+ else if (ma == "UNKNOWN")
; // Still being generated; leave r absent.
else
throw_json (p, "unexpected mergeable value '" + ma + '\'');
diff --git a/mod/mod-ci-github-service-data.cxx b/mod/mod-ci-github-service-data.cxx
index 31a556d..4598302 100644
--- a/mod/mod-ci-github-service-data.cxx
+++ b/mod/mod-ci-github-service-data.cxx
@@ -54,8 +54,8 @@ namespace brep
p.next_expect_name ("installation_access");
installation_access = gh_installation_access_token (p);
- installation_id =
- p.next_expect_member_number<uint64_t> ("installation_id");
+ app_id = p.next_expect_member_string ("app_id");
+ installation_id = p.next_expect_member_string ("installation_id");
repository_node_id = p.next_expect_member_string ("repository_node_id");
repository_clone_url = p.next_expect_member_string ("repository_clone_url");
@@ -113,7 +113,14 @@ namespace brep
}
}
- check_runs.emplace_back (move (bid), move (nm), move (nid), s, ss, rs);
+ check_runs.push_back (
+ check_run {move (bid),
+ move (nm),
+ move (nid),
+ s,
+ ss,
+ rs,
+ nullopt /* details_url */});
p.next_expect (event::end_object);
}
@@ -135,7 +142,8 @@ namespace brep
service_data (bool ws,
string iat_tok,
timestamp iat_ea,
- uint64_t iid,
+ string aid,
+ string iid,
string rid,
string rcu,
kind_type k,
@@ -146,7 +154,8 @@ namespace brep
: kind (k), pre_check (pc), re_request (rr),
warning_success (ws),
installation_access (move (iat_tok), iat_ea),
- installation_id (iid),
+ app_id (move (aid)),
+ installation_id (move (iid)),
repository_node_id (move (rid)),
repository_clone_url (move (rcu)),
check_sha (move (cs)),
@@ -161,7 +170,8 @@ namespace brep
service_data (bool ws,
string iat_tok,
timestamp iat_ea,
- uint64_t iid,
+ string aid,
+ string iid,
string rid,
string rcu,
kind_type k,
@@ -174,7 +184,8 @@ namespace brep
: kind (k), pre_check (pc), re_request (rr),
warning_success (ws),
installation_access (move (iat_tok), iat_ea),
- installation_id (iid),
+ app_id (move (aid)),
+ installation_id (move (iid)),
repository_node_id (move (rid)),
repository_clone_url (move (rcu)),
pr_node_id (move (pid)),
@@ -233,6 +244,7 @@ namespace brep
s.end_object ();
+ s.member ("app_id", app_id);
s.member ("installation_id", installation_id);
s.member ("repository_node_id", repository_node_id);
s.member ("repository_clone_url", repository_clone_url);
diff --git a/mod/mod-ci-github-service-data.hxx b/mod/mod-ci-github-service-data.hxx
index 0f4c760..50bb49d 100644
--- a/mod/mod-ci-github-service-data.hxx
+++ b/mod/mod-ci-github-service-data.hxx
@@ -89,7 +89,8 @@ namespace brep
//
gh_installation_access_token installation_access;
- uint64_t installation_id;
+ string app_id;
+ string installation_id;
string repository_node_id; // GitHub-internal opaque repository id.
@@ -151,7 +152,8 @@ namespace brep
service_data (bool warning_success,
string iat_token,
timestamp iat_expires_at,
- uint64_t installation_id,
+ string app_id,
+ string installation_id,
string repository_node_id,
string repository_clone_url,
kind_type kind,
@@ -165,7 +167,8 @@ namespace brep
service_data (bool warning_success,
string iat_token,
timestamp iat_expires_at,
- uint64_t installation_id,
+ string app_id,
+ string installation_id,
string repository_node_id,
string repository_clone_url,
kind_type kind,
diff --git a/mod/mod-ci-github.cxx b/mod/mod-ci-github.cxx
index 14b3c00..67ab2a7 100644
--- a/mod/mod-ci-github.cxx
+++ b/mod/mod-ci-github.cxx
@@ -61,6 +61,8 @@ namespace brep
void ci_github::
init (scanner& s)
{
+ HANDLER_DIAG;
+
{
shared_ptr<tenant_service_base> ts (
dynamic_pointer_cast<tenant_service_base> (shared_from_this ()));
@@ -75,9 +77,45 @@ namespace brep
// Prepare for the CI requests handling, if configured.
//
- if (options_->build_config_specified () &&
- options_->ci_github_app_webhook_secret_specified ())
+ if (options_->ci_github_app_webhook_secret_specified ())
{
+ if (!options_->build_config_specified ())
+ fail << "package building functionality must be enabled";
+
+ if (!options_->ci_github_app_id_private_key_specified ())
+ fail << "no app id/private key mappings configured";
+
+ for (const auto& pr: options_->ci_github_app_id_private_key ())
+ {
+ if (pr.second.relative ())
+ fail << "ci-github-app-id-private-key path must be absolute";
+ }
+
+ // Read the webhook secret from the configured path.
+ //
+ {
+ const path& p (options_->ci_github_app_webhook_secret ());
+
+ if (p.relative ())
+ fail << "ci-github-app-webhook-secret path must be absolute";
+
+ try
+ {
+ ifdstream is (p);
+ getline (is, webhook_secret_, '\0');
+
+ // Trim leading/trailing whitespaces (presumably GitHub does the
+ // same in its web UI).
+ //
+ if (trim (webhook_secret_).empty ())
+ fail << "empty webhook secret in " << p;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read webhook secret from " << p << ": " << e;
+ }
+ }
+
ci_start::init (make_shared<options::ci_start> (*options_));
database_module::init (*options_, options_->build_db_retry ());
@@ -202,10 +240,10 @@ namespace brep
//
try
{
- string h (
- compute_hmac (*options_,
- body.data (), body.size (),
- options_->ci_github_app_webhook_secret ().c_str ()));
+ string h (compute_hmac (*options_,
+ body.data (),
+ body.size (),
+ webhook_secret_.c_str ()));
if (!icasecmp (h, hmac))
{
@@ -221,33 +259,48 @@ namespace brep
fail << "unable to compute request HMAC: " << e;
}
- // Process the `warning` webhook request query parameter.
+ // Process the `app-id` and `warning` webhook request query parameters.
//
+ string app_id;
bool warning_success;
{
const name_values& rps (rq.parameters (1024, true /* url_only */));
- auto i (find_if (rps.begin (), rps.end (),
- [] (auto&& rp) {return rp.name == "warning";}));
+ bool ai (false), wa (false);
- if (i == rps.end ())
- throw invalid_request (400,
- "missing 'warning' webhook query parameter");
+ auto badreq = [] (const string& m)
+ {
+ throw invalid_request (400, m);
+ };
- if (!i->value)
- throw invalid_request (
- 400, "missing 'warning' webhook query parameter value");
+ for (const name_value& rp: rps)
+ {
+ if (rp.name == "app-id")
+ {
+ if (!rp.value)
+ badreq ("missing 'app-id' webhook query parameter value");
- const string& v (*i->value);
+ ai = true;
+ app_id = *rp.value;
+ }
+ else if (rp.name == "warning")
+ {
+ if (!rp.value)
+ badreq ("missing 'warning' webhook query parameter value");
- if (v == "success") warning_success = true;
- else if (v == "failure") warning_success = false;
- else
- {
- throw invalid_request (
- 400,
- "invalid 'warning' webhook query parameter value: '" + v + '\'');
+ wa = true;
+ const string& v (*rp.value);
+
+ if (v == "success") warning_success = true;
+ else if (v == "failure") warning_success = false;
+ else
+ badreq ("invalid 'warning' webhook query parameter value: '" + v +
+ '\'');
+ }
}
+
+ if (!ai) badreq ("missing 'app-id' webhook query parameter");
+ if (!wa) badreq ("missing 'warning' webhook query parameter");
}
// There is a webhook event (specified in the x-github-event header) and
@@ -257,7 +310,7 @@ namespace brep
// Note: "GitHub continues to add new event types and new actions to
// existing event types." As a result we ignore known actions that we are
// not interested in and log and ignore unknown actions. The thinking here
- // is that we want be "notified" of new actions at which point we can
+ // is that we want to be "notified" of new actions at which point we can
// decide whether to ignore them or to handle.
//
if (event == "check_suite")
@@ -279,16 +332,25 @@ namespace brep
throw invalid_request (400, move (m));
}
+ if (cs.check_suite.app_id != app_id)
+ {
+ fail << "webhook check_suite app.id " << cs.check_suite.app_id
+ << " does not match app-id query parameter " << app_id;
+ }
+
if (cs.action == "requested")
{
- return handle_check_suite_request (move (cs), warning_success);
+ // Branch pushes are handled in handle_branch_push() so ignore this
+ // event.
+ //
+ return true;
}
else if (cs.action == "rerequested")
{
// Someone manually requested to re-run all the check runs in this
// check suite. Treat as a new request.
//
- return handle_check_suite_request (move (cs), warning_success);
+ return handle_check_suite_rerequest (move (cs), warning_success);
}
else if (cs.action == "completed")
{
@@ -327,6 +389,12 @@ namespace brep
throw invalid_request (400, move (m));
}
+ if (cr.check_run.app_id != app_id)
+ {
+ fail << "webhook check_run app.id " << cr.check_run.app_id
+ << " does not match app-id query parameter " << app_id;
+ }
+
if (cr.action == "rerequested")
{
// Someone manually requested to re-run a specific check run.
@@ -372,6 +440,14 @@ namespace brep
throw invalid_request (400, move (m));
}
+ // Store the app-id webhook query parameter in the gh_pull_request_event
+ // object (see gh_pull_request for an explanation).
+ //
+ // When we receive the other webhooks we do check that the app ids in
+ // the payload and query match but here we have to assume it is valid.
+ //
+ pr.pull_request.app_id = app_id;
+
if (pr.action == "opened" ||
pr.action == "synchronize")
{
@@ -454,6 +530,40 @@ namespace brep
return true;
}
}
+ else if (event == "push")
+ {
+ // Push events are triggered by branch pushes, branch creation, and
+ // branch deletion.
+ //
+ gh_push_event ps;
+ try
+ {
+ json::parser p (body.data (), body.size (), "push event");
+
+ ps = gh_push_event (p);
+ }
+ catch (const json::invalid_json_input& e)
+ {
+ string m ("malformed JSON in " + e.name + " request body");
+
+ error << m << ", line: " << e.line << ", column: " << e.column
+ << ", byte offset: " << e.position << ", error: " << e;
+
+ throw invalid_request (400, move (m));
+ }
+
+ // Store the app-id webhook query parameter in the gh_push_event
+ // object (see gh_push_event for an explanation).
+ //
+ // When we receive the other webhooks we do check that the app ids in
+ // the payload and query match but here we have to assume it is valid.
+ //
+ ps.app_id = app_id;
+
+ // Note that the push request event has no action.
+ //
+ return handle_branch_push (move (ps), warning_success);
+ }
else
{
// Log to investigate.
@@ -469,57 +579,293 @@ namespace brep
//
static string conclusion_check_run_name ("CONCLUSION");
- // Return the colored circle corresponding to a result_status.
- //
- static string
- circle (result_status rs)
+ bool ci_github::
+ handle_branch_push (gh_push_event ps, bool warning_success)
{
- switch (rs)
+ HANDLER_DIAG;
+
+ l3 ([&]{trace << "push event { " << ps << " }";});
+
+ // Cancel the CI tenant associated with the overwritten/deleted previous
+ // head commit if this is a forced push or a branch deletion.
+ //
+ if (ps.forced || ps.deleted)
{
- case result_status::success: return "\U0001F7E2"; // Green circle.
- case result_status::warning: return "\U0001F7E0"; // Orange circle.
- case result_status::error:
- case result_status::abort:
- case result_status::abnormal: return "\U0001F534"; // Red circle.
+ // Service id that will uniquely identify the CI tenant.
+ //
+ string sid (ps.repository.node_id + ':' + ps.before);
- // Valid values we should never encounter.
+ // Note that it's possible this commit still exists in another branch so
+ // we do refcount-aware cancel.
//
- case result_status::skip:
- case result_status::interrupt:
- throw invalid_argument ("unexpected result_status value: " +
- to_string (rs));
+ if (optional<tenant_service> ts = cancel (error, warn,
+ verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ "ci-github", sid,
+ true /* ref_count */))
+ {
+ l3 ([&]{trace << (ps.forced ? "forced push " + ps.after + " to "
+ : "deletion of ")
+ << ps.ref << ": attempted to cancel CI of previous"
+ << " head commit with tenant_service id " << sid
+ << " (ref_count: " << ts->ref_count << ')';});
+ }
+ else
+ {
+ // It's possible that there was no CI for the previous commit for
+ // various reasons (e.g., CI was not enabled).
+ //
+ l3 ([&]{trace << (ps.forced ? "forced push " + ps.after + " to "
+ : "deletion of ")
+ << ps.ref << ": failed to cancel CI of previous"
+ << " head commit with tenant_service id " << sid;});
+ }
}
- return ""; // Should never reach.
+ if (ps.deleted)
+ return true; // Do nothing further if this was a branch deletion.
+
+ // While we don't need the installation access token in this request,
+ // let's obtain it to flush out any permission issues early. Also, it is
+ // valid for an hour so we will most likely make use of it.
+ //
+ optional<string> jwt (generate_jwt (ps.app_id, trace, error));
+ if (!jwt)
+ throw server_error ();
+
+ optional<gh_installation_access_token> iat (
+ obtain_installation_access_token (ps.installation.id,
+ move (*jwt),
+ error));
+ if (!iat)
+ throw server_error ();
+
+ l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
+
+ // While it would have been nice to cancel CIs of PRs with this branch as
+ // base not to waste resources, there are complications: Firstly, we can
+ // only do this for remote PRs (since local PRs will most likely share the
+ // result with branch push). Secondly, we try to do our best even if the
+ // branch protection rule for head behind is not enabled. In this case, it
+ // would be good to complete the CI. So maybe/later. See also the head
+ // case in handle_pull_request(), where we do cancel remote PRs that are
+ // not shared.
+
+ // Service id that uniquely identifies the CI tenant.
+ //
+ string sid (ps.repository.node_id + ':' + ps.after);
+
+ service_data sd (warning_success,
+ iat->token,
+ iat->expires_at,
+ ps.app_id,
+ ps.installation.id,
+ move (ps.repository.node_id),
+ move (ps.repository.clone_url),
+ service_data::local,
+ false /* pre_check */,
+ false /* re_requested */,
+ ps.after /* check_sha */,
+ ps.after /* report_sha */);
+
+ // Create an unloaded CI tenant, doing nothing if one already exists
+ // (which could've been created by handle_pull_request() or by us as a
+ // result of a push to another branch). Note that the tenant's reference
+ // count is incremented in all cases.
+ //
+ // Note: use no delay since we need to (re)create the synthetic conclusion
+ // check run as soon as possible.
+ //
+ // Note that we use the create() API instead of start() since duplicate
+ // management is not available in start().
+ //
+ // After this call we will start getting the build_unloaded()
+ // notifications until (1) we load the tenant, (2) we cancel it, or (3)
+ // it gets archived after some timeout.
+ //
+ if (!create (error, warn, verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ tenant_service (sid, "ci-github", sd.json ()),
+ chrono::seconds (30) /* interval */,
+ chrono::seconds (0) /* delay */,
+ duplicate_tenant_mode::ignore))
+ {
+ fail << "push " + ps.after + " to " + ps.ref
+ << ": unable to create unloaded CI tenant";
+ }
+
+ return true;
}
- // Make a check run summary from a CI start_result.
+ // Miscellaneous pull request facts
//
- static string
- to_check_run_summary (const optional<ci_start::start_result>& r)
+ // - Although some of the GitHub documentation makes it sound like they
+ // expect check runs to be added to both the PR head commit and the merge
+ // commit, the PR UI does not react to the merge commit's check runs
+ // consistently. It actually seems to be quite broken. The only thing it
+ // does seem to do reliably is blocking the PR merge if the merge commit's
+ // check runs are not successful (i.e, overriding the PR head commit's
+ // check runs). But the UI looks quite messed up generally in this state.
+ //
+ // - When new commits are added to a PR base branch, pull_request.base.sha
+ // does not change, but the test merge commit will be updated to include
+ // the new commits to the base branch.
+ //
+ // - When new commits are added to a PR head branch, pull_request.head.sha
+ // gets updated with the head commit's SHA and check_suite.pull_requests[]
+ // will contain all PRs with this branch as head.
+ //
+ bool ci_github::
+ handle_pull_request (gh_pull_request_event pr, bool warning_success)
{
- string s;
+ HANDLER_DIAG;
- s = "```\n";
- if (r) s += r->message;
- else s += "Internal service error";
- s += "\n```";
+ l3 ([&]{trace << "pull_request event { " << pr << " }";});
- return s;
+ // While we don't need the installation access token in this request,
+ // let's obtain it to flush out any permission issues early. Also, it is
+ // valid for an hour so we will most likely make use of it.
+ //
+ optional<string> jwt (generate_jwt (pr.pull_request.app_id, trace, error));
+ if (!jwt)
+ throw server_error ();
+
+ optional<gh_installation_access_token> iat (
+ obtain_installation_access_token (pr.installation.id,
+ move (*jwt),
+ error));
+ if (!iat)
+ throw server_error ();
+
+ l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
+
+ // Distinguish between local and remote PRs by comparing the head and base
+ // repositories' paths.
+ //
+ service_data::kind_type kind (
+ pr.pull_request.head_path == pr.pull_request.base_path
+ ? service_data::local
+ : service_data::remote);
+
+ // Note that similar to the branch push case above, while it would have
+ // been nice to cancel the previous CI job once the PR head moves (the
+ // "synchronize" event), due to the head sharing problem the previous CI
+ // job might actually still be relevant (in both local and remote PR
+ // cases). So we only do it for the remote PRs and only if the head is not
+ // shared (via tenant reference counting).
+ //
+ if (kind == service_data::remote && pr.action == "synchronize")
+ {
+ if (pr.before)
+ {
+ // Service id that will uniquely identify the CI tenant.
+ //
+ string sid (pr.repository.node_id + ':' + *pr.before);
+
+ if (optional<tenant_service> ts = cancel (error, warn,
+ verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ "ci-github", sid,
+ true /* ref_count */))
+ {
+ l3 ([&]{trace << "pull request " << pr.pull_request.node_id
+ << ": attempted to cancel CI of previous head commit"
+ << " (ref_count: " << ts->ref_count << ')';});
+ }
+ else
+ {
+ // It's possible that there was no CI for the previous commit for
+ // various reasons (e.g., CI was not enabled).
+ //
+ l3 ([&]{trace << "pull request " << pr.pull_request.node_id
+ << ": failed to cancel CI of previous head commit "
+ << "with tenant_service id " << sid;});
+ }
+ }
+ else
+ {
+ error << "pull request " << pr.pull_request.node_id
+ << ": before commit is missing in synchronize event";
+ }
+ }
+
+ // Note: for remote PRs the check_sha will be set later, in
+  // build_unloaded_pre_check(), to the test merge commit id.
+ //
+ string check_sha (kind == service_data::local
+ ? pr.pull_request.head_sha
+ : "");
+
+ // Note that PR rebuilds (re-requested) are handled by
+ // handle_check_suite_rerequest().
+ //
+ // Note that, in the case of a remote PR, GitHub will copy the PR head
+ // commit from the head (forked) repository into the base repository. So
+ // the check runs must always be added to the base repository, whether the
+ // PR is local or remote. The head commit refs are located at
+ // refs/pull/<PR-number>/head.
+ //
+ service_data sd (warning_success,
+ move (iat->token),
+ iat->expires_at,
+ pr.pull_request.app_id,
+ pr.installation.id,
+ move (pr.repository.node_id),
+ move (pr.repository.clone_url),
+ kind, true /* pre_check */, false /* re_request */,
+ move (check_sha),
+ move (pr.pull_request.head_sha) /* report_sha */,
+ pr.pull_request.node_id,
+ pr.pull_request.number);
+
+ // Create an unloaded CI tenant for the pre-check phase (during which we
+ // wait for the PR's merge commit and behindness to become available).
+ //
+ // Create with an empty service id so that the generated tenant id is used
+ // instead during the pre-check phase (so as not to clash with a proper
+ // service id for this head commit, potentially created in
+ // handle_branch_push() or as another PR).
+ //
+ tenant_service ts ("", "ci-github", sd.json ());
+
+ // Note: use no delay since we need to start the actual CI (which in turn
+ // (re)creates the synthetic conclusion check run) as soon as possible.
+ //
+ // After this call we will start getting the build_unloaded()
+ // notifications -- which will be routed to build_unloaded_pre_check() --
+ // until we cancel the tenant or it gets archived after some timeout.
+ // (Note that we never actually load this request, we always cancel it;
+ // see build_unloaded_pre_check() for details.)
+ //
+ if (!create (error,
+ warn,
+ verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ move (ts),
+ chrono::seconds (30) /* interval */,
+ chrono::seconds (0) /* delay */))
+ {
+ fail << "pull request " << pr.pull_request.node_id
+ << ": unable to create unloaded pre-check tenant";
+ }
+
+ return true;
}
bool ci_github::
- handle_check_suite_request (gh_check_suite_event cs, bool warning_success)
+ handle_check_suite_rerequest (gh_check_suite_event cs, bool warning_success)
{
HANDLER_DIAG;
l3 ([&]{trace << "check_suite event { " << cs << " }";});
+ assert (cs.action == "rerequested");
+
// While we don't need the installation access token in this request,
// let's obtain it to flush out any permission issues early. Also, it is
// valid for an hour so we will most likely make use of it.
//
- optional<string> jwt (generate_jwt (trace, error));
+ optional<string> jwt (generate_jwt (cs.check_suite.app_id, trace, error));
if (!jwt)
throw server_error ();
@@ -532,15 +878,6 @@ namespace brep
l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
- // While it would have been nice to cancel CIs of PRs with this branch as
- // base not to waste resources, there are complications: Firstly, we can
- // only do this for remote PRs (since local PRs will most likely share the
- // result with branch push). Secondly, we try to do our best even if the
- // branch protection rule for head behind is not enabled. In this case, it
- // would be good to complete the CI. So maybe/later. See also the head
- // case in handle_pull_request(), where we do cancel remote PRs that are
- // not shared.
-
// Service id that uniquely identifies the CI tenant.
//
string sid (cs.repository.node_id + ':' + cs.check_suite.head_sha);
@@ -564,9 +901,10 @@ namespace brep
string check_sha;
service_data::kind_type kind;
- bool re_requested (cs.action == "rerequested");
- if (re_requested && !cs.check_suite.head_branch)
+ if (!cs.check_suite.head_branch)
{
+ // Rebuild of remote PR.
+ //
kind = service_data::remote;
if (optional<tenant_data> d = find (*build_db_, "ci-github", sid))
@@ -594,7 +932,7 @@ namespace brep
}
else
{
- // Branch push or local PR rebuild.
+ // Rebuild of branch push or local PR.
//
kind = service_data::local;
check_sha = cs.check_suite.head_sha;
@@ -603,23 +941,19 @@ namespace brep
service_data sd (warning_success,
iat->token,
iat->expires_at,
+ cs.check_suite.app_id,
cs.installation.id,
move (cs.repository.node_id),
move (cs.repository.clone_url),
- kind, false /* pre_check */, re_requested,
+ kind, false /* pre_check */, true /* re_requested */,
move (check_sha),
move (cs.check_suite.head_sha) /* report_sha */);
- // If this check suite is being re-run, replace the existing CI tenant if
- // it exists; otherwise create a new one, doing nothing if a tenant
- // already exists (which could've been created by handle_pull_request()).
+ // Replace the existing CI tenant if it exists.
//
// Note that GitHub UI does not allow re-running the entire check suite
// until all the check runs are completed.
//
- duplicate_tenant_mode dtm (re_requested
- ? duplicate_tenant_mode::replace
- : duplicate_tenant_mode::ignore);
// Create an unloaded CI tenant.
//
@@ -640,7 +974,7 @@ namespace brep
tenant_service (sid, "ci-github", sd.json ()),
chrono::seconds (30) /* interval */,
chrono::seconds (0) /* delay */,
- dtm));
+ duplicate_tenant_mode::replace));
if (!pr)
{
@@ -648,8 +982,7 @@ namespace brep
<< ": unable to create unloaded CI tenant";
}
- if (dtm == duplicate_tenant_mode::replace &&
- pr->second == duplicate_tenant_result::created)
+ if (pr->second == duplicate_tenant_result::created)
{
error << "check suite " << cs.check_suite.node_id
<< ": re-requested but tenant_service with id " << sid
@@ -778,6 +1111,45 @@ namespace brep
return true;
}
+ // Return the colored circle corresponding to a result_status.
+ //
+ static string
+ circle (result_status rs)
+ {
+ switch (rs)
+ {
+ case result_status::success: return "\U0001F7E2"; // Green circle.
+ case result_status::warning: return "\U0001F7E0"; // Orange circle.
+ case result_status::error:
+ case result_status::abort:
+ case result_status::abnormal: return "\U0001F534"; // Red circle.
+
+ // Valid values we should never encounter.
+ //
+ case result_status::skip:
+ case result_status::interrupt:
+ throw invalid_argument ("unexpected result_status value: " +
+ to_string (rs));
+ }
+
+    return ""; // Should never be reached.
+ }
+
+ // Make a check run summary from a CI start_result.
+ //
+ static string
+ to_check_run_summary (const optional<ci_start::start_result>& r)
+ {
+ string s;
+
+ s = "```\n";
+ if (r) s += r->message;
+ else s += "Internal service error";
+ s += "\n```";
+
+ return s;
+ }
+
// Create a gq_built_result.
//
// Throw invalid_argument in case of invalid result_status.
@@ -851,7 +1223,7 @@ namespace brep
auto get_iat = [this, &trace, &error, &cr] ()
-> optional<gh_installation_access_token>
{
- optional<string> jwt (generate_jwt (trace, error));
+ optional<string> jwt (generate_jwt (cr.check_run.app_id, trace, error));
if (!jwt)
return nullopt;
@@ -879,6 +1251,9 @@ namespace brep
ccr.name = conclusion_check_run_name;
+ const gh_installation_access_token* iat (nullptr);
+ optional<gh_installation_access_token> new_iat;
+
// Load the service data, failing the check runs if the tenant has been
// archived.
//
@@ -889,108 +1264,113 @@ namespace brep
//
string sid (repo_node_id + ':' + head_sha);
- if (optional<tenant_data> d = find (*build_db_, "ci-github", sid))
+ optional<tenant_data> d (find (*build_db_, "ci-github", sid));
+ if (!d)
{
- if (d->archived) // Tenant is archived
- {
- // Fail (re-create) the check runs.
- //
- optional<gh_installation_access_token> iat (get_iat ());
- if (!iat)
- throw server_error ();
+ // No such tenant.
+ //
+ fail << "check run " << cr.check_run.node_id
+ << " re-requested but tenant_service with id " << sid
+ << " does not exist";
+ }
- gq_built_result br (
- make_built_result (
- result_status::error, warning_success,
- "Unable to rebuild individual configuration: build has "
- "been archived"));
+ tenant_service& ts (d->service);
- // Try to update the conclusion check run even if the first update
- // fails.
- //
- bool f (false); // Failed.
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "failed to parse service data: " << e;
+ }
- if (gq_create_check_run (error, bcr, iat->token,
- repo_node_id, head_sha,
- cr.check_run.details_url,
- build_state::built, br))
- {
- l3 ([&]{trace << "created check_run { " << bcr << " }";});
- }
- else
- {
- error << "check_run " << cr.check_run.node_id
- << ": unable to re-create check run";
- f = true;
- }
+ if (!sd.conclusion_node_id)
+ fail << "no conclusion node id for check run " << cr.check_run.node_id;
- if (gq_create_check_run (error, ccr, iat->token,
- repo_node_id, head_sha,
- nullopt /* details_url */,
- build_state::built, move (br)))
- {
- l3 ([&]{trace << "created conclusion check_run { " << ccr << " }";});
- }
- else
- {
- error << "check_run " << cr.check_run.node_id
- << ": unable to re-create conclusion check run";
- f = true;
- }
+ tenant_id = d->tenant_id;
- // Fail the handler if either of the check runs could not be
- // updated.
- //
- if (f)
- throw server_error ();
+ // Get a new IAT if the one from the service data has expired.
+ //
+ if (system_clock::now () > sd.installation_access.expires_at)
+ {
+ if ((new_iat = get_iat ()))
+ iat = &*new_iat;
+ else
+ throw server_error ();
+ }
+ else
+ iat = &sd.installation_access;
- return true;
- }
+ if (d->archived) // Tenant is archived
+ {
+ // Fail (update) the check runs.
+ //
+ gq_built_result br (
+ make_built_result (
+ result_status::error, warning_success,
+ "Unable to rebuild individual configuration: build has "
+ "been archived"));
+
+ // Try to update the conclusion check run even if the first update
+ // fails.
+ //
+ bool f (false); // Failed.
- tenant_service& ts (d->service);
+ if (gq_update_check_run (error, bcr, iat->token,
+ repo_node_id, cr.check_run.node_id,
+ nullopt /* details_url */,
+ build_state::built, br))
+ {
+ l3 ([&]{trace << "updated check_run { " << bcr << " }";});
+ }
+ else
+ {
+ error << "check_run " << cr.check_run.node_id
+ << ": unable to update check run";
+ f = true;
+ }
- try
+ if (gq_update_check_run (error, ccr, iat->token,
+ repo_node_id, *sd.conclusion_node_id,
+ nullopt /* details_url */,
+ build_state::built, move (br)))
{
- sd = service_data (*ts.data);
+ l3 ([&]{trace << "updated conclusion check_run { " << ccr << " }";});
}
- catch (const invalid_argument& e)
+ else
{
- fail << "failed to parse service data: " << e;
+ error << "check_run " << cr.check_run.node_id
+ << ": unable to update conclusion check run";
+ f = true;
}
- tenant_id = d->tenant_id;
- }
- else
- {
- // No such tenant.
+ // Fail the handler if either of the check runs could not be
+ // updated.
//
- fail << "check run " << cr.check_run.node_id
- << " re-requested but tenant_service with id " << sid
- << " does not exist";
- }
- }
-
- // Get a new IAT if the one from the service data has expired.
- //
- const gh_installation_access_token* iat (nullptr);
- optional<gh_installation_access_token> new_iat;
+ if (f)
+ throw server_error ();
- if (system_clock::now () > sd.installation_access.expires_at)
- {
- if ((new_iat = get_iat ()))
- iat = &*new_iat;
- else
- throw server_error ();
+ return true;
+ }
}
- else
- iat = &sd.installation_access;
// Fail if it's the conclusion check run that is being re-requested.
//
+ // Expect that if the user selects re-run all failed checks we will
+ // receive multiple check runs, one of which will be the conclusion. And
+ // if we fail it while it happens to arrive last, then we will end up in
+ // the wrong overall state (real check run is building while conclusion is
+ // failed). It seems the best we can do is to ignore it: if the user did
+ // request a rebuild of the conclusion check run explicitly, there will be
+ // no change, which is not ideal but is still an indication that this
+ // operation is not supported.
+ //
if (cr.check_run.name == conclusion_check_run_name)
{
l3 ([&]{trace << "re-requested conclusion check_run";});
+#if 0
if (!sd.conclusion_node_id)
fail << "no conclusion node id for check run " << cr.check_run.node_id;
@@ -1013,6 +1393,7 @@ namespace brep
<< ": unable to update conclusion check run "
<< *sd.conclusion_node_id;
}
+#endif
return true;
}
@@ -1269,159 +1650,6 @@ namespace brep
return true;
}
- // Miscellaneous pull request facts
- //
- // - Although some of the GitHub documentation makes it sound like they
- // expect check runs to be added to both the PR head commit and the merge
- // commit, the PR UI does not react to the merge commit's check runs
- // consistently. It actually seems to be quite broken. The only thing it
- // does seem to do reliably is blocking the PR merge if the merge commit's
- // check runs are not successful (i.e, overriding the PR head commit's
- // check runs). But the UI looks quite messed up generally in this state.
- //
- // - When new commits are added to a PR base branch, pull_request.base.sha
- // does not change, but the test merge commit will be updated to include
- // the new commits to the base branch.
- //
- // - When new commits are added to a PR head branch, pull_request.head.sha
- // gets updated with the head commit's SHA and check_suite.pull_requests[]
- // will contain all PRs with this branch as head.
- //
- bool ci_github::
- handle_pull_request (gh_pull_request_event pr, bool warning_success)
- {
- HANDLER_DIAG;
-
- l3 ([&]{trace << "pull_request event { " << pr << " }";});
-
- // While we don't need the installation access token in this request,
- // let's obtain it to flush out any permission issues early. Also, it is
- // valid for an hour so we will most likely make use of it.
- //
- optional<string> jwt (generate_jwt (trace, error));
- if (!jwt)
- throw server_error ();
-
- optional<gh_installation_access_token> iat (
- obtain_installation_access_token (pr.installation.id,
- move (*jwt),
- error));
- if (!iat)
- throw server_error ();
-
- l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
-
- // Distinguish between local and remote PRs by comparing the head and base
- // repositories' paths.
- //
- service_data::kind_type kind (
- pr.pull_request.head_path == pr.pull_request.base_path
- ? service_data::local
- : service_data::remote);
-
- // Note that similar to the branch push case above, while it would have
- // been nice to cancel the previous CI job once the PR head moves (the
- // "synchronize" event), due to the head sharing problem the previous CI
- // job might actually still be relevant (in both local and remote PR
- // cases). So we only do it for the remote PRs and only if the head is not
- // shared (via tenant reference counting).
- //
- if (kind == service_data::remote && pr.action == "synchronize")
- {
- if (pr.before)
- {
- // Service id that will uniquely identify the CI tenant.
- //
- string sid (pr.repository.node_id + ':' + *pr.before);
-
- if (optional<tenant_service> ts = cancel (error, warn,
- verb_ ? &trace : nullptr,
- *build_db_, retry_,
- "ci-github", sid,
- true /* ref_count */))
- {
- l3 ([&]{trace << "pull request " << pr.pull_request.node_id
- << ": attempted to cancel CI of previous head commit"
- << " (ref_count: " << ts->ref_count << ')';});
- }
- else
- {
- // It's possible that there was no CI for the previous commit for
- // various reasons (e.g., CI was not enabled).
- //
- l3 ([&]{trace << "pull request " << pr.pull_request.node_id
- << ": failed to cancel CI of previous head commit "
- << "with tenant_service id " << sid;});
- }
- }
- else
- {
- error << "pull request " << pr.pull_request.node_id
- << ": before commit is missing in synchronize event";
- }
- }
-
- // Note: for remote PRs the check_sha will be set later, in
- // build_unloaded_pre_check(), to test merge commit id.
- //
- string check_sha (kind == service_data::local
- ? pr.pull_request.head_sha
- : "");
-
- // Note that PR rebuilds (re-requested) are handled by check_suite().
- //
- // Note that, in the case of a remote PR, GitHub will copy the PR head
- // commit from the head (forked) repository into the base repository. So
- // the check runs must always be added to the base repository, whether the
- // PR is local or remote. The head commit refs are located at
- // refs/pull/<PR-number>/head.
- //
- service_data sd (warning_success,
- move (iat->token),
- iat->expires_at,
- pr.installation.id,
- move (pr.repository.node_id),
- move (pr.repository.clone_url),
- kind, true /* pre_check */, false /* re_request */,
- move (check_sha),
- move (pr.pull_request.head_sha) /* report_sha */,
- pr.pull_request.node_id,
- pr.pull_request.number);
-
- // Create an unloaded CI tenant for the pre-check phase (during which we
- // wait for the PR's merge commit and behindness to become available).
- //
- // Create with an empty service id so that the generated tenant id is used
- // instead during the pre-check phase (so as not to clash with a proper
- // service id for this head commit, potentially created in
- // handle_check_suite() or as another PR).
- //
- tenant_service ts ("", "ci-github", sd.json ());
-
- // Note: use no delay since we need to start the actual CI (which in turn
- // (re)creates the synthetic conclusion check run) as soon as possible.
- //
- // After this call we will start getting the build_unloaded()
- // notifications -- which will be routed to build_unloaded_pre_check() --
- // until we cancel the tenant or it gets archived after some timeout.
- // (Note that we never actually load this request, we always cancel it;
- // see build_unloaded_pre_check() for details.)
- //
- if (!create (error,
- warn,
- verb_ ? &trace : nullptr,
- *build_db_, retry_,
- move (ts),
- chrono::seconds (30) /* interval */,
- chrono::seconds (0) /* delay */))
- {
- fail << "pull request " << pr.pull_request.node_id
- << ": unable to create unloaded pre-check tenant";
- }
-
- return true;
- }
-
function<optional<string> (const string&, const tenant_service&)> ci_github::
build_unloaded (const string& ti,
tenant_service&& ts,
@@ -1535,7 +1763,8 @@ namespace brep
// Create an unloaded CI tenant, doing nothing if one already exists
// (which could've been created by a head branch push or another PR
- // sharing the same head commit).
+ // sharing the same head commit). Note that the tenant's reference count
+ // is incremented in all cases.
//
// Note: use no delay since we need to (re)create the synthetic
// conclusion check run as soon as possible.
@@ -1567,7 +1796,7 @@ namespace brep
// local PR, or another remote PR) which in turn means the CI
// result may end up being for head, not merge commit. There is
// nothing we can do about it on our side (the user can enable the
- // head-behind- base protection on their side).
+ // head-behind-base protection on their side).
//
if (sd.kind == service_data::remote)
{
@@ -1682,7 +1911,7 @@ namespace brep
if (system_clock::now () > sd.installation_access.expires_at)
{
- if (optional<string> jwt = generate_jwt (trace, error))
+ if (optional<string> jwt = generate_jwt (sd.app_id, trace, error))
{
new_iat = obtain_installation_access_token (sd.installation_id,
move (*jwt),
@@ -1761,13 +1990,13 @@ namespace brep
return nullopt;
};
+ // (Re)create the synthetic conclusion check run first in order to convert
+ // a potentially completed check suite to building as early as possible.
+ //
// Note that there is a window between receipt of a check_suite or
// pull_request event and the first bot/worker asking for a task, which
// could be substantial. We could probably (also) try to (re)create the
// conclusion checkrun in the webhook handler. @@ Maybe/later.
-
- // (Re)create the synthetic conclusion check run first in order to convert
- // a potentially completed check suite to building as early as possible.
//
string conclusion_node_id; // Conclusion check run node ID.
@@ -2067,11 +2296,14 @@ namespace brep
//
bs.push_back (b);
- crs.emplace_back (move (bid),
- gh_check_run_name (b, &hs),
- nullopt, /* node_id */
- build_state::queued,
- false /* state_synced */);
+ crs.push_back (
+ check_run {move (bid),
+ gh_check_run_name (b, &hs),
+ nullopt, /* node_id */
+ build_state::queued,
+ false /* state_synced */,
+ nullopt /* status */,
+ nullopt /* details_url */});
}
}
@@ -2085,7 +2317,7 @@ namespace brep
if (system_clock::now () > sd.installation_access.expires_at)
{
- if (optional<string> jwt = generate_jwt (trace, error))
+ if (optional<string> jwt = generate_jwt (sd.app_id, trace, error))
{
new_iat = obtain_installation_access_token (sd.installation_id,
move (*jwt),
@@ -2250,7 +2482,7 @@ namespace brep
if (system_clock::now () > sd.installation_access.expires_at)
{
- if (optional<string> jwt = generate_jwt (trace, error))
+ if (optional<string> jwt = generate_jwt (sd.app_id, trace, error))
{
new_iat = obtain_installation_access_token (sd.installation_id,
move (*jwt),
@@ -2456,7 +2688,7 @@ namespace brep
if (system_clock::now () > sd.installation_access.expires_at)
{
- if (optional<string> jwt = generate_jwt (trace, error))
+ if (optional<string> jwt = generate_jwt (sd.app_id, trace, error))
{
new_iat = obtain_installation_access_token (sd.installation_id,
move (*jwt),
@@ -2881,19 +3113,32 @@ namespace brep
}
optional<string> ci_github::
- generate_jwt (const basic_mark& trace,
+ generate_jwt (const string& app_id,
+ const basic_mark& trace,
const basic_mark& error) const
{
string jwt;
try
{
+ // Look up the private key path for the app id and fail if not found.
+ //
+ const map<string, dir_path>& pks (
+ options_->ci_github_app_id_private_key ());
+
+ auto pk (pks.find (app_id));
+ if (pk == pks.end ())
+ {
+ error << "unable to generate JWT: "
+ << "no private key configured for app id " << app_id;
+ return nullopt;
+ }
+
// Set token's "issued at" time 60 seconds in the past to combat clock
// drift (as recommended by GitHub).
//
jwt = brep::generate_jwt (
*options_,
- options_->ci_github_app_private_key (),
- to_string (options_->ci_github_app_id ()),
+ pk->second, app_id,
chrono::seconds (options_->ci_github_jwt_validity_period ()),
chrono::seconds (60));
@@ -2949,7 +3194,7 @@ namespace brep
// example.
//
optional<gh_installation_access_token> ci_github::
- obtain_installation_access_token (uint64_t iid,
+ obtain_installation_access_token (const string& iid,
string jwt,
const basic_mark& error) const
{
@@ -2958,7 +3203,7 @@ namespace brep
{
// API endpoint.
//
- string ep ("app/installations/" + to_string (iid) + "/access_tokens");
+ string ep ("app/installations/" + iid + "/access_tokens");
uint16_t sc (
github_post (iat, ep, strings {"Authorization: Bearer " + jwt}));
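
(For reference: the REST request that obtain_installation_access_token() ends
up making is a POST to the app/installations/<id>/access_tokens endpoint on
api.github.com, authenticated with the app JWT as a bearer token. Below is a
minimal standalone sketch of that request using libcurl; it is not brep code
-- brep goes through its own github_post() helper -- and the installation id
and JWT values are placeholders.)

// Sketch only: g++ iat-sketch.cxx -lcurl
//
#include <string>
#include <iostream>

#include <curl/curl.h>

static size_t
write_cb (char* ptr, size_t size, size_t nmemb, void* userdata)
{
  static_cast<std::string*> (userdata)->append (ptr, size * nmemb);
  return size * nmemb;
}

int
main ()
{
  std::string iid ("12345678");  // Placeholder installation id.
  std::string jwt ("<app-jwt>"); // Placeholder GitHub App JWT.

  std::string url ("https://api.github.com/app/installations/" + iid +
                   "/access_tokens");

  curl_global_init (CURL_GLOBAL_DEFAULT);

  CURL* h (curl_easy_init ());
  if (h == nullptr)
    return 1;

  curl_slist* hs (nullptr);
  hs = curl_slist_append (hs, ("Authorization: Bearer " + jwt).c_str ());
  hs = curl_slist_append (hs, "Accept: application/vnd.github+json");
  hs = curl_slist_append (hs, "User-Agent: brep-iat-sketch");

  std::string body;
  curl_easy_setopt (h, CURLOPT_URL, url.c_str ());
  curl_easy_setopt (h, CURLOPT_POST, 1L);
  curl_easy_setopt (h, CURLOPT_POSTFIELDS, ""); // Empty request body.
  curl_easy_setopt (h, CURLOPT_HTTPHEADER, hs);
  curl_easy_setopt (h, CURLOPT_WRITEFUNCTION, write_cb);
  curl_easy_setopt (h, CURLOPT_WRITEDATA, &body);

  CURLcode r (curl_easy_perform (h));

  long sc (0);
  curl_easy_getinfo (h, CURLINFO_RESPONSE_CODE, &sc);

  // On success GitHub responds with 201 and a JSON body containing the
  // installation access token and its expiration time.
  //
  std::cout << "status: " << sc << '\n' << body << '\n';

  curl_slist_free_all (hs);
  curl_easy_cleanup (h);
  curl_global_cleanup ();

  return r == CURLE_OK ? 0 : 1;
}
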
diff --git a/mod/mod-ci-github.hxx b/mod/mod-ci-github.hxx
index f97bf05..1e5f24f 100644
--- a/mod/mod-ci-github.hxx
+++ b/mod/mod-ci-github.hxx
@@ -82,37 +82,45 @@ namespace brep
virtual void
init (cli::scanner&);
- // Handle the check_suite event `requested` and `rerequested` actions.
+ // Handle push events (branch push).
//
// If warning_success is true, then map result_status::warning to SUCCESS
// and to FAILURE otherwise.
//
bool
- handle_check_suite_request (gh_check_suite_event, bool warning_success);
+ handle_branch_push (gh_push_event, bool warning_success);
- // Handle the check_suite event `completed` action.
+ // Handle the pull_request event `opened` and `synchronize` actions.
//
// If warning_success is true, then map result_status::warning to SUCCESS
// and to FAILURE otherwise.
//
bool
- handle_check_suite_completed (gh_check_suite_event, bool warning_success);
+ handle_pull_request (gh_pull_request_event, bool warning_success);
- // Handle the check_run event `rerequested` action.
+ // Handle the check_suite event `rerequested` action.
//
// If warning_success is true, then map result_status::warning to SUCCESS
// and to FAILURE otherwise.
//
bool
- handle_check_run_rerequest (const gh_check_run_event&, bool warning_success);
+ handle_check_suite_rerequest (gh_check_suite_event, bool warning_success);
- // Handle the pull_request event `opened` and `synchronize` actions.
+ // Handle the check_suite event `completed` action.
//
// If warning_success is true, then map result_status::warning to SUCCESS
// and to FAILURE otherwise.
//
bool
- handle_pull_request (gh_pull_request_event, bool warning_success);
+ handle_check_suite_completed (gh_check_suite_event, bool warning_success);
+
+ // Handle the check_run event `rerequested` action.
+ //
+ // If warning_success is true, then map result_status::warning to SUCCESS
+ // and to FAILURE otherwise.
+ //
+ bool
+ handle_check_run_rerequest (const gh_check_run_event&, bool warning_success);
// Build a check run details_url for a build.
//
@@ -120,14 +128,16 @@ namespace brep
details_url (const build&) const;
optional<string>
- generate_jwt (const basic_mark& trace, const basic_mark& error) const;
+ generate_jwt (const string& app_id,
+ const basic_mark& trace,
+ const basic_mark& error) const;
// Authenticate to GitHub as an app installation. Return the installation
// access token (IAT). Issue diagnostics and return nullopt if something
// goes wrong.
//
optional<gh_installation_access_token>
- obtain_installation_access_token (uint64_t install_id,
+ obtain_installation_access_token (const string& install_id,
string jwt,
const basic_mark& error) const;
@@ -135,6 +145,8 @@ namespace brep
shared_ptr<options::ci_github> options_;
tenant_service_map& tenant_service_map_;
+
+ string webhook_secret_;
};
}
diff --git a/mod/mod-ci.cxx b/mod/mod-ci.cxx
index 46fbf6a..16ec5a7 100644
--- a/mod/mod-ci.cxx
+++ b/mod/mod-ci.cxx
@@ -105,17 +105,17 @@ init (scanner& s)
fail << "unable to read ci-form file '" << ci_form << "': " << e;
}
}
- }
#ifdef BREP_CI_TENANT_SERVICE_UNLOADED
- if (!options_->build_config_specified ())
- fail << "package building functionality must be enabled";
+ if (!options_->build_config_specified ())
+ fail << "package building functionality must be enabled";
- database_module::init (*options_, options_->build_db_retry ());
+ database_module::init (*options_, options_->build_db_retry ());
#endif
- if (options_->root ().empty ())
- options_->root (dir_path ("/"));
+ if (options_->root ().empty ())
+ options_->root (dir_path ("/"));
+ }
}
bool brep::ci::
@@ -131,8 +131,6 @@ handle (request& rq, response& rs)
HANDLER_DIAG;
- const dir_path& root (options_->root ());
-
// We will respond with the manifest to the CI request submission protocol
// violations and with a plain text message on the internal errors. In the
// latter case we will always respond with the same neutral message for
@@ -180,6 +178,8 @@ handle (request& rq, response& rs)
if (!options_->ci_data_specified ())
return respond_manifest (404, "CI request submission disabled");
+ const dir_path& root (options_->root ());
+
// Parse the request form data and verify the submission size limit.
//
// Note that the submission may include the overrides upload that we don't
@@ -387,18 +387,19 @@ handle (request& rq, response& rs)
optional<start_result> r;
- if (optional<string> ref = create (error,
- warn,
- verb_ ? &trace : nullptr,
- *build_db_,
- tenant_service ("", "ci", rl.string ()),
- chrono::seconds (40),
- chrono::seconds (10)))
+ if (optional<pair<string, duplicate_tenant_result>> ref =
+ create (error,
+ warn,
+ verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ tenant_service ("", "ci", rl.string ()),
+ chrono::seconds (40),
+ chrono::seconds (10)))
{
string msg ("unloaded CI request is created: " +
- options_->host () + tenant_dir (root, *ref).string ());
+ options_->host () + tenant_dir (root, ref->first).string ());
- r = start_result {200, move (msg), move (*ref), {}};
+ r = start_result {200, move (msg), move (ref->first), {}};
}
#endif
@@ -519,8 +520,10 @@ build_built (const string& /*tenant_id*/,
}
#ifdef BREP_CI_TENANT_SERVICE_UNLOADED
-function<optional<string> (const brep::tenant_service&)> brep::ci::
-build_unloaded (tenant_service&& ts,
+function<optional<string> (const string& tenant_id,
+ const brep::tenant_service&)> brep::ci::
+build_unloaded (const string& /* tenant_id */,
+ tenant_service&& ts,
const diag_epilogue& log_writer) const noexcept
{
NOTIFICATION_DIAG (log_writer);
@@ -532,7 +535,7 @@ build_unloaded (tenant_service&& ts,
repository_location rl (*ts.data);
if (!load (error, warn, verb_ ? &trace : nullptr,
- *build_db_,
+ *build_db_, retry_,
move (ts),
rl))
return nullptr; // The diagnostics is already issued.
@@ -545,7 +548,10 @@ build_unloaded (tenant_service&& ts,
return nullptr;
}
- return [] (const tenant_service& ts) {return "loaded " + *ts.data;};
+ return [] (const string& tenant_id, const tenant_service& ts)
+ {
+ return "loaded " + tenant_id + ' ' + *ts.data;
+ };
}
#endif
#endif
diff --git a/mod/mod-submit.cxx b/mod/mod-submit.cxx
index 5ee358a..6c767cb 100644
--- a/mod/mod-submit.cxx
+++ b/mod/mod-submit.cxx
@@ -93,10 +93,10 @@ init (scanner& s)
if (options_->submit_handler_specified () &&
options_->submit_handler ().relative ())
fail << "submit-handler path must be absolute";
- }
- if (options_->root ().empty ())
- options_->root (dir_path ("/"));
+ if (options_->root ().empty ())
+ options_->root (dir_path ("/"));
+ }
}
bool brep::submit::
@@ -109,8 +109,6 @@ handle (request& rq, response& rs)
HANDLER_DIAG;
- const dir_path& root (options_->root ());
-
// We will respond with the manifest to the submission protocol violations
// and with a plain text message on the internal errors. In the latter case
// we will always respond with the same neutral message for security reason,
@@ -163,6 +161,8 @@ handle (request& rq, response& rs)
if (!options_->submit_data_specified ())
return respond_manifest (404, "submission disabled");
+ const dir_path& root (options_->root ());
+
// Parse the request form data and verify the submission size limit.
//
// Note that if it is exceeded, then there are parameters and this is the
diff --git a/mod/module.cli b/mod/module.cli
index 274ecd4..ba2b986 100644
--- a/mod/module.cli
+++ b/mod/module.cli
@@ -850,25 +850,20 @@ namespace brep
// GitHub CI-specific options.
//
- size_t ci-github-app-id
+ path ci-github-app-webhook-secret
{
- "<id>",
- "The GitHub App ID. Found in the app's settings on GitHub."
- }
-
- string ci-github-app-webhook-secret
- {
- "<secret>",
+ "<path>",
"The GitHub App's configured webhook secret. If not set, then the
- GitHub CI service is disabled. Note: make sure to choose a strong
- (random) secret."
+ GitHub CI service is disabled. Note that the path must be absolute.
+ Note: make sure to choose a strong (random) secret."
}
- path ci-github-app-private-key
+ std::map<string, dir_path> ci-github-app-id-private-key
{
- "<path>",
- "The private key used during GitHub API authentication. Created in
- the GitHub App's settings."
+ "<id>=<path>",
+ "The private key used during GitHub API authentication for the
+     specified GitHub App ID. Both values are found in the GitHub App's
+ settings. Note that the paths must be absolute."
}
uint16_t ci-github-jwt-validity-period = 600
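
(To illustrate the two new GitHub App options, here is a hypothetical
brep-module.conf excerpt; the app id and file paths are made-up values, and
since the private key option is a map it can presumably be repeated once per
app id:)

# GitHub App webhook secret (absolute path).
#
ci-github-app-webhook-secret /home/brep/config/github-webhook-secret

# Private key for GitHub App id 123456, in <id>=<path> form.
#
ci-github-app-id-private-key 123456=/home/brep/config/github-app-123456-private-key.pem
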