-rw-r--r--  etc/brep-module.conf                   |    2
-rw-r--r--  etc/private/install/brep-module.conf   |   23
-rw-r--r--  libbrep/build-extra.sql                |    1
-rw-r--r--  libbrep/build.hxx                      |    2
-rw-r--r--  libbrep/build.xml                      |    2
-rw-r--r--  libbrep/common.hxx                     |    7
-rw-r--r--  libbrep/package.hxx                    |    2
-rw-r--r--  libbrep/package.xml                    |    6
-rw-r--r--  migrate/migrate.cxx                    |   10
-rw-r--r--  mod/ci-common.cxx                      |   75
-rw-r--r--  mod/ci-common.hxx                      |   24
-rw-r--r--  mod/database-module.cxx                |   20
-rw-r--r--  mod/database-module.hxx                |   14
-rw-r--r--  mod/mod-build-force.cxx                |    5
-rw-r--r--  mod/mod-build-result.cxx               |    9
-rw-r--r--  mod/mod-build-task.cxx                 |   26
-rw-r--r--  mod/mod-ci-github-gh.cxx               |   53
-rw-r--r--  mod/mod-ci-github-gh.hxx               |   29
-rw-r--r--  mod/mod-ci-github-gq.cxx               |  250
-rw-r--r--  mod/mod-ci-github-gq.hxx               |   45
-rw-r--r--  mod/mod-ci-github-service-data.cxx     |   73
-rw-r--r--  mod/mod-ci-github-service-data.hxx     |   17
-rw-r--r--  mod/mod-ci-github.cxx                  | 1222
-rw-r--r--  mod/mod-ci-github.hxx                  |   35
-rw-r--r--  mod/mod-ci.cxx                         |   29
-rw-r--r--  mod/mod-ci.hxx                         |   24
-rw-r--r--  mod/module.cli                         |    3
-rw-r--r--  mod/tenant-service.hxx                 |   32
28 files changed, 1389 insertions, 651 deletions
diff --git a/etc/brep-module.conf b/etc/brep-module.conf
index 80f7cbf..c1f7ca8 100644
--- a/etc/brep-module.conf
+++ b/etc/brep-module.conf
@@ -459,7 +459,7 @@ menu About=?about
# The GitHub App's configured webhook secret. If not set, then the GitHub CI
-# service is disabled.
+# service is disabled. Note: make sure to choose a strong (random) secret.
#
# ci-github-app-webhook-secret
diff --git a/etc/private/install/brep-module.conf b/etc/private/install/brep-module.conf
index 0c7f065..f64cc0d 100644
--- a/etc/private/install/brep-module.conf
+++ b/etc/private/install/brep-module.conf
@@ -461,6 +461,29 @@ submit-handler-timeout 120
# ci-handler-timeout
+# The GitHub App ID. Found in the app's settings on GitHub.
+#
+# ci-github-app-id
+
+
+# The GitHub App's configured webhook secret. If not set, then the GitHub CI
+# service is disabled. Note: make sure to choose a strong (random) secret.
+#
+# ci-github-app-webhook-secret
+
+
+# The private key used during GitHub API authentication. Created in the GitHub
+# App's settings.
+#
+# ci-github-app-private-key
+
+
+# The number of seconds a JWT (authentication token) should be valid for. The
+# maximum allowed by GitHub is 10 minutes.
+#
+# ci-github-jwt-validity-period 600
+
+
# The directory to save upload data to for the specified upload type. If
# unspecified, the build artifacts upload functionality will be disabled for
# this type.
diff --git a/libbrep/build-extra.sql b/libbrep/build-extra.sql
index 0c0f010..3134fbb 100644
--- a/libbrep/build-extra.sql
+++ b/libbrep/build-extra.sql
@@ -50,6 +50,7 @@ CREATE FOREIGN TABLE build_tenant (
archived BOOLEAN NOT NULL,
service_id TEXT NULL,
service_type TEXT NULL,
+ service_ref_count BIGINT NULL,
service_data TEXT NULL,
unloaded_timestamp BIGINT NULL,
unloaded_notify_interval BIGINT NULL,
diff --git a/libbrep/build.hxx b/libbrep/build.hxx
index b485636..5ebbb0c 100644
--- a/libbrep/build.hxx
+++ b/libbrep/build.hxx
@@ -28,7 +28,7 @@
//
#define LIBBREP_BUILD_SCHEMA_VERSION_BASE 28
-#pragma db model version(LIBBREP_BUILD_SCHEMA_VERSION_BASE, 28, closed)
+#pragma db model version(LIBBREP_BUILD_SCHEMA_VERSION_BASE, 29, closed)
// We have to keep these mappings at the global scope instead of inside the
// brep namespace because they need to be also effective in the bbot namespace
diff --git a/libbrep/build.xml b/libbrep/build.xml
index d58e5f4..284db49 100644
--- a/libbrep/build.xml
+++ b/libbrep/build.xml
@@ -1,4 +1,6 @@
<changelog xmlns="http://www.codesynthesis.com/xmlns/odb/changelog" database="pgsql" schema-name="build" version="1">
+ <changeset version="29"/>
+
<model version="28">
<table name="build" kind="object">
<column name="package_tenant" type="TEXT" null="false"/>
diff --git a/libbrep/common.hxx b/libbrep/common.hxx
index d2009f5..22302f3 100644
--- a/libbrep/common.hxx
+++ b/libbrep/common.hxx
@@ -543,17 +543,22 @@ namespace brep
//
// Note that the {id, type} pair must be unique.
//
+ // The reference count is used to keep track of the number of attempts to
+ // create a duplicate tenant with this {id, type} (see ci_start::create()
+ // for details).
+ //
#pragma db value
struct tenant_service
{
string id;
string type;
+ uint64_t ref_count;
optional<string> data;
tenant_service () = default;
tenant_service (string i, string t, optional<string> d = nullopt)
- : id (move (i)), type (move (t)), data (move (d)) {}
+ : id (move (i)), type (move (t)), ref_count (1), data (move (d)) {}
};
// Version comparison operators.
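A minimal sketch of the intended reference-count life cycle (not part of the commit; the service id value is hypothetical and the semantics are those documented in mod/ci-common.hxx below):

  tenant_service ts ("repo-node-id:head-sha", "ci-github"); // ref_count == 1

  // First ci_start::create(): result is `created`, ref_count stays 1.
  // Repeated create() in the ignore mode: result is `ignored`, ref_count
  // becomes 2. A subsequent cancel() call with ref_count == true decrements
  // the count and only cancels the CI request once it reaches 0.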
diff --git a/libbrep/package.hxx b/libbrep/package.hxx
index 79b2c68..2714d10 100644
--- a/libbrep/package.hxx
+++ b/libbrep/package.hxx
@@ -20,7 +20,7 @@
//
#define LIBBREP_PACKAGE_SCHEMA_VERSION_BASE 34
-#pragma db model version(LIBBREP_PACKAGE_SCHEMA_VERSION_BASE, 35, closed)
+#pragma db model version(LIBBREP_PACKAGE_SCHEMA_VERSION_BASE, 36, closed)
namespace brep
{
diff --git a/libbrep/package.xml b/libbrep/package.xml
index 8b6c706..ac48ec4 100644
--- a/libbrep/package.xml
+++ b/libbrep/package.xml
@@ -1,4 +1,10 @@
<changelog xmlns="http://www.codesynthesis.com/xmlns/odb/changelog" database="pgsql" schema-name="package" version="1">
+ <changeset version="36">
+ <alter-table name="tenant">
+ <add-column name="service_ref_count" type="BIGINT" null="true"/>
+ </alter-table>
+ </changeset>
+
<changeset version="35">
<alter-table name="package">
<add-column name="reviews_pass" type="BIGINT" null="true"/>
diff --git a/migrate/migrate.cxx b/migrate/migrate.cxx
index 090fcac..095e6a3 100644
--- a/migrate/migrate.cxx
+++ b/migrate/migrate.cxx
@@ -208,7 +208,6 @@ create (database& db, bool extra_only) const
// Register the data migration functions for the package database schema.
//
-#if 0
template <schema_version v>
using package_migration_entry_base =
data_migration_entry<v, LIBBREP_PACKAGE_SCHEMA_VERSION_BASE>;
@@ -220,11 +219,14 @@ struct package_migration_entry: package_migration_entry_base<v>
: package_migration_entry_base<v> (f, "package") {}
};
-static const package_migration_entry<26>
-package_migrate_v26 ([] (database& db)
+static const package_migration_entry<36>
+package_migrate_v36 ([] (database& db)
{
+ // Set the reference count to 1 for tenant associated services.
+ //
+ db.execute ("UPDATE tenant SET service_ref_count = 1 "
+ "WHERE service_id IS NOT NULL");
});
-#endif
// Register the data migration functions for the build database schema.
//
diff --git a/mod/ci-common.cxx b/mod/ci-common.cxx
index d750b1b..e720914 100644
--- a/mod/ci-common.cxx
+++ b/mod/ci-common.cxx
@@ -553,7 +553,11 @@ namespace brep
assert (!transaction::has_current ());
build_tenant t;
+
+ // Set the reference count to 1 for the `created` result.
+ //
duplicate_tenant_result r (duplicate_tenant_result::created);
+ service.ref_count = 1;
for (string request_id;;)
{
@@ -584,14 +588,31 @@ namespace brep
: duplicate_tenant_mode::ignore);
}
+ // Shouldn't be here otherwise.
+ //
+ assert (t->service);
+
// Bail out in the ignore mode and cancel the tenant in the
// replace mode.
//
if (mode == duplicate_tenant_mode::ignore)
+ {
+ // Increment the reference count for the `ignored` result.
+ //
+ ++(t->service->ref_count);
+
+ db.update (t);
+ tr.commit ();
+
return make_pair (move (t->id), duplicate_tenant_result::ignored);
+ }
assert (mode == duplicate_tenant_mode::replace);
+ // Preserve the current reference count for the `replaced` result.
+ //
+ service.ref_count = t->service->ref_count;
+
if (t->unloaded_timestamp)
{
db.erase (t);
@@ -678,6 +699,7 @@ namespace brep
//
request_id = move (t.id);
service = move (*t.service);
+ service.ref_count = 1;
r = duplicate_tenant_result::created;
}
}
@@ -788,7 +810,8 @@ namespace brep
odb::core::database& db,
size_t retry,
const string& type,
- const string& id) const
+ const string& id,
+ bool ref_count) const
{
using namespace odb::core;
@@ -810,25 +833,44 @@ namespace brep
if (t == nullptr)
return nullopt;
- r = move (t->service);
+ // Shouldn't be here otherwise.
+ //
+ assert (t->service && t->service->ref_count != 0);
- if (t->unloaded_timestamp)
+ bool cancel (!ref_count || --(t->service->ref_count) == 0);
+
+ if (cancel)
{
- db.erase (t);
+ // Move out the service state before it is dropped from the tenant.
+ //
+ r = move (t->service);
+
+ if (t->unloaded_timestamp)
+ {
+ db.erase (t);
+ }
+ else
+ {
+ t->service = nullopt;
+ t->archived = true;
+ db.update (t);
+ }
+
+ if (trace != nullptr)
+ *trace << "CI request " << t->id << " for service " << id << ' '
+ << type << " is canceled";
}
else
{
- t->service = nullopt;
- t->archived = true;
- db.update (t);
+ db.update (t); // Update the service reference count.
+
+ // Move out the service state after the tenant is updated.
+ //
+ r = move (t->service);
}
tr.commit ();
- if (trace != nullptr)
- *trace << "CI request " << t->id << " for service " << id << ' '
- << type << " is canceled";
-
// Bail out if we have successfully updated or erased the tenant
// object.
//
@@ -913,7 +955,8 @@ namespace brep
rebuild (odb::core::database& db,
size_t retry,
const build_id& id,
- function<optional<string> (const tenant_service&,
+ function<optional<string> (const string& tenant_id,
+ const tenant_service&,
build_state)> uf) const
{
using namespace odb::core;
@@ -960,7 +1003,7 @@ namespace brep
tenant_service& ts (*t->service);
- if (optional<string> data = uf (ts, s))
+ if (optional<string> data = uf (t->id, ts, s))
{
ts.data = move (*data);
db.update (t);
@@ -988,7 +1031,7 @@ namespace brep
return s;
}
- optional<pair<tenant_service, bool>> ci_start::
+ optional<ci_start::tenant_data> ci_start::
find (odb::core::database& db,
const string& type,
const string& id) const
@@ -1007,9 +1050,9 @@ namespace brep
tr.commit ();
- if (t == nullptr)
+ if (t == nullptr || !t->service)
return nullopt;
- return pair<tenant_service, bool> (move (t->service), t->archived);
+ return tenant_data {move (t->id), move (*t->service), t->archived};
}
}
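A minimal sketch of how a caller might interpret the result of cancel() in the new reference-counting mode, assuming r is the optional<tenant_service> returned by cancel (..., type, id, true /* ref_count */); the helper itself is hypothetical and not part of the commit:

  void
  handle_cancel_result (const optional<tenant_service>& r)
  {
    if (!r)
      ;                           // No tenant with this {type, id}.
    else if (r->ref_count == 0)
      ;                           // Actually canceled (archived or erased).
    else
      ;                           // Only the reference count was decremented.
  }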
diff --git a/mod/ci-common.hxx b/mod/ci-common.hxx
index 36d5f0e..a38ac54 100644
--- a/mod/ci-common.hxx
+++ b/mod/ci-common.hxx
@@ -103,6 +103,10 @@ namespace brep
// Finally note that only duplicate_tenant_mode::fail can be used if the
// service id is empty.
//
+ // The tenant reference count is set to 1 if the result is `created`,
+ // incremented if the result is `ignored`, and preserved if the result is
+ // `replaced`.
+ //
// Repeat the attempts on the recoverable database failures (deadlocks,
// etc) and throw runtime_error if no more retries left.
//
@@ -150,6 +154,11 @@ namespace brep
// dropped. Note that the latter allow using unloaded tenants as a
// relatively cheap asynchronous execution mechanism.
//
+ // If ref_count is true, then decrement the tenant reference count and
+ // only cancel the CI request if it becomes 0. In this mode the caller can
+ // determine if the request was actually canceled by checking if the
+ // reference count in the returned service state is 0.
+ //
// Repeat the attempts on the recoverable database failures (deadlocks,
// etc) and throw runtime_error if no more retries left.
//
@@ -162,7 +171,8 @@ namespace brep
odb::core::database&,
size_t retry,
const string& type,
- const string& id) const;
+ const string& id,
+ bool ref_count = false) const;
// Cancel previously created or started CI request. Return false if there
// is no tenant for the specified tenant id. Note that the reason argument
@@ -232,7 +242,8 @@ namespace brep
rebuild (odb::core::database&,
size_t retry,
const build_id&,
- function<optional<string> (const tenant_service&,
+ function<optional<string> (const string& tenant_id,
+ const tenant_service&,
build_state)> = nullptr) const;
// Find the tenant given the tenant service type and id and return the
@@ -241,7 +252,14 @@ namespace brep
//
// Note: should be called out of the database transaction.
//
- optional<pair<tenant_service, bool /*archived*/>>
+ struct tenant_data
+ {
+ string tenant_id;
+ tenant_service service;
+ bool archived;
+ };
+
+ optional<tenant_data>
find (odb::core::database&,
const string& type,
const string& id) const;
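A minimal usage sketch of the new find() result type, assuming a ci_start instance ci, a database db, and a service id sid are in scope (mirrors the call sites added in mod/mod-ci-github.cxx below):

  if (optional<ci_start::tenant_data> d = ci.find (db, "ci-github", sid))
  {
    const string& tid (d->tenant_id);  // Owning tenant id.
    tenant_service& ts (d->service);   // {id, type, ref_count, data}.
    bool archived (d->archived);       // True if the request was canceled.
  }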
diff --git a/mod/database-module.cxx b/mod/database-module.cxx
index bce8c93..629e393 100644
--- a/mod/database-module.cxx
+++ b/mod/database-module.cxx
@@ -79,8 +79,10 @@ namespace brep
optional<string> database_module::
update_tenant_service_state (
const connection_ptr& conn,
- const string& tid,
- const function<optional<string> (const tenant_service&)>& f)
+ const string& type,
+ const string& id,
+ const function<optional<string> (const string& tenant_id,
+ const tenant_service&)>& f)
{
assert (f != nullptr); // Shouldn't be called otherwise.
@@ -96,13 +98,21 @@ namespace brep
{
transaction tr (conn->begin ());
- shared_ptr<build_tenant> t (build_db_->find<build_tenant> (tid));
+ using query = query<build_tenant>;
- if (t != nullptr && t->service)
+ shared_ptr<build_tenant> t (
+ build_db_->query_one<build_tenant> (query::service.id == id &&
+ query::service.type == type));
+
+ if (t != nullptr)
{
+ // Shouldn't be here otherwise.
+ //
+ assert (t->service);
+
tenant_service& s (*t->service);
- if (optional<string> data = f (s))
+ if (optional<string> data = f (t->id, s))
{
s.data = move (*data);
build_db_->update (t);
diff --git a/mod/database-module.hxx b/mod/database-module.hxx
index 298afbf..76f13d4 100644
--- a/mod/database-module.hxx
+++ b/mod/database-module.hxx
@@ -61,16 +61,18 @@ namespace brep
// and nullopt otherwise.
//
// Specifically, start the database transaction, query the service state,
- // and call the callback-returned function on this state. If this call
- // returns the data string (rather than nullopt), then update the service
- // state with this data and persist the change. Repeat all the above steps
- // on the recoverable database failures (deadlocks, etc).
+ // and, if present, call the callback-returned function on this state. If
+ // this call returns the data string (rather than nullopt), then update
+ // the service state with this data and persist the change. Repeat all the
+ // above steps on the recoverable database failures (deadlocks, etc).
//
optional<string>
update_tenant_service_state (
const odb::core::connection_ptr&,
- const string& tid,
- const function<optional<string> (const tenant_service&)>&);
+ const string& type,
+ const string& id,
+ const function<optional<string> (const string& tenant_id,
+ const tenant_service&)>&);
protected:
size_t retry_ = 0; // Max of all retries.
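A minimal sketch of the new callback shape, assuming a connection conn and a service id sid are in scope (the call sites in mod/mod-build-*.cxx below follow this pattern):

  optional<string> r (
    update_tenant_service_state (
      conn, "ci-github", sid,
      [] (const string& tenant_id, const tenant_service& ts)
      -> optional<string>
      {
        // Return the new data to persist or nullopt to leave the service
        // state unchanged.
        //
        return ts.data;
      }));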
diff --git a/mod/mod-build-force.cxx b/mod/mod-build-force.cxx
index 8666889..d37674f 100644
--- a/mod/mod-build-force.cxx
+++ b/mod/mod-build-force.cxx
@@ -314,14 +314,15 @@ handle (request& rq, response& rs)
//
conn.reset ();
- if (auto f = tsq->build_queued (ss,
+ if (auto f = tsq->build_queued (qbs.back ().tenant,
+ ss,
qbs,
build_state::building,
qhs,
log_writer_))
{
conn = build_db_->connection ();
- update_tenant_service_state (conn, qbs.back ().tenant, f);
+ update_tenant_service_state (conn, ss.type, ss.id, f);
}
}
diff --git a/mod/mod-build-result.cxx b/mod/mod-build-result.cxx
index bc44bd2..cc058b5 100644
--- a/mod/mod-build-result.cxx
+++ b/mod/mod-build-result.cxx
@@ -545,14 +545,15 @@ handle (request& rq, response&)
//
conn.reset ();
- if (auto f = tsq->build_queued (ss,
+ if (auto f = tsq->build_queued (qbs.back ().tenant,
+ ss,
qbs,
build_state::building,
qhs,
log_writer_))
{
conn = build_db_->connection ();
- update_tenant_service_state (conn, qbs.back ().tenant, f);
+ update_tenant_service_state (conn, ss.type, ss.id, f);
}
}
@@ -572,10 +573,10 @@ handle (request& rq, response&)
//
conn.reset ();
- if (auto f = tsb->build_built (ss, b, log_writer_))
+ if (auto f = tsb->build_built (b.tenant, ss, b, log_writer_))
{
conn = build_db_->connection ();
- update_tenant_service_state (conn, b.tenant, f);
+ update_tenant_service_state (conn, ss.type, ss.id, f);
}
}
diff --git a/mod/mod-build-task.cxx b/mod/mod-build-task.cxx
index 2ae1237..c8b1bb2 100644
--- a/mod/mod-build-task.cxx
+++ b/mod/mod-build-task.cxx
@@ -499,10 +499,14 @@ handle (request& rq, response& rs)
//
conn.reset ();
- if (auto f = tsu->build_unloaded (move (*t->service), log_writer_))
+ tenant_service& ts (*t->service);
+ string type (ts.type);
+ string id (ts.id);
+
+ if (auto f = tsu->build_unloaded (t->id, move (ts), log_writer_))
{
conn = build_db_->connection ();
- update_tenant_service_state (conn, t->id, f);
+ update_tenant_service_state (conn, type, id, f);
}
}
}
@@ -2350,7 +2354,8 @@ handle (request& rq, response& rs)
//
conn.reset ();
- if (auto f = tsq->build_queued (ss,
+ if (auto f = tsq->build_queued (qbs.back ().tenant,
+ ss,
qbs,
nullopt /* initial_state */,
qhs,
@@ -2359,7 +2364,7 @@ handle (request& rq, response& rs)
conn = build_db_->connection ();
if (optional<string> data =
- update_tenant_service_state (conn, qbs.back ().tenant, f))
+ update_tenant_service_state (conn, ss.type, ss.id, f))
ss.data = move (data);
}
}
@@ -2382,7 +2387,8 @@ handle (request& rq, response& rs)
//
conn.reset ();
- if (auto f = tsq->build_queued (ss,
+ if (auto f = tsq->build_queued (qbs.back ().tenant,
+ ss,
qbs,
initial_state,
qhs,
@@ -2391,7 +2397,7 @@ handle (request& rq, response& rs)
conn = build_db_->connection ();
if (optional<string> data =
- update_tenant_service_state (conn, qbs.back ().tenant, f))
+ update_tenant_service_state (conn, ss.type, ss.id, f))
ss.data = move (data);
}
}
@@ -2418,12 +2424,12 @@ handle (request& rq, response& rs)
//
conn.reset ();
- if (auto f = tsb->build_building (ss, b, log_writer_))
+ if (auto f = tsb->build_building (b.tenant, ss, b, log_writer_))
{
conn = build_db_->connection ();
if (optional<string> data =
- update_tenant_service_state (conn, b.tenant, f))
+ update_tenant_service_state (conn, ss.type, ss.id, f))
ss.data = move (data);
}
}
@@ -2546,12 +2552,12 @@ handle (request& rq, response& rs)
//
conn.reset ();
- if (auto f = tsb->build_built (ss, b, log_writer_))
+ if (auto f = tsb->build_built (b.tenant, ss, b, log_writer_))
{
conn = build_db_->connection ();
if (optional<string> data =
- update_tenant_service_state (conn, b.tenant, f))
+ update_tenant_service_state (conn, ss.type, ss.id, f))
ss.data = move (data);
}
}
diff --git a/mod/mod-ci-github-gh.cxx b/mod/mod-ci-github-gh.cxx
index 6372ef0..2e13af2 100644
--- a/mod/mod-ci-github-gh.cxx
+++ b/mod/mod-ci-github-gh.cxx
@@ -7,6 +7,15 @@
namespace brep
{
+ [[noreturn]] static void
+ throw_json (const json::parser& p, const string& m)
+ {
+ throw json::invalid_json_input (
+ p.input_name,
+ p.line (), p.column (), p.position (),
+ m);
+ }
+
// Return the GitHub check run status corresponding to a build_state.
//
string
@@ -102,10 +111,7 @@ namespace brep
[[noreturn]] static void
missing_member (const json::parser& p, const char* o, const char* m)
{
- throw json::invalid_json_input (
- p.input_name,
- p.line (), p.column (), p.position (),
- o + string (" object is missing member '") + m + '\'');
+ throw_json (p, o + string (" object is missing member '") + m + '\'');
}
using event = json::event;
@@ -362,14 +368,12 @@ namespace brep
};
if (c (ni, "node_id")) node_id = p.next_expect_string ();
- else if (c (nm, "name")) name = p.next_expect_string ();
else if (c (fn, "full_name")) path = p.next_expect_string ();
else if (c (cu, "clone_url")) clone_url = p.next_expect_string ();
else p.next_expect_value_skip ();
}
if (!ni) missing_member (p, "gh_repository", "node_id");
- if (!nm) missing_member (p, "gh_repository", "name");
if (!fn) missing_member (p, "gh_repository", "full_name");
if (!cu) missing_member (p, "gh_repository", "clone_url");
}
@@ -378,7 +382,6 @@ namespace brep
operator<< (ostream& os, const gh_repository& rep)
{
os << "node_id: " << rep.node_id
- << ", name: " << rep.name
<< ", path: " << rep.path
<< ", clone_url: " << rep.clone_url;
@@ -478,10 +481,6 @@ namespace brep
return p.name () == s ? (v = true) : false;
};
- // Pass true to gh_check_run() to indicate that the we're parsing a
- // webhook event or REST API response (in which case more fields are
- // expected to be present than in a GraphQL response).
- //
if (c (ac, "action")) action = p.next_expect_string ();
else if (c (cs, "check_run")) check_run = gh_check_run_ex (p);
else if (c (rp, "repository")) repository = gh_repository (p);
@@ -513,7 +512,7 @@ namespace brep
{
p.next_expect (event::begin_object);
- bool ac (false), pr (false), rp (false), in (false);
+ bool ac (false), pr (false), bf (false), rp (false), in (false);
// Skip unknown/uninteresting members.
//
@@ -526,6 +525,7 @@ namespace brep
if (c (ac, "action")) action = p.next_expect_string ();
else if (c (pr, "pull_request")) pull_request = gh_pull_request (p);
+ else if (c (bf, "before")) before = p.next_expect_string ();
else if (c (rp, "repository")) repository = gh_repository (p);
else if (c (in, "installation")) installation = gh_installation (p);
else p.next_expect_value_skip ();
@@ -542,6 +542,7 @@ namespace brep
{
os << "action: " << pr.action;
os << ", pull_request { " << pr.pull_request << " }";
+ os << ", before: " << (pr.before ? *pr.before : "null");
os << ", repository { " << pr.repository << " }";
os << ", installation { " << pr.installation << " }";
@@ -575,7 +576,29 @@ namespace brep
};
if (c (tk, "token")) token = p.next_expect_string ();
- else if (c (ea, "expires_at")) expires_at = gh_from_iso8601 (p.next_expect_string ());
+ else if (c (ea, "expires_at"))
+ {
+ string v (p.next_expect_string ());
+
+ try
+ {
+ expires_at = gh_from_iso8601 (v);
+ }
+ catch (const invalid_argument& e)
+ {
+ throw_json (p,
+ "invalid IAT expires_at value '" + v +
+ "': " + e.what ());
+ }
+ catch (const system_error& e)
+ {
+ // Translate for simplicity.
+ //
+ throw_json (p,
+ "unable to convert IAT expires_at value '" + v +
+ "': " + e.what ());
+ }
+ }
else p.next_expect_value_skip ();
}
@@ -610,6 +633,8 @@ namespace brep
timestamp
gh_from_iso8601 (const string& s)
{
- return butl::from_string (s.c_str (), "%Y-%m-%dT%TZ", false /* local */);
+ return butl::from_string (s.c_str (),
+ "%Y-%m-%dT%TZ",
+ false /* local */);
}
}
diff --git a/mod/mod-ci-github-gh.hxx b/mod/mod-ci-github-gh.hxx
index b29904b..392c0e8 100644
--- a/mod/mod-ci-github-gh.hxx
+++ b/mod/mod-ci-github-gh.hxx
@@ -21,8 +21,6 @@ namespace butl
namespace brep
{
- // @@@ Check if any data members are unused (once the dust settles).
-
using build_queued_hints = tenant_service_build_queued::build_queued_hints;
// GitHub request/response types (all start with gh_).
@@ -87,15 +85,12 @@ namespace brep
string node_id;
unsigned int number;
- // @@ TMP The unused base/head members may be useful for trace output when
- // we receive the pull_request webhook.
-
string base_path; // Repository path (<org>/<repo>) under github.com.
- string base_ref; // @@ TODO Remove if remains unused.
- string base_sha; // @@ TODO Remove if remains unused.
+ string base_ref;
+ string base_sha;
string head_path;
- string head_ref; // @@ TODO Remove if remains unused.
+ string head_ref;
string head_sha;
explicit
@@ -107,7 +102,7 @@ namespace brep
// Return the GitHub check run status corresponding to a build_state.
//
string
- gh_to_status (build_state st);
+ gh_to_status (build_state);
// Return the build_state corresponding to a GitHub check run status
// string. Throw invalid_argument if the passed status was invalid.
@@ -118,6 +113,9 @@ namespace brep
// If warning_success is true, then map result_status::warning to SUCCESS
// and to FAILURE otherwise.
//
+ // Throw invalid_argument in case of unsupported result_status value
+ // (currently skip, interrupt).
+ //
string
gh_to_conclusion (result_status, bool warning_success);
@@ -130,7 +128,6 @@ namespace brep
struct gh_repository
{
string node_id;
- string name;
string path; // Repository path (<org>/<repo>) under github.com.
string clone_url;
@@ -183,6 +180,12 @@ namespace brep
string action;
gh_pull_request pull_request;
+
+ // The SHA of the previous commit on the head branch before the current
+ // one. Only present if action is "synchronize".
+ //
+ optional<string> before;
+
gh_repository repository;
gh_installation installation;
@@ -205,9 +208,15 @@ namespace brep
gh_installation_access_token () = default;
};
+ // Throw system_error if the conversion fails due to underlying operating
+ // system errors.
+ //
string
gh_to_iso8601 (timestamp);
+ // Throw invalid_argument if the conversion fails due to the invalid
+ // argument and system_error if due to underlying operating system errors.
+ //
timestamp
gh_from_iso8601 (const string&);
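A minimal sketch of the documented conversion error handling (the timestamp value is hypothetical):

  try
  {
    timestamp t (gh_from_iso8601 ("2024-01-01T00:00:00Z"));
    string s (gh_to_iso8601 (t));
  }
  catch (const invalid_argument&) {/* Invalid timestamp value.     */}
  catch (const system_error&)     {/* Underlying OS-level failure. */}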
diff --git a/mod/mod-ci-github-gq.cxx b/mod/mod-ci-github-gq.cxx
index 4d1c583..774eeed 100644
--- a/mod/mod-ci-github-gq.cxx
+++ b/mod/mod-ci-github-gq.cxx
@@ -17,9 +17,11 @@ namespace brep
// bottom).
//
static const string& gq_name (const string&);
+ static string gq_name (string&&);
static string gq_str (const string&);
static string gq_bool (bool);
static const string& gq_enum (const string&);
+ static string gq_enum (string&&);
[[noreturn]] static void
throw_json (json::parser& p, const string& m)
@@ -163,6 +165,8 @@ namespace brep
// Parse a response to a check_run GraphQL mutation such as `createCheckRun`
// or `updateCheckRun`.
//
+ // Throw invalid_json_input.
+ //
// Example response (only the part we need to parse here):
//
// {
@@ -219,16 +223,17 @@ namespace brep
return r;
}
- // Send a GraphQL mutation request `rq` that operates on one or more check
- // runs. Update the check runs in `crs` with the new state and the node ID
- // if unset. Return false and issue diagnostics if the request failed.
+ // Send a GraphQL mutation request `rq` that creates or updates one or more
+ // check runs. The requested build state is taken from each check_run
+ // object. Update the check runs in `crs` with the new data (state, node ID
+ // if unset, and state_synced). Return false and issue diagnostics if the
+ // request failed.
//
static bool
gq_mutate_check_runs (const basic_mark& error,
vector<check_run>& crs,
const string& iat,
- string rq,
- build_state st) noexcept
+ string rq)
{
vector<gh_check_run> rcrs;
@@ -264,16 +269,12 @@ namespace brep
//
const gh_check_run& rcr (rcrs[i]); // Received check run.
+ build_state st (crs[i].state); // Requested state.
build_state rst (gh_from_status (rcr.status)); // Received state.
// Note that GitHub won't allow us to change a built check run to
// any other state (but all other transitions are allowed).
//
- // @@ Are we handling the case where the resulting state (built)
- // differs from what we expect?
- //
- // @@@ Does built-to-built transition updates status?
- //
if (rst != st && rst != build_state::built)
{
error << "unexpected check_run status: received '" << rcr.status
@@ -297,10 +298,10 @@ namespace brep
error << "unexpected number of check_run objects in response";
}
else
- error << "failed to update check run: error HTTP response status "
+ error << "failed to mutate check runs: error HTTP response status "
<< sc;
}
- catch (const json::invalid_json_input& e)
+ catch (const json::invalid_json_input& e) // struct resp (via github_post())
{
// Note: e.name is the GitHub API endpoint.
//
@@ -308,16 +309,16 @@ namespace brep
<< e.line << ", column: " << e.column << ", byte offset: "
<< e.position << ", error: " << e;
}
- catch (const invalid_argument& e)
+ catch (const invalid_argument& e) // github_post()
{
error << "malformed header(s) in response: " << e;
}
- catch (const system_error& e)
+ catch (const system_error& e) // github_post()
{
error << "unable to mutate check runs (errno=" << e.code () << "): "
<< e.what ();
}
- catch (const runtime_error& e) // From gq_parse_response_check_runs().
+ catch (const runtime_error& e) // gq_parse_response_check_runs()
{
// GitHub response contained error(s) (could be ours or theirs at this
// point).
@@ -350,25 +351,23 @@ namespace brep
// Serialize `createCheckRun` mutations for one or more builds to GraphQL.
//
- // The conclusion argument (`co`) is required if the build_state is built
- // because GitHub does not allow a check run status of completed without a
- // conclusion.
+ // The check run parameters (names, build states, details_urls, etc.) are
+ // taken from each object in `crs`.
+ //
+ // Note that build results are not supported because we never create
+ // multiple check runs in the built state.
//
// The details URL argument (`du`) can be empty for queued but not for the
// other states.
//
+ // Throw invalid_argument if any of the observed check run members are not
+ // valid GraphQL values (string, enum, etc).
+ //
static string
gq_mutation_create_check_runs (const string& ri, // Repository ID
const string& hs, // Head SHA
- const optional<string>& du, // Details URL.
- const vector<check_run>& crs,
- const string& st, // Check run status.
- optional<gq_built_result> br = nullopt)
+ const vector<check_run>& crs)
{
- // Ensure details URL is non-empty if present.
- //
- assert (!du || !du->empty ());
-
ostringstream os;
os << "mutation {" << '\n';
@@ -377,26 +376,25 @@ namespace brep
//
for (size_t i (0); i != crs.size (); ++i)
{
+ const check_run& cr (crs[i]);
+
+ assert (cr.state != build_state::built); // Not supported.
+
+ // Ensure details URL is non-empty if present.
+ //
+ assert (!cr.details_url || !cr.details_url->empty ());
+
string al ("cr" + to_string (i)); // Field alias.
os << gq_name (al) << ":createCheckRun(input: {" << '\n'
- << " name: " << gq_str (crs[i].name) << '\n'
+ << " name: " << gq_str (cr.name) << '\n'
<< " repositoryId: " << gq_str (ri) << '\n'
<< " headSha: " << gq_str (hs) << '\n'
- << " status: " << gq_enum (st);
- if (du)
- {
- os << '\n';
- os << " detailsUrl: " << gq_str (*du);
- }
- if (br)
+ << " status: " << gq_enum (gh_to_status (cr.state));
+ if (cr.details_url)
{
os << '\n';
- os << " conclusion: " << gq_enum (br->conclusion) << '\n'
- << " output: {" << '\n'
- << " title: " << gq_str (br->title) << '\n'
- << " summary: " << gq_str (br->summary) << '\n'
- << " }";
+ os << " detailsUrl: " << gq_str (*cr.details_url);
}
os << "})" << '\n'
// Specify the selection set (fields to be returned). Note that we
@@ -417,12 +415,83 @@ namespace brep
return os.str ();
}
+ // Serialize a `createCheckRun` mutation for a build to GraphQL.
+ //
+ // The build result argument (`br`) is required if the build_state is built
+ // because GitHub does not allow a check run status of completed without a
+ // conclusion.
+ //
+ // The details URL argument (`du`) can be empty for queued but not for the
+ // other states.
+ //
+ // Throw invalid_argument if any of the arguments or observed check run
+ // members are not valid GraphQL values (string, enum, etc).
+ //
+ static string
+ gq_mutation_create_check_run (const string& ri, // Repository ID
+ const string& hs, // Head SHA
+ const optional<string>& du, // Details URL.
+ const check_run& cr,
+ const string& st, // Check run status.
+ optional<gq_built_result> br = nullopt)
+ {
+ // Ensure details URL is non-empty if present.
+ //
+ assert (!du || !du->empty ());
+
+ ostringstream os;
+
+ os << "mutation {" << '\n';
+
+ // Serialize a `createCheckRun` for the build.
+ //
+ os << gq_name ("cr0") << ":createCheckRun(input: {" << '\n'
+ << " name: " << gq_str (cr.name) << '\n'
+ << " repositoryId: " << gq_str (ri) << '\n'
+ << " headSha: " << gq_str (hs) << '\n'
+ << " status: " << gq_enum (st);
+ if (du)
+ {
+ os << '\n';
+ os << " detailsUrl: " << gq_str (*du);
+ }
+ if (br)
+ {
+ os << '\n';
+ os << " conclusion: " << gq_enum (br->conclusion) << '\n'
+ << " output: {" << '\n'
+ << " title: " << gq_str (br->title) << '\n'
+ << " summary: " << gq_str (br->summary) << '\n'
+ << " }";
+ }
+ os << "})" << '\n'
+ // Specify the selection set (fields to be returned). Note that we
+ // rename `id` to `node_id` (using a field alias) for consistency with
+ // webhook events and REST API responses.
+ //
+ << "{" << '\n'
+ << " checkRun {" << '\n'
+ << " node_id: id" << '\n'
+ << " name" << '\n'
+ << " status" << '\n'
+ << " }" << '\n'
+ << "}" << '\n';
+
+ os << "}" << '\n';
+
+ return os.str ();
+ }
+
+
// Serialize an `updateCheckRun` mutation for one build to GraphQL.
//
// The `co` (conclusion) argument is required if the build_state is built
// because GitHub does not allow updating a check run to completed without a
// conclusion.
//
+ // Throw invalid_argument if any of the arguments are invalid values (of
+ // GraphQL types or otherwise).
+ //
static string
gq_mutation_update_check_run (const string& ri, // Repository ID.
const string& ni, // Node ID.
@@ -444,8 +513,19 @@ namespace brep
<< " status: " << gq_enum (st);
if (sa)
{
- os << '\n';
- os << " startedAt: " << gq_str (gh_to_iso8601 (*sa));
+ try
+ {
+ os << '\n';
+ os << " startedAt: " << gq_str (gh_to_iso8601 (*sa));
+ }
+ catch (const system_error& e)
+ {
+ // Translate for simplicity.
+ //
+ throw invalid_argument ("unable to convert started_at value " +
+ to_string (system_clock::to_time_t (*sa)) +
+ ": " + e.what ());
+ }
}
if (du)
{
@@ -483,23 +563,19 @@ namespace brep
vector<check_run>& crs,
const string& iat,
const string& rid,
- const string& hs,
- build_state st)
+ const string& hs)
{
// No support for result_status so state cannot be built.
//
- assert (st != build_state::built);
+#ifndef NDEBUG
+ for (const check_run& cr: crs)
+ assert (cr.state != build_state::built);
+#endif
- // Empty details URL because it's not available until building.
- //
string rq (
- gq_serialize_request (gq_mutation_create_check_runs (rid,
- hs,
- nullopt,
- crs,
- gh_to_status (st))));
+ gq_serialize_request (gq_mutation_create_check_runs (rid, hs, crs)));
- return gq_mutate_check_runs (error, crs, iat, move (rq), st);
+ return gq_mutate_check_runs (error, crs, iat, move (rq));
}
bool
@@ -516,18 +592,19 @@ namespace brep
//
assert (st != build_state::built || br);
- vector<check_run> crs {move (cr)};
-
string rq (
gq_serialize_request (
- gq_mutation_create_check_runs (rid,
- hs,
- du,
- crs,
- gh_to_status (st),
- move (br))));
+ gq_mutation_create_check_run (rid,
+ hs,
+ du,
+ cr,
+ gh_to_status (st),
+ move (br))));
- bool r (gq_mutate_check_runs (error, crs, iat, move (rq), st));
+ vector<check_run> crs {move (cr)};
+ crs[0].state = st;
+
+ bool r (gq_mutate_check_runs (error, crs, iat, move (rq)));
cr = move (crs[0]);
@@ -565,33 +642,19 @@ namespace brep
move (br))));
vector<check_run> crs {move (cr)};
+ crs[0].state = st;
- bool r (gq_mutate_check_runs (error, crs, iat, move (rq), st));
+ bool r (gq_mutate_check_runs (error, crs, iat, move (rq)));
cr = move (crs[0]);
return r;
}
- bool
- gq_update_or_create_check_run (const basic_mark& error,
- check_run& cr,
- const string& iat,
- const string& rid,
- const optional<string>& nid,
- const string& hs,
- const optional<string>& du,
- build_state st,
- optional<gq_built_result> br)
- {
- if (nid)
- return gq_update_check_run (error, cr, iat, rid, *nid, du, st, br);
- else
- return gq_create_check_run (error, cr, iat, rid, hs, du, st, br);
- }
-
// Serialize a GraphQL query that fetches a pull request from GitHub.
//
+ // Throw invalid_argument if the node id is not a valid GraphQL string.
+ //
static string
gq_query_pr_mergeability (const string& nid)
{
@@ -616,6 +679,8 @@ namespace brep
const string& iat,
const string& nid)
{
+ // Let invalid_argument from gq_query_pr_mergeability() propagate.
+ //
string rq (gq_serialize_request (gq_query_pr_mergeability (nid)));
try
@@ -718,7 +783,7 @@ namespace brep
error << "failed to fetch pull request: error HTTP response status "
<< sc;
}
- catch (const json::invalid_json_input& e)
+ catch (const json::invalid_json_input& e) // struct resp (via github_post())
{
// Note: e.name is the GitHub API endpoint.
//
@@ -726,16 +791,16 @@ namespace brep
<< e.line << ", column: " << e.column << ", byte offset: "
<< e.position << ", error: " << e;
}
- catch (const invalid_argument& e)
+ catch (const invalid_argument& e) // github_post()
{
error << "malformed header(s) in response: " << e;
}
- catch (const system_error& e)
+ catch (const system_error& e) // github_post()
{
error << "unable to fetch pull request (errno=" << e.code () << "): "
<< e.what ();
}
- catch (const runtime_error& e) // From response type's parsing constructor.
+ catch (const runtime_error& e) // struct resp
{
// GitHub response contained error(s) (could be ours or theirs at this
// point).
@@ -762,8 +827,6 @@ namespace brep
//
// Return the name or throw invalid_argument if it is invalid.
//
- // @@ TODO: dangerous API.
- //
static const string&
gq_name (const string& v)
{
@@ -782,6 +845,13 @@ namespace brep
return v;
}
+ static string
+ gq_name (string&& v)
+ {
+ gq_name (v);
+ return move (v);
+ }
+
// Serialize a string to GraphQL.
//
// Return the serialized string or throw invalid_argument if the string is
@@ -836,8 +906,6 @@ namespace brep
//
// Return the enum value or throw invalid_argument if it is invalid.
//
- // @@ TODO: dangerous API.
- //
static const string&
gq_enum (const string& v)
{
@@ -846,4 +914,12 @@ namespace brep
return gq_name (v);
}
+
+ static string
+ gq_enum (string&& v)
+ {
+ gq_enum (v);
+ return move (v);
+ }
+
}
diff --git a/mod/mod-ci-github-gq.hxx b/mod/mod-ci-github-gq.hxx
index 0353281..50950d4 100644
--- a/mod/mod-ci-github-gq.hxx
+++ b/mod/mod-ci-github-gq.hxx
@@ -19,12 +19,13 @@ namespace brep
// GraphQL functions (all start with gq_).
//
- // Create a new check run on GitHub for each build. Update `check_runs` with
- // the new data (node id, state, and state_synced). Return false and issue
- // diagnostics if the request failed.
+ // Create a new check run on GitHub for each build with the build state,
+ // name, and details_url taken from each check_run object. Update
+ // `check_runs` with the new data (node id and state_synced). Return false
+ // and issue diagnostics if the request failed.
//
- // Note: no details_url yet since there will be no entry in the build result
- // search page until the task starts building.
+ // Throw invalid_argument if the passed data is invalid, missing, or
+ // inconsistent.
//
// Note that creating a check_run named `foo` will effectively replace any
// existing check_runs with that name. They will still exist on the GitHub
@@ -36,13 +37,15 @@ namespace brep
vector<check_run>& check_runs,
const string& installation_access_token,
const string& repository_id,
- const string& head_sha,
- build_state);
+ const string& head_sha);
// Create a new check run on GitHub for a build. Update `cr` with the new
// data (node id, state, and state_synced). Return false and issue
// diagnostics if the request failed.
//
+ // Throw invalid_argument if the passed data is invalid, missing, or
+ // inconsistent.
+ //
// If the details_url is absent GitHub will use the app's homepage.
//
// The gq_built_result is required if the build_state is built because
@@ -66,11 +69,11 @@ namespace brep
build_state,
optional<gq_built_result> = nullopt);
- // Update a check run on GitHub.
+ // Update a check run on GitHub. Update `cr` with the new data (state and
+ // state_synced). Return false and issue diagnostics if the request failed.
//
- // Send a GraphQL request that updates an existing check run. Update `cr`
- // with the new data (state and state_synced). Return false and issue
- // diagnostics if the request failed.
+ // Throw invalid_argument if the passed data is invalid, missing, or
+ // inconsistent.
//
// Note that GitHub allows any state transitions except from built (but
// built to built is allowed). The latter case is signalled by setting the
@@ -92,24 +95,6 @@ namespace brep
build_state,
optional<gq_built_result> = nullopt);
- // Update a check run on GitHub if node_id is present, otherwise create a
- // new check run associated with head_sha. In the latter case, the new
- // node_id is set in the passed check_run object.
- //
- // This is a wrapper of gq_update_check_run() and gq_create_check_run() for
- // convenience.
- //
- bool
- gq_update_or_create_check_run (const basic_mark& error,
- check_run& cr,
- const string& installation_access_token,
- const string& repository_id,
- const optional<string>& node_id,
- const string& head_sha,
- const optional<string>& details_url,
- build_state,
- optional<gq_built_result> = nullopt);
-
// Fetch pre-check information for a pull request from GitHub. This
// information is used to decide whether or not to CI the PR and is
// comprised of the PR's head commit SHA, whether its head branch is behind
@@ -123,6 +108,8 @@ namespace brep
// Issue diagnostics and return absent if the request failed (which means it
// will be treated by the caller as still being generated).
//
+ // Throw invalid_argument if the node id is invalid.
+ //
// Note that the first request causes GitHub to start preparing the test
// merge commit.
//
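A minimal usage sketch of the revised gq_create_check_runs() interface, assuming error, iat, repo_node_id, and head_sha are in scope and using hypothetical configuration names (the build state now comes from each check_run object):

  vector<check_run> crs (2);
  crs[0].name = "linux_debian_12-gcc_13";
  crs[0].state = build_state::queued;
  crs[1].name = "windows_10-msvc_17.10";
  crs[1].state = build_state::queued;

  if (gq_create_check_runs (error, crs, iat.token, repo_node_id, head_sha))
  {
    // On success each check_run has node_id set and state_synced true.
  }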
diff --git a/mod/mod-ci-github-service-data.cxx b/mod/mod-ci-github-service-data.cxx
index 18f6eeb..31a556d 100644
--- a/mod/mod-ci-github-service-data.cxx
+++ b/mod/mod-ci-github-service-data.cxx
@@ -10,6 +10,15 @@ namespace brep
{
using event = json::event;
+ [[noreturn]] static void
+ throw_json (json::parser& p, const string& m)
+ {
+ throw json::invalid_json_input (
+ p.input_name,
+ p.line (), p.column (), p.position (),
+ m);
+ }
+
service_data::
service_data (const string& json)
{
@@ -32,11 +41,7 @@ namespace brep
if (v == "local") kind = local;
else if (v == "remote") kind = remote;
else
- {
- throw json::invalid_json_input (
- p.input_name, p.line (), p.column (), p.position (),
- "invalid service data kind: '" + v + '\'');
- }
+ throw_json (p, "invalid service data kind: '" + v + '\'');
}
pre_check = p.next_expect_member_boolean<bool> ("pre_check");
@@ -44,13 +49,10 @@ namespace brep
warning_success = p.next_expect_member_boolean<bool> ("warning_success");
- // Installation access token.
+ // Installation access token (IAT).
//
- p.next_expect_member_object ("installation_access");
- installation_access.token = p.next_expect_member_string ("token");
- installation_access.expires_at =
- gh_from_iso8601 (p.next_expect_member_string ("expires_at"));
- p.next_expect (event::end_object);
+ p.next_expect_name ("installation_access");
+ installation_access = gh_installation_access_token (p);
installation_id =
p.next_expect_member_number<uint64_t> ("installation_id");
@@ -82,7 +84,16 @@ namespace brep
nid = *v;
}
- build_state s (to_build_state (p.next_expect_member_string ("state")));
+ build_state s;
+ try
+ {
+ s = to_build_state (p.next_expect_member_string ("state"));
+ }
+ catch (const invalid_argument& e)
+ {
+ throw_json (p, e.what ());
+ }
+
bool ss (p.next_expect_member_boolean<bool> ("state_synced"));
optional<result_status> rs;
@@ -90,7 +101,14 @@ namespace brep
string* v (p.next_expect_member_string_null ("status"));
if (v != nullptr)
{
- rs = bbot::to_result_status (*v);
+ try
+ {
+ rs = bbot::to_result_status (*v);
+ }
+ catch (const invalid_argument& e)
+ {
+ throw_json (p, e.what ());
+ }
assert (s == build_state::built);
}
}
@@ -100,6 +118,8 @@ namespace brep
p.next_expect (event::end_object);
}
+ completed = p.next_expect_member_boolean<bool> ("completed");
+
{
string* s (p.next_expect_member_string_null ("conclusion_node_id"));
if (s != nullptr)
@@ -187,11 +207,30 @@ namespace brep
s.member ("warning_success", warning_success);
- // Installation access token.
+ // Installation access token (IAT).
//
s.member_begin_object ("installation_access");
s.member ("token", installation_access.token);
- s.member ("expires_at", gh_to_iso8601 (installation_access.expires_at));
+
+ // IAT expires_at timestamp.
+ //
+ {
+ string v;
+ try
+ {
+ v = gh_to_iso8601 (installation_access.expires_at);
+ }
+ catch (const system_error& e)
+ {
+ // Translate for simplicity.
+ //
+ throw invalid_argument ("unable to convert IAT expires_at value " +
+ to_string (system_clock::to_time_t (
+ installation_access.expires_at)));
+ }
+ s.member ("expires_at", move (v));
+ }
+
s.end_object ();
s.member ("installation_id", installation_id);
@@ -233,7 +272,7 @@ namespace brep
if (cr.status)
{
assert (cr.state == build_state::built);
- s.value (to_string (*cr.status));
+ s.value (to_string (*cr.status)); // Doesn't throw.
}
else
s.value (nullptr);
@@ -242,6 +281,8 @@ namespace brep
}
s.end_array ();
+ s.member ("completed", completed);
+
s.member_name ("conclusion_node_id");
if (conclusion_node_id)
s.value (*conclusion_node_id);
diff --git a/mod/mod-ci-github-service-data.hxx b/mod/mod-ci-github-service-data.hxx
index bb845cb..0f4c760 100644
--- a/mod/mod-ci-github-service-data.hxx
+++ b/mod/mod-ci-github-service-data.hxx
@@ -11,8 +11,6 @@
namespace brep
{
- // @@@ Check if any data members are unused (once the dust settles).
-
// Service data associated with the tenant (corresponds to GH check suite).
//
// It is always a top-level JSON object and the first member is always the
@@ -35,6 +33,11 @@ namespace brep
optional<result_status> status; // Only if state is built & synced.
+ // Note: never serialized (only used to pass information to the GraphQL
+ // functions).
+ //
+ optional<string> details_url;
+
string
state_string () const
{
@@ -94,7 +97,7 @@ namespace brep
// The following two are only used for pull requests.
//
- // @@ TODO/LATER: maybe put them in a struct?
+ // @@ TODO/LATER: maybe put them in a struct, if more members?
//
optional<string> pr_node_id;
optional<uint32_t> pr_number;
@@ -134,6 +137,9 @@ namespace brep
//
// Throw invalid_argument if the schema version is not supported.
//
+ // Throw invalid_argument (invalid_json_input) in case of malformed JSON
+ // or any invalid values.
+ //
explicit
service_data (const string& json);
@@ -174,6 +180,11 @@ namespace brep
// Serialize to JSON.
//
+ // Throw invalid_argument if any values are invalid.
+ //
+ // May also throw invalid_json_output but that would be a programming
+ // error.
+ //
string
json () const;
};
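A minimal sketch of the documented error handling when round-tripping the service data, assuming s holds a service data JSON string:

  try
  {
    service_data sd (s);       // invalid_argument (including
                               // json::invalid_json_input) on bad input.
    string json (sd.json ());  // invalid_argument on invalid values.
  }
  catch (const invalid_argument& e)
  {
    // Diagnose and bail out: the service data is unusable.
  }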
diff --git a/mod/mod-ci-github.cxx b/mod/mod-ci-github.cxx
index 6dfaa5f..394638a 100644
--- a/mod/mod-ci-github.cxx
+++ b/mod/mod-ci-github.cxx
@@ -19,26 +19,6 @@
#include <stdexcept>
-// @@ Remaining TODOs
-//
-// - Rerequested checks
-//
-// - check_suite (action: rerequested): received when user re-runs all
-// checks.
-//
-// - check_run (action: rerequested): received when user re-runs a
-// specific check or all failed checks.
-//
-// @@ TMP I have confirmed that the above is accurate.
-//
-// Will need to extract a few more fields from check_runs, but the layout
-// is very similar to that of check_suite.
-//
-// - Choose strong webhook secret (when deploying).
-//
-// - Check that delivery UUID has not been received before (replay attack).
-//
-
// Resources:
//
// Creating an App:
@@ -280,9 +260,6 @@ namespace brep
// is that we want be "notified" of new actions at which point we can
// decide whether to ignore them or to handle.
//
- // @@ There is also check_run even (re-requested by user, either
- // individual check run or all the failed check runs).
- //
if (event == "check_suite")
{
gh_check_suite_event cs;
@@ -316,13 +293,10 @@ namespace brep
else if (cs.action == "completed")
{
// GitHub thinks that "all the check runs in this check suite have
- // completed and a conclusion is available". Looks like this one we
- // ignore?
- //
- // What if our bookkeeping says otherwise? But then we can't even
- // access the service data easily here. @@ TODO: maybe/later.
+ // completed and a conclusion is available". Check with our own
+ // bookkeeping and log an error if there is a mismatch.
//
- return true;
+ return handle_check_suite_completed (move (cs), warning_success);
}
else
{
@@ -398,7 +372,8 @@ namespace brep
throw invalid_request (400, move (m));
}
- if (pr.action == "opened" || pr.action == "synchronize")
+ if (pr.action == "opened" ||
+ pr.action == "synchronize")
{
// opened
// A pull request was opened.
@@ -406,23 +381,78 @@ namespace brep
// synchronize
// A pull request's head branch was updated from the base branch or
// new commits were pushed to the head branch. (Note that there is
- // no equivalent event for the base branch. That case gets handled
- // in handle_check_suite_request() instead. @@ Not anymore.)
+ // no equivalent event for the base branch.)
//
- // Note that both cases are handled the same: we start a new CI
+ // Note that both cases are handled similarly: we start a new CI
// request which will be reported on the new commit id.
//
return handle_pull_request (move (pr), warning_success);
}
- else
+ else if (pr.action == "edited")
+ {
+ // PR base branch changed (to a different branch) besides other
+ // irrelevant changes (title, body, etc).
+ //
+ // This is in a sense a special case of the base branch moving. In
+ // that case we don't do anything (due to the head sharing problem)
+ // relying instead on the branch protection rule. So it makes sense
+ // to do the same here.
+ //
+ return true;
+ }
+ else if (pr.action == "closed")
+ {
+ // PR has been closed (as merged or not; see merged member). Also
+ // apparently received if base branch is deleted (and the same
+ // for head branch). See also the reopened event below.
+ //
+ // While it may seem natural to cancel the CI for the closed PR, it
+ // might actually be useful to have a completed CI record. GitHub
+ // doesn't prevent us from publishing CI results for the closed PR
+ // (even if both base and head branches were deleted). And if such a
+ // PR is reopened, the CI results remain.
+ //
+ return true;
+ }
+ else if (pr.action == "reopened")
{
- // Ignore the remaining actions by sending a 200 response with empty
- // body.
+ // Previously closed PR has been reopened.
//
- // @@ Ignore known but log unknown, as in check_suite above?
+ // Since we don't cancel the CI for a closed PR, there is nothing
+ // to do if it is reopened.
//
return true;
}
+ else if (pr.action == "assigned" ||
+ pr.action == "auto_merge_disabled" ||
+ pr.action == "auto_merge_enabled" ||
+ pr.action == "converted_to_draft" ||
+ pr.action == "demilestoned" ||
+ pr.action == "dequeued" ||
+ pr.action == "enqueued" ||
+ pr.action == "labeled" ||
+ pr.action == "locked" ||
+ pr.action == "milestoned" ||
+ pr.action == "ready_for_review" ||
+ pr.action == "review_request_removed" ||
+ pr.action == "review_requested" ||
+ pr.action == "unassigned" ||
+ pr.action == "unlabeled" ||
+ pr.action == "unlocked")
+ {
+ // These have no relation to CI.
+ //
+ return true;
+ }
+ else
+ {
+ // Ignore unknown actions by sending a 200 response with empty body
+ // but also log as an error since we want to notice new actions.
+ //
+ error << "unknown action '" << pr.action << "' in pull_request event";
+
+ return true;
+ }
}
else
{
@@ -502,20 +532,18 @@ namespace brep
l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
- // @@ What happens if we call this functions with an already existing
- // node_id (e.g., replay attack). See the UUID header above.
- //
-
// While it would have been nice to cancel CIs of PRs with this branch as
- // base not to waste resources, there are complications: Firsty, we can
- // only do this for remote PRs (since local PRs may share the result with
- // branch push). Secondly, we try to do our best even if the branch
- // protection rule for head behind is not enabled. In this case, it would
- // be good to complete the CI. So maybe/later.
+ // base not to waste resources, there are complications: Firstly, we can
+ // only do this for remote PRs (since local PRs will most likely share the
+ // result with branch push). Secondly, we try to do our best even if the
+ // branch protection rule for head behind is not enabled. In this case, it
+ // would be good to complete the CI. So maybe/later. See also the head
+ // case in handle_pull_request(), where we do cancel remote PRs that are
+ // not shared.
// Service id that uniquely identifies the CI tenant.
//
- string sid (cs.repository.node_id + ":" + cs.check_suite.head_sha);
+ string sid (cs.repository.node_id + ':' + cs.check_suite.head_sha);
// If the user requests a rebuild of the (entire) PR, then this manifests
// as the check_suite rather than pull_request event. Specifically:
@@ -541,11 +569,13 @@ namespace brep
{
kind = service_data::remote;
- if (optional<tenant_service> ts = find (*build_db_, "ci-github", sid))
+ if (optional<tenant_data> d = find (*build_db_, "ci-github", sid))
{
+ tenant_service& ts (d->service);
+
try
{
- service_data sd (*ts->data);
+ service_data sd (*ts.data);
check_sha = move (sd.check_sha); // Test merge commit.
}
catch (const invalid_argument& e)
@@ -630,6 +660,35 @@ namespace brep
return true;
}
+ bool ci_github::
+ handle_check_suite_completed (gh_check_suite_event cs, bool warning_success)
+ {
+ // The plan is as follows:
+ //
+ // 1. Load the service data.
+ //
+ // 2. Verify it is completed.
+ //
+ // 3. Verify (like in build_built()) that all the check runs are
+ // completed.
+ //
+ // 4. Verify the result matches what GitHub thinks it is (if easy).
+
+ return true;
+ }
+
+ // Create a gq_built_result.
+ //
+ // Throw invalid_argument in case of invalid result_status.
+ //
+ static gq_built_result
+ make_built_result (result_status rs, bool warning_success, string message)
+ {
+ return {gh_to_conclusion (rs, warning_success),
+ circle (rs) + ' ' + ucase (to_string (rs)),
+ move (message)};
+ }
+
// Parse a check run details URL into a build_id.
//
// Return nullopt if the URL is invalid.
@@ -637,6 +696,12 @@ namespace brep
static optional<build_id>
parse_details_url (const string& details_url);
+ // Note that GitHub always posts a message to their GUI saying "You have
+ // successfully requested <check_run_name> be rerun", regardless of what
+ // HTTP status code we respond with. However we do return error status codes
+ // when there is no better option (like failing the conclusion) in case they
+ // start handling them someday.
+ //
bool ci_github::
handle_check_run_rerequest (const gh_check_run_event& cr,
bool warning_success)
@@ -645,20 +710,40 @@ namespace brep
l3 ([&]{trace << "check_run event { " << cr << " }";});
- // Fail if this is the conclusion check run.
+ // The overall plan is as follows:
//
- if (cr.check_run.name == conclusion_check_run_name)
- {
- // @@ Fail conclusion check run with appropriate message and reurn
- // true.
-
- l3 ([&]{trace << "ignoring conclusion check_run";});
-
- // 422 Unprocessable Content: The request was well-formed (i.e.,
- // syntactically correct) but could not be processed.
- //
- throw invalid_request (422, "Conclusion check run cannot be rebuilt");
- }
+ // 1. Load service data.
+ //
+ // 2. If the tenant is archived, then fail (re-create) both the check run
+ // and the conclusion with appropriate diagnostics.
+ //
+ // 3. If the check run is in the queued state, then do nothing.
+ //
+ // 4. Re-create the check run in the queued state and the conclusion in
+ // the building state. Note: do in a single request to make sure we
+ // either "win" or "loose" the potential race for both (important
+ // for #7).
+ //
+ // 5. Call the rebuild() function to attempt to schedule a rebuild. Pass
+ // the update function that does the following (if called):
+ //
+ // a. Save new node ids.
+ //
+ // b. Update the check run state (may also not exist).
+ //
+ // c. Clear the completed flag if true.
+ //
+ // 6. If the result of rebuild() indicates the tenant is archived, then
+ // fail (update) both the check run and conclusion with appropriate
+ // diagnostics.
+ //
+ // 7. If original state is queued (no rebuild was scheduled), then fail
+ // (update) both the check run and the conclusion.
+ //
+ // Note that while conceptually we are updating existing check runs, in
+ // practice we have to re-create as new check runs in order to replace the
+ // existing ones because GitHub does not allow transitioning out of the
+ // built state.
// Get a new installation access token.
//
@@ -680,77 +765,156 @@ namespace brep
return iat;
};
- // Create a new conclusion check run, replacing the existing one.
+ const string& repo_node_id (cr.repository.node_id);
+ const string& head_sha (cr.check_run.check_suite.head_sha);
+
+ // Prepare the build and conclusion check runs. They are sent to GitHub in
+ // a single request (unless something goes wrong) so store them together
+ // from the outset.
//
- // Return the check run on success or nullopt on failure.
+ vector<check_run> check_runs (2);
+ check_run& bcr (check_runs[0]); // Build check run
+ check_run& ccr (check_runs[1]); // Conclusion check run
+
+ ccr.name = conclusion_check_run_name;
+
+ // Load the service data, failing the check runs if the tenant has been
+ // archived.
//
- auto create_conclusion_cr =
- [&cr, &error, warning_success] (const gh_installation_access_token& iat,
- build_state bs,
- optional<result_status> rs = nullopt,
- optional<string> msg = nullopt)
- -> optional<check_run>
+ service_data sd;
+ string tenant_id;
{
- optional<gq_built_result> br;
- if (rs)
+ // Service id that uniquely identifies the CI tenant.
+ //
+ string sid (repo_node_id + ':' + head_sha);
+
+ if (optional<tenant_data> d = find (*build_db_, "ci-github", sid))
{
- assert (msg);
+ if (d->archived) // Tenant is archived
+ {
+ // Fail (re-create) the check runs.
+ //
+ optional<gh_installation_access_token> iat (get_iat ());
+ if (!iat)
+ throw server_error ();
+
+ gq_built_result br (
+ make_built_result (
+ result_status::error, warning_success,
+ "Unable to rebuild individual configuration: build has "
+ "been archived"));
+
+ // Try to update the conclusion check run even if the first update
+ // fails.
+ //
+ bool f (false); // Failed.
- br = gq_built_result (gh_to_conclusion (*rs, warning_success),
- circle (*rs) + ' ' + ucase (to_string (*rs)),
- move (*msg));
+ if (gq_create_check_run (error, bcr, iat->token,
+ repo_node_id, head_sha,
+ cr.check_run.details_url,
+ build_state::built, br))
+ {
+ l3 ([&]{trace << "created check_run { " << bcr << " }";});
+ }
+ else
+ {
+ error << "check_run " << cr.check_run.node_id
+ << ": unable to re-create check run";
+ f = true;
+ }
+
+ if (gq_create_check_run (error, ccr, iat->token,
+ repo_node_id, head_sha,
+ nullopt /* details_url */,
+ build_state::built, move (br)))
+ {
+ l3 ([&]{trace << "created conclusion check_run { " << ccr << " }";});
+ }
+ else
+ {
+ error << "check_run " << cr.check_run.node_id
+ << ": unable to re-create conclusion check run";
+ f = true;
+ }
+
+ // Fail the handler if either of the check runs could not be
+ // updated.
+ //
+ if (f)
+ throw server_error ();
+
+ return true;
+ }
+
+ tenant_service& ts (d->service);
+
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "failed to parse service data: " << e;
+ }
+
+ tenant_id = d->tenant_id;
}
+ else
+ {
+ // No such tenant.
+ //
+ fail << "check run " << cr.check_run.node_id
+ << " re-requested but tenant_service with id " << sid
+ << " does not exist";
+ }
+ }
+
+ // Get a new IAT if the one from the service data has expired.
+ //
+ const gh_installation_access_token* iat (nullptr);
+ optional<gh_installation_access_token> new_iat;
+
+ if (system_clock::now () > sd.installation_access.expires_at)
+ {
+ if ((new_iat = get_iat ()))
+ iat = &*new_iat;
+ else
+ throw server_error ();
+ }
+ else
+ iat = &sd.installation_access;
- check_run r;
- r.name = conclusion_check_run_name;
+ // Fail if it's the conclusion check run that is being re-requested.
+ //
+ if (cr.check_run.name == conclusion_check_run_name)
+ {
+ l3 ([&]{trace << "re-requested conclusion check_run";});
+
+ if (!sd.conclusion_node_id)
+ fail << "no conclusion node id for check run " << cr.check_run.node_id;
+
+ gq_built_result br (
+ make_built_result (result_status::error, warning_success,
+ "Conclusion check run cannot be rebuilt"));
- if (gq_create_check_run (error, r, iat.token,
- rni, hs,
+ // Fail (update) the conclusion check run.
+ //
+ if (gq_update_check_run (error, ccr, iat->token,
+ repo_node_id, *sd.conclusion_node_id,
nullopt /* details_url */,
- bs, move (br)))
+ build_state::built, move (br)))
{
- return r;
+ l3 ([&]{trace << "updated conclusion check_run { " << ccr << " }";});
}
else
- return nullopt;
- };
+ {
+ fail << "check run " << cr.check_run.node_id
+ << ": unable to update conclusion check run "
+ << *sd.conclusion_node_id;
+ }
- // The overall plan is as follows:
- //
- // 1. Call the rebuild() function to attempt to schedule a rebuild. Pass
- // the update function that does the following (if called):
- //
- // a. Update the check run being rebuilt (may also not exist).
- //
- // b. Clear the completed flag if true.
- //
- // c. "Return" the service data to be used after the call.
- //
- // 2. If the result of rebuild() indicates the tenant is archived, fail
- // the conclusion check run with appropriate diagnostics.
- //
- // 3. If original state is queued, then no rebuild was scheduled and we do
- // nothing.
- //
- // 4. Otherwise (the original state is building or built):
- //
- // a. Change the check run state to queued.
- //
- // b. Change the conclusion check run to building (do unconditionally
- // to mitigate races).
- //
- // Note that while conceptually we are updating existing check runs, in
- // practice we have to create new check runs to replace the existing ones
- // because GitHub does not allow transitioning out of the built state.
- //
- // This results in a new node id for each check run but we can't save them
- // to the service data after the rebuild() call. As a workaround, when
- // updating the service data we 1) clear the re-requested check run's node
- // id and set the state_synced flag to true to signal to build_building()
- // and build_built() that it needs to create a new check run; and 2) clear
- // the conclusion check run's node id to cause build_built() to create a
- // new conclusion check run. And these two check runs' node ids will be
- // saved to the service data.
+ return true;
+ }
// Parse the check_run's details_url to extract build id.
//
@@ -766,28 +930,101 @@ namespace brep
<< ": failed to extract build id from details_url";
}
- // The IAT retrieved from the service data.
+ // Initialize the check run (`bcr`) with state from the service data.
//
- optional<gh_installation_access_token> iat;
+ {
+ // Search for the check run in the service data.
+ //
+ // Note that we look by name in case node id got replaced by a racing
+ // re-request (in which case we ignore this request).
+ //
+ auto i (find_if (sd.check_runs.begin (), sd.check_runs.end (),
+ [&cr] (const check_run& scr)
+ {
+ return scr.name == cr.check_run.name;
+ }));
- // True if the check run exists in the service data.
+ if (i == sd.check_runs.end ())
+ fail << "check_run " << cr.check_run.node_id
+ << " (" << cr.check_run.name << "): "
+ << "re-requested but does not exist in service data";
+
+ // Do nothing if node ids don't match.
+ //
+ if (i->node_id && *i->node_id != cr.check_run.node_id)
+ {
+ l3 ([&]{trace << "check_run " << cr.check_run.node_id
+ << " (" << cr.check_run.name << "): "
+ << "node id has changed in service data";});
+ return true;
+ }
+
+ // Do nothing if the build is already queued.
+ //
+ if (i->state == build_state::queued)
+ {
+ l3 ([&]{trace << "ignoring already-queued check run";});
+ return true;
+ }
+
+ bcr.name = i->name;
+ bcr.build_id = i->build_id;
+ bcr.state = i->state;
+ }
+
+ // Transition the build and conclusion check runs out of the built state
+ // (or any other state) by re-creating them.
//
- bool cr_found (false);
+ bcr.state = build_state::queued;
+ bcr.state_synced = false;
+ bcr.details_url = cr.check_run.details_url;
+
+ ccr.state = build_state::building;
+ ccr.state_synced = false;
+
+ if (gq_create_check_runs (error, check_runs, iat->token,
+ repo_node_id, head_sha))
+ {
+ assert (bcr.state == build_state::queued);
+ assert (ccr.state == build_state::building);
- // Update the state of the check run in the service data. Return (via
- // captured references) the IAT and whether the check run was found.
+ l3 ([&]{trace << "created check_run { " << bcr << " }";});
+ l3 ([&]{trace << "created conclusion check_run { " << ccr << " }";});
+ }
+ else
+ {
+ fail << "check run " << cr.check_run.node_id
+ << ": unable to re-create build and conclusion check runs";
+ }
+
+ // Request the rebuild and update service data.
//
- // Called by rebuild(), but only if the build is actually restarted.
+ bool race (false);
+
+ // Callback function called by rebuild() to update the service data (but
+ // only if the build is actually restarted).
//
- auto update_sd = [&iat,
- &cr_found,
- &error,
- &cr] (const tenant_service& ts, build_state)
- -> optional<string>
+ auto update_sd = [&error, &new_iat, &race,
+ tenant_id = move (tenant_id),
+ &cr, &bcr, &ccr] (const string& ti,
+ const tenant_service& ts,
+ build_state) -> optional<string>
{
// NOTE: this lambda may be called repeatedly (e.g., due to transaction
// being aborted) and so should not move out of its captures.
+ race = false; // Reset.
+
+ if (tenant_id != ti)
+ {
+ // The tenant got replaced since we loaded it but we managed to
+ // trigger a rebuild in the new tenant. Who knows whose check runs are
+ // visible, so let's fail ours similarly to the cases below.
+ //
+ race = true;
+ return nullopt;
+ }
+
service_data sd;
try
{
@@ -796,148 +1033,138 @@ namespace brep
catch (const invalid_argument& e)
{
error << "failed to parse service data: " << e;
- return nullptr;
+ return nullopt;
}
- if (!iat)
- iat = sd.installation_access;
-
- // If the re-requested check run is found, update it in the service
- // data.
+ // Note that we again look by name in case node id got replaced by a
+ // racing re-request. In this case, however, it's impossible to decide
+ // who won that race, so let's fail the check suite to be on the safe
+ // side (in a sense, similar to the rebuild() returning queued below).
//
- const string& nid (cr.check_run.node_id);
+ auto i (find_if (
+ sd.check_runs.begin (), sd.check_runs.end (),
+ [&cr] (const check_run& scr)
+ {
+ return scr.name == cr.check_run.name;
+ }));
- for (check_run& cr: sd.check_runs)
+ if (i == sd.check_runs.end ())
{
- if (cr.node_id && *cr.node_id == nid)
- {
- cr_found = true;
- cr.state = build_state::queued;
- sd.completed = false;
-
- // Clear the check run node ids and set state_synced to true to
- // cause build_building() and/or build_built() to create new check
- // runs (see the plan above for details).
- //
- cr.node_id = nullopt;
- cr.state_synced = true;
- sd.conclusion_node_id = nullopt;
+ error << "check_run " << cr.check_run.node_id
+ << " (" << cr.check_run.name << "): "
+ << "re-requested but does not exist in service data";
+ return nullopt;
+ }
- return sd.json ();
- }
+ if (i->node_id && *i->node_id != cr.check_run.node_id)
+ {
+ // Keep the old conclusion node id to make sure any further state
+ // transitions are ignored. A bit of a hack.
+ //
+ race = true;
+ return nullopt;
}
- return nullopt;
+ *i = bcr; // Update with new node_id, state, state_synced.
+
+ sd.conclusion_node_id = ccr.node_id;
+ sd.completed = false;
+
+ // Save the IAT if we created a new one.
+ //
+ if (new_iat)
+ sd.installation_access = *new_iat;
+
+ return sd.json ();
};
optional<build_state> bs (rebuild (*build_db_, retry_, *bid, update_sd));
- if (!bs)
+ // If the build has been archived or re-enqueued since we loaded the
+ // service data, fail (by updating) both the build check run and the
+ // conclusion check run. Otherwise the build has been successfully
+ // re-enqueued so do nothing further.
+ //
+ if (!race && bs && *bs != build_state::queued)
+ return true;
+
+ gq_built_result br; // Built result for both check runs.
+
+ if (race || bs) // Race or re-enqueued.
{
- // Build has expired (most probably the tenant has been archived).
+ // The re-enqueued case: this build has been re-enqueued since we first
+ // loaded the service data. This could happen if the user clicked
+ // "re-run" multiple times and another handler won the rebuild() race.
//
- // Update the conclusion check run to notify the user (but have to
- // replace it with a new one because we don't know the existing one's
- // node id).
+ // However the winner of the check runs race cannot be determined.
//
- optional<gh_installation_access_token> iat (get_iat ());
- if (!iat)
- throw server_error ();
-
- if (optional<check_run> ccr = create_conclusion_cr (
- *iat,
- build_state::built,
- result_status::error,
- "Unable to rebuild: tenant has been archived or no such build"))
- {
- l3 ([&]{trace << "created conclusion check_run { " << *ccr << " }";});
- }
- else
- {
- // Log the error and return failure to GitHub which will presumably
- // indicate this in its GUI.
- //
- fail << "check run " << cr.check_run.node_id
- << ": unable to create conclusion check run";
- }
- }
- else if (*bs == build_state::queued)
- {
- // The build was already queued so nothing to be done. This might happen
- // if the user clicked "re-run" multiple times before we managed to
- // update the check run.
- }
- else
- {
- // The build has been requeued.
+ // Best case the other handler won the check runs race as well and
+ // thus everything will proceed normally. Our check runs will be
+ // invisible and disregarded.
//
- assert (*bs == build_state::building || *bs == build_state::built);
-
- if (!cr_found)
- {
- // Respond with an error otherwise GitHub will post a message in its
- // GUI saying "you have successfully requested a rebuild of ..."
- //
- fail << "check_run " << cr.check_run.node_id
- << ": build restarted but check run does not exist "
- << "in service data";
- }
-
- // Get a new IAT if the one from the service data has expired.
+ // Worst case we won the check runs race and the other handler's check
+ // runs -- the ones that will be updated by the build_*() notifications
+ // -- are no longer visible, leaving things quite broken.
//
- assert (iat.has_value ());
-
- if (system_clock::now () > iat->expires_at)
- {
- iat = get_iat ();
- if (!iat)
- throw server_error ();
- }
-
- // Update (by replacing) the re-requested and conclusion check runs to
- // queued and building, respectively.
+ // Either way, we fail our check runs. In the best case scenario it
+ // will have no effect; in the worst case scenario it lets the user
+ // know something has gone wrong.
//
- // If either fails we can only log the error but build_building() and/or
- // build_built() should correct the situation (see above for details).
+ br = make_built_result (result_status::error, warning_success,
+ "Unable to rebuild, try again");
+ }
+ else // Archived.
+ {
+ // The build has expired since we loaded the service data. Most likely
+ // the tenant has been archived.
//
+ br = make_built_result (
+ result_status::error, warning_success,
+ "Unable to rebuild individual configuration: build has been archived");
+ }
- // Update re-requested check run.
- //
- {
- check_run ncr; // New check run.
- ncr.name = cr.check_run.name;
+ // Try to update the conclusion check run even if the first update fails.
+ //
+ bool f (false); // Failed.
- if (gq_create_check_run (error,
- ncr,
- iat->token,
- cr.repository.node_id,
- cr.check_run.check_suite.head_sha,
- cr.check_run.details_url,
- build_state::queued))
- {
- l3 ([&]{trace << "created check_run { " << ncr << " }";});
- }
- else
- {
- error << "check_run " << cr.check_run.node_id
- << ": unable to create (to update) check run in queued state";
- }
- }
+ // Fail the build check run.
+ //
+ if (gq_update_check_run (error, bcr, iat->token,
+ repo_node_id, *bcr.node_id,
+ nullopt /* details_url */,
+ build_state::built, br))
+ {
+ l3 ([&]{trace << "updated check_run { " << bcr << " }";});
+ }
+ else
+ {
+ error << "check run " << cr.check_run.node_id
+ << ": unable to update (replacement) check run "
+ << *bcr.node_id;
+ f = true;
+ }
- // Update conclusion check run.
- //
- if (optional<check_run> ccr =
- create_conclusion_cr (*iat, build_state::building))
- {
- l3 ([&]{trace << "created conclusion check_run { " << *ccr << " }";});
- }
- else
- {
- error << "check_run " << cr.check_run.node_id
- << ": unable to create (to update) conclusion check run";
- }
+ // Fail the conclusion check run.
+ //
+ if (gq_update_check_run (error, ccr, iat->token,
+ repo_node_id, *ccr.node_id,
+ nullopt /* details_url */,
+ build_state::built, move (br)))
+ {
+ l3 ([&]{trace << "updated conclusion check_run { " << ccr << " }";});
+ }
+ else
+ {
+ error << "check run " << cr.check_run.node_id
+ << ": unable to update conclusion check run " << *ccr.node_id;
+ f = true;
}
+ // Fail the handler if either of the check runs could not be updated.
+ //
+ if (f)
+ throw server_error ();
+
return true;
}
@@ -959,30 +1186,6 @@ namespace brep
// gets updated with the head commit's SHA and check_suite.pull_requests[]
// will contain all PRs with this branch as head.
//
- // Remaining TODOs
- //
- // - @@ TODO? PR base branch changed (to a different branch)
- //
- // => pull_request(edited)
- //
- // - PR closed @@ TODO
- //
- // Also received if base branch is deleted. (And presumably same for head
- // branch.)
- //
- // => pull_request(closed)
- //
- // Cancel CI?
- //
- // - PR merged @@ TODO
- //
- // => pull_request(merged)
- //
- // => check_suite(PR_base)
- //
- // Probably wouldn't want to CI the base again because the PR CI would've
- // done the equivalent already.
- //
bool ci_github::
handle_pull_request (gh_pull_request_event pr, bool warning_success)
{
@@ -1007,12 +1210,6 @@ namespace brep
l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
- // Note that similar to the branch push case above, while it would have
- // been nice to cancel the previous CI job once the PR head moves (the
- // "synchronize" event), due to the head sharing problem the previous CI
- // job might actually still be relevant (in both local and remote PR
- // cases).
-
// Distinguish between local and remote PRs by comparing the head and base
// repositories' paths.
//
@@ -1021,6 +1218,48 @@ namespace brep
? service_data::local
: service_data::remote);
+ // Note that similar to the branch push case above, while it would have
+ // been nice to cancel the previous CI job once the PR head moves (the
+ // "synchronize" event), due to the head sharing problem the previous CI
+ // job might actually still be relevant (in both local and remote PR
+ // cases). So we only do it for the remote PRs and only if the head is not
+ // shared (via tenant reference counting).
+ //
+ if (kind == service_data::remote && pr.action == "synchronize")
+ {
+ if (pr.before)
+ {
+ // Service id that will uniquely identify the CI tenant.
+ //
+ string sid (pr.repository.node_id + ':' + *pr.before);
+
+ if (optional<tenant_service> ts = cancel (error, warn,
+ verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ "ci-github", sid,
+ true /* ref_count */))
+ {
+ l3 ([&]{trace << "pull request " << pr.pull_request.node_id
+ << ": attempted to cancel CI of previous head commit"
+ << " (ref_count: " << ts->ref_count << ')';});
+ }
+ else
+ {
+ // It's possible that there was no CI for the previous commit for
+ // various reasons (e.g., CI was not enabled).
+ //
+ l3 ([&]{trace << "pull request " << pr.pull_request.node_id
+ << ": failed to cancel CI of previous head commit "
+ << "with tenant_service id " << sid;});
+ }
+ }
+ else
+ {
+ error << "pull request " << pr.pull_request.node_id
+ << ": before commit is missing in synchronize event";
+ }
+ }
+
// Note: for remote PRs the check_sha will be set later, in
// build_unloaded_pre_check(), to test merge commit id.
//
@@ -1082,10 +1321,13 @@ namespace brep
return true;
}
- function<optional<string> (const tenant_service&)> ci_github::
- build_unloaded (tenant_service&& ts,
+ function<optional<string> (const string&, const tenant_service&)> ci_github::
+ build_unloaded (const string& ti,
+ tenant_service&& ts,
const diag_epilogue& log_writer) const noexcept
{
+ // NOTE: this function is noexcept and should not throw.
+
NOTIFICATION_DIAG (log_writer);
service_data sd;
@@ -1100,15 +1342,24 @@ namespace brep
}
return sd.pre_check
- ? build_unloaded_pre_check (move (ts), move (sd), log_writer)
- : build_unloaded_load (move (ts), move (sd), log_writer);
+ ? build_unloaded_pre_check (move (ts), move (sd), log_writer)
+ : build_unloaded_load (ti, move (ts), move (sd), log_writer);
}
- function<optional<string> (const tenant_service&)> ci_github::
+ function<optional<string> (const string&, const tenant_service&)> ci_github::
build_unloaded_pre_check (tenant_service&& ts,
service_data&& sd,
const diag_epilogue& log_writer) const noexcept
+ try
{
+ // NOTE: this function is noexcept and should not throw.
+ //
+ // In a few places where invalid_argument is unlikely to be thrown and/or
+ // would indicate that things are seriously broken we let it propagate to
+ // the function catch block where the pre-check tenant will be canceled
+ // (otherwise we could end up in an infinite loop, e.g., because the
+ // problematic arguments won't change).
+
NOTIFICATION_DIAG (log_writer);
// We get here for PRs only (but both local and remote). The overall
@@ -1134,6 +1385,8 @@ namespace brep
// Request PR pre-check info (triggering the generation of the test merge
// commit on the GitHub's side).
//
+ // Let unlikely invalid_argument propagate (see above).
+ //
optional<gq_pr_pre_check_info> pc (
gq_fetch_pull_request_pre_check_info (error,
sd.installation_access.token,
@@ -1177,7 +1430,7 @@ namespace brep
// Service id that will uniquely identify the CI tenant.
//
- string sid (sd.repository_node_id + ":" + sd.report_sha);
+ string sid (sd.repository_node_id + ':' + sd.report_sha);
// Create an unloaded CI tenant, doing nothing if one already exists
// (which could've been created by a head branch push or another PR
@@ -1193,38 +1446,50 @@ namespace brep
// notifications until (1) we load the tenant, (2) we cancel it, or (3)
// it gets archived after some timeout.
//
- if (auto pr = create (error, warn, verb_ ? &trace : nullptr,
- *build_db_, retry_,
- tenant_service (sid, "ci-github", sd.json ()),
- chrono::seconds (30) /* interval */,
- chrono::seconds (0) /* delay */,
- duplicate_tenant_mode::ignore))
- {
- if (pr->second == duplicate_tenant_result::ignored)
+ try
+ {
+ if (auto pr = create (error, warn, verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ tenant_service (sid, "ci-github", sd.json ()),
+ chrono::seconds (30) /* interval */,
+ chrono::seconds (0) /* delay */,
+ duplicate_tenant_mode::ignore))
{
- // This PR is sharing a head commit with something else.
- //
- // If this is a local PR then it's probably the branch push, which
- // is expected, so do nothing.
- //
- // If this is a remote PR then it could be anything (branch push,
- // local PR, or another remote PR) which in turn means the CI result
- // may end up being for head, not merge commit. There is nothing we
- // can do about it on our side (the user can enable the head-behind-
- // base protection on their side).
- //
- if (sd.kind == service_data::remote)
+ if (pr->second == duplicate_tenant_result::ignored)
{
- l3 ([&]{trace << "remote pull request " << *sd.pr_node_id
- << ": CI tenant already exists for " << sid;});
+ // This PR is sharing a head commit with something else.
+ //
+ // If this is a local PR then it's probably the branch push, which
+ // is expected, so do nothing.
+ //
+ // If this is a remote PR then it could be anything (branch push,
+ // local PR, or another remote PR) which in turn means the CI
+ // result may end up being for head, not merge commit. There is
+ // nothing we can do about it on our side (the user can enable the
+ // head-behind-base protection on their side).
+ //
+ if (sd.kind == service_data::remote)
+ {
+ l3 ([&]{trace << "remote pull request " << *sd.pr_node_id
+ << ": CI tenant already exists for " << sid;});
+ }
}
}
+ else
+ {
+ error << "pull request " << *sd.pr_node_id
+ << ": failed to create unloaded CI tenant "
+ << "with tenant_service id " << sid;
+
+ // Fall through to cancel.
+ }
}
- else
+ catch (const runtime_error& e) // Database retries exhausted.
{
error << "pull request " << *sd.pr_node_id
- << ": unable to create unloaded CI tenant "
- << "with tenant_service id " << sid;
+ << ": failed to create unloaded CI tenant "
+ << "with tenant_service id " << sid
+ << ": " << e.what ();
// Fall through to cancel.
}
@@ -1232,26 +1497,70 @@ namespace brep
// Cancel the pre-check tenant.
//
- if (!cancel (error, warn, verb_ ? &trace : nullptr,
- *build_db_, retry_,
- ts.type,
- ts.id))
+ try
+ {
+ if (!cancel (error, warn, verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ ts.type,
+ ts.id))
+ {
+ // Should never happen (no such tenant).
+ //
+ error << "pull request " << *sd.pr_node_id
+ << ": failed to cancel pre-check tenant with tenant_service id "
+ << ts.id;
+ }
+ }
+ catch (const runtime_error& e) // Database retries exhausted.
{
- // Should never happen (no such tenant).
- //
error << "pull request " << *sd.pr_node_id
<< ": failed to cancel pre-check tenant with tenant_service id "
- << ts.id;
+ << ts.id << ": " << e.what ();
+ }
+
+ return nullptr;
+ }
+ catch (const std::exception& e)
+ {
+ NOTIFICATION_DIAG (log_writer);
+ error << "pull request " << *sd.pr_node_id
+ << ": unhandled exception: " << e.what ();
+
+ // Cancel the pre-check tenant; otherwise we could end up in an infinite
+ // loop (see top of function).
+ //
+ try
+ {
+ if (cancel (error, warn, verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ ts.type,
+ ts.id))
+ l3 ([&]{trace << "canceled pre-check tenant " << ts.id;});
+ }
+ catch (const runtime_error& e) // Database retries exhausted.
+ {
+ l3 ([&]{trace << "failed to cancel pre-check tenant " << ts.id << ": "
+ << e.what ();});
}
return nullptr;
}
- function<optional<string> (const tenant_service&)> ci_github::
- build_unloaded_load (tenant_service&& ts,
+ function<optional<string> (const string&, const tenant_service&)> ci_github::
+ build_unloaded_load (const string& tenant_id,
+ tenant_service&& ts,
service_data&& sd,
const diag_epilogue& log_writer) const noexcept
+ try
{
+ // NOTE: this function is noexcept and should not throw.
+ //
+ // In a few places where invalid_argument is unlikely to be thrown and/or
+ // would indicate that things are seriously broken we let it propagate to
+ // the function catch block where the tenant will be canceled (otherwise
+ // we could end up in an infinite loop, e.g., because the problematic
+ // arguments won't change).
+
NOTIFICATION_DIAG (log_writer);
// Load the tenant, which is essentially the same for both branch push and
@@ -1297,6 +1606,8 @@ namespace brep
check_run cr;
cr.name = move (name);
+ // Let unlikely invalid_argument propagate (see above).
+ //
if (gq_create_check_run (error,
cr,
iat->token,
@@ -1323,14 +1634,16 @@ namespace brep
{
assert (!node_id.empty ());
- optional<gq_built_result> br (
- gq_built_result (gh_to_conclusion (rs, sd.warning_success),
- circle (rs) + ' ' + ucase (to_string (rs)),
- move (summary)));
+ // Let unlikely invalid_argument propagate (see above).
+ //
+ gq_built_result br (
+ make_built_result (rs, sd.warning_success, move (summary)));
check_run cr;
cr.name = name; // For display purposes only.
+ // Let unlikely invalid_argument propagate (see above).
+ //
if (gq_update_check_run (error,
cr,
iat->token,
@@ -1357,16 +1670,24 @@ namespace brep
//
string conclusion_node_id; // Conclusion check run node ID.
- if (auto cr = create_synthetic_cr (conclusion_check_run_name))
+ if (!sd.conclusion_node_id)
{
- l3 ([&]{trace << "created check_run { " << *cr << " }";});
+ if (auto cr = create_synthetic_cr (conclusion_check_run_name))
+ {
+ l3 ([&]{trace << "created check_run { " << *cr << " }";});
- conclusion_node_id = move (*cr->node_id);
+ conclusion_node_id = move (*cr->node_id);
+ }
}
+ const string& effective_conclusion_node_id (
+ sd.conclusion_node_id
+ ? *sd.conclusion_node_id
+ : conclusion_node_id);
+
// Load the CI tenant if the conclusion check run was created.
//
- if (!conclusion_node_id.empty ())
+ if (!effective_conclusion_node_id.empty ())
{
string ru; // Repository URL.
@@ -1383,46 +1704,65 @@ namespace brep
else
ru = sd.repository_clone_url + '#' + sd.check_sha;
+ // Let unlikely invalid_argument propagate (see above).
+ //
repository_location rl (move (ru), repository_type::git);
- optional<start_result> r (load (error, warn, verb_ ? &trace : nullptr,
- *build_db_, retry_,
- move (ts),
- move (rl)));
-
- if (!r || r->status != 200)
+ try
{
- if (auto cr = update_synthetic_cr (conclusion_node_id,
- conclusion_check_run_name,
- result_status::error,
- to_check_run_summary (r)))
- {
- l3 ([&]{trace << "updated check_run { " << *cr << " }";});
- }
- else
+ optional<start_result> r (load (error, warn, verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ move (ts),
+ move (rl)));
+
+ if (!r || r->status != 200)
{
- // Nothing really we can do in this case since we will not receive
- // any further notifications. Log the error as a last resort.
+ // Let unlikely invalid_argument propagate (see above).
+ //
+ if (auto cr = update_synthetic_cr (effective_conclusion_node_id,
+ conclusion_check_run_name,
+ result_status::error,
+ to_check_run_summary (r)))
+ {
+ l3 ([&]{trace << "updated check_run { " << *cr << " }";});
+ }
+ else
+ {
+ // Nothing really we can do in this case since we will not receive
+ // any further notifications. Log the error as a last resort.
- error << "failed to load CI tenant " << ts.id
- << " and unable to update conclusion";
+ error << "failed to load CI tenant " << ts.id
+ << " and unable to update conclusion";
+ }
+
+ return nullptr; // No need to update service data in this case.
}
+ }
+ catch (const runtime_error& e) // Database retries exhausted.
+ {
+ error << "failed to load CI tenant " << ts.id << ": " << e.what ();
- return nullptr; // No need to update service data in this case.
+ // Fall through to retry on next call.
}
}
- else if (!new_iat)
- return nullptr; // Nothing to save (but retry on next call).
+
+ if (!new_iat && conclusion_node_id.empty ())
+ return nullptr; // Nothing to save (but potentially retry on next call).
return [&error,
+ tenant_id,
iat = move (new_iat),
cni = move (conclusion_node_id)]
- (const tenant_service& ts) -> optional<string>
+ (const string& ti,
+ const tenant_service& ts) -> optional<string>
{
// NOTE: this lambda may be called repeatedly (e.g., due to
// transaction being aborted) and so should not move out of its
// captures.
+ if (tenant_id != ti)
+ return nullopt; // Do nothing if the tenant has been replaced.
+
service_data sd;
try
{
@@ -1443,6 +1783,28 @@ namespace brep
return sd.json ();
};
}
+ catch (const std::exception& e)
+ {
+ NOTIFICATION_DIAG (log_writer);
+ error << "CI tenant " << ts.id << ": unhandled exception: " << e.what ();
+
+ // Cancel the tenant; otherwise we could end up in an infinite loop (see
+ // top of function).
+ //
+ try
+ {
+ if (cancel (error, warn, verb_ ? &trace : nullptr,
+ *build_db_, retry_, ts.type, ts.id))
+ l3 ([&]{trace << "canceled CI tenant " << ts.id;});
+ }
+ catch (const runtime_error& e) // Database retries exhausted.
+ {
+ l3 ([&]{trace << "failed to cancel CI tenant " << ts.id
+ << ": " << e.what ();});
+ }
+
+ return nullptr;
+ }
// Build state change notifications (see tenant-services.hxx for
// background). Mapping our state transitions to GitHub pose multiple
@@ -1453,9 +1815,9 @@ namespace brep
// them when notifying GitHub. The first is not important (we expect the
// state to go back to building shortly). The second should normally not
// happen and would mean that a completed check suite may go back on its
- // conclusion (which would be pretty confusing for the user). @@@ This
- // can/will happen on check run rebuild. Distinguish between internal
- // and external rebuilds?
+ // conclusion (which would be pretty confusing for the user). Note that
+ // the ->queued state transition of a check run rebuild triggered by
+ // us is handled directly in handle_check_run_rerequest().
//
// So, for GitHub notifications, we only have the following linear
// transition sequence:
@@ -1532,13 +1894,17 @@ namespace brep
// if we have node_id, then we update, otherwise, we create (potentially
// overriding the check run created previously).
//
- function<optional<string> (const tenant_service&)> ci_github::
- build_queued (const tenant_service& ts,
+ function<optional<string> (const string&, const tenant_service&)> ci_github::
+ build_queued (const string& tenant_id,
+ const tenant_service& ts,
const vector<build>& builds,
optional<build_state> istate,
const build_queued_hints& hs,
const diag_epilogue& log_writer) const noexcept
+ try
{
+ // NOTE: this function is noexcept and should not throw.
+
NOTIFICATION_DIAG (log_writer);
service_data sd;
@@ -1638,11 +2004,12 @@ namespace brep
{
// Create a check_run for each build as a single request.
//
+ // Let unlikely invalid_argument propagate.
+ //
if (gq_create_check_runs (error,
crs,
iat->token,
- sd.repository_node_id, sd.report_sha,
- build_state::queued))
+ sd.repository_node_id, sd.report_sha))
{
for (const check_run& cr: crs)
{
@@ -1654,15 +2021,20 @@ namespace brep
}
}
- return [bs = move (bs),
+ return [tenant_id,
+ bs = move (bs),
iat = move (new_iat),
crs = move (crs),
error = move (error),
- warn = move (warn)] (const tenant_service& ts) -> optional<string>
+ warn = move (warn)] (const string& ti,
+ const tenant_service& ts) -> optional<string>
{
// NOTE: this lambda may be called repeatedly (e.g., due to transaction
// being aborted) and so should not move out of its captures.
+ if (tenant_id != ti)
+ return nullopt; // Do nothing if the tenant has been replaced.
+
service_data sd;
try
{
@@ -1702,12 +2074,24 @@ namespace brep
return sd.json ();
};
}
+ catch (const std::exception& e)
+ {
+ NOTIFICATION_DIAG (log_writer);
+
+ error << "CI tenant " << ts.id << ": unhandled exception: " << e.what ();
+
+ return nullptr;
+ }
- function<optional<string> (const tenant_service&)> ci_github::
- build_building (const tenant_service& ts,
+ function<optional<string> (const string&, const tenant_service&)> ci_github::
+ build_building (const string& tenant_id,
+ const tenant_service& ts,
const build& b,
const diag_epilogue& log_writer) const noexcept
+ try
{
+ // NOTE: this function is noexcept and should not throw.
+
NOTIFICATION_DIAG (log_writer);
service_data sd;
@@ -1783,6 +2167,8 @@ namespace brep
//
if (iat != nullptr)
{
+ // Let unlikely invalid_argument propagate.
+ //
if (gq_update_check_run (error,
*cr,
iat->token,
@@ -1806,14 +2192,19 @@ namespace brep
}
}
- return [iat = move (new_iat),
+ return [tenant_id,
+ iat = move (new_iat),
cr = move (*cr),
error = move (error),
- warn = move (warn)] (const tenant_service& ts) -> optional<string>
+ warn = move (warn)] (const string& ti,
+ const tenant_service& ts) -> optional<string>
{
// NOTE: this lambda may be called repeatedly (e.g., due to transaction
// being aborted) and so should not move out of its captures.
+ if (tenant_id != ti)
+ return nullopt; // Do nothing if the tenant has been replaced.
+
service_data sd;
try
{
@@ -1848,18 +2239,31 @@ namespace brep
return sd.json ();
};
}
+ catch (const std::exception& e)
+ {
+ NOTIFICATION_DIAG (log_writer);
+
+ string bid (gh_check_run_name (b)); // Full build id.
- function<optional<string> (const tenant_service&)> ci_github::
- build_built (const tenant_service& ts,
+ error << "check run " << bid << ": unhandled exception: " << e.what();
+
+ return nullptr;
+ }
+
+ function<optional<string> (const string&, const tenant_service&)> ci_github::
+ build_built (const string& tenant_id,
+ const tenant_service& ts,
const build& b,
const diag_epilogue& log_writer) const noexcept
+ try
{
- // @@ TODO Include service_data::event_node_id and perhaps ts.id in
- // diagnostics? E.g. when failing to update check runs we print the
- // build ID only.
- //
+ // NOTE: this function is noexcept and should not throw.
+
NOTIFICATION_DIAG (log_writer);
+ // @@ TODO Include ts.id in diagnostics? Check run build ids alone seem
+ // kind of meaningless. Log lines get pretty long this way however.
+
service_data sd;
try
{
@@ -1978,6 +2382,11 @@ namespace brep
{
using namespace web::xhtml;
+ // Note: let all serialization exceptions propagate. The XML
+ // serialization code can throw bad_alloc or xml::serialization in
+ // case of I/O failures, but we're serializing to a string stream so
+ // both exceptions are unlikely.
+ //
ostringstream os;
xml::serializer s (os, "check_run_summary");
@@ -2069,13 +2478,12 @@ namespace brep
}
gq_built_result br (
- gh_to_conclusion (*b.status, sd.warning_success),
- circle (*b.status) + ' ' + ucase (to_string (*b.status)),
- move (sm));
+ make_built_result (*b.status, sd.warning_success, move (sm)));
if (cr.node_id)
{
- // Update existing check run to built.
+ // Update existing check run to built. Let unlikely invalid_argument
+ // propagate.
//
if (gq_update_check_run (error,
cr,
@@ -2092,7 +2500,7 @@ namespace brep
}
else
{
- // Create new check run.
+ // Create new check run. Let unlikely invalid_argument propagate.
//
// Note that we don't have build hints so will be creating this check
// run with the full build id as name. In the unlikely event that an
@@ -2129,10 +2537,9 @@ namespace brep
result_status rs (*conclusion);
- optional<gq_built_result> br (
- gq_built_result (gh_to_conclusion (rs, sd.warning_success),
- circle (rs) + ' ' + ucase (to_string (rs)),
- "All configurations are built"));
+ gq_built_result br (
+ make_built_result (rs, sd.warning_success,
+ "All configurations are built"));
check_run cr;
@@ -2141,6 +2548,8 @@ namespace brep
cr.node_id = *sd.conclusion_node_id;
cr.name = conclusion_check_run_name;
+ // Let unlikely invalid_argument propagate.
+ //
if (gq_update_check_run (error,
cr,
iat->token,
@@ -2167,15 +2576,20 @@ namespace brep
}
}
- return [iat = move (new_iat),
+ return [tenant_id,
+ iat = move (new_iat),
cr = move (cr),
completed = completed,
error = move (error),
- warn = move (warn)] (const tenant_service& ts) -> optional<string>
+ warn = move (warn)] (const string& ti,
+ const tenant_service& ts) -> optional<string>
{
// NOTE: this lambda may be called repeatedly (e.g., due to transaction
// being aborted) and so should not move out of its captures.
+ if (tenant_id != ti)
+ return nullopt; // Do nothing if the tenant has been replaced.
+
service_data sd;
try
{
@@ -2245,6 +2659,16 @@ namespace brep
return sd.json ();
};
}
+ catch (const std::exception& e)
+ {
+ NOTIFICATION_DIAG (log_writer);
+
+ string bid (gh_check_run_name (b)); // Full build id.
+
+ error << "check run " << bid << ": unhandled exception: " << e.what();
+
+ return nullptr;
+ }
string ci_github::
details_url (const build& b) const
@@ -2270,22 +2694,22 @@ namespace brep
url u (details_url);
- if (!u.query || !u.path || u.path->size () <= 1)
- return nullopt;
-
build_id r;
// Extract the tenant from the URL path.
//
// Example path: @d2586f57-21dc-40b7-beb2-6517ad7917dd
//
- r.package.tenant = u.path->substr (1);
-
- if (r.package.tenant.empty ())
+ if (!u.path || u.path->size () != 37 || (*u.path)[0] != '@')
return nullopt;
+ r.package.tenant = u.path->substr (1);
+
// Extract the rest of the build_id members from the URL query.
//
+ if (!u.query)
+ return nullopt;
+
bool pn (false), pv (false), tg (false), tc (false), pc (false),
th (false);
@@ -2304,21 +2728,25 @@ namespace brep
++vp; // Skip '='
- const char* ve (ep ? ep : vp + strlen (vp)); // Value end pointer.
+ const char* ve (ep != nullptr ? ep : vp + strlen (vp)); // Value end.
// Get the value as-is or URL-decode it.
//
- auto getval = [vp, ve] () { return string (vp, ve); };
+ auto rawval = [vp, ve] () { return string (vp, ve); };
auto decval = [vp, ve] () { return mime_url_decode (vp, ve); };
auto make_version = [] (string&& v)
- { return canonical_version (brep::version (move (v))); };
+ {
+ return canonical_version (brep::version (move (v)));
+ };
auto c = [&n] (bool& b, const char* s)
- { return n == s ? (b = true) : false; };
+ {
+ return n == s ? (b = true) : false;
+ };
if (c (pn, "builds")) r.package.name = package_name (decval ());
- else if (c (pv, "pv")) r.package.version = make_version (getval ());
+ else if (c (pv, "pv")) r.package.version = make_version (rawval ());
else if (c (tg, "tg")) r.target = target_triplet (decval ());
else if (c (tc, "tc")) r.target_config_name = decval ();
else if (c (pc, "pc")) r.package_config_name = decval ();
@@ -2326,7 +2754,7 @@ namespace brep
{
// Toolchain name and version. E.g. "public-0.17.0"
- string v (getval ());
+ string v (rawval ());
// Note: parsing code based on mod/mod-builds.cxx.
//
@@ -2338,7 +2766,7 @@ namespace brep
r.toolchain_version = make_version (v.substr (p + 1));
}
- qp = ep ? ep + 1 : nullptr;
+ qp = ep != nullptr ? ep + 1 : nullptr;
}
if (!pn || !pv || !tg || !tc || !pc || !th)
@@ -2346,7 +2774,7 @@ namespace brep
return r;
}
- catch (const invalid_argument&)
+ catch (const invalid_argument&) // Invalid url, brep::version, etc.
{
return nullopt;
}
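// For illustration only (hypothetical values, not brep code): a minimal,
// self-contained sketch of the details_url layout parse_details_url()
// expects, with the tenant in the path and the build_id members in the
// query. The real implementation above also URL-decodes some of the values
// and converts them to typed members.
//
#include <iostream>
#include <map>
#include <string>

int main ()
{
  std::string path ("@d2586f57-21dc-40b7-beb2-6517ad7917dd");
  std::string query ("builds=libhello&pv=1.2.3&tg=x86_64-linux-gnu"
                     "&tc=linux_debian_12&pc=default&th=public-0.17.0");

  std::string tenant (path.substr (1)); // Strip the leading '@'.

  // Split the query on '&' and '=' (simplified: assumes well-formed input).
  //
  std::map<std::string, std::string> q;
  for (std::size_t b (0); b != std::string::npos; )
  {
    std::size_t e (query.find ('&', b));
    std::string p (query.substr (b, e == std::string::npos ? e : e - b));
    std::size_t eq (p.find ('='));
    q[p.substr (0, eq)] = p.substr (eq + 1);
    b = (e == std::string::npos ? e : e + 1);
  }

  // th is "<toolchain-name>-<toolchain-version>", e.g., "public-0.17.0"
  // (splitting simplified here).
  //
  std::string th (q["th"]);
  std::size_t d (th.find ('-'));

  std::cout << "tenant: "            << tenant            << '\n'
            << "package: "           << q["builds"]       << '\n'
            << "version: "           << q["pv"]           << '\n'
            << "target: "            << q["tg"]           << '\n'
            << "target config: "     << q["tc"]           << '\n'
            << "package config: "    << q["pc"]           << '\n'
            << "toolchain name: "    << th.substr (0, d)  << '\n'
            << "toolchain version: " << th.substr (d + 1) << '\n';
}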
@@ -2455,6 +2883,8 @@ namespace brep
//
iat.expires_at -= chrono::minutes (5);
}
+ // gh_installation_access_token (via github_post())
+ //
catch (const json::invalid_json_input& e)
{
// Note: e.name is the GitHub API endpoint.
@@ -2464,12 +2894,12 @@ namespace brep
<< e.position << ", error: " << e;
return nullopt;
}
- catch (const invalid_argument& e)
+ catch (const invalid_argument& e) // github_post()
{
error << "malformed header(s) in response: " << e;
return nullopt;
}
- catch (const system_error& e)
+ catch (const system_error& e) // github_post()
{
error << "unable to get installation access token (errno=" << e.code ()
<< "): " << e.what ();
diff --git a/mod/mod-ci-github.hxx b/mod/mod-ci-github.hxx
index aa601d2..104f889 100644
--- a/mod/mod-ci-github.hxx
+++ b/mod/mod-ci-github.hxx
@@ -42,33 +42,40 @@ namespace brep
virtual const cli::options&
cli_options () const {return options::ci_github::description ();}
- virtual function<optional<string> (const tenant_service&)>
- build_unloaded (tenant_service&&,
+ virtual function<optional<string> (const string&, const tenant_service&)>
+ build_unloaded (const string& tenant_id,
+ tenant_service&&,
const diag_epilogue& log_writer) const noexcept override;
- function<optional<string> (const tenant_service&)>
+ function<optional<string> (const string&, const tenant_service&)>
build_unloaded_pre_check (tenant_service&&,
service_data&&,
const diag_epilogue&) const noexcept;
- function<optional<string> (const tenant_service&)>
- build_unloaded_load (tenant_service&&,
+ function<optional<string> (const string&, const tenant_service&)>
+ build_unloaded_load (const string& tenant_id,
+ tenant_service&&,
service_data&&,
const diag_epilogue&) const noexcept;
- virtual function<optional<string> (const tenant_service&)>
- build_queued (const tenant_service&,
+ virtual function<optional<string> (const string&, const tenant_service&)>
+ build_queued (const string& tenant_id,
+ const tenant_service&,
const vector<build>&,
optional<build_state> initial_state,
const build_queued_hints&,
const diag_epilogue& log_writer) const noexcept override;
- virtual function<optional<string> (const tenant_service&)>
- build_building (const tenant_service&, const build&,
+ virtual function<optional<string> (const string&, const tenant_service&)>
+ build_building (const string& tenant_id,
+ const tenant_service&,
+ const build&,
const diag_epilogue& log_writer) const noexcept override;
- virtual function<optional<string> (const tenant_service&)>
- build_built (const tenant_service&, const build&,
+ virtual function<optional<string> (const string&, const tenant_service&)>
+ build_built (const string& tenant_id,
+ const tenant_service&,
+ const build&,
const diag_epilogue& log_writer) const noexcept override;
private:
@@ -89,7 +96,7 @@ namespace brep
// and to FAILURE otherwise.
//
bool
- handle_check_run_rerequest (gh_check_run_event, bool warning_success);
+ handle_check_run_rerequest (const gh_check_run_event&, bool warning_success);
// Handle the pull_request event `opened` and `synchronize` actions.
//
@@ -107,7 +114,9 @@ namespace brep
optional<string>
generate_jwt (const basic_mark& trace, const basic_mark& error) const;
- // Authenticate to GitHub as an app installation.
+ // Authenticate to GitHub as an app installation. Return the installation
+ // access token (IAT). Issue diagnostics and return nullopt if something
+ // goes wrong.
//
optional<gh_installation_access_token>
obtain_installation_access_token (uint64_t install_id,
diff --git a/mod/mod-ci.cxx b/mod/mod-ci.cxx
index 52f4644..46fbf6a 100644
--- a/mod/mod-ci.cxx
+++ b/mod/mod-ci.cxx
@@ -422,8 +422,10 @@ handle (request& rq, response& rs)
}
#ifdef BREP_CI_TENANT_SERVICE
-function<optional<string> (const brep::tenant_service&)> brep::ci::
-build_queued (const tenant_service&,
+function<optional<string> (const string& tenant_id,
+ const brep::tenant_service&)> brep::ci::
+build_queued (const string& /*tenant_id*/,
+ const tenant_service&,
const vector<build>& bs,
optional<build_state> initial_state,
const build_queued_hints& hints,
@@ -437,7 +439,8 @@ build_queued (const tenant_service&,
<< hints.single_package_version << ' '
<< hints.single_package_config;});
- return [&bs, initial_state] (const tenant_service& ts)
+ return [&bs, initial_state] (const string& tenant_id,
+ const tenant_service& ts)
{
optional<string> r (ts.data);
@@ -446,6 +449,7 @@ build_queued (const tenant_service&,
string s ((!initial_state
? "queued "
: "queued " + to_string (*initial_state) + ' ') +
+ tenant_id + '/' +
b.package_name.string () + '/' +
b.package_version.string () + '/' +
b.target.string () + '/' +
@@ -467,14 +471,18 @@ build_queued (const tenant_service&,
};
}
-function<optional<string> (const brep::tenant_service&)> brep::ci::
-build_building (const tenant_service&,
+function<optional<string> (const string& tenant_id,
+ const brep::tenant_service&)> brep::ci::
+build_building (const string& /*tenant_id*/,
+ const tenant_service&,
const build& b,
const diag_epilogue&) const noexcept
{
- return [&b] (const tenant_service& ts)
+ return [&b] (const string& tenant_id,
+ const tenant_service& ts)
{
string s ("building " +
+ tenant_id + '/' +
b.package_name.string () + '/' +
b.package_version.string () + '/' +
b.target.string () + '/' +
@@ -487,14 +495,17 @@ build_building (const tenant_service&,
};
}
-function<optional<string> (const brep::tenant_service&)> brep::ci::
-build_built (const tenant_service&,
+function<optional<string> (const string& tenant_id,
+ const brep::tenant_service&)> brep::ci::
+build_built (const string& /*tenant_id*/,
+ const tenant_service&,
const build& b,
const diag_epilogue&) const noexcept
{
- return [&b] (const tenant_service& ts)
+ return [&b] (const string& tenant_id, const tenant_service& ts)
{
string s ("built " +
+ tenant_id + '/' +
b.package_name.string () + '/' +
b.package_version.string () + '/' +
b.target.string () + '/' +
diff --git a/mod/mod-ci.hxx b/mod/mod-ci.hxx
index e4a343c..132b5b0 100644
--- a/mod/mod-ci.hxx
+++ b/mod/mod-ci.hxx
@@ -71,26 +71,34 @@ namespace brep
cli_options () const override {return options::ci::description ();}
#ifdef BREP_CI_TENANT_SERVICE
- virtual function<optional<string> (const tenant_service&)>
- build_queued (const tenant_service&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_queued (const string& tenant_id,
+ const tenant_service&,
const vector<build>&,
optional<build_state> initial_state,
const build_queued_hints&,
const diag_epilogue& log_writer) const noexcept override;
- virtual function<optional<string> (const tenant_service&)>
- build_building (const tenant_service&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_building (const string& tenant_id,
+ const tenant_service&,
const build&,
const diag_epilogue& log_writer) const noexcept override;
- virtual function<optional<string> (const tenant_service&)>
- build_built (const tenant_service&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_built (const string& tenant_id,
+ const tenant_service&,
const build&,
const diag_epilogue& log_writer) const noexcept override;
#ifdef BREP_CI_TENANT_SERVICE_UNLOADED
- virtual function<optional<string> (const tenant_service&)>
- build_unloaded (tenant_service&&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_unloaded (const string& tenant_id,
+ tenant_service&&,
const diag_epilogue& log_writer) const noexcept override;
#endif
#endif
diff --git a/mod/module.cli b/mod/module.cli
index 5799697..ad7f5eb 100644
--- a/mod/module.cli
+++ b/mod/module.cli
@@ -860,7 +860,8 @@ namespace brep
{
"<secret>",
"The GitHub App's configured webhook secret. If not set, then the
- GitHub CI service is disabled."
+ GitHub CI service is disabled. Note: make sure to choose a strong
+ (random) secret."
}
path ci-github-app-private-key
diff --git a/mod/tenant-service.hxx b/mod/tenant-service.hxx
index 8ba199a..5564a56 100644
--- a/mod/tenant-service.hxx
+++ b/mod/tenant-service.hxx
@@ -74,9 +74,11 @@ namespace brep
// If the returned function is not NULL, it is called to update the
// service data. It should return the new data or nullopt if no update is
// necessary. Note: tenant_service::data passed to the callback and to the
- // returned function may not be the same. Also, the returned function may
- // be called multiple times (on transaction retries). Note that the passed
- // log_writer is valid during the calls to the returned function.
+ // returned function may not be the same. Furthermore, tenant_ids may not
+ // be the same either, in case the tenant was replaced. Also, the returned
+ // function may be called multiple times (on transaction retries). Note
+ // that the passed log_writer is valid during the calls to the returned
+ // function.
//
// The passed initial_state indicates the logical initial state and is
// either absent, `building` (interrupted), or `built` (rebuild). Note
@@ -101,8 +103,10 @@ namespace brep
bool single_package_config;
};
- virtual function<optional<string> (const tenant_service&)>
- build_queued (const tenant_service&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_queued (const string& tenant_id,
+ const tenant_service&,
const vector<build>&,
optional<build_state> initial_state,
const build_queued_hints&,
@@ -112,8 +116,10 @@ namespace brep
class tenant_service_build_building: public virtual tenant_service_base
{
public:
- virtual function<optional<string> (const tenant_service&)>
- build_building (const tenant_service&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_building (const string& tenant_id,
+ const tenant_service&,
const build&,
const diag_epilogue& log_writer) const noexcept = 0;
};
@@ -121,8 +127,10 @@ namespace brep
class tenant_service_build_built: public virtual tenant_service_base
{
public:
- virtual function<optional<string> (const tenant_service&)>
- build_built (const tenant_service&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_built (const string& tenant_id,
+ const tenant_service&,
const build&,
const diag_epilogue& log_writer) const noexcept = 0;
};
@@ -140,8 +148,10 @@ namespace brep
class tenant_service_build_unloaded: public virtual tenant_service_base
{
public:
- virtual function<optional<string> (const tenant_service&)>
- build_unloaded (tenant_service&&,
+ virtual function<optional<string> (const string& tenant_id,
+ const tenant_service&)>
+ build_unloaded (const string& tenant_id,
+ tenant_service&&,
const diag_epilogue& log_writer) const noexcept = 0;
};
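// A minimal, self-contained sketch (hypothetical types and names, not brep
// code) of the data-update callback shape described at the top of this
// header: it receives the tenant id and the current service state, must
// tolerate being called repeatedly (so it does not move out of its
// captures), and returns the new data or nullopt if no update is necessary
// (for example, if the tenant has been replaced).
//
#include <functional>
#include <optional>
#include <string>
#include <utility>

struct tenant_service
{
  std::string id;
  std::string type;
  std::optional<std::string> data;
};

using update_function =
  std::function<std::optional<std::string> (const std::string& tenant_id,
                                            const tenant_service&)>;

update_function
make_update (std::string expected_tenant_id, std::string new_data)
{
  return [tid = std::move (expected_tenant_id),
          nd = std::move (new_data)] (const std::string& ti,
                                      const tenant_service&)
    -> std::optional<std::string>
  {
    // NOTE: may be called multiple times (e.g., on transaction retries) so
    // don't move out of the captures.
    //
    if (tid != ti)
      return std::nullopt; // Tenant was replaced: nothing to update.

    return nd;
  };
}

int main ()
{
  tenant_service ts {"abc123", "ci-github", std::string ("{...}")};
  update_function f (make_update ("tenant-1", "{\"state\":\"queued\"}"));

  // Same tenant id: returns the new data; different id: returns nullopt.
  //
  return f ("tenant-1", ts).has_value () && !f ("tenant-2", ts) ? 0 : 1;
}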