Diffstat (limited to 'mod')
-rw-r--r--  mod/mod-ci-github.cxx  269
1 file changed, 260 insertions(+), 9 deletions(-)
diff --git a/mod/mod-ci-github.cxx b/mod/mod-ci-github.cxx
index a650cef..d651e36 100644
--- a/mod/mod-ci-github.cxx
+++ b/mod/mod-ci-github.cxx
@@ -638,12 +638,85 @@ namespace brep
parse_details_url (const string& details_url);
bool ci_github::
- handle_check_run_rerequest (gh_check_run_event cr, bool /* warning_success */)
+ handle_check_run_rerequest (gh_check_run_event cr, bool warning_success)
{
HANDLER_DIAG;
l3 ([&]{trace << "check_run event { " << cr << " }";});
+ // Bail out if this is the conclusion check run.
+ //
+ if (cr.check_run.name == conclusion_check_run_name)
+ {
+ l3 ([&]{trace << "ignoring conclusion check_run";});
+ return true;
+ }
+
+ // Get a new installation access token.
+ //
+ auto get_iat = [this, &trace, &error, &cr] ()
+ -> optional<gh_installation_access_token>
+ {
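+ // Generate a JWT to authenticate as the GitHub App.
+ //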
+ optional<string> jwt (generate_jwt (trace, error));
+ if (!jwt)
+ return nullopt;
+
+ optional<gh_installation_access_token> iat (
+ obtain_installation_access_token (cr.installation.id,
+ move (*jwt),
+ error));
+
+ if (iat)
+ l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
+
+ return iat;
+ };
+
+ // Create a new check run, replacing any existing ones with the same name.
+ //
+ // Return the check run on success or nullopt on failure.
+ //
+ auto create_cr = [&cr,
+ &error,
+ warning_success] (const gh_installation_access_token& iat,
+ string name,
+ build_state bs,
+ optional<result_status> rs = nullopt,
+ optional<string> msg = nullopt)
+ -> optional<check_run>
+ {
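+ // Only produce a built result if a result status was supplied (in which
+ // case the message must also be supplied).
+ //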
+ optional<gq_built_result> br;
+ if (rs)
+ {
+ br = gq_built_result (gh_to_conclusion (*rs, warning_success),
+ circle (*rs) + ' ' + ucase (to_string (*rs)),
+ move (*msg));
+ }
+
+ // Use the received details_url if the passed name matches that of the
+ // received check_run.
+ //
+ optional<string> du; // details_url
+ if (name == cr.check_run.name)
+ du = cr.check_run.details_url;
+
+ check_run r;
+ r.name = move (name);
+
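+ // Create the check run via the GitHub API (on success
+ // gq_create_check_run() fills in r's node id and state).
+ //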
+ if (gq_create_check_run (error,
+ r,
+ iat.token,
+ cr.repository.node_id,
+ cr.check_run.check_suite.head_sha,
+ du,
+ bs, move (br)))
+ {
+ return r;
+ }
+ else
+ return nullopt;
+ };
+
// The overall plan is as follows:
//
// 1. Call the rebuild() function to attempt to schedule a rebuild. Pass
@@ -668,20 +741,198 @@ namespace brep
// b. Change the conclusion check run to building (do unconditionally
// to mitigate races).
//
+ // Note that while conceptually we are updating existing check runs, in
+ // practice we have to create new check runs to replace the existing ones
+ // because GitHub does not allow transitioning out of the built state.
+ //
+ // This results in a new node id for each check run, but we have no way
+ // to save them to the service data after the rebuild() call. As a
+ // workaround, when updating the service data we 1) clear the
+ // re-requested check run's node id and set the state_synced flag to true
+ // to signal to build_building() that it needs to create another new
+ // check run; and 2) clear the conclusion check run's node id to induce
+ // build_built() to create another new conclusion check run. The node ids
+ // of these two new check runs will then be saved to the service data.
-
- // Parse the CR's details_url.
+ // Parse the check_run's details_url into a build_id.
//
optional<build_id> bid (parse_details_url (cr.check_run.details_url));
+ if (!bid)
+ {
+ fail << "check run " << cr.check_run.node_id
+ << ": failed to parse details_url";
+ }
- // auto fn = [] (const tenant_service&,
- // build_state)> -> optional<string>
- // {
- // };
+ // The IAT retrieved from the service data.
+ //
+ optional<gh_installation_access_token> iat;
- // optional<build_state> bs (rebuild (*build_db_, retry_, bid, fn));
+ // True if the check run exists in the service data.
+ //
+ bool cr_found (false);
- return true;
+ // Update the service data and retrieve the IAT from it.
+ //
+ // Called by rebuild(), but only if the build is actually restarted.
+ //
+ auto update_sd = [&iat,
+ &cr_found,
+ &error,
+ ni = cr.check_run.node_id] (const tenant_service& ts,
+ build_state)
+ -> optional<string>
+ {
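+ // Parse the stored service data.
+ //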
+ service_data sd;
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "failed to parse service data: " << e;
+ return nullopt;
+ }
+
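+ // Retrieve the IAT from the service data unless it has already been
+ // retrieved (this lambda may be invoked more than once).
+ //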
+ if (!iat)
+ iat = sd.installation_access;
+
+ // If the re-requested check run is found, update it and the service
+ // data.
+ //
+ for (check_run& cr: sd.check_runs)
+ {
+ if (cr.node_id && *cr.node_id == ni)
+ {
+ cr_found = true;
+ cr.state = build_state::queued;
+ sd.completed = false;
+
+ // Clear the check run node ids and set state_synced to true to
+ // cause build_building() and/or build_built() to create new check
+ // runs (see above for details).
+ //
+ cr.node_id = nullopt;
+ cr.state_synced = true;
+ sd.conclusion_node_id = nullopt;
+
+ return sd.json ();
+ }
+ }
+
+ return nullopt;
+ };
+
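+ // Attempt to schedule the rebuild. Note that rebuild() returns the
+ // build's state before the rebuild, or nullopt if the build has expired
+ // (each case is handled below).
+ //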
+ optional<build_state> bs (rebuild (*build_db_, retry_, *bid, update_sd));
+
+ if (!bs)
+ {
+ // The build has expired (most probably because the tenant has been
+ // archived).
+ //
+ // Update the conclusion check run to notify the user (we have to
+ // replace it with a new one because we don't know the existing one's
+ // node id).
+ //
+ optional<gh_installation_access_token> iat (get_iat ());
+ if (!iat)
+ throw server_error ();
+
+ if (optional<check_run> ccr = create_cr (
+ *iat,
+ conclusion_check_run_name,
+ build_state::built,
+ result_status::error,
+ "Unable to rebuild: tenant has been archived or no such build"))
+ {
+ l3 ([&]{trace << "created conclusion check_run { " << *ccr << " }";});
+
+ // Respond with an error, otherwise GitHub will post a message in its
+ // GUI saying "you have successfully requested a rebuild of ..."
+ //
+ throw invalid_request (404);
+ }
+ else
+ {
+ // Log the error and return failure to GitHub which will presumably
+ // indicate this in its GUI.
+ //
+ fail << "check run " << cr.check_run.node_id
+ << ": unable to create conclusion check run";
+ }
+
+ return true;
+ }
+ else if (*bs == build_state::queued)
+ {
+ // The build was already queued so there is nothing to be done. This
+ // might happen if the user clicked "re-run" multiple times before we
+ // managed to update the check run.
+ //
+ return true;
+ }
+ else
+ {
+ // The build has been requeued.
+ //
+ assert (*bs == build_state::building || *bs == build_state::built);
+
+ if (!cr_found)
+ {
+ // Respond with an error, otherwise GitHub will post a message in its
+ // GUI saying "you have successfully requested a rebuild of ..."
+ //
+ fail << "check_run " << cr.check_run.node_id
+ << ": build restarted but check run does not exist "
+ << "in service data";
+ }
+
+ // Get a new IAT if the one from the service data has expired.
+ //
+ assert (iat.has_value ());
+
+ if (system_clock::now () > iat->expires_at)
+ {
+ iat = get_iat ();
+ if (!iat)
+ throw server_error ();
+ }
+
+ // Update (by replacing) the re-requested and conclusion check runs to
+ // queued and building, respectively.
+ //
+ // If either fails we can only log the error but build_building() and/or
+ // build_built() should correct the situation (see above for details).
+ //
+
+ // Update re-requested check run.
+ //
+ if (optional<check_run> rcr = create_cr (*iat,
+ cr.check_run.name,
+ build_state::queued))
+ {
+ l3 ([&]{trace << "created check_run { " << *rcr << " }";});
+ }
+ else
+ {
+ error << "check_run " << cr.check_run.node_id
+ << ": unable to create (to update) check run in queued state";
+ }
+
+ // Update conclusion check run.
+ //
+ if (optional<check_run> ccr = create_cr (*iat,
+ conclusion_check_run_name,
+ build_state::building))
+ {
+ l3 ([&]{trace << "created conclusion check_run { " << *ccr << " }";});
+ }
+ else
+ {
+ error << "check_run " << cr.check_run.node_id
+ << ": unable to create (to update) conclusion check run";
+ }
+
+ return true;
+ }
}
// Miscellaneous pull request facts