author     Boris Kolpackov <boris@codesynthesis.com>  2024-11-27 11:50:58 +0200
committer  Boris Kolpackov <boris@codesynthesis.com>  2024-11-27 11:50:58 +0200
commit     7f06026024bd0c81f16f2cbff85712750c41549e (patch)
tree       89cef5667893577afbd238c34586294571613461 /mod/mod-ci-github.cxx
parent     d3a90bc3af694cb3efd7ad05e906b814bb7f2524 (diff)
Sketch
Diffstat (limited to 'mod/mod-ci-github.cxx')
-rw-r--r--  mod/mod-ci-github.cxx  322
1 file changed, 14 insertions, 308 deletions
diff --git a/mod/mod-ci-github.cxx b/mod/mod-ci-github.cxx
index 46cd0cc..3bfee41 100644
--- a/mod/mod-ci-github.cxx
+++ b/mod/mod-ci-github.cxx
@@ -1166,312 +1166,6 @@ namespace brep
return true;
}
- // @@ TMP
- //
-#if 0
- bool ci_github::
- handle_check_run_rerequest (const gh_check_run_event& cr,
- bool warning_success)
- {
- HANDLER_DIAG;
-
- l3 ([&]{trace << "check_run event { " << cr << " }";});
-
- // Fail if this is the conclusion check run.
- //
- if (cr.check_run.name == conclusion_check_run_name)
- {
- // @@ Fail conclusion check run with appropriate message and return
- // true.
-
- l3 ([&]{trace << "ignoring conclusion check_run";});
-
- // 422 Unprocessable Content: The request was well-formed (i.e.,
- // syntactically correct) but could not be processed.
- //
- throw invalid_request (422, "Conclusion check run cannot be rebuilt");
- }
-
- // Get a new installation access token.
- //
- auto get_iat = [this, &trace, &error, &cr] ()
- -> optional<gh_installation_access_token>
- {
- optional<string> jwt (generate_jwt (trace, error));
- if (!jwt)
- return nullopt;
-
- optional<gh_installation_access_token> iat (
- obtain_installation_access_token (cr.installation.id,
- move (*jwt),
- error));
-
- if (iat)
- l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
-
- return iat;
- };
-
- // Create a new conclusion check run, replacing the existing one.
- //
- // Return the check run on success or nullopt on failure.
- //
- auto create_conclusion_cr =
- [&cr, &error, warning_success] (const gh_installation_access_token& iat,
- build_state bs,
- optional<result_status> rs = nullopt,
- optional<string> msg = nullopt)
- -> optional<check_run>
- {
- optional<gq_built_result> br;
- if (rs)
- {
- assert (msg);
-
- br = make_built_result (*rs, warning_success, move (*msg));
- }
-
- check_run r;
- r.name = conclusion_check_run_name;
-
- if (gq_create_check_run (error, r, iat.token,
- rni, hs,
- nullopt /* details_url */,
- bs, move (br)))
- {
- return r;
- }
- else
- return nullopt;
- };
-
- // The overall plan is as follows:
- //
- // 1. Call the rebuild() function to attempt to schedule a rebuild. Pass
- // the update function that does the following (if called):
- //
- // a. Update the check run being rebuilt (which may not exist).
- //
- // b. Clear the completed flag if true.
- //
- // c. "Return" the service data to be used after the call.
- //
- // 2. If the result of rebuild() indicates the tenant is archived, fail
- // the conclusion check run with appropriate diagnostics.
- //
- // 3. If the original state is queued, then no rebuild was scheduled and we do
- // nothing.
- //
- // 4. Otherwise (the original state is building or built):
- //
- // a. Change the check run state to queued.
- //
- // b. Change the conclusion check run to building (do unconditionally
- // to mitigate races).
- //
- // Note that while conceptually we are updating existing check runs, in
- // practice we have to create new check runs to replace the existing ones
- // because GitHub does not allow transitioning out of the built state.
- //
- // This results in a new node id for each check run, but we can't save them
- // to the service data after the rebuild() call. As a workaround, when
- // updating the service data we 1) clear the re-requested check run's node
- // id and set the state_synced flag to true to signal to build_building()
- // and build_built() that it needs to create a new check run; and 2) clear
- // the conclusion check run's node id to cause build_built() to create a
- // new conclusion check run. And these two check runs' node ids will be
- // saved to the service data.
-
- // Parse the check_run's details_url to extract build id.
- //
- // While this is a bit hackish, there doesn't seem to be a better way
- // (like associating custom data with a check run). Note that the GitHub
- // UI only allows rebuilding completed check runs, so the details URL
- // should be there.
- //
- optional<build_id> bid (parse_details_url (cr.check_run.details_url));
- if (!bid)
- {
- fail << "check run " << cr.check_run.node_id
- << ": failed to extract build id from details_url";
- }
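
parse_details_url() itself is not part of this hunk, so here is a minimal sketch of the kind of extraction it performs. The URL layout below (a "build-id" query parameter) is an assumption made purely for illustration; the diff only shows that a build id is recoverable from the details URL, not the actual scheme brep uses.

#include <optional>
#include <string>

// Hypothetical sketch: pull a build id out of a details URL. The
// "build-id=" parameter name is invented for this example; brep's real
// URL format may differ.
static std::optional<std::string>
parse_details_url_sketch (const std::string& url)
{
  const std::string key ("build-id=");

  std::size_t p (url.find (key));
  if (p == std::string::npos)
    return std::nullopt; // Foreign or malformed URL: caller should fail.

  p += key.size ();
  std::size_t e (url.find ('&', p)); // End of the parameter value.

  return url.substr (p, e == std::string::npos ? std::string::npos : e - p);
}
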
-
- // The IAT retrieved from the service data.
- //
- optional<gh_installation_access_token> iat;
-
- // True if the check run exists in the service data.
- //
- bool cr_found (false);
-
- // Update the state of the check run in the service data. Return (via
- // captured references) the IAT and whether the check run was found.
- //
- // Called by rebuild(), but only if the build is actually restarted.
- //
- auto update_sd = [&iat,
- &cr_found,
- &error,
- &cr] (const tenant_service& ts, build_state)
- -> optional<string>
- {
- // NOTE: this lambda may be called repeatedly (e.g., due to transaction
- // being aborted) and so should not move out of its captures.
-
- service_data sd;
- try
- {
- sd = service_data (*ts.data);
- }
- catch (const invalid_argument& e)
- {
- error << "failed to parse service data: " << e;
- return nullopt;
- }
-
- if (!iat)
- iat = sd.installation_access;
-
- // If the re-requested check run is found, update it in the service
- // data.
- //
- const string& nid (cr.check_run.node_id);
-
- for (check_run& cr: sd.check_runs)
- {
- if (cr.node_id && *cr.node_id == nid)
- {
- cr_found = true;
- cr.state = build_state::queued;
- sd.completed = false;
-
- // Clear the check run node ids and set state_synced to true to
- // cause build_building() and/or build_built() to create new check
- // runs (see the plan above for details).
- //
- cr.node_id = nullopt;
- cr.state_synced = true;
- sd.conclusion_node_id = nullopt;
-
- return sd.json ();
- }
- }
-
- return nullopt;
- };
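
The NOTE at the top of this lambda is worth a concrete illustration: since the update function may run more than once (an aborted transaction gets retried), it must not consume its captures. A self-contained example of the pitfall, with made-up data:

#include <iostream>
#include <string>
#include <utility>

int main ()
{
  std::string msg ("rebuild requested");

  // GOOD: copies the capture; safe to call repeatedly.
  auto good = [&msg] () {return msg;};
  std::cout << good () << '\n'; // rebuild requested
  std::cout << good () << '\n'; // rebuild requested

  // BAD: the first call moves msg out; a retry then sees msg in a
  // moved-from (typically empty) state.
  auto bad = [&msg] () {return std::move (msg);};
  std::cout << bad () << '\n';  // rebuild requested
  std::cout << bad () << '\n';  // typically prints an empty line
}
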
-
- optional<build_state> bs (rebuild (*build_db_, retry_, *bid, update_sd));
-
- if (!bs)
- {
- // Build has expired (most probably the tenant has been archived).
- //
- // Update the conclusion check run to notify the user (we have to
- // replace it with a new one because we don't know the existing one's
- // node id).
- //
- optional<gh_installation_access_token> iat (get_iat ());
- if (!iat)
- throw server_error ();
-
- if (optional<check_run> ccr = create_conclusion_cr (
- *iat,
- build_state::built,
- result_status::error,
- "Unable to rebuild: tenant has been archived or no such build"))
- {
- l3 ([&]{trace << "created conclusion check_run { " << *ccr << " }";});
- }
- else
- {
- // Log the error and return failure to GitHub which will presumably
- // indicate this in its GUI.
- //
- fail << "check run " << cr.check_run.node_id
- << ": unable to create conclusion check run";
- }
- }
- else if (*bs == build_state::queued)
- {
- // The build was already queued so there is nothing to do. This might happen
- // if the user clicked "re-run" multiple times before we managed to
- // update the check run.
- }
- else
- {
- // The build has been requeued.
- //
- assert (*bs == build_state::building || *bs == build_state::built);
-
- if (!cr_found)
- {
- // Respond with an error; otherwise GitHub will post a message in its
- // GUI saying "you have successfully requested a rebuild of ...".
- //
- fail << "check_run " << cr.check_run.node_id
- << ": build restarted but check run does not exist "
- << "in service data";
- }
-
- // Get a new IAT if the one from the service data has expired.
- //
- assert (iat.has_value ());
-
- if (system_clock::now () > iat->expires_at)
- {
- iat = get_iat ();
- if (!iat)
- throw server_error ();
- }
-
- // Update (by replacing) the re-requested and conclusion check runs to
- // queued and building, respectively.
- //
- // If either fails we can only log the error but build_building() and/or
- // build_built() should correct the situation (see above for details).
- //
-
- // Update re-requested check run.
- //
- {
- check_run ncr; // New check run.
- ncr.name = cr.check_run.name;
-
- if (gq_create_check_run (error,
- ncr,
- iat->token,
- cr.repository.node_id,
- cr.check_run.check_suite.head_sha,
- cr.check_run.details_url,
- build_state::queued))
- {
- l3 ([&]{trace << "created check_run { " << ncr << " }";});
- }
- else
- {
- error << "check_run " << cr.check_run.node_id
- << ": unable to create (to update) check run in queued state";
- }
- }
-
- // Update conclusion check run.
- //
- if (optional<check_run> ccr =
- create_conclusion_cr (*iat, build_state::building))
- {
- l3 ([&]{trace << "created conclusion check_run { " << *ccr << " }";});
- }
- else
- {
- error << "check_run " << cr.check_run.node_id
- << ": unable to create (to update) conclusion check run";
- }
- }
-
- return true;
- }
-#endif
-
// Miscellaneous pull request facts
//
// - Although some of the GitHub documentation makes it sound like they
@@ -1629,6 +1323,8 @@ namespace brep
build_unloaded (tenant_service&& ts,
const diag_epilogue& log_writer) const noexcept
{
+ // NOTE: this function is noexcept and should not throw.
+
NOTIFICATION_DIAG (log_writer);
service_data sd;
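
This hunk and the ones that follow add the same reminder, so the convention it documents deserves one sketch: these notification callbacks are noexcept, so an escaping exception would take down the process via std::terminate(). The pattern is therefore to keep all throwing work inside a try block and report failures through the diagnostics machinery (here set up by NOTIFICATION_DIAG). The names and logging below are stand-ins, not brep's actual interfaces:

#include <exception>
#include <iostream>

// Stand-in for a tenant-service notification: noexcept, so nothing may
// be allowed to propagate out of it.
static void
notify_sketch () noexcept
{
  try
  {
    // Parse service data, call the GitHub API, update check runs...
    // any of which may throw.
  }
  catch (const std::exception& e)
  {
    // Report and swallow: returning normally is the only safe exit.
    std::cerr << "notification failed: " << e.what () << '\n';
  }
  catch (...)
  {
    std::cerr << "notification failed: unknown exception" << '\n';
  }
}

int main ()
{
  notify_sketch ();
}
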
@@ -1652,6 +1348,8 @@ namespace brep
service_data&& sd,
const diag_epilogue& log_writer) const noexcept
{
+ // NOTE: this function is noexcept and should not throw.
+
NOTIFICATION_DIAG (log_writer);
// We get here for PRs only (but both local and remote). The overall
@@ -1795,6 +1493,8 @@ namespace brep
service_data&& sd,
const diag_epilogue& log_writer) const noexcept
{
+ // NOTE: this function is noexcept and should not throw.
+
NOTIFICATION_DIAG (log_writer);
// Load the tenant, which is essentially the same for both branch push and
@@ -2080,6 +1780,8 @@ namespace brep
const build_queued_hints& hs,
const diag_epilogue& log_writer) const noexcept
{
+ // NOTE: this function is noexcept and should not throw.
+
NOTIFICATION_DIAG (log_writer);
service_data sd;
@@ -2248,6 +1950,8 @@ namespace brep
const build& b,
const diag_epilogue& log_writer) const noexcept
{
+ // NOTE: this function is noexcept and should not throw.
+
NOTIFICATION_DIAG (log_writer);
service_data sd;
@@ -2394,11 +2098,13 @@ namespace brep
const build& b,
const diag_epilogue& log_writer) const noexcept
{
+ // NOTE: this function is noexcept and should not throw.
+
+ NOTIFICATION_DIAG (log_writer);
+
// @@ TODO Include service_data::event_node_id and perhaps ts.id in
// diagnostics? E.g. when failing to update check runs we print the
// build ID only.
- //
- NOTIFICATION_DIAG (log_writer);
service_data sd;
try