Diffstat (limited to 'mod/mod-ci-github.cxx')
-rw-r--r--  mod/mod-ci-github.cxx | 783
1 files changed, 486 insertions, 297 deletions
diff --git a/mod/mod-ci-github.cxx b/mod/mod-ci-github.cxx
index 721c047..67ab2a7 100644
--- a/mod/mod-ci-github.cxx
+++ b/mod/mod-ci-github.cxx
@@ -77,12 +77,45 @@ namespace brep
// Prepare for the CI requests handling, if configured.
//
- if (options_->build_config_specified () &&
- options_->ci_github_app_webhook_secret_specified ())
+ if (options_->ci_github_app_webhook_secret_specified ())
{
+ if (!options_->build_config_specified ())
+ fail << "package building functionality must be enabled";
+
if (!options_->ci_github_app_id_private_key_specified ())
fail << "no app id/private key mappings configured";
+ for (const auto& pr: options_->ci_github_app_id_private_key ())
+ {
+ if (pr.second.relative ())
+ fail << "ci-github-app-id-private-key path must be absolute";
+ }
+
+ // Read the webhook secret from the configured path.
+ //
+ {
+ const path& p (options_->ci_github_app_webhook_secret ());
+
+ if (p.relative ())
+ fail << "ci-github-app-webhook-secret path must be absolute";
+
+ try
+ {
+ ifdstream is (p);
+ getline (is, webhook_secret_, '\0');
+
+ // Trim leading/trailing whitespaces (presumably GitHub does the
+ // same in its web UI).
+ //
+ if (trim (webhook_secret_).empty ())
+ fail << "empty webhook secret in " << p;
+ }
+ catch (const io_error& e)
+ {
+ fail << "unable to read webhook secret from " << p << ": " << e;
+ }
+ }
+
ci_start::init (make_shared<options::ci_start> (*options_));
database_module::init (*options_, options_->build_db_retry ());
@@ -207,10 +240,10 @@ namespace brep
//
try
{
- string h (
- compute_hmac (*options_,
- body.data (), body.size (),
- options_->ci_github_app_webhook_secret ().c_str ()));
+ string h (compute_hmac (*options_,
+ body.data (),
+ body.size (),
+ webhook_secret_.c_str ()));
if (!icasecmp (h, hmac))
{
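
For reference on the hunk above: GitHub computes the X-Hub-Signature-256 header as HMAC-SHA256 over the raw request body keyed by the webhook secret and sends it as "sha256=" followed by the lowercase hex digest; the handler recomputes the digest via brep's compute_hmac() helper and compares it case-insensitively. Below is a minimal standalone sketch of the equivalent computation using OpenSSL. It is an illustration only, not brep's actual compute_hmac() implementation (whose exact signature is internal to brep).

#include <openssl/evp.h>   // EVP_sha256(), EVP_MAX_MD_SIZE
#include <openssl/hmac.h>  // HMAC()

#include <cstdio>
#include <cstddef>
#include <string>

// Return the lowercase hex HMAC-SHA256 digest of [data, data + size) keyed
// by secret (the form in which GitHub sends it, minus the "sha256=" prefix).
//
static std::string
hmac_sha256_hex (const std::string& secret, const char* data, std::size_t size)
{
  unsigned char md[EVP_MAX_MD_SIZE];
  unsigned int n (0);

  HMAC (EVP_sha256 (),
        secret.data (), static_cast<int> (secret.size ()),
        reinterpret_cast<const unsigned char*> (data), size,
        md, &n);

  std::string r;
  for (unsigned int i (0); i != n; ++i)
  {
    char b[3];
    std::snprintf (b, sizeof (b), "%02x", md[i]);
    r += b;
  }

  return r; // Compare case-insensitively against the header value.
}
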
@@ -277,7 +310,7 @@ namespace brep
// Note: "GitHub continues to add new event types and new actions to
// existing event types." As a result we ignore known actions that we are
// not interested in and log and ignore unknown actions. The thinking here
- // is that we want be "notified" of new actions at which point we can
+ // is that we want to be "notified" of new actions at which point we can
// decide whether to ignore them or to handle.
//
if (event == "check_suite")
@@ -307,14 +340,17 @@ namespace brep
if (cs.action == "requested")
{
- return handle_check_suite_request (move (cs), warning_success);
+ // Branch pushes are handled in handle_branch_push() so ignore this
+ // event.
+ //
+ return true;
}
else if (cs.action == "rerequested")
{
// Someone manually requested to re-run all the check runs in this
// check suite. Treat as a new request.
//
- return handle_check_suite_request (move (cs), warning_success);
+ return handle_check_suite_rerequest (move (cs), warning_success);
}
else if (cs.action == "completed")
{
@@ -404,7 +440,7 @@ namespace brep
throw invalid_request (400, move (m));
}
- // Store the app-id webhook query parameter in the gh_pull_request
+ // Store the app-id webhook query parameter in the gh_pull_request_event
// object (see gh_pull_request for an explanation).
//
// When we receive the other webhooks we do check that the app ids in
@@ -494,6 +530,40 @@ namespace brep
return true;
}
}
+ else if (event == "push")
+ {
+ // Push events are triggered by branch pushes, branch creation, and
+ // branch deletion.
+ //
+ gh_push_event ps;
+ try
+ {
+ json::parser p (body.data (), body.size (), "push event");
+
+ ps = gh_push_event (p);
+ }
+ catch (const json::invalid_json_input& e)
+ {
+ string m ("malformed JSON in " + e.name + " request body");
+
+ error << m << ", line: " << e.line << ", column: " << e.column
+ << ", byte offset: " << e.position << ", error: " << e;
+
+ throw invalid_request (400, move (m));
+ }
+
+ // Store the app-id webhook query parameter in the gh_push_event
+ // object (see gh_push_event for an explanation).
+ //
+ // When we receive the other webhooks we do check that the app ids in
+ // the payload and query match but here we have to assume it is valid.
+ //
+ ps.app_id = app_id;
+
+ // Note that the push request event has no action.
+ //
+ return handle_branch_push (move (ps), warning_success);
+ }
else
{
// Log to investigate.
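
The push handler added in the hunk above reads a handful of gh_push_event members (ref, before, after, forced, deleted, repository, installation, plus the app_id copied from the query string). The sketch below shows the rough shape this implies, for orientation only: the names are placeholders, and the real gh_push_event definition lives elsewhere in this module and may differ in types and details.

#include <string>
#include <cstdint>

// Placeholder types; the actual definitions use brep's own gh_* structs.
//
struct push_repository
{
  std::string node_id;   // Used to form the tenant service id.
  std::string clone_url;
};

struct push_installation
{
  std::uint64_t id = 0;
};

struct push_event
{
  std::string ref;      // E.g., "refs/heads/master".
  std::string before;   // Previous head commit SHA.
  std::string after;    // New head commit SHA.
  bool forced = false;  // Forced push.
  bool deleted = false; // Branch deletion.

  push_repository repository;
  push_installation installation;

  std::uint64_t app_id = 0; // Copied from the webhook query parameter.
};
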
@@ -509,52 +579,288 @@ namespace brep
//
static string conclusion_check_run_name ("CONCLUSION");
- // Return the colored circle corresponding to a result_status.
- //
- static string
- circle (result_status rs)
+ bool ci_github::
+ handle_branch_push (gh_push_event ps, bool warning_success)
{
- switch (rs)
+ HANDLER_DIAG;
+
+ l3 ([&]{trace << "push event { " << ps << " }";});
+
+ // Cancel the CI tenant associated with the overwritten/deleted previous
+ // head commit if this is a forced push or a branch deletion.
+ //
+ if (ps.forced || ps.deleted)
{
- case result_status::success: return "\U0001F7E2"; // Green circle.
- case result_status::warning: return "\U0001F7E0"; // Orange circle.
- case result_status::error:
- case result_status::abort:
- case result_status::abnormal: return "\U0001F534"; // Red circle.
+ // Service id that will uniquely identify the CI tenant.
+ //
+ string sid (ps.repository.node_id + ':' + ps.before);
- // Valid values we should never encounter.
+ // Note that it's possible this commit still exists in another branch so
+ // we do refcount-aware cancel.
//
- case result_status::skip:
- case result_status::interrupt:
- throw invalid_argument ("unexpected result_status value: " +
- to_string (rs));
+ if (optional<tenant_service> ts = cancel (error, warn,
+ verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ "ci-github", sid,
+ true /* ref_count */))
+ {
+ l3 ([&]{trace << (ps.forced ? "forced push " + ps.after + " to "
+ : "deletion of ")
+ << ps.ref << ": attempted to cancel CI of previous"
+ << " head commit with tenant_service id " << sid
+ << " (ref_count: " << ts->ref_count << ')';});
+ }
+ else
+ {
+ // It's possible that there was no CI for the previous commit for
+ // various reasons (e.g., CI was not enabled).
+ //
+ l3 ([&]{trace << (ps.forced ? "forced push " + ps.after + " to "
+ : "deletion of ")
+ << ps.ref << ": failed to cancel CI of previous"
+ << " head commit with tenant_service id " << sid;});
+ }
}
- return ""; // Should never reach.
+ if (ps.deleted)
+ return true; // Do nothing further if this was a branch deletion.
+
+ // While we don't need the installation access token in this request,
+ // let's obtain it to flush out any permission issues early. Also, it is
+ // valid for an hour so we will most likely make use of it.
+ //
+ optional<string> jwt (generate_jwt (ps.app_id, trace, error));
+ if (!jwt)
+ throw server_error ();
+
+ optional<gh_installation_access_token> iat (
+ obtain_installation_access_token (ps.installation.id,
+ move (*jwt),
+ error));
+ if (!iat)
+ throw server_error ();
+
+ l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
+
+ // While it would have been nice to cancel CIs of PRs with this branch as
+ // base not to waste resources, there are complications: Firstly, we can
+ // only do this for remote PRs (since local PRs will most likely share the
+ // result with branch push). Secondly, we try to do our best even if the
+ // branch protection rule for head behind is not enabled. In this case, it
+ // would be good to complete the CI. So maybe/later. See also the head
+ // case in handle_pull_request(), where we do cancel remote PRs that are
+ // not shared.
+
+ // Service id that uniquely identifies the CI tenant.
+ //
+ string sid (ps.repository.node_id + ':' + ps.after);
+
+ service_data sd (warning_success,
+ iat->token,
+ iat->expires_at,
+ ps.app_id,
+ ps.installation.id,
+ move (ps.repository.node_id),
+ move (ps.repository.clone_url),
+ service_data::local,
+ false /* pre_check */,
+ false /* re_requested */,
+ ps.after /* check_sha */,
+ ps.after /* report_sha */);
+
+ // Create an unloaded CI tenant, doing nothing if one already exists
+ // (which could've been created by handle_pull_request() or by us as a
+ // result of a push to another branch). Note that the tenant's reference
+ // count is incremented in all cases.
+ //
+ // Note: use no delay since we need to (re)create the synthetic conclusion
+ // check run as soon as possible.
+ //
+ // Note that we use the create() API instead of start() since duplicate
+ // management is not available in start().
+ //
+ // After this call we will start getting the build_unloaded()
+ // notifications until (1) we load the tenant, (2) we cancel it, or (3)
+ // it gets archived after some timeout.
+ //
+ if (!create (error, warn, verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ tenant_service (sid, "ci-github", sd.json ()),
+ chrono::seconds (30) /* interval */,
+ chrono::seconds (0) /* delay */,
+ duplicate_tenant_mode::ignore))
+ {
+ fail << "push " + ps.after + " to " + ps.ref
+ << ": unable to create unloaded CI tenant";
+ }
+
+ return true;
}
- // Make a check run summary from a CI start_result.
+ // Miscellaneous pull request facts
//
- static string
- to_check_run_summary (const optional<ci_start::start_result>& r)
+ // - Although some of the GitHub documentation makes it sound like they
+ // expect check runs to be added to both the PR head commit and the merge
+ // commit, the PR UI does not react to the merge commit's check runs
+ // consistently. It actually seems to be quite broken. The only thing it
+ // does seem to do reliably is blocking the PR merge if the merge commit's
+ // check runs are not successful (i.e., overriding the PR head commit's
+ // check runs). But the UI looks quite messed up generally in this state.
+ //
+ // - When new commits are added to a PR base branch, pull_request.base.sha
+ // does not change, but the test merge commit will be updated to include
+ // the new commits to the base branch.
+ //
+ // - When new commits are added to a PR head branch, pull_request.head.sha
+ // gets updated with the head commit's SHA and check_suite.pull_requests[]
+ // will contain all PRs with this branch as head.
+ //
+ bool ci_github::
+ handle_pull_request (gh_pull_request_event pr, bool warning_success)
{
- string s;
+ HANDLER_DIAG;
- s = "```\n";
- if (r) s += r->message;
- else s += "Internal service error";
- s += "\n```";
+ l3 ([&]{trace << "pull_request event { " << pr << " }";});
- return s;
+ // While we don't need the installation access token in this request,
+ // let's obtain it to flush out any permission issues early. Also, it is
+ // valid for an hour so we will most likely make use of it.
+ //
+ optional<string> jwt (generate_jwt (pr.pull_request.app_id, trace, error));
+ if (!jwt)
+ throw server_error ();
+
+ optional<gh_installation_access_token> iat (
+ obtain_installation_access_token (pr.installation.id,
+ move (*jwt),
+ error));
+ if (!iat)
+ throw server_error ();
+
+ l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
+
+ // Distinguish between local and remote PRs by comparing the head and base
+ // repositories' paths.
+ //
+ service_data::kind_type kind (
+ pr.pull_request.head_path == pr.pull_request.base_path
+ ? service_data::local
+ : service_data::remote);
+
+ // Note that similar to the branch push case above, while it would have
+ // been nice to cancel the previous CI job once the PR head moves (the
+ // "synchronize" event), due to the head sharing problem the previous CI
+ // job might actually still be relevant (in both local and remote PR
+ // cases). So we only do it for the remote PRs and only if the head is not
+ // shared (via tenant reference counting).
+ //
+ if (kind == service_data::remote && pr.action == "synchronize")
+ {
+ if (pr.before)
+ {
+ // Service id that will uniquely identify the CI tenant.
+ //
+ string sid (pr.repository.node_id + ':' + *pr.before);
+
+ if (optional<tenant_service> ts = cancel (error, warn,
+ verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ "ci-github", sid,
+ true /* ref_count */))
+ {
+ l3 ([&]{trace << "pull request " << pr.pull_request.node_id
+ << ": attempted to cancel CI of previous head commit"
+ << " (ref_count: " << ts->ref_count << ')';});
+ }
+ else
+ {
+ // It's possible that there was no CI for the previous commit for
+ // various reasons (e.g., CI was not enabled).
+ //
+ l3 ([&]{trace << "pull request " << pr.pull_request.node_id
+ << ": failed to cancel CI of previous head commit "
+ << "with tenant_service id " << sid;});
+ }
+ }
+ else
+ {
+ error << "pull request " << pr.pull_request.node_id
+ << ": before commit is missing in synchronize event";
+ }
+ }
+
+ // Note: for remote PRs the check_sha will be set later, in
+ // build_unloaded_pre_check(), to test merge commit id.
+ //
+ string check_sha (kind == service_data::local
+ ? pr.pull_request.head_sha
+ : "");
+
+ // Note that PR rebuilds (re-requested) are handled by
+ // handle_check_suite_rerequest().
+ //
+ // Note that, in the case of a remote PR, GitHub will copy the PR head
+ // commit from the head (forked) repository into the base repository. So
+ // the check runs must always be added to the base repository, whether the
+ // PR is local or remote. The head commit refs are located at
+ // refs/pull/<PR-number>/head.
+ //
+ service_data sd (warning_success,
+ move (iat->token),
+ iat->expires_at,
+ pr.pull_request.app_id,
+ pr.installation.id,
+ move (pr.repository.node_id),
+ move (pr.repository.clone_url),
+ kind, true /* pre_check */, false /* re_request */,
+ move (check_sha),
+ move (pr.pull_request.head_sha) /* report_sha */,
+ pr.pull_request.node_id,
+ pr.pull_request.number);
+
+ // Create an unloaded CI tenant for the pre-check phase (during which we
+ // wait for the PR's merge commit and behindness to become available).
+ //
+ // Create with an empty service id so that the generated tenant id is used
+ // instead during the pre-check phase (so as not to clash with a proper
+ // service id for this head commit, potentially created in
+ // handle_branch_push() or as another PR).
+ //
+ tenant_service ts ("", "ci-github", sd.json ());
+
+ // Note: use no delay since we need to start the actual CI (which in turn
+ // (re)creates the synthetic conclusion check run) as soon as possible.
+ //
+ // After this call we will start getting the build_unloaded()
+ // notifications -- which will be routed to build_unloaded_pre_check() --
+ // until we cancel the tenant or it gets archived after some timeout.
+ // (Note that we never actually load this request, we always cancel it;
+ // see build_unloaded_pre_check() for details.)
+ //
+ if (!create (error,
+ warn,
+ verb_ ? &trace : nullptr,
+ *build_db_, retry_,
+ move (ts),
+ chrono::seconds (30) /* interval */,
+ chrono::seconds (0) /* delay */))
+ {
+ fail << "pull request " << pr.pull_request.node_id
+ << ": unable to create unloaded pre-check tenant";
+ }
+
+ return true;
}
bool ci_github::
- handle_check_suite_request (gh_check_suite_event cs, bool warning_success)
+ handle_check_suite_rerequest (gh_check_suite_event cs, bool warning_success)
{
HANDLER_DIAG;
l3 ([&]{trace << "check_suite event { " << cs << " }";});
+ assert (cs.action == "rerequested");
+
// While we don't need the installation access token in this request,
// let's obtain it to flush out any permission issues early. Also, it is
// valid for an hour so we will most likely make use of it.
@@ -572,15 +878,6 @@ namespace brep
l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
- // While it would have been nice to cancel CIs of PRs with this branch as
- // base not to waste resources, there are complications: Firstly, we can
- // only do this for remote PRs (since local PRs will most likely share the
- // result with branch push). Secondly, we try to do our best even if the
- // branch protection rule for head behind is not enabled. In this case, it
- // would be good to complete the CI. So maybe/later. See also the head
- // case in handle_pull_request(), where we do cancel remote PRs that are
- // not shared.
-
// Service id that uniquely identifies the CI tenant.
//
string sid (cs.repository.node_id + ':' + cs.check_suite.head_sha);
@@ -604,9 +901,10 @@ namespace brep
string check_sha;
service_data::kind_type kind;
- bool re_requested (cs.action == "rerequested");
- if (re_requested && !cs.check_suite.head_branch)
+ if (!cs.check_suite.head_branch)
{
+ // Rebuild of remote PR.
+ //
kind = service_data::remote;
if (optional<tenant_data> d = find (*build_db_, "ci-github", sid))
@@ -634,7 +932,7 @@ namespace brep
}
else
{
- // Branch push or local PR rebuild.
+ // Rebuild of branch push or local PR.
//
kind = service_data::local;
check_sha = cs.check_suite.head_sha;
@@ -647,20 +945,15 @@ namespace brep
cs.installation.id,
move (cs.repository.node_id),
move (cs.repository.clone_url),
- kind, false /* pre_check */, re_requested,
+ kind, false /* pre_check */, true /* re_requested */,
move (check_sha),
move (cs.check_suite.head_sha) /* report_sha */);
- // If this check suite is being re-run, replace the existing CI tenant if
- // it exists; otherwise create a new one, doing nothing if a tenant
- // already exists (which could've been created by handle_pull_request()).
+ // Replace the existing CI tenant if it exists.
//
// Note that GitHub UI does not allow re-running the entire check suite
// until all the check runs are completed.
//
- duplicate_tenant_mode dtm (re_requested
- ? duplicate_tenant_mode::replace
- : duplicate_tenant_mode::ignore);
// Create an unloaded CI tenant.
//
@@ -681,7 +974,7 @@ namespace brep
tenant_service (sid, "ci-github", sd.json ()),
chrono::seconds (30) /* interval */,
chrono::seconds (0) /* delay */,
- dtm));
+ duplicate_tenant_mode::replace));
if (!pr)
{
@@ -689,8 +982,7 @@ namespace brep
<< ": unable to create unloaded CI tenant";
}
- if (dtm == duplicate_tenant_mode::replace &&
- pr->second == duplicate_tenant_result::created)
+ if (pr->second == duplicate_tenant_result::created)
{
error << "check suite " << cs.check_suite.node_id
<< ": re-requested but tenant_service with id " << sid
@@ -819,6 +1111,45 @@ namespace brep
return true;
}
+ // Return the colored circle corresponding to a result_status.
+ //
+ static string
+ circle (result_status rs)
+ {
+ switch (rs)
+ {
+ case result_status::success: return "\U0001F7E2"; // Green circle.
+ case result_status::warning: return "\U0001F7E0"; // Orange circle.
+ case result_status::error:
+ case result_status::abort:
+ case result_status::abnormal: return "\U0001F534"; // Red circle.
+
+ // Valid values we should never encounter.
+ //
+ case result_status::skip:
+ case result_status::interrupt:
+ throw invalid_argument ("unexpected result_status value: " +
+ to_string (rs));
+ }
+
+ return ""; // Should never reach.
+ }
+
+ // Make a check run summary from a CI start_result.
+ //
+ static string
+ to_check_run_summary (const optional<ci_start::start_result>& r)
+ {
+ string s;
+
+ s = "```\n";
+ if (r) s += r->message;
+ else s += "Internal service error";
+ s += "\n```";
+
+ return s;
+ }
+
// Create a gq_built_result.
//
// Throw invalid_argument in case of invalid result_status.
@@ -920,6 +1251,9 @@ namespace brep
ccr.name = conclusion_check_run_name;
+ const gh_installation_access_token* iat (nullptr);
+ optional<gh_installation_access_token> new_iat;
+
// Load the service data, failing the check runs if the tenant has been
// archived.
//
@@ -930,108 +1264,113 @@ namespace brep
//
string sid (repo_node_id + ':' + head_sha);
- if (optional<tenant_data> d = find (*build_db_, "ci-github", sid))
+ optional<tenant_data> d (find (*build_db_, "ci-github", sid));
+ if (!d)
{
- if (d->archived) // Tenant is archived
- {
- // Fail (re-create) the check runs.
- //
- optional<gh_installation_access_token> iat (get_iat ());
- if (!iat)
- throw server_error ();
+ // No such tenant.
+ //
+ fail << "check run " << cr.check_run.node_id
+ << " re-requested but tenant_service with id " << sid
+ << " does not exist";
+ }
- gq_built_result br (
- make_built_result (
- result_status::error, warning_success,
- "Unable to rebuild individual configuration: build has "
- "been archived"));
+ tenant_service& ts (d->service);
- // Try to update the conclusion check run even if the first update
- // fails.
- //
- bool f (false); // Failed.
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ fail << "failed to parse service data: " << e;
+ }
- if (gq_create_check_run (error, bcr, iat->token,
- repo_node_id, head_sha,
- cr.check_run.details_url,
- build_state::built, br))
- {
- l3 ([&]{trace << "created check_run { " << bcr << " }";});
- }
- else
- {
- error << "check_run " << cr.check_run.node_id
- << ": unable to re-create check run";
- f = true;
- }
+ if (!sd.conclusion_node_id)
+ fail << "no conclusion node id for check run " << cr.check_run.node_id;
- if (gq_create_check_run (error, ccr, iat->token,
- repo_node_id, head_sha,
- nullopt /* details_url */,
- build_state::built, move (br)))
- {
- l3 ([&]{trace << "created conclusion check_run { " << ccr << " }";});
- }
- else
- {
- error << "check_run " << cr.check_run.node_id
- << ": unable to re-create conclusion check run";
- f = true;
- }
+ tenant_id = d->tenant_id;
- // Fail the handler if either of the check runs could not be
- // updated.
- //
- if (f)
- throw server_error ();
+ // Get a new IAT if the one from the service data has expired.
+ //
+ if (system_clock::now () > sd.installation_access.expires_at)
+ {
+ if ((new_iat = get_iat ()))
+ iat = &*new_iat;
+ else
+ throw server_error ();
+ }
+ else
+ iat = &sd.installation_access;
- return true;
- }
+ if (d->archived) // Tenant is archived
+ {
+ // Fail (update) the check runs.
+ //
+ gq_built_result br (
+ make_built_result (
+ result_status::error, warning_success,
+ "Unable to rebuild individual configuration: build has "
+ "been archived"));
+
+ // Try to update the conclusion check run even if the first update
+ // fails.
+ //
+ bool f (false); // Failed.
- tenant_service& ts (d->service);
+ if (gq_update_check_run (error, bcr, iat->token,
+ repo_node_id, cr.check_run.node_id,
+ nullopt /* details_url */,
+ build_state::built, br))
+ {
+ l3 ([&]{trace << "updated check_run { " << bcr << " }";});
+ }
+ else
+ {
+ error << "check_run " << cr.check_run.node_id
+ << ": unable to update check run";
+ f = true;
+ }
- try
+ if (gq_update_check_run (error, ccr, iat->token,
+ repo_node_id, *sd.conclusion_node_id,
+ nullopt /* details_url */,
+ build_state::built, move (br)))
{
- sd = service_data (*ts.data);
+ l3 ([&]{trace << "updated conclusion check_run { " << ccr << " }";});
}
- catch (const invalid_argument& e)
+ else
{
- fail << "failed to parse service data: " << e;
+ error << "check_run " << cr.check_run.node_id
+ << ": unable to update conclusion check run";
+ f = true;
}
- tenant_id = d->tenant_id;
- }
- else
- {
- // No such tenant.
+ // Fail the handler if either of the check runs could not be
+ // updated.
//
- fail << "check run " << cr.check_run.node_id
- << " re-requested but tenant_service with id " << sid
- << " does not exist";
- }
- }
+ if (f)
+ throw server_error ();
- // Get a new IAT if the one from the service data has expired.
- //
- const gh_installation_access_token* iat (nullptr);
- optional<gh_installation_access_token> new_iat;
-
- if (system_clock::now () > sd.installation_access.expires_at)
- {
- if ((new_iat = get_iat ()))
- iat = &*new_iat;
- else
- throw server_error ();
+ return true;
+ }
}
- else
- iat = &sd.installation_access;
// Fail if it's the conclusion check run that is being re-requested.
//
+ // Expect that if the user selects re-run all failed checks we will
+ // receive multiple check runs, one of which will be the conclusion. And
+ // if we fail it while it happens to arrive last, then we will end up in
+ // the wrong overall state (real check run is building while conclusion is
+ // failed). It seems the best we can do is to ignore it: if the user did
+ // request a rebuild of the conclusion check run explicitly, there will be
+ // no change, which is not ideal but is still an indication that this
+ // operation is not supported.
+ //
if (cr.check_run.name == conclusion_check_run_name)
{
l3 ([&]{trace << "re-requested conclusion check_run";});
+#if 0
if (!sd.conclusion_node_id)
fail << "no conclusion node id for check run " << cr.check_run.node_id;
@@ -1054,6 +1393,7 @@ namespace brep
<< ": unable to update conclusion check run "
<< *sd.conclusion_node_id;
}
+#endif
return true;
}
@@ -1310,161 +1650,6 @@ namespace brep
return true;
}
- // Miscellaneous pull request facts
- //
- // - Although some of the GitHub documentation makes it sound like they
- // expect check runs to be added to both the PR head commit and the merge
- // commit, the PR UI does not react to the merge commit's check runs
- // consistently. It actually seems to be quite broken. The only thing it
- // does seem to do reliably is blocking the PR merge if the merge commit's
- // check runs are not successful (i.e., overriding the PR head commit's
- // check runs). But the UI looks quite messed up generally in this state.
- //
- // - When new commits are added to a PR base branch, pull_request.base.sha
- // does not change, but the test merge commit will be updated to include
- // the new commits to the base branch.
- //
- // - When new commits are added to a PR head branch, pull_request.head.sha
- // gets updated with the head commit's SHA and check_suite.pull_requests[]
- // will contain all PRs with this branch as head.
- //
- bool ci_github::
- handle_pull_request (gh_pull_request_event pr, bool warning_success)
- {
- HANDLER_DIAG;
-
- l3 ([&]{trace << "pull_request event { " << pr << " }";});
-
- // While we don't need the installation access token in this request,
- // let's obtain it to flush out any permission issues early. Also, it is
- // valid for an hour so we will most likely make use of it.
- //
- optional<string> jwt (generate_jwt (pr.pull_request.app_id, trace, error));
- if (!jwt)
- throw server_error ();
-
- optional<gh_installation_access_token> iat (
- obtain_installation_access_token (pr.installation.id,
- move (*jwt),
- error));
- if (!iat)
- throw server_error ();
-
- l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
-
- // Distinguish between local and remote PRs by comparing the head and base
- // repositories' paths.
- //
- service_data::kind_type kind (
- pr.pull_request.head_path == pr.pull_request.base_path
- ? service_data::local
- : service_data::remote);
-
- // Note that similar to the branch push case above, while it would have
- // been nice to cancel the previous CI job once the PR head moves (the
- // "synchronize" event), due to the head sharing problem the previous CI
- // job might actually still be relevant (in both local and remote PR
- // cases). So we only do it for the remote PRs and only if the head is not
- // shared (via tenant reference counting).
- //
- if (kind == service_data::remote && pr.action == "synchronize")
- {
- if (pr.before)
- {
- // Service id that will uniquely identify the CI tenant.
- //
- string sid (pr.repository.node_id + ':' + *pr.before);
-
- if (optional<tenant_service> ts = cancel (error, warn,
- verb_ ? &trace : nullptr,
- *build_db_, retry_,
- "ci-github", sid,
- true /* ref_count */))
- {
- l3 ([&]{trace << "pull request " << pr.pull_request.node_id
- << ": attempted to cancel CI of previous head commit"
- << " (ref_count: " << ts->ref_count << ')';});
- }
- else
- {
- // It's possible that there was no CI for the previous commit for
- // various reasons (e.g., CI was not enabled).
- //
- l3 ([&]{trace << "pull request " << pr.pull_request.node_id
- << ": failed to cancel CI of previous head commit "
- << "with tenant_service id " << sid;});
- }
- }
- else
- {
- error << "pull request " << pr.pull_request.node_id
- << ": before commit is missing in synchronize event";
- }
- }
-
- // Note: for remote PRs the check_sha will be set later, in
- // build_unloaded_pre_check(), to test merge commit id.
- //
- string check_sha (kind == service_data::local
- ? pr.pull_request.head_sha
- : "");
-
- // Note that PR rebuilds (re-requested) are handled by
- // handle_check_suite_request().
- //
- // Note that, in the case of a remote PR, GitHub will copy the PR head
- // commit from the head (forked) repository into the base repository. So
- // the check runs must always be added to the base repository, whether the
- // PR is local or remote. The head commit refs are located at
- // refs/pull/<PR-number>/head.
- //
- service_data sd (warning_success,
- move (iat->token),
- iat->expires_at,
- pr.pull_request.app_id,
- pr.installation.id,
- move (pr.repository.node_id),
- move (pr.repository.clone_url),
- kind, true /* pre_check */, false /* re_request */,
- move (check_sha),
- move (pr.pull_request.head_sha) /* report_sha */,
- pr.pull_request.node_id,
- pr.pull_request.number);
-
- // Create an unloaded CI tenant for the pre-check phase (during which we
- // wait for the PR's merge commit and behindness to become available).
- //
- // Create with an empty service id so that the generated tenant id is used
- // instead during the pre-check phase (so as not to clash with a proper
- // service id for this head commit, potentially created in
- // handle_check_suite() or as another PR).
- //
- tenant_service ts ("", "ci-github", sd.json ());
-
- // Note: use no delay since we need to start the actual CI (which in turn
- // (re)creates the synthetic conclusion check run) as soon as possible.
- //
- // After this call we will start getting the build_unloaded()
- // notifications -- which will be routed to build_unloaded_pre_check() --
- // until we cancel the tenant or it gets archived after some timeout.
- // (Note that we never actually load this request, we always cancel it;
- // see build_unloaded_pre_check() for details.)
- //
- if (!create (error,
- warn,
- verb_ ? &trace : nullptr,
- *build_db_, retry_,
- move (ts),
- chrono::seconds (30) /* interval */,
- chrono::seconds (0) /* delay */))
- {
- fail << "pull request " << pr.pull_request.node_id
- << ": unable to create unloaded pre-check tenant";
- }
-
- return true;
- }
-
function<optional<string> (const string&, const tenant_service&)> ci_github::
build_unloaded (const string& ti,
tenant_service&& ts,
@@ -1578,7 +1763,8 @@ namespace brep
// Create an unloaded CI tenant, doing nothing if one already exists
// (which could've been created by a head branch push or another PR
- // sharing the same head commit).
+ // sharing the same head commit). Note that the tenant's reference count
+ // is incremented in all cases.
//
// Note: use no delay since we need to (re)create the synthetic
// conclusion check run as soon as possible.
@@ -2110,11 +2296,14 @@ namespace brep
//
bs.push_back (b);
- crs.emplace_back (move (bid),
- gh_check_run_name (b, &hs),
- nullopt, /* node_id */
- build_state::queued,
- false /* state_synced */);
+ crs.push_back (
+ check_run {move (bid),
+ gh_check_run_name (b, &hs),
+ nullopt, /* node_id */
+ build_state::queued,
+ false /* state_synced */,
+ nullopt /* status */,
+ nullopt /* details_url */});
}
}
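
The last hunk switches from emplace_back() to push_back() with an aggregate initializer because the check_run value now carries two additional members (status and details_url). The sketch below shows the member layout implied by that initializer, for orientation only; the enums are simplified stand-ins (their values taken from elsewhere in this diff) and the real check_run definition in this module's service data may differ.

#include <optional>
#include <string>

// Simplified stand-ins for brep's own enums.
//
enum class build_state {queued, building, built};
enum class result_status {success, warning, error, abort, abnormal, skip, interrupt};

// Member order mirrors the aggregate initializer in the hunk above.
//
struct check_run
{
  std::string build_id;                    // bid
  std::string name;                        // gh_check_run_name (b, &hs)
  std::optional<std::string> node_id;      // GitHub check run node id.
  build_state state = build_state::queued;
  bool state_synced = false;               // Synchronized with GitHub?
  std::optional<result_status> status;     // Final result, if built.
  std::optional<std::string> details_url;
};
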