Diffstat (limited to 'mod')
-rw-r--r--   mod/mod-ci-github-gq.cxx   249
-rw-r--r--   mod/mod-ci-github-gq.hxx    12
-rw-r--r--   mod/mod-ci-github.cxx      158
-rw-r--r--   mod/mod-ci-github.hxx       21
4 files changed, 384 insertions(+), 56 deletions(-)
diff --git a/mod/mod-ci-github-gq.cxx b/mod/mod-ci-github-gq.cxx
index e5ea0c5..bcf9e55 100644
--- a/mod/mod-ci-github-gq.cxx
+++ b/mod/mod-ci-github-gq.cxx
@@ -114,10 +114,12 @@ namespace brep
}
else if (p.name () == "errors")
{
- // Don't stop parsing because the error semantics depends on whether
- // or not `data` is present.
+ // Skip the errors object but don't stop parsing because the error
+ // semantics depends on whether or not `data` is present.
//
err = true; // Handled below.
+
+ p.next_expect_value_skip ();
}
else
{
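For context, the change above relies on the GraphQL convention that a response may contain `data`, `errors`, or both (both meaning partial success), so whether the errors are fatal can only be decided once the parser knows if `data` was also present. A minimal sketch of that convention, using the same pull-parser calls that appear in this diff and its sibling files (the two-argument next_expect() overload, the parse_data callback, and the final error handling are assumptions of this sketch, not code from this commit; the usual mod-ci-github-gq.cxx context, i.e., json::parser p and using event = json::event, is assumed to be in scope):

  bool dat (false), err (false);

  p.next_expect (event::begin_object);

  while (p.next_expect (event::name, event::end_object))
  {
    if (p.name () == "data")
    {
      dat = true;
      parse_data (p); // Caller-supplied parser for the data member.
    }
    else if (p.name () == "errors")
    {
      // Skip the errors value but remember that it was present; whether it
      // is fatal depends on whether data is also present.
      //
      err = true;
      p.next_expect_value_skip ();
    }
    else
      p.next_expect_value_skip (); // Skip an unexpected member (sketch only).
  }

  if (err && !dat)
    throw runtime_error ("GraphQL response contained errors and no data");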
@@ -595,6 +597,10 @@ namespace brep
//
bool found = false;
+ // Non-fatal error message issued during the parse.
+ //
+ string parse_error;
+
// The response value. Absent if the merge commit is still being
// generated.
//
@@ -622,13 +628,23 @@ namespace brep
value = move (oid);
}
- else if (ma == "CONFLICTING")
+ else
{
- value = "";
- }
- else if (ma == "UNKNOWN")
- {
- // Still being generated; leave value absent.
+ if (ma == "CONFLICTING")
+ value = "";
+        else if (ma == "UNKNOWN")
+ ; // Still being generated; leave value absent.
+ else
+ {
+ parse_error = "unexpected mergeable value '" + ma + '\'';
+
+ // Carry on as if it were UNKNOWN.
+ }
+
+ // Skip the merge commit ID (which should be null).
+ //
+ p.next_expect_name ("potentialMergeCommit");
+ p.next_expect_value_skip ();
}
p.next_expect (event::end_object); // node
@@ -650,6 +666,8 @@ namespace brep
{
if (!rs.found)
error << "pull request '" << nid << "' not found";
+ else if (!rs.parse_error.empty ())
+ error << rs.parse_error;
return rs.value;
}
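The net effect of the changes above is that gq_pull_request_mergeable() now yields three distinct outcomes: the returned optional is absent while the merge commit is still being generated (or if an error was issued), empty if the pull request is CONFLICTING, and the merge commit ID otherwise. A hypothetical caller (the local names iat, error, and pr_node_id are illustrative, not from this diff) might interpret the result along these lines:

  optional<string> mc (
    gq_pull_request_mergeable (error, iat, pr_node_id));

  if (!mc)
  {
    // Mergeability is still being computed by GitHub (or diagnostics were
    // already issued); check again later.
  }
  else if (mc->empty ())
  {
    // Merge conflicts: no merge commit can be generated.
  }
  else
  {
    // *mc is the merge commit ID (OID) to build instead of the head commit.
  }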
@@ -685,22 +703,205 @@ namespace brep
return nullopt;
}
- // bool
- // gq_fetch_branch_open_pull_requests ()
- // {
- // // query {
- // // repository(owner:"francoisk" name:"libb2")
- // // {
- // // pullRequests (last:100 states:OPEN baseRefName:"master") {
- // // edges {
- // // node {
- // // id
- // // }
- // // }
- // // }
- // // }
- // // }
- // }
+ // Serialize a GraphQL query that fetches the last 100 (the maximum per
+ // page) open pull requests with the specified base branch from the
+ // repository with the specified node ID.
+ //
+ // @@ TMP Should we support more/less than 100?
+ //
+ // Doing more (or even 100) could waste a lot of CI resources on
+ // re-testing stale PRs. Maybe we should create a failed synthetic
+ // conclusion check run asking the user to re-run the CI manually if/when
+ // needed.
+ //
+  // Note that we cannot request more than 100 at a time (that would require
+  // multiple requests with paging, etc.).
+ //
+ // Also, maybe we should limit the result to "fresh" PRs, e.g., those
+ // that have been "touched" in the last week.
+ //
+ // Example query:
+ //
+ // query {
+ // node(id:"R_kgDOLc8CoA")
+ // {
+ // ... on Repository {
+ // pullRequests (last:100 states:OPEN baseRefName:"master") {
+ // edges {
+ // node {
+ // id
+ // number
+ // headRefOid
+ // }
+ // }
+ // }
+ // }
+ // }
+ // }
+ //
+ static string
+ gq_query_fetch_open_pull_requests (const string& rid, const string& br)
+ {
+ ostringstream os;
+
+ os << "query {" << '\n'
+ << " node(id:" << gq_str (rid) << ") {" << '\n'
+ << " ... on Repository {" << '\n'
+ << " pullRequests (last:100" << '\n'
+ << " states:" << gq_enum ("OPEN") << '\n'
+ << " baseRefName:" << gq_str (br) << '\n'
+ << " ) {" << '\n'
+ << " totalCount" << '\n'
+ << " edges { node { id number headRefOid } }" << '\n'
+ << " }" << '\n'
+ << " }" << '\n'
+ << " }" << '\n'
+ << "}" << '\n';
+
+ return os.str ();
+ }
+
+ optional<vector<gh_pull_request>>
+ gq_fetch_open_pull_requests (const basic_mark& error,
+ const string& iat,
+ const string& nid,
+ const string& br)
+ {
+ string rq (
+ gq_serialize_request (gq_query_fetch_open_pull_requests (nid, br)));
+
+ try
+ {
+ // Response parser.
+ //
+ // Example response (only the part we need to parse here):
+ //
+ // {
+ // "node": {
+ // "pullRequests": {
+ // "totalCount": 2,
+ // "edges": [
+ // {
+ // "node": {
+ // "id": "PR_kwDOLc8CoM5vRS0y",
+ // "number": 7,
+ // "headRefOid": "cf72888be9484d6946a1340264e7abf18d31cc92"
+ // }
+ // },
+ // {
+ // "node": {
+ // "id": "PR_kwDOLc8CoM5vRzHs",
+ // "number": 8,
+ // "headRefOid": "626d25b318aad27bc0005277afefe3e8d6b2d434"
+ // }
+ // }
+ // ]
+ // }
+ // }
+ // }
+ //
+ struct resp
+ {
+ bool found = false;
+
+ vector<gh_pull_request> pull_requests;
+
+ resp (json::parser& p)
+ {
+ using event = json::event;
+
+ auto parse_data = [this] (json::parser& p)
+ {
+ p.next_expect (event::begin_object);
+
+ if (p.next_expect_member_object_null ("node"))
+ {
+ found = true;
+
+ p.next_expect_member_object ("pullRequests");
+
+ uint16_t n (p.next_expect_member_number<uint16_t> ("totalCount"));
+
+ p.next_expect_member_array ("edges");
+ for (size_t i (0); i != n; ++i)
+ {
+ p.next_expect (event::begin_object); // edges[i]
+
+ p.next_expect_member_object ("node");
+ {
+ gh_pull_request pr;
+ pr.node_id = p.next_expect_member_string ("id");
+ pr.number = p.next_expect_member_number<unsigned int> ("number");
+ pr.head_sha = p.next_expect_member_string ("headRefOid");
+ pull_requests.push_back (move (pr));
+ }
+ p.next_expect (event::end_object); // node
+
+ p.next_expect (event::end_object); // edges[i]
+ }
+ p.next_expect (event::end_array); // edges
+
+ p.next_expect (event::end_object); // pullRequests
+ p.next_expect (event::end_object); // node
+ }
+
+ p.next_expect (event::end_object);
+ };
+
+ gq_parse_response (p, move (parse_data));
+ }
+
+ resp () = default;
+ } rs;
+
+ uint16_t sc (github_post (rs,
+ "graphql", // API Endpoint.
+ strings {"Authorization: Bearer " + iat},
+ move (rq)));
+
+ if (sc == 200)
+ {
+ if (!rs.found)
+ {
+ error << "repository '" << nid << "' not found";
+
+ return nullopt;
+ }
+
+ return rs.pull_requests;
+ }
+ else
+ error << "failed to fetch repository pull requests: "
+ << "error HTTP response status " << sc;
+ }
+ catch (const json::invalid_json_input& e)
+ {
+ // Note: e.name is the GitHub API endpoint.
+ //
+ error << "malformed JSON in response from " << e.name << ", line: "
+ << e.line << ", column: " << e.column << ", byte offset: "
+ << e.position << ", error: " << e;
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "malformed header(s) in response: " << e;
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to fetch repository pull requests (errno=" << e.code ()
+ << "): " << e.what ();
+ }
+ catch (const runtime_error& e) // From response type's parsing constructor.
+ {
+ // GitHub response contained error(s) (could be ours or theirs at this
+ // point).
+ //
+ error << "unable to fetch repository pull requests: " << e;
+ }
+
+ return nullopt;
+ }
+
// GraphQL serialization functions.
//
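As the @@ TMP comment in gq_query_fetch_open_pull_requests() notes, fetching more than 100 pull requests would require paging. For reference, GitHub's GraphQL connections support cursor-based paging via pageInfo; a hypothetical paged variant of the query builder (not part of this commit; it pages forward with first/after instead of last and assumes the same gq_str()/gq_enum() helpers and headers as mod-ci-github-gq.cxx) could look along these lines, with the caller re-running the query with the previous endCursor until hasNextPage is false:

  static string
  gq_query_fetch_open_pull_requests_page (const string& rid,
                                          const string& br,
                                          const optional<string>& cursor)
  {
    ostringstream os;

    os << "query {" << '\n'
       << "  node(id:" << gq_str (rid) << ") {" << '\n'
       << "    ... on Repository {" << '\n'
       << "      pullRequests (first:100" << '\n';

    // Resume after the cursor returned on the previous page, if any.
    //
    if (cursor)
      os << "                    after:" << gq_str (*cursor) << '\n';

    os << "                    states:" << gq_enum ("OPEN") << '\n'
       << "                    baseRefName:" << gq_str (br) << '\n'
       << "      ) {" << '\n'
       << "        pageInfo { hasNextPage endCursor }" << '\n'
       << "        edges { node { id number headRefOid } }" << '\n'
       << "      }" << '\n'
       << "    }" << '\n'
       << "  }" << '\n'
       << "}" << '\n';

    return os.str ();
  }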
diff --git a/mod/mod-ci-github-gq.hxx b/mod/mod-ci-github-gq.hxx
index 9721b6e..439f7b7 100644
--- a/mod/mod-ci-github-gq.hxx
+++ b/mod/mod-ci-github-gq.hxx
@@ -116,6 +116,18 @@ namespace brep
gq_pull_request_mergeable (const basic_mark& error,
const string& installation_access_token,
const string& node_id);
+
+ // Fetch the last 100 open pull requests with the specified base branch from
+ // the repository with the specified node ID.
+ //
+ // Issue diagnostics and return nullopt if the repository was not found or
+ // an error occurred.
+ //
+ optional<vector<gh_pull_request>>
+ gq_fetch_open_pull_requests (const basic_mark& error,
+ const string& installation_access_token,
+ const string& repository_node_id,
+ const string& base_branch);
}
#endif // MOD_MOD_CI_GITHUB_GQ_HXX
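Note the distinction in the above return value: nullopt means the repository was not found or an error occurred (diagnostics already issued), whereas an empty vector simply means there are no open pull requests with that base branch. The real call site is in the mod-ci-github.cxx hunk below; a minimal sketch of the distinction (iat, error, repo_node_id, and base_branch are illustrative local names):

  optional<vector<gh_pull_request>> prs (
    gq_fetch_open_pull_requests (error, iat, repo_node_id, base_branch));

  if (!prs)
  {
    // Repository not found or error (diagnostics already issued).
  }
  else if (prs->empty ())
  {
    // No open pull requests with this base branch; nothing to recreate.
  }
  else
  {
    for (const gh_pull_request& pr: *prs)
    {
      // Recreate the CI request for pr (see mod-ci-github.cxx below).
    }
  }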
diff --git a/mod/mod-ci-github.cxx b/mod/mod-ci-github.cxx
index d5131cf..dbc0c4b 100644
--- a/mod/mod-ci-github.cxx
+++ b/mod/mod-ci-github.cxx
@@ -493,6 +493,76 @@ namespace brep
}
}
+ // The merge commits of any open pull requests with this branch as base
+ // branch will now be out of date, and thus so will be their CI builds and
+ // associated check runs (and, no, GitHub does not invalidate those CI
+ // results automatically; see below).
+ //
+ // Unfortunately GitHub does not provide a webhook for PR base branch
+ // updates (as it does for PR head branch updates) so we have to handle it
+ // here. We do so by fetching the open pull requests with this branch as
+ // base branch and then recreating the CI requests (cancel existing,
+ // create new) for each pull request.
+ //
+  // If we fail to recreate any of the PR CI requests, they and their check
+  // runs will be left reflecting outdated merge commits. If the new merge
+  // commit fails to be generated (merge conflicts), the PR will not be
+  // mergeable, which is not entirely catastrophic. If, on the other hand,
+  // all of the existing CI request's check runs have already succeeded and
+  // the new merge commit is generated successfully (no conflicts) but
+  // introduces logic errors, then a user would be able to merge a broken PR.
+  //
+  // Regardless of the nature of the error, we have to let the check suite
+  // handling code proceed, so we only issue diagnostics. Note also that we
+  // want to run this code as early as possible to minimize the window during
+  // which the user sees misleading CI results.
+ //
+ {
+ // Fetch open pull requests with the check suite's head branch as base
+ // branch.
+ //
+ optional<vector<gh_pull_request>> prs (
+ gq_fetch_open_pull_requests (error,
+ iat->token,
+ sd.repository_node_id,
+ cs.check_suite.head_branch));
+
+ if (prs)
+ {
+ // Recreate each PR's CI request.
+ //
+ for (gh_pull_request& pr: *prs)
+ {
+ service_data prsd (sd.warning_success,
+ sd.installation_access.token,
+ sd.installation_access.expires_at,
+ sd.installation_id,
+ sd.repository_node_id,
+ pr.head_sha,
+ cs.repository.clone_url,
+ pr.number);
+
+ // Cancel the existing CI request and create a new unloaded CI
+ // request. After this call we will start getting the
+ // build_unloaded() notifications until (1) we load the request, (2)
+ // we cancel it, or (3) it gets archived after some timeout.
+ //
+ if (!create_pull_request_ci (error, warn, trace,
+ prsd, pr.node_id,
+ true /* cancel_first */))
+ {
+ error << "pull request " << pr.node_id
+ << ": unable to create unloaded CI request";
+ }
+ }
+ }
+ else
+ {
+ error << "unable to fetch open pull requests with base branch "
+ << cs.check_suite.head_branch;
+ }
+ }
+
// Start CI for the check suite.
//
repository_location rl (cs.repository.clone_url + '#' +
@@ -672,33 +742,27 @@ namespace brep
l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
- string sd (service_data (warning_success,
- move (iat->token),
- iat->expires_at,
- pr.installation.id,
- move (pr.repository.node_id),
- pr.pull_request.head_sha,
- pr.repository.clone_url,
- pr.pull_request.number)
- .json ());
-
- // Create unloaded CI request. After this call we will start getting the
- // build_unloaded() notifications until (1) we load the request, (2) we
- // cancel it, or (3) it gets archived after some timeout.
+ service_data sd (warning_success,
+ move (iat->token),
+ iat->expires_at,
+ pr.installation.id,
+ move (pr.repository.node_id),
+ pr.pull_request.head_sha,
+ pr.repository.clone_url,
+ pr.pull_request.number);
+
+ // Create unloaded CI request. Cancel the existing CI request first if the
+ // head branch has been updated (action is `synchronize`).
//
- // Note: use no delay since we need to (re)create the synthetic merge
- // check run as soon as possible.
+ // After this call we will start getting the build_unloaded()
+ // notifications until (1) we load the request, (2) we cancel it, or (3)
+ // it gets archived after some timeout.
//
- optional<string> tid (
- create (error, warn, &trace,
- *build_db_,
- tenant_service (move (pr.pull_request.node_id),
- "ci-github",
- move (sd)),
- chrono::seconds (30) /* interval */,
- chrono::seconds (0) /* delay */));
-
- if (!tid)
+ bool cancel_first (pr.action == "synchronize");
+
+ if (!create_pull_request_ci (error, warn, trace,
+ sd, pr.pull_request.node_id,
+ cancel_first))
{
fail << "pull request " << pr.pull_request.node_id
<< ": unable to create unloaded CI request";
@@ -707,6 +771,8 @@ namespace brep
return true;
}
+ // Note: only handles pull requests (not check suites).
+ //
function<optional<string> (const tenant_service&)> ci_github::
build_unloaded (tenant_service&& ts,
const diag_epilogue& log_writer) const noexcept
@@ -909,14 +975,11 @@ namespace brep
// Cancel the CI request.
//
+ // Ignore failure because this CI request may have been cancelled
+ // elsewhere due to an update to the PR base or head branches.
+ //
if (!cancel (error, warn, &trace, *build_db_, ts.type, ts.id))
- {
- // Nothing we can do but also highly unlikely.
- //
- error << "unable to cancel CI request: "
- << "no tenant for service type/ID "
- << ts.type << '/' << ts.id;
- }
+ l3 ([&]{trace << "CI for PR " << ts.id << " already cancelled";});
return nullptr; // No need to update service data in this case.
}
@@ -1819,6 +1882,37 @@ namespace brep
};
}
+ bool ci_github::
+ create_pull_request_ci (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark& trace,
+ const service_data& sd,
+ const string& nid,
+ bool cf) const
+ {
+ // Cancel the existing CI request if asked to do so. Ignore failure
+ // because the request may already have been cancelled for other reasons.
+ //
+ if (cf)
+ {
+ if (!cancel (error, warn, &trace, *build_db_, "ci-github", nid))
+ l3 ([&] {trace << "unable to cancel CI for pull request " << nid;});
+ }
+
+ // Create a new unloaded CI request.
+ //
+ tenant_service ts (nid, "ci-github", sd.json ());
+
+ // Note: use no delay since we need to (re)create the synthetic merge
+ // check run as soon as possible.
+ //
+ return create (error, warn, &trace,
+ *build_db_, move (ts),
+ chrono::seconds (30) /* interval */,
+ chrono::seconds (0) /* delay */)
+ .has_value ();
+ }
+
string ci_github::
details_url (const build& b) const
{
diff --git a/mod/mod-ci-github.hxx b/mod/mod-ci-github.hxx
index 8cd085d..489aac7 100644
--- a/mod/mod-ci-github.hxx
+++ b/mod/mod-ci-github.hxx
@@ -17,6 +17,8 @@
namespace brep
{
+ struct service_data;
+
class ci_github: public database_module,
private ci_start,
public tenant_service_build_unloaded,
@@ -79,6 +81,25 @@ namespace brep
bool
handle_pull_request (gh_pull_request_event, bool warning_success);
+ // Create an unloaded CI request for a pull request. If `cancel_first` is
+ // true, cancel its existing CI request first.
+ //
+  // Return true if an unloaded CI request was created. Failure to cancel the
+  // existing CI request is ignored because it may already have been
+  // cancelled for other reasons.
+ //
+ // After this call we will start getting the build_unloaded()
+ // notifications until (1) we load the request, (2) we cancel it, or (3)
+ // it gets archived after some timeout.
+ //
+ bool
+ create_pull_request_ci (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark& trace,
+ const service_data&,
+ const string& pull_request_node_id,
+ bool cancel_first) const;
+
// Build a check run details_url for a build.
//
string