-rw-r--r--   mod/ci-common.hxx                  |   2
-rw-r--r--   mod/mod-ci-github-gh.cxx           | 143
-rw-r--r--   mod/mod-ci-github-gh.hxx           |  51
-rw-r--r--   mod/mod-ci-github-gq.cxx           | 195
-rw-r--r--   mod/mod-ci-github-gq.hxx           |  42
-rw-r--r--   mod/mod-ci-github-service-data.cxx |  74
-rw-r--r--   mod/mod-ci-github-service-data.hxx |  38
-rw-r--r--   mod/mod-ci-github.cxx              | 635
-rw-r--r--   mod/mod-ci-github.hxx              |  13
-rw-r--r--   mod/module.cli                     |   5
10 files changed, 1130 insertions(+), 68 deletions(-)
diff --git a/mod/ci-common.hxx b/mod/ci-common.hxx index 848bca1..6a07154 100644 --- a/mod/ci-common.hxx +++ b/mod/ci-common.hxx @@ -24,7 +24,7 @@ namespace brep // If the request handling has been performed normally, then return the // information that corresponds to the CI result manifest (see CI Result - // Manifest in the manual). Otherwise (some internal has error occured), + // Manifest in the manual). Otherwise (some internal error has occured), // log the error and return nullopt. // // The arguments correspond to the CI request and overrides manifest diff --git a/mod/mod-ci-github-gh.cxx b/mod/mod-ci-github-gh.cxx index 7007db8..4ad8d32 100644 --- a/mod/mod-ci-github-gh.cxx +++ b/mod/mod-ci-github-gh.cxx @@ -177,6 +177,107 @@ namespace brep return os; } + gh_pull_request:: + gh_pull_request (json::parser& p) + { + p.next_expect (event::begin_object); + + bool ni (false), nu (false), st (false), ma (false), ms (false), + bs (false), hd (false); + + // Skip unknown/uninteresting members. + // + while (p.next_expect (event::name, event::end_object)) + { + auto c = [&p] (bool& v, const char* s) + { + return p.name () == s ? (v = true) : false; + }; + + if (c (ni, "node_id")) node_id = p.next_expect_string (); + else if (c (nu, "number")) number = p.next_expect_number<unsigned int> (); + else if (c (st, "state")) state = p.next_expect_string (); + else if (c (ma, "mergeable")) mergeable = p.next_expect_boolean_null<bool> (); + else if (c (ms, "merge_commit_sha")) + { + string* v (p.next_expect_string_null ()); + if (v != nullptr) + merge_commit_sha = *v; + } + else if (c (bs, "base")) + { + p.next_expect (event::begin_object); + + bool l (false), r (false), s (false); + + while (p.next_expect (event::name, event::end_object)) + { + if (c (l, "label")) base_label = p.next_expect_string (); + else if (c (r, "ref")) base_ref = p.next_expect_string (); + else if (c (s, "sha")) base_sha = p.next_expect_string (); + else p.next_expect_value_skip (); + } + + if (!l) missing_member (p, "gh_pull_request.base", "label"); + if (!r) missing_member (p, "gh_pull_request.base", "ref"); + if (!s) missing_member (p, "gh_pull_request.base", "sha"); + } + else if (c (hd, "head")) + { + p.next_expect (event::begin_object); + + bool l (false), r (false), s (false); + + while (p.next_expect (event::name, event::end_object)) + { + if (c (l, "label")) head_label = p.next_expect_string (); + else if (c (r, "ref")) head_ref = p.next_expect_string (); + else if (c (s, "sha")) head_sha = p.next_expect_string (); + else p.next_expect_value_skip (); + } + + if (!l) missing_member (p, "gh_pull_request.head", "label"); + if (!r) missing_member (p, "gh_pull_request.head", "ref"); + if (!s) missing_member (p, "gh_pull_request.head", "sha"); + } + else p.next_expect_value_skip (); + } + + if (!ni) missing_member (p, "gh_pull_request", "node_id"); + if (!nu) missing_member (p, "gh_pull_request", "number"); + if (!st) missing_member (p, "gh_pull_request", "state"); + if (!ma) missing_member (p, "gh_pull_request", "mergeable"); + if (!ms) missing_member (p, "gh_pull_request", "merge_commit_sha"); + if (!bs) missing_member (p, "gh_pull_request", "base"); + if (!hd) missing_member (p, "gh_pull_request", "head"); + } + + ostream& + operator<< (ostream& os, const gh_pull_request& pr) + { + os << "node_id: " << pr.node_id + << ", number: " << pr.number + << ", state: " << pr.state + << ", mergeable: " << (pr.mergeable + ? *pr.mergeable + ? 
"true" + : "false" + : "null") + << ", merge_commit_sha:" << pr.merge_commit_sha + << ", base: { " + << "label: " << pr.base_label + << ", ref: " << pr.base_ref + << ", sha: " << pr.base_sha + << " }" + << ", head: { " + << "label: " << pr.head_label + << ", ref: " << pr.head_ref + << ", sha: " << pr.head_sha + << " }"; + + return os; + } + // gh_repository // gh_repository:: @@ -297,6 +398,48 @@ namespace brep return os; } + // gh_pull_request_event + // + gh_pull_request_event:: + gh_pull_request_event (json::parser& p) + { + p.next_expect (event::begin_object); + + bool ac (false), pr (false), rp (false), in (false); + + // Skip unknown/uninteresting members. + // + while (p.next_expect (event::name, event::end_object)) + { + auto c = [&p] (bool& v, const char* s) + { + return p.name () == s ? (v = true) : false; + }; + + if (c (ac, "action")) action = p.next_expect_string (); + else if (c (pr, "pull_request")) pull_request = gh_pull_request (p); + else if (c (rp, "repository")) repository = gh_repository (p); + else if (c (in, "installation")) installation = gh_installation (p); + else p.next_expect_value_skip (); + } + + if (!ac) missing_member (p, "gh_pull_request_event", "action"); + if (!pr) missing_member (p, "gh_pull_request_event", "pull_request"); + if (!rp) missing_member (p, "gh_pull_request_event", "repository"); + if (!in) missing_member (p, "gh_pull_request_event", "installation"); + } + + ostream& + operator<< (ostream& os, const gh_pull_request_event& pr) + { + os << "action: " << pr.action; + os << ", pull_request { " << pr.pull_request << " }"; + os << ", repository { " << pr.repository << " }"; + os << ", installation { " << pr.installation << " }"; + + return os; + } + // gh_installation_access_token // // Example JSON: diff --git a/mod/mod-ci-github-gh.hxx b/mod/mod-ci-github-gh.hxx index b3da197..2b77aeb 100644 --- a/mod/mod-ci-github-gh.hxx +++ b/mod/mod-ci-github-gh.hxx @@ -66,6 +66,37 @@ namespace brep gh_check_run () = default; }; + struct gh_pull_request + { + string node_id; + unsigned int number; + + string state; // "open" or "closed". + + // If absent then the result of the test merge commit is not yet + // available. If true then `merge_commit_sha` contains the commit ID of + // the merge commit. If false then `merge_commit_sha` is either empty or + // no longer valid. + // + optional<bool> mergeable; + string merge_commit_sha; + + // @@ TODO Remove label if unused. + string base_label; // Name distinguishing the base from the head. + string base_ref; + string base_sha; + + // @@ TODO Remove label if unused. + string head_label; // Name distinguishing the head from the base. + string head_ref; + string head_sha; + + explicit + gh_pull_request (json::parser&); + + gh_pull_request () = default; + }; + // Return the GitHub check run status corresponding to a build_state. 
// string @@ -128,6 +159,20 @@ namespace brep gh_check_suite_event () = default; }; + struct gh_pull_request_event + { + string action; + + gh_pull_request pull_request; + gh_repository repository; + gh_installation installation; + + explicit + gh_pull_request_event (json::parser&); + + gh_pull_request_event () = default; + }; + struct gh_installation_access_token { string token; @@ -154,6 +199,9 @@ namespace brep operator<< (ostream&, const gh_check_run&); ostream& + operator<< (ostream&, const gh_pull_request&); + + ostream& operator<< (ostream&, const gh_repository&); ostream& @@ -163,6 +211,9 @@ namespace brep operator<< (ostream&, const gh_check_suite_event&); ostream& + operator<< (ostream&, const gh_pull_request_event&); + + ostream& operator<< (ostream&, const gh_installation_access_token&); } diff --git a/mod/mod-ci-github-gq.cxx b/mod/mod-ci-github-gq.cxx index 954f5a8..e5ea0c5 100644 --- a/mod/mod-ci-github-gq.cxx +++ b/mod/mod-ci-github-gq.cxx @@ -349,13 +349,17 @@ namespace brep // other states. // static string - gq_mutation_create_check_runs (const string& ri, // Repository ID - const string& hs, // Head SHA - const string& du, // Details URL. + gq_mutation_create_check_runs (const string& ri, // Repository ID + const string& hs, // Head SHA + const optional<string>& du, // Details URL. const vector<check_run>& crs, - const string& st, // Check run status. + const string& st, // Check run status. optional<gq_built_result> br = nullopt) { + // Ensure details URL is non-empty if present. + // + assert (!du || !du->empty ()); + ostringstream os; os << "mutation {" << '\n'; @@ -371,10 +375,10 @@ namespace brep << " repositoryId: " << gq_str (ri) << '\n' << " headSha: " << gq_str (hs) << '\n' << " status: " << gq_enum (st); - if (!du.empty ()) + if (du) { os << '\n'; - os << " detailsUrl: " << gq_str (du); + os << " detailsUrl: " << gq_str (*du); } if (br) { @@ -409,13 +413,17 @@ namespace brep // conclusion. // static string - gq_mutation_update_check_run (const string& ri, // Repository ID. - const string& ni, // Node ID. - const string& du, // Details URL. - const string& st, // Check run status. - optional<timestamp> sa, // Started at. + gq_mutation_update_check_run (const string& ri, // Repository ID. + const string& ni, // Node ID. + const optional<string>& du, // Details URL. + const string& st, // Check run status. + optional<timestamp> sa, // Started at. optional<gq_built_result> br) { + // Ensure details URL is non-empty if present. + // + assert (!du || !du->empty ()); + ostringstream os; os << "mutation {" << '\n' @@ -428,10 +436,10 @@ namespace brep os << '\n'; os << " startedAt: " << gq_str (gh_to_iso8601 (*sa)); } - if (!du.empty ()) + if (du) { os << '\n'; - os << " detailsUrl: " << gq_str (du); + os << " detailsUrl: " << gq_str (*du); } if (br) { @@ -472,8 +480,11 @@ namespace brep // Empty details URL because it's not available until building. // string rq ( - gq_serialize_request ( - gq_mutation_create_check_runs (rid, hs, "", crs, gh_to_status (st)))); + gq_serialize_request (gq_mutation_create_check_runs (rid, + hs, + nullopt, + crs, + gh_to_status (st)))); return gq_mutate_check_runs (error, crs, iat, move (rq), st); } @@ -484,7 +495,7 @@ namespace brep const string& iat, const string& rid, const string& hs, - const string& du, + const optional<string>& du, build_state st, optional<gq_built_result> br) { @@ -492,10 +503,6 @@ namespace brep // assert (st != build_state::built || br); - // Must have a details URL because `st` should never be queued. 
- // - assert (!du.empty ()); - vector<check_run> crs {move (cr)}; string rq ( @@ -520,7 +527,7 @@ namespace brep const string& iat, const string& rid, const string& nid, - const string& du, + const optional<string>& du, build_state st, optional<gq_built_result> br) { @@ -528,10 +535,6 @@ namespace brep // assert (st != build_state::built || br); - // Must have a details URL for building and built. - // - assert (!du.empty ()); - // Set `startedAt` to current time if updating to building. // optional<timestamp> sa; @@ -557,6 +560,148 @@ namespace brep return r; } + // Serialize a GraphQL query that fetches a pull request from GitHub. + // + static string + gq_query_pr_mergeability (const string& nid) + { + ostringstream os; + + os << "query {" << '\n' + << " node(id:" << gq_str (nid) << ") {" << '\n' + << " ... on PullRequest {" << '\n' + << " mergeable potentialMergeCommit { oid }" << '\n' + << " }" << '\n' + << " }" << '\n' + << "}" << '\n'; + + return os.str (); + } + + optional<string> + gq_pull_request_mergeable (const basic_mark& error, + const string& iat, + const string& nid) + { + string rq (gq_serialize_request (gq_query_pr_mergeability (nid))); + + try + { + // Response parser. + // + struct resp + { + // True if the pull request was found (i.e., the node ID was valid). + // + bool found = false; + + // The response value. Absent if the merge commit is still being + // generated. + // + optional<string> value; + + resp (json::parser& p) + { + using event = json::event; + + gq_parse_response (p, [this] (json::parser& p) + { + p.next_expect (event::begin_object); + + if (p.next_expect_member_object_null ("node")) + { + found = true; + + string ma (p.next_expect_member_string ("mergeable")); + + if (ma == "MERGEABLE") + { + p.next_expect_member_object ("potentialMergeCommit"); + string oid (p.next_expect_member_string ("oid")); + p.next_expect (event::end_object); + + value = move (oid); + } + else if (ma == "CONFLICTING") + { + value = ""; + } + else if (ma == "UNKNOWN") + { + // Still being generated; leave value absent. + } + + p.next_expect (event::end_object); // node + } + + p.next_expect (event::end_object); + }); + } + + resp () = default; + } rs; + + uint16_t sc (github_post (rs, + "graphql", // API Endpoint. + strings {"Authorization: Bearer " + iat}, + move (rq))); + + if (sc == 200) + { + if (!rs.found) + error << "pull request '" << nid << "' not found"; + + return rs.value; + } + else + error << "failed to fetch pull request: error HTTP response status " + << sc; + } + catch (const json::invalid_json_input& e) + { + // Note: e.name is the GitHub API endpoint. + // + error << "malformed JSON in response from " << e.name << ", line: " + << e.line << ", column: " << e.column << ", byte offset: " + << e.position << ", error: " << e; + } + catch (const invalid_argument& e) + { + error << "malformed header(s) in response: " << e; + } + catch (const system_error& e) + { + error << "unable to fetch pull request (errno=" << e.code () << "): " + << e.what (); + } + catch (const runtime_error& e) // From response type's parsing constructor. + { + // GitHub response contained error(s) (could be ours or theirs at this + // point). 
+ // + error << "unable to fetch pull request: " << e; + } + + return nullopt; + } + + // bool + // gq_fetch_branch_open_pull_requests () + // { + // // query { + // // repository(owner:"francoisk" name:"libb2") + // // { + // // pullRequests (last:100 states:OPEN baseRefName:"master") { + // // edges { + // // node { + // // id + // // } + // // } + // // } + // // } + // // } + // } + // GraphQL serialization functions. // // The GraphQL spec: diff --git a/mod/mod-ci-github-gq.hxx b/mod/mod-ci-github-gq.hxx index 8f7a9ca..9721b6e 100644 --- a/mod/mod-ci-github-gq.hxx +++ b/mod/mod-ci-github-gq.hxx @@ -38,6 +38,8 @@ namespace brep // state and the node ID. Return false and issue diagnostics if the request // failed. // + // If the details_url is absent GitHub will use the app's homepage. + // // The gq_built_result is required if the build_state is built because // GitHub does not allow a check run status of `completed` without at least // a conclusion. @@ -55,7 +57,7 @@ namespace brep const string& installation_access_token, const string& repository_id, const string& head_sha, - const string& details_url, + const optional<string>& details_url, build_state, optional<gq_built_result> = nullopt); @@ -65,6 +67,8 @@ namespace brep // with the new state. Return false and issue diagnostics if the request // failed. // + // If the details_url is absent GitHub will use the app's homepage. + // // The gq_built_result is required if the build_state is built because // GitHub does not allow a check run status of `completed` without at least // a conclusion. @@ -75,9 +79,43 @@ namespace brep const string& installation_access_token, const string& repository_id, const string& node_id, - const string& details_url, + const optional<string>& details_url, build_state, optional<gq_built_result> = nullopt); + + // Fetch a pull request's mergeability from GitHub and return it in first, + // or absent if the merge commit is still being generated. + // + // Return false in second and issue diagnostics if the request failed. + // + struct gq_pr_mergeability + { + // True if the pull request is auto-mergeable; false if it would create + // conflicts. + // + bool mergeable; + + // The ID of the test merge commit. Empty if mergeable is false. + // + string merge_commit_id; + }; + + + // Fetch a pull request's mergeability from GitHub. Return absent value if + // the merge commit is still being generated. Return empty string if the + // pull request is not auto-mergeable. Otherwise return the test merge + // commit id. + // + // Issue diagnostics and return absent if the request failed (which means it + // will be treated by the caller as still being generated). + // + // Note that the first request causes GitHub to start preparing the test + // merge commit. 
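For reference, roughly what this mergeability exchange looks like on the wire. The query text follows directly from gq_query_pr_mergeability() above; the response is a sketch inferred from the response parser and standard GraphQL conventions (only the MERGEABLE case is shown, with made-up IDs), so treat the exact shape as an assumption rather than documented GitHub output:

  // Query (as serialized by gq_query_pr_mergeability(); node ID made up):
  //
  //   query {
  //     node(id:"PR_kwDOexample") {
  //       ... on PullRequest {
  //         mergeable potentialMergeCommit { oid }
  //       }
  //     }
  //   }
  //
  // Response for a mergeable pull request (abbreviated):
  //
  //   {
  //     "data": {
  //       "node": {
  //         "mergeable": "MERGEABLE",
  //         "potentialMergeCommit": {
  //           "oid": "1b6c9a361086ed93e6f1e67189e82d52de91c49b"
  //         }
  //       }
  //     }
  //   }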
+ // + optional<string> + gq_pull_request_mergeable (const basic_mark& error, + const string& installation_access_token, + const string& node_id); } #endif // MOD_MOD_CI_GITHUB_GQ_HXX diff --git a/mod/mod-ci-github-service-data.cxx b/mod/mod-ci-github-service-data.cxx index 6aee2d7..561752d 100644 --- a/mod/mod-ci-github-service-data.cxx +++ b/mod/mod-ci-github-service-data.cxx @@ -35,8 +35,24 @@ namespace brep installation_id = p.next_expect_member_number<uint64_t> ("installation_id"); + repository_node_id = p.next_expect_member_string ("repository_node_id"); - head_sha = p.next_expect_member_string ("head_sha"); + + { + string* s (p.next_expect_member_string_null ("repository_clone_url")); + if (s != nullptr) + repository_clone_url = *s; + } + + pr_number = p.next_expect_member_number_null<uint32_t> ("pr_number"); + + { + string* s (p.next_expect_member_string_null ("merge_node_id")); + if (s != nullptr) + merge_node_id = *s; + } + + report_sha = p.next_expect_member_string ("report_sha"); p.next_expect_member_array ("check_runs"); while (p.next_expect (event::begin_object, event::end_array)) @@ -59,6 +75,12 @@ namespace brep p.next_expect (event::end_object); } + { + string* s (p.next_expect_member_string_null ("conclusion_node_id")); + if (s != nullptr) + conclusion_node_id = *s; + } + p.next_expect (event::end_object); } @@ -68,12 +90,31 @@ namespace brep timestamp iat_ea, uint64_t iid, string rid, - string hs) + string rs) + : warning_success (ws), + installation_access (move (iat_tok), iat_ea), + installation_id (iid), + repository_node_id (move (rid)), + report_sha (move (rs)) + { + } + + service_data:: + service_data (bool ws, + string iat_tok, + timestamp iat_ea, + uint64_t iid, + string rid, + string rs, + string rcu, + uint32_t prn) : warning_success (ws), installation_access (move (iat_tok), iat_ea), installation_id (iid), repository_node_id (move (rid)), - head_sha (move (hs)) + repository_clone_url (move (rcu)), + pr_number (prn), + report_sha (move (rs)) { } @@ -98,7 +139,26 @@ namespace brep s.member ("installation_id", installation_id); s.member ("repository_node_id", repository_node_id); - s.member ("head_sha", head_sha); + + s.member_name ("repository_clone_url"); + if (repository_clone_url) + s.value (*repository_clone_url); + else + s.value (nullptr); + + s.member_name ("pr_number"); + if (pr_number) + s.value (*pr_number); + else + s.value (nullptr); + + s.member_name ("merge_node_id"); + if (merge_node_id) + s.value (*merge_node_id); + else + s.value (nullptr); + + s.member ("report_sha", report_sha); s.member_begin_array ("check_runs"); for (const check_run& cr: check_runs) @@ -120,6 +180,12 @@ namespace brep } s.end_array (); + s.member_name ("conclusion_node_id"); + if (conclusion_node_id) + s.value (*conclusion_node_id); + else + s.value (nullptr); + s.end_object (); return b; diff --git a/mod/mod-ci-github-service-data.hxx b/mod/mod-ci-github-service-data.hxx index c4e20b3..21c8a8f 100644 --- a/mod/mod-ci-github-service-data.hxx +++ b/mod/mod-ci-github-service-data.hxx @@ -59,10 +59,31 @@ namespace brep string repository_node_id; // GitHub-internal opaque repository id. - string head_sha; + // The following two are only used for pull requests. + // + // @@ TODO/LATER: maybe put them in a struct? + // + optional<string> repository_clone_url; + optional<uint32_t> pr_number; + + // The GitHub ID of the synthetic PR merge check run or absent if it + // hasn't been created yet. 
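To make the new nullable service data members concrete, here is an abbreviated sketch of the JSON produced by service_data::json() for a pull request tenant. Only the members visible in this patch are shown (the rest are elided) and all values are made up:

  // {
  //   ...
  //   "installation_id": 12345678,
  //   "repository_node_id": "R_kgDOexample",
  //   "repository_clone_url": "https://github.com/example/example.git",
  //   "pr_number": 28,
  //   "merge_node_id": null,
  //   "report_sha": "1b6c9a361086ed93e6f1e67189e82d52de91c49b",
  //   "check_runs": [...],
  //   "conclusion_node_id": null
  // }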
+ // + optional<string> merge_node_id; + + // The commit ID the check suite or pull request (and its check runs) are + // reporting to. Note that in the case of a pull request this will be the + // head commit (`pull_request.head.sha`) as opposed to the merge commit. + // + string report_sha; vector<check_run> check_runs; + // The GitHub ID of the synthetic conclusion check run or absent if it + // hasn't been created yet. See also merge_node_id above. + // + optional<string> conclusion_node_id; + // Return the check run with the specified build ID or nullptr if not // found. // @@ -76,12 +97,25 @@ namespace brep explicit service_data (const string& json); + // The check_suite constructor. + // + service_data (bool warning_success, + string iat_token, + timestamp iat_expires_at, + uint64_t installation_id, + string repository_node_id, + string report_sha); + + // The pull_request constructor. + // service_data (bool warning_success, string iat_token, timestamp iat_expires_at, uint64_t installation_id, string repository_node_id, - string head_sha); + string report_sha, + string repository_clone_url, + uint32_t pr_number); service_data () = default; diff --git a/mod/mod-ci-github.cxx b/mod/mod-ci-github.cxx index 6816e5a..ae736ca 100644 --- a/mod/mod-ci-github.cxx +++ b/mod/mod-ci-github.cxx @@ -340,9 +340,46 @@ namespace brep } else if (event == "pull_request") { - // @@ TODO + gh_pull_request_event pr; + try + { + json::parser p (body.data (), body.size (), "pull_request event"); + + pr = gh_pull_request_event (p); + } + catch (const json::invalid_json_input& e) + { + string m ("malformed JSON in " + e.name + " request body"); + + error << m << ", line: " << e.line << ", column: " << e.column + << ", byte offset: " << e.position << ", error: " << e; - throw invalid_request (501, "pull request events not implemented yet"); + throw invalid_request (400, move (m)); + } + + if (pr.action == "opened" || pr.action == "synchronize") + { + // opened + // A pull request was opened. + // + // synchronize + // A pull request's head branch was updated from the base branch or + // new commits were pushed to the head branch. (Note that there is + // no equivalent event for the base branch. That case gets handled + // in handle_check_suite_request() instead.) + // + // Note that both cases are handled the same: we start a new CI + // request which will be reported on the new commit id. + // + return handle_pull_request (move (pr), warning_success); + } + else + { + // Ignore the remaining actions by sending a 200 response with empty + // body. + // + return true; + } } else { @@ -410,6 +447,567 @@ namespace brep return true; } + // High-level description of pull request (PR) handling + // + // - Some GitHub pull request terminology: + // + // - Fork and pull model: Pull requests are created in a forked + // repository. Thus the head and base repositories are different. + // + // - Shared repository model: The pull request head and base branches are + // in the same repository. For example, from a feature branch onto + // master. + // + // - CI the merge commit but add check runs to the pull request head commit + // + // Most of the major CI integrations build the merge commit instead of the + // PR head commit. 
+ // + // Adding the check runs to the PR head commit is recommended by the + // following blog posts by a GitHub employee who is one of the best + // sources on these topics: + // https://www.kenmuse.com/blog/shared-commits-and-github-checks/ and + // https://www.kenmuse.com/blog/creating-github-checks/. + // + // Do not add any check runs to the merge commit because: + // + // - The PR head commit is the only commit that can be relied upon to + // exist throughout the PR's lifetime. The merge commit, on the other + // hand, can change during the PR process. When that happens the PR will + // look for check runs on the new merge commit, effectively discarding + // the ones we had before. + // + // - Although some of the GitHub documentation makes it sound like they + // expect check runs to be added to both the PR head commit and the + // merge commit, the PR UI does not react to the merge commit's check + // runs consistently. It actually seems to be quite broken. + // + // The only thing it seems to do reliably is blocking the PR merge if + // the merge commit's check runs are not successful (i.e, overriding the + // PR head commit's check runs). But the UI looks quite messed up + // generally in this state. + // + // Note that, in the case of a PR from a forked repository (the so-called + // "fork and pull" model), GitHub will copy the PR head commit from the + // head repository (the forked one) into the base repository. So the check + // runs must always be added to the base repository, whether the PR is + // from within the same repository or from a forked repository. The merge + // and head commits will be at refs/pull/<PR-number>/{merge,head}. + // + // - New commits are added to PR head branch + // + // @@ TODO In this case we will end up creating two sets of check runs on + // the same commit (pull_request.head.sha and + // check_suite.head_sha). It's not obvious which to prefer but I'm + // thinking the pull request is more important because in most + // development models it represents something that is more likely to + // end up in an important branch (as opposed to the head of a feature + // branch). + // + // Possible solution: ignore all check_suites with non-empty + // pull_requests[]. + // + // => check_suite(requested, PR_head) [only in shared repo model] + // + // Note: check_suite.pull_requests[] will contain all PRs with this + // branch as head. + // + // Note: check_suite.pull_requests[i].head.sha will be the new, updated + // PR head sha. + // + // => pull_request(synchronize) + // + // Note: The check_suite and pull_request can arrive in any order. + // + // - New commits are added to PR base branch + // + // Note: In this case pull_request.base.sha does not change, but the merge + // commit will be updated to include the new commits to the base branch. + // + // - @@ TODO? PR base branch changed (to a different branch) + // + // => pull_request(edited) + // + // - PR closed @@ TODO + // + // => pull_request(closed) + // + // Cancel CI? + // + // - PR merged @@ TODO + // + // => pull_request(merged) + // + // => check_suite(PR_base) + // + // Probably wouldn't want to CI the base again because the PR CI would've + // done the equivalent already. + // + bool ci_github:: + handle_pull_request (gh_pull_request_event pr, bool warning_success) + { + HANDLER_DIAG; + + l3 ([&]{trace << "pull_request event { " << pr << " }";}); + + // While we don't need the installation access token in this request, + // let's obtain it to flush out any permission issues early. 
Also, it is + // valid for an hour so we will most likely make use of it. + // + optional<string> jwt (generate_jwt (trace, error)); + if (!jwt) + throw server_error (); + + optional<gh_installation_access_token> iat ( + obtain_installation_access_token (pr.installation.id, + move (*jwt), + error)); + if (!iat) + throw server_error (); + + l3 ([&]{trace << "installation_access_token { " << *iat << " }";}); + + string sd (service_data (warning_success, + move (iat->token), + iat->expires_at, + pr.installation.id, + move (pr.repository.node_id), + pr.pull_request.head_sha, + pr.repository.clone_url, + pr.pull_request.number) + .json ()); + + // Create unloaded CI request. After this call we will start getting the + // build_unloaded() notifications until (1) we load the request, (2) we + // cancel it, or (3) it gets archived after some timeout. + // + // Note: use no delay since we need to (re)create the synthetic merge + // check run as soon as possible. + // + optional<string> tid ( + create (error, warn, &trace, + *build_db_, + tenant_service (move (pr.pull_request.node_id), + "ci-github", + move (sd)), + chrono::seconds (30) /* interval */, + chrono::seconds (0) /* delay */)); + + if (!tid) + fail << "unable to create unloaded CI request"; + + return true; + } + + // Return the colored circle corresponding to a result_status. + // + static string + circle (result_status rs) + { + switch (rs) + { + case result_status::success: return "\U0001F7E2"; // Green circle. + case result_status::warning: return "\U0001F7E0"; // Orange circle. + case result_status::error: + case result_status::abort: + case result_status::abnormal: return "\U0001F534"; // Red circle. + + // Valid values we should never encounter. + // + case result_status::skip: + case result_status::interrupt: + throw invalid_argument ("unexpected result_status value: " + + to_string (rs)); + } + + return ""; // Should never reach. + } + + // Let's capitalize the synthetic check run names to make them easier to + // distinguish from the regular ones. + // + static string merge_check_run_name ("MERGE-COMMIT"); + static string conclusion_check_run_name ("CONCLUSION"); + + function<optional<string> (const tenant_service&)> ci_github:: + build_unloaded (tenant_service&& ts, + const diag_epilogue& log_writer) const noexcept + { + NOTIFICATION_DIAG (log_writer); + + service_data sd; + try + { + sd = service_data (*ts.data); + } + catch (const invalid_argument& e) + { + error << "failed to parse service data: " << e; + return nullptr; + } + + // Get a new installation access token if the current one has expired. + // + const gh_installation_access_token* iat (nullptr); + optional<gh_installation_access_token> new_iat; + + if (system_clock::now () > sd.installation_access.expires_at) + { + if (optional<string> jwt = generate_jwt (trace, error)) + { + new_iat = obtain_installation_access_token (sd.installation_id, + move (*jwt), + error); + if (new_iat) + iat = &*new_iat; + } + } + else + iat = &sd.installation_access; + + if (iat == nullptr) + return nullptr; // Try again on the next call. + + auto make_iat_updater = [&new_iat, &error] () + { + function<optional<string> (const tenant_service&)> r; + + if (new_iat) + { + r = [&error, + iat = move (new_iat)] (const tenant_service& ts) + -> optional<string> + { + // NOTE: this lambda may be called repeatedly (e.g., due to + // transaction being aborted) and so should not move out of its + // captures. 
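The NOTE above (that the returned lambda may be called repeatedly and so should not move out of its captures) deserves a standalone illustration. A minimal, self-contained sketch, not part of the patch and with made-up names, showing why such a callback must copy rather than move from its captured state:

  #include <cassert>
  #include <functional>
  #include <string>
  #include <utility>

  int main ()
  {
    // Non-idempotent: the first call moves the captured string out, so a
    // retried call observes a moved-from (typically empty) value.
    //
    std::function<std::string ()> bad = [s = std::string ("payload")] () mutable
    {
      return std::move (s);
    };

    // Idempotent: copy out of the capture; every call returns "payload".
    //
    std::function<std::string ()> good = [s = std::string ("payload")] ()
    {
      return s;
    };

    std::string b1 (bad ()); // "payload"
    std::string b2 (bad ()); // Typically "" (moved-from state is unspecified).

    assert (b1 == "payload");
    assert (good () == "payload");
    assert (good () == "payload"); // Safe to call again.

    return 0;
  }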
+ + service_data sd; + try + { + sd = service_data (*ts.data); + } + catch (const invalid_argument& e) + { + error << "failed to parse service data: " << e; + return nullopt; + } + + sd.installation_access = *iat; + + return sd.json (); + }; + } + + return r; + }; + + // Create a synthetic check run with an in-progress state. Return the + // check run on success or nullopt on failure. + // + auto create_synthetic_cr = [iat, + &sd, + &error] (string name) -> optional<check_run> + { + check_run cr; + cr.name = move (name); + + if (gq_create_check_run (error, + cr, + iat->token, + sd.repository_node_id, + sd.report_sha, + nullopt /* details_url */, + build_state::building)) + { + return cr; + } + else + return nullopt; + }; + + // Update a synthetic check run with success or failure. Return the check + // run on success or nullopt on failure. + // + auto update_synthetic_cr = [iat, + &sd, + &error] (const string& node_id, + const string& name, + result_status rs, + string summary) -> optional<check_run> + { + assert (!node_id.empty ()); + + optional<gq_built_result> br ( + gq_built_result (gh_to_conclusion (rs, sd.warning_success), + circle (rs) + ' ' + ucase (to_string (rs)), + move (summary))); + + check_run cr; + cr.name = name; // For display purposes only. + + if (gq_update_check_run (error, + cr, + iat->token, + sd.repository_node_id, + node_id, + nullopt /* details_url */, + build_state::built, + move (br))) + { + return cr; + } + else + return nullopt; + }; + + // Synthetic merge check run node ID. Empty until created on the first + // call or retrieved from service data on subsequent calls. + // + string merge_node_id; + + // True if this is the first call (or the merge commit couldn't be created + // on the first call, in which case we just re-try by treating it as a + // first call). + // + bool first (!sd.merge_node_id); + + // If this is the first call, (re)create the synthetic merge check run as + // soon as possible to make sure the previous check suite, if any, is no + // longer completed. + // + // Note that there is still a window between receipt of the pull_request + // event and the first bot/worker asking for a task, which could be + // substantial. We could probably (also) try to (re)create the merge + // checkrun in the webhook. @@ Maybe/later. + // + if (first) + { + if (auto cr = create_synthetic_cr (merge_check_run_name)) + { + l3 ([&]{trace << "created check_run { " << *cr << " }";}); + + merge_node_id = move (*cr->node_id); + } + else + return make_iat_updater (); // Try again on the next call. + } + else + merge_node_id = *sd.merge_node_id; + + // Start/check PR mergeability. + // + optional<string> mc ( + gq_pull_request_mergeable (error, iat->token, ts.id)); // Merge commit. + + if (!mc || mc->empty ()) + { + if (!mc) // No merge commit yet. + { + // If this is a subsequent notification and there is no merge commit, + // then there is nothing to do. + // + if (!first) + return make_iat_updater (); + + // Fall through to update service data. + } + else // Not mergeable. + { + // If the commit is not mergeable, cancel the CI request and fail the + // merge check run. + // + // Note that it feels like in this case we don't really need to create a + // failed synthetic conclusion check run since the PR cannot be merged + // anyway. 
+ + if (auto cr = update_synthetic_cr ( + merge_node_id, + merge_check_run_name, + result_status::error, + "GitHub is unable to create test merge commit")) + { + l3 ([&]{trace << "updated check_run { " << *cr << " }";}); + + // Cancel the CI request. + // + if (!cancel (error, warn, &trace, *build_db_, ts.type, ts.id)) + { + // Nothing we can do but also highly unlikely. + // + error << "unable to cancel CI request: " + << "no tenant for service type/ID " + << ts.type << '/' << ts.id; + } + + return nullptr; // No need to update service data in this case. + } + else + { + // Don't cancel the CI request if the merge check run update failed + // so that we can try again on the next call. + + if (!first) + return make_iat_updater (); + + // Fall through to update service data. + } + } + + // This is a first notification, so record the merge check run in the + // service data. + // + return [&error, + iat = move (new_iat), + mni = move (merge_node_id)] (const tenant_service& ts) + -> optional<string> + { + // NOTE: this lambda may be called repeatedly (e.g., due to + // transaction being aborted) and so should not move out of its + // captures. + + service_data sd; + try + { + sd = service_data (*ts.data); + } + catch (const invalid_argument& e) + { + error << "failed to parse service data: " << e; + return nullopt; + } + + if (iat) + sd.installation_access = *iat; + + sd.merge_node_id = mni; + + return sd.json (); + }; + } + + // If we are here, then it means we have a merge commit that we can load. + // + // Note that this can still be the first call (first=true). + // + + // As a first step, (re)create the synthetic conclusion check run and then + // change the merge check run state to success. Do it in this order so + // that the check suite does not become completed. + + // Synthetic conclusion check run node ID. Empty until created on the + // "second" call or retrieved from service data on subsequent calls. + // + string conclusion_node_id; + + // True if this is the first call after the merge commit became available, + // which we will call the "second" call (or we couldn't create the + // conclusion check run on the first such call, in which case we just + // re-try by treating it as a "second" call). + // + bool second (!sd.conclusion_node_id); + + if (second) + { + if (auto cr = create_synthetic_cr (conclusion_check_run_name)) + { + l3 ([&]{trace << "created check_run { " << *cr << " }";}); + + conclusion_node_id = move (*cr->node_id); + } + } + else + conclusion_node_id = *sd.conclusion_node_id; + + if (!conclusion_node_id.empty ()) // Conclusion check run was created. + { + // Update merge check run to successful. + // + if (auto cr = update_synthetic_cr (merge_node_id, + merge_check_run_name, + result_status::success, + "GitHub created test merge commit")) + { + l3 ([&]{trace << "updated check_run { " << *cr << " }";}); + + // Load the CI request. 
+ // + // Example repository URL fragment: + // + // #pull/28/merge@1b6c9a361086ed93e6f1e67189e82d52de91c49b + // + repository_location rl (*sd.repository_clone_url + "#pull/" + + to_string (*sd.pr_number) + "/merge@" + *mc, + repository_type::git); + + optional<start_result> r ( + load (error, warn, &trace, *build_db_, move (ts), rl)); + + if (!r || r->status != 200) + { + string msg; + msg = "```\n"; + if (r) msg += r->message; + else msg += "Internal service error"; + msg += "\n```"; + + if (auto cr = update_synthetic_cr (conclusion_node_id, + conclusion_check_run_name, + result_status::error, + move (msg))) + { + l3 ([&]{trace << "updated check_run { " << *cr << " }";}); + } + else + { + // Nothing really we can do in this case since we will not receive + // any further notifications. + } + + return nullptr; // No need to update service data in this case. + } + } + else + { + // Don't load the CI request if the merge check run update failed so + // that we can try again on the next call. + } + } + + return [&error, + iat = move (new_iat), + mni = (first ? move (merge_node_id) : string ()), + cni = (second ? move (conclusion_node_id) : string ())] + (const tenant_service& ts) -> optional<string> + { + // NOTE: this lambda may be called repeatedly (e.g., due to + // transaction being aborted) and so should not move out of its + // captures. + + service_data sd; + try + { + sd = service_data (*ts.data); + } + catch (const invalid_argument& e) + { + error << "failed to parse service data: " << e; + return nullopt; + } + + if (iat) + sd.installation_access = *iat; + + if (!mni.empty ()) + sd.merge_node_id = mni; + + if (!cni.empty ()) + sd.conclusion_node_id = cni; + + return sd.json (); + }; + } + // Build state change notifications (see tenant-services.hxx for // background). Mapping our state transitions to GitHub pose multiple // problems: @@ -596,7 +1194,7 @@ namespace brep if (gq_create_check_runs (error, crs, iat->token, - sd.repository_node_id, sd.head_sha, + sd.repository_node_id, sd.report_sha, build_state::queued)) { for (const check_run& cr: crs) @@ -873,29 +1471,6 @@ namespace brep // if (iat != nullptr) { - // Return the colored circle corresponding to a result_status. - // - auto circle = [] (result_status rs) -> string - { - switch (rs) - { - case result_status::success: return "\U0001F7E2"; // Green circle. - case result_status::warning: return "\U0001F7E0"; // Orange circle. - case result_status::error: - case result_status::abort: - case result_status::abnormal: return "\U0001F534"; // Red circle. - - // Valid values we should never encounter. - // - case result_status::skip: - case result_status::interrupt: - throw invalid_argument ("unexpected result_status value: " + - to_string (rs)); - } - - return ""; // Should never reach. - }; - // Prepare the check run's summary field (the build information in an // XHTML table). // @@ -927,9 +1502,9 @@ namespace brep // Serialize a result row (colored circle, result text, log URL) for // an operation and result_status. // - auto tr_result = [this, circle, &b] (xml::serializer& s, - const string& op, - result_status rs) + auto tr_result = [this, &b] (xml::serializer& s, + const string& op, + result_status rs) { // The log URL. 
// @@ -1030,7 +1605,7 @@ namespace brep cr, iat->token, sd.repository_node_id, - sd.head_sha, + sd.report_sha, details_url (b), build_state::built, move (br))) diff --git a/mod/mod-ci-github.hxx b/mod/mod-ci-github.hxx index 946a326..8cd085d 100644 --- a/mod/mod-ci-github.hxx +++ b/mod/mod-ci-github.hxx @@ -19,6 +19,7 @@ namespace brep { class ci_github: public database_module, private ci_start, + public tenant_service_build_unloaded, public tenant_service_build_queued, public tenant_service_build_building, public tenant_service_build_built @@ -40,6 +41,10 @@ namespace brep cli_options () const {return options::ci_github::description ();} virtual function<optional<string> (const tenant_service&)> + build_unloaded (tenant_service&&, + const diag_epilogue& log_writer) const noexcept override; + + virtual function<optional<string> (const tenant_service&)> build_queued (const tenant_service&, const vector<build>&, optional<build_state> initial_state, @@ -66,6 +71,14 @@ namespace brep bool handle_check_suite_request (gh_check_suite_event, bool warning_success); + // Handle the pull_request event `opened` and `synchronize` actions. + // + // If warning_success is true, then map result_status::warning to SUCCESS + // and to FAILURE otherwise. + // + bool + handle_pull_request (gh_pull_request_event, bool warning_success); + // Build a check run details_url for a build. // string diff --git a/mod/module.cli b/mod/module.cli index e8f7df9..0c6785f 100644 --- a/mod/module.cli +++ b/mod/module.cli @@ -845,10 +845,7 @@ namespace brep { }; - class ci_github: ci_start, ci_cancel, - build, build_db, - repository_url, - handler + class ci_github: ci_start, ci_cancel, repository_url { // GitHub CI-specific options. // |