Diffstat (limited to 'mod')
 mod/build-config-module.cxx          |   33
 mod/build-config-module.hxx          |    9
 mod/buildfile                        |    1
 mod/ci-common.cxx                    |  357
 mod/ci-common.hxx                    |   81
 mod/mod-build-configs.cxx            |   14
 mod/mod-build-force.cxx              |   23
 mod/mod-build-log.cxx                |    2
 mod/mod-build-result.cxx             |   30
 mod/mod-build-task.cxx               |  125
 mod/mod-builds.cxx                   |    2
 mod/mod-ci-github-gh.cxx             |  174
 mod/mod-ci-github-gh.hxx             |   60
 mod/mod-ci-github-gq.cxx             |  633
 mod/mod-ci-github-gq.hxx             |   94
 mod/mod-ci-github-service-data.cxx   |  114
 mod/mod-ci-github-service-data.hxx   |   61
 mod/mod-ci-github.cxx                | 1325
 mod/mod-ci-github.hxx                |   48
 mod/mod-ci.cxx                       |  129
 mod/mod-ci.hxx                       |   47
 mod/mod-package-details.cxx          |    2
 mod/mod-repository-details.cxx       |    2
 mod/mod-repository-root.cxx          |   14
 mod/mod-repository-root.hxx          |    2
 mod/module.cli                       |   36
 mod/page.cxx                         |    2
 mod/tenant-service.hxx               |   19
 28 files changed, 3014 insertions(+), 425 deletions(-)
diff --git a/mod/build-config-module.cxx b/mod/build-config-module.cxx
index 97c9f9e..1f4ad42 100644
--- a/mod/build-config-module.cxx
+++ b/mod/build-config-module.cxx
@@ -148,26 +148,35 @@ namespace brep
}
bool build_config_module::
- belongs (const build_target_config& cfg, const char* cls) const
+ derived (const string& c, const char* bc) const
{
+ if (c == bc)
+ return true;
+
+ // Go through base classes.
+ //
const map<string, string>& im (target_conf_->class_inheritance_map);
- for (const string& c: cfg.classes)
+ for (auto i (im.find (c)); i != im.end (); )
{
- if (c == cls)
+ const string& base (i->second);
+
+ if (base == bc)
return true;
- // Go through base classes.
- //
- for (auto i (im.find (c)); i != im.end (); )
- {
- const string& base (i->second);
+ i = im.find (base);
+ }
- if (base == cls)
- return true;
+ return false;
+ }
- i = im.find (base);
- }
+ bool build_config_module::
+ belongs (const build_target_config& cfg, const char* cls) const
+ {
+ for (const string& c: cfg.classes)
+ {
+ if (derived (c, cls))
+ return true;
}
return false;
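
[Editor's note: for illustration, a minimal standalone sketch of the inheritance walk that the refactored derived() performs. The map contents here are made up; in brep the map is target_conf_->class_inheritance_map and maps each class to its single base class.]

  #include <map>
  #include <string>
  #include <cassert>

  // Return true if class c is bc or (recursively) derives from it. Each
  // map entry maps a class to its base; top-level classes have no entry.
  //
  static bool
  derived (const std::map<std::string, std::string>& im,
           const std::string& c,
           const char* bc)
  {
    if (c == bc)
      return true;

    // Follow the base class chain until it ends or matches.
    //
    for (auto i (im.find (c)); i != im.end (); i = im.find (i->second))
    {
      if (i->second == bc)
        return true;
    }

    return false;
  }

  int
  main ()
  {
    std::map<std::string, std::string> im {{"linux", "posix"},
                                           {"posix", "all"}};

    assert (derived (im, "linux", "all"));    // Via posix.
    assert (!derived (im, "windows", "all")); // No entry, no match.
  }
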
diff --git a/mod/build-config-module.hxx b/mod/build-config-module.hxx
index c1630b0..bbbe952 100644
--- a/mod/build-config-module.hxx
+++ b/mod/build-config-module.hxx
@@ -54,15 +54,20 @@ namespace brep
default_all_ucs);
}
+ // Return true if a class is derived from the base class, recursively.
+ //
+ bool
+ derived (const string&, const char* base_class) const;
+
// Check if the configuration belongs to the specified class.
//
bool
belongs (const build_target_config&, const char*) const;
bool
- belongs (const build_target_config& cfg, const string& cls) const
+ belongs (const build_target_config& cfg, const string& classes) const
{
- return belongs (cfg, cls.c_str ());
+ return belongs (cfg, classes.c_str ());
}
// Target/configuration/toolchain combination that, in particular, can be
diff --git a/mod/buildfile b/mod/buildfile
index c3895dc..2d6ef39 100644
--- a/mod/buildfile
+++ b/mod/buildfile
@@ -39,6 +39,7 @@ mod{brep}: {hxx ixx txx cxx}{* -module-options -{$libu_src}} \
# the debugging of the notifications machinery.
#
cxx.poptions += -DBREP_CI_TENANT_SERVICE
+#cxx.poptions += -DBREP_CI_TENANT_SERVICE_UNLOADED
libus{mod}: ../web/xhtml/libus{xhtml}
libue{mod}: ../web/xhtml/libue{xhtml}
diff --git a/mod/ci-common.cxx b/mod/ci-common.cxx
index cb61e66..c0ef89f 100644
--- a/mod/ci-common.cxx
+++ b/mod/ci-common.cxx
@@ -3,6 +3,9 @@
#include <mod/ci-common.hxx>
+#include <odb/database.hxx>
+#include <odb/transaction.hxx>
+
#include <libbutl/uuid.hxx>
#include <libbutl/fdstream.hxx>
#include <libbutl/sendmail.hxx>
@@ -11,6 +14,9 @@
#include <libbutl/process-io.hxx> // operator<<(ostream, process_args)
#include <libbutl/manifest-serializer.hxx>
+#include <libbrep/build-package.hxx>
+#include <libbrep/build-package-odb.hxx>
+
#include <mod/external-handler.hxx>
namespace brep
@@ -38,13 +44,16 @@ namespace brep
options_ = move (o);
}
- optional<ci_start::start_result> ci_start::
+ static optional<ci_start::start_result>
start (const basic_mark& error,
const basic_mark& warn,
const basic_mark* trace,
+ const options::ci_start& ops,
+ string&& request_id,
optional<tenant_service>&& service,
+ bool service_load,
const repository_location& repository,
- const vector<package>& packages,
+ const vector<ci_start::package>& packages,
const optional<string>& client_ip,
const optional<string>& user_agent,
const optional<string>& interactive,
@@ -55,32 +64,15 @@ namespace brep
using serializer = manifest_serializer;
using serialization = manifest_serialization;
- assert (options_ != nullptr); // Shouldn't be called otherwise.
+ using result = ci_start::start_result;
// If the tenant service is specified, then its type may not be empty.
//
assert (!service || !service->type.empty ());
- // Generate the request id.
- //
- // Note that it will also be used as a CI result manifest reference,
- // unless the latter is provided by the external handler.
- //
- string request_id;
-
- try
- {
- request_id = uuid::generate ().string ();
- }
- catch (const system_error& e)
- {
- error << "unable to generate request id: " << e;
- return nullopt;
- }
-
// Create the submission data directory.
//
- dir_path dd (options_->ci_data () / dir_path (request_id));
+ dir_path dd (ops.ci_data () / dir_path (request_id));
try
{
@@ -103,10 +95,10 @@ namespace brep
//
auto client_error = [&request_id] (uint16_t status, string message)
{
- return start_result {status,
- move (message),
- request_id,
- vector<pair<string, string>> ()};
+ return result {status,
+ move (message),
+ request_id,
+ vector<pair<string, string>> ()};
};
// Serialize the CI request manifest to a stream. On the serialization
@@ -119,6 +111,7 @@ namespace brep
auto rqm = [&request_id,
&ts,
&service,
+ service_load,
&repository,
&packages,
&client_ip,
@@ -127,7 +120,7 @@ namespace brep
&simulate,
&custom_request,
&client_error] (ostream& os, bool long_lines = false)
- -> pair<bool, optional<start_result>>
+ -> pair<bool, optional<result>>
{
try
{
@@ -139,7 +132,7 @@ namespace brep
s.next ("id", request_id);
s.next ("repository", repository.string ());
- for (const package& p: packages)
+ for (const ci_start::package& p: packages)
{
if (!p.version)
s.next ("package", p.name.string ());
@@ -178,6 +171,8 @@ namespace brep
if (service->data)
s.next ("service-data", *service->data);
+
+ s.next ("service-action", service_load ? "load" : "start");
}
// Serialize the request custom parameters.
@@ -190,12 +185,12 @@ namespace brep
s.next (nv.first, nv.second);
s.next ("", ""); // End of manifest.
- return make_pair (true, optional<start_result> ());
+ return make_pair (true, optional<result> ());
}
catch (const serialization& e)
{
return make_pair (false,
- optional<start_result> (
+ optional<result> (
client_error (400,
string ("invalid parameter: ") +
e.what ())));
@@ -209,7 +204,7 @@ namespace brep
try
{
ofdstream os (rqf);
- pair<bool, optional<start_result>> r (rqm (os));
+ pair<bool, optional<result>> r (rqm (os));
os.close ();
if (!r.first)
@@ -228,7 +223,7 @@ namespace brep
//
auto ovm = [&overrides, &client_error] (ostream& os,
bool long_lines = false)
- -> pair<bool, optional<start_result>>
+ -> pair<bool, optional<result>>
{
try
{
@@ -240,12 +235,12 @@ namespace brep
s.next (nv.first, nv.second);
s.next ("", ""); // End of manifest.
- return make_pair (true, optional<start_result> ());
+ return make_pair (true, optional<result> ());
}
catch (const serialization& e)
{
return make_pair (false,
- optional<start_result> (
+ optional<result> (
client_error (
400,
string ("invalid manifest override: ") +
@@ -261,7 +256,7 @@ namespace brep
try
{
ofdstream os (ovf);
- pair<bool, optional<start_result>> r (ovm (os));
+ pair<bool, optional<result>> r (ovm (os));
os.close ();
if (!r.first)
@@ -305,16 +300,16 @@ namespace brep
// manifest from its stdout and parse it into the resulting manifest
// object. Otherwise, create implied CI result manifest.
//
- start_result sr;
+ result sr;
- if (options_->ci_handler_specified ())
+ if (ops.ci_handler_specified ())
{
using namespace external_handler;
- optional<result_manifest> r (run (options_->ci_handler (),
- options_->ci_handler_argument (),
+ optional<result_manifest> r (run (ops.ci_handler (),
+ ops.ci_handler_argument (),
dd,
- options_->ci_handler_timeout (),
+ ops.ci_handler_timeout (),
error,
warn,
trace));
@@ -358,7 +353,7 @@ namespace brep
{
try
{
- serialize_manifest (sr, os, long_lines);
+ ci_start::serialize_manifest (sr, os, long_lines);
return true;
}
catch (const serialization& e)
@@ -424,7 +419,7 @@ namespace brep
// assume that the web server error log is monitored and the email sending
// failure will be noticed.
//
- if (options_->ci_email_specified () && !simulate)
+ if (ops.ci_email_specified () && !simulate)
try
{
// Redirect the diagnostics to the web server error log.
@@ -435,14 +430,13 @@ namespace brep
*trace << process_args {args, n};
},
2 /* stderr */,
- options_->email (),
+ ops.email (),
"CI request submission (" + sr.reference + ')',
- {options_->ci_email ()});
+ {ops.ci_email ()});
// Write the CI request manifest.
//
- pair<bool, optional<start_result>> r (
- rqm (sm.out, true /* long_lines */));
+ pair<bool, optional<result>> r (rqm (sm.out, true /* long_lines */));
assert (r.first); // The serialization succeeded once, so can't fail now.
@@ -473,7 +467,55 @@ namespace brep
error << "sendmail error: " << e;
}
- return optional<start_result> (move (sr));
+ return optional<result> (move (sr));
+ }
+
+ optional<ci_start::start_result> ci_start::
+ start (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark* trace,
+ optional<tenant_service>&& service,
+ const repository_location& repository,
+ const vector<package>& packages,
+ const optional<string>& client_ip,
+ const optional<string>& user_agent,
+ const optional<string>& interactive,
+ const optional<string>& simulate,
+ const vector<pair<string, string>>& custom_request,
+ const vector<pair<string, string>>& overrides) const
+ {
+ assert (options_ != nullptr); // Shouldn't be called otherwise.
+
+ // Generate the request id.
+ //
+ // Note that it will also be used as a CI result manifest reference,
+ // unless the latter is provided by the external handler.
+ //
+ string request_id;
+
+ try
+ {
+ request_id = uuid::generate ().string ();
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to generate request id: " << e;
+ return nullopt;
+ }
+
+ return brep::start (error, warn, trace,
+ *options_,
+ move (request_id),
+ move (service),
+ false /* service_load */,
+ repository,
+ packages,
+ client_ip,
+ user_agent,
+ interactive,
+ simulate,
+ custom_request,
+ overrides);
}
void ci_start::
@@ -491,4 +533,227 @@ namespace brep
s.next ("", ""); // End of manifest.
}
+
+ optional<string> ci_start::
+ create (const basic_mark& error,
+ const basic_mark&,
+ const basic_mark* trace,
+ odb::core::database& db,
+ tenant_service&& service,
+ duration notify_interval,
+ duration notify_delay) const
+ {
+ using namespace odb::core;
+
+ // Generate the request id.
+ //
+ string request_id;
+
+ try
+ {
+ request_id = uuid::generate ().string ();
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to generate request id: " << e;
+ return nullopt;
+ }
+
+ // Use the generated request id if the tenant service id is not specified.
+ //
+ if (service.id.empty ())
+ service.id = request_id;
+
+ build_tenant t (move (request_id),
+ move (service),
+ system_clock::now () - notify_interval + notify_delay,
+ notify_interval);
+ {
+ assert (!transaction::has_current ());
+
+ transaction tr (db.begin ());
+
+ // Note that in contrast to brep-load, we know that the tenant id is
+ // unique and thus we don't try to remove a tenant with such an id.
+ // There is also not much reason to assume that we may have switched
+ // from the single-tenant mode here and remove the respective tenant,
+ // unless we are in the tenant-service functionality development mode.
+ //
+#ifdef BREP_CI_TENANT_SERVICE_UNLOADED
+ cstrings ts ({""});
+
+ db.erase_query<build_package> (
+ query<build_package>::id.tenant.in_range (ts.begin (), ts.end ()));
+
+ db.erase_query<build_repository> (
+ query<build_repository>::id.tenant.in_range (ts.begin (), ts.end ()));
+
+ db.erase_query<build_public_key> (
+ query<build_public_key>::id.tenant.in_range (ts.begin (), ts.end ()));
+
+ db.erase_query<build_tenant> (
+ query<build_tenant>::id.in_range (ts.begin (), ts.end ()));
+#endif
+
+ db.persist (t);
+
+ tr.commit ();
+ }
+
+ if (trace != nullptr)
+ *trace << "unloaded CI request " << t.id << " for service "
+ << t.service->id << ' ' << t.service->type << " is created";
+
+ return move (t.id);
+ }
+
+ optional<ci_start::start_result> ci_start::
+ load (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark* trace,
+ odb::core::database& db,
+ tenant_service&& service,
+ const repository_location& repository) const
+ {
+ using namespace odb::core;
+
+ string request_id;
+ {
+ assert (!transaction::has_current ());
+
+ transaction tr (db.begin ());
+
+ using query = query<build_tenant>;
+
+ shared_ptr<build_tenant> t (
+ db.query_one<build_tenant> (query::service.id == service.id &&
+ query::service.type == service.type));
+
+ if (t == nullptr)
+ {
+ error << "unable to find tenant for service " << service.id << ' '
+ << service.type;
+
+ return nullopt;
+ }
+ else if (t->archived)
+ {
+ error << "tenant " << t->id << " for service " << service.id << ' '
+ << service.type << " is already archived";
+
+ return nullopt;
+ }
+ else if (!t->unloaded_timestamp)
+ {
+ error << "tenant " << t->id << " for service " << service.id << ' '
+ << service.type << " is already loaded";
+
+ return nullopt;
+ }
+
+ t->unloaded_timestamp = nullopt;
+ db.update (t);
+
+ tr.commit ();
+
+ request_id = move (t->id);
+ }
+
+ assert (options_ != nullptr); // Shouldn't be called otherwise.
+
+ optional<start_result> r (brep::start (error, warn, trace,
+ *options_,
+ move (request_id),
+ move (service),
+ true /* service_load */,
+ repository,
+ {} /* packages */,
+ nullopt /* client_ip */,
+ nullopt /* user_agent */,
+ nullopt /* interactive */,
+ nullopt /* simulate */,
+ {} /* custom_request */,
+ {} /* overrides */));
+
+ // Note: on error (r == nullopt) the diagnostics is already issued.
+ //
+ if (trace != nullptr && r)
+ *trace << "CI request for '" << repository << "' is "
+ << (r->status != 200 ? "not " : "") << "loaded: "
+ << r->message << " (reference: " << r->reference << ')';
+
+ return r;
+ }
+
+ optional<tenant_service> ci_start::
+ cancel (const basic_mark&,
+ const basic_mark&,
+ const basic_mark* trace,
+ odb::core::database& db,
+ const string& type,
+ const string& id) const
+ {
+ using namespace odb::core;
+
+ assert (!transaction::has_current ());
+
+ transaction tr (db.begin ());
+
+ using query = query<build_tenant>;
+
+ shared_ptr<build_tenant> t (
+ db.query_one<build_tenant> (query::service.id == id &&
+ query::service.type == type));
+ if (t == nullptr)
+ return nullopt;
+
+ optional<tenant_service> r (move (t->service));
+ t->service = nullopt;
+ t->archived = true;
+ db.update (t);
+
+ tr.commit ();
+
+ if (trace != nullptr)
+ *trace << "CI request " << t->id << " for service " << id << ' ' << type
+ << " is canceled";
+
+ return r;
+ }
+
+ bool ci_start::
+ cancel (const basic_mark&,
+ const basic_mark&,
+ const basic_mark* trace,
+ const string& reason,
+ odb::core::database& db,
+ const string& tid) const
+ {
+ using namespace odb::core;
+
+ assert (!transaction::has_current ());
+
+ transaction tr (db.begin ());
+
+ shared_ptr<build_tenant> t (db.find<build_tenant> (tid));
+
+ if (t == nullptr)
+ return false;
+
+ if (!t->archived)
+ {
+ t->archived = true;
+ db.update (t);
+ }
+
+ tr.commit ();
+
+ if (trace != nullptr)
+ *trace << "CI request " << tid << " is canceled: "
+ << (reason.size () < 50
+ ? reason
+ : string (reason, 0, 50) + "...");
+
+ return true;
+ }
}
diff --git a/mod/ci-common.hxx b/mod/ci-common.hxx
index 6f62c4b..6a07154 100644
--- a/mod/ci-common.hxx
+++ b/mod/ci-common.hxx
@@ -24,7 +24,7 @@ namespace brep
// If the request handling has been performed normally, then return the
// information that corresponds to the CI result manifest (see CI Result
- // Manifest in the manual). Otherwise (some internal has error occured),
+ // Manifest in the manual). Otherwise (some internal error has occured),
// log the error and return nullopt.
//
// The arguments correspond to the CI request and overrides manifest
@@ -36,6 +36,7 @@ namespace brep
package_name name;
optional<brep::version> version;
};
+
// Note that the inability to generate the reference is an internal
// error. Thus, it is not optional.
//
@@ -62,7 +63,67 @@ namespace brep
const optional<string>& interactive = nullopt,
const optional<string>& simulate = nullopt,
const vector<pair<string, string>>& custom_request = {},
- const vector<pair<string, string>>& overrides = {});
+ const vector<pair<string, string>>& overrides = {}) const;
+
+ // Create an unloaded CI request returning start_result::reference on
+ // success and nullopt on an internal error. Such a request is not started
+ // until loaded with the load() function below. Configure the time
+ // interval between the build_unloaded() notifications for the being
+ // created tenant and set the initial delay for the first notification.
+ // See also the build_unloaded() tenant services notification.
+ //
+ // Note: should be called out of the database transaction.
+ //
+ optional<string>
+ create (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark* trace,
+ odb::core::database&,
+ tenant_service&&,
+ duration notify_interval,
+ duration notify_delay) const;
+
+ // Load (and start) previously created (as unloaded) CI request. Similarly
+ // to the start() function, return nullopt on an internal error.
+ //
+ // Note that tenant_service::id is used to identify the CI request tenant.
+ //
+ // Note: should be called out of the database transaction.
+ //
+ optional<start_result>
+ load (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark* trace,
+ odb::core::database&,
+ tenant_service&&,
+ const repository_location& repository) const;
+
+ // Cancel previously created or started CI request. Return the service
+ // state or nullopt if there is no tenant for such a type/id pair.
+ //
+ // Note: should be called out of the database transaction.
+ //
+ optional<tenant_service>
+ cancel (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark* trace,
+ odb::core::database&,
+ const string& type,
+ const string& id) const;
+
+ // Cancel previously created or started CI request. Return false if there
+ // is no tenant for the specified tenant id. Note that the reason argument
+ // is only used for tracing.
+ //
+ // Note: should be called out of the database transaction.
+ //
+ bool
+ cancel (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark* trace,
+ const string& reason,
+ odb::core::database&,
+ const string& tenant_id) const;
// Helpers.
//
@@ -75,22 +136,6 @@ namespace brep
private:
shared_ptr<options::ci_start> options_;
};
-
- class ci_cancel
- {
- public:
- void
- init (shared_ptr<options::ci_cancel>, shared_ptr<odb::core::database>);
-
- // @@ TODO Archive the tenant.
- //
- void
- cancel (/*...*/);
-
- private:
- shared_ptr<options::ci_cancel> options_;
- shared_ptr<odb::core::database> build_db_;
- };
}
#endif // MOD_CI_COMMON_HXX
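
[Editor's note: to see how these pieces fit together, a hedged sketch of the intended create()/load()/cancel() sequence from a tenant service's perspective. Not a complete program: db, error/warn/trace, the ci_start instance ci, and the repository location rl come from the enclosing module; ts and ts2 are tenant_service values with the service's type/id/data; the durations are arbitrary.]

  // Create the unloaded tenant and remember its id (which also becomes
  // the service id if the latter was left empty).
  //
  optional<string> tid (
    ci.create (error, warn, &trace, db, move (ts),
               std::chrono::seconds (60) /* notify_interval */,
               std::chrono::seconds (10) /* notify_delay */));

  if (!tid)
    return; // Internal error (already logged by create()).

  // ... later, e.g., from the build_unloaded() notification, once the
  // repository is known:
  //
  if (!ci.load (error, warn, &trace, db, move (ts2), rl))
  {
    // Internal error: give up on this CI request.
    //
    ci.cancel (error, warn, &trace, "load failed", db, *tid);
  }
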
diff --git a/mod/mod-build-configs.cxx b/mod/mod-build-configs.cxx
index 9282544..ce79edb 100644
--- a/mod/mod-build-configs.cxx
+++ b/mod/mod-build-configs.cxx
@@ -30,8 +30,6 @@ build_configs (const build_configs& r)
void brep::build_configs::
init (scanner& s)
{
- HANDLER_DIAG;
-
options_ = make_shared<options::build_configs> (
s, unknown_mode::fail, unknown_mode::fail);
@@ -127,19 +125,19 @@ handle (request& rq, response& rs)
s << DIV(ID="filter-heading") << "Build Configuration Classes" << ~DIV
<< P(ID="filter");
+ bool printed (false);
for (auto b (cls.begin ()), i (b), e (cls.end ()); i != e; ++i)
{
- // Skip the 'hidden' class.
+ // Skip the hidden classes.
//
const string& c (*i);
- if (c != "hidden")
+ if (!derived (c, "hidden"))
{
- // Note that here we rely on the fact that the first class in the list
- // can never be 'hidden' (is always 'all').
- //
- if (i != b)
+ if (printed)
s << ' ';
+ else
+ printed = true;
print_class_name (c, c == selected_class);
diff --git a/mod/mod-build-force.cxx b/mod/mod-build-force.cxx
index bdae356..ea921e9 100644
--- a/mod/mod-build-force.cxx
+++ b/mod/mod-build-force.cxx
@@ -42,8 +42,6 @@ build_force (const build_force& r, const tenant_service_map& tsm)
void brep::build_force::
init (scanner& s)
{
- HANDLER_DIAG;
-
options_ = make_shared<options::build_force> (
s, unknown_mode::fail, unknown_mode::fail);
@@ -192,7 +190,14 @@ handle (request& rq, response& rs)
optional<pair<tenant_service, shared_ptr<build>>> tss;
tenant_service_build_queued::build_queued_hints qhs;
+ // Acquire the database connection for the subsequent transactions.
+ //
+ // Note that we will release it prior to any potentially time-consuming
+ // operations (such as HTTP requests) and re-acquire it again afterwards,
+ // if required.
+ //
connection_ptr conn (build_db_->connection ());
+
{
transaction t (conn->begin ());
@@ -297,14 +302,28 @@ handle (request& rq, response& rs)
vector<build> qbs;
qbs.push_back (move (b));
+ // Release the database connection since the build_queued() notification
+ // can potentially be time-consuming (e.g., it may perform an HTTP
+ // request).
+ //
+ conn.reset ();
+
if (auto f = tsq->build_queued (ss,
qbs,
build_state::building,
qhs,
log_writer_))
+ {
+ conn = build_db_->connection ();
update_tenant_service_state (conn, qbs.back ().tenant, f);
+ }
}
+ // Release the database connection prior to writing into the unbuffered
+ // response stream.
+ //
+ conn.reset ();
+
// We have all the data, so don't buffer the response content.
//
ostream& os (rs.content (200, "text/plain;charset=utf-8", false));
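
[Editor's note: the connection handling above follows a pattern that this commit applies across several handlers: hold the connection only around transactions, release it across slow third-party notifications, and re-acquire on demand. Schematically, a sketch with notify_service() as a hypothetical stand-in for the tenant service callback:]

  connection_ptr conn (build_db_->connection ());

  {
    transaction t (conn->begin ());
    // ... queries and updates ...
    t.commit ();
  }

  // Return the connection to the pool for the duration of the
  // potentially slow, non-database work (e.g., an HTTP request).
  //
  conn.reset ();

  notify_service (); // Hypothetical time-consuming callback.

  // Re-acquire only if more database work is actually needed.
  //
  conn = build_db_->connection ();
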
diff --git a/mod/mod-build-log.cxx b/mod/mod-build-log.cxx
index c8e803b..5487f6e 100644
--- a/mod/mod-build-log.cxx
+++ b/mod/mod-build-log.cxx
@@ -34,8 +34,6 @@ build_log (const build_log& r)
void brep::build_log::
init (scanner& s)
{
- HANDLER_DIAG;
-
options_ = make_shared<options::build_log> (
s, unknown_mode::fail, unknown_mode::fail);
diff --git a/mod/mod-build-result.cxx b/mod/mod-build-result.cxx
index ccce17f..3ba18e1 100644
--- a/mod/mod-build-result.cxx
+++ b/mod/mod-build-result.cxx
@@ -49,8 +49,6 @@ build_result (const build_result& r, const tenant_service_map& tsm)
void brep::build_result::
init (scanner& s)
{
- HANDLER_DIAG;
-
options_ = make_shared<options::build_result> (
s, unknown_mode::fail, unknown_mode::fail);
@@ -207,13 +205,20 @@ handle (request& rq, response&)
optional<pair<tenant_service, shared_ptr<build>>> tss;
tenant_service_build_queued::build_queued_hints qhs;
+ // Acquire the database connection for the subsequent transactions.
+ //
+ // Note that we will release it prior to any potentially time-consuming
+ // operations (such as HTTP requests) and re-acquire it again afterwards,
+ // if required.
+ //
+ connection_ptr conn (build_db_->connection ());
+
// Note that if the session authentication fails (probably due to the
// authentication settings change), then we log this case with the warning
// severity and respond with the 200 HTTP code as if the challenge is
// valid. The thinking is that we shouldn't alarm a law-abaiding agent and
// shouldn't provide any information to a malicious one.
//
- connection_ptr conn (build_db_->connection ());
{
transaction t (conn->begin ());
@@ -518,12 +523,20 @@ handle (request& rq, response&)
vector<build> qbs;
qbs.push_back (move (*tss->second));
+ // Release the database connection since build_queued() notification can
+ // potentially be time-consuming (e.g., it may perform an HTTP request).
+ //
+ conn.reset ();
+
if (auto f = tsq->build_queued (ss,
qbs,
build_state::building,
qhs,
log_writer_))
+ {
+ conn = build_db_->connection ();
update_tenant_service_state (conn, qbs.back ().tenant, f);
+ }
}
// If a third-party service needs to be notified about the built package,
@@ -537,8 +550,16 @@ handle (request& rq, response&)
const tenant_service& ss (tss->first);
const build& b (*tss->second);
+ // Release the database connection since build_built() notification can
+ // potentially be time-consuming (e.g., it may perform an HTTP request).
+ //
+ conn.reset ();
+
if (auto f = tsb->build_built (ss, b, log_writer_))
+ {
+ conn = build_db_->connection ();
update_tenant_service_state (conn, b.tenant, f);
+ }
}
if (bld != nullptr)
@@ -549,6 +570,9 @@ handle (request& rq, response&)
if (!build_notify)
(cfg->email ? cfg->email : pkg->build_email) = email ();
+ if (conn == nullptr)
+ conn = build_db_->connection ();
+
send_notification_email (*options_,
conn,
*bld,
diff --git a/mod/mod-build-task.cxx b/mod/mod-build-task.cxx
index 07aff8d..6be77f6 100644
--- a/mod/mod-build-task.cxx
+++ b/mod/mod-build-task.cxx
@@ -399,6 +399,79 @@ handle (request& rq, response& rs)
}
}
+ // Acquire the database connection for the subsequent transactions.
+ //
+ // Note that we will release it prior to any potentially time-consuming
+ // operations (such as HTTP requests) and re-acquire it again afterwards,
+ // if required.
+ //
+ connection_ptr conn (build_db_->connection ());
+
+ // Perform some housekeeping first.
+ //
+ // Notify a tenant-associated third-party service about the unloaded CI
+ // request, if present.
+ //
+ {
+ const tenant_service_build_unloaded* tsu (nullptr);
+
+ transaction tr (conn->begin ());
+
+ using query = query<build_tenant>;
+
+ // Pick the unloaded tenant with the earliest loaded timestamp, skipping
+ // those which were already picked recently.
+ //
+ shared_ptr<build_tenant> t (
+ build_db_->query_one<build_tenant> (
+ (!query::archived &&
+ query::unloaded_timestamp.is_not_null () &&
+ (query::unloaded_timestamp +
+ "<= EXTRACT (EPOCH FROM NOW()) * 1000000000 - " +
+ query::unloaded_notify_interval)) +
+ "ORDER BY" + query::unloaded_timestamp +
+ "LIMIT 1"));
+
+ if (t != nullptr && t->service)
+ {
+ auto i (tenant_service_map_.find (t->service->type));
+
+ if (i != tenant_service_map_.end ())
+ {
+ tsu = dynamic_cast<const tenant_service_build_unloaded*> (
+ i->second.get ());
+
+ if (tsu != nullptr)
+ {
+ // If we ought to call the
+ // tenant_service_build_unloaded::build_unloaded() callback, then
+ // set the package tenant's loaded timestamp to the current time to
+ // prevent the notifications race.
+ //
+ t->unloaded_timestamp = system_clock::now ();
+ build_db_->update (t);
+ }
+ }
+ }
+
+ tr.commit ();
+
+ if (tsu != nullptr)
+ {
+ // Release the database connection since the build_unloaded()
+ // notification can potentially be time-consuming (e.g., it may perform
+ // an HTTP request).
+ //
+ conn.reset ();
+
+ if (auto f = tsu->build_unloaded (move (*t->service), log_writer_))
+ {
+ conn = build_db_->connection ();
+ update_tenant_service_state (conn, t->id, f);
+ }
+ }
+ }
+
// Go through package build configurations until we find one that has no
// build target configuration present in the database, or is in the building
// state but expired (collectively called unbuilt). If such a target
@@ -825,7 +898,10 @@ handle (request& rq, response& rs)
imode,
queued_expiration_ns));
- transaction t (build_db_->begin ());
+ if (conn == nullptr)
+ conn = build_db_->connection ();
+
+ transaction t (conn->begin ());
// If there are any non-archived interactive build tenants, then the
// chosen randomization approach doesn't really work since interactive
@@ -886,7 +962,8 @@ handle (request& rq, response& rs)
"OFFSET" + pkg_query::_ref (offset) +
"LIMIT" + pkg_query::_ref (limit);
- connection_ptr conn (build_db_->connection ());
+ if (conn == nullptr)
+ conn = build_db_->connection ();
prep_pkg_query pkg_prep_query (
conn->prepare_query<buildable_package> (
@@ -2226,12 +2303,20 @@ handle (request& rq, response& rs)
if (!qbs.empty ())
{
+ // Release the database connection since the build_queued()
+ // notification can potentially be time-consuming (e.g., it may
+ // perform an HTTP request).
+ //
+ conn.reset ();
+
if (auto f = tsq->build_queued (ss,
qbs,
nullopt /* initial_state */,
qhs,
log_writer_))
{
+ conn = build_db_->connection ();
+
if (optional<string> data =
update_tenant_service_state (conn, qbs.back ().tenant, f))
ss.data = move (data);
@@ -2250,12 +2335,20 @@ handle (request& rq, response& rs)
qbs.push_back (move (b));
restore_build = true;
+ // Release the database connection since the build_queued()
+ // notification can potentially be time-consuming (e.g., it may
+ // perform an HTTP request).
+ //
+ conn.reset ();
+
if (auto f = tsq->build_queued (ss,
qbs,
initial_state,
qhs,
log_writer_))
{
+ conn = build_db_->connection ();
+
if (optional<string> data =
update_tenant_service_state (conn, qbs.back ().tenant, f))
ss.data = move (data);
@@ -2278,8 +2371,16 @@ handle (request& rq, response& rs)
tenant_service& ss (tss->first);
const build& b (*tss->second);
+ // Release the database connection since the build_building()
+ // notification can potentially be time-consuming (e.g., it may
+ // perform an HTTP request).
+ //
+ conn.reset ();
+
if (auto f = tsb->build_building (ss, b, log_writer_))
{
+ conn = build_db_->connection ();
+
if (optional<string> data =
update_tenant_service_state (conn, b.tenant, f))
ss.data = move (data);
@@ -2306,6 +2407,9 @@ handle (request& rq, response& rs)
const tenant_service_build_built* tsb (nullptr);
optional<pair<tenant_service, shared_ptr<build>>> tss;
{
+ if (conn == nullptr)
+ conn = build_db_->connection ();
+
transaction t (conn->begin ());
shared_ptr<build> b (build_db_->find<build> (task_build->id));
@@ -2395,8 +2499,16 @@ handle (request& rq, response& rs)
tenant_service& ss (tss->first);
const build& b (*tss->second);
+ // Release the database connection since the build_built()
+ // notification can potentially be time-consuming (e.g., it may
+ // perform an HTTP request).
+ //
+ conn.reset ();
+
if (auto f = tsb->build_built (ss, b, log_writer_))
{
+ conn = build_db_->connection ();
+
if (optional<string> data =
update_tenant_service_state (conn, b.tenant, f))
ss.data = move (data);
@@ -2407,6 +2519,10 @@ handle (request& rq, response& rs)
// Send notification emails for all the aborted builds.
//
for (const aborted_build& ab: aborted_builds)
+ {
+ if (conn == nullptr)
+ conn = build_db_->connection ();
+
send_notification_email (*options_,
conn,
*ab.b,
@@ -2415,9 +2531,14 @@ handle (request& rq, response& rs)
ab.what,
error,
verb_ >= 2 ? &trace : nullptr);
+ }
}
}
+ // Release the database connection as soon as possible.
+ //
+ conn.reset ();
+
serialize_task_response_manifest ();
return true;
}
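
[Editor's note: the epoch arithmetic in the tenant-picking query above reads more naturally in C++. The condition it implements is roughly the following sketch; per the query, unloaded_timestamp is compared in nanoseconds since the UNIX epoch.]

  #include <chrono>

  // True if an unloaded tenant is due for another build_unloaded()
  // notification: at least unloaded_notify_interval has passed since
  // it was last picked (unloaded_timestamp).
  //
  static bool
  due (std::chrono::nanoseconds unloaded_timestamp,
       std::chrono::nanoseconds unloaded_notify_interval,
       std::chrono::nanoseconds now)
  {
    return unloaded_timestamp <= now - unloaded_notify_interval;
  }
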
diff --git a/mod/mod-builds.cxx b/mod/mod-builds.cxx
index 30562f3..81d4649 100644
--- a/mod/mod-builds.cxx
+++ b/mod/mod-builds.cxx
@@ -50,8 +50,6 @@ builds (const builds& r)
void brep::builds::
init (scanner& s)
{
- HANDLER_DIAG;
-
options_ = make_shared<options::builds> (
s, unknown_mode::fail, unknown_mode::fail);
diff --git a/mod/mod-ci-github-gh.cxx b/mod/mod-ci-github-gh.cxx
index 5c4809c..4ad8d32 100644
--- a/mod/mod-ci-github-gh.cxx
+++ b/mod/mod-ci-github-gh.cxx
@@ -20,10 +20,9 @@ namespace brep
case build_state::queued: return "QUEUED";
case build_state::building: return "IN_PROGRESS";
case build_state::built: return "COMPLETED";
- default:
- throw invalid_argument ("invalid build_state value: " +
- to_string (static_cast<int> (st)));
}
+
+ return ""; // Should never reach.
}
// Return the build_state corresponding to a GitHub check run status
@@ -36,19 +35,21 @@ namespace brep
else if (s == "IN_PROGRESS") return build_state::building;
else if (s == "COMPLETED") return build_state::built;
else
- throw invalid_argument ("invalid GitHub check run status: '" + s +
+ throw invalid_argument ("unexpected GitHub check run status: '" + s +
'\'');
}
string
- gh_to_conclusion (result_status rs)
+ gh_to_conclusion (result_status rs, bool warning_success)
{
switch (rs)
{
case result_status::success:
- case result_status::warning:
return "SUCCESS";
+ case result_status::warning:
+ return warning_success ? "SUCCESS" : "FAILURE";
+
case result_status::error:
case result_status::abort:
case result_status::abnormal:
@@ -60,13 +61,9 @@ namespace brep
case result_status::interrupt:
throw invalid_argument ("unexpected result_status value: " +
to_string (rs));
-
- // Invalid value.
- //
- default:
- throw invalid_argument ("invalid result_status value: " +
- to_string (static_cast<int> (rs)));
}
+
+ return ""; // Should never reach.
}
string
@@ -121,7 +118,7 @@ namespace brep
{
p.next_expect (event::begin_object);
- bool ni (false), hb (false), hs (false), bf (false), at (false);
+ bool ni (false), hb (false), hs (false);
// Skip unknown/uninteresting members.
//
@@ -135,16 +132,12 @@ namespace brep
if (c (ni, "node_id")) node_id = p.next_expect_string ();
else if (c (hb, "head_branch")) head_branch = p.next_expect_string ();
else if (c (hs, "head_sha")) head_sha = p.next_expect_string ();
- else if (c (bf, "before")) before = p.next_expect_string ();
- else if (c (at, "after")) after = p.next_expect_string ();
else p.next_expect_value_skip ();
}
if (!ni) missing_member (p, "gh_check_suite", "node_id");
if (!hb) missing_member (p, "gh_check_suite", "head_branch");
if (!hs) missing_member (p, "gh_check_suite", "head_sha");
- if (!bf) missing_member (p, "gh_check_suite", "before");
- if (!at) missing_member (p, "gh_check_suite", "after");
}
ostream&
@@ -152,9 +145,7 @@ namespace brep
{
os << "node_id: " << cs.node_id
<< ", head_branch: " << cs.head_branch
- << ", head_sha: " << cs.head_sha
- << ", before: " << cs.before
- << ", after: " << cs.after;
+ << ", head_sha: " << cs.head_sha;
return os;
}
@@ -186,6 +177,107 @@ namespace brep
return os;
}
+ gh_pull_request::
+ gh_pull_request (json::parser& p)
+ {
+ p.next_expect (event::begin_object);
+
+ bool ni (false), nu (false), st (false), ma (false), ms (false),
+ bs (false), hd (false);
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ auto c = [&p] (bool& v, const char* s)
+ {
+ return p.name () == s ? (v = true) : false;
+ };
+
+ if (c (ni, "node_id")) node_id = p.next_expect_string ();
+ else if (c (nu, "number")) number = p.next_expect_number<unsigned int> ();
+ else if (c (st, "state")) state = p.next_expect_string ();
+ else if (c (ma, "mergeable")) mergeable = p.next_expect_boolean_null<bool> ();
+ else if (c (ms, "merge_commit_sha"))
+ {
+ string* v (p.next_expect_string_null ());
+ if (v != nullptr)
+ merge_commit_sha = *v;
+ }
+ else if (c (bs, "base"))
+ {
+ p.next_expect (event::begin_object);
+
+ bool l (false), r (false), s (false);
+
+ while (p.next_expect (event::name, event::end_object))
+ {
+ if (c (l, "label")) base_label = p.next_expect_string ();
+ else if (c (r, "ref")) base_ref = p.next_expect_string ();
+ else if (c (s, "sha")) base_sha = p.next_expect_string ();
+ else p.next_expect_value_skip ();
+ }
+
+ if (!l) missing_member (p, "gh_pull_request.base", "label");
+ if (!r) missing_member (p, "gh_pull_request.base", "ref");
+ if (!s) missing_member (p, "gh_pull_request.base", "sha");
+ }
+ else if (c (hd, "head"))
+ {
+ p.next_expect (event::begin_object);
+
+ bool l (false), r (false), s (false);
+
+ while (p.next_expect (event::name, event::end_object))
+ {
+ if (c (l, "label")) head_label = p.next_expect_string ();
+ else if (c (r, "ref")) head_ref = p.next_expect_string ();
+ else if (c (s, "sha")) head_sha = p.next_expect_string ();
+ else p.next_expect_value_skip ();
+ }
+
+ if (!l) missing_member (p, "gh_pull_request.head", "label");
+ if (!r) missing_member (p, "gh_pull_request.head", "ref");
+ if (!s) missing_member (p, "gh_pull_request.head", "sha");
+ }
+ else p.next_expect_value_skip ();
+ }
+
+ if (!ni) missing_member (p, "gh_pull_request", "node_id");
+ if (!nu) missing_member (p, "gh_pull_request", "number");
+ if (!st) missing_member (p, "gh_pull_request", "state");
+ if (!ma) missing_member (p, "gh_pull_request", "mergeable");
+ if (!ms) missing_member (p, "gh_pull_request", "merge_commit_sha");
+ if (!bs) missing_member (p, "gh_pull_request", "base");
+ if (!hd) missing_member (p, "gh_pull_request", "head");
+ }
+
+ ostream&
+ operator<< (ostream& os, const gh_pull_request& pr)
+ {
+ os << "node_id: " << pr.node_id
+ << ", number: " << pr.number
+ << ", state: " << pr.state
+ << ", mergeable: " << (pr.mergeable
+ ? *pr.mergeable
+ ? "true"
+ : "false"
+ : "null")
+ << ", merge_commit_sha:" << pr.merge_commit_sha
+ << ", base: { "
+ << "label: " << pr.base_label
+ << ", ref: " << pr.base_ref
+ << ", sha: " << pr.base_sha
+ << " }"
+ << ", head: { "
+ << "label: " << pr.head_label
+ << ", ref: " << pr.head_ref
+ << ", sha: " << pr.head_sha
+ << " }";
+
+ return os;
+ }
+
// gh_repository
//
gh_repository::
@@ -306,6 +398,48 @@ namespace brep
return os;
}
+ // gh_pull_request_event
+ //
+ gh_pull_request_event::
+ gh_pull_request_event (json::parser& p)
+ {
+ p.next_expect (event::begin_object);
+
+ bool ac (false), pr (false), rp (false), in (false);
+
+ // Skip unknown/uninteresting members.
+ //
+ while (p.next_expect (event::name, event::end_object))
+ {
+ auto c = [&p] (bool& v, const char* s)
+ {
+ return p.name () == s ? (v = true) : false;
+ };
+
+ if (c (ac, "action")) action = p.next_expect_string ();
+ else if (c (pr, "pull_request")) pull_request = gh_pull_request (p);
+ else if (c (rp, "repository")) repository = gh_repository (p);
+ else if (c (in, "installation")) installation = gh_installation (p);
+ else p.next_expect_value_skip ();
+ }
+
+ if (!ac) missing_member (p, "gh_pull_request_event", "action");
+ if (!pr) missing_member (p, "gh_pull_request_event", "pull_request");
+ if (!rp) missing_member (p, "gh_pull_request_event", "repository");
+ if (!in) missing_member (p, "gh_pull_request_event", "installation");
+ }
+
+ ostream&
+ operator<< (ostream& os, const gh_pull_request_event& pr)
+ {
+ os << "action: " << pr.action;
+ os << ", pull_request { " << pr.pull_request << " }";
+ os << ", repository { " << pr.repository << " }";
+ os << ", installation { " << pr.installation << " }";
+
+ return os;
+ }
+
// gh_installation_access_token
//
// Example JSON:
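
[Editor's note: for reference, an abridged and hypothetical pull_request webhook body of the shape gh_pull_request_event expects (real payloads carry many more members); members not recognized by the parsers above are skipped:]

  // {
  //   "action": "opened",
  //   "pull_request": {
  //     "node_id": "PR_kwDOLc8CoM5vRS0y",
  //     "number": 7,
  //     "state": "open",
  //     "mergeable": null,
  //     "merge_commit_sha": null,
  //     "base": {"label": "org:master", "ref": "master", "sha": "..."},
  //     "head": {"label": "user:fix",   "ref": "fix",    "sha": "..."}
  //   },
  //   "repository": { ... },
  //   "installation": { ... }
  // }
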
diff --git a/mod/mod-ci-github-gh.hxx b/mod/mod-ci-github-gh.hxx
index 45b5359..2b77aeb 100644
--- a/mod/mod-ci-github-gh.hxx
+++ b/mod/mod-ci-github-gh.hxx
@@ -42,15 +42,11 @@ namespace brep
// The "check_suite" object within a check_suite webhook event request.
//
- // @@ TODO Remove unused fields.
- //
struct gh_check_suite
{
string node_id;
string head_branch;
string head_sha;
- string before;
- string after;
explicit
gh_check_suite (json::parser&);
@@ -70,6 +66,37 @@ namespace brep
gh_check_run () = default;
};
+ struct gh_pull_request
+ {
+ string node_id;
+ unsigned int number;
+
+ string state; // "open" or "closed".
+
+ // If absent then the result of the test merge commit is not yet
+ // available. If true then `merge_commit_sha` contains the commit ID of
+ // the merge commit. If false then `merge_commit_sha` is either empty or
+ // no longer valid.
+ //
+ optional<bool> mergeable;
+ string merge_commit_sha;
+
+ // @@ TODO Remove label if unused.
+ string base_label; // Name distinguishing the base from the head.
+ string base_ref;
+ string base_sha;
+
+ // @@ TODO Remove label if unused.
+ string head_label; // Name distinguishing the head from the base.
+ string head_ref;
+ string head_sha;
+
+ explicit
+ gh_pull_request (json::parser&);
+
+ gh_pull_request () = default;
+ };
+
// Return the GitHub check run status corresponding to a build_state.
//
string
@@ -81,8 +108,11 @@ namespace brep
build_state
gh_from_status (const string&);
+ // If warning_success is true, then map result_status::warning to SUCCESS
+ // and to FAILURE otherwise.
+ //
string
- gh_to_conclusion (result_status);
+ gh_to_conclusion (result_status, bool warning_success);
// Create a check_run name from a build. If the second argument is not
// NULL, return an abbreviated id if possible.
@@ -129,6 +159,20 @@ namespace brep
gh_check_suite_event () = default;
};
+ struct gh_pull_request_event
+ {
+ string action;
+
+ gh_pull_request pull_request;
+ gh_repository repository;
+ gh_installation installation;
+
+ explicit
+ gh_pull_request_event (json::parser&);
+
+ gh_pull_request_event () = default;
+ };
+
struct gh_installation_access_token
{
string token;
@@ -155,6 +199,9 @@ namespace brep
operator<< (ostream&, const gh_check_run&);
ostream&
+ operator<< (ostream&, const gh_pull_request&);
+
+ ostream&
operator<< (ostream&, const gh_repository&);
ostream&
@@ -164,6 +211,9 @@ namespace brep
operator<< (ostream&, const gh_check_suite_event&);
ostream&
+ operator<< (ostream&, const gh_pull_request_event&);
+
+ ostream&
operator<< (ostream&, const gh_installation_access_token&);
}
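
[Editor's note: a consumer of the three-way mergeable semantics documented above might look like this sketch, where pr is a parsed gh_pull_request:]

  if (!pr.mergeable)
  {
    // GitHub is still computing the test merge commit; check again later.
  }
  else if (*pr.mergeable)
  {
    // pr.merge_commit_sha names the test merge commit.
  }
  else
  {
    // Merge conflict; pr.merge_commit_sha is empty or no longer valid.
  }
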
diff --git a/mod/mod-ci-github-gq.cxx b/mod/mod-ci-github-gq.cxx
index 4e13199..1b967e5 100644
--- a/mod/mod-ci-github-gq.cxx
+++ b/mod/mod-ci-github-gq.cxx
@@ -35,6 +35,8 @@ namespace brep
// Throw runtime_error if the response indicated errors and
// invalid_json_input if the GitHub response contained invalid JSON.
//
+ // The parse_data function should not throw anything but invalid_json_input.
+ //
// The response format is defined in the GraphQL spec:
// https://spec.graphql.org/October2021/#sec-Response.
//
@@ -48,10 +50,21 @@ namespace brep
// The contents of `data`, including its opening and closing braces, are
// parsed by the `parse_data` function.
//
- // @@ TODO: specify what parse_data may throw (probably only
- // invalid_json_input).
+ // If the `errors` field is present in the response, error(s) occurred
+ // before or during execution of the operation.
+ //
+ // If the `data` field is not present the errors are request errors which
+ // occur before execution and are typically the client's fault.
//
- // @@ TODO data comes before errors in GitHub's responses.
+ // If the `data` field is also present in the response the errors are field
+ // errors which occur during execution and are typically the GraphQL
+ // endpoint's fault, and some fields in `data` that should not be are likely
+ // to be null.
+ //
+ // Although the spec recommends that the errors field (if present) should
+ // come before the data field, GitHub places data before errors. Therefore
+ // we need to check that the errors field is not present before parsing the
+ // data field as it might contain nulls if errors is present.
//
static void
gq_parse_response (json::parser& p,
@@ -61,13 +74,14 @@ namespace brep
// True if the data/errors fields are present.
//
- // Although the spec merely recommends that the `errors` field, if
- // present, comes before the `data` field, assume it always does because
- // letting the client parse data in the presence of field errors
- // (unexpected nulls) would not make sense.
- //
bool dat (false), err (false);
+ // Because the errors field is likely to come before the data field,
+ // serialize data to a stringstream and only parse it later once we're
+ // sure there are no errors.
+ //
+ stringstream data; // The value of the data field.
+
p.next_expect (event::begin_object);
while (p.next_expect (event::name, event::end_object))
@@ -76,20 +90,36 @@ namespace brep
{
dat = true;
- // Currently we're not handling fields that are null due to field
- // errors (see below for details) so don't parse any further.
+ // Serialize the data field to a string.
+ //
+ // Note that the JSON payload sent by GitHub is not pretty-printed so
+ // there is no need to worry about that.
//
- if (err)
- break;
+ json::stream_serializer s (data, 0 /* indentation */);
- parse_data (p);
+ try
+ {
+ for (event e: p)
+ {
+ if (!s.next (e, p.data ()))
+ break; // Stop if data object is complete.
+ }
+ }
+ catch (const json::invalid_json_output& e)
+ {
+ throw_json (p,
+ string ("serializer rejected response 'data' field: ") +
+ e.what ());
+ }
}
else if (p.name () == "errors")
{
- // Don't stop parsing because the error semantics depends on whether
- // or not `data` is present.
+ // Skip the errors object but don't stop parsing because the error
+ // semantics depends on whether or not `data` is present.
//
err = true; // Handled below.
+
+ p.next_expect_value_skip ();
}
else
{
@@ -107,23 +137,21 @@ namespace brep
}
}
- // If the `errors` field was present in the response, error(s) occurred
- // before or during execution of the operation.
- //
- // If the `data` field was not present the errors are request errors which
- // occur before execution and are typically the client's fault.
- //
- // If the `data` field was also present in the response the errors are
- // field errors which occur during execution and are typically the GraphQL
- // endpoint's fault, and some fields in `data` that should not be are
- // likely to be null.
- //
- if (err)
+ if (!err)
+ {
+ if (!dat)
+ throw runtime_error ("no data received from GraphQL endpoint");
+
+ // Parse the data field now that we know there are no errors.
+ //
+ json::parser dp (data, p.input_name);
+
+ parse_data (dp);
+ }
+ else
{
if (dat)
{
- // @@ TODO: Consider parsing partial data?
- //
throw runtime_error ("field error(s) received from GraphQL endpoint; "
"incomplete data received");
}
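
[Editor's note: for reference, the two error shapes gq_parse_response() distinguishes look roughly like this; illustrative payloads, not verbatim GitHub output:]

  // Field error: data present (note: before errors) but partially null.
  //
  // {"data": {"node": null}, "errors": [{"message": "..."}]}
  //
  // Request error: no data at all.
  //
  // {"errors": [{"message": "..."}]}
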
@@ -154,8 +182,6 @@ namespace brep
// }
// }
//
- // @@ TODO Handle response errors properly.
- //
static vector<gh_check_run>
gq_parse_response_check_runs (json::parser& p)
{
@@ -198,11 +224,11 @@ namespace brep
// if unset. Return false and issue diagnostics if the request failed.
//
static bool
- gq_mutate_check_runs (vector<check_run>& crs,
+ gq_mutate_check_runs (const basic_mark& error,
+ vector<check_run>& crs,
const string& iat,
string rq,
- build_state st,
- const basic_mark& error) noexcept
+ build_state st) noexcept
{
vector<gh_check_run> rcrs;
@@ -239,6 +265,12 @@ namespace brep
build_state rst (gh_from_status (rcr.status)); // Received state.
+ // Note that GitHub won't allow us to change a built check run
+ // to any other state.
+ //
+ // @@ Are we handling the case where the resulting state (built)
+ // differs from what we expect?
+ //
if (rst != build_state::built && rst != st)
{
error << "unexpected check_run status: received '" << rcr.status
@@ -253,7 +285,7 @@ namespace brep
if (!cr.node_id)
cr.node_id = move (rcr.node_id);
- cr.state = gh_from_status (rcr.status);
+ cr.state = rst;
cr.state_synced = true;
}
}
@@ -317,50 +349,61 @@ namespace brep
// Serialize `createCheckRun` mutations for one or more builds to GraphQL.
//
- // The result_status is required if the build_state is built because GitHub
- // does not allow a check run status of completed without a conclusion.
+ // The conclusion argument (`co`) is required if the build_state is built
+ // because GitHub does not allow a check run status of completed without a
+ // conclusion.
+ //
+ // The details URL argument (`du`) can be empty for queued but not for the
+ // other states.
//
static string
- gq_mutation_create_check_runs (
- const string& ri, // Repository ID
- const string& hs, // Head SHA
- const vector<reference_wrapper<const build>>& bs,
- build_state st, optional<result_status> rs,
- const build_queued_hints* bh)
+ gq_mutation_create_check_runs (const string& ri, // Repository ID
+ const string& hs, // Head SHA
+ const optional<string>& du, // Details URL.
+ const vector<check_run>& crs,
+ const string& st, // Check run status.
+ optional<gq_built_result> br = nullopt)
{
+ // Ensure details URL is non-empty if present.
+ //
+ assert (!du || !du->empty ());
+
ostringstream os;
os << "mutation {" << '\n';
// Serialize a `createCheckRun` for each build.
//
- for (size_t i (0); i != bs.size (); ++i)
+ for (size_t i (0); i != crs.size (); ++i)
{
- const build& b (bs[i]);
-
string al ("cr" + to_string (i)); // Field alias.
- // Check run name.
- //
- string nm (gh_check_run_name (b, bh));
-
os << gq_name (al) << ":createCheckRun(input: {" << '\n'
- << " name: " << gq_str (nm) << ',' << '\n'
- << " repositoryId: " << gq_str (ri) << ',' << '\n'
- << " headSha: " << gq_str (hs) << ',' << '\n'
- << " status: " << gq_enum (gh_to_status (st));
- if (rs)
+ << " name: " << gq_str (crs[i].name) << '\n'
+ << " repositoryId: " << gq_str (ri) << '\n'
+ << " headSha: " << gq_str (hs) << '\n'
+ << " status: " << gq_enum (st);
+ if (du)
+ {
+ os << '\n';
+ os << " detailsUrl: " << gq_str (*du);
+ }
+ if (br)
{
- os << ',' << '\n'
- << " conclusion: " << gq_enum (gh_to_conclusion (*rs));
+ os << '\n';
+ os << " conclusion: " << gq_enum (br->conclusion) << '\n'
+ << " output: {" << '\n'
+ << " title: " << gq_str (br->title) << '\n'
+ << " summary: " << gq_str (br->summary) << '\n'
+ << " }";
}
os << "})" << '\n'
// Specify the selection set (fields to be returned).
//
<< "{" << '\n'
<< " checkRun {" << '\n'
- << " id," << '\n'
- << " name," << '\n'
+ << " id" << '\n'
+ << " name" << '\n'
<< " status" << '\n'
<< " }" << '\n'
<< "}" << '\n';
@@ -373,34 +416,55 @@ namespace brep
// Serialize an `updateCheckRun` mutation for one build to GraphQL.
//
- // The result_status is required if the build_state is built because GitHub
- // does not allow updating a check run to completed without a conclusion.
+ // The `co` (conclusion) argument is required if the build_state is built
+ // because GitHub does not allow updating a check run to completed without a
+ // conclusion.
//
static string
- gq_mutation_update_check_run (const string& ri, // Repository ID.
- const string& ni, // Node ID.
- build_state st,
- optional<result_status> rs)
+ gq_mutation_update_check_run (const string& ri, // Repository ID.
+ const string& ni, // Node ID.
+ const optional<string>& du, // Details URL.
+ const string& st, // Check run status.
+ optional<timestamp> sa, // Started at.
+ optional<gq_built_result> br)
{
+ // Ensure details URL is non-empty if present.
+ //
+ assert (!du || !du->empty ());
+
ostringstream os;
os << "mutation {" << '\n'
<< "cr0:updateCheckRun(input: {" << '\n'
- << " checkRunId: " << gq_str (ni) << ',' << '\n'
- << " repositoryId: " << gq_str (ri) << ',' << '\n'
- << " status: " << gq_enum (gh_to_status (st));
- if (rs)
+ << " checkRunId: " << gq_str (ni) << '\n'
+ << " repositoryId: " << gq_str (ri) << '\n'
+ << " status: " << gq_enum (st);
+ if (sa)
{
- os << ',' << '\n'
- << " conclusion: " << gq_enum (gh_to_conclusion (*rs));
+ os << '\n';
+ os << " startedAt: " << gq_str (gh_to_iso8601 (*sa));
+ }
+ if (du)
+ {
+ os << '\n';
+ os << " detailsUrl: " << gq_str (*du);
+ }
+ if (br)
+ {
+ os << '\n';
+ os << " conclusion: " << gq_enum (br->conclusion) << '\n'
+ << " output: {" << '\n'
+ << " title: " << gq_str (br->title) << '\n'
+ << " summary: " << gq_str (br->summary) << '\n'
+ << " }";
}
os << "})" << '\n'
// Specify the selection set (fields to be returned).
//
<< "{" << '\n'
<< " checkRun {" << '\n'
- << " id," << '\n'
- << " name," << '\n'
+ << " id" << '\n'
+ << " name" << '\n'
<< " status" << '\n'
<< " }" << '\n'
<< "}" << '\n'
@@ -410,51 +474,55 @@ namespace brep
}
bool
- gq_create_check_runs (vector<check_run>& crs,
+ gq_create_check_runs (const basic_mark& error,
+ vector<check_run>& crs,
const string& iat,
const string& rid,
const string& hs,
- const vector<reference_wrapper<const build>>& bs,
- build_state st,
- const build_queued_hints& bh,
- const basic_mark& error)
+ build_state st)
{
// No support for result_status so state cannot be built.
//
assert (st != build_state::built);
- string rq (gq_serialize_request (
- gq_mutation_create_check_runs (rid,
- hs,
- bs,
- st,
- nullopt /* result_status */,
- &bh)));
-
- return gq_mutate_check_runs (crs, iat, move (rq), st, error);
+ // Empty details URL because it's not available until building.
+ //
+ string rq (
+ gq_serialize_request (gq_mutation_create_check_runs (rid,
+ hs,
+ nullopt,
+ crs,
+ gh_to_status (st))));
+
+ return gq_mutate_check_runs (error, crs, iat, move (rq), st);
}
bool
- gq_create_check_run (check_run& cr,
+ gq_create_check_run (const basic_mark& error,
+ check_run& cr,
const string& iat,
const string& rid,
const string& hs,
- const build& b,
+ const optional<string>& du,
build_state st,
- optional<result_status> rs,
- const build_queued_hints& bh,
- const basic_mark& error)
+ optional<gq_built_result> br)
{
// Must have a result if state is built.
//
- assert (st != build_state::built || rs);
-
- string rq (gq_serialize_request (
- gq_mutation_create_check_runs (rid, hs, {b}, st, rs, &bh)));
+ assert (st != build_state::built || br);
vector<check_run> crs {move (cr)};
- bool r (gq_mutate_check_runs (crs, iat, move (rq), st, error));
+ string rq (
+ gq_serialize_request (
+ gq_mutation_create_check_runs (rid,
+ hs,
+ du,
+ crs,
+ gh_to_status (st),
+ move (br))));
+
+ bool r (gq_mutate_check_runs (error, crs, iat, move (rq), st));
cr = move (crs[0]);
@@ -462,30 +530,387 @@ namespace brep
}
bool
- gq_update_check_run (check_run& cr,
+ gq_update_check_run (const basic_mark& error,
+ check_run& cr,
const string& iat,
const string& rid,
const string& nid,
+ const optional<string>& du,
build_state st,
- optional<result_status> rs,
- const basic_mark& error)
+ optional<gq_built_result> br)
{
// Must have a result if state is built.
//
- assert (st != build_state::built || rs);
+ assert (st != build_state::built || br);
- string rq (gq_serialize_request (
- gq_mutation_update_check_run (rid, nid, st, rs)));
+ // Set `startedAt` to current time if updating to building.
+ //
+ optional<timestamp> sa;
+
+ if (st == build_state::building)
+ sa = system_clock::now ();
+
+ string rq (
+ gq_serialize_request (
+ gq_mutation_update_check_run (rid,
+ nid,
+ du,
+ gh_to_status (st),
+ sa,
+ move (br))));
vector<check_run> crs {move (cr)};
- bool r (gq_mutate_check_runs (crs, iat, move (rq), st, error));
+ bool r (gq_mutate_check_runs (error, crs, iat, move (rq), st));
cr = move (crs[0]);
return r;
}
+ // Serialize a GraphQL query that fetches a pull request from GitHub.
+ //
+ static string
+ gq_query_pr_mergeability (const string& nid)
+ {
+ ostringstream os;
+
+ os << "query {" << '\n'
+ << " node(id:" << gq_str (nid) << ") {" << '\n'
+ << " ... on PullRequest {" << '\n'
+ << " mergeable potentialMergeCommit { oid }" << '\n'
+ << " }" << '\n'
+ << " }" << '\n'
+ << "}" << '\n';
+
+ return os.str ();
+ }
+
+ optional<string>
+ gq_pull_request_mergeable (const basic_mark& error,
+ const string& iat,
+ const string& nid)
+ {
+ string rq (gq_serialize_request (gq_query_pr_mergeability (nid)));
+
+ try
+ {
+ // Response parser.
+ //
+ struct resp
+ {
+ // True if the pull request was found (i.e., the node ID was valid).
+ //
+ bool found = false;
+
+ // Non-fatal error message issued during the parse.
+ //
+ string parse_error;
+
+ // The response value. Absent if the merge commit is still being
+ // generated.
+ //
+ optional<string> value;
+
+ resp (json::parser& p)
+ {
+ using event = json::event;
+
+ gq_parse_response (p, [this] (json::parser& p)
+ {
+ p.next_expect (event::begin_object);
+
+ if (p.next_expect_member_object_null ("node"))
+ {
+ found = true;
+
+ string ma (p.next_expect_member_string ("mergeable"));
+
+ if (ma == "MERGEABLE")
+ {
+ p.next_expect_member_object ("potentialMergeCommit");
+ string oid (p.next_expect_member_string ("oid"));
+ p.next_expect (event::end_object);
+
+ value = move (oid);
+ }
+ else
+ {
+                  if (ma == "CONFLICTING")
+                    value = "";
+                  else if (ma == "UNKNOWN")
+                    ; // Still being generated; leave value absent.
+ else
+ {
+ // @@ Let's throw invalid_json.
+ //
+ parse_error = "unexpected mergeable value '" + ma + '\'';
+
+ // Carry on as if it were UNKNOWN.
+ }
+
+ // Skip the merge commit ID (which should be null).
+ //
+ p.next_expect_name ("potentialMergeCommit");
+ p.next_expect_value_skip ();
+ }
+
+ p.next_expect (event::end_object); // node
+ }
+
+ p.next_expect (event::end_object);
+ });
+ }
+
+ resp () = default;
+ } rs;
+
+ uint16_t sc (github_post (rs,
+ "graphql", // API Endpoint.
+ strings {"Authorization: Bearer " + iat},
+ move (rq)));
+
+ if (sc == 200)
+ {
+ if (!rs.found)
+ error << "pull request '" << nid << "' not found";
+ else if (!rs.parse_error.empty ())
+ error << rs.parse_error;
+
+ return rs.value;
+ }
+ else
+ error << "failed to fetch pull request: error HTTP response status "
+ << sc;
+ }
+ catch (const json::invalid_json_input& e)
+ {
+ // Note: e.name is the GitHub API endpoint.
+ //
+ error << "malformed JSON in response from " << e.name << ", line: "
+ << e.line << ", column: " << e.column << ", byte offset: "
+ << e.position << ", error: " << e;
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "malformed header(s) in response: " << e;
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to fetch pull request (errno=" << e.code () << "): "
+ << e.what ();
+ }
+ catch (const runtime_error& e) // From response type's parsing constructor.
+ {
+ // GitHub response contained error(s) (could be ours or theirs at this
+ // point).
+ //
+ error << "unable to fetch pull request: " << e;
+ }
+
+ return nullopt;
+ }
+
+ // Serialize a GraphQL query that fetches the last 100 (the maximum per
+ // page) open pull requests with the specified base branch from the
+ // repository with the specified node ID.
+ //
+ // @@ TMP Should we support more/less than 100?
+ //
+ // Doing more (or even 100) could waste a lot of CI resources on
+ // re-testing stale PRs. Maybe we should create a failed synthetic
+ // conclusion check run asking the user to re-run the CI manually if/when
+ // needed.
+ //
+  //    Note that we cannot request more than 100 at a time (we would need
+  //    to do multiple requests with paging, etc.).
+ //
+ // Also, maybe we should limit the result to "fresh" PRs, e.g., those
+ // that have been "touched" in the last week.
+ //
+ // Example query:
+ //
+ // query {
+ // node(id:"R_kgDOLc8CoA")
+ // {
+ // ... on Repository {
+ // pullRequests (last:100 states:OPEN baseRefName:"master") {
+ // edges {
+ // node {
+ // id
+ // number
+ // headRefOid
+ // }
+ // }
+ // }
+ // }
+ // }
+ // }
+ //
+ static string
+ gq_query_fetch_open_pull_requests (const string& rid, const string& br)
+ {
+ ostringstream os;
+
+ os << "query {" << '\n'
+ << " node(id:" << gq_str (rid) << ") {" << '\n'
+ << " ... on Repository {" << '\n'
+ << " pullRequests (last:100" << '\n'
+ << " states:" << gq_enum ("OPEN") << '\n'
+ << " baseRefName:" << gq_str (br) << '\n'
+ << " ) {" << '\n'
+ << " totalCount" << '\n'
+ << " edges { node { id number headRefOid } }" << '\n'
+ << " }" << '\n'
+ << " }" << '\n'
+ << " }" << '\n'
+ << "}" << '\n';
+
+ return os.str ();
+ }
+
+ optional<vector<gh_pull_request>>
+ gq_fetch_open_pull_requests (const basic_mark& error,
+ const string& iat,
+ const string& nid,
+ const string& br)
+ {
+ string rq (
+ gq_serialize_request (gq_query_fetch_open_pull_requests (nid, br)));
+
+ try
+ {
+ // Response parser.
+ //
+ // Example response (only the part we need to parse here):
+ //
+ // {
+ // "node": {
+ // "pullRequests": {
+ // "totalCount": 2,
+ // "edges": [
+ // {
+ // "node": {
+ // "id": "PR_kwDOLc8CoM5vRS0y",
+ // "number": 7,
+ // "headRefOid": "cf72888be9484d6946a1340264e7abf18d31cc92"
+ // }
+ // },
+ // {
+ // "node": {
+ // "id": "PR_kwDOLc8CoM5vRzHs",
+ // "number": 8,
+ // "headRefOid": "626d25b318aad27bc0005277afefe3e8d6b2d434"
+ // }
+ // }
+ // ]
+ // }
+ // }
+ // }
+ //
+ struct resp
+ {
+ bool found = false;
+
+ vector<gh_pull_request> pull_requests;
+
+ resp (json::parser& p)
+ {
+ using event = json::event;
+
+ auto parse_data = [this] (json::parser& p)
+ {
+ p.next_expect (event::begin_object);
+
+ if (p.next_expect_member_object_null ("node"))
+ {
+ found = true;
+
+ p.next_expect_member_object ("pullRequests");
+
+ uint16_t n (p.next_expect_member_number<uint16_t> ("totalCount"));
+
+ p.next_expect_member_array ("edges");
+ for (size_t i (0); i != n; ++i)
+ {
+ p.next_expect (event::begin_object); // edges[i]
+
+ p.next_expect_member_object ("node");
+ {
+ gh_pull_request pr;
+ pr.node_id = p.next_expect_member_string ("id");
+ pr.number = p.next_expect_member_number<unsigned int> ("number");
+ pr.head_sha = p.next_expect_member_string ("headRefOid");
+ pull_requests.push_back (move (pr));
+ }
+ p.next_expect (event::end_object); // node
+
+ p.next_expect (event::end_object); // edges[i]
+ }
+ p.next_expect (event::end_array); // edges
+
+ p.next_expect (event::end_object); // pullRequests
+ p.next_expect (event::end_object); // node
+ }
+
+ p.next_expect (event::end_object);
+ };
+
+ gq_parse_response (p, move (parse_data));
+ }
+
+ resp () = default;
+ } rs;
+
+ uint16_t sc (github_post (rs,
+ "graphql", // API Endpoint.
+ strings {"Authorization: Bearer " + iat},
+ move (rq)));
+
+ if (sc == 200)
+ {
+ if (!rs.found)
+ {
+ error << "repository '" << nid << "' not found";
+
+ return nullopt;
+ }
+
+ return rs.pull_requests;
+ }
+ else
+ error << "failed to fetch repository pull requests: "
+ << "error HTTP response status " << sc;
+ }
+ catch (const json::invalid_json_input& e)
+ {
+ // Note: e.name is the GitHub API endpoint.
+ //
+ error << "malformed JSON in response from " << e.name << ", line: "
+ << e.line << ", column: " << e.column << ", byte offset: "
+ << e.position << ", error: " << e;
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "malformed header(s) in response: " << e;
+ }
+ catch (const system_error& e)
+ {
+ error << "unable to fetch repository pull requests (errno=" << e.code ()
+ << "): " << e.what ();
+ }
+ catch (const runtime_error& e) // From response type's parsing constructor.
+ {
+ // GitHub response contained error(s) (could be ours or theirs at this
+ // point).
+ //
+ error << "unable to fetch repository pull requests: " << e;
+ }
+
+ return nullopt;
+ }
+
+
// GraphQL serialization functions.
//
// The GraphQL spec:
diff --git a/mod/mod-ci-github-gq.hxx b/mod/mod-ci-github-gq.hxx
index 9331b2b..439f7b7 100644
--- a/mod/mod-ci-github-gq.hxx
+++ b/mod/mod-ci-github-gq.hxx
@@ -23,35 +23,43 @@ namespace brep
// the new states and node IDs. Return false and issue diagnostics if the
// request failed.
//
+ // Note: no details_url yet since there will be no entry in the build result
+ // search page until the task starts building.
+ //
bool
- gq_create_check_runs (vector<check_run>& check_runs,
+ gq_create_check_runs (const basic_mark& error,
+ vector<check_run>& check_runs,
const string& installation_access_token,
const string& repository_id,
const string& head_sha,
- const vector<reference_wrapper<const build>>&,
- build_state,
- const build_queued_hints&,
- const basic_mark& error);
+ build_state);
// Create a new check run on GitHub for a build. Update `cr` with the new
// state and the node ID. Return false and issue diagnostics if the request
// failed.
//
- // The result_status is required if the build_state is built because GitHub
- // does not allow a check run status of `completed` without a conclusion.
+ // If the details_url is absent GitHub will use the app's homepage.
//
- // @@ TODO Support output (title, summary, text).
+ // The gq_built_result is required if the build_state is built because
+ // GitHub does not allow a check run status of `completed` without at least
+ // a conclusion.
//
+ struct gq_built_result
+ {
+ string conclusion;
+ string title;
+ string summary;
+ };
+
bool
- gq_create_check_run (check_run& cr,
+ gq_create_check_run (const basic_mark& error,
+ check_run& cr,
const string& installation_access_token,
const string& repository_id,
const string& head_sha,
- const build&,
+ const optional<string>& details_url,
build_state,
- optional<result_status>,
- const build_queued_hints&,
- const basic_mark& error);
+ optional<gq_built_result> = nullopt);
// Update a check run on GitHub.
//
@@ -59,19 +67,67 @@ namespace brep
// with the new state. Return false and issue diagnostics if the request
// failed.
//
- // The result_status is required if the build_state is built because GitHub
- // does not allow updating a check run to `completed` without a conclusion.
+ // If the details_url is absent GitHub will use the app's homepage.
//
- // @@ TODO Support output (title, summary, text).
+ // The gq_built_result is required if the build_state is built because
+ // GitHub does not allow a check run status of `completed` without at least
+ // a conclusion.
//
bool
- gq_update_check_run (check_run& cr,
+ gq_update_check_run (const basic_mark& error,
+ check_run& cr,
const string& installation_access_token,
const string& repository_id,
const string& node_id,
+ const optional<string>& details_url,
build_state,
- optional<result_status>,
- const basic_mark& error);
+ optional<gq_built_result> = nullopt);
+
+  // The result of a pull request mergeability check: whether the pull
+  // request can be merged automatically and, if so, the test merge commit
+  // ID.
+  //
+ struct gq_pr_mergeability
+ {
+ // True if the pull request is auto-mergeable; false if it would create
+ // conflicts.
+ //
+ bool mergeable;
+
+ // The ID of the test merge commit. Empty if mergeable is false.
+ //
+ string merge_commit_id;
+ };
+
+ // Fetch a pull request's mergeability from GitHub. Return absent value if
+ // the merge commit is still being generated. Return empty string if the
+ // pull request is not auto-mergeable. Otherwise return the test merge
+ // commit id.
+ //
+ // Issue diagnostics and return absent if the request failed (which means it
+ // will be treated by the caller as still being generated).
+ //
+ // Note that the first request causes GitHub to start preparing the test
+ // merge commit.
+ //
+ optional<string>
+ gq_pull_request_mergeable (const basic_mark& error,
+ const string& installation_access_token,
+ const string& node_id);
+
+ // Fetch the last 100 open pull requests with the specified base branch from
+ // the repository with the specified node ID.
+ //
+ // Issue diagnostics and return nullopt if the repository was not found or
+ // an error occurred.
+ //
+ optional<vector<gh_pull_request>>
+ gq_fetch_open_pull_requests (const basic_mark& error,
+ const string& installation_access_token,
+ const string& repository_node_id,
+ const string& base_branch);
}
#endif // MOD_MOD_CI_GITHUB_GQ_HXX
diff --git a/mod/mod-ci-github-service-data.cxx b/mod/mod-ci-github-service-data.cxx
index f79550c..6db32fd 100644
--- a/mod/mod-ci-github-service-data.cxx
+++ b/mod/mod-ci-github-service-data.cxx
@@ -26,6 +26,8 @@ namespace brep
to_string (version));
}
+ warning_success = p.next_expect_member_boolean<bool> ("warning_success");
+
// Installation access token.
//
p.next_expect_member_object ("installation_access");
@@ -36,13 +38,30 @@ namespace brep
installation_id =
p.next_expect_member_number<uint64_t> ("installation_id");
- repository_id = p.next_expect_member_string ("repository_id");
- head_sha = p.next_expect_member_string ("head_sha");
+
+ repository_node_id = p.next_expect_member_string ("repository_node_id");
+
+ {
+ string* s (p.next_expect_member_string_null ("repository_clone_url"));
+ if (s != nullptr)
+ repository_clone_url = *s;
+ }
+
+ pr_number = p.next_expect_member_number_null<uint32_t> ("pr_number");
+
+ {
+ string* s (p.next_expect_member_string_null ("merge_node_id"));
+ if (s != nullptr)
+ merge_node_id = *s;
+ }
+
+ report_sha = p.next_expect_member_string ("report_sha");
p.next_expect_member_array ("check_runs");
while (p.next_expect (event::begin_object, event::end_array))
{
string bid (p.next_expect_member_string ("build_id"));
+ string nm (p.next_expect_member_string ("name"));
optional<string> nid;
{
@@ -54,24 +73,61 @@ namespace brep
build_state s (to_build_state (p.next_expect_member_string ("state")));
bool ss (p.next_expect_member_boolean<bool> ("state_synced"));
- check_runs.emplace_back (move (bid), move (nid), s, ss);
+ optional<result_status> rs;
+ {
+ string* v (p.next_expect_member_string_null ("status"));
+ if (v != nullptr)
+ {
+ rs = bbot::to_result_status (*v);
+ assert (s == build_state::built);
+ }
+ }
+
+ check_runs.emplace_back (move (bid), move (nm), move (nid), s, ss, rs);
p.next_expect (event::end_object);
}
+ {
+ string* s (p.next_expect_member_string_null ("conclusion_node_id"));
+ if (s != nullptr)
+ conclusion_node_id = *s;
+ }
+
p.next_expect (event::end_object);
}
service_data::
- service_data (string iat_tok,
+ service_data (bool ws,
+ string iat_tok,
+ timestamp iat_ea,
+ uint64_t iid,
+ string rid,
+ string rs)
+ : warning_success (ws),
+ installation_access (move (iat_tok), iat_ea),
+ installation_id (iid),
+ repository_node_id (move (rid)),
+ report_sha (move (rs))
+ {
+ }
+
+ service_data::
+ service_data (bool ws,
+ string iat_tok,
timestamp iat_ea,
uint64_t iid,
string rid,
- string hs)
- : installation_access (move (iat_tok), iat_ea),
+ string rs,
+ string rcu,
+ uint32_t prn)
+ : warning_success (ws),
+ installation_access (move (iat_tok), iat_ea),
installation_id (iid),
- repository_id (move (rid)),
- head_sha (move (hs))
+ repository_node_id (move (rid)),
+ repository_clone_url (move (rcu)),
+ pr_number (prn),
+ report_sha (move (rs))
{
}
@@ -85,6 +141,8 @@ namespace brep
s.member ("version", 1);
+ s.member ("warning_success", warning_success);
+
// Installation access token.
//
s.member_begin_object ("installation_access");
@@ -93,14 +151,34 @@ namespace brep
s.end_object ();
s.member ("installation_id", installation_id);
- s.member ("repository_id", repository_id);
- s.member ("head_sha", head_sha);
+ s.member ("repository_node_id", repository_node_id);
+
+ s.member_name ("repository_clone_url");
+ if (repository_clone_url)
+ s.value (*repository_clone_url);
+ else
+ s.value (nullptr);
+
+ s.member_name ("pr_number");
+ if (pr_number)
+ s.value (*pr_number);
+ else
+ s.value (nullptr);
+
+ s.member_name ("merge_node_id");
+ if (merge_node_id)
+ s.value (*merge_node_id);
+ else
+ s.value (nullptr);
+
+ s.member ("report_sha", report_sha);
s.member_begin_array ("check_runs");
for (const check_run& cr: check_runs)
{
s.begin_object ();
s.member ("build_id", cr.build_id);
+ s.member ("name", cr.name);
s.member_name ("node_id");
if (cr.node_id)
@@ -111,10 +189,25 @@ namespace brep
s.member ("state", to_string (cr.state));
s.member ("state_synced", cr.state_synced);
+ s.member_name ("status");
+ if (cr.status)
+ {
+ assert (cr.state == build_state::built);
+ s.value (to_string (*cr.status));
+ }
+ else
+ s.value (nullptr);
+
s.end_object ();
}
s.end_array ();
+ s.member_name ("conclusion_node_id");
+ if (conclusion_node_id)
+ s.value (*conclusion_node_id);
+ else
+ s.value (nullptr);
+
s.end_object ();
return b;
@@ -136,6 +229,7 @@ namespace brep
{
os << "node_id: " << cr.node_id.value_or ("null")
<< ", build_id: " << cr.build_id
+ << ", name: " << cr.name
<< ", state: " << cr.state_string ();
return os;
diff --git a/mod/mod-ci-github-service-data.hxx b/mod/mod-ci-github-service-data.hxx
index d4a47d5..5573d90 100644
--- a/mod/mod-ci-github-service-data.hxx
+++ b/mod/mod-ci-github-service-data.hxx
@@ -24,11 +24,14 @@ namespace brep
//
struct check_run
{
- string build_id; // Full build id.
- optional<string> node_id; // GitHub id.
+ string build_id; // Full build id.
+ string name; // Potentially shortened build id.
+ optional<string> node_id; // GitHub id.
- build_state state;
- bool state_synced;
+ build_state state;
+ bool state_synced;
+
+ optional<result_status> status; // Only present if state is built.
string
state_string () const
@@ -46,19 +49,43 @@ namespace brep
//
uint64_t version = 1;
+ // Check suite settings.
+ //
+ bool warning_success; // See gh_to_conclusion().
+
// Check suite-global data.
//
gh_installation_access_token installation_access;
uint64_t installation_id;
- // @@ TODO Rename to repository_node_id.
+
+ string repository_node_id; // GitHub-internal opaque repository id.
+
+ // The following two are only used for pull requests.
+ //
+ // @@ TODO/LATER: maybe put them in a struct?
+ //
+ optional<string> repository_clone_url;
+ optional<uint32_t> pr_number;
+
+ // The GitHub ID of the synthetic PR merge check run or absent if it
+ // hasn't been created yet.
//
- string repository_id; // GitHub-internal opaque repository id.
+ optional<string> merge_node_id;
- string head_sha;
+ // The commit ID the check suite or pull request (and its check runs) are
+ // reporting to. Note that in the case of a pull request this will be the
+ // head commit (`pull_request.head.sha`) as opposed to the merge commit.
+ //
+ string report_sha;
vector<check_run> check_runs;
+ // The GitHub ID of the synthetic conclusion check run or absent if it
+ // hasn't been created yet. See also merge_node_id above.
+ //
+ optional<string> conclusion_node_id;
+
// Return the check run with the specified build ID or nullptr if not
// found.
//
@@ -72,11 +99,25 @@ namespace brep
explicit
service_data (const string& json);
- service_data (string iat_token,
+ // The check_suite constructor.
+ //
+ service_data (bool warning_success,
+ string iat_token,
+ timestamp iat_expires_at,
+ uint64_t installation_id,
+ string repository_node_id,
+ string report_sha);
+
+ // The pull_request constructor.
+ //
+ service_data (bool warning_success,
+ string iat_token,
timestamp iat_expires_at,
uint64_t installation_id,
- string repository_id,
- string head_sha);
+ string repository_node_id,
+ string report_sha,
+ string repository_clone_url,
+ uint32_t pr_number);
service_data () = default;
diff --git a/mod/mod-ci-github.cxx b/mod/mod-ci-github.cxx
index 9449f97..d829ff6 100644
--- a/mod/mod-ci-github.cxx
+++ b/mod/mod-ci-github.cxx
@@ -5,8 +5,12 @@
#include <libbutl/json/parser.hxx>
+#include <web/xhtml/serialization.hxx>
+#include <web/server/mime-url-encoding.hxx> // mime_url_encode()
+
#include <mod/jwt.hxx>
#include <mod/hmac.hxx>
+#include <mod/build.hxx> // build_log_url()
#include <mod/module-options.hxx>
#include <mod/mod-ci-github-gq.hxx>
@@ -15,32 +19,41 @@
#include <stdexcept>
-// @@ TODO
+// @@ Remaining TODOs
+//
+// - Rerequested checks
+//
+// - check_suite (action: rerequested): received when user re-runs all
+// checks.
+//
+// - check_run (action: rerequested): received when user re-runs a
+// specific check or all failed checks.
//
-// Building CI checks with a GitHub App
-// https://docs.github.com/en/apps/creating-github-apps/writing-code-for-a-github-app/building-ci-checks-with-a-github-app
+// Will need to extract a few more fields from check_runs, but the layout
+// is very similar to that of check_suite.
+//
+// - Choose strong webhook secret (when deploying).
+//
+// - Check that delivery UUID has not been received before (replay attack).
//
-// @@ TODO Best practices
+// Resources:
+//
+// Creating an App:
+// https://docs.github.com/en/apps/creating-github-apps/about-creating-github-apps/best-practices-for-creating-a-github-app
//
// Webhooks:
// https://docs.github.com/en/webhooks/using-webhooks/best-practices-for-using-webhooks
// https://docs.github.com/en/webhooks/using-webhooks/validating-webhook-deliveries
//
// REST API:
-// https://docs.github.com/en/rest/using-the-rest-api/best-practices-for-using-the-rest-api?apiVersion=2022-11-28
+// All docs: https://docs.github.com/en/rest#all-docs
+// Best practices: https://docs.github.com/en/rest/using-the-rest-api/best-practices-for-using-the-rest-api
//
-// Creating an App:
-// https://docs.github.com/en/apps/creating-github-apps/about-creating-github-apps/best-practices-for-creating-a-github-app
+// GraphQL API:
+// Reference: https://docs.github.com/en/graphql/reference
//
-// Use a webhook secret to ensure request is coming from Github. HMAC:
-// https://en.wikipedia.org/wiki/HMAC#Definition. A suitable implementation
-// is provided by OpenSSL.
-// @@ TODO Centralize exception/error handling around calls to
-// github_post(). Currently it's mostly duplicated and there is quite
-// a lot of it.
-//
using namespace std;
using namespace butl;
using namespace web;
@@ -56,7 +69,7 @@ namespace brep
ci_github::
ci_github (const ci_github& r, tenant_service_map& tsm)
- : handler (r),
+ : database_module (r),
ci_start (r),
options_ (r.initialized_ ? r.options_ : nullptr),
tenant_service_map_ (tsm)
@@ -80,9 +93,12 @@ namespace brep
// Prepare for the CI requests handling, if configured.
//
- if (options_->ci_github_app_webhook_secret_specified ())
+ if (options_->build_config_specified () &&
+ options_->ci_github_app_webhook_secret_specified ())
{
ci_start::init (make_shared<options::ci_start> (*options_));
+
+ database_module::init (*options_, options_->build_db_retry ());
}
}
@@ -93,22 +109,14 @@ namespace brep
HANDLER_DIAG;
- if (!options_->ci_github_app_webhook_secret_specified ())
- throw invalid_request (404, "GitHub CI request submission disabled");
+ if (build_db_ == nullptr)
+ throw invalid_request (501, "GitHub CI submission not implemented");
// Process headers.
//
- // @@ TMP Shouldn't we also error<< in some of these header problem cases?
- //
- // @@ TMP From GitHub docs: "You can create webhooks that subscribe to the
- // events listed on this page."
- //
- // So it seems appropriate to generally use the term "event" (which
- // we already do for the most part), and "webhook event" only when
- // more context would be useful?
- //
string event; // Webhook event.
string hmac; // Received HMAC.
+ try
{
bool content_type (false);
@@ -175,11 +183,16 @@ namespace brep
if (hmac.empty ())
throw invalid_request (400, "missing x-hub-signature-256 header");
}
+ catch (const invalid_request& e)
+ {
+ error << "request header error: " << e.content;
+ throw;
+ }
// Read the entire request body into a buffer because we need to compute
// an HMAC over it and then parse it as JSON. The alternative of reading
- // from the stream twice works out to be more complicated (see also @@
- // TODO item in web/server/module.hxx).
+ // from the stream twice works out to be more complicated (see also a TODO
+ // item in web/server/module.hxx).
//
string body;
{
@@ -226,6 +239,35 @@ namespace brep
fail << "unable to compute request HMAC: " << e;
}
+ // Process the `warning` webhook request query parameter.
+ //
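+  // This parameter is presumably configured as part of the GitHub App's
+  // webhook URL, along these lines (the host is illustrative):
+  //
+  //   https://ci.example.org/?warning=success
+  //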
+ bool warning_success;
+ {
+ const name_values& rps (rq.parameters (1024, true /* url_only */));
+
+ auto i (find_if (rps.begin (), rps.end (),
+ [] (auto&& rp) {return rp.name == "warning";}));
+
+ if (i == rps.end ())
+ throw invalid_request (400,
+ "missing 'warning' webhook query parameter");
+
+ if (!i->value)
+ throw invalid_request (
+ 400, "missing 'warning' webhook query parameter value");
+
+ const string& v (*i->value);
+
+ if (v == "success") warning_success = true;
+ else if (v == "failure") warning_success = false;
+ else
+ {
+ throw invalid_request (
+ 400,
+ "invalid 'warning' webhook query parameter value: '" + v + '\'');
+ }
+ }
+
// There is a webhook event (specified in the x-github-event header) and
// each event contains a bunch of actions (specified in the JSON request
// body).
@@ -236,6 +278,9 @@ namespace brep
// is that we want be "notified" of new actions at which point we can decide
// whether to ignore them or to handle.
//
+  // @@ There is also the check_run event (re-requested by the user, either
+  //    an individual check run or all the failed check runs).
+ //
if (event == "check_suite")
{
gh_check_suite_event cs;
@@ -257,14 +302,14 @@ namespace brep
if (cs.action == "requested")
{
- return handle_check_suite_request (move (cs));
+ return handle_check_suite_request (move (cs), warning_success);
}
else if (cs.action == "rerequested")
{
- // Someone manually requested to re-run the check runs in this check
- // suite. Treat as a new request.
+ // Someone manually requested to re-run all the check runs in this
+ // check suite. Treat as a new request.
//
- return handle_check_suite_request (move (cs));
+ return handle_check_suite_request (move (cs), warning_success);
}
else if (cs.action == "completed")
{
@@ -272,9 +317,8 @@ namespace brep
// completed and a conclusion is available". Looks like this one we
// ignore?
//
- // @@ TODO What if our bookkeeping says otherwise? See conclusion
- // field which includes timedout. Need to come back to this once
- // have the "happy path" implemented.
+ // What if our bookkeeping says otherwise? But then we can't even
+ // access the service data easily here. @@ TODO: maybe/later.
//
return true;
}
@@ -290,9 +334,46 @@ namespace brep
}
else if (event == "pull_request")
{
- // @@ TODO
+ gh_pull_request_event pr;
+ try
+ {
+ json::parser p (body.data (), body.size (), "pull_request event");
+
+ pr = gh_pull_request_event (p);
+ }
+ catch (const json::invalid_json_input& e)
+ {
+ string m ("malformed JSON in " + e.name + " request body");
+
+ error << m << ", line: " << e.line << ", column: " << e.column
+ << ", byte offset: " << e.position << ", error: " << e;
+
+ throw invalid_request (400, move (m));
+ }
- throw invalid_request (501, "pull request events not implemented yet");
+ if (pr.action == "opened" || pr.action == "synchronize")
+ {
+ // opened
+ // A pull request was opened.
+ //
+ // synchronize
+ // A pull request's head branch was updated from the base branch or
+ // new commits were pushed to the head branch. (Note that there is
+ // no equivalent event for the base branch. That case gets handled
+ // in handle_check_suite_request() instead.)
+ //
+ // Note that both cases are handled the same: we start a new CI
+ // request which will be reported on the new commit id.
+ //
+ return handle_pull_request (move (pr), warning_success);
+ }
+ else
+ {
+ // Ignore the remaining actions by sending a 200 response with empty
+ // body.
+ //
+ return true;
+ }
}
else
{
@@ -304,8 +385,53 @@ namespace brep
}
}
+ // Let's capitalize the synthetic check run names to make them easier to
+ // distinguish from the regular ones.
+ //
+ static string merge_check_run_name ("MERGE-COMMIT");
+ static string conclusion_check_run_name ("CONCLUSION");
+
+ // Return the colored circle corresponding to a result_status.
+ //
+ static string
+ circle (result_status rs)
+ {
+ switch (rs)
+ {
+ case result_status::success: return "\U0001F7E2"; // Green circle.
+ case result_status::warning: return "\U0001F7E0"; // Orange circle.
+ case result_status::error:
+ case result_status::abort:
+ case result_status::abnormal: return "\U0001F534"; // Red circle.
+
+ // Valid values we should never encounter.
+ //
+ case result_status::skip:
+ case result_status::interrupt:
+ throw invalid_argument ("unexpected result_status value: " +
+ to_string (rs));
+ }
+
+ return ""; // Should never reach.
+ }
+
+ // Make a check run summary from a CI start_result.
+ //
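+  // The returned summary is a Markdown fenced code block, along these lines
+  // (the message text is illustrative):
+  //
+  //   ```
+  //   CI request is queued
+  //   ```
+  //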
+ static string
+ to_check_run_summary (const optional<ci_start::start_result>& r)
+ {
+ string s;
+
+ s = "```\n";
+ if (r) s += r->message;
+ else s += "Internal service error";
+ s += "\n```";
+
+ return s;
+ }
+
bool ci_github::
- handle_check_suite_request (gh_check_suite_event cs)
+ handle_check_suite_request (gh_check_suite_event cs, bool warning_success)
{
HANDLER_DIAG;
@@ -325,38 +451,706 @@ namespace brep
l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
- // Submit the CI request.
+ service_data sd (warning_success,
+ iat->token,
+ iat->expires_at,
+ cs.installation.id,
+ move (cs.repository.node_id),
+ move (cs.check_suite.head_sha));
+
+ // Create the conclusion check run.
+ //
+ {
+ check_run cr;
+ cr.name = conclusion_check_run_name;
+
+ if (gq_create_check_run (error,
+ cr,
+ iat->token,
+ sd.repository_node_id,
+ sd.report_sha,
+ nullopt /* details_url */,
+ build_state::building))
+ {
+ l3 ([&]{trace << "created check_run { " << cr << " }";});
+
+ sd.conclusion_node_id = move (cr.node_id);
+ }
+ else
+ {
+ // We could try to carry on in this case by either updating or
+ // creating this conclusion check run later. But let's not complicate
+ // things for now.
+ //
+ fail << "check suite " << cs.check_suite.node_id
+ << ": unable to create conclusion check run";
+ }
+ }
+
+ // The merge commits of any open pull requests with this branch as base
+ // branch will now be out of date, and thus so will be their CI builds and
+ // associated check runs (and, no, GitHub does not invalidate those CI
+ // results automatically; see below).
+ //
+ // Unfortunately GitHub does not provide a webhook for PR base branch
+ // updates (as it does for PR head branch updates) so we have to handle it
+ // here. We do so by fetching the open pull requests with this branch as
+ // base branch and then recreating the CI requests (cancel existing,
+ // create new) for each pull request.
+ //
+ // If we fail to recreate any of the PR CI requests, they and their check
+ // runs will be left reflecting outdated merge commits. If the new merge
+    // commit failed to be generated (merge conflicts) the PR will not be
+    // mergeable, which is not entirely catastrophic. If, on the other hand,
+    // all of the existing CI request's check runs have already succeeded and
+    // the new merge commit succeeds (no conflicts) but introduces logic
+    // errors, then the user would be able to merge a broken PR.
+ //
+ // Regardless of the nature of the error, we have to let the check suite
+ // handling code proceed so we only issue diagnostics. Note also that we
+ // want to run this code as early as possible to minimize the window of
+ // the user seeing misleading CI results.
+ //
+ if (cs.action == "requested")
+ {
+ // Fetch open pull requests with the check suite's head branch as base
+ // branch.
+ //
+ optional<vector<gh_pull_request>> prs (
+ gq_fetch_open_pull_requests (error,
+ iat->token,
+ sd.repository_node_id,
+ cs.check_suite.head_branch));
+
+ if (prs)
+ {
+ // Recreate each PR's CI request.
+ //
+ for (gh_pull_request& pr: *prs)
+ {
+ service_data prsd (sd.warning_success,
+ sd.installation_access.token,
+ sd.installation_access.expires_at,
+ sd.installation_id,
+ sd.repository_node_id,
+ pr.head_sha,
+ cs.repository.clone_url,
+ pr.number);
+
+ // Cancel the existing CI request and create a new unloaded CI
+ // request. After this call we will start getting the
+ // build_unloaded() notifications until (1) we load the request, (2)
+ // we cancel it, or (3) it gets archived after some timeout.
+ //
+ if (!create_pull_request_ci (error, warn, trace,
+ prsd, pr.node_id,
+ true /* cancel_first */))
+ {
+ error << "pull request " << pr.node_id
+ << ": unable to create unloaded CI request";
+ }
+ }
+ }
+ else
+ {
+ error << "unable to fetch open pull requests with base branch "
+ << cs.check_suite.head_branch;
+ }
+ }
+ // Cancel existing CI request if this check suite is being re-run.
+ //
+ else if (cs.action == "rerequested")
+ {
+ const string& nid (cs.check_suite.node_id);
+
+ if (!cancel (error, warn, &trace, *build_db_, "ci-github", nid))
+ error << "check suite " << nid << " (re-requested): unable to cancel";
+ }
+
+ // Start CI for the check suite.
//
repository_location rl (cs.repository.clone_url + '#' +
cs.check_suite.head_branch,
repository_type::git);
- string sd (service_data (move (iat->token),
- iat->expires_at,
- cs.installation.id,
- move (cs.repository.node_id),
- move (cs.check_suite.head_sha))
- .json ());
-
+    // @@ What happens if we call this function with an already existing
+    //    node_id (e.g., a replay attack)? See the UUID header above.
+ //
optional<start_result> r (
- start (error,
- warn,
- verb_ ? &trace : nullptr,
- tenant_service (move (cs.check_suite.node_id),
- "ci-github",
- move (sd)),
- move (rl),
- vector<package> {},
- nullopt, // client_ip
- nullopt // user_agent
- ));
-
- if (!r)
- fail << "unable to submit CI request";
+ start (error,
+ warn,
+ verb_ ? &trace : nullptr,
+ tenant_service (cs.check_suite.node_id, "ci-github", sd.json ()),
+ move (rl),
+ vector<package> {},
+ nullopt, /* client_ip */
+ nullopt /* user_agent */));
+
+ if (!r || r->status != 200)
+ {
+ // Update the conclusion check run with failure.
+ //
+ result_status rs (result_status::error);
+
+ optional<gq_built_result> br (
+ gq_built_result (gh_to_conclusion (rs, sd.warning_success),
+ circle (rs) + ' ' + ucase (to_string (rs)),
+ to_check_run_summary (r)));
+
+ check_run cr;
+
+ // Set some fields for display purposes.
+ //
+ cr.node_id = *sd.conclusion_node_id;
+ cr.name = conclusion_check_run_name;
+
+ if (gq_update_check_run (error,
+ cr,
+ iat->token,
+ sd.repository_node_id,
+ *sd.conclusion_node_id,
+ nullopt /* details_url */,
+ build_state::built,
+ move (br)))
+ {
+ l3 ([&]{trace << "updated check_run { " << cr << " }";});
+ }
+ else
+ {
+ fail << "check suite " << cs.check_suite.node_id
+ << ": unable to update conclusion check_run "
+ << *sd.conclusion_node_id;
+ }
+ }
+
+ return true;
+ }
+
+ // High-level description of pull request (PR) handling
+ //
+ // - Some GitHub pull request terminology:
+ //
+ // - Fork and pull model: Pull requests are created in a forked
+ // repository. Thus the head and base repositories are different.
+ //
+ // - Shared repository model: The pull request head and base branches are
+ // in the same repository. For example, from a feature branch onto
+ // master.
+ //
+ // - CI the merge commit but add check runs to the pull request head commit
+ //
+ // Most of the major CI integrations build the merge commit instead of the
+ // PR head commit.
+ //
+ // Adding the check runs to the PR head commit is recommended by the
+ // following blog posts by a GitHub employee who is one of the best
+ // sources on these topics:
+ // https://www.kenmuse.com/blog/shared-commits-and-github-checks/ and
+ // https://www.kenmuse.com/blog/creating-github-checks/.
+ //
+ // Do not add any check runs to the merge commit because:
+ //
+ // - The PR head commit is the only commit that can be relied upon to
+ // exist throughout the PR's lifetime. The merge commit, on the other
+ // hand, can change during the PR process. When that happens the PR will
+ // look for check runs on the new merge commit, effectively discarding
+ // the ones we had before.
+ //
+ // - Although some of the GitHub documentation makes it sound like they
+ // expect check runs to be added to both the PR head commit and the
+ // merge commit, the PR UI does not react to the merge commit's check
+ // runs consistently. It actually seems to be quite broken.
+ //
+ // The only thing it seems to do reliably is blocking the PR merge if
+  //     the merge commit's check runs are not successful (i.e., overriding
+  //     the PR head commit's check runs). But the UI looks quite messed up
+  //     generally in this state.
+ //
+ // Note that, in the case of a PR from a forked repository (the so-called
+ // "fork and pull" model), GitHub will copy the PR head commit from the
+ // head repository (the forked one) into the base repository. So the check
+ // runs must always be added to the base repository, whether the PR is
+ // from within the same repository or from a forked repository. The merge
+ // and head commits will be at refs/pull/<PR-number>/{merge,head}.
+ //
+ // - New commits are added to PR head branch
+ //
+ // @@ TODO In this case we will end up creating two sets of check runs on
+ // the same commit (pull_request.head.sha and
+ // check_suite.head_sha). It's not obvious which to prefer but I'm
+ // thinking the pull request is more important because in most
+ // development models it represents something that is more likely to
+ // end up in an important branch (as opposed to the head of a feature
+ // branch).
+ //
+  //   Note that in these two cases we are building different commits (the
+  //   head commit vs the merge commit), so it's not clear how a single
+  //   check_suite result can represent both.
+ //
+ // Possible solution: ignore all check_suites with non-empty
+ // pull_requests[].
+ //
+ // => check_suite(requested, PR_head) [only in shared repo model]
+ //
+ // Note: check_suite.pull_requests[] will contain all PRs with this
+ // branch as head.
+ //
+ // Note: check_suite.pull_requests[i].head.sha will be the new, updated
+ // PR head sha.
+ //
+ // => pull_request(synchronize)
+ //
+ // Note: The check_suite and pull_request can arrive in any order.
+ //
+ // - New commits are added to PR base branch
+ //
+ // Note: In this case pull_request.base.sha does not change, but the merge
+ // commit will be updated to include the new commits to the base branch.
+ //
+ // - @@ TODO? PR base branch changed (to a different branch)
+ //
+ // => pull_request(edited)
+ //
+ // - PR closed @@ TODO
+ //
+ // => pull_request(closed)
+ //
+ // Cancel CI?
+ //
+ // - PR merged @@ TODO
+ //
+ // => pull_request(merged)
+ //
+ // => check_suite(PR_base)
+ //
+ // Probably wouldn't want to CI the base again because the PR CI would've
+ // done the equivalent already.
+ //
+ bool ci_github::
+ handle_pull_request (gh_pull_request_event pr, bool warning_success)
+ {
+ HANDLER_DIAG;
+
+ l3 ([&]{trace << "pull_request event { " << pr << " }";});
+
+ // While we don't need the installation access token in this request,
+ // let's obtain it to flush out any permission issues early. Also, it is
+ // valid for an hour so we will most likely make use of it.
+ //
+ optional<string> jwt (generate_jwt (trace, error));
+ if (!jwt)
+ throw server_error ();
+
+ optional<gh_installation_access_token> iat (
+ obtain_installation_access_token (pr.installation.id,
+ move (*jwt),
+ error));
+ if (!iat)
+ throw server_error ();
+
+ l3 ([&]{trace << "installation_access_token { " << *iat << " }";});
+
+ service_data sd (warning_success,
+ move (iat->token),
+ iat->expires_at,
+ pr.installation.id,
+ move (pr.repository.node_id),
+ pr.pull_request.head_sha,
+ pr.repository.clone_url,
+ pr.pull_request.number);
+
+ // Create unloaded CI request. Cancel the existing CI request first if the
+ // head branch has been updated (action is `synchronize`).
+ //
+ // After this call we will start getting the build_unloaded()
+ // notifications until (1) we load the request, (2) we cancel it, or (3)
+ // it gets archived after some timeout.
+ //
+ bool cancel_first (pr.action == "synchronize");
+
+ if (!create_pull_request_ci (error, warn, trace,
+ sd, pr.pull_request.node_id,
+ cancel_first))
+ {
+ fail << "pull request " << pr.pull_request.node_id
+ << ": unable to create unloaded CI request";
+ }
return true;
}
+ // Note: only handles pull requests (not check suites).
+ //
+ function<optional<string> (const tenant_service&)> ci_github::
+ build_unloaded (tenant_service&& ts,
+ const diag_epilogue& log_writer) const noexcept
+ {
+ NOTIFICATION_DIAG (log_writer);
+
+ service_data sd;
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "failed to parse service data: " << e;
+ return nullptr;
+ }
+
+ // Get a new installation access token if the current one has expired.
+ //
+ const gh_installation_access_token* iat (nullptr);
+ optional<gh_installation_access_token> new_iat;
+
+ if (system_clock::now () > sd.installation_access.expires_at)
+ {
+ if (optional<string> jwt = generate_jwt (trace, error))
+ {
+ new_iat = obtain_installation_access_token (sd.installation_id,
+ move (*jwt),
+ error);
+ if (new_iat)
+ iat = &*new_iat;
+ }
+ }
+ else
+ iat = &sd.installation_access;
+
+ if (iat == nullptr)
+ return nullptr; // Try again on the next call.
+
+ auto make_iat_updater = [&new_iat, &error] ()
+ {
+ function<optional<string> (const tenant_service&)> r;
+
+ if (new_iat)
+ {
+ r = [&error,
+ iat = move (new_iat)] (const tenant_service& ts)
+ -> optional<string>
+ {
+ // NOTE: this lambda may be called repeatedly (e.g., due to
+ // transaction being aborted) and so should not move out of its
+ // captures.
+
+ service_data sd;
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "failed to parse service data: " << e;
+ return nullopt;
+ }
+
+ sd.installation_access = *iat;
+
+ return sd.json ();
+ };
+ }
+
+ return r;
+ };
+
+ // Create a synthetic check run with an in-progress state. Return the
+ // check run on success or nullopt on failure.
+ //
+ auto create_synthetic_cr = [iat,
+ &sd,
+ &error] (string name) -> optional<check_run>
+ {
+ check_run cr;
+ cr.name = move (name);
+
+ if (gq_create_check_run (error,
+ cr,
+ iat->token,
+ sd.repository_node_id,
+ sd.report_sha,
+ nullopt /* details_url */,
+ build_state::building))
+ {
+ return cr;
+ }
+ else
+ return nullopt;
+ };
+
+ // Update a synthetic check run with success or failure. Return the check
+ // run on success or nullopt on failure.
+ //
+ auto update_synthetic_cr = [iat,
+ &sd,
+ &error] (const string& node_id,
+ const string& name,
+ result_status rs,
+ string summary) -> optional<check_run>
+ {
+ assert (!node_id.empty ());
+
+ optional<gq_built_result> br (
+ gq_built_result (gh_to_conclusion (rs, sd.warning_success),
+ circle (rs) + ' ' + ucase (to_string (rs)),
+ move (summary)));
+
+ check_run cr;
+ cr.name = name; // For display purposes only.
+
+ if (gq_update_check_run (error,
+ cr,
+ iat->token,
+ sd.repository_node_id,
+ node_id,
+ nullopt /* details_url */,
+ build_state::built,
+ move (br)))
+ {
+ return cr;
+ }
+ else
+ return nullopt;
+ };
+
+ // Synthetic merge check run node ID. Empty until created on the first
+ // call or retrieved from service data on subsequent calls.
+ //
+ string merge_node_id;
+
+    // True if this is the first call (or the merge check run couldn't be
+    // created on the first call, in which case we just re-try by treating
+    // it as a first call).
+ //
+ bool first (!sd.merge_node_id);
+
+ // If this is the first call, (re)create the synthetic merge check run as
+ // soon as possible to make sure the previous check suite, if any, is no
+ // longer completed.
+ //
+ // Note that there is still a window between receipt of the pull_request
+ // event and the first bot/worker asking for a task, which could be
+ // substantial. We could probably (also) try to (re)create the merge
+    // check run in the webhook handler. @@ Maybe/later.
+ //
+ if (first)
+ {
+ if (auto cr = create_synthetic_cr (merge_check_run_name))
+ {
+ l3 ([&]{trace << "created check_run { " << *cr << " }";});
+
+ merge_node_id = move (*cr->node_id);
+ }
+ else
+ return make_iat_updater (); // Try again on the next call.
+ }
+ else
+ merge_node_id = *sd.merge_node_id;
+
+ // Start/check PR mergeability.
+ //
+ optional<string> mc (
+ gq_pull_request_mergeable (error, iat->token, ts.id)); // Merge commit.
+
+ if (!mc || mc->empty ())
+ {
+ if (!mc) // No merge commit yet.
+ {
+ // If this is a subsequent notification and there is no merge commit,
+ // then there is nothing to do.
+ //
+ if (!first)
+ return make_iat_updater ();
+
+ // Fall through to update service data.
+ }
+ else // Not mergeable.
+ {
+ // If the commit is not mergeable, cancel the CI request and fail the
+ // merge check run.
+ //
+        // Note that it feels like in this case we don't really need to
+        // create a failed synthetic conclusion check run since the PR
+        // cannot be merged anyway.
+
+ if (auto cr = update_synthetic_cr (
+ merge_node_id,
+ merge_check_run_name,
+ result_status::error,
+ "GitHub is unable to create test merge commit"))
+ {
+ l3 ([&]{trace << "updated check_run { " << *cr << " }";});
+
+ // Cancel the CI request.
+ //
+ // Ignore failure because this CI request may have been cancelled
+ // elsewhere due to an update to the PR base or head branches.
+ //
+ if (!cancel (error, warn, &trace, *build_db_, ts.type, ts.id))
+ l3 ([&]{trace << "CI for PR " << ts.id << " already cancelled";});
+
+ return nullptr; // No need to update service data in this case.
+ }
+ else
+ {
+ // Don't cancel the CI request if the merge check run update failed
+ // so that we can try again on the next call.
+
+ if (!first)
+ return make_iat_updater ();
+
+ // Fall through to update service data.
+ }
+ }
+
+ // This is a first notification, so record the merge check run in the
+ // service data.
+ //
+ return [&error,
+ iat = move (new_iat),
+ mni = move (merge_node_id)] (const tenant_service& ts)
+ -> optional<string>
+ {
+ // NOTE: this lambda may be called repeatedly (e.g., due to
+ // transaction being aborted) and so should not move out of its
+ // captures.
+
+ service_data sd;
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "failed to parse service data: " << e;
+ return nullopt;
+ }
+
+ if (iat)
+ sd.installation_access = *iat;
+
+ sd.merge_node_id = mni;
+
+ return sd.json ();
+ };
+ }
+
+ // If we are here, then it means we have a merge commit that we can load.
+ //
+ // Note that this can still be the first call (first=true).
+ //
+
+ // As a first step, (re)create the synthetic conclusion check run and then
+ // change the merge check run state to success. Do it in this order so
+ // that the check suite does not become completed.
+
+ // Synthetic conclusion check run node ID. Empty until created on the
+ // "second" call or retrieved from service data on subsequent calls.
+ //
+ string conclusion_node_id;
+
+ // True if this is the first call after the merge commit became available,
+ // which we will call the "second" call (or we couldn't create the
+ // conclusion check run on the first such call, in which case we just
+ // re-try by treating it as a "second" call).
+ //
+ bool second (!sd.conclusion_node_id);
+
+ if (second)
+ {
+ if (auto cr = create_synthetic_cr (conclusion_check_run_name))
+ {
+ l3 ([&]{trace << "created check_run { " << *cr << " }";});
+
+ conclusion_node_id = move (*cr->node_id);
+ }
+ }
+ else
+ conclusion_node_id = *sd.conclusion_node_id;
+
+ if (!conclusion_node_id.empty ()) // Conclusion check run was created.
+ {
+ // Update merge check run to successful.
+ //
+ if (auto cr = update_synthetic_cr (merge_node_id,
+ merge_check_run_name,
+ result_status::success,
+ "GitHub created test merge commit"))
+ {
+ l3 ([&]{trace << "updated check_run { " << *cr << " }";});
+
+ // Load the CI request.
+ //
+ // Example repository URL fragment:
+ //
+ // #pull/28/merge@1b6c9a361086ed93e6f1e67189e82d52de91c49b
+ //
+ repository_location rl (*sd.repository_clone_url + "#pull/" +
+ to_string (*sd.pr_number) + "/merge@" + *mc,
+ repository_type::git);
+
+ optional<start_result> r (
+ load (error, warn, &trace, *build_db_, move (ts), rl));
+
+ if (!r || r->status != 200)
+ {
+ if (auto cr = update_synthetic_cr (conclusion_node_id,
+ conclusion_check_run_name,
+ result_status::error,
+ to_check_run_summary (r)))
+ {
+ l3 ([&]{trace << "updated check_run { " << *cr << " }";});
+ }
+ else
+ {
+ // Nothing really we can do in this case since we will not receive
+ // any further notifications.
+ }
+
+ return nullptr; // No need to update service data in this case.
+ }
+ }
+ else
+ {
+ // Don't load the CI request if the merge check run update failed so
+ // that we can try again on the next call.
+ }
+ }
+
+ return [&error,
+ iat = move (new_iat),
+ mni = (first ? move (merge_node_id) : string ()),
+ cni = (second ? move (conclusion_node_id) : string ())]
+ (const tenant_service& ts) -> optional<string>
+ {
+ // NOTE: this lambda may be called repeatedly (e.g., due to
+ // transaction being aborted) and so should not move out of its
+ // captures.
+
+ service_data sd;
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "failed to parse service data: " << e;
+ return nullopt;
+ }
+
+ if (iat)
+ sd.installation_access = *iat;
+
+ if (!mni.empty ())
+ sd.merge_node_id = mni;
+
+ if (!cni.empty ())
+ sd.conclusion_node_id = cni;
+
+ return sd.json ();
+ };
+ }
+
// Build state change notifications (see tenant-services.hxx for
// background). Mapping our state transitions to GitHub pose multiple
// problems:
@@ -411,8 +1205,8 @@ namespace brep
// building Skip if there is no check run in service data or it's
// not in the queued state, otherwise update.
//
- // built Update if there is check run in service data and its
- // state is not built, otherwise create new.
+  //   built       Update if there is a check run in service data unless its
+ // state is built, otherwise create new.
//
// The rationale for this semantics is as follows: the building
// notification is a "nice to have" and can be skipped if things are not
@@ -487,12 +1281,14 @@ namespace brep
}
else if (*istate == build_state::built)
{
- // Unexpectd built->queued transition (rebuild).
+ // Unexpected built->queued transition (rebuild).
//
warn << "check run " << bid << ": unexpected rebuild";
}
else
- ; // Ignore interrupted.
+ {
+ // Ignore interrupted.
+ }
}
else
{
@@ -501,6 +1297,7 @@ namespace brep
bs.push_back (b);
crs.emplace_back (move (bid),
+ gh_check_run_name (b, &hs),
nullopt, /* node_id */
build_state::queued,
false /* state_synced */);
@@ -537,13 +1334,11 @@ namespace brep
{
// Create a check_run for each build.
//
- if (gq_create_check_runs (crs,
+ if (gq_create_check_runs (error,
+ crs,
iat->token,
- sd.repository_id, sd.head_sha,
- bs,
- build_state::queued,
- hs,
- error))
+ sd.repository_node_id, sd.report_sha,
+ build_state::queued))
{
for (const check_run& cr: crs)
{
@@ -603,7 +1398,8 @@ namespace brep
}
function<optional<string> (const tenant_service&)> ci_github::
- build_building (const tenant_service& ts, const build& b,
+ build_building (const tenant_service& ts,
+ const build& b,
const diag_epilogue& log_writer) const noexcept
{
NOTIFICATION_DIAG (log_writer);
@@ -636,7 +1432,9 @@ namespace brep
cr->state_synced = false;
}
else
- ; // Network error during queued notification, ignore.
+ {
+ // Network error during queued notification, ignore.
+ }
}
else
warn << "check run " << bid << ": out of order building "
@@ -674,13 +1472,13 @@ namespace brep
//
if (iat != nullptr)
{
- if (gq_update_check_run (*cr,
+ if (gq_update_check_run (error,
+ *cr,
iat->token,
- sd.repository_id,
+ sd.repository_node_id,
*cr->node_id,
- build_state::building,
- nullopt, /* result_status */
- error))
+ details_url (b),
+ build_state::building))
{
// Do nothing further if the state was already built on GitHub (note
// that this is based on the above-mentioned special GitHub semantics
@@ -743,7 +1541,8 @@ namespace brep
}
function<optional<string> (const tenant_service&)> ci_github::
- build_built (const tenant_service& ts, const build& b,
+ build_built (const tenant_service& ts,
+ const build& b,
const diag_epilogue& log_writer) const noexcept
{
NOTIFICATION_DIAG (log_writer);
@@ -759,33 +1558,68 @@ namespace brep
return nullptr;
}
- check_run cr; // Updated check run.
- string bid (gh_check_run_name (b)); // Full Build ID.
+    // The overall conclusion: absent if we have any unbuilt check runs,
+    // otherwise the aggregated result status of all the built check runs.
+ //
+ optional<result_status> conclusion (*b.status);
- if (check_run* scr = sd.find_check_run (bid))
+ check_run cr; // Updated check run.
{
- if (scr->state != build_state::building)
+ string bid (gh_check_run_name (b)); // Full Build ID.
+
+ optional<check_run> scr;
+ for (check_run& cr: sd.check_runs)
{
- warn << "check run " << bid << ": out of order built "
- << "notification; existing state: " << scr->state_string ();
+ if (cr.build_id == bid)
+ {
+ assert (!scr);
+ scr = move (cr);
+ }
+ else
+ {
+ if (cr.state == build_state::built)
+ {
+ if (conclusion)
+ {
+ assert (cr.status);
+ *conclusion |= *cr.status;
+ }
+ }
+ else
+ conclusion = nullopt;
+ }
+
+ if (scr && !conclusion.has_value ())
+ break;
}
- // Do nothing if already built.
- //
- if (scr->state == build_state::built)
- return nullptr;
+ if (scr)
+ {
+ if (scr->state != build_state::building)
+ {
+ warn << "check run " << bid << ": out of order built notification; "
+ << "existing state: " << scr->state_string ();
+ }
- cr = move (*scr);
- }
- else
- {
- warn << "check run " << bid << ": out of order built "
- << "notification; no check run state in service data";
+ // Do nothing if already built (e.g., rebuild).
+ //
+ if (scr->state == build_state::built)
+ return nullptr;
- cr.build_id = move (bid);
- }
+ // Don't move from scr because we search sd.check_runs below.
+ //
+ cr = move (*scr);
+ }
+ else
+ {
+ warn << "check run " << bid << ": out of order built notification; "
+ << "no check run state in service data";
+
+ cr.build_id = move (bid);
+ cr.name = cr.build_id;
+ }
- cr.state_synced = false;
+ cr.state_synced = false;
+ }
// Get a new installation access token if the current one has expired.
//
@@ -812,16 +1646,120 @@ namespace brep
//
if (iat != nullptr)
{
+ // Prepare the check run's summary field (the build information in an
+ // XHTML table).
+ //
+ string sm; // Summary.
+ {
+ using namespace web::xhtml;
+
+ ostringstream os;
+ xml::serializer s (os, "check_run_summary");
+
+ // This hack is required to disable XML element name prefixes (which
+ // GitHub does not like). Note that this adds an xmlns declaration for
+ // the XHTML namespace which for now GitHub appears to ignore. If that
+ // ever becomes a problem, then we should redo this with raw XML
+ // serializer calls.
+ //
+ struct table: element
+ {
+ table (): element ("table") {}
+
+ void
+ start (xml::serializer& s) const override
+ {
+ s.start_element (xmlns, name);
+ s.namespace_decl (xmlns, "");
+ }
+ } TABLE;
+
+ // Serialize a result row (colored circle, result text, log URL) for
+ // an operation and result_status.
+ //
+ auto tr_result = [this, &b] (xml::serializer& s,
+ const string& op,
+ result_status rs)
+ {
+ // The log URL.
+ //
+ string lu (build_log_url (options_->host (),
+ options_->root (),
+ b,
+ op != "result" ? &op : nullptr));
+
+ s << TR
+ << TD << EM << op << ~EM << ~TD
+ << TD
+ << circle (rs) << ' '
+ << CODE << to_string (rs) << ~CODE
+ << " (" << A << HREF << lu << ~HREF << "log" << ~A << ')'
+ << ~TD
+ << ~TR;
+ };
+
+ // Serialize the summary to an XHTML table.
+ //
+ s << TABLE
+ << TBODY;
+
+ tr_result (s, "result", *b.status);
+
+ s << TR
+ << TD << EM << "package" << ~EM << ~TD
+ << TD << CODE << b.package_name << ~CODE << ~TD
+ << ~TR
+ << TR
+ << TD << EM << "version" << ~EM << ~TD
+ << TD << CODE << b.package_version << ~CODE << ~TD
+ << ~TR
+ << TR
+ << TD << EM << "toolchain" << ~EM << ~TD
+ << TD
+ << CODE
+ << b.toolchain_name << '-' << b.toolchain_version.string ()
+ << ~CODE
+ << ~TD
+ << ~TR
+ << TR
+ << TD << EM << "target" << ~EM << ~TD
+ << TD << CODE << b.target.string () << ~CODE << ~TD
+ << ~TR
+ << TR
+ << TD << EM << "target config" << ~EM << ~TD
+ << TD << CODE << b.target_config_name << ~CODE << ~TD
+ << ~TR
+ << TR
+ << TD << EM << "package config" << ~EM << ~TD
+ << TD << CODE << b.package_config_name << ~CODE << ~TD
+ << ~TR;
+
+ for (const operation_result& r: b.results)
+ tr_result (s, r.operation, r.status);
+
+ s << ~TBODY
+ << ~TABLE;
+
+ sm = os.str ();
+ }
+
+ gq_built_result br (gh_to_conclusion (*b.status, sd.warning_success),
+ circle (*b.status) + ' ' +
+ ucase (to_string (*b.status)),
+ move (sm));
+
if (cr.node_id)
{
// Update existing check run to built.
//
- if (gq_update_check_run (cr,
+ if (gq_update_check_run (error,
+ cr,
iat->token,
- sd.repository_id,
+ sd.repository_node_id,
*cr.node_id,
- build_state::built, b.status,
- error))
+ details_url (b),
+ build_state::built,
+ move (br)))
{
assert (cr.state == build_state::built);
@@ -838,63 +1776,174 @@ namespace brep
// check run to the service data it will create another check run with
// the shortened name which will never get to the built state.
//
- // @@ TMP If build_queued() runs but fails to create we could store
- // the build hints and use them now.
- //
- if (gq_create_check_run (cr,
+ if (gq_create_check_run (error,
+ cr,
iat->token,
- sd.repository_id,
- sd.head_sha,
- b,
- build_state::built, b.status,
- build_queued_hints (false, false),
- error))
+ sd.repository_node_id,
+ sd.report_sha,
+ details_url (b),
+ build_state::built,
+ move (br)))
{
assert (cr.state == build_state::built);
l3 ([&]{trace << "created check_run { " << cr << " }";});
}
}
+
+ if (cr.state == build_state::built)
+ {
+ // Check run was created/updated successfully to built.
+ //
+ // @@ TMP Feels like this should also be done inside
+ // gq_{create,update}_check_run() -- where cr.state is set if the
+ // create/update succeeds -- but I think we didn't want to pass a
+ // result_status into a gq_ function because converting to a GitHub
+ // conclusion/title/summary is reasonably complicated.
+ //
+ // @@@ We need to redo that code:
+ //
+ // - Pass the vector of check runs with new state (and status) set.
+ // - Update synchronized flag inside those functions.
+ // - Update the state to built if it's already built on GitHub --
+ // but then what do we set the status to?
+ // - Maybe signal in return value (optional<bool>?) that there is
+ // a discrepancy.
+ //
+ cr.status = b.status;
+
+ // Update the conclusion check run if all check runs are now built.
+ //
+ if (conclusion)
+ {
+ assert (sd.conclusion_node_id);
+
+ // Update the conclusion check run with success.
+ //
+ result_status rs (*conclusion);
+
+ optional<gq_built_result> br (
+ gq_built_result (gh_to_conclusion (rs, sd.warning_success),
+ circle (rs) + ' ' + ucase (to_string (rs)),
+ "All configurations are built"));
+
+ check_run cr;
+
+ // Set some fields for display purposes.
+ //
+ cr.node_id = *sd.conclusion_node_id;
+ cr.name = conclusion_check_run_name;
+
+ if (gq_update_check_run (error,
+ cr,
+ iat->token,
+ sd.repository_node_id,
+ *sd.conclusion_node_id,
+ nullopt /* details_url */,
+ build_state::built,
+ move (br)))
+ {
+ l3 ([&]{trace << "updated check_run { " << cr << " }";});
+ }
+ else
+ {
+ // Nothing we can do here except log the error.
+ //
+ error << "check suite " << ts.id
+ << ": unable to update conclusion check run "
+ << *sd.conclusion_node_id;
+ }
+ }
+ }
}
return [iat = move (new_iat),
cr = move (cr),
error = move (error),
warn = move (warn)] (const tenant_service& ts) -> optional<string>
+ {
+ // NOTE: this lambda may be called repeatedly (e.g., due to transaction
+ // being aborted) and so should not move out of its captures.
+
+ service_data sd;
+ try
+ {
+ sd = service_data (*ts.data);
+ }
+ catch (const invalid_argument& e)
{
- // NOTE: this lambda may be called repeatedly (e.g., due to transaction
- // being aborted) and so should not move out of its captures.
+ error << "failed to parse service data: " << e;
+ return nullopt;
+ }
- service_data sd;
- try
- {
- sd = service_data (*ts.data);
- }
- catch (const invalid_argument& e)
+ if (iat)
+ sd.installation_access = *iat;
+
+ if (check_run* scr = sd.find_check_run (cr.build_id))
+ {
+ // This will most commonly generate a duplicate warning (see above).
+ // We could save the old state and only warn if it differs but let's
+ // not complicate things for now.
+ //
+#if 0
+ if (scr->state != build_state::building)
{
- error << "failed to parse service data: " << e;
- return nullopt;
+ warn << "check run " << cr.build_id << ": out of order built "
+ << "notification service data update; existing state: "
+ << scr->state_string ();
}
+#endif
+ *scr = cr;
+ }
+ else
+ sd.check_runs.push_back (cr);
- if (iat)
- sd.installation_access = *iat;
+ return sd.json ();
+ };
+ }
- if (check_run* scr = sd.find_check_run (cr.build_id))
- {
- if (scr->state != build_state::building)
- {
- warn << "check run " << cr.build_id << ": out of order built "
- << "notification service data update; existing state: "
- << scr->state_string ();
- }
+ bool ci_github::
+ create_pull_request_ci (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark& trace,
+ const service_data& sd,
+ const string& nid,
+ bool cf) const
+ {
+ // Cancel the existing CI request if asked to do so. Ignore failure
+ // because the request may already have been cancelled for other reasons.
+ //
+ if (cf)
+ {
+ if (!cancel (error, warn, &trace, *build_db_, "ci-github", nid))
+ l3 ([&]{trace << "unable to cancel CI for pull request " << nid;});
+ }
- *scr = cr;
- }
- else
- sd.check_runs.push_back (cr);
+ // Create a new unloaded CI request.
+ //
+ tenant_service ts (nid, "ci-github", sd.json ());
- return sd.json ();
- };
+ // Note: use no delay since we need to (re)create the synthetic merge
+ // check run as soon as possible.
+ //
+ return create (error, warn, &trace,
+ *build_db_, move (ts),
+ chrono::seconds (30) /* interval */,
+ chrono::seconds (0) /* delay */)
+ .has_value ();
+ }
+
+ string ci_github::
+ details_url (const build& b) const
+ {
+ return options_->host () +
+ "/@" + b.tenant +
+ "?builds=" + mime_url_encode (b.package_name.string ()) +
+ "&pv=" + b.package_version.string () +
+ "&tg=" + mime_url_encode (b.target.string ()) +
+ "&tc=" + mime_url_encode (b.target_config_name) +
+ "&pc=" + mime_url_encode (b.package_config_name) +
+ "&th=" + mime_url_encode (b.toolchain_version.string ());
}
optional<string> ci_github::
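For orientation, a sketch of the URL that details_url() above produces; the
host, tenant id, and build values below are hypothetical:

  https://ci.example.org/@<tenant>?builds=libhello&pv=1.2.3&tg=x86_64-linux-gnu&tc=linux_debian_12-gcc_13&pc=default&th=0.17.0

Values containing characters unsafe in a URL are escaped by mime_url_encode(),
as in the code above.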
diff --git a/mod/mod-ci-github.hxx b/mod/mod-ci-github.hxx
index 07feca8..489aac7 100644
--- a/mod/mod-ci-github.hxx
+++ b/mod/mod-ci-github.hxx
@@ -7,8 +7,8 @@
#include <libbrep/types.hxx>
#include <libbrep/utility.hxx>
-#include <mod/module.hxx>
#include <mod/module-options.hxx>
+#include <mod/database-module.hxx>
#include <mod/ci-common.hxx>
#include <mod/tenant-service.hxx>
@@ -17,8 +17,11 @@
namespace brep
{
- class ci_github: public handler,
+ struct service_data;
+
+ class ci_github: public database_module,
private ci_start,
+ public tenant_service_build_unloaded,
public tenant_service_build_queued,
public tenant_service_build_building,
public tenant_service_build_built
@@ -40,6 +43,10 @@ namespace brep
cli_options () const {return options::ci_github::description ();}
virtual function<optional<string> (const tenant_service&)>
+ build_unloaded (tenant_service&&,
+ const diag_epilogue& log_writer) const noexcept override;
+
+ virtual function<optional<string> (const tenant_service&)>
build_queued (const tenant_service&,
const vector<build>&,
optional<build_state> initial_state,
@@ -60,8 +67,43 @@ namespace brep
// Handle the check_suite event `requested` and `rerequested` actions.
//
+ // If warning_success is true, then map result_status::warning to
+ // SUCCESS; otherwise map it to FAILURE.
+ //
bool
- handle_check_suite_request (gh_check_suite_event);
+ handle_check_suite_request (gh_check_suite_event, bool warning_success);
+
+ // Handle the pull_request event `opened` and `synchronize` actions.
+ //
+ // If warning_success is true, then map result_status::warning to
+ // SUCCESS; otherwise map it to FAILURE.
+ //
+ bool
+ handle_pull_request (gh_pull_request_event, bool warning_success);
+
+ // Create an unloaded CI request for a pull request. If `cancel_first` is
+ // true, cancel its existing CI request first.
+ //
+ // Return true if an unloaded CI request was created. Note that failure
+ // to cancel is ignored since the CI request may already have been
+ // cancelled for other reasons.
+ //
+ // After this call we will start getting the build_unloaded()
+ // notifications until (1) we load the request, (2) we cancel it, or (3)
+ // it gets archived after some timeout.
+ //
+ bool
+ create_pull_request_ci (const basic_mark& error,
+ const basic_mark& warn,
+ const basic_mark& trace,
+ const service_data&,
+ const string& pull_request_node_id,
+ bool cancel_first) const;
+
+ // Build a check run details_url for a build.
+ //
+ string
+ details_url (const build&) const;
optional<string>
generate_jwt (const basic_mark& trace, const basic_mark& error) const;
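To make the pull request flow concrete, here is a minimal sketch of how
handle_pull_request() might invoke create_pull_request_ci() (the event member
names and the action test are illustrative assumptions, not the actual
handler code):

  // Assumed: `pr` is the parsed gh_pull_request_event and `sd` was
  // initialized from it. On `synchronize` the head commit has changed,
  // so the existing CI request, if any, is cancelled first.
  //
  if (!create_pull_request_ci (error, warn, trace,
                               sd,
                               pr.pull_request.node_id,
                               pr.action == "synchronize" /* cancel_first */))
    return false; // The diagnostics is already issued.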
diff --git a/mod/mod-ci.cxx b/mod/mod-ci.cxx
index 5974d45..8c47bc4 100644
--- a/mod/mod-ci.cxx
+++ b/mod/mod-ci.cxx
@@ -22,6 +22,8 @@ using namespace butl;
using namespace web;
using namespace brep::cli;
+// ci
+//
#ifdef BREP_CI_TENANT_SERVICE
brep::ci::
ci (tenant_service_map& tsm)
@@ -36,7 +38,12 @@ ci (const ci& r, tenant_service_map& tsm)
#else
ci (const ci& r)
#endif
- : handler (r),
+ :
+#ifndef BREP_CI_TENANT_SERVICE_UNLOADED
+ handler (r),
+#else
+ database_module (r),
+#endif
ci_start (r),
options_ (r.initialized_ ? r.options_ : nullptr),
form_ (r.initialized_ || r.form_ == nullptr
@@ -100,6 +107,13 @@ init (scanner& s)
}
}
+#ifdef BREP_CI_TENANT_SERVICE_UNLOADED
+ if (!options_->build_config_specified ())
+ fail << "package building functionality must be enabled";
+
+ database_module::init (*options_, options_->build_db_retry ());
+#endif
+
if (options_->root ().empty ())
options_->root (dir_path ("/"));
}
@@ -347,6 +361,7 @@ handle (request& rq, response& rs)
user_agent = h.value;
}
+#ifndef BREP_CI_TENANT_SERVICE_UNLOADED
optional<start_result> r (start (error,
warn,
verb_ ? &trace : nullptr,
@@ -367,6 +382,25 @@ handle (request& rq, response& rs)
: optional<string> ()),
custom_request,
overrides));
+#else
+ assert (build_db_ != nullptr); // Wouldn't be here otherwise.
+
+ optional<start_result> r;
+
+ if (optional<string> ref = create (error,
+ warn,
+ verb_ ? &trace : nullptr,
+ *build_db_,
+ tenant_service ("", "ci", rl.string ()),
+ chrono::seconds (40),
+ chrono::seconds (10)))
+ {
+ string msg ("unloaded CI request is created: " +
+ options_->host () + tenant_dir (root, *ref).string ());
+
+ r = start_result {200, move (msg), move (*ref), {}};
+ }
+#endif
if (!r)
return respond_error (); // The diagnostics is already issued.
@@ -472,4 +506,97 @@ build_built (const tenant_service&,
return ts.data ? *ts.data + ", " + s : s;
};
}
+
+#ifdef BREP_CI_TENANT_SERVICE_UNLOADED
+function<optional<string> (const brep::tenant_service&)> brep::ci::
+build_unloaded (tenant_service&& ts,
+ const diag_epilogue& log_writer) const noexcept
+{
+ NOTIFICATION_DIAG (log_writer);
+
+ assert (ts.data); // Repository location.
+
+ try
+ {
+ repository_location rl (*ts.data);
+
+ if (!load (error, warn, verb_ ? &trace : nullptr,
+ *build_db_,
+ move (ts),
+ rl))
+ return nullptr; // The diagnostics is already issued.
+ }
+ catch (const invalid_argument& e)
+ {
+ error << "invalid repository location '" << *ts.data << "' stored for "
+ << "tenant service " << ts.id << ' ' << ts.type;
+
+ return nullptr;
+ }
+
+ return [] (const tenant_service& ts) {return "loaded " + *ts.data;};
+}
+#endif
#endif
+
+// ci_cancel
+//
+brep::ci_cancel::
+ci_cancel (const ci_cancel& r)
+ : database_module (r),
+ options_ (r.initialized_ ? r.options_ : nullptr)
+{
+}
+
+void brep::ci_cancel::
+init (scanner& s)
+{
+ options_ = make_shared<options::ci_cancel> (
+ s, unknown_mode::fail, unknown_mode::fail);
+
+ if (options_->build_config_specified ())
+ database_module::init (*options_, options_->build_db_retry ());
+}
+
+bool brep::ci_cancel::
+handle (request& rq, response& rs)
+{
+ HANDLER_DIAG;
+
+ if (build_db_ == nullptr)
+ throw invalid_request (501, "not implemented");
+
+ params::ci_cancel params;
+
+ try
+ {
+ name_value_scanner s (rq.parameters (1024));
+ params = params::ci_cancel (s, unknown_mode::fail, unknown_mode::fail);
+ }
+ catch (const cli::exception& e)
+ {
+ throw invalid_request (400, e.what ());
+ }
+
+ const string& reason (params.reason ());
+
+ if (reason.empty ())
+ throw invalid_request (400, "missing CI request cancellation reason");
+
+ // Verify the tenant id.
+ //
+ const string tid (params.id ());
+
+ if (tid.empty ())
+ throw invalid_request (400, "invalid CI request id");
+
+ if (!cancel (error, warn, verb_ ? &trace : nullptr, reason, *build_db_, tid))
+ throw invalid_request (400, "unknown CI request id");
+
+ // We have all the data, so don't buffer the response content.
+ //
+ ostream& os (rs.content (200, "text/plain;charset=utf-8", false));
+ os << "CI request " << tid << " has been canceled";
+
+ return true;
+}
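For reference, a hypothetical cancellation request against this handler (the
host and tenant id are made up; as noted in module.cli below, the root
handler renames the ci-cancel parameter to '_', which populates id):

  https://ci.example.org/?ci-cancel=<tenant-id>&reason=superseded+by+a+new+submission

On success the handler responds with 200 and the plain text message above; an
empty reason, an empty id, or an unknown id results in a 400 response.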
diff --git a/mod/mod-ci.hxx b/mod/mod-ci.hxx
index 1e2ee15..e4a343c 100644
--- a/mod/mod-ci.hxx
+++ b/mod/mod-ci.hxx
@@ -16,6 +16,11 @@
#include <mod/module-options.hxx>
#include <mod/ci-common.hxx>
+#include <mod/database-module.hxx>
+
+#if defined(BREP_CI_TENANT_SERVICE_UNLOADED) && !defined(BREP_CI_TENANT_SERVICE)
+# error BREP_CI_TENANT_SERVICE must be defined if BREP_CI_TENANT_SERVICE_UNLOADED is defined
+#endif
#ifdef BREP_CI_TENANT_SERVICE
# include <mod/tenant-service.hxx>
@@ -23,12 +28,20 @@
namespace brep
{
- class ci: public handler,
+ class ci:
+#ifndef BREP_CI_TENANT_SERVICE_UNLOADED
+ public handler,
+#else
+ public database_module,
+#endif
private ci_start
#ifdef BREP_CI_TENANT_SERVICE
, public tenant_service_build_queued,
public tenant_service_build_building,
public tenant_service_build_built
+#ifdef BREP_CI_TENANT_SERVICE_UNLOADED
+ , public tenant_service_build_unloaded
+#endif
#endif
{
public:
@@ -74,6 +87,12 @@ namespace brep
build_built (const tenant_service&,
const build&,
const diag_epilogue& log_writer) const noexcept override;
+
+#ifdef BREP_CI_TENANT_SERVICE_UNLOADED
+ virtual function<optional<string> (const tenant_service&)>
+ build_unloaded (tenant_service&&,
+ const diag_epilogue& log_writer) const noexcept override;
+#endif
#endif
private:
@@ -88,6 +107,32 @@ namespace brep
tenant_service_map& tenant_service_map_;
#endif
};
+
+ class ci_cancel: public database_module,
+ private ci_start
+ {
+ public:
+ ci_cancel () = default;
+
+ // Create a shallow copy (handling instance) if initialized and a deep
+ // copy (context exemplar) otherwise.
+ //
+ explicit
+ ci_cancel (const ci_cancel&);
+
+ virtual bool
+ handle (request&, response&) override;
+
+ virtual const cli::options&
+ cli_options () const override {return options::ci_cancel::description ();}
+
+ private:
+ virtual void
+ init (cli::scanner&) override;
+
+ private:
+ shared_ptr<options::ci_cancel> options_;
+ };
}
#endif // MOD_MOD_CI_HXX
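For quick reference, the bases the ci class ends up with under each valid
macro combination above (a summary of the preprocessor logic, not new code):

  (neither macro)                    ci: public handler, private ci_start
  BREP_CI_TENANT_SERVICE             adds tenant_service_build_{queued,
                                     building,built}
  + BREP_CI_TENANT_SERVICE_UNLOADED  database_module replaces handler and
                                     tenant_service_build_unloaded is added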
diff --git a/mod/mod-package-details.cxx b/mod/mod-package-details.cxx
index fcd50da..1fb51da 100644
--- a/mod/mod-package-details.cxx
+++ b/mod/mod-package-details.cxx
@@ -37,8 +37,6 @@ package_details (const package_details& r)
void brep::package_details::
init (scanner& s)
{
- HANDLER_DIAG;
-
options_ = make_shared<options::package_details> (
s, unknown_mode::fail, unknown_mode::fail);
diff --git a/mod/mod-repository-details.cxx b/mod/mod-repository-details.cxx
index 082903b..93b6c9e 100644
--- a/mod/mod-repository-details.cxx
+++ b/mod/mod-repository-details.cxx
@@ -39,8 +39,6 @@ repository_details (const repository_details& r)
void brep::repository_details::
init (scanner& s)
{
- HANDLER_DIAG;
-
options_ = make_shared<options::repository_details> (
s, unknown_mode::fail, unknown_mode::fail);
diff --git a/mod/mod-repository-root.cxx b/mod/mod-repository-root.cxx
index d44cdc5..99e7219 100644
--- a/mod/mod-repository-root.cxx
+++ b/mod/mod-repository-root.cxx
@@ -134,6 +134,7 @@ namespace brep
#else
ci_ (make_shared<ci> ()),
#endif
+ ci_cancel_ (make_shared<ci_cancel> ()),
ci_github_ (make_shared<ci_github> (*tenant_service_map_)),
upload_ (make_shared<upload> ())
{
@@ -203,6 +204,10 @@ namespace brep
#else
: make_shared<ci> (*r.ci_)),
#endif
+ ci_cancel_ (
+ r.initialized_
+ ? r.ci_cancel_
+ : make_shared<ci_cancel> (*r.ci_cancel_)),
ci_github_ (
r.initialized_
? r.ci_github_
@@ -237,6 +242,7 @@ namespace brep
append (r, build_configs_->options ());
append (r, submit_->options ());
append (r, ci_->options ());
+ append (r, ci_cancel_->options ());
append (r, ci_github_->options ());
append (r, upload_->options ());
return r;
@@ -284,6 +290,7 @@ namespace brep
sub_init (*build_configs_, "build_configs");
sub_init (*submit_, "submit");
sub_init (*ci_, "ci");
+ sub_init (*ci_cancel_, "ci-cancel");
sub_init (*ci_github_, "ci_github");
sub_init (*upload_, "upload");
@@ -486,6 +493,13 @@ namespace brep
return handle ("ci", param);
}
+ else if (func == "ci-cancel")
+ {
+ if (handler_ == nullptr)
+ handler_.reset (new ci_cancel (*ci_cancel_));
+
+ return handle ("ci-cancel", param);
+ }
else if (func == "ci-github")
{
if (handler_ == nullptr)
diff --git a/mod/mod-repository-root.hxx b/mod/mod-repository-root.hxx
index 118f12c..31fde9b 100644
--- a/mod/mod-repository-root.hxx
+++ b/mod/mod-repository-root.hxx
@@ -25,6 +25,7 @@ namespace brep
class build_configs;
class submit;
class ci;
+ class ci_cancel;
class ci_github;
class upload;
@@ -75,6 +76,7 @@ namespace brep
shared_ptr<build_configs> build_configs_;
shared_ptr<submit> submit_;
shared_ptr<ci> ci_;
+ shared_ptr<ci_cancel> ci_cancel_;
shared_ptr<ci_github> ci_github_;
shared_ptr<upload> upload_;
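Taken together, the mod-repository-root changes above amount to the full
checklist for wiring a new sub-handler such as ci_cancel into the root
module:

- forward-declare the class and add a shared_ptr exemplar member (hxx);
- construct the exemplar in the default constructor and copy it (shallow if
  initialized, deep otherwise) in the copy constructor;
- append its options to the root's option description and sub_init() it;
- add a dispatch branch that creates a handling instance from the exemplar
  for the matching function name.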
diff --git a/mod/module.cli b/mod/module.cli
index 166adbd..d716c6e 100644
--- a/mod/module.cli
+++ b/mod/module.cli
@@ -796,11 +796,7 @@ namespace brep
}
};
- class ci_cancel
- {
- };
-
- class ci: ci_start, page, repository_url, handler
+ class ci: ci_start, build, build_db, page, repository_url, handler
{
// Classic CI-specific options.
//
@@ -815,10 +811,11 @@ namespace brep
}
};
- // @@ TODO Is etc/brep-module.conf updated manually? Yes, will need to
- // replicate there eventually.
- //
- class ci_github: ci_start, ci_cancel, build_db, handler, openssl_options
+ class ci_cancel: build, build_db, handler
+ {
+ };
+
+ class ci_github: ci_start, ci_cancel, repository_url
{
// GitHub CI-specific options.
//
@@ -832,14 +829,15 @@ namespace brep
string ci-github-app-webhook-secret
{
"<secret>",
- "The GitHub app's configured webhook secret."
+ "The GitHub App's configured webhook secret. If not set, then the
+ GitHub CI service is disabled."
}
path ci-github-app-private-key
{
"<path>",
"The private key used during GitHub API authentication. Created in
- the GitHub app's settings."
+ the GitHub App's settings."
}
uint16_t ci-github-jwt-validity-period = 600
@@ -1127,6 +1125,22 @@ namespace brep
string simulate;
};
+ // All parameters are non-optional.
+ //
+ class ci_cancel
+ {
+ // CI task tenant id.
+ //
+ // Note that the ci-cancel parameter is renamed to '_' by the root
+ // handler (see the request_proxy class for details).
+ //
+ string id | _;
+
+ // CI task canceling reason. Must not be empty.
+ //
+ string reason;
+ };
+
// Parameters other than challenge must be all present.
//
// Note also that besides these parameters there can be others. We don't
diff --git a/mod/page.cxx b/mod/page.cxx
index bc2e42d..177fb64 100644
--- a/mod/page.cxx
+++ b/mod/page.cxx
@@ -739,7 +739,7 @@ namespace brep
<< ~TR;
}
- // BUILD_RESULT
+ // TR_BUILD_RESULT
//
void TR_BUILD_RESULT::
operator() (serializer& s) const
diff --git a/mod/tenant-service.hxx b/mod/tenant-service.hxx
index 61e0a07..c46cb7b 100644
--- a/mod/tenant-service.hxx
+++ b/mod/tenant-service.hxx
@@ -21,7 +21,8 @@ namespace brep
virtual ~tenant_service_base () = default;
};
- // Possible build notifications:
+ // Possible build notifications (see also the unloaded special notification
+ // below):
//
// queued
// building
@@ -124,6 +125,22 @@ namespace brep
const diag_epilogue& log_writer) const noexcept = 0;
};
+ // This notification is only made for unloaded CI requests created with
+ // the ci_start::create() call and only until they are loaded with
+ // ci_start::load() or, alternatively, abandoned with ci_start::abandon().
+ //
+ // Note: to avoid nested notifications, make sure the implementation of
+ // this notification does not run longer than the notification interval
+ // (currently 40 seconds). The first notification is delayed (10 seconds).
+ //
+ class tenant_service_build_unloaded: public virtual tenant_service_base
+ {
+ public:
+ virtual function<optional<string> (const tenant_service&)>
+ build_unloaded (tenant_service&&,
+ const diag_epilogue& log_writer) const noexcept = 0;
+ };
+
// Map of service type (tenant_service::type) to service.
//
using tenant_service_map = std::map<string, shared_ptr<tenant_service_base>>;
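To illustrate the interface, a minimal sketch of an implementer (the class
name and the lambda body are hypothetical; compare ci::build_unloaded() in
mod-ci.cxx above):

  class my_service: public tenant_service_build_unloaded
  {
  public:
    virtual function<optional<string> (const tenant_service&)>
    build_unloaded (tenant_service&& ts,
                    const diag_epilogue& log_writer) const noexcept override
    {
      NOTIFICATION_DIAG (log_writer);

      // Load or abandon the unloaded CI request here (see the
      // ci_start::load() call in ci::build_unloaded() for an example).
      //
      if (!ts.data)
      {
        error << "no service data for tenant service " << ts.id;
        return nullptr; // Leave the service data unchanged.
      }

      // Returning a function replaces the service data with its result.
      //
      return [] (const tenant_service& ts)
      {
        return "loaded " + *ts.data;
      };
    }
  };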