149 files changed, 13699 insertions, 3406 deletions
@@ -5,10 +5,16 @@ *.d *.t *.i +*.i.* *.ii +*.ii.* *.o *.obj +*.gcm +*.pcm +*.ifc *.so +*.dylib *.dll *.a *.lib @@ -77,24 +77,13 @@ d) Unless you already have the build2 toolchain, install it by following 3. Build and Install brep -Normally the only extra information that you need to provide on this step is -the location of the Apache2 headers (httpd.h, etc). Below are their locations -for some distributions: - -Debian/Ubuntu: /usr/include/apache2 -Fedora/RHEL: /usr/include/httpd -FreeBSD: /usr/local/include/apache24 - -You can also use the Apache2 apxs utility to obtain this information as shown -below. - $ mkdir brep $ cd brep -$ bpkg create \ - cc \ - config.cc.poptions="-I$(apxs -q includedir)" \ - config.bin.rpath=$HOME/install/lib \ +$ bpkg create \ + cc \ + config.cc.coptions=-O3 \ + config.bin.rpath=$HOME/install/lib \ config.install.root=$HOME/install $ bpkg add https://pkg.cppget.org/1/alpha @@ -104,6 +93,22 @@ $ bpkg install brep $ cd .. # Back to brep home. +Note that by default the location of the Apache2 headers (httpd.h, etc) is +detected automatically, using the Apache2 apxs utility. Below are their +locations for some distributions: + +Debian/Ubuntu: /usr/include/apache2 +Fedora/RHEL: /usr/include/httpd +FreeBSD: /usr/local/include/apache24 + +To disable this functionality and specify the location explicitly, you can +configure brep in the development mode and specify the respective preprocessor +option by, for example, adding the following configuration variables to the +above bpkg-build command: + +config.brep.develop=true +config.cc.poptions="-I..." + 4. Create PostgreSQL User and Databases @@ -158,7 +163,7 @@ CREATE EXTENSION postgres_fdw; CREATE SERVER package_server FOREIGN DATA WRAPPER postgres_fdw - OPTIONS (dbname 'brep_package', updatable 'false'); + OPTIONS (dbname 'brep_package', updatable 'true'); GRANT USAGE ON FOREIGN SERVER package_server to brep; @@ -166,6 +171,17 @@ CREATE USER MAPPING FOR PUBLIC SERVER package_server OPTIONS (user 'brep-build', password '-'); +Note that starting with PostgreSQL 15 only the database owner can create the +objects in the public schema by default. Thus, if the PostgreSQL version is 15 +or above, then all the privileges on this schema in the created databases need +to be granted explicitly by the postgres user to the brep user: + +\c brep_package +GRANT ALL PRIVILEGES ON SCHEMA public TO brep; + +\c brep_build +GRANT ALL PRIVILEGES ON SCHEMA public TO brep; + Exit psql (^D) The user brep-build is required (by the postgres_fdw extension) to login with @@ -218,7 +234,21 @@ $ cp install/share/brep/etc/brep-module.conf config/ $ edit config/brep-module.conf # Adjust default values if required. To enable the build2 build bot controller functionality you will need to set -the build-config option in brep-module.conf. +the build-config option in brep-module.conf. To also enable the build +artifacts upload functionality you will need to specify the upload-data +directory for the desired upload types in brep-module.conf. For example, for +generated binary distribution packages it can be as follows: + +upload-data bindist=/home/brep/bindist-data + +Note that this directory must exist and have read, write, and execute +permissions granted to the www-data user. This, for example, can be achieved +with the following commands: + +$ mkdir /home/brep/bindist-data +$ setfacl -m g:www-data:rwx /home/brep/bindist-data + +For sample upload handler implementations see brep/handler/upload/. 
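As a rough sketch, the full configuration for this upload type could then look like the following fragment (the upload-handler and upload-handler-argument option names are assumed here by analogy with the submit-* and ci-* handler options, and the handler argument is the destination root directory discussed further below; verify both against the comments in brep-module.conf):

upload-data bindist=/home/brep/bindist-data
upload-handler bindist=/home/brep/install/bin/brep-upload-bindist
upload-handler-argument bindist=/var/bindist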
To enable the package submission functionality you will need to specify the submit-data and submit-temp directories in brep-module.conf. Note that these @@ -330,8 +360,22 @@ user/group. Otherwise, a cron job is a natural choice. Note that the builds cleaner execution is optional and is only required if the build2 build bot functionality is enabled (see the build bot documentation for -details). If it is disabled in your setup, then skip the cleaner-related -parts in the subsequent subsections. +details). If it is disabled in your setup, then skip the cleaner-related parts +in the subsequent subsections. + +If the build artifacts upload functionality is enabled in addition to the +build2 build bot functionality you most likely will want to additionally set up +the cleanup of the outdated build artifacts. For example, for binary +distribution package uploads handled by brep-upload-bindist the cleanup needs +to be performed by periodic execution of the brep-upload-bindist-clean script. +Note that the directory where the uploads are saved must exist and have +read, write, and execute permissions granted to the brep user. This, for +example, can be achieved with the following commands: + +# mkdir /var/bindist +# chown www-data:www-data /var/bindist +# setfacl -m u:brep:rwx /var/bindist +# setfacl -dm u:brep:rwx /var/bindist If the CI request functionality is enabled you most likely will want to additionally set up the tenants cleanup. @@ -346,8 +390,9 @@ infrastructure. 8.a Setup Periodic Loader, Cleaner, and Monitor Execution with cron The following crontab entries will execute the loader every five minutes, the -tenants and builds cleaners once a day at midnight, and the monitor every hour -(all shifted by a few minutes in order not to clash with other jobs): +tenants, builds, and binary distribution cleaners once a day at midnight, and +the monitor every hour (all shifted by a few minutes in order not to clash +with other jobs): $ crontab -l MAILTO=<brep-admin-email> @@ -355,7 +400,8 @@ PATH=/usr/local/bin:/bin:/usr/bin */5 * * * * $HOME/install/bin/brep-load $HOME/config/loadtab 1 0 * * * $HOME/install/bin/brep-clean tenants 240 2 0 * * * $HOME/install/bin/brep-clean builds $HOME/config/buildtab -3 * * * * $HOME/install/bin/brep-monitor --report-timeout 86400 --clean $HOME/config/brep-module.conf public +3 0 * * * $HOME/install/bin/brep-upload-bindist-clean /var/bindist 2880 +4 * * * * $HOME/install/bin/brep-monitor --report-timeout 86400 --clean $HOME/config/brep-module.conf public ^D Note that here we assume that bpkg (which is executed by brep-load) is in one diff --git a/INSTALL-CI-DEV b/INSTALL-CI-DEV new file mode 100644 index 0000000..a80b727 --- /dev/null +++ b/INSTALL-CI-DEV @@ -0,0 +1,131 @@ +This guide shows how to configure the brep module for serving the CI and +build2 build bot requests and how to smoke-test it. + +Note that during the testing both the user and the CI submission handler (executed +by the brep module) will run the build2 toolchain utilities. Thus, the user +needs to arrange the toolchain availability for her and for the user the +Apache2 process runs under. The easiest would be to install the toolchain +into the system using, for example, the build2-install-*-a.0-stage.sh script +(can be downloaded from https://stage.build2.org/0/). If the brep module being +developed is not compatible with the staged toolchain, then installing the +development version of the toolchain may be required. 
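For example, assuming the staged toolchain version is 0.17.0-a.0 (illustrative; check https://stage.build2.org/0/ for the actual version and see the comments inside the script for the available options), the system-wide installation could look along these lines:

$ curl -sSfO https://stage.build2.org/0/build2-install-0.17.0-a.0-stage.sh
$ sh build2-install-0.17.0-a.0-stage.sh

Installing into the system (/usr/local by default) makes the toolchain available both to the user and to the user the Apache2 process runs under.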
+ +In the below instructions replace <BREP-SRC-ROOT>, <BREP-OUT-ROOT>, and <HOME> +with the actual absolute paths of the brep source, brep output, and the user +home directories. Replace <HOST> with the actual hostname of the local brep +repository instance. + +Here we assume that the brep instance is already configured according to the +instructions in the INSTALL-DEV file. Now, the instance needs to additionally +be configured as the build2 build bot controller and the CI request service, +as described in the INSTALL file. This, in particular, requires +specifying the build-config and a number of ci-* configuration options in the +brep module configuration file. For example: + +$ mkdir ~/brep +$ cd ~/brep +$ mkdir ci-data config +$ setfacl -m g:www-data:rwx ci-data +$ cd config +$ cp <BREP-SRC-ROOT>/etc/brep-module.conf . + +Edit brep-module.conf: + +- Uncomment the Builds=?builds menu. +- Set the build-config option as <HOME>/brep/config/buildtab. +- Set the ci-data option as <HOME>/brep/ci-data. +- Set the ci-handler option as <BREP-OUT-ROOT>/brep/handler/ci/brep-ci-load. + +- Add the following options: + +ci-handler-argument --result-url +ci-handler-argument http://<HOST> +ci-handler-argument <BREP-OUT-ROOT>/load/brep-load + +Create the buildtab file: + +$ cat <<EOF >buildtab +linux_debian_12*-gcc_13.1 linux_debian_12-gcc_13.1 x86_64-linux-gnu "all default" +linux_debian_12*-gcc_13.1 linux_debian_12-gcc_13.1-O3 x86_64-linux-gnu "all default" config.cc.coptions="-O3" +EOF + +Point the brep module to the newly created configuration file: + +$ sudo systemctl stop apache2 + +Open the corresponding Apache2 .conf file and change the brep-conf directive +to refer to <HOME>/brep/config/brep-module.conf. + +$ sudo systemctl start apache2 +$ sudo systemctl status apache2 + +Submit a package for CI, for example, foo/1.0.0: + +$ cd ~/brep +$ git clone https://.../foo +$ cd foo +$ bdep init -C @cfg -- +$ bdep ci --server http://<HOST> + +Verify that the CI request is successfully submitted by opening the link +contained in bdep-ci's stderr. The submitted package should be present on +the Packages page. + +Send the task request query on behalf of the build2 build bot agent, for +example: + +$ cd ~/brep +$ cat <<EOF >task-request.manifest +: 1 +agent: bot +toolchain-name: dev +toolchain-version: 0.17.0-a.1 + +: +id: a2b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855 +name: linux_debian_12-gcc_13.1 +summary: Linux Debian 12 GCC 13.1 +EOF + +$ cat task-request.manifest | \ + curl -s -S --data-binary @- \ + --header 'Content-Type: text/manifest' \ + --include "http://<HOST>/?build-task" + +Stash the session and result-url manifest values contained in curl's +stdout. We will refer to them as <SESSION> and <RESULT-URL> down the road. + +Verify that the CI task is successfully created by clicking the 'Builds' link +in the menu of the previously opened brep page. A single package build in the +building state should be present on the Builds page. + +Send the result request query on behalf of the build2 build bot agent: + +$ cat <<EOF >result-request.manifest +: 1 +session: <SESSION> +agent-checksum: 1 +: +name: foo +version: 1.0.0 +status: success +EOF + +$ cat result-request.manifest | \ + curl -s -S --data-binary @- \ + --header 'Content-Type: text/manifest' \ + --include <RESULT-URL> + +Refresh the Builds page and make sure that the build is now in the built state +(the 'success' status is printed in the result field). 
+ +Re-submit the task-request.manifest file, refresh the Builds page, and make +sure that the second package build appears on the page in the building state. +Edit the session value in the result-request.manifest, re-submit it to the new +result URL, refresh the Builds page, and make sure that the latest build is +now in the built state as well. + +You can also track the brep objects state transitions in the database. For +example, by executing the following query before/after each curl command: + +$ psql -d brep_build -c 'select * from build_tenant' diff --git a/INSTALL-DEV b/INSTALL-DEV index af5c06e..8ebc5a3 100644 --- a/INSTALL-DEV +++ b/INSTALL-DEV @@ -55,6 +55,17 @@ CREATE USER "www-data" INHERIT IN ROLE <user>; CREATE USER "brep-build" INHERIT IN ROLE <user> PASSWORD '-'; +Note that starting with PostgreSQL 15 only the database owner can create the +objects in the public schema by default. Thus, if the PostgreSQL version is 15 +or above, then all the privileges on this schema in the created databases need +to be granted explicitly by the postgres user to <user>: + +\c brep_package +GRANT ALL PRIVILEGES ON SCHEMA public TO <user>; + +\c brep_build +GRANT ALL PRIVILEGES ON SCHEMA public TO <user>; + Exit psql (^D), then make sure the logins work: $ psql -d brep_package @@ -72,7 +83,7 @@ CREATE EXTENSION postgres_fdw; CREATE SERVER package_server FOREIGN DATA WRAPPER postgres_fdw - OPTIONS (dbname 'brep_package', updatable 'false'); + OPTIONS (dbname 'brep_package', updatable 'true'); GRANT USAGE ON FOREIGN SERVER package_server to <user>; @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2014-2020 the build2 authors (see the AUTHORS and LEGAL files). +Copyright (c) 2014-2024 the build2 authors (see the AUTHORS and LEGAL files). Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal @@ -1,3 +1,41 @@ +Version 0.16.0 + + * Note: brep_build database schema migration from version 18 is unsupported. + + * Support for build artifact upload. + + * Support for *-package-config package manifest value functionality. + + * Support for interrupted build results. + + * Support for random package ordering when issuing build tasks. + + * Support for package-description, package-description-{file,type} package + manifest values. + +Version 0.15.0 + + * Support for disabling build notification emails per toolchain. + + * The submit-git and submit-pub handlers now deny submissions of older + package version revisions. + + * Support for footnotes in cmark-gfm. + +Version 0.14.0 + + * Support for interactive CI. + + * Support for soft and hard rebuilds. + + * Support for build-time dependencies and the target/host configuration + split awareness. + + * Initial support for private brep-as-VM setup (see etc/private/README). + + * Build notifications are no longer sent if the build-email package manifest + value is unspecified. + Version 0.13.0 * Support for the alternative package rebuild timeout. 
diff --git a/brep/handler/buildfile b/brep/handler/buildfile index b351ca3..cd11231 100644 --- a/brep/handler/buildfile +++ b/brep/handler/buildfile @@ -3,7 +3,8 @@ import mods = libbutl.bash%bash{manifest-parser} import mods += libbutl.bash%bash{manifest-serializer} +import mods += bpkg-util%bash{package-archive} -./: bash{handler} submit/ ci/ +./: bash{handler} submit/ ci/ upload/ bash{handler}: in{handler} $mods diff --git a/brep/handler/ci/ci-load.in b/brep/handler/ci/ci-load.in index f62bb76..3f04ea8 100644 --- a/brep/handler/ci/ci-load.in +++ b/brep/handler/ci/ci-load.in @@ -25,7 +25,10 @@ verbose= #true fetch_timeout=60 trap "{ exit 1; }" ERR -set -o errtrace # Trap ERR in functions. +set -o errtrace # Trap ERR in functions. +set -o pipefail # Fail if any pipeline command fails. +shopt -s lastpipe # Execute last pipeline command in the current shell. +shopt -s nullglob # Expand no-match globs to nothing rather than themselves. @import brep/handler/handler@ @import brep/handler/ci/ci@ @@ -33,7 +36,7 @@ set -o errtrace # Trap ERR in functions. # The handler's own options. # result_url= -while [ $# -gt 0 ]; do +while [[ "$#" -gt 0 ]]; do case $1 in --result-url) shift @@ -50,7 +53,7 @@ done # loader="$1" -if [ -z "$loader" ]; then +if [[ -z "$loader" ]]; then error "$usage" fi @@ -60,7 +63,7 @@ shift # options. # loader_options=() -while [ $# -gt 1 ]; do +while [[ "$#" -gt 1 ]]; do loader_options+=("$1") shift done @@ -69,11 +72,11 @@ done # data_dir="${1%/}" -if [ -z "$data_dir" ]; then +if [[ -z "$data_dir" ]]; then error "$usage" fi -if [ ! -d "$data_dir" ]; then +if [[ ! -d "$data_dir" ]]; then error "'$data_dir' does not exist or is not a directory" fi @@ -84,8 +87,9 @@ reference="$(basename "$data_dir")" # manifest_parser_start "$data_dir/request.manifest" -simulate= repository= +interactive= +simulate= # Package map. We first enter packages from the request manifest as keys and # set the values to true. Then we go through the repository package list @@ -104,40 +108,58 @@ declare -A packages # spec= +# Third party service information which, if specified, needs to be associated +# with the tenant being created. +# +service_id= +service_type= +service_data= + while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do case "$n" in - simulate) simulate="$v" ;; - repository) repository="$v" ;; + repository) repository="$v" ;; + interactive) interactive="$v" ;; + simulate) simulate="$v" ;; package) packages["$v"]=true - if [ -n "$spec" ]; then + if [[ -n "$spec" ]]; then spec="$spec," fi spec="$spec$v" ;; + + service-id) service_id="$v" ;; + service-type) service_type="$v" ;; + service-data) service_data="$v" ;; esac done manifest_parser_finish -if [ -n "$spec" ]; then +if [[ -n "$spec" ]]; then spec="$spec@" fi spec="$spec$repository" -if [ -z "$repository" ]; then +if [[ -z "$repository" ]]; then error "repository manifest value expected" fi -if [ -n "$simulate" -a "$simulate" != "success" ]; then +if [[ -n "$simulate" && "$simulate" != "success" ]]; then exit_with_manifest 400 "unrecognized simulation outcome '$simulate'" fi +# Use the generated reference if the tenant service id is not specified. +# +if [[ -n "$service_type" && -z "$service_id" ]]; then + service_id="$reference" +fi + message_suffix= -if [ -n "$result_url" ]; then +if [[ -n "$result_url" ]]; then message_suffix=": $result_url/@$reference" # Append the tenant id. 
fi @@ -146,7 +168,7 @@ fi # Note that we can't assume a real repository URL is specified if simulating # so trying to query the repository info is not a good idea. # -if [ -n "$simulate" ]; then +if [[ -n "$simulate" ]]; then run rm -r "$data_dir" trace "CI request for '$spec' is simulated$message_suffix" @@ -188,9 +210,9 @@ manifest_values=() manifest_version= more=true -while [ "$more" ]; do +while [[ "$more" ]]; do - if [ -n "$manifest_version" ]; then + if [[ -n "$manifest_version" ]]; then manifest_names=("") manifest_values=("$manifest_version") fi @@ -213,35 +235,32 @@ while [ "$more" ]; do manifest_names+=("$n") manifest_values+=("$v") - done # Reduce the first manifest case. # - if [ ${#manifest_names[@]} -eq 0 ]; then + if [[ "${#manifest_names[@]}" -eq 0 ]]; then continue fi # Add or filter out the manifest, if present. # - if [ ${#packages[@]} -ne 0 ]; then - - if [[ -v packages["$name"] ]]; then + if [[ "${#packages[@]}" -ne 0 ]]; then + if [[ -v "packages[$name]" ]]; then packages["$name"]= packages["$name/$version"]= # Clear it as well, as it may also be present. - elif [[ -v packages["$name/$version"] ]]; then + elif [[ -v "packages[$name/$version]" ]]; then packages["$name/$version"]= else continue # Skip. fi - fi packages_manifest_names+=("${manifest_names[@]}") packages_manifest_values+=("${manifest_values[@]}") - if [ -z "$display_name" ]; then - if [ -n "$project" ]; then + if [[ -z "$display_name" ]]; then + if [[ -n "$project" ]]; then display_name="$project" else display_name="$name" @@ -255,7 +274,7 @@ manifest_parser_finish # the repository. # for p in "${!packages[@]}"; do - if [ "${packages[$p]}" ]; then + if [[ "${packages[$p]}" ]]; then exit_with_manifest 422 "unknown package $p" fi done @@ -263,7 +282,7 @@ done # Verify that the repository is not empty. Failing that, the repository display # name wouldn't be set. # -if [ -z "$display_name" ]; then +if [[ -z "$display_name" ]]; then exit_with_manifest 422 "no packages in repository" fi @@ -275,7 +294,7 @@ run mv "$cache_dir/packages.manifest" "$cache_dir/packages.manifest.orig" # manifest_serializer_start "$cache_dir/packages.manifest" -for ((i=0; i <= ${#packages_manifest_names[@]}; ++i)); do +for ((i=0; i != "${#packages_manifest_names[@]}"; ++i)); do manifest_serialize "${packages_manifest_names[$i]}" \ "${packages_manifest_values[$i]}" done @@ -289,7 +308,7 @@ run echo "$repository $display_name cache:cache" >"$loadtab" # Apply overrides, if uploaded. # -if [ -f "$data_dir/overrides.manifest" ]; then +if [[ -f "$data_dir/overrides.manifest" ]]; then loader_options+=(--overrides-file "$data_dir/overrides.manifest") fi @@ -298,6 +317,22 @@ fi # loader_options+=(--force --shallow --tenant "$reference") +# Build the packages interactively, if requested. +# +if [[ -n "$interactive" ]]; then + loader_options+=(--interactive "$interactive") +fi + +# Pass the tenant service information, if specified, to the loader. +# +if [[ -n "$service_id" ]]; then + loader_options+=(--service-id "$service_id" --service-type "$service_type") + + if [[ -n "$service_data" ]]; then + loader_options+=(--service-data "$service_data") + fi +fi + run "$loader" "${loader_options[@]}" "$loadtab" # Remove the no longer needed CI request data directory. 
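Putting the above argument parsing together, the assumed brep-ci-load command line (a sketch inferred from this diff rather than documented usage) is:

$ brep-ci-load [--result-url <url>] <loader-path> [<loader-options>] <data-dir>

This matches the ci-handler and ci-handler-argument values from the INSTALL-CI-DEV instructions above, with the brep module appending the CI request data directory as the last argument.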
diff --git a/brep/handler/handler.bash.in b/brep/handler/handler.bash.in index 1169b99..d9e7eaa 100644 --- a/brep/handler/handler.bash.in +++ b/brep/handler/handler.bash.in @@ -9,8 +9,11 @@ else brep_handler=true fi -@import libbutl/manifest-parser@ -@import libbutl/manifest-serializer@ +@import libbutl.bash/manifest-parser@ +@import libbutl.bash/manifest-serializer@ + +bpkg_util_bpkg=bpkg +@import bpkg-util/package-archive@ # Diagnostics. # @@ -148,3 +151,7 @@ function manifest_serialize () # <name> <value> # trace "$1: $2" printf "%s:%s\0" "$1" "$2" >&"$manifest_serializer_ifd" } + +function pkg_verify_archive () { bpkg_util_pkg_verify_archive "$@"; } +function pkg_find_archives () { bpkg_util_pkg_find_archives "$@"; } +function pkg_find_archive () { bpkg_util_pkg_find_archive "$@"; } diff --git a/brep/handler/submit/submit-git.bash.in b/brep/handler/submit/submit-git.bash.in index 9f25c28..cf7300d 100644 --- a/brep/handler/submit/submit-git.bash.in +++ b/brep/handler/submit/submit-git.bash.in @@ -59,6 +59,10 @@ function owners_dir () # <repo-dir> # Check if a repository already contains the package. Respond with the # 'duplicate submission' result manifest and exit if that's the case. # +# Also check if the repository contains newer revision of this package +# version. Respond with the 'newer revision is present' result manifest and +# exit if that's the case. +# function check_package_duplicate () # <name> <version> <repo-dir> { trace_func "$@" @@ -72,22 +76,54 @@ function check_package_duplicate () # <name> <version> <repo-dir> run source "$rep/submit.config.bash" - # Check for duplicate package in all sections. Use <name>-<version>.* - # without .tar.gz in case we want to support more archive types later. + local rev + rev="$(version_revision "$ver")" + + # Check for duplicate package and its newer revisions in all sections. Use + # <name>-<version>.* without .tar.gz in case we want to support more archive + # types later. # local s for s in "${!sections[@]}"; do local d="$rep/${sections[$s]}" - if [ -d "$d" ]; then - local f - f="$(run find "$d" -name "$nam-$ver.*")" + # Check for duplicate. + # + local p + run pkg_find_archive "$nam-$ver.*" "$d" | readarray -t p + + if [ "${#p[@]}" -ne 0 ]; then + local n="${p[1]}" + local v="${p[2]}" - if [ -n "$f" ]; then - trace "found: $f" + trace "found: $n/$v in ${p[0]}" + + if [ "$n" == "$nam" ]; then exit_with_manifest 422 "duplicate submission" + else + exit_with_manifest 422 "submission conflicts with $n/$v" fi fi + + # Check for newer revision. + # + local arcs + run pkg_find_archives "$nam" "$ver*" "$d" | readarray -t arcs + + local f + for f in "${arcs[@]}"; do + local p + pkg_verify_archive "$f" | readarray -t p + + local v="${p[1]}" + + local rv + rv="$(version_revision "$v")" + + if [ "$rv" -gt "$rev" ]; then + exit_with_manifest 422 "newer revision $nam/$v is present" + fi + done done } @@ -163,6 +199,7 @@ function auth_project () # <project> <control> <repo-dir> local r="unknown" local m="$d/$prj/project-owner.manifest" + local info= # If the project owner manifest exists then parse it and try to authenticate # the submitter as the project owner. 
@@ -175,16 +212,31 @@ function auth_project () # <project> <control> <repo-dir> local n v while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do - if [[ "$n" == "control" && "$ctl" == "$v"* ]]; then - r="project" - break + if [[ "$n" == "control" ]]; then + if [[ "$ctl" == "$v"* ]]; then + r="project" + break + fi + + # If the control URLs don't match, then compare them case- + # insensitively, converting them to the lower case. If they match + # case-insensitively, then still fail the authentication but provide + # additional information in the manifest message value. + # + if [[ "${ctl,,}" == "${v,,}"* ]]; then + info=" + info: control repository URL differs only in character case + info: submitted URL: $ctl + info: project owner's URL: $v + info: consider using --control to specify exact URL" + fi fi done manifest_parser_finish if [ "$r" != "project" ]; then - exit_with_manifest 401 "project owner authentication failed" + exit_with_manifest 401 "project owner authentication failed$info" fi fi @@ -210,7 +262,8 @@ function auth_package () # <project> <package> <control> <repo-dir> local prj="$1" local pkg="$2" - local ctl="${3%.git}" # Strip the potential .git extension. + local ctl="${3%.git}" # For comparison strip the potential .git extension. + local ctl_orig="$3" # For diagnostics use the original URL. local rep="$4" local d @@ -227,6 +280,7 @@ function auth_package () # <project> <package> <control> <repo-dir> local r="unknown" local m="$d/$prj/$pkg/package-owner.manifest" + local info= # If the package owner manifest exists then parse it and try to authenticate # the submitter as the package owner. @@ -241,16 +295,31 @@ function auth_package () # <project> <package> <control> <repo-dir> # local n v while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do - if [ "$n" == "control" -a "${v%.git}" == "$ctl" ]; then - r="package" - break + if [ "$n" == "control" ]; then + local u="${v%.git}" + + if [ "$u" == "$ctl" ]; then + r="package" + break + fi + + # If the control URLs don't match, then compare them case- + # insensitively (see auth_project() for details). + # + if [ "${u,,}" == "${ctl,,}" ]; then + info=" + info: control repository URL differs only in character case + info: submitted URL: $ctl_orig + info: package owner's URL: $v + info: consider using --control to specify exact URL" + fi fi done manifest_parser_finish if [ "$r" != "package" ]; then - exit_with_manifest 401 "package owner authentication failed" + exit_with_manifest 401 "package owner authentication failed$info" fi fi diff --git a/brep/handler/submit/submit-git.in b/brep/handler/submit/submit-git.in index 54cd230..c882b84 100644 --- a/brep/handler/submit/submit-git.in +++ b/brep/handler/submit/submit-git.in @@ -186,8 +186,10 @@ git_timeout=10 ref_lock_timeout=30 trap "{ exit 1; }" ERR -set -o errtrace # Trap ERR in functions. -set -o pipefail # Return the rightmost non-zero exit status in a pipeline. +set -o errtrace # Trap in functions and subshells. +set -o pipefail # Fail if any pipeline command fails. +shopt -s lastpipe # Execute last pipeline command in the current shell. +shopt -s nullglob # Expand no-match globs to nothing rather than themselves. @import brep/handler/handler@ @import brep/handler/submit/submit@ @@ -403,7 +405,7 @@ function git_add () # <repo-dir> <path>... local d="$1" shift - run git -C "$d" add $gvo "$@" >&2 + run git -C "$d" add --force $gvo "$@" >&2 } # For now we make 10 re-tries to add the package and push to target. 
Push can @@ -639,28 +641,10 @@ for i in {1..11}; do exit_with_manifest 400 "unrecognized section '$section'" fi - # Strips the version revision part, if present. - # - v="$(sed -n -re 's%^(\+?[^+]+)(\+[0-9]+)?$%\1%p' <<<"$version")" - - # Make sure the section directory exists before we run find in it. - # - d="$tgt_dir/$s/$project" - run mkdir -p "$d" # Create all the parent directories as well. - - # Go through the potentially matching archives (for example, for - # foo-1.2.3+2: foo-1.2.3.tar.gz, foo-1.2.3+1.tar.gz, foo-1.2.30.tar.gz, etc) - # and remove those that match exactly. - # - # Change CWD to the section directory to make sure that the found archive - # paths don't contain spaces. - # - fs=($(run cd "$tgt_dir/$s" && run find -name "$name-$v*")) + run pkg_find_archives "$name" "$version*" "$tgt_dir/$s" | readarray -t arcs - for f in "${fs[@]}"; do - if [[ "$f" =~ ^\./[^/]+/"$name-$v"(\+[0-9]+)?\.[^/]+$ ]]; then - run git -C "$tgt_dir" rm $gqo "$s/$f" >&2 - fi + for f in "${arcs[@]}"; do + run git -C "$tgt_dir" rm $gqo "${f#$tgt_dir/}" >&2 done # Finally, add the package archive to the target repository. @@ -669,8 +653,10 @@ for i in {1..11}; do # Make sure the project directory exists before we copy the archive into it. # Note that it was removed by git-rm if it became empty. # + d="$tgt_dir/$s/$project" + run mkdir -p "$d" # Create all the parent directories as well. + a="$d/$archive" - run mkdir -p "$d" # Create all the parent directories as well. run cp "$data_dir/$archive" "$a" git_add "$tgt_dir" "${a#$tgt_dir/}" diff --git a/brep/handler/submit/submit-pub.in b/brep/handler/submit/submit-pub.in index d262ae9..42d478d 100644 --- a/brep/handler/submit/submit-pub.in +++ b/brep/handler/submit/submit-pub.in @@ -12,7 +12,7 @@ # # Specifically, the handler performs the following steps: # -# - Lock the repository directory for the duraton of the package submission. +# - Lock the repository directory for the duration of the package submission. # # - Check for the package duplicate. # @@ -85,7 +85,10 @@ verbose= #true rep_lock_timeout=60 trap "{ exit 1; }" ERR -set -o errtrace # Trap ERR in functions. +set -o errtrace # Trap in functions and subshells. +set -o pipefail # Fail if any pipeline command fails. +shopt -s lastpipe # Execute last pipeline command in the current shell. +shopt -s nullglob # Expand no-match globs to nothing rather than themselves. @import brep/handler/handler@ @import brep/handler/submit/submit@ @@ -254,6 +257,8 @@ else message_suffix=": $name/$version" fi +revision="$(version_revision "$version")" + # Open the reading file descriptor and lock the repository. Fail if unable to # lock before timeout. # @@ -294,8 +299,22 @@ trap exit_trap EXIT # Check for the package duplicate (in all projects). # -if [ -n "$(run find "$repo_old/1" -name "$archive")" ]; then - exit_with_manifest 422 "duplicate submission" +# Use <name>-<version>.* without .tar.gz in case we want to support more +# archive types later. +# +run pkg_find_archive "$name-$version.*" "$repo_old/1" | readarray -t p + +if [ "${#p[@]}" -ne 0 ]; then + n="${p[1]}" + v="${p[2]}" + + trace "found: $n/$v in ${p[0]}" + + if [ "$n" == "$name" ]; then + exit_with_manifest 422 "duplicate submission" + else + exit_with_manifest 422 "submission conflicts with $n/$v" + fi fi # Copy the current repository using hardlinks. @@ -310,27 +329,30 @@ fi run rsync -rtO --exclude 'packages.manifest' --link-dest="$repo_old" \ "$repo_old/" "$repo_new" -# Remove the package version revisions that may exist in the repository. 
+# Remove the package version revision archives that may exist in the +# repository. # -# Strips the version revision part, if present. +# But first check if the repository contains newer revision of this package +# version. Respond with the 'newer revision is present' result manifest and +# exit if that's the case. # -v="$(sed -n -re 's%^(\+?[^+]+)(\+[0-9]+)?$%\1%p' <<<"$version")" +run pkg_find_archives "$name" "$version*" "$repo_new/1" | readarray -t arcs -# Go through the potentially matching archives (for example, for foo-1.2.3+2: -# foo-1.2.3.tar.gz, foo-1.2.3+1.tar.gz, foo-1.2.30.tar.gz, etc) and remove -# those that match exactly. -# -# Change CWD to the section directory to make sure that the found archive -# paths don't contain spaces. -# -fs=($(run cd "$repo_new/1" && run find -name "$name-$v*")) +for f in "${arcs[@]}"; do + pkg_verify_archive "$f" | readarray -t p + + v="${p[1]}" + rv="$(version_revision "$v")" -for f in "${fs[@]}"; do - if [[ "$f" =~ ^\./[^/]+/"$name-$v"(\+[0-9]+)?\.[^/]+$ ]]; then - run rm "$repo_new/1/$f" >&2 + if [ "$rv" -gt "$revision" ]; then + exit_with_manifest 422 "newer revision $name/$v is present" fi done +for f in "${arcs[@]}"; do + run rm "$f" +done + # Copy the archive rather than moving it since we may need it for # troubleshooting. Note: the data and repository directories can be on # different filesystems and so hardlinking could fail. diff --git a/brep/handler/submit/submit.bash.in b/brep/handler/submit/submit.bash.in index 667bbc1..7826809 100644 --- a/brep/handler/submit/submit.bash.in +++ b/brep/handler/submit/submit.bash.in @@ -47,12 +47,29 @@ function extract_package_manifest () # <archive> <manifest> local arc="$1" local man="$2" - # Pass the --deep option to make sure that the *-file manifest values are - # resolvable, so rep-create will not fail due to this package down the road. - # Note that we also make sure that all the manifest values are known (see - # bpkg-pkg-verify for details). + # Pass the --deep option to make sure that the bootstrap buildfile is + # present and the *-file manifest values are resolvable, so rep-create will + # not fail due to this package down the road. Note that we also make sure + # that all the manifest values are known (see bpkg-pkg-verify for details). # - if ! run_silent bpkg pkg-verify --deep --manifest "$arc" >"$man"; then + local cmd=(bpkg pkg-verify --deep --manifest "$arc") + trace_cmd "${cmd[@]}" + + # Note that we used to just advise the user to run bpkg-pkg-verify locally + # for the details on the potential failure. That, however, may not always be + # helpful since the user can use a different version of the toolchain and so + # may observe a different behavior. Thus, we add the bpkg-pkg-verify error + # message to the response, turning it into an info. This way the user may + # potentially see the following bdep-publish diagnostics: + # + # error: package archive is not valid + # info: unable to satisfy constraint (build2 >= 0.17.0-) for package libhello-1.0.0.tar.gz + # info: available build2 version is 0.16.0 + # info: run bpkg pkg-verify for details + # info: reference: 308e155764c8 + # + local e + if ! e="$("${cmd[@]}" 2>&1 >"$man")"; then # Perform the sanity check to make sure that bpkg is runnable. 
# @@ -60,6 +77,33 @@ error "unable to run bpkg" fi - exit_with_manifest 400 "archive is not a valid package (run bpkg pkg-verify for details)" + # Note that bpkg-pkg-verify diagnostics may potentially contain the + # archive absolute path. Let's sanitize these diagnostics by stripping the + # archive directory path, if present. Also note that to use sed for that + # we first need to escape the special regex characters and slashes in the + # archive directory path (see sed's basic regular expressions for + # details). + # + local d="$(sed 's/[[\.*^$/]/\\&/g' <<<"$(dirname "$arc")/")" + + e="$(sed -e "s/$d//g" -e 's/^error:/ info:/' <<<"$e")" + e=$'package archive is not valid\n'"$e"$'\n info: run bpkg pkg-verify for details' + + exit_with_manifest 400 "$e" fi } + +# Extract the revision part from the package version. Return 0 if the version +# doesn't contain a revision. +# +function version_revision () # version +{ + local r + r="$(sed -n -re 's%^(\+?[^+]+)(\+([0-9]+))?$%\3%p' <<<"$1")" + + if [ -z "$r" ]; then + r="0" + fi + + echo "$r" +} diff --git a/brep/handler/upload/.gitignore b/brep/handler/upload/.gitignore new file mode 100644 index 0000000..da4dc5a --- /dev/null +++ b/brep/handler/upload/.gitignore @@ -0,0 +1,2 @@ +brep-upload-bindist +brep-upload-bindist-clean diff --git a/brep/handler/upload/buildfile b/brep/handler/upload/buildfile new file mode 100644 index 0000000..ca52ddd --- /dev/null +++ b/brep/handler/upload/buildfile @@ -0,0 +1,13 @@ +# file : brep/handler/upload/buildfile +# license : MIT; see accompanying LICENSE file + +./: exe{brep-upload-bindist} exe{brep-upload-bindist-clean} + +include ../ + +exe{brep-upload-bindist}: in{upload-bindist} bash{upload} ../bash{handler} + +[rule_hint=bash] \ +exe{brep-upload-bindist-clean}: in{upload-bindist-clean} + +bash{upload}: in{upload} ../bash{handler} diff --git a/brep/handler/upload/upload-bindist-clean.in b/brep/handler/upload/upload-bindist-clean.in new file mode 100644 index 0000000..99914a7 --- /dev/null +++ b/brep/handler/upload/upload-bindist-clean.in @@ -0,0 +1,224 @@ +#!/usr/bin/env bash + +# file : brep/handler/upload/upload-bindist-clean.in +# license : MIT; see accompanying LICENSE file + +# Remove expired package configuration directories created by the +# upload-bindist handler. +# +# Specifically, perform the following steps: +# +# - Recursively scan the specified root directory and collect the package +# configuration directories with age older than the specified timeout (in +# minutes). Recognize the package configuration directories by matching the +# *-????-??-??T??:??:??Z* pattern and calculate their age based on the +# modification time of the packages.sha256 file they may contain. If +# packages.sha256 doesn't exist in the configuration directory, then +# consider it as still being prepared and skip it. +# +# - Iterate over the expired package configuration directories and for each of +# them: +# +# - Lock the root directory. +# +# - Re-check the expiration criteria. +# +# - Remove the package configuration symlink if it refers to this directory. +# +# - Remove this directory. +# +# - Remove all the parent directories of this directory which become +# empty, up to (but excluding) the root directory. +# +# - Unlock the root directory. +# +usage="usage: $0 <root> <timeout>" + +# Diagnostics. +# +verbose= #true + +# The root directory lock timeout (in seconds). 
+# +lock_timeout=60 + +trap "{ exit 1; }" ERR +set -o errtrace # Trap in functions and subshells. +set -o pipefail # Fail if any pipeline command fails. +shopt -s lastpipe # Execute last pipeline command in the current shell. +shopt -s nullglob # Expand no-match globs to nothing rather than themselves. + +function info () { echo "$*" 1>&2; } +function error () { info "$*"; exit 1; } +function trace () { if [ "$verbose" ]; then info "$*"; fi } + +# Trace a command line, quoting empty arguments as well as those that contain +# spaces. +# +function trace_cmd () # <cmd> <arg>... +{ + if [[ "$verbose" ]]; then + local s="+" + while [ $# -gt 0 ]; do + if [ -z "$1" -o -z "${1##* *}" ]; then + s="$s '$1'" + else + s="$s $1" + fi + + shift + done + + info "$s" + fi +} + +# Trace and run a command. +# +function run () # <cmd> <arg>... +{ + trace_cmd "$@" + "$@" +} + +if [[ "$#" -ne 2 ]]; then + error "$usage" +fi + +# Package configurations root directory. +# +root_dir="${1%/}" +shift + +if [[ -z "$root_dir" ]]; then + error "$usage" +fi + +if [[ ! -d "$root_dir" ]]; then + error "'$root_dir' does not exist or is not a directory" +fi + +# Package configuration directories timeout. +# +timeout="$1" +shift + +if [[ ! "$timeout" =~ ^[0-9]+$ ]]; then + error "$usage" +fi + +# Note that while the '%s' date format is not POSIX, it is supported on both +# Linux and FreeBSD. +# +expiration=$(($(date -u +"%s") - $timeout * 60)) + +# Collect the list of expired package configuration directories. +# +expired_dirs=() + +run find "$root_dir" -type d -name "*-????-??-??T??:??:??Z*" | while read d; do + f="$d/packages.sha256" + + # Note that while the -r date option is not POSIX, it is supported on both + # Linux and FreeBSD. + # + trace_cmd date -u -r "$f" +"%s" + if t="$(date -u -r "$f" +"%s" 2>/dev/null)" && (($t <= $expiration)); then + expired_dirs+=("$d") + fi +done + +if [[ "${#expired_dirs[@]}" -eq 0 ]]; then + exit 0 # Nothing to do. +fi + +# Make sure the root directory lock file exists. +# +lock="$root_dir/upload.lock" +run touch "$lock" + +# Remove the expired package configuration directories, symlinks which refer +# to them, and the parent directories which become empty. +# +for d in "${expired_dirs[@]}"; do + # Deduce the path of the potential package configuration symlink that may + # refer to this package configuration directory by stripping the + # -<timestamp>[-<number>] suffix. + # + l="$(sed -n -re 's/^(.+)-[0-9]{4}-[0-9]{2}-[0-9]{2}T[0-9]{2}:[0-9]{2}:[0-9]{2}Z(-[0-9]+)?$/\1/p' <<<"$d")" + if [[ -z "$l" ]]; then + error "invalid name '$d' for package configuration directory" + fi + + f="$d/packages.sha256" + + # Open the reading file descriptor and lock the root directory. Fail if + # unable to lock before timeout. + # + trace "+ exec {lfd}<$lock" + exec {lfd}<"$lock" + + if ! run flock -w "$lock_timeout" "$lfd"; then + error "unable to lock root directory" + fi + + # Now, as the lock is acquired, recheck the package configuration directory + # expiration criteria (see above) and, if it still holds, remove this + # directory, the package configuration symlink if it refers to it, and all + # the parent directories which become empty up to (but excluding) the root + # directory. + # + trace_cmd date -u -r "$f" +"%s" + if t="$(date -u -r "$f" +"%s" 2>/dev/null)" && (($t <= $expiration)); then + # Remove the package configuration symlink. + # + # Do this first to avoid dangling symlinks which may potentially be + # exposed by brep. 
+ # + # Note that while the realpath utility is not POSIX, it is present on + # both Linux and FreeBSD. + # + if [[ -L "$l" ]]; then + p="$(realpath "$l")" + if [[ "$p" == "$d" ]]; then + run rm "$l" + fi + fi + + # Remove the package configuration directory. + # + # Note that this directory contains files copied from a subdirectory of + # upload-data. These files are normally owned by the Apache2 user/group + # and have rw-r--r-- permissions. This script is normally executed as the + # brep user/group and thus the uploads root directory and all its + # subdirectories must have read, write, and execute permissions granted to + # the brep user, for example, by using ACL (see the INSTALL file for + # details). Since cp preserves the file permissions by default, these + # files' effective permissions will normally be r-- (read-only) for this + # script. In this case rm pops up the 'remove write-protected regular + # file' prompt by default prior to removing these files. To suppress the + # prompt we will pass the -f option to rm. + # + run rm -rf "$d" + + # Remove the empty parent directories. + # + # Note that we iterate until the rmdir command fails, presumably because a + # directory is not empty. + # + d="$(dirname "$d")" + while [[ "$d" != "$root_dir" ]]; do + trace_cmd rmdir "$d" + if rmdir "$d" 2>/dev/null; then + d="$(dirname "$d")" + else + break + fi + done + fi + + # Close the file descriptor and unlock the root directory. + # + trace "+ exec {lfd}<&-" + exec {lfd}<&- +done diff --git a/brep/handler/upload/upload-bindist.in b/brep/handler/upload/upload-bindist.in new file mode 100644 index 0000000..05d0bcf --- /dev/null +++ b/brep/handler/upload/upload-bindist.in @@ -0,0 +1,595 @@ +#!/usr/bin/env bash + +# file : brep/handler/upload/upload-bindist.in +# license : MIT; see accompanying LICENSE file + +# Binary distribution packages upload handler which places the uploaded +# packages under the following filesystem hierarchy: +# +# <root>/[<tenant>/]<instance>/<os-release-name-id><os-release-version-id>/<project>/<package>/<version>/<package-config> +# +# The overall idea behind this handler is to create a uniquely named package +# configuration directory for each upload and maintain the package +# configuration symlink at the above path to refer to the directory of the +# latest upload. +# +# The root directory is passed as an argument (via upload-handler-argument). +# All the remaining directory components are retrieved from the respective +# manifest values of request.manifest created by brep and +# bindist-result.manifest contained in the uploaded archive. +# +# Note that the leaf component of the package configuration symlink path is +# sanitized, having the "bindist", <instance>, <os-release-name-id>, and +# <os-release-name-id><os-release-version-id> dash-separated sub-components +# removed. If the component becomes empty as a result of the sanitization, +# then the target CPU is assumed, if the package is not architecture- +# independent, and "noarch" otherwise. If the sanitized component is not +# empty, the package is not architecture-independent, and the resulting +# component doesn't contain the target CPU, then prepend it with the <cpu>- +# prefix. 
For example, the following symlink paths: +# +# .../archive/windows10/foo/libfoo/1.0.0/bindist-archive-windows10-release +# .../archive/windows10/foo/libfoo/1.0.0/bindist-archive-windows10 +# +# are reduced to: +# +# .../archive/windows10/foo/libfoo/1.0.0/x86_64-release +# .../archive/windows10/foo/libfoo/1.0.0/x86_64 +# +# To achieve this the handler performs the following steps (<dir> is passed as +# the last argument by brep and is a subdirectory of upload-data): +# +# - Parse <dir>/request.manifest to retrieve the upload archive path, +# timestamp, and the values which are required to compose the package +# configuration symlink path. +# +# - Extract files from the upload archive. +# +# - Parse <dir>/<instance>/bindist-result.manifest to retrieve the values +# required to compose the package configuration symlink path and the package +# file paths. +# +# - Compose the package configuration symlink path. +# +# - Compose the package configuration directory path by appending the +# -<timestamp>[-<number>] suffix to the package configuration symlink path. +# +# - Create the package configuration directory. +# +# - Copy the uploaded package files into the package configuration directory. +# +# - Generate the packages.sha256 file in the package configuration directory, +# which lists the SHA256 checksums of the files contained in this directory. +# +# - Switch the package configuration symlink to refer to the newly created +# package configuration directory. +# +# - If the --keep-previous option is not specified, then remove the previous +# target of the package configuration symlink, if it exists. +# +# Notes: +# +# - There could be a race both with upload-bindist-clean and other +# upload-bindist instances while creating the package version/configuration +# directories, querying the package configuration symlink target, switching +# the symlink, and removing the symlink's previous target. To avoid it, the +# root directory needs to be locked for the duration of these operations. +# This, however, needs to be done granularly to perform the time-consuming +# operations (files copying, etc) while not holding the lock. +# +# - The brep module doesn't acquire the root directory lock. Thus, the package +# configuration symlink during its lifetime should always refer to a +# valid/complete package configuration directory. +# +# - Filesystem entries that exist or are created in the data directory: +# +# <archive> saved by brep +# request.manifest created by brep +# <instance>/* extracted by the handler (bindist-result.manifest, etc) +# result.manifest saved by brep +# +# Options: +# +# --keep-previous +# +# Don't remove the previous target of the package configuration symlink. +# +usage="usage: $0 [<options>] <root> <dir>" + +# Diagnostics. +# +verbose= #true + +# The root directory lock timeout (in seconds). +# +lock_timeout=60 + +# If the package configuration directory already exists (may happen due to the +# low timestamp resolution), then re-try creating the configuration directory +# by adding the -<number> suffix and incrementing it until the creation +# succeeds or the retries limit is reached. +# +create_dir_retries=99 + +trap "{ exit 1; }" ERR +set -o errtrace # Trap in functions and subshells. +set -o pipefail # Fail if any pipeline command fails. +shopt -s lastpipe # Execute last pipeline command in the current shell. +shopt -s nullglob # Expand no-match globs to nothing rather than themselves. 
+ +@import brep/handler/handler@ +@import brep/handler/upload/upload@ + +# Parse the command line options. +# +keep_previous= + +while [[ "$#" -gt 0 ]]; do + case $1 in + --keep-previous) + shift + keep_previous=true + ;; + *) + break + ;; + esac +done + +if [[ "$#" -ne 2 ]]; then + error "$usage" +fi + +# Destination root directory. +# +root_dir="${1%/}" +shift + +if [[ -z "$root_dir" ]]; then + error "$usage" +fi + +if [[ ! -d "$root_dir" ]]; then + error "'$root_dir' does not exist or is not a directory" +fi + +# Upload data directory. +# +data_dir="${1%/}" +shift + +if [[ -z "$data_dir" ]]; then + error "$usage" +fi + +if [[ ! -d "$data_dir" ]]; then + error "'$data_dir' does not exist or is not a directory" +fi + +reference="$(basename "$data_dir")" # Upload request reference. + +# Parse the upload request manifest. +# +manifest_parser_start "$data_dir/request.manifest" + +archive= +instance= +timestamp= +name= +version= +project= +package_config= +target= +tenant= + +while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do + case "$n" in + archive) archive="$v" ;; + instance) instance="$v" ;; + timestamp) timestamp="$v" ;; + name) name="$v" ;; + version) version="$v" ;; + project) project="$v" ;; + package-config) package_config="$v" ;; + target) target="$v" ;; + tenant) tenant="$v" ;; + esac +done + +manifest_parser_finish + +if [[ -z "$archive" ]]; then + error "archive manifest value expected" +fi + +if [[ -z "$instance" ]]; then + error "instance manifest value expected" +fi + +if [[ -z "$timestamp" ]]; then + error "timestamp manifest value expected" +fi + +if [[ -z "$name" ]]; then + error "name manifest value expected" +fi + +if [[ -z "$version" ]]; then + error "version manifest value expected" +fi + +if [[ -z "$project" ]]; then + error "project manifest value expected" +fi + +if [[ -z "$package_config" ]]; then + error "package-config manifest value expected" +fi + +if [[ -z "$target" ]]; then + error "target manifest value expected" +fi + +# Let's disallow the leading dot in the package-config manifest value since +# the latter serves as the package configuration symlink name and brep skips +# symlinks with the leading dots assuming them as hidden (see +# mod/mod-package-version-details.cxx for details). +# +if [[ "$package_config" == "."* ]]; then + exit_with_manifest 400 "package-config manifest value may not start with dot" +fi + +# Extract the CPU component from the target triplet and deduce the binary +# distribution-specific CPU representation which is normally used in the +# package file names. +# +cpu="$(sed -n -re 's/^([^-]+)-.+/\1/p' <<<"$target")" + +if [[ -z "$cpu" ]]; then + error "CPU expected in target triplet '$target'" +fi + +# Use CPU extracted from the target triplet as a distribution-specific +# representation, unless this is Debian or Fedora (see bpkg's +# system-package-manager-{fedora,debian}.cxx for details). +# +cpu_dist="$cpu" + +case $instance in + debian) + case $cpu in + x86_64) cpu_dist="amd64" ;; + aarch64) cpu_dist="arm64" ;; + i386 | i486 | i586 | i686) cpu_dist="i386" ;; + esac + ;; + fedora) + case $cpu in + i386 | i486 | i586 | i686) cpu_dist="i686" ;; + esac + ;; +esac + +# Unpack the archive. +# +run tar -xf "$data_dir/$archive" -C "$data_dir" + +# Parse the bindist result manifest list. +# +f="$data_dir/$instance/bindist-result.manifest" + +if [[ ! -f "$f" ]]; then + exit_with_manifest 400 "$instance/bindist-result.manifest not found" +fi + +manifest_parser_start "$f" + +# Parse the distribution manifest. 
+# +# Note that we need to skip the first manifest version value and parse until +# the next one is encountered, which introduces the first package file +# manifest. +# +os_release_name_id= +os_release_version_id= + +first=true +more= +while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do + case "$n" in + "") if [[ "$first" ]]; then # Start of the first (distribution) manifest? + first= + else # Start of the second (package file) manifest. + more=true + break + fi + ;; + + os-release-name-id) os_release_name_id="$v" ;; + os-release-version-id) os_release_version_id="$v" ;; + esac +done + +if [[ -z "$os_release_name_id" ]]; then + exit_with_manifest 400 "os-release-name-id bindist result manifest value expected" +fi + +if [[ -z "$os_release_version_id" ]]; then + exit_with_manifest 400 "os-release-version-id bindist result manifest value expected" +fi + +if [[ ! "$more" ]]; then + exit_with_manifest 400 "no package file manifests in bindist result manifest list" +fi + +# Parse the package file manifest list and cache the file paths. +# +# While at it, detect if the package is architecture-specific or not by +# checking if any package file names contain the distribution-specific CPU +# representation (as a sub-string). +# +# Note that while we currently only need the package file paths, we can make +# use of their types and system names in the future. Thus, let's verify that +# all the required package file values are present and, while at it, cache +# them all in the parallel arrays. +# +package_file_paths=() +package_file_types=() +package_file_system_names=() + +arch_specific= + +# The outer loop iterates over package file manifests while the inner loop +# iterates over manifest values in each such manifest. +# +while [[ "$more" ]]; do + more= + type= + path= + system_name= + + while IFS=: read -ru "$manifest_parser_ofd" -d '' n v; do + case "$n" in + "") # Start of the next package file manifest. + more=true + break + ;; + + package-file-path) path="$v" ;; + package-file-type) type="$v" ;; + package-file-system-name) system_name="$v" ;; + esac + done + + if [[ -z "$path" ]]; then + exit_with_manifest 400 "package-file-path bindist result manifest value expected" + fi + + if [[ -z "$type" ]]; then + exit_with_manifest 400 "package-file-type bindist result manifest value expected" + fi + + package_file_paths+=("$path") + package_file_types+=("$type") + package_file_system_names+=("$system_name") # Note: system name can be empty. + + if [[ "$path" == *"$cpu_dist"* ]]; then + arch_specific=true + fi +done + +manifest_parser_finish + +# Sanitize the package configuration name. +# +config= +for c in $(sed 's/-/ /g' <<<"$package_config"); do + if [[ "$c" != "bindist" && + "$c" != "$instance" && + "$c" != "$os_release_name_id" && + "$c" != "$os_release_name_id$os_release_version_id" ]]; then + if [[ -z "$config" ]]; then + config="$c" + else + config="$config-$c" + fi + fi +done + +# Reflect the architecture in the sanitized configuration name. +# +if [[ -z "$config" ]]; then + if [[ "$arch_specific" ]]; then + config="$cpu" + else + config="noarch" + fi +else + if [[ "$arch_specific" && ("$config" != *"$cpu"*) ]]; then + config="$cpu-$config" + fi +fi + +# Compose the package configuration symlink path. 
+# +config_link="$root_dir" + +if [[ -n "$tenant" ]]; then + config_link="$config_link/$tenant" +fi + +config_link="$config_link/$instance/$os_release_name_id$os_release_version_id" +config_link="$config_link/$project/$name/$version/$config" + +# Compose the package configuration directory path. +# +config_dir="$config_link-$timestamp" + +# Create the package configuration directory. +# +# Note that it is highly unlikely that multiple uploads for the same package +# configuration/distribution occur at the same time (with the seconds +# resolution) making the directory name not unique. If that still happens, +# let's retry for some reasonable number of times to create the directory, +# while adding the -<number> suffix to its path on each iteration. If +# that also fails, then we assume that there is some issue with the handler +# setup and fail, printing the cached mkdir diagnostics to stderr. +# +# Note that we need to prevent the removal of the potentially empty package +# version directory by the upload-bindist-clean script before we create the +# configuration directory. To achieve that, we lock the root directory for the +# duration of the package version/configuration directories creation. +# +# Open the reading file descriptor and lock the root directory. Fail if +# unable to lock before timeout. +# +lock="$root_dir/upload.lock" +run touch "$lock" +trace "+ exec {lfd}<$lock" +exec {lfd}<"$lock" + +if ! run flock -w "$lock_timeout" "$lfd"; then + exit_with_manifest 503 "upload service is busy" +fi + +# Create parent (doesn't fail if directory exists). +# +config_parent_dir="$(dirname "$config_dir")" +run mkdir -p "$config_parent_dir" + +created= + +trace_cmd mkdir "$config_dir" +if ! e="$(mkdir "$config_dir" 2>&1)"; then # Note: fails if directory exists. + for ((i=0; i != $create_dir_retries; ++i)); do + d="$config_dir-$i" + trace_cmd mkdir "$d" + if e="$(mkdir "$d" 2>&1)"; then + config_dir="$d" + created=true + break + fi + done +else + created=true +fi + +# Close the file descriptor and unlock the root directory. +# +trace "+ exec {lfd}<&-" +exec {lfd}<&- + +if [[ ! "$created" ]]; then + echo "$e" 1>&2 + error "unable to create package configuration directory" +fi + +# On exit, remove the newly created package configuration directory, unless +# its removal is canceled (for example, the symlink is switched to refer to +# it). Also remove the new symlink, if already created. +# +# Make sure we don't fail if the entries are already removed, for example, by +# the upload-bindist-clean script. +# +config_link_new= +function exit_trap () +{ + if [[ -n "$config_dir" && -d "$config_dir" ]]; then + if [[ -n "$config_link_new" && -L "$config_link_new" ]]; then + run rm -f "$config_link_new" + fi + run rm -rf "$config_dir" + fi +} + +trap exit_trap EXIT + +# Copy all the extracted package files to the package configuration directory. +# +for ((i=0; i != "${#package_file_paths[@]}"; ++i)); do + run cp "$data_dir/$instance/${package_file_paths[$i]}" "$config_dir" +done + +# Generate the packages.sha256 file. +# +# Note that since we don't hold the root directory lock at this time, we +# temporarily "hide" the resulting file from the upload-bindist-clean script +# (which uses it for the upload age calculation) by adding the leading dot to +# its name. Not doing so, we may potentially end up with upload-bindist-clean +# removing the half-cooked directory and so breaking the upload handling. 
+#
+trace "+ (cd $config_dir && exec sha256sum -b ${package_file_paths[@]} >.packages.sha256)"
+(cd "$config_dir" && exec sha256sum -b "${package_file_paths[@]}" >".packages.sha256")
+
+# Create the new package configuration "hidden" symlink. Construct its name by
+# prepending the configuration directory name with a dot.
+#
+config_dir_name="$(basename "$config_dir")"
+config_link_new="$config_parent_dir/.$config_dir_name"
+run ln -s "$config_dir_name" "$config_link_new"
+
+# Switch the package configuration symlink atomically. But first, cache the
+# previous package configuration symlink target if the --keep-previous option
+# is not specified and "unhide" the packages.sha256 file.
+#
+# Note that to avoid a race with upload-bindist-clean and other upload-bindist
+# instances, we need to perform all the mentioned operations as well as
+# removing the previous package configuration directory while holding the root
+# directory lock.
+
+# Lock the root directory.
+#
+trace "+ exec {lfd}<$lock"
+exec {lfd}<"$lock"
+
+if ! run flock -w "$lock_timeout" "$lfd"; then
+ exit_with_manifest 503 "upload service is busy"
+fi
+
+# Note that while the realpath utility is not POSIX, it is present on both
+# Linux and FreeBSD.
+#
+config_dir_prev=
+if [[ ! "$keep_previous" && -L "$config_link" ]]; then
+ config_dir_prev="$(realpath "$config_link")"
+fi
+
+# "Unhide" the packages.sha256 file.
+#
+run mv "$config_dir/.packages.sha256" "$config_dir/packages.sha256"
+
+# Note that since brep doesn't acquire the root directory lock, we need to
+# switch the symlink as the final step, when the package directory is fully
+# prepared and can be exposed.
+#
+# @@ Also note that the -T option is Linux-specific. To add support for
+#    FreeBSD we need to use the -h option there (though -T may work there
+#    as well).
+#
+run mv -T "$config_link_new" "$config_link"
+
+# Now that the package configuration symlink is switched, disable removal of
+# the newly created package configuration directory.
+#
+# Note that we can still respond with an error status. However, the remaining
+# operations are all cleanups and thus unlikely to fail.
+#
+config_dir=
+
+# Remove the previous package configuration directory, if requested.
+#
+if [[ -n "$config_dir_prev" ]]; then
+ run rm -r "$config_dir_prev"
+fi
+
+# Unlock the root directory.
+#
+trace "+ exec {lfd}<&-"
+exec {lfd}<&-
+
+# Remove the no longer needed upload data directory.
+#
+run rm -r "$data_dir"
+
+trace "binary distribution packages are published"
+exit_with_manifest 200 "binary distribution packages are published"
diff --git a/brep/handler/upload/upload.bash.in b/brep/handler/upload/upload.bash.in
new file mode 100644
index 0000000..9acead9
--- /dev/null
+++ b/brep/handler/upload/upload.bash.in
@@ -0,0 +1,40 @@
+# file : brep/handler/upload/upload.bash.in
+# license : MIT; see accompanying LICENSE file
+
+# Utility functions useful for implementing upload handlers.
+
+if [ "$brep_handler_upload" ]; then
+  return 0
+else
+  brep_handler_upload=true
+fi
+
+@import brep/handler/handler@
+
+# Serialize the upload result manifest to stdout and exit the (sub-)shell with
+# the zero status.
+#
+reference= # Should be assigned later by the handler, when it becomes available.
+
+function exit_with_manifest () # <status> <message>
+{
+ trace_func "$@"
+
+ local sts="$1"
+ local msg="$2"
+
+ manifest_serializer_start
+
+ manifest_serialize "" "1" # Start of manifest.
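+
+ # For example (made-up status and message values), the serialized manifest
+ # would look like this:
+ #
+ #   : 1
+ #   status: 400
+ #   message: archive checksum mismatch
+ #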
+ manifest_serialize "status" "$sts" + manifest_serialize "message" "$msg" + + if [ -n "$reference" ]; then + manifest_serialize "reference" "$reference" + elif [ "$sts" == "200" ]; then + error "no reference for code $sts" + fi + + manifest_serializer_finish + run exit 0 +} diff --git a/build/root.build b/build/root.build index 627f0ee..3dbc0cf 100644 --- a/build/root.build +++ b/build/root.build @@ -1,6 +1,10 @@ # file : build/root.build # license : MIT; see accompanying LICENSE file +config [bool] config.brep.develop ?= false + +develop = $config.brep.develop + cxx.std = latest using cxx @@ -10,12 +14,22 @@ ixx{*}: extension = ixx txx{*}: extension = txx cxx{*}: extension = cxx -cxx.poptions =+ "-I$out_root" "-I$src_root" - # Disable "unknown pragma" warnings. # cxx.coptions += -Wno-unknown-pragmas +if ($cxx.id == 'gcc') +{ + cxx.coptions += -Wno-maybe-uninitialized -Wno-free-nonheap-object # libbutl + + if ($cxx.version.major >= 13) + cxx.coptions += -Wno-dangling-reference +} +elif ($cxx.id.type == 'clang' && $cxx.version.major >= 15) + cxx.coptions += -Wno-unqualified-std-cast-call + +cxx.poptions =+ "-I$out_root" "-I$src_root" + # Load the cli module but only if it's available. This way a distribution # that includes pre-generated files can be built without installing cli. # This is also the reason why we need to explicitly spell out individual @@ -51,9 +65,26 @@ tests/{libue libul}{*}: bin.whole = false # test.target = $cxx.target -# Extract the copyright notice from the LICENSE file. +# Omit the rest during the skeleton load. # -copyright = $process.run_regex( \ - cat $src_root/LICENSE, \ - 'Copyright \(c\) (.+) \(see the AUTHORS and LEGAL files\)\.', \ - '\1') +if ($build.mode != 'skeleton') +{ + # Unless we are in the develop mode, detect the Apache2 headers location + # automatically and add the respective preprocessor option. + # + if! $develop + { + apache2_includedir = [dir_path] $process.run(apxs -q 'INCLUDEDIR') + + config [config.report] apache2_includedir + + cxx.poptions += "-I$apache2_includedir" + } + + # Extract the copyright notice from the LICENSE file. + # + copyright = $process.run_regex( \ + cat $src_root/LICENSE, \ + 'Copyright \(c\) (.+) \(see the AUTHORS and LEGAL files\)\.', \ + '\1') +} diff --git a/clean/buildfile b/clean/buildfile index 11fa2a2..b91b1a0 100644 --- a/clean/buildfile +++ b/clean/buildfile @@ -7,9 +7,10 @@ import libs += libbutl%lib{butl} import libs += libbbot%lib{bbot} include ../libbrep/ +include ../mod/ exe{brep-clean}: {hxx ixx cxx}{* -clean-options} {hxx ixx cxx}{clean-options} \ - ../libbrep/lib{brep} $libs + ../mod/libue{mod} ../libbrep/lib{brep} $libs # Build options. # diff --git a/clean/clean.cxx b/clean/clean.cxx index 5401ab1..828ae4b 100644 --- a/clean/clean.cxx +++ b/clean/clean.cxx @@ -12,9 +12,7 @@ #include <odb/pgsql/database.hxx> -#include <libbutl/pager.mxx> - -#include <libbbot/build-config.hxx> +#include <libbutl/pager.hxx> #include <libbrep/build.hxx> #include <libbrep/build-odb.hxx> @@ -24,10 +22,11 @@ #include <libbrep/build-package-odb.hxx> #include <libbrep/database-lock.hxx> +#include <mod/build-target-config.hxx> + #include <clean/clean-options.hxx> using namespace std; -using namespace bbot; using namespace odb::core; namespace brep @@ -205,12 +204,13 @@ namespace brep return 1; } - set<string> configs; + // Load build target configurations. 
+ // + build_target_configs configs; try { - for (auto& c: parse_buildtab (cp)) - configs.emplace (move (c.name)); + configs = bbot::parse_buildtab (cp); } catch (const io_error& e) { @@ -218,6 +218,13 @@ namespace brep return 1; } + // Note: contains shallow references to the configuration targets/names. + // + set<build_target_config_id> configs_set; + + for (const build_target_config& c: configs) + configs_set.insert (build_target_config_id {c.target, c.name}); + // Parse timestamps. // map<string, timestamp> timeouts; // Toolchain timeouts. @@ -259,18 +266,26 @@ namespace brep // // Query package builds in chunks in order not to hold locks for too long. // Sort the result by package version to minimize number of queries to the - // package database. + // package database. Note that we still need to sort by configuration and + // toolchain to make sure that builds are sorted consistently across + // queries and we don't miss any of them. // using bld_query = query<build>; using prep_bld_query = prepared_query<build>; size_t offset (0); bld_query bq ("ORDER BY" + - bld_query::id.package.tenant + "," + - bld_query::id.package.name + + bld_query::id.package.tenant + "," + + bld_query::id.package.name + order_by_version_desc (bld_query::id.package.version, - false) + - "OFFSET" + bld_query::_ref (offset) + "LIMIT 100"); + false) + "," + + bld_query::id.target + "," + + bld_query::id.target_config_name + "," + + bld_query::id.package_config_name + "," + + bld_query::id.toolchain_name + + order_by_version (bld_query::id.toolchain_version, + false /* first */) + + "OFFSET" + bld_query::_ref (offset) + "LIMIT 2000"); connection_ptr conn (db.connection ()); @@ -284,19 +299,19 @@ namespace brep // be made once per tenant package name due to the builds query sorting // criteria (see above). // - using pkg_query = query<buildable_package>; - using prep_pkg_query = prepared_query<buildable_package>; + using pkg_query = query<build_package_version>; + using prep_pkg_query = prepared_query<build_package_version>; string tnt; package_name pkg_name; set<version> package_versions; - pkg_query pq ( - pkg_query::build_package::id.tenant == pkg_query::_ref (tnt) && - pkg_query::build_package::id.name == pkg_query::_ref (pkg_name)); + pkg_query pq (pkg_query::buildable && + pkg_query::id.tenant == pkg_query::_ref (tnt) && + pkg_query::id.name == pkg_query::_ref (pkg_name)); prep_pkg_query pkg_prep_query ( - conn->prepare_query<buildable_package> ("package-query", pq)); + conn->prepare_query<build_package_version> ("package-query", pq)); for (bool ne (true); ne; ) { @@ -316,11 +331,16 @@ namespace brep ? i->second : default_timeout); - // @@ Note that this approach doesn't consider the case when both - // the configuration and the package still exists but the package - // now excludes the configuration (configuration is now of the - // legacy class instead of the default class, etc). We should - // probably re-implement it in a way brep-monitor does it. + // Note that we don't consider the case when both the configuration + // and the package still exist but the package now excludes the + // configuration (configuration is now of the legacy class instead + // of the default class, etc). Should we handle this case and + // re-implement in a way brep-monitor does it? Probably not since + // the described situation is not very common and storing some extra + // builds which sooner or later will be wiped out due to the timeout + // is harmless. 
The current implementation, however, is simpler and
+ // consumes fewer resources at runtime (doesn't load build package
+ // objects, etc).
 //
 bool cleanup (
 // Check that the build is not stale.
@@ -332,7 +352,10 @@
 // Note that we are unable to detect configuration changes and rely on
 // periodic rebuilds to take care of that.
 //
- configs.find (b.configuration) == configs.end ());
+ configs_set.find (
+ build_target_config_id {b.target,
+ b.target_config_name}) ==
+ configs_set.end ());

 // Check that the build package still exists.
 //
@@ -349,7 +372,7 @@
 }

 cleanup = package_versions.find (b.package_version) ==
- package_versions.end ();
+ package_versions.end ();
 }

 if (cleanup)
@@ -457,8 +480,8 @@
 auto tenant_ids (pq.execute ());
 if ((ne = !tenant_ids.empty ()))
 {
- // Cache tenant ids and erase packages, repositories, and tenants at
- // once.
+ // Cache tenant ids and erase packages, repositories, public keys, and
+ // tenants at once.
 //
 strings tids;
 tids.reserve (tenant_ids.size ());
@@ -474,6 +497,9 @@
 db.erase_query<repository> (
 query<repository>::id.tenant.in_range (tids.begin (), tids.end ()));

+ db.erase_query<public_key> (
+ query<public_key>::id.tenant.in_range (tids.begin (), tids.end ()));
+
 db.erase_query<tenant> (
 query<tenant>::id.in_range (tids.begin (), tids.end ()));
 }
@@ -1,6 +1,6 @@
#! /usr/bin/env bash

-version=0.14.0-a.0.z
+version=0.17.0-a.0.z

trap 'exit 1' ERR
set -o errtrace # Trap in functions.
@@ -62,6 +62,7 @@ function compile ()
--man-epilogue-file man-epilogue.1 \
--link-regex '%bpkg(#.+)?%$1%' \
--link-regex '%brep(#.+)?%$1%' \
+--link-regex '%bbot(#.+)?%$1%' \
../$n.cli
}

@@ -100,6 +101,7 @@ cli -I .. \
--link-regex '%b([-.].+)%../../build2/doc/b$1%' \
--link-regex '%bpkg([-.].+)%../../bpkg/doc/bpkg$1%' \
--link-regex '%bpkg(#.+)?%../../bpkg/doc/build2-package-manager-manual.xhtml$1%' \
+--link-regex '%bbot(#.+)?%../../bbot/doc/build2-build-bot-manual.xhtml$1%' \
--output-prefix build2-repository-interface- \
manual.cli
diff --git a/doc/manual.cli b/doc/manual.cli
index 71a25a5..2b96393 100644
--- a/doc/manual.cli
+++ b/doc/manual.cli
@@ -121,7 +121,6 @@ reference: <abbrev-checksum>
|

-
\li|Send the submission email.

If \c{submit-email} is configured, send an email to this address containing
@@ -306,6 +305,15 @@ Check violations that are explicitly mentioned above are always reported with
the CI result manifest. Other errors (for example, internal server errors)
might be reported with unformatted text, including HTML.

+If the CI request contains the \c{interactive} parameter, then the CI service
+provides the execution environment login information for each build and stops
+it at the specified breakpoint.
+
+Pre-defined breakpoint ids are \c{error} and \c{warning}. The breakpoint id is
+included in the CI request manifest and the CI service must at least handle
+\c{error} but may recognize additional ids (build phase/command identifiers,
+etc).
+
If the CI request contains the \c{simulate} parameter, then the CI service
simulates the specified outcome of the CI process without actually performing
any externally visible actions (e.g., testing the package, publishing the
@@ -328,16 +336,28 @@ corresponding to the custom request parameters.
id: <request-id>
repository: <url>
[package]: <name>[/<version>]
-timestamp: <date-time>
+[interactive]: <breakpoint>
[simulate]: <outcome>
+timestamp: <date-time>
[client-ip]: <string>
[user-agent]: <string>
+[service-id]: <string>
+[service-type]: <string>
+[service-data]: <string>
\

The \c{package} value can be repeated multiple times. The \c{timestamp} value
is in the ISO-8601 \c{<YYYY>-<MM>-<DD>T<hh>:<mm>:<ss>Z} form (always UTC).
Note also that \c{client-ip} can be IPv4 or IPv6.

+Note that some CI service implementations may serve as backends for
+third-party services. The latter may initiate CI tasks, providing all the
+required information via some custom protocol, and expect the CI service to
+notify them of the progress. In this case the third-party service type as
+well as optionally the third-party id and custom state data can be
+communicated to the underlying CI handler program via the respective
+\c{service-*} manifest values.
+

\h#ci-overrides-manifest|CI Overrides Manifest|

@@ -349,8 +369,30 @@ being applied. Currently, only the following value groups can be overridden:

\
build-email build-{warning,error}-email
builds build-{include,exclude}
+*-builds *-build-{include,exclude}
+*-build-config
\

+For the package configuration-specific build constraint overrides, the
+corresponding configuration must exist in the package manifest. In contrast,
+the package configuration override (\cb{*-build-config}) adds a new
+configuration if it doesn't exist and updates the arguments of the existing
+configuration otherwise. In the former case, all the potential build
+constraint overrides for such a newly added configuration must follow the
+corresponding \cb{*-build-config} override.
+
+Note that the build constraints group values (both common and build package
+configuration-specific) are overridden hierarchically so that the
+\c{[\b{*-}]\b{build-}{\b{include},\b{exclude}\}} overrides don't affect the
+respective \c{[\b{*-}]\b{builds}} values.
+
+Note also that the common and build package configuration-specific build
+constraints group value overrides are mutually exclusive. If the common build
+constraints are overridden, then all the configuration-specific constraints
+are removed. Otherwise, if any configuration-specific constraints are
+overridden, then for the remaining configurations the build constraints are
+reset to \cb{builds:\ none}.
+
See \l{bpkg#manifest-package Package Manifest} for details on these values.

@@ -368,4 +410,182 @@ message: <string>
[reference]: <string>
\
+
+
+\h1#upload|Build Artifacts Upload|
+
+The build artifacts upload functionality allows uploading archives of files
+generated as a byproduct of the package builds. Such archives as well as
+additional, repository-specific information can optionally be uploaded by the
+automated build bots via the HTTP \c{POST} method using the
+\c{multipart/form-data} content type (see the \l{bbot \c{bbot} documentation}
+for details). The implementation in \c{brep} only handles uploading as well as
+basic actions and verification (build session resolution, agent
+authentication, checksum verification), expecting the rest of the upload logic
+to be handled by a separate entity according to the repository policy. Such an
+entity can be notified by \c{brep} about a new upload as an invocation of the
+\i{handler program} (as part of the HTTP request) and/or via email. It could
+also be a separate process that monitors the upload data directory.
+
+For each upload request \c{brep} performs the following steps.
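+
+As a reference for the steps below, such an upload could, for example, be
+performed with \c{curl} along these lines (a sketch only; the URL, archive
+name, and parameter values are hypothetical placeholders and the upload type
+is assumed to be \c{bindist}):
+
+\
+curl -F session=<session-id> -F instance=<name> -F archive=@bindist.tar.xz -F sha256sum=<sum> https://example.org/pkg?upload=bindist
+\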
+
+\ol|
+
+\li|Determine upload type.
+
+The upload type must be passed via the \c{upload} parameter in the query
+component of the request URL.|
+
+\li|Verify upload size limit.
+
+The upload form-data payload size must not exceed the \c{upload-max-size}
+limit specific to this upload type.|
+
+\li|Verify the required \c{session}, \c{instance}, \c{archive}, and
+\c{sha256sum} parameters are present. If \c{brep} is configured to perform
+agent authentication, then verify that the \c{challenge} parameter is also
+present. See the \l{bbot#arch-result-req Result Request Manifest} for
+semantics of the \c{session} and \c{challenge} parameters.
+
+The \c{archive} parameter must be the build artifacts archive upload while
+\c{sha256sum} must be its 64-character SHA256 checksum calculated in binary
+mode.|
+
+\li|Verify other parameters are valid manifest name/value pairs.
+
+The value can only contain UTF-8 encoded Unicode graphic characters as well as
+tab (\c{\\t}), carriage return (\c{\\r}), and line feed (\c{\\n}).|
+
+\li|Resolve the session.
+
+Resolve the \c{session} parameter value to the actual package build
+information.|
+
+\li|Authenticate the build bot agent.
+
+Use the \c{challenge} parameter value and the resolved package build
+information to authenticate the agent, if configured to do so.|
+
+\li|Generate upload request id and create request directory.
+
+For each upload request a unique id (UUID) is generated and a request
+subdirectory is created in the \c{upload-data} directory with this id as its
+name.|
+
+\li|Save the upload archive into the request directory and verify its
+checksum.
+
+The archive is saved using the submitted name, and its checksum is calculated
+and compared to the submitted checksum.|
+
+\li|Save the upload request manifest into the request directory.
+
+The upload request manifest is saved as \c{request.manifest} into the request
+subdirectory next to the archive.|
+
+\li|Invoke the upload handler program.
+
+If \c{upload-handler} is configured, invoke the handler program passing to it
+additional arguments specified with \c{upload-handler-argument} (if any)
+followed by the absolute path to the upload request directory.
+
+The handler program is expected to write the upload result manifest to
+\c{stdout} and terminate with the zero exit status. A non-zero exit status is
+treated as an internal error. The handler program's \c{stderr} is logged.
+
+Note that the handler program should report temporary server errors (service
+overload, network connectivity loss, etc.) via the upload result manifest
+status values in the [500-599] range (HTTP server error) rather than via a
+non-zero exit status.
+
+The handler program assumes ownership of the upload request directory and can
+move/remove it. If after the handler program terminates the request directory
+still exists, then it is handled by \c{brep} depending on the handler process
+exit status and the upload result manifest status value. If the process has
+terminated abnormally or with a non-zero exit status or the result manifest
+status is in the [500-599] range (HTTP server error), then the directory is
+saved for troubleshooting by appending the \c{.fail} extension to its name.
+Otherwise, if the status is in the [400-499] range (HTTP client error), then
+the directory is removed. If the directory is left in place by the handler or
+is saved for troubleshooting, then the upload result manifest is saved as
+\c{result.manifest} into this directory, next to the request manifest.
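+
+For example (made-up status and message values), a result manifest written by
+a handler that rejects an upload could look like this:
+
+\
+status: 400
+message: archive checksum mismatch
+\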
+ +If \c{upload-handler-timeout} is configured and the handler program does not +exit in the allotted time, then it is killed and its termination is treated as +abnormal. + +If the handler program is not specified, then the following upload result +manifest is implied: + +\ +status: 200 +message: <upload-type> upload is queued +reference: <request-id> +\ + +| + +\li|Send the upload email. + +If \c{upload-email} is configured, send an email to this address containing +the upload request manifest and the upload result manifest.| + +\li|Respond to the client. + +Respond to the client with the upload result manifest and its \c{status} value +as the HTTP status code.| + +| + +Check violations (max size, etc) that are explicitly mentioned above are +always reported with the upload result manifest. Other errors (for example, +internal server errors) might be reported with unformatted text, including +HTML. + + +\h#upload-request-manifest|Upload Request Manifest| + +The upload request manifest starts with the below values and in that order +optionally followed by additional values in the unspecified order +corresponding to the custom request parameters. + +\ +id: <request-id> +session: <session-id> +instance: <name> +archive: <name> +sha256sum: <sum> +timestamp: <date-time> + +name: <name> +version: <version> +project: <name> +target-config: <name> +package-config: <name> +target: <target-triplet> +[tenant]: <tenant-id> +toolchain-name: <name> +toolchain-version: <standard-version> +repository-name: <canonical-name> +machine-name: <name> +machine-summary: <text> +\ + +The \c{timestamp} value is in the ISO-8601 +\c{<YYYY>-<MM>-<DD>T<hh>:<mm>:<ss>Z} form (always UTC). + + +\h#upload-result-manifest|Upload Result Manifest| + +The upload result manifest starts with the below values and in that order +optionally followed by additional values if returned by the handler program. +If the upload request is successful, then the \c{reference} value must be +present and contain a string that can be used to identify this request (for +example, the upload request id). + +\ +status: <http-code> +message: <string> +[reference]: <string> +\ + " diff --git a/doc/style b/doc/style -Subproject 10f31a8bea8e5817fccf01978009c1ecaf3eabf +Subproject b72eb624d13b1628e27e9f6c0b3c80853e8e015 diff --git a/etc/brep-module.conf b/etc/brep-module.conf index 83d18da..d5a5e78 100644 --- a/etc/brep-module.conf +++ b/etc/brep-module.conf @@ -14,6 +14,13 @@ # search-title Packages +# Package search page description. If specified, it is displayed before the +# search form on the first page only. The value is treated as an XHTML5 +# fragment. +# +# search-description "" + + # Web page logo. It is displayed in the page header aligned to the left edge. # The value is treated as an XHTML5 fragment. # @@ -112,6 +119,25 @@ menu About=?about # build-bot-agent-keys +# Regular expressions in the /<regex>/<replacement>/ form for transforming the +# interactive build login information, for example, into the actual command +# that can be used by the user. The regular expressions are matched against +# the "<agent> <interactive-login>" string containing the respective task +# request manifest values. The first matching expression is used for the +# transformation. If no expression matches, then the task request is +# considered invalid, unless no expressions are specified. Repeat this option +# to specify multiple expressions. +# +# build-interactive-login + + +# Order in which packages are considered for build. 
The valid values are +# 'stable' and 'random'. If not specified, then 'stable' is assumed. Note that +# interactive builds are always preferred. +# +#build-package-order stable + + # Number of builds per page. # # build-page-entries 20 @@ -128,16 +154,20 @@ menu About=?about # build-forced-rebuild-timeout 600 -# Time to wait before considering a package for a normal rebuild. Must be -# specified in seconds. Default is 24 hours. +# Time to wait before considering a package for a soft rebuild (only to be +# performed if the build environment or any of the package dependencies have +# changed). Must be specified in seconds. The special zero value disables soft +# rebuilds. Default is 24 hours. # -# build-normal-rebuild-timeout 86400 +# build-soft-rebuild-timeout 86400 -# Alternative package rebuild timeout to use instead of the normal rebuild -# timeout (see the build-normal-rebuild-timeout option for details) during -# the specified time interval. Must be specified in seconds. Default is the -# time interval length. +# Alternative package soft rebuild timeout to use instead of the soft rebuild +# timeout (see the build-soft-rebuild-timeout option for details) during the +# specified time interval. Must be specified in seconds. Default is the time +# interval length plus (build-soft-rebuild-timeout - 24h) if soft rebuild +# timeout is greater than 24 hours (thus the rebuild is only triggered within +# the last 24 hours of the build-soft-rebuild-timeout expiration). # # The alternative rebuild timeout can be used to "pull" the rebuild window to # the specified time of day, for example, to optimize load and/or power @@ -157,9 +187,33 @@ menu About=?about # times must both be either specified or absent. If unspecified, then no # alternative rebuild timeout will be used. # -# build-alt-rebuild-timeout -# build-alt-rebuild-start -# build-alt-rebuild-stop +# build-alt-soft-rebuild-timeout +# build-alt-soft-rebuild-start +# build-alt-soft-rebuild-stop + + +# Time to wait before considering a package for a hard rebuild (to be +# performed unconditionally). Must be specified in seconds. The special zero +# value disables hard rebuilds. Default is 7 days. +# +# build-hard-rebuild-timeout 604800 + + +# Alternative package hard rebuild timeout. The semantics is the same as for +# the build-alt-soft-rebuild-* options but for the build-hard-rebuild-timeout +# option. +# +# build-alt-hard-rebuild-timeout +# build-alt-hard-rebuild-start +# build-alt-hard-rebuild-stop + + +# Time to wait before assuming the 'queued' notifications are delivered for +# package CI requests submitted via third-party services (GitHub, etc). During +# this time a package is not considered for a build. Must be specified in +# seconds. Default is 30 seconds. +# +# build-queued-timeout 30 # The maximum size of the build task request manifest accepted. Note that the @@ -183,6 +237,19 @@ menu About=?about # build-result-request-max-size 10485760 +# Enable or disable package build notification emails in the <name>=<mode> +# form. The valid <mode> values are 'none', 'latest', and 'all'. If 'all' is +# specified for a toolchain name, then emails are sent according to the +# build-*email package manifest values when all versions of a package are +# built with this toolchain. If 'latest' is specified, then for this toolchain +# name the emails are only sent for the latest version of a package. If 'none' +# is specified, then no emails are sent for this toolchain name. By default +# the 'latest' mode is assumed. 
Repeat this option to enable/disable emails
+# for multiple toolchains.
+#
+# build-toolchain-email <toolchain-name>=latest|none|all
+
+
# The build database connection configuration. By default, brep will try to
# connect to the local instance of PostgreSQL with the operating system-default
# mechanism (Unix-domain socket, etc) and use operating system (login) user
@@ -211,6 +278,25 @@ menu About=?about
# build-db-retry 10


+# The root directory where the uploaded binary distribution packages are
+# saved to under the following directory hierarchy:
+#
+# [<tenant>/]<distribution>/<os-release>/<project>/<package>/<version>/<package-config>
+#
+# The package configuration directory symlinks that match these paths are
+# mapped to web URLs based on the bindist-url value and displayed on the
+# package version details page. If this option is specified, then bindist-url
+# must be specified as well.
+#
+# bindist-root
+
+
+# The root URL of the directory specified with the bindist-root option. This
+# option must be specified if bindist-root is specified.
+#
+# bindist-url
+
+
# The openssl program to be used for crypto operations. You can also specify
# additional options that should be passed to the openssl program with
# openssl-option. If the openssl program is not explicitly specified, then brep
@@ -287,10 +373,9 @@ menu About=?about


# The handler program to be executed on package submission. The handler is
-# executed as part of the submission request and is passed additional
-# arguments that can be specified with submit-handler-argument followed by
-# the absolute path to the submission directory. Note that the program path
-# must be absolute.
+# executed as part of the HTTP request and is passed additional arguments that
+# can be specified with submit-handler-argument followed by the absolute path
+# to the submission directory. Note that the program path must be absolute.
#
# submit-handler

@@ -354,6 +439,66 @@ menu About=?about
# ci-handler-timeout


+# The directory to save upload data to for the specified upload type. If
+# unspecified, the build artifacts upload functionality will be disabled for
+# this type.
+#
+# Note that the directory path must be absolute and the directory itself must
+# exist and have read, write, and execute permissions granted to the user that
+# runs the web server.
+#
+# upload-data <type>=<dir>
+
+
+# The maximum size of the upload data accepted for the specified upload type.
+# Note that currently the entire upload request is read into memory. The
+# default is 10M.
+#
+# upload-max-size <type>=10485760
+
+
+# The build artifacts upload email. If specified, the upload request and
+# result manifests will be sent to this address.
+#
+# upload-email <type>=<email>
+
+
+# The handler program to be executed on build artifacts upload of the
+# specified type. The handler is executed as part of the HTTP request and is
+# passed additional arguments that can be specified with
+# upload-handler-argument followed by the absolute path to the upload
+# directory (upload-data). Note that the program path must be absolute.
+#
+# upload-handler <type>=<path>
+
+
+# Additional arguments to be passed to the upload handler program for the
+# specified upload type (see upload-handler for details). Repeat this option
+# to specify multiple arguments.
+#
+# upload-handler-argument <type>=<arg>
+
+
+# The upload handler program timeout in seconds for the specified upload type.
+# If specified and the handler does not exit in the allotted time, then it is +# killed and its termination is treated as abnormal. +# +# upload-handler-timeout <type>=<seconds> + + +# Disable upload of the specified type for the specified toolchain name. +# Repeat this option to disable uploads for multiple toolchains. +# +# upload-toolchain-exclude <type>=<name> + + +# Disable upload of the specified type for packages from the repository with +# the specified canonical name. Repeat this option to disable uploads for +# multiple repositories. +# +# upload-repository-exclude <type>=<name> + + # The default view to display for the global repository root. The value is one # of the supported services (packages, builds, submit, ci, etc). Default is # packages. diff --git a/etc/private/install/brep-install b/etc/private/install/brep-install index 046f99f..37179c2 100755 --- a/etc/private/install/brep-install +++ b/etc/private/install/brep-install @@ -57,15 +57,15 @@ usage="Usage: $0 [<options>]" # repository the toolchain installation script downloads the build2 packages # from. # -toolchain_repo_cert_fp="86:BA:D4:DE:2C:87:1A:EE:38:C7:F1:64:7F:65:77:02:15:79:F3:C4:83:C0:AB:5A:EA:F4:F7:8C:1D:63:30:C6" -#toolchain_repo_cert_fp="37:CE:2C:A5:1D:CF:93:81:D7:07:46:AD:66:B3:C3:90:83:B8:96:9E:34:F0:E7:B3:A2:B0:6C:EF:66:A4:BE:65" +toolchain_repo_cert_fp="70:64:FE:E4:E0:F3:60:F1:B4:51:E1:FA:12:5C:E0:B3:DB:DF:96:33:39:B9:2E:E5:C2:68:63:4C:A6:47:39:43" +#toolchain_repo_cert_fp="EC:50:13:E2:3D:F7:92:B4:50:0B:BF:2A:1F:7D:31:04:C6:57:6F:BC:BE:04:2E:E0:58:14:FA:66:66:21:1F:14" # brep package repository URL and certificate fingerprint. # #brep_repo_url="https://pkg.cppget.org/1/alpha" -#brep_repo_cert_fp="86:BA:D4:DE:2C:87:1A:EE:38:C7:F1:64:7F:65:77:02:15:79:F3:C4:83:C0:AB:5A:EA:F4:F7:8C:1D:63:30:C6" +#brep_repo_cert_fp="70:64:FE:E4:E0:F3:60:F1:B4:51:E1:FA:12:5C:E0:B3:DB:DF:96:33:39:B9:2E:E5:C2:68:63:4C:A6:47:39:43" brep_repo_url="https://stage.build2.org/1" -brep_repo_cert_fp="37:CE:2C:A5:1D:CF:93:81:D7:07:46:AD:66:B3:C3:90:83:B8:96:9E:34:F0:E7:B3:A2:B0:6C:EF:66:A4:BE:65" +brep_repo_cert_fp="EC:50:13:E2:3D:F7:92:B4:50:0B:BF:2A:1F:7D:31:04:C6:57:6F:BC:BE:04:2E:E0:58:14:FA:66:66:21:1F:14" owd=`pwd` trap "{ exit 1; }" ERR @@ -271,6 +271,12 @@ GRANT ALL PRIVILEGES ON DATABASE brep_package, brep_build TO brep; CREATE USER "www-data" INHERIT IN ROLE brep; CREATE USER "brep-build" INHERIT IN ROLE brep PASSWORD '-'; + +\c brep_package +GRANT ALL PRIVILEGES ON SCHEMA public TO brep; + +\c brep_build +GRANT ALL PRIVILEGES ON SCHEMA public TO brep; EOF # Create the "staging" package database for the submit-pub package submission @@ -284,6 +290,9 @@ LC_COLLATE 'en_US.UTF8' LC_CTYPE 'en_US.UTF8'; GRANT ALL PRIVILEGES ON DATABASE brep_submit_package TO brep; + +\c brep_submit_package +GRANT ALL PRIVILEGES ON SCHEMA public TO brep; EOF # Make sure the 'brep' and Apache2 user's logins work properly. @@ -303,7 +312,7 @@ CREATE EXTENSION postgres_fdw; CREATE SERVER package_server FOREIGN DATA WRAPPER postgres_fdw -OPTIONS (dbname 'brep_package', updatable 'false'); +OPTIONS (dbname 'brep_package', updatable 'true'); GRANT USAGE ON FOREIGN SERVER package_server to brep; diff --git a/etc/private/install/brep-module.conf b/etc/private/install/brep-module.conf index 0bff58d..bfaa8f6 100644 --- a/etc/private/install/brep-module.conf +++ b/etc/private/install/brep-module.conf @@ -14,6 +14,13 @@ # search-title Packages +# Package search page description. If specified, it is displayed before the +# search form on the first page only. 
The value is treated as an XHTML5 +# fragment. +# +# search-description "" + + # Web page logo. It is displayed in the page header aligned to the left edge. # The value is treated as an XHTML5 fragment. # @@ -112,6 +119,25 @@ menu About=?about # build-bot-agent-keys +# Regular expressions in the /<regex>/<replacement>/ form for transforming the +# interactive build login information, for example, into the actual command +# that can be used by the user. The regular expressions are matched against +# the "<agent> <interactive-login>" string containing the respective task +# request manifest values. The first matching expression is used for the +# transformation. If no expression matches, then the task request is +# considered invalid, unless no expressions are specified. Repeat this option +# to specify multiple expressions. +# +# build-interactive-login + + +# Order in which packages are considered for build. The valid values are +# 'stable' and 'random'. If not specified, then 'stable' is assumed. Note that +# interactive builds are always preferred. +# +#build-package-order stable + + # Number of builds per page. # # build-page-entries 20 @@ -128,16 +154,20 @@ menu About=?about # build-forced-rebuild-timeout 600 -# Time to wait before considering a package for a normal rebuild. Must be -# specified in seconds. Default is 24 hours. +# Time to wait before considering a package for a soft rebuild (only to be +# performed if the build environment or any of the package dependencies have +# changed). Must be specified in seconds. The special zero value disables soft +# rebuilds. Default is 24 hours. # -# build-normal-rebuild-timeout 86400 +# build-soft-rebuild-timeout 86400 -# Alternative package rebuild timeout to use instead of the normal rebuild -# timeout (see the build-normal-rebuild-timeout option for details) during -# the specified time interval. Must be specified in seconds. Default is the -# time interval length. +# Alternative package soft rebuild timeout to use instead of the soft rebuild +# timeout (see the build-soft-rebuild-timeout option for details) during the +# specified time interval. Must be specified in seconds. Default is the time +# interval length plus (build-soft-rebuild-timeout - 24h) if soft rebuild +# timeout is greater than 24 hours (thus the rebuild is only triggered within +# the last 24 hours of the build-soft-rebuild-timeout expiration). # # The alternative rebuild timeout can be used to "pull" the rebuild window to # the specified time of day, for example, to optimize load and/or power @@ -157,9 +187,33 @@ menu About=?about # times must both be either specified or absent. If unspecified, then no # alternative rebuild timeout will be used. # -# build-alt-rebuild-timeout -# build-alt-rebuild-start -# build-alt-rebuild-stop +# build-alt-soft-rebuild-timeout +# build-alt-soft-rebuild-start +# build-alt-soft-rebuild-stop + + +# Time to wait before considering a package for a hard rebuild (to be +# performed unconditionally). Must be specified in seconds. The special zero +# value disables hard rebuilds. Default is 7 days. +# +# build-hard-rebuild-timeout 604800 + + +# Alternative package hard rebuild timeout. The semantics is the same as for +# the build-alt-soft-rebuild-* options but for the build-hard-rebuild-timeout +# option. 
+#
+# build-alt-hard-rebuild-timeout
+# build-alt-hard-rebuild-start
+# build-alt-hard-rebuild-stop
+
+
+# Time to wait before assuming the 'queued' notifications are delivered for
+# package CI requests submitted via third-party services (GitHub, etc). During
+# this time a package is not considered for a build. Must be specified in
+# seconds. Default is 30 seconds.
+#
+# build-queued-timeout 30


# The maximum size of the build task request manifest accepted. Note that the
@@ -183,6 +237,19 @@ menu About=?about
# build-result-request-max-size 10485760


+# Enable or disable package build notification emails in the <name>=<mode>
+# form. The valid <mode> values are 'none', 'latest', and 'all'. If 'all' is
+# specified for a toolchain name, then emails are sent according to the
+# build-*email package manifest values when all versions of a package are
+# built with this toolchain. If 'latest' is specified, then for this toolchain
+# name the emails are only sent for the latest version of a package. If 'none'
+# is specified, then no emails are sent for this toolchain name. By default
+# the 'latest' mode is assumed. Repeat this option to enable/disable emails
+# for multiple toolchains.
+#
+# build-toolchain-email <toolchain-name>=latest|none|all
+
+
# The build database connection configuration. By default, brep will try to
# connect to the local instance of PostgreSQL with the operating system-default
# mechanism (Unix-domain socket, etc) and use operating system (login) user
@@ -211,6 +278,25 @@ menu About=?about
# build-db-retry 10


+# The root directory where the uploaded binary distribution packages are
+# saved to under the following directory hierarchy:
+#
+# [<tenant>/]<distribution>/<os-release>/<project>/<package>/<version>/<package-config>
+#
+# The package configuration directory symlinks that match these paths are
+# mapped to web URLs based on the bindist-url value and displayed on the
+# package version details page. If this option is specified, then bindist-url
+# must be specified as well.
+#
+# bindist-root
+
+
+# The root URL of the directory specified with the bindist-root option. This
+# option must be specified if bindist-root is specified.
+#
+# bindist-url
+
+
# The openssl program to be used for crypto operations. You can also specify
# additional options that should be passed to the openssl program with
# openssl-option. If the openssl program is not explicitly specified, then brep
@@ -289,10 +375,9 @@ submit-form /home/brep/install/share/brep/www/submit.xhtml


# The handler program to be executed on package submission. The handler is
-# executed as part of the submission request and is passed additional
-# arguments that can be specified with submit-handler-argument followed by
-# the absolute path to the submission directory. Note that the program path
-# must be absolute.
+# executed as part of the HTTP request and is passed additional arguments that
+# can be specified with submit-handler-argument followed by the absolute path
+# to the submission directory. Note that the program path must be absolute.
#
submit-handler /home/brep/install/bin/brep-submit-pub

@@ -362,6 +447,66 @@ submit-handler-timeout 120
# ci-handler-timeout


+# The directory to save upload data to for the specified upload type. If
+# unspecified, the build artifacts upload functionality will be disabled for
+# this type.
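+#
+# For example (a hypothetical type and directory):
+#
+# upload-data bindist=/var/brep/bindist-data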
+# +# Note that the directory path must be absolute and the directory itself must +# exist and have read, write, and execute permissions granted to the user that +# runs the web server. +# +# upload-data <type>=<dir> + + +# The maximum size of the upload data accepted for the specified upload type. +# Note that currently the entire upload request is read into memory. The +# default is 10M. +# +# upload-max-size <type>=10485760 + + +# The build artifacts upload email. If specified, the upload request and +# result manifests will be sent to this address. +# +# upload-email <type>=<email> + + +# The handler program to be executed on build artifacts upload of the +# specified type. The handler is executed as part of the HTTP request and is +# passed additional arguments that can be specified with +# upload-handler-argument followed by the absolute path to the upload +# directory (upload-data). Note that the program path must be absolute. +# +# upload-handler <type>=<path> + + +# Additional arguments to be passed to the upload handler program for the +# specified upload type (see upload-handler for details). Repeat this option +# to specify multiple arguments. +# +# upload-handler-argument <type>=<arg> + + +# The upload handler program timeout in seconds for the specified upload type. +# If specified and the handler does not exit in the allotted time, then it is +# killed and its termination is treated as abnormal. +# +# upload-handler-timeout <type>=<seconds> + + +# Disable upload of the specified type for the specified toolchain name. +# Repeat this option to disable uploads for multiple toolchains. +# +# upload-toolchain-exclude <type>=<name> + + +# Disable upload of the specified type for packages from the repository with +# the specified canonical name. Repeat this option to disable uploads for +# multiple repositories. +# +# upload-repository-exclude <type>=<name> + + # The default view to display for the global repository root. The value is one # of the supported services (packages, builds, submit, ci, etc). Default is # packages. diff --git a/etc/systemd/brep-clean.service b/etc/systemd/brep-clean.service index 739a54a..d2e5630 100644 --- a/etc/systemd/brep-clean.service +++ b/etc/systemd/brep-clean.service @@ -1,5 +1,5 @@ [Unit] -Description=brep build database cleaner service +Description=brep build database and artifacts cleaner service [Service] Type=oneshot @@ -7,9 +7,12 @@ Type=oneshot #Group=brep # Run both tenants and builds cleaners if CI request functionality is enabled. +# Also run outdated build artifacts cleaners if build artifacts upload +# functionality is enabled. # #ExecStart=/home/brep/install/bin/brep-clean tenants 240 ExecStart=/home/brep/install/bin/brep-clean builds /home/brep/config/buildtab +#ExecStart=/home/brep/install/bin/brep-upload-bindist-clean /var/bindist 2880 [Install] WantedBy=default.target diff --git a/libbrep/build-extra.sql b/libbrep/build-extra.sql index ddc5961..9e51a51 100644 --- a/libbrep/build-extra.sql +++ b/libbrep/build-extra.sql @@ -6,14 +6,36 @@ -- package-extra.sql file for details. 
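+-- For example (an illustrative ad hoc query), once created, these tables
+-- allow the build database side to read package database data directly:
+--
+--   SELECT id, archived FROM build_tenant;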
-- +DROP FOREIGN TABLE IF EXISTS build_package_config_bot_keys; + +DROP FOREIGN TABLE IF EXISTS build_package_config_auxiliaries; + +DROP FOREIGN TABLE IF EXISTS build_package_config_constraints; + +DROP FOREIGN TABLE IF EXISTS build_package_config_builds; + +DROP FOREIGN TABLE IF EXISTS build_package_configs; + +DROP FOREIGN TABLE IF EXISTS build_package_bot_keys; + +DROP FOREIGN TABLE IF EXISTS build_package_auxiliaries; + DROP FOREIGN TABLE IF EXISTS build_package_constraints; DROP FOREIGN TABLE IF EXISTS build_package_builds; DROP FOREIGN TABLE IF EXISTS build_package_tests; +DROP FOREIGN TABLE IF EXISTS build_package_requirement_alternative_requirements; + +DROP FOREIGN TABLE IF EXISTS build_package_requirement_alternatives; + +DROP FOREIGN TABLE IF EXISTS build_package_requirements; + DROP FOREIGN TABLE IF EXISTS build_package; +DROP FOREIGN TABLE IF EXISTS build_public_key; + DROP FOREIGN TABLE IF EXISTS build_repository; DROP FOREIGN TABLE IF EXISTS build_tenant; @@ -22,7 +44,20 @@ DROP FOREIGN TABLE IF EXISTS build_tenant; -- CREATE FOREIGN TABLE build_tenant ( id TEXT NOT NULL, - archived BOOLEAN NOT NULL) + private BOOLEAN NOT NULL, + interactive TEXT NULL, + archived BOOLEAN NOT NULL, + service_id TEXT NULL, + service_type TEXT NULL, + service_data TEXT NULL, + queued_timestamp BIGINT NULL, + toolchain_name TEXT OPTIONS (column_name 'build_toolchain_name') NULL, + toolchain_version_epoch INTEGER OPTIONS (column_name 'build_toolchain_version_epoch') NULL, + toolchain_version_canonical_upstream TEXT OPTIONS (column_name 'build_toolchain_version_canonical_upstream') NULL, + toolchain_version_canonical_release TEXT OPTIONS (column_name 'build_toolchain_version_canonical_release') NULL, + toolchain_version_revision INTEGER OPTIONS (column_name 'build_toolchain_version_revision') NULL, + toolchain_version_upstream TEXT OPTIONS (column_name 'build_toolchain_version_upstream') NULL, + toolchain_version_release TEXT OPTIONS (column_name 'build_toolchain_version_release') NULL) SERVER package_server OPTIONS (table_name 'tenant'); -- The foreign table for build_repository object. @@ -35,6 +70,14 @@ CREATE FOREIGN TABLE build_repository ( certificate_fingerprint TEXT NULL) SERVER package_server OPTIONS (table_name 'repository'); +-- The foreign table for build_public_key object. +-- +CREATE FOREIGN TABLE build_public_key ( + tenant TEXT NOT NULL, + fingerprint TEXT NOT NULL, + "data" TEXT NOT NULL) +SERVER package_server OPTIONS (table_name 'public_key'); + -- The foreign table for build_package object. -- CREATE FOREIGN TABLE build_package ( @@ -46,11 +89,60 @@ CREATE FOREIGN TABLE build_package ( version_revision INTEGER NOT NULL, version_upstream TEXT NOT NULL, version_release TEXT NULL, + project CITEXT NOT NULL, + build_email TEXT NULL, + build_email_comment TEXT NULL, + build_warning_email TEXT NULL, + build_warning_email_comment TEXT NULL, + build_error_email TEXT NULL, + build_error_email_comment TEXT NULL, internal_repository_tenant TEXT NULL, internal_repository_canonical_name TEXT NULL, - buildable BOOLEAN NOT NULL) + buildable BOOLEAN NOT NULL, + custom_bot BOOLEAN NULL) SERVER package_server OPTIONS (table_name 'package'); +-- The foreign tables for the build_package object requirements member (that +-- is of a 3-dimensional container type). 
+-- +CREATE FOREIGN TABLE build_package_requirements ( + tenant TEXT NOT NULL, + name CITEXT NOT NULL, + version_epoch INTEGER NOT NULL, + version_canonical_upstream TEXT NOT NULL, + version_canonical_release TEXT NOT NULL COLLATE "C", + version_revision INTEGER NOT NULL, + index BIGINT NOT NULL, + buildtime BOOLEAN NOT NULL, + comment TEXT NOT NULL) +SERVER package_server OPTIONS (table_name 'package_requirements'); + +CREATE FOREIGN TABLE build_package_requirement_alternatives ( + tenant TEXT NOT NULL, + name CITEXT NOT NULL, + version_epoch INTEGER NOT NULL, + version_canonical_upstream TEXT NOT NULL, + version_canonical_release TEXT NOT NULL COLLATE "C", + version_revision INTEGER NOT NULL, + requirement_index BIGINT NOT NULL, + index BIGINT NOT NULL, + enable TEXT NULL, + reflect TEXT NULL) +SERVER package_server OPTIONS (table_name 'package_requirement_alternatives'); + +CREATE FOREIGN TABLE build_package_requirement_alternative_requirements ( + tenant TEXT NOT NULL, + name CITEXT NOT NULL, + version_epoch INTEGER NOT NULL, + version_canonical_upstream TEXT NOT NULL, + version_canonical_release TEXT NOT NULL COLLATE "C", + version_revision INTEGER NOT NULL, + requirement_index BIGINT NOT NULL, + alternative_index BIGINT NOT NULL, + index BIGINT NOT NULL, + id TEXT NOT NULL) +SERVER package_server OPTIONS (table_name 'package_requirement_alternative_requirements'); + -- The foreign table for the build_package object tests member (that is of a -- container type). -- @@ -63,12 +155,30 @@ CREATE FOREIGN TABLE build_package_tests ( version_revision INTEGER NOT NULL, index BIGINT NOT NULL, test_name CITEXT NOT NULL, + test_min_version_epoch INTEGER NULL, + test_min_version_canonical_upstream TEXT NULL, + test_min_version_canonical_release TEXT NULL, + test_min_version_revision INTEGER NULL, + test_min_version_upstream TEXT NULL, + test_min_version_release TEXT NULL, + test_max_version_epoch INTEGER NULL, + test_max_version_canonical_upstream TEXT NULL, + test_max_version_canonical_release TEXT NULL, + test_max_version_revision INTEGER NULL, + test_max_version_upstream TEXT NULL, + test_max_version_release TEXT NULL, + test_min_open BOOLEAN NULL, + test_max_open BOOLEAN NULL, test_package_tenant TEXT NULL, test_package_name CITEXT NULL, test_package_version_epoch INTEGER NULL, test_package_version_canonical_upstream TEXT NULL, test_package_version_canonical_release TEXT NULL COLLATE "C", - test_package_version_revision INTEGER NULL) + test_package_version_revision INTEGER NULL, + test_type TEXT NOT NULL, + test_buildtime BOOLEAN NOT NULL, + test_enable TEXT NULL, + test_reflect TEXT NULL) SERVER package_server OPTIONS (table_name 'package_tests'); -- The foreign table for the build_package object builds member (that is of a @@ -102,3 +212,111 @@ CREATE FOREIGN TABLE build_package_constraints ( target TEXT NULL, comment TEXT NOT NULL) SERVER package_server OPTIONS (table_name 'package_build_constraints'); + +-- The foreign table for the build_package object auxiliaries member (that is +-- of a container type). 
+-- +CREATE FOREIGN TABLE build_package_auxiliaries ( + tenant TEXT NOT NULL, + name CITEXT NOT NULL, + version_epoch INTEGER NOT NULL, + version_canonical_upstream TEXT NOT NULL, + version_canonical_release TEXT NOT NULL COLLATE "C", + version_revision INTEGER NOT NULL, + index BIGINT NOT NULL, + environment_name TEXT NOT NULL, + config TEXT NOT NULL, + comment TEXT NOT NULL) +SERVER package_server OPTIONS (table_name 'package_build_auxiliaries'); + +-- The foreign table for the build_package object bot_keys member (that is +-- of a container type). +-- +CREATE FOREIGN TABLE build_package_bot_keys ( + tenant TEXT NOT NULL, + name CITEXT NOT NULL, + version_epoch INTEGER NOT NULL, + version_canonical_upstream TEXT NOT NULL, + version_canonical_release TEXT NOT NULL COLLATE "C", + version_revision INTEGER NOT NULL, + index BIGINT NOT NULL, + key_tenant TEXT NOT NULL, + key_fingerprint TEXT NOT NULL) +SERVER package_server OPTIONS (table_name 'package_build_bot_keys'); + +-- The foreign tables for the build_package object configs member (that is a +-- container of values containing containers. +-- +CREATE FOREIGN TABLE build_package_configs ( + tenant TEXT NOT NULL, + name CITEXT NOT NULL, + version_epoch INTEGER NOT NULL, + version_canonical_upstream TEXT NOT NULL, + version_canonical_release TEXT NOT NULL COLLATE "C", + version_revision INTEGER NOT NULL, + index BIGINT NOT NULL, + config_name TEXT NOT NULL, + config_arguments TEXT NULL, + config_comment TEXT NOT NULL, + config_email TEXT NULL, + config_email_comment TEXT NULL, + config_warning_email TEXT NULL, + config_warning_email_comment TEXT NULL, + config_error_email TEXT NULL, + config_error_email_comment TEXT NULL) +SERVER package_server OPTIONS (table_name 'package_build_configs'); + +CREATE FOREIGN TABLE build_package_config_builds ( + tenant TEXT NOT NULL, + name CITEXT NOT NULL, + version_epoch INTEGER NOT NULL, + version_canonical_upstream TEXT NOT NULL, + version_canonical_release TEXT NOT NULL COLLATE "C", + version_revision INTEGER NOT NULL, + config_index BIGINT NOT NULL, + index BIGINT NOT NULL, + expression TEXT NOT NULL, + comment TEXT NOT NULL) +SERVER package_server OPTIONS (table_name 'package_build_config_builds'); + +CREATE FOREIGN TABLE build_package_config_constraints ( + tenant TEXT NOT NULL, + name CITEXT NOT NULL, + version_epoch INTEGER NOT NULL, + version_canonical_upstream TEXT NOT NULL, + version_canonical_release TEXT NOT NULL COLLATE "C", + version_revision INTEGER NOT NULL, + config_index BIGINT NOT NULL, + index BIGINT NOT NULL, + exclusion BOOLEAN NOT NULL, + config TEXT NOT NULL, + target TEXT NULL, + comment TEXT NOT NULL) +SERVER package_server OPTIONS (table_name 'package_build_config_constraints'); + +CREATE FOREIGN TABLE build_package_config_auxiliaries ( + tenant TEXT NOT NULL, + name CITEXT NOT NULL, + version_epoch INTEGER NOT NULL, + version_canonical_upstream TEXT NOT NULL, + version_canonical_release TEXT NOT NULL COLLATE "C", + version_revision INTEGER NOT NULL, + config_index BIGINT NOT NULL, + index BIGINT NOT NULL, + environment_name TEXT NOT NULL, + config TEXT NOT NULL, + comment TEXT NOT NULL) +SERVER package_server OPTIONS (table_name 'package_build_config_auxiliaries'); + +CREATE FOREIGN TABLE build_package_config_bot_keys ( + tenant TEXT NOT NULL, + name CITEXT NOT NULL, + version_epoch INTEGER NOT NULL, + version_canonical_upstream TEXT NOT NULL, + version_canonical_release TEXT NOT NULL COLLATE "C", + version_revision INTEGER NOT NULL, + config_index BIGINT NOT NULL, + index 
BIGINT NOT NULL, + key_tenant TEXT NOT NULL, + key_fingerprint TEXT NOT NULL) +SERVER package_server OPTIONS (table_name 'package_build_config_bot_keys'); diff --git a/libbrep/build-package.hxx b/libbrep/build-package.hxx index 09ec41d..9a9c277 100644 --- a/libbrep/build-package.hxx +++ b/libbrep/build-package.hxx @@ -5,6 +5,8 @@ #define LIBBREP_BUILD_PACKAGE_HXX #include <odb/core.hxx> +#include <odb/section.hxx> +#include <odb/nested-container.hxx> #include <libbrep/types.hxx> #include <libbrep/utility.hxx> @@ -21,16 +23,23 @@ namespace brep // // The mapping is established in build-extra.sql. We also explicitly mark // non-primary key foreign-mapped members in the source object. - // + // Foreign object that is mapped to a subset of the tenant object. // - #pragma db object table("build_tenant") pointer(shared_ptr) readonly + // Note: table created manually thus assign table name explicitly. + // + #pragma db object table("build_tenant") pointer(shared_ptr) class build_tenant { public: string id; + bool private_; + optional<string> interactive; bool archived; + optional<tenant_service> service; + optional<timestamp> queued_timestamp; + optional<build_toolchain> toolchain; // Database mapping. // @@ -43,6 +52,8 @@ namespace brep // Foreign object that is mapped to a subset of the repository object. // + // Note: table created manually thus assign table name explicitly. + // #pragma db object table("build_repository") pointer(shared_ptr) readonly class build_repository { @@ -68,45 +79,133 @@ namespace brep build_repository (): canonical_name (id.canonical_name) {} }; + // Foreign object that is mapped to a subset of the public key object. + // + // Note: table created manually thus assign table name explicitly. + // + #pragma db object table("build_public_key") pointer(shared_ptr) readonly + class build_public_key: public string + { + public: + public_key_id id; + + // Database mapping. + // + #pragma db member(id) id column("") + + #pragma db member(data) virtual(string) access(this) + + private: + friend class odb::access; + build_public_key () = default; + }; + + // build_package_config + // + using build_package_config = + build_package_config_template<lazy_shared_ptr<build_public_key>>; + + using build_package_configs = + build_package_configs_template<lazy_shared_ptr<build_public_key>>; + + #pragma db value(build_package_config) definition + + #pragma db member(build_package_config::builds) transient + #pragma db member(build_package_config::constraints) transient + #pragma db member(build_package_config::auxiliaries) transient + #pragma db member(build_package_config::bot_keys) transient + + // build_package_bot_keys + // + using build_package_bot_keys = vector<lazy_shared_ptr<build_public_key>>; + using build_package_bot_key_key = odb::nested_key<build_package_bot_keys>; + + using build_package_bot_keys_map = + std::map<build_package_bot_key_key, lazy_shared_ptr<build_public_key>>; + + #pragma db value(build_package_bot_key_key) + #pragma db member(build_package_bot_key_key::outer) column("config_index") + #pragma db member(build_package_bot_key_key::inner) column("index") + // Forward declarations. // class build_package; - // Build package external test dependency. + // Build package dependency. // #pragma db value - struct build_test_dependency + struct build_dependency { package_name name; + optional<version_constraint> constraint; + lazy_shared_ptr<build_package> package; + + // Database mapping. 
+ // + #pragma db member(constraint) column("") + }; + + // Build package external test dependency. + // + #pragma db value + struct build_test_dependency: build_dependency + { + test_dependency_type type; + bool buildtime; + optional<string> enable; + optional<string> reflect; }; // Foreign object that is mapped to a subset of the package object. // + // Note: table created manually thus assign table name explicitly. + // #pragma db object table("build_package") pointer(shared_ptr) readonly session class build_package { public: + using requirements_type = brep::requirements; + package_id id; upstream_version version; - // Mapped to the package object tests member using the PostgreSQL foreign - // table mechanism. + package_name project; + + optional<email> build_email; + optional<email> build_warning_email; + optional<email> build_error_email; + + // Mapped to the package object requirements and tests members using the + // PostgreSQL foreign table mechanism. // + requirements_type requirements; small_vector<build_test_dependency, 1> tests; + odb::section requirements_tests_section; + lazy_shared_ptr<build_repository> internal_repository; bool buildable; + optional<bool> custom_bot; - // Mapped to the package object builds member using the PostgreSQL foreign - // table mechanism. + // Mapped to the package object builds, build_constraints, + // build_auxiliaries, bot_keys, and build_configs members using the + // PostgreSQL foreign table mechanism. // - build_class_exprs builds; + build_class_exprs builds; + build_constraints constraints; + build_auxiliaries auxiliaries; + build_package_bot_keys bot_keys; + build_package_configs configs; - // Mapped to the package object build_constraints member using the - // PostgreSQL foreign table mechanism. + // Group the builds/constraints, auxiliaries, and bot_keys members of this + // object together with their respective nested configs entries into the + // separate sections for an explicit load. Note that the configs top-level + // members are loaded implicitly. // - build_constraints constraints; + odb::section constraints_section; + odb::section auxiliaries_section; + odb::section bot_keys_section; bool internal () const noexcept {return internal_repository != nullptr;} @@ -115,15 +214,133 @@ namespace brep // #pragma db member(id) id column("") #pragma db member(version) set(this.version.init (this.id.version, (?))) - #pragma db member(tests) id_column("") value_column("test_") - #pragma db member(builds) id_column("") value_column("") - #pragma db member(constraints) id_column("") value_column("") + + // requirements + // + // Note that this is a 2-level nested container (see package.hxx for + // details). + // + + // Container of the requirement_alternatives values. + // + #pragma db member(requirements) id_column("") value_column("") \ + section(requirements_tests_section) + + // Container of the requirement_alternative values. + // + #pragma db member(requirement_alternatives) \ + virtual(requirement_alternatives_map) \ + after(requirements) \ + get(odb::nested_get (this.requirements)) \ + set(odb::nested_set (this.requirements, std::move (?))) \ + id_column("") key_column("") value_column("") \ + section(requirements_tests_section) + + // Container of the requirement (string) values. 
+ // + #pragma db member(requirement_alternative_requirements) \ + virtual(requirement_alternative_requirements_map) \ + after(requirement_alternatives) \ + get(odb::nested2_get (this.requirements)) \ + set(odb::nested2_set (this.requirements, std::move (?))) \ + id_column("") key_column("") value_column("id") \ + section(requirements_tests_section) + + // tests + // + #pragma db member(tests) id_column("") value_column("test_") \ + section(requirements_tests_section) + + #pragma db member(requirements_tests_section) load(lazy) update(always) + + // builds, constraints, auxiliaries, and bot_keys + // + #pragma db member(builds) id_column("") value_column("") \ + section(constraints_section) + + #pragma db member(constraints) id_column("") value_column("") \ + section(constraints_section) + + #pragma db member(auxiliaries) id_column("") value_column("") \ + section(auxiliaries_section) + + #pragma db member(bot_keys) id_column("") value_column("key_") \ + section(bot_keys_section) + + // configs + // + // Note that build_package_config::{builds,constraints,auxiliaries,bot_keys} + // are persisted/loaded via the separate nested containers (see + // common.hxx for details). + // + #pragma db member(configs) id_column("") value_column("config_") + + #pragma db member(config_builds) \ + virtual(build_class_exprs_map) \ + after(configs) \ + get(odb::nested_get ( \ + brep::build_package_config_builds (this.configs))) \ + set(brep::build_package_config_builds bs; \ + odb::nested_set (bs, std::move (?)); \ + move (bs).to_configs (this.configs)) \ + id_column("") key_column("") value_column("") \ + section(constraints_section) + + #pragma db member(config_constraints) \ + virtual(build_constraints_map) \ + after(config_builds) \ + get(odb::nested_get ( \ + brep::build_package_config_constraints (this.configs))) \ + set(brep::build_package_config_constraints cs; \ + odb::nested_set (cs, std::move (?)); \ + move (cs).to_configs (this.configs)) \ + id_column("") key_column("") value_column("") \ + section(constraints_section) + + #pragma db member(config_auxiliaries) \ + virtual(build_auxiliaries_map) \ + after(config_constraints) \ + get(odb::nested_get ( \ + brep::build_package_config_auxiliaries (this.configs))) \ + set(brep::build_package_config_auxiliaries as; \ + odb::nested_set (as, std::move (?)); \ + move (as).to_configs (this.configs)) \ + id_column("") key_column("") value_column("") \ + section(auxiliaries_section) + + #pragma db member(config_bot_keys) \ + virtual(build_package_bot_keys_map) \ + after(config_auxiliaries) \ + get(odb::nested_get ( \ + brep::build_package_config_bot_keys< \ + lazy_shared_ptr<brep::build_public_key>> (this.configs))) \ + set(brep::build_package_config_bot_keys< \ + lazy_shared_ptr<brep::build_public_key>> bks; \ + odb::nested_set (bks, std::move (?)); \ + move (bks).to_configs (this.configs)) \ + id_column("") key_column("") value_column("key_") \ + section(bot_keys_section) + + #pragma db member(constraints_section) load(lazy) update(always) + #pragma db member(auxiliaries_section) load(lazy) update(always) + #pragma db member(bot_keys_section) load(lazy) update(always) private: friend class odb::access; build_package () = default; }; + #pragma db view object(build_package) + struct build_package_version + { + package_id id; + upstream_version version; + + // Database mapping. + // + #pragma db member(version) set(this.version.init (this.id.version, (?))) + }; + // Packages that can potentially be built.
// // Note that ADL can't find the equal operator, so we use the function call @@ -138,14 +355,13 @@ namespace brep object(build_tenant: build_package::id.tenant == build_tenant::id) struct buildable_package { - package_id id; - upstream_version version; + shared_ptr<build_package> package; bool archived; // True if the tenant the package belongs to is archived. - // Database mapping. + // Present if the tenant the package belongs to is interactive. // - #pragma db member(version) set(this.version.init (this.id.version, (?))) + optional<string> interactive; }; #pragma db view \ diff --git a/libbrep/build.cxx b/libbrep/build.cxx index db5bda2..13f0818 100644 --- a/libbrep/build.cxx +++ b/libbrep/build.cxx @@ -12,6 +12,7 @@ namespace brep { switch (s) { + case build_state::queued: return "queued"; case build_state::building: return "building"; case build_state::built: return "built"; } @@ -22,9 +23,10 @@ namespace brep build_state to_build_state (const string& s) { - if (s == "building") return build_state::building; + if (s == "queued") return build_state::queued; + else if (s == "building") return build_state::building; else if (s == "built") return build_state::built; - else throw invalid_argument ("invalid build state '" + s + "'"); + else throw invalid_argument ("invalid build state '" + s + '\''); } // force_state @@ -48,7 +50,7 @@ namespace brep if (s == "unforced") return force_state::unforced; else if (s == "forcing") return force_state::forcing; else if (s == "forced") return force_state::forced; - else throw invalid_argument ("invalid force state '" + s + "'"); + else throw invalid_argument ("invalid force state '" + s + '\''); } // build @@ -57,45 +59,191 @@ namespace brep build (string tnt, package_name_type pnm, version pvr, - string cfg, + target_triplet trg, + string tcf, + string pcf, string tnm, version tvr, + optional<string> inr, optional<string> afp, optional<string> ach, - string mnm, string msm, - butl::target_triplet trg) + build_machine mcn, + vector<build_machine> ams, + string ccs, + string mcs) : id (package_id (move (tnt), move (pnm), pvr), - move (cfg), + move (trg), + move (tcf), + move (pcf), move (tnm), tvr), tenant (id.package.tenant), package_name (id.package.name), package_version (move (pvr)), - configuration (id.configuration), + target (id.target), + target_config_name (id.target_config_name), + package_config_name (id.package_config_name), toolchain_name (id.toolchain_name), toolchain_version (move (tvr)), state (build_state::building), + interactive (move (inr)), timestamp (timestamp_type::clock::now ()), force (force_state::unforced), agent_fingerprint (move (afp)), agent_challenge (move (ach)), - machine (move (mnm)), - machine_summary (move (msm)), - target (move (trg)) + machine (move (mcn)), + auxiliary_machines (move (ams)), + controller_checksum (move (ccs)), + machine_checksum (move (mcs)) { } + build:: + build (string tnt, + package_name_type pnm, + version pvr, + target_triplet trg, + string tcf, + string pcf, + string tnm, version tvr) + : id (package_id (move (tnt), move (pnm), pvr), + move (trg), + move (tcf), + move (pcf), + move (tnm), tvr), + tenant (id.package.tenant), + package_name (id.package.name), + package_version (move (pvr)), + target (id.target), + target_config_name (id.target_config_name), + package_config_name (id.package_config_name), + toolchain_name (id.toolchain_name), + toolchain_version (move (tvr)), + state (build_state::queued), + timestamp (timestamp_type::clock::now ()), + force (force_state::unforced) + { + } + 
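For illustration, a minimal sketch of how the new queued-state constructor above might be used by caller code (hypothetical usage, not part of this commit; the tenant, package, configuration, and toolchain values are all made up, and the assertions merely restate the constructor's documented post-conditions):

#include <cassert>

#include <libbrep/build.hxx>

int main ()
{
  // Create a build object in the queued state (see the constructor above).
  //
  brep::build b ("tenant1",                                 // Tenant id.
                 bpkg::package_name ("libfoo"),             // Package name.
                 bpkg::version ("1.2.3"),                   // Package version.
                 butl::target_triplet ("x86_64-linux-gnu"), // Target.
                 "linux_debian_12",                         // Target config.
                 "default",                                 // Package config.
                 "gcc", bpkg::version ("13.1.0"));          // Toolchain.

  // The queued constructor sets the state accordingly, stamps the object
  // with the current time, and leaves the force state unforced.
  //
  assert (b.state == brep::build_state::queued);
  assert (b.force == brep::force_state::unforced);
}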
+ build:: + build (string tnt, + package_name_type pnm, + version pvr, + target_triplet trg, + string tcf, + string pcf, + string tnm, version tvr, + result_status rst, + operation_results ors, + build_machine mcn, + vector<build_machine> ams) + : id (package_id (move (tnt), move (pnm), pvr), + move (trg), + move (tcf), + move (pcf), + move (tnm), tvr), + tenant (id.package.tenant), + package_name (id.package.name), + package_version (move (pvr)), + target (id.target), + target_config_name (id.target_config_name), + package_config_name (id.package_config_name), + toolchain_name (id.toolchain_name), + toolchain_version (move (tvr)), + state (build_state::built), + timestamp (timestamp_type::clock::now ()), + force (force_state::unforced), + status (rst), + soft_timestamp (timestamp), + hard_timestamp (timestamp), + machine (move (mcn)), + auxiliary_machines (move (ams)), + results (move (ors)) + { + } + + build:: + build (build&& b) + : id (move (b.id)), + tenant (id.package.tenant), + package_name (id.package.name), + package_version (move (b.package_version)), + target (id.target), + target_config_name (id.target_config_name), + package_config_name (id.package_config_name), + toolchain_name (id.toolchain_name), + toolchain_version (move (b.toolchain_version)), + state (b.state), + interactive (move (b.interactive)), + timestamp (b.timestamp), + force (b.force), + status (b.status), + soft_timestamp (b.soft_timestamp), + hard_timestamp (b.hard_timestamp), + agent_fingerprint (move (b.agent_fingerprint)), + agent_challenge (move (b.agent_challenge)), + machine (move (b.machine)), + auxiliary_machines (move (b.auxiliary_machines)), + auxiliary_machines_section (move (b.auxiliary_machines_section)), + results (move (b.results)), + results_section (move (b.results_section)), + controller_checksum (move (b.controller_checksum)), + machine_checksum (move (b.machine_checksum)), + agent_checksum (move (b.agent_checksum)), + worker_checksum (move (b.worker_checksum)), + dependency_checksum (move (b.dependency_checksum)) + { + } + + build& build:: + operator= (build&& b) + { + if (this != &b) + { + id = move (b.id); + package_version = move (b.package_version); + toolchain_version = move (b.toolchain_version); + state = b.state; + interactive = move (b.interactive); + timestamp = b.timestamp; + force = b.force; + status = b.status; + soft_timestamp = b.soft_timestamp; + hard_timestamp = b.hard_timestamp; + agent_fingerprint = move (b.agent_fingerprint); + agent_challenge = move (b.agent_challenge); + machine = move (b.machine); + auxiliary_machines = move (b.auxiliary_machines); + auxiliary_machines_section = move (b.auxiliary_machines_section); + results = move (b.results); + results_section = move (b.results_section); + controller_checksum = move (b.controller_checksum); + machine_checksum = move (b.machine_checksum); + agent_checksum = move (b.agent_checksum); + worker_checksum = move (b.worker_checksum); + dependency_checksum = move (b.dependency_checksum); + } + + return *this; + } + // build_delay // build_delay:: build_delay (string tnt, package_name_type pnm, version pvr, - string cfg, + target_triplet trg, + string tcf, + string pcf, string tnm, version tvr, timestamp ptm) : id (package_id (move (tnt), move (pnm), pvr), - move (cfg), + move (trg), + move (tcf), + move (pcf), move (tnm), tvr), tenant (id.package.tenant), package_name (id.package.name), package_version (move (pvr)), - configuration (id.configuration), + target (id.target), + target_config_name (id.target_config_name), + 
package_config_name (id.package_config_name), toolchain_name (id.toolchain_name), toolchain_version (move (tvr)), package_timestamp (ptm) diff --git a/libbrep/build.hxx b/libbrep/build.hxx index 380b17b..af49c03 100644 --- a/libbrep/build.hxx +++ b/libbrep/build.hxx @@ -9,28 +9,30 @@ #include <odb/core.hxx> #include <odb/section.hxx> -#include <libbutl/target-triplet.mxx> - -#include <libbbot/manifest.hxx> - #include <libbrep/types.hxx> #include <libbrep/utility.hxx> -// Must be included last (see assert in libbrep/common.hxx). -// #include <libbrep/common.hxx> #include <libbrep/build-package.hxx> +// Must be included after libbrep/common.hxx, so that the _version structure +// gets defined before libbpkg/manifest.hxx inclusion. +// +// Note that if we start using assert() in get/set expressions in this header, +// we will have to redefine it for the ODB compiler after all include +// directives (see libbrep/common.hxx for details). +// +#include <libbbot/manifest.hxx> + // Used by the data migration entries. // -#define LIBBREP_BUILD_SCHEMA_VERSION_BASE 12 +#define LIBBREP_BUILD_SCHEMA_VERSION_BASE 20 -#pragma db model version(LIBBREP_BUILD_SCHEMA_VERSION_BASE, 12, closed) +#pragma db model version(LIBBREP_BUILD_SCHEMA_VERSION_BASE, 27, closed) -// We have to keep these mappings at the global scope instead of inside -// the brep namespace because they need to be also effective in the -// bbot namespace from which we "borrow" types (and some of them use the mapped -// types). +// We have to keep these mappings at the global scope instead of inside the +// brep namespace because they also need to be effective in the bbot namespace +// from which we "borrow" types (and some of them use the mapped types). // #pragma db map type(bbot::result_status) as(std::string) \ to(to_string (?)) \ @@ -42,14 +44,23 @@ namespace brep struct build_id { package_id package; - string configuration; + target_triplet target; + string target_config_name; + string package_config_name; string toolchain_name; canonical_version toolchain_version; build_id () = default; - build_id (package_id p, string c, string n, const brep::version& v) + build_id (package_id p, + target_triplet t, + string tc, + string pc, + string n, + const brep::version& v) : package (move (p)), - configuration (move (c)), + target (move (t)), + target_config_name (move (tc)), + package_config_name (move (pc)), toolchain_name (move (n)), toolchain_version (v) {} }; @@ -60,7 +71,13 @@ namespace brep if (x.package != y.package) return x.package < y.package; - if (int r = x.configuration.compare (y.configuration)) + if (int r = x.target.compare (y.target)) + return r < 0; + + if (int r = x.target_config_name.compare (y.target_config_name)) + return r < 0; + + if (int r = x.package_config_name.compare (y.package_config_name)) return r < 0; if (int r = x.toolchain_name.compare (y.toolchain_name)) @@ -69,7 +86,7 @@ namespace brep return compare_version_lt (x.toolchain_version, y.toolchain_version, true); } - // These allow comparing objects that have package, configuration, + // These allow comparing objects that have package, configuration, target, // toolchain_name, and toolchain_version data members to build_id values. // The idea is that this works for both query members of build id types as // well as for values of the build_id type.
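For illustration, a hypothetical sketch of constructing and ordering the extended build_id values (not part of this commit; all literal values are made up, and the package_id constructor taking tenant, name, and version is assumed; the equality operators used below follow in the next hunk):

#include <cassert>

#include <libbrep/build.hxx>

int main ()
{
  using namespace brep;

  build_id x (package_id ("tenant1",
                          package_name ("libfoo"),
                          version ("1.2.3")),
              target_triplet ("x86_64-linux-gnu"),
              "linux_debian_12", // Target configuration name.
              "default",         // Package configuration name.
              "gcc",
              version ("13.1.0"));

  // Ids that differ only in the new package_config_name component are
  // distinct and are ordered by that component (see operator< above).
  //
  build_id y (x);
  y.package_config_name = "static";

  assert (x != y);
  assert (x < y); // "default" collates before "static".
}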
@@ -77,35 +94,84 @@ namespace brep template <typename T> inline auto operator== (const T& x, const build_id& y) - -> decltype (x.package == y.package && - x.configuration == y.configuration && - x.toolchain_name == y.toolchain_name && + -> decltype (x.package == y.package && + x.target == y.target && + x.target_config_name == y.target_config_name && + x.package_config_name == y.package_config_name && + x.toolchain_name == y.toolchain_name && x.toolchain_version.epoch == y.toolchain_version.epoch) { - return x.package == y.package && - x.configuration == y.configuration && - x.toolchain_name == y.toolchain_name && + return x.package == y.package && + x.target == y.target && + x.target_config_name == y.target_config_name && + x.package_config_name == y.package_config_name && + x.toolchain_name == y.toolchain_name && compare_version_eq (x.toolchain_version, y.toolchain_version, true); } template <typename T> inline auto operator!= (const T& x, const build_id& y) - -> decltype (x.package == y.package && - x.configuration == y.configuration && - x.toolchain_name == y.toolchain_name && + -> decltype (x.package == y.package && + x.target == y.target && + x.target_config_name == y.target_config_name && + x.package_config_name == y.package_config_name && + x.toolchain_name == y.toolchain_name && x.toolchain_version.epoch == y.toolchain_version.epoch) { - return x.package != y.package || - x.configuration != y.configuration || - x.toolchain_name != y.toolchain_name || + return x.package != y.package || + x.target != y.target || + x.target_config_name != y.target_config_name || + x.package_config_name != y.package_config_name || + x.toolchain_name != y.toolchain_name || compare_version_ne (x.toolchain_version, y.toolchain_version, true); } + // Allow comparing the query members with the query parameters bound by + // reference to variables of the build id type (in particular in the + // prepared queries). + // + // Note that it is not operator==() since the query template parameter type + // can not be deduced from the function parameter types and needs to be + // specified explicitly. + // + template <typename T, typename ID> + inline auto + equal (const ID& x, const build_id& y, bool toolchain_version = true) + -> decltype (x.package.tenant == odb::query<T>::_ref (y.package.tenant) && + x.package.name == odb::query<T>::_ref (y.package.name) && + x.package.version.epoch == + odb::query<T>::_ref (y.package.version.epoch) && + x.target_config_name == + odb::query<T>::_ref (y.target_config_name) && + x.toolchain_name == odb::query<T>::_ref (y.toolchain_name) && + x.toolchain_version.epoch == + odb::query<T>::_ref (y.toolchain_version.epoch)) + { + using query = odb::query<T>; + + query r (equal<T> (x.package, y.package) && + x.target == query::_ref (y.target) && + x.target_config_name == query::_ref (y.target_config_name) && + x.package_config_name == query::_ref (y.package_config_name) && + x.toolchain_name == query::_ref (y.toolchain_name)); + + if (toolchain_version) + r = r && equal<T> (x.toolchain_version, y.toolchain_version); + + return r; + } + // build_state // + // The queued build state is semantically equivalent to a non-existent + // build. It is only used for those tenants which have an associated + // third-party service that requires the `queued` notifications (see + // mod/tenant-service.hxx for background). + // enum class build_state: std::uint8_t { + queued, building, built }; @@ -157,12 +223,6 @@ namespace brep ?
bbot::to_result_status (*(?)) \ : brep::optional_result_status ()) - // target_triplet - // - #pragma db map type(butl::target_triplet) as(string) \ - to((?).string ()) \ - from(butl::target_triplet (?)) - // operation_results // using bbot::operation_result; @@ -170,6 +230,13 @@ namespace brep using bbot::operation_results; + #pragma db value + struct build_machine + { + string name; + string summary; + }; + #pragma db object pointer(shared_ptr) session class build { @@ -178,29 +245,72 @@ namespace brep using package_name_type = brep::package_name; // Create the build object with the building state, non-existent status, - // the timestamp set to now and the force state set to unforced. + // the timestamp set to now, and the force state set to unforced. // build (string tenant, - package_name_type, - version, - string configuration, + package_name_type, version, + target_triplet, + string target_config_name, + string package_config_name, string toolchain_name, version toolchain_version, + optional<string> interactive, optional<string> agent_fingerprint, optional<string> agent_challenge, - string machine, string machine_summary, - butl::target_triplet); + build_machine, + vector<build_machine> auxiliary_machines, + string controller_checksum, + string machine_checksum); + + // Create the build object with the queued state. + // + build (string tenant, + package_name_type, version, + target_triplet, + string target_config_name, + string package_config_name, + string toolchain_name, version toolchain_version); + + // Create the build object with the built state, the specified status and + // operation results, all the timestamps set to now, and the force state + // set to unforced. + // + build (string tenant, + package_name_type, version, + target_triplet, + string target_config_name, + string package_config_name, + string toolchain_name, version toolchain_version, + result_status, + operation_results, + build_machine, + vector<build_machine> auxiliary_machines = {}); + + // Move-only type. + // + build (build&&); + build& operator= (build&&); + + build (const build&) = delete; + build& operator= (const build&) = delete; build_id id; string& tenant; // Tracks id.package.tenant. package_name_type& package_name; // Tracks id.package.name. upstream_version package_version; // Original of id.package.version. - string& configuration; // Tracks id.configuration. + target_triplet& target; // Tracks id.target. + string& target_config_name; // Tracks id.target_config_name. + string& package_config_name; // Tracks id.package_config_name. string& toolchain_name; // Tracks id.toolchain_name. upstream_version toolchain_version; // Original of id.toolchain_version. build_state state; + // If present, the login information for the interactive build. May be + // present only in the building state. + // + optional<string> interactive; + // Time of the last state change (the creation time initially). // timestamp_type timestamp; @@ -212,24 +322,35 @@ namespace brep // optional<result_status> status; - // Time of setting the result status that can be considered as the build - // task completion (currently all the result_status values). Initialized - // with timestamp_nonexistent by default. + // Times of the last soft/hard completed (re)builds. Used to decide when + // to perform soft and hard rebuilds, respectively. + // + // The soft timestamp is updated whenever we receive a task result. 
// - // Note that in the future we may not consider abort and abnormal as the - // task completion and, for example, proceed with automatic rebuild (the - // flake monitor idea). + // The hard timestamp is updated whenever we receive a task result with + // a status other than skip. // - timestamp_type completion_timestamp; + // Also note that whenever hard_timestamp is updated, soft_timestamp is + // updated as well and whenever soft_timestamp is updated, timestamp is + // updated as well. Thus the following condition is always true: + // + // hard_timestamp <= soft_timestamp <= timestamp + // + // Note that the "completed" above means that we may analyze the task + // result/log and deem it as not completed and proceed with automatic + // rebuild (the flake monitor idea). + // + timestamp_type soft_timestamp; + timestamp_type hard_timestamp; // May be present only for the building state. // optional<string> agent_fingerprint; optional<string> agent_challenge; - string machine; - string machine_summary; - butl::target_triplet target; + build_machine machine; + vector<build_machine> auxiliary_machines; + odb::section auxiliary_machines_section; // Note that the logs are stored as std::string/TEXT which is Ok since // they are UTF-8 and our database is UTF-8. @@ -237,6 +358,21 @@ namespace brep operation_results results; odb::section results_section; + // Checksums of entities involved in the build. + // + // Optional checksums are provided by the external entities (agent and + // worker). All are absent initially. + // + // Note that the agent checksum can also be absent after the hard rebuild + // task is issued and the worker and dependency checksums - after a failed + // rebuild (error result status or worse). + // + string controller_checksum; + string machine_checksum; + optional<string> agent_checksum; + optional<string> worker_checksum; + optional<string> dependency_checksum; + // Database mapping. // #pragma db member(id) id column("") @@ -245,7 +381,9 @@ namespace brep #pragma db member(package_name) transient #pragma db member(package_version) \ set(this.package_version.init (this.id.package.version, (?))) - #pragma db member(configuration) transient + #pragma db member(target) transient + #pragma db member(target_config_name) transient + #pragma db member(package_config_name) transient #pragma db member(toolchain_name) transient #pragma db member(toolchain_version) \ set(this.toolchain_version.init (this.id.toolchain_version, (?))) @@ -254,30 +392,33 @@ namespace brep // #pragma db member(timestamp) index - // This is not required since 0.14.0. Note however, that just dropping - // this line won't pan out since this would require migration which odb is - // currently unable to handle automatically, advising to re-implement this - // change by adding a new data member with the desired default value, - // migrating the data, and deleting the old data member. This sounds a bit - // hairy, so let's keep it for now. 
- // - #pragma db member(completion_timestamp) default(0) + #pragma db member(machine) transient + + #pragma db member(machine_name) virtual(std::string) \ + access(machine.name) column("machine") + + #pragma db member(machine_summary) virtual(std::string) \ + access(machine.summary) + + #pragma db member(auxiliary_machines) id_column("") value_column("") \ + section(auxiliary_machines_section) + + #pragma db member(auxiliary_machines_section) load(lazy) update(always) #pragma db member(results) id_column("") value_column("") \ section(results_section) #pragma db member(results_section) load(lazy) update(always) - build (const build&) = delete; - build& operator= (const build&) = delete; - private: friend class odb::access; build () : tenant (id.package.tenant), package_name (id.package.name), - configuration (id.configuration), + target (id.target), + target_config_name (id.target_config_name), + package_config_name (id.package_config_name), toolchain_name (id.toolchain_name) {} }; @@ -328,7 +469,7 @@ namespace brep canonical_version version_; }; - // Build of an existing buildable package. + // Builds of existing buildable packages. // #pragma db view \ object(build) \ @@ -339,6 +480,7 @@ namespace brep struct package_build { shared_ptr<brep::build> build; + bool archived; // True if the tenant the build belongs to is archived. }; #pragma db view \ @@ -358,6 +500,19 @@ namespace brep #pragma db member(result) column("count(" + build::id.package.name + ")") }; + // Ids of existing buildable package builds. + // + #pragma db view object(build) \ + object(build_package inner: \ + brep::operator== (build::id.package, build_package::id) && \ + build_package::buildable) + struct package_build_id + { + build_id id; + + operator build_id& () {return id;} + }; + // Used to track the package build delays since the last build or, if not // present, since the first opportunity to build the package. // @@ -372,7 +527,9 @@ namespace brep // build_delay (string tenant, package_name_type, version, - string configuration, + target_triplet, + string target_config_name, + string package_config_name, string toolchain_name, version toolchain_version, timestamp package_timestamp); @@ -381,14 +538,20 @@ namespace brep string& tenant; // Tracks id.package.tenant. package_name_type& package_name; // Tracks id.package.name. upstream_version package_version; // Original of id.package.version. - string& configuration; // Tracks id.configuration. + target_triplet& target; // Tracks id.target. + string& target_config_name; // Tracks id.target_config_name. + string& package_config_name; // Tracks id.package_config_name. string& toolchain_name; // Tracks id.toolchain_name. upstream_version toolchain_version; // Original of id.toolchain_version. - // Time of the latest delay report. Initialized with timestamp_nonexistent - // by default. + // Times of the latest soft and hard rebuild delay reports. Initialized + // with timestamp_nonexistent by default. + // + // Note that both reports notify about initial build delays (at their + // respective time intervals). // - timestamp report_timestamp; + timestamp report_soft_timestamp; + timestamp report_hard_timestamp; // Time when the package is initially considered as buildable for this // configuration and toolchain. 
It is used to track the build delay if the @@ -405,7 +568,9 @@ #pragma db member(package_name) transient #pragma db member(package_version) \ set(this.package_version.init (this.id.package.version, (?))) - #pragma db member(configuration) transient + #pragma db member(target) transient + #pragma db member(target_config_name) transient + #pragma db member(package_config_name) transient #pragma db member(toolchain_name) transient #pragma db member(toolchain_version) \ set(this.toolchain_version.init (this.id.toolchain_version, (?))) @@ -416,7 +581,9 @@ build_delay () : tenant (id.package.tenant), package_name (id.package.name), - configuration (id.configuration), + target (id.target), + target_config_name (id.target_config_name), + package_config_name (id.package_config_name), toolchain_name (id.toolchain_name) {} }; } diff --git a/libbrep/build.xml b/libbrep/build.xml index 3af7640..1eba85a 100644 --- a/libbrep/build.xml +++ b/libbrep/build.xml @@ -1,5 +1,90 @@ <changelog xmlns="http://www.codesynthesis.com/xmlns/odb/changelog" database="pgsql" schema-name="build" version="1"> - <model version="12"> + <changeset version="27"/> + + <changeset version="26"/> + + <changeset version="25"> + <add-table name="build_auxiliary_machines" kind="container"> + <column name="package_tenant" type="TEXT" null="false"/> + <column name="package_name" type="CITEXT" null="false"/> + <column name="package_version_epoch" type="INTEGER" null="false"/> + <column name="package_version_canonical_upstream" type="TEXT" null="false"/> + <column name="package_version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/> + <column name="package_version_revision" type="INTEGER" null="false"/> + <column name="target" type="TEXT" null="false"/> + <column name="target_config_name" type="TEXT" null="false"/> + <column name="package_config_name" type="TEXT" null="false"/> + <column name="toolchain_name" type="TEXT" null="false"/> + <column name="toolchain_version_epoch" type="INTEGER" null="false"/> + <column name="toolchain_version_canonical_upstream" type="TEXT" null="false"/> + <column name="toolchain_version_canonical_release" type="TEXT" null="false" options="COLLATE &quot;C&quot;"/> + <column name="toolchain_version_revision" type="INTEGER" null="false"/> + <column name="index" type="BIGINT" null="false"/> + <column name="name" type="TEXT" null="false"/> + <column name="summary" type="TEXT" null="false"/> + <foreign-key name="object_id_fk" on-delete="CASCADE"> + <column name="package_tenant"/> + <column name="package_name"/> + <column name="package_version_epoch"/> + <column name="package_version_canonical_upstream"/> + <column name="package_version_canonical_release"/> + <column name="package_version_revision"/> + <column name="target"/> + <column name="target_config_name"/> + <column name="package_config_name"/> + <column name="toolchain_name"/> + <column name="toolchain_version_epoch"/> + <column name="toolchain_version_canonical_upstream"/> + <column name="toolchain_version_canonical_release"/> + <column name="toolchain_version_revision"/> + <references table="build"> + <column name="package_tenant"/> + <column name="package_name"/> + <column name="package_version_epoch"/> + <column name="package_version_canonical_upstream"/> + <column name="package_version_canonical_release"/> + <column name="package_version_revision"/> + <column name="target"/> + <column name="target_config_name"/> + <column name="package_config_name"/> + <column name="toolchain_name"/> + <column
name="toolchain_version_epoch"/> + <column name="toolchain_version_canonical_upstream"/> + <column name="toolchain_version_canonical_release"/> + <column name="toolchain_version_revision"/> + </references> + </foreign-key> + <index name="build_auxiliary_machines_object_id_i"> + <column name="package_tenant"/> + <column name="package_name"/> + <column name="package_version_epoch"/> + <column name="package_version_canonical_upstream"/> + <column name="package_version_canonical_release"/> + <column name="package_version_revision"/> + <column name="target"/> + <column name="target_config_name"/> + <column name="package_config_name"/> + <column name="toolchain_name"/> + <column name="toolchain_version_epoch"/> + <column name="toolchain_version_canonical_upstream"/> + <column name="toolchain_version_canonical_release"/> + <column name="toolchain_version_revision"/> + </index> + <index name="build_auxiliary_machines_index_i"> + <column name="index"/> + </index> + </add-table> + </changeset> + + <changeset version="24"/> + + <changeset version="23"/> + + <changeset version="22"/> + + <changeset version="21"/> + + <model version="20"> <table name="build" kind="object"> <column name="package_tenant" type="TEXT" null="false"/> <column name="package_name" type="CITEXT" null="false"/> @@ -7,7 +92,9 @@ <column name="package_version_canonical_upstream" type="TEXT" null="false"/> <column name="package_version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> <column name="package_version_revision" type="INTEGER" null="false"/> - <column name="configuration" type="TEXT" null="false"/> + <column name="target" type="TEXT" null="false"/> + <column name="target_config_name" type="TEXT" null="false"/> + <column name="package_config_name" type="TEXT" null="false"/> <column name="toolchain_name" type="TEXT" null="false"/> <column name="toolchain_version_epoch" type="INTEGER" null="false"/> <column name="toolchain_version_canonical_upstream" type="TEXT" null="false"/> @@ -18,15 +105,21 @@ <column name="toolchain_version_upstream" type="TEXT" null="false"/> <column name="toolchain_version_release" type="TEXT" null="true"/> <column name="state" type="TEXT" null="false"/> + <column name="interactive" type="TEXT" null="true"/> <column name="timestamp" type="BIGINT" null="false"/> <column name="force" type="TEXT" null="false"/> <column name="status" type="TEXT" null="true"/> - <column name="completion_timestamp" type="BIGINT" null="false" default="0"/> + <column name="soft_timestamp" type="BIGINT" null="false"/> + <column name="hard_timestamp" type="BIGINT" null="false"/> <column name="agent_fingerprint" type="TEXT" null="true"/> <column name="agent_challenge" type="TEXT" null="true"/> <column name="machine" type="TEXT" null="false"/> <column name="machine_summary" type="TEXT" null="false"/> - <column name="target" type="TEXT" null="false"/> + <column name="controller_checksum" type="TEXT" null="false"/> + <column name="machine_checksum" type="TEXT" null="false"/> + <column name="agent_checksum" type="TEXT" null="true"/> + <column name="worker_checksum" type="TEXT" null="true"/> + <column name="dependency_checksum" type="TEXT" null="true"/> <primary-key> <column name="package_tenant"/> <column name="package_name"/> @@ -34,7 +127,9 @@ <column name="package_version_canonical_upstream"/> <column name="package_version_canonical_release"/> <column name="package_version_revision"/> - <column name="configuration"/> + <column name="target"/> + <column name="target_config_name"/> + <column 
name="package_config_name"/> <column name="toolchain_name"/> <column name="toolchain_version_epoch"/> <column name="toolchain_version_canonical_upstream"/> @@ -52,7 +147,9 @@ <column name="package_version_canonical_upstream" type="TEXT" null="false"/> <column name="package_version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> <column name="package_version_revision" type="INTEGER" null="false"/> - <column name="configuration" type="TEXT" null="false"/> + <column name="target" type="TEXT" null="false"/> + <column name="target_config_name" type="TEXT" null="false"/> + <column name="package_config_name" type="TEXT" null="false"/> <column name="toolchain_name" type="TEXT" null="false"/> <column name="toolchain_version_epoch" type="INTEGER" null="false"/> <column name="toolchain_version_canonical_upstream" type="TEXT" null="false"/> @@ -69,7 +166,9 @@ <column name="package_version_canonical_upstream"/> <column name="package_version_canonical_release"/> <column name="package_version_revision"/> - <column name="configuration"/> + <column name="target"/> + <column name="target_config_name"/> + <column name="package_config_name"/> <column name="toolchain_name"/> <column name="toolchain_version_epoch"/> <column name="toolchain_version_canonical_upstream"/> @@ -82,7 +181,9 @@ <column name="package_version_canonical_upstream"/> <column name="package_version_canonical_release"/> <column name="package_version_revision"/> - <column name="configuration"/> + <column name="target"/> + <column name="target_config_name"/> + <column name="package_config_name"/> <column name="toolchain_name"/> <column name="toolchain_version_epoch"/> <column name="toolchain_version_canonical_upstream"/> @@ -97,7 +198,9 @@ <column name="package_version_canonical_upstream"/> <column name="package_version_canonical_release"/> <column name="package_version_revision"/> - <column name="configuration"/> + <column name="target"/> + <column name="target_config_name"/> + <column name="package_config_name"/> <column name="toolchain_name"/> <column name="toolchain_version_epoch"/> <column name="toolchain_version_canonical_upstream"/> @@ -115,7 +218,9 @@ <column name="package_version_canonical_upstream" type="TEXT" null="false"/> <column name="package_version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> <column name="package_version_revision" type="INTEGER" null="false"/> - <column name="configuration" type="TEXT" null="false"/> + <column name="target" type="TEXT" null="false"/> + <column name="target_config_name" type="TEXT" null="false"/> + <column name="package_config_name" type="TEXT" null="false"/> <column name="toolchain_name" type="TEXT" null="false"/> <column name="toolchain_version_epoch" type="INTEGER" null="false"/> <column name="toolchain_version_canonical_upstream" type="TEXT" null="false"/> @@ -125,7 +230,8 @@ <column name="package_version_release" type="TEXT" null="true"/> <column name="toolchain_version_upstream" type="TEXT" null="false"/> <column name="toolchain_version_release" type="TEXT" null="true"/> - <column name="report_timestamp" type="BIGINT" null="false"/> + <column name="report_soft_timestamp" type="BIGINT" null="false"/> + <column name="report_hard_timestamp" type="BIGINT" null="false"/> <column name="package_timestamp" type="BIGINT" null="false"/> <primary-key> <column name="package_tenant"/> @@ -134,7 +240,9 @@ <column name="package_version_canonical_upstream"/> <column name="package_version_canonical_release"/> <column name="package_version_revision"/> - <column 
name="configuration"/> + <column name="target"/> + <column name="target_config_name"/> + <column name="package_config_name"/> <column name="toolchain_name"/> <column name="toolchain_version_epoch"/> <column name="toolchain_version_canonical_upstream"/> diff --git a/libbrep/common-traits.hxx b/libbrep/common-traits.hxx index 99e8f3e..141a738 100644 --- a/libbrep/common-traits.hxx +++ b/libbrep/common-traits.hxx @@ -10,12 +10,50 @@ #include <odb/pgsql/traits.hxx> +#include <libbutl/target-triplet.hxx> + #include <libbpkg/package-name.hxx> namespace odb { namespace pgsql { + // target_triplet + // + template <> + class value_traits<butl::target_triplet, id_string>: + value_traits<std::string, id_string> + { + public: + using value_type = butl::target_triplet; + using query_type = butl::target_triplet; + using image_type = details::buffer; + + using base_type = value_traits<std::string, id_string>; + + static void + set_value (value_type& v, + const details::buffer& b, + std::size_t n, + bool is_null) + { + std::string s; + base_type::set_value (s, b, n, is_null); + v = !s.empty () ? value_type (s) : value_type (); + } + + static void + set_image (details::buffer& b, + std::size_t& n, + bool& is_null, + const value_type& v) + { + base_type::set_image (b, n, is_null, v.string ()); + } + }; + + // package_name + // template <> class value_traits<bpkg::package_name, id_string>: value_traits<std::string, id_string> diff --git a/libbrep/common.cxx b/libbrep/common.cxx index 8964e0a..4f729a3 100644 --- a/libbrep/common.cxx +++ b/libbrep/common.cxx @@ -30,6 +30,6 @@ namespace brep else if (r == "test") return unbuildable_reason::test; else if (r == "external") return unbuildable_reason::external; else if (r == "unbuildable") return unbuildable_reason::unbuildable; - else throw invalid_argument ("invalid unbuildable reason '" + r + "'"); + else throw invalid_argument ("invalid unbuildable reason '" + r + '\''); } } diff --git a/libbrep/common.hxx b/libbrep/common.hxx index 73353c7..1433c8c 100644 --- a/libbrep/common.hxx +++ b/libbrep/common.hxx @@ -4,11 +4,15 @@ #ifndef LIBBREP_COMMON_HXX #define LIBBREP_COMMON_HXX +#include <map> #include <ratio> #include <chrono> #include <type_traits> // static_assert #include <odb/query.hxx> +#include <odb/nested-container.hxx> + +#include <libbutl/target-triplet.hxx> #include <libbpkg/package-name.hxx> @@ -113,7 +117,7 @@ namespace brep std::chrono::nanoseconds::period>::value, "The following timestamp ODB mapping is invalid"); - // As it pointed out in libbutl/timestamp.mxx we will overflow in year 2262, + // As it pointed out in libbutl/timestamp.hxx we will overflow in year 2262, // but by that time some larger basic type will be available for mapping. // #pragma db map type(timestamp) as(uint64_t) \ @@ -123,6 +127,20 @@ namespace brep std::chrono::duration_cast<brep::timestamp::duration> ( \ std::chrono::nanoseconds (?)))) + using optional_timestamp = optional<timestamp>; + using optional_uint64 = optional<uint64_t>; + + #pragma db map type(optional_timestamp) as(brep::optional_uint64) \ + to((?) \ + ? std::chrono::duration_cast<std::chrono::nanoseconds> ( \ + (?)->time_since_epoch ()).count () \ + : brep::optional_uint64 ()) \ + from((?) \ + ? 
brep::timestamp ( \ + std::chrono::duration_cast<brep::timestamp::duration> ( \ + std::chrono::nanoseconds (*(?)))) \ + : brep::optional_timestamp ()) + // version // using bpkg::version; @@ -228,6 +246,12 @@ namespace brep // extern const version wildcard_version; + // target_triplet + // + using butl::target_triplet; + + #pragma db value(target_triplet) type("TEXT") + // package_name // using bpkg::package_name; @@ -302,6 +326,19 @@ namespace brep : tenant (move (t)), canonical_name (move (n)) {} }; + // public_key_id + // + #pragma db value + struct public_key_id + { + string tenant; + string fingerprint; + + public_key_id () = default; + public_key_id (string t, string f) + : tenant (move (t)), fingerprint (move (f)) {} + }; + // build_class_expr // using bpkg::build_class_expr; @@ -323,6 +360,223 @@ namespace brep #pragma db value(build_constraint) definition + // build_auxiliaries + // + using bpkg::build_auxiliary; + using build_auxiliaries = vector<build_auxiliary>; + + #pragma db value(build_auxiliary) definition + + // build_toolchain + // + #pragma db value + struct build_toolchain + { + string name; + brep::version version; + }; + + // email + // + using bpkg::email; + + #pragma db value(email) definition + #pragma db member(email::value) virtual(string) before access(this) column("") + + // build_package_config_template + // + using bpkg::build_package_config_template; + + // 1 for the default configuration which is always present. + // + template <typename K> + using build_package_configs_template = + small_vector<build_package_config_template<K>, 1>; + + // Return the address of the configuration object with the specified name, + // if present, and NULL otherwise. + // + template <typename K> + inline build_package_config_template<K>* + find (const string& name, build_package_configs_template<K>& cs) + { + auto i (find_if (cs.begin (), cs.end (), + [&name] (const build_package_config_template<K>& c) + {return c.name == name;})); + + return i != cs.end () ? &*i : nullptr; + } + + // Note that ODB doesn't support containers of value types which contain + // containers. Thus, we will persist/load + // build_package_config_template<K>::{builds,constraint,auxiliaries,bot_keys} + // via the separate nested containers using the adapter classes. + // + + // build_package_config_template<K>::builds + // + using build_class_expr_key = odb::nested_key<build_class_exprs>; + using build_class_exprs_map = std::map<build_class_expr_key, build_class_expr>; + + #pragma db value(build_class_expr_key) + #pragma db member(build_class_expr_key::outer) column("config_index") + #pragma db member(build_class_expr_key::inner) column("index") + + // Adapter for build_package_config_template<K>::builds. + // + // Note: 1 as for build_package_configs_template. + // + class build_package_config_builds: public small_vector<build_class_exprs, 1> + { + public: + build_package_config_builds () = default; + + template <typename K> + explicit + build_package_config_builds (const build_package_configs_template<K>& cs) + { + reserve (cs.size ()); + for (const build_package_config_template<K>& c: cs) + push_back (c.builds); + } + + template <typename K> + void + to_configs (build_package_configs_template<K>& cs) && + { + // Note that the empty trailing entries will be missing (see ODB's + // nested-container.hxx for details). 
+ // + assert (size () <= cs.size ()); + + auto i (cs.begin ()); + for (build_class_exprs& ces: *this) + i++->builds = move (ces); + } + }; + + // build_package_config_template<K>::constraints + // + using build_constraint_key = odb::nested_key<build_constraints>; + using build_constraints_map = std::map<build_constraint_key, build_constraint>; + + #pragma db value(build_constraint_key) + #pragma db member(build_constraint_key::outer) column("config_index") + #pragma db member(build_constraint_key::inner) column("index") + + // Adapter for build_package_config_template<K>::constraints. + // + // Note: 1 as for build_package_configs_template. + // + class build_package_config_constraints: + public small_vector<build_constraints, 1> + { + public: + build_package_config_constraints () = default; + + template <typename K> + explicit + build_package_config_constraints ( + const build_package_configs_template<K>& cs) + { + reserve (cs.size ()); + for (const build_package_config_template<K>& c: cs) + push_back (c.constraints); + } + + template <typename K> + void + to_configs (build_package_configs_template<K>& cs) && + { + // Note that the empty trailing entries will be missing (see ODB's + // nested-container.hxx for details). + // + assert (size () <= cs.size ()); + + auto i (cs.begin ()); + for (build_constraints& bcs: *this) + i++->constraints = move (bcs); + } + }; + + // build_package_config_template<K>::auxiliaries + // + using build_auxiliary_key = odb::nested_key<build_auxiliaries>; + using build_auxiliaries_map = std::map<build_auxiliary_key, build_auxiliary>; + + #pragma db value(build_auxiliary_key) + #pragma db member(build_auxiliary_key::outer) column("config_index") + #pragma db member(build_auxiliary_key::inner) column("index") + + // Adapter for build_package_config_template<K>::auxiliaries. + // + // Note: 1 as for build_package_configs_template. + // + class build_package_config_auxiliaries: + public small_vector<build_auxiliaries, 1> + { + public: + build_package_config_auxiliaries () = default; + + template <typename K> + explicit + build_package_config_auxiliaries ( + const build_package_configs_template<K>& cs) + { + reserve (cs.size ()); + for (const build_package_config_template<K>& c: cs) + push_back (c.auxiliaries); + } + + template <typename K> + void + to_configs (build_package_configs_template<K>& cs) && + { + // Note that the empty trailing entries will be missing (see ODB's + // nested-container.hxx for details). + // + assert (size () <= cs.size ()); + + auto i (cs.begin ()); + for (build_auxiliaries& bas: *this) + i++->auxiliaries = move (bas); + } + }; + + // build_package_config_template<K>::bot_keys + // + // Adapter for build_package_config_template<K>::bot_keys. + // + // Note: 1 as for build_package_configs_template. + // + template <typename K> + class build_package_config_bot_keys: public small_vector<vector<K>, 1> + { + public: + build_package_config_bot_keys () = default; + + explicit + build_package_config_bot_keys (const build_package_configs_template<K>& cs) + { + this->reserve (cs.size ()); + for (const build_package_config_template<K>& c: cs) + this->push_back (c.bot_keys); + } + + void + to_configs (build_package_configs_template<K>& cs) && + { + // Note that the empty trailing entries will be missing (see ODB's + // nested-container.hxx for details). 
+ // + assert (this->size () <= cs.size ()); + + auto i (cs.begin ()); + for (vector<K>& bks: *this) + i++->bot_keys = move (bks); + } + }; + // The primary reason why a package is unbuildable by the build bot // controller service. // @@ -355,15 +609,77 @@ namespace brep ? brep::to_unbuildable_reason (*(?)) \ : brep::optional_unbuildable_reason ()) \ + // version_constraint + // + using bpkg::version_constraint; + + #pragma db value(version_constraint) definition + + // test_dependency_type + // + using bpkg::test_dependency_type; + using bpkg::to_test_dependency_type; + + #pragma db map type(test_dependency_type) as(string) \ + to(to_string (?)) \ + from(brep::to_test_dependency_type (?)) + + // requirements + // + // Note that this is a 2-level nested container (see package.hxx for + // details). + // + using bpkg::requirement_alternative; + using bpkg::requirement_alternatives; + using requirements = vector<requirement_alternatives>; + + #pragma db value(requirement_alternative) definition + #pragma db value(requirement_alternatives) definition + + using requirement_alternative_key = + odb::nested_key<requirement_alternatives>; + + using requirement_alternatives_map = + std::map<requirement_alternative_key, requirement_alternative>; + + #pragma db value(requirement_alternative_key) + #pragma db member(requirement_alternative_key::outer) column("requirement_index") + #pragma db member(requirement_alternative_key::inner) column("index") + + using requirement_key = odb::nested2_key<requirement_alternatives>; + + using requirement_alternative_requirements_map = + std::map<requirement_key, string>; + + #pragma db value(requirement_key) + #pragma db member(requirement_key::outer) column("requirement_index") + #pragma db member(requirement_key::middle) column("alternative_index") + #pragma db member(requirement_key::inner) column("index") + + // Third-party service state which may optionally be associated with a + // tenant (see also mod/tenant-service.hxx for background). + // + #pragma db value + struct tenant_service + { + string id; + string type; + optional<string> data; + + tenant_service () = default; + + tenant_service (string i, string t, optional<string> d = nullopt) + : id (move (i)), type (move (t)), data (move (d)) {} + }; + // Version comparison operators. // - // They allow comparing objects that have epoch, canonical_upstream, - // canonical_release, and revision data members. The idea is that this - // works for both query members of types version and canonical_version. - // Note, though, that the object revisions should be comparable (both - // optional, numeric, etc), so to compare version to query member or - // canonical_version you may need to explicitly convert the version object - // to canonical_version. + // Compare objects that have epoch, canonical_upstream, canonical_release, + // and revision data members. The idea is that this works for both query + // members of types version and canonical_version. Note, though, that the + // object revisions should be comparable (both optional, numeric, etc), so + // to compare version to query member or canonical_version you may need to + // explicitly convert the version object to canonical_version. // template <typename T1, typename T2> inline auto @@ -515,10 +831,9 @@ namespace brep return compare_version_lt (x.version, y.version, true); } - // They allow comparing objects that have tenant, name, and version data - // members. 
The idea is that this works for both query members of package id - // types (in particular in join conditions) as well as for values of - // package_id type. + // Compare objects that have tenant, name, and version data members. The + // idea is that this works for both query members of package id types (in + // particular in join conditions) as well as for values of package_id type. // template <typename T1, typename T2> inline auto @@ -545,6 +860,27 @@ namespace brep } // Allow comparing the query members with the query parameters bound by + // reference to variables of the canonical version type (in particular in + // the prepared queries). + // + // Note that it is not operator==() since the query template parameter type + // can not be deduced from the function parameter types and needs to be + // specified explicitly. + // + template <typename T, typename V> + inline auto + equal (const V& x, const canonical_version& y) + -> decltype (x.epoch == odb::query<T>::_ref (y.epoch)) + { + using query = odb::query<T>; + + return x.epoch == query::_ref (y.epoch) && + x.canonical_upstream == query::_ref (y.canonical_upstream) && + x.canonical_release == query::_ref (y.canonical_release) && + x.revision == query::_ref (y.revision); + } + + // Allow comparing the query members with the query parameters bound by // reference to variables of the package id type (in particular in the // prepared queries). // @@ -555,21 +891,15 @@ namespace brep template <typename T, typename ID> inline auto equal (const ID& x, const package_id& y) - -> decltype (x.tenant == odb::query<T>::_ref (y.tenant) && - x.name == odb::query<T>::_ref (y.name) && + -> decltype (x.tenant == odb::query<T>::_ref (y.tenant) && + x.name == odb::query<T>::_ref (y.name) && x.version.epoch == odb::query<T>::_ref (y.version.epoch)) { using query = odb::query<T>; - const auto& qv (x.version); - const canonical_version& v (y.version); - - return x.tenant == query::_ref (y.tenant) && - x.name == query::_ref (y.name) && - qv.epoch == query::_ref (v.epoch) && - qv.canonical_upstream == query::_ref (v.canonical_upstream) && - qv.canonical_release == query::_ref (v.canonical_release) && - qv.revision == query::_ref (v.revision); + return x.tenant == query::_ref (y.tenant) && + x.name == query::_ref (y.name) && + equal<T> (x.version, y.version); } // Repository id comparison operators. @@ -583,10 +913,10 @@ namespace brep return x.canonical_name.compare (y.canonical_name) < 0; } - // They allow comparing objects that have tenant and canonical_name data - // members. The idea is that this works for both query members of repository - // id types (in particular in join conditions) as well as for values of - // repository_id type. + // Compare objects that have tenant and canonical_name data members. The + // idea is that this works for both query members of repository id types (in + // particular in join conditions) as well as for values of repository_id + // type. // template <typename T1, typename T2> inline auto @@ -603,6 +933,38 @@ namespace brep { return x.tenant != y.tenant || x.canonical_name != y.canonical_name; } + + // Public key id comparison operators. + // + inline bool + operator< (const public_key_id& x, const public_key_id& y) + { + if (int r = x.tenant.compare (y.tenant)) + return r < 0; + + return x.fingerprint.compare (y.fingerprint) < 0; + } + + // Compare objects that have tenant and fingerprint data members. 
The idea + // is that this works for both query members of public key id types (in + // particular in join conditions) as well as for values of public_key_id + // type. + // + template <typename T1, typename T2> + inline auto + operator== (const T1& x, const T2& y) + -> decltype (x.tenant == y.tenant && x.fingerprint == y.fingerprint) + { + return x.tenant == y.tenant && x.fingerprint == y.fingerprint; + } + + template <typename T1, typename T2> + inline auto + operator!= (const T1& x, const T2& y) + -> decltype (x.tenant == y.tenant && x.fingerprint == y.fingerprint) + { + return x.tenant != y.tenant || x.fingerprint != y.fingerprint; + } } #endif // LIBBREP_COMMON_HXX diff --git a/libbrep/odb.sh b/libbrep/odb.sh index 9ee11fa..89dc5be 100755 --- a/libbrep/odb.sh +++ b/libbrep/odb.sh @@ -16,6 +16,8 @@ if test -d ../.bdep; then sed -r -ne 's#^(@[^ ]+ )?([^ ]+)/ .*default.*$#\2#p')" fi + # Note: here we use libodb*, not libbutl-odb. + # inc+=("-I$(echo "$cfg"/libodb-[1-9]*/)") inc+=("-I$(echo "$cfg"/libodb-pgsql-[1-9]*/)") @@ -33,6 +35,11 @@ sed -r -ne 's#^(@[^ ]+ )?([^ ]+)/ .*default.*$#\2#p')" else + # Feels like this case should not be necessary (unlike in bpkg/bdep). + # + echo "not bdep-initialized" 1>&2 + exit 1 + inc+=("-I$HOME/work/odb/builds/default/libodb-pgsql-default") inc+=("-I$HOME/work/odb/libodb-pgsql") @@ -53,7 +60,7 @@ $odb "${inc[@]}" -d pgsql --std c++14 --generate-query \ --hxx-prologue '#include <libbrep/common-traits.hxx>' \ -DLIBODB_BUILD2 -DLIBODB_PGSQL_BUILD2 \ --include-with-brackets --include-prefix libbrep \ - --guard-prefix LIBBREP \ + --guard-prefix LIBBREP \ common.hxx $odb "${inc[@]}" -d pgsql --std c++14 --generate-query --generate-schema \ @@ -74,7 +81,7 @@ $odb "${inc[@]}" -d pgsql --std c++14 --generate-query --generate-schema \ --odb-epilogue '#include <libbrep/wrapper-traits.hxx>' \ --generate-prepared -DLIBODB_BUILD2 -DLIBODB_PGSQL_BUILD2 \ --include-with-brackets --include-prefix libbrep \ - --guard-prefix LIBBREP \ + --guard-prefix LIBBREP \ build.hxx $odb "${inc[@]}" -d pgsql --std c++14 --generate-query \ diff --git a/libbrep/package-extra.sql b/libbrep/package-extra.sql index fe936ff..5c04147 100644 --- a/libbrep/package-extra.sql +++ b/libbrep/package-extra.sql @@ -38,16 +38,17 @@ DROP TYPE IF EXISTS weighted_text CASCADE; CREATE TYPE weighted_text AS (a TEXT, b TEXT, c TEXT, d TEXT); -- Return the latest versions of matching a tenant internal packages as a set --- of package rows. If tenant is NULL, then match all tenants. +-- of package rows. If tenant is NULL, then match all public tenants. 
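-- (A "public" tenant here is one whose tenant private flag is false; see
-- the NOT t.private condition in the WHERE clauses below.)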
-- CREATE FUNCTION latest_packages(IN tenant TEXT) RETURNS SETOF package AS $$ SELECT p1.* - FROM package p1 LEFT JOIN package p2 ON ( + FROM package p1 + LEFT JOIN package p2 ON ( p1.internal_repository_canonical_name IS NOT NULL AND - p1.tenant = p2.tenant AND - p1.name = p2.name AND + p1.tenant = p2.tenant AND + p1.name = p2.name AND p2.internal_repository_canonical_name IS NOT NULL AND (p1.version_epoch < p2.version_epoch OR p1.version_epoch = p2.version_epoch AND @@ -56,8 +57,12 @@ RETURNS SETOF package AS $$ (p1.version_canonical_release < p2.version_canonical_release OR p1.version_canonical_release = p2.version_canonical_release AND p1.version_revision < p2.version_revision)))) + JOIN tenant t ON (p1.tenant = t.id) WHERE - (latest_packages.tenant IS NULL OR p1.tenant = latest_packages.tenant) AND + CASE + WHEN latest_packages.tenant IS NULL THEN NOT t.private + ELSE p1.tenant = latest_packages.tenant + END AND p1.internal_repository_canonical_name IS NOT NULL AND p2.name IS NULL; $$ LANGUAGE SQL STABLE; @@ -83,7 +88,8 @@ $$ LANGUAGE SQL STABLE; -- Search for the latest version of an internal packages matching the -- specified search query and tenant. Return a set of rows containing the -- package id and search rank. If query is NULL, then match all packages and --- return 0 rank for all rows. If tenant is NULL, then match all tenants. +-- return 0 rank for all rows. If tenant is NULL, then match all public +-- tenants. -- CREATE FUNCTION search_latest_packages(IN query tsquery, @@ -107,9 +113,9 @@ RETURNS SETOF record AS $$ $$ LANGUAGE SQL STABLE; -- Search for packages matching the search query and tenant and having the --- specified name. Return a set of rows containing the package id and search +-- specified name. Return a set of rows containing the package id and search -- rank. If query is NULL, then match all packages and return 0 rank for all --- rows. If tenant is NULL, then match all tenants. +-- rows. If tenant is NULL, then match all public tenants. -- CREATE FUNCTION search_packages(IN query tsquery, @@ -121,19 +127,22 @@ search_packages(IN query tsquery, OUT version_revision INTEGER, OUT rank real) RETURNS SETOF record AS $$ - SELECT tenant, name, version_epoch, version_canonical_upstream, - version_canonical_release, version_revision, + SELECT p.tenant, p.name, p.version_epoch, p.version_canonical_upstream, + p.version_canonical_release, p.version_revision, CASE WHEN query IS NULL THEN 0 -- Weight mapping: D C B A ELSE ts_rank_cd('{0.05, 0.2, 0.9, 1.0}', search_index, query) END AS rank - FROM package + FROM package p JOIN tenant t ON (p.tenant = t.id) WHERE - (search_packages.tenant IS NULL OR tenant = search_packages.tenant) AND - name = search_packages.name AND - internal_repository_canonical_name IS NOT NULL AND - (query IS NULL OR search_index @@ query); + CASE + WHEN search_packages.tenant IS NULL THEN NOT t.private + ELSE p.tenant = search_packages.tenant + END AND + name = search_packages.name AND + internal_repository_canonical_name IS NOT NULL AND + (query IS NULL OR search_index @@ query); $$ LANGUAGE SQL STABLE; -- Parse weighted_text to tsvector. 
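The LEFT JOIN/IS NULL pattern above keeps a package row only when no
strictly newer version of the same package exists in the same tenant. The
nested OR/AND chain is simply a lexicographic comparison over (epoch,
canonical_upstream, canonical_release, revision), the same ordering the
compare_version_* helpers in common.hxx implement on the C++ side. A
minimal standalone sketch of that ordering (field types approximated):

#include <string>
#include <tuple>

struct canonical_version
{
  int epoch;
  std::string canonical_upstream;
  std::string canonical_release;
  int revision;
};

// True if x is strictly older than y, matching the nested OR/AND chain in
// latest_packages() above.
//
inline bool
older (const canonical_version& x, const canonical_version& y)
{
  return std::tie (x.epoch, x.canonical_upstream,
                   x.canonical_release, x.revision) <
         std::tie (y.epoch, y.canonical_upstream,
                   y.canonical_release, y.revision);
}

int main ()
{
  canonical_version a {0, "1.2.0", "", 1};
  canonical_version b {0, "1.2.0", "", 2};
  return older (a, b) ? 0 : 1; // a is older: same upstream, lower revision.
}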
diff --git a/libbrep/package.cxx b/libbrep/package.cxx index 564fec7..4eb6fe8 100644 --- a/libbrep/package.cxx +++ b/libbrep/package.cxx @@ -40,9 +40,15 @@ namespace brep // tenant // tenant:: - tenant (string i) + tenant (string i, + bool p, + optional<string> r, + optional<tenant_service> s) : id (move (i)), - creation_timestamp (timestamp::clock::now ()) + private_ (p), + interactive (move (r)), + creation_timestamp (timestamp::clock::now ()), + service (move (s)) { } @@ -58,9 +64,9 @@ namespace brep license_alternatives_type la, small_vector<string, 5> tp, small_vector<string, 5> kw, - optional<string> ds, - optional<text_type> dt, - string ch, + optional<typed_text> ds, + optional<typed_text> pds, + optional<typed_text> ch, optional<manifest_url> ur, optional<manifest_url> du, optional<manifest_url> su, @@ -75,6 +81,9 @@ namespace brep small_vector<test_dependency, 1> ts, build_class_exprs bs, build_constraints_type bc, + build_auxiliaries_type ac, + package_build_bot_keys bk, + package_build_configs bcs, optional<path> lc, optional<string> fr, optional<string> sh, @@ -91,7 +100,7 @@ namespace brep topics (move (tp)), keywords (move (kw)), description (move (ds)), - description_type (move (dt)), + package_description (move (pds)), changes (move (ch)), url (move (ur)), doc_url (move (du)), @@ -107,11 +116,19 @@ namespace brep tests (move (ts)), builds (move (bs)), build_constraints (move (bc)), + build_auxiliaries (move (ac)), + build_bot_keys (move (bk)), + build_configs (move (bcs)), internal_repository (move (rp)), location (move (lc)), fragment (move (fr)), sha256sum (move (sh)) { + // The default configuration is always added by the package manifest + // parser (see libbpkg/manifest.cxx for details). + // + assert (find ("default", build_configs) != nullptr); + if (stub ()) unbuildable_reason = brep::unbuildable_reason::stub; else if (!internal_repository->buildable) @@ -119,6 +136,31 @@ namespace brep buildable = !unbuildable_reason; + // If the package is buildable deduce the custom_bot flag. + // + if (buildable) + { + for (const package_build_config& bc: build_configs) + { + bool custom (!bc.effective_bot_keys (build_bot_keys).empty ()); + + if (!custom_bot) + { + custom_bot = custom; + } + // + // If both the custom and default bots are used by the package, then + // reset the custom_bot flag to nullopt and bail out from the build + // package configurations loop. + // + else if (*custom_bot != custom) + { + custom_bot = nullopt; + break; + } + } + } + assert (internal_repository->internal); } @@ -127,6 +169,8 @@ namespace brep version_type vr, build_class_exprs bs, build_constraints_type bc, + build_auxiliaries_type ac, + package_build_configs bcs, shared_ptr<repository_type> rp) : id (rp->tenant, move (nm), vr), tenant (id.tenant), @@ -134,11 +178,18 @@ namespace brep version (move (vr)), builds (move (bs)), build_constraints (move (bc)), + build_auxiliaries (move (ac)), + build_configs (move (bcs)), buildable (false), unbuildable_reason (stub () ? brep::unbuildable_reason::stub : brep::unbuildable_reason::external) { + // The default configuration is always added by the package manifest + // parser (see libbpkg/manifest.cxx for details). + // + assert (find ("default", build_configs) != nullptr); + assert (!rp->internal); other_repositories.emplace_back (move (rp)); } @@ -159,11 +210,11 @@ namespace brep // Probably drop-box would be better as also tells what are // the available internal repositories. 
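    // (The keyword string k assembled below ends up in the weight-a slot
    // of weighted_text and is therefore ranked highest by the
    // ts_rank_cd() weights in package-extra.sql above.)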
// - string k (project.string () + " " + name.string () + " " + - version.string () + " " + version.string (true)); + string k (project.string () + ' ' + name.string () + ' ' + + version.string () + ' ' + version.string (true)); if (upstream_version) - k += " " + *upstream_version; + k += ' ' + *upstream_version; // Add licenses to search keywords. // @@ -171,13 +222,13 @@ namespace brep { for (const auto& l: la) { - k += " " + l; + k += ' ' + l; // If license is say LGPLv2 then LGPL is also a search keyword. // size_t n (l.size ()); if (n > 2 && l[n - 2] == 'v' && l[n - 1] >= '0' && l[n - 1] <= '9') - k += " " + string (l, 0, n - 2); + k += ' ' + string (l, 0, n - 2); } } @@ -188,14 +239,24 @@ namespace brep // Add topics to the second-strongest search keywords. // for (const auto& t: topics) - k2 += " " + t; + k2 += ' ' + t; // Add keywords to the second-strongest search keywords. // for (const auto& k: keywords) - k2 += " " + k; + k2 += ' ' + k; + + string d (description ? description->text : ""); + + if (package_description) + { + if (description) + d += ' '; + + d += package_description->text; + } - return {move (k), move (k2), description ? *description : "", changes}; + return {move (k), move (k2), move (d), changes ? changes->text : ""}; } // repository diff --git a/libbrep/package.hxx b/libbrep/package.hxx index 33444a9..45008d4 100644 --- a/libbrep/package.hxx +++ b/libbrep/package.hxx @@ -18,9 +18,9 @@ // Used by the data migration entries. // -#define LIBBREP_PACKAGE_SCHEMA_VERSION_BASE 19 +#define LIBBREP_PACKAGE_SCHEMA_VERSION_BASE 27 -#pragma db model version(LIBBREP_PACKAGE_SCHEMA_VERSION_BASE, 19, closed) +#pragma db model version(LIBBREP_PACKAGE_SCHEMA_VERSION_BASE, 33, closed) namespace brep { @@ -49,9 +49,12 @@ namespace brep using bpkg::text_type; using bpkg::to_text_type; + // Note that here we assume that the saved string representation of a type + // is always recognized later. + // #pragma db map type(text_type) as(string) \ to(to_string (?)) \ - from(brep::to_text_type (?)) + from(*brep::to_text_type (?)) using optional_text_type = optional<text_type>; @@ -69,26 +72,15 @@ namespace brep set(this = brep::manifest_url ((?), "" /* comment */)) \ column("") - // email - // - using bpkg::email; - - #pragma db value(email) definition - #pragma db member(email::value) virtual(string) before access(this) column("") - // licenses // using bpkg::licenses; - using license_alternatives = vector<licenses>; + using license_alternatives = small_vector<licenses, 1>; #pragma db value(licenses) definition // dependencies // - using bpkg::version_constraint; - - #pragma db value(version_constraint) definition - // Notes: // // 1. Will the package be always resolvable? What if it is in @@ -160,49 +152,73 @@ namespace brep operator!= (const dependency&, const dependency&); #pragma db value - class dependency_alternatives: public vector<dependency> + class dependency_alternative: public small_vector<dependency, 1> + { + public: + // While we currently don't use the reflect, prefer, accept, and require + // values, let's save them for completeness. 
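+    // (Roughly: the enable clause holds the condition under which the
+    // alternative is considered, while reflect, prefer, accept, and
+    // require store the respective manifest clauses verbatim.)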
+ // + optional<string> enable; + optional<string> reflect; + optional<string> prefer; + optional<string> accept; + optional<string> require; + + dependency_alternative () = default; + dependency_alternative (optional<string> e, + optional<string> r, + optional<string> p, + optional<string> a, + optional<string> q) + : enable (move (e)), + reflect (move (r)), + prefer (move (p)), + accept (move (a)), + require (move (q)) {} + }; + + #pragma db value + class dependency_alternatives: public small_vector<dependency_alternative, 1> { public: - bool conditional; bool buildtime; string comment; dependency_alternatives () = default; - dependency_alternatives (bool d, bool b, string c) - : conditional (d), buildtime (b), comment (move (c)) {} + dependency_alternatives (bool b, string c) + : buildtime (b), comment (move (c)) {} }; using dependencies = vector<dependency_alternatives>; - // requirements - // - using bpkg::requirement_alternatives; - using requirements = vector<requirement_alternatives>; - - #pragma db value(requirement_alternatives) definition - // tests // - using bpkg::test_dependency_type; - using bpkg::to_test_dependency_type; - - #pragma db map type(test_dependency_type) as(string) \ - to(to_string (?)) \ - from(brep::to_test_dependency_type (?)) - #pragma db value struct test_dependency: dependency { test_dependency_type type; + bool buildtime; + optional<string> enable; + optional<string> reflect; test_dependency () = default; test_dependency (package_name n, test_dependency_type t, - optional<version_constraint> c) - : dependency {std::move (n), std::move (c), nullptr /* package */}, - type (t) + bool b, + optional<version_constraint> c, + optional<string> e, + optional<string> r) + : dependency {move (n), move (c), nullptr /* package */}, + type (t), + buildtime (b), + enable (move (e)), + reflect (move (r)) { } + + // Database mapping. + // + #pragma db member(buildtime) }; // certificate @@ -225,17 +241,82 @@ namespace brep // Create the tenant object with the timestamp set to now and the archived // flag set to false. // - explicit - tenant (string id); + tenant (string id, + bool private_, + optional<string> interactive, + optional<tenant_service>); string id; + // If this flag is true, then display the packages in the web interface + // only in the tenant view mode. + // + bool private_; // Note: foreign-mapped in build. + + // Interactive package build breakpoint. + // + // If present, then packages from this tenant will only be built + // interactively and only non-interactively otherwise. + // + optional<string> interactive; // Note: foreign-mapped in build. + timestamp creation_timestamp; - bool archived = false; // Note: foreign-mapped in build. + bool archived = false; // Note: foreign-mapped in build. + + optional<tenant_service> service; // Note: foreign-mapped in build. + + // Note that due to the implementation complexity and performance + // considerations, the service notifications are not synchronized. This + // leads to a potential race, so that before we have sent the `queued` + // notification for a package build, some other thread (potentially in a + // different process) could have already sent the `building` notification + // for it. It feels like there is no easy way to reliably fix that. + // Instead, we just decrease the probability of such a notifications + // sequence failure by delaying builds of the freshly queued packages for + // some time. 
Specifically, whenever the `queued` notification ought
+    // to be sent (normally out of the database transaction, since it likely
+    // sends an HTTP request, etc) the tenant's queued_timestamp member is set
+    // to the current time. During the configured time interval since that
+    // time point the build tasks may not be issued for the tenant's packages.
+    //
+    // Also note that while there are similar potential races for other
+    // notification sequences, their probability is rather low due to the
+    // natural reasons (non-zero build task execution time, etc) and thus we
+    // just ignore them.
+    //
+    optional<timestamp> queued_timestamp; // Note: foreign-mapped in build.
+
+    // Note that after the package tenant is created but before the first
+    // build object is created, there is no easy way to produce a list of
+    // unbuilt package configurations. That would require knowing the build
+    // toolchain(s), which are normally extracted from the build objects.
+    // Thus, the empty unbuilt package configurations list is ambiguous and
+    // can either mean that no more package configurations can be built or
+    // that we do not have enough information to produce the list. To
+    // disambiguate the empty list in the interface, in the latter case we
+    // want to display the question mark instead of 0 as an unbuilt package
+    // configurations count. To achieve this we will stash the build toolchain
+    // in the tenant when a package from this tenant is considered for a build
+    // for the first time but no configuration is picked for the build (the
+    // target configurations are excluded, an auxiliary machine is not
+    // available, etc). We will also use the stashed toolchain as a fallback
+    // until we are able to retrieve the toolchain(s) from the tenant builds
+    // to produce the unbuilt package configurations list.
+    //
+    // Note: foreign-mapped in build.
+    //
+    optional<brep::build_toolchain> build_toolchain;

     // Database mapping.
     //
     #pragma db member(id) id
+    #pragma db member(private_)
+
+    #pragma db index("tenant_service_i") \
+      unique \
+      members(service.id, service.type)
+
+    #pragma db index member(service.id)

   private:
     friend class odb::access;
@@ -366,6 +447,67 @@ namespace brep
       string d;
     };

+    #pragma db value
+    struct typed_text
+    {
+      string text;
+      text_type type;
+
+      #pragma db member(text) column("")
+    };
+
+    // Tweak public_key_id mapping to include a constraint (this only affects the
+    // database schema).
+    //
+    #pragma db member(public_key_id::tenant) points_to(tenant)
+
+    #pragma db object pointer(shared_ptr) session
+    class public_key: public string
+    {
+    public:
+      public_key (string tenant, string fingerprint, string key)
+          : string (move (key)), id (move (tenant), move (fingerprint)) {}
+
+      public_key_id id;
+
+      // Database mapping.
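+      // (public_key derives from string, so the key text itself is
+      // persisted through the virtual data member declared below rather
+      // than through a named field.)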
+ // + #pragma db member(id) id column("") + + #pragma db member(data) virtual(string) access(this) + + private: + friend class odb::access; + public_key () = default; + }; + + // package_build_config + // + using package_build_config = + build_package_config_template<lazy_shared_ptr<public_key>>; + + using package_build_configs = + build_package_configs_template<lazy_shared_ptr<public_key>>; + + #pragma db value(package_build_config) definition + + #pragma db member(package_build_config::builds) transient + #pragma db member(package_build_config::constraints) transient + #pragma db member(package_build_config::auxiliaries) transient + #pragma db member(package_build_config::bot_keys) transient + + // package_build_bot_keys + // + using package_build_bot_keys = vector<lazy_shared_ptr<public_key>>; + using package_build_bot_key_key = odb::nested_key<package_build_bot_keys>; + + using package_build_bot_keys_map = std::map<package_build_bot_key_key, + lazy_shared_ptr<public_key>>; + + #pragma db value(package_build_bot_key_key) + #pragma db member(package_build_bot_key_key::outer) column("config_index") + #pragma db member(package_build_bot_key_key::inner) column("index") + // Tweak package_id mapping to include a constraint (this only affects the // database schema). // @@ -384,9 +526,12 @@ namespace brep using dependencies_type = brep::dependencies; using requirements_type = brep::requirements; using build_constraints_type = brep::build_constraints; + using build_auxiliaries_type = brep::build_auxiliaries; // Create internal package object. // + // Note: the default build package config is expected to always be present. + // package (package_name, version_type, optional<string> upstream_version, @@ -396,9 +541,9 @@ namespace brep license_alternatives_type, small_vector<string, 5> topics, small_vector<string, 5> keywords, - optional<string> description, - optional<text_type> description_type, - string changes, + optional<typed_text> description, + optional<typed_text> package_description, + optional<typed_text> changes, optional<manifest_url> url, optional<manifest_url> doc_url, optional<manifest_url> src_url, @@ -413,6 +558,9 @@ namespace brep small_vector<test_dependency, 1> tests, build_class_exprs, build_constraints_type, + build_auxiliaries_type, + package_build_bot_keys, + package_build_configs, optional<path> location, optional<string> fragment, optional<string> sha256sum, @@ -427,14 +575,20 @@ namespace brep // // External package can also be a separate test for some primary package // (and belong to a complement but yet external repository), and so we may - // need its build class expressions and constraints to decide if to build - // it together with the primary package or not (see test-exclude task - // manifest value for details). + // need its build class expressions, constraints, and configurations to + // decide if to build it together with the primary package or not (see + // test-exclude task manifest value for details). Additionally, when the + // test package is being built the auxiliary machines may also be + // required. + // + // Note: the default build package config is expected to always be present. // package (package_name name, version_type, build_class_exprs, build_constraints_type, + build_auxiliaries_type, + package_build_configs, shared_ptr<repository_type>); bool @@ -460,31 +614,53 @@ namespace brep // Matches the package name if the project name is not specified in // the manifest. 
// - package_name project; + package_name project; // Note: foreign-mapped in build. priority_type priority; string summary; license_alternatives_type license_alternatives; small_vector<string, 5> topics; small_vector<string, 5> keywords; - optional<string> description; // Absent if type is unknown. - optional<text_type> description_type; // Present if description is present. - string changes; + + // Note that the descriptions and changes are absent if the respective + // type is unknown. + // + optional<typed_text> description; + optional<typed_text> package_description; + optional<typed_text> changes; + optional<manifest_url> url; optional<manifest_url> doc_url; optional<manifest_url> src_url; optional<manifest_url> package_url; optional<email_type> email; optional<email_type> package_email; - optional<email_type> build_email; - optional<email_type> build_warning_email; - optional<email_type> build_error_email; + optional<email_type> build_email; // Note: foreign-mapped in build. + optional<email_type> build_warning_email; // Note: foreign-mapped in build. + optional<email_type> build_error_email; // Note: foreign-mapped in build. dependencies_type dependencies; - requirements_type requirements; + requirements_type requirements; // Note: foreign-mapped in build. small_vector<test_dependency, 1> tests; // Note: foreign-mapped in build. + // Common build classes, constraints, auxiliaries, and bot keys that apply + // to all configurations unless overridden. + // build_class_exprs builds; // Note: foreign-mapped in build. build_constraints_type build_constraints; // Note: foreign-mapped in build. + build_auxiliaries_type build_auxiliaries; // Note: foreign-mapped in build. + package_build_bot_keys build_bot_keys; // Note: foreign-mapped in build. + package_build_configs build_configs; // Note: foreign-mapped in build. + + // Group the build_configs, builds, and build_constraints members of this + // object together with their respective nested configs entries into the + // separate section for an explicit load. + // + // Note that while the build auxiliaries and bot keys are persisted via + // the newly created package objects, they are only used via the + // foreign-mapped build_package objects (see build-package.hxx for + // details). Thus, we add them to the never-loaded unused_section (see + // below). + // odb::section build_section; // Note that it is foreign-mapped in build. @@ -515,6 +691,18 @@ namespace brep bool buildable; // Note: foreign-mapped in build. optional<brep::unbuildable_reason> unbuildable_reason; + // If this flag is true, then all the package configurations are buildable + // with the custom build bots. If false, then all configurations are + // buildable with the default bots. If nullopt, then some configurations + // are buildable with the custom and some with the default build bots. + // + // Note: meaningless if buildable is false. + // + optional<bool> custom_bot; // Note: foreign-mapped in build. + + private: + odb::section unused_section; + // Database mapping. // #pragma db member(id) id column("") @@ -549,38 +737,75 @@ namespace brep // dependencies // - using _dependency_key = odb::nested_key<dependency_alternatives>; + // Note that this is a 2-level nested container which is mapped to three + // container tables each containing data of each dimension. + + // Container of the dependency_alternatives values. + // + #pragma db member(dependencies) id_column("") value_column("") + + // Container of the dependency_alternative values. 
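+    // (Illustration: for manifest values `depends: a | b` and `depends: c`
+    // the outer table gets rows 0 and 1, this middle table gets entries
+    // (0,0), (0,1), and (1,0), and the innermost table below gets one row
+    // per individual dependency package.)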
+ // + using _dependency_alternative_key = + odb::nested_key<dependency_alternatives>; + using _dependency_alternatives_type = - std::map<_dependency_key, dependency>; + std::map<_dependency_alternative_key, dependency_alternative>; - #pragma db value(_dependency_key) - #pragma db member(_dependency_key::outer) column("dependency_index") - #pragma db member(_dependency_key::inner) column("index") + #pragma db value(_dependency_alternative_key) + #pragma db member(_dependency_alternative_key::outer) column("dependency_index") + #pragma db member(_dependency_alternative_key::inner) column("index") - #pragma db member(dependencies) id_column("") value_column("") #pragma db member(dependency_alternatives) \ virtual(_dependency_alternatives_type) \ after(dependencies) \ get(odb::nested_get (this.dependencies)) \ set(odb::nested_set (this.dependencies, std::move (?))) \ + id_column("") key_column("") value_column("") + + // Container of the dependency values. + // + using _dependency_key = odb::nested2_key<dependency_alternatives>; + using _dependency_alternative_dependencies_type = + std::map<_dependency_key, dependency>; + + #pragma db value(_dependency_key) + #pragma db member(_dependency_key::outer) column("dependency_index") + #pragma db member(_dependency_key::middle) column("alternative_index") + #pragma db member(_dependency_key::inner) column("index") + + #pragma db member(dependency_alternative_dependencies) \ + virtual(_dependency_alternative_dependencies_type) \ + after(dependency_alternatives) \ + get(odb::nested2_get (this.dependencies)) \ + set(odb::nested2_set (this.dependencies, std::move (?))) \ id_column("") key_column("") value_column("dep_") // requirements // - using _requirement_key = odb::nested_key<requirement_alternatives>; - using _requirement_alternatives_type = - std::map<_requirement_key, string>; - - #pragma db value(_requirement_key) - #pragma db member(_requirement_key::outer) column("requirement_index") - #pragma db member(_requirement_key::inner) column("index") + // Note that this is a 2-level nested container which is mapped to three + // container tables each containing data of each dimension. + // Container of the requirement_alternatives values. + // #pragma db member(requirements) id_column("") value_column("") + + // Container of the requirement_alternative values. + // #pragma db member(requirement_alternatives) \ - virtual(_requirement_alternatives_type) \ + virtual(requirement_alternatives_map) \ after(requirements) \ get(odb::nested_get (this.requirements)) \ set(odb::nested_set (this.requirements, std::move (?))) \ + id_column("") key_column("") value_column("") + + // Container of the requirement (string) values. 
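+    // (Same flattening as for the dependencies above, except that the
+    // innermost, three-index table stores the requirement ids as plain
+    // strings.)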
+ // + #pragma db member(requirement_alternative_requirements) \ + virtual(requirement_alternative_requirements_map) \ + after(requirement_alternatives) \ + get(odb::nested2_get (this.requirements)) \ + set(odb::nested2_set (this.requirements, std::move (?))) \ id_column("") key_column("") value_column("id") // tests @@ -597,7 +822,74 @@ namespace brep #pragma db member(build_constraints) id_column("") value_column("") \ section(build_section) - #pragma db member(build_section) load(lazy) update(always) + // build_auxiliaries + // + #pragma db member(build_auxiliaries) id_column("") value_column("") \ + section(unused_section) + + // build_bot_keys + // + #pragma db member(build_bot_keys) \ + id_column("") value_column("key_") value_not_null \ + section(unused_section) + + // build_configs + // + // Note that package_build_config::{builds,constraints,auxiliaries, + // bot_keys} are persisted/loaded via the separate nested containers (see + // commons.hxx for details). + // + #pragma db member(build_configs) id_column("") value_column("config_") \ + section(build_section) + + #pragma db member(build_config_builds) \ + virtual(build_class_exprs_map) \ + after(build_configs) \ + get(odb::nested_get ( \ + brep::build_package_config_builds (this.build_configs))) \ + set(brep::build_package_config_builds bs; \ + odb::nested_set (bs, std::move (?)); \ + move (bs).to_configs (this.build_configs)) \ + id_column("") key_column("") value_column("") \ + section(build_section) + + #pragma db member(build_config_constraints) \ + virtual(build_constraints_map) \ + after(build_config_builds) \ + get(odb::nested_get ( \ + brep::build_package_config_constraints (this.build_configs))) \ + set(brep::build_package_config_constraints cs; \ + odb::nested_set (cs, std::move (?)); \ + move (cs).to_configs (this.build_configs)) \ + id_column("") key_column("") value_column("") \ + section(build_section) + + #pragma db member(build_config_auxiliaries) \ + virtual(build_auxiliaries_map) \ + after(build_config_constraints) \ + get(odb::nested_get ( \ + brep::build_package_config_auxiliaries (this.build_configs))) \ + set(brep::build_package_config_auxiliaries as; \ + odb::nested_set (as, std::move (?)); \ + move (as).to_configs (this.build_configs)) \ + id_column("") key_column("") value_column("") \ + section(unused_section) + + #pragma db member(build_config_bot_keys) \ + virtual(package_build_bot_keys_map) \ + after(build_config_auxiliaries) \ + get(odb::nested_get ( \ + brep::build_package_config_bot_keys< \ + lazy_shared_ptr<brep::public_key>> (this.build_configs))) \ + set(brep::build_package_config_bot_keys< \ + lazy_shared_ptr<brep::public_key>> bks; \ + odb::nested_set (bks, std::move (?)); \ + move (bks).to_configs (this.build_configs)) \ + id_column("") key_column("") value_column("key_") value_not_null \ + section(unused_section) + + #pragma db member(build_section) load(lazy) update(always) + #pragma db member(unused_section) load(lazy) update(manual) // other_repositories // @@ -615,9 +907,9 @@ namespace brep friend class odb::access; package (): tenant (id.tenant), name (id.name) {} - // Save keywords, summary, description, and changes to weighted_text - // a, b, c, d members, respectively. So a word found in keywords will - // have a higher weight than if it's found in the summary. + // Save keywords, summary, descriptions, and changes to weighted_text a, + // b, c, d members, respectively. So a word found in keywords will have a + // higher weight than if it's found in the summary. 
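+    // (These a, b, c, d slots correspond to the A, B, C, D weights in the
+    // ts_rank_cd('{0.05, 0.2, 0.9, 1.0}', ...) calls in package-extra.sql:
+    // a is weighted highest at 1.0 and d lowest at 0.05.)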
// weighted_text search_text () const; diff --git a/libbrep/package.xml b/libbrep/package.xml index 454cdbc..96e93a7 100644 --- a/libbrep/package.xml +++ b/libbrep/package.xml @@ -1,7 +1,280 @@ <changelog xmlns="http://www.codesynthesis.com/xmlns/odb/changelog" database="pgsql" schema-name="package" version="1"> - <model version="19"> + <changeset version="33"> + <add-table name="public_key" kind="object"> + <column name="tenant" type="TEXT" null="false"/> + <column name="fingerprint" type="TEXT" null="false"/> + <column name="data" type="TEXT" null="false"/> + <primary-key> + <column name="tenant"/> + <column name="fingerprint"/> + </primary-key> + <foreign-key name="tenant_fk" deferrable="DEFERRED"> + <column name="tenant"/> + <references table="tenant"> + <column name="id"/> + </references> + </foreign-key> + </add-table> + <alter-table name="package"> + <add-column name="custom_bot" type="BOOLEAN" null="true"/> + </alter-table> + <add-table name="package_build_bot_keys" kind="container"> + <column name="tenant" type="TEXT" null="false"/> + <column name="name" type="CITEXT" null="false"/> + <column name="version_epoch" type="INTEGER" null="false"/> + <column name="version_canonical_upstream" type="TEXT" null="false"/> + <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> + <column name="version_revision" type="INTEGER" null="false"/> + <column name="index" type="BIGINT" null="false"/> + <column name="key_tenant" type="TEXT" null="false"/> + <column name="key_fingerprint" type="TEXT" null="false"/> + <foreign-key name="tenant_fk" deferrable="DEFERRED"> + <column name="tenant"/> + <references table="tenant"> + <column name="id"/> + </references> + </foreign-key> + <foreign-key name="object_id_fk" on-delete="CASCADE"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + <references table="package"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </references> + </foreign-key> + <index name="package_build_bot_keys_object_id_i"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </index> + <index name="package_build_bot_keys_index_i"> + <column name="index"/> + </index> + <foreign-key name="key_tenant_fk" deferrable="DEFERRED"> + <column name="key_tenant"/> + <references table="tenant"> + <column name="id"/> + </references> + </foreign-key> + <foreign-key name="key_fk" deferrable="DEFERRED"> + <column name="key_tenant"/> + <column name="key_fingerprint"/> + <references table="public_key"> + <column name="tenant"/> + <column name="fingerprint"/> + </references> + </foreign-key> + </add-table> + <add-table name="package_build_config_bot_keys" kind="container"> + <column name="tenant" type="TEXT" null="false"/> + <column name="name" type="CITEXT" null="false"/> + <column name="version_epoch" type="INTEGER" null="false"/> + <column name="version_canonical_upstream" type="TEXT" null="false"/> + <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> + <column name="version_revision" type="INTEGER" null="false"/> + <column name="config_index" 
type="BIGINT" null="false"/> + <column name="index" type="BIGINT" null="false"/> + <column name="key_tenant" type="TEXT" null="false"/> + <column name="key_fingerprint" type="TEXT" null="false"/> + <foreign-key name="tenant_fk" deferrable="DEFERRED"> + <column name="tenant"/> + <references table="tenant"> + <column name="id"/> + </references> + </foreign-key> + <foreign-key name="object_id_fk" on-delete="CASCADE"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + <references table="package"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </references> + </foreign-key> + <index name="package_build_config_bot_keys_object_id_i"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </index> + <foreign-key name="key_tenant_fk" deferrable="DEFERRED"> + <column name="key_tenant"/> + <references table="tenant"> + <column name="id"/> + </references> + </foreign-key> + <foreign-key name="key_fk" deferrable="DEFERRED"> + <column name="key_tenant"/> + <column name="key_fingerprint"/> + <references table="public_key"> + <column name="tenant"/> + <column name="fingerprint"/> + </references> + </foreign-key> + </add-table> + </changeset> + + <changeset version="32"> + <alter-table name="tenant"> + <add-column name="build_toolchain_name" type="TEXT" null="true"/> + <add-column name="build_toolchain_version_epoch" type="INTEGER" null="true"/> + <add-column name="build_toolchain_version_canonical_upstream" type="TEXT" null="true"/> + <add-column name="build_toolchain_version_canonical_release" type="TEXT" null="true"/> + <add-column name="build_toolchain_version_revision" type="INTEGER" null="true"/> + <add-column name="build_toolchain_version_upstream" type="TEXT" null="true"/> + <add-column name="build_toolchain_version_release" type="TEXT" null="true"/> + </alter-table> + </changeset> + + <changeset version="31"> + <add-table name="package_build_auxiliaries" kind="container"> + <column name="tenant" type="TEXT" null="false"/> + <column name="name" type="CITEXT" null="false"/> + <column name="version_epoch" type="INTEGER" null="false"/> + <column name="version_canonical_upstream" type="TEXT" null="false"/> + <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> + <column name="version_revision" type="INTEGER" null="false"/> + <column name="index" type="BIGINT" null="false"/> + <column name="environment_name" type="TEXT" null="false"/> + <column name="config" type="TEXT" null="false"/> + <column name="comment" type="TEXT" null="false"/> + <foreign-key name="tenant_fk" deferrable="DEFERRED"> + <column name="tenant"/> + <references table="tenant"> + <column name="id"/> + </references> + </foreign-key> + <foreign-key name="object_id_fk" on-delete="CASCADE"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + <references table="package"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column 
name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </references> + </foreign-key> + <index name="package_build_auxiliaries_object_id_i"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </index> + <index name="package_build_auxiliaries_index_i"> + <column name="index"/> + </index> + </add-table> + <add-table name="package_build_config_auxiliaries" kind="container"> + <column name="tenant" type="TEXT" null="false"/> + <column name="name" type="CITEXT" null="false"/> + <column name="version_epoch" type="INTEGER" null="false"/> + <column name="version_canonical_upstream" type="TEXT" null="false"/> + <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> + <column name="version_revision" type="INTEGER" null="false"/> + <column name="config_index" type="BIGINT" null="false"/> + <column name="index" type="BIGINT" null="false"/> + <column name="environment_name" type="TEXT" null="false"/> + <column name="config" type="TEXT" null="false"/> + <column name="comment" type="TEXT" null="false"/> + <foreign-key name="tenant_fk" deferrable="DEFERRED"> + <column name="tenant"/> + <references table="tenant"> + <column name="id"/> + </references> + </foreign-key> + <foreign-key name="object_id_fk" on-delete="CASCADE"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + <references table="package"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </references> + </foreign-key> + <index name="package_build_config_auxiliaries_object_id_i"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </index> + </add-table> + </changeset> + + <changeset version="30"> + <alter-table name="tenant"> + <add-column name="service_id" type="TEXT" null="true"/> + <add-column name="service_type" type="TEXT" null="true"/> + <add-column name="service_data" type="TEXT" null="true"/> + <add-column name="queued_timestamp" type="BIGINT" null="true"/> + <add-index name="tenant_service_i" type="UNIQUE"> + <column name="service_id"/> + <column name="service_type"/> + </add-index> + <add-index name="tenant_service_id_i"> + <column name="service_id"/> + </add-index> + </alter-table> + </changeset> + + <changeset version="29"> + <alter-table name="package_tests"> + <add-column name="test_enable" type="TEXT" null="true"/> + </alter-table> + </changeset> + + <changeset version="28"> + <alter-table name="package_build_configs"> + <add-column name="config_email" type="TEXT" null="true"/> + <add-column name="config_email_comment" type="TEXT" null="true"/> + <add-column name="config_warning_email" type="TEXT" null="true"/> + <add-column name="config_warning_email_comment" type="TEXT" null="true"/> + <add-column name="config_error_email" type="TEXT" null="true"/> + <add-column name="config_error_email_comment" type="TEXT" null="true"/> + </alter-table> + </changeset> + + <model version="27"> <table name="tenant" 
kind="object"> <column name="id" type="TEXT" null="false"/> + <column name="private" type="BOOLEAN" null="false"/> + <column name="interactive" type="TEXT" null="true"/> <column name="creation_timestamp" type="BIGINT" null="false"/> <column name="archived" type="BOOLEAN" null="false"/> <primary-key> @@ -142,7 +415,10 @@ <column name="summary" type="TEXT" null="false"/> <column name="description" type="TEXT" null="true"/> <column name="description_type" type="TEXT" null="true"/> - <column name="changes" type="TEXT" null="false"/> + <column name="package_description" type="TEXT" null="true"/> + <column name="package_description_type" type="TEXT" null="true"/> + <column name="changes" type="TEXT" null="true"/> + <column name="changes_type" type="TEXT" null="true"/> <column name="url" type="TEXT" null="true"/> <column name="url_comment" type="TEXT" null="true"/> <column name="doc_url" type="TEXT" null="true"/> @@ -379,7 +655,6 @@ <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> <column name="version_revision" type="INTEGER" null="false"/> <column name="index" type="BIGINT" null="false"/> - <column name="conditional" type="BOOLEAN" null="false"/> <column name="buildtime" type="BOOLEAN" null="false"/> <column name="comment" type="TEXT" null="false"/> <foreign-key name="tenant_fk" deferrable="DEFERRED"> @@ -425,6 +700,52 @@ <column name="version_revision" type="INTEGER" null="false"/> <column name="dependency_index" type="BIGINT" null="false"/> <column name="index" type="BIGINT" null="false"/> + <column name="enable" type="TEXT" null="true"/> + <column name="reflect" type="TEXT" null="true"/> + <column name="prefer" type="TEXT" null="true"/> + <column name="accept" type="TEXT" null="true"/> + <column name="require" type="TEXT" null="true"/> + <foreign-key name="tenant_fk" deferrable="DEFERRED"> + <column name="tenant"/> + <references table="tenant"> + <column name="id"/> + </references> + </foreign-key> + <foreign-key name="object_id_fk" on-delete="CASCADE"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + <references table="package"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </references> + </foreign-key> + <index name="package_dependency_alternatives_object_id_i"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </index> + </table> + <table name="package_dependency_alternative_dependencies" kind="container"> + <column name="tenant" type="TEXT" null="false"/> + <column name="name" type="CITEXT" null="false"/> + <column name="version_epoch" type="INTEGER" null="false"/> + <column name="version_canonical_upstream" type="TEXT" null="false"/> + <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> + <column name="version_revision" type="INTEGER" null="false"/> + <column name="dependency_index" type="BIGINT" null="false"/> + <column name="alternative_index" type="BIGINT" null="false"/> + <column name="index" type="BIGINT" null="false"/> <column name="dep_name" type="CITEXT" null="false"/> <column name="dep_min_version_epoch" 
type="INTEGER" null="true"/> <column name="dep_min_version_canonical_upstream" type="TEXT" null="true"/> @@ -468,7 +789,7 @@ <column name="version_revision"/> </references> </foreign-key> - <index name="package_dependency_alternatives_object_id_i"> + <index name="package_dependency_alternative_dependencies_object_id_i"> <column name="tenant"/> <column name="name"/> <column name="version_epoch"/> @@ -507,7 +828,6 @@ <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> <column name="version_revision" type="INTEGER" null="false"/> <column name="index" type="BIGINT" null="false"/> - <column name="conditional" type="BOOLEAN" null="false"/> <column name="buildtime" type="BOOLEAN" null="false"/> <column name="comment" type="TEXT" null="false"/> <foreign-key name="tenant_fk" deferrable="DEFERRED"> @@ -553,7 +873,8 @@ <column name="version_revision" type="INTEGER" null="false"/> <column name="requirement_index" type="BIGINT" null="false"/> <column name="index" type="BIGINT" null="false"/> - <column name="id" type="TEXT" null="false"/> + <column name="enable" type="TEXT" null="true"/> + <column name="reflect" type="TEXT" null="true"/> <foreign-key name="tenant_fk" deferrable="DEFERRED"> <column name="tenant"/> <references table="tenant"> @@ -585,6 +906,48 @@ <column name="version_revision"/> </index> </table> + <table name="package_requirement_alternative_requirements" kind="container"> + <column name="tenant" type="TEXT" null="false"/> + <column name="name" type="CITEXT" null="false"/> + <column name="version_epoch" type="INTEGER" null="false"/> + <column name="version_canonical_upstream" type="TEXT" null="false"/> + <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> + <column name="version_revision" type="INTEGER" null="false"/> + <column name="requirement_index" type="BIGINT" null="false"/> + <column name="alternative_index" type="BIGINT" null="false"/> + <column name="index" type="BIGINT" null="false"/> + <column name="id" type="TEXT" null="false"/> + <foreign-key name="tenant_fk" deferrable="DEFERRED"> + <column name="tenant"/> + <references table="tenant"> + <column name="id"/> + </references> + </foreign-key> + <foreign-key name="object_id_fk" on-delete="CASCADE"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + <references table="package"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </references> + </foreign-key> + <index name="package_requirement_alternative_requirements_object_id_i"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </index> + </table> <table name="package_tests" kind="container"> <column name="tenant" type="TEXT" null="false"/> <column name="name" type="CITEXT" null="false"/> @@ -615,6 +978,8 @@ <column name="test_package_version_canonical_release" type="TEXT" null="true" options="COLLATE "C""/> <column name="test_package_version_revision" type="INTEGER" null="true"/> <column name="test_type" type="TEXT" null="false"/> + <column name="test_buildtime" type="BOOLEAN" null="false"/> + <column name="test_reflect" 
type="TEXT" null="true"/> <foreign-key name="tenant_fk" deferrable="DEFERRED"> <column name="tenant"/> <references table="tenant"> @@ -761,6 +1126,137 @@ <column name="index"/> </index> </table> + <table name="package_build_configs" kind="container"> + <column name="tenant" type="TEXT" null="false"/> + <column name="name" type="CITEXT" null="false"/> + <column name="version_epoch" type="INTEGER" null="false"/> + <column name="version_canonical_upstream" type="TEXT" null="false"/> + <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> + <column name="version_revision" type="INTEGER" null="false"/> + <column name="index" type="BIGINT" null="false"/> + <column name="config_name" type="TEXT" null="false"/> + <column name="config_arguments" type="TEXT" null="false"/> + <column name="config_comment" type="TEXT" null="false"/> + <foreign-key name="tenant_fk" deferrable="DEFERRED"> + <column name="tenant"/> + <references table="tenant"> + <column name="id"/> + </references> + </foreign-key> + <foreign-key name="object_id_fk" on-delete="CASCADE"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + <references table="package"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </references> + </foreign-key> + <index name="package_build_configs_object_id_i"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </index> + <index name="package_build_configs_index_i"> + <column name="index"/> + </index> + </table> + <table name="package_build_config_builds" kind="container"> + <column name="tenant" type="TEXT" null="false"/> + <column name="name" type="CITEXT" null="false"/> + <column name="version_epoch" type="INTEGER" null="false"/> + <column name="version_canonical_upstream" type="TEXT" null="false"/> + <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> + <column name="version_revision" type="INTEGER" null="false"/> + <column name="config_index" type="BIGINT" null="false"/> + <column name="index" type="BIGINT" null="false"/> + <column name="expression" type="TEXT" null="false"/> + <column name="comment" type="TEXT" null="false"/> + <foreign-key name="tenant_fk" deferrable="DEFERRED"> + <column name="tenant"/> + <references table="tenant"> + <column name="id"/> + </references> + </foreign-key> + <foreign-key name="object_id_fk" on-delete="CASCADE"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + <references table="package"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </references> + </foreign-key> + <index name="package_build_config_builds_object_id_i"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + 
<column name="version_revision"/> + </index> + </table> + <table name="package_build_config_constraints" kind="container"> + <column name="tenant" type="TEXT" null="false"/> + <column name="name" type="CITEXT" null="false"/> + <column name="version_epoch" type="INTEGER" null="false"/> + <column name="version_canonical_upstream" type="TEXT" null="false"/> + <column name="version_canonical_release" type="TEXT" null="false" options="COLLATE "C""/> + <column name="version_revision" type="INTEGER" null="false"/> + <column name="config_index" type="BIGINT" null="false"/> + <column name="index" type="BIGINT" null="false"/> + <column name="exclusion" type="BOOLEAN" null="false"/> + <column name="config" type="TEXT" null="false"/> + <column name="target" type="TEXT" null="true"/> + <column name="comment" type="TEXT" null="false"/> + <foreign-key name="tenant_fk" deferrable="DEFERRED"> + <column name="tenant"/> + <references table="tenant"> + <column name="id"/> + </references> + </foreign-key> + <foreign-key name="object_id_fk" on-delete="CASCADE"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + <references table="package"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </references> + </foreign-key> + <index name="package_build_config_constraints_object_id_i"> + <column name="tenant"/> + <column name="name"/> + <column name="version_epoch"/> + <column name="version_canonical_upstream"/> + <column name="version_canonical_release"/> + <column name="version_revision"/> + </index> + </table> <table name="package_other_repositories" kind="container"> <column name="tenant" type="TEXT" null="false"/> <column name="name" type="CITEXT" null="false"/> diff --git a/libbrep/types.hxx b/libbrep/types.hxx index 65dfc2d..3b5777d 100644 --- a/libbrep/types.hxx +++ b/libbrep/types.hxx @@ -21,12 +21,12 @@ #include <odb/lazy-ptr.hxx> -#include <libbutl/url.mxx> -#include <libbutl/path.mxx> -#include <libbutl/path-io.mxx> -#include <libbutl/optional.mxx> -#include <libbutl/timestamp.mxx> -#include <libbutl/small-vector.mxx> +#include <libbutl/url.hxx> +#include <libbutl/path.hxx> +#include <libbutl/path-io.hxx> +#include <libbutl/optional.hxx> +#include <libbutl/timestamp.hxx> +#include <libbutl/small-vector.hxx> namespace brep { @@ -50,7 +50,7 @@ namespace brep using std::weak_ptr; using std::vector; - using butl::small_vector; // <libbutl/small-vector.mxx> + using butl::small_vector; // <libbutl/small-vector.hxx> using strings = vector<string>; using cstrings = vector<const char*>; @@ -69,7 +69,7 @@ namespace brep using std::generic_category; - // <libbutl/optional.mxx> + // <libbutl/optional.hxx> // using butl::optional; using butl::nullopt; @@ -79,7 +79,7 @@ namespace brep using odb::lazy_shared_ptr; using odb::lazy_weak_ptr; - // <libbutl/path.mxx> + // <libbutl/path.hxx> // using butl::path; using butl::dir_path; @@ -91,11 +91,11 @@ namespace brep using butl::path_cast; - // <libbutl/url.mxx> + // <libbutl/url.hxx> // using butl::url; - // <libbutl/timestamp.mxx> + // <libbutl/timestamp.hxx> // using butl::system_clock; using butl::timestamp; diff --git a/libbrep/utility.hxx b/libbrep/utility.hxx index be27a71..fce8fb5 100644 --- a/libbrep/utility.hxx +++ b/libbrep/utility.hxx @@ -4,13 
+4,14 @@ #ifndef LIBBREP_UTILITY_HXX #define LIBBREP_UTILITY_HXX -#include <memory> // make_shared() -#include <string> // to_string() -#include <utility> // move(), forward(), declval(), make_pair() -#include <cassert> // assert() -#include <iterator> // make_move_iterator() - -#include <libbutl/utility.mxx> // icasecmp(), reverse_iterate(), +#include <memory> // make_shared() +#include <string> // to_string() +#include <utility> // move(), forward(), declval(), make_pair() +#include <cassert> // assert() +#include <iterator> // make_move_iterator() +#include <algorithm> // * + +#include <libbutl/utility.hxx> // icasecmp(), reverse_iterate(), // operator<<(ostream, exception) namespace brep @@ -24,7 +25,7 @@ namespace brep using std::make_move_iterator; using std::to_string; - // <libbutl/utility.mxx> + // <libbutl/utility.hxx> // using butl::utf8; using butl::icasecmp; diff --git a/libbrep/version.hxx.in b/libbrep/version.hxx.in index 3ac3752..9adb5ab 100644 --- a/libbrep/version.hxx.in +++ b/libbrep/version.hxx.in @@ -49,11 +49,11 @@ $libbbot.check(LIBBBOT_VERSION, LIBBBOT_SNAPSHOT)$ #include <odb/version.hxx> -$libodb.check(LIBODB_VERSION, LIBODB_SNAPSHOT)$ +$libodb.check(LIBODB_VERSION_FULL, LIBODB_SNAPSHOT)$ #include <odb/pgsql/version.hxx> -$libodb_pgsql.check(LIBODB_PGSQL_VERSION, LIBODB_PGSQL_SNAPSHOT)$ +$libodb_pgsql.check(LIBODB_PGSQL_VERSION_FULL, LIBODB_PGSQL_SNAPSHOT)$ // For now these are the same. // diff --git a/libbrep/wrapper-traits.hxx b/libbrep/wrapper-traits.hxx index 9dad27b..8c9d830 100644 --- a/libbrep/wrapper-traits.hxx +++ b/libbrep/wrapper-traits.hxx @@ -6,7 +6,7 @@ #include <odb/pre.hxx> -#include <libbutl/optional.mxx> +#include <libbutl/optional.hxx> #include <odb/wrapper-traits.hxx> diff --git a/load/buildfile b/load/buildfile index b55489f..4278f20 100644 --- a/load/buildfile +++ b/load/buildfile @@ -23,8 +23,8 @@ if $cli.configured cli.options += --std c++11 -I $src_root --include-with-brackets \ --include-prefix load --guard-prefix LOAD --generate-specifier \ ---cxx-prologue "#include <load/types-parsers.hxx>" --page-usage print_ \ ---ansi-color --long-usage +--generate-modifier --cxx-prologue "#include <load/types-parsers.hxx>" \ +--page-usage print_ --ansi-color --long-usage # Include the generated cli files into the distribution and don't remove # them when cleaning in src (so that clean results in a state identical to diff --git a/load/load.cli b/load/load.cli index 05bbb11..99d76f6 100644 --- a/load/load.cli +++ b/load/load.cli @@ -57,6 +57,14 @@ class options don't detect package dependency cycles." }; + bool --ignore-unresolved-tests + { + "Ignore tests, examples, and benchmarks package manifest entries which + cannot be resolved from the main package's complement repositories, + recursively. Note that in contrast to the --shallow option, such entries + will be removed from the main package manifests outright." + } + std::string --tenant { "<id>", @@ -64,6 +72,41 @@ class options specified, then the single-tenant mode is assumed." }; + bool --private + { + "Display the tenant packages in the web interface only in the tenant view + mode." + }; + + std::string --interactive + { + "<bkp>", + "Build the tenant packages interactively, stopping builds at the specified + breakpoint. Implies \cb{--private}." + }; + + std::string --service-id + { + "<id>", + "Third party service information to associate with the tenant being + created. Requires the \cb{--tenant} and \cb{--service-type} options to be + specified."
+ }; + + std::string --service-type + { + "<type>", + "Type of the service to associate with the tenant being created. Requires + the \cb{--service-id} option to be specified." + }; + + std::string --service-data + { + "<data>", + "Service data to associate with the tenant being created. Requires the + \cb{--service-id} option to be specified." + }; + brep::path --overrides-file { "<file>", @@ -124,6 +167,22 @@ class options this option to specify multiple package manager options." } + brep::path openssl = "openssl" + { + "<path>", + "The openssl program to be used for crypto operations. You can also + specify additional options that should be passed to the openssl program + with \cb{openssl-option}. If the openssl program is not explicitly + specified, then \cb{brep-load} will use \cb{openssl} by default." + } + + brep::strings openssl-option + { + "<opt>", + "Additional option to be passed to the openssl program (see \cb{openssl} + for details). Repeat this option to specify multiple openssl options." + } + std::string --pager // String to allow empty value. { "<path>", diff --git a/load/load.cxx b/load/load.cxx index 31230a7..474b443 100644 --- a/load/load.cxx +++ b/load/load.cxx @@ -5,10 +5,9 @@ #include <cerrno> #include <chrono> -#include <thread> // this_thread::sleep_for() -#include <cstring> // strncmp() +#include <thread> // this_thread::sleep_for() +#include <cstring> // strncmp() #include <iostream> -#include <algorithm> // find(), find_if() #include <odb/session.hxx> #include <odb/database.hxx> @@ -18,13 +17,14 @@ #include <odb/pgsql/database.hxx> -#include <libbutl/pager.mxx> -#include <libbutl/sha256.mxx> -#include <libbutl/process.mxx> -#include <libbutl/fdstream.mxx> -#include <libbutl/filesystem.mxx> -#include <libbutl/tab-parser.mxx> -#include <libbutl/manifest-parser.mxx> +#include <libbutl/pager.hxx> +#include <libbutl/sha256.hxx> +#include <libbutl/process.hxx> +#include <libbutl/openssl.hxx> +#include <libbutl/fdstream.hxx> +#include <libbutl/filesystem.hxx> +#include <libbutl/tab-parser.hxx> +#include <libbutl/manifest-parser.hxx> #include <libbpkg/manifest.hxx> @@ -262,7 +262,7 @@ load_repositories (path p) bad_line ("invalid buildable option value"); } else - bad_line ("invalid option '" + nv + "'"); + bad_line ("invalid option '" + nv + '\''); } // For now cache option is mandatory. @@ -365,11 +365,13 @@ repository_info (const options& lo, const string& rl, const cstrings& options) // the repository. Should be called once per repository. // static void -load_packages (const shared_ptr<repository>& rp, +load_packages (const options& lo, + const shared_ptr<repository>& rp, const repository_location& cl, database& db, bool ignore_unknown, - const manifest_name_values& overrides) + const manifest_name_values& overrides, + const string& overrides_name) { // packages_timestamp other than timestamp_nonexistent signals the // repository packages are already loaded.
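Note that the --service-id, --service-type, and --service-data options added to load.cli above must be used consistently: --service-id requires both --tenant and a non-empty --service-type, while --service-type and --service-data in turn require --service-id (this is enforced by the verification logic added to load.cxx further below). A hypothetical invocation could thus look along these lines (the tenant id, service values, and loadtab path are all made up for illustration):

$ brep-load --tenant 4ea57f42ca2e8b24 \
  --service-id 86574 \
  --service-type github \
  config/loadtab

Similarly, the openssl and openssl-option values are used by the package loading code below to calculate custom build bot public key fingerprints, roughly equivalent to the following pipeline (a sketch; the key file name is made up):

$ openssl pkey -pubin -outform DER <bot-key.pem | sha256sum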
@@ -405,8 +407,8 @@ load_packages (const shared_ptr<repository>& rp, mp, move (nv), ignore_unknown, - false /* complete_depends */, - package_manifest_flags::forbid_incomplete_dependencies); + false /* complete_values */, + package_manifest_flags::forbid_incomplete_values); } else pms = pkg_package_manifests (mp, ignore_unknown); @@ -418,11 +420,15 @@ load_packages (const shared_ptr<repository>& rp, } using brep::dependency; + using brep::dependency_alternative; + using brep::dependency_alternatives; + + const string& tenant (rp->tenant); for (package_manifest& pm: pms) { shared_ptr<package> p ( - db.find<package> (package_id (rp->tenant, pm.name, pm.version))); + db.find<package> (package_id (tenant, pm.name, pm.version))); // sha256sum should always be present if the package manifest comes from // the packages.manifest file belonging to the pkg repository. @@ -431,94 +437,174 @@ load_packages (const shared_ptr<repository>& rp, if (p == nullptr) { - if (rp->internal) + // Apply the package manifest overrides. + // + if (!overrides.empty ()) + try { - try - { - pm.override (overrides, "" /* name */); - } - catch (const manifest_parsing&) + pm.override (overrides, overrides_name); + } + catch (const manifest_parsing& e) + { + cerr << "error: unable to override " << pm.name << ' ' << pm.version + << " manifest: " << e << endl; + + throw failed (); + } + + // Convert the package manifest build configurations (contain public + // keys data) into the brep's build package configurations (contain + // public key object lazy pointers). Keep the bot key lists empty if + // the package is not buildable. + // + package_build_configs build_configs; + + if (!pm.build_configs.empty ()) + { + build_configs.reserve (pm.build_configs.size ()); + + for (bpkg::build_package_config& c: pm.build_configs) { - // Overrides are already validated (see below). - // - assert (false); + build_configs.emplace_back (move (c.name), + move (c.arguments), + move (c.comment), + move (c.builds), + move (c.constraints), + move (c.auxiliaries), + package_build_bot_keys (), + move (c.email), + move (c.warning_email), + move (c.error_email)); } + } + if (rp->internal) + { // Create internal package object. // - optional<string> dsc; - optional<text_type> dst; - - if (pm.description) + // Return nullopt if the text is in a file (can happen if the + // repository is of a type other than pkg) or if the type is not + // recognized (can only happen in the "ignore unknown" mode). + // + auto to_typed_text = [&cl, ignore_unknown] (typed_text_file&& v) { + optional<typed_text> r; + // The description value should not be of the file type if the // package manifest comes from the pkg repository. // - assert (!pm.description->file || cl.type () != repository_type::pkg); + assert (!v.file || cl.type () != repository_type::pkg); - if (!pm.description->file) + if (!v.file) { - dst = pm.effective_description_type (ignore_unknown); + // Cannot throw since the manifest parser has already verified the + // effective type in the same "ignore unknown" mode. + // + optional<text_type> t (v.effective_type (ignore_unknown)); // If the description type is unknown (which may be the case for // some "transitional" period and only if --ignore-unknown is // specified) we just silently drop the description. // - assert (dst || ignore_unknown); + assert (t || ignore_unknown); - if (dst) - dsc = move (pm.description->text); + if (t) + r = typed_text {move (v.text), *t}; } - } - string chn; + return r; + }; + + // Convert descriptions. 
+ // + optional<typed_text> ds ( + pm.description + ? to_typed_text (move (*pm.description)) + : optional<typed_text> ()); + + optional<typed_text> pds ( + pm.package_description + ? to_typed_text (move (*pm.package_description)) + : optional<typed_text> ()); + + // Merge changes into a single typed text object. + // + // If the text type is not recognized for any changes entry or some + // entry refers to a file, then assume that no changes are specified. + // + optional<typed_text> chn; + for (auto& c: pm.changes) { - // The changes value should not be of the file type if the package - // manifest comes from the pkg repository. - // - assert (!c.file || cl.type () != repository_type::pkg); + optional<typed_text> tc (to_typed_text (move (c))); - if (!c.file) + if (!tc) { - if (chn.empty ()) - chn = move (c.text); - else - { - if (chn.back () != '\n') - chn += '\n'; // Always have a blank line as a separator. + chn = nullopt; + break; + } - chn += "\n" + c.text; - } + if (!chn) + { + chn = move (*tc); + } + else + { + // Should have failed while parsing the manifest otherwise. + // + assert (tc->type == chn->type); + + string& v (chn->text); + + assert (!v.empty ()); // Changes manifest value cannot be empty. + + if (v.back () != '\n') + v += '\n'; // Always have a blank line as a separator. + + v += '\n'; + v += tc->text; } } - dependencies ds; + dependencies tds; - for (auto& pda: pm.dependencies) + for (auto& das: pm.dependencies) { - // Ignore special build2 and bpkg dependencies. We may not have - // packages for them and also showing them for every package is - // probably not very helpful. - // - if (pda.buildtime && !pda.empty ()) + dependency_alternatives tdas (das.buildtime, move (das.comment)); + + for (auto& da: das) { - const package_name& n (pda.front ().name); - if (n == "build2" || n == "bpkg") - continue; - } + dependency_alternative tda (move (da.enable), + move (da.reflect), + move (da.prefer), + move (da.accept), + move (da.require)); - ds.emplace_back (pda.conditional, pda.buildtime, move (pda.comment)); + for (auto& d: da) + { + package_name& n (d.name); - for (auto& pd: pda) - { - // The package member will be assigned during dependency - // resolution procedure. - // - ds.back ().push_back (dependency {move (pd.name), - move (pd.constraint), - nullptr /* package */}); + // Ignore special build2 and bpkg dependencies. We may not have + // packages for them and also showing them for every package is + // probably not very helpful. + // + if (das.buildtime && (n == "build2" || n == "bpkg")) + continue; + + // The package member will be assigned during dependency + // resolution procedure. + // + tda.push_back (dependency {move (n), + move (d.constraint), + nullptr /* package */}); + } + + if (!tda.empty ()) + tdas.push_back (move (tda)); } + + if (!tdas.empty ()) + tds.push_back (move (tdas)); } small_vector<brep::test_dependency, 1> ts; @@ -528,13 +614,119 @@ load_packages (const shared_ptr<repository>& rp, ts.reserve (pm.tests.size ()); for (bpkg::test_dependency& td: pm.tests) - ts.emplace_back (move (td.name), td.type, move (td.constraint)); + ts.emplace_back (move (td.name), + td.type, + td.buildtime, + move (td.constraint), + move (td.enable), + move (td.reflect)); } // Cache before the package name is moved. 
// package_name project (pm.effective_project ()); + // If the package is buildable, then save the package manifest's + // common and build configuration-specific bot keys into the database + // and translate the key data lists into the lists of the public key + // object lazy pointers. + // + package_build_bot_keys bot_keys; + + if (rp->buildable) + { + // Save the specified bot keys into the database as public key + // objects, unless they are already persisted. Translate these keys + // into the public key object lazy pointers. + // + auto keys_to_objects = [&lo, + &pm, + &tenant, + &db] (strings&& keys) + { + package_build_bot_keys r; + + if (keys.empty ()) + return r; + + r.reserve (keys.size ()); + + for (string& key: keys) + { + // Calculate the key fingerprint. + // + string fp; + + try + { + openssl os (path ("-"), path ("-"), 2, + lo.openssl (), + "pkey", + lo.openssl_option (), "-pubin", "-outform", "DER"); + + os.out << key; + os.out.close (); + + fp = sha256 (os.in).string (); + os.in.close (); + + if (!os.wait ()) + { + cerr << "process " << lo.openssl () << ' ' << *os.exit + << endl; + + throw io_error (""); + } + } + catch (const io_error&) + { + cerr << "error: unable to convert custom build bot public key " + << "for package " << pm.name << ' ' << pm.version << endl + << " info: key:" << endl + << key << endl; + + throw failed (); + } + catch (const process_error& e) + { + cerr << "error: unable to convert custom build bot public key " + << "for package " << pm.name << ' ' << pm.version << ": " + << e << endl; + + throw failed (); + } + + // Try to find the public_key object for the calculated + // fingerprint. If it doesn't exist, then create and persist the + // new object. + // + public_key_id id (tenant, move (fp)); + shared_ptr<public_key> k (db.find<public_key> (id)); + + if (k == nullptr) + { + k = make_shared<public_key> (move (id.tenant), + move (id.fingerprint), + move (key)); + + db.persist (k); + } + + r.push_back (move (k)); + } + + return r; + }; + + bot_keys = keys_to_objects (move (pm.build_bot_keys)); + + assert (build_configs.size () == pm.build_configs.size ()); + + for (size_t i (0); i != build_configs.size (); ++i) + build_configs[i].bot_keys = + keys_to_objects (move (pm.build_configs[i].bot_keys)); + } + p = make_shared<package> ( move (pm.name), move (pm.version), @@ -545,8 +737,8 @@ load_packages (const shared_ptr<repository>& rp, move (pm.license_alternatives), move (pm.topics), move (pm.keywords), - move (dsc), - move (dst), + move (ds), + move (pds), move (chn), move (pm.url), move (pm.doc_url), @@ -557,11 +749,14 @@ load_packages (const shared_ptr<repository>& rp, move (pm.build_email), move (pm.build_warning_email), move (pm.build_error_email), - move (ds), + move (tds), move (pm.requirements), move (ts), move (pm.builds), move (pm.build_constraints), + move (pm.build_auxiliaries), + move (bot_keys), + move (build_configs), move (pm.location), move (pm.fragment), move (pm.sha256sum), @@ -574,6 +769,8 @@ load_packages (const shared_ptr<repository>& rp, move (pm.version), move (pm.builds), move (pm.build_constraints), + move (pm.build_auxiliaries), + move (build_configs), rp); db.persist (p); @@ -664,6 +861,9 @@ load_repositories (const options& lo, manifest_parser mp (ifs, p.string ()); rpm = pkg_repository_manifests (mp, ignore_unknown); + + if (rpm.empty ()) + rpm.emplace_back (repository_manifest ()); // Add the base repository. 
} catch (const io_error& e) { @@ -952,11 +1152,13 @@ load_repositories (const options& lo, // We don't apply overrides to the external packages. // - load_packages (pr, + load_packages (lo, + pr, !pr->cache_location.empty () ? pr->cache_location : cl, db, ignore_unknown, - manifest_name_values () /* overrides */); + manifest_name_values () /* overrides */, + "" /* overrides_name */); load_repositories (lo, pr, @@ -1004,18 +1206,26 @@ find (const lazy_shared_ptr<repository>& r, return false; } -// Resolve package run-time dependencies and external tests. Make sure that -// the best matching dependency belongs to the package repositories, their +// Resolve package regular dependencies and external tests. Make sure that the +// best matching dependency belongs to the package repositories, their // complements, recursively, or their immediate prerequisite repositories -// (only for run-time dependencies). Set the buildable flag to false for the -// resolved external tests packages. Fail if unable to resolve a dependency, -// unless ignore_unresolved is true in which case leave this dependency -// NULL. Should be called once per internal package. +// (only for regular dependencies). Set the buildable flag to false for the +// resolved external tests packages. Fail if unable to resolve a regular +// dependency, unless ignore_unresolved is true in which case leave this +// dependency NULL. Fail if unable to resolve an external test, unless +// ignore_unresolved or ignore_unresolved_tests is true in which case leave +// this dependency NULL, if ignore_unresolved_tests is false, and remove the +// respective tests manifest entry otherwise. Should be called once per +// internal package. // static void -resolve_dependencies (package& p, database& db, bool ignore_unresolved) +resolve_dependencies (package& p, + database& db, + bool ignore_unresolved, + bool ignore_unresolved_tests) { using brep::dependency; + using brep::dependency_alternative; using brep::dependency_alternatives; // Resolve dependencies for internal packages only. @@ -1114,34 +1324,50 @@ resolve_dependencies (package& p, database& db, bool ignore_unresolved) return false; }; - auto bail = [&p] (const dependency& d, const char* what) + auto bail = [&p] (const dependency& d, const string& what) { - cerr << "error: can't resolve " << what << " " << d << " for the package " - << p.name << " " << p.version << endl + cerr << "error: can't resolve " << what << ' ' << d << " for the package " + << p.name << ' ' << p.version << endl << " info: repository " << p.internal_repository.load ()->location << " appears to be broken" << endl; throw failed (); }; - for (dependency_alternatives& da: p.dependencies) + for (dependency_alternatives& das: p.dependencies) { - for (dependency& d: da) + // Practically it is enough to resolve at least one dependency alternative + // to build a package. Meanwhile here we consider an error specifying in + // the manifest file an alternative which can't be resolved, unless + // unresolved dependencies are allowed. + // + for (dependency_alternative& da: das) { - // Practically it is enough to resolve at least one dependency - // alternative to build a package. Meanwhile here we consider an error - // specifying in the manifest file an alternative which can't be - // resolved, unless unresolved dependencies are allowed. 
- // - if (!resolve (d, false /* test */) && !ignore_unresolved) - bail (d, "dependency"); + for (dependency& d: da) + { + if (!resolve (d, false /* test */) && !ignore_unresolved) + bail (d, "dependency"); + } } } - for (brep::test_dependency& td: p.tests) + for (auto i (p.tests.begin ()); i != p.tests.end (); ) { - if (!resolve (td, true /* test */) && !ignore_unresolved) - bail (td, td.name.string ().c_str ()); + brep::test_dependency& td (*i); + + if (!resolve (td, true /* test */)) + { + if (!ignore_unresolved && !ignore_unresolved_tests) + bail (td, to_string (td.type)); + + if (ignore_unresolved_tests) + { + i = p.tests.erase (i); + continue; + } + } + + ++i; } db.update (p); // Update the package state. @@ -1198,10 +1424,13 @@ detect_dependency_cycle (const package_id& id, chain.push_back (id); shared_ptr<package> p (db.load<package> (id)); - for (const auto& da: p->dependencies) + for (const auto& das: p->dependencies) { - for (const auto& d: da) - detect_dependency_cycle (d.package.object_id (), chain, db); + for (const auto& da: das) + { + for (const auto& d: da) + detect_dependency_cycle (d.package.object_id (), chain, db); + } } chain.pop_back (); @@ -1421,8 +1650,46 @@ try throw failed (); } + // Verify the --service-* options. + // + if (ops.service_id_specified ()) + { + if (!ops.tenant_specified ()) + { + cerr << "error: --service-id requires --tenant" << endl; + throw failed (); + } + + if (ops.service_type ().empty ()) + { + cerr << "error: --service-id requires --service-type" + << endl; + throw failed (); + } + } + else + { + if (ops.service_type_specified ()) + { + cerr << "error: --service-type requires --service-id" + << endl; + throw failed (); + } + + if (ops.service_data_specified ()) + { + cerr << "error: --service-data requires --service-id" + << endl; + throw failed (); + } + } + // Parse and validate overrides, if specified. // + // Note that here we make sure that the overrides manifest is valid. + // Applying overrides to a specific package manifest may still fail (see + // package_manifest::validate_overrides() for details). + // manifest_name_values overrides; if (ops.overrides_file_specified ()) @@ -1474,6 +1741,11 @@ try throw failed (); } + // Note: the interactive tenant implies private. + // + if (ops.interactive_specified ()) + ops.private_ (true); + // Load the description of all the internal repositories from the // configuration file. // @@ -1493,6 +1765,7 @@ try { db.erase_query<package> (); db.erase_query<repository> (); + db.erase_query<public_key> (); db.erase_query<tenant> (); } else // Multi-tenant mode. @@ -1505,13 +1778,39 @@ try db.erase_query<repository> ( query<repository>::id.tenant.in_range (ts.begin (), ts.end ())); + db.erase_query<public_key> ( + query<public_key>::id.tenant.in_range (ts.begin (), ts.end ())); + db.erase_query<tenant> ( query<tenant>::id.in_range (ts.begin (), ts.end ())); } // Persist the tenant. // - db.persist (tenant (tnt)); + // Note that if the tenant service is specified and some tenant with the + // same service id and type is already persisted, then we will end up with + // the `object already persistent` error and terminate with the exit code + // 1 (fatal error). We could potentially dedicate a special exit code for + // such a case, so that the caller may recognize it and behave accordingly + // (CI request handler can treat it as a client error rather than an + // internal error, etc). However, let's first see if it ever becomes a + // problem. 
+ // + optional<tenant_service> service; + + if (ops.service_id_specified ()) + service = tenant_service (ops.service_id (), + ops.service_type (), + (ops.service_data_specified () + ? ops.service_data () + : optional<string> ())); + + db.persist (tenant (tnt, + ops.private_ (), + (ops.interactive_specified () + ? ops.interactive () + : optional<string> ()), + move (service))); // On the first pass over the internal repositories we load their // certificate information and packages. @@ -1536,11 +1835,13 @@ try ir.buildable, priority++)); - load_packages (r, + load_packages (ops, + r, r->cache_location, db, ops.ignore_unknown (), - overrides); + overrides, + ops.overrides_file ().string ()); } // On the second pass over the internal repositories we load their @@ -1572,7 +1873,10 @@ try db.query<package> ( query::id.tenant == tnt && query::internal_repository.canonical_name.is_not_null ())) - resolve_dependencies (p, db, ops.shallow ()); + resolve_dependencies (p, + db, + ops.shallow (), + ops.ignore_unresolved_tests ()); if (!ops.shallow ()) { @@ -1,6 +1,6 @@ : 1 name: brep -version: 0.14.0-a.0.z +version: 0.17.0-a.0.z project: build2 summary: build2 package repository web interface license: MIT @@ -13,23 +13,40 @@ doc-url: https://build2.org/doc.xhtml src-url: https://git.build2.org/cgit/brep/tree/ email: users@build2.org build-warning-email: builds@build2.org -builds: linux freebsd ; Only supports Linux and FreeBSD. -builds: -linux -freebsd ; Requires system packages. requires: c++14 requires: postgresql >= 9.0 requires: apache2 ; Including development files (httpd.h header, etc). -depends: * build2 >= 0.13.0 -depends: * bpkg >= 0.13.0 -# @@ Should probably become conditional dependency. -requires: ? cli ; Only required if changing .cli files. +depends: * build2 >= 0.16.0- +depends: * bpkg >= 0.16.0- +# @@ DEP Should probably become conditional dependency. +#requires: ? cli ; Only required if changing .cli files. depends: libapr1 depends: libapreq2 -depends: libcmark-gfm == 0.29.0-a.1 -depends: libcmark-gfm-extensions == 0.29.0-a.1 -depends: libstudxml [1.1.0-b.9.1 1.1.0-b.10) -depends: libodb [2.5.0-b.20.1 2.5.0-b.21) -depends: libodb-pgsql [2.5.0-b.20.1 2.5.0-b.21) -depends: libbutl [0.14.0-a.0.1 0.14.0-a.1) -depends: libbpkg [0.14.0-a.0.1 0.14.0-a.1) -depends: libbbot [0.14.0-a.0.1 0.14.0-a.1) -depends: libbutl.bash [0.14.0-a.0.1 0.14.0-a.1) +depends: libcmark-gfm == 0.29.0-a.4 +depends: libcmark-gfm-extensions == 0.29.0-a.4 +depends: libstudxml ^1.1.0-b.10 +depends: libodb [2.5.0-b.26.1 2.5.0-b.27) +depends: libodb-pgsql [2.5.0-b.26.1 2.5.0-b.27) +depends: libbutl [0.17.0-a.0.1 0.17.0-a.1) +depends: libbpkg [0.17.0-a.0.1 0.17.0-a.1) +depends: libbbot [0.17.0-a.0.1 0.17.0-a.1) +depends: libbutl.bash [0.17.0-a.0.1 0.17.0-a.1) +depends: bpkg-util [0.17.0-a.0.1 0.17.0-a.1) + +# This package depends on platform-specific implementation libraries that +# are (currently) not packaged and need to come from the system package +# manager. It also requires rsync for tests.
+# +builds: none + +debian-builds: sys +debian-build-exclude: linux_debian_12-** ; libapreq2 not available +debian-build-include: linux_debian*-** +debian-build-include: linux_ubuntu*-** +debian-build-exclude: ** +debian-build-config: sys:apache2-dev ?sys:libapr1 ?sys:libapreq2 ?sys:libpq sys:rsync + +fedora-builds: sys +fedora-build-include: linux_fedora*-** +fedora-build-exclude: ** +fedora-build-config: sys:httpd-devel ?sys:libapr1 ?sys:libapreq2 ?sys:libpq sys:rsync diff --git a/migrate/migrate.cxx b/migrate/migrate.cxx index 88c87c1..090fcac 100644 --- a/migrate/migrate.cxx +++ b/migrate/migrate.cxx @@ -12,8 +12,10 @@ #include <odb/pgsql/database.hxx> -#include <libbutl/pager.mxx> +#include <libbutl/pager.hxx> +#include <libbrep/build.hxx> +#include <libbrep/build-odb.hxx> #include <libbrep/package.hxx> #include <libbrep/package-odb.hxx> #include <libbrep/database-lock.hxx> @@ -113,7 +115,7 @@ schema (const char* s, string name) string kw; i >> kw; - statement += " " + kw; + statement += ' ' + kw; if (strcasecmp (kw.c_str (), "FUNCTION") == 0) { @@ -131,7 +133,7 @@ schema (const char* s, string name) else if (strcasecmp (kw.c_str (), "FOREIGN") == 0) { i >> kw; - statement += " " + kw; + statement += ' ' + kw; valid = strcasecmp (kw.c_str (), "TABLE") == 0; // Fall through. @@ -218,8 +220,28 @@ struct package_migration_entry: package_migration_entry_base<v> : package_migration_entry_base<v> (f, "package") {} }; -static const package_migration_entry<20> -package_migrate_v20 ([] (database& db) +static const package_migration_entry<26> +package_migrate_v26 ([] (database& db) +{ +}); +#endif + +// Register the data migration functions for the build database schema. +// +#if 0 +template <schema_version v> +using build_migration_entry_base = + data_migration_entry<v, LIBBREP_BUILD_SCHEMA_VERSION_BASE>; + +template <schema_version v> +struct build_migration_entry: build_migration_entry_base<v> +{ + build_migration_entry (void (*f) (database& db)) + : build_migration_entry_base<v> (f, "build") {} +}; + +static const build_migration_entry<19> +build_migrate_v19 ([] (database& db) { }); #endif diff --git a/mod/build-config-module.cxx b/mod/build-config-module.cxx index 831cb78..97c9f9e 100644 --- a/mod/build-config-module.cxx +++ b/mod/build-config-module.cxx @@ -8,36 +8,35 @@ #include <map> #include <sstream> -#include <libbutl/sha256.mxx> -#include <libbutl/utility.mxx> // throw_generic_error() -#include <libbutl/openssl.mxx> -#include <libbutl/filesystem.mxx> // dir_iterator, dir_entry +#include <libbutl/sha256.hxx> +#include <libbutl/utility.hxx> // throw_generic_error() +#include <libbutl/openssl.hxx> +#include <libbutl/filesystem.hxx> // dir_iterator, dir_entry namespace brep { using namespace std; using namespace butl; using namespace bpkg; - using namespace bbot; - // Return pointer to the shared build configurations instance, creating one - // on the first call. Throw tab_parsing on parsing error, io_error on the - // underlying OS error. Note: not thread-safe. + // Return pointer to the shared build target configurations instance, + // creating one on the first call. Throw tab_parsing on parsing error, + // io_error on the underlying OS error. Note: not thread-safe. 
// - static shared_ptr<const build_configs> + static shared_ptr<const build_target_configs> shared_build_config (const path& p) { - static map<path, weak_ptr<build_configs>> configs; + static map<path, weak_ptr<build_target_configs>> configs; auto i (configs.find (p)); if (i != configs.end ()) { - if (shared_ptr<build_configs> c = i->second.lock ()) + if (shared_ptr<build_target_configs> c = i->second.lock ()) return c; } - shared_ptr<build_configs> c ( - make_shared<build_configs> (parse_buildtab (p))); + shared_ptr<build_target_configs> c ( + make_shared<build_target_configs> (bbot::parse_buildtab (p))); configs[p] = c; return c; @@ -72,7 +71,7 @@ namespace brep try { - for (const dir_entry& de: dir_iterator (d, false /* ignore_dangling */)) + for (const dir_entry& de: dir_iterator (d, dir_iterator::no_follow)) { if (de.path ().extension () == "pem" && de.type () == entry_type::regular) @@ -109,7 +108,7 @@ namespace brep catch (const system_error& e) { ostringstream os; - os<< "unable to iterate over agents keys directory '" << d << "'"; + os << "unable to iterate over agents keys directory '" << d << "'"; throw_generic_error (e.code ().value (), os.str ().c_str ()); } @@ -122,7 +121,7 @@ namespace brep { try { - build_conf_ = shared_build_config (bo.build_config ()); + target_conf_ = shared_build_config (bo.build_config ()); } catch (const io_error& e) { @@ -137,29 +136,21 @@ namespace brep bot_agent_key_map_ = shared_bot_agent_keys (bo, bo.build_bot_agent_keys ()); - cstrings conf_names; - - using conf_map_type = map<const char*, - const build_config*, - compare_c_string>; + using conf_map_type = map<build_target_config_id, + const build_target_config*>; conf_map_type conf_map; - for (const auto& c: *build_conf_) - { - const char* cn (c.name.c_str ()); - conf_map[cn] = &c; - conf_names.push_back (cn); - } + for (const auto& c: *target_conf_) + conf_map[build_target_config_id {c.target, c.name}] = &c; - build_conf_names_ = make_shared<cstrings> (move (conf_names)); - build_conf_map_ = make_shared<conf_map_type> (move (conf_map)); + target_conf_map_ = make_shared<conf_map_type> (move (conf_map)); } bool build_config_module:: - belongs (const bbot::build_config& cfg, const char* cls) const + belongs (const build_target_config& cfg, const char* cls) const { - const map<string, string>& im (build_conf_->class_inheritance_map); + const map<string, string>& im (target_conf_->class_inheritance_map); for (const string& c: cfg.classes) { diff --git a/mod/build-config-module.hxx b/mod/build-config-module.hxx index ba2698d..c1630b0 100644 --- a/mod/build-config-module.hxx +++ b/mod/build-config-module.hxx @@ -6,17 +6,15 @@ #include <map> -#include <libbutl/utility.mxx> // compare_c_string +#include <libbutl/target-triplet.hxx> #include <libbpkg/manifest.hxx> -#include <libbbot/build-config.hxx> - #include <libbrep/types.hxx> #include <libbrep/utility.hxx> -#include <mod/build-config.hxx> #include <mod/module-options.hxx> +#include <mod/build-target-config.hxx> // Base class for modules that utilize the build controller configuration. 
// @@ -38,17 +36,20 @@ namespace brep void init (const options::build&); + template <typename K> bool - exclude (const small_vector<bpkg::build_class_expr, 1>& exprs, - const vector<bpkg::build_constraint>& constrs, - const bbot::build_config& cfg, + exclude (const build_package_config_template<K>& pc, + const build_class_exprs& common_builds, + const build_constraints& common_constraints, + const build_target_config& tc, string* reason = nullptr, bool default_all_ucs = false) const { - return brep::exclude (exprs, - constrs, - cfg, - build_conf_->class_inheritance_map, + return brep::exclude (pc, + common_builds, + common_constraints, + tc, + target_conf_->class_inheritance_map, reason, default_all_ucs); } @@ -56,26 +57,30 @@ namespace brep // Check if the configuration belongs to the specified class. // bool - belongs (const bbot::build_config&, const char*) const; + belongs (const build_target_config&, const char*) const; bool - belongs (const bbot::build_config& cfg, const string& cls) const + belongs (const build_target_config& cfg, const string& cls) const { return belongs (cfg, cls.c_str ()); } - // Configuration/toolchain combination that, in particular, can be used as - // a set value. + // Target/configuration/toolchain combination that, in particular, can be + // used as a set value. // - // Note: contains shallow references to the configuration, toolchain name, - // and version. + // Note: all members are the shallow references. // struct config_toolchain { - const string& configuration; + const butl::target_triplet& target; + const string& target_config; + const string& package_config; const string& toolchain_name; const bpkg::version& toolchain_version; + // Note: the comparison reflects the order of unbuilt configurations on + // the Builds page. + // bool operator< (const config_toolchain& ct) const { @@ -85,19 +90,24 @@ namespace brep if (toolchain_version != ct.toolchain_version) return toolchain_version > ct.toolchain_version; - return configuration.compare (ct.configuration) < 0; + if (int r = target.compare (ct.target)) + return r < 0; + + if (int r = target_config.compare (ct.target_config)) + return r < 0; + + return package_config.compare (ct.package_config) < 0; } }; protected: // Build configurations. // - shared_ptr<const bbot::build_configs> build_conf_; - shared_ptr<const cstrings> build_conf_names_; + shared_ptr<const build_target_configs> target_conf_; - shared_ptr<const std::map<const char*, - const bbot::build_config*, - butl::compare_c_string>> build_conf_map_; + shared_ptr<const std::map<build_target_config_id, + const build_target_config*>> + target_conf_map_; // Map of build bot agent public keys fingerprints to the key file paths. // diff --git a/mod/build-config.hxx b/mod/build-config.hxx deleted file mode 100644 index e8dfe07..0000000 --- a/mod/build-config.hxx +++ /dev/null @@ -1,49 +0,0 @@ -// file : mod/build-config.hxx -*- C++ -*- -// license : MIT; see accompanying LICENSE file - -#ifndef MOD_BUILD_CONFIG_HXX -#define MOD_BUILD_CONFIG_HXX - -#include <map> - -#include <libbpkg/manifest.hxx> - -#include <libbbot/build-config.hxx> - -#include <libbrep/types.hxx> -#include <libbrep/utility.hxx> - -namespace brep -{ - // Return true if the specified build configuration is excluded by a package - // based on its underlying build class set, build class expressions, and - // build constraints, potentially extending the underlying set with the - // special classes. Set the exclusion reason if requested. 
Optionally use - // the `all` class as a default underlying build class set rather than the - // `default` class (which is, for example, the case for the external test - // packages not to reduce their build configuration set needlessly). - // - bool - exclude (const small_vector<bpkg::build_class_expr, 1>&, - const vector<bpkg::build_constraint>&, - const bbot::build_config&, - const std::map<string, string>& class_inheritance_map, - string* reason = nullptr, - bool default_all_ucs = false); - - // Convert dash-separated components (target, build configuration name, - // machine name) or a pattern thereof into a path, replacing dashes with - // slashes (directory separators), `**` with `*/**/*`, and appending the - // trailing slash for a subsequent match using the path_match() - // functionality (the idea here is for `linux**` to match `linux-gcc` which - // is quite natural to expect). Throw invalid_path if the resulting path is - // invalid. - // - // Note that the match_absent path match flag must be used for the above - // `**` transformation to work. - // - path - dash_components_to_path (const string&); -} - -#endif // MOD_BUILD_CONFIG diff --git a/mod/build-result-module.cxx b/mod/build-result-module.cxx new file mode 100644 index 0000000..9ac1390 --- /dev/null +++ b/mod/build-result-module.cxx @@ -0,0 +1,349 @@ +// file : mod/build-result-module.cxx -*- C++ -*- +// license : MIT; see accompanying LICENSE file + +#include <mod/build-result-module.hxx> + +#include <odb/database.hxx> + +#include <libbutl/openssl.hxx> +#include <libbutl/fdstream.hxx> +#include <libbutl/process-io.hxx> +#include <libbutl/semantic-version.hxx> + +#include <libbrep/build-package.hxx> +#include <libbrep/build-package-odb.hxx> + +namespace brep +{ + using namespace std; + using namespace butl; + + // While currently the user-defined copy constructor is not required (we + // don't need to deep copy nullptr's), it is a good idea to keep the + // placeholder ready for less trivial cases. + // + build_result_module:: + build_result_module (const build_result_module& r) + : database_module (r), + build_config_module (r), + use_openssl_pkeyutl_ (r.initialized_ ? r.use_openssl_pkeyutl_ : false) + { + } + + void build_result_module:: + init (const options::build& bo, const options::build_db& bdo) + { + HANDLER_DIAG; + + build_config_module::init (bo); + database_module::init (bdo, bdo.build_db_retry ()); + + try + { + optional<openssl_info> oi ( + openssl::info ([&trace, this] (const char* args[], size_t n) + { + l2 ([&]{trace << process_args {args, n};}); + }, + 2, + bo.openssl ())); + + use_openssl_pkeyutl_ = oi && + oi->name == "OpenSSL" && + oi->version >= semantic_version {3, 0, 0}; + } + catch (const system_error& e) + { + fail << "unable to obtain openssl version: " << e; + } + } + + build_result_module::parse_session_result build_result_module:: + parse_session (const string& s) const + { + using brep::version; // Not to confuse with module::version. + + parse_session_result r; + + size_t p (s.find ('/')); // End of tenant. + + if (p == string::npos) + throw invalid_argument ("no package name"); + + if (tenant.compare (0, tenant.size (), s, 0, p) != 0) + throw invalid_argument ("tenant mismatch"); + + size_t b (p + 1); // Start of package name. + p = s.find ('/', b); // End of package name. 
+ + if (p == b) + throw invalid_argument ("empty package name"); + + if (p == string::npos) + throw invalid_argument ("no package version"); + + package_name name; + + try + { + name = package_name (string (s, b, p - b)); + } + catch (const invalid_argument& e) + { + throw invalid_argument ( + string ("invalid package name : ") + e.what ()); + } + + b = p + 1; // Start of version. + p = s.find ('/', b); // End of version. + + if (p == string::npos) + throw invalid_argument ("no target"); + + auto parse_version = [&s, &b, &p] (const char* what) -> version + { + // Intercept exception handling to add the parsing error attribution. + // + try + { + return brep::version (string (s, b, p - b)); + } + catch (const invalid_argument& e) + { + throw invalid_argument ( + string ("invalid ") + what + ": " + e.what ()); + } + }; + + r.package_version = parse_version ("package version"); + + b = p + 1; // Start of target. + p = s.find ('/', b); // End of target. + + if (p == string::npos) + throw invalid_argument ("no target configuration name"); + + target_triplet target; + try + { + target = target_triplet (string (s, b, p - b)); + } + catch (const invalid_argument& e) + { + throw invalid_argument (string ("invalid target: ") + e.what ()); + } + + b = p + 1; // Start of target configuration name. + p = s.find ('/', b); // End of target configuration name. + + if (p == string::npos) + throw invalid_argument ("no package configuration name"); + + string target_config (s, b, p - b); + + if (target_config.empty ()) + throw invalid_argument ("empty target configuration name"); + + b = p + 1; // Start of package configuration name. + p = s.find ('/', b); // End of package configuration name. + + if (p == string::npos) + throw invalid_argument ("no toolchain name"); + + string package_config (s, b, p - b); + + if (package_config.empty ()) + throw invalid_argument ("empty package configuration name"); + + b = p + 1; // Start of toolchain name. + p = s.find ('/', b); // End of toolchain name. + + if (p == string::npos) + throw invalid_argument ("no toolchain version"); + + string toolchain_name (s, b, p - b); + + if (toolchain_name.empty ()) + throw invalid_argument ("empty toolchain name"); + + b = p + 1; // Start of toolchain version. + p = s.find ('/', b); // End of toolchain version. + + if (p == string::npos) + throw invalid_argument ("no timestamp"); + + r.toolchain_version = parse_version ("toolchain version"); + + r.id = build_id (package_id (move (tenant), move (name), r.package_version), + move (target), + move (target_config), + move (package_config), + move (toolchain_name), + r.toolchain_version); + + try + { + size_t tsn; + string ts (s, p + 1); + + r.timestamp = timestamp (chrono::duration_cast<timestamp::duration> ( + chrono::nanoseconds (stoull (ts, &tsn)))); + + if (tsn != ts.size ()) + throw invalid_argument ("trailing junk"); + } + // Handle invalid_argument or out_of_range (both derive from logic_error), + // that can be thrown by stoull(). + // + catch (const logic_error& e) + { + throw invalid_argument (string ("invalid timestamp: ") + e.what ()); + } + + return r; + } + + bool build_result_module:: + authenticate_session (const options::build& o, + const optional<vector<char>>& challenge, + const build& b, + const string& session) const + { + HANDLER_DIAG; + + auto warn_auth = [&session, &warn] (const string& d) + { + warn << "session '" << session << "' authentication failed: " << d; + }; + + bool r (false); + + // Must both be present or absent. 
+ // + if (!b.agent_challenge != !challenge) + { + warn_auth (challenge ? "unexpected challenge": "challenge is expected"); + } + else if (bot_agent_key_map_ == nullptr) // Authentication is disabled. + { + r = true; + } + else if (!b.agent_challenge) // Authentication is recently enabled. + { + warn_auth ("challenge is required now"); + } + else + { + assert (b.agent_fingerprint && challenge); + + auto auth = [&challenge, + &b, + &o, + &fail, &trace, + &warn_auth, + this] (const path& key) + { + bool r (false); + + try + { + openssl os ([&trace, this] (const char* args[], size_t n) + { + l2 ([&]{trace << process_args {args, n};}); + }, + path ("-"), fdstream_mode::text, 2, + process_env (o.openssl (), o.openssl_envvar ()), + use_openssl_pkeyutl_ ? "pkeyutl" : "rsautl", + o.openssl_option (), + use_openssl_pkeyutl_ ? "-verifyrecover" : "-verify", + "-pubin", + "-inkey", key); + + for (const auto& c: *challenge) + os.out.put (c); // Sets badbit on failure. + + os.out.close (); + + string s; + getline (os.in, s); + + bool v (os.in.eof ()); + os.in.close (); + + if (os.wait () && v) + { + r = (s == *b.agent_challenge); + + if (!r) + warn_auth ("challenge mismatched"); + } + else // The signature is presumably meaningless. + warn_auth ("unable to verify challenge"); + } + catch (const system_error& e) + { + fail << "unable to verify challenge: " << e; + } + + return r; + }; + + const string& fp (*b.agent_fingerprint); + auto i (bot_agent_key_map_->find (fp)); + + // Note that it is possible that the default vs custom bot + // classification has changed since the task request time. It feels that + // there is nothing wrong with that and we will handle that + // automatically. + // + if (i != bot_agent_key_map_->end ()) // Default bot? + { + r = auth (i->second); + } + else // Custom bot. + { + shared_ptr<build_public_key> k ( + build_db_->find<build_public_key> (public_key_id (b.tenant, fp))); + + if (k != nullptr) + { + // Temporarily save the key data to disk (note that it's the + // challenge which is passed via stdin to openssl). Hopefully /tmp + // is using tmpfs. + // + auto_rmfile arm; + + try + { + arm = auto_rmfile (path::temp_path ("brep-custom-bot-key")); + } + catch (const system_error& e) + { + fail << "unable to obtain temporary file: " << e; + } + + try + { + ofdstream os (arm.path); + os << *k; + os.close (); + } + catch (const io_error& e) + { + fail << "unable to write to '" << arm.path << "': " << e; + } + + r = auth (arm.path); + } + else + { + // The agent's key is recently replaced. + // + warn_auth ("agent's public key not found"); + } + } + } + + return r; + } +} diff --git a/mod/build-result-module.hxx b/mod/build-result-module.hxx new file mode 100644 index 0000000..34466e4 --- /dev/null +++ b/mod/build-result-module.hxx @@ -0,0 +1,78 @@ +// file : mod/build-result-module.hxx -*- C++ -*- +// license : MIT; see accompanying LICENSE file + +#ifndef MOD_BUILD_RESULT_MODULE_HXX +#define MOD_BUILD_RESULT_MODULE_HXX + +#include <libbrep/types.hxx> +#include <libbrep/utility.hxx> + +#include <libbrep/build.hxx> + +#include <mod/module-options.hxx> +#include <mod/database-module.hxx> +#include <mod/build-config-module.hxx> + +namespace brep +{ + // Base class for modules that handle the build task results. + // + // Specifically, it loads build controller configuration, initializes the + // build database instance, and provides utilities for parsing and + // authenticating the build task session. 
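+ //
+ // For illustration, a session string as handled by parse_session() could
+ // look along these lines (a made-up example):
+ //
+ // <tenant>/libfoo/1.2.3/x86_64-linux-gnu/linux_debian_12/default/public/0.17.0/1640000000000000000
+ //
+ // that is, the tenant id, package name and version, target, target and
+ // package configuration names, toolchain name and version, and the
+ // timestamp in nanoseconds, all separated with '/'.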
+ // + class build_result_module: public database_module, + protected build_config_module + { + protected: + build_result_module () = default; + + // Create a shallow copy (handling instance) if initialized and a deep + // copy (context exemplar) otherwise. + // + explicit + build_result_module (const build_result_module&); + + void + init (const options::build&, const options::build_db&); + + using handler::init; // Unhide. + + // Parse the build task session and verify that the session matches the + // tenant. Throw invalid_argument on errors. + // + struct parse_session_result + { + build_id id; + brep::version package_version; + brep::version toolchain_version; + brep::timestamp timestamp; + }; + + parse_session_result + parse_session (const string&) const; + + // Return true if bbot agent authentication is disabled or the agent is + // recognized and challenge matches. If the session authentication fails + // (challenge is not expected, expected but doesn't match, etc), then log + // the failure reason with the warning severity and return false. + // + // Note that the session argument is used only for logging. + // + bool + authenticate_session (const options::build&, + const optional<vector<char>>& challenge, + const build&, + const string& session) const; + + protected: + // True if the openssl version is greater than or equal to 3.0.0 and so + // pkeyutl needs to be used instead of rsautl. + // + // Note that openssl 3.0.0 deprecates rsautl in favor of pkeyutl. + // + bool use_openssl_pkeyutl_; + }; +} + +#endif // MOD_BUILD_RESULT_MODULE_HXX diff --git a/mod/build-config.cxx b/mod/build-target-config.cxx index 43a85e8..a30e281 100644 --- a/mod/build-config.cxx +++ b/mod/build-target-config.cxx @@ -1,17 +1,16 @@ -// file : mod/build-config-module.cxx -*- C++ -*- +// file : mod/build-target-config.cxx -*- C++ -*- // license : MIT; see accompanying LICENSE file -#include <mod/build-config.hxx> +#include <mod/build-target-config.hxx> -#include <libbutl/utility.mxx> // alpha(), etc. -#include <libbutl/path-pattern.mxx> +#include <libbutl/utility.hxx> // alpha(), etc. +#include <libbutl/path-pattern.hxx> namespace brep { using namespace std; using namespace butl; using namespace bpkg; - using namespace bbot; // The default underlying class set expressions (see below). // @@ -22,9 +21,9 @@ namespace brep {"all"}, '+', "All."); bool - exclude (const small_vector<build_class_expr, 1>& exprs, - const vector<build_constraint>& constrs, - const build_config& cfg, + exclude (const build_class_exprs& exprs, + const build_constraints& constrs, + const build_target_config& tc, const map<string, string>& class_inheritance_map, string* reason, bool default_all_ucs) @@ -74,11 +73,11 @@ namespace brep // (changing the result from true to false) or non-including one (leaving // the false result) as an exclusion reason.
// - auto match = [&cfg, &m, reason, &sanitize, &class_inheritance_map] + auto match = [&tc, &m, reason, &sanitize, &class_inheritance_map] (const build_class_expr& e) { bool pm (m); - e.match (cfg.classes, class_inheritance_map, m); + e.match (tc.classes, class_inheritance_map, m); if (reason != nullptr) { @@ -168,8 +167,8 @@ namespace brep if (!constrs.empty ()) try { - path cn (dash_components_to_path (cfg.name)); - path tg (dash_components_to_path (cfg.target.string ())); + path cn (dash_components_to_path (tc.name)); + path tg (dash_components_to_path (tc.target.string ())); for (const build_constraint& c: constrs) { diff --git a/mod/build-target-config.hxx b/mod/build-target-config.hxx new file mode 100644 index 0000000..60d159c --- /dev/null +++ b/mod/build-target-config.hxx @@ -0,0 +1,96 @@ +// file : mod/build-target-config.hxx -*- C++ -*- +// license : MIT; see accompanying LICENSE file + +#ifndef MOD_BUILD_TARGET_CONFIG_HXX +#define MOD_BUILD_TARGET_CONFIG_HXX + +#include <map> + +#include <libbutl/target-triplet.hxx> + +#include <libbpkg/manifest.hxx> + +#include <libbbot/build-target-config.hxx> + +#include <libbrep/types.hxx> +#include <libbrep/utility.hxx> + +#include <libbrep/common.hxx> + +namespace brep +{ + using build_target_config = bbot::build_target_config; + using build_target_configs = bbot::build_target_configs; + + // Return true if the specified build target configuration is excluded by a + // package configuration based on its underlying build class set, build + // class expressions, and build constraints, potentially extending the + // underlying set with the special classes. Set the exclusion reason if + // requested. Optionally use the `all` class as a default underlying build + // class set rather than the `default` class (which is, for example, the + // case for the external test packages not to reduce their build target + // configuration set needlessly). + // + bool + exclude (const build_class_exprs& builds, + const build_constraints& constraints, + const build_target_config&, + const std::map<string, string>& class_inheritance_map, + string* reason = nullptr, + bool default_all_ucs = false); + + template <typename K> + inline bool + exclude (const build_package_config_template<K>& pc, + const build_class_exprs& common_builds, + const build_constraints& common_constraints, + const build_target_config& tc, + const std::map<string, string>& class_inheritance_map, + string* reason = nullptr, + bool default_all_ucs = false) + { + return exclude (pc.effective_builds (common_builds), + pc.effective_constraints (common_constraints), + tc, + class_inheritance_map, + reason, + default_all_ucs); + } + + // Convert dash-separated components (target, build target configuration + // name, machine name) or a pattern thereof into a path, replacing dashes + // with slashes (directory separators), `**` with `*/**/*`, and appending + // the trailing slash for a subsequent match using the path_match() + // functionality (the idea here is for `linux**` to match `linux-gcc` which + // is quite natural to expect). Throw invalid_path if the resulting path is + // invalid. + // + // Note that the match_absent path match flag must be used for the above + // `**` transformation to work. + // + path + dash_components_to_path (const string&); + + // Build target/target configuration name combination that, in particular, + // identifies configurations in the buildtab and thus can be used as a + // set/map key. 
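+ //
+ // For example (made-up values), {x86_64-linux-gnu, linux_debian_12} and
+ // {x86_64-linux-gnu, linux_fedora_37} would be distinct ids.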
+ // + // Note: contains shallow references to the target and configuration name. + // + struct build_target_config_id + { + reference_wrapper<const butl::target_triplet> target; + reference_wrapper<const string> config; + + bool + operator< (const build_target_config_id& x) const + { + if (int r = target.get ().compare (x.target.get ())) + return r < 0; + + return config.get ().compare (x.config.get ()) < 0; + } + }; +} + +#endif // MOD_BUILD_TARGET_CONFIG diff --git a/mod/build.cxx b/mod/build.cxx index 5b9d8aa..5c37acb 100644 --- a/mod/build.cxx +++ b/mod/build.cxx @@ -3,12 +3,22 @@ #include <mod/build.hxx> +#include <odb/database.hxx> +#include <odb/connection.hxx> +#include <odb/transaction.hxx> + +#include <libbutl/sendmail.hxx> +#include <libbutl/process-io.hxx> + #include <web/server/mime-url-encoding.hxx> +#include <libbrep/build-package-odb.hxx> + #include <mod/utility.hxx> namespace brep { + using namespace std; using namespace web; string @@ -20,12 +30,15 @@ namespace brep // needs to be url-encoded, and only in the query part of the URL. We embed // the package version into the URL path part and so don't encode it. // - string url (host + tenant_dir (root, b.tenant).representation () + - mime_url_encode (b.package_name.string (), false) + '/' + - b.package_version.string () + "/log/" + - mime_url_encode (b.configuration, false /* query */) + '/' + - mime_url_encode (b.toolchain_name, false /* query */) + '/' + - b.toolchain_version.string ()); + string url ( + host + tenant_dir (root, b.tenant).representation () + + mime_url_encode (b.package_name.string (), false) + '/' + + b.package_version.string () + "/log/" + + mime_url_encode (b.target.string (), false /* query */) + '/' + + mime_url_encode (b.target_config_name, false /* query */) + '/' + + mime_url_encode (b.package_config_name, false /* query */) + '/' + + mime_url_encode (b.toolchain_name, false /* query */) + '/' + + b.toolchain_version.string ()); if (op != nullptr) { @@ -44,12 +57,154 @@ namespace brep // we embed the package version into the URL query part, where it is not // encoded by design. // - return host + tenant_dir (root, b.tenant).string () + + return host + tenant_dir (root, b.tenant).string () + "?build-force&pn=" + mime_url_encode (b.package_name.string ()) + - "&pv=" + b.package_version.string () + - "&cf=" + mime_url_encode (b.configuration) + - "&tn=" + mime_url_encode (b.toolchain_name) + - "&tv=" + b.toolchain_version.string () + + "&pv=" + b.package_version.string () + + "&tg=" + mime_url_encode (b.target.string ()) + + "&tc=" + mime_url_encode (b.target_config_name) + + "&pc=" + mime_url_encode (b.package_config_name) + + "&tn=" + mime_url_encode (b.toolchain_name) + + "&tv=" + b.toolchain_version.string () + "&reason="; } + + void + send_notification_email (const options::build_email_notification& o, + const odb::core::connection_ptr& conn, + const build& b, + const build_package& p, + const build_package_config& pc, + const string& what, + const basic_mark& error, + const basic_mark* trace) + { + using namespace odb::core; + using namespace butl; + + assert (b.state == build_state::built && b.status); + + // Bail out if sending build notification emails is disabled for this + // toolchain for this package. + // + { + const map<string, build_email>& tes (o.build_toolchain_email ()); + auto i (tes.find (b.id.toolchain_name)); + build_email mode (i != tes.end () ? 
i->second : build_email::latest); + + if (mode == build_email::none) + { + return; + } + else if (mode == build_email::latest) + { + transaction t (conn->begin ()); + database& db (t.database ()); + + const auto& id (query<buildable_package>::build_package::id); + + buildable_package lp ( + db.query_value<buildable_package> ( + (id.tenant == b.tenant && id.name == b.package_name) + + order_by_version_desc (id.version) + + "LIMIT 1")); + + t.commit (); + + if (lp.package->version != p.version) + return; + } + } + + string subj (what + ' ' + + to_string (*b.status) + ": " + + b.package_name.string () + '/' + + b.package_version.string () + ' ' + + b.target_config_name + '/' + + b.target.string () + ' ' + + b.package_config_name + ' ' + + b.toolchain_name + '-' + b.toolchain_version.string ()); + + // Send notification emails to the interested parties. + // + auto send_email = [&b, &subj, &o, &error, trace] (const string& to) + { + try + { + if (trace != nullptr) + *trace << "email '" << subj << "' to " << to; + + // Redirect the diagnostics to webserver error log. + // + sendmail sm ([trace] (const char* args[], size_t n) + { + if (trace != nullptr) + *trace << process_args {args, n}; + }, + 2, + o.email (), + subj, + {to}); + + if (b.results.empty ()) + { + sm.out << "No operation results available." << endl; + } + else + { + const string& host (o.host ()); + const dir_path& root (o.root ()); + + ostream& os (sm.out); + + os << "combined: " << *b.status << endl << endl + << " " << build_log_url (host, root, b) << endl << endl; + + for (const auto& r: b.results) + os << r.operation << ": " << r.status << endl << endl + << " " << build_log_url (host, root, b, &r.operation) + << endl << endl; + + os << "Force rebuild (enter the reason, use '+' instead of spaces):" + << endl << endl + << " " << build_force_url (host, root, b) << endl; + } + + sm.out.close (); + + if (!sm.wait ()) + error << "sendmail " << *sm.exit; + } + // Handle process_error and io_error (both derive from system_error). + // + catch (const system_error& e) + { + error << "sendmail error: " << e; + } + }; + + // Send the build notification email if a non-empty package build email is + // specified. + // + if (const optional<email>& e = pc.effective_email (p.build_email)) + { + if (!e->empty ()) + send_email (*e); + } + + // Send the build warning/error notification emails, if requested. + // + if (*b.status >= result_status::warning) + { + if (const optional<email>& e = + pc.effective_warning_email (p.build_warning_email)) + send_email (*e); + } + + if (*b.status >= result_status::error) + { + if (const optional<email>& e = + pc.effective_error_email (p.build_error_email)) + send_email (*e); + } + } } diff --git a/mod/build.hxx b/mod/build.hxx index f0846be..07e4411 100644 --- a/mod/build.hxx +++ b/mod/build.hxx @@ -4,10 +4,16 @@ #ifndef MOD_BUILD_HXX #define MOD_BUILD_HXX +#include <odb/forward.hxx> // odb::core::connection_ptr + #include <libbrep/types.hxx> #include <libbrep/utility.hxx> #include <libbrep/build.hxx> +#include <libbrep/build-package.hxx> + +#include <mod/diagnostics.hxx> +#include <mod/module-options.hxx> // Various package build-related utilities. // @@ -25,6 +31,19 @@ namespace brep // string build_force_url (const string& host, const dir_path& root, const build&); + + // Send the notification email for the specified package configuration + // build. The build is expected to be in the built state. 
+ // + void + send_notification_email (const options::build_email_notification&, + const odb::core::connection_ptr&, + const build&, + const build_package&, + const build_package_config&, + const string& what, // build, rebuild, etc. + const basic_mark& error, + const basic_mark* trace); } #endif // MOD_BUILD_HXX diff --git a/mod/buildfile b/mod/buildfile index 191d966..c3895dc 100644 --- a/mod/buildfile +++ b/mod/buildfile @@ -25,7 +25,7 @@ include ../web/server/ ./: mod{brep} {libue libus}{mod} -libu_src = options-types types-parsers build-config +libu_src = options-types types-parsers build-target-config mod{brep}: {hxx ixx txx cxx}{* -module-options -{$libu_src}} \ libus{mod} ../libbrep/lib{brep} ../web/server/libus{web-server} \ @@ -35,6 +35,11 @@ mod{brep}: {hxx ixx txx cxx}{* -module-options -{$libu_src}} \ {hxx ixx txx cxx}{+{$libu_src} } \ $libs +# Add support for tenant-associated service notifications to the CI module for +# the debugging of the notifications machinery. +# +cxx.poptions += -DBREP_CI_TENANT_SERVICE + libus{mod}: ../web/xhtml/libus{xhtml} libue{mod}: ../web/xhtml/libue{xhtml} @@ -50,7 +55,7 @@ if $cli.configured cli.options += --std c++11 -I $src_root --include-with-brackets \ --include-prefix mod --guard-prefix MOD --generate-specifier \ --cxx-prologue "#include <mod/types-parsers.hxx>" \ ---cli-namespace brep::cli --generate-file-scanner --option-length 41 \ +--cli-namespace brep::cli --generate-file-scanner --option-length 46 \ --generate-modifier --generate-description --option-prefix "" # Include the generated cli files into the distribution and don't remove diff --git a/mod/ci-common.cxx b/mod/ci-common.cxx new file mode 100644 index 0000000..cb61e66 --- /dev/null +++ b/mod/ci-common.cxx @@ -0,0 +1,494 @@ +// file : mod/ci-common.cxx -*- C++ -*- +// license : MIT; see accompanying LICENSE file + +#include <mod/ci-common.hxx> + +#include <libbutl/uuid.hxx> +#include <libbutl/fdstream.hxx> +#include <libbutl/sendmail.hxx> +#include <libbutl/timestamp.hxx> +#include <libbutl/filesystem.hxx> +#include <libbutl/process-io.hxx> // operator<<(ostream, process_args) +#include <libbutl/manifest-serializer.hxx> + +#include <mod/external-handler.hxx> + +namespace brep +{ + using namespace std; + using namespace butl; + + void ci_start:: + init (shared_ptr<options::ci_start> o) + { + // Verify the data directory satisfies the requirements. + // + const dir_path& d (o->ci_data ()); + + if (d.relative ()) + throw runtime_error ("ci-data directory path must be absolute"); + + if (!dir_exists (d)) + throw runtime_error ("ci-data directory '" + d.string () + + "' does not exist"); + + if (o->ci_handler_specified () && o->ci_handler ().relative ()) + throw runtime_error ("ci-handler path must be absolute"); + + options_ = move (o); + } + + optional<ci_start::start_result> ci_start:: + start (const basic_mark& error, + const basic_mark& warn, + const basic_mark* trace, + optional<tenant_service>&& service, + const repository_location& repository, + const vector<package>& packages, + const optional<string>& client_ip, + const optional<string>& user_agent, + const optional<string>& interactive, + const optional<string>& simulate, + const vector<pair<string, string>>& custom_request, + const vector<pair<string, string>>& overrides) + { + using serializer = manifest_serializer; + using serialization = manifest_serialization; + + assert (options_ != nullptr); // Shouldn't be called otherwise. + + // If the tenant service is specified, then its type may not be empty. 
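+ //
+ // For example (a made-up illustration), a third-party CI integration
+ // could pass a tenant_service with an empty id and the "github" type
+ // here, in which case the generated reference is used in place of the
+ // id (see the serialization code below).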
+    //
+    assert (!service || !service->type.empty ());
+
+    // Generate the request id.
+    //
+    // Note that it will also be used as a CI result manifest reference,
+    // unless the latter is provided by the external handler.
+    //
+    string request_id;
+
+    try
+    {
+      request_id = uuid::generate ().string ();
+    }
+    catch (const system_error& e)
+    {
+      error << "unable to generate request id: " << e;
+      return nullopt;
+    }
+
+    // Create the submission data directory.
+    //
+    dir_path dd (options_->ci_data () / dir_path (request_id));
+
+    try
+    {
+      // It's highly unlikely but still possible that the directory already
+      // exists. This can only happen if the generated uuid is not unique.
+      //
+      if (try_mkdir (dd) == mkdir_status::already_exists)
+        throw_generic_error (EEXIST);
+    }
+    catch (const system_error& e)
+    {
+      error << "unable to create directory '" << dd << "': " << e;
+      return nullopt;
+    }
+
+    auto_rmdir ddr (dd);
+
+    // Return the start_result object for the client errors (normally the bad
+    // request status code (400) for the client data serialization errors).
+    //
+    auto client_error = [&request_id] (uint16_t status, string message)
+    {
+      return start_result {status,
+                           move (message),
+                           request_id,
+                           vector<pair<string, string>> ()};
+    };
+
+    // Serialize the CI request manifest to a stream. On the serialization
+    // error return false together with the start_result object containing the
+    // bad request (400) code and the error message. On the stream error pass
+    // through the io_error exception. Otherwise return true.
+    //
+    timestamp ts (system_clock::now ());
+
+    auto rqm = [&request_id,
+                &ts,
+                &service,
+                &repository,
+                &packages,
+                &client_ip,
+                &user_agent,
+                &interactive,
+                &simulate,
+                &custom_request,
+                &client_error] (ostream& os, bool long_lines = false)
+      -> pair<bool, optional<start_result>>
+    {
+      try
+      {
+        serializer s (os, "request", long_lines);
+
+        // Serialize the submission manifest header.
+        //
+        s.next ("", "1"); // Start of manifest.
+        s.next ("id", request_id);
+        s.next ("repository", repository.string ());
+
+        for (const package& p: packages)
+        {
+          if (!p.version)
+            s.next ("package", p.name.string ());
+          else
+            s.next ("package",
+                    p.name.string () + '/' + p.version->string ());
+        }
+
+        if (interactive)
+          s.next ("interactive", *interactive);
+
+        if (simulate)
+          s.next ("simulate", *simulate);
+
+        s.next ("timestamp",
+                butl::to_string (ts,
+                                 "%Y-%m-%dT%H:%M:%SZ",
+                                 false /* special */,
+                                 false /* local */));
+
+        if (client_ip)
+          s.next ("client-ip", *client_ip);
+
+        if (user_agent)
+          s.next ("user-agent", *user_agent);
+
+        if (service)
+        {
+          // Note that if the service id is not specified, then the handler
+          // will use the generated reference instead.
+          //
+          if (!service->id.empty ())
+            s.next ("service-id", service->id);
+
+          s.next ("service-type", service->type);
+
+          if (service->data)
+            s.next ("service-data", *service->data);
+        }
+
+        // Serialize the request custom parameters.
+        //
+        // Note that the serializer constrains the custom parameter names
+        // (can't start with '#', can't contain ':' or whitespace, etc).
+        //
+        for (const pair<string, string>& nv: custom_request)
+          s.next (nv.first, nv.second);
+
+        s.next ("", ""); // End of manifest.
+        return make_pair (true, optional<start_result> ());
+      }
+      catch (const serialization& e)
+      {
+        return make_pair (false,
+                          optional<start_result> (
+                            client_error (400,
+                                          string ("invalid parameter: ") +
+                                          e.what ())));
+      }
+    };
+
+    // Serialize the CI request manifest to the submission directory.
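+    //
+    // For illustration only, with made-up values the serialized manifest
+    // could look along these lines:
+    //
+    //   : 1
+    //   id: 123e4567-e89b-12d3-a456-426655440000
+    //   repository: https://example.org/1/alpha
+    //   package: libhello/1.2.3
+    //   timestamp: 2023-05-01T12:00:00Z
+    //   service-type: github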
+ // + path rqf (dd / "request.manifest"); + + try + { + ofdstream os (rqf); + pair<bool, optional<start_result>> r (rqm (os)); + os.close (); + + if (!r.first) + return move (*r.second); + } + catch (const io_error& e) + { + error << "unable to write to '" << rqf << "': " << e; + return nullopt; + } + + // Serialize the CI overrides manifest to a stream. On the serialization + // error return false together with the start_result object containing the + // bad request (400) code and the error message. On the stream error pass + // through the io_error exception. Otherwise return true. + // + auto ovm = [&overrides, &client_error] (ostream& os, + bool long_lines = false) + -> pair<bool, optional<start_result>> + { + try + { + serializer s (os, "overrides", long_lines); + + s.next ("", "1"); // Start of manifest. + + for (const pair<string, string>& nv: overrides) + s.next (nv.first, nv.second); + + s.next ("", ""); // End of manifest. + return make_pair (true, optional<start_result> ()); + } + catch (const serialization& e) + { + return make_pair (false, + optional<start_result> ( + client_error ( + 400, + string ("invalid manifest override: ") + + e.what ()))); + } + }; + + // Serialize the CI overrides manifest to the submission directory. + // + path ovf (dd / "overrides.manifest"); + + if (!overrides.empty ()) + try + { + ofdstream os (ovf); + pair<bool, optional<start_result>> r (ovm (os)); + os.close (); + + if (!r.first) + return move (*r.second); + } + catch (const io_error& e) + { + error << "unable to write to '" << ovf << "': " << e; + return nullopt; + } + + // Given that the submission data is now successfully persisted we are no + // longer in charge of removing it, except for the cases when the + // submission handler terminates with an error (see below for details). + // + ddr.cancel (); + + // If the handler terminates with non-zero exit status or specifies 5XX + // (HTTP server error) submission result manifest status value, then we + // stash the submission data directory for troubleshooting. Otherwise, if + // it's the 4XX (HTTP client error) status value, then we remove the + // directory. + // + auto stash_submit_dir = [&dd, error] () + { + if (dir_exists (dd)) + try + { + mvdir (dd, dir_path (dd + ".fail")); + } + catch (const system_error& e) + { + // Not much we can do here. Let's just log the issue and bail out + // leaving the directory in place. + // + error << "unable to rename directory '" << dd << "': " << e; + } + }; + + // Run the submission handler, if specified, reading the CI result + // manifest from its stdout and parse it into the resulting manifest + // object. Otherwise, create implied CI result manifest. + // + start_result sr; + + if (options_->ci_handler_specified ()) + { + using namespace external_handler; + + optional<result_manifest> r (run (options_->ci_handler (), + options_->ci_handler_argument (), + dd, + options_->ci_handler_timeout (), + error, + warn, + trace)); + if (!r) + { + stash_submit_dir (); + return nullopt; // The diagnostics is already issued. + } + + sr.status = r->status; + + for (manifest_name_value& nv: r->values) + { + string& n (nv.name); + string& v (nv.value); + + if (n == "message") + sr.message = move (v); + else if (n == "reference") + sr.reference = move (v); + else if (n != "status") + sr.custom_result.emplace_back (move (n), move (v)); + } + + if (sr.reference.empty ()) + sr.reference = move (request_id); + } + else // Create the implied CI result manifest. 
+    {
+      sr.status = 200;
+      sr.message = "CI request is queued";
+      sr.reference = move (request_id);
+    }
+
+    // Serialize the CI result manifest to a stream. On the serialization
+    // error log the error description and return false, on the stream error
+    // pass through the io_error exception, otherwise return true.
+    //
+    auto rsm = [&sr, &error] (ostream& os, bool long_lines = false) -> bool
+    {
+      try
+      {
+        serialize_manifest (sr, os, long_lines);
+        return true;
+      }
+      catch (const serialization& e)
+      {
+        error << "ref " << sr.reference << ": unable to serialize handler's "
+              << "output: " << e;
+        return false;
+      }
+    };
+
+    // If the submission data directory still exists then perform an
+    // appropriate action on it, depending on the submission result status.
+    // Note that the handler could move or remove the directory.
+    //
+    if (dir_exists (dd))
+    {
+      // Remove the directory if the client error is detected.
+      //
+      if (sr.status >= 400 && sr.status < 500)
+      {
+        rmdir_r (dd);
+      }
+      //
+      // Otherwise, save the result manifest into the directory. Also stash
+      // the directory for troubleshooting in case of the server error.
+      //
+      else
+      {
+        path rsf (dd / "result.manifest");
+
+        try
+        {
+          ofdstream os (rsf);
+
+          // Not being able to stash the result manifest is not a reason to
+          // claim the submission failed. The error is logged nevertheless.
+          //
+          rsm (os);
+
+          os.close ();
+        }
+        catch (const io_error& e)
+        {
+          // Not fatal (see above).
+          //
+          error << "unable to write to '" << rsf << "': " << e;
+        }
+
+        if (sr.status >= 500 && sr.status < 600)
+          stash_submit_dir ();
+      }
+    }
+
+    // Send the email if it is configured and the CI request submission is
+    // not simulated. Use the long lines manifest serialization mode for the
+    // convenience of copying/clicking URLs they contain.
+    //
+    // Note that we don't consider the email sending failure to be a
+    // submission failure as the submission data is successfully persisted and
+    // the handler is successfully executed, if configured. One can argue that
+    // email can be essential for the submission processing and missing it
+    // would result in the incomplete submission. In this case it's natural to
+    // assume that the web server error log is monitored and the email sending
+    // failure will be noticed.
+    //
+    if (options_->ci_email_specified () && !simulate)
+    try
+    {
+      // Redirect the diagnostics to the web server error log.
+      //
+      sendmail sm ([trace] (const char* args[], size_t n)
+                   {
+                     if (trace != nullptr)
+                       *trace << process_args {args, n};
+                   },
+                   2 /* stderr */,
+                   options_->email (),
+                   "CI request submission (" + sr.reference + ')',
+                   {options_->ci_email ()});
+
+      // Write the CI request manifest.
+      //
+      pair<bool, optional<start_result>> r (
+        rqm (sm.out, true /* long_lines */));
+
+      assert (r.first); // The serialization succeeded once, so can't fail now.
+
+      // Write the CI overrides manifest.
+      //
+      sm.out << "\n\n";
+
+      r = ovm (sm.out, true /* long_lines */);
+      assert (r.first); // The serialization succeeded once, so can't fail now.
+
+      // Write the CI result manifest.
+      //
+      sm.out << "\n\n";
+
+      // We don't care about the result (see above).
+      //
+      rsm (sm.out, true /* long_lines */);
+
+      sm.out.close ();
+
+      if (!sm.wait ())
+        error << "sendmail " << *sm.exit;
+    }
+    // Handle process_error and io_error (both derive from system_error).
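+    // (Presumably, process_error would be thrown on the failure to start the
+    // sendmail process and io_error on the failure to write to its stdin.)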
+    //
+    catch (const system_error& e)
+    {
+      error << "sendmail error: " << e;
+    }
+
+    return optional<start_result> (move (sr));
+  }
+
+  void ci_start::
+  serialize_manifest (const start_result& r, ostream& os, bool long_lines)
+  {
+    manifest_serializer s (os, "result", long_lines);
+
+    s.next ("", "1"); // Start of manifest.
+    s.next ("status", to_string (r.status));
+    s.next ("message", r.message);
+    s.next ("reference", r.reference);
+
+    for (const pair<string, string>& nv: r.custom_result)
+      s.next (nv.first, nv.second);
+
+    s.next ("", ""); // End of manifest.
+  }
+}
diff --git a/mod/ci-common.hxx b/mod/ci-common.hxx
new file mode 100644
index 0000000..6f62c4b
--- /dev/null
+++ b/mod/ci-common.hxx
@@ -0,0 +1,96 @@
+// file : mod/ci-common.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef MOD_CI_COMMON_HXX
+#define MOD_CI_COMMON_HXX
+
+#include <odb/forward.hxx> // database
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <libbrep/common.hxx>
+
+#include <mod/diagnostics.hxx>
+#include <mod/module-options.hxx>
+
+namespace brep
+{
+  class ci_start
+  {
+  public:
+    void
+    init (shared_ptr<options::ci_start>);
+
+    // If the request handling has been performed normally, then return the
+    // information that corresponds to the CI result manifest (see CI Result
+    // Manifest in the manual). Otherwise (some internal error has occurred),
+    // log the error and return nullopt.
+    //
+    // The arguments correspond to the CI request and overrides manifest
+    // values (see CI Request and Overrides Manifests in the manual). Note:
+    // request id and timestamp are generated by the implementation.
+    //
+    struct package
+    {
+      package_name name;
+      optional<brep::version> version;
+    };
+    // Note that the inability to generate the reference is an internal
+    // error. Thus, it is not optional.
+    //
+    struct start_result
+    {
+      uint16_t status;
+      string message;
+      string reference;
+      vector<pair<string, string>> custom_result;
+    };
+
+    // In the optional service information, if id is empty, then the generated
+    // reference is used instead.
+    //
+    optional<start_result>
+    start (const basic_mark& error,
+           const basic_mark& warn,
+           const basic_mark* trace,
+           optional<tenant_service>&&,
+           const repository_location& repository,
+           const vector<package>& packages,
+           const optional<string>& client_ip,
+           const optional<string>& user_agent,
+           const optional<string>& interactive = nullopt,
+           const optional<string>& simulate = nullopt,
+           const vector<pair<string, string>>& custom_request = {},
+           const vector<pair<string, string>>& overrides = {});
+
+    // Helpers.
+    //
+
+    // Serialize the start result as a CI result manifest.
+    //
+    static void
+    serialize_manifest (const start_result&, ostream&, bool long_lines = false);
+
+  private:
+    shared_ptr<options::ci_start> options_;
+  };
+
+  class ci_cancel
+  {
+  public:
+    void
+    init (shared_ptr<options::ci_cancel>, shared_ptr<odb::core::database>);
+
+    // @@ TODO Archive the tenant.
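+    //
+    // A plausible sketch (an assumption, not yet implemented): within a
+    // build_db_ transaction, load the build_tenant in question, mark it as
+    // archived so that no new build tasks are issued for its packages, and
+    // update its persistent state, retrying on recoverable database
+    // failures.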
+ // + void + cancel (/*...*/); + + private: + shared_ptr<options::ci_cancel> options_; + shared_ptr<odb::core::database> build_db_; + }; +} + +#endif // MOD_CI_COMMON_HXX diff --git a/mod/database-module.cxx b/mod/database-module.cxx index f598bfd..bbb3e59 100644 --- a/mod/database-module.cxx +++ b/mod/database-module.cxx @@ -3,13 +3,20 @@ #include <mod/database-module.hxx> +#include <odb/database.hxx> #include <odb/exceptions.hxx> +#include <odb/transaction.hxx> + +#include <libbrep/build-package.hxx> +#include <libbrep/build-package-odb.hxx> #include <mod/database.hxx> #include <mod/module-options.hxx> namespace brep { + using namespace odb::core; + // While currently the user-defined copy constructor is not required (we // don't need to deep copy nullptr's), it is a good idea to keep the // placeholder ready for less trivial cases. @@ -68,4 +75,61 @@ namespace brep throw; } + + optional<string> database_module:: + update_tenant_service_state ( + const connection_ptr& conn, + const string& tid, + const function<optional<string> (const tenant_service&)>& f) + { + assert (f != nullptr); // Shouldn't be called otherwise. + + // Must be initialized via the init(options::build_db) function call. + // + assert (build_db_ != nullptr); + + optional<string> r; + + for (size_t retry (retry_);; ) + { + try + { + transaction tr (conn->begin ()); + + shared_ptr<build_tenant> t (build_db_->find<build_tenant> (tid)); + + if (t != nullptr && t->service) + { + tenant_service& s (*t->service); + + if (optional<string> data = f (s)) + { + s.data = move (*data); + build_db_->update (t); + + r = move (s.data); + } + } + + tr.commit (); + + // Bail out if we have successfully updated the service state. + // + break; + } + catch (const odb::recoverable& e) + { + if (retry-- == 0) + throw; + + HANDLER_DIAG; + l1 ([&]{trace << e << "; " << retry + 1 << " tenant service " + << "state update retries left";}); + + r = nullopt; // Prepare for the next iteration. + } + } + + return r; + } } diff --git a/mod/database-module.hxx b/mod/database-module.hxx index f72ba83..298afbf 100644 --- a/mod/database-module.hxx +++ b/mod/database-module.hxx @@ -4,7 +4,7 @@ #ifndef MOD_DATABASE_MODULE_HXX #define MOD_DATABASE_MODULE_HXX -#include <odb/forward.hxx> // database +#include <odb/forward.hxx> // odb::core::database, odb::core::connection_ptr #include <libbrep/types.hxx> #include <libbrep/utility.hxx> @@ -14,6 +14,8 @@ namespace brep { + struct tenant_service; + // A handler that utilises the database. Specifically, it will retry the // request in the face of recoverable database failures (deadlock, loss of // connection, etc) up to a certain number of times. @@ -50,6 +52,26 @@ namespace brep virtual bool handle (request&, response&) = 0; + // Helpers. + // + + // Update the tenant-associated service state if the specified + // notification callback-returned function (expected to be not NULL) + // returns the new state data. Return the service state data, if updated, + // and nullopt otherwise. + // + // Specifically, start the database transaction, query the service state, + // and call the callback-returned function on this state. If this call + // returns the data string (rather than nullopt), then update the service + // state with this data and persist the change. Repeat all the above steps + // on the recoverable database failures (deadlocks, etc). 
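+    //
+    // A hypothetical usage sketch (the callback and the data value are made
+    // up):
+    //
+    //   update_tenant_service_state (
+    //     conn, tid,
+    //     [] (const tenant_service& s) -> optional<string>
+    //     {
+    //       // Only update the state if it is not yet initialized.
+    //       //
+    //       return !s.data ? optional<string> ("initialized") : nullopt;
+    //     });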
+ // + optional<string> + update_tenant_service_state ( + const odb::core::connection_ptr&, + const string& tid, + const function<optional<string> (const tenant_service&)>&); + protected: size_t retry_ = 0; // Max of all retries. diff --git a/mod/database.cxx b/mod/database.cxx index d53ee50..02d521d 100644 --- a/mod/database.cxx +++ b/mod/database.cxx @@ -24,10 +24,10 @@ namespace brep operator< (const db_key& x, const db_key& y) { int r; - if ((r = x.user.compare (y.user)) != 0 || - (r = x.role.compare (y.role)) != 0 || + if ((r = x.user.compare (y.user)) != 0 || + (r = x.role.compare (y.role)) != 0 || (r = x.password.compare (y.password)) != 0 || - (r = x.name.compare (y.name)) != 0 || + (r = x.name.compare (y.name)) != 0 || (r = x.host.compare (y.host))) return r < 0; @@ -59,7 +59,7 @@ namespace brep // Change the connection current user to the execution user name. // if (!role_.empty ()) - conn->execute ("SET ROLE '" + role_ + "'"); + conn->execute ("SET ROLE '" + role_ + '\''); return conn; } diff --git a/mod/diagnostics.hxx b/mod/diagnostics.hxx index 37ab25e..f83e1de 100644 --- a/mod/diagnostics.hxx +++ b/mod/diagnostics.hxx @@ -109,7 +109,7 @@ namespace brep uncaught_ (r.uncaught_), #endif data_ (move (r.data_)), - os_ (move (r.os_)), + os_ (move (r.os_)), // Note: can throw. epilogue_ (r.epilogue_) { r.data_.clear (); // Empty. diff --git a/mod/external-handler.cxx b/mod/external-handler.cxx index 7f26680..3a85bd8 100644 --- a/mod/external-handler.cxx +++ b/mod/external-handler.cxx @@ -13,9 +13,10 @@ #include <type_traits> // static_assert #include <system_error> // error_code, generic_category() -#include <libbutl/process.mxx> -#include <libbutl/fdstream.mxx> -#include <libbutl/process-io.mxx> // operator<<(ostream, process_args) +#include <libbutl/process.hxx> +#include <libbutl/fdstream.hxx> +#include <libbutl/process-io.hxx> // operator<<(ostream, process_args) +#include <libbutl/manifest-parser.hxx> using namespace std; using namespace butl; @@ -95,6 +96,8 @@ namespace brep data_dir)); pipe.out.close (); + // Kill the process and wait for its completion. + // auto kill = [&pr, &warn, &handler, &ref] () { // We may still end up well (see below), thus this is a warning. @@ -103,6 +106,7 @@ namespace brep << " execution timeout expired"; pr.kill (); + pr.wait (); }; try @@ -313,7 +317,7 @@ namespace brep assert (e != nullptr); if (!(*e == '\0' && c >= 100 && c < 600)) - bad_value ("invalid HTTP status '" + v + "'"); + bad_value ("invalid HTTP status '" + v + '\''); // Save the HTTP status. 
// diff --git a/mod/external-handler.hxx b/mod/external-handler.hxx index f8f7ee8..0276a25 100644 --- a/mod/external-handler.hxx +++ b/mod/external-handler.hxx @@ -4,7 +4,7 @@ #ifndef MOD_EXTERNAL_HANDLER_HXX #define MOD_EXTERNAL_HANDLER_HXX -#include <libbutl/manifest-parser.mxx> +#include <libbutl/manifest-types.hxx> #include <libbrep/types.hxx> #include <libbrep/utility.hxx> diff --git a/mod/mod-build-configs.cxx b/mod/mod-build-configs.cxx index 6731b28..9282544 100644 --- a/mod/mod-build-configs.cxx +++ b/mod/mod-build-configs.cxx @@ -3,8 +3,6 @@ #include <mod/mod-build-configs.hxx> -#include <algorithm> // replace() - #include <libstudxml/serializer.hxx> #include <web/server/module.hxx> @@ -15,7 +13,6 @@ #include <mod/module-options.hxx> using namespace std; -using namespace bbot; using namespace brep::cli; // While currently the user-defined copy constructor is not required (we don't @@ -40,6 +37,9 @@ init (scanner& s) if (options_->build_config_specified ()) build_config_module::init (*options_); + + if (options_->root ().empty ()) + options_->root (dir_path ("/")); } bool brep::build_configs:: @@ -49,7 +49,7 @@ handle (request& rq, response& rs) HANDLER_DIAG; - if (build_conf_ == nullptr) + if (target_conf_ == nullptr) throw invalid_request (501, "not implemented"); const size_t page_configs (options_->build_config_page_entries ()); @@ -57,6 +57,8 @@ handle (request& rq, response& rs) params::build_configs params; + string& selected_class (params.class_name ()); // Note: can be empty. + try { name_value_scanner s (rq.parameters (1024)); @@ -67,8 +69,7 @@ handle (request& rq, response& rs) // character (that is otherwise forbidden in a class name) to the plus // character. // - string& cn (params.class_name ()); - replace (cn.begin (), cn.end (), ' ', '+'); + replace (selected_class.begin (), selected_class.end (), ' ', '+'); } catch (const cli::exception& e) { @@ -89,11 +90,11 @@ handle (request& rq, response& rs) << DIV_HEADER (options_->logo (), options_->menu (), root, tenant) << DIV(ID="content"); - auto url = [&root] (const string& cls) + auto url = [&root, this] (const string& cls) { - string r (root.string () + "?build-configs"); + string r (tenant_dir (root, tenant).string () + "?build-configs"); - if (cls != "all") + if (!cls.empty ()) { r += '='; @@ -120,34 +121,44 @@ handle (request& rq, response& rs) // if (params.page () == 0) { - const strings& cls (build_conf_->classes); - const map<string, string>& im (build_conf_->class_inheritance_map); + const strings& cls (target_conf_->classes); + const map<string, string>& im (target_conf_->class_inheritance_map); s << DIV(ID="filter-heading") << "Build Configuration Classes" << ~DIV << P(ID="filter"); for (auto b (cls.begin ()), i (b), e (cls.end ()); i != e; ++i) { - if (i != b) - s << ' '; - + // Skip the 'hidden' class. + // const string& c (*i); - print_class_name (c, c == params.class_name ()); - // Append the base class, if present. - // - auto j (im.find (c)); - if (j != im.end ()) + if (c != "hidden") { - s << ':'; - print_class_name (j->second); + // Note that here we rely on the fact that the first class in the list + // can never be 'hidden' (is always 'all'). + // + if (i != b) + s << ' '; + + print_class_name (c, c == selected_class); + + // Append the base class, if present. + // + auto j (im.find (c)); + if (j != im.end ()) + { + s << ':'; + print_class_name (j->second); + } } } s << ~P; } - // Print build configurations that belong to the selected class. 
+ // Print build configurations that belong to the selected class (all + // configurations if no class is selected) and are not hidden. // // We will calculate the total configuration count and cache configurations // for printing (skipping an appropriate number of them for page number @@ -155,14 +166,15 @@ handle (request& rq, response& rs) // before printing the configurations. // size_t count (0); - vector<const build_config*> configs; + vector<const build_target_config*> configs; configs.reserve (page_configs); size_t skip (page * page_configs); size_t print (page_configs); - for (const build_config& c: *build_conf_) + for (const build_target_config& c: *target_conf_) { - if (belongs (c, params.class_name ())) + if ((selected_class.empty () || belongs (c, selected_class)) && + !belongs (c, "hidden")) { if (skip != 0) --skip; @@ -185,7 +197,7 @@ handle (request& rq, response& rs) // Enclose the subsequent tables to be able to use nth-child CSS selector. // s << DIV; - for (const build_config* c: configs) + for (const build_target_config* c: configs) { s << TABLE(CLASS="proplist config") << TBODY @@ -217,7 +229,7 @@ handle (request& rq, response& rs) count, page_configs, options_->build_config_pages (), - url (params.class_name ())) + url (selected_class)) << ~DIV << ~BODY << ~HTML; diff --git a/mod/mod-build-force.cxx b/mod/mod-build-force.cxx index bd172e3..bdae356 100644 --- a/mod/mod-build-force.cxx +++ b/mod/mod-build-force.cxx @@ -3,8 +3,6 @@ #include <mod/mod-build-force.hxx> -#include <algorithm> // replace() - #include <odb/database.hxx> #include <odb/transaction.hxx> @@ -12,23 +10,32 @@ #include <libbrep/build.hxx> #include <libbrep/build-odb.hxx> +#include <libbrep/build-package.hxx> +#include <libbrep/build-package-odb.hxx> #include <mod/module-options.hxx> +#include <mod/tenant-service.hxx> using namespace std; -using namespace bbot; using namespace brep::cli; using namespace odb::core; +brep::build_force:: +build_force (const tenant_service_map& tsm) + : tenant_service_map_ (tsm) +{ +} + // While currently the user-defined copy constructor is not required (we don't // need to deep copy nullptr's), it is a good idea to keep the placeholder // ready for less trivial cases. // brep::build_force:: -build_force (const build_force& r) +build_force (const build_force& r, const tenant_service_map& tsm) : database_module (r), build_config_module (r), - options_ (r.initialized_ ? r.options_ : nullptr) + options_ (r.initialized_ ? 
r.options_ : nullptr), + tenant_service_map_ (tsm) { } @@ -115,10 +122,26 @@ handle (request& rq, response& rs) version package_version (parse_version (params.version (), "package version")); - string& config (params.configuration ()); + target_triplet target; + + try + { + target = target_triplet (params.target ()); + } + catch (const invalid_argument& e) + { + throw invalid_argument (string ("invalid target: ") + e.what ()); + } + + string& target_config (params.target_config ()); + + if (target_config.empty ()) + throw invalid_argument ("no target configuration name"); - if (config.empty ()) - throw invalid_argument ("no configuration name"); + string& package_config (params.package_config ()); + + if (package_config.empty ()) + throw invalid_argument ("no package configuration name"); string& toolchain_name (params.toolchain_name ()); @@ -129,7 +152,9 @@ handle (request& rq, response& rs) "toolchain version")); id = build_id (package_id (move (tenant), move (p), package_version), - move (config), + move (target), + move (target_config), + move (package_config), move (toolchain_name), toolchain_version); } @@ -149,42 +174,137 @@ handle (request& rq, response& rs) // Make sure the build configuration still exists. // - if (build_conf_map_->find (id.configuration.c_str ()) == - build_conf_map_->end ()) - config_expired ("no configuration"); + if (target_conf_map_->find ( + build_target_config_id {id.target, + id.target_config_name}) == + target_conf_map_->end ()) + config_expired ("no target configuration"); // Load the package build configuration (if present), set the force flag and // update the object's persistent state. // + // If the incomplete package build is being forced to rebuild and the + // tenant_service_build_queued callback is associated with the package + // tenant, then stash the state, the build object, and the callback pointer + // and calculate the hints for the subsequent service `queued` notification. + // + const tenant_service_build_queued* tsq (nullptr); + optional<pair<tenant_service, shared_ptr<build>>> tss; + tenant_service_build_queued::build_queued_hints qhs; + + connection_ptr conn (build_db_->connection ()); { - transaction t (build_db_->begin ()); + transaction t (conn->begin ()); package_build pb; + shared_ptr<build> b; + if (!build_db_->query_one<package_build> ( - query<package_build>::build::id == id, pb)) + query<package_build>::build::id == id, pb) || + (b = move (pb.build))->state == build_state::queued) config_expired ("no package build"); - shared_ptr<build> b (pb.build); force_state force (b->state == build_state::built ? force_state::forced : force_state::forcing); if (b->force != force) { + // Log the force rebuild with the warning severity, truncating the + // reason if too long. 
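+      // For example (a made-up entry):
+      //
+      //   force rebuild for libhello/1.2.3
+      //   linux_debian_12-gcc/x86_64-linux-gnu default public-0.15.0
+      //   (state: building unforced): flaky test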
+ // + diag_record dr (warn); + dr << "force rebuild for "; + + if (!b->tenant.empty ()) + dr << b->tenant << ' '; + + dr << b->package_name << '/' << b->package_version << ' ' + << b->target_config_name << '/' << b->target << ' ' + << b->package_config_name << ' ' + << b->toolchain_name << '-' << b->toolchain_version + << " (state: " << to_string (b->state) << ' ' << to_string (b->force) + << "): "; + + if (reason.size () < 50) + dr << reason; + else + dr << string (reason, 0, 50) << "..."; + b->force = force; build_db_->update (b); - l1 ([&]{trace << "force rebuild for " - << b->tenant << ' ' - << b->package_name << '/' << b->package_version << ' ' - << b->configuration << ' ' - << b->toolchain_name << '-' << b->toolchain_version - << ": " << reason;}); + if (force == force_state::forcing) + { + shared_ptr<build_tenant> t (build_db_->load<build_tenant> (b->tenant)); + + if (t->service) + { + auto i (tenant_service_map_.find (t->service->type)); + + if (i != tenant_service_map_.end ()) + { + tsq = dynamic_cast<const tenant_service_build_queued*> ( + i->second.get ()); + + // If we ought to call the + // tenant_service_build_queued::build_queued() callback, then also + // set the package tenant's queued timestamp to the current time + // to prevent the notifications race (see tenant::queued_timestamp + // for details). + // + if (tsq != nullptr) + { + // Calculate the tenant service hints. + // + buildable_package_count tpc ( + build_db_->query_value<buildable_package_count> ( + query<buildable_package_count>::build_tenant::id == t->id)); + + shared_ptr<build_package> p ( + build_db_->load<build_package> (b->id.package)); + + qhs = tenant_service_build_queued::build_queued_hints { + tpc == 1, p->configs.size () == 1}; + + // Set the package tenant's queued timestamp. + // + t->queued_timestamp = system_clock::now (); + build_db_->update (t); + + tss = make_pair (move (*t->service), move (b)); + } + } + } + } } t.commit (); } + // If the incomplete package build is being forced to rebuild and the + // tenant-associated third-party service needs to be notified about the + // queued builds, then call the tenant_service_build_queued::build_queued() + // callback function and update the service state, if requested. + // + if (tsq != nullptr) + { + assert (tss); // Wouldn't be here otherwise. + + const tenant_service& ss (tss->first); + build& b (*tss->second); + + vector<build> qbs; + qbs.push_back (move (b)); + + if (auto f = tsq->build_queued (ss, + qbs, + build_state::building, + qhs, + log_writer_)) + update_tenant_service_state (conn, qbs.back ().tenant, f); + } + // We have all the data, so don't buffer the response content. // ostream& os (rs.content (200, "text/plain;charset=utf-8", false)); diff --git a/mod/mod-build-force.hxx b/mod/mod-build-force.hxx index 22df383..ea9c141 100644 --- a/mod/mod-build-force.hxx +++ b/mod/mod-build-force.hxx @@ -8,6 +8,7 @@ #include <libbrep/utility.hxx> #include <mod/module-options.hxx> +#include <mod/tenant-service.hxx> #include <mod/database-module.hxx> #include <mod/build-config-module.hxx> @@ -16,13 +17,13 @@ namespace brep class build_force: public database_module, private build_config_module { public: - build_force () = default; + explicit + build_force (const tenant_service_map&); // Create a shallow copy (handling instance) if initialized and a deep // copy (context exemplar) otherwise. 
// - explicit - build_force (const build_force&); + build_force (const build_force&, const tenant_service_map&); virtual bool handle (request&, response&); @@ -39,6 +40,7 @@ namespace brep private: shared_ptr<options::build_force> options_; + const tenant_service_map& tenant_service_map_; }; } diff --git a/mod/mod-build-log.cxx b/mod/mod-build-log.cxx index 3032e52..c8e803b 100644 --- a/mod/mod-build-log.cxx +++ b/mod/mod-build-log.cxx @@ -3,12 +3,10 @@ #include <mod/mod-build-log.hxx> -#include <algorithm> // find_if() - #include <odb/database.hxx> #include <odb/transaction.hxx> -#include <libbutl/timestamp.mxx> // to_stream() +#include <libbutl/timestamp.hxx> // to_stream() #include <web/server/module.hxx> @@ -18,7 +16,6 @@ #include <mod/module-options.hxx> using namespace std; -using namespace bbot; using namespace brep::cli; using namespace odb::core; @@ -68,7 +65,7 @@ handle (request& rq, response& rs) // // Note that the URL path must be in the following form: // - // <pkg-name>/<pkg-version>/log/<cfg-name>/<toolchain-name>/<toolchain-version>[/<operation>] + // <pkg-name>/<pkg-version>/log/<cfg-name>/<target>/<toolchain-name>/<toolchain-version>[/<operation>] // // Also note that the presence of the first 3 components is guaranteed by // the repository_root module. @@ -124,12 +121,33 @@ handle (request& rq, response& rs) assert (i != lpath.end () && *i == "log"); if (++i == lpath.end ()) - throw invalid_argument ("no configuration name"); + throw invalid_argument ("no target"); + + target_triplet target; + try + { + target = target_triplet (*i++); + } + catch (const invalid_argument& e) + { + throw invalid_argument (string ("invalid target: ") + e.what ()); + } + + if (i == lpath.end ()) + throw invalid_argument ("no target configuration name"); - string config (*i++); + string target_config (*i++); - if (config.empty ()) - throw invalid_argument ("empty configuration name"); + if (target_config.empty ()) + throw invalid_argument ("empty target configuration name"); + + if (i == lpath.end ()) + throw invalid_argument ("no package configuration name"); + + string package_config (*i++); + + if (package_config.empty ()) + throw invalid_argument ("empty package configuration name"); if (i == lpath.end ()) throw invalid_argument ("no toolchain name"); @@ -145,7 +163,9 @@ handle (request& rq, response& rs) version toolchain_version (parse_version (*i++, "toolchain version")); id = build_id (package_id (tenant, move (name), package_version), - move (config), + move (target), + move (target_config), + move (package_config), move (toolchain_name), toolchain_version); @@ -182,7 +202,7 @@ handle (request& rq, response& rs) auto config_expired = [&trace, &lpath, this] (const string& d) { l2 ([&]{trace << "package build configuration for " << lpath - << (!tenant.empty () ? "(" + tenant + ")" : "") + << (!tenant.empty () ? '(' + tenant + ')' : "") << " expired: " << d;}); throw invalid_request (404, "package build configuration expired: " + d); @@ -190,9 +210,11 @@ handle (request& rq, response& rs) // Make sure the build configuration still exists. // - if (build_conf_map_->find (id.configuration.c_str ()) == - build_conf_map_->end ()) - config_expired ("no configuration"); + if (target_conf_map_->find ( + build_target_config_id {id.target, + id.target_config_name}) == + target_conf_map_->end ()) + config_expired ("no target configuration"); // Load the package build configuration (if present). 
// @@ -205,11 +227,16 @@ handle (request& rq, response& rs) query<package_build>::build::id == id, pb)) config_expired ("no package build"); - b = pb.build; + b = move (pb.build); if (b->state != build_state::built) + { config_expired ("state is " + to_string (b->state)); + } else + { build_db_->load (*b, b->results_section); + build_db_->load (*b, b->auxiliary_machines_section); + } t.commit (); } @@ -228,15 +255,20 @@ handle (request& rq, response& rs) if (!b->tenant.empty ()) os << options_->tenant_name () << ": " << b->tenant << endl << endl; - os << "package: " << b->package_name << endl - << "version: " << b->package_version << endl - << "toolchain: " << b->toolchain_name << '-' << b->toolchain_version - << endl - << "config: " << b->configuration << endl - << "machine: " << b->machine << " (" << b->machine_summary << ")" - << endl - << "target: " << b->target.string () << endl - << "timestamp: "; + os << "package: " << b->package_name << endl + << "version: " << b->package_version << endl + << "toolchain: " << b->toolchain_name << '-' + << b->toolchain_version << endl + << "target: " << b->target << endl + << "target config: " << b->target_config_name << endl + << "package config: " << b->package_config_name << endl + << "build machine: " << b->machine.name << " -- " + << b->machine.summary << endl; + + for (const build_machine& m: b->auxiliary_machines) + os << "auxiliary machine: " << m.name << " -- " << m.summary << endl; + + os << "timestamp: "; butl::to_stream (os, b->timestamp, diff --git a/mod/mod-build-result.cxx b/mod/mod-build-result.cxx index 734ea5c..ccce17f 100644 --- a/mod/mod-build-result.cxx +++ b/mod/mod-build-result.cxx @@ -6,12 +6,8 @@ #include <odb/database.hxx> #include <odb/transaction.hxx> -#include <libbutl/openssl.mxx> -#include <libbutl/sendmail.mxx> -#include <libbutl/fdstream.mxx> -#include <libbutl/process-io.mxx> -#include <libbutl/manifest-parser.mxx> -#include <libbutl/manifest-serializer.mxx> +#include <libbutl/manifest-parser.hxx> +#include <libbutl/manifest-serializer.hxx> #include <libbbot/manifest.hxx> @@ -19,11 +15,12 @@ #include <libbrep/build.hxx> #include <libbrep/build-odb.hxx> -#include <libbrep/package.hxx> -#include <libbrep/package-odb.hxx> +#include <libbrep/build-package.hxx> +#include <libbrep/build-package-odb.hxx> -#include <mod/build.hxx> // *_url() +#include <mod/build.hxx> // send_notification_email() #include <mod/module-options.hxx> +#include <mod/tenant-service.hxx> using namespace std; using namespace butl; @@ -31,15 +28,21 @@ using namespace bbot; using namespace brep::cli; using namespace odb::core; +brep::build_result:: +build_result (const tenant_service_map& tsm) + : tenant_service_map_ (tsm) +{ +} + // While currently the user-defined copy constructor is not required (we don't // need to deep copy nullptr's), it is a good idea to keep the placeholder // ready for less trivial cases. // brep::build_result:: -build_result (const build_result& r) - : database_module (r), - build_config_module (r), - options_ (r.initialized_ ? r.options_ : nullptr) +build_result (const build_result& r, const tenant_service_map& tsm) + : build_result_module (r), + options_ (r.initialized_ ? 
r.options_ : nullptr), + tenant_service_map_ (tsm) { } @@ -51,16 +54,8 @@ init (scanner& s) options_ = make_shared<options::build_result> ( s, unknown_mode::fail, unknown_mode::fail); - database_module::init (static_cast<const options::package_db&> (*options_), - options_->package_db_retry ()); - if (options_->build_config_specified ()) - { - database_module::init (static_cast<const options::build_db&> (*options_), - options_->build_db_retry ()); - - build_config_module::init (*options_); - } + build_result_module::init (*options_, *options_); if (options_->root ().empty ()) options_->root (dir_path ("/")); @@ -108,120 +103,23 @@ handle (request& rq, response&) throw invalid_request (400, e.what ()); } - // Parse the task response session to obtain the build id and the timestamp, - // and to make sure the session matches tenant and the result manifest's - // package name, and version. + // Parse the task response session and make sure the session matches tenant + // and the result manifest's package name, and version. // - build_id id; - timestamp session_timestamp; + parse_session_result session; + const build_id& id (session.id); try { - const string& s (rqm.session); - - size_t p (s.find ('/')); // End of tenant. - - if (p == string::npos) - throw invalid_argument ("no package name"); - - if (tenant.compare (0, tenant.size (), s, 0, p) != 0) - throw invalid_argument ("tenant mismatch"); - - size_t b (p + 1); // Start of package name. - p = s.find ('/', b); // End of package name. - - if (p == b) - throw invalid_argument ("empty package name"); - - if (p == string::npos) - throw invalid_argument ("no package version"); - - package_name& name (rqm.result.name); - { - const string& n (name.string ()); - if (n.compare (0, n.size (), s, b, p - b) != 0) - throw invalid_argument ("package name mismatch"); - } - - b = p + 1; // Start of version. - p = s.find ('/', b); // End of version. - - if (p == string::npos) - throw invalid_argument ("no configuration name"); - - auto parse_version = [&s, &b, &p] (const char* what) -> version - { - // Intercept exception handling to add the parsing error attribution. - // - try - { - return brep::version (string (s, b, p - b)); - } - catch (const invalid_argument& e) - { - throw invalid_argument (string ("invalid ") + what + ": " + e.what ()); - } - }; + // Note: also verifies that the tenant matches the session. + // + session = parse_session (rqm.session); - version package_version (parse_version ("package version")); + if (rqm.result.name != id.package.name) + throw invalid_argument ("package name mismatch"); - if (package_version != rqm.result.version) + if (rqm.result.version != session.package_version) throw invalid_argument ("package version mismatch"); - - b = p + 1; // Start of configuration name. - p = s.find ('/', b); // End of configuration name. - - if (p == string::npos) - throw invalid_argument ("no toolchain name"); - - string config (s, b, p - b); - - if (config.empty ()) - throw invalid_argument ("empty configuration name"); - - b = p + 1; // Start of toolchain name. - p = s.find ('/', b); // End of toolchain name. - - if (p == string::npos) - throw invalid_argument ("no toolchain version"); - - string toolchain_name (s, b, p - b); - - if (toolchain_name.empty ()) - throw invalid_argument ("empty toolchain name"); - - b = p + 1; // Start of toolchain version. - p = s.find ('/', b); // End of toolchain version. 
- - if (p == string::npos) - throw invalid_argument ("no timestamp"); - - version toolchain_version (parse_version ("toolchain version")); - - id = build_id (package_id (move (tenant), move (name), package_version), - move (config), - move (toolchain_name), - toolchain_version); - - try - { - size_t tsn; - string ts (s, p + 1); - - session_timestamp = timestamp ( - chrono::duration_cast<timestamp::duration> ( - chrono::nanoseconds (stoull (ts, &tsn)))); - - if (tsn != ts.size ()) - throw invalid_argument ("trailing junk"); - } - // Handle invalid_argument or out_of_range (both derive from logic_error), - // that can be thrown by stoull(). - // - catch (const logic_error& e) - { - throw invalid_argument (string ("invalid timestamp: ") + e.what ()); - } } catch (const invalid_argument& e) { @@ -233,52 +131,42 @@ handle (request& rq, response&) // if the session is valid. The thinking is that this is a problem with the // controller's setup (expires too fast), not with the agent's. // - auto warn_expired = [&rqm, &warn] (const string& d) + // Note, though, that there can be quite a common situation when a build + // machine is suspended by the bbot agent due to the build timeout. In this + // case the task result request may arrive anytime later (after the issue is + // investigated, etc) with the abort or abnormal status. By that arrival + // time a new build task may already be issued/completed for this package + // build configuration or this configuration may even be gone (brep has been + // reconfigured, package has gone, etc). We will log no warning in this + // case, assuming that such an expiration is not a problem with the + // controller's setup. + // + shared_ptr<build> b; + result_status rs (rqm.result.status); + + auto warn_expired = [&rqm, &warn, &b, &session, rs] (const string& d) { - warn << "session '" << rqm.session << "' expired: " << d; + if (!((b == nullptr || b->timestamp > session.timestamp) && + (rs == result_status::abort || rs == result_status::abnormal))) + warn << "session '" << rqm.session << "' expired: " << d; }; // Make sure the build configuration still exists. // - const bbot::build_config* cfg; + const build_target_config* tc; { - auto i (build_conf_map_->find (id.configuration.c_str ())); + auto i (target_conf_map_->find ( + build_target_config_id {id.target, id.target_config_name})); - if (i == build_conf_map_->end ()) + if (i == target_conf_map_->end ()) { warn_expired ("no build configuration"); return true; } - cfg = i->second; - } - - // Load the built package (if present). - // - // The only way not to deal with 2 databases simultaneously is to pull - // another bunch of the package fields into the build_package foreign - // object, which is a pain (see build_package.hxx for details). Doesn't seem - // worth it here: email members are really secondary and we don't need to - // switch transactions back and forth. - // - shared_ptr<package> pkg; - { - transaction t (package_db_->begin ()); - pkg = package_db_->find<package> (id.package); - t.commit (); - } - - if (pkg == nullptr) - { - warn_expired ("no package"); - return true; + tc = i->second; } - auto print_args = [&trace, this] (const char* args[], size_t n) - { - l2 ([&]{trace << process_args {args, n};}); - }; - // Load and update the package build configuration (if present). 
  //
  // NULL if the package build doesn't exist or is not updated for any reason
  //
  shared_ptr<build> bld;

-  optional<result_status> prev_status;
+  // The built package configuration.
+  //
+  // Not NULL if bld is not NULL.
+  //
+  shared_ptr<build_package> pkg;
+  build_package_config* cfg (nullptr);
+
+  // Don't send email to the build-email address for the success-to-success
+  // status change, unless the build was forced.
+  //
  bool build_notify (false);
  bool unforced (true);

+  // If the package is built (result status differs from interrupt, etc) and
+  // the package tenant has a third-party service state associated with it,
+  // then check if the tenant_service_build_built callback is registered for
+  // the type of the associated service. If it is, then stash the state, the
+  // build object, and the callback pointer for the subsequent service `built`
+  // notification. Note that we send this notification for the skip result as
+  // well, since it is semantically equivalent to the previous build result
+  // with the actual build process being optimized out.
+  //
+  // If the package build is interrupted and the tenant_service_build_queued
+  // callback is associated with the package tenant, then stash the state, the
+  // build object, and the callback pointer and calculate the hints for the
+  // subsequent service `queued` notification.
+  //
+  const tenant_service_build_built* tsb (nullptr);
+  const tenant_service_build_queued* tsq (nullptr);
+  optional<pair<tenant_service, shared_ptr<build>>> tss;
+  tenant_service_build_queued::build_queued_hints qhs;
+
+  // Note that if the session authentication fails (probably due to the
+  // authentication settings change), then we log this case with the warning
+  // severity and respond with the 200 HTTP code as if the challenge is
+  // valid. The thinking is that we shouldn't alarm a law-abiding agent and
+  // shouldn't provide any information to a malicious one.
+  //
+  connection_ptr conn (build_db_->connection ());
  {
-    transaction t (build_db_->begin ());
+    transaction t (conn->begin ());

    package_build pb;
-    shared_ptr<build> b;
+
+    auto build_timestamp = [&b] ()
+    {
+      return to_string (
+        chrono::duration_cast<std::chrono::nanoseconds> (
+          b->timestamp.time_since_epoch ()).count ());
+    };
+
    if (!build_db_->query_one<package_build> (
          query<package_build>::build::id == id, pb))
+    {
      warn_expired ("no package build");
+    }
    else if ((b = move (pb.build))->state != build_state::building)
-      warn_expired ("package configuration state is " + to_string (b->state));
-    else if (b->timestamp != session_timestamp)
-      warn_expired ("non-matching timestamp");
-    else
    {
-      // Check the challenge.
-      //
-      // If the challenge doesn't match expectations (probably due to the
-      // authentication settings change), then we log this case with the
-      // warning severity and respond with the 200 HTTP code as if the
-      // challenge is valid. The thinking is that we shouldn't alarm a
-      // law-abaiding agent and shouldn't provide any information to a
-      // malicious one.
- // - auto warn_auth = [&rqm, &warn] (const string& d) + warn_expired ("package configuration state is " + to_string (b->state) + + ", force state " + to_string (b->force) + + ", timestamp " + build_timestamp ()); + } + else if (b->timestamp != session.timestamp) + { + warn_expired ("non-matching timestamp " + build_timestamp ()); + } + else if (authenticate_session (*options_, rqm.challenge, *b, rqm.session)) + { + const tenant_service_base* ts (nullptr); + + shared_ptr<build_tenant> t (build_db_->load<build_tenant> (b->tenant)); + + if (t->service) { - warn << "session '" << rqm.session << "' authentication failed: " << d; - }; + auto i (tenant_service_map_.find (t->service->type)); - bool auth (false); + if (i != tenant_service_map_.end ()) + ts = i->second.get (); + } - // Must both be present or absent. + // If the build is interrupted, then revert it to the original built + // state if this is a rebuild. Otherwise (initial build), turn the build + // into the queued state if the tenant_service_build_queued callback is + // registered for the package tenant and delete it from the database + // otherwise. + // + // Note that if the tenant_service_build_queued callback is registered, + // we always send the `queued` notification for the interrupted build, + // even when we reverse it to the original built state. We could also + // turn the build into the queued state in this case, but it feels that + // there is no harm in keeping the previous build information available + // for the user. // - if (!b->agent_challenge != !rqm.challenge) - warn_auth (rqm.challenge - ? "unexpected challenge" - : "challenge is expected"); - else if (bot_agent_key_map_ == nullptr) // Authentication is disabled. - auth = true; - else if (!b->agent_challenge) // Authentication is recently enabled. - warn_auth ("challenge is required now"); - else + if (rs == result_status::interrupt) { - assert (b->agent_fingerprint && rqm.challenge); - auto i (bot_agent_key_map_->find (*b->agent_fingerprint)); - - // The agent's key is recently replaced. + // Schedule the `queued` notification, if the + // tenant_service_build_queued callback is registered for the tenant. // - if (i == bot_agent_key_map_->end ()) - warn_auth ("agent's public key not found"); - else + tsq = dynamic_cast<const tenant_service_build_queued*> (ts); + + if (b->status) // Is this a rebuild? { - try - { - openssl os (print_args, - path ("-"), fdstream_mode::text, 2, - process_env (options_->openssl (), - options_->openssl_envvar ()), - "rsautl", - options_->openssl_option (), - "-verify", "-pubin", "-inkey", i->second); - - for (const auto& c: *rqm.challenge) - os.out.put (c); // Sets badbit on failure. - - os.out.close (); - - string s; - getline (os.in, s); - - bool v (os.in.eof ()); - os.in.close (); - - if (os.wait () && v) - { - auth = s == *b->agent_challenge; - - if (!auth) - warn_auth ("challenge mismatched"); - } - else // The signature is presumably meaningless. - warn_auth ("unable to verify challenge"); - } - catch (const system_error& e) + b->state = build_state::built; + + // Keep the force rebuild indication. Note that the forcing state is + // only valid for the building state. + // + if (b->force == force_state::forcing) + b->force = force_state::forced; + + // Cleanup the interactive build login information. + // + b->interactive = nullopt; + + // Cleanup the authentication data. 
+          //
+          b->agent_fingerprint = nullopt;
+          b->agent_challenge = nullopt;
+
+          // Note that we are unable to restore the pre-rebuild timestamp
+          // since it has been overwritten when the build task was issued.
+          // That, however, feels ok and we just keep it unchanged.
+          //
+          // Moreover, we actually use the fact that the build's timestamp is
+          // greater than its soft_timestamp as an indication that the build
+          // object represents the interrupted rebuild (see the build_task
+          // handler for details).
+          //
+          // @@ Actually, we are also unable to restore the pre-rebuild
+          //    machine and auxiliary machines, which are also displayed in
+          //    the build log and may potentially be confusing. Should we
+          //    drop them from the log in this case or replace with the
+          //    "machine: unknown" record?
+
+          build_db_->update (b);
+        }
+        else // Initial build.
+        {
+          if (tsq != nullptr)
+          {
+            // Since this is not a rebuild, there are no operation results and
+            // thus we don't need to load the results section to erase results
+            // from the database.
+            //
+            assert (b->results.empty ());
+
+            *b = build (move (b->tenant),
+                        move (b->package_name),
+                        move (b->package_version),
+                        move (b->target),
+                        move (b->target_config_name),
+                        move (b->package_config_name),
+                        move (b->toolchain_name),
+                        move (b->toolchain_version));
+
+            build_db_->update (b);
+          }
+          else
+            build_db_->erase (b);
+        }
+
+        // If we ought to call the tenant_service_build_queued::build_queued()
+        // callback, then also set the package tenant's queued timestamp to
+        // the current time to prevent the notifications race (see
+        // tenant::queued_timestamp for details).
+        //
+        if (tsq != nullptr)
+        {
+          // Calculate the tenant service hints.
+          //
+          buildable_package_count tpc (
+            build_db_->query_value<buildable_package_count> (
+              query<buildable_package_count>::build_tenant::id == t->id));
+
+          shared_ptr<build_package> p (
+            build_db_->load<build_package> (b->id.package));
+
+          qhs = tenant_service_build_queued::build_queued_hints {
+            tpc == 1, p->configs.size () == 1};
+
+          // Set the package tenant's queued timestamp.
+          //
+          t->queued_timestamp = system_clock::now ();
+          build_db_->update (t);
+        }
+      }
+      else // Regular or skip build result.
+      {
+        // Schedule the `built` notification, if the
+        // tenant_service_build_built callback is registered for the tenant.
+        //
+        tsb = dynamic_cast<const tenant_service_build_built*> (ts);
+
+        // Verify the result status/checksums.
+        //
+        // Specifically, if the result status is skip, then it can only be in
+        // response to the soft rebuild task (all checksums are present in the
+        // build object) and the result checksums must match the build object
+        // checksums. On verification failure respond with the bad request
+        // HTTP code (400).
+        //
+        if (rs == result_status::skip)
+        {
+          if (!b->agent_checksum ||
+              !b->worker_checksum ||
+              !b->dependency_checksum)
+            throw invalid_request (400, "unexpected skip result status");
+
+          // Can only be absent for initial build, in which case the
+          // checksums are also absent and we would end up with the above
+          // 400 response.
+ // + assert (b->status); + + // Verify that the result checksum matches the build checksum and + // throw invalid_request(400) if that's not the case. + // + auto verify = [] (const string& build_checksum, + const optional<string>& result_checksum, + const char* what) + { + if (!result_checksum) + throw invalid_request ( + 400, + string (what) + " checksum is expected for skip result status"); + + if (*result_checksum != build_checksum) + throw invalid_request ( + 400, + string (what) + " checksum '" + build_checksum + + "' is expected instead of '" + *result_checksum + + "' for skip result status"); + }; + + verify (*b->agent_checksum, rqm.agent_checksum, "agent"); + + verify (*b->worker_checksum, + rqm.result.worker_checksum, + "worker"); + + verify (*b->dependency_checksum, + rqm.result.dependency_checksum, + "dependency"); + } - prev_status = move (b->status); + unforced = (b->force == force_state::unforced); + + build_notify = !(rs == result_status::success && + b->status && + *b->status == rs && + unforced); b->state = build_state::built; - b->status = rqm.result.status; b->force = force_state::unforced; + // Cleanup the interactive build login information. + // + b->interactive = nullopt; + // Cleanup the authentication data. // b->agent_fingerprint = nullopt; b->agent_challenge = nullopt; - // Mark the section as loaded, so results are updated. - // - b->results_section.load (); - b->results = move (rqm.result.results); - b->timestamp = system_clock::now (); - b->completion_timestamp = b->timestamp; + b->soft_timestamp = b->timestamp; + + // If the result status is other than skip, then save the status, + // results, and checksums and update the hard timestamp. Also stash + // the service notification information, if present. + // + if (rs != result_status::skip) + { + b->status = rs; + b->hard_timestamp = b->soft_timestamp; + + // Mark the section as loaded, so results are updated. + // + b->results_section.load (); + b->results = move (rqm.result.results); + + // Save the checksums. + // + b->agent_checksum = move (rqm.agent_checksum); + b->worker_checksum = move (rqm.result.worker_checksum); + b->dependency_checksum = move (rqm.result.dependency_checksum); + } build_db_->update (b); - shared_ptr<build_package> p ( - build_db_->load<build_package> (b->id.package)); + pkg = build_db_->load<build_package> (b->id.package); + cfg = find (b->package_config_name, pkg->configs); - if (belongs (*cfg, "all") && - !exclude (p->builds, p->constraints, *cfg)) - bld = move (b); + // The package configuration should be present (see mod-builds.cxx for + // details) but if it is not, let's log the warning. + // + if (cfg != nullptr) + { + // Don't send the build notification email if the task result is + // `skip`, the configuration is hidden, or is now excluded by the + // package. + // + if (rs != result_status::skip && !belongs (*tc, "hidden")) + { + build_db_->load (*pkg, pkg->constraints_section); + + if (!exclude (*cfg, pkg->builds, pkg->constraints, *tc)) + bld = b; + } + } + else + warn << "cannot find configuration '" << b->package_config_name + << "' for package " << pkg->id.name << '/' << pkg->version; } + + // If required, stash the service notification information. + // + if (tsb != nullptr || tsq != nullptr) + tss = make_pair (move (*t->service), move (b)); } t.commit (); } - if (bld == nullptr) - return true; - - string subj ((unforced ? 
"build " : "rebuild ") + - to_string (*bld->status) + ": " + - bld->package_name.string () + '/' + - bld->package_version.string () + '/' + - bld->configuration + '/' + - bld->toolchain_name + '-' + bld->toolchain_version.string ()); + // We either notify about the queued build or notify about the built package + // or don't notify at all. + // + assert (tsb == nullptr || tsq == nullptr); - // Send notification emails to the interested parties. + // If the package build is interrupted and the tenant-associated third-party + // service needs to be notified about the queued builds, then call the + // tenant_service_build_queued::build_queued() callback function and update + // the service state, if requested. // - auto send_email = [&bld, &subj, &error, &trace, &print_args, this] - (const string& to) + if (tsq != nullptr) { - try - { - l2 ([&]{trace << "email '" << subj << "' to " << to;}); - - // Redirect the diagnostics to webserver error log. - // - // Note: if using this somewhere else, then need to factor out all this - // exit status handling code. - // - sendmail sm (print_args, - 2, - options_->email (), - subj, - {to}); - - if (bld->results.empty ()) - sm.out << "No operation results available." << endl; - else - { - const string& host (options_->host ()); - const dir_path& root (options_->root ()); - - ostream& os (sm.out); - - assert (bld->status); - os << "combined: " << *bld->status << endl << endl - << " " << build_log_url (host, root, *bld) << endl << endl; + assert (tss); // Wouldn't be here otherwise. - for (const auto& r: bld->results) - os << r.operation << ": " << r.status << endl << endl - << " " << build_log_url (host, root, *bld, &r.operation) - << endl << endl; - - os << "Force rebuild (enter the reason, use '+' instead of spaces):" - << endl << endl - << " " << build_force_url (host, root, *bld) << endl; - } + const tenant_service& ss (tss->first); - sm.out.close (); + vector<build> qbs; + qbs.push_back (move (*tss->second)); - if (!sm.wait ()) - error << "sendmail " << *sm.exit; - } - // Handle process_error and io_error (both derive from system_error). - // - catch (const system_error& e) - { - error << "sendmail error: " << e; - } - }; + if (auto f = tsq->build_queued (ss, + qbs, + build_state::building, + qhs, + log_writer_)) + update_tenant_service_state (conn, qbs.back ().tenant, f); + } - // Don't send the build notification email if the empty package build email - // is specified. + // If a third-party service needs to be notified about the built package, + // then call the tenant_service_build_built::build_built() callback function + // and update the service state, if requested. // - optional<email>& build_email (pkg->build_email); - if (build_notify && (!build_email || !build_email->empty ())) + if (tsb != nullptr) { - // If none of the package build-* addresses is specified, then the build - // email address is assumed to be the same as the package email address, - // if specified, otherwise as the project email address, if specified, - // otherwise the notification email is not sent. - // - optional<email> to; + assert (tss); // Wouldn't be here otherwise. - if (build_email) - to = move (build_email); - else if (!pkg->build_warning_email && !pkg->build_error_email) - to = move (pkg->package_email ? 
pkg->package_email : pkg->email);
+    const tenant_service& ss (tss->first);
+    const build& b (*tss->second);

-    if (to)
-      send_email (*to);
+    if (auto f = tsb->build_built (ss, b, log_writer_))
+      update_tenant_service_state (conn, b.tenant, f);
  }

-  assert (bld->status);
-
-  // Send the build warning/error notification emails, if requested.
-  //
-  if (pkg->build_warning_email && *bld->status >= result_status::warning)
-    send_email (*pkg->build_warning_email);
-
-  if (pkg->build_error_email && *bld->status >= result_status::error)
-    send_email (*pkg->build_error_email);
+  if (bld != nullptr)
+  {
+    // Don't send the notification email for success-to-success status change,
+    // etc.
+    //
+    if (!build_notify)
+      (cfg->email ? cfg->email : pkg->build_email) = email ();
+
+    send_notification_email (*options_,
+                             conn,
+                             *bld,
+                             *pkg,
+                             *cfg,
+                             unforced ? "build" : "rebuild",
+                             error,
+                             verb_ >= 2 ? &trace : nullptr);
+  }

  return true;
}
diff --git a/mod/mod-build-result.hxx b/mod/mod-build-result.hxx
index 71a60f9..96449d5 100644
--- a/mod/mod-build-result.hxx
+++ b/mod/mod-build-result.hxx
@@ -8,21 +8,21 @@
 #include <libbrep/utility.hxx>

 #include <mod/module-options.hxx>
-#include <mod/database-module.hxx>
-#include <mod/build-config-module.hxx>
+#include <mod/tenant-service.hxx>
+#include <mod/build-result-module.hxx>

 namespace brep
 {
-  class build_result: public database_module, private build_config_module
+  class build_result: public build_result_module
   {
   public:
-    build_result () = default;
+    explicit
+    build_result (const tenant_service_map&);

     // Create a shallow copy (handling instance) if initialized and a deep
     // copy (context exemplar) otherwise.
     //
-    explicit
-    build_result (const build_result&);
+    build_result (const build_result&, const tenant_service_map&);

     virtual bool
     handle (request&, response&);
@@ -36,6 +36,7 @@ namespace brep

   private:
     shared_ptr<options::build_result> options_;
+    const tenant_service_map& tenant_service_map_;
   };
 }

diff --git a/mod/mod-build-task.cxx b/mod/mod-build-task.cxx
index 04b2a36..07aff8d 100644
--- a/mod/mod-build-task.cxx
+++ b/mod/mod-build-task.cxx
@@ -4,23 +4,26 @@
 #include <mod/mod-build-task.hxx>

 #include <map>
+#include <regex>
 #include <chrono>
+#include <random>

 #include <odb/database.hxx>
 #include <odb/transaction.hxx>
 #include <odb/schema-catalog.hxx>

-#include <libbutl/sha256.mxx>
-#include <libbutl/utility.mxx> // compare_c_string
-#include <libbutl/openssl.mxx>
-#include <libbutl/fdstream.mxx> // nullfd
-#include <libbutl/process-io.mxx>
-#include <libbutl/path-pattern.mxx>
-#include <libbutl/manifest-parser.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/ft/lang.hxx> // thread_local
+
+#include <libbutl/regex.hxx>
+#include <libbutl/sha256.hxx>
+#include <libbutl/openssl.hxx>
+#include <libbutl/fdstream.hxx> // nullfd
+#include <libbutl/process-io.hxx>
+#include <libbutl/path-pattern.hxx>
+#include <libbutl/manifest-parser.hxx>
+#include <libbutl/manifest-serializer.hxx>

 #include <libbbot/manifest.hxx>
-#include <libbbot/build-config.hxx>

 #include <web/server/module.hxx>

@@ -29,7 +32,9 @@
 #include <libbrep/build-package.hxx>
 #include <libbrep/build-package-odb.hxx>

+#include <mod/build.hxx> // send_notification_email()
 #include <mod/module-options.hxx>
+#include <mod/build-target-config.hxx>

 using namespace std;
 using namespace butl;
@@ -37,15 +42,62 @@ using namespace bbot;
 using namespace brep::cli;
 using namespace odb::core;

+static thread_local mt19937 rand_gen (random_device {} ());
+
+// The challenge (nonce) is
randomly generated for every build task if brep is +// configured to authenticate bbot agents. +// +// Nonce generator must guarantee a probabilistically insignificant chance +// of repeating a previously generated value. The common approach is to use +// counters or random number generators (alone or in combination), that +// produce values of the sufficient length. 64-bit non-repeating and +// 512-bit random numbers are considered to be more than sufficient for +// most practical purposes. +// +// We will produce the challenge as the sha256sum of the 512-bit random +// number and the 64-bit current timestamp combination. The latter is +// not really a non-repeating counter and can't be used alone. However +// adding it is a good and cheap uniqueness improvement. +// +// Note that since generating a challenge is not exactly cheap/fast, we will +// generate it in advance for every task request, out of the database +// transaction, and will cache it if it turns out that it wasn't used (no +// package configuration to (re-)build, etc). +// +static thread_local optional<string> challenge; + +// Generate a random number in the specified range (max value is included). +// +static inline size_t +rand (size_t min_val, size_t max_val) +{ + // Note that size_t is not whitelisted as a type the + // uniform_int_distribution class template can be instantiated with. + // + return min_val == max_val + ? min_val + : static_cast<size_t> ( + uniform_int_distribution<unsigned long long> ( + static_cast<unsigned long long> (min_val), + static_cast<unsigned long long> (max_val)) (rand_gen)); +} + +brep::build_task:: +build_task (const tenant_service_map& tsm) + : tenant_service_map_ (tsm) +{ +} + // While currently the user-defined copy constructor is not required (we don't // need to deep copy nullptr's), it is a good idea to keep the placeholder // ready for less trivial cases. // brep::build_task:: -build_task (const build_task& r) +build_task (const build_task& r, const tenant_service_map& tsm) : database_module (r), build_config_module (r), - options_ (r.initialized_ ? r.options_ : nullptr) + options_ (r.initialized_ ? r.options_ : nullptr), + tenant_service_map_ (tsm) { } @@ -59,13 +111,23 @@ init (scanner& s) if (options_->build_config_specified ()) { - // Verify that build-alt-rebuild-{start,stop} are both either specified or - // not. + // Verify that build-alt-*-rebuild-{start,stop} are both either specified + // or not. // - if (options_->build_alt_rebuild_start_specified () != - options_->build_alt_rebuild_stop_specified ()) - fail << "build-alt-rebuild-start and build-alt-rebuild-stop " - << "configuration options must both be either specified or not"; + auto bad_alt = [&fail] (const char* what) + { + fail << "build-alt-" << what << "-rebuild-start and build-alt-" << what + << "-rebuild-stop configuration options must both be either " + << "specified or not"; + }; + + if (options_->build_alt_soft_rebuild_start_specified () != + options_->build_alt_soft_rebuild_stop_specified ()) + bad_alt ("soft"); + + if (options_->build_alt_hard_rebuild_start_specified () != + options_->build_alt_hard_rebuild_stop_specified ()) + bad_alt ("hard"); database_module::init (*options_, options_->build_db_retry ()); @@ -86,6 +148,84 @@ init (scanner& s) options_->root (dir_path ("/")); } +// Skip tenants with the freshly queued packages from the consideration (see +// tenant::queued_timestamp for the details on the service notifications race +// prevention). 
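
The challenge machinery above boils down to a per-thread generator plus a
bounded rand() helper. The following self-contained sketch spells out the
same logic with qualified standard library names:

#include <cstddef>
#include <random>

static thread_local std::mt19937 rand_gen (std::random_device {} ());

// Generate a random number in the [min_val, max_val] range (max value
// included). Go through unsigned long long since the standard does not
// guarantee that uniform_int_distribution can be instantiated with size_t.
//
static inline std::size_t
rand (std::size_t min_val, std::size_t max_val)
{
  return min_val == max_val
    ? min_val
    : static_cast<std::size_t> (
        std::uniform_int_distribution<unsigned long long> (
          min_val, max_val) (rand_gen));
}
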
+// +template <typename T> +static inline query<T> +package_query (bool custom_bot, + brep::params::build_task& params, + interactive_mode imode, + uint64_t queued_expiration_ns) +{ + using namespace brep; + using query = query<T>; + + query q (!query::build_tenant::archived); + + if (custom_bot) + { + // Note that we could potentially only query the packages which refer to + // this custom bot key in one of their build configurations. For that we + // would need to additionally join the current query tables with the bot + // fingerprint-containing build_package_bot_keys and + // build_package_config_bot_keys tables and use the SELECT DISTINCT + // clause. The problem is that we also use the ORDER BY clause and in this + // case PostgreSQL requires all the ORDER BY clause expressions to also be + // present in the SELECT DISTINCT clause and fails with the 'for SELECT + // DISTINCT, ORDER BY expressions must appear in select list' error if + // that's not the case. Also note that in the ODB-generated code the + // 'build_package.project::TEXT' expression in the SELECT DISTINCT clause + // (see the CITEXT type mapping for details in libbrep/common.hxx) would + // not match the 'build_package.name' expression in the ORDER BY clause + // and so we will end up with the mentioned error. One (hackish) way to + // fix that would be to add a dummy member of the string type for the + // build_package.name column. This all sounds quite hairy at the moment + // and it also feels that this can potentially pessimize querying the + // packages built with the default bots only. Thus let's keep it simple + // for now and filter packages by the bot fingerprint at the program + // level. + // + q = q && (query::build_package::custom_bot.is_null () || + query::build_package::custom_bot); + } + else + q = q && (query::build_package::custom_bot.is_null () || + !query::build_package::custom_bot); + + // Filter by repositories canonical names (if requested). + // + const strings& rp (params.repository ()); + + if (!rp.empty ()) + q = q && + query::build_repository::id.canonical_name.in_range (rp.begin (), + rp.end ()); + + // If the interactive mode is false or true, then filter out the respective + // packages. + // + switch (imode) + { + case interactive_mode::false_: + { + q = q && query::build_tenant::interactive.is_null (); + break; + } + case interactive_mode::true_: + { + q = q && query::build_tenant::interactive.is_not_null (); + break; + } + case interactive_mode::both: break; + } + + return q && + (query::build_tenant::queued_timestamp.is_null () || + query::build_tenant::queued_timestamp < queued_expiration_ns); +} + bool brep::build_task:: handle (request& rq, response& rs) { @@ -126,155 +266,263 @@ handle (request& rq, response& rs) throw invalid_request (400, e.what ()); } - // Obtain the agent's public key fingerprint if requested. If the fingerprint - // is requested but is not present in the request or is unknown, then respond - // with 401 HTTP code (unauthorized). + // Obtain the agent's public key fingerprint if requested. If the + // fingerprint is requested but is not present in the request, then respond + // with 401 HTTP code (unauthorized). If a key with the specified + // fingerprint is not present in the build bot agent keys directory, then + // assume that this is a custom build bot. + // + // Note that if the agent authentication is not configured (the agent keys + // directory is not specified), then the bot can never be custom and its + // fingerprint is ignored, if present. 
  //
  optional<string> agent_fp;
+  bool custom_bot (false);

  if (bot_agent_key_map_ != nullptr)
  {
-    if (!tqm.fingerprint ||
-        bot_agent_key_map_->find (*tqm.fingerprint) ==
-        bot_agent_key_map_->end ())
+    if (!tqm.fingerprint)
      throw invalid_request (401, "unauthorized");

    agent_fp = move (tqm.fingerprint);
+
+    custom_bot = (bot_agent_key_map_->find (*agent_fp) ==
+                  bot_agent_key_map_->end ());
  }

-  task_response_manifest tsm;
+  // The resulting task manifest and the related build, package, and
+  // configuration objects. Note that the latter 3 are only meaningful if
+  // the task manifest is present.
+  //
+  task_response_manifest task_response;
+  shared_ptr<build> task_build;
+  shared_ptr<build_package> task_package;
+  const build_package_config* task_config;
+
+  auto serialize_task_response_manifest = [&task_response, &rs] ()
+  {
+    // @@ Probably it would be a good idea to also send some cache control
+    //    headers to avoid caching by HTTP proxies. That would require
+    //    extension of the web::response interface.
+    //
+
+    manifest_serializer s (rs.content (200, "text/manifest;charset=utf-8"),
+                           "task_response_manifest");
+    task_response.serialize (s);
+  };
+
+  interactive_mode imode (tqm.effective_interactive_mode ());

-  // Map build configurations to machines that are capable of building them.
-  // The first matching machine is selected for each configuration. Also
-  // create the configuration name list for use in database queries.
+  // Restrict the interactive mode (specified by the task request manifest)
+  // if the interactive parameter is specified and is other than "both". If
+  // values specified by the parameter and manifest are incompatible (false
+  // vs true), then just bail out responding with the manifest with an empty
+  // session.
+  //
+  if (params.interactive () != interactive_mode::both)
+  {
+    if (imode != interactive_mode::both)
+    {
+      if (params.interactive () != imode)
+      {
+        serialize_task_response_manifest ();
+        return true;
+      }
+    }
+    else
+      imode = params.interactive (); // Can only change both to true or false.
+  }
+
+  // Map build target configurations to machines that are capable of building
+  // them. The first matching machine is selected for each configuration.
  //
  struct config_machine
  {
-    const build_config* config;
+    const build_target_config* config;
    machine_header_manifest* machine;
  };

-  using config_machines = map<const char*, config_machine, compare_c_string>;
+  using config_machines = map<build_target_config_id, config_machine>;

-  cstrings cfg_names;
-  config_machines cfg_machines;
+  config_machines conf_machines;

-  for (const auto& c: *build_conf_)
+  for (const build_target_config& c: *target_conf_)
  {
-    for (auto& m: tqm.machines)
+    for (machine_header_manifest& m: tqm.machines)
    {
-      // The same story as in exclude() from build-config.cxx.
-      //
+      if (m.effective_role () == machine_role::build)
      try
      {
+        // The same story as in exclude() from build-target-config.cxx.
+        //
        if (path_match (dash_components_to_path (m.name),
                        dash_components_to_path (c.machine_pattern),
                        dir_path () /* start */,
-                        path_match_flags::match_absent) &&
-            cfg_machines.insert (
-              make_pair (c.name.c_str (), config_machine ({&c, &m}))).second)
-          cfg_names.push_back (c.name.c_str ());
+                        path_match_flags::match_absent))
+        {
+          conf_machines.emplace (build_target_config_id {c.target, c.name},
+                                 config_machine {&c, &m});
+          break;
+        }
      }
      catch (const invalid_path&) {}
    }
  }

-  // Go through packages until we find one that has no build configuration
-  // present in the database, or is in the building state but expired
-  // (collectively called unbuilt). If such a package configuration is found
-  // then put it into the building state, set the current timestamp and respond
-  // with the task for building this package configuration.
+  // Collect the auxiliary configurations/machines available for the build.
+  //
+  struct auxiliary_config_machine
+  {
+    string config;
+    const machine_header_manifest* machine;
+  };
+
+  vector<auxiliary_config_machine> auxiliary_config_machines;
+
+  for (const machine_header_manifest& m: tqm.machines)
+  {
+    if (m.effective_role () == machine_role::auxiliary)
+    {
+      // Derive the auxiliary configuration name by stripping the first
+      // (architecture) component from the machine name.
+      //
+      size_t p (m.name.find ('-'));
+
+      if (p == string::npos || p == 0 || p == m.name.size () - 1)
+        throw invalid_request (400,
+                               (string ("no ") +
+                                (p == 0 ? "architecture" : "OS") +
+                                " component in machine name '" + m.name + "'"));
+
+      auxiliary_config_machines.push_back (
+        auxiliary_config_machine {string (m.name, p + 1), &m});
+    }
+  }
+
+  // Go through package build configurations until we find one that has no
+  // build target configuration present in the database, or is in the building
+  // state but expired (collectively called unbuilt). If such a target
+  // configuration is found then put it into the building state, set the
+  // current timestamp and respond with the task for building this package
+  // configuration.
  //
  // While trying to find a non-built package configuration we will also
-  // collect the list of the built package configurations which it's time to
-  // rebuild. So if no unbuilt package is found, we will pickup one to
-  // rebuild. The rebuild preference is given in the following order: the
-  // greater force state, the greater overall status, the lower timestamp.
+  // collect the list of the built configurations which it's time to
+  // rebuild. So if no unbuilt package configuration is found, we will pick
+  // up one to rebuild. The rebuild preference is given in the following
+  // order: the greater force state, the greater overall status, the lower
+  // timestamp.
  //
-  if (!cfg_machines.empty ())
+  if (!conf_machines.empty ())
  {
    vector<shared_ptr<build>> rebuilds;

-    // Create the task response manifest. The package must have the internal
-    // repository loaded.
+    // Create the task response manifest. Must be called inside the build db
+    // transaction.
// - auto task = [this] (shared_ptr<build>&& b, - shared_ptr<build_package>&& p, + auto task = [this] (const build& b, + const build_package& p, + const build_package_config& pc, + small_vector<bpkg::test_dependency, 1>&& tests, + vector<auxiliary_machine>&& ams, + optional<string>&& interactive, const config_machine& cm) -> task_response_manifest { uint64_t ts ( chrono::duration_cast<std::chrono::nanoseconds> ( - b->timestamp.time_since_epoch ()).count ()); - - string session (b->tenant + '/' + - b->package_name.string () + '/' + - b->package_version.string () + '/' + - b->configuration + '/' + - b->toolchain_name + '/' + - b->toolchain_version.string () + '/' + + b.timestamp.time_since_epoch ()).count ()); + + string session (b.tenant + '/' + + b.package_name.string () + '/' + + b.package_version.string () + '/' + + b.target.string () + '/' + + b.target_config_name + '/' + + b.package_config_name + '/' + + b.toolchain_name + '/' + + b.toolchain_version.string () + '/' + to_string (ts)); - string result_url (options_->host () + - tenant_dir (options_->root (), b->tenant).string () + - "?build-result"); + string tenant (tenant_dir (options_->root (), b.tenant).string ()); + string result_url (options_->host () + tenant + "?build-result"); - lazy_shared_ptr<build_repository> r (p->internal_repository); + assert (transaction::has_current ()); + + assert (p.internal ()); // The package is expected to be buildable. + + shared_ptr<build_repository> r (p.internal_repository.load ()); strings fps; if (r->certificate_fingerprint) fps.emplace_back (move (*r->certificate_fingerprint)); - // Exclude external test packages which exclude the task build - // configuration. - // - small_vector<package, 1> tes; + const package_name& pn (p.id.name); - for (const build_test_dependency& td: p->tests) - { - // Don't exclude unresolved external tests. - // - // Note that this may result in the build task failure. However, - // silently excluding such tests could end up with missed software - // bugs which feels much worse. - // - if (td.package != nullptr) - { - shared_ptr<build_package> p (td.package.load ()); + bool module_pkg (pn.string ().compare (0, 10, "libbuild2-") == 0); - // Use the `all` class as a least restrictive default underlying - // build class set. Note that we should only apply the explicit - // build restrictions to the external test packages (think about - // the `builds: all` and `builds: -windows` manifest values for - // the primary and external test packages, respectively). - // - if (exclude (p->builds, - p->constraints, - *cm.config, - nullptr /* reason */, - true /* default_all_ucs */)) - tes.push_back (package {move (p->id.name), move (p->version)}); - } - } - - task_manifest task (move (b->package_name), - move (b->package_version), + // Note that the auxiliary environment is crafted by the bbot agent + // after the auxiliary machines are booted. + // + task_manifest task (pn, + p.version, move (r->location), move (fps), - move (tes), + p.requirements, + move (tests), + b.dependency_checksum, cm.machine->name, + move (ams), cm.config->target, cm.config->environment, + nullopt /* auxiliary_environment */, cm.config->args, - cm.config->warning_regexes); + pc.arguments, + belongs (*cm.config, module_pkg ? "build2" : "host"), + cm.config->warning_regexes, + move (interactive), + b.worker_checksum); + + // Collect the build artifacts upload URLs, skipping those which are + // excluded with the upload-*-exclude configuration options. 
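
To make the session id format assembled above concrete, here is a sketch of
the slash-separated layout (make_session is an invented helper; the real
code formats brep value types rather than plain strings):

#include <string>
#include <cstdint>

// <tenant>/<package>/<version>/<target>/<target-config>/<package-config>/
// <toolchain-name>/<toolchain-version>/<timestamp-ns>
//
std::string
make_session (const std::string& tenant,
              const std::string& package,
              const std::string& version,
              const std::string& target,
              const std::string& target_config,
              const std::string& package_config,
              const std::string& toolchain_name,
              const std::string& toolchain_version,
              std::uint64_t timestamp_ns)
{
  return tenant + '/' + package + '/' + version + '/' + target + '/' +
         target_config + '/' + package_config + '/' + toolchain_name + '/' +
         toolchain_version + '/' + std::to_string (timestamp_ns);
}
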
+ // + vector<upload_url> upload_urls; + + for (const auto& ud: options_->upload_data ()) + { + const string& t (ud.first); + + auto exclude = [&t] (const multimap<string, string>& mm, + const string& v) + { + auto range (mm.equal_range (t)); + + for (auto i (range.first); i != range.second; ++i) + { + if (i->second == v) + return true; + } + + return false; + }; + + if (!exclude (options_->upload_toolchain_exclude (), + b.toolchain_name) && + !exclude (options_->upload_repository_exclude (), + r->canonical_name)) + { + upload_urls.emplace_back (options_->host () + tenant + "?upload=" + t, + t); + } + } return task_response_manifest (move (session), - move (b->agent_challenge), + b.agent_challenge, move (result_url), + move (upload_urls), + b.agent_checksum, move (task)); }; - // Calculate the build (building state) or rebuild (built state) expiration - // time for package configurations + // Calculate the build/rebuild (building/built state) and the `queued` + // notifications expiration time for package configurations. // timestamp now (system_clock::now ()); @@ -298,105 +546,103 @@ handle (request& rq, response& rs) timestamp forced_rebuild_expiration ( expiration (options_->build_forced_rebuild_timeout ())); - timestamp normal_rebuild_expiration; - - if (options_->build_alt_rebuild_start_specified ()) - { - const duration& start (options_->build_alt_rebuild_start ()); - const duration& stop (options_->build_alt_rebuild_stop ()); - - duration dt (daytime (now)); - - // Note that if the stop time is less than the start time then the - // interval extends through the midnight. - // - bool alt_timeout (start <= stop - ? dt >= start && dt < stop - : dt >= start || dt < stop); - - // If we out of the alternative rebuild timeout interval, then fall back - // to using the normal rebuild timeout. - // - if (alt_timeout) - { - if (!options_->build_alt_rebuild_timeout_specified ()) - { - duration interval_len (start <= stop - ? stop - start - : (24h - start) + stop); - - normal_rebuild_expiration = now - interval_len; - } - else - normal_rebuild_expiration = - expiration (options_->build_alt_rebuild_timeout ()); - } - } + uint64_t queued_expiration_ns ( + expiration_ns (options_->build_queued_timeout ())); - if (normal_rebuild_expiration == timestamp_nonexistent) - normal_rebuild_expiration = - expiration (options_->build_normal_rebuild_timeout ()); - - // Return the challenge (nonce) if brep is configured to authenticate bbot - // agents. Return nullopt otherwise. + // Calculate the soft/hard rebuild expiration time, based on the + // respective build-{soft,hard}-rebuild-timeout and + // build-alt-{soft,hard}-rebuild-{start,stop,timeout} configuration + // options. // - // Nonce generator must guarantee a probabilistically insignificant chance - // of repeating a previously generated value. The common approach is to use - // counters or random number generators (alone or in combination), that - // produce values of the sufficient length. 64-bit non-repeating and - // 512-bit random numbers are considered to be more than sufficient for - // most practical purposes. + // If normal_timeout is zero, then return timestamp_unknown to indicate + // 'never expire'. Note that this value is less than any build timestamp + // value, including timestamp_nonexistent. // - // We will produce the challenge as the sha256sum of the 512-bit random - // number and the 64-bit current timestamp combination. The latter is - // not really a non-repeating counter and can't be used alone. 
However
-  // adding it is a good and cheap uniqueness improvement.
+  // NOTE: there is similar code in monitor/monitor.cxx.
  //
-  auto challenge = [&agent_fp, &now, &fail, &trace, this] ()
+  auto build_expiration = [&now] (
+    const optional<pair<duration, duration>>& alt_interval,
+    optional<size_t> alt_timeout,
+    size_t normal_timeout)
  {
-    optional<string> r;
+    if (normal_timeout == 0)
+      return timestamp_unknown;
+
+    timestamp r;
+    chrono::seconds nt (normal_timeout);

-    if (agent_fp)
+    if (alt_interval)
    {
-      try
-      {
-        auto print_args = [&trace, this] (const char* args[], size_t n)
-        {
-          l2 ([&]{trace << process_args {args, n};});
-        };
+      const duration& start (alt_interval->first);
+      const duration& stop (alt_interval->second);

-        openssl os (print_args,
-                    nullfd, path ("-"), 2,
-                    process_env (options_->openssl (),
-                                 options_->openssl_envvar ()),
-                    "rand",
-                    options_->openssl_option (), 64);
+      duration dt (daytime (now));

-        vector<char> nonce (os.in.read_binary ());
-        os.in.close ();
+      // Note that if the stop time is less than the start time then the
+      // interval extends through the midnight.
+      //
+      bool use_alt_timeout (start <= stop
+                            ? dt >= start && dt < stop
+                            : dt >= start || dt < stop);

-        if (!os.wait () || nonce.size () != 64)
-          fail << "unable to generate nonce";
+      // If we are out of the alternative rebuild timeout interval, then
+      // fall back to using the normal rebuild timeout.
+      //
+      if (use_alt_timeout)
+      {
+        // Calculate the alternative timeout, unless it is specified
+        // explicitly.
+        //
+        duration t;

-        uint64_t t (chrono::duration_cast<chrono::nanoseconds> (
-          now.time_since_epoch ()).count ());
+        if (!alt_timeout)
+        {
+          t = start <= stop ? (stop - start) : ((24h - start) + stop);

-        sha256 cs (nonce.data (), nonce.size ());
-        cs.append (&t, sizeof (t));
-        r = cs.string ();
-      }
-      catch (const system_error& e)
-      {
-        fail << "unable to generate nonce: " << e;
+          // If the normal rebuild timeout is greater than 24 hours, then
+          // increase the default alternative timeout by (normal - 24h) (see
+          // build-alt-soft-rebuild-timeout configuration option for
+          // details).
+          //
+          if (nt > 24h)
+            t += nt - 24h;
+        }
+        else
+          t = chrono::seconds (*alt_timeout);
+
+        r = now - t;
      }
    }

-    return r;
+    return r != timestamp_nonexistent ? r : (now - nt);
  };

+  timestamp soft_rebuild_expiration (
+    build_expiration (
+      (options_->build_alt_soft_rebuild_start_specified ()
+       ? make_pair (options_->build_alt_soft_rebuild_start (),
+                    options_->build_alt_soft_rebuild_stop ())
+       : optional<pair<duration, duration>> ()),
+      (options_->build_alt_soft_rebuild_timeout_specified ()
+       ? options_->build_alt_soft_rebuild_timeout ()
+       : optional<size_t> ()),
+      options_->build_soft_rebuild_timeout ()));
+
+  timestamp hard_rebuild_expiration (
+    build_expiration (
+      (options_->build_alt_hard_rebuild_start_specified ()
+       ? make_pair (options_->build_alt_hard_rebuild_start (),
+                    options_->build_alt_hard_rebuild_stop ())
+       : optional<pair<duration, duration>> ()),
+      (options_->build_alt_hard_rebuild_timeout_specified ()
+       ? options_->build_alt_hard_rebuild_timeout ()
+       : optional<size_t> ()),
+      options_->build_hard_rebuild_timeout ()));
+
  // Convert butl::standard_version type to brep::version.
  //
  brep::version toolchain_version (tqm.toolchain_version.string ());
+  string& toolchain_name (tqm.toolchain_name);

  // Prepare the buildable package prepared query.
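
The midnight-wrapping day time check used by build_expiration() above can be
exercised on its own; in this sketch in_daytime_interval is an invented name:

#include <chrono>

// Return true if the day time dt falls into the [start, stop) interval.
// If stop is less than start, the interval extends through the midnight
// (for example, 23:00-01:00).
//
inline bool
in_daytime_interval (std::chrono::minutes dt,
                     std::chrono::minutes start,
                     std::chrono::minutes stop)
{
  return start <= stop ? (dt >= start && dt < stop)
                       : (dt >= start || dt < stop);
}

For example, with start 23:00 and stop 01:00, a day time of 23:30 falls into
the interval while 02:00 does not.
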
  //
@@ -417,354 +663,1761 @@ handle (request& rq, response& rs)
  using pkg_query = query<buildable_package>;
  using prep_pkg_query = prepared_query<buildable_package>;

-  // Exclude archived tenants.
+  pkg_query pq (package_query<buildable_package> (custom_bot,
+                                                  params,
+                                                  imode,
+                                                  queued_expiration_ns));
+
+  // Transform (in-place) the interactive login information into the actual
+  // login command, if specified in the manifest and the transformation
+  // regexes are specified in the configuration.
  //
-  pkg_query pq (!pkg_query::build_tenant::archived);
+  if (tqm.interactive_login &&
+      options_->build_interactive_login_specified ())
+  {
+    optional<string> lc;
+    string l (tqm.agent + ' ' + *tqm.interactive_login);
+
+    // Use the first matching regex for the transformation.
+    //
+    for (const pair<regex, string>& rf: options_->build_interactive_login ())
+    {
+      pair<string, bool> r (regex_replace_match (l, rf.first, rf.second));
+
+      if (r.second)
+      {
+        lc = move (r.first);
+        break;
+      }
+    }

-  // Filter by repositories canonical names (if requested).
+    if (!lc)
+      throw invalid_request (400, "unable to match login info '" + l + '\'');
+
+    tqm.interactive_login = move (lc);
+  }
+
+  // In the random package ordering mode iterate over the packages list by
+  // starting from the random offset and wrapping around when reaching the
+  // end.
+  //
+  // Note, however, that since there can be some packages which are already
+  // built for all configurations and are not archived yet, picking an
+  // unbuilt package this way may not work as desired. Think of the
+  // following case with 5 packages in 3 non-archived tenants:
  //
-  const vector<string>& rp (params.repository ());
+  // 0: A - unbuilt, tenant 1
+  // 1: B - built,   tenant 2
+  // 2: C - built,   tenant 2
+  // 3: D - built,   tenant 2
+  // 4: E - unbuilt, tenant 3
+  //
+  // If we just pick a random starting offset in the [0, 4] range, then we
+  // will build A package with probability 0.2 and E with probability 0.8.
+  //
+  // To fix that we will only try to build a package from a tenant that the
+  // random starting offset refers to. Failing that, we will randomly pick a
+  // new starting offset and retry. To make sure we don't retry indefinitely
+  // when there are no more packages to build (and also for the sake of
+  // optimization; see below), we will track positions of packages which we
+  // (unsuccessfully) have already tried to build and skip them while
+  // generating the random starting offsets and while iterating over
+  // packages.
+  //
+  // Also note that since we iterate over packages in chunks, each queried
+  // in a separate transaction, the number of packages may potentially
+  // increase or decrease while iterating over them. Thus, to keep things
+  // consistent, we may need to update our tried positions tracking state
+  // accordingly (not to cycle, not to refer to an entry out of the list
+  // boundaries, etc). Generally, regardless of whether the number of
+  // packages has changed or not, the offsets and position statuses may now
+  // refer to some different packages. The only sensible thing we can do in
+  // such cases (without trying to detect this situation and restart from
+  // scratch) is to serve the request and issue some build task, if
+  // possible.
+  //
+  bool random (options_->build_package_order () == build_order::random);
+  size_t start_offset (0);

-  if (!rp.empty ())
-    pq = pq &&
-      pkg_query::build_repository::id.canonical_name.in_range (rp.begin (),
-                                                               rp.end ());
+  // List of "tried to build" package statuses.
+  // True entries denote positions of packages which we have tried to
+  // build. Initially all entries are false.
+  //
+  vector<bool> tried_positions;

-  // Specify the portion.
+  // Number of false entries in the above vector. Used merely as an
+  // optimization to bail out.
  //
-  size_t offset (0);
+  size_t untried_positions_count (0);

-  pq += "ORDER BY" +
-    pkg_query::build_package::id.tenant + "," +
-    pkg_query::build_package::id.name +
-    order_by_version (pkg_query::build_package::id.version, false) +
-    "OFFSET" + pkg_query::_ref (offset) + "LIMIT 50";
+  // Return a random position of a package that we have not yet tried to
+  // build, if present, and nullopt otherwise.
+  //
+  auto rand_position = [&tried_positions,
+                        &untried_positions_count] () -> optional<size_t>
+  {
+    assert (untried_positions_count <= tried_positions.size ());

-  connection_ptr conn (build_db_->connection ());
+    if (untried_positions_count == 0)
+      return nullopt;

-  prep_pkg_query pkg_prep_query (
-    conn->prepare_query<buildable_package> (
-      "mod-build-task-package-query", pq));
+    size_t r;
+    while (tried_positions[r = rand (0, tried_positions.size () - 1)]) ;
+    return r;
+  };

-  // Prepare the build prepared query.
+  // Mark the package at the specified position as tried to build. Assume
+  // that it has not yet been tried to build.
  //
-  // Note that we can not query the database for configurations that a
-  // package was not built with, as the database contains only those package
-  // configurations that have already been acted upon (initially empty).
+  auto position_tried = [&tried_positions,
+                         &untried_positions_count] (size_t i)
+  {
+    assert (i < tried_positions.size () &&
+            !tried_positions[i] &&
+            untried_positions_count != 0);
+
+    tried_positions[i] = true;
+    --untried_positions_count;
+  };
+
+  // Resize the tried positions list and update the untried positions
+  // counter accordingly if the package number has changed.
  //
-  // This is why we query the database for package configurations that
-  // should not be built (in the built state, or in the building state and
-  // not expired). Having such a list we will select the first build
-  // configuration that is not in the list (if available) for the response.
+  // For simplicity, assume that packages are added/removed to/from the end
+  // of the list. Note that misguesses in such rare cases are possible but
+  // not harmful (see above for the reasoning).
  //
-  using bld_query = query<build>;
-  using prep_bld_query = prepared_query<build>;
+  auto resize_tried_positions = [&tried_positions, &untried_positions_count]
+                                (size_t n)
+  {
+    if (n > tried_positions.size ()) // Packages added?
+    {
+      untried_positions_count += n - tried_positions.size ();
+      tried_positions.resize (n, false);
+    }
+    else if (n < tried_positions.size ()) // Packages removed?
+    {
+      for (size_t i (n); i != tried_positions.size (); ++i)
+      {
+        if (!tried_positions[i])
+        {
+          assert (untried_positions_count != 0);
+          --untried_positions_count;
+        }
+      }
+
+      tried_positions.resize (n);
+    }
+    else
+    {
+      // Not supposed to be called if the number of packages didn't change.
+ // + assert (false); + } + }; - package_id id; + if (random) + { + using query = query<buildable_package_count>; - bld_query bq ( - equal<build> (bld_query::id.package, id) && + query q (package_query<buildable_package_count> (custom_bot, + params, + imode, + queued_expiration_ns)); - bld_query::id.configuration.in_range (cfg_names.begin (), - cfg_names.end ()) && + transaction t (build_db_->begin ()); - bld_query::id.toolchain_name == tqm.toolchain_name && + // If there are any non-archived interactive build tenants, then the + // chosen randomization approach doesn't really work since interactive + // tenants must be preferred over non-interactive ones, which is + // achieved by proper ordering of the package query result (see below). + // Thus, we just disable randomization if there are any interactive + // tenants. + // + // But shouldn't we randomize the order between packages in multiple + // interactive tenants? Given that such a tenant may only contain a + // single package and can only be built in a single configuration that + // is probably not important. However, we may assume that the + // randomization still happens naturally due to the random nature of the + // tenant id, which is used as a primary sorting criteria (see below). + // + size_t interactive_package_count ( + build_db_->query_value<buildable_package_count> ( + q && query::build_tenant::interactive.is_not_null ())); - compare_version_eq (bld_query::id.toolchain_version, - canonical_version (toolchain_version), - true /* revision */) && + if (interactive_package_count == 0) + { + untried_positions_count = + build_db_->query_value<buildable_package_count> (q); + } + else + random = false; - (bld_query::state == "built" || - (bld_query::force == "forcing" && - bld_query::timestamp > forced_result_expiration_ns) || - (bld_query::force != "forcing" && // Unforced or forced. - bld_query::timestamp > normal_result_expiration_ns))); + t.commit (); - prep_bld_query bld_prep_query ( - conn->prepare_query<build> ("mod-build-task-build-query", bq)); + if (untried_positions_count != 0) + { + tried_positions.resize (untried_positions_count, false); + + optional<size_t> so (rand_position ()); + assert (so); // Wouldn't be here otherwise. + start_offset = *so; + } + } - while (tsm.session.empty ()) + if (!random || !tried_positions.empty ()) { - transaction t (conn->begin ()); + // Specify the portion. + // + size_t offset (start_offset); + size_t limit (50); + + pq += "ORDER BY"; - // Query (and cache) buildable packages. + // If the interactive mode is both, then order the packages so that ones + // from the interactive build tenants appear first. // - auto packages (pkg_prep_query.execute ()); + if (imode == interactive_mode::both) + pq += pkg_query::build_tenant::interactive + "NULLS LAST,"; + + pq += pkg_query::build_package::id.tenant + "," + + pkg_query::build_package::id.name + + order_by_version (pkg_query::build_package::id.version, false) + + "OFFSET" + pkg_query::_ref (offset) + + "LIMIT" + pkg_query::_ref (limit); + + connection_ptr conn (build_db_->connection ()); - // Bail out if there is nothing left. + prep_pkg_query pkg_prep_query ( + conn->prepare_query<buildable_package> ( + "mod-build-task-package-query", pq)); + + // Prepare the build prepared query. + // + // Note that we can not query the database for configurations that a + // package was not built with, as the database contains only those build + // configurations that have already been acted upon (initially empty). 
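
Putting rand_position() and position_tried() from above together: each pick
marks its position, so the scan terminates after exactly as many picks as
there are positions. A standalone model, with a plain vector and counter in
place of the captured state and std::rand standing in for the bounded
helper:

#include <cassert>
#include <cstddef>
#include <cstdlib>
#include <vector>

int
main ()
{
  std::vector<bool> tried (5, false);
  std::size_t untried (tried.size ());

  while (untried != 0)
  {
    // Retry random positions until an untried one is hit (the
    // rand_position() idea).
    //
    std::size_t r;
    while (tried[r = static_cast<std::size_t> (std::rand ()) %
                     tried.size ()]) ;

    tried[r] = true; // position_tried().
    --untried;
  }

  assert (untried == 0);
}
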
+ // + // This is why we query the database for configurations that should not + // be built (in the built state, or in the building state and not + // expired). Having such a list we will select the first build + // configuration that is not in the list (if available) for the + // response. + // + using bld_query = query<build>; + using prep_bld_query = prepared_query<build>; + + package_id id; + string pkg_config; + + bld_query sq (false); + for (const auto& cm: conf_machines) + sq = sq || (bld_query::id.target == cm.first.target && + bld_query::id.target_config_name == cm.first.config); + + bld_query bq ( + equal<build> (bld_query::id.package, id) && + bld_query::id.package_config_name == bld_query::_ref (pkg_config) && + sq && + bld_query::id.toolchain_name == toolchain_name && + + compare_version_eq (bld_query::id.toolchain_version, + canonical_version (toolchain_version), + true /* revision */) && + + (bld_query::state == "built" || + (bld_query::state == "building" && + ((bld_query::force == "forcing" && + bld_query::timestamp > forced_result_expiration_ns) || + (bld_query::force != "forcing" && // Unforced or forced. + bld_query::timestamp > normal_result_expiration_ns))))); + + prep_bld_query bld_prep_query ( + conn->prepare_query<build> ("mod-build-task-build-query", bq)); + + // Return true if a package needs to be rebuilt. // - if (packages.empty ()) + auto needs_rebuild = [&forced_rebuild_expiration, + &soft_rebuild_expiration, + &hard_rebuild_expiration] (const build& b) { - t.commit (); - break; - } + assert (b.state == build_state::built); + + return (b.force == force_state::forced && + b.timestamp <= forced_rebuild_expiration) || + b.soft_timestamp <= soft_rebuild_expiration || + b.hard_timestamp <= hard_rebuild_expiration; + }; + + // Convert a build to the hard rebuild, resetting the agent checksum. + // + // Note that since the checksums are hierarchical, the agent checksum + // reset will trigger resets of the "subordinate" checksums up to the + // dependency checksum and so the package will be rebuilt. + // + // Also note that we keep the previous build task result and status + // intact since we may still need to revert the build into the built + // state if the task execution is interrupted. + // + auto convert_to_hard = [] (const shared_ptr<build>& b) + { + b->agent_checksum = nullopt; + }; + + // Return SHA256 checksum of the controller logic and the configuration + // target, environment, arguments, and warning-detecting regular + // expressions. + // + auto controller_checksum = [] (const build_target_config& c) + { + sha256 cs ("1"); // Hash the logic version. + + cs.append (c.target.string ()); + cs.append (c.environment ? *c.environment : ""); - offset += packages.size (); + for (const string& a: c.args) + cs.append (a); - // Iterate over packages until we find one that needs building. + for (const string& re: c.warning_regexes) + cs.append (re); + + return string (cs.string ()); + }; + + // Return the machine id as a machine checksum. + // + // Note that we don't include auxiliary machine ids into this checksum + // since a different machine will most likely get picked for a pattern. + // And we view all auxiliary machines that match a pattern as equal for + // testing purposes (in other words, pattern is not the way to get + // coverage). + // + auto machine_checksum = [] (const machine_header_manifest& m) + { + return m.id; + }; + + // Tenant that the start offset refers to. 
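
The controller_checksum() above is a plain composition over the target
configuration fields. Schematically, with a stub config type but the same
libbutl sha256 calls as in the code above:

#include <string>
#include <vector>

#include <libbutl/sha256.hxx>

struct config // Stub with just the fields that feed the checksum.
{
  std::string target;
  std::string environment;
  std::vector<std::string> args;
  std::vector<std::string> warning_regexes;
};

std::string
controller_checksum (const config& c)
{
  butl::sha256 cs ("1"); // Hash the logic version first.

  cs.append (c.target);
  cs.append (c.environment);

  for (const std::string& a: c.args)
    cs.append (a);

  for (const std::string& re: c.warning_regexes)
    cs.append (re);

  return cs.string ();
}

Bumping the "1" logic version invalidates all previously saved checksums and
thus triggers rebuilds, which is the point of hashing it first.
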
+ // + optional<string> start_tenant; + + // If the build task is created and the tenant of the being built + // package has a third-party service state associated with it, then + // check if the tenant_service_build_building and/or + // tenant_service_build_queued callbacks are registered for the type of + // the associated service. If they are, then stash the state, the build + // object, and the callback pointers for the subsequent service + // notifications. // - for (auto& bp: packages) + // Also, if the tenant_service_build_queued callback is registered, then + // create, persist, and stash the queued build objects for all the + // unbuilt by the current toolchain and not yet queued configurations of + // the package the build task is created for and calculate the hints. + // Note that for the task build, we need to make sure that the + // third-party service receives the `queued` notification prior to the + // `building` notification (see mod/tenant-service.hxx for valid + // transitions). The `queued` notification is assumed to be already sent + // for the build if the respective object exists and any of the + // following is true for it: + // + // - It is in the queued state (initial_state is build_state::queued). + // + // - It is a user-forced rebuild of an incomplete build + // (rebuild_forced_build is true). + // + // - It is a rebuild of an interrupted rebuild (rebuild_forced_build is + // true). + // + const tenant_service_build_building* tsb (nullptr); + const tenant_service_build_queued* tsq (nullptr); + optional<pair<tenant_service, shared_ptr<build>>> tss; + vector<build> qbs; + tenant_service_build_queued::build_queued_hints qhs; + optional<build_state> initial_state; + bool rebuild_forced_build (false); + bool rebuild_interrupted_rebuild (false); + + // Create, persist, and return the queued build objects for all the + // unbuilt by the current toolchain and not yet queued configurations of + // the specified package. + // + // Note that the build object argument is only used for the toolchain + // information retrieval. Also note that the package constraints section + // is expected to be loaded. + // + auto queue_builds = [this] (const build_package& p, const build& b) { - id = move (bp.id); + assert (p.constraints_section.loaded ()); - // Iterate through the package configurations and erase those that - // don't need building from the build configuration map. All those - // configurations that remained can be built. We will take the first - // one, if present. + // Query the existing build ids and stash them into the set. // - // Also save the built package configurations for which it's time to be - // rebuilt. + set<build_id> existing_builds; + + using query = query<package_build_id>; + + query q (query::build::id.package == p.id && + query::build::id.toolchain_name == b.toolchain_name && + compare_version_eq (query::build::id.toolchain_version, + b.id.toolchain_version, + true /* revision */)); + + for (build_id& id: build_db_->query<package_build_id> (q)) + existing_builds.emplace (move (id)); + + // Go through all the potential package builds and queue those which + // are not in the existing builds set. // - config_machines configs (cfg_machines); // Make a copy for this pkg. 
- auto pkg_builds (bld_prep_query.execute ()); + vector<build> r; - for (auto i (pkg_builds.begin ()); i != pkg_builds.end (); ++i) + for (const build_package_config& pc: p.configs) { - auto j (configs.find (i->id.configuration.c_str ())); + for (const build_target_config& tc: *target_conf_) + { + if (!exclude (pc, p.builds, p.constraints, tc)) + { + build_id id (p.id, + tc.target, tc.name, + pc.name, + b.toolchain_name, b.toolchain_version); + + if (existing_builds.find (id) == existing_builds.end ()) + { + r.emplace_back (move (id.package.tenant), + move (id.package.name), + p.version, + move (id.target), + move (id.target_config_name), + move (id.package_config_name), + move (id.toolchain_name), + b.toolchain_version); + + // @@ TODO Persist the whole vector of builds with a single + // operation if/when bulk operations support is added + // for objects with containers. + // + build_db_->persist (r.back ()); + } + } + } + } - // Outdated configurations are already excluded with the database - // query. + return r; + }; + + auto queue_hints = [this] (const build_package& p) + { + buildable_package_count tpc ( + build_db_->query_value<buildable_package_count> ( + query<buildable_package_count>::build_tenant::id == p.id.tenant)); + + return tenant_service_build_queued::build_queued_hints { + tpc == 1, p.configs.size () == 1}; + }; + + // Collect the auxiliary machines required for testing of the specified + // package configuration and the external test packages, if present for + // the specified target configuration (task_auxiliary_machines), + // together with the auxiliary machines information that needs to be + // persisted in the database as a part of the build object + // (build_auxiliary_machines, which is parallel to + // task_auxiliary_machines). While at it collect the involved test + // dependencies. Return nullopt if any auxiliary configuration patterns + // may not be resolved to the auxiliary machines (no matching + // configuration, auxiliary machines RAM limit is exceeded, etc). + // + // Note that if the same auxiliary environment name is used for multiple + // packages (for example, for the main and tests packages or for the + // tests and examples packages, etc), then a shared auxiliary machine is + // used for all these packages. In this case all the respective + // configuration patterns must match the configuration derived from this + // machine name. If they don't, then return nullopt. The thinking here + // is that on the next task request a machine whose derived + // configuration matches all the patterns can potentially be picked. + // + struct collect_auxiliaries_result + { + vector<auxiliary_machine> task_auxiliary_machines; + vector<build_machine> build_auxiliary_machines; + small_vector<bpkg::test_dependency, 1> tests; + }; + + auto collect_auxiliaries = [&tqm, &auxiliary_config_machines, this] + (const shared_ptr<build_package>& p, + const build_package_config& pc, + const build_target_config& tc) + -> optional<collect_auxiliaries_result> + { + // The list of the picked build auxiliary machines together with the + // environment names they have been picked for. + // + vector<pair<auxiliary_config_machine, string>> picked_machines; + + // Try to randomly pick the auxiliary machine that matches the + // specified pattern and which can be supplied with the minimum + // required RAM, if specified. Return false if such a machine is not + // available. 
If a machine is already picked for the specified
+      // environment name, then return true if the machine's configuration
+      // matches the specified pattern and false otherwise.
+      //
+      auto pick_machine =
+        [&tqm,
+         &picked_machines,
+         used_ram = uint64_t (0),
+         available_machines = auxiliary_config_machines]
+        (const build_auxiliary& ba) mutable -> bool
+      {
+        vector<size_t> ams; // Indexes of the available matching machines.
+        optional<uint64_t> ar (tqm.auxiliary_ram);
+
+        // If the machine configuration name pattern (which is legal) or any
+        // of the machine configuration names (illegal) are invalid paths,
+        // then we assume we cannot pick the machine.
+        //
+        try
+        {
+          // The same story as in exclude() from build-target-config.cxx.
+          //
+          auto match = [pattern = dash_components_to_path (ba.config)]
+                       (const string& config)
+          {
+            return path_match (dash_components_to_path (config),
+                               pattern,
+                               dir_path () /* start */,
+                               path_match_flags::match_absent);
+          };
+
+          // Check if a machine is already picked for the specified
+          // environment name.
+          //
+          for (const auto& m: picked_machines)
+          {
+            if (m.second == ba.environment_name)
+              return match (m.first.config);
+          }
+
+          // Collect the matching machines from the list of the available
+          // machines and bail out if there are none.
+          //
+          for (size_t i (0); i != available_machines.size (); ++i)
+          {
+            const auxiliary_config_machine& m (available_machines[i]);
+            optional<uint64_t> mr (m.machine->ram_minimum);
+
+            if (match (m.config) && (!mr || !ar || used_ram + *mr <= *ar))
+              ams.push_back (i);
+          }
+
+          if (ams.empty ())
+            return false;
+        }
+        catch (const invalid_path&)
+        {
+          return false;
+        }
+
+        // Pick the matching machine randomly.
+        //
+        size_t i (ams[rand (0, ams.size () - 1)]);
+        auxiliary_config_machine& cm (available_machines[i]);
+
+        // Bump the used RAM.
+        //
+        if (optional<uint64_t> r = cm.machine->ram_minimum)
+          used_ram += *r;
+
+        // Move out the picked machine from the available machines list.
        //
-        assert (j != configs.end ());
-        configs.erase (j);
+        picked_machines.emplace_back (move (cm), ba.environment_name);
+        available_machines.erase (available_machines.begin () + i);
+        return true;
+      };

-        if (i->state == build_state::built)
+      // Collect auxiliary machines for the main package build configuration.
+      //
+      for (const build_auxiliary& ba:
+             pc.effective_auxiliaries (p->auxiliaries))
        {
-          assert (i->force != force_state::forcing);
+        if (!pick_machine (ba))
+          return nullopt; // No matched auxiliary machine.
+      }
+
+      // Collect the test packages and the auxiliary machines for their
+      // default build configurations. Exclude external test packages which
+      // exclude the current target configuration.
+      //
+      small_vector<bpkg::test_dependency, 1> tests;
+
+      if (!p->requirements_tests_section.loaded ())
+        build_db_->load (*p, p->requirements_tests_section);
+
+      for (const build_test_dependency& td: p->tests)
+      {
+        // Don't exclude unresolved external tests.
+        //
+        // Note that this may result in the build task failure. However,
+        // silently excluding such tests could end up with missed software
+        // bugs which feels much worse.
+        //
+        if (td.package != nullptr)
        {
-          assert (i->force != force_state::forcing);
+          shared_ptr<build_package> tp (td.package.load ());
+
+          // Try to use the test package configuration named the same as the
+          // current configuration of the main package. If there is no such
+          // configuration, then fall back to using the default
+          // configuration (which must exist).
+          // If the selected test package configuration excludes the current
+          // target configuration, then exclude this external test package
+          // from the build task.
+          //
+          // Note that potentially the selected test package configuration
+          // may contain some (bpkg) arguments associated, but we currently
+          // don't provide build bot worker with such information. This,
+          // however, is probably too far-fetched so let's keep it simple
+          // for now.
+          //
+          const build_package_config* tpc (find (pc.name, tp->configs));
+
+          if (tpc == nullptr)
+          {
+            tpc = find ("default", tp->configs);
+
+            assert (tpc != nullptr); // Must always be present.
+          }
+
+          // Use the `all` class as a least restrictive default underlying
+          // build class set. Note that we should only apply the explicit
+          // build restrictions to the external test packages (think about
+          // the `builds: all` and `builds: -windows` manifest values for
+          // the primary and external test packages, respectively).
+          //
+          build_db_->load (*tp, tp->constraints_section);
+
+          if (exclude (*tpc,
+                       tp->builds,
+                       tp->constraints,
+                       tc,
+                       nullptr /* reason */,
+                       true /* default_all_ucs */))
+            continue;
+
+          build_db_->load (*tp, tp->auxiliaries_section);
+
+          for (const build_auxiliary& ba:
+                 tpc->effective_auxiliaries (tp->auxiliaries))
+          {
+            if (!pick_machine (ba))
+              return nullopt; // No matched auxiliary machine.
+          }
+        }
+
+        tests.emplace_back (td.name,
+                            td.type,
+                            td.buildtime,
+                            td.constraint,
+                            td.enable,
+                            td.reflect);
+      }
+
+      vector<auxiliary_machine> tms;
+      vector<build_machine> bms;

-          if (i->timestamp <= (i->force == force_state::forced
-                               ? forced_rebuild_expiration
-                               : normal_rebuild_expiration))
-            rebuilds.emplace_back (i.load ());
+      if (size_t n = picked_machines.size ())
+      {
+        tms.reserve (n);
+        bms.reserve (n);
+
+        for (pair<auxiliary_config_machine, string>& pm: picked_machines)
+        {
+          const machine_header_manifest& m (*pm.first.machine);
+          tms.push_back (auxiliary_machine {m.name, move (pm.second)});
+          bms.push_back (build_machine {m.name, m.summary});
        }
      }

-        if (!configs.empty ())
+      return collect_auxiliaries_result {
+        move (tms), move (bms), move (tests)};
+    };
+
+    if (agent_fp && !challenge)
+    try
+    {
+      auto print_args = [&trace, this] (const char* args[], size_t n)
+      {
+        l2 ([&]{trace << process_args {args, n};});
+      };
+
+      openssl os (print_args,
+                  nullfd, path ("-"), 2,
+                  process_env (options_->openssl (),
+                               options_->openssl_envvar ()),
+                  "rand",
+                  options_->openssl_option (), 64);
+
+      vector<char> nonce (os.in.read_binary ());
+      os.in.close ();
+
+      if (!os.wait () || nonce.size () != 64)
+        fail << "unable to generate nonce";
+
+      uint64_t t (chrono::duration_cast<chrono::nanoseconds> (
+        now.time_since_epoch ()).count ());
+
+      sha256 cs (nonce.data (), nonce.size ());
+      cs.append (&t, sizeof (t));
+      challenge = cs.string ();
+    }
+    catch (const system_error& e)
+    {
+      fail << "unable to generate nonce: " << e;
+    }
+
+    // While at it, collect the builds aborted for various reasons
+    // (interactive builds in multiple configurations, builds with too many
+    // auxiliary machines, etc) to send the notification emails at the end
+    // of the request handling.
+    //
+    struct aborted_build
+    {
+      shared_ptr<build> b;
+      shared_ptr<build_package> p;
+      const build_package_config* pc;
+      const char* what;
+    };
+    vector<aborted_build> aborted_builds;
+
+    // Note: only used for crafting the notification email subjects.
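
The challenge derivation above is hash(512-bit nonce, 64-bit timestamp). A
sketch with std::random_device standing in for the openssl rand invocation
(the sha256 calls are the same libbutl ones used by the code above; the
quality of std::random_device is implementation-defined, so this is an
illustration rather than a drop-in replacement):

#include <chrono>
#include <cstdint>
#include <random>
#include <string>
#include <vector>

#include <libbutl/sha256.hxx>

std::string
make_challenge ()
{
  // 64 random bytes stand in for `openssl rand 64`.
  //
  std::random_device rd;
  std::vector<char> nonce (64);

  for (char& b: nonce)
    b = static_cast<char> (rd ());

  std::uint64_t t (
    std::chrono::duration_cast<std::chrono::nanoseconds> (
      std::chrono::system_clock::now ().time_since_epoch ()).count ());

  butl::sha256 cs (nonce.data (), nonce.size ());
  cs.append (&t, sizeof (t));
  return cs.string ();
}
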
+  // Note: only used for crafting the notification email subjects.
+  //
+  bool unforced (true);
+
+  for (bool done (false); !task_response.task && !done; )
+  {
+    transaction tr (conn->begin ());
+
+    // We need to be careful in the random package ordering mode not to
+    // miss the end after having wrapped around.
+    //
+    done = (start_offset != 0 &&
+            offset < start_offset &&
+            offset + limit >= start_offset);
+
+    if (done)
+      limit = start_offset - offset;
+
+    // Query (and cache) buildable packages.
+    //
+    auto packages (pkg_prep_query.execute ());
+
+    size_t chunk_size (packages.size ());
+    size_t next_offset (offset + chunk_size);
+
+    // If we are in the random package ordering mode, then also check if
+    // the package number has changed and, if that's the case, resize the
+    // tried positions list accordingly.
+    //
+    if (random &&
+        (next_offset > tried_positions.size () ||
+         (next_offset < tried_positions.size () && chunk_size < limit)))
+    {
+      resize_tried_positions (next_offset);
+    }
+
+    // Bail out if there is nothing left, unless we need to wrap around in
+    // the random package ordering mode.
+    //
+    if (chunk_size == 0)
+    {
+      tr.commit ();
+
+      if (start_offset != 0 && offset >= start_offset)
+        offset = 0;
+      else
+        done = true;
+
+      continue;
+    }
+
+    size_t position (offset); // Current package position.
+    offset = next_offset;
+
+    // Iterate over packages until we find one that needs building or have
+    // to bail out in the random package ordering mode for some reason (no
+    // more untried positions, need to restart, etc).
+    //
+    // Note that it is not uncommon for the sequentially examined packages
+    // to belong to the same tenant (single tenant mode, etc). Thus, we
+    // will cache the loaded tenant objects.
+    //
+    shared_ptr<build_tenant> t;
+
+    for (auto& bp: packages)
    {
-      // Find the first build configuration that is not excluded by the
-      // package.
+      shared_ptr<build_package>& p (bp.package);
+
+      id = p->id;
+
+      // Reset the tenant cache if the current package belongs to a
+      // different tenant.
      //
-      shared_ptr<build_package> p (build_db_->load<build_package> (id));
+      if (t != nullptr && t->id != id.tenant)
+        t = nullptr;
+
+      // If we are in the random package ordering mode, then cache the
+      // tenant the start offset refers to, if not cached yet, and check
+      // if we are still iterating over packages from this tenant
+      // otherwise. If the latter is not the case, then restart from a new
+      // random untried offset, if present, and bail out otherwise.
+      //
+      if (random)
+      {
+        if (!start_tenant)
+        {
+          start_tenant = id.tenant;
+        }
+        else if (*start_tenant != id.tenant)
+        {
+          if (optional<size_t> so = rand_position ())
+          {
+            start_offset = *so;
+            offset = start_offset;
+            start_tenant = nullopt;
+            limit = 50;
+            done = false;
+          }
+          else
+            done = true;
+
+          break;
+        }
+
+        size_t pos (position++);
+
+        // Should have been resized, if required.
+        //
+        assert (pos < tried_positions.size ());

-      auto i (configs.begin ());
-      auto e (configs.end ());
+        // Skip the position if it has already been tried.
+        //
+        if (tried_positions[pos])
+          continue;

-      for (;
-           i != e &&
-           exclude (p->builds, p->constraints, *i->second.config);
-           ++i) ;
+        position_tried (pos);
+      }

-      if (i != e)
+      // Note that a request to interactively build a package in multiple
+      // configurations is more likely a mistake than a deliberate choice.
+      // Thus, for the interactive tenant let's check if the package can
+      // be built in multiple configurations. If that's the case then we
+      // will put all the potential builds into the aborted state and
+      // continue iterating, looking for another package. Otherwise, just
+      // proceed for this package normally.
+      //
+      // It also feels like a good idea to archive an interactive tenant
+      // after a build object is created for it, regardless of whether the
+      // build task is issued or not. This way we make sure that an
+      // interactive build is never performed multiple times for such a
+      // tenant for any reason (multiple toolchains, buildtab change,
+      // etc). Note that the build result will still be accepted for an
+      // archived build.
+      //
+      if (bp.interactive)
      {
-        config_machine& cm (i->second);
-        machine_header_manifest& mh (*cm.machine);
+        // Note that the tenant can be archived via some other package on
+        // some previous iteration. Skip the package if that's the case.
+        //
+        // Also note that if bp.archived is false, then we need to
+        // (re-)load the tenant object to re-check the archived flag.
+        //
+        if (!bp.archived)
+        {
+          if (t == nullptr)
+            t = build_db_->load<build_tenant> (id.tenant);

-        build_id bid (move (id),
-                      cm.config->name,
-                      move (tqm.toolchain_name),
-                      toolchain_version);
+          bp.archived = t->archived;
+        }

-        shared_ptr<build> b (build_db_->find<build> (bid));
-        optional<string> cl (challenge ());
+        if (bp.archived)
+          continue;

-        // If build configuration doesn't exist then create the new one
-        // and persist. Otherwise put it into the building state, refresh
-        // the timestamp and update.
+        assert (t != nullptr); // Wouldn't be here otherwise.
+
+        // Collect the potential build configurations as all combinations
+        // of the tenant's packages build configurations and the
+        // non-excluded (by the packages) build target
+        // configurations. Note that here we ignore the machines from the
+        // task request.
        //
-        if (b == nullptr)
+        struct build_config
        {
-          b = make_shared<build> (move (bid.package.tenant),
-                                  move (bid.package.name),
-                                  move (bp.version),
-                                  move (bid.configuration),
-                                  move (bid.toolchain_name),
-                                  move (toolchain_version),
-                                  move (agent_fp),
-                                  move (cl),
-                                  mh.name,
-                                  move (mh.summary),
-                                  cm.config->target);
-
-          build_db_->persist (b);
+          shared_ptr<build_package> p;
+          const build_package_config* pc;
+          const build_target_config* tc;
+        };
+
+        small_vector<build_config, 1> build_configs;
+
+        // Note that we don't bother creating a prepared query here, since
+        // it's highly unlikely to encounter multiple interactive tenants
+        // per task request. Given that we archive such tenants
+        // immediately, in the common case there will be none.
+        //
+        pkg_query pq (pkg_query::build_tenant::id == id.tenant);
+        for (auto& tp: build_db_->query<buildable_package> (pq))
+        {
+          shared_ptr<build_package>& p (tp.package);
+
+          build_db_->load (*p, p->constraints_section);
+
+          for (build_package_config& pc: p->configs)
+          {
+            for (const auto& tc: *target_conf_)
+            {
+              if (!exclude (pc, p->builds, p->constraints, tc))
+                build_configs.push_back (build_config {p, &pc, &tc});
+            }
+          }
        }
-        else
+
+        // If multiple build configurations are collected, then abort all
+        // the potential builds and continue iterating over the packages.
+        //
+        if (build_configs.size () > 1)
        {
-          // The package configuration is in the building state, and there
-          // are no results.
+          // Abort the builds.
          //
-          // Note that in both cases we keep the status intact to be able
-          // to compare it with the final one in the result request
-          // handling in order to decide if to send the notification
-          // email. The same is true for the forced flag (in the sense
-          // that we don't set the force state to unforced).
+          for (build_config& c: build_configs)
+          {
+            shared_ptr<build_package>& p (c.p);
+            const string& pc (c.pc->name);
+            const build_target_config& tc (*c.tc);
+
+            build_id bid (p->id,
+                          tc.target,
+                          tc.name,
+                          pc,
+                          toolchain_name,
+                          toolchain_version);
+
+            // Can there be any existing builds for such a tenant? Doesn't
+            // seem so, unless due to some manual intervention into the
+            // database. Anyway, let's just leave such a build alone.
+            //
+            shared_ptr<build> b (build_db_->find<build> (bid));
+
+            if (b == nullptr)
+            {
+              b = make_shared<build> (move (bid.package.tenant),
+                                      move (bid.package.name),
+                                      p->version,
+                                      move (bid.target),
+                                      move (bid.target_config_name),
+                                      move (bid.package_config_name),
+                                      move (bid.toolchain_name),
+                                      toolchain_version,
+                                      result_status::abort,
+                                      operation_results ({
+                                        operation_result {
+                                          "configure",
+                                          result_status::abort,
+                                          "error: multiple configurations "
+                                          "for interactive build\n"}}),
+                                      build_machine {
+                                        "brep", "build task module"});
+
+              build_db_->persist (b);
+
+              // Schedule the build notification email.
+              //
+              aborted_builds.push_back (aborted_build {
+                move (b), move (p), c.pc, "build"});
+            }
+          }
+
+          // Archive the tenant.
          //
-          // Load the section to assert the above statement.
-          //
-          build_db_->load (*b, b->results_section);
+          t->archived = true;
+          build_db_->update (t);
+
+          continue; // Skip the package.
+        }
+      }
+
+      // If true, then the package is (being) built for some
+      // configurations.
+      //
+      // Note that since we only query the built and forced rebuild
+      // objects there can be false negatives.
+      //
+      bool package_built (false);
+
+      build_db_->load (*p, p->bot_keys_section);
+
+      for (const build_package_config& pc: p->configs)
+      {
+        // If this is a custom bot, then skip this configuration if it
+        // doesn't contain this bot's public key in its custom bot keys
+        // list. Otherwise (this is a default bot), skip this
+        // configuration if its custom bot keys list is not empty.
+        //
+        {
+          const build_package_bot_keys& bks (
+            pc.effective_bot_keys (p->bot_keys));
+
+          if (custom_bot)
+          {
+            assert (agent_fp); // Wouldn't be here otherwise.
+
+            if (find_if (
+                  bks.begin (), bks.end (),
+                  [&agent_fp] (const lazy_shared_ptr<build_public_key>& k)
+                  {
+                    return k.object_id ().fingerprint == *agent_fp;
+                  }) == bks.end ())
+            {
+              continue;
+            }
+          }
+          else
+          {
+            if (!bks.empty ())
+              continue;
+          }
+        }
+
+        pkg_config = pc.name;
+
+        // Iterate through the built configurations and erase them from the
+        // build configuration map. All those configurations that remained
+        // can be built. We will take the first one, if present.
+        //
+        // Also save the built configurations for which it's time to be
+        // rebuilt.
+        //
+        config_machines configs (conf_machines); // Make copy for this pkg.
+        auto pkg_builds (bld_prep_query.execute ());

-          assert (b->state == build_state::building &&
-                  b->results.empty ());
+        if (!package_built && !pkg_builds.empty ())
+          package_built = true;

-          b->state = build_state::building;
+        for (auto i (pkg_builds.begin ()); i != pkg_builds.end (); ++i)
+        {
+          auto j (
+            configs.find (build_target_config_id {
+              i->id.target, i->id.target_config_name}));

-          // Switch the force state not to reissue the task after the
-          // forced rebuild timeout. Note that the result handler will
-          // still recognize that the rebuild was forced.
+          // Outdated configurations are already excluded with the
+          // database query.
+          //
-          if (b->force == force_state::forcing)
-            b->force = force_state::forced;
+          assert (j != configs.end ());
+          configs.erase (j);

-          b->agent_fingerprint = move (agent_fp);
-          b->agent_challenge = move (cl);
-          b->machine = mh.name;
-          b->machine_summary = move (mh.summary);
-          b->target = cm.config->target;
-          b->timestamp = system_clock::now ();
+          if (i->state == build_state::built)
+          {
+            assert (i->force != force_state::forcing);

-          build_db_->update (b);
+            if (needs_rebuild (*i))
+              rebuilds.emplace_back (i.load ());
+          }
        }

-        // Finally, prepare the task response manifest.
-        //
-        // We iterate over buildable packages.
+        if (!configs.empty ())
+        {
+          // Find the first build configuration that is not excluded by
+          // the package configuration and for which all the requested
+          // auxiliary machines can be provided.
+          //
+          const config_machine* cm (nullptr);
+          optional<collect_auxiliaries_result> aux;
+
+          build_db_->load (*p, p->constraints_section);
+
+          for (auto i (configs.begin ()), e (configs.end ()); i != e; ++i)
+          {
+            cm = &i->second;
+            const build_target_config& tc (*cm->config);
+
+            if (!exclude (pc, p->builds, p->constraints, tc))
+            {
+              if (!p->auxiliaries_section.loaded ())
+                build_db_->load (*p, p->auxiliaries_section);
+
+              if ((aux = collect_auxiliaries (p, pc, tc)))
+                break;
+            }
+          }
+
+          if (aux)
+          {
+            machine_header_manifest& mh (*cm->machine);
+
+            build_id bid (move (id),
+                          cm->config->target,
+                          cm->config->name,
+                          move (pkg_config),
+                          move (toolchain_name),
+                          toolchain_version);
+
+            shared_ptr<build> b (build_db_->find<build> (bid));
+
+            // Move the interactive build login information into the build
+            // object, if the package is to be built interactively.
+            //
+            optional<string> login (bp.interactive
+                                    ? move (tqm.interactive_login)
+                                    : nullopt);
+
+            // If the build configuration doesn't exist then create a new
+            // one and persist it. Otherwise put it into the building
+            // state, refresh the timestamp and update.
+            //
+            if (b == nullptr)
+            {
+              b = make_shared<build> (move (bid.package.tenant),
+                                      move (bid.package.name),
+                                      p->version,
+                                      move (bid.target),
+                                      move (bid.target_config_name),
+                                      move (bid.package_config_name),
+                                      move (bid.toolchain_name),
+                                      move (toolchain_version),
+                                      move (login),
+                                      move (agent_fp),
+                                      move (challenge),
+                                      build_machine {
+                                        mh.name, move (mh.summary)},
+                                      move (aux->build_auxiliary_machines),
+                                      controller_checksum (*cm->config),
+                                      machine_checksum (*cm->machine));
+
+              challenge = nullopt;
+
+              build_db_->persist (b);
+            }
+            else
+            {
+              // The build configuration is in the building or queued
+              // state.
+              //
+              // Note that in both the building and built cases we keep
+              // the status intact to be able to compare it with the final
+              // one in the result request handling in order to decide
+              // whether to send the notification email or to revert it to
+              // the built state if interrupted. The same is true for the
+              // forced flag (in the sense that we don't set the force
+              // state to unforced).
+              //
+              assert (b->state != build_state::built);
+
+              initial_state = b->state;
+
+              b->state = build_state::building;
+              b->interactive = move (login);
+
+              unforced = (b->force == force_state::unforced);
+
+              // Switch the force state not to reissue the task after the
+              // forced rebuild timeout. Note that the result handler will
+              // still recognize that the rebuild was forced.
+              //
+              if (b->force == force_state::forcing)
+              {
+                b->force = force_state::forced;
+                rebuild_forced_build = true;
+              }
+
+              b->agent_fingerprint = move (agent_fp);
+
+              b->agent_challenge = move (challenge);
+              challenge = nullopt;
+
+              b->machine = build_machine {mh.name, move (mh.summary)};
+
+              // Mark the section as loaded, so auxiliary_machines are
+              // updated.
+              //
+              b->auxiliary_machines_section.load ();
+
+              b->auxiliary_machines =
+                move (aux->build_auxiliary_machines);
+
+              string ccs (controller_checksum (*cm->config));
+              string mcs (machine_checksum (*cm->machine));
+
+              // Issue the hard rebuild if it is forced or the
+              // configuration or machine has changed.
+              //
+              if (b->hard_timestamp <= hard_rebuild_expiration ||
+                  b->force == force_state::forced ||
+                  b->controller_checksum != ccs ||
+                  b->machine_checksum != mcs)
+                convert_to_hard (b);
+
+              b->controller_checksum = move (ccs);
+              b->machine_checksum = move (mcs);
+
+              b->timestamp = system_clock::now ();
+
+              build_db_->update (b);
+            }
+
+            if (t == nullptr)
+              t = build_db_->load<build_tenant> (b->tenant);
+
+            // Archive an interactive tenant.
+            //
+            if (bp.interactive)
+            {
+              t->archived = true;
+              build_db_->update (t);
+            }
+
+            // Finally, stash the service notification information, if
+            // present, and prepare the task response manifest.
+            //
+            if (t->service)
+            {
+              auto i (tenant_service_map_.find (t->service->type));
+
+              if (i != tenant_service_map_.end ())
+              {
+                const tenant_service_base* s (i->second.get ());
+
+                tsb = dynamic_cast<const tenant_service_build_building*> (s);
+                tsq = dynamic_cast<const tenant_service_build_queued*> (s);
+
+                if (tsq != nullptr)
+                {
+                  qbs = queue_builds (*p, *b);
+
+                  // If we ought to call the
+                  // tenant_service_build_queued::build_queued() callback,
+                  // then also set the package tenant's queued timestamp
+                  // to the current time to prevent the notifications race
+                  // (see tenant::queued_timestamp for details).
+                  //
+                  if (!qbs.empty () ||
+                      !initial_state ||
+                      (*initial_state != build_state::queued &&
+                       !rebuild_forced_build))
+                  {
+                    qhs = queue_hints (*p);
+
+                    t->queued_timestamp = system_clock::now ();
+                    build_db_->update (t);
+                  }
+                }
+
+                if (tsb != nullptr || tsq != nullptr)
+                  tss = make_pair (*t->service, b);
+              }
+            }
+
+            task_response = task (*b,
+                                  *p,
+                                  pc,
+                                  move (aux->tests),
+                                  move (aux->task_auxiliary_machines),
+                                  move (bp.interactive),
+                                  *cm);
+
+            task_build = move (b);
+            task_package = move (p);
+            task_config = &pc;
+
+            package_built = true;
+
+            break; // Bail out from the package configurations loop.
+          }
+        }
+      }
+
+      // If the task manifest is prepared, then bail out from the package
+      // loop, commit the transaction and respond. Otherwise, stash the
+      // build toolchain into the tenant, unless it is already stashed or
+      // the current package already has some configurations (being)
+      // built.
+      //
+      if (!task_response.task)
+      {
+        // Note that since there can be false negatives for the
+        // package_built flag (see above), there can be redundant tenant
+        // queries which, however, seem harmless (query uses the primary
+        // key and the object memory footprint is small).
+        //
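Condensed, the hard-rebuild trigger above is a four-way disjunction over a
timeout, the force state, and the two checksums. A minimal sketch with
simplified stand-in types (the build struct and needs_hard_rebuild() here
are illustrative, not the brep originals):

#include <chrono>
#include <string>

// Simplified stand-ins for the build object members used above.
//
struct build
{
  std::chrono::system_clock::time_point hard_timestamp;
  bool forced;
  std::string controller_checksum;
  std::string machine_checksum;
};

// Returns true if the rebuild must be a hard one: the hard rebuild
// timeout expired, the rebuild is forced, or the task controller or the
// build machine changed (detected via the checksums).
//
bool
needs_hard_rebuild (const build& b,
                    std::chrono::system_clock::time_point expiration,
                    const std::string& ccs,
                    const std::string& mcs)
{
  return b.hard_timestamp <= expiration ||
         b.forced                       ||
         b.controller_checksum != ccs   ||
         b.machine_checksum != mcs;
}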
-      assert (p->internal ());
+        if (!package_built)
+        {
+          if (t == nullptr)
+            t = build_db_->load<build_tenant> (p->id.tenant);

-      p->internal_repository.load ();
+          if (!t->toolchain)
+          {
+            t->toolchain = build_toolchain {toolchain_name,
+                                            toolchain_version};

-      tsm = task (move (b), move (p), cm);
+            build_db_->update (t);
+          }
+        }
      }
+      else
+        break;
    }

-    // If the task response manifest is prepared, then bail out from the
-    // package loop, commit the transaction and respond.
-    //
-    if (!tsm.session.empty ())
-      break;
+    tr.commit ();
  }

-  t.commit ();
-}
-
-// If we don't have an unbuilt package, then let's see if we have a
-// package to rebuild.
-//
-if (tsm.session.empty () && !rebuilds.empty ())
-{
-  // Sort the package configuration rebuild list with the following sort
-  // priority:
+  // If we don't have an unbuilt package, then let's see if we have a
+  // build configuration to rebuild.
  //
-  // 1: force state
-  // 2: overall status
-  // 3: timestamp (less is preferred)
+  if (!task_response.task && !rebuilds.empty ())
+  {
+    // Sort the configuration rebuild list with the following sort
+    // priority:
+    //
+    // 1: force state
+    // 2: overall status
+    // 3: timestamp (less is preferred)
+    //
+    auto cmp = [] (const shared_ptr<build>& x, const shared_ptr<build>& y)
+    {
+      if (x->force != y->force)
+        return x->force > y->force; // Forced goes first.
+
+      assert (x->status && y->status); // Both built.
+
+      if (x->status != y->status)
+        return x->status > y->status; // Larger status goes first.
+
+      // Older build completion goes first.
+      //
+      // Note that a completed build can have the state change timestamp
+      // (timestamp member) newer than the completion timestamp
+      // (soft_timestamp member) if the build was interrupted.
+      //
+      return x->soft_timestamp < y->soft_timestamp;
+    };
+
+    sort (rebuilds.begin (), rebuilds.end (), cmp);
+
+    // Pick the first build configuration from the ordered list.
+    //
+    // Note that the configurations and packages may not match the
+    // required criteria anymore (as we have committed the database
+    // transactions that were used to collect this data) so we recheck. If
+    // we find one that matches then put it into the building state,
+    // refresh the timestamp and update. Note that we don't amend the
+    // status and the force state to have them available in the result
+    // request handling (see above).
+    //
+    for (auto& b: rebuilds)
+    {
+      try
+      {
+        transaction t (conn->begin ());
+
+        b = build_db_->find<build> (b->id);
+
+        if (b != nullptr &&
+            b->state == build_state::built &&
+            needs_rebuild (*b))
+        {
+          auto i (conf_machines.find (
+                    build_target_config_id {
+                      b->target, b->target_config_name}));
+
+          // Only actual package configurations are loaded (see above).
+          //
+          assert (i != conf_machines.end ());
+          const config_machine& cm (i->second);
+
+          // Rebuild the package configuration if still present, is
+          // buildable, doesn't exclude the target configuration, can be
+          // provided with all the requested auxiliary machines, and
+          // matches the request's interactive mode.
+          //
+          // Note that while a change of the latter seems rather
+          // far-fetched, let's check it for good measure.
+          //
+          shared_ptr<build_package> p (
+            build_db_->find<build_package> (b->id.package));
+
+          shared_ptr<build_tenant> t (
+            p != nullptr
+            ? build_db_->load<build_tenant> (p->id.tenant)
+            : nullptr);
+
+          build_package_config* pc (p != nullptr
+                                     ? find (b->package_config_name,
+                                             p->configs)
+                                     : nullptr);
+
+          if (pc != nullptr &&
+              p->buildable &&
+              (imode == interactive_mode::both ||
+               (t->interactive.has_value () ==
+                (imode == interactive_mode::true_))))
+          {
+            const build_target_config& tc (*cm.config);
+
+            build_db_->load (*p, p->constraints_section);
+
+            if (exclude (*pc, p->builds, p->constraints, tc))
+              continue;
+
+            build_db_->load (*p, p->auxiliaries_section);
+
+            if (optional<collect_auxiliaries_result> aux =
+                  collect_auxiliaries (p, *pc, tc))
+            {
+              assert (b->status);
+
+              initial_state = build_state::built;
+
+              rebuild_interrupted_rebuild =
+                (b->timestamp > b->soft_timestamp);
+
+              b->state = build_state::building;
+
+              // Save the interactive build login information into the
+              // build object, if the package is to be built interactively.
+              //
+              // Can't move from, as may need it on the next iteration.
+              //
+              b->interactive = t->interactive
+                               ? tqm.interactive_login
+                               : nullopt;
+
+              unforced = (b->force == force_state::unforced);
+
+              b->agent_fingerprint = move (agent_fp);
+
+              b->agent_challenge = move (challenge);
+              challenge = nullopt;
+
+              const machine_header_manifest& mh (*cm.machine);
+              b->machine = build_machine {mh.name, mh.summary};
+
+              // Mark the section as loaded, so auxiliary_machines are
+              // updated.
+              //
+              b->auxiliary_machines_section.load ();
+
+              b->auxiliary_machines =
+                move (aux->build_auxiliary_machines);
+
+              // Issue the hard rebuild if the timeout expired, rebuild is
+              // forced, or the configuration or machine has changed.
+              //
+              // Note that we never reset the build status (see above for
+              // the reasoning).
+              //
+              string ccs (controller_checksum (*cm.config));
+              string mcs (machine_checksum (*cm.machine));
+
+              if (b->hard_timestamp <= hard_rebuild_expiration ||
+                  b->force == force_state::forced ||
+                  b->controller_checksum != ccs ||
+                  b->machine_checksum != mcs)
+                convert_to_hard (b);
+
+              b->controller_checksum = move (ccs);
+              b->machine_checksum = move (mcs);
+
+              b->timestamp = system_clock::now ();
+
+              build_db_->update (b);
+
+              // Stash the service notification information, if present,
+              // and prepare the task response manifest.
+              //
+              if (t->service)
+              {
+                auto i (tenant_service_map_.find (t->service->type));
+
+                if (i != tenant_service_map_.end ())
+                {
+                  const tenant_service_base* s (i->second.get ());
+
+                  tsb = dynamic_cast<const tenant_service_build_building*> (s);
+                  tsq = dynamic_cast<const tenant_service_build_queued*> (s);
+
+                  if (tsq != nullptr)
+                  {
+                    qbs = queue_builds (*p, *b);
+
+                    // If we ought to call the
+                    // tenant_service_build_queued::build_queued()
+                    // callback, then also set the package tenant's queued
+                    // timestamp to the current time to prevent the
+                    // notifications race (see tenant::queued_timestamp
+                    // for details).
+                    //
+                    if (!qbs.empty () || !rebuild_interrupted_rebuild)
+                    {
+                      qhs = queue_hints (*p);
+
+                      t->queued_timestamp = system_clock::now ();
+                      build_db_->update (t);
+                    }
+                  }
+
+                  if (tsb != nullptr || tsq != nullptr)
+                    tss = make_pair (move (*t->service), b);
+                }
+              }
+
+              task_response = task (*b,
+                                    *p,
+                                    *pc,
+                                    move (aux->tests),
+                                    move (aux->task_auxiliary_machines),
+                                    move (t->interactive),
+                                    cm);
+
+              task_build = move (b);
+              task_package = move (p);
+              task_config = pc;
+            }
+          }
+        }
+
+        t.commit ();
+      }
+      catch (const odb::deadlock&)
+      {
+        // Just try with the next rebuild. But first, restore the agent's
+        // fingerprint and challenge and reset the task manifest and the
+        // session that we may have prepared.
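The odb::deadlock handler here restores the moved-out request state (agent
fingerprint, challenge, task manifest) so the next rebuild candidate can be
tried with a clean slate. The retry pattern in isolation might look as
follows (odb::deadlock is the real ODB exception type; the op/restore
callbacks and the loop shape are illustrative):

#include <odb/database.hxx>
#include <odb/transaction.hxx>
#include <odb/exceptions.hxx>

// Try the operation against each candidate in turn, tolerating deadlocks:
// a deadlocked transaction is rolled back (by the transaction object's
// destructor) and we simply move on to the next candidate, after undoing
// any state changes made before the failure.
//
template <typename C, typename F, typename R>
void
for_each_tolerant (odb::database& db, C& candidates, F op, R restore)
{
  for (auto& c: candidates)
  {
    try
    {
      odb::transaction t (db.begin ());
      op (c);
      t.commit ();
    }
    catch (const odb::deadlock&)
    {
      restore (c); // Put back anything moved out before the failure.
    }
  }
}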
+ // + agent_fp = move (b->agent_fingerprint); + challenge = move (b->agent_challenge); + task_response = task_response_manifest (); + } + + // If the task manifest is prepared, then bail out from the package + // configuration rebuilds loop and respond. + // + if (task_response.task) + break; + } + } + + // If the tenant-associated third-party service needs to be notified + // about the queued builds, then call the + // tenant_service_build_queued::build_queued() callback function and + // update the service state, if requested. // - auto cmp = [] (const shared_ptr<build>& x, const shared_ptr<build>& y) + if (tsq != nullptr) { - if (x->force != y->force) - return x->force > y->force; // Forced goes first. + assert (tss); // Wouldn't be here otherwise. - assert (x->status && y->status); // Both built. + tenant_service& ss (tss->first); - if (x->status != y->status) - return x->status > y->status; // Larger status goes first. + // If the task build has no initial state (is just created), then + // temporarily move it into the list of the queued builds until the + // `queued` notification is delivered. Afterwards, restore it so that + // the `building` notification can also be sent. + // + build& b (*tss->second); + bool restore_build (false); - return x->timestamp < y->timestamp; // Older goes first. - }; + if (!initial_state) + { + qbs.push_back (move (b)); + restore_build = true; + } - sort (rebuilds.begin (), rebuilds.end (), cmp); + if (!qbs.empty ()) + { + if (auto f = tsq->build_queued (ss, + qbs, + nullopt /* initial_state */, + qhs, + log_writer_)) + { + if (optional<string> data = + update_tenant_service_state (conn, qbs.back ().tenant, f)) + ss.data = move (data); + } + } - optional<string> cl (challenge ()); + // Send the `queued` notification for the task build, unless it is + // already sent, and update the service state, if requested. + // + if (initial_state && + *initial_state != build_state::queued && + !rebuild_interrupted_rebuild && + !rebuild_forced_build) + { + qbs.clear (); + qbs.push_back (move (b)); + restore_build = true; + + if (auto f = tsq->build_queued (ss, + qbs, + initial_state, + qhs, + log_writer_)) + { + if (optional<string> data = + update_tenant_service_state (conn, qbs.back ().tenant, f)) + ss.data = move (data); + } + } + + if (restore_build) + b = move (qbs.back ()); + } - // Pick the first package configuration from the ordered list. + // If a third-party service needs to be notified about the package + // build, then call the tenant_service_build_built::build_building() + // callback function and, if requested, update the tenant-associated + // service state. // - // Note that the configurations and packages may not match the required - // criteria anymore (as we have committed the database transactions that - // were used to collect this data) so we recheck. If we find one that - // matches then put it into the building state, refresh the timestamp and - // update. Note that we don't amend the status and the force state to - // have them available in the result request handling (see above). + if (tsb != nullptr) + { + assert (tss); // Wouldn't be here otherwise. + + tenant_service& ss (tss->first); + const build& b (*tss->second); + + if (auto f = tsb->build_building (ss, b, log_writer_)) + { + if (optional<string> data = + update_tenant_service_state (conn, b.tenant, f)) + ss.data = move (data); + } + } + + // If the task manifest is prepared, then check that the number of the + // build auxiliary machines is less than 10. 
If that's not the case, + // then turn the build into the built state with the abort status. // - for (auto& b: rebuilds) + if (task_response.task && + task_response.task->auxiliary_machines.size () > 9) { - try + // Respond with the no-task manifest. + // + task_response = task_response_manifest (); + + // If the package tenant has a third-party service state associated + // with it, then check if the tenant_service_build_built callback is + // registered for the type of the associated service. If it is, then + // stash the state, the build object, and the callback pointer for the + // subsequent service `built` notification. + // + const tenant_service_build_built* tsb (nullptr); + optional<pair<tenant_service, shared_ptr<build>>> tss; { - transaction t (build_db_->begin ()); + transaction t (conn->begin ()); - b = build_db_->find<build> (b->id); + shared_ptr<build> b (build_db_->find<build> (task_build->id)); - if (b != nullptr && b->state == build_state::built && - b->timestamp <= (b->force == force_state::forced - ? forced_rebuild_expiration - : normal_rebuild_expiration)) + // For good measure, check that the build object is in the building + // state and has not been updated. + // + if (b->state == build_state::building && + b->timestamp == task_build->timestamp) { - auto i (cfg_machines.find (b->id.configuration.c_str ())); + b->state = build_state::built; + b->status = result_status::abort; + b->force = force_state::unforced; - // Only actual package configurations are loaded (see above). + // Cleanup the interactive build login information. // - assert (i != cfg_machines.end ()); - const config_machine& cm (i->second); + b->interactive = nullopt; - // Rebuild the package if still present, is buildable and doesn't - // exclude the configuration. + // Cleanup the authentication data. // - shared_ptr<build_package> p ( - build_db_->find<build_package> (b->id.package)); + b->agent_fingerprint = nullopt; + b->agent_challenge = nullopt; - if (p != nullptr && - p->internal () && - !exclude (p->builds, p->constraints, *cm.config)) - { - assert (b->status); + b->timestamp = system_clock::now (); + b->soft_timestamp = b->timestamp; + b->hard_timestamp = b->soft_timestamp; - b->state = build_state::building; - - // Can't move from, as may need them on the next iteration. - // - b->agent_fingerprint = agent_fp; - b->agent_challenge = cl; - - const machine_header_manifest& mh (*cm.machine); - b->machine = mh.name; - b->machine_summary = mh.summary; - - b->target = cm.config->target; + // Mark the section as loaded, so results are updated. + // + b->results_section.load (); - // Mark the section as loaded, so results are updated. - // - b->results_section.load (); - b->results.clear (); + b->results = operation_results ({ + operation_result { + "configure", + result_status::abort, + "error: not more than 9 auxiliary machines are allowed"}}); - b->timestamp = system_clock::now (); + b->agent_checksum = nullopt; + b->worker_checksum = nullopt; + b->dependency_checksum = nullopt; - build_db_->update (b); + build_db_->update (b); - p->internal_repository.load (); + // Schedule the `built` notification, if the + // tenant_service_build_built callback is registered for the + // tenant. 
+ // + shared_ptr<build_tenant> t ( + build_db_->load<build_tenant> (b->tenant)); - tsm = task (move (b), move (p), cm); + if (t->service) + { + auto i (tenant_service_map_.find (t->service->type)); + + if (i != tenant_service_map_.end ()) + { + tsb = dynamic_cast<const tenant_service_build_built*> ( + i->second.get ()); + + // If required, stash the service notification information. + // + if (tsb != nullptr) + tss = make_pair (move (*t->service), b); + } } + + // Schedule the build notification email. + // + aborted_builds.push_back ( + aborted_build {move (b), + move (task_package), + task_config, + unforced ? "build" : "rebuild"}); } t.commit (); } - catch (const odb::deadlock&) {} // Just try with the next rebuild. - // If the task response manifest is prepared, then bail out from the - // package configuration rebuilds loop and respond. + // If a third-party service needs to be notified about the built + // package, then call the tenant_service_build_built::build_built() + // callback function and update the service state, if requested. // - if (!tsm.session.empty ()) - break; + if (tsb != nullptr) + { + assert (tss); // Wouldn't be here otherwise. + + tenant_service& ss (tss->first); + const build& b (*tss->second); + + if (auto f = tsb->build_built (ss, b, log_writer_)) + { + if (optional<string> data = + update_tenant_service_state (conn, b.tenant, f)) + ss.data = move (data); + } + } } + + // Send notification emails for all the aborted builds. + // + for (const aborted_build& ab: aborted_builds) + send_notification_email (*options_, + conn, + *ab.b, + *ab.p, + *ab.pc, + ab.what, + error, + verb_ >= 2 ? &trace : nullptr); } } - // @@ Probably it would be a good idea to also send some cache control - // headers to avoid caching by HTTP proxies. That would require extension - // of the web::response interface. - // - - manifest_serializer s (rs.content (200, "text/manifest;charset=utf-8"), - "task_response_manifest"); - tsm.serialize (s); - + serialize_task_response_manifest (); return true; } diff --git a/mod/mod-build-task.hxx b/mod/mod-build-task.hxx index 7875db1..d0b3d44 100644 --- a/mod/mod-build-task.hxx +++ b/mod/mod-build-task.hxx @@ -8,6 +8,7 @@ #include <libbrep/utility.hxx> #include <mod/module-options.hxx> +#include <mod/tenant-service.hxx> #include <mod/database-module.hxx> #include <mod/build-config-module.hxx> @@ -16,13 +17,13 @@ namespace brep class build_task: public database_module, private build_config_module { public: - build_task () = default; + explicit + build_task (const tenant_service_map&); // Create a shallow copy (handling instance) if initialized and a deep // copy (context exemplar) otherwise. 
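The copy semantics described in the mod-build-task.hxx comment above follow
the general brep module pattern: the exemplar is deep-copied until
initialized, after which per-request handling instances share state. A
hypothetical minimal illustration (the class and member names here are made
up, not the actual brep interfaces):

#include <memory>

// A hypothetical request-handling module. The exemplar is created at
// startup and then copied per request: before initialization the copy is
// deep (each exemplar owns its state), after initialization it is shallow
// (handling instances share the parsed options).
//
class module
{
public:
  module (): options_ (std::make_shared<int> (0)) {}

  module (const module& m)
      : initialized_ (m.initialized_),
        options_ (m.initialized_
                  ? m.options_                            // Share.
                  : std::make_shared<int> (*m.options_))  // Deep copy.
  {
  }

  void
  init (int opts) {*options_ = opts; initialized_ = true;}

private:
  bool initialized_ = false;
  std::shared_ptr<int> options_;
};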
// - explicit - build_task (const build_task&); + build_task (const build_task&, const tenant_service_map&); virtual bool handle (request&, response&); @@ -36,6 +37,7 @@ namespace brep private: shared_ptr<options::build_task> options_; + const tenant_service_map& tenant_service_map_; }; } diff --git a/mod/mod-builds.cxx b/mod/mod-builds.cxx index ab9e93e..30562f3 100644 --- a/mod/mod-builds.cxx +++ b/mod/mod-builds.cxx @@ -4,15 +4,15 @@ #include <mod/mod-builds.hxx> #include <set> -#include <algorithm> // find_if() #include <libstudxml/serializer.hxx> #include <odb/database.hxx> #include <odb/transaction.hxx> -#include <libbutl/timestamp.mxx> // to_string() -#include <libbutl/path-pattern.mxx> +#include <libbutl/utility.hxx> // compare_c_string +#include <libbutl/timestamp.hxx> // to_string() +#include <libbutl/path-pattern.hxx> #include <libbbot/manifest.hxx> // to_result_status(), to_string(result_status) @@ -31,7 +31,6 @@ using namespace std; using namespace butl; -using namespace bbot; using namespace web; using namespace odb::core; using namespace brep::cli; @@ -133,28 +132,32 @@ match (const C qc, const string& pattern) return qc + "SIMILAR TO" + query<T>::_val (transform (pattern)); } +// If tenant is absent, then query builds from all the public tenants. +// template <typename T> static inline query<T> -build_query (const brep::cstrings* configs, +build_query (const brep::vector<brep::build_target_config_id>* config_ids, const brep::params::builds& params, - const brep::optional<brep::string>& tenant, - const brep::optional<bool>& archived) + const brep::optional<brep::string>& tenant) { using namespace brep; using query = query<T>; using qb = typename query::build; - - query q (configs != nullptr - ? qb::id.configuration.in_range (configs->begin (), configs->end ()) - : query (true)); + using qt = typename query::build_tenant; const auto& pid (qb::id.package); - if (tenant) - q = q && pid.tenant == *tenant; + query q (tenant ? pid.tenant == *tenant : !qt::private_); - if (archived) - q = q && query::build_tenant::archived == *archived; + if (config_ids != nullptr) + { + query sq (false); + for (const auto& id: *config_ids) + sq = sq || (qb::id.target == id.target && + qb::id.target_config_name == id.config); + + q = q && sq; + } // Note that there is no error reported if the filter parameters parsing // fails. Instead, it is considered that no package builds match such a @@ -173,7 +176,7 @@ build_query (const brep::cstrings* configs, { // May throw invalid_argument. // - version v (params.version (), false /* fold_zero_revision */); + version v (params.version (), version::none); q = q && compare_version_eq (pid.version, canonical_version (v), @@ -182,11 +185,11 @@ build_query (const brep::cstrings* configs, // Build toolchain name/version. // - const string& tc (params.toolchain ()); + const string& th (params.toolchain ()); - if (tc != "*") + if (th != "*") { - size_t p (tc.find ('-')); + size_t p (th.find ('-')); if (p == string::npos) // Invalid format. throw invalid_argument (""); @@ -194,8 +197,8 @@ build_query (const brep::cstrings* configs, // the exact version revision, so an absent and zero revisions have the // same semantics and the zero revision is folded. // - string tn (tc, 0, p); - version tv (string (tc, p + 1)); // May throw invalid_argument. + string tn (th, 0, p); + version tv (string (th, p + 1)); // May throw invalid_argument. 
q = q && qb::id.toolchain_name == tn && @@ -204,38 +207,44 @@ build_query (const brep::cstrings* configs, true /* revision */); } - // Build configuration name. + // Build target. // - if (!params.configuration ().empty ()) - q = q && match<T> (qb::id.configuration, params.configuration ()); + if (!params.target ().empty ()) + q = q && match<T> (qb::id.target, params.target ()); - // Build machine name. + // Build target configuration name. // - if (!params.machine ().empty ()) - q = q && match<T> (qb::machine, params.machine ()); + if (!params.target_config ().empty ()) + q = q && match<T> (qb::id.target_config_name, params.target_config ()); - // Build target. + // Build package configuration name. // - if (!params.target ().empty ()) - q = q && match<T> (qb::target, params.target ()); + if (!params.package_config ().empty ()) + q = q && match<T> (qb::id.package_config_name, params.package_config ()); // Build result. // const string& rs (params.result ()); + bool add_state (true); if (rs != "*") { if (rs == "pending") + { q = q && qb::force != "unforced"; + } else if (rs == "building") + { q = q && qb::state == "building"; + add_state = false; + } else { query sq (qb::status == rs); // May throw invalid_argument. // - result_status st (to_result_status (rs)); + result_status st (bbot::to_result_status (rs)); if (st != result_status::success) { @@ -256,8 +265,12 @@ build_query (const brep::cstrings* configs, // well (rebuild). // q = q && qb::state == "built" && sq; + add_state = false; } } + + if (add_state) + q = q && qb::state != "queued"; } catch (const invalid_argument&) { @@ -267,23 +280,19 @@ build_query (const brep::cstrings* configs, return q; } +// If tenant is absent, then query packages from all the public tenants. +// template <typename T> static inline query<T> package_query (const brep::params::builds& params, - const brep::optional<brep::string>& tenant, - const brep::optional<bool>& archived) + const brep::optional<brep::string>& tenant) { using namespace brep; using query = query<T>; using qp = typename query::build_package; + using qt = typename query::build_tenant; - query q (true); - - if (tenant) - q = q && qp::id.tenant == *tenant; - - if (archived) - q = q && query::build_tenant::archived == *archived; + query q (tenant ? qp::id.tenant == *tenant : !qt::private_); // Note that there is no error reported if the filter parameters parsing // fails. Instead, it is considered that no packages match such a query. @@ -301,7 +310,7 @@ package_query (const brep::params::builds& params, { // May throw invalid_argument. // - version v (params.version (), false /* fold_zero_revision */); + version v (params.version (), version::none); q = q && compare_version_eq (qp::id.version, canonical_version (v), @@ -355,11 +364,6 @@ handle (request& rq, response& rs) throw invalid_request (400, e.what ()); } - // Override the name parameter for the old URL (see options.cli for details). - // - if (params.name_legacy_specified ()) - params.name (params.name_legacy ()); - const char* title ("Builds"); xml::serializer s (rs.content (), title); @@ -383,14 +387,17 @@ handle (request& rq, response& rs) << DIV(ID="content"); // If the tenant is empty then we are in the global view and will display - // builds from all the tenants. + // builds from all the public tenants. // optional<string> tn; if (!tenant.empty ()) tn = tenant; - // Return the list of distinct toolchain name/version pairs. The build db - // transaction must be started. 
+ // Return the list of distinct toolchain name/version pairs. If no builds + // are present for the tenant, then fallback to the toolchain recorded in + // the tenant object, if present. + // + // Note: the build db transaction must be started. // using toolchains = vector<pair<string, version>>; @@ -406,11 +413,19 @@ handle (request& rq, response& rs) false /* first */))) r.emplace_back (move (t.name), move (t.version)); + if (r.empty ()) + { + shared_ptr<build_tenant> t (build_db_->find<build_tenant> (tenant)); + + if (t != nullptr && t->toolchain) + r.emplace_back (t->toolchain->name, t->toolchain->version); + } + return r; }; auto print_form = [&s, ¶ms, this] (const toolchains& toolchains, - size_t build_count) + optional<size_t> build_count) { // Print the package builds filter form on the first page only. // @@ -421,16 +436,16 @@ handle (request& rq, response& rs) // the selected toolchain is still present in the database. Otherwise // fallback to the * wildcard selection. // - string ctc ("*"); + string cth ("*"); vector<pair<string, string>> toolchain_opts ({{"*", "*"}}); { for (const auto& t: toolchains) { - string tc (t.first + '-' + t.second.string ()); - toolchain_opts.emplace_back (tc, tc); + string th (t.first + '-' + t.second.string ()); + toolchain_opts.emplace_back (th, th); - if (tc == params.toolchain ()) - ctc = move (tc); + if (th == params.toolchain ()) + cth = move (th); } } @@ -446,28 +461,42 @@ handle (request& rq, response& rs) << TBODY << TR_INPUT ("name", "builds", params.name (), "*", true) << TR_INPUT ("version", "pv", params.version (), "*") - << TR_SELECT ("toolchain", "tc", ctc, toolchain_opts) + << TR_SELECT ("toolchain", "th", cth, toolchain_opts) + << TR_INPUT ("target", "tg", params.target (), "*") - << TR(CLASS="config") - << TH << "config" << ~TH + << TR(CLASS="tgt-config") + << TH << "tgt config" << ~TH << TD << *INPUT(TYPE="text", - NAME="cf", - VALUE=params.configuration (), + NAME="tc", + VALUE=params.target_config (), PLACEHOLDER="*", - LIST="configs") - << DATALIST(ID="configs") + LIST="target-configs") + << DATALIST(ID="target-configs") << *OPTION(VALUE="*"); - for (const auto& c: *build_conf_names_) - s << *OPTION(VALUE=c); + // Print unique config names from the target config map. + // + set<const char*, butl::compare_c_string> conf_names; + for (const auto& c: *target_conf_map_) + { + if (conf_names.insert (c.first.config.get ().c_str ()).second) + s << *OPTION(VALUE=c.first.config.get ()); + } s << ~DATALIST << ~TD << ~TR - << TR_INPUT ("machine", "mn", params.machine (), "*") - << TR_INPUT ("target", "tg", params.target (), "*") + << TR(CLASS="pkg-config") + << TH << "pkg config" << ~TH + << TD + << *INPUT(TYPE="text", + NAME="pc", + VALUE=params.package_config (), + PLACEHOLDER="*") + << ~TD + << ~TR << TR_SELECT ("result", "rs", params.result (), build_results) << ~TBODY << ~TABLE @@ -489,26 +518,25 @@ handle (request& rq, response& rs) s << DIV_COUNTER (build_count, "Build", "Builds"); }; + const string& tgt (params.target ()); + const string& tgt_cfg (params.target_config ()); + const string& pkg_cfg (params.package_config ()); + // We will not display hidden configurations, unless the configuration is // specified explicitly. 
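The `<name>-<version>` toolchain filter handled above is split on the first
'-', with any parse failure translated into an HTTP 400 response. Factored
out as a sketch (the version part stays a string here, while the real code
parses it with bpkg's version type, which may throw invalid_argument):

#include <string>
#include <utility>
#include <stdexcept>

// Split a "<name>-<version>" toolchain filter value. Throws
// std::invalid_argument on an invalid format (the caller maps this to an
// invalid request response).
//
static std::pair<std::string, std::string>
parse_toolchain (const std::string& th)
{
  std::string::size_type p (th.find ('-'));

  if (p == std::string::npos || p == 0 || p == th.size () - 1)
    throw std::invalid_argument ("invalid toolchain");

  return std::make_pair (th.substr (0, p), th.substr (p + 1));
}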
// - bool exclude_hidden (params.configuration ().empty () || - path_pattern (params.configuration ())); + bool exclude_hidden (tgt_cfg.empty () || path_pattern (tgt_cfg)); - cstrings conf_names; + vector<build_target_config_id> conf_ids; + conf_ids.reserve (target_conf_map_->size ()); - if (exclude_hidden) + for (const auto& c: *target_conf_map_) { - for (const auto& c: *build_conf_map_) - { - if (belongs (*c.second, "all")) - conf_names.push_back (c.first); - } + if (!exclude_hidden || !belongs (*c.second, "hidden")) + conf_ids.push_back (c.first); } - else - conf_names = *build_conf_names_; - size_t count; + optional<size_t> count; size_t page (params.page ()); if (params.result () != "unbuilt") // Print package build configurations. @@ -523,37 +551,22 @@ handle (request& rq, response& rs) // printing the builds. // count = 0; - vector<shared_ptr<build>> builds; + vector<package_build> builds; builds.reserve (page_configs); - // Prepare the package build prepared query. + // Prepare the package build query. // using query = query<package_build>; - using prep_query = prepared_query<package_build>; - query q (build_query<package_build> ( - &conf_names, params, tn, nullopt /* archived */)); - - // Specify the portion. Note that we will be querying builds in chunks, - // not to hold locks for too long. - // - // Also note that for each build we also load the corresponding - // package. Nevertheless, we use a fairly large portion to speed-up the - // builds traversal but also cache the package objects (see below). - // - size_t offset (0); + query q (build_query<package_build> (&conf_ids, params, tn)); // Print package build configurations ordered by the timestamp (later goes // first). // - q += "ORDER BY" + query::build::timestamp + "DESC" + - "OFFSET" + query::_ref (offset) + "LIMIT 500"; + q += "ORDER BY" + query::build::timestamp + "DESC"; connection_ptr conn (build_db_->connection ()); - prep_query pq ( - conn->prepare_query<package_build> ("mod-builds-query", q)); - // Note that we can't skip the proper number of builds in the database // query for a page numbers greater than one. So we will query builds from // the very beginning and skip the appropriate number of them while @@ -569,81 +582,101 @@ handle (request& rq, response& rs) // session sn; - for (bool ne (true); ne; ) + transaction t (conn->begin ()); + + // For some reason PostgreSQL (as of 9.4) picks the nested loop join + // strategy for the below package_build query, which executes quite slow + // even for reasonably small number of builds. Thus, we just discourage + // PostgreSQL from using this strategy in the current transaction. + // + // @@ TMP Re-check for the later PostgreSQL versions if we can drop this + // hint. If drop, then also grep for other places where this hint + // is used. + // + conn->execute ("SET LOCAL enable_nestloop=off"); + + // Iterate over builds and cache build objects that should be printed. + // Skip the appropriate number of them (for page number greater than + // one). + // + for (auto& pb: build_db_->query<package_build> (q)) { - transaction t (conn->begin ()); + shared_ptr<build>& b (pb.build); + + auto i ( + target_conf_map_->find ( + build_target_config_id {b->target, b->target_config_name})); - // Query package builds (and cache the result). + assert (i != target_conf_map_->end ()); + + // Match the target configuration against the package build + // configuration expressions/constraints. 
// - auto bs (pq.execute ()); + shared_ptr<build_package> p ( + build_db_->load<build_package> (b->id.package)); + + const build_package_config* pc (find (b->package_config_name, + p->configs)); - if ((ne = !bs.empty ())) + // The package configuration should be present since the configurations + // set cannot change if the package version doesn't change. If that's + // not the case, then the database has probably been manually amended. + // In this case let's just skip such a build as if it excluded and log + // the warning. + // + if (pc == nullptr) { - offset += bs.size (); + warn << "cannot find configuration '" << b->package_config_name + << "' for package " << p->id.name << '/' << p->version; - // Iterate over builds and cache build objects that should be printed. - // Skip the appropriate number of them (for page number greater than - // one). - // - for (auto& pb: bs) - { - shared_ptr<build>& b (pb.build); + continue; + } - auto i (build_conf_map_->find (b->configuration.c_str ())); - assert (i != build_conf_map_->end ()); + if (!p->constraints_section.loaded ()) + build_db_->load (*p, p->constraints_section); - // Match the configuration against the package build - // expressions/constraints. + if (!exclude (*pc, p->builds, p->constraints, *i->second)) + { + if (skip != 0) + --skip; + else if (print != 0) + { + // As we query builds in multiple transactions we may see the same + // build multiple times. Let's skip the duplicates. Note: we don't + // increment the counter in this case. // - shared_ptr<build_package> p ( - build_db_->load<build_package> (b->id.package)); - - if (!exclude (p->builds, p->constraints, *i->second)) + if (find_if (builds.begin (), builds.end (), + [&b] (const package_build& pb) + { + return b->id == pb.build->id; + }) != builds.end ()) + continue; + + if (b->state == build_state::built) { - if (skip != 0) - --skip; - else if (print != 0) - { - // As we query builds in multiple transactions we may see the - // same build multiple times. Let's skip the duplicates. Note: - // we don't increment the counter in this case. - // - if (find_if (builds.begin (), - builds.end (), - [&b] (const shared_ptr<build>& pb) - { - return b->id == pb->id; - }) != builds.end ()) - continue; - - if (b->state == build_state::built) - { - build_db_->load (*b, b->results_section); + build_db_->load (*b, b->results_section); - // Let's clear unneeded result logs for builds being cached. - // - for (operation_result& r: b->results) - r.log.clear (); - } - - builds.push_back (move (b)); + // Let's clear unneeded result logs for builds being cached. + // + for (operation_result& r: b->results) + r.log.clear (); + } - --print; - } + builds.push_back (move (pb)); - ++count; - } + --print; } + + ++(*count); } + } - // Print the filter form after the build count is calculated. Note: - // query_toolchains() must be called inside the build db transaction. - // - else - print_form (query_toolchains (), count); + // Print the filter form after the build count is calculated. Note: + // query_toolchains() must be called inside the build db transaction. + // + print_form (query_toolchains (), count); - t.commit (); - } + t.commit (); // Finally, print the cached package build configurations. // @@ -652,34 +685,43 @@ handle (request& rq, response& rs) // Enclose the subsequent tables to be able to use nth-child CSS selector. 
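The SET LOCAL enable_nestloop=off workaround used in this transaction is
scoped: the setting reverts automatically when the transaction ends, so only
the problematic package_build query is affected. Its shape in isolation (the
SQL string is verbatim from the code above; the surrounding function is
illustrative):

#include <odb/database.hxx>
#include <odb/transaction.hxx>
#include <odb/connection.hxx>

void
query_with_hint (odb::database& db)
{
  odb::connection_ptr conn (db.connection ());
  odb::transaction t (conn->begin ());

  // Discourage PostgreSQL from the nested loop join strategy for the
  // queries below. SET LOCAL reverts automatically at transaction end.
  //
  conn->execute ("SET LOCAL enable_nestloop=off");

  // ... run the expensive query here ...

  t.commit ();
}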
// s << DIV; - for (const shared_ptr<build>& pb: builds) + for (const package_build& pb: builds) { - const build& b (*pb); + const build& b (*pb.build); string ts (butl::to_string (b.timestamp, "%Y-%m-%d %H:%M:%S %Z", true /* special */, true /* local */) + - " (" + butl::to_string (now - b.timestamp, false) + " ago)"); + " (" + butl::to_string (now - b.timestamp, false) + " ago"); + + if (pb.archived) + ts += ", archived"; + + ts += ')'; s << TABLE(CLASS="proplist build") << TBODY - << TR_NAME (b.package_name, string (), root, b.tenant) + << TR_NAME (b.package_name, root, b.tenant) << TR_VERSION (b.package_name, b.package_version, root, b.tenant) << TR_VALUE ("toolchain", b.toolchain_name + '-' + b.toolchain_version.string ()) - << TR_VALUE ("config", b.configuration) - << TR_VALUE ("machine", b.machine) << TR_VALUE ("target", b.target.string ()) - << TR_VALUE ("timestamp", ts) - << TR_BUILD_RESULT (b, host, root); + << TR_VALUE ("tgt config", b.target_config_name) + << TR_VALUE ("pkg config", b.package_config_name) + << TR_VALUE ("timestamp", ts); + + if (b.interactive) // Note: can only be present for the building state. + s << TR_VALUE ("login", *b.interactive); + + s << TR_BUILD_RESULT (b, pb.archived, host, root); // In the global view mode add the tenant builds link. Note that the // global view (and the link) makes sense only in the multi-tenant mode. // if (!tn && !b.tenant.empty ()) - s << TR_TENANT (tenant_name, "builds", root, b.tenant); + s << TR_TENANT (tenant_name, "builds", root, b.tenant); s << ~TBODY << ~TABLE; @@ -689,47 +731,73 @@ handle (request& rq, response& rs) else // Print unbuilt package configurations. { // Parameters to use for package build configurations queries. Note that - // we cleanup the machine and the result filter arguments, as they are - // irrelevant for unbuilt configurations. + // we cleanup the result filter argument, as it is irrelevant for unbuilt + // configurations. // params::builds bld_params (params); - bld_params.machine ().clear (); bld_params.result () = "*"; - // Query toolchains, filter build configurations and toolchains, and - // create the set of configuration/toolchain combinations, that we will - // print for packages. Also calculate the number of unbuilt package - // configurations. + // Query toolchains, filter build target configurations and toolchains, + // and create the set of target configuration/toolchain combinations, that + // we will print for package configurations. Also calculate the number of + // unbuilt package configurations. // toolchains toolchains; - // Note that config_toolchains contains shallow references to the - // toolchain names and versions. + // Target configuration/toolchain combination. // - set<config_toolchain> config_toolchains; + // Note: all members are the shallow references. + // + struct target_config_toolchain + { + const butl::target_triplet& target; + const string& target_config; + const string& toolchain_name; + const bpkg::version& toolchain_version; + }; + + // Cache the build package objects that would otherwise be loaded twice: + // first time during calculating the builds count and then during printing + // the builds. Note that the build package is a subset of the package + // object and normally has a small memory footprint. + // + // @@ TMP It feels that we can try to combine the mentioned steps and + // improve the performance a bit. We won't need the session in this + // case. 
+ // + session sn; + + connection_ptr conn (build_db_->connection ()); + transaction t (conn->begin ()); + + // Discourage PostgreSQL from using the nested loop join strategy in the + // current transaction (see above for details). + // + conn->execute ("SET LOCAL enable_nestloop=off"); + + vector<target_config_toolchain> config_toolchains; { - transaction t (build_db_->begin ()); toolchains = query_toolchains (); - string tc_name; - version tc_version; - const string& tc (params.toolchain ()); + string th_name; + version th_version; + const string& th (params.toolchain ()); - if (tc != "*") + if (th != "*") try { - size_t p (tc.find ('-')); + size_t p (th.find ('-')); if (p == string::npos) // Invalid format. throw invalid_argument (""); - tc_name.assign (tc, 0, p); + th_name.assign (th, 0, p); // May throw invalid_argument. // // Note that an absent and zero revisions have the same semantics, // so the zero revision is folded (see above for details). // - tc_version = version (string (tc, p + 1)); + th_version = version (string (th, p + 1)); } catch (const invalid_argument&) { @@ -739,63 +807,63 @@ handle (request& rq, response& rs) throw invalid_request (400, "invalid toolchain"); } - const string& pc (params.configuration ()); - const string& tg (params.target ()); - vector<const build_config*> configs; + vector<const build_target_config*> target_configs; - for (const auto& c: *build_conf_) + for (const auto& c: *target_conf_) { - if ((pc.empty () || path_match (c.name, pc)) && // Filter by name. + // Filter by name. + // + if ((tgt_cfg.empty () || path_match (c.name, tgt_cfg)) && // Filter by target. // - (tg.empty () || path_match (c.target.string (), tg)) && + (tgt.empty () || path_match (c.target.string (), tgt)) && - (!exclude_hidden || belongs (c, "all"))) // Filter hidden. + (!exclude_hidden || !belongs (c, "hidden"))) // Filter hidden. { - configs.push_back (&c); + target_configs.push_back (&c); for (const auto& t: toolchains) { // Filter by toolchain. // - if (tc == "*" || (t.first == tc_name && t.second == tc_version)) - config_toolchains.insert ({c.name, t.first, t.second}); + if (th == "*" || (t.first == th_name && t.second == th_version)) + config_toolchains.push_back ( + target_config_toolchain {c.target, c.name, t.first, t.second}); } } } - // Calculate the number of unbuilt package configurations as a - // difference between the maximum possible number of unbuilt - // configurations and the number of existing package builds. - // - // Note that we also need to deduct the package-excluded configurations - // count from the maximum possible number of unbuilt configurations. The - // only way to achieve this is to traverse through the packages and - // match their build expressions/constraints against our configurations. - // - // Also note that some existing builds can now be excluded by packages - // due to the build configuration target or class set change. We should - // deduct such builds count from the number of existing package builds. - // - size_t nmax ( - config_toolchains.size () * - build_db_->query_value<buildable_package_count> ( - package_query<buildable_package_count> ( - params, tn, false /* archived */))); - - size_t ncur = build_db_->query_value<package_build_count> ( - build_query<package_build_count> ( - &conf_names, bld_params, tn, false /* archived */)); - - // From now we will be using specific package name and version for each - // build database query. 
-    //
-    bld_params.name ().clear ();
-    bld_params.version ().clear ();
-
     if (!config_toolchains.empty ())
     {
+      // Calculate the number of unbuilt package configurations as a
+      // difference between the possible number of unbuilt configurations
+      // and the number of existing package builds.
+      //
+      // Note that some existing builds can now be excluded by package
+      // configurations due to the build target configuration class set
+      // change. We should deduct such builds count from the number of
+      // existing package configuration builds.
+      //
+      // The only way to calculate both numbers is to traverse through the
+      // package configurations and match their build
+      // expressions/constraints against our target configurations.
+      //
+      size_t npos (0);
+
+      size_t ncur (build_db_->query_value<package_build_count> (
+        build_query<package_build_count> (&conf_ids, bld_params, tn)));
+
+      // From now we will be using specific values for the below filters for
+      // each build database query. Note that the toolchain is the only
+      // filter left in bld_params.
+      //
+      bld_params.name ().clear ();
+      bld_params.version ().clear ();
+      bld_params.target ().clear ();
+      bld_params.target_config ().clear ();
+      bld_params.package_config ().clear ();
+
       // Prepare the build count prepared query.
       //
       // For each package-excluded configuration we will query the number of
@@ -805,58 +873,82 @@ handle (request& rq, response& rs)
       using prep_bld_query = prepared_query<package_build_count>;

       package_id id;
-      string config;
+      target_triplet target;
+      string target_config_name;
+      string package_config_name;

       const auto& bid (bld_query::build::id);

-      bld_query bq (equal<package_build_count> (bid.package, id) &&
-                    bid.configuration == bld_query::_ref (config) &&
+      bld_query bq (
+        equal<package_build_count> (bid.package, id) &&
+        bid.target == bld_query::_ref (target) &&
+        bid.target_config_name == bld_query::_ref (target_config_name) &&
+        bid.package_config_name == bld_query::_ref (package_config_name) &&

                     // Note that the query already constrains configurations via the
-                    // configuration name and the tenant via the build package id.
+                    // configuration name and target.
+                    //
+                    // Also note that while the query already constrains the tenant via
+                    // the build package id, we still need to pass the tenant not to
+                    // erroneously filter out the private tenants.
                     //
-                    build_query<package_build_count> (nullptr /* configs */,
+                    build_query<package_build_count> (nullptr /* config_ids */,
                                                       bld_params,
-                                                      nullopt /* tenant */,
-                                                      false /* archived */));
+                                                      tn));

       prep_bld_query bld_prep_query (
         build_db_->prepare_query<package_build_count> (
           "mod-builds-build-count-query", bq));

-      size_t nt (tc == "*" ? toolchains.size () : 1);
+      // Number of possible builds per package configuration.
+      //
+      size_t nt (th == "*" ? toolchains.size () : 1);

       // The number of packages can potentially be large, and we may
       // implement some caching in the future. However, the caching will not
       // be easy as the cached values depend on the filter form parameters.
       //
       query<buildable_package> q (
-        package_query<buildable_package> (
-          params, tn, false /* archived */));
+        package_query<buildable_package> (params, tn));

       for (auto& bp: build_db_->query<buildable_package> (q))
       {
-        id = move (bp.id);
+        shared_ptr<build_package>& p (bp.package);

-        shared_ptr<build_package> p (build_db_->load<build_package> (id));
+        id = p->id;

-        for (const auto& c: configs)
+        // Note: load the constraints section lazily.
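Lazy sections, used here for the constraints, are an ODB feature: a member of
type odb::section marked load(lazy) is skipped by load() and fetched with a
separate statement only on request. A hypothetical sketch of the shape of such
an object (the pragma placement is illustrative, not copied from the brep
model):

#pragma db object
class build_package
{
  // ...

  #pragma db load(lazy)
  odb::section constraints_section;
};

// Later, on demand:
//
if (!p->constraints_section.loaded ())
  db.load (*p, p->constraints_section); // One extra SELECT, only when needed.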
+ // + for (const build_package_config& c: p->configs) { - if (exclude (p->builds, p->constraints, *c)) + // Filter by package config name. + // + if (pkg_cfg.empty () || path_match (c.name, pkg_cfg)) { - nmax -= nt; - - config = c->name; - ncur -= bld_prep_query.execute_value (); + for (const auto& tc: target_configs) + { + if (!p->constraints_section.loaded ()) + build_db_->load (*p, p->constraints_section); + + if (exclude (c, p->builds, p->constraints, *tc)) + { + target = tc->target; + target_config_name = tc->name; + package_config_name = c.name; + ncur -= bld_prep_query.execute_value (); + } + else + npos += nt; + } } } } - } - assert (nmax >= ncur); - count = nmax - ncur; - - t.commit (); + assert (npos >= ncur); + count = npos - ncur; + } + else + count = nullopt; // Unknown count. } // Print the filter form. @@ -870,9 +962,11 @@ handle (request& rq, response& rs) // 3: package tenant // 4: toolchain name // 5: toolchain version (descending) - // 6: configuration name + // 6: target + // 7: target configuration name + // 8: package configuration name // - // Prepare the build package prepared query. + // Prepare the build package query. // // Note that we can't skip the proper number of packages in the database // query for a page numbers greater than one. So we will query packages @@ -887,28 +981,14 @@ handle (request& rq, response& rs) // URL query parameter. Alternatively, we can invent the page number cap. // using pkg_query = query<buildable_package>; - using prep_pkg_query = prepared_query<buildable_package>; - - pkg_query pq ( - package_query<buildable_package> (params, tn, false /* archived */)); - // Specify the portion. Note that we will still be querying packages in - // chunks, not to hold locks for too long. For each package we will query - // its builds, so let's keep the portion small. - // - size_t offset (0); + pkg_query pq (package_query<buildable_package> (params, tn)); pq += "ORDER BY" + pkg_query::build_package::id.name + order_by_version_desc (pkg_query::build_package::id.version, false /* first */) + "," + - pkg_query::build_package::id.tenant + - "OFFSET" + pkg_query::_ref (offset) + "LIMIT 50"; - - connection_ptr conn (build_db_->connection ()); - - prep_pkg_query pkg_prep_query ( - conn->prepare_query<buildable_package> ("mod-builds-package-query", pq)); + pkg_query::build_package::id.tenant; // Prepare the build prepared query. // @@ -922,14 +1002,13 @@ handle (request& rq, response& rs) package_id id; - bld_query bq ( - equal<package_build> (bld_query::build::id.package, id) && + bld_query bq (equal<package_build> (bld_query::build::id.package, id) && - // Note that the query already constrains the tenant via the build - // package id. - // - build_query<package_build> ( - &conf_names, bld_params, nullopt /* tenant */, false /* archived */)); + // Note that while the query already constrains the tenant + // via the build package id, we still need to pass the + // tenant not to erroneously filter out the private tenants. + // + build_query<package_build> (&conf_ids, bld_params, tn)); prep_bld_query bld_prep_query ( conn->prepare_query<package_build> ("mod-builds-build-query", bq)); @@ -940,99 +1019,115 @@ handle (request& rq, response& rs) // Enclose the subsequent tables to be able to use nth-child CSS selector. // s << DIV; - while (print != 0) - { - transaction t (conn->begin ()); - // Query (and cache) buildable packages. - // - auto packages (pkg_prep_query.execute ()); + // Query (and cache) buildable packages. 
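Note that the pagination rewrite above trades the old OFFSET/LIMIT chunking
for a single ordered query whose cached result is then skipped through in
memory. Condensed to its essence (the version-descending ordering term is
omitted for brevity):

using pkg_query = query<buildable_package>;

pkg_query pq (package_query<buildable_package> (params, tn));

pq += "ORDER BY" + pkg_query::build_package::id.name + "," +
      pkg_query::build_package::id.tenant;

// One pass over the ordered result within the open transaction.
//
for (auto& bp: build_db_->query<buildable_package> (pq))
{
  // ... skip and print according to the page number ...
}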
+      //
+      auto packages (build_db_->query<buildable_package> (pq));

-        if (packages.empty ())
-          print = 0;
-        else
+      if (packages.empty ())
+        print = 0;
+      else
+      {
+        // Iterate over packages and print unbuilt configurations. Skip the
+        // appropriate number of them first (for page number greater than one).
+        //
+        for (auto& bp: packages)
         {
-          offset += packages.size ();
+          shared_ptr<build_package>& p (bp.package);

-          // Iterate over packages and print unbuilt configurations. Skip the
-          // appropriate number of them first (for page number greater than one).
+          id = p->id;
+
+          // Copy configuration/toolchain combinations for this package,
+          // skipping excluded configurations.
           //
-          for (auto& p: packages)
-          {
-            id = move (p.id);
+          set<config_toolchain> unbuilt_configs;

-            // Copy configuration/toolchain combinations for this package,
-            // skipping excluded configurations.
+          // Load the constraints section lazily.
+          //
+          for (const build_package_config& pc: p->configs)
+          {
+            // Filter by package config name.
             //
-            set<config_toolchain> unbuilt_configs;
+            if (pkg_cfg.empty () || path_match (pc.name, pkg_cfg))
             {
-              shared_ptr<build_package> p (build_db_->load<build_package> (id));
-
-              for (const auto& ct: config_toolchains)
+              for (const target_config_toolchain& ct: config_toolchains)
               {
-                auto i (build_conf_map_->find (ct.configuration.c_str ()));
-                assert (i != build_conf_map_->end ());
-
-                if (!exclude (p->builds, p->constraints, *i->second))
-                  unbuilt_configs.insert (ct);
+                auto i (
+                  target_conf_map_->find (
+                    build_target_config_id {ct.target, ct.target_config}));
+
+                assert (i != target_conf_map_->end ());
+
+                if (!p->constraints_section.loaded ())
+                  build_db_->load (*p, p->constraints_section);
+
+                if (!exclude (pc, p->builds, p->constraints, *i->second))
+                  unbuilt_configs.insert (
+                    config_toolchain {ct.target,
+                                      ct.target_config,
+                                      pc.name,
+                                      ct.toolchain_name,
+                                      ct.toolchain_version});
               }
             }
+          }

-            // Iterate through the package configuration builds and erase them
-            // from the unbuilt configurations set.
-            //
-            for (const auto& pb: bld_prep_query.execute ())
-            {
-              const build& b (*pb.build);
+          // Iterate through the package configuration builds and erase them
+          // from the unbuilt configurations set.
+          //
+          for (const auto& pb: bld_prep_query.execute ())
+          {
+            const build& b (*pb.build);

-              unbuilt_configs.erase ({
-                b.id.configuration, b.toolchain_name, b.toolchain_version});
-            }
+            unbuilt_configs.erase (config_toolchain {b.target,
+                                                     b.target_config_name,
+                                                     b.package_config_name,
+                                                     b.toolchain_name,
+                                                     b.toolchain_version});
+          }

-            // Print unbuilt package configurations.
-            //
-            for (const auto& ct: unbuilt_configs)
+          // Print unbuilt package configurations.
+          //
+          for (const auto& ct: unbuilt_configs)
+          {
+            if (skip != 0)
             {
-              if (skip != 0)
-              {
-                --skip;
-                continue;
-              }
-
-              auto i (build_conf_map_->find (ct.configuration.c_str ()));
-              assert (i != build_conf_map_->end ());
-
-              s << TABLE(CLASS="proplist build")
-                <<   TBODY
-                <<     TR_NAME (id.name, string (), root, id.tenant)
-                <<     TR_VERSION (id.name, p.version, root, id.tenant)
-                <<     TR_VALUE ("toolchain",
-                                 string (ct.toolchain_name) + '-' +
-                                 ct.toolchain_version.string ())
-                <<     TR_VALUE ("config", ct.configuration)
-                <<     TR_VALUE ("target", i->second->target.string ());
-
-              // In the global view mode add the tenant builds link. Note that
-              // the global view (and the link) makes sense only in the
-              // multi-tenant mode.
- // - if (!tn && !id.tenant.empty ()) - s << TR_TENANT (tenant_name, "builds", root, id.tenant); + --skip; + continue; + } - s << ~TBODY - << ~TABLE; + s << TABLE(CLASS="proplist build") + << TBODY + << TR_NAME (id.name, root, id.tenant) + << TR_VERSION (id.name, p->version, root, id.tenant) + << TR_VALUE ("toolchain", + string (ct.toolchain_name) + '-' + + ct.toolchain_version.string ()) + << TR_VALUE ("target", ct.target.string ()) + << TR_VALUE ("tgt config", ct.target_config) + << TR_VALUE ("pkg config", ct.package_config); + + // In the global view mode add the tenant builds link. Note that + // the global view (and the link) makes sense only in the + // multi-tenant mode. + // + if (!tn && !id.tenant.empty ()) + s << TR_TENANT (tenant_name, "builds", root, id.tenant); - if (--print == 0) // Bail out the configuration loop. - break; - } + s << ~TBODY + << ~TABLE; - if (print == 0) // Bail out the package loop. + if (--print == 0) // Bail out the configuration loop. break; } - } - t.commit (); + if (print == 0) // Bail out the package loop. + break; + } } + + t.commit (); + s << ~DIV; } @@ -1058,13 +1153,17 @@ handle (request& rq, response& rs) }; add_filter ("pv", params.version ()); - add_filter ("tc", params.toolchain (), "*"); - add_filter ("cf", params.configuration ()); - add_filter ("mn", params.machine ()); - add_filter ("tg", params.target ()); + add_filter ("th", params.toolchain (), "*"); + add_filter ("tg", tgt); + add_filter ("tc", tgt_cfg); + add_filter ("pc", pkg_cfg); add_filter ("rs", params.result (), "*"); - s << DIV_PAGER (page, count, page_configs, options_->build_pages (), u) + s << DIV_PAGER (page, + count ? *count : 0, + page_configs, + options_->build_pages (), + u) << ~DIV << ~BODY << ~HTML; diff --git a/mod/mod-ci.cxx b/mod/mod-ci.cxx index d2da93f..5974d45 100644 --- a/mod/mod-ci.cxx +++ b/mod/mod-ci.cxx @@ -3,18 +3,11 @@ #include <mod/mod-ci.hxx> -#include <ostream> - -#include <libbutl/uuid.hxx> -#include <libbutl/sendmail.mxx> -#include <libbutl/fdstream.mxx> -#include <libbutl/timestamp.mxx> -#include <libbutl/filesystem.mxx> -#include <libbutl/process-io.mxx> // operator<<(ostream, process_args) -#include <libbutl/manifest-parser.mxx> -#include <libbutl/manifest-serializer.mxx> - -#include <libbpkg/manifest.hxx> +#include <libbutl/fdstream.hxx> +#include <libbutl/manifest-parser.hxx> +#include <libbutl/manifest-serializer.hxx> + +#include <libbpkg/manifest.hxx> // package_manifest #include <libbpkg/package-name.hxx> #include <web/server/module.hxx> @@ -23,20 +16,35 @@ #include <mod/page.hxx> #include <mod/module-options.hxx> -#include <mod/external-handler.hxx> using namespace std; using namespace butl; using namespace web; using namespace brep::cli; +#ifdef BREP_CI_TENANT_SERVICE +brep::ci:: +ci (tenant_service_map& tsm) + : tenant_service_map_ (tsm) +{ +} +#endif + brep::ci:: +#ifdef BREP_CI_TENANT_SERVICE +ci (const ci& r, tenant_service_map& tsm) +#else ci (const ci& r) +#endif : handler (r), + ci_start (r), options_ (r.initialized_ ? r.options_ : nullptr), form_ (r.initialized_ || r.form_ == nullptr ? r.form_ : make_shared<xhtml::fragment> (*r.form_)) +#ifdef BREP_CI_TENANT_SERVICE + , tenant_service_map_ (tsm) +#endif { } @@ -45,22 +53,25 @@ init (scanner& s) { HANDLER_DIAG; +#ifdef BREP_CI_TENANT_SERVICE + { + shared_ptr<tenant_service_base> ts ( + dynamic_pointer_cast<tenant_service_base> (shared_from_this ())); + + assert (ts != nullptr); // By definition. 
+ + tenant_service_map_["ci"] = move (ts); + } +#endif + options_ = make_shared<options::ci> ( s, unknown_mode::fail, unknown_mode::fail); - // Verify that the CI request handling is setup properly, if configured. + // Prepare for the CI requests handling, if configured. // if (options_->ci_data_specified ()) { - // Verify the data directory satisfies the requirements. - // - const dir_path& d (options_->ci_data ()); - - if (d.relative ()) - fail << "ci-data directory path must be absolute"; - - if (!dir_exists (d)) - fail << "ci-data directory '" << d << "' does not exist"; + ci_start::init (make_shared<options::ci_start> (*options_)); // Parse XHTML5 form file, if configured. // @@ -87,10 +98,6 @@ init (scanner& s) fail << "unable to read ci-form file '" << ci_form << "': " << e; } } - - if (options_->ci_handler_specified () && - options_->ci_handler ().relative ()) - fail << "ci-handler path must be absolute"; } if (options_->root ().empty ()) @@ -130,9 +137,8 @@ handle (request& rq, response& rs) // // return respond_error (); // Request is handled with an error. // - string request_id; // Will be set later. - auto respond_manifest = [&rs, &request_id] (status_code status, - const string& message) -> bool + auto respond_manifest = [&rs] (status_code status, + const string& message) -> bool { serializer s (rs.content (status, "text/manifest;charset=utf-8"), "response"); @@ -140,10 +146,6 @@ handle (request& rq, response& rs) s.next ("", "1"); // Start of manifest. s.next ("status", to_string (status)); s.next ("message", message); - - if (!request_id.empty ()) - s.next ("reference", request_id); - s.next ("", ""); // End of manifest. return true; }; @@ -234,9 +236,11 @@ handle (request& rq, response& rs) if (rl.empty () || rl.local ()) return respond_manifest (400, "invalid repository location"); - // Verify the package name[/version] arguments. + // Parse the package name[/version] arguments. // - for (const string& s: params.package()) + vector<package> packages; + + for (const string& s: params.package ()) { // Let's skip the potentially unfilled package form fields. // @@ -245,18 +249,21 @@ handle (request& rq, response& rs) try { + package pkg; size_t p (s.find ('/')); if (p != string::npos) { - package_name (string (s, 0, p)); + pkg.name = package_name (string (s, 0, p)); // Not to confuse with module::version. // - bpkg::version (string (s, p + 1)); + pkg.version = bpkg::version (string (s, p + 1)); } else - package_name p (s); // Not to confuse with the s variable declaration. + pkg.name = package_name (s); + + packages.push_back (move (pkg)); } catch (const invalid_argument&) { @@ -265,31 +272,49 @@ handle (request& rq, response& rs) } // Verify that unknown parameter values satisfy the requirements (contain - // only UTF-8 encoded graphic characters plus '\t', '\r', and '\n'). + // only UTF-8 encoded graphic characters plus '\t', '\r', and '\n') and + // stash them. // // Actually, the expected ones must satisfy too, so check them as well. 
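The utf8() predicate used in the check below comes from libbutl and, with
these arguments, accepts only UTF-8 encoded graphic characters plus the
whitelisted '\t', '\r', and '\n', returning a description of the offending
codepoint otherwise. An isolated sketch (the header name is an assumption
based on the includes renamed in this commit):

#include <iostream>
#include <string>

#include <libbutl/utf8.hxx> // Assumed location of utf8(), codepoint_types.

int main ()
{
  using namespace std;
  using namespace butl;

  string what;
  string v ("free-form parameter value\r\n");

  // Accept graphic codepoints plus the explicit whitelist only.
  //
  if (!utf8 (v, what, codepoint_types::graphic, U"\n\r\t"))
    cerr << "invalid value: " << what << endl;
}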
// - string what; - for (const name_value& nv: rps) + vector<pair<string, string>> custom_request; { - if (nv.value && - !utf8 (*nv.value, what, codepoint_types::graphic, U"\n\r\t")) - return respond_manifest (400, - "invalid parameter " + nv.name + ": " + what); + string what; + for (const name_value& nv: rps) + { + if (nv.value && + !utf8 (*nv.value, what, codepoint_types::graphic, U"\n\r\t")) + return respond_manifest (400, + "invalid parameter " + nv.name + ": " + what); + + const string& n (nv.name); + + if (n != "repository" && + n != "_" && + n != "package" && + n != "overrides" && + n != "interactive" && + n != "simulate") + custom_request.emplace_back (n, nv.value ? *nv.value : ""); + } } // Parse and validate overrides, if present. // - vector<manifest_name_value> overrides; + vector<pair<string, string>> overrides; if (params.overrides_specified ()) try { istream& is (rq.open_upload ("overrides")); parser mp (is, "overrides"); - overrides = parse_manifest (mp); + vector<manifest_name_value> ovrs (parse_manifest (mp)); + + package_manifest::validate_overrides (ovrs, mp.name ()); - package_manifest::validate_overrides (overrides, mp.name ()); + overrides.reserve (ovrs.size ()); + for (manifest_name_value& nv: ovrs) + overrides.emplace_back (move (nv.name), move (nv.value)); } // Note that invalid_argument (thrown by open_upload() function call) can // mean both no overrides upload or multiple overrides uploads. @@ -310,381 +335,141 @@ handle (request& rq, response& rs) return respond_error (); } - try - { - // Note that from now on the result manifest we respond with will contain - // the reference value. - // - request_id = uuid::generate ().string (); - } - catch (const system_error& e) - { - error << "unable to generate request id: " << e; - return respond_error (); - } - - // Create the submission data directory. + // Stash the User-Agent HTTP header and the client IP address. // - dir_path dd (options_->ci_data () / dir_path (request_id)); - - try + optional<string> client_ip; + optional<string> user_agent; + for (const name_value& h: rq.headers ()) { - // It's highly unlikely but still possible that the directory already - // exists. This can only happen if the generated uuid is not unique. - // - if (try_mkdir (dd) == mkdir_status::already_exists) - throw_generic_error (EEXIST); + if (icasecmp (h.name, ":Client-IP") == 0) + client_ip = h.value; + else if (icasecmp (h.name, "User-Agent") == 0) + user_agent = h.value; } - catch (const system_error& e) - { - error << "unable to create directory '" << dd << "': " << e; - return respond_error (); - } - - auto_rmdir ddr (dd); - - // Serialize the CI request manifest to a stream. On the serialization error - // respond to the client with the manifest containing the bad request (400) - // code and return false, on the stream error pass through the io_error - // exception, otherwise return true. - // - timestamp ts (system_clock::now ()); - - auto rqm = [&request_id, - &rl, - &ts, - &simulate, - &rq, - &rps, - ¶ms, - &respond_manifest] - (ostream& os, bool long_lines = false) -> bool - { - try - { - serializer s (os, "request", long_lines); - // Serialize the submission manifest header. - // - s.next ("", "1"); // Start of manifest. - s.next ("id", request_id); - s.next ("repository", rl.string ()); - - for (const string& p: params.package ()) - { - if (!p.empty ()) // Skip empty package names (see above for details). 
- s.next ("package", p); - } - - s.next ("timestamp", - butl::to_string (ts, - "%Y-%m-%dT%H:%M:%SZ", - false /* special */, - false /* local */)); - - if (!simulate.empty ()) - s.next ("simulate", simulate); - - // Serialize the User-Agent HTTP header and the client IP address. - // - optional<string> ip; - optional<string> ua; - for (const name_value& h: rq.headers ()) - { - if (icasecmp (h.name, ":Client-IP") == 0) - ip = h.value; - else if (icasecmp (h.name, "User-Agent") == 0) - ua = h.value; - } - - if (ip) - s.next ("client-ip", *ip); - - if (ua) - s.next ("user-agent", *ua); - - // Serialize the request parameters. - // - // Note that the serializer constraints the parameter names (can't start - // with '#', can't contain ':' and the whitespaces, etc.). - // - for (const name_value& nv: rps) - { - const string& n (nv.name); - - if (n != "repository" && - n != "_" && - n != "package" && - n != "overrides" && - n != "simulate") - s.next (n, nv.value ? *nv.value : ""); - } - - s.next ("", ""); // End of manifest. - return true; - } - catch (const serialization& e) - { - respond_manifest (400, string ("invalid parameter: ") + e.what ()); - return false; - } - }; - - // Serialize the CI request manifest to the submission directory. - // - path rqf (dd / "request.manifest"); + optional<start_result> r (start (error, + warn, + verb_ ? &trace : nullptr, +#ifdef BREP_CI_TENANT_SERVICE + tenant_service ("", "ci"), +#else + nullopt /* service */, +#endif + rl, + packages, + client_ip, + user_agent, + (params.interactive_specified () + ? params.interactive () + : optional<string> ()), + (!simulate.empty () + ? simulate + : optional<string> ()), + custom_request, + overrides)); + + if (!r) + return respond_error (); // The diagnostics is already issued. try { - ofdstream os (rqf); - bool r (rqm (os)); - os.close (); - - if (!r) - return true; // The client is already responded with the manifest. - } - catch (const io_error& e) - { - error << "unable to write to '" << rqf << "': " << e; - return respond_error (); + serialize_manifest (*r, + rs.content (r->status, "text/manifest;charset=utf-8")); } - - // Serialize the CI overrides manifest to a stream. On the stream error pass - // through the io_error exception. - // - // Note that it can't throw the serialization exception as the override - // manifest is parsed from the stream and so verified. - // - auto ovm = [&overrides] (ostream& os, bool long_lines = false) + catch (const serialization& e) { - try - { - serializer s (os, "overrides", long_lines); - serialize_manifest (s, overrides); - } - catch (const serialization&) {assert (false);} // See above. - }; + error << "ref " << r->reference << ": unable to serialize handler's " + << "output: " << e; - // Serialize the CI overrides manifest to the submission directory. - // - path ovf (dd / "overrides.manifest"); - - if (!overrides.empty ()) - try - { - ofdstream os (ovf); - ovm (os); - os.close (); - } - catch (const io_error& e) - { - error << "unable to write to '" << ovf << "': " << e; return respond_error (); } - // Given that the submission data is now successfully persisted we are no - // longer in charge of removing it, except for the cases when the submission - // handler terminates with an error (see below for details). - // - ddr.cancel (); - - // If the handler terminates with non-zero exit status or specifies 5XX - // (HTTP server error) submission result manifest status value, then we - // stash the submission data directory for troubleshooting. 
Otherwise, if - // it's the 4XX (HTTP client error) status value, then we remove the - // directory. - // - // Note that leaving the directory in place in case of a submission error - // would have prevent the user from re-submitting until we research the - // issue and manually remove the directory. - // - auto stash_submit_dir = [&dd, error] () - { - if (dir_exists (dd)) - try - { - mvdir (dd, dir_path (dd + ".fail")); - } - catch (const system_error& e) - { - // Not much we can do here. Let's just log the issue and bail out - // leaving the directory in place. - // - error << "unable to rename directory '" << dd << "': " << e; - } - }; - - // Run the submission handler, if specified, reading the result manifest - // from its stdout and caching it as a name/value pair list for later use - // (forwarding to the client, sending via email, etc.). Otherwise, create - // implied result manifest. - // - status_code sc; - vector<manifest_name_value> rvs; - - if (options_->ci_handler_specified ()) - { - using namespace external_handler; - - optional<result_manifest> r (run (options_->ci_handler (), - options_->ci_handler_argument (), - dd, - options_->ci_handler_timeout (), - error, - warn, - verb_ ? &trace : nullptr)); - if (!r) - { - stash_submit_dir (); - return respond_error (); // The diagnostics is already issued. - } - - sc = r->status; - rvs = move (r->values); - } - else // Create the implied result manifest. - { - sc = 200; - - auto add = [&rvs] (string n, string v) - { - manifest_name_value nv { - move (n), move (v), - 0 /* name_line */, 0 /* name_column */, - 0 /* value_line */, 0 /* value_column */, - 0 /* start_pos */, 0 /* colon_pos */, 0 /* end_pos */}; - - rvs.emplace_back (move (nv)); - }; - - add ("status", "200"); - add ("message", "CI request is queued"); - add ("reference", request_id); - } - - assert (!rvs.empty ()); // Produced by the handler or is implied. - - // Serialize the submission result manifest to a stream. On the - // serialization error log the error description and return false, on the - // stream error pass through the io_error exception, otherwise return true. - // - auto rsm = [&rvs, &error, &request_id] (ostream& os, - bool long_lines = false) -> bool - { - try - { - serializer s (os, "result", long_lines); - serialize_manifest (s, rvs); - return true; - } - catch (const serialization& e) - { - error << "ref " << request_id << ": unable to serialize handler's " - << "output: " << e; - return false; - } - }; - - // If the submission data directory still exists then perform an appropriate - // action on it, depending on the submission result status. Note that the - // handler could move or remove the directory. - // - if (dir_exists (dd)) - { - // Remove the directory if the client error is detected. - // - if (sc >= 400 && sc < 500) - rmdir_r (dd); - - // Otherwise, save the result manifest, into the directory. Also stash the - // directory for troubleshooting in case of the server error. - // - else - { - path rsf (dd / "result.manifest"); - - try - { - ofdstream os (rsf); - - // Not being able to stash the result manifest is not a reason to - // claim the submission failed. The error is logged nevertheless. - // - rsm (os); - - os.close (); - } - catch (const io_error& e) - { - // Not fatal (see above). - // - error << "unable to write to '" << rsf << "': " << e; - } - - if (sc >= 500 && sc < 600) - stash_submit_dir (); - } - } - - // Send email, if configured, and the CI request submission is not simulated. 
- // Use the long lines manifest serialization mode for the convenience of - // copying/clicking URLs they contain. - // - // Note that we don't consider the email sending failure to be a submission - // failure as the submission data is successfully persisted and the handler - // is successfully executed, if configured. One can argue that email can be - // essential for the submission processing and missing it would result in - // the incomplete submission. In this case it's natural to assume that the - // web server error log is monitored and the email sending failure will be - // noticed. - // - if (options_->ci_email_specified () && simulate.empty ()) - try - { - // Redirect the diagnostics to the web server error log. - // - sendmail sm ([&trace, this] (const char* args[], size_t n) - { - l2 ([&]{trace << process_args {args, n};}); - }, - 2 /* stderr */, - options_->email (), - "CI request submission (" + request_id + ")", - {options_->ci_email ()}); - - // Write the CI request manifest. - // - bool r (rqm (sm.out, true /* long_lines */)); - assert (r); // The serialization succeeded once, so can't fail now. - - // Write the CI overrides manifest. - // - sm.out << "\n\n"; - - ovm (sm.out, true /* long_lines */); - - // Write the CI result manifest. - // - sm.out << "\n\n"; - - // We don't care about the result (see above). - // - rsm (sm.out, true /* long_lines */); - - sm.out.close (); + return true; +} - if (!sm.wait ()) - error << "sendmail " << *sm.exit; - } - // Handle process_error and io_error (both derive from system_error). - // - catch (const system_error& e) - { - error << "sendmail error: " << e; - } +#ifdef BREP_CI_TENANT_SERVICE +function<optional<string> (const brep::tenant_service&)> brep::ci:: +build_queued (const tenant_service&, + const vector<build>& bs, + optional<build_state> initial_state, + const build_queued_hints& hints, + const diag_epilogue& log_writer) const noexcept +{ + NOTIFICATION_DIAG (log_writer); + + l2 ([&]{trace << "initial_state: " + << (initial_state ? to_string (*initial_state) : "none") + << ", hints " + << hints.single_package_version << ' ' + << hints.single_package_config;}); + + return [&bs, initial_state] (const tenant_service& ts) + { + optional<string> r (ts.data); + + for (const build& b: bs) + { + string s ((!initial_state + ? "queued " + : "queued " + to_string (*initial_state) + ' ') + + b.package_name.string () + '/' + + b.package_version.string () + '/' + + b.target.string () + '/' + + b.target_config_name + '/' + + b.package_config_name + '/' + + b.toolchain_name + '/' + + b.toolchain_version.string ()); + + if (r) + { + *r += ", "; + *r += s; + } + else + r = move (s); + } + + return r; + }; +} - if (!rsm (rs.content (sc, "text/manifest;charset=utf-8"))) - return respond_error (); // The error description is already logged. +function<optional<string> (const brep::tenant_service&)> brep::ci:: +build_building (const tenant_service&, + const build& b, + const diag_epilogue&) const noexcept +{ + return [&b] (const tenant_service& ts) + { + string s ("building " + + b.package_name.string () + '/' + + b.package_version.string () + '/' + + b.target.string () + '/' + + b.target_config_name + '/' + + b.package_config_name + '/' + + b.toolchain_name + '/' + + b.toolchain_version.string ()); + + return ts.data ? 
*ts.data + ", " + s : s; + }; +} - return true; +function<optional<string> (const brep::tenant_service&)> brep::ci:: +build_built (const tenant_service&, + const build& b, + const diag_epilogue&) const noexcept +{ + return [&b] (const tenant_service& ts) + { + string s ("built " + + b.package_name.string () + '/' + + b.package_version.string () + '/' + + b.target.string () + '/' + + b.target_config_name + '/' + + b.package_config_name + '/' + + b.toolchain_name + '/' + + b.toolchain_version.string ()); + + return ts.data ? *ts.data + ", " + s : s; + }; } +#endif diff --git a/mod/mod-ci.hxx b/mod/mod-ci.hxx index 431f53b..1e2ee15 100644 --- a/mod/mod-ci.hxx +++ b/mod/mod-ci.hxx @@ -9,14 +9,39 @@ #include <libbrep/types.hxx> #include <libbrep/utility.hxx> +#include <libbrep/build.hxx> +#include <libbrep/common.hxx> // tenant_service + #include <mod/module.hxx> #include <mod/module-options.hxx> +#include <mod/ci-common.hxx> + +#ifdef BREP_CI_TENANT_SERVICE +# include <mod/tenant-service.hxx> +#endif + namespace brep { - class ci: public handler + class ci: public handler, + private ci_start +#ifdef BREP_CI_TENANT_SERVICE + , public tenant_service_build_queued, + public tenant_service_build_building, + public tenant_service_build_built +#endif { public: + +#ifdef BREP_CI_TENANT_SERVICE + explicit + ci (tenant_service_map&); + + // Create a shallow copy (handling instance) if initialized and a deep + // copy (context exemplar) otherwise. + // + ci (const ci&, tenant_service_map&); +#else ci () = default; // Create a shallow copy (handling instance) if initialized and a deep @@ -24,20 +49,44 @@ namespace brep // explicit ci (const ci&); +#endif virtual bool - handle (request&, response&); + handle (request&, response&) override; virtual const cli::options& - cli_options () const {return options::ci::description ();} + cli_options () const override {return options::ci::description ();} + +#ifdef BREP_CI_TENANT_SERVICE + virtual function<optional<string> (const tenant_service&)> + build_queued (const tenant_service&, + const vector<build>&, + optional<build_state> initial_state, + const build_queued_hints&, + const diag_epilogue& log_writer) const noexcept override; + + virtual function<optional<string> (const tenant_service&)> + build_building (const tenant_service&, + const build&, + const diag_epilogue& log_writer) const noexcept override; + + virtual function<optional<string> (const tenant_service&)> + build_built (const tenant_service&, + const build&, + const diag_epilogue& log_writer) const noexcept override; +#endif private: virtual void - init (cli::scanner&); + init (cli::scanner&) override; private: shared_ptr<options::ci> options_; shared_ptr<web::xhtml::fragment> form_; + +#ifdef BREP_CI_TENANT_SERVICE + tenant_service_map& tenant_service_map_; +#endif }; } diff --git a/mod/mod-package-details.cxx b/mod/mod-package-details.cxx index e0bd1ef..fcd50da 100644 --- a/mod/mod-package-details.cxx +++ b/mod/mod-package-details.cxx @@ -183,20 +183,20 @@ handle (request& rq, response& rs) // s << H2 << pkg->summary << ~H2; - if (const optional<string>& d = pkg->description) + if (const optional<typed_text>& d = pkg->package_description + ? pkg->package_description + : pkg->description) { const string id ("description"); const string what (name.string () + " description"); s << (full ? 
DIV_TEXT (*d, - *pkg->description_type, true /* strip_title */, id, what, error) : DIV_TEXT (*d, - *pkg->description_type, true /* strip_title */, options_->package_description (), url (!full, squery, page, id), @@ -227,7 +227,7 @@ handle (request& rq, response& rs) << ~TABLE; } - auto pkg_count ( + size_t pkg_count ( package_db_->query_value<package_count> ( search_params<package_count> (squery, tenant, name))); @@ -265,23 +265,12 @@ handle (request& rq, response& rs) assert (p->internal ()); - // @@ Shouldn't we make package repository name to be a link to the proper - // place of the About page, describing corresponding repository? - // Yes, I think that's sounds reasonable. - // Or maybe it can be something more valuable like a link to the - // repository package search page ? - // - // @@ In most cases package location will be the same for all versions - // of the same package. Shouldn't we put package location to the - // package summary part and display it here only if it differs - // from the one in the summary ? - // - // Hm, I am not so sure about this. Consider: stable/testing/unstable. + const repository_location& rl (p->internal_repository.load ()->location); + + // @@ Maybe the repository link can be something more valuable like a link + // to the repository package search page ? // - s << TR_REPOSITORY ( - p->internal_repository.object_id ().canonical_name, - root, - tenant) + s << TR_REPOSITORY (rl, root, tenant) << TR_DEPENDS (p->dependencies, root, tenant) << TR_REQUIRES (p->requirements) << ~TBODY diff --git a/mod/mod-package-version-details.cxx b/mod/mod-package-version-details.cxx index a7682ec..91923e5 100644 --- a/mod/mod-package-version-details.cxx +++ b/mod/mod-package-version-details.cxx @@ -9,6 +9,8 @@ #include <odb/database.hxx> #include <odb/transaction.hxx> +#include <libbutl/filesystem.hxx> // dir_iterator, dir_entry + #include <web/server/module.hxx> #include <web/server/mime-url-encoding.hxx> @@ -47,6 +49,12 @@ init (scanner& s) options_ = make_shared<options::package_version_details> ( s, unknown_mode::fail, unknown_mode::fail); + // Verify that the bindist-url option is specified when necessary. + // + if (options_->bindist_root_specified () && + !options_->bindist_url_specified ()) + fail << "bindist-url must be specified if bindist-root is specified"; + database_module::init (static_cast<const options::package_db&> (*options_), options_->package_db_retry ()); @@ -152,7 +160,7 @@ handle (request& rq, response& rs) const string& name (pkg->name.string ()); - const string title (name + " " + sver); + const string title (name + ' ' + sver); xml::serializer s (rs.content (), title); s << HTML @@ -181,20 +189,20 @@ handle (request& rq, response& rs) s << H2 << pkg->summary << ~H2; - if (const optional<string>& d = pkg->description) + if (const optional<typed_text>& d = pkg->package_description + ? pkg->package_description + : pkg->description) { const string id ("description"); const string what (title + " description"); s << (full - ? DIV_TEXT (*d, * - pkg->description_type, + ? 
DIV_TEXT (*d, true /* strip_title */, id, what, error) : DIV_TEXT (*d, - *pkg->description_type, true /* strip_title */, options_->package_description (), url (!full, id), @@ -214,14 +222,13 @@ handle (request& rq, response& rs) << TR_PRIORITY (pkg->priority) << TR_LICENSES (pkg->license_alternatives) - << TR_REPOSITORY (rl.canonical_name (), root, tenant) - << TR_LOCATION (rl); + << TR_REPOSITORY (rl, root, tenant); if (rl.type () == repository_type::pkg) { assert (pkg->location); - s << TR_LINK (rl.url ().string () + "/" + pkg->location->string (), + s << TR_LINK (rl.url ().string () + '/' + pkg->location->string (), pkg->location->leaf ().string (), "download"); } @@ -293,7 +300,7 @@ handle (request& rq, response& rs) if (dcon) s << ' ' - << A(HREF=u + "/" + p->version.string ()) << *dcon << ~A; + << A(HREF=u + '/' + p->version.string ()) << *dcon << ~A; } else if (p->internal ()) { @@ -321,31 +328,51 @@ handle (request& rq, response& rs) << TABLE(CLASS="proplist", ID="depends") << TBODY; - for (const auto& da: ds) + for (const auto& das: ds) { s << TR(CLASS="depends") << TH; - if (da.conditional) - s << "?"; - - if (da.buildtime) - s << "*"; + if (das.buildtime) + s << '*'; s << ~TH << TD << SPAN(CLASS="value"); - for (const auto& d: da) + for (const auto& da: das) { - if (&d != &da[0]) + if (&da != &das[0]) s << " | "; - print_dependency (d); + // Should we enclose multiple dependencies into curly braces as in the + // manifest? Somehow feels redundant here, since there can't be any + // ambiguity (dependency group version constraint is already punched + // into the specific dependencies without constraints). + // + for (const dependency& d: da) + { + if (&d != &da[0]) + s << ' '; + + print_dependency (d); + } + + if (da.enable) + { + s << " ? ("; + + if (full) + s << *da.enable; + else + s << "..."; + + s << ')'; + } } s << ~SPAN - << SPAN_COMMENT (da.comment) + << SPAN_COMMENT (das.comment) << ~TD << ~TR; } @@ -361,34 +388,59 @@ handle (request& rq, response& rs) << TABLE(CLASS="proplist", ID="requires") << TBODY; - for (const auto& ra: rm) + for (const requirement_alternatives& ras: rm) { s << TR(CLASS="requires") << TH; - if (ra.conditional) - s << "?"; - - if (ra.buildtime) - s << "*"; - - if (ra.conditional || ra.buildtime) - s << " "; + if (ras.buildtime) + s << '*'; s << ~TH << TD << SPAN(CLASS="value"); - for (const auto& r: ra) + for (const requirement_alternative& ra: ras) { - if (&r != &ra[0]) + if (&ra != &ras[0]) s << " | "; - s << r; + // Should we enclose multiple requirement ids into curly braces as in + // the manifest? Somehow feels redundant here, since there can't be + // any ambiguity (requirement group version constraint is already + // punched into the specific requirements without constraints). + // + for (const string& r: ra) + { + if (&r != &ra[0]) + s << ' '; + + s << r; + } + + if (ra.enable) + { + if (!ra.simple () || !ra[0].empty ()) + s << ' '; + + s << '?'; + + if (!ra.enable->empty ()) + { + s << " ("; + + if (full) + s << *ra.enable; + else + s << "..."; + + s << ')'; + } + } } s << ~SPAN - << SPAN_COMMENT (ra.comment) + << SPAN_COMMENT (ras.comment) << ~TD << ~TR; } @@ -401,7 +453,10 @@ handle (request& rq, response& rs) // // Print test dependencies of the specific type. 
//
-  auto print_tests = [&pkg, &s, &print_dependency] (test_dependency_type dt)
+  auto print_tests = [&pkg,
+                      &s,
+                      &print_dependency,
+                      full] (test_dependency_type dt)
   {
     string id;

@@ -429,11 +484,31 @@ handle (request& rq, response& rs)
       }

       s << TR(CLASS=id)
+        <<   TH;
+
+      if (td.buildtime)
+        s << '*';
+
+      s <<   ~TH
         <<   TD
         <<     SPAN(CLASS="value");

       print_dependency (td);

+      if (td.enable || td.reflect)
+      {
+        if (full)
+        {
+          if (td.enable)
+            s << " ? (" << *td.enable << ')';
+
+          if (td.reflect)
+            s << ' ' << *td.reflect;
+        }
+        else
+          s << " ...";
+      }
+
       s <<     ~SPAN
         <<   ~TD
         << ~TR;

@@ -459,34 +534,203 @@ handle (request& rq, response& rs)
   {
     package_db_->load (*pkg, pkg->build_section);

-    // If the package has a singe build configuration class expression with
-    // exactly one underlying class and the class is none, then we just drop
-    // the page builds section altogether.
+    // If all package build configurations have a single effective build
+    // configuration class expression with exactly one underlying class and
+    // the class is none, then we just drop the page builds section
+    // altogether.
     //
-    if (pkg->builds.size () == 1)
+    builds = false;
+
+    for (const package_build_config& pc: pkg->build_configs)
     {
-      const build_class_expr& be (pkg->builds[0]);
+      const build_class_exprs& exprs (pc.effective_builds (pkg->builds));

-      builds = be.underlying_classes.size () != 1 ||
-               be.underlying_classes[0] != "none";
+      if (exprs.size () != 1 ||
+          exprs[0].underlying_classes.size () != 1 ||
+          exprs[0].underlying_classes[0] != "none")
+      {
+        builds = true;
+        break;
+      }
     }
   }

-  bool archived (package_db_->load<brep::tenant> (tenant)->archived);
+  shared_ptr<brep::tenant> tn (package_db_->load<brep::tenant> (tenant));

   t.commit ();

-  if (builds)
+  // Display the binary distribution packages for this tenant, package, and
+  // version, if present. Print the archive distributions last.
+  //
+  if (options_->bindist_root_specified ())
   {
-    using bbot::build_config;
+    // Collect all the available package configurations by iterating over the
+    // <distribution> and <os-release> subdirectories and the <package-config>
+    // symlinks in the following filesystem hierarchy:
+    //
+    // [<tenant>/]<distribution>/<os-release>/<project>/<package>/<version>/<package-config>
+    //
+    // Note that it is possible that new directories and symlinks are created
+    // and/or removed while we iterate over the filesystem entries in the
+    // above hierarchy, which may result in system_error exceptions. If that
+    // happens, we just ignore such exceptions, trying to collect what we can.
+    //
+    const dir_path& br (options_->bindist_root ());
+
+    dir_path d (br);
+    if (!tenant.empty ())
+      d /= tenant;
+
+    // Note that distribution and os_release are simple paths and the
+    // config_symlink and config_dir are relative to the bindist root
+    // directory.
+    //
+    struct bindist_config
+    {
+      dir_path distribution; // debian, fedora, archive
+      dir_path os_release;   // fedora37, windows10
+      path     symlink;      // .../x86_64, .../x86_64-release
+      dir_path directory;    // .../x86_64-2023-05-11T10:13:43Z
+
+      bool
+      operator< (const bindist_config& v)
+      {
+        if (int r = distribution.compare (v.distribution))
+          return distribution.string () == "archive" ? false :
+                 v.distribution.string () == "archive" ?
true : + r < 0; + + if (int r = os_release.compare (v.os_release)) + return r < 0; + + return symlink < v.symlink; + } + }; + + vector<bindist_config> configs; + + if (dir_exists (d)) + try + { + for (const dir_entry& de: dir_iterator (d, dir_iterator::ignore_dangling)) + { + if (de.type () != entry_type::directory) + continue; + + // Distribution directory. + // + dir_path dd (path_cast<dir_path> (de.path ())); + + try + { + dir_path fdd (d / dd); + + for (const dir_entry& re: + dir_iterator (fdd, dir_iterator::ignore_dangling)) + { + if (re.type () != entry_type::directory) + continue; + + // OS release directory. + // + dir_path rd (path_cast<dir_path> (re.path ())); + + // Package version directory. + // + dir_path vd (fdd / + rd / + dir_path (pkg->project.string ()) / + dir_path (pn.string ()) / + dir_path (sver)); + + try + { + for (const dir_entry& ce: + dir_iterator (vd, dir_iterator::ignore_dangling)) + { + if (ce.ltype () != entry_type::symlink) + continue; + + // Skip the "hidden" symlinks which may potentially be used by + // the upload handlers until they expose the finalized upload + // directory. + // + const path& cl (ce.path ()); + if (cl.string () [0] == '.') + continue; + + try + { + path fcl (vd / cl); + dir_path cd (path_cast<dir_path> (followsymlink (fcl))); + + if (cd.sub (br)) + configs.push_back ( + bindist_config {dd, rd, fcl.leaf (br), cd.leaf (br)}); + } + catch (const system_error&) {} + } + } + catch (const system_error&) {} + } + } + catch (const system_error&) {} + } + } + catch (const system_error&) {} + + // Sort and print collected package configurations, if any. + // + if (!configs.empty ()) + { + sort (configs.begin (), configs.end ()); + + s << H3 << "Binaries" << ~H3 + << TABLE(ID="binaries") + << TBODY; + + for (const bindist_config& c: configs) + { + s << TR(CLASS="binaries") + << TD << SPAN(CLASS="value") << c.distribution << ~SPAN << ~TD + << TD << SPAN(CLASS="value") << c.os_release << ~SPAN << ~TD + << TD + << SPAN(CLASS="value") + << A + << HREF + << options_->bindist_url () << '/' << c.symlink + << ~HREF + << c.symlink.leaf () + << ~A + << " (" + << A + << HREF + << options_->bindist_url () << '/' << c.directory + << ~HREF + << "snapshot" + << ~A + << ")" + << ~SPAN + << ~TD + << ~TR; + } + + s << ~TBODY + << ~TABLE; + } + } + + if (builds) + { s << H3 << "Builds" << ~H3 << DIV(ID="builds"); - auto exclude = [&pkg, this] (const build_config& cfg, - string* reason = nullptr) + auto exclude = [&pkg, this] (const package_build_config& pc, + const build_target_config& tc, + string* rs = nullptr) { - return this->exclude (pkg->builds, pkg->build_constraints, cfg, reason); + return this->exclude (pc, pkg->builds, pkg->build_constraints, tc, rs); }; timestamp now (system_clock::now ()); @@ -498,13 +742,7 @@ handle (request& rq, response& rs) // Query toolchains seen for the package tenant to produce a list of the // unbuilt configuration/toolchain combinations. // - // Note that it only make sense to print those unbuilt configurations that - // may still be built. That's why we leave the toolchains list empty if - // the package tenant is achieved. 
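The filesystem traversal in the binaries section above tolerates concurrent
uploads by construction: ignore_dangling skips entries whose targets disappear
mid-scan, and any remaining race surfaces as system_error, which is swallowed
per nesting level. The same pattern reduced to its skeleton (scan() and the
collect step are illustrative, not brep functions):

#include <system_error>

#include <libbutl/path.hxx>       // dir_path, path_cast()
#include <libbutl/filesystem.hxx> // dir_iterator, dir_entry, followsymlink()

using namespace std;
using namespace butl;

static void
scan (const dir_path& d)
{
  try
  {
    for (const dir_entry& e: dir_iterator (d, dir_iterator::ignore_dangling))
    {
      if (e.ltype () != entry_type::symlink)
        continue;

      try
      {
        // Resolve the symlink, ignoring it if it is removed concurrently.
        //
        dir_path t (path_cast<dir_path> (followsymlink (d / e.path ())));

        // ... collect t ...
      }
      catch (const system_error&) {}
    }
  }
  catch (const system_error&) {} // Directory vanished; collect what we can.
}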
- // vector<pair<string, version>> toolchains; - - if (!archived) { using query = query<toolchain>; @@ -515,49 +753,73 @@ handle (request& rq, response& rs) "ORDER BY" + query::build::id.toolchain_name + order_by_version_desc (query::build::id.toolchain_version, false /* first */))) + { toolchains.emplace_back (move (t.name), move (t.version)); + } } - // Collect configuration names and unbuilt configurations, skipping those - // that are hidden or excluded by the package. + // Compose the configuration filtering sub-query and collect unbuilt + // target configurations, skipping those that are hidden or excluded by + // the package configurations. // - cstrings conf_names; + using query = query<build>; + + query sq (false); set<config_toolchain> unbuilt_configs; - for (const auto& c: *build_conf_map_) + for (const package_build_config& pc: pkg->build_configs) { - const build_config& cfg (*c.second); - - if (belongs (cfg, "all") && !exclude (cfg)) + for (const auto& bc: *target_conf_map_) { - conf_names.push_back (c.first); + const build_target_config& tc (*bc.second); - // Note: we will erase built configurations from the unbuilt - // configurations set later (see below). - // - for (const auto& t: toolchains) - unbuilt_configs.insert ({cfg.name, t.first, t.second}); + if (!belongs (tc, "hidden") && !exclude (pc, tc)) + { + const build_target_config_id& id (bc.first); + + sq = sq || (query::id.target == id.target && + query::id.target_config_name == id.config && + query::id.package_config_name == pc.name); + + // Note: we will erase built configurations from the unbuilt + // configurations set later (see below). + // + for (const auto& t: toolchains) + unbuilt_configs.insert (config_toolchain {tc.target, + tc.name, + pc.name, + t.first, + t.second}); + } } } - // Print the package built configurations in the time-descending order. + // Let's not print the package configuration row if the default + // configuration is the only one. // - using query = query<build>; + bool ppc (pkg->build_configs.size () != 1); // Note: can't be empty. + // Print the package built configurations in the time-descending order. + // for (auto& b: build_db_->query<build> ( - (query::id.package == pkg->id && - - query::id.configuration.in_range (conf_names.begin (), - conf_names.end ())) + - + (query::id.package == pkg->id && query::state != "queued" && sq) + "ORDER BY" + query::timestamp + "DESC")) { string ts (butl::to_string (b.timestamp, "%Y-%m-%d %H:%M:%S %Z", true /* special */, true /* local */) + - " (" + butl::to_string (now - b.timestamp, false) + " ago)"); + " (" + butl::to_string (now - b.timestamp, false) + " ago"); + + if (tn->archived) + ts += ", archived"; + ts += ')'; + + // @@ Note that here we also load result logs which we don't need. + // Probably we should invent some table view to only load operation + // names and statuses. + // if (b.state == build_state::built) build_db_->load (b, b.results_section); @@ -566,19 +828,29 @@ handle (request& rq, response& rs) << TR_VALUE ("toolchain", b.toolchain_name + '-' + b.toolchain_version.string ()) - << TR_VALUE ("config", - b.configuration + " / " + b.target.string ()) - << TR_VALUE ("timestamp", ts) - << TR_BUILD_RESULT (b, host, root) + << TR_VALUE ("target", b.target.string ()) + << TR_VALUE ("tgt config", b.target_config_name); + + if (ppc) + s << TR_VALUE ("pkg config", b.package_config_name); + + s << TR_VALUE ("timestamp", ts); + + if (b.interactive) // Note: can only be present for the building state. 
+ s << TR_VALUE ("login", *b.interactive); + + s << TR_BUILD_RESULT (b, tn->archived, host, root) << ~TBODY << ~TABLE; // While at it, erase the built configuration from the unbuilt // configurations set. // - unbuilt_configs.erase ({b.id.configuration, - b.toolchain_name, - b.toolchain_version}); + unbuilt_configs.erase (config_toolchain {b.target, + b.target_config_name, + b.package_config_name, + b.toolchain_name, + b.toolchain_version}); } // Print the package unbuilt configurations with the following sort @@ -586,42 +858,57 @@ handle (request& rq, response& rs) // // 1: toolchain name // 2: toolchain version (descending) - // 3: configuration name + // 3: target + // 4: target configuration name + // 5: package configuration name // for (const auto& ct: unbuilt_configs) { - auto i (build_conf_map_->find (ct.configuration.c_str ())); - assert (i != build_conf_map_->end ()); - s << TABLE(CLASS="proplist build") << TBODY << TR_VALUE ("toolchain", ct.toolchain_name + '-' + ct.toolchain_version.string ()) - << TR_VALUE ("config", - ct.configuration + " / " + - i->second->target.string ()) - << TR_VALUE ("result", "unbuilt") + << TR_VALUE ("target", ct.target.string ()) + << TR_VALUE ("tgt config", ct.target_config); + + if (ppc) + s << TR_VALUE ("pkg config", ct.package_config); + + s << TR_VALUE ("result", "unbuilt") << ~TBODY << ~TABLE; } - // Print the package build exclusions that belong to the 'default' class. + // Print the package build exclusions that belong to the 'default' class, + // unless the package is built interactively (normally for a single + // configuration). // - for (const auto& c: *build_conf_) + if (!tn->interactive) { - string reason; - if (belongs (c, "default") && exclude (c, &reason)) + for (const package_build_config& pc: pkg->build_configs) { - s << TABLE(CLASS="proplist build") - << TBODY - << TR_VALUE ("config", c.name + " / " + c.target.string ()) - << TR_VALUE ("result", - !reason.empty () - ? "excluded (" + reason + ')' - : "excluded") - << ~TBODY - << ~TABLE; + for (const auto& tc: *target_conf_) + { + string reason; + if (belongs (tc, "default") && exclude (pc, tc, &reason)) + { + s << TABLE(CLASS="proplist build") + << TBODY + << TR_VALUE ("target", tc.target.string ()) + << TR_VALUE ("tgt config", tc.name); + + if (ppc) + s << TR_VALUE ("pkg config", pc.name); + + s << TR_VALUE ("result", + !reason.empty () + ? "excluded (" + reason + ')' + : "excluded") + << ~TBODY + << ~TABLE; + } + } } } @@ -630,19 +917,25 @@ handle (request& rq, response& rs) s << ~DIV; } - const string& ch (pkg->changes); - - if (!ch.empty ()) + if (const optional<typed_text>& c = pkg->changes) { const string id ("changes"); + const string what (title + " changes"); s << H3 << "Changes" << ~H3 << (full - ? PRE_TEXT (ch, id) - : PRE_TEXT (ch, + ? DIV_TEXT (*c, + false /* strip_title */, + id, + what, + error) + : DIV_TEXT (*c, + false /* strip_title */, options_->package_changes (), - url (!full, "changes"), - id)); + url (!full, id), + id, + what, + error)); } s << ~DIV diff --git a/mod/mod-packages.cxx b/mod/mod-packages.cxx index 65c7c5b..6026024 100644 --- a/mod/mod-packages.cxx +++ b/mod/mod-packages.cxx @@ -49,8 +49,8 @@ init (scanner& s) options_->root (dir_path ("/")); // Check that the database 'package' schema matches the current one. It's - // enough to perform the check in just a single module implementation (and we - // don't do in the dispatcher because it doesn't use the database). 
+  // enough to perform the check in just a single module implementation (and
+  // we don't do it in the dispatcher because it doesn't use the database).
   //
   // Note that the failure can be reported by each web server worker process.
   // While it could be tempting to move the check to the
@@ -136,8 +136,18 @@ handle (request& rq, response& rs)
     <<     DIV_HEADER (options_->logo (), options_->menu (), root, tenant)
     <<     DIV(ID="content");

+  // On the first page print the search page description, if specified.
+  //
+  if (page == 0)
+  {
+    const web::xhtml::fragment& desc (options_->search_description ());
+
+    if (!desc.empty ())
+      s << DIV(ID="search-description") << desc << ~DIV;
+  }
+
   // If the tenant is empty then we are in the global view and will display
-  // packages from all the tenants.
+  // packages from all the public tenants.
   //
   optional<string> tn;
   if (!tenant.empty ())
@@ -146,7 +156,7 @@ handle (request& rq, response& rs)
   session sn;
   transaction t (package_db_->begin ());

-  auto pkg_count (
+  size_t pkg_count (
     package_db_->query_value<latest_package_count> (
       search_param<latest_package_count> (squery, tn)));

@@ -167,11 +177,10 @@ handle (request& rq, response& rs)

     s << TABLE(CLASS="proplist package")
       <<   TBODY
-      <<     TR_NAME (p->name, equery, root, p->tenant)
+      <<     TR_NAME (p->name, root, p->tenant)
       <<     TR_SUMMARY (p->summary)
       <<     TR_LICENSE (p->license_alternatives)
-      <<     TR_DEPENDS (p->dependencies, root, p->tenant)
-      <<     TR_REQUIRES (p->requirements);
+      <<     TR_DEPENDS (p->dependencies, root, p->tenant);

     // In the global view mode add the tenant packages link. Note that the
     // global view (and the link) makes sense only in the multi-tenant mode.
diff --git a/mod/mod-repository-details.cxx b/mod/mod-repository-details.cxx
index 813b738..082903b 100644
--- a/mod/mod-repository-details.cxx
+++ b/mod/mod-repository-details.cxx
@@ -3,14 +3,12 @@

 #include <mod/mod-repository-details.hxx>

-#include <algorithm> // max()
-
 #include <libstudxml/serializer.hxx>

 #include <odb/database.hxx>
 #include <odb/transaction.hxx>

-#include <libbutl/timestamp.mxx> // to_string()
+#include <libbutl/timestamp.hxx> // to_string()

 #include <web/server/module.hxx>
 #include <web/server/mime-url-encoding.hxx>
@@ -100,7 +98,7 @@ handle (request& rq, response& rs)
     //
     string id (html_id (r.canonical_name));
     s << H1(ID=id)
-      <<   A(HREF="#" + web::mime_url_encode (id, false))
+      <<   A(HREF='#' + web::mime_url_encode (id, false))
       <<     r.display_name
       <<   ~A
       << ~H1;
diff --git a/mod/mod-repository-root.cxx b/mod/mod-repository-root.cxx
index 02d6c93..34b4007 100644
--- a/mod/mod-repository-root.cxx
+++ b/mod/mod-repository-root.cxx
@@ -8,7 +8,6 @@
 #include <cmark-gfm-core-extensions.h>

 #include <sstream>
-#include <algorithm> // find()

 #include <web/server/module.hxx>

@@ -17,6 +16,7 @@
 #include <mod/mod-ci.hxx>
 #include <mod/mod-submit.hxx>
+#include <mod/mod-upload.hxx>
 #include <mod/mod-builds.hxx>
 #include <mod/mod-packages.hxx>
 #include <mod/mod-build-log.hxx>
@@ -108,24 +108,42 @@ namespace brep
   //
   repository_root::
   repository_root ()
-      : packages_ (make_shared<packages> ()),
+      :
+        //
+        // Only create and populate the tenant service map in the exemplar,
+        // passing a reference to it to all the sub-handler exemplars. Note
+        // that we dispatch the tenant service callbacks to the exemplar
+        // without creating a new instance for each callback (thus the
+        // callbacks are const).
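To make the exemplar/handler distinction above concrete: the exemplar is the
first-created, fully-configured instance that owns long-lived state (such as
the tenant service map), while each request is served by a copy that merely
references that state. A schematic sketch of the convention (names are
illustrative, not the actual brep classes):

struct sub_handler
{
  explicit
  sub_handler (tenant_service_map& m): tenant_service_map_ (m) {}

  // Shallow copy (handling instance) if initialized and a deep copy
  // (exemplar) otherwise, mirroring the real sub-handler copy constructors.
  //
  sub_handler (const sub_handler& r, tenant_service_map& m)
      : tenant_service_map_ (m)
  {
    // ... conditionally share or clone r's configuration state ...
  }

  tenant_service_map& tenant_service_map_;
};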
+ // + tenant_service_map_ (make_shared<tenant_service_map> ()), + packages_ (make_shared<packages> ()), package_details_ (make_shared<package_details> ()), package_version_details_ (make_shared<package_version_details> ()), repository_details_ (make_shared<repository_details> ()), - build_task_ (make_shared<build_task> ()), - build_result_ (make_shared<build_result> ()), - build_force_ (make_shared<build_force> ()), + build_task_ (make_shared<build_task> (*tenant_service_map_)), + build_result_ (make_shared<build_result> (*tenant_service_map_)), + build_force_ (make_shared<build_force> (*tenant_service_map_)), build_log_ (make_shared<build_log> ()), builds_ (make_shared<builds> ()), build_configs_ (make_shared<build_configs> ()), submit_ (make_shared<submit> ()), - ci_ (make_shared<ci> ()) +#ifdef BREP_CI_TENANT_SERVICE + ci_ (make_shared<ci> (*tenant_service_map_)), +#else + ci_ (make_shared<ci> ()), +#endif + upload_ (make_shared<upload> ()) { } repository_root:: repository_root (const repository_root& r) : handler (r), + tenant_service_map_ ( + r.initialized_ + ? r.tenant_service_map_ + : make_shared<tenant_service_map> ()), // // Deep/shallow-copy sub-handlers depending on whether this is an // exemplar/handler. @@ -150,15 +168,15 @@ namespace brep build_task_ ( r.initialized_ ? r.build_task_ - : make_shared<build_task> (*r.build_task_)), + : make_shared<build_task> (*r.build_task_, *tenant_service_map_)), build_result_ ( r.initialized_ ? r.build_result_ - : make_shared<build_result> (*r.build_result_)), + : make_shared<build_result> (*r.build_result_, *tenant_service_map_)), build_force_ ( r.initialized_ ? r.build_force_ - : make_shared<build_force> (*r.build_force_)), + : make_shared<build_force> (*r.build_force_, *tenant_service_map_)), build_log_ ( r.initialized_ ? r.build_log_ @@ -178,7 +196,15 @@ namespace brep ci_ ( r.initialized_ ? r.ci_ +#ifdef BREP_CI_TENANT_SERVICE + : make_shared<ci> (*r.ci_, *tenant_service_map_)), +#else : make_shared<ci> (*r.ci_)), +#endif + upload_ ( + r.initialized_ + ? r.upload_ + : make_shared<upload> (*r.upload_)), options_ ( r.initialized_ ? r.options_ @@ -205,6 +231,7 @@ namespace brep append (r, build_configs_->options ()); append (r, submit_->options ()); append (r, ci_->options ()); + append (r, upload_->options ()); return r; } @@ -250,6 +277,7 @@ namespace brep sub_init (*build_configs_, "build_configs"); sub_init (*submit_, "submit"); sub_init (*ci_, "ci"); + sub_init (*upload_, "upload"); // Parse own configuration options. 
  //
@@ -445,6 +473,13 @@ namespace brep

        return handle ("ci", param);
      }
+      else if (func == "upload")
+      {
+        if (handler_ == nullptr)
+          handler_.reset (new upload (*upload_));
+
+        return handle ("upload", param);
+      }
      else
        return nullopt;
    };
diff --git a/mod/mod-repository-root.hxx b/mod/mod-repository-root.hxx
index 9e28797..aa60fda 100644
--- a/mod/mod-repository-root.hxx
+++ b/mod/mod-repository-root.hxx
@@ -9,6 +9,7 @@

 #include <mod/module.hxx>
 #include <mod/module-options.hxx>
+#include <mod/tenant-service.hxx>

 namespace brep
 {
@@ -24,6 +25,7 @@ namespace brep
  class build_configs;
  class submit;
  class ci;
+  class upload;

  class repository_root: public handler
  {
@@ -58,6 +60,8 @@ namespace brep
    version ();

  private:
+    shared_ptr<tenant_service_map> tenant_service_map_;
+
    shared_ptr<packages> packages_;
    shared_ptr<package_details> package_details_;
    shared_ptr<package_version_details> package_version_details_;
@@ -70,6 +74,7 @@ namespace brep
    shared_ptr<build_configs> build_configs_;
    shared_ptr<submit> submit_;
    shared_ptr<ci> ci_;
+    shared_ptr<upload> upload_;

    shared_ptr<options::repository_root> options_;

diff --git a/mod/mod-submit.cxx b/mod/mod-submit.cxx
index 9c93a36..5ee358a 100644
--- a/mod/mod-submit.cxx
+++ b/mod/mod-submit.cxx
@@ -5,14 +5,14 @@

 #include <ostream>

-#include <libbutl/sha256.mxx>
-#include <libbutl/sendmail.mxx>
-#include <libbutl/fdstream.mxx>
-#include <libbutl/timestamp.mxx>
-#include <libbutl/filesystem.mxx>
-#include <libbutl/process-io.mxx> // operator<<(ostream, process_args)
-#include <libbutl/manifest-types.mxx>
-#include <libbutl/manifest-serializer.mxx>
+#include <libbutl/sha256.hxx>
+#include <libbutl/sendmail.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/timestamp.hxx>
+#include <libbutl/filesystem.hxx>
+#include <libbutl/process-io.hxx> // operator<<(ostream, process_args)
+#include <libbutl/manifest-types.hxx>
+#include <libbutl/manifest-serializer.hxx>

 #include <web/server/module.hxx>

@@ -163,7 +163,7 @@ handle (request& rq, response& rs)
  if (!options_->submit_data_specified ())
    return respond_manifest (404, "submission disabled");

-  // Parse the request form data and verifying the submission size limit.
+  // Parse the request form data and verify the submission size limit.
  //
  // Note that if it is exceeded, then there are parameters and this is the
  // submission rather than the form request, and so we respond with the
@@ -292,8 +292,8 @@ handle (request& rq, response& rs)
    // However, using the abbreviated checksum can be helpful for
    // troubleshooting.
    //
-    td = dir_path (options_->submit_temp () /
-                   dir_path (path::traits_type::temp_name (ref)));
+    td = options_->submit_temp () /
+         dir_path (path::traits_type::temp_name (ref));

    // It's highly unlikely but still possible that the temporary directory
    // already exists. This can only happen due to the unclean web server
@@ -553,7 +553,7 @@ handle (request& rq, response& rs)

  // Run the submission handler, if specified, reading the result manifest
  // from its stdout and caching it as a name/value pair list for later use
-  // (forwarding to the client, sending via email, etc.). Otherwise, create
+  // (forwarding to the client, sending via email, etc). Otherwise, create an
  // implied result manifest.
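  //
  // For reference, such an implied result manifest could look along these
  // lines (the reference value here is hypothetical):
  //
  //   : 1
  //   status: 200
  //   message: package submission is queued
  //   reference: cf79feae4520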
  //
  status_code sc;
@@ -683,7 +683,7 @@ handle (request& rq, response& rs)
        sendmail sm (print_args,
                     2 /* stderr */,
                     options_->email (),
-                     "new package submission " + a.string () + " (" + ref + ")",
+                     "new package submission " + a.string () + " (" + ref + ')',
                     {options_->submit_email ()});

        // Write the submission request manifest.
diff --git a/mod/mod-upload.cxx b/mod/mod-upload.cxx
new file mode 100644
index 0000000..9f8b9de
--- /dev/null
+++ b/mod/mod-upload.cxx
@@ -0,0 +1,763 @@
+// file : mod/mod-upload.cxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#include <mod/mod-upload.hxx>
+
+#include <odb/database.hxx>
+#include <odb/transaction.hxx>
+
+#include <libbutl/uuid.hxx>
+#include <libbutl/base64.hxx>
+#include <libbutl/sha256.hxx>
+#include <libbutl/sendmail.hxx>
+#include <libbutl/fdstream.hxx>
+#include <libbutl/timestamp.hxx>
+#include <libbutl/filesystem.hxx>
+#include <libbutl/process-io.hxx> // operator<<(ostream, process_args)
+#include <libbutl/manifest-types.hxx>
+#include <libbutl/manifest-serializer.hxx>
+
+#include <web/server/module.hxx>
+
+#include <libbrep/build.hxx>
+#include <libbrep/build-odb.hxx>
+#include <libbrep/build-package.hxx>
+#include <libbrep/build-package-odb.hxx>
+
+#include <mod/module-options.hxx>
+#include <mod/external-handler.hxx>
+
+using namespace std;
+using namespace butl;
+using namespace brep::cli;
+using namespace odb::core;
+
+// While currently the user-defined copy constructor is not required (we don't
+// need to deep copy nullptr's), it is a good idea to keep the placeholder
+// ready for less trivial cases.
+//
+brep::upload::
+upload (const upload& r)
+    : build_result_module (r),
+      options_ (r.initialized_ ? r.options_ : nullptr)
+{
+}
+
+void brep::upload::
+init (scanner& s)
+{
+  HANDLER_DIAG;
+
+  options_ = make_shared<options::upload> (
+    s, unknown_mode::fail, unknown_mode::fail);
+
+  // Verify that the upload handling is set up properly, if configured.
+  //
+  for (const auto& ud: options_->upload_data ())
+  {
+    const string& t (ud.first);
+
+    if (t.empty ())
+      fail << "empty upload type in upload-data configuration option";
+
+    if (ud.second.relative ())
+      fail << t << " upload-data path '" << ud.second << "' is relative";
+
+    if (!dir_exists (ud.second))
+      fail << t << " upload-data directory '" << ud.second
+           << "' does not exist";
+
+    const map<string, path>& uh (options_->upload_handler ());
+    auto i (uh.find (t));
+
+    if (i != uh.end () && i->second.relative ())
+      fail << t << " upload-handler path '" << i->second << "' is relative";
+  }
+
+  if (options_->upload_data_specified ())
+  {
+    if (!options_->build_config_specified ())
+      fail << "upload functionality is enabled but package building "
+           << "functionality is disabled";
+
+    build_result_module::init (*options_, *options_);
+  }
+}
+
+bool brep::upload::
+handle (request& rq, response& rs)
+{
+  using brep::version; // Not to confuse with module::version.
+
+  using serializer = manifest_serializer;
+  using serialization = manifest_serialization;
+
+  HANDLER_DIAG;
+
+  // We will respond with the manifest to the upload protocol violations and
+  // with a plain text message on the internal errors. In the latter case we
+  // will always respond with the same neutral message for security reasons,
+  // logging the error details. Note that descriptions of exceptions caught by
+  // the web server are returned to the client (see web/module.hxx for
+  // details), and we want to avoid this when there is a danger of exposing
+  // sensitive data.
+  //
+  // Also we will pass through exceptions thrown by the underlying API, unless
+  // we need to handle them or add details for the description, in which case
+  // we will fall back to one of the above-mentioned response methods.
+  //
+  // Note that both respond_manifest() and respond_error() are normally called
+  // right before the end of the request handling. They both always return
+  // true to allow bailing out with a single line, for example:
+  //
+  // return respond_error (); // Request is handled with an error.
+  //
+  string request_id; // Will be set later.
+  auto respond_manifest = [&rs, &request_id] (status_code status,
+                                              const string& message) -> bool
+  {
+    serializer s (rs.content (status, "text/manifest;charset=utf-8"),
+                  "response");
+
+    s.next ("", "1"); // Start of manifest.
+    s.next ("status", to_string (status));
+    s.next ("message", message);
+
+    if (!request_id.empty ())
+      s.next ("reference", request_id);
+
+    s.next ("", ""); // End of manifest.
+    return true;
+  };
+
+  auto respond_error = [&rs] (status_code status = 500) -> bool
+  {
+    rs.content (status, "text/plain;charset=utf-8")
+      << "upload handling failed" << endl;
+
+    return true;
+  };
+
+  // Check if the upload functionality is enabled.
+  //
+  // Note that this is not an upload protocol violation but it feels right to
+  // respond with the manifest, to help the client a bit.
+  //
+  if (!options_->upload_data_specified ())
+    return respond_manifest (404, "upload disabled");
+
+  // Parse the request data and verify the upload size limit.
+  //
+  // Note that the size limit is upload type-specific. Thus, first, we need to
+  // determine the upload type which we expect to be specified in the URL as a
+  // value of the upload parameter.
+  //
+  string type;
+  dir_path dir;
+
+  try
+  {
+    name_value_scanner s (rq.parameters (0 /* limit */, true /* url_only */));
+
+    // We only expect the upload=<type> parameter in URL.
+    //
+    params::upload params (
+      params::upload (s, unknown_mode::fail, unknown_mode::fail));
+
+    type = move (params.type ());
+
+    if (type.empty ())
+      return respond_manifest (400, "upload type expected");
+
+    // Check if this upload type is enabled. While at it, cache the upload
+    // data directory path.
+    //
+    const map<string, dir_path>& ud (options_->upload_data ());
+    auto i (ud.find (type));
+
+    if (i == ud.end ())
+      return respond_manifest (404, type + " upload disabled");
+
+    dir = i->second;
+  }
+  catch (const cli::exception&)
+  {
+    return respond_manifest (400, "invalid parameter");
+  }
+
+  try
+  {
+    const map<string, size_t>& us (options_->upload_max_size ());
+    auto i (us.find (type));
+    rq.parameters (i != us.end () ? i->second : 10485760); // 10M by default.
+  }
+  catch (const invalid_request& e)
+  {
+    if (e.status == 413) // Payload too large?
+      return respond_manifest (e.status, type + " upload size exceeds limit");
+
+    throw;
+  }
+
+  // The request parameters are now parsed and the limit doesn't really matter.
+  //
+  const name_values& rps (rq.parameters (0 /* limit */));
+
+  // Verify the upload parameters we expect. The unknown ones will be
+  // serialized to the upload manifest.
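+  //
+  // For illustration, a typical upload request could carry parameters along
+  // these lines (all values here are hypothetical; the session and optional
+  // challenge values come from the task response):
+  //
+  //   upload:    bindist                               (part of the URL)
+  //   session:   <session id from the task response>
+  //   challenge: <answer to the private key challenge, if posed>
+  //   instance:  archive
+  //   archive:   libhello-1.2.3-bindist.tar.xz         (the uploaded file)
+  //   sha256sum: <64-character SHA256 checksum of the archive>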
+ // + params::upload params; + + try + { + name_value_scanner s (rps); + params = params::upload (s, unknown_mode::skip, unknown_mode::skip); + } + catch (const cli::exception&) + { + return respond_manifest (400, "invalid parameter"); + } + + const string& session (params.session ()); + const string& instance (params.instance ()); + const string& archive (params.archive ()); + const string& sha256sum (params.sha256sum ()); + + if (session.empty ()) + return respond_manifest (400, "upload session expected"); + + optional<vector<char>> challenge; + + if (params.challenge_specified ()) + try + { + challenge = base64_decode (params.challenge ()); + } + catch (const invalid_argument&) + { + return respond_manifest (400, "invalid challenge"); + } + + if (instance.empty ()) + return respond_manifest (400, "upload instance expected"); + + if (archive.empty ()) + return respond_manifest (400, "upload archive expected"); + + if (sha256sum.empty ()) + return respond_manifest (400, "upload archive checksum expected"); + + if (sha256sum.size () != 64) + return respond_manifest (400, "invalid upload archive checksum"); + + // Verify that unknown parameter values satisfy the requirements (contain + // only UTF-8 encoded graphic characters plus '\t', '\r', and '\n'). + // + // Actually, the expected ones must satisfy too, so check them as well. + // + string what; + for (const name_value& nv: rps) + { + if (nv.value && + !utf8 (*nv.value, what, codepoint_types::graphic, U"\n\r\t")) + return respond_manifest (400, + "invalid parameter " + nv.name + ": " + what); + } + + parse_session_result sess; + + try + { + sess = parse_session (session); + } + catch (const invalid_argument& e) + { + return respond_manifest (400, string ("invalid session: ") + e.what ()); + } + + // If the session expired (no such configuration, etc) then, similar to the + // build result module, we log this case with the warning severity and + // respond with manifest with the 200 status as if the session is valid (see + // the build result module for the reasoning). + // + auto warn_expired = [&session, &warn] (const string& d) + { + warn << "session '" << session << "' expired: " << d; + }; + + const build_id& id (sess.id); + + // Make sure the build configuration still exists. + // + const build_target_config* tc; + { + auto i (target_conf_map_->find ( + build_target_config_id {id.target, id.target_config_name})); + + if (i == target_conf_map_->end ()) + { + warn_expired ("no build configuration"); + return respond_manifest (200, type + " upload is queued"); + } + + tc = i->second; + } + + // Note that if the session authentication fails (probably due to the + // authentication settings change), then we log this case with the warning + // severity and respond with manifest with the 200 status as if the + // challenge is valid (see the build result module for the reasoning). 
+  //
+  shared_ptr<build> bld;
+  shared_ptr<build_package> pkg;
+  shared_ptr<build_repository> rep;
+  {
+    transaction t (build_db_->begin ());
+
+    package_build pb;
+    shared_ptr<build> b;
+    if (!build_db_->query_one<package_build> (
+          query<package_build>::build::id == id, pb))
+    {
+      warn_expired ("no package build");
+    }
+    else if ((b = move (pb.build))->state != build_state::building)
+    {
+      warn_expired ("package configuration state is " + to_string (b->state));
+    }
+    else if (b->timestamp != sess.timestamp)
+    {
+      warn_expired ("non-matching timestamp");
+    }
+    else if (authenticate_session (*options_, challenge, *b, session))
+    {
+      bld = move (b);
+      pkg = build_db_->load<build_package> (id.package);
+      rep = pkg->internal_repository.load ();
+    }
+
+    t.commit ();
+  }
+
+  // Note that from now on the result manifest we respond with will contain
+  // the reference value.
+  //
+  try
+  {
+    request_id = uuid::generate ().string ();
+  }
+  catch (const system_error& e)
+  {
+    error << "unable to generate request id: " << e;
+    return respond_error ();
+  }
+
+  if (bld == nullptr)
+    return respond_manifest (200, type + " upload is queued");
+
+  // Create the upload data directory.
+  //
+  dir_path dd (dir / dir_path (request_id));
+
+  try
+  {
+    // It's highly unlikely but still possible that the directory already
+    // exists. This can only happen if the generated uuid is not unique.
+    //
+    if (try_mkdir (dd) == mkdir_status::already_exists)
+      throw_generic_error (EEXIST);
+  }
+  catch (const system_error& e)
+  {
+    error << "unable to create directory '" << dd << "': " << e;
+    return respond_error ();
+  }
+
+  auto_rmdir ddr (dd);
+
+  // Save the package archive into the temporary directory and verify its
+  // checksum.
+  //
+  // Note that the archive file name can potentially contain a directory path
+  // in the POSIX form, so let's strip it if that's the case.
+  //
+  path a;
+  path af;
+
+  try
+  {
+    size_t n (archive.find_last_of ('/'));
+    a = path (n != string::npos ? string (archive, n + 1) : archive);
+    af = dd / a;
+  }
+  catch (const invalid_path&)
+  {
+    return respond_manifest (400, "invalid package archive name");
+  }
+
+  try
+  {
+    istream& is (rq.open_upload ("archive"));
+
+    // Note that istream::read() sets failbit if unable to read the requested
+    // number of bytes.
+    //
+    is.exceptions (istream::badbit);
+
+    sha256 sha;
+    char buf[8192];
+    ofdstream os (af, fdopen_mode::binary);
+
+    while (!eof (is))
+    {
+      is.read (buf, sizeof (buf));
+
+      if (size_t n = is.gcount ())
+      {
+        sha.append (buf, n);
+        os.write (buf, n);
+      }
+    }
+
+    os.close ();
+
+    // Respond with the unprocessable entity (422) code for the archive
+    // checksum mismatch.
+    //
+    if (sha.string () != sha256sum)
+      return respond_manifest (422, "upload archive checksum mismatch");
+  }
+  // Note that invalid_argument (thrown by the open_upload() function call)
+  // can mean either no archive upload or multiple archive uploads.
+  //
+  catch (const invalid_argument&)
+  {
+    return respond_manifest (400, "archive upload expected");
+  }
+  catch (const io_error& e)
+  {
+    error << "unable to write package archive '" << af << "': " << e;
+    return respond_error ();
+  }
+
+  // Serialize the upload request manifest to a stream. On the serialization
+  // error respond to the client with the manifest containing the bad request
+  // (400) code and return false, on the stream error pass through the
+  // io_error exception, otherwise return true.
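+  //
+  // For illustration, the serialized request manifest could start along
+  // these lines (all values are hypothetical):
+  //
+  //   : 1
+  //   id: 746f57f8-6de1-4d72-97e7-e0d6a1f99d0d
+  //   session: ...
+  //   instance: archive
+  //   archive: libhello-1.2.3-bindist.tar.xz
+  //   sha256sum: ...
+  //   timestamp: 2023-05-09T09:31:47Z
+  //   name: libhello
+  //   version: 1.2.3
+  //   ...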
+  //
+  timestamp ts (system_clock::now ());
+
+  auto rqm = [&request_id,
+              &ts,
+              &rps,
+              &session,
+              &instance,
+              &a,
+              &sha256sum,
+              &id,
+              &bld,
+              &pkg,
+              &rep,
+              &tc,
+              &sess,
+              &respond_manifest,
+              this] (ostream& os, bool long_lines = false) -> bool
+  {
+    try
+    {
+      serializer s (os, "request", long_lines);
+
+      // Serialize the upload manifest header.
+      //
+      s.next ("", "1"); // Start of manifest.
+      s.next ("id", request_id);
+      s.next ("session", session);
+      s.next ("instance", instance);
+      s.next ("archive", a.string ());
+      s.next ("sha256sum", sha256sum);
+
+      s.next ("timestamp",
+              butl::to_string (ts,
+                               "%Y-%m-%dT%H:%M:%SZ",
+                               false /* special */,
+                               false /* local */));
+
+      s.next ("name", id.package.name.string ());
+      s.next ("version", pkg->version.string ());
+      s.next ("project", pkg->project.string ());
+      s.next ("target-config", tc->name);
+      s.next ("package-config", id.package_config_name);
+      s.next ("target", tc->target.string ());
+
+      if (!tenant.empty ())
+        s.next ("tenant", tenant);
+
+      s.next ("toolchain-name", id.toolchain_name);
+      s.next ("toolchain-version", sess.toolchain_version.string ());
+      s.next ("repository-name", rep->canonical_name);
+
+      s.next ("machine-name", bld->machine.name);
+      s.next ("machine-summary", bld->machine.summary);
+
+      // Serialize the request parameters.
+      //
+      // Note that the serializer constrains the parameter names (can't start
+      // with '#', can't contain ':' and the whitespaces, etc.).
+      //
+      for (const name_value& nv: rps)
+      {
+        // Note that the upload parameter is renamed to '_' by the root
+        // handler (see the request_proxy class for details).
+        //
+        const string& n (nv.name);
+        if (n != "_" &&
+            n != "session" &&
+            n != "challenge" &&
+            n != "instance" &&
+            n != "archive" &&
+            n != "sha256sum")
+          s.next (n, nv.value ? *nv.value : "");
+      }
+
+      s.next ("", ""); // End of manifest.
+      return true;
+    }
+    catch (const serialization& e)
+    {
+      respond_manifest (400, string ("invalid parameter: ") + e.what ());
+      return false;
+    }
+  };
+
+  // Serialize the upload request manifest to the upload directory.
+  //
+  path rqf (dd / "request.manifest");
+
+  try
+  {
+    ofdstream os (rqf);
+    bool r (rqm (os));
+    os.close ();
+
+    if (!r)
+      return true; // The manifest has already been sent to the client.
+  }
+  catch (const io_error& e)
+  {
+    error << "unable to write to '" << rqf << "': " << e;
+    return respond_error ();
+  }
+
+  // Given that the upload data is now successfully persisted we are no longer
+  // in charge of removing it, except for the cases when the upload
+  // handler terminates with an error (see below for details).
+  //
+  ddr.cancel ();
+
+  // If the handler terminates with non-zero exit status or specifies 5XX
+  // (HTTP server error) upload result manifest status value, then we stash
+  // the upload data directory for troubleshooting. Otherwise, if it's the 4XX
+  // (HTTP client error) status value, then we remove the directory.
+  //
+  auto stash_upload_dir = [&dd, error] ()
+  {
+    if (dir_exists (dd))
+    try
+    {
+      mvdir (dd, dir_path (dd + ".fail"));
+    }
+    catch (const system_error& e)
+    {
+      // Not much we can do here. Let's just log the issue and bail out
+      // leaving the directory in place.
+      //
+      error << "unable to rename directory '" << dd << "': " << e;
+    }
+  };
+
+  // Run the upload handler, if specified, reading the result manifest from
+  // its stdout and caching it as a name/value pair list for later use
+  // (forwarding to the client, sending via email, etc). Otherwise, create an
+  // implied result manifest.
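+  //
+  // For example, an upload handler for the bindist type could be configured
+  // along these lines in brep-module.conf (the paths and timeout are
+  // hypothetical):
+  //
+  //   upload-handler          bindist=/home/brep/install/bin/brep-upload-bindist
+  //   upload-handler-argument bindist=--directory
+  //   upload-handler-argument bindist=/var/bindist
+  //   upload-handler-timeout  bindist=600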
+  //
+  status_code sc;
+  vector<manifest_name_value> rvs;
+
+  const map<string, path>& uh (options_->upload_handler ());
+  auto hi (uh.find (type));
+
+  if (hi != uh.end ())
+  {
+    auto range (options_->upload_handler_argument ().equal_range (type));
+
+    strings args;
+    for (auto i (range.first); i != range.second; ++i)
+      args.push_back (i->second);
+
+    const map<string, size_t>& ht (options_->upload_handler_timeout ());
+    auto i (ht.find (type));
+
+    optional<external_handler::result_manifest> r (
+      external_handler::run (hi->second,
+                             args,
+                             dd,
+                             i != ht.end () ? i->second : 0,
+                             error,
+                             warn,
+                             verb_ ? &trace : nullptr));
+
+    if (!r)
+    {
+      stash_upload_dir ();
+      return respond_error (); // The diagnostics is already issued.
+    }
+
+    sc = r->status;
+    rvs = move (r->values);
+  }
+  else // Create the implied result manifest.
+  {
+    sc = 200;
+
+    auto add = [&rvs] (string n, string v)
+    {
+      manifest_name_value nv {
+        move (n), move (v),
+        0 /* name_line */, 0 /* name_column */,
+        0 /* value_line */, 0 /* value_column */,
+        0 /* start_pos */, 0 /* colon_pos */, 0 /* end_pos */};
+
+      rvs.emplace_back (move (nv));
+    };
+
+    add ("status", "200");
+    add ("message", type + " upload is queued");
+    add ("reference", request_id);
+  }
+
+  assert (!rvs.empty ()); // Produced by the handler or is implied.
+
+  // Serialize the upload result manifest to a stream. On the serialization
+  // error log the error description and return false, on the stream error
+  // pass through the io_error exception, otherwise return true.
+  //
+  auto rsm = [&rvs,
+              &error,
+              &request_id,
+              &type] (ostream& os, bool long_lines = false) -> bool
+  {
+    try
+    {
+      serializer s (os, "result", long_lines);
+      serialize_manifest (s, rvs);
+      return true;
+    }
+    catch (const serialization& e)
+    {
+      error << "ref " << request_id << ": unable to serialize " << type
+            << " upload handler's output: " << e;
+      return false;
+    }
+  };
+
+  // If the upload data directory still exists then perform an appropriate
+  // action on it, depending on the upload result status. Note that the
+  // handler could move or remove the directory.
+  //
+  if (dir_exists (dd))
+  {
+    // Remove the directory if a client error is detected.
+    //
+    if (sc >= 400 && sc < 500)
+    {
+      rmdir_r (dd);
+    }
+    //
+    // Otherwise, save the result manifest into the directory. Also stash the
+    // directory for troubleshooting in case of a server error.
+    //
+    else
+    {
+      path rsf (dd / "result.manifest");
+
+      try
+      {
+        ofdstream os (rsf);
+
+        // Not being able to stash the result manifest is not a reason to
+        // claim the upload failed. The error is logged nevertheless.
+        //
+        rsm (os);
+
+        os.close ();
+      }
+      catch (const io_error& e)
+      {
+        // Not fatal (see above).
+        //
+        error << "unable to write to '" << rsf << "': " << e;
+      }
+
+      if (sc >= 500 && sc < 600)
+        stash_upload_dir ();
+    }
+  }
+
+  // Send email, if configured. Use the long lines manifest serialization mode
+  // for the convenience of copying/clicking URLs they contain.
+  //
+  // Note that we don't consider the email sending failure to be an upload
+  // failure as the upload data is successfully persisted and the handler is
+  // successfully executed, if configured. One can argue that email can be
+  // essential for the upload processing and missing it would result in the
+  // incomplete upload. In this case it's natural to assume that the web
+  // server error log is monitored and the email sending failure will be
+  // noticed.
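+  //
+  // For example, with the following brep-module.conf entry (the address is
+  // hypothetical), both manifests would be mailed for bindist uploads:
+  //
+  //   upload-email bindist=uploads@example.org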
+  //
+  const map<string, string>& ue (options_->upload_email ());
+  auto ei (ue.find (type));
+
+  if (ei != ue.end ())
+  try
+  {
+    // Redirect the diagnostics to the web server error log.
+    //
+    sendmail sm ([&trace, this] (const char* args[], size_t n)
+                 {
+                   l2 ([&]{trace << process_args {args, n};});
+                 },
+                 2 /* stderr */,
+                 options_->email (),
+                 type + " upload (" + request_id + ')',
+                 {ei->second});
+
+    // Write the upload request manifest.
+    //
+    bool r (rqm (sm.out, true /* long_lines */));
+    assert (r); // The serialization succeeded once, so can't fail now.
+
+    // Write the upload result manifest.
+    //
+    sm.out << "\n\n";
+
+    // We don't care about the result (see above).
+    //
+    rsm (sm.out, true /* long_lines */);
+
+    sm.out.close ();
+
+    if (!sm.wait ())
+      error << "sendmail " << *sm.exit;
+  }
+  // Handle process_error and io_error (both derive from system_error).
+  //
+  catch (const system_error& e)
+  {
+    error << "sendmail error: " << e;
+  }
+
+  if (!rsm (rs.content (sc, "text/manifest;charset=utf-8")))
+    return respond_error (); // The error description is already logged.
+
+  return true;
+}
diff --git a/mod/mod-upload.hxx b/mod/mod-upload.hxx
new file mode 100644
index 0000000..6cc723b
--- /dev/null
+++ b/mod/mod-upload.hxx
@@ -0,0 +1,41 @@
+// file : mod/mod-upload.hxx -*- C++ -*-
+// license : MIT; see accompanying LICENSE file
+
+#ifndef MOD_MOD_UPLOAD_HXX
+#define MOD_MOD_UPLOAD_HXX
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <mod/module-options.hxx>
+#include <mod/build-result-module.hxx>
+
+namespace brep
+{
+  class upload: public build_result_module
+  {
+  public:
+    upload () = default;
+
+    // Create a shallow copy (handling instance) if initialized and a deep
+    // copy (context exemplar) otherwise.
+    //
+    explicit
+    upload (const upload&);
+
+    virtual bool
+    handle (request&, response&);
+
+    virtual const cli::options&
+    cli_options () const {return options::upload::description ();}
+
+  private:
+    virtual void
+    init (cli::scanner&);
+
+  private:
+    shared_ptr<options::upload> options_;
+  };
+}
+
+#endif // MOD_MOD_UPLOAD_HXX
diff --git a/mod/module.cli b/mod/module.cli
index b59158a..a107ffe 100644
--- a/mod/module.cli
+++ b/mod/module.cli
@@ -1,7 +1,11 @@
 // file : mod/options.cli -*- C++ -*-
 // license : MIT; see accompanying LICENSE file

+include <map>;
+include <regex>;
+
 include <libbpkg/manifest.hxx>; // repository_location
+include <libbbot/manifest.hxx>; // interactive_mode

 include <web/xhtml/fragment.hxx>;

@@ -17,7 +21,7 @@ namespace brep
 {
   // Option groups.
   //
-  class handler
+  class repository_email
   {
     string email
     {
@@ -25,7 +29,10 @@ namespace brep
       "Repository email. This email is used for the \cb{From:} header in
       emails sent by \cb{brep} (for example, build failure notifications)."
    }
+  };

+  class repository_url
+  {
    string host
    {
      "<host>",
@@ -44,7 +51,29 @@ namespace brep
       Specify '\cb{/}' to use the web server root
       (\cb{http://example.org/})."
    }
+  };
+
+  class build_email_notification: repository_email, repository_url
+  {
+    std::map<string, build_email> build-toolchain-email
+    {
+      "<name>=<mode>",
+      "Enable or disable package build notification emails. The valid <mode>
+       values are \cb{none}, \cb{latest}, and \cb{all}. If \cb{all} is
+       specified for a toolchain name, then emails are sent according to the
+       \cb{build-*email} package manifest values when all versions of a
+       package are built with this toolchain.
If \cb{latest} is specified,
+       then for this toolchain name the emails are only sent for the latest
+       version of a package. If \cb{none} is specified, then no emails are
+       sent for this toolchain name. By default the \cb{latest} mode is
+       assumed. Repeat this option to enable/disable emails for multiple
+       toolchains. See \l{bpkg#manifest-package Package Manifest} for
+       details on \cb{build-*email} values."
+    }
+  };

+  class handler
+  {
    string tenant-name = "tenant"
    {
      "<name>",
@@ -102,14 +131,14 @@ namespace brep
    {
      "<user>",
      "Package database login user name. If not specified, then operating
-       system (login) name is used. See also \c{package-db-role}."
+       system (login) name is used. See also \cb{package-db-role}."
    }

    string package-db-role = "brep"
    {
      "<user>",
      "Package database execution user name. If not empty then the login
-       user will be switched (with \c{SET ROLE}) to this user prior to
+       user will be switched (with \cb{SET ROLE}) to this user prior to
      executing any statements. If not specified, then \cb{brep} is used."
    }

@@ -193,45 +222,96 @@ namespace brep
      be specified in seconds. Default is 10 minutes."
    }

-    size_t build-normal-rebuild-timeout = 86400
+    size_t build-soft-rebuild-timeout = 86400
    {
      "<seconds>",
-      "Time to wait before considering a package for a normal rebuild. Must
-       be specified in seconds. Default is 24 hours."
+      "Time to wait before considering a package for a soft rebuild (only to
+       be performed if the build environment or any of the package
+       dependencies have changed). Must be specified in seconds. The special
+       zero value disables soft rebuilds. Default is 24 hours."
    }

-    size_t build-alt-rebuild-timeout
+    size_t build-alt-soft-rebuild-timeout
    {
      "<seconds>",
-      "Alternative package rebuild timeout to use instead of the normal
-       rebuild timeout (see \cb{build-normal-rebuild-timeout} for details)
+      "Alternative package soft rebuild timeout to use instead of the soft
+       rebuild timeout (see \cb{build-soft-rebuild-timeout} for details)
      during the time interval specified with the
-       \cb{build-alt-rebuild-start} and \cb{build-alt-rebuild-stop} options.
-       Must be specified in seconds. Default is the time interval length."
+       \cb{build-alt-soft-rebuild-start} and
+       \cb{build-alt-soft-rebuild-stop} options. Must be specified in
+       seconds. Default is the time interval length plus
+       \c{(\b{build-soft-rebuild-timeout} - 24h)} if the soft rebuild timeout
+       is greater than 24 hours (thus the rebuild is only triggered within
+       the last 24 hours of the \cb{build-soft-rebuild-timeout} expiration)."
    }

-    duration build-alt-rebuild-start
+    duration build-alt-soft-rebuild-start
    {
      "<hours>:<minutes>",
-      "The start time of the alternative package rebuild timeout (see
-       \cb{build-alt-rebuild-timeout} for details). Must be specified as
-       a time of day in the local timezone. The \cb{build-alt-rebuild-start}
-       and \cb{build-alt-rebuild-stop} options must be either both specified
-       or absent. If unspecified, then no alternative rebuild timeout will
-       be used."
+      "The start time of the alternative package soft rebuild timeout (see
+       \cb{build-alt-soft-rebuild-timeout} for details). Must be specified
+       as a time of day in the local timezone. The
+       \cb{build-alt-soft-rebuild-start} and
+       \cb{build-alt-soft-rebuild-stop} options must be either both
+       specified or absent. If unspecified, then no alternative rebuild
+       timeout will be used."
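+
+      // A worked example of the build-alt-soft-rebuild-timeout default
+      // described above (values are hypothetical): with
+      // build-soft-rebuild-timeout=172800 (48 hours) and an alternative
+      // interval of 03:00-06:00 (3 hours), the default is
+      // 3h + (48h - 24h) = 27h, that is, 97200 seconds.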
} - duration build-alt-rebuild-stop + duration build-alt-soft-rebuild-stop { "<hours>:<minutes>", - "The end time of the alternative package rebuild timeout (see - \cb{build-alt-rebuild-timeout} for details). Must be specified as - a time of day in the local timezone. If it is less than the - \cb{build-alt-rebuild-start} option value, then the time interval - extends through midnight. The \cb{build-alt-rebuild-start} and - \cb{build-alt-rebuild-stop} options must be either both specified or - absent. If unspecified, then no alternative rebuild timeout will be - used." + "The end time of the alternative package soft rebuild timeout (see + \cb{build-alt-soft-rebuild-timeout} for details). Must be specified + as a time of day in the local timezone. If it is less than the + \cb{build-alt-soft-rebuild-start} option value, then the time + interval extends through midnight. The + \cb{build-alt-soft-rebuild-start} and + \cb{build-alt-soft-rebuild-stop} options must be either both + specified or absent. If unspecified, then no alternative rebuild + timeout will be used." + } + + size_t build-hard-rebuild-timeout = 604800 + { + "<seconds>", + "Time to wait before considering a package for a hard rebuild (to be + performed unconditionally). Must be specified in seconds. The special + zero value disables hard rebuilds. Default is 7 days." + } + + size_t build-alt-hard-rebuild-timeout + { + "<seconds>", + "Alternative package hard rebuild timeout. The semantics is the + same as for the \cb{build-alt-soft-rebuild-timeout} option but + for the \cb{build-hard-rebuild-timeout} option." + } + + duration build-alt-hard-rebuild-start + { + "<hours>:<minutes>", + "The start time of the alternative package hard rebuild timeout (see + \cb{build-alt-hard-rebuild-timeout} for details). The semantics is + the same as for the \cb{build-alt-soft-rebuild-start} option but + for the \cb{build-hard-rebuild-timeout} option." + } + + duration build-alt-hard-rebuild-stop + { + "<hours>:<minutes>", + "The end time of the alternative package hard rebuild timeout (see + \cb{build-alt-hard-rebuild-timeout} for details). The semantics is + the same as for the \cb{build-alt-soft-rebuild-stop} option but + for the \cb{build-hard-rebuild-timeout} option." + } + + size_t build-queued-timeout = 30 + { + "<seconds>", + "Time to wait before assuming the \cb{queued} notifications are + delivered for package CI requests submitted via third-party services + (GitHub, etc). During this time a package is not considered for a + build. Must be specified in seconds. Default is 30 seconds." } }; @@ -241,14 +321,14 @@ namespace brep { "<user>", "Build database login user name. If not specified, then operating - system (login) name is used. See also \c{build-db-role}." + system (login) name is used. See also \cb{build-db-role}." } string build-db-role = "brep" { "<user>", "Build database execution user name. If not empty then the login - user will be switched (with \c{SET ROLE}) to this user prior to + user will be switched (with \cb{SET ROLE}) to this user prior to executing any statements. If not specified, then \cb{brep} is used." } @@ -298,6 +378,82 @@ namespace brep } }; + class build_upload + { + std::map<string, dir_path> upload-data + { + "<type>=<dir>", + "The directory to save upload data to for the specified upload type. + If unspecified, the build artifacts upload functionality will be + disabled for this type. See \l{brep The \cb{build2} Repository + Interface Manual} for more information on build artifacts upload. 
+ + Note that the directory path must be absolute and the directory + itself must exist and have read, write, and execute permissions + granted to the user that runs the web server." + } + + std::map<string, size_t> upload-max-size + { + "<type>=<bytes>", + "The maximum size of the upload data accepted for the specified upload + type. Note that currently the entire upload request is read into + memory. The default is 10M." + } + + std::map<string, string> upload-email + { + "<type>=<email>", + "The build artifacts upload email. If specified, the upload request + and result manifests will be sent to this address. See \l{brep The + \cb{build2} Repository Interface Manual} for more information." + } + + std::map<string, path> upload-handler + { + "<type>=<path>", + "The handler program to be executed on build artifacts upload of the + specified type. The handler is executed as part of the HTTP request + and is passed additional arguments that can be specified with + \cb{upload-handler-argument} followed by the absolute path to the + upload directory (\cb{upload-data}). See \l{brep The \cb{build2} + Repository Interface Manual} for more information. Note that the + program path must be absolute." + } + + std::multimap<string, string> upload-handler-argument + { + "<type>=<arg>", + "Additional arguments to be passed to the upload handler program for + the specified upload type (see \cb{upload-handler} for details). + Repeat this option to specify multiple arguments." + } + + std::map<string, size_t> upload-handler-timeout + { + "<type>=<seconds>", + "The upload handler program timeout in seconds for the specified + upload type. If specified and the handler does not exit in the + allotted time, then it is killed and its termination is treated as + abnormal." + } + + std::multimap<string, string> upload-toolchain-exclude + { + "<type>=<name>", + "Disable upload of the specified type for the specified toolchain + name. Repeat this option to disable uploads for multiple toolchains." + } + + std::multimap<string, string> upload-repository-exclude + { + "<type>=<name>", + "Disable upload of the specified type for packages from the repository + with the specified canonical name. Repeat this option to disable + uploads for multiple repositories." + } + }; + class page { web::xhtml::fragment logo @@ -307,7 +463,7 @@ namespace brep edge. The value is treated as an XHTML5 fragment." } - vector<page_menu> menu; + vector<page_menu> menu { "<label=link>", "Web page menu. Each entry is displayed in the page header in the @@ -341,7 +497,7 @@ namespace brep The default is 500 (~ 80 characters * 6 lines)." } - uint16_t package-changes = 5000; + uint16_t package-changes = 5000 { "<len>", "Number of package changes characters to display in brief pages. The @@ -352,7 +508,7 @@ namespace brep // Handler options. // - class packages: search, package_db, page, handler + class packages: search, package_db, page, repository_url, handler { string search-title = "Packages" { @@ -360,24 +516,63 @@ namespace brep "Package search page title. It is placed inside XHTML5 <title> element." } + + web::xhtml::fragment search-description + { + "<xhtml>", + "Package search page description. If specified, it is displayed + before the search form on the first page only. The value is + treated as an XHTML5 fragment." 
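+
+      // For example (a hypothetical fragment):
+      //
+      //   search-description <div><p>A collection of open source C++
+      //   packages built and tested with build2.</p></div>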
+ } }; - class package_details: package, search, package_db, page, handler + class package_details: package, package_db, + search, + page, + repository_url, + handler { }; class package_version_details: package, package_db, build, build_db, page, + repository_url, handler { + dir_path bindist-root + { + "<dir>", + "The root directory where the uploaded binary distribution packages + are saved to under the following directory hierarchy: + + \ + [<tenant>/]<distribution>/<os-release>/<project>/<package>/<version>/<package-config> + \ + + The package configuration directory symlinks that match these paths + are mapped to web URLs based on the \cb{bindist-url} value and + displayed on the package version details page. If this option is + specified, then \cb{bindist-url} must be specified as well." + } + + string bindist-url + { + "<url>", + "The root URL of the directory specified with the \cb{bindist-root} + option. This option must be specified if \cb{bindist-root} is + specified." + } }; - class repository_details: package_db, page, handler + class repository_details: package_db, page, repository_url, handler { }; - class build_task: build, build_db, handler + class build_task: build, build_db, + build_upload, + build_email_notification, + handler { size_t build-task-request-max-size = 102400 { @@ -394,11 +589,35 @@ namespace brep "Time to wait before considering the expected task result lost. Must be specified in seconds. The default is 3 hours." } + + vector<pair<std::regex, string>> build-interactive-login + { + "</regex/replacement/>", + "Regular expressions for transforming the interactive build login + information, for example, into the actual command that can be used + by the user. The regular expressions are matched against the + \"<agent>\ <interactive-login>\" string containing the respective + task request manifest values. The first matching expression is used + for the transformation. If no expression matches, then the task + request is considered invalid, unless no expressions are specified. + Repeat this option to specify multiple expressions." + } + + build_order build-package-order = build_order::stable + { + "<order>", + "Order in which packages are considered for build. The valid <order> + values are \cb{stable} and \cb{random}. If not specified, then + \cb{stable} is assumed. Note that interactive builds are always + preferred." + } }; - class build_result: build, package_db, build_db, handler + class build_result: build, build_db, + build_email_notification, + handler { - size_t build-result-request-max-size = 10240000 + size_t build-result-request-max-size = 10485760 { "<bytes>", "The maximum size of the build result manifest accepted. 
Note that the @@ -408,7 +627,7 @@ namespace brep } }; - class build_log: build, build_db, handler + class build_log: build, build_db, repository_url, handler { }; @@ -416,7 +635,7 @@ namespace brep { }; - class builds: build, build_db, page, handler + class builds: build, build_db, page, repository_url, handler { uint16_t build-page-entries = 20 { @@ -431,7 +650,7 @@ namespace brep } }; - class build_configs: build, page, handler + class build_configs: build, page, repository_url, handler { uint16_t build-config-page-entries = 20 { @@ -446,7 +665,7 @@ namespace brep } }; - class submit: page, handler + class submit: page, repository_email, repository_url, handler { dir_path submit-data { @@ -526,7 +745,7 @@ namespace brep } }; - class ci: page, handler + class ci_start: repository_email { dir_path ci-data { @@ -541,15 +760,6 @@ namespace brep granted to the user that runs the web server." } - path ci-form - { - "<file>", - "The package CI form fragment. If specified, then its contents are - treated as an XHTML5 fragment that is inserted into the <body> - element of the CI page. If unspecified, then no CI page will be - displayed. Note that the file path must be absolute." - } - string ci-email { "<email>", @@ -586,14 +796,44 @@ namespace brep } }; - class repository_root: handler + class ci_cancel + { + }; + + class ci: ci_start, page, repository_url, handler + { + // Classic CI-specific options. + // + + path ci-form + { + "<file>", + "The package CI form fragment. If specified, then its contents are + treated as an XHTML5 fragment that is inserted into the <body> + element of the CI page. If unspecified, then no CI page will be + displayed. Note that the file path must be absolute." + } + }; + + class ci_github: ci_start, ci_cancel, build_db, handler + { + // GitHub CI-specific options (e.g., request timeout when invoking + // GitHub APIs). + // + }; + + class upload: build, build_db, build_upload, repository_email, handler + { + }; + + class repository_root: repository_url, handler { string root-global-view = "packages" { "<service>", "The default view to display for the global repository root. The - <service> argument is one of the supported services (\c{packages}, - \c{builds}, \c{submit}, \c{ci}, etc). The default service is + <service> argument is one of the supported services (\cb{packages}, + \cb{builds}, \cb{submit}, \cb{ci}, etc). The default service is packages." } @@ -601,8 +841,8 @@ namespace brep { "<service>", "The default view to display for the tenant repository root. The - <service> argument is one of the supported services (\c{packages}, - \c{builds}, \c{submit}, \c{ci}, etc). The default service is + <service> argument is one of the supported services (\cb{packages}, + \cb{builds}, \cb{submit}, \cb{ci}, etc). The default service is packages." } }; @@ -659,9 +899,14 @@ namespace brep class build_task { - // Package repository canonical name (note: including pkg: type). + // Only consider packages from repositories with these canonical names + // (note: including pkg: type). // vector<string> repository | r; + + // Only consider tenants with this interactive build mode. + // + bbot::interactive_mode interactive = bbot::interactive_mode::both; }; class build_result @@ -694,9 +939,17 @@ namespace brep // string version | pv; + // Package build target. + // + string target | tg; + + // Target build configuration. + // + string target_config | tc; + // Package build configuration. // - string configuration | cf; + string package_config | pc; // Toolchain name. 
// @@ -730,13 +983,10 @@ namespace brep // // https://cppget.org/?builds=bbot // - // To support the already distributed URLs the name_legacy (pn) parameter - // overrides the name (builds) parameter, if present. Note that the - // builds parameter is renamed to '_' by the root handler (see the - // request_proxy class for details). + // Note that the builds parameter is renamed to '_' by the root handler + // (see the request_proxy class for details). // string name | _; - string name_legacy | pn; // Package version. If empty or *, then no version constraint is applied. // Otherwise the build package version must match the value exactly. @@ -747,22 +997,22 @@ namespace brep // toolchain constraint is applied. Otherwise the build toolchain name // and version must match the value exactly. // - string toolchain | tc = "*"; + string toolchain | th = "*"; - // Package build configuration name wildcard. An empty value is treated - // the same way as *. + // Package build target wildcard. An empty value is treated the same way + // as *. // - string configuration | cf; + string target | tg; - // Package build machine name wildcard. An empty value is treated the - // same way as *. + // Package build target configuration name wildcard. An empty value is + // treated the same way as *. // - string machine | mn; + string target_config | tc; - // Package build target wildcard. An empty value is treated the same way - // as *. + // Package build package configuration name wildcard. An empty value is + // treated the same way as *. // - string target | tg; + string package_config | pc; // Package build result. If *, then no build result constraint is // applied. Otherwise the value is supposed to be the one of the @@ -775,10 +1025,13 @@ namespace brep class build_configs { + // By default, display all build configurations except those which + // belong to the 'hidden' class. + // // Note that the build-configs parameter is renamed to '_' by the root // handler (see the request_proxy class for details). // - string class_name | _ = "all"; + string class_name | _; // Display build configurations list starting from this page. // @@ -837,9 +1090,53 @@ namespace brep // string overrides; + // Interactive build execution breakpoint. + // + string interactive; + // Submission simulation outcome. // string simulate; }; + + // Parameters other than challenge must be all present. + // + // Note also that besides these parameters there can be others. We don't + // recognize their semantics and just save them to the upload request + // manifest. + // + class upload + { + // Upload type. + // + // Note that the upload parameter is renamed to '_' by the root handler + // (see the request_proxy class for details). + // + string type | _; + + // Session id as returned by brep in the task response. + // + string session; + + // Answer to the private key challenge as posed by brep in the task + // response. It must be present only if the challenge value was present + // in the task response. + // + string challenge; + + // Upload instance name. + // + string instance; + + // Package archive file name. Must be <input type="file"/>. + // + // Note that it can potentially be not just a name but a file path. + // + string archive; + + // Package archive file SHA256 checksum. 
+      //
+      string sha256sum;
+    };
+  }
+}
diff --git a/mod/module.cxx b/mod/module.cxx
index 06799d7..c8d0595 100644
--- a/mod/module.cxx
+++ b/mod/module.cxx
@@ -241,23 +241,46 @@ namespace brep
    initialized_ = m.initialized_;
  }

-// For function func declared like this:
-// using B = std::string (*)(int);
-// using A = B (*)(int,int);
-// A func(B (*)(char),B (*)(wchar_t));
-// __PRETTY_FUNCTION__ looks like this:
-// virtual std::string (* (* brep::search::func(std::string (* (*)(char))(int)
-// ,std::string (* (*)(wchar_t))(int)) const)(int, int))(int)
-//
+  // Here are examples of __PRETTY_FUNCTION__ for some function declarations:
+  //
+  // 1) virtual bool brep::search::handle (web::request&, web::response&);
+  //
+  //    virtual bool brep::search::handle(web::request&, web::response&)
+  //
+  // 2) using B = std::string (*) (int);
+  //    virtual B brep::search::func ();
+  //
+  //    virtual std::string (* brep::search::func())(int)
+  //
+  // 3) using B = std::string (*) (int);
+  //    using A = B (*) (int,int);
+  //    virtual A brep::search::func (B (*) (char), B (*) (wchar_t));
+  //
+  //    virtual std::string (* (* brep::search::func(std::string (* (*)(char))(int), std::string (* (*)(wchar_t))(int)))(int, int))(int)
+  //
+  // 4) using X = std::function<butl::optional<std::string> (int)> (*) (std::function<butl::optional<std::string> (long)>);
+  //    X brep::search::func (std::function<butl::optional<std::string> (char)> (*) (std::function<butl::optional<std::string> (wchar_t)>));
+  //
+  //    std::function<std::optional<std::__cxx11::basic_string<char> >(int)> (* brep::search::func(std::function<std::optional<std::__cxx11::basic_string<char> >(char)> (*)(std::function<std::optional<std::__cxx11::basic_string<char> >(wchar_t)>)))(std::function<std::optional<std::__cxx11::basic_string<char> >(long int)>)
+  //
+  // 5) using X = std::function<butl::optional<std::string> (int)> (*) (std::function<butl::optional<std::string> (long)>);
+  //    using Y = X (*) (int);
+  //    Y brep::search::func (const char*);
+  //
+  //    std::function<std::optional<std::__cxx11::basic_string<char> >(int)> (* (* brep::search::func(const char*))(int))(std::function<std::optional<std::__cxx11::basic_string<char> >(long int)>)
+  //
  string handler::
  func_name (const char* pretty_name)
  {
-    const char* e (strchr (pretty_name, ')'));
+    // Position e at the last ')' character, which ends either the function's
+    // argument list or the argument list of the returned function type.
+    //
+    const char* e (strrchr (pretty_name, ')'));

    if (e && e > pretty_name)
    {
-      // Position e at last matching '(' which is the beginning of the
-      // argument list..
+      // Position e at the matching '(' character which is the beginning of
+      // the mentioned argument list.
      //
      size_t d (1);

@@ -273,11 +296,15 @@ namespace brep

      if (!d && e > pretty_name)
      {
-        // Position e at the character following the function name.
+        // Position e at the character which follows the function name.
        //
-        while (e > pretty_name &&
-               (*e != '(' || *(e - 1) == ' ' || *(e - 1) == ')'))
-          --e;
+        // Specifically, go further to the left and stop at the '(' character
+        // which is preceded by a character other than ' ', ')', or '>'.
+        //
+        for (char c;
+             e > pretty_name &&
+             !(*e == '(' && (c = *(e - 1)) != ' ' && c != ')' && c != '>');
+             --e) ;

        if (e > pretty_name)
        {
@@ -406,4 +433,10 @@ namespace brep
    else
      throw cli::eos_reached ();
  }
+
+  size_t handler::name_value_scanner::
+  position ()
+  {
+    return (i_ - name_values_.begin ()) * 2 + (name_ ?
0 : 1);
+  }
 }
diff --git a/mod/module.hxx b/mod/module.hxx
index b3ed67b..f3e062e 100644
--- a/mod/module.hxx
+++ b/mod/module.hxx
@@ -135,6 +135,9 @@ namespace brep
      virtual void
      skip ();

+      virtual std::size_t
+      position ();
+
    private:
      const name_values& name_values_;
      name_values::const_iterator i_;
@@ -191,7 +194,7 @@ namespace brep
    log* log_ {nullptr}; // Diagnostics backend provided by the web server.

  private:
-    // Extract function name from a __PRETTY_FUNCTION__.
+    // Extract the fully-qualified function name from a __PRETTY_FUNCTION__.
    // Throw invalid_argument if unable to parse.
    //
    static string
diff --git a/mod/options-types.hxx b/mod/options-types.hxx
index 4aa573f..f2b059b 100644
--- a/mod/options-types.hxx
+++ b/mod/options-types.hxx
@@ -25,6 +25,19 @@ namespace brep
    page_menu () = default;
    page_menu (string b, string l): label (move (b)), link (move (l)) {}
  };
+
+  enum class build_order
+  {
+    stable,
+    random
+  };
+
+  enum class build_email
+  {
+    none,
+    latest, // Only send emails for the latest package versions.
+    all
+  };
 }

 #endif // MOD_OPTIONS_TYPES_HXX
diff --git a/mod/page.cxx b/mod/page.cxx
index 1e317f0..bc2e42d 100644
--- a/mod/page.cxx
+++ b/mod/page.cxx
@@ -7,10 +7,10 @@
 #include <cmark-gfm-extension_api.h>

 #include <set>
-#include <ios>       // hex, uppercase, right
+#include <ios>      // hex, uppercase, right
 #include <sstream>
-#include <iomanip>   // setw(), setfill()
-#include <algorithm> // min(), find()
+#include <iomanip>  // setw(), setfill()
+#include <iterator> // back_inserter()

 #include <libstudxml/serializer.hxx>

@@ -36,6 +36,20 @@ using namespace web::xhtml;
 //
 namespace brep
 {
+  static inline string
+  label_to_class (const string& label)
+  {
+    if (label.find (' ') == string::npos)
+      return label;
+
+    string r;
+    transform (label.begin (), label.end (),
+               back_inserter (r),
+               [] (char c) {return c != ' ' ? c : '-';});
+
+    return r;
+  }
+
  // CSS_LINKS
  //
  static const dir_path css_path ("@");
@@ -123,9 +137,17 @@ namespace brep
  void DIV_COUNTER::
  operator() (serializer& s) const
  {
-    s << DIV(ID="count")
-      <<   count_ << " "
-      <<   (count_ % 10 == 1 && count_ % 100 != 11 ? singular_ : plural_)
+    s << DIV(ID="count");
+
+    if (count_)
+      s << *count_;
+    else
+      s << '?';
+
+    s << ' '
+      << (count_ && *count_ % 10 == 1 && *count_ % 100 != 11
+          ? singular_
+          : plural_)
      << ~DIV;
  }

@@ -134,7 +156,8 @@ namespace brep
  void TR_VALUE::
  operator() (serializer& s) const
  {
-    s << TR(CLASS=label_)
+    string c (label_to_class (label_));
+    s << TR(CLASS=c)
      << TH << label_ << ~TH
      << TD << SPAN(CLASS="value") << value_ << ~SPAN << ~TD
      << ~TR;
@@ -145,7 +168,8 @@ namespace brep
  void TR_INPUT::
  operator() (serializer& s) const
  {
-    s << TR(CLASS=label_)
+    string c (label_to_class (label_));
+    s << TR(CLASS=c)
      << TH << label_ << ~TH
      << TD
      <<   INPUT(TYPE="text", NAME=name_);
@@ -169,7 +193,8 @@ namespace brep
  void TR_SELECT::
  operator() (serializer& s) const
  {
-    s << TR(CLASS=label_)
+    string c (label_to_class (label_));
+    s << TR(CLASS=c)
      << TH << label_ << ~TH
      << TD
      <<   SELECT(NAME=name_);
@@ -220,15 +245,9 @@ namespace brep
      <<     A
      <<     HREF
      <<       tenant_dir (root_, tenant_) /
-               path (mime_url_encode (name_.string (), false));
-
-    // Propagate search criteria to the package details page.
-    //
-    if (!query_.empty ())
-      s << "?q=" << query_;
-
-    s <<     ~HREF
-      <<       name_
+               path (mime_url_encode (name_.string (), false))
+      <<     ~HREF
+      <<       name_
      <<     ~A
      <<   ~SPAN
      << ~TD
      << ~TR;
@@ -416,47 +435,75 @@ namespace brep
    if (!dependencies_.empty ())
      s << "; ";

-    for (const auto& d: dependencies_)
+    for (const dependency_alternatives& das: dependencies_)
    {
-      if (&d != &dependencies_[0])
+      if (&das != &dependencies_[0])
        s << ", ";

-      if (d.conditional)
-        s << "?";
-
-      if (d.buildtime)
+      if (das.buildtime)
        s << "*";

-      // Suppress package name duplicates.
+      // Suppress dependency alternative duplicates, like in
+      // `{foo bar} < 1.1 | {foo bar} > 1.5`.
+      //
+      // Return the dependency package names as a space-separated list.
      //
-      set<package_name> names;
-      for (const auto& da: d)
-        names.emplace (da.name);
+      auto deps_list = [] (const dependency_alternative& da)
+      {
+        string r;
+        for (const dependency& d: da)
+        {
+          if (!r.empty ())
+            r += ' ';
+
+          r += d.name.string ();
+        }

-      bool mult (names.size () > 1);
+        return r;
+      };
+
+      set<string> alternatives;
+      for (const dependency_alternative& da: das)
+        alternatives.insert (deps_list (da));
+
+      // Note that we may end up with a single package name in parentheses,
+      // if its duplicates were suppressed. This, however, may be helpful,
+      // indicating that there are some alternatives for the package.
+      //
+      bool mult (das.size () > 1 ||
+                 (das.size () == 1 && das[0].size () > 1));

      if (mult)
-        s << "(";
+        s << '(';

      bool first (true);
-      for (const auto& da: d)
+      for (const dependency_alternative& da: das)
      {
-        const package_name& n (da.name);
-        if (names.find (n) != names.end ())
-        {
-          names.erase (n);
+        auto i (alternatives.find (deps_list (da)));

-          if (first)
-            first = false;
-          else
-            s << " | ";
+        if (i == alternatives.end ())
+          continue;
+
+        alternatives.erase (i);
+
+        if (!first)
+          s << " | ";
+        else
+          first = false;
+
+        for (const dependency& d: da)
+        {
+          if (&d != &da[0])
+            s << ' ';

          // Try to display the dependency as a link if it is resolved.
          // Otherwise display it as plain text.
          //
-          if (da.package != nullptr)
+          const package_name& n (d.name);
+
+          if (d.package != nullptr)
          {
-            shared_ptr<package> p (da.package.load ());
+            shared_ptr<package> p (d.package.load ());

            assert (p->internal () || !p->other_repositories.empty ());

            shared_ptr<repository> r (
@@ -479,10 +526,13 @@ namespace brep
          else
            s << n;
        }
+
+        if (da.enable)
+          s << " ?";
      }

      if (mult)
-        s << ")";
+        s << ')';
    }

    s << ~SPAN
@@ -507,25 +557,25 @@ namespace brep
      <<   SPAN(CLASS="value")
      <<     requirements_.size () << "; ";

-    for (const auto& r: requirements_)
+    for (const auto& ras: requirements_)
    {
-      if (&r != &requirements_[0])
+      if (&ras != &requirements_[0])
        s << ", ";

-      if (r.conditional)
-        s << "?";
+      if (ras.buildtime)
+        s << '*';

-      if (r.buildtime)
-        s << "*";
-
-      if (r.empty ())
+      // If this is a simple requirement without id, then print the first
+      // word of the comment.
+      //
+      if (ras.simple () && ras[0][0].empty ())
      {
-        // If there is no requirement alternatives specified, then print the
-        // comment first word.
-        //
-        const auto& c (r.comment);
+        const auto& c (ras.comment);
        if (!c.empty ())
        {
+          if (ras[0].enable)
+            s << "? 
"; + auto n (c.find (' ')); s << string (c, 0, n); @@ -535,21 +585,31 @@ namespace brep } else { - bool mult (r.size () > 1); + bool mult (ras.size () > 1 || + (ras.size () == 1 && ras[0].size () > 1)); if (mult) - s << "("; + s << '('; - for (const auto& ra: r) + for (const auto& ra: ras) { - if (&ra != &r[0]) + if (&ra != &ras[0]) s << " | "; - s << ra; + for (const string& r: ra) + { + if (&r != &ra[0]) + s << ' '; + + s << r; + } + + if (ra.enable) + s << " ?"; } if (mult) - s << ")"; + s << ')'; } } @@ -563,7 +623,8 @@ namespace brep void TR_URL:: operator() (serializer& s) const { - s << TR(CLASS=label_) + string c (label_to_class (label_)); + s << TR(CLASS=c) << TH << label_ << ~TH << TD << SPAN(CLASS="value"); @@ -593,7 +654,8 @@ namespace brep void TR_EMAIL:: operator() (serializer& s) const { - s << TR(CLASS=label_) + string c (label_to_class (label_)); + s << TR(CLASS=c) << TH << label_ << ~TH << TD << SPAN(CLASS="value") @@ -643,32 +705,22 @@ namespace brep << A << HREF << tenant_dir (root_, tenant_) << "?about#" - << mime_url_encode (html_id (name_), false) + << mime_url_encode (html_id (location_.canonical_name ()), false) << ~HREF - << name_ + << location_ << ~A << ~SPAN << ~TD << ~TR; } - // TR_LOCATION - // - void TR_LOCATION:: - operator() (serializer& s) const - { - s << TR(CLASS="location") - << TH << "location" << ~TH - << TD << SPAN(CLASS="value") << location_ << ~SPAN << ~TD - << ~TR; - } - // TR_LINK // void TR_LINK:: operator() (serializer& s) const { - s << TR(CLASS=label_) + string c (label_to_class (label_)); + s << TR(CLASS=c) << TH << label_ << ~TH << TD << SPAN(CLASS="value") << A(HREF=url_) << text_ << ~A << ~SPAN @@ -697,8 +749,24 @@ namespace brep << TD << SPAN(CLASS="value"); + // Print the ' | ' separator if this is not the first item and reset the + // `first` flag to false otherwise. + // + bool first (true); + auto separate = [&s, &first] () + { + if (first) + first = false; + else + s << " | "; + }; + if (build_.state == build_state::building) - s << SPAN(CLASS="building") << "building" << ~SPAN << " | "; + { + separate (); + + s << SPAN(CLASS="building") << "building" << ~SPAN; + } else { // If no unsuccessful operation results available, then print the @@ -711,7 +779,10 @@ namespace brep if (build_.results.empty () || *build_.status == result_status::success) { assert (build_.status); - s << SPAN_BUILD_RESULT_STATUS (*build_.status) << " | "; + + separate (); + + s << SPAN_BUILD_RESULT_STATUS (*build_.status); } if (!build_.results.empty ()) @@ -719,6 +790,9 @@ namespace brep for (const auto& r: build_.results) { if (r.status != result_status::success) + { + separate (); + s << SPAN_BUILD_RESULT_STATUS (r.status) << " (" << A << HREF @@ -726,26 +800,33 @@ namespace brep << ~HREF << r.operation << ~A - << ") | "; + << ")"; + } } + separate (); + s << A << HREF << build_log_url (host_, root_, build_) << ~HREF << "log" - << ~A - << " | "; + << ~A; } } - if (build_.force == (build_.state == build_state::building - ? force_state::forcing - : force_state::forced)) - s << SPAN(CLASS="pending") << "pending" << ~SPAN; - else - s << A - << HREF << build_force_url (host_, root_, build_) << ~HREF - << "rebuild" - << ~A; + if (!archived_) + { + separate (); + + if (build_.force == (build_.state == build_state::building + ? 
force_state::forcing + : force_state::forced)) + s << SPAN(CLASS="pending") << "pending" << ~SPAN; + else + s << A + << HREF << build_force_url (host_, root_, build_) << ~HREF + << "rebuild" + << ~A; + } s << ~SPAN << ~TD @@ -873,14 +954,16 @@ namespace brep void DIV_TEXT:: operator() (serializer& s) const { - switch (type_) + const string& t (text_.text); + + switch (text_.type) { case text_type::plain: { // To keep things regular we wrap the preformatted text into <div>. // s << DIV(ID=id_, CLASS="plain"); - serialize_pre_text (s, text_, length_, url_, "" /* id */); + serialize_pre_text (s, t, length_, url_, "" /* id */); s << ~DIV; break; } @@ -900,9 +983,9 @@ namespace brep // calls to fail is the inability to allocate memory. Unfortunately, // instead of reporting the failure to the caller, the API issues // diagnostics to stderr and aborts the process. Let's decrease the - // probability of such an event by limiting the text size to 64K. + // probability of such an event by limiting the text size to 1M. // - if (text_.size () > 64 * 1024) + if (t.size () > 1024 * 1024) { print_error (what_ + " is too long"); return; @@ -914,37 +997,38 @@ namespace brep { // Parse Markdown into the AST. // + // Note that the footnotes extension needs to be enabled via the + // CMARK_OPT_FOOTNOTES flag rather than the + // cmark_parser_attach_syntax_extension() function call. + // unique_ptr<cmark_parser, void (*)(cmark_parser*)> parser ( - cmark_parser_new (CMARK_OPT_DEFAULT | CMARK_OPT_VALIDATE_UTF8), + cmark_parser_new (CMARK_OPT_DEFAULT | + CMARK_OPT_FOOTNOTES | + CMARK_OPT_VALIDATE_UTF8), [] (cmark_parser* p) {cmark_parser_free (p);}); // Enable GitHub extensions in the parser, if requested. // - if (type_ == text_type::github_mark) + if (text_.type == text_type::github_mark) { auto add = [&parser] (const char* ext) - { - cmark_syntax_extension* e ( - cmark_find_syntax_extension (ext)); + { + cmark_syntax_extension* e ( + cmark_find_syntax_extension (ext)); - // Built-in extension is only expected. - // - assert (e != nullptr); + // Built-in extension is only expected. + // + assert (e != nullptr); - cmark_parser_attach_syntax_extension (parser.get (), e); - }; + cmark_parser_attach_syntax_extension (parser.get (), e); + }; add ("table"); add ("strikethrough"); add ("autolink"); - - // Somehow feels unsafe (there are some nasty warnings when - // upstream's tasklist.c is compiled), so let's disable for now. - // - // add ("tasklist"); } - cmark_parser_feed (parser.get (), text_.c_str (), text_.size ()); + cmark_parser_feed (parser.get (), t.c_str (), t.size ()); unique_ptr<cmark_node, void (*)(cmark_node*)> doc ( cmark_parser_finish (parser.get ()), diff --git a/mod/page.hxx b/mod/page.hxx index cc9840e..7329e2d 100644 --- a/mod/page.hxx +++ b/mod/page.hxx @@ -82,21 +82,24 @@ namespace brep // Generate counter element. // - // It could be redunant to distinguish between singular and plural word forms - // if it wouldn't be so cheap in English, and phrase '1 Packages' wouldn't - // look that ugly. + // If the count argument is nullopt, then it is assumed that the count is + // unknown and the '?' character is printed instead of the number. + // + // Note that it could be redunant to distinguish between singular and plural + // word forms if it wouldn't be so cheap in English, and phrase '1 Packages' + // wouldn't look that ugly. 
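In passing, the page.cxx helper logic changed above is easy to check in isolation. The following standalone sketch (our own names, standard library only, not brep code) mirrors the label_to_class() space-to-dash mapping, the DIV_COUNTER singular/plural rule, and the alternative deduplication keyed on the space-separated dependency name list:

#include <set>
#include <string>
#include <vector>
#include <cassert>
#include <cstddef>

// Analog of label_to_class(): turn a label such as "doc url" into the
// usable CSS class name "doc-url".
//
static std::string
label_to_class (const std::string& l)
{
  std::string r (l);
  for (char& c: r)
    if (c == ' ')
      c = '-';
  return r;
}

// Analog of the DIV_COUNTER word form rule: singular iff the count ends
// in 1 but not in 11.
//
static const char*
form (std::size_t n, const char* singular, const char* plural)
{
  return n % 10 == 1 && n % 100 != 11 ? singular : plural;
}

int
main ()
{
  assert (label_to_class ("doc url") == "doc-url");

  assert (form (1,  "package", "packages") == std::string ("package"));
  assert (form (11, "package", "packages") == std::string ("packages"));
  assert (form (21, "package", "packages") == std::string ("package"));

  // Analog of the dependency alternative duplicate suppression: the
  // space-separated name list serves as the deduplication key, so
  // `{foo bar} < 1.1 | {foo bar} > 1.5` renders the group only once.
  //
  std::vector<std::vector<std::string>> das {{"foo", "bar"}, {"foo", "bar"}};

  std::set<std::string> alternatives;
  for (const auto& da: das)
  {
    std::string k;
    for (const auto& n: da)
    {
      if (!k.empty ())
        k += ' ';
      k += n;
    }
    alternatives.insert (k);
  }

  assert (alternatives.size () == 1); // Duplicates suppressed.
}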
// class DIV_COUNTER { public: - DIV_COUNTER (size_t c, const char* s, const char* p) + DIV_COUNTER (optional<size_t> c, const char* s, const char* p) : count_ (c), singular_ (s), plural_ (p) {} void operator() (xml::serializer&) const; private: - size_t count_; + optional<size_t> count_; const char* singular_; const char* plural_; }; @@ -193,24 +196,19 @@ namespace brep const string& tenant_; }; - // Generate package name element with an optional search criteria. The - // search string should be url-encoded, if specified. + // Generate package name element. // class TR_NAME { public: - TR_NAME (const package_name& n, - const string& q, - const dir_path& r, - const string& t) - : name_ (n), query_ (q), root_ (r), tenant_ (t) {} + TR_NAME (const package_name& n, const dir_path& r, const string& t) + : name_ (n), root_ (r), tenant_ (t) {} void operator() (xml::serializer&) const; private: const package_name& name_; - const string& query_; const dir_path& root_; const string& tenant_; }; @@ -424,32 +422,20 @@ namespace brep class TR_REPOSITORY { public: - TR_REPOSITORY (const string& n, const dir_path& r, const string& t) - : name_ (n), root_ (r), tenant_ (t) {} + TR_REPOSITORY (const repository_location& l, + const dir_path& r, + const string& t) + : location_ (l), root_ (r), tenant_ (t) {} void operator() (xml::serializer&) const; private: - const string& name_; + const repository_location& location_; const dir_path& root_; const string& tenant_; }; - // Generate repository location element. - // - class TR_LOCATION - { - public: - TR_LOCATION (const repository_location& l): location_ (l) {} - - void - operator() (xml::serializer&) const; - - private: - const repository_location& location_; - }; - // Generate link element. // class TR_LINK @@ -486,14 +472,23 @@ namespace brep class TR_BUILD_RESULT { public: - TR_BUILD_RESULT (const build& b, const string& h, const dir_path& r): - build_ (b), host_ (h), root_ (r) {} + TR_BUILD_RESULT (const build& b, + bool a, + const string& h, + const dir_path& r): + build_ (b), archived_ (a), host_ (h), root_ (r) + { + // We don't expect a queued build to ever be displayed. + // + assert (build_.state != build_state::queued); + } void operator() (xml::serializer&) const; private: const build& build_; + bool archived_; const string& host_; const dir_path& root_; }; @@ -599,16 +594,14 @@ namespace brep public: // Generate a full text element. // - DIV_TEXT (const string& t, - text_type tp, + DIV_TEXT (const typed_text& t, bool st, const string& id, const string& what, const basic_mark& diag) : text_ (t), - type_ (tp), strip_title_ (st), - length_ (t.size ()), + length_ (t.text.size ()), url_ (nullptr), id_ (id), what_ (what), @@ -618,8 +611,7 @@ namespace brep // Generate a brief text element. // - DIV_TEXT (const string& t, - text_type tp, + DIV_TEXT (const typed_text& t, bool st, size_t l, const string& u, @@ -627,7 +619,6 @@ namespace brep const string& what, const basic_mark& diag) : text_ (t), - type_ (tp), strip_title_ (st), length_ (l), url_ (&u), @@ -641,8 +632,7 @@ namespace brep operator() (xml::serializer&) const; private: - const string& text_; - text_type type_; + const typed_text& text_; bool strip_title_; size_t length_; const string* url_; // Full page url. 
diff --git a/mod/tenant-service.hxx b/mod/tenant-service.hxx
new file mode 100644
index 0000000..9205f76
--- /dev/null
+++ b/mod/tenant-service.hxx
@@ -0,0 +1,155 @@
+// file      : mod/tenant-service.hxx -*- C++ -*-
+// license   : MIT; see accompanying LICENSE file
+
+#ifndef MOD_TENANT_SERVICE_HXX
+#define MOD_TENANT_SERVICE_HXX
+
+#include <map>
+
+#include <libbrep/types.hxx>
+#include <libbrep/utility.hxx>
+
+#include <libbrep/build.hxx>
+
+#include <mod/diagnostics.hxx>
+
+namespace brep
+{
+  class tenant_service_base
+  {
+  public:
+    virtual ~tenant_service_base () = default;
+  };
+
+  // Possible build notifications:
+  //
+  // queued
+  // building
+  // built
+  //
+  // Possible transitions:
+  //
+  //          -> queued
+  // queued   -> building
+  // building -> queued   (interrupted & re-queued due to higher priority task)
+  // building -> built
+  // built    -> queued   (periodic or user-forced rebuild)
+  //
+  // While the implementation tries to make sure the notifications arrive in
+  // the correct order, this is currently done by imposing delays (some
+  // natural, such as building->built, and some artificial, such as
+  // queued->building). As a result, it is unlikely but possible to be
+  // notified about the state transitions in the wrong order, especially if
+  // the notifications take a long time. To minimize the chance of this
+  // happening, the service implementation should strive to batch the queued
+  // state notifications (of which there could be hundreds) in a single
+  // request if at all possible. Also, if supported by the third-party API,
+  // it makes sense for the implementation to protect against overwriting
+  // later states with earlier ones. For example, if it's possible to place a
+  // condition on a notification, it makes sense to only set the state to
+  // queued if none of the later states (e.g., building) are already in
+  // effect.
+  //
+  // Note also that it's possible for the build to get deleted at any stage
+  // without any further notifications. This can happen, for example, due to
+  // data retention timeout or because the build configuration (buildtab
+  // entry) is no longer present. There is no explicit `deleted` transition
+  // notification because such situations (i.e., when a notification sequence
+  // is abandoned half way) are not expected to arise ordinarily in a
+  // properly-configured brep instance. And the third-party service is
+  // expected to deal with them using some overall timeout/expiration
+  // mechanism which it presumably has.
+  //
+  // Each build notification is in its own interface since a service may not
+  // be interested in all of them while computing the information to pass is
+  // expensive.
+
+  class tenant_service_build_queued: public virtual tenant_service_base
+  {
+  public:
+    // If the returned function is not NULL, it is called to update the
+    // service data. It should return the new data or nullopt if no update is
+    // necessary. Note: tenant_service::data passed to the callback and to
+    // the returned function may not be the same. Also, the returned function
+    // may be called multiple times (on transaction retries).
+    //
+    // The passed initial_state indicates the logical initial state and is
+    // either absent, `building` (interrupted), or `built` (rebuild). Note
+    // that all the passed build objects are for the same package version and
+    // have the same initial state.
+    //
+    // The implementation of this and the below functions should normally not
+    // need to make any decisions based on the passed build::state.
Rather, + // the function name suffix (_queued, _building, _built) signify the + // logical end state. + // + // The build_queued_hints can be used to omit certain components from the + // build id. If single_package_version is true, then this tenant contains + // a single (non-test) package version and this package name and package + // version can be omitted. If single_package_config is true, then the + // package version being built only has the default package configuration + // and thus it can be omitted. + // + struct build_queued_hints + { + bool single_package_version; + bool single_package_config; + }; + + virtual function<optional<string> (const tenant_service&)> + build_queued (const tenant_service&, + const vector<build>&, + optional<build_state> initial_state, + const build_queued_hints&, + const diag_epilogue& log_writer) const noexcept = 0; + }; + + class tenant_service_build_building: public virtual tenant_service_base + { + public: + virtual function<optional<string> (const tenant_service&)> + build_building (const tenant_service&, + const build&, + const diag_epilogue& log_writer) const noexcept = 0; + }; + + class tenant_service_build_built: public virtual tenant_service_base + { + public: + virtual function<optional<string> (const tenant_service&)> + build_built (const tenant_service&, + const build&, + const diag_epilogue& log_writer) const noexcept = 0; + }; + + // Map of service type (tenant_service::type) to service. + // + using tenant_service_map = std::map<string, shared_ptr<tenant_service_base>>; + + // Every notification callback function that needs to produce any + // diagnostics shall begin with: + // + // NOTIFICATION_DIAG (log_writer); + // + // This will instantiate the error, warn, info, and trace diagnostics + // streams with the function's name. + // + // Note that a callback function is not expected to throw any exceptions. + // This is, in particular, why this macro doesn't instantiate the fail + // diagnostics stream. + // +#define NOTIFICATION_DIAG(log_writer) \ + const basic_mark error (severity::error, \ + log_writer, \ + __PRETTY_FUNCTION__); \ + const basic_mark warn (severity::warning, \ + log_writer, \ + __PRETTY_FUNCTION__); \ + const basic_mark info (severity::info, \ + log_writer, \ + __PRETTY_FUNCTION__); \ + const basic_mark trace (severity::trace, \ + log_writer, \ + __PRETTY_FUNCTION__) +} + +#endif // MOD_TENANT_SERVICE_HXX diff --git a/mod/types-parsers.cxx b/mod/types-parsers.cxx index dc21e97..f135608 100644 --- a/mod/types-parsers.cxx +++ b/mod/types-parsers.cxx @@ -3,12 +3,17 @@ #include <mod/types-parsers.hxx> -#include <libbutl/timestamp.mxx> // from_string() +#include <sstream> + +#include <libbutl/regex.hxx> +#include <libbutl/timestamp.hxx> // from_string() #include <mod/module-options.hxx> using namespace std; +using namespace butl; using namespace bpkg; +using namespace bbot; using namespace web::xhtml; namespace brep @@ -75,9 +80,9 @@ namespace brep string t ("1970-01-01 "); t += v; - x = butl::from_string (t.c_str (), - "%Y-%m-%d %H:%M", - false /* local */).time_since_epoch (); + x = from_string (t.c_str (), + "%Y-%m-%d %H:%M", + false /* local */).time_since_epoch (); return; } catch (const invalid_argument&) {} @@ -110,6 +115,29 @@ namespace brep } } + // Parse interactive_mode. 
+ // + void parser<interactive_mode>:: + parse (interactive_mode& x, bool& xs, scanner& s) + { + xs = true; + const char* o (s.next ()); + + if (!s.more ()) + throw missing_value (o); + + const string v (s.next ()); + + try + { + x = to_interactive_mode (v); + } + catch (const invalid_argument&) + { + throw invalid_value (o, v); + } + } + // Parse page_form. // void parser<page_form>:: @@ -176,10 +204,84 @@ namespace brep { x = fragment (v, o); } - catch (const xml::parsing&) + catch (const xml::parsing& e) { - throw invalid_value (o, v); + throw invalid_value (o, v, e.what ()); } } + + // Parse the '/regex/replacement/' string into the regex/replacement pair. + // + void parser<pair<std::regex, string>>:: + parse (pair<std::regex, string>& x, bool& xs, scanner& s) + { + xs = true; + const char* o (s.next ()); + + if (!s.more ()) + throw missing_value (o); + + const char* v (s.next ()); + + try + { + x = regex_replace_parse (v); + } + catch (const invalid_argument& e) + { + throw invalid_value (o, v, e.what ()); + } + catch (const regex_error& e) + { + // Sanitize the description. + // + ostringstream os; + os << e; + + throw invalid_value (o, v, os.str ()); + } + } + + // Parse build_order. + // + void parser<build_order>:: + parse (build_order& x, bool& xs, scanner& s) + { + xs = true; + const char* o (s.next ()); + + if (!s.more ()) + throw missing_value (o); + + const string v (s.next ()); + if (v == "stable") + x = build_order::stable; + else if (v == "random") + x = build_order::random; + else + throw invalid_value (o, v); + } + + // Parse build_email. + // + void parser<build_email>:: + parse (build_email& x, bool& xs, scanner& s) + { + xs = true; + const char* o (s.next ()); + + if (!s.more ()) + throw missing_value (o); + + const string v (s.next ()); + if (v == "none") + x = build_email::none; + else if (v == "latest") + x = build_email::latest; + else if (v == "all") + x = build_email::all; + else + throw invalid_value (o, v); + } } } diff --git a/mod/types-parsers.hxx b/mod/types-parsers.hxx index 6b851eb..d48ae0b 100644 --- a/mod/types-parsers.hxx +++ b/mod/types-parsers.hxx @@ -7,7 +7,10 @@ #ifndef MOD_TYPES_PARSERS_HXX #define MOD_TYPES_PARSERS_HXX +#include <regex> + #include <libbpkg/manifest.hxx> // repository_location +#include <libbbot/manifest.hxx> // interactive_mode #include <web/xhtml/fragment.hxx> @@ -56,6 +59,13 @@ namespace brep }; template <> + struct parser<bbot::interactive_mode> + { + static void + parse (bbot::interactive_mode&, bool&, scanner&); + }; + + template <> struct parser<page_form> { static void @@ -75,6 +85,27 @@ namespace brep static void parse (web::xhtml::fragment&, bool&, scanner&); }; + + template <> + struct parser<pair<std::regex, string>> + { + static void + parse (pair<std::regex, string>&, bool&, scanner&); + }; + + template <> + struct parser<build_order> + { + static void + parse (build_order&, bool&, scanner&); + }; + + template <> + struct parser<build_email> + { + static void + parse (build_email&, bool&, scanner&); + }; } } diff --git a/monitor/monitor.cli b/monitor/monitor.cli index edfc004..3a58a1d 100644 --- a/monitor/monitor.cli +++ b/monitor/monitor.cli @@ -60,18 +60,26 @@ namespace brep { "\h|OPTIONS|" - std::size_t --build-timeout + std::size_t --soft-rebuild-timeout { "<seconds>", - "Time to wait (in seconds) before considering a package build as + "Time to wait (in seconds) before considering a package soft (re)build as delayed. 
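As an aside on the '/regex/replacement/' option values parsed above: butl::regex_replace_parse() does the real work, but the idea can be approximated with the standard library alone. A rough sketch that, unlike the real parser, ignores escaped delimiters:

#include <regex>
#include <string>
#include <utility>
#include <cassert>
#include <cstddef>
#include <stdexcept>

// Parse '/regex/replacement/' into a regex/replacement pair. Throw
// std::invalid_argument on an ill-formed value.
//
static std::pair<std::regex, std::string>
parse_replace (const std::string& v)
{
  if (v.size () < 3 || v.front () != '/')
    throw std::invalid_argument ("no leading '/'");

  std::size_t e (v.find ('/', 1));
  if (e == std::string::npos || v.back () != '/')
    throw std::invalid_argument ("no trailing '/'");

  return {std::regex (v.substr (1, e - 1)),
          v.substr (e + 1, v.size () - e - 2)};
}

int
main ()
{
  auto rr (parse_replace ("/foo-(.+)/bar-$1/"));
  assert (std::regex_replace ("foo-baz", rr.first, rr.second) == "bar-baz");
}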
If unspecified, it is the sum of the package rebuild timeout - (normal rebuild timeout if the alternative timeout is unspecified and - the maximum of two otherwise) and the build result timeout (see the - \cb{build-normal-rebuild-timeout}, \cb{build-alt-rebuild-*}, and - \cb{build-result-timeout} \c{brep} module configuration options - for details). - - Note that a package that was not built before it was archived is + (soft rebuild timeout if the alternative timeout is unspecified and + the maximum of two otherwise) and the build result timeout (see + the \cb{build-soft-rebuild-timeout}, \cb{build-alt-soft-rebuild-*}, + and \cb{build-result-timeout} \cb{brep} module configuration options + for details). The special zero value disables monitoring of soft + rebuilds. + + Note that if both soft and hard rebuilds are disabled in the + \cb{brep} module configuration, then \cb{brep-monitor} is unable to + come up with a reasonable build timeout on its own. In this case, to + monitor the initial package build delays, you may need to specify + either \cb{--soft-rebuild-timeout} or \cb{--hard-rebuild-timeout} + explicitly. + + Also note that a package that was not built before it was archived is always considered as delayed. However, to distinguish this case from a situation where a package was archived before a configuration have been added, \cb{brep-monitor} needs to observe the package as @@ -81,6 +89,16 @@ namespace brep timeout." } + std::size_t --hard-rebuild-timeout + { + "<seconds>", + "Time to wait (in seconds) before considering a package hard (re)build + as delayed. If unspecified, it is calculated in the same way as for + \cb{--soft-rebuild-timeout} but using the + \cb{build-hard-rebuild-timeout} and \cb{build-alt-hard-rebuild-*} + \cb{brep} module configuration options." + } + std::size_t --report-timeout { "<seconds>", diff --git a/monitor/monitor.cxx b/monitor/monitor.cxx index bbab0a5..42d481d 100644 --- a/monitor/monitor.cxx +++ b/monitor/monitor.cxx @@ -5,7 +5,6 @@ #include <set> #include <chrono> #include <iostream> -#include <algorithm> // find_if() #include <odb/database.hxx> #include <odb/transaction.hxx> @@ -13,10 +12,7 @@ #include <odb/pgsql/database.hxx> -#include <libbutl/pager.mxx> -#include <libbutl/utility.mxx> // compare_c_string - -#include <libbbot/build-config.hxx> +#include <libbutl/pager.hxx> #include <libbrep/build.hxx> #include <libbrep/common.hxx> @@ -25,14 +21,13 @@ #include <libbrep/build-package-odb.hxx> #include <libbrep/database-lock.hxx> -#include <mod/build-config.hxx> +#include <mod/build-target-config.hxx> #include <monitor/module-options.hxx> #include <monitor/monitor-options.hxx> using namespace std; using namespace butl; -using namespace bbot; using namespace odb::core; namespace brep @@ -41,6 +36,289 @@ namespace brep // struct failed {}; + // We will collect and report build delays as separate steps not to hold + // database locks while printing to stderr. Also we need to order delays + // properly, so while printing reports we could group delays by toolchain + // and target configuration. 
+ // + // To achieve that, we will iterate through all possible package builds + // creating the list of delays with the following sort priority: + // + // 1: toolchain name + // 2: toolchain version (descending) + // 3: target configuration name + // 4: target + // 5: tenant + // 6: package name + // 7: package version (descending) + // 8: package configuration name + // + struct compare_delay + { + bool + operator() (const shared_ptr<const build_delay>& x, + const shared_ptr<const build_delay>& y) const + { + if (int r = x->toolchain_name.compare (y->toolchain_name)) + return r < 0; + + if (int r = x->toolchain_version.compare (y->toolchain_version)) + return r > 0; + + if (int r = x->target_config_name.compare (y->target_config_name)) + return r < 0; + + if (int r = x->target.compare (y->target)) + return r < 0; + + if (int r = x->tenant.compare (y->tenant)) + return r < 0; + + if (int r = x->package_name.compare (y->package_name)) + return r < 0; + + if (int r = x->package_version.compare (y->package_version)) + return r > 0; + + return x->package_config_name.compare (y->package_config_name) < 0; + } + }; + + // The ordered list of delays to report. + // + class delay_report + { + public: + // Note that in the brief mode we also need to print the total number of + // delays (reported or not) per target configuration. Thus, we add all + // delays to the report object, marking them if we need to report them or + // not. + // + void + add_delay (shared_ptr<build_delay>, bool custom_bot, bool report); + + bool + empty () const {return reported_delay_count_ == 0;} + + // In the brief mode (if full is false) print the number of reported/total + // (if total is true) delayed package configuration builds per target + // configuration rather than the package configurations themselves. + // + void + print (const char* header, bool total, bool full) const; + + private: + // Maps delays to the custom bot and report flag. + // + struct delay_info + { + bool custom_bot; + bool report; + }; + + map<shared_ptr<const build_delay>, delay_info, compare_delay> delays_; + size_t reported_delay_count_ = 0; + + // Number of reported/total delayed package configurations which need to + // be built with the custom build bots. + // + size_t custom_total_delay_count_ = 0; + size_t custom_reported_delay_count_ = 0; + }; + + void delay_report:: + add_delay (shared_ptr<build_delay> delay, bool custom_bot, bool report) + { + delays_.emplace (move (delay), delay_info {custom_bot, report}); + + if (custom_bot) + ++custom_total_delay_count_; + + if (report) + { + ++reported_delay_count_; + + if (custom_bot) + ++custom_reported_delay_count_; + } + } + + void delay_report:: + print (const char* header, bool total, bool full) const + { + if (empty ()) + return; + + cerr << header << " (" << reported_delay_count_; + + if (total) + cerr << '/' << delays_.size (); + + if (custom_reported_delay_count_ != 0 || + (total && custom_total_delay_count_ != 0)) + { + cerr << " including " << custom_reported_delay_count_; + + if (total) + cerr << '/' << custom_total_delay_count_; + + cerr << " custom"; + } + + cerr << "):" << endl; + + // Group the printed delays by toolchain and target configuration. 
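The compare_delay comparator above chains three-way compare() calls, flipping the result sign for the descending (version) keys. A reduced model of the same pattern, with our own two-field key:

#include <set>
#include <string>
#include <cassert>

struct key
{
  std::string name;    // Ascending.
  std::string version; // Descending.
};

struct compare_key
{
  bool
  operator() (const key& x, const key& y) const
  {
    if (int r = x.name.compare (y.name))
      return r < 0;

    return x.version.compare (y.version) > 0; // Note: reversed.
  }
};

int
main ()
{
  std::set<key, compare_key> s {{"b", "1"}, {"a", "1"}, {"a", "2"}};

  // Iteration order: a/2, a/1, b/1.
  //
  auto i (s.begin ());
  assert (i->name == "a" && i->version == "2"); ++i;
  assert (i->name == "a" && i->version == "1"); ++i;
  assert (i->name == "b" && i->version == "1");
}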
+ // + const string* toolchain_name (nullptr); + const version* toolchain_version (nullptr); + const string* target_config_name (nullptr); + const target_triplet* target (nullptr); + + size_t config_reported_delay_count (0); + size_t config_total_delay_count (0); + + size_t config_custom_reported_delay_count (0); + size_t config_custom_total_delay_count (0); + + auto brief_config = [&target_config_name, + &target, + &config_reported_delay_count, + &config_total_delay_count, + &config_custom_reported_delay_count, + &config_custom_total_delay_count, + total] () + { + if (target_config_name != nullptr) + { + assert (target != nullptr); + + // Only print configurations with delays that needs to be reported. + // + if (config_reported_delay_count != 0) + { + cerr << " " << *target_config_name << '/' << *target << " (" + << config_reported_delay_count; + + if (total) + cerr << '/' << config_total_delay_count; + + if (config_custom_reported_delay_count != 0 || + (total && config_custom_total_delay_count != 0)) + { + cerr << " including " << config_custom_reported_delay_count; + + if (total) + cerr << '/' << config_custom_total_delay_count; + + cerr << " custom"; + } + + cerr << ')' << endl; + } + + config_reported_delay_count = 0; + config_total_delay_count = 0; + + config_custom_reported_delay_count = 0; + config_custom_total_delay_count = 0; + } + }; + + for (const auto& dr: delays_) + { + bool report (dr.second.report); + + if (full && !report) + continue; + + bool custom_bot (dr.second.custom_bot); + const shared_ptr<const build_delay>& d (dr.first); + + // Print the toolchain, if changed. + // + if (toolchain_name == nullptr || + d->toolchain_name != *toolchain_name || + d->toolchain_version != *toolchain_version) + { + if (!full) + brief_config (); + + if (toolchain_name != nullptr) + cerr << endl; + + cerr << " " << d->toolchain_name; + + if (!d->toolchain_version.empty ()) + cerr << "/" << d->toolchain_version; + + cerr << endl; + + toolchain_name = &d->toolchain_name; + toolchain_version = &d->toolchain_version; + target_config_name = nullptr; + target = nullptr; + } + + // Print the configuration, if changed. + // + if (target_config_name == nullptr || + d->target_config_name != *target_config_name || + d->target != *target) + { + if (full) + { + if (target_config_name != nullptr) + cerr << endl; + + cerr << " " << d->target_config_name << '/' << d->target << endl; + } + else + brief_config (); + + target_config_name = &d->target_config_name; + target = &d->target; + } + + // Print the delayed build package configuration in the full report mode + // and count configuration builds otherwise. + // + if (full) + { + // We can potentially extend this information with the archived flag + // or the delay duration. 
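In the brief mode, print() above emits one reported/total pair per target configuration instead of listing the delayed builds themselves. A stripped-down model of that counting (the configuration names are made up):

#include <map>
#include <string>
#include <utility>
#include <cstddef>
#include <iostream>

int
main ()
{
  // Per-configuration (reported, total) delay counts.
  //
  std::map<std::string, std::pair<std::size_t, std::size_t>> counts;

  auto add = [&counts] (const std::string& config, bool report)
  {
    auto& c (counts[config]);
    if (report)
      ++c.first;
    ++c.second;
  };

  add ("linux_debian_12-gcc_13/x86_64-linux-gnu", true);
  add ("linux_debian_12-gcc_13/x86_64-linux-gnu", false);
  add ("windows_10-msvc_17/x86_64-microsoft-win32-msvc", true);

  // Only configurations with delays to report are printed.
  //
  for (const auto& c: counts)
    if (c.second.first != 0)
      std::cerr << "  " << c.first << " (" << c.second.first << '/'
                << c.second.second << ")\n";
}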
+ // + cerr << " " << d->package_name << '/' << d->package_version + << ' ' << d->package_config_name; + + if (custom_bot) + cerr << " (custom bot)"; + + if (!d->tenant.empty ()) + cerr << ' ' << d->tenant; + + cerr << endl; + } + else + { + if (report) + { + ++config_reported_delay_count; + + if (custom_bot) + ++config_custom_reported_delay_count; + } + + ++config_total_delay_count; + + if (custom_bot) + ++config_custom_total_delay_count; + } + } + + if (!full) + brief_config (); + } + static const char* help_info ( " info: run 'brep-monitor --help' for more information"); @@ -141,12 +419,24 @@ namespace brep return 1; } - if (mod_ops.build_alt_rebuild_start_specified () != - mod_ops.build_alt_rebuild_stop_specified ()) + auto bad_alt = [&f] (const char* what) + { + cerr << "build-alt-" << what << "-rebuild-start and build-alt-" + << what << "-rebuild-stop configuration options must both be " + << "either specified or not in '" << f << "'" << endl; + }; + + if (mod_ops.build_alt_hard_rebuild_start_specified () != + mod_ops.build_alt_hard_rebuild_stop_specified ()) + { + bad_alt("hard"); + return 1; + } + + if (mod_ops.build_alt_soft_rebuild_start_specified () != + mod_ops.build_alt_soft_rebuild_stop_specified ()) { - cerr << "build-alt-rebuild-start and build-alt-rebuild-stop " - << "configuration options must both be either specified or not " - << "in '" << f << "'" << endl; + bad_alt("soft"); return 1; } } @@ -221,11 +511,11 @@ namespace brep return 0; } - build_configs configs; + build_target_configs configs; try { - configs = parse_buildtab (mod_ops.build_config ()); + configs = bbot::parse_buildtab (mod_ops.build_config ()); } catch (const tab_parsing& e) { @@ -276,13 +566,12 @@ namespace brep // if (ops.clean ()) { - using config_map = map<const char*, - const build_config*, - compare_c_string>; + using config_map = map<build_target_config_id, + const build_target_config*>; config_map conf_map; - for (const build_config& c: configs) - conf_map[c.name.c_str ()] = &c; + for (const build_target_config& c: configs) + conf_map[build_target_config_id {c.target, c.name}] = &c; // Prepare the build delay prepared query. // @@ -301,15 +590,17 @@ namespace brep size_t offset (0); query q ("ORDER BY" + - query::id.package.tenant + "," + - query::id.package.name + + query::id.package.tenant + "," + + query::id.package.name + order_by_version (query::id.package.version, false /* first */) + "," + - query::id.configuration + "," + - query::id.toolchain_name + + query::id.target + "," + + query::id.target_config_name + "," + + query::id.package_config_name + "," + + query::id.toolchain_name + order_by_version (query::id.toolchain_version, - false /* first */) + - "OFFSET" + query::_ref (offset) + "LIMIT 100"); + false /* first */) + + "OFFSET" + query::_ref (offset) + "LIMIT 2000"); connection_ptr conn (db.connection ()); @@ -351,7 +642,9 @@ namespace brep // // Check that the build configuration is still present. // - (ci = conf_map.find (d.configuration.c_str ())) == + (ci = conf_map.find ( + build_target_config_id {d.target, + d.target_config_name})) == conf_map.end ()); // Check that the package still present, is buildable and doesn't @@ -365,12 +658,23 @@ namespace brep p = db.find<build_package> (pid); } - cleanup = (p == nullptr || - !p->buildable || - exclude (p->builds, - p->constraints, - *ci->second, - configs.class_inheritance_map)); + const build_package_config* pc (p != nullptr + ? 
find (d.package_config_name,
+                                                  p->configs)
+                                          : nullptr);
+
+          cleanup = (pc == nullptr || !p->buildable);
+
+          if (!cleanup)
+          {
+            db.load (*p, p->constraints_section);
+
+            cleanup = exclude (*pc,
+                               p->builds,
+                               p->constraints,
+                               *ci->second,
+                               configs.class_inheritance_map);
+          }
         }
 
         if (cleanup)
@@ -384,59 +688,16 @@ namespace brep
      }
    }
 
-    // Collect and report delays as separate steps not to hold database locks
-    // while printing to stderr. Also we need to properly order delays for
-    // printing.
-    //
-    // Iterate through all possible package builds creating the list of delays
-    // with the following sort priority:
-    //
-    // 1: toolchain name
-    // 2: toolchain version (descending)
-    // 3: configuration name
-    // 4: tenant
-    // 5: package name
-    // 6: package version (descending)
-    //
-    // Such ordering will allow us to group build delays by toolchain and
-    // configuration while printing the report.
-    //
-    struct compare_delay
-    {
-      bool
-      operator() (const shared_ptr<const build_delay>& x,
-                  const shared_ptr<const build_delay>& y) const
-      {
-        if (int r = x->toolchain_name.compare (y->toolchain_name))
-          return r < 0;
-
-        if (int r = x->toolchain_version.compare (y->toolchain_version))
-          return r > 0;
-
-        if (int r = x->configuration.compare (y->configuration))
-          return r < 0;
-
-        if (int r = x->tenant.compare (y->tenant))
-          return r < 0;
-
-        if (int r = x->package_name.compare (y->package_name))
-          return r < 0;
-
-        return x->package_version.compare (y->package_version) > 0;
-      }
-    };
-
-    size_t reported_delay_count (0);
-    size_t total_delay_count (0);
-
-    set<shared_ptr<const build_delay>, compare_delay> delays;
+    delay_report hard_delays_report;
+    delay_report soft_delays_report;
+    set<shared_ptr<const build_delay>, compare_delay> update_delays;
 
     {
       connection_ptr conn (db.connection ());
 
       // Prepare the buildable package prepared query.
       //
-      // Query buildable packages in chunks in order not to hold locks for
-      // too long.
+      // Query buildable packages in chunks in order not to hold locks for too
+      // long.
       //
       using pquery = query<buildable_package>;
       using prep_pquery = prepared_query<buildable_package>;
@@ -456,80 +717,157 @@ namespace brep
         conn->prepare_query<buildable_package> ("buildable-package-query",
                                                 pq));
 
-      // Prepare the package build prepared query.
+      // Prepare the package configuration build prepared queries.
       //
+      using bquery = query<build>;
+      using prep_bquery = prepared_query<build>;
+
+      build_id id;
+
       // This query will only be used for toolchains that have no version
       // specified on the command line to obtain the latest completed build
       // across all toolchain versions, if present, and the latest incomplete
       // build otherwise.
       //
-      using bquery = query<package_build>;
-      using prep_bquery = prepared_query<package_build>;
-
-      build_id id;
-      const auto& bid (bquery::build::id);
-
-      bquery bq ((equal<package_build> (bid.package, id.package) &&
-                  bid.configuration == bquery::_ref (id.configuration) &&
-                  bid.toolchain_name == bquery::_ref (id.toolchain_name)) +
-                 "ORDER BY" +
-                 bquery::build::completion_timestamp + "DESC, " +
-                 bquery::build::timestamp + "DESC" +
-                 "LIMIT 1");
-
-      prep_bquery pbq (
-        conn->prepare_query<package_build> ("package-build-query", bq));
+      // Why don't we pick the latest toolchain version? We don't want to get
+      // stuck with it on the toolchain rollback. Instead we prefer the
+      // toolchain that built the package last and if there are none, pick
+      // the one for which the build task was issued last.
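In other words, the latest-build query prepared below picks the build with the maximum (soft_timestamp, timestamp) pair. The equivalent selection over an in-memory container, with a simplified build type of our own:

#include <tuple>
#include <vector>
#include <cassert>
#include <algorithm>

struct build
{
  int soft_timestamp; // 0 if never completed.
  int timestamp;      // When the build task was last issued.
};

int
main ()
{
  std::vector<build> builds {{0, 5}, {3, 4}, {3, 2}};

  // Prefer the build that completed last and, if none completed, the
  // one whose task was issued last.
  //
  auto i (std::max_element (
            builds.begin (), builds.end (),
            [] (const build& x, const build& y)
            {
              return std::make_tuple (x.soft_timestamp, x.timestamp) <
                     std::make_tuple (y.soft_timestamp, y.timestamp);
            }));

  assert (i->soft_timestamp == 3 && i->timestamp == 4);
}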
+ // + // @@ TMP Check if we can optimize this query by adding index for + // soft_timestamp and/or by setting enable_nestloop=off (or some + // such) as we do in mod/mod-builds.cxx. + // + bquery lbq ((equal<build> (bquery::id, + id, + false /* toolchain_version */) && + bquery::state != "queued") + + "ORDER BY" + + bquery::soft_timestamp + "DESC, " + + bquery::timestamp + "DESC" + + "LIMIT 1"); + + prep_bquery plbq ( + conn->prepare_query<build> ("package-latest-build-query", lbq)); + + // This query will only be used to retrieve a specific build by id. + // + bquery bq (equal<build> (bquery::id, id) && bquery::state != "queued"); + prep_bquery pbq (conn->prepare_query<build> ("package-build-query", bq)); - duration build_timeout; + timestamp now (system_clock::now ()); - // If the build timeout is not specified explicitly, then calculate it - // as the sum of the package rebuild timeout (normal rebuild timeout if - // the alternative timeout is unspecified and the maximum of two - // otherwise) and the build result timeout. + // Calculate the build/rebuild expiration time, based on the respective + // --{soft,hard}-rebuild-timeout monitor options and the + // build-{soft,hard}-rebuild-timeout and + // build-alt-{soft,hard}-rebuild-{start,stop,timeout} brep module + // configuration options. + // + // If the --*-rebuild-timeout monitor option is zero or is not specified + // and the respective build-*-rebuild-timeout brep's configuration + // option is zero, then return timestamp_unknown to indicate 'never + // expire'. Note that this value is less than any build timestamp value, + // including timestamp_nonexistent. + // + // NOTE: there is a similar code in mod/mod-build-task.cxx. // - if (!ops.build_timeout_specified ()) + auto build_expiration = [&now, &mod_ops] ( + optional<size_t> rebuild_timeout, + const optional<pair<duration, duration>>& alt_interval, + optional<size_t> alt_timeout, + size_t normal_timeout) { - duration normal_rebuild_timeout ( - chrono::seconds (mod_ops.build_normal_rebuild_timeout ())); + duration t; - if (mod_ops.build_alt_rebuild_start_specified ()) + // If the rebuild timeout is not specified explicitly, then calculate + // it as the sum of the package rebuild timeout (normal rebuild + // timeout if the alternative timeout is unspecified and the maximum + // of two otherwise) and the build result timeout. + // + if (!rebuild_timeout) { - // Calculate the alternative rebuild timeout as the time interval - // lenght, unless it is specified explicitly. - // - if (!mod_ops.build_alt_rebuild_timeout_specified ()) + if (normal_timeout == 0) + return timestamp_unknown; + + chrono::seconds nt (normal_timeout); + + if (alt_interval) { - const duration& start (mod_ops.build_alt_rebuild_start ()); - const duration& stop (mod_ops.build_alt_rebuild_stop ()); + // Calculate the alternative timeout, unless it is specified + // explicitly. + // + if (!alt_timeout) + { + const duration& start (alt_interval->first); + const duration& stop (alt_interval->second); + + // Note that if the stop time is less than the start time then + // the interval extends through the midnight. + // + t = start <= stop ? (stop - start) : ((24h - start) + stop); - // Note that if the stop time is less than the start time then the - // interval extends through the midnight. + // If the normal rebuild time out is greater than 24 hours, then + // increase the default alternative timeout by (normal - 24h) + // (see build-alt-soft-rebuild-timeout configuration option for + // details). 
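The interval arithmetic here deserves a standalone illustration: the alternative timeout defaults to the window length, wrapping through midnight when the stop time precedes the start time, plus whatever portion of the normal timeout exceeds 24 hours. A self-contained std::chrono sketch of that calculation (the real code then also takes the maximum with the normal timeout and adds the build result timeout):

#include <chrono>
#include <cassert>

int
main ()
{
  using namespace std::chrono;

  auto alt_timeout = [] (minutes start, minutes stop, hours normal)
  {
    minutes t (start <= stop ? stop - start : (24h - start) + stop);

    if (normal > 24h)
      t += normal - 24h;

    return t;
  };

  // 01:00-06:00 window: 5 hours.
  //
  assert (alt_timeout (60min, 360min, 24h) == 300min);

  // 22:00-03:00 window wraps through midnight: still 5 hours.
  //
  assert (alt_timeout (1320min, 180min, 24h) == 300min);

  // A 30-hour normal timeout adds the 6-hour excess on top.
  //
  assert (alt_timeout (60min, 360min, 30h) == 660min);
}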
+ // + if (nt > 24h) + t += nt - 24h; + } + else + t = chrono::seconds (*alt_timeout); + + // Take the maximum of the alternative and normal rebuild + // timeouts. // - build_timeout = start <= stop - ? stop - start - : (24h - start) + stop; + if (t < nt) + t = nt; } else - build_timeout = - chrono::seconds (mod_ops.build_alt_rebuild_timeout ()); + t = nt; - // Take the maximum of the alternative and normal rebuild timeouts. + // Summarize the rebuild and build result timeouts. // - if (build_timeout < normal_rebuild_timeout) - build_timeout = normal_rebuild_timeout; + t += chrono::seconds (mod_ops.build_result_timeout ()); } else - build_timeout = normal_rebuild_timeout; + { + if (*rebuild_timeout == 0) + return timestamp_unknown; - // Summarize the rebuild and build result timeouts. - // - build_timeout += chrono::seconds (mod_ops.build_result_timeout ()); - } - else - build_timeout = chrono::seconds (ops.build_timeout ()); + t = chrono::seconds (*rebuild_timeout); + } - timestamp now (system_clock::now ()); - timestamp build_expiration (now - build_timeout); + return now - t; + }; + + timestamp hard_rebuild_expiration ( + build_expiration ( + (ops.hard_rebuild_timeout_specified () + ? ops.hard_rebuild_timeout () + : optional<size_t> ()), + (mod_ops.build_alt_hard_rebuild_start_specified () + ? make_pair (mod_ops.build_alt_hard_rebuild_start (), + mod_ops.build_alt_hard_rebuild_stop ()) + : optional<pair<duration, duration>> ()), + (mod_ops.build_alt_hard_rebuild_timeout_specified () + ? mod_ops.build_alt_hard_rebuild_timeout () + : optional<size_t> ()), + mod_ops.build_hard_rebuild_timeout ())); + + timestamp soft_rebuild_expiration ( + build_expiration ( + (ops.soft_rebuild_timeout_specified () + ? ops.soft_rebuild_timeout () + : optional<size_t> ()), + (mod_ops.build_alt_soft_rebuild_start_specified () + ? make_pair (mod_ops.build_alt_soft_rebuild_start (), + mod_ops.build_alt_soft_rebuild_stop ()) + : optional<pair<duration, duration>> ()), + (mod_ops.build_alt_soft_rebuild_timeout_specified () + ? mod_ops.build_alt_soft_rebuild_timeout () + : optional<size_t> ()), + mod_ops.build_soft_rebuild_timeout ())); timestamp report_expiration ( now - chrono::seconds (ops.report_timeout ())); @@ -548,139 +886,205 @@ namespace brep for (auto& bp: bps) { - shared_ptr<build_package> p (db.load<build_package> (bp.id)); + shared_ptr<build_package>& p (bp.package); - for (const build_config& c: configs) - { - if (exclude (p->builds, - p->constraints, - c, - configs.class_inheritance_map)) - continue; + db.load (*p, p->constraints_section); - for (const pair<string, version>& t: toolchains) + for (const build_package_config& pc: p->configs) + { + for (const build_target_config& tc: configs) { - id = build_id (p->id, c.name, t.first, t.second); - - // If the toolchain version is unspecified then search for the - // latest build across all toolchain versions and search for a - // specific build otherwise. + // Note that we also don't build a package configuration if we + // are unable to assign all the required auxiliary machines + // for the build (see mod/mod-build-task.cxx for details). + // That means that we will also report delays which happen due + // to such an inability, which can potentially be not only + // because of the infrastructural problem but also because of + // an error in the package manifest (build auxiliary + // configuration pattern doesn't match any machine + // configuration anymore, etc). It doesn't seem easy to + // distinguish here which type of problem causes a delay. 
+ // Thus, for now let's wait and see if it ever becomes a + // problem. // - shared_ptr<build> b; - - if (id.toolchain_version.empty ()) + if (exclude (pc, + p->builds, + p->constraints, + tc, + configs.class_inheritance_map)) + continue; + + for (const pair<string, version>& t: toolchains) { - auto pbs (pbq.execute ()); + id = build_id (p->id, + tc.target, tc.name, + pc.name, + t.first, t.second); + + // If the toolchain version is unspecified then search for + // the latest build across all toolchain versions and search + // for a specific build otherwise. + // + shared_ptr<build> b (id.toolchain_version.empty () + ? plbq.execute_one () + : pbq.execute_one ()); + + // Note that we consider a build as delayed if it is not + // completed in the expected timeframe. So even if the build + // task have been issued recently we may still consider the + // build as delayed. + // + timestamp bht (b != nullptr + ? b->hard_timestamp + : timestamp_nonexistent); - if (!pbs.empty ()) - b = move (pbs.begin ()->build); - } - else - b = db.find<build> (id); + timestamp bst (b != nullptr + ? b->soft_timestamp + : timestamp_nonexistent); - // Note that we consider a build as delayed if it is not - // completed in the expected timeframe. So even if the build - // task have been issued recently we may still consider the - // build as delayed. - // - timestamp bct (b != nullptr - ? b->completion_timestamp - : timestamp_nonexistent); + // Create the delay object to record a timestamp when the + // package configuration build could have potentially been + // started, unless it already exists. + // + shared_ptr<build_delay> d (db.find<build_delay> (id)); - // Create the delay object to record a timestamp when the - // package build could have potentially been started, unless - // it already exists. - // - shared_ptr<build_delay> d (db.find<build_delay> (id)); + if (d == nullptr) + { + // If the archived package has no build nor build delay + // for this configuration, then we assume that the + // configuration was added after the package tenant has + // been archived and so the package could have never been + // built for this configuration. Thus, we don't consider + // this build as delayed and so skip it. + // + if (bp.archived && b == nullptr) + continue; + + // Use the build hard, soft, or status change timestamp + // (see the timestamps description for their ordering + // information) as the build delay tracking starting point + // and fallback to the current time if there is no build + // yet. + // + timestamp pts (b == nullptr ? now : + bht != timestamp_nonexistent ? bht : + bst != timestamp_nonexistent ? bst : + b->timestamp); + + d = make_shared<build_delay> (move (id.package.tenant), + move (id.package.name), + p->version, + move (id.target), + move (id.target_config_name), + move (id.package_config_name), + move (id.toolchain_name), + t.second, + pts); + db.persist (d); + } - if (d == nullptr) - { - // If the archived package has no build nor build delay - // for this configuration, then we assume that the - // configuration was added after the package tenant has - // been archived and so the package could have never been - // built for this configuration. Thus, we don't consider - // this build as delayed and so skip it. + // Handle package builds differently based on their tenant's + // archive status. 
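The fallback chain above picks the first of the hard, soft, and task timestamps that exists, defaulting to the current time when there is no build at all. Schematically (integers standing in for timestamps, zero for timestamp_nonexistent):

#include <cassert>

struct build
{
  int hard_timestamp;
  int soft_timestamp;
  int timestamp;
};

// Pick the delay tracking starting point: the first of the hard, soft,
// and task timestamps that exists, or `now` if there is no build yet.
//
static int
start_point (const build* b, int now)
{
  return b == nullptr           ? now               :
         b->hard_timestamp != 0 ? b->hard_timestamp :
         b->soft_timestamp != 0 ? b->soft_timestamp :
         b->timestamp;
}

int
main ()
{
  build b {0, 7, 9};

  assert (start_point (nullptr, 42) == 42);
  assert (start_point (&b, 42) == 7);

  b.hard_timestamp = 5;
  assert (start_point (&b, 42) == 5);
}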
// - if (bp.archived && b == nullptr) - continue; - - // Use the build completion or build status change - // timestamp, whichever is earlier, as the build delay - // tracking starting point and fallback to the current time - // if there is no build yet. + // If the package is not archived then consider it as + // delayed if it is not (re-)built by the expiration + // time. Otherwise, consider it as delayed if it is unbuilt. // - timestamp pts ( - b == nullptr ? now : - bct != timestamp_nonexistent && bct < b->timestamp ? bct : - b->timestamp); - - d = make_shared<build_delay> (move (id.package.tenant), - move (id.package.name), - p->version, - move (id.configuration), - move (id.toolchain_name), - t.second, - pts); - db.persist (d); - } + // We also don't need to report an unbuilt archived package + // twice, as both soft and hard build delays. + // + bool hard_delayed; + bool soft_delayed; - // Handle package builds differently based on their tenant's - // archive status. - // - // If the package is not archived then consider it as delayed - // if it is not (re-)built by the expiration time. Otherwise, - // consider it as delayed if it is unbuilt. - // - bool delayed; + if (!bp.archived) + { + auto delayed = [&d] (timestamp bt, timestamp be) + { + timestamp t (bt != timestamp_nonexistent + ? bt + : d->package_timestamp); + return t <= be; + }; + + hard_delayed = delayed (bht, hard_rebuild_expiration); + soft_delayed = delayed (bst, soft_rebuild_expiration); + } + else + { + hard_delayed = (bst == timestamp_nonexistent); + soft_delayed = false; + } - if (!bp.archived) - { - timestamp bts (bct != timestamp_nonexistent - ? bct - : d->package_timestamp); + // If there is a delay, then deduce if this package + // configuration needs to be built with a custom build bot. + // + // Note: only meaningful if there is a delay. + // + bool custom_bot (false); - delayed = (bts <= build_expiration); - } - else - delayed = (bct == timestamp_nonexistent); + if (hard_delayed || soft_delayed) + { + if (!p->bot_keys_section.loaded ()) + db.load (*p, p->bot_keys_section); - if (delayed) - { - // If the report timeout is zero then report the delay - // unconditionally. Otherwise, report the active package - // build delay if the report timeout is expired and the - // archived package build delay if it was never reported. - // Note that fixing the building infrastructure won't help - // building an archived package, so reporting its build - // delays repeatedly is meaningless. + custom_bot = !pc.effective_bot_keys (p->bot_keys).empty (); + } + + // Add hard/soft delays to the respective reports and + // collect the delay for update, if it is reported. // - if (ops.report_timeout () == 0 || - (!bp.archived - ? d->report_timestamp <= report_expiration - : d->report_timestamp == timestamp_nonexistent)) + // Note that we update the delay objects persistent state + // later, after we successfully print the reports. + // + bool reported (false); + + if (hard_delayed) { - // Note that we update the delay objects persistent state - // later, after we successfully print the report. + // If the report timeout is zero then report the delay + // unconditionally. Otherwise, report the active package + // build delay if the report timeout is expired and the + // archived package build delay if it was never reported. + // Note that fixing the building infrastructure won't help + // building an archived package, so reporting its build + // delays repeatedly is meaningless. 
// - d->report_timestamp = now; - delays.insert (move (d)); + bool report ( + ops.report_timeout () == 0 || + (!bp.archived + ? d->report_hard_timestamp <= report_expiration + : d->report_hard_timestamp == timestamp_nonexistent)); + + if (report) + { + d->report_hard_timestamp = now; + reported = true; + } - ++reported_delay_count; + hard_delays_report.add_delay (d, custom_bot, report); } - // - // In the brief mode also collect unreported delays to - // deduce and print the total number of delays per - // configuration. Mark such delays with the - // timestamp_nonexistent report timestamp. - // - else if (!ops.full_report ()) + + if (soft_delayed) { - d->report_timestamp = timestamp_nonexistent; - delays.insert (move (d)); + bool report (ops.report_timeout () == 0 || + d->report_soft_timestamp <= report_expiration); + + if (report) + { + d->report_soft_timestamp = now; + reported = true; + } + + soft_delays_report.add_delay (d, custom_bot, report); } - ++total_delay_count; + // If we don't consider the report timestamps for reporting + // delays, it seems natural not to update these timestamps + // either. Note that reporting all delays and still updating + // the report timestamps can be achieved by specifying the + // zero report timeout. + // + if (reported && ops.report_timeout_specified ()) + update_delays.insert (move (d)); } } } @@ -691,161 +1095,48 @@ namespace brep } } - // Report package build delays, if any. + // Print delay reports, if not empty. // - if (reported_delay_count != 0) + if (!hard_delays_report.empty () || !soft_delays_report.empty ()) try { - // Print the report. - // cerr.exceptions (ostream::badbit | ostream::failbit); // Don't print the total delay count if the report timeout is zero since // all delays are reported in this case. // - bool print_total_delay_count (ops.report_timeout () != 0); - - cerr << "Package build delays (" << reported_delay_count; - - if (print_total_delay_count) - cerr << '/' << total_delay_count; + bool total (ops.report_timeout () != 0); - cerr << "):" << endl; + hard_delays_report.print ("Package hard rebuild delays", + total, + ops.full_report ()); - // Group the printed delays by toolchain and configuration. + // Separate reports with an empty line. // - const string* toolchain_name (nullptr); - const version* toolchain_version (nullptr); - const string* configuration (nullptr); - - // In the brief report mode print the number of reported/total delayed - // package builds per configuration rather than the packages themselves. - // - size_t config_reported_delay_count (0); - size_t config_total_delay_count (0); - - auto brief_config = [&configuration, - &config_reported_delay_count, - &config_total_delay_count, - print_total_delay_count] () - { - if (configuration != nullptr) - { - // Only print configurations with delays that needs to be reported. - // - if (config_reported_delay_count != 0) - { - cerr << " " << *configuration << " (" - << config_reported_delay_count; - - if (print_total_delay_count) - cerr << '/' << config_total_delay_count; - - cerr << ')' << endl; - } - - config_reported_delay_count = 0; - config_total_delay_count = 0; - } - }; + if (!hard_delays_report.empty () && !soft_delays_report.empty ()) + cerr << endl; - for (shared_ptr<const build_delay> d: delays) - { - // Print the toolchain, if changed. 
- // - if (toolchain_name == nullptr || - d->toolchain_name != *toolchain_name || - d->toolchain_version != *toolchain_version) - { - if (!ops.full_report ()) - brief_config (); - - if (toolchain_name != nullptr) - cerr << endl; - - cerr << " " << d->toolchain_name; - - if (!d->toolchain_version.empty ()) - cerr << "/" << d->toolchain_version; - - cerr << endl; - - toolchain_name = &d->toolchain_name; - toolchain_version = &d->toolchain_version; - configuration = nullptr; - } - - // Print the configuration, if changed. - // - if (configuration == nullptr || d->configuration != *configuration) - { - if (ops.full_report ()) - { - if (configuration != nullptr) - cerr << endl; - - cerr << " " << d->configuration << endl; - } - else - brief_config (); - - configuration = &d->configuration; - } - - // Print the delayed build package in the full report mode and count - // configuration builds otherwise. - // - if (ops.full_report ()) - { - // We can potentially extend this information with the archived flag - // or the delay duration. - // - cerr << " " << d->package_name << "/" << d->package_version; - - if (!d->tenant.empty ()) - cerr << " " << d->tenant; - - cerr << endl; - } - else - { - if (d->report_timestamp != timestamp_nonexistent) - ++config_reported_delay_count; - - ++config_total_delay_count; - } - } - - if (!ops.full_report ()) - brief_config (); - - // Persist the delay report timestamps. - // - // If we don't consider the report timestamps for reporting delays, it - // seems natural not to update these timestamps either. Note that - // reporting all delays and still updating the report timestamps can be - // achieved by specifying the zero report timeout. - // - if (ops.report_timeout_specified ()) - { - transaction t (db.begin ()); - - for (shared_ptr<const build_delay> d: delays) - { - // Only update timestamps for delays that needs to be reported. - // - if (d->report_timestamp != timestamp_nonexistent) - db.update (d); - } - - t.commit (); - } + soft_delays_report.print ("Package soft rebuild delays", + total, + ops.full_report ()); } catch (const io_error&) { return 1; // Not much we can do on stderr writing failure. } + // Persist the delay report timestamps. 
+ // + if (!update_delays.empty ()) + { + transaction t (db.begin ()); + + for (shared_ptr<const build_delay> d: update_delays) + db.update (d); + + t.commit (); + } + return 0; } catch (const database_locked&) diff --git a/repositories.manifest b/repositories.manifest index f6ba123..faed09f 100644 --- a/repositories.manifest +++ b/repositories.manifest @@ -19,23 +19,23 @@ location: ../libbutl.bash.git##HEAD : role: prerequisite -location: https://git.build2.org/packaging/libapr/libapr1.git##HEAD +location: ../bpkg-util.git##HEAD : role: prerequisite -location: https://git.build2.org/packaging/libapreq/libapreq2.git##HEAD +location: https://git.build2.org/packaging/libapr/libapr1.git##HEAD : role: prerequisite -location: https://git.build2.org/packaging/cmark-gfm/cmark-gfm.git##HEAD +location: https://git.build2.org/packaging/libapreq/libapreq2.git##HEAD : role: prerequisite -location: https://git.codesynthesis.com/odb/libodb.git##HEAD +location: https://git.build2.org/packaging/cmark-gfm/cmark-gfm.git##HEAD : role: prerequisite -location: https://git.codesynthesis.com/odb/libodb-pgsql.git##HEAD +location: https://git.codesynthesis.com/odb/odb.git##HEAD : role: prerequisite diff --git a/tests/ci/ci-load.testscript b/tests/ci/ci-load.testscript index 57fa9d1..eb9ba7c 100644 --- a/tests/ci/ci-load.testscript +++ b/tests/ci/ci-load.testscript @@ -105,6 +105,14 @@ email: user@example.org %depends: \\* build2 .+% %depends: \\* bpkg .+% + bootstrap-build:\\ + project = libhello + %.+ + \\ + root-build:\\ + cxx.std = latest + %.+ + \\ location: libhello %fragment: .+% : @@ -116,6 +124,14 @@ email: user@example.org %depends: \\* build2 .+% %depends: \\* bpkg .+% + bootstrap-build:\\ + project = hello + %.+ + \\ + root-build:\\ + cxx.std = latest + %.+ + \\ location: hello %fragment: .+% EOE @@ -148,6 +164,14 @@ email: user@example.org %depends: \\* build2 .+% %depends: \\* bpkg .+% + bootstrap-build:\\ + project = hello + %.+ + \\ + root-build:\\ + cxx.std = latest + %.+ + \\ location: hello %fragment: .+% EOE @@ -181,6 +205,14 @@ email: user@example.org %depends: \\* build2 .+% %depends: \\* bpkg .+% + bootstrap-build:\\ + project = libhello + %.+ + \\ + root-build:\\ + cxx.std = latest + %.+ + \\ location: libhello %fragment: .+% EOE @@ -200,7 +232,11 @@ %. reference: $request_id EOO - %.*:.*%+ + %.+cache:cache% + : 1 + %.+ + : 1 + %.+ EOE } } diff --git a/tests/ci/data.testscript b/tests/ci/data.testscript index 74a1527..6f44c85 100644 --- a/tests/ci/data.testscript +++ b/tests/ci/data.testscript @@ -3,11 +3,11 @@ # Pre-created CI request data directory that will be copied by subsequent # tests and scope setup commands. The common approach will be that group -# scopes copy and modify the parent scope submission directory as required +# scopes copy and modify the parent scope request data directory as required # by the nested tests and scopes. Tests will also clone the parent scope -# submission data directory to optionally modify it, use and cleanup at the -# end. Note that configuration can not be shared between multiple submission -# handler processes. Also we need to make sure that submission data +# request data data directory to optionally modify it, use and cleanup at the +# end. Note that request data directory can not be shared between multiple +# submission handler processes. Also we need to make sure that request data # directories are not cloned while being used by submission handler scripts. 
# data_dir = $regex.replace($path_search('*/request.manifest', $src_base), \ @@ -34,10 +34,10 @@ root_data_dir = $~/$data_dir # The most commonly used submission data directory cloning command that copies # it from the parent scope working directory. # -clone_data = cp --no-cleanup -r ../$data_dir ./ -clone_data_clean = cp --no-cleanup -r ../$data_dir ./ &$data_dir/*** +clone_data = [cmdline] cp --no-cleanup -r ../$data_dir ./ +clone_data_clean = [cmdline] cp --no-cleanup -r ../$data_dir ./ &$data_dir/*** # Clones the original submission data directory. # -clone_root_data = cp --no-cleanup -r $root_data_dir ./ -clone_root_data_clean = cp --no-cleanup -r $root_data_dir ./ &$data_dir/*** +clone_root_data = [cmdline] cp --no-cleanup -r $root_data_dir ./ +clone_root_data_clean = [cmdline] cp --no-cleanup -r $root_data_dir ./ &$data_dir/*** diff --git a/tests/load/1/math/libexp-+2-1.2+1.tar.gz b/tests/load/1/math/libexp-+2-1.2+1.tar.gz Binary files differindex 5beeb84..b223d9f 100644 --- a/tests/load/1/math/libexp-+2-1.2+1.tar.gz +++ b/tests/load/1/math/libexp-+2-1.2+1.tar.gz diff --git a/tests/load/1/math/libfoo-+0-X.Y.tar.gz b/tests/load/1/math/libfoo-+0-X.Y.tar.gz Binary files differindex 6867d4f..95364bb 100644 --- a/tests/load/1/math/libfoo-+0-X.Y.tar.gz +++ b/tests/load/1/math/libfoo-+0-X.Y.tar.gz diff --git a/tests/load/1/math/libfoo-1.0.tar.gz b/tests/load/1/math/libfoo-1.0.tar.gz Binary files differindex 2d445ec..3f23ab9 100644 --- a/tests/load/1/math/libfoo-1.0.tar.gz +++ b/tests/load/1/math/libfoo-1.0.tar.gz diff --git a/tests/load/1/math/libfoo-1.2.4+1.tar.gz b/tests/load/1/math/libfoo-1.2.4+1.tar.gz Binary files differindex 74678eb..db22a19 100644 --- a/tests/load/1/math/libfoo-1.2.4+1.tar.gz +++ b/tests/load/1/math/libfoo-1.2.4+1.tar.gz diff --git a/tests/load/1/math/libfoo-benchmarks-1.2.4.tar.gz b/tests/load/1/math/libfoo-benchmarks-1.2.4.tar.gz Binary files differindex 391eb6f..f1c9ba0 100644 --- a/tests/load/1/math/libfoo-benchmarks-1.2.4.tar.gz +++ b/tests/load/1/math/libfoo-benchmarks-1.2.4.tar.gz diff --git a/tests/load/1/math/libfoo-examples-1.2.4.tar.gz b/tests/load/1/math/libfoo-examples-1.2.4.tar.gz Binary files differindex eac5190..00164e6 100644 --- a/tests/load/1/math/libfoo-examples-1.2.4.tar.gz +++ b/tests/load/1/math/libfoo-examples-1.2.4.tar.gz diff --git a/tests/load/1/math/libfoo-tests-1.2.4.tar.gz b/tests/load/1/math/libfoo-tests-1.2.4.tar.gz Binary files differindex 223e24d..84a7913 100644 --- a/tests/load/1/math/libfoo-tests-1.2.4.tar.gz +++ b/tests/load/1/math/libfoo-tests-1.2.4.tar.gz diff --git a/tests/load/1/math/libpq-0.tar.gz b/tests/load/1/math/libpq-0.tar.gz Binary files differindex a689660..d4beb18 100644 --- a/tests/load/1/math/libpq-0.tar.gz +++ b/tests/load/1/math/libpq-0.tar.gz diff --git a/tests/load/1/math/libstudxml-1.0.0+1.tar.gz b/tests/load/1/math/libstudxml-1.0.0+1.tar.gz Binary files differindex 41c9637..dcf0ee5 100644 --- a/tests/load/1/math/libstudxml-1.0.0+1.tar.gz +++ b/tests/load/1/math/libstudxml-1.0.0+1.tar.gz diff --git a/tests/load/1/math/packages.manifest b/tests/load/1/math/packages.manifest index ea37e2a..574370a 100644 --- a/tests/load/1/math/packages.manifest +++ b/tests/load/1/math/packages.manifest @@ -1,5 +1,5 @@ : 1 -sha256sum: b85ba3a0ba45b98e1fbb2507f199bc4b218a4a413ec6ba4094e214a7507490a2 +sha256sum: 521d17cbd396275aa9eb9b00d456beaaaabae1c004eff6de712bb615c18bb59b : name: libexp version: +2-1.2+1 @@ -19,15 +19,23 @@ builds: default legacy build-include: windows**d/x86_64** build-include: windows-vc_13** 
build-exclude: **; Only supported on Windows. +bootstrap-build:\ +project = libexp + +\ location: libexp-+2-1.2+1.tar.gz -sha256sum: 317c8c6f45d9dfdfdef3a823411920cecd51729c7c4f58f9a0b0bbd681c07bd6 +sha256sum: d90cfe583890cd0c05cdfc204e69dd3b986c2da49851f7a87fa0ca870788ff79 : name: libfoo version: +0-X.Y summary: The Foo Library license: MIT +bootstrap-build:\ +project = libfoo + +\ location: libfoo-+0-X.Y.tar.gz -sha256sum: c994fd49f051ab7fb25f3a4e68ca878e484c5d3c2cb132b37d41224b0621b618 +sha256sum: c25e5cae2f72664a3961c3ef88a82e67150c4bcc2a5e1fb4d250e621c5574187 : name: libfoo version: 1.0 @@ -37,8 +45,12 @@ build-email: foo-builds@example.com builds: default legacy; Stable configurations only. builds: -32; 64-bit targets only builds: &msvc_13_up; Not too old MSVC. +bootstrap-build:\ +project = libfoo + +\ location: libfoo-1.0.tar.gz -sha256sum: e89c6d746f8b1ea3ec58d294946d2f683d133438d2ac8c88549ba24c19627e76 +sha256sum: 7382152bac5b4ce10215a5ecd6c94c490d0efc007031d3b03f407d068b74e624 : name: libfoo version: 1.2.4+1 @@ -48,24 +60,39 @@ license: LGPLv2, MIT; If using with GNU TLS. license: BSD; If using with OpenSSL. topics: math library, math API, libbaz fork keywords: c++ foo math best -description: \ +description:\ A modern C++ library with easy to use linear algebra and lot of optimization tools. There are over 100 functions in total with an extensive test suite. The API is -similar to ~~mathlab~~ **MATLAB**. +similar to ~~mathlab~~ **MATLAB**.[^mathlab] Useful for conversion of research code into production environments. +[^mathlab]: MATLAB Capabilities: TODO \ description-type: text/markdown -changes: \ -1.2.4+1 +package-description:\ +This project builds and defines the build2 package for the libfoo library. + +A modern C++ library with easy to use linear algebra and lot of optimization +tools. + +There are over 100 functions in total with an extensive test suite. The API is +similar to ~~mathlab~~ **MATLAB**.[^mathlab] + +Useful for conversion of research code into production environments. +[^mathlab]: MATLAB Capabilities: TODO +\ +package-description-type: text/markdown +changes:\ +**1.2.4+1** * applied patch for critical bug-219 * regenerated documentation -1.2.4 +**1.2.4** * test suite extended significantly \ +changes-type: text/markdown url: http://www.example.com/foo/; Project home page. doc-url: http://www.example.org/projects/libfoo/man.xhtml; Documentation page. src-url: http://scm.example.com/?p=odb/libodb.git\;a=tree; Source tree url. @@ -74,47 +101,94 @@ email: foo-users@example.com; Public mailing list. Read FAQ before posting. package-email: pack@example.com; Current packager. depends: libmisc < 1.1 | libmisc > 2.3.0+0; Crashes with 1.1.0-2.3.0. depends: libexp >= 1.0 -depends: ? libstudxml | libexpat; The newer the better. +depends: libstudxml ? ($cxx.target.class == 'windows') | libexpat ?\ + ($cxx.target.class != 'windows'); The newer the better. requires: linux | windows | macosx; Symbian support is coming. requires: c++11 requires: ? ; libc++ standard library if using Clang on Mac OS X. -requires: ? vc++ >= 12.0; Only if using VC++ on Windows. -tests: libfoo-tests == 1.2.4 +requires: ; X11 libs. +requires: ? ($windows); Only 64-bit. +requires: x86_64 ? ; Only if on Windows. +requires: * vc++ >= 12.0 ? (windows); Only if using VC++ on Windows. +requires: host +tests: * libfoo-tests == 1.2.4 ? 
(!$defined(config.libfoo_tests.test))\ + config.libfoo_tests.test=libfoo examples: libfoo-examples benchmarks: libfoo-benchmarks > 0.0.1 +builds: all +network-build-include: windows-vc_14d/x86_64-microsoft-win32-msvc14.0 +network-build-exclude: ** +network-build-config: config.libfoo.network=true; Enable networking API. +cache-builds: default +cache-builds: -linux +cache-build-include: windows-vc_14d/x86_64-microsoft-win32-msvc14.0 +cache-build-exclude: ** +cache-build-config:\ +config.libfoo.cache=true +config.libfoo.buffer=4096 +; +Enable caching. +\ +bootstrap-build:\ +project = libfoo + +\ +root-build:\ +config [bool] config.libfoo.network ?= false + +config [bool] config.libfoo.cache ?= false +config [uint64] config.libfoo.buffer ?= 1024 + +\ location: libfoo-1.2.4+1.tar.gz -sha256sum: c02b6033107387e05f48aa62ee6498152c967deb0e91a62f1e618fe9fd1bc644 +sha256sum: ffce9d3e3ca9899d3fd6da1f6b93c07cce2c3f6b7004948b59757dae420f801b : name: libfoo-benchmarks version: 1.2.4 summary: The Foo Math Library benchmarks license: MIT builds: 64; Fails building for 32 bits. +bootstrap-build:\ +project = libfoo-benchmarks + +\ location: libfoo-benchmarks-1.2.4.tar.gz -sha256sum: ba664343db5b9bd574450175834b0dd39d038dcff7387477b6eff0d5783a8ac4 +sha256sum: 8392db99b1ea0c78fe2c73d8c0ae35f8a31d798c8ed26ebf09b4bf557b4e3ce0 : name: libfoo-examples version: 1.2.4 summary: The Foo Math Library examples license: MIT builds: 64; Fails building for 32 bits. +bootstrap-build:\ +project = libfoo-examples + +\ location: libfoo-examples-1.2.4.tar.gz -sha256sum: 1343d1826c3ae5446ad965bc9aa7b1586e4238c7736c344e63a4a6bae3d57a88 +sha256sum: de1bf595994a63361262727594de94edbd77fff8234066da74672e44eb4349f2 : name: libfoo-tests version: 1.2.4 summary: The Foo Math Library tests license: MIT builds: 64; Fails building for 32 bits. +bootstrap-build:\ +project = libfoo-tests + +\ +root-build:\ +config [strings] config.libfoo_tests.test + +\ location: libfoo-tests-1.2.4.tar.gz -sha256sum: c5c0520b4e612fa2f8948c42824f3e199926c2395bf2c2f898e83f9eb19261a4 +sha256sum: 29a97b3356c42602dd81ee2766c242f8974b0a92d8560cb107dd464655d3d527 : name: libpq version: 0 summary: PostgreSQL C API client library license: PostgreSQL License; Permissive free software license. keywords: postgresql database client library c -description: \ +description:\ PostgreSQL is an object-relational SQL database management system with libpq being its C client library. Applications can use this library to pass queries to the PostgreSQL backend server and to receive the results of those queries @@ -142,8 +216,12 @@ package-url: https://git.build2.org/cgit/packaging/postgresql/ email: pgsql-general@postgresql.org; Mailing list. package-email: packaging@build2.org; Mailing list. 
requires: build2 >= 0.4.0 +bootstrap-build:\ +project = libpq + +\ location: libpq-0.tar.gz -sha256sum: 75958d000b641c588cdf48e3574584e070104097702dccffdad77947e37f9bd0 +sha256sum: 2aee2bb1d58d51c657903bbab6253c5d4566b6f3f299ba118da24c7756caebfd : name: libstudxml version: 1.0.0+1 @@ -158,5 +236,9 @@ build-warning-email: studxml-warnings@example.com build-error-email: studxml-errors@example.com depends: libexpat >= 2.0.0 depends: libgenx +bootstrap-build:\ +project = libstudxml + +\ location: libstudxml-1.0.0+1.tar.gz -sha256sum: 1833906dd93ccc0cda832d6a1b3ef9ed7877bb9958b46d9b2666033d4a7919c9 +sha256sum: aa52d5b49ee1bad825cd6bca554f72636e8451f93c74f9a443bafce3c2bf82c0 diff --git a/tests/load/1/math/repositories.manifest b/tests/load/1/math/repositories.manifest index c0293c4..177fcad 100644 --- a/tests/load/1/math/repositories.manifest +++ b/tests/load/1/math/repositories.manifest @@ -8,7 +8,7 @@ role: prerequisite # email: repoman@dev.cppget.org summary: Math C++ package repository -description: \ +description:\ This is the awesome C++ package repository full of remarkable algorithms and APIs. \ diff --git a/tests/load/1/misc/packages.manifest b/tests/load/1/misc/packages.manifest index f02ce01..86620dd 100644 --- a/tests/load/1/misc/packages.manifest +++ b/tests/load/1/misc/packages.manifest @@ -15,6 +15,7 @@ depends: libfoo depends: libmath >= 2.0.0 requires: linux | windows | macosx changes: some changes +changes-type: text/plain location: libbar-2.4.0+3.tar.gz sha256sum: 70ccba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab93 : @@ -25,7 +26,7 @@ license: MIT url: http://www.example.com/foo/ email: foo-users@example.com location: libfoo-1.0.tar.gz -sha256sum: 754cba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab93 +sha256sum: e89c6d746f8b1ea3ec58d294946d2f683d133438d2ac8c88549ba24c19627e76 : name: libfoo version: 0.1 @@ -43,7 +44,7 @@ license: MIT url: http://www.example.com/foo/ email: foo-users@example.com location: libfoo-1.2.4+1.tar.gz -sha256sum: 35ccba3da34dd0296866027a26b6bacf08cacc80f54516d3b8d8eeccbe31ab93 +sha256sum: 6692a487e0908598e36bdeb9c25ed1e4a35bb99587dbc475807d314fa0719ac6 : name: libfoo version: 1.2.4+2 diff --git a/tests/load/1/stable/libfoo-1.0.tar.gz b/tests/load/1/stable/libfoo-1.0.tar.gz Binary files differindex 2d445ec..3f23ab9 100644 --- a/tests/load/1/stable/libfoo-1.0.tar.gz +++ b/tests/load/1/stable/libfoo-1.0.tar.gz diff --git a/tests/load/1/stable/libfoo-1.2.2-alpha.1.tar.gz b/tests/load/1/stable/libfoo-1.2.2-alpha.1.tar.gz Binary files differindex aa5665e..1dfff70 100644 --- a/tests/load/1/stable/libfoo-1.2.2-alpha.1.tar.gz +++ b/tests/load/1/stable/libfoo-1.2.2-alpha.1.tar.gz diff --git a/tests/load/1/stable/libfoo-1.2.2.tar.gz b/tests/load/1/stable/libfoo-1.2.2.tar.gz Binary files differindex 94aca23..22eb89b 100644 --- a/tests/load/1/stable/libfoo-1.2.2.tar.gz +++ b/tests/load/1/stable/libfoo-1.2.2.tar.gz diff --git a/tests/load/1/stable/libfoo-1.2.3+4.tar.gz b/tests/load/1/stable/libfoo-1.2.3+4.tar.gz Binary files differindex 254f355..76439b0 100644 --- a/tests/load/1/stable/libfoo-1.2.3+4.tar.gz +++ b/tests/load/1/stable/libfoo-1.2.3+4.tar.gz diff --git a/tests/load/1/stable/libfoo-1.2.4.tar.gz b/tests/load/1/stable/libfoo-1.2.4.tar.gz Binary files differindex dc64431..da70cd3 100644 --- a/tests/load/1/stable/libfoo-1.2.4.tar.gz +++ b/tests/load/1/stable/libfoo-1.2.4.tar.gz diff --git a/tests/load/1/stable/packages.manifest b/tests/load/1/stable/packages.manifest index 85109f6..f15ab90 100644 --- 
a/tests/load/1/stable/packages.manifest +++ b/tests/load/1/stable/packages.manifest @@ -1,5 +1,5 @@ : 1 -sha256sum: 17ae44db4b176fc7629fe9a6a5e43aa9ab4da0fda2b93f3b1fe6e993ba92cd8b +sha256sum: 2fe1a38177da668bb79d1912ecb5e935e0a77c984b9db522c9321ca205b2863b : name: libfoo version: 1.0 @@ -9,8 +9,12 @@ build-email: foo-builds@example.com builds: default legacy; Stable configurations only. builds: -32; 64-bit targets only builds: &msvc_13_up; Not too old MSVC. +bootstrap-build:\ +project = libfoo + +\ location: libfoo-1.0.tar.gz -sha256sum: e89c6d746f8b1ea3ec58d294946d2f683d133438d2ac8c88549ba24c19627e76 +sha256sum: 7382152bac5b4ce10215a5ecd6c94c490d0efc007031d3b03f407d068b74e624 : name: libfoo version: 1.2.2-alpha.1 @@ -23,8 +27,12 @@ email: foo-users@example.com depends: libmisc [0.1 2.0-) | libmisc [2.0 5.0] depends: libgenx (0.2 3.0) depends: libexpat < 5.2 | libexpat (1 5.1] +bootstrap-build:\ +project = libfoo + +\ location: libfoo-1.2.2-alpha.1.tar.gz -sha256sum: f5d3e9e6e8f9621a638b1375d31f0eb50e6279d8066170b25da21e84198cfd82 +sha256sum: 71321f6616036380ac5c9c5dc81efa04b23577ef9dc18f1ce413587bb57677c9 : name: libfoo version: 1.2.2 @@ -35,8 +43,12 @@ url: http://www.example.com/foo/ email: foo-users@example.com depends: libbar <= 2.4.0 depends: libexp == +2-1.2 +bootstrap-build:\ +project = libfoo + +\ location: libfoo-1.2.2.tar.gz -sha256sum: 088068ea3d69542a153f829cf836013374763148fba0a43d8047974f58b5efd7 +sha256sum: 75d2a7d3eec62d63afd3d3a84d91bd02b05ecb16cd0907d5b0db1fc654e3753f : name: libfoo version: 1.2.3+4 @@ -47,8 +59,12 @@ keywords: c++ foo url: http://www.example.com/foo/ email: foo-users@example.com depends: libmisc >= 2.0.0 +bootstrap-build:\ +project = libfoo + +\ location: libfoo-1.2.3+4.tar.gz -sha256sum: f2ebecac6cac8addd7c623bc1becf055e76b13a0d2dd385832b92c38c58956d8 +sha256sum: 24c53899bd4dbfdde6a727e07724984bfb4ca7f20142291c40e30304f15434c3 : name: libfoo version: 1.2.4 @@ -59,8 +75,13 @@ description: Very good foo library. description-type: text/plain changes: some changes 1 changes: some changes 2 +changes-type: text/plain url: http://www.example.com/foo/ email: foo-users@example.com depends: libmisc >= 2.0.0 +bootstrap-build:\ +project = libfoo + +\ location: libfoo-1.2.4.tar.gz -sha256sum: aa1606323bfc59b70de642629dc5d8318cc5348e3646f90ed89406d975db1e1d +sha256sum: 98f80ca0cd1c053fd45ab37f72a6a31f1a0304747c636822df8d573420284642 diff --git a/tests/load/1/stable/repositories.manifest b/tests/load/1/stable/repositories.manifest index 49a0685..1907ed6 100644 --- a/tests/load/1/stable/repositories.manifest +++ b/tests/load/1/stable/repositories.manifest @@ -14,35 +14,36 @@ role: prerequisite email: repoman@dev.cppget.org; public mailing list summary: General C++ package stable repository description: This is the awesome C++ package repository full of exciting stuff. 
-certificate: \ +certificate:\ -----BEGIN CERTIFICATE----- -MIIFOzCCAyOgAwIBAgIJAIsajMs6HOxHMA0GCSqGSIb3DQEBCwUAMDcxFzAVBgNV -BAoMDkNvZGUgU3ludGhlc2lzMRwwGgYDVQQDDBNuYW1lOmRldi5jcHBnZXQub3Jn -MB4XDTE3MDcwNzA2MzgzNFoXDTIyMDcwNjA2MzgzNFowNzEXMBUGA1UECgwOQ29k -ZSBTeW50aGVzaXMxHDAaBgNVBAMME25hbWU6ZGV2LmNwcGdldC5vcmcwggIiMA0G -CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDK8NqNbQckQpv9B3hBRjnTcpkgKq2e -1HOFLQJxgS1TS2QfqUTKePpd10LbDgXOhI2iycKCf7Zv/uf3RE+VyQ/BthNUvQ0O -bWPsEKo+DQOLPjqIaS+u2bmMXzCDjwjufbd9ruPY2PYRTBOsXgTL1+GGIQu0bP5u -i1mEGn95xuYhEJ4x1UUsVWV0l0D37orV/OaOVffPY3xhlQE++aiXLptof1gzM2D8 -lsQPvWLizrtDAHpiwb4oXQQbifDyeXj+qh7OdIqL10rxZZ/0Q0GqrTOyeSlXuo5i -C3MdNSlRmWNGqvPwpushFBQec04exXI3AjQZ/DUlMxtDx2xIqQMtaYOQ5iqm9426 -crgrUoXZG/5ePYTCmnSbpZVak9md44inJWqSESTL0+EfWuLdXop0QV7LZrIaV2pV -BJba0/jiS5mltR/ikiJ7gaP/bbfutJGGfzyk1PrvyehhK/snGUh6Nr0NMHozS+J+ -7QXdSEMjLXbmF5hBsvEfrGub+YSexEEODA34YnBIA453ph4CIo/3nTpDLrm3EkSF -1jV5vGhg3vzB6v+TIP9MXALm4/NUurn8I643KMoNSS9RCDuiqLnE8V1uCmSP8LR8 -OO7vxlmaM/OfqHehAALgsU/KFT1lgpAfHE2x5YBxT6s407DJJpaPkbHMiCNHScWQ -5ezqnH0UMNwsawIDAQABo0owSDAOBgNVHQ8BAf8EBAMCB4AwFgYDVR0lAQH/BAww -CgYIKwYBBQUHAwMwHgYDVR0RBBcwFYETaW5mb0BkZXYuY3BwZ2V0Lm9yZzANBgkq -hkiG9w0BAQsFAAOCAgEAlJnw+1TMlY8GGKwF0MBfH0bAW+AhkHd7E33DSFoU12LT -mx99QaiFd4r2dkE3dA4c8OJXK+vzhi2R+kWvzR+ZF4XEGfkM6Z6pZWUK8/5hymiq -pXbPQA21QHJh1RkN0xOxaPwjB4BWj2hk2aUqiuH/R4c2u6U4RfSwimBSbI+QSqF3 -Ho5eAuaezicxWKRQya70FpXGFn+vN6E9HZ8mlp+7eSV3A4eYKaGliqfoVHagYaFz -EM/SFueGhynAHtWzx21f3RhlPWJ1QZcLQayZT8980KJKWO70abKZdcuOTpYBDiYZ -SKcAu4fhCWuhkxlKltwxdRx1FqE/UZpoj2LJnw5pEzVmF9X30VC1f5F6YWicedJr -GCmdQhK3qPZKvNM7i19IBlizo5BKuVB6TsdxWgTTzmOZN6oEwsbVtGTxPek7jGJj -V0vi3zeCCaGJ5K+t6MahAT47CpA/+lJVLCGT6Clw9DvFEJmIr01bmD9uUGZwIgc3 -w88Hh4ap5/u7w07cNwYtncA7cKQCBG9vXi2cXpudBL6uLeM5rqYBSD5hj4zDjzpd -VglIFXohfyMfGh3kDPkQ4dw627S0NuxzmocE0jjdsXfQuLNeg+JRNEHB8QPwTC8X -EY1xZfPv9XzlVQxd7gLDKA8QbbKWpNe73XMoZXUyeyVuf5q1g+c6m1uPB5jJpdw= +MIIFRjCCAy6gAwIBAgIUc9xEjZAXCpw+00SGYGDTY0t2nLUwDQYJKoZIhvcNAQEL +BQAwNzEXMBUGA1UECgwOQ29kZSBTeW50aGVzaXMxHDAaBgNVBAMME25hbWU6ZGV2 +LmNwcGdldC5vcmcwHhcNMjExMTI0MTIxNjMwWhcNMzExMTIyMTIxNjMwWjA3MRcw +FQYDVQQKDA5Db2RlIFN5bnRoZXNpczEcMBoGA1UEAwwTbmFtZTpkZXYuY3BwZ2V0 +Lm9yZzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMrw2o1tByRCm/0H +eEFGOdNymSAqrZ7Uc4UtAnGBLVNLZB+pRMp4+l3XQtsOBc6EjaLJwoJ/tm/+5/dE +T5XJD8G2E1S9DQ5tY+wQqj4NA4s+OohpL67ZuYxfMIOPCO59t32u49jY9hFME6xe +BMvX4YYhC7Rs/m6LWYQaf3nG5iEQnjHVRSxVZXSXQPfuitX85o5V989jfGGVAT75 +qJcum2h/WDMzYPyWxA+9YuLOu0MAemLBvihdBBuJ8PJ5eP6qHs50iovXSvFln/RD +QaqtM7J5KVe6jmILcx01KVGZY0aq8/Cm6yEUFB5zTh7FcjcCNBn8NSUzG0PHbEip +Ay1pg5DmKqb3jbpyuCtShdkb/l49hMKadJullVqT2Z3jiKclapIRJMvT4R9a4t1e +inRBXstmshpXalUEltrT+OJLmaW1H+KSInuBo/9tt+60kYZ/PKTU+u/J6GEr+ycZ +SHo2vQ0wejNL4n7tBd1IQyMtduYXmEGy8R+sa5v5hJ7EQQ4MDfhicEgDjnemHgIi +j/edOkMuubcSRIXWNXm8aGDe/MHq/5Mg/0xcAubj81S6ufwjrjcoyg1JL1EIO6Ko +ucTxXW4KZI/wtHw47u/GWZoz85+od6EAAuCxT8oVPWWCkB8cTbHlgHFPqzjTsMkm +lo+RscyII0dJxZDl7OqcfRQw3CxrAgMBAAGjSjBIMA4GA1UdDwEB/wQEAwIHgDAW +BgNVHSUBAf8EDDAKBggrBgEFBQcDAzAeBgNVHREEFzAVgRNpbmZvQGRldi5jcHBn +ZXQub3JnMA0GCSqGSIb3DQEBCwUAA4ICAQBvVUHRUj9vR+QgDQGOtXBcOB1G/1xX +1gU6ivjP9UzZEmXmqukgx0aYcjxctAm7Yf0lsj2xOwmVPGcMC3tGYJG8yOvpW5dQ +8uOGmNqNaRq7AJN4gio6uc9dkipNhcBmVilY08yv67wGaIGhHt4kbDiF/3YAzsMR +/YfVQ3Kb4EyQpC/p7dsio2/m8gRb5lUr6K22fdPr2AfQbdNsAF6EMAfEfCDC9yAa +uzB5Dc+wpqWvsPp+ohiroJqC99hwmfzPMxAYTB2cmEqmVHGSwqoC9bn7fI59t9l6 +N2fynRhenLookYfy7jqb4a6O702fAHefGD6teHYbTpg6dwlVY/PLI+T1SPSisH3k +jS2WV03FK8aTNGe7E2RBxH2smca0Z1oaJ9RaluZ9HIRrw434m9+z01DL5w0EWRpC 
+qa62iYSgGFcxkXRwb8VeWPtGb9/CPXtVFDtW19mOeeHqr8/xVOCjR2MCKAsxLazl +yGQhAUu19n2y7vuj4FOEeJ1mwaaUyu2MPqZWZ3loM14muk/ZJfFsJRfdHg7+hSel +alE0ujce0to39AApGZRIwozut17hYjl5m3314+46KaNuxRmo2xw5wNl8UslEgQYC +fK3CY+6p9n64QJXnE+2+KGrDuYYXQP3TClHLv/IftgAlf6cZxu4RsNdvUsU15M0J +BGQjsz7h0bI3fw== -----END CERTIFICATE----- \
\ No newline at end of file diff --git a/tests/load/1/stable/signature.manifest b/tests/load/1/stable/signature.manifest index 45d160e..bb18e13 100644 --- a/tests/load/1/stable/signature.manifest +++ b/tests/load/1/stable/signature.manifest @@ -1,13 +1,13 @@ : 1 -sha256sum: f4fadfdc3586c8b94dce871328625f43636e43b4a073c4ee577af41780eeb8f3 -signature: \ -J40HIcLQQI+1rzXzRo0OOJRk1NTmLNNFk+RUi/NM7M6vUFjjq4nIABR+bg8RdenpybVASTyJYNTL -/pLStEF+hg5SnufsYvpJJmstBMY4JXAcOfngUz4UVe8QS9zu3YGL5y/MBnYU+SKBbHnjRsTbRgak -LjoVbTKH6kkPkZ8MZMXzo/0un2L4w4uZLNDeqjJBg2OFztPIV3RYzBdCcxJ+k20UJjVfmNQWaBki -eaXVIsOxQ1bqWfg5uvACYvSgAIB+6ZIQFf8VYAJvnSieTswyWTxBXvj0D6aNnksQisaSbsRUuVb/ -zKYNWDrZFOjlNLEt+3bv3WQ5r0RqeRu7x9tdlXMfX/z9qu3SRe/mmaEpArkO67OKuVW1EDIo4Pwd -6zOQZUBvy3wd5RJWCfEbcuIBNBWL3jWOhWK36V9NzCWzeDzh22D0P9WEYNGd9Xn0+GeXpuiD/1w1 -IkVy9FA/2DYBb/UgNvWLaGWeTFqmv+ZcLKs8uuNHoM33EpI2jQetDS24QeOmqImmRsLROjyCxVye -yU3Dew5HRLK3KxNtC5XewK24eEdAQID7UEesn7X/MYfuyS1hq415YUibZswG7UIChj082xKHHcME -zrEgdJ29+I3ZQkpTBaY/77Pu6UFvcpPbak0SCc6FHPvWKwXHPv2ATsInKs0J/WLdsL8sgaM2f3I= +sha256sum: defa57373e20beb8f22a391b1e97ec7f5e50469e050c2551e8522fab74f304e1 +signature:\ +f9b/Q+mBos6MwwFPIiIBqSEidqO+rMsktQ1ESWEkO48uHN+hjNCog0gp3Vf4QHj0p2KKU5Uz4exj +8h7R3RB0F4B10/lDyHw4XlvAyP1uE+YS76rEXHXuGBEnGvBK9818WkCJ5yfFJYg3AuGt2Cyd3QHF +Uxv+fDkI05KrZNGzLo9euDr6yhHOMTjwSntu/lt6ytfyzTFHcs0xOM03qEtszD5QrNdC32z7kmDE +8ISUlMUyqOjsz8h25F04NyiojccGTpfUTgqA2zXqMAwRn+fG9wU5Vwnau/oIcAO+nUruR4i1VrDd +D5q/gjbOpcBTt7bmbVInR5glbgdPZ7r7gpqfOVwybxeTrArj72jA/XmmYyZlaTTW1RXcJuRIWAcP +2Z61O+cwP9CqW8ktQDNGkgDxxXJR7aEG64G8q7uZeb6v1FaQCwo2JEe/Tv0JDp+DBVKwBm7ZDZi/ +TVtltbADgISCU8bTVz/r4Q0qwHeiQo2GV+Di4h7KvWS8H2Q7sjpyWrI3/UzujOp+zB/BP+6dNZTr +6Mf8CJ+9L5YY4lzX9jeVQLOuKOqLLZWD2VQiyaYZp79X5OtSHuNvCWcaWUa9Rpu/goLCPrk6QHD+ +wUTYcIdsEbX2jDN3YQwe53WklytPbMxy7taRF1obpGOpDDju3InD3IRXS7ch4G9XzqtsylzMIeE= \ diff --git a/tests/load/cert b/tests/load/cert index 893d84d..c2da7fa 100755 --- a/tests/load/cert +++ b/tests/load/cert @@ -6,17 +6,18 @@ # Copy cert.pem content to the certificate value of the following manifest # files: -# 1/stable/repositories -# pkg/1/dev.cppget.org/signed/repositories +# 1/stable/repositories.manifest +# pkg/1/dev.cppget.org/signed/repositories.manifest # -openssl req -x509 -new -key key.pem -days 1825 -config openssl.cnf > cert.pem +openssl req -x509 -new -key key.pem -days 3650 -config openssl.cnf > cert.pem # To regenerate the packages and signature manifest files run: # -# ../../../bpkg/bpkg/bpkg rep-create 1/stable --key key.pem -# ../../../bpkg/bpkg/bpkg rep-create pkg/1/dev.cppget.org/signed --key key.pem +# bpkg rep-create 1/math +# bpkg rep-create 1/stable --key key.pem +# bpkg rep-create pkg/1/dev.cppget.org/signed --key key.pem # # Update certificate fingerprint in loadtab for dev.cppget.org/signed # repository. 
To print the fingerprint run: # -# ../../../bpkg/bpkg/bpkg rep-info --cert-fingerprint pkg/1/dev.cppget.org/signed/ +# bpkg rep-info --cert-fingerprint pkg/1/dev.cppget.org/signed/ diff --git a/tests/load/cert.pem b/tests/load/cert.pem index dc7c756..13a55f4 100644 --- a/tests/load/cert.pem +++ b/tests/load/cert.pem @@ -1,30 +1,31 @@ -----BEGIN CERTIFICATE----- -MIIFOzCCAyOgAwIBAgIJAIsajMs6HOxHMA0GCSqGSIb3DQEBCwUAMDcxFzAVBgNV -BAoMDkNvZGUgU3ludGhlc2lzMRwwGgYDVQQDDBNuYW1lOmRldi5jcHBnZXQub3Jn -MB4XDTE3MDcwNzA2MzgzNFoXDTIyMDcwNjA2MzgzNFowNzEXMBUGA1UECgwOQ29k -ZSBTeW50aGVzaXMxHDAaBgNVBAMME25hbWU6ZGV2LmNwcGdldC5vcmcwggIiMA0G -CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDK8NqNbQckQpv9B3hBRjnTcpkgKq2e -1HOFLQJxgS1TS2QfqUTKePpd10LbDgXOhI2iycKCf7Zv/uf3RE+VyQ/BthNUvQ0O -bWPsEKo+DQOLPjqIaS+u2bmMXzCDjwjufbd9ruPY2PYRTBOsXgTL1+GGIQu0bP5u -i1mEGn95xuYhEJ4x1UUsVWV0l0D37orV/OaOVffPY3xhlQE++aiXLptof1gzM2D8 -lsQPvWLizrtDAHpiwb4oXQQbifDyeXj+qh7OdIqL10rxZZ/0Q0GqrTOyeSlXuo5i -C3MdNSlRmWNGqvPwpushFBQec04exXI3AjQZ/DUlMxtDx2xIqQMtaYOQ5iqm9426 -crgrUoXZG/5ePYTCmnSbpZVak9md44inJWqSESTL0+EfWuLdXop0QV7LZrIaV2pV -BJba0/jiS5mltR/ikiJ7gaP/bbfutJGGfzyk1PrvyehhK/snGUh6Nr0NMHozS+J+ -7QXdSEMjLXbmF5hBsvEfrGub+YSexEEODA34YnBIA453ph4CIo/3nTpDLrm3EkSF -1jV5vGhg3vzB6v+TIP9MXALm4/NUurn8I643KMoNSS9RCDuiqLnE8V1uCmSP8LR8 -OO7vxlmaM/OfqHehAALgsU/KFT1lgpAfHE2x5YBxT6s407DJJpaPkbHMiCNHScWQ -5ezqnH0UMNwsawIDAQABo0owSDAOBgNVHQ8BAf8EBAMCB4AwFgYDVR0lAQH/BAww -CgYIKwYBBQUHAwMwHgYDVR0RBBcwFYETaW5mb0BkZXYuY3BwZ2V0Lm9yZzANBgkq -hkiG9w0BAQsFAAOCAgEAlJnw+1TMlY8GGKwF0MBfH0bAW+AhkHd7E33DSFoU12LT -mx99QaiFd4r2dkE3dA4c8OJXK+vzhi2R+kWvzR+ZF4XEGfkM6Z6pZWUK8/5hymiq -pXbPQA21QHJh1RkN0xOxaPwjB4BWj2hk2aUqiuH/R4c2u6U4RfSwimBSbI+QSqF3 -Ho5eAuaezicxWKRQya70FpXGFn+vN6E9HZ8mlp+7eSV3A4eYKaGliqfoVHagYaFz -EM/SFueGhynAHtWzx21f3RhlPWJ1QZcLQayZT8980KJKWO70abKZdcuOTpYBDiYZ -SKcAu4fhCWuhkxlKltwxdRx1FqE/UZpoj2LJnw5pEzVmF9X30VC1f5F6YWicedJr -GCmdQhK3qPZKvNM7i19IBlizo5BKuVB6TsdxWgTTzmOZN6oEwsbVtGTxPek7jGJj -V0vi3zeCCaGJ5K+t6MahAT47CpA/+lJVLCGT6Clw9DvFEJmIr01bmD9uUGZwIgc3 -w88Hh4ap5/u7w07cNwYtncA7cKQCBG9vXi2cXpudBL6uLeM5rqYBSD5hj4zDjzpd -VglIFXohfyMfGh3kDPkQ4dw627S0NuxzmocE0jjdsXfQuLNeg+JRNEHB8QPwTC8X -EY1xZfPv9XzlVQxd7gLDKA8QbbKWpNe73XMoZXUyeyVuf5q1g+c6m1uPB5jJpdw= +MIIFRjCCAy6gAwIBAgIUc9xEjZAXCpw+00SGYGDTY0t2nLUwDQYJKoZIhvcNAQEL +BQAwNzEXMBUGA1UECgwOQ29kZSBTeW50aGVzaXMxHDAaBgNVBAMME25hbWU6ZGV2 +LmNwcGdldC5vcmcwHhcNMjExMTI0MTIxNjMwWhcNMzExMTIyMTIxNjMwWjA3MRcw +FQYDVQQKDA5Db2RlIFN5bnRoZXNpczEcMBoGA1UEAwwTbmFtZTpkZXYuY3BwZ2V0 +Lm9yZzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMrw2o1tByRCm/0H +eEFGOdNymSAqrZ7Uc4UtAnGBLVNLZB+pRMp4+l3XQtsOBc6EjaLJwoJ/tm/+5/dE +T5XJD8G2E1S9DQ5tY+wQqj4NA4s+OohpL67ZuYxfMIOPCO59t32u49jY9hFME6xe +BMvX4YYhC7Rs/m6LWYQaf3nG5iEQnjHVRSxVZXSXQPfuitX85o5V989jfGGVAT75 +qJcum2h/WDMzYPyWxA+9YuLOu0MAemLBvihdBBuJ8PJ5eP6qHs50iovXSvFln/RD +QaqtM7J5KVe6jmILcx01KVGZY0aq8/Cm6yEUFB5zTh7FcjcCNBn8NSUzG0PHbEip +Ay1pg5DmKqb3jbpyuCtShdkb/l49hMKadJullVqT2Z3jiKclapIRJMvT4R9a4t1e +inRBXstmshpXalUEltrT+OJLmaW1H+KSInuBo/9tt+60kYZ/PKTU+u/J6GEr+ycZ +SHo2vQ0wejNL4n7tBd1IQyMtduYXmEGy8R+sa5v5hJ7EQQ4MDfhicEgDjnemHgIi +j/edOkMuubcSRIXWNXm8aGDe/MHq/5Mg/0xcAubj81S6ufwjrjcoyg1JL1EIO6Ko +ucTxXW4KZI/wtHw47u/GWZoz85+od6EAAuCxT8oVPWWCkB8cTbHlgHFPqzjTsMkm +lo+RscyII0dJxZDl7OqcfRQw3CxrAgMBAAGjSjBIMA4GA1UdDwEB/wQEAwIHgDAW +BgNVHSUBAf8EDDAKBggrBgEFBQcDAzAeBgNVHREEFzAVgRNpbmZvQGRldi5jcHBn +ZXQub3JnMA0GCSqGSIb3DQEBCwUAA4ICAQBvVUHRUj9vR+QgDQGOtXBcOB1G/1xX +1gU6ivjP9UzZEmXmqukgx0aYcjxctAm7Yf0lsj2xOwmVPGcMC3tGYJG8yOvpW5dQ +8uOGmNqNaRq7AJN4gio6uc9dkipNhcBmVilY08yv67wGaIGhHt4kbDiF/3YAzsMR 
+/YfVQ3Kb4EyQpC/p7dsio2/m8gRb5lUr6K22fdPr2AfQbdNsAF6EMAfEfCDC9yAa +uzB5Dc+wpqWvsPp+ohiroJqC99hwmfzPMxAYTB2cmEqmVHGSwqoC9bn7fI59t9l6 +N2fynRhenLookYfy7jqb4a6O702fAHefGD6teHYbTpg6dwlVY/PLI+T1SPSisH3k +jS2WV03FK8aTNGe7E2RBxH2smca0Z1oaJ9RaluZ9HIRrw434m9+z01DL5w0EWRpC +qa62iYSgGFcxkXRwb8VeWPtGb9/CPXtVFDtW19mOeeHqr8/xVOCjR2MCKAsxLazl +yGQhAUu19n2y7vuj4FOEeJ1mwaaUyu2MPqZWZ3loM14muk/ZJfFsJRfdHg7+hSel +alE0ujce0to39AApGZRIwozut17hYjl5m3314+46KaNuxRmo2xw5wNl8UslEgQYC +fK3CY+6p9n64QJXnE+2+KGrDuYYXQP3TClHLv/IftgAlf6cZxu4RsNdvUsU15M0J +BGQjsz7h0bI3fw== -----END CERTIFICATE----- diff --git a/tests/load/driver.cxx b/tests/load/driver.cxx index 8192827..0f2c8de 100644 --- a/tests/load/driver.cxx +++ b/tests/load/driver.cxx @@ -3,15 +3,14 @@ #include <iostream> #include <exception> -#include <algorithm> // sort(), find() #include <odb/session.hxx> #include <odb/transaction.hxx> #include <odb/pgsql/database.hxx> -#include <libbutl/process.mxx> -#include <libbutl/filesystem.mxx> +#include <libbutl/process.hxx> +#include <libbutl/filesystem.hxx> #include <libbrep/types.hxx> #include <libbrep/utility.hxx> @@ -19,6 +18,9 @@ #include <libbrep/package.hxx> #include <libbrep/package-odb.hxx> +#undef NDEBUG +#include <cassert> + using std::cerr; using std::endl; @@ -27,7 +29,6 @@ using namespace butl; using namespace brep; using labels = small_vector<string, 5>; -using req_alts = small_vector<string, 1>; static const path packages ("packages.manifest"); static const path repositories ("repositories.manifest"); @@ -37,7 +38,7 @@ check_location (shared_ptr<package>& p) { if (p->internal ()) return p->location && *p->location == - path (p->name.string () + "-" + p->version.string () + ".tar.gz"); + path (p->name.string () + '-' + p->version.string () + ".tar.gz"); else return !p->location; } @@ -56,7 +57,7 @@ check_external (const package& p) !p.internal () && p.other_repositories.size () > 0 && p.priority == priority () && - p.changes.empty () && + !p.changes && p.license_alternatives.empty () && p.dependencies.empty () && p.requirements.empty () && @@ -210,7 +211,7 @@ dep (const char* n, optional<version_constraint> c) static inline version dep_ver (const char* v) { - return version (v, false /* fold_zero_revision */); + return version (v, version::none); } static void @@ -267,7 +268,7 @@ test_git_repos (const cstrings& loader_args, assert (p->dependencies.size () == 1); assert (p->dependencies[0].size () == 1); - assert (p->dependencies[0][0] == + assert (p->dependencies[0][0][0] == dep ("libmisc", version_constraint ( dep_ver ("1.0"), false, dep_ver ("1.0"), false))); @@ -383,7 +384,7 @@ test_pkg_repos (const cstrings& loader_args, assert (fpvxy->other_repositories.empty ()); assert (fpvxy->priority == priority::low); - assert (fpvxy->changes.empty ()); + assert (!fpvxy->changes); assert (fpvxy->license_alternatives.size () == 1); assert (fpvxy->license_alternatives[0].size () == 1); @@ -395,7 +396,7 @@ test_pkg_repos (const cstrings& loader_args, assert (check_location (fpvxy)); assert (fpvxy->sha256sum && *fpvxy->sha256sum == - "c994fd49f051ab7fb25f3a4e68ca878e484c5d3c2cb132b37d41224b0621b618"); + "c25e5cae2f72664a3961c3ef88a82e67150c4bcc2a5e1fb4d250e621c5574187"); assert (fpvxy->buildable); @@ -419,7 +420,7 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv1->other_repositories[1].load () == cr); assert (fpv1->priority == priority::low); - assert (fpv1->changes.empty ()); + assert (!fpv1->changes); assert (fpv1->license_alternatives.size () == 1); assert (fpv1->license_alternatives[0].size () == 1); @@ 
-431,7 +432,7 @@ test_pkg_repos (const cstrings& loader_args, assert (check_location (fpv1)); assert (fpv1->sha256sum && *fpv1->sha256sum == - "e89c6d746f8b1ea3ec58d294946d2f683d133438d2ac8c88549ba24c19627e76"); + "7382152bac5b4ce10215a5ecd6c94c490d0efc007031d3b03f407d068b74e624"); assert (fpv1->buildable); @@ -453,7 +454,7 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv2->internal_repository.load () == sr); assert (fpv2->other_repositories.empty ()); assert (fpv2->priority == priority::low); - assert (fpv2->changes.empty ()); + assert (!fpv2->changes); assert (fpv2->license_alternatives.size () == 1); assert (fpv2->license_alternatives[0].size () == 1); @@ -463,12 +464,12 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv2->dependencies[0].size () == 1); assert (fpv2->dependencies[1].size () == 1); - assert (fpv2->dependencies[0][0] == + assert (fpv2->dependencies[0][0][0] == dep ("libbar", version_constraint ( nullopt, true, dep_ver ("2.4.0"), false))); - assert (fpv2->dependencies[1][0] == + assert (fpv2->dependencies[1][0][0] == dep ("libexp", version_constraint ( dep_ver ("+2-1.2"), false, dep_ver ("+2-1.2"), false))); @@ -476,7 +477,7 @@ test_pkg_repos (const cstrings& loader_args, assert (check_location (fpv2)); assert (fpv2->sha256sum && *fpv2->sha256sum == - "088068ea3d69542a153f829cf836013374763148fba0a43d8047974f58b5efd7"); + "75d2a7d3eec62d63afd3d3a84d91bd02b05ecb16cd0907d5b0db1fc654e3753f"); assert (!fpv2->buildable); @@ -499,7 +500,7 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv2a->internal_repository.load () == sr); assert (fpv2a->other_repositories.empty ()); assert (fpv2a->priority == priority::security); - assert (fpv2a->changes.empty ()); + assert (!fpv2a->changes); assert (fpv2a->license_alternatives.size () == 1); assert (fpv2a->license_alternatives[0].size () == 1); @@ -510,27 +511,27 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv2a->dependencies[1].size () == 1); assert (fpv2a->dependencies[2].size () == 2); - assert (fpv2a->dependencies[0][0] == + assert (fpv2a->dependencies[0][0][0] == dep ("libmisc", version_constraint ( dep_ver ("0.1"), false, dep_ver ("2.0.0-"), true))); - assert (fpv2a->dependencies[0][1] == + assert (fpv2a->dependencies[0][1][0] == dep ("libmisc", version_constraint ( dep_ver ("2.0"), false, dep_ver ("5.0"), false))); - assert (fpv2a->dependencies[1][0] == + assert (fpv2a->dependencies[1][0][0] == dep ("libgenx", version_constraint ( dep_ver ("0.2"), true, dep_ver ("3.0"), true))); - assert (fpv2a->dependencies[2][0] == + assert (fpv2a->dependencies[2][0][0] == dep ("libexpat", version_constraint ( nullopt, true, dep_ver ("5.2"), true))); - assert (fpv2a->dependencies[2][1] == + assert (fpv2a->dependencies[2][1][0] == dep ("libexpat", version_constraint ( dep_ver ("1"), true, dep_ver ("5.1"), false))); @@ -540,7 +541,7 @@ test_pkg_repos (const cstrings& loader_args, assert (check_location (fpv2a)); assert (fpv2a->sha256sum && *fpv2a->sha256sum == - "f5d3e9e6e8f9621a638b1375d31f0eb50e6279d8066170b25da21e84198cfd82"); + "71321f6616036380ac5c9c5dc81efa04b23577ef9dc18f1ce413587bb57677c9"); assert (!fpv2a->buildable); @@ -562,7 +563,7 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv3->other_repositories.empty ()); assert (fpv3->priority == priority::medium); - assert (fpv3->changes.empty ()); + assert (!fpv3->changes); assert (fpv3->license_alternatives.size () == 1); assert (fpv3->license_alternatives[0].size () == 1); @@ -570,7 +571,7 @@ test_pkg_repos (const cstrings& loader_args, assert 
(fpv3->dependencies.size () == 1); assert (fpv3->dependencies[0].size () == 1); - assert (fpv3->dependencies[0][0] == + assert (fpv3->dependencies[0][0][0] == dep ("libmisc", version_constraint ( dep_ver ("2.0.0"), false, nullopt, true))); @@ -578,7 +579,7 @@ test_pkg_repos (const cstrings& loader_args, assert (check_location (fpv3)); assert (fpv3->sha256sum && *fpv3->sha256sum == - "f2ebecac6cac8addd7c623bc1becf055e76b13a0d2dd385832b92c38c58956d8"); + "24c53899bd4dbfdde6a727e07724984bfb4ca7f20142291c40e30304f15434c3"); assert (!fpv3->buildable); @@ -590,7 +591,7 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv4->summary == "The Foo Library"); assert (fpv4->keywords == labels ({"c++", "foo"})); - assert (*fpv4->description == "Very good foo library."); + assert (fpv4->description->text == "Very good foo library."); assert (fpv4->url && fpv4->url->string () == "http://www.example.com/foo/"); assert (!fpv4->package_url); assert (fpv4->email && *fpv4->email == "foo-users@example.com"); @@ -599,7 +600,10 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv4->internal_repository.load () == sr); assert (fpv4->other_repositories.empty ()); assert (fpv4->priority == priority::low); - assert (fpv4->changes == "some changes 1\n\nsome changes 2"); + + assert (fpv4->changes && + fpv4->changes->text == "some changes 1\n\nsome changes 2" && + fpv4->changes->type == text_type::plain); assert (fpv4->license_alternatives.size () == 1); assert (fpv4->license_alternatives[0].comment == @@ -609,7 +613,7 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv4->dependencies.size () == 1); assert (fpv4->dependencies[0].size () == 1); - assert (fpv4->dependencies[0][0] == + assert (fpv4->dependencies[0][0][0] == dep ("libmisc", version_constraint ( dep_ver ("2.0.0"), false, nullopt, true))); @@ -617,7 +621,7 @@ test_pkg_repos (const cstrings& loader_args, assert (check_location (fpv4)); assert (fpv4->sha256sum && *fpv4->sha256sum == - "aa1606323bfc59b70de642629dc5d8318cc5348e3646f90ed89406d975db1e1d"); + "98f80ca0cd1c053fd45ab37f72a6a31f1a0304747c636822df8d573420284642"); assert (!fpv4->buildable); @@ -685,7 +689,7 @@ test_pkg_repos (const cstrings& loader_args, assert (xpv->internal_repository.load () == mr); assert (xpv->other_repositories.empty ()); assert (xpv->priority == priority::low); - assert (xpv->changes.empty ()); + assert (!xpv->changes); assert (xpv->license_alternatives.size () == 1); assert (xpv->license_alternatives[0].size () == 1); @@ -693,20 +697,20 @@ test_pkg_repos (const cstrings& loader_args, assert (xpv->dependencies.size () == 2); assert (xpv->dependencies[0].size () == 1); - assert (xpv->dependencies[0][0] == + assert (xpv->dependencies[0][0][0] == dep ("libexpat", version_constraint ( dep_ver ("2.0.0"), false, nullopt, true))); assert (xpv->dependencies[1].size () == 1); - assert (xpv->dependencies[1][0] == dep ("libgenx", nullopt)); + assert (xpv->dependencies[1][0][0] == dep ("libgenx", nullopt)); assert (xpv->requirements.empty ()); assert (check_location (xpv)); assert (xpv->sha256sum && *xpv->sha256sum == - "1833906dd93ccc0cda832d6a1b3ef9ed7877bb9958b46d9b2666033d4a7919c9"); + "aa52d5b49ee1bad825cd6bca554f72636e8451f93c74f9a443bafce3c2bf82c0"); assert (xpv->buildable); @@ -722,12 +726,28 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv5->topics == labels ({"math library", "math API", "libbaz fork"})); assert (fpv5->keywords == labels ({"c++", "foo", "math", "best"})); - assert (*fpv5->description == + + assert (fpv5->description->text == "A modern 
C++ library with easy to use linear algebra and lot " "of optimization\ntools.\n\nThere are over 100 functions in " "total with an extensive test suite. The API is\nsimilar to " - "~~mathlab~~ **MATLAB**.\n\nUseful for conversion of research " - "code into production environments."); + "~~mathlab~~ **MATLAB**.[^mathlab]\n\nUseful for conversion of " + "research code into production environments.\n" + "[^mathlab]: MATLAB Capabilities: TODO"); + + assert (fpv5->description->type == text_type::github_mark); + + assert (fpv5->package_description->text == + "This project builds and defines the build2 package for the " + "libfoo library.\n\n" + "A modern C++ library with easy to use linear algebra and lot " + "of optimization\ntools.\n\nThere are over 100 functions in " + "total with an extensive test suite. The API is\nsimilar to " + "~~mathlab~~ **MATLAB**.[^mathlab]\n\nUseful for conversion of " + "research code into production environments.\n" + "[^mathlab]: MATLAB Capabilities: TODO"); + + assert (fpv5->package_description->type == text_type::github_mark); assert (fpv5->url && fpv5->url->string () == "http://www.example.com/foo/"); @@ -753,14 +773,16 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv5->priority.comment == "Critical bug fixes, performance improvement."); - const char ch[] = R"DLM(1.2.4+1 + const char ch[] = R"DLM(**1.2.4+1** * applied patch for critical bug-219 * regenerated documentation -1.2.4 +**1.2.4** * test suite extended significantly)DLM"; - assert (fpv5->changes == ch); + assert (fpv5->changes && + fpv5->changes->text == ch && + fpv5->changes->type == text_type::github_mark); assert (fpv5->license_alternatives.size () == 2); assert (fpv5->license_alternatives[0].comment == @@ -778,12 +800,12 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv5->dependencies[0].comment == "Crashes with 1.1.0-2.3.0."); - assert (fpv5->dependencies[0][0] == + assert (fpv5->dependencies[0][0][0] == dep ("libmisc", version_constraint ( nullopt, true, dep_ver ("1.1"), true))); - assert (fpv5->dependencies[0][1] == + assert (fpv5->dependencies[0][1][0] == dep ("libmisc", version_constraint ( dep_ver ("2.3.0+0"), true, nullopt, true))); @@ -791,7 +813,7 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv5->dependencies[1].size () == 1); assert (fpv5->dependencies[1].comment.empty ()); - assert (fpv5->dependencies[1][0] == + assert (fpv5->dependencies[1][0][0] == dep ("libexp", version_constraint ( dep_ver ("1.0"), false, nullopt, true))); @@ -799,33 +821,48 @@ test_pkg_repos (const cstrings& loader_args, assert (fpv5->dependencies[2].size () == 2); assert (fpv5->dependencies[2].comment == "The newer the better."); - assert (fpv5->dependencies[2][0] == dep ("libstudxml", nullopt)); - assert (fpv5->dependencies[2][1] == dep ("libexpat", nullopt)); + assert (fpv5->dependencies[2][0][0] == dep ("libstudxml", nullopt)); + assert (fpv5->dependencies[2][1][0] == dep ("libexpat", nullopt)); requirements& fpvr5 (fpv5->requirements); - assert (fpvr5.size () == 4); + assert (fpvr5.size () == 8); - assert (fpvr5[0] == req_alts ({"linux", "windows", "macosx"})); - assert (!fpvr5[0].conditional); + assert (fpvr5[0][0][0] == "linux"); + assert (fpvr5[0][1][0] == "windows"); + assert (fpvr5[0][2][0] == "macosx"); assert (fpvr5[0].comment == "Symbian support is coming."); - assert (fpvr5[1] == req_alts ({"c++11"})); - assert (!fpvr5[1].conditional); + assert (fpvr5[1][0][0] == "c++11"); assert (fpvr5[1].comment.empty ()); - assert (fpvr5[2].empty ()); - assert 
(fpvr5[2].conditional); + assert (fpvr5[2][0][0] == ""); + assert (fpvr5[2][0].enable && *fpvr5[2][0].enable == ""); assert (fpvr5[2].comment == "libc++ standard library if using Clang on Mac OS X."); - assert (fpvr5[3] == req_alts ({"vc++ >= 12.0"})); - assert (fpvr5[3].conditional); - assert (fpvr5[3].comment == "Only if using VC++ on Windows."); + assert (fpvr5[3][0][0] == ""); + assert (!fpvr5[3][0].enable); + assert (fpvr5[3].comment == "X11 libs."); + + assert (fpvr5[4][0][0] == ""); + assert (fpvr5[4][0].enable && *fpvr5[4][0].enable == "$windows"); + assert (fpvr5[4].comment == "Only 64-bit."); + + assert (fpvr5[5][0][0] == "x86_64"); + assert (fpvr5[5][0].enable && *fpvr5[5][0].enable == ""); + assert (fpvr5[5].comment == "Only if on Windows."); + + assert (fpvr5[6][0][0] == "vc++ >= 12.0"); + assert (fpvr5[6][0].enable && *fpvr5[6][0].enable == "windows"); + assert (fpvr5[6].buildtime); + assert (fpvr5[6].comment == "Only if using VC++ on Windows."); + + assert (fpvr5[7][0][0] == "host"); assert (check_location (fpv5)); assert (fpv5->sha256sum && *fpv5->sha256sum == - "c02b6033107387e05f48aa62ee6498152c967deb0e91a62f1e618fe9fd1bc644"); + "ffce9d3e3ca9899d3fd6da1f6b93c07cce2c3f6b7004948b59757dae420f801b"); assert (fpv5->buildable); @@ -841,7 +878,7 @@ test_pkg_repos (const cstrings& loader_args, assert (epv->project == "mathLab"); assert (epv->summary == "The exponent"); assert (epv->keywords == labels ({"mathlab", "c++", "exponent"})); - assert (epv->description && *epv->description == + assert (epv->description && epv->description->text == "The exponent math function."); assert (epv->url && epv->url->string () == "http://exp.example.com"); assert (!epv->package_url); @@ -852,7 +889,7 @@ test_pkg_repos (const cstrings& loader_args, assert (epv->internal_repository.load () == mr); assert (epv->other_repositories.empty ()); assert (epv->priority == priority (priority::low)); - assert (epv->changes.empty ()); + assert (!epv->changes); assert (epv->license_alternatives.size () == 1); assert (epv->license_alternatives[0].size () == 1); @@ -860,10 +897,10 @@ test_pkg_repos (const cstrings& loader_args, assert (epv->dependencies.size () == 2); assert (epv->dependencies[0].size () == 1); - assert (epv->dependencies[0][0] == dep ("libmisc", nullopt)); + assert (epv->dependencies[0][0][0] == dep ("libmisc", nullopt)); assert (epv->dependencies[1].size () == 1); - assert (epv->dependencies[1][0] == + assert (epv->dependencies[1][0][0] == dep ("libpq", version_constraint ( dep_ver ("9.0.0"), false, nullopt, true))); @@ -884,7 +921,7 @@ test_pkg_repos (const cstrings& loader_args, assert (check_location (epv)); assert (epv->sha256sum && *epv->sha256sum == - "317c8c6f45d9dfdfdef3a823411920cecd51729c7c4f58f9a0b0bbd681c07bd6"); + "d90cfe583890cd0c05cdfc204e69dd3b986c2da49851f7a87fa0ca870788ff79"); // Verify libpq package version. 
// diff --git a/tests/load/loadtab b/tests/load/loadtab index b6ce020..e919a32 100644 --- a/tests/load/loadtab +++ b/tests/load/loadtab @@ -1,5 +1,5 @@ http://dev.cppget.org/1/stable stable cache:1/stable buildable:no http://dev.cppget.org/1/math math cache:1/math http://dev.cppget.org/1/testing testing cache:1/testing buildable:no -http://dev.cppget.org/1/signed signed cache:pkg/1/dev.cppget.org/signed fingerprint:C3:EC:12:53:AD:64:41:0E:35:3A:9A:A6:EE:57:BF:E6:05:40:42:2B:FF:AF:2C:B0:99:AD:E9:4A:9C:48:40:22 +http://dev.cppget.org/1/signed signed cache:pkg/1/dev.cppget.org/signed fingerprint:40:DD:B7:AD:88:87:C1:7A:11:94:45:22:2B:A2:E7:B3:F6:DE:92:6C:A0:DB:4B:EB:34:94:85:7A:C1:24:9A:E8 http://dev.cppget.org/1/unsigned unsigned cache:pkg/1/dev.cppget.org/unsigned fingerprint: diff --git a/tests/load/pkg/1/dev.cppget.org/signed/packages.manifest b/tests/load/pkg/1/dev.cppget.org/signed/packages.manifest index 584c490..67d157f 100644 --- a/tests/load/pkg/1/dev.cppget.org/signed/packages.manifest +++ b/tests/load/pkg/1/dev.cppget.org/signed/packages.manifest @@ -1,2 +1,2 @@ : 1 -sha256sum: ab258d8d475c9dde36591df5f9c73bced79919ddec33408ef871025cbeab01d5 +sha256sum: 22e2ee564571d9fc9ac2748764ab45a3d64e717226dc41936a2197ef961751ac diff --git a/tests/load/pkg/1/dev.cppget.org/signed/repositories.manifest b/tests/load/pkg/1/dev.cppget.org/signed/repositories.manifest index ecd89f7..fd7a3a0 100644 --- a/tests/load/pkg/1/dev.cppget.org/signed/repositories.manifest +++ b/tests/load/pkg/1/dev.cppget.org/signed/repositories.manifest @@ -1,35 +1,36 @@ : 1 # Local repository manifest (this repository). # -certificate: \ +certificate:\ -----BEGIN CERTIFICATE----- -MIIFOzCCAyOgAwIBAgIJAIsajMs6HOxHMA0GCSqGSIb3DQEBCwUAMDcxFzAVBgNV -BAoMDkNvZGUgU3ludGhlc2lzMRwwGgYDVQQDDBNuYW1lOmRldi5jcHBnZXQub3Jn -MB4XDTE3MDcwNzA2MzgzNFoXDTIyMDcwNjA2MzgzNFowNzEXMBUGA1UECgwOQ29k -ZSBTeW50aGVzaXMxHDAaBgNVBAMME25hbWU6ZGV2LmNwcGdldC5vcmcwggIiMA0G -CSqGSIb3DQEBAQUAA4ICDwAwggIKAoICAQDK8NqNbQckQpv9B3hBRjnTcpkgKq2e -1HOFLQJxgS1TS2QfqUTKePpd10LbDgXOhI2iycKCf7Zv/uf3RE+VyQ/BthNUvQ0O -bWPsEKo+DQOLPjqIaS+u2bmMXzCDjwjufbd9ruPY2PYRTBOsXgTL1+GGIQu0bP5u -i1mEGn95xuYhEJ4x1UUsVWV0l0D37orV/OaOVffPY3xhlQE++aiXLptof1gzM2D8 -lsQPvWLizrtDAHpiwb4oXQQbifDyeXj+qh7OdIqL10rxZZ/0Q0GqrTOyeSlXuo5i -C3MdNSlRmWNGqvPwpushFBQec04exXI3AjQZ/DUlMxtDx2xIqQMtaYOQ5iqm9426 -crgrUoXZG/5ePYTCmnSbpZVak9md44inJWqSESTL0+EfWuLdXop0QV7LZrIaV2pV -BJba0/jiS5mltR/ikiJ7gaP/bbfutJGGfzyk1PrvyehhK/snGUh6Nr0NMHozS+J+ -7QXdSEMjLXbmF5hBsvEfrGub+YSexEEODA34YnBIA453ph4CIo/3nTpDLrm3EkSF -1jV5vGhg3vzB6v+TIP9MXALm4/NUurn8I643KMoNSS9RCDuiqLnE8V1uCmSP8LR8 -OO7vxlmaM/OfqHehAALgsU/KFT1lgpAfHE2x5YBxT6s407DJJpaPkbHMiCNHScWQ -5ezqnH0UMNwsawIDAQABo0owSDAOBgNVHQ8BAf8EBAMCB4AwFgYDVR0lAQH/BAww -CgYIKwYBBQUHAwMwHgYDVR0RBBcwFYETaW5mb0BkZXYuY3BwZ2V0Lm9yZzANBgkq -hkiG9w0BAQsFAAOCAgEAlJnw+1TMlY8GGKwF0MBfH0bAW+AhkHd7E33DSFoU12LT -mx99QaiFd4r2dkE3dA4c8OJXK+vzhi2R+kWvzR+ZF4XEGfkM6Z6pZWUK8/5hymiq -pXbPQA21QHJh1RkN0xOxaPwjB4BWj2hk2aUqiuH/R4c2u6U4RfSwimBSbI+QSqF3 -Ho5eAuaezicxWKRQya70FpXGFn+vN6E9HZ8mlp+7eSV3A4eYKaGliqfoVHagYaFz -EM/SFueGhynAHtWzx21f3RhlPWJ1QZcLQayZT8980KJKWO70abKZdcuOTpYBDiYZ -SKcAu4fhCWuhkxlKltwxdRx1FqE/UZpoj2LJnw5pEzVmF9X30VC1f5F6YWicedJr -GCmdQhK3qPZKvNM7i19IBlizo5BKuVB6TsdxWgTTzmOZN6oEwsbVtGTxPek7jGJj -V0vi3zeCCaGJ5K+t6MahAT47CpA/+lJVLCGT6Clw9DvFEJmIr01bmD9uUGZwIgc3 -w88Hh4ap5/u7w07cNwYtncA7cKQCBG9vXi2cXpudBL6uLeM5rqYBSD5hj4zDjzpd -VglIFXohfyMfGh3kDPkQ4dw627S0NuxzmocE0jjdsXfQuLNeg+JRNEHB8QPwTC8X -EY1xZfPv9XzlVQxd7gLDKA8QbbKWpNe73XMoZXUyeyVuf5q1g+c6m1uPB5jJpdw= 
+MIIFRjCCAy6gAwIBAgIUc9xEjZAXCpw+00SGYGDTY0t2nLUwDQYJKoZIhvcNAQEL +BQAwNzEXMBUGA1UECgwOQ29kZSBTeW50aGVzaXMxHDAaBgNVBAMME25hbWU6ZGV2 +LmNwcGdldC5vcmcwHhcNMjExMTI0MTIxNjMwWhcNMzExMTIyMTIxNjMwWjA3MRcw +FQYDVQQKDA5Db2RlIFN5bnRoZXNpczEcMBoGA1UEAwwTbmFtZTpkZXYuY3BwZ2V0 +Lm9yZzCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAMrw2o1tByRCm/0H +eEFGOdNymSAqrZ7Uc4UtAnGBLVNLZB+pRMp4+l3XQtsOBc6EjaLJwoJ/tm/+5/dE +T5XJD8G2E1S9DQ5tY+wQqj4NA4s+OohpL67ZuYxfMIOPCO59t32u49jY9hFME6xe +BMvX4YYhC7Rs/m6LWYQaf3nG5iEQnjHVRSxVZXSXQPfuitX85o5V989jfGGVAT75 +qJcum2h/WDMzYPyWxA+9YuLOu0MAemLBvihdBBuJ8PJ5eP6qHs50iovXSvFln/RD +QaqtM7J5KVe6jmILcx01KVGZY0aq8/Cm6yEUFB5zTh7FcjcCNBn8NSUzG0PHbEip +Ay1pg5DmKqb3jbpyuCtShdkb/l49hMKadJullVqT2Z3jiKclapIRJMvT4R9a4t1e +inRBXstmshpXalUEltrT+OJLmaW1H+KSInuBo/9tt+60kYZ/PKTU+u/J6GEr+ycZ +SHo2vQ0wejNL4n7tBd1IQyMtduYXmEGy8R+sa5v5hJ7EQQ4MDfhicEgDjnemHgIi +j/edOkMuubcSRIXWNXm8aGDe/MHq/5Mg/0xcAubj81S6ufwjrjcoyg1JL1EIO6Ko +ucTxXW4KZI/wtHw47u/GWZoz85+od6EAAuCxT8oVPWWCkB8cTbHlgHFPqzjTsMkm +lo+RscyII0dJxZDl7OqcfRQw3CxrAgMBAAGjSjBIMA4GA1UdDwEB/wQEAwIHgDAW +BgNVHSUBAf8EDDAKBggrBgEFBQcDAzAeBgNVHREEFzAVgRNpbmZvQGRldi5jcHBn +ZXQub3JnMA0GCSqGSIb3DQEBCwUAA4ICAQBvVUHRUj9vR+QgDQGOtXBcOB1G/1xX +1gU6ivjP9UzZEmXmqukgx0aYcjxctAm7Yf0lsj2xOwmVPGcMC3tGYJG8yOvpW5dQ +8uOGmNqNaRq7AJN4gio6uc9dkipNhcBmVilY08yv67wGaIGhHt4kbDiF/3YAzsMR +/YfVQ3Kb4EyQpC/p7dsio2/m8gRb5lUr6K22fdPr2AfQbdNsAF6EMAfEfCDC9yAa +uzB5Dc+wpqWvsPp+ohiroJqC99hwmfzPMxAYTB2cmEqmVHGSwqoC9bn7fI59t9l6 +N2fynRhenLookYfy7jqb4a6O702fAHefGD6teHYbTpg6dwlVY/PLI+T1SPSisH3k +jS2WV03FK8aTNGe7E2RBxH2smca0Z1oaJ9RaluZ9HIRrw434m9+z01DL5w0EWRpC +qa62iYSgGFcxkXRwb8VeWPtGb9/CPXtVFDtW19mOeeHqr8/xVOCjR2MCKAsxLazl +yGQhAUu19n2y7vuj4FOEeJ1mwaaUyu2MPqZWZ3loM14muk/ZJfFsJRfdHg7+hSel +alE0ujce0to39AApGZRIwozut17hYjl5m3314+46KaNuxRmo2xw5wNl8UslEgQYC +fK3CY+6p9n64QJXnE+2+KGrDuYYXQP3TClHLv/IftgAlf6cZxu4RsNdvUsU15M0J +BGQjsz7h0bI3fw== -----END CERTIFICATE----- \ diff --git a/tests/load/pkg/1/dev.cppget.org/signed/signature.manifest b/tests/load/pkg/1/dev.cppget.org/signed/signature.manifest index 46cea28..1d0d8e0 100644 --- a/tests/load/pkg/1/dev.cppget.org/signed/signature.manifest +++ b/tests/load/pkg/1/dev.cppget.org/signed/signature.manifest @@ -1,13 +1,13 @@ : 1 -sha256sum: 99ad81bc39c66e4eeeae438c46b22e2ab7bba3b3406c01df8bb301abbe8149d8 -signature: \ -tzjAIL6BA9D4L1eflZeM04a0sJjCGi8exyS0EAGlRZXRcXROX83Q+xIMbX5uPpAhMxbOHRfcywRi -xM9UYjxVsy9sIrd9hwc4UBzm/BoFvvpQdjalSE4CN11MAbaFhiwWK0YkprTV8kTo2jYWfjTtT32Y -o9sRhP6+WE4H32905ln3hudBEdn5fzWzIDmTs/ip2Gac7KMgMFAdqffjVWyCvR50IhpZiHUnnxVB -NQ21NuckCS6ST94Sl/SXtFjMGJNjZGrKHYMRGfAysF++wl0FtWwxcbyNVMl1ED8ymu4cS8ydEvhO -TsB2ENhQv5tAf5GSOeEE3GW3F6Xae8/ohE0K+mWasLWlgszAuLp8376H0L2x5lkgItXbYuzYfFKz -6A5lUTfk8XV2ss+5DBop0AIm26m7rVO66s7w5Gpt3K8F7WLcQCQr4Ja/+AxE6YKtHbyqQ+PseNp1 -FSEdCZlZxdGnvA5/NWzpLZbsx0bCgKJ8RnMHrdOKsou/fbFHS9gv9AoFNVB1/l60aT1E28H0YsuN -nx+rArDQZvxZKRL/O/p0YfmKkiQO3ikutBn5COhnbjMputb/TMed01lu9cbXGy1dskWxRmsDCVTI -al15nByn6b3MI3Bd8cbvXwojaRMlombhDb+ybccMtAMmDqmX+7IFx0mkh1XLDKCgPtvpRN2xqDc= +sha256sum: ef6a4f9d8f82794c484acea48ab6bb25ccaba270aa77a87848c5e06de9e6bca5 +signature:\ +sshgL1u/38ny3mVchVoML7TRTpPTTJyUqHcK5/iAJifCHCe1D7zqFzHtEO0To/9PcdN6vyF+0FEs +IRkzVw8LnGkKWAeOwTftlO54zPC9swSh4BvuwMu+FEFa+3IcB/eecvRu0mPe+W2GZMoBBiCFJw2F +4tOn6a4qDDqq5SbRdJ63IJnT8sRsBLQLFfbIsuN35KFvdpgBG7QBkP+dy2G5bbEr4TJqBQgQqmC/ +FCmGGEAwJ51ZCivGjFJcJ9QK89CI0s+z755TCHdcQBOBvjXDjFoRqw1MHDF77ZNeyKv1QL5ivLsO +HqtV6YEYNiqBdpO7n14jwgWTBA8vZm5tCQ1G+vnie0q56Rde3nqKFpxHI3/uv59fhvXk9isikRRs 
+cI4JIMNWld7Z1si4jrTA2ix/7PZwWTPLZTkJ6c+RRlHuBkGUvrC5n2mA0WXWpewWH0BZqgzSv4AW +xFxLurFiIliYPMf4O7fvTtbARIsPzTFlZ0VU8QBOg7/pbxn20wHDwlQOBG8Y2y69KyMGdPysdCm6 +8QLjmIvYGy3osfK7bh9ZpjgsHe2Tx+rjwq87Zpo9H8D3DjVilC5WblaB+2YAydc/q4oZsKb/X2QQ +KXer6XF/Y8l7xLpiZLrUg2oA5j0uIFog6kX7sKeFv2vDRgAjBLf4lXLVKd5VQoNQTOxObAoKmys= \ diff --git a/tests/submit/data.testscript b/tests/submit/data.testscript index 875b4eb..b0fe8f0 100644 --- a/tests/submit/data.testscript +++ b/tests/submit/data.testscript @@ -3,12 +3,13 @@ # Pre-created submission data directory that will be copied by subsequent # tests and scope setup commands. The common approach will be that group -# scopes copy and modify the parent scope submission directory as required by -# the nested tests and scopes. Tests will also clone the parent scope -# submission data directory to optionally modify it, use and cleanup at the -# end. Note that configuration can not be shared between multiple submission -# handler processes. Also we need to make sure that submission data -# directories are not cloned while being used by submission handler scripts. +# scopes copy and modify the parent scope submission data directory as +# required by the nested tests and scopes. Tests will also clone the parent +# scope submission data directory to optionally modify it, use and cleanup at +# the end. Note that submission data directory can not be shared between +# multiple submission handler processes. Also we need to make sure that +# submission data directories are not cloned while being used by submission +# handler scripts. # data_dir = $regex.replace($path_search('*/request.manifest', $src_base), \ '(.*)/.*', \ @@ -25,10 +26,10 @@ root_data_dir = $~/$data_dir # The most commonly used submission data directory cloning command that copies # it from the parent scope working directory. # -clone_data = cp --no-cleanup -r ../$data_dir ./ -clone_data_clean = cp --no-cleanup -r ../$data_dir ./ &$data_dir/*** +clone_data = [cmdline] cp --no-cleanup -r ../$data_dir ./ +clone_data_clean = [cmdline] cp --no-cleanup -r ../$data_dir ./ &$data_dir/*** # Clones the original submission data directory. # -clone_root_data = cp --no-cleanup -r $root_data_dir ./ -clone_root_data_clean = cp --no-cleanup -r $root_data_dir ./ &$data_dir/*** +clone_root_data = [cmdline] cp --no-cleanup -r $root_data_dir ./ +clone_root_data_clean = [cmdline] cp --no-cleanup -r $root_data_dir ./ &$data_dir/*** diff --git a/tests/submit/submit-dir.testscript b/tests/submit/submit-dir.testscript index 81dc494..285710f 100644 --- a/tests/submit/submit-dir.testscript +++ b/tests/submit/submit-dir.testscript @@ -77,7 +77,16 @@ $* >>"EOO" : 1 status: 400 - message: archive is not a valid package \(run bpkg pkg-verify for details\) + message:\\ + package archive is not valid + + gzip: libhello-0.1.0.tar.gz: not in gzip format + tar: This does not look like a tar archive + tar: libhello-0.1.0/manifest: Not found in archive + tar: Exiting with failure status due to previous errors + info: libhello-0.1.0.tar.gz does not appear to be a bpkg package + info: run bpkg pkg-verify for details + \\ reference: $checksum EOO } diff --git a/tests/submit/submit-git.testscript b/tests/submit/submit-git.testscript index c0a31fe..5197afc 100644 --- a/tests/submit/submit-git.testscript +++ b/tests/submit/submit-git.testscript @@ -9,19 +9,23 @@ # # test.redirects += 2>! -g = git 2>! >&2 +g = [cmdline] git 2>! >&2 # Create and clone the reference repository. 
# root_ref = $~/ref.git root_ref_dir = $~/ref -clone_root_ref = cp --no-cleanup -r $root_ref ./ &ref.git/*** +clone_root_ref = [cmdline] cp --no-cleanup -r $root_ref ./ &ref.git/*** +mkdir --no-cleanup $root_ref +$g -C $root_ref init --bare &ref.git/*** -+$g clone $root_ref $root_ref_dir &ref/*** +# Convert specific warnings to infos as we expect them to appear. This, in particular, prevents bbot workers from setting the task result status to warning. # ++git clone $root_ref $root_ref_dir &ref/*** 2>&1 | \ + sed -e 's/warning: (.*cloned an empty repository.*)/info: \1/' >&2 2>! +cat <<EOI >=$root_ref_dir/submit.config.bash sections[alpha]=1/alpha @@ -31,6 +35,8 @@ clone_root_ref = cp --no-cleanup -r $root_ref ./ &ref.git/*** owners=owners EOI ++$g -C $root_ref_dir config user.name 'Test Script' ++$g -C $root_ref_dir config user.email 'testscript@example.com' +$g -C $root_ref_dir add '*' +$g -C $root_ref_dir commit -m 'Add submit.config.bash' +$g -C $root_ref_dir push @@ -42,7 +48,7 @@ root_tgt_url = "file:///$~/tgt.git" +cp -r $root_ref $root_tgt -clone_root_tgt = cp --no-cleanup -r $root_tgt ./ &tgt.git/*** +clone_root_tgt = [cmdline] cp --no-cleanup -r $root_tgt ./ &tgt.git/*** # Extract the package repository. # @@ -94,10 +100,10 @@ pkg_ctl="$prj_ctl/hello.git" : success : { - : ref-unknown-tgt-aquire-prj-pkg + : ref-unknown-tgt-acquire-prj-pkg : : Test that on the first package submission the project and package names - : ownership is successfully aquired. Authentication is enabled on both the + : ownership is successfully acquired. Authentication is enabled on both the : reference and target repos. : : Note that here we also test that --commiter-* options are picked up @@ -173,7 +179,7 @@ pkg_ctl="$prj_ctl/hello.git" : ref-disabled-tgt-aquire-prj-pkg : : Test that on the first package submit the project and package names - : ownership is successfully aquired. Authentication is disabled for the + : ownership is successfully acquired. Authentication is disabled for the : reference repo. : { @@ -190,6 +196,8 @@ pkg_ctl="$prj_ctl/hello.git" # owners=owners EOI + $g -C ref config user.name 'Test Script'; + $g -C ref config user.email 'testscript@example.com'; $g -C ref commit -am 'Disable ownership'; $g -C ref push; @@ -203,10 +211,10 @@ pkg_ctl="$prj_ctl/hello.git" EOO } - : ref-absent-tgt-aquire-prj-pkg + : ref-absent-tgt-acquire-prj-pkg : : Test that on the first package submit the project and package names - : ownership is successfully aquired. Reference repo is absent. + : ownership is successfully acquired. Reference repo is absent. : : Note that here we also pass the --result-url option.
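Note that git refuses to create commits when neither user.name nor user.email is configured, which is why these tests pin a placeholder identity per repository instead of relying on a global configuration. The equivalent commands outside the testscript are simply:

$ git -C ref config user.name 'Test Script'
$ git -C ref config user.email 'testscript@example.com'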
: @@ -251,6 +259,8 @@ pkg_ctl="$prj_ctl/hello.git" control: $pkg_ctl EOI + $g -C tgt config user.name 'Test Script'; + $g -C tgt config user.email 'testscript@example.com'; $g -C tgt add owners; $g -C tgt commit -m 'Add ownership info'; $g -C tgt push; @@ -288,6 +298,8 @@ pkg_ctl="$prj_ctl/hello.git" control: $prj_ctl/ EOI + $g -C ref config user.name 'Test Script'; + $g -C ref config user.email 'testscript@example.com'; $g -C ref add owners; $g -C ref commit -m 'Add ownership info'; $g -C ref push; @@ -305,6 +317,8 @@ pkg_ctl="$prj_ctl/hello.git" control: $pkg_ctl EOI + $g -C tgt config user.name 'Test Script'; + $g -C tgt config user.email 'testscript@example.com'; $g -C tgt add owners; $g -C tgt commit -m 'Add ownership info'; $g -C tgt push; @@ -357,6 +371,8 @@ pkg_ctl="$prj_ctl/hello.git" control: $pkg_ctl EOI + $g -C ref config user.name 'Test Script'; + $g -C ref config user.email 'testscript@example.com'; $g -C ref add owners; $g -C ref commit -m 'Add ownership info'; $g -C ref push; @@ -392,6 +408,8 @@ pkg_ctl="$prj_ctl/hello.git" control: $prj_ctl/ EOI + $g -C ref config user.name 'Test Script'; + $g -C ref config user.email 'testscript@example.com'; $g -C ref add owners; $g -C ref commit -m 'Add ownership info'; $g -C ref push; @@ -425,6 +443,8 @@ pkg_ctl="$prj_ctl/hello.git" # owners=owners EOI + $g -C tgt config user.name 'Test Script'; + $g -C tgt config user.email 'testscript@example.com'; $g -C tgt commit -am 'Disable ownership'; $g -C tgt push; @@ -436,6 +456,78 @@ pkg_ctl="$prj_ctl/hello.git" EOO } + : ref-absent-tgt-pkg-rev + : + : Test that the package revision is removed. + : + { + $clone_root_data; + + $clone_root_tgt; + $g clone tgt.git &tgt/***; + + cat <<EOI >=tgt/submit.config.bash; + sections['*']=1/alpha + EOI + + $g -C tgt config user.name 'Test Script'; + $g -C tgt config user.email 'testscript@example.com'; + $g -C tgt commit -am 'Add config and archive'; + $g -C tgt push; + + # Initial submission. + # + $* "file:///$~/tgt.git" $data_dir >>"EOO"; + : 1 + status: 200 + message: package submission is queued: libhello/0.1.0 + reference: $checksum + EOO + + $g -C tgt pull; + + test -f tgt/1/alpha/hello/libhello-0.1.0.tar.gz; + + # Revision submission. + # + # Here we test that the handler removes the previous revision. + # + $clone_root_data_clean; + + tar -xf $~/$data_dir/libhello-0.1.0.tar.gz; + sed -i -e 's/(version: 0.1.0)/\1+1/' libhello-0.1.0/manifest; + mv libhello-0.1.0 libhello-0.1.0+1; + tar cfz $~/$data_dir/libhello-0.1.0+1.tar.gz libhello-0.1.0+1; + rm -r libhello-0.1.0+1; + rm $~/$data_dir/libhello-0.1.0.tar.gz; + sed -i -e 's/(archive: libhello-0.1.0)(.tar.gz)/\1+1\2/' $data_dir/request.manifest; + + $* "file:///$~/tgt.git" $data_dir >>"EOO" &tgt/1/alpha/hello/libhello-0.1.0+1.tar.gz; + : 1 + status: 200 + message: package submission is queued: libhello/0.1.0+1 + reference: $checksum + EOO + + $g -C tgt pull; + + test -f tgt/1/alpha/hello/libhello-0.1.0.tar.gz == 1; + test -f tgt/1/alpha/hello/libhello-0.1.0+1.tar.gz; + + # While at it, test the older revision submission. 
+ # + $clone_root_data_clean; + + $* "file:///$~/tgt.git" $data_dir >>"EOO"; + : 1 + status: 422 + message: newer revision libhello/0.1.0+1 is present + reference: $checksum + EOO + + test -f tgt/1/alpha/hello/libhello-0.1.0+1.tar.gz + } + : section-fallback : { @@ -454,6 +546,8 @@ pkg_ctl="$prj_ctl/hello.git" owners=owners EOI + $g -C tgt config user.name 'Test Script'; + $g -C tgt config user.email 'testscript@example.com'; $g -C tgt commit -am 'Add section name fallback'; $g -C tgt push; @@ -571,6 +665,8 @@ pkg_ctl="$prj_ctl/hello.git" mkdir -p ref/1/alpha/hello; cp $data_dir/libhello-0.1.0.tar.gz ref/1/alpha/hello/; + $g -C ref config user.name 'Test Script'; + $g -C ref config user.email 'testscript@example.com'; $g -C ref add 1/; $g -C ref commit -m 'Add libhello-0.1.0.tar.gz'; $g -C ref push; @@ -612,6 +708,8 @@ pkg_ctl="$prj_ctl/hello.git" control: $prj_ctl/foo EOI + $g -C ref config user.name 'Test Script'; + $g -C ref config user.email 'testscript@example.com'; $g -C ref add owners; $g -C ref commit -m 'Add ownership info'; $g -C ref push; @@ -644,6 +742,8 @@ pkg_ctl="$prj_ctl/hello.git" control: https://example.com/foo EOI + $g -C ref config user.name 'Test Script'; + $g -C ref config user.email 'testscript@example.com'; $g -C ref add owners/hello/project-owner.manifest; $g -C ref commit -m 'Add project ownership info'; $g -C ref push; @@ -685,6 +785,8 @@ pkg_ctl="$prj_ctl/hello.git" control: $prj_ctl/foo EOI + $g -C ref config user.name 'Test Script'; + $g -C ref config user.email 'testscript@example.com'; $g -C ref add owners; $g -C ref commit -m 'Add ownership info'; $g -C ref push; @@ -711,6 +813,8 @@ pkg_ctl="$prj_ctl/hello.git" mkdir -p tgt/1/alpha/hello; cp $data_dir/libhello-0.1.0.tar.gz tgt/1/alpha/hello/; + $g -C tgt config user.name 'Test Script'; + $g -C tgt config user.email 'testscript@example.com'; $g -C tgt add 1/; $g -C tgt commit -m 'Add libhello-0.1.0.tar.gz'; $g -C tgt push; @@ -751,6 +855,8 @@ pkg_ctl="$prj_ctl/hello.git" control: $prj_ctl/foo EOI + $g -C tgt config user.name 'Test Script'; + $g -C tgt config user.email 'testscript@example.com'; $g -C tgt add owners; $g -C tgt commit -m 'Add ownership info'; $g -C tgt push; @@ -783,6 +889,8 @@ pkg_ctl="$prj_ctl/hello.git" # owners=owners EOI + $g -C tgt config user.name 'Test Script'; + $g -C tgt config user.email 'testscript@example.com'; $g -C tgt commit -am 'Disable ownership'; $g -C tgt push; @@ -816,6 +924,8 @@ pkg_ctl="$prj_ctl/hello.git" control: $prj_ctl/ EOI + $g -C ref config user.name 'Test Script'; + $g -C ref config user.email 'testscript@example.com'; $g -C ref add owners; $g -C ref commit -m 'Add project ownership info'; $g -C ref push; @@ -831,6 +941,8 @@ pkg_ctl="$prj_ctl/hello.git" # owners=owners EOI + $g -C tgt config user.name 'Test Script'; + $g -C tgt config user.email 'testscript@example.com'; $g -C tgt commit -am 'Disable ownership'; $g -C tgt push; @@ -871,6 +983,8 @@ pkg_ctl="$prj_ctl/hello.git" control: $prj_ctl/foo EOI + $g -C tgt config user.name 'Test Script'; + $g -C tgt config user.email 'testscript@example.com'; $g -C tgt add owners; $g -C tgt commit -m 'Add ownership info'; $g -C tgt push; diff --git a/tests/submit/submit-pub.testscript b/tests/submit/submit-pub.testscript index b73d108..8c042a7 100644 --- a/tests/submit/submit-pub.testscript +++ b/tests/submit/submit-pub.testscript @@ -17,7 +17,7 @@ root_rep=$~/pkg-1 +echo ": 1" >=$root_rep/1/repositories.manifest +bpkg rep-create $root_rep/1 2>! 
&$root_rep/1/packages.manifest -clone_root_rep = cp --no-cleanup -r $root_rep ./ &pkg-1/*** &?pkg.lock +clone_root_rep = [cmdline] cp --no-cleanup -r $root_rep ./ &pkg-1/*** &?pkg.lock : args { @@ -76,7 +76,7 @@ clone_root_rep = cp --no-cleanup -r $root_rep ./ &pkg-1/*** &?pkg.lock : for-real : - : Here we create the (fake) package revision which is expected to be removed + : Here we also create the package revision which is expected to be removed : by the handler. : { @@ -84,9 +84,8 @@ clone_root_rep = cp --no-cleanup -r $root_rep ./ &pkg-1/*** &?pkg.lock $clone_root_rep; ln -s pkg-1 pkg; - mkdir --no-cleanup pkg-1/1/prj; - touch --no-cleanup pkg-1/1/prj/libhello-0.1.0+1.tar.gz; - + # Initial submission. + # $* $~/pkg $~/$data_dir &!pkg-1/*** &pkg-*/*** >>"EOO"; : 1 status: 200 @@ -94,16 +93,55 @@ clone_root_rep = cp --no-cleanup -r $root_rep ./ &pkg-1/*** &?pkg.lock reference: $checksum EOO + test -f pkg/1/hello/libhello-0.1.0.tar.gz; + # While at it, test the duplicate submission. # $clone_root_data_clean; - $* $~/pkg $~/$data_dir >>"EOO" + $* $~/pkg $~/$data_dir >>"EOO"; : 1 status: 422 message: duplicate submission reference: $checksum EOO + + test -f pkg/1/hello/libhello-0.1.0.tar.gz; + + # Revision submission. + # + # Here we test that the handler removes the previous revision. + # + tar -xf $~/$data_dir/libhello-0.1.0.tar.gz; + sed -i -e 's/(version: 0.1.0)/\1+1/' libhello-0.1.0/manifest; + mv libhello-0.1.0 libhello-0.1.0+1; + tar cfz $~/$data_dir/libhello-0.1.0+1.tar.gz libhello-0.1.0+1; + rm -r libhello-0.1.0+1; + rm $~/$data_dir/libhello-0.1.0.tar.gz; + sed -i -e 's/(archive: libhello-0.1.0)(.tar.gz)/\1+1\2/' $data_dir/request.manifest; + + $* $~/pkg $~/$data_dir >>"EOO"; + : 1 + status: 200 + message: package is published: libhello/0.1.0+1 + reference: $checksum + EOO + + test -f pkg/1/hello/libhello-0.1.0.tar.gz == 1; + test -f pkg/1/hello/libhello-0.1.0+1.tar.gz; + + # While at it, test the older revision submission. 
+ # + $clone_root_data_clean; + + $* $~/pkg $~/$data_dir >>"EOO"; + : 1 + status: 422 + message: newer revision libhello/0.1.0+1 is present + reference: $checksum + EOO + + test -f pkg/1/hello/libhello-0.1.0+1.tar.gz } : result-url @@ -144,7 +182,7 @@ clone_root_rep = cp --no-cleanup -r $root_rep ./ &pkg-1/*** &?pkg.lock $* $~/brep-loader $~/pkg $~/$data_dir >>~"%EOO%" : 1 status: 400 - message: \\ + message:\\ submitted archive is not a valid package %.+ \\ @@ -165,7 +203,7 @@ clone_root_rep = cp --no-cleanup -r $root_rep ./ &pkg-1/*** &?pkg.lock $* $~/brep-loader $~/pkg $~/$data_dir >>~"%EOO%" : 1 status: 400 - message: \\ + message:\\ unable to add package to repository %.+ \\ diff --git a/tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/archive.tar b/tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/archive.tar Binary files differ new file mode 100644 index 0000000..d3b5b17 --- /dev/null +++ b/tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/archive.tar diff --git a/tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/request.manifest b/tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/request.manifest new file mode 100644 index 0000000..c59303b --- /dev/null +++ b/tests/upload/545f1f78-63ea-4acf-bcb8-37b2513a78c8/request.manifest @@ -0,0 +1,22 @@ +: 1 +id: 545f1f78-63ea-4acf-bcb8-37b2513a78c8 +session: 4d4c8b36-56c5-42e0-91d1-58bfd1228002/libhello/1.1.0+10/x86_64-linux-\ gnu/linux_fedora_37-gcc_12.2-bindist/default/queue/0.3.0/1683122318585120886 +instance: archive +archive: archive.tar +sha256sum: 4fa79e4e11a03db321514800806a2b0a3a8eef9375dc22963f4e5a16764c0d5e +timestamp: 2023-05-08T09:18:20Z +name: libhello +version: 1.1.0+10 +project: hello +target-config: linux_fedora_37-gcc_12.2-bindist +package-config: default +target: x86_64-linux-gnu +tenant: 4d4c8b36-56c5-42e0-91d1-58bfd1228002 +toolchain-name: queue +toolchain-version: 0.3.0 +repository-name: git:build2.org/var/scm/hello/libhello#master@7f62790591b66bd\ a248140013bdbd12bf078c2a2 +machine-name: linux_fedora_37-bindist-gcc_12.2 +machine-summary: Fedora Linux 37 with system-default GCC 12.2.1 and bpkg-pkg-\ bindist prerequisites diff --git a/tests/upload/buildfile b/tests/upload/buildfile new file mode 100644 index 0000000..32d7720 --- /dev/null +++ b/tests/upload/buildfile @@ -0,0 +1,13 @@ +# file : tests/upload/buildfile +# license : MIT; see accompanying LICENSE file + +dir = ../../brep/handler/upload/ + +include $dir + +commons = data + +./: testscript{* -{$commons}} common_testscript{$commons} {*/ -test/}{**} \ $dir/exe{brep-upload-bindist} + +testscript{upload-bindist}@./: test = $out_base/$dir/brep-upload-bindist diff --git a/tests/upload/data.testscript b/tests/upload/data.testscript new file mode 100644 index 0000000..3d3eede --- /dev/null +++ b/tests/upload/data.testscript @@ -0,0 +1,34 @@ +# file : tests/upload/data.testscript +# license : MIT; see accompanying LICENSE file + +# Pre-created upload data directory that will be copied by subsequent tests +# and scope setup commands. The common approach is that group scopes copy and +# modify the parent scope upload data directory as required by the nested +# tests and scopes. Tests also clone the parent scope upload data directory, +# optionally modify it, use it, and clean it up at the end. Note that an +# upload data directory cannot be shared between multiple upload handler +# processes. We also need to make sure that upload data directories are not +# cloned while being used by upload handler scripts.
+# +data_dir = $regex.replace($path_search('*/request.manifest', $src_base), \ + '(.*)/.*', \ + '\1') + +request_id = "$data_dir" + +# Copy the original upload data directory to the root scope. +# ++cp -r $src_base/$data_dir ./ + +root_data_dir = $~/$data_dir + +# The most commonly used upload data directory cloning command that copies it +# from the parent scope working directory. +# +clone_data = [cmdline] cp --no-cleanup -r ../$data_dir ./ +clone_data_clean = [cmdline] cp --no-cleanup -r ../$data_dir ./ &$data_dir/*** + +# Clones the original upload data directory. +# +clone_root_data = [cmdline] cp --no-cleanup -r $root_data_dir ./ +clone_root_data_clean = [cmdline] cp --no-cleanup -r $root_data_dir ./ &$data_dir/*** diff --git a/tests/upload/upload-bindist.testscript b/tests/upload/upload-bindist.testscript new file mode 100644 index 0000000..d43c567 --- /dev/null +++ b/tests/upload/upload-bindist.testscript @@ -0,0 +1,126 @@ +# file : tests/upload/upload-bindist.testscript +# license : MIT; see accompanying LICENSE file + +.include data.testscript + +: args +{ + : no-dir + : + $* 2>>~%EOE% != 0 + %\[.+\] \[brep:error\] \[ref \] \[brep-upload-bindist\]: usage: .+brep-upload-bindist \[<options>\] <root> <dir>% + EOE + + : no-root + : + $* $~/dir 2>>~%EOE% != 0 + %\[.+\] \[brep:error\] \[ref dir\] \[brep-upload-bindist\]: usage: .+brep-upload-bindist \[<options>\] <root> <dir>% + EOE + + : root-not-exist + : + : While at it, also test that the trailing slash is stripped from the + : directory paths. + : + $* $~/root/ $~/dir/ 2>>~%EOE% != 0 + %\[.+\] \[brep:error\] \[ref dir\] \[brep-upload-bindist\]: '.+root' does not exist or is not a directory% + EOE + + : data-not-exist + : + mkdir root; + $* $~/root $~/dir 2>>~%EOE% != 0 + %\[.+\] \[brep:error\] \[ref dir\] \[brep-upload-bindist\]: '.+dir' does not exist or is not a directory% + EOE +} + +: success +: +{ + mkdir --no-cleanup bindist-root/ &bindist-root/***; + + # Test the first upload. + # + $clone_data; + + $* $~/bindist-root/ $~/$data_dir >>"EOO"; + : 1 + status: 200 + message: binary distribution packages are published + reference: $request_id + EOO + + timestamp = '2023-05-08T09:18:20Z'; + tenant = '4d4c8b36-56c5-42e0-91d1-58bfd1228002'; + dir = [dir_path] bindist-root/$tenant/archive/fedora35/hello/libhello/1.1.0+10/; + + test -f $dir/default/libhello-1.0.0+10.tar.xz; + test -f $dir/default-$timestamp/libhello-1.0.0+10.tar.xz; + + # Repeat the upload using the same timestamp to make sure that we properly + # handle this situation (by adding the retry number as a suffix to the + # package configuration directory name). + # + $clone_data; + + $* $~/bindist-root/ $~/$data_dir >>"EOO" &bindist-root/***; + : 1 + status: 200 + message: binary distribution packages are published + reference: $request_id + EOO + + test -f $dir/default/libhello-1.0.0+10.tar.xz; + test -f $dir/default-$timestamp-0/libhello-1.0.0+10.tar.xz; + test -d $dir/default-$timestamp/ != 0; + + # Test the second upload without --keep-previous option. 
+ # + data_dir2 = 22222222-2222-2222-2222-222222222222; + request_id2 = $data_dir2; + timestamp2 = '2023-05-09T09:18:20Z'; + + cp --no-cleanup -r ../$data_dir ./$data_dir2; + + sed -i -e "s%^\(id:\) .+\$%\\1 $request_id2%" \ + $data_dir2/request.manifest; + + sed -i -e "s%^\(timestamp:\) .+\$%\\1 $timestamp2%" \ + $data_dir2/request.manifest; + + $* $~/bindist-root/ $~/$data_dir2 >>"EOO"; + : 1 + status: 200 + message: binary distribution packages are published + reference: $request_id2 + EOO + + test -f $dir/default/libhello-1.0.0+10.tar.xz; + test -f $dir/default-$timestamp2/libhello-1.0.0+10.tar.xz; + test -d $dir/default-$timestamp-0/ != 0; + + # Test the third upload with --keep-previous option. + # + data_dir3 = 33333333-3333-3333-3333-333333333333; + request_id3 = $data_dir3; + timestamp3 = '2023-05-10T09:18:20Z'; + + cp --no-cleanup -r ../$data_dir ./$data_dir3; + + sed -i -e "s%^\(id:\) .+\$%\\1 $request_id3%" \ + $data_dir3/request.manifest; + + sed -i -e "s%^\(timestamp:\) .+\$%\\1 $timestamp3%" \ + $data_dir3/request.manifest; + + $* --keep-previous $~/bindist-root/ $~/$data_dir3 >>"EOO"; + : 1 + status: 200 + message: binary distribution packages are published + reference: $request_id3 + EOO + + test -f $dir/default/libhello-1.0.0+10.tar.xz; + test -f $dir/default-$timestamp3/libhello-1.0.0+10.tar.xz; + test -f $dir/default-$timestamp2/libhello-1.0.0+10.tar.xz } diff --git a/tests/web/xhtml/driver.cxx b/tests/web/xhtml/driver.cxx index a0135de..3393eb3 100644 --- a/tests/web/xhtml/driver.cxx +++ b/tests/web/xhtml/driver.cxx @@ -8,6 +8,9 @@ #include <web/xhtml/serialization.hxx> +#undef NDEBUG +#include <cassert> + using namespace std; using namespace xml; diff --git a/web/server/apache/request.cxx b/web/server/apache/request.cxx index a413081..f6e9f15 100644 --- a/web/server/apache/request.cxx +++ b/web/server/apache/request.cxx @@ -34,9 +34,9 @@ #include <streambuf> #include <algorithm> // min() -#include <libbutl/utility.mxx> // icasecmp() -#include <libbutl/optional.mxx> -#include <libbutl/timestamp.mxx> +#include <libbutl/utility.hxx> // icasecmp() +#include <libbutl/optional.hxx> +#include <libbutl/timestamp.hxx> #include <web/server/mime-url-encoding.hxx> @@ -789,7 +789,7 @@ namespace web if (is != nullptr) { if (r != nullptr) - throw invalid_argument ("multiple uploads for '" + name + "'"); + throw invalid_argument ("multiple uploads for '" + name + '\''); r = is; } diff --git a/web/server/apache/service.cxx b/web/server/apache/service.cxx index 9fb23da..6d02c1a 100644 --- a/web/server/apache/service.cxx +++ b/web/server/apache/service.cxx @@ -15,8 +15,8 @@ #include <cstring> // strlen(), strcmp() #include <exception> -#include <libbutl/utility.mxx> // function_cast() -#include <libbutl/optional.mxx> +#include <libbutl/utility.hxx> // function_cast() +#include <libbutl/optional.hxx> #include <web/server/module.hxx> #include <web/server/apache/log.hxx> @@ -47,7 +47,7 @@ namespace web for (const auto& o: od) { auto i ( - option_descriptions_.emplace (name_ + "-" + o.first, o.second)); + option_descriptions_.emplace (name_ + '-' + o.first, o.second)); assert (i.second); *d++ = diff --git a/web/server/apache/service.txx b/web/server/apache/service.txx index 1b16d0b..9e1037b 100644 --- a/web/server/apache/service.txx +++ b/web/server/apache/service.txx @@ -8,7 +8,7 @@ #include <utility> // move() #include <exception> -#include <libbutl/utility.mxx> // operator<<(ostream, exception) +#include <libbutl/utility.hxx> // operator<<(ostream, exception) namespace web { diff --git
a/web/server/mime-url-encoding.cxx b/web/server/mime-url-encoding.cxx index fd1e4e8..fd09cd2 100644 --- a/web/server/mime-url-encoding.cxx +++ b/web/server/mime-url-encoding.cxx @@ -6,7 +6,7 @@ #include <string> #include <iterator> // back_inserter -#include <libbutl/url.mxx> +#include <libbutl/url.hxx> using namespace std; using namespace butl; diff --git a/web/server/module.hxx b/web/server/module.hxx index beda73c..20f6217 100644 --- a/web/server/module.hxx +++ b/web/server/module.hxx @@ -9,13 +9,14 @@ #include <vector> #include <iosfwd> #include <chrono> +#include <memory> // enable_shared_from_this #include <cstdint> // uint16_t #include <cstddef> // size_t #include <utility> // move() #include <stdexcept> // runtime_error -#include <libbutl/path.mxx> -#include <libbutl/optional.mxx> +#include <libbutl/path.hxx> +#include <libbutl/optional.hxx> namespace web { @@ -236,7 +237,7 @@ namespace web // directories (e.g., apache/) if you need to see the code that // does this. // - class handler + class handler: public std::enable_shared_from_this<handler> { public: virtual diff --git a/www/builds-body.css b/www/builds-body.css index 6c27b09..b5275c3 100644 --- a/www/builds-body.css +++ b/www/builds-body.css @@ -37,16 +37,17 @@ .build th, #filter th { - width: 7.0em; + width: 7.4em; } .build tr.name td .value, .build tr.version td .value, .build tr.toolchain td .value, -.build tr.config td .value, -.build tr.machine td .value, .build tr.target td .value, +.build tr.tgt-config td .value, +.build tr.pkg-config td .value, .build tr.timestamp td .value, +.build tr.login td .value, .build tr.result td .value, .build tr.tenant td .value { diff --git a/www/ci.xhtml b/www/ci.xhtml index 185f08b..573cca7 100644 --- a/www/ci.xhtml +++ b/www/ci.xhtml @@ -13,6 +13,10 @@ <th>package</th> <td><input type="text" name="package"/></td> </tr> + <tr> + <th>interactive</th> + <td><input type="text" name="interactive"/></td> + </tr> </tbody> </table> <table class="form-table"> diff --git a/www/package-details-body.css b/www/package-details-body.css index 940b493..1083c54 100644 --- a/www/package-details-body.css +++ b/www/package-details-body.css @@ -184,7 +184,6 @@ table.version th {width: 7.6em;} table.version tr.version td .value, table.version tr.priority td .value, -table.version tr.repository td .value, table.version tr.depends td .value, table.version tr.requires td .value { diff --git a/www/package-version-details-body.css b/www/package-version-details-body.css index 772f9eb..1c41ed5 100644 --- a/www/package-version-details-body.css +++ b/www/package-version-details-body.css @@ -145,7 +145,6 @@ h1, h2, h3 #version tr.version td .value, #version tr.priority td .value, -#version tr.repository td .value { /* <code> style. */ font-family: monospace; @@ -244,10 +243,15 @@ h1, h2, h3 font-size: 0.94em; } +/* + * Tests, examples, and benchmarks tables. 
+ */ #tests {margin-top: .4em; margin-bottom: 1em;} +#tests th {width: 2.8em; text-align: center;} +#tests th:after{content: "";} #tests tr:nth-child(even) td {background-color: rgba(0, 0, 0, 0.07);} -#tests td {margin-left: 2.8em; padding-left: .4em;} +#tests td {padding-left: .4em;} #tests tr.tests td .value { @@ -257,9 +261,11 @@ h1, h2, h3 #examples {margin-top: .4em; margin-bottom: 1em;} +#examples th {width: 2.8em; text-align: center;} +#examples th:after{content: "";} #examples tr:nth-child(even) td {background-color: rgba(0, 0, 0, 0.07);} -#examples td {margin-left: 2.8em; padding-left: .4em;} +#examples td {padding-left: .4em;} #examples tr.examples td .value { @@ -269,9 +275,11 @@ h1, h2, h3 #benchmarks {margin-top: .4em; margin-bottom: 1em;} +#benchmarks th {width: 2.8em; text-align: center;} +#benchmarks th:after{content: "";} #benchmarks tr:nth-child(even) td {background-color: rgba(0, 0, 0, 0.07);} -#benchmarks td {margin-left: 2.8em; padding-left: .4em;} +#benchmarks td {padding-left: .4em;} #benchmarks tr.benchmarks td .value { @@ -281,6 +289,54 @@ h1, h2, h3 } /* + * Binaries. + */ +#binaries +{ + width: calc(100% + .8rem); + margin-left: -.4rem; + border: none; + border-spacing: 0 0; + + margin-top: .4em; + margin-bottom: 1em; + border-collapse: collapse; +} + +#binaries tr:nth-child(even) td {background-color: rgba(0, 0, 0, 0.07);} + +#binaries td +{ + padding: .08em .4rem; +} + +#binaries td:last-child {width: 100%;} + +#binaries td .value +{ + display: inline-block; + white-space: nowrap; + + /* <code> style. */ + font-family: monospace; + font-size: 0.94em; +} + +/* Re-styling for full page variant. */ + +.full #binaries td +{ + vertical-align: top; +} + +.full #binaries td .value +{ + margin-right: 1em; + + white-space: normal; +} + +/* * Builds. */ #builds {margin-bottom: 1em;} @@ -297,12 +353,15 @@ h1, h2, h3 .build th { - width: 7.0em; + width: 7.4em; } .build tr.toolchain td .value, -.build tr.config td .value, +.build tr.target td .value, +.build tr.tgt-config td .value, +.build tr.pkg-config td .value, .build tr.timestamp td .value, +.build tr.login td .value, .build tr.result td .value { /* <code> style. */ @@ -319,13 +378,51 @@ h1, h2, h3 .build .abnormal {color: #ff0000;} /* - * Changes. + * Changes (plain text). * * This is a <pre> block that fits lines up to 80 characters long and * wraps longer ones. */ -#changes +#changes.plain pre { font-size: 0.85em; - margin: .5em 0 .5em 0; } + +/* + * Changes (Markdown). + * + * These are descendants of the <div> block containing the result of + * Markdown-to-HTML translation. + * + * Note that the Markdown code blocks are translated into the + * <pre><code>...</code></pre> element construct. + */ +#changes.markdown h1, +#changes.markdown h2 +{ + white-space: normal; +} + +/* code-box.css */ +#changes.markdown :not(pre) > code +{ + background-color: rgba(0, 0, 0, 0.05); + border-radius: 0.2em; + padding: .2em .32em .18em .32em; +} + +/* pre-box.css */ +#changes.markdown pre +{ + background-color: rgba(0, 0, 0, 0.05); + border-radius: 0.2em; + padding: .8em .4em .8em .4em; + margin: 2em -.4em 2em -.4em; /* Use paddings of #content.
*/ +} + +#changes.markdown pre > code +{ + font-size: inherit; +} + +#changes.markdown .error {color: #ff0000;} diff --git a/www/packages-body.css b/www/packages-body.css index 79911d4..986308f 100644 --- a/www/packages-body.css +++ b/www/packages-body.css @@ -33,7 +33,6 @@ .package tr.name td .value, .package tr.depends td .value, -.package tr.requires td .value, .package tr.tenant td .value { /* <code> style. */
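Note: the #changes.plain and #changes.markdown rules added to package-version-details-body.css above assume that the page emits the package changes text inside a <div id="changes"> element whose class reflects the changes text type. The following XHTML sketch only illustrates markup that these selectors would match; the exact element structure brep generates is an assumption here, not taken from this commit:

<!-- Hypothetical result of the Markdown-to-HTML translation
     (Markdown code blocks become <pre><code>...</code></pre>). -->
<div id="changes" class="markdown">
  <h1>Changes</h1>
  <p>Inline <code>code</code> picks up the code-box style.</p>
  <pre><code>block code picks up the pre-box style</code></pre>
  <p class="error">a translation error would be rendered in red</p>
</div>

<!-- Hypothetical plain text variant: a <pre> block matched by the
     #changes.plain pre selector. -->
<div id="changes" class="plain">
  <pre>plain text changes, wrapped at 80 characters</pre>
</div>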